Dataset records (one per source file). Schema: text (string, 12 to 1.05M chars), repo_name (string, 5 to 86 chars), path (string, 4 to 191 chars), language (1 class), license (15 classes), size (int32, 12 to 1.05M), keyword (list, 1 to 23 items), text_hash (string, 64 chars).
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

import pretend
from cloudevents.http import from_http

import functions_framework


def test_http_view_func_wrapper():
    function = pretend.call_recorder(lambda request: "Hello")
    request_object = pretend.stub()
    local_proxy = pretend.stub(_get_current_object=lambda: request_object)

    view_func = functions_framework._http_view_func_wrapper(function, local_proxy)
    view_func("/some/path")

    assert function.calls == [pretend.call(request_object)]


def test_event_view_func_wrapper(monkeypatch):
    data = pretend.stub()
    json = {
        "context": {
            "eventId": "some-eventId",
            "timestamp": "some-timestamp",
            "eventType": "some-eventType",
            "resource": "some-resource",
        },
        "data": data,
    }
    request = pretend.stub(headers={}, get_json=lambda: json)

    context_stub = pretend.stub()
    context_class = pretend.call_recorder(lambda *a, **kw: context_stub)
    monkeypatch.setattr(functions_framework, "Context", context_class)
    function = pretend.call_recorder(lambda data, context: "Hello")

    view_func = functions_framework._event_view_func_wrapper(function, request)
    view_func("/some/path")

    assert function.calls == [pretend.call(data, context_stub)]
    assert context_class.calls == [
        pretend.call(
            eventId="some-eventId",
            timestamp="some-timestamp",
            eventType="some-eventType",
            resource="some-resource",
        )
    ]


def test_run_cloud_event():
    headers = {"Content-Type": "application/cloudevents+json"}
    data = json.dumps(
        {
            "source": "from-galaxy-far-far-away",
            "type": "cloud_event.greet.you",
            "specversion": "1.0",
            "id": "f6a65fcd-eed2-429d-9f71-ec0663d83025",
            "time": "2020-08-13T02:12:14.946587+00:00",
            "data": {"name": "john"},
        }
    )
    request = pretend.stub(headers=headers, get_data=lambda: data)

    function = pretend.call_recorder(lambda cloud_event: "hello")
    functions_framework._run_cloud_event(function, request)

    expected_cloud_event = from_http(request.headers, request.get_data())
    assert function.calls == [pretend.call(expected_cloud_event)]


def test_cloud_event_view_func_wrapper():
    headers = {"Content-Type": "application/cloudevents+json"}
    data = json.dumps(
        {
            "source": "from-galaxy-far-far-away",
            "type": "cloud_event.greet.you",
            "specversion": "1.0",
            "id": "f6a65fcd-eed2-429d-9f71-ec0663d83025",
            "time": "2020-08-13T02:12:14.946587+00:00",
            "data": {"name": "john"},
        }
    )
    request = pretend.stub(headers=headers, get_data=lambda: data)

    event = from_http(request.headers, request.get_data())
    function = pretend.call_recorder(lambda cloud_event: cloud_event)
    view_func = functions_framework._cloud_event_view_func_wrapper(function, request)
    view_func("/some/path")

    assert function.calls == [pretend.call(event)]


def test_binary_cloud_event_view_func_wrapper():
    headers = {
        "ce-specversion": "1.0",
        "ce-source": "from-galaxy-far-far-away",
        "ce-type": "cloud_event.greet.you",
        "ce-id": "f6a65fcd-eed2-429d-9f71-ec0663d83025",
        "ce-time": "2020-08-13T02:12:14.946587+00:00",
    }
    data = json.dumps({"name": "john"})
    request = pretend.stub(headers=headers, get_data=lambda: data)

    event = from_http(request.headers, request.get_data())
    function = pretend.call_recorder(lambda cloud_event: cloud_event)
    view_func = functions_framework._cloud_event_view_func_wrapper(function, request)
    view_func("/some/path")

    assert function.calls == [pretend.call(event)]


def test_binary_event_view_func_wrapper(monkeypatch):
    data = pretend.stub()
    request = pretend.stub(
        headers={
            "ce-type": "something",
            "ce-specversion": "something",
            "ce-source": "something",
            "ce-id": "something",
            "ce-eventId": "some-eventId",
            "ce-timestamp": "some-timestamp",
            "ce-eventType": "some-eventType",
            "ce-resource": "some-resource",
        },
        get_data=lambda: data,
    )

    context_stub = pretend.stub()
    context_class = pretend.call_recorder(lambda *a, **kw: context_stub)
    monkeypatch.setattr(functions_framework, "Context", context_class)
    function = pretend.call_recorder(lambda data, context: "Hello")

    view_func = functions_framework._event_view_func_wrapper(function, request)
    view_func("/some/path")

    assert function.calls == [pretend.call(data, context_stub)]
    assert context_class.calls == [
        pretend.call(
            eventId="some-eventId",
            timestamp="some-timestamp",
            eventType="some-eventType",
            resource="some-resource",
        )
    ]


def test_legacy_event_view_func_wrapper(monkeypatch):
    data = pretend.stub()
    json = {
        "eventId": "some-eventId",
        "timestamp": "some-timestamp",
        "eventType": "some-eventType",
        "resource": "some-resource",
        "data": data,
    }
    request = pretend.stub(headers={}, get_json=lambda: json)

    context_stub = pretend.stub()
    context_class = pretend.call_recorder(lambda *a, **kw: context_stub)
    monkeypatch.setattr(functions_framework, "Context", context_class)
    function = pretend.call_recorder(lambda data, context: "Hello")

    view_func = functions_framework._event_view_func_wrapper(function, request)
    view_func("/some/path")

    assert function.calls == [pretend.call(data, context_stub)]
    assert context_class.calls == [
        pretend.call(
            eventId="some-eventId",
            timestamp="some-timestamp",
            eventType="some-eventType",
            resource="some-resource",
        )
    ]
repo_name: GoogleCloudPlatform/functions-framework-python
path: tests/test_view_functions.py
language: Python
license: apache-2.0
size: 6,493
keyword: ["Galaxy"]
text_hash: eebdafd37758b1d30072b3161a5225f285b292234e22db4a4f034064e9617f47
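The tests above lean entirely on the `pretend` stub library. A minimal, standalone sketch of the recorder pattern they use (assuming only that `pretend` is installed):

import pretend

# call_recorder wraps a callable and logs every invocation in .calls,
# so assertions can compare against pretend.call(...) entries.
function = pretend.call_recorder(lambda request: "Hello")
function("some-request")
assert function.calls == [pretend.call("some-request")]

# stub builds an ad-hoc object with only the attributes a test needs.
request = pretend.stub(headers={}, get_json=lambda: {"data": 1})
assert request.get_json() == {"data": 1}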
""" Notes: http://docs.readthedocs.io/en/latest/getting_started.html pip install sphinx sphinx-autobuild sphinx_rtd_theme sphinxcontrib-napoleon cd ~/code/ubelt mkdir docs cd docs sphinx-quickstart # need to edit the conf.py cd ~/code/ubelt/docs make html sphinx-apidoc -f -o ~/code/ubelt/docs/source ~/code/ubelt/ubelt --separate make html Also: To turn on PR checks https://docs.readthedocs.io/en/stable/guides/autobuild-docs-for-pull-requests.html https://readthedocs.org/dashboard/ubelt/advanced/ ensure your github account is connected to readthedocs https://readthedocs.org/accounts/social/connections/ """ # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/stable/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- # import ubelt import sphinx_rtd_theme from os.path import exists from os.path import dirname from os.path import join def parse_version(fpath): """ Statically parse the version number from a python file """ import ast if not exists(fpath): raise ValueError('fpath={!r} does not exist'.format(fpath)) with open(fpath, 'r') as file_: sourcecode = file_.read() pt = ast.parse(sourcecode) class VersionVisitor(ast.NodeVisitor): def visit_Assign(self, node): for target in node.targets: if getattr(target, 'id', None) == '__version__': self.version = node.value.s visitor = VersionVisitor() visitor.visit(pt) return visitor.version project = 'UBelt' copyright = '2018, Jon Crall' author = 'Jon Crall' # The short X.Y version # version = '.'.join(ubelt.__version__.split('.')[0:2]) # # The full version, including alpha/beta/rc tags # release = ubelt.__version__ modpath = join(dirname(dirname(dirname(__file__))), 'ubelt', '__init__.py') # The full version, including alpha/beta/rc tags release = parse_version(modpath) version = '.'.join(release.split('.')[0:2]) # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.autosummary', ] todo_include_todos = True napoleon_google_docstring = True napoleon_use_param = False napoleon_use_ivar = True autodoc_inherit_docstrings = False autodoc_member_order = 'bysource' # autodoc_mock_imports = ['torch', 'torchvision', 'visdom'] intersphinx_mapping = { # 'pytorch': ('http://pytorch.org/docs/master/', None), 'python': ('https://docs.python.org/3', None), 'click': ('https://click.palletsprojects.com/', None), # 'xxhash': ('https://pypi.org/project/xxhash/', None), # 'pygments': ('https://pygments.org/docs/', None), # 'tqdm': ('https://tqdm.github.io/', None), } __dev_note__ = """ python -m sphinx.ext.intersphinx https://docs.python.org/3/objects.inv python -m sphinx.ext.intersphinx https://ubelt.readthedocs.io/en/latest/objects.inv python -m sphinx.ext.intersphinx https://networkx.org/documentation/stable/objects.inv """ # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'collapse_navigation': False, 'display_version': True, # 'logo_only': True, } # html_logo = '.static/ubelt.svg' # html_favicon = '.static/ubelt.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'UBeltdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. 
# # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'UBelt.tex', 'UBelt Documentation', 'Jon Crall', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'ubelt', 'UBelt Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'UBelt', 'UBelt Documentation', author, 'UBelt', 'One line description of project.', 'Miscellaneous'), ] # -- Extension configuration ------------------------------------------------- from sphinx.domains.python import PythonDomain # NOQA class PatchedPythonDomain(PythonDomain): """ References: https://github.com/sphinx-doc/sphinx/issues/3866 """ def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): # TODO: can use this to resolve references nicely if target.startswith('ub.'): target = 'ubelt.' + target[3] return_value = super(PatchedPythonDomain, self).resolve_xref( env, fromdocname, builder, typ, target, node, contnode) return return_value def setup(app): app.add_domain(PatchedPythonDomain, override=True) if 1: # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html from sphinx.application import Sphinx from typing import Any, List what = None # Custom process to transform docstring lines # Remove "Ignore" blocks def process(app: Sphinx, what_: str, name: str, obj: Any, options: Any, lines: List[str] ) -> None: if what and what_ not in what: return orig_lines = lines[:] # text = '\n'.join(lines) # if 'Example' in text and 'CommandLine' in text: # import xdev # xdev.embed() ignore_tags = tuple(['Ignore']) mode = None # buffer = None new_lines = [] for i, line in enumerate(orig_lines): # See if the line triggers a mode change if line.startswith(ignore_tags): mode = 'ignore' elif line.startswith('CommandLine'): mode = 'cmdline' elif line and not line.startswith(' '): # if the line startswith anything but a space, we are no # longer in the previous nested scope mode = None if mode is None: new_lines.append(line) elif mode == 'ignore': # print('IGNORE line = {!r}'.format(line)) pass elif mode == 'cmdline': if line.startswith('CommandLine'): new_lines.append('.. rubric:: CommandLine') new_lines.append('') new_lines.append('.. code-block:: bash') new_lines.append('') # new_lines.append(' # CommandLine') else: # new_lines.append(line.strip()) new_lines.append(line) else: raise KeyError(mode) lines[:] = new_lines # make sure there is a blank line at the end if lines and lines[-1]: lines.append('') app.connect('autodoc-process-docstring', process) else: # https://stackoverflow.com/questions/26534184/can-sphinx-ignore-certain-tags-in-python-docstrings # Register a sphinx.ext.autodoc.between listener to ignore everything # between lines that contain the word IGNORE # from sphinx.ext.autodoc import between # app.connect('autodoc-process-docstring', between('^ *Ignore:$', exclude=True)) pass return app
repo_name: Erotemic/ubelt
path: docs/source/conf.py
language: Python
license: apache-2.0
size: 11,032
keyword: ["VisIt"]
text_hash: 8dcb42db2f4032930908587c490035c15e51b8c8978569cce4992c3ae55d0f9b
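A hedged usage sketch for the static `parse_version` helper defined in the conf.py above: it reads `__version__` out of a module without importing it. This assumes `parse_version` is in scope; the temporary file is purely illustrative.

import os
import tempfile

# Write a throwaway module containing only a version assignment.
tmp = tempfile.NamedTemporaryFile('w', suffix='.py', delete=False)
tmp.write('__version__ = "1.2.3"\n')
tmp.close()
print(parse_version(tmp.name))  # -> '1.2.3'
os.unlink(tmp.name)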
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""

# Author: Gael Varoquaux gael.varoquaux@normalesup.org
#         Brian Cheung
#         Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings

import numpy as np

from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means


def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.

    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, optional, default: 20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        rotation matrix

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated.  Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated.  These two
    calculations are performed until convergence.  The discrete partition
    matrix is returned as the clustering solution.  Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.

    """

    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with
    # respect to the first element.  This may have to do with constraining
    # the eigenvectors to lie in a specific quadrant to make the
    # discretization search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors.  Samples should lie on the unit
    # hypersphere centered at the origin.  This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:

        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # only a failed decomposition counts as a restart
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels


def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans'):
    """Apply clustering to a projection of the normalized Laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    ----------
    affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
        The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.

        Possible examples:
            - adjacency matrix of a graph,
            - heat kernel of the pairwise distance matrix of the samples,
            - symmetric k-nearest neighbours connectivity matrix of the
              samples.

    n_clusters : integer, optional
        Number of clusters to extract.

    n_components : integer, optional, default is n_clusters
        Number of eigenvectors to use for the spectral embedding

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigenvectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space.  There are two ways to assign labels after the Laplacian
        embedding.  k-means can be applied and is a popular choice. But it
        can also be sensitive to initialization. Discretization is another
        approach which is less sensitive to random initialization. See
        the 'Multiclass spectral clustering' paper referenced below for
        more details on the discretization approach.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----
    The graph should contain only one connected component; otherwise
    the results make little sense.

    This algorithm solves the normalized cut for k=2: it is a
    normalized spectral clustering.
    """
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    random_state = check_random_state(random_state)
    n_components = n_clusters if n_components is None else n_components
    maps = spectral_embedding(affinity, n_components=n_components,
                              eigen_solver=eigen_solver,
                              random_state=random_state,
                              eigen_tol=eigen_tol, drop_first=False)

    if assign_labels == 'kmeans':
        _, labels, _ = k_means(maps, n_clusters, random_state=random_state,
                               n_init=n_init)
    else:
        labels = discretize(maps, random_state=random_state)

    return labels


class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection of the normalized Laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::

            np.exp(-gamma * d(X,X) ** 2)

    or a k-nearest neighbors connectivity matrix.

    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    ----------
    n_clusters : integer, optional
        The dimension of the projection subspace.

    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed',
        'rbf' or one of the kernels supported by
        `sklearn.metrics.pairwise_kernels`.

        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not
        checked by the clustering algorithm.

    gamma : float, default=1.0
        Scaling factor of RBF, polynomial, exponential chi^2 and sigmoid
        affinity kernel. Ignored for ``affinity='nearest_neighbors'``.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix
        using the nearest neighbors method. Ignored for ``affinity='rbf'``.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigenvectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the Laplacian
        embedding. k-means can be applied and is a popular choice. But it
        can also be sensitive to initialization. Discretization is another
        approach which is less sensitive to random initialization.

    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Attributes
    ----------
    affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.

    labels_ :
        Labels of each point

    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::

        np.exp(- dist_matrix ** 2 / (2. * delta ** 2))

    Where ``delta`` is a free parameter representing the width of the
    Gaussian kernel.

    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.

    If the pyamg package is installed, it is used: this greatly
    speeds up computation.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """

    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
                 eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
                 kernel_params=None, n_jobs=1):
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit`` "
                          "now constructs an affinity matrix from data. To "
                          "use a custom affinity matrix, "
                          "set ``affinity=precomputed``.")

        if self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True,
                                            n_jobs=self.n_jobs)
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                     filter_params=True,
                                                     **params)

        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self

    @property
    def _pairwise(self):
        return self.affinity == "precomputed"
repo_name: meduz/scikit-learn
path: sklearn/cluster/spectral.py
language: Python
license: bsd-3-clause
size: 18,536
keyword: ["Brian", "Gaussian"]
text_hash: 25776606314c1543e3ebc1bc98d82a66867ad08692440a59cb5fd13f1499ab56
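A minimal usage sketch for the estimator above, following the standard scikit-learn API (the exact import path may differ between the fork in this record and released versions):

import numpy as np
from sklearn.cluster import SpectralClustering

# Two noisy blobs; spectral clustering recovers them from an RBF affinity.
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 0.1, (20, 2)), rng.normal(1, 0.1, (20, 2))])
model = SpectralClustering(n_clusters=2, affinity='rbf', random_state=0)
labels = model.fit(X).labels_
print(labels)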
import numpy as np
import quantities as pq


def spatial_rate_map(x, y, t, sptr, binsize=0.01*pq.m, box_xlen=1*pq.m,
                     box_ylen=1*pq.m, mask_unvisited=True, convolve=True,
                     return_bins=False, smoothing=0.02):
    """Divide a 2D space in bins of size binsize**2, count the number of
    spikes in each bin and divide by the time spent in respective bins. The
    map can then be convolved with a gaussian kernel of size csize determined
    by the smoothing factor, binsize and box_xlen.

    Parameters
    ----------
    sptr : neo.SpikeTrain
    x : quantities.Quantity array in m
        1d vector of x positions
    y : quantities.Quantity array in m
        1d vector of y positions
    t : quantities.Quantity array in s
        1d vector of times at x, y positions
    binsize : float
        spatial binsize
    box_xlen : quantities scalar in m
        side length of quadratic box
    mask_unvisited : bool
        mask bins which have not been visited with nans
    convolve : bool
        convolve the rate map with a 2D Gaussian kernel

    Returns
    -------
    out : rate map
    if return_bins = True
    out : rate map, xbins, ybins
    """
    from exana.misc.tools import is_quantities
    if not all([len(var) == len(var2) for var in [x, y, t]
                for var2 in [x, y, t]]):
        raise ValueError('x, y, t must have same number of elements')
    if box_xlen < x.max() or box_ylen < y.max():
        raise ValueError('box length must be larger or equal to max path '
                         'length')
    from decimal import Decimal as dec
    decimals = 1e10
    remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
    remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
    if remainderx != 0 or remaindery != 0:
        raise ValueError('the remainder should be zero i.e. the '
                         'box length should be an exact multiple '
                         'of the binsize')
    is_quantities([x, y, t], 'vector')
    is_quantities(binsize, 'scalar')
    t = t.rescale('s')
    box_xlen = box_xlen.rescale('m').magnitude
    box_ylen = box_ylen.rescale('m').magnitude
    binsize = binsize.rescale('m').magnitude
    x = x.rescale('m').magnitude
    y = y.rescale('m').magnitude

    # interpolate one extra timepoint
    t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))]) * pq.s
    spikes_in_bin, _ = np.histogram(sptr.times, t_)
    time_in_bin = np.diff(t_.magnitude)
    xbins = np.arange(0, box_xlen + binsize, binsize)
    ybins = np.arange(0, box_ylen + binsize, binsize)
    ix = np.digitize(x, xbins, right=True)
    iy = np.digitize(y, ybins, right=True)
    spike_pos = np.zeros((xbins.size, ybins.size))
    time_pos = np.zeros((xbins.size, ybins.size))
    for n in range(len(x)):
        spike_pos[ix[n], iy[n]] += spikes_in_bin[n]
        time_pos[ix[n], iy[n]] += time_in_bin[n]
    # correct for shifting of map
    spike_pos = spike_pos[1:, 1:]
    time_pos = time_pos[1:, 1:]
    with np.errstate(divide='ignore', invalid='ignore'):
        rate = np.divide(spike_pos, time_pos)
    if convolve:
        rate[np.isnan(rate)] = 0.  # for convolution
        from astropy.convolution import Gaussian2DKernel, convolve_fft
        csize = (box_xlen / binsize) * smoothing
        kernel = Gaussian2DKernel(csize)
        rate = convolve_fft(rate, kernel)  # TODO edge correction
    if mask_unvisited:
        was_in_bin = np.asarray(time_pos, dtype=bool)
        rate[np.invert(was_in_bin)] = np.nan
    if return_bins:
        return rate.T, xbins, ybins
    else:
        return rate.T


def gridness(rate_map, box_xlen, box_ylen, return_acorr=False,
             step_size=0.1*pq.m):
    '''Calculates gridness of a rate map. Calculates the normalized
    autocorrelation (A) of a rate map B where A is given as
    A = 1/n\Sum_{x,y}(B - \bar{B})^{2}/\sigma_{B}^{2}. Further, the Pearson
    product-moment correlation coefficient is calculated between A and A_{rot}
    rotated 30 and 60 degrees. Finally the gridness is calculated as the
    difference between the minimum of coefficients at 60 degrees and the
    maximum of coefficients at 30 degrees i.e. gridness = min(r60) - max(r30).
    In order to focus the analysis on the symmetry of A, the central and the
    outer parts of A are increasingly masked at steps of ``step_size`` and the
    gridness is maximized over the masked maps. This function is inspired by
    Lukas Solanka's gridcells package from Matt Nolan's lab.

    Parameters
    ----------
    rate_map : numpy.ndarray
    box_xlen : quantities scalar in m
        side length of quadratic box
    box_ylen : quantities scalar in m
        side length of quadratic box
    step_size : quantities scalar in m
        step size in masking
    return_acorr : bool
        return autocorrelation map or not

    Returns
    -------
    out : gridness, (autocorrelation map)
    '''
    from scipy.ndimage.interpolation import rotate
    import numpy.ma as ma
    from exana.misc.tools import (is_quantities, fftcorrelate2d,
                                  masked_corrcoef2d)
    is_quantities([box_xlen, box_ylen, step_size], 'scalar')
    box_xlen = box_xlen.rescale('m').magnitude
    box_ylen = box_ylen.rescale('m').magnitude
    step_size = step_size.rescale('m').magnitude
    tmp_map = rate_map.copy()
    tmp_map[~np.isfinite(tmp_map)] = 0
    acorr = fftcorrelate2d(tmp_map, tmp_map, mode='full', normalize=True)
    rows, cols = acorr.shape
    b_x = np.linspace(-box_xlen/2., box_xlen/2., rows)
    b_y = np.linspace(-box_ylen/2., box_ylen/2., cols)
    B_x, B_y = np.meshgrid(b_x, b_y)
    grids = []
    acorrs = []
    # TODO find size of middle gaussian and exclude
    for outer in np.arange(box_xlen/4, box_xlen/2, step_size):
        m_acorr = ma.masked_array(
            acorr, mask=np.sqrt(B_x**2 + B_y**2) > outer)
        for inner in np.arange(0, box_xlen/4, step_size):
            m_acorr = ma.masked_array(
                m_acorr, mask=np.sqrt(B_x**2 + B_y**2) < inner)
            angles = range(30, 180+30, 30)
            corr = []
            # Rotate and compute correlation coefficient
            for angle in angles:
                rot_acorr = rotate(m_acorr, angle, reshape=False)
                corr.append(masked_corrcoef2d(rot_acorr, m_acorr)[0, 1])
            r60 = corr[1::2]
            r30 = corr[::2]
            grids.append(np.min(r60) - np.max(r30))
            acorrs.append(m_acorr)
    if return_acorr:
        return max(grids), acorr,  # acorrs[grids.index(max(grids))]
    else:
        return max(grids)


def occupancy_map(x, y, t, binsize=0.01*pq.m, box_xlen=1*pq.m,
                  box_ylen=1*pq.m, convolve=True,
                  return_bins=False, smoothing=0.02):
    '''Divide a 2D space in bins of size binsize**2, count the time spent
    in each bin. The map can be convolved with a gaussian kernel of size
    csize determined by the smoothing factor, binsize and box_xlen.

    Parameters
    ----------
    x : quantities.Quantity array in m
        1d vector of x positions
    y : quantities.Quantity array in m
        1d vector of y positions
    t : quantities.Quantity array in s
        1d vector of times at x, y positions
    binsize : float
        spatial binsize
    box_xlen : quantities scalar in m
        side length of quadratic box
    mask_unvisited : bool
        mask bins which have not been visited with nans
    convolve : bool
        convolve the rate map with a 2D Gaussian kernel

    Returns
    -------
    occupancy_map : numpy.ndarray
    if return_bins = True
    out : occupancy_map, xbins, ybins
    '''
    from exana.misc.tools import is_quantities
    if not all([len(var) == len(var2) for var in [
            x, y, t] for var2 in [x, y, t]]):
        raise ValueError('x, y, t must have same number of elements')
    if box_xlen < x.max() or box_ylen < y.max():
        raise ValueError(
            'box length must be larger or equal to max path length')
    from decimal import Decimal as dec
    decimals = 1e10
    remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
    remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
    if remainderx != 0 or remaindery != 0:
        raise ValueError('the remainder should be zero i.e. the '
                         'box length should be an exact multiple '
                         'of the binsize')
    is_quantities([x, y, t], 'vector')
    is_quantities(binsize, 'scalar')
    t = t.rescale('s')
    box_xlen = box_xlen.rescale('m').magnitude
    box_ylen = box_ylen.rescale('m').magnitude
    binsize = binsize.rescale('m').magnitude
    x = x.rescale('m').magnitude
    y = y.rescale('m').magnitude

    # interpolate one extra timepoint
    t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))]) * pq.s
    time_in_bin = np.diff(t_.magnitude)
    xbins = np.arange(0, box_xlen + binsize, binsize)
    ybins = np.arange(0, box_ylen + binsize, binsize)
    ix = np.digitize(x, xbins, right=True)
    iy = np.digitize(y, ybins, right=True)
    time_pos = np.zeros((xbins.size, ybins.size))
    for n in range(len(x)):
        time_pos[ix[n], iy[n]] += time_in_bin[n]
    # correct for shifting of map since digitize returns values at right edges
    time_pos = time_pos[1:, 1:]
    if convolve:
        from astropy.convolution import Gaussian2DKernel, convolve_fft
        csize = (box_xlen / binsize) * smoothing
        kernel = Gaussian2DKernel(csize)
        time_pos = convolve_fft(time_pos, kernel)  # TODO edge correction
    if return_bins:
        return time_pos.T, xbins, ybins
    else:
        return time_pos.T


def nvisits_map(x, y, t, binsize=0.01*pq.m, box_xlen=1*pq.m,
                box_ylen=1*pq.m, return_bins=False):
    '''Divide a 2D space in bins of size binsize**2, count the number of
    visits in each bin. The map can be convolved with a gaussian kernel of
    size determined by the smoothing factor, binsize and box_xlen.

    Parameters
    ----------
    x : quantities.Quantity array in m
        1d vector of x positions
    y : quantities.Quantity array in m
        1d vector of y positions
    t : quantities.Quantity array in s
        1d vector of times at x, y positions
    binsize : float
        spatial binsize
    box_xlen : quantities scalar in m
        side length of quadratic box

    Returns
    -------
    nvisits_map : numpy.ndarray
    if return_bins = True
    out : nvisits_map, xbins, ybins
    '''
    from exana.misc.tools import is_quantities
    if not all([len(var) == len(var2) for var in [
            x, y, t] for var2 in [x, y, t]]):
        raise ValueError('x, y, t must have same number of elements')
    if box_xlen < x.max() or box_ylen < y.max():
        raise ValueError(
            'box length must be larger or equal to max path length')
    from decimal import Decimal as dec
    decimals = 1e10
    remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
    remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
    if remainderx != 0 or remaindery != 0:
        raise ValueError('the remainder should be zero i.e. the '
                         'box length should be an exact multiple '
                         'of the binsize')
    is_quantities([x, y, t], 'vector')
    is_quantities(binsize, 'scalar')
    t = t.rescale('s')
    box_xlen = box_xlen.rescale('m').magnitude
    box_ylen = box_ylen.rescale('m').magnitude
    binsize = binsize.rescale('m').magnitude
    x = x.rescale('m').magnitude
    y = y.rescale('m').magnitude

    xbins = np.arange(0, box_xlen + binsize, binsize)
    ybins = np.arange(0, box_ylen + binsize, binsize)
    ix = np.digitize(x, xbins, right=True)
    iy = np.digitize(y, ybins, right=True)
    nvisits_map = np.zeros((xbins.size, ybins.size))
    for n in range(len(x)):
        if n == 0:
            nvisits_map[ix[n], iy[n]] = 1
        else:
            if ix[n-1] != ix[n] or iy[n-1] != iy[n]:
                nvisits_map[ix[n], iy[n]] += 1
    # correct for shifting of map since digitize returns values at right edges
    nvisits_map = nvisits_map[1:, 1:]
    if return_bins:
        return nvisits_map.T, xbins, ybins
    else:
        return nvisits_map.T


def spatial_rate_map_1d(x, t, sptr, binsize=0.01*pq.m, track_len=1*pq.m,
                        mask_unvisited=True, convolve=True,
                        return_bins=False, smoothing=0.02):
    """Take x coordinates of linear track data, divide in bins of binsize,
    count the number of spikes in each bin and divide by the time spent in
    respective bins. The map can then be convolved with a gaussian kernel of
    size csize determined by the smoothing factor, binsize and box_xlen.

    Parameters
    ----------
    sptr : neo.SpikeTrain
    x : quantities.Quantity array in m
        1d vector of x positions
    t : quantities.Quantity array in s
        1d vector of times at x positions
    binsize : float
        spatial binsize
    track_len : quantities scalar in m
        length of linear track
    mask_unvisited : bool
        mask bins which have not been visited with nans
    convolve : bool
        convolve the rate map with a Gaussian kernel

    Returns
    -------
    out : rate map
    if return_bins = True
    out : rate map, xbins
    """
    from exana.misc.tools import is_quantities
    if not all([len(var) == len(var2) for var in [x, t] for var2 in [x, t]]):
        raise ValueError('x, t must have same number of elements')
    if track_len < x.max():
        raise ValueError('track length must be '
                         'larger or equal to max path length')
    from decimal import Decimal as dec
    decimals = 1e10
    remainderx = dec(float(track_len)*decimals) % dec(float(binsize)*decimals)
    if remainderx != 0:
        raise ValueError('the remainder should be zero i.e. the '
                         'track length should be an exact multiple '
                         'of the binsize')
    is_quantities([x, t], 'vector')
    is_quantities(binsize, 'scalar')
    t = t.rescale('s')
    track_len = track_len.rescale('m').magnitude
    binsize = binsize.rescale('m').magnitude
    x = x.rescale('m').magnitude

    # interpolate one extra timepoint
    t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))]) * pq.s
    spikes_in_bin, _ = np.histogram(sptr.times, t_)
    time_in_bin = np.diff(t_.magnitude)
    xbins = np.arange(0, track_len + binsize, binsize)
    ix = np.digitize(x, xbins, right=True)
    spike_pos = np.zeros(xbins.size)
    time_pos = np.zeros(xbins.size)
    for n in range(len(x)):
        spike_pos[ix[n]] += spikes_in_bin[n]
        time_pos[ix[n]] += time_in_bin[n]
    # correct for shifting of map since digitize returns values at right edges
    spike_pos = spike_pos[1:]
    time_pos = time_pos[1:]
    with np.errstate(divide='ignore', invalid='ignore'):
        rate = np.divide(spike_pos, time_pos)
    if convolve:
        rate[np.isnan(rate)] = 0.  # for convolution
        from astropy.convolution import Gaussian2DKernel, convolve_fft
        csize = (track_len / binsize) * smoothing
        kernel = Gaussian2DKernel(csize)
        rate = convolve_fft(rate, kernel)  # TODO edge correction
    if mask_unvisited:
        was_in_bin = np.asarray(time_pos, dtype=bool)
        rate[np.invert(was_in_bin)] = np.nan
    if return_bins:
        return rate.T, xbins
    else:
        return rate.T


def separate_fields(rate_map, laplace_thrsh=0, center_method='maxima',
                    cutoff_method='none', box_xlen=1*pq.m, box_ylen=1*pq.m,
                    index=False):
    """Separates fields using the laplacian to identify fields separated by
    a negative second derivative.

    Parameters
    ----------
    rate_map : np 2d array
        firing rate in each bin
    laplace_thrsh : float
        value of laplacian to separate fields by relative to the minima.
        Should be on the interval 0 to 1, where 0 cuts off at 0 and 1 cuts
        off at min(laplace(rate_map)). Default 0.
    center_method : string
        method to find field centers. Valid options = ['center_of_mass',
        'maxima', 'gaussian_fit']
    cutoff_method (optional) : string or function
        function to exclude small fields. If the local field value of the
        function is lower than the global function value, the field is
        excluded. Valid string options = ['median', 'mean', 'none'].
    index : bool, default False
        return bump center values as index or xy-pos

    Returns
    -------
    fields : numpy array, shape like rate_map.
        contains areas all filled with same value, corresponding to fields
        in rate_map. The values are in range(1, nFields + 1), sorted by size
        of the field (sum of all field values). 0 elsewhere.
    n_field : int
        field count
    bump_centers : (n_field x 2) np ndarray
        Coordinates of field centers
    """
    cutoff_functions = {'mean': np.mean, 'median': np.median, 'none': None}
    if not callable(cutoff_method):
        try:
            cutoff_func = cutoff_functions[cutoff_method]
        except KeyError:
            msg = "invalid cutoff_method flag '%s'" % cutoff_method
            raise ValueError(msg)
    else:
        cutoff_func = cutoff_method

    from scipy import ndimage

    l = ndimage.laplace(rate_map)

    l[l > laplace_thrsh*np.min(l)] = 0

    # Labels areas of the laplacian not connected by values > 0.
    fields, n_fields = ndimage.label(l)

    # index 0 is the background
    indx = np.arange(1, n_fields+1)

    # Use cutoff method to remove unwanted fields
    if cutoff_method != 'none':
        try:
            total_value = cutoff_func(fields)
        except:
            print('Unexpected error, cutoff_func doesnt like the input:')
            raise

        field_values = ndimage.labeled_comprehension(rate_map, fields, indx,
                                                     cutoff_func, float, 0)
        try:
            is_field = field_values >= total_value
        except:
            print('cutoff_func return_values doesnt want to compare:')
            raise

        if np.sum(is_field) == 0:
            return np.zeros(rate_map.shape), 0, np.array([[], []])

        for i in indx:
            if not is_field[i-1]:
                fields[fields == i] = 0

        n_fields = ndimage.label(fields, output=fields)
        indx = np.arange(1, n_fields + 1)

    # Sort by largest mean
    sizes = ndimage.labeled_comprehension(rate_map, fields, indx,
                                          np.mean, float, 0)
    size_sort = np.argsort(sizes)[::-1]
    new = np.zeros_like(fields)
    for i in np.arange(n_fields):
        new[fields == size_sort[i]+1] = i+1
    fields = new

    bc = get_bump_centers(rate_map, labels=fields, ret_index=index,
                          indices=indx, method=center_method,
                          units=box_xlen.units)

    # TODO exclude fields where maxima is on the edge of the field?
    return fields, n_fields, bc


def get_bump_centers(rate_map, labels, ret_index=False, indices=None,
                     method='maxima', units=1*pq.m):
    """Finds center of fields at labels."""
    from scipy import ndimage
    if method not in ['maxima', 'center_of_mass', 'gaussian_fit']:
        msg = "invalid center_method flag '%s'" % method
        raise ValueError(msg)
    if indices is None:
        indices = np.arange(1, np.max(labels)+1)
    if method == 'maxima':
        bc = ndimage.maximum_position(rate_map, labels=labels,
                                      index=indices)
    elif method == 'center_of_mass':
        bc = ndimage.center_of_mass(rate_map, labels=labels, index=indices)
    elif method == 'gaussian_fit':
        from exana.tracking.tools import fit_gauss_asym
        bc = np.zeros((len(indices), 2))
        import matplotlib.pyplot as plt
        for i in indices:
            r = rate_map.copy()
            r[labels != i] = 0
            popt = fit_gauss_asym(r, return_data=False)
            # TODO Find out which axis is x and which is y
            bc[i-1] = (popt[2], popt[1])
        if ret_index:
            msg = 'ret_index not implemented for gaussian fit'
            raise NotImplementedError(msg)
    if not ret_index and not method == 'gaussian_fit':
        bc = (bc + np.array((0.5, 0.5)))/rate_map.shape
    return np.array(bc)*units


def find_avg_dist(rate_map, thrsh=0, plot=False):
    """Uses autocorrelation and separate_fields to find average distance
    between bumps. Is dependent on high gridness to get separate bumps in
    the autocorrelation.

    Parameters
    ----------
    rate_map : np 2d array
        firing rate in each bin
    thrsh (optional) : float, default 0
        cutoff value for the laplacian of the autocorrelation function.
        Should be a negative number. Gives better separation if bumps are
        connected by "bridges" or saddles where the laplacian is negative.
    plot (optional) : bool, default False
        plot acorr and the separated acorr, with bump centers

    Returns
    -------
    avg_dist : float
        relative units from 0 to 1 of the box size
    """
    from scipy.ndimage import maximum_position
    from exana.misc.tools import fftcorrelate2d

    # autocorrelate. Returns array (2x - 1) the size of rate_map
    acorr = fftcorrelate2d(rate_map, rate_map, mode='full', normalize=True)

    # acorr[acorr<0] = 0 # TODO Fix this

    f, nf, bump_centers = separate_fields(acorr, laplace_thrsh=thrsh,
                                          center_method='maxima',
                                          cutoff_method='median')
    # TODO Find a way to find valid value for thrsh, or remove.

    bump_centers = np.array(bump_centers)

    # find dists from center in (autocorrelation) relative units (from 0 to 1)
    distances = np.linalg.norm(bump_centers - (0.5, 0.5), axis=1)
    dist_sort = np.argsort(distances)
    distances = distances[dist_sort]

    # use maximum 6 closest values except center value
    avg_dist = np.median(distances[1:7])

    # correct for difference in shapes
    avg_dist *= acorr.shape[0]/rate_map.shape[0]  # = 1.98

    # TODO : raise warning if too big difference between points
    if plot:
        import matplotlib.pyplot as plt
        fig, [ax1, ax2] = plt.subplots(1, 2)
        ax1.imshow(acorr, extent=(0, 1, 0, 1), origin='lower')
        ax1.scatter(*(bump_centers[:, ::-1].T))
        ax2.imshow(f, extent=(0, 1, 0, 1), origin='lower')
        ax2.scatter(*(bump_centers[:, ::-1].T))
    return avg_dist


def fit_hex(bump_centers, avg_dist=None, plot_bumps=False, method='best'):
    """Fits a hex grid to a given set of bumps. Uses the three bumps most
    likely to be in the correct hex-position.

    Parameters
    ----------
    bump_centers : Nx2 np.array
        x,y positions of bump centers, x,y \in (0,1)
    avg_dist (optional) : float
        average spacing between bumps
    plot_bumps (optional) : bool
        if True, plots at the three bumps most likely to be in correct
        hex-position to the current matplotlib axes.
    method (optional) : string, valid options: ['closest', 'best']
        method to find angle from neighboring bumps.
        'closest' uses six bumps nearest to center bump
        'best' uses the two bumps nearest to avg_dist

    Returns
    -------
    displacement : float
        distance of bump closest to the center in meters
    orientation : float
        orientation of hexagon (in degrees)
    """
    valid_methods = ['closest', 'best']
    if method not in valid_methods:
        msg = "invalid method flag '%s'" % method
        raise ValueError(msg)
    bump_centers = np.array(bump_centers)

    # sort by distance to center
    d = np.linalg.norm(bump_centers - (0.5, 0.5), axis=1)
    d_sort = np.argsort(d)
    dist_sorted = bump_centers[d_sort]
    center_bump = dist_sorted[0]
    others = dist_sorted[1:]

    displacement = d[d_sort][0]

    # others distances to center bumps
    relpos = others - center_bump
    reldist = np.linalg.norm(relpos, axis=1)

    if method == 'closest':
        # get 6 closest bumps
        rel_sort = np.argsort(reldist)
        closest = others[rel_sort][:6]
        relpos = relpos[rel_sort][:6]
    elif method == 'best':
        # get 2 bumps such that \sum_{i \neq j}(\abs{r_i-r_j}-avg_dist)^2
        # is minimized
        squares = 1e32*np.ones((others.shape[0], others.shape[0]))
        for i in range(len(relpos)):
            for j in range(i, len(relpos)):
                rel1 = (reldist[i] - avg_dist)**2
                rel2 = (reldist[j] - avg_dist)**2
                rel3 = (np.linalg.norm(relpos[i]-relpos[j]) - avg_dist)**2
                squares[i, j] = rel1 + rel2 + rel3
        rel_slice = np.unravel_index(np.argmin(squares), squares.shape)
        rel_slice = np.array(rel_slice)
        # rel_sort = np.argsort(np.abs(reldist-avg_dist))
        closest = others[rel_slice]
        relpos = relpos[rel_slice]

    # sort by angle
    a = np.arctan2(relpos[:, 1], relpos[:, 0]) % (2*np.pi)
    a_sort = np.argsort(a)

    # extract lowest angle and convert to degrees
    orientation = a[a_sort][0] * 180/np.pi

    # hex grid is symmetric under rotations of 60deg
    orientation %= 60

    if plot_bumps:
        import matplotlib.pyplot as plt
        ax = plt.gca()
        i = 1
        xmin, xmax = ax.get_xlim()
        ymin, ymax = ax.get_ylim()
        dx = xmax - xmin
        dy = ymax - ymin

        closest = closest[a_sort]
        edges = [center_bump] if method == 'best' else []
        edges += [c for c in closest]
        edges = np.array(edges)*(dx, dy) + (xmin, ymin)
        poly = plt.Polygon(edges, alpha=0.5, color='r')
        ax.add_artist(poly)
    return displacement, orientation


def calculate_grid_geometry(rate_map, plot_fields=False, **kwargs):
    """Calculates quantitative information about grid field.
    Find bump centers, bump spacing, center displacement and hexagon
    orientation.

    Parameters
    ----------
    rate_map : np 2d array
        firing rate in each bin
    plot_fields : if True, plots the field labels with field centers to the
        current matplotlib ax. Default False

    Returns
    -------
    bump_centers : 2d np.array
        x,y positions of bump centers
    avg_dist : float
        average spacing between bumps, \in [0,1]
    displacement : float
        distance of bump closest to the center
    orientation : float
        orientation of hexagon (in degrees)

    Other parameters
    ----------------
    thrsh : float, default 0
        see find_avg_dist()
    center_method : string, valid options: ['maxima', 'center_of_mass']
        default: 'center_of_mass'
        see separate_fields()
    method : string, valid options: ['closest', 'best']
        see fit_hex()

    Examples
    --------
    >>> import numpy as np
    >>> rate_map = np.zeros((5,5))
    >>> pos = np.array([ [0,2],
    ...                  [1,0],[1,4],
    ...                  [2,2],
    ...                  [3,0],[3,4],
    ...                  [4,2]])
    >>> for (i,j) in pos:
    ...     rate_map[i,j] = 1
    ...
    >>> calculate_grid_geometry(rate_map)
    (array([[ 0.5,  0.9],
           [ 0.9,  0.7],
           [ 0.1,  0.7],
           [ 0.5,  0.5],
           [ 0.9,  0.3],
           [ 0.1,  0.3],
           [ 0.5,  0.1]]) * m, 0.44721359549995793, 0.0, 26.565051177077983)
    """
    from scipy.ndimage import mean, center_of_mass

    # TODO: smooth data?
    # smooth_rate_map = lambda x: x
    # rate_map = smooth_rate_map(rate_map)

    center_method = kwargs.pop('center_method', None)
    if center_method:
        fields, nfields, bump_centers = separate_fields(
            rate_map, center_method=center_method)
    else:
        fields, nfields, bump_centers = separate_fields(rate_map)

    if bump_centers.size == 0:
        import warnings
        msg = 'couldnt find bump centers, returning None'
        warnings.warn(msg, RuntimeWarning, stacklevel=2)
        return None, None, None, None,

    sh = np.array(rate_map.shape)
    if plot_fields:
        print(fields)
        import matplotlib.pyplot as plt
        x = np.linspace(0, 1, sh[0]+1)
        y = np.linspace(0, 1, sh[1]+1)
        x, y = np.meshgrid(x, y)
        ax = plt.gca()
        print('nfields: ', nfields)
        plt.pcolormesh(x, y, fields)

    # switch from row-column to x-y
    bump_centers = bump_centers[:, ::-1]

    thrsh = kwargs.pop('thrsh', None)
    if thrsh:
        avg_dist = find_avg_dist(rate_map, thrsh)
    else:
        avg_dist = find_avg_dist(rate_map)

    displacement, orientation = fit_hex(bump_centers, avg_dist,
                                        plot_bumps=plot_fields, **kwargs)

    return bump_centers, avg_dist, displacement, orientation


class RandomDisplacementBounds(object):
    """random displacement with bounds"""
    def __init__(self, xmin, xmax, stepsize=0.5):
        self.xmin = np.array(xmin)
        self.xmax = np.array(xmax)
        self.stepsize = stepsize

    def __call__(self, x):
        """take a random step but ensure the new position is within the
        bounds"""
        while True:
            # this could be done in a much more clever way, but it will
            # work for example purposes
            xnew = x + (self.xmax-self.xmin)*np.random.uniform(
                -self.stepsize, self.stepsize, np.shape(x))
            if np.all(xnew < self.xmax) and np.all(xnew > self.xmin):
                break
        return xnew


def optimize_sep_fields(rate_map, step=0.04, niter=40, T=1.0,
                        method='SLSQP', glob=True, x0=[0.065, 0.1],
                        callback=None):
    """Optimizes the separation of the fields by minimizing an error
    function.

    Parameters
    ----------
    rate_map : np 2d array
        firing rate in each bin
    method : string
        valid methods = ['L-BFGS-B', 'TNC', 'SLSQP']
    x0 : list
        initial values for smoothing and laplace_thrsh

    Returns
    -------
    res :
        Result of the optimization. Contains smoothing and laplace_thrsh
        in attribute res.x
    """
    from scipy import optimize
    from exana.tracking.tools import separation_error_func as err_func

    valid_methods = ['L-BFGS-B', 'TNC', 'SLSQP']
    if method not in valid_methods:
        raise ValueError('invalid method flag %s' % method)
    rate_map[np.isnan(rate_map)] = 0.

    xmin = [0.025, 0]
    xmax = [0.2, 1]
    bounds = [(low, high) for low, high in zip(xmin, xmax)]

    obj_func = lambda args: err_func(args[0], args[1], rate_map)

    if glob:
        take_step = RandomDisplacementBounds(xmin, xmax, stepsize=step)
        minimizer_kwargs = dict(method=method, bounds=bounds)
        res = optimize.basinhopping(obj_func, x0, niter=niter, T=T,
                                    minimizer_kwargs=minimizer_kwargs,
                                    take_step=take_step, callback=callback)
    else:
        res = optimize.minimize(obj_func, x0, method=method, bounds=bounds,
                                options={'disp': True})
    return res


if __name__ == "__main__":
    import doctest
    doctest.testmod()
CINPLA/expipe-dev
exana/exana/tracking/fields.py
Python
gpl-3.0
31,411
[ "Gaussian" ]
0e765aad9689fb70b4dc7805930159d219100f9f53fc23ffdb6b72994c555cd5
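The hexagon fit above reduces to a few lines of array arithmetic. Below is a minimal, self-contained sketch of the geometry at the heart of fit_hex() -- distance-to-center sorting plus the 60-degree fold of the nearest-neighbour angle -- where the bump positions are made-up stand-ins for the output of separate_fields():

import numpy as np

# Hypothetical bump centers in the unit square (illustration only).
bump_centers = np.array([[0.5, 0.5], [0.9, 0.5], [0.7, 0.85],
                         [0.3, 0.85], [0.1, 0.5], [0.3, 0.15], [0.7, 0.15]])

# Distance of every bump to the map center, as in fit_hex().
d = np.linalg.norm(bump_centers - (0.5, 0.5), axis=1)
order = np.argsort(d)
center_bump, others = bump_centers[order][0], bump_centers[order][1:]
displacement = d[order][0]

# Angle of each neighbour relative to the central bump.
relpos = others - center_bump
angles = np.arctan2(relpos[:, 1], relpos[:, 0]) % (2 * np.pi)

# Lowest angle in degrees, folded into [0, 60) because a hexagonal
# grid is invariant under 60-degree rotations.
orientation = (np.sort(angles)[0] * 180 / np.pi) % 60
print(displacement, orientation)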
# mencoder "mf://mpg_288x288x24_lam15_t273_a0353140_0000_rv.dat_%d.png" -mf fps=20 -o anim.avi -ovc lavc -lavcopts vcodec=msmpeg4v2:vbitrate=500 import numpy import sys from mmdlab.datareader import readfile_metgas_boxes from mmdlab.utils import make_mlab_scalar_field from mmdlab.utils import show_scalar_planes from mmdlab.utils import show_vector_planes from mmdlab.utils import init_mlab_scene from mmdlab.utils import showable from mmdlab.utils import saveimg from mmdlab.utils import offscreen import mmdlab from mayavi import mlab import mayavi import time import pylab from numba import jit from numba import float64, int8 import pp import threading from multiprocessing.dummy import Pool as ThreadPool def rotate(deg, v, i): mayavi.mlab.view(*v) mayavi.mlab.view(azimuth=i*deg) for rot in xrange(deg): mayavi.mlab.view(azimuth=i*deg+rot) print "saving to ./advid/{num:05d}.png".format(num=i*deg+rot) mayavi.mlab.savefig("./advid/{num:05d}.png".format(num=i*deg+rot)) mayavi.mlab.close() #@offscreen def draw_rot(data,i,v): f = open("./jobs/%d.log"%i,"w") f.write("Job started") size = int(800), int(600) mayavi.mlab.options.offscreen = True fig = mayavi.mlab.figure('Viz', size=size,bgcolor=(0,0,0)) print "Reading data" m,g = mmdlab.datareader.readfile_metgas_boxes(data,region=[0,20,0,20,9,20]) f.write("data read") if i > 74: m.z -= 2.1184 g.z -= 2.1184 f.write("fig") fig.scene.anti_aliasing_frames = 0 print "Drawing metal" mayavi.mlab.points3d(m.x, m.y, m.z, scale_mode="none",scale_factor=m.d, color=(0.5,0.5,0.5)) print "Drawing gas" mayavi.mlab.points3d(g.x, g.y, g.z, scale_mode="none",scale_factor=g.d, color=(0.5,1,0.5), ) if v is None: v = mayavi.mlab.view() rotate(20, v, i) f.write("rotated") f.write("Job done") return v def do_mlab(): global view f = open(sys.argv[1]+"/index.list","r") lines = f.readlines() tasks = [] lc = len(lines) for i,line in enumerate(lines): tasks.append( (sys.argv[1]+line[:-1],i)) f = open(sys.argv[2]+"/index.list","r") lines = f.readlines() for i,line in enumerate(lines): tasks.append((sys.argv[2]+line[:-1],i+lc)) view = draw_rot(tasks[0][0], 0, None) job_server = pp.Server(ncpus = 5) jobs = [ job_server.submit(draw_rot,(data,i,view,), (rotate,), ("mmdlab","mmdlab.datareader","mayavi","mayavi.mlab",)) for data,i in tasks[1:]]; import time job_server.print_stats() time.sleep(5) job_server.print_stats() time.sleep(5) job_server.print_stats() for i,job in enumerate(jobs): print "Job ", i print job() print "Job ", i," done" do_mlab()
detorto/mdvis
src/rotate_reg.py
Python
mit
2,586
[ "Mayavi" ]
75d3c5b5c752d0ee8e80dcea616fad01dc67069da3188bcb10475913b418f9ec
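For reference, the frame-dumping idea in draw_rot()/rotate() fits in a few lines. The sketch below assumes only Mayavi and uses a random stand-in point cloud (the mmdlab reader and the pp job farm are left out); the numbered PNGs it writes can be joined into a video with mencoder or ffmpeg, as in the comment at the top of the script:

import os
import numpy as np
from mayavi import mlab

mlab.options.offscreen = True  # render without opening a window
fig = mlab.figure('Viz', size=(800, 600), bgcolor=(0, 0, 0))

# Random stand-in data for the metal/gas particle positions.
x, y, z = np.random.rand(3, 100)
mlab.points3d(x, y, z, scale_mode="none", scale_factor=0.02,
              color=(0.5, 1.0, 0.5))

os.makedirs("advid", exist_ok=True)
for frame in range(20):
    mlab.view(azimuth=frame)  # rotate the camera one degree per frame
    mlab.savefig("./advid/{num:05d}.png".format(num=frame))
mlab.close()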
""" Dataproviders that use either: - the file contents and/or metadata from a Galaxy DatasetInstance as their source. - or provide data in some way relevant to bioinformatic data (e.g. parsing genomic regions from their source) """ import base import line import column import external from galaxy.util import sqlite import sys from bx import seq as bx_seq from bx import wiggle as bx_wig from bx import bbi as bx_bbi _TODO = """ use bx as much as possible gff3 hierarchies change SamtoolsDataProvider to use pysam """ import logging log = logging.getLogger( __name__ ) # ----------------------------------------------------------------------------- base for using a Glx dataset class DatasetDataProvider( base.DataProvider ): """ Class that uses the file contents and/or metadata from a Galaxy DatasetInstance as its source. DatasetDataProvider can be seen as the intersection between a datatype's metadata and a dataset's file contents. It (so far) mainly provides helper and conv. methods for using dataset metadata to set up and control how the data is provided. """ def __init__( self, dataset, **kwargs ): """ :param dataset: the Galaxy dataset whose file will be the source :type dataset: model.DatasetInstance """ # precondition: dataset is a galaxy.model.DatasetInstance self.dataset = dataset # this dataset file is obviously the source # TODO: this might be a good place to interface with the object_store... super( DatasetDataProvider, self ).__init__( open( dataset.file_name, 'rb' ) ) # TODO: this is a bit of a mess @classmethod def get_column_metadata_from_dataset( cls, dataset ): """ Convenience class method to get column metadata from a dataset. :returns: a dictionary of `column_count`, `column_types`, and `column_names` if they're available, setting each to `None` if not. """ # re-map keys to fit ColumnarProvider.__init__ kwargs params = {} params[ 'column_count' ] = dataset.metadata.columns params[ 'column_types' ] = dataset.metadata.column_types params[ 'column_names' ] = dataset.metadata.column_names or getattr( dataset.datatype, 'column_names', None ) return params def get_metadata_column_types( self, indeces=None ): """ Return the list of `column_types` for this dataset or `None` if unavailable. :param indeces: the indeces for the columns of which to return the types. Optional: defaults to None (return all types) :type indeces: list of ints """ metadata_column_types = ( self.dataset.metadata.column_types or getattr( self.dataset.datatype, 'column_types', None ) or None ) if not metadata_column_types: return metadata_column_types if indeces: column_types = [] for index in indeces: column_type = metadata_column_types[ index ] if index < len( metadata_column_types ) else None column_types.append( column_type ) return column_types return metadata_column_types def get_metadata_column_names( self, indeces=None ): """ Return the list of `column_names` for this dataset or `None` if unavailable. :param indeces: the indeces for the columns of which to return the names. 
Optional: defaults to None (return all names) :type indeces: list of ints """ metadata_column_names = ( self.dataset.metadata.column_names or getattr( self.dataset.datatype, 'column_names', None ) or None ) if not metadata_column_names: return metadata_column_names if indeces: column_names = [] for index in indeces: column_name = metadata_column_names[ index ] if index < len( metadata_column_names ) else None column_names.append( column_name ) return column_names return metadata_column_names # TODO: merge the next two def get_indeces_by_column_names( self, list_of_column_names ): """ Return the list of column indeces when given a list of column_names. :param list_of_column_names: the names of the columns of which to get indeces. :type list_of_column_names: list of strs :raises KeyError: if column_names are not found :raises ValueError: if an entry in list_of_column_names is not in column_names """ metadata_column_names = ( self.dataset.metadata.column_names or getattr( self.dataset.datatype, 'column_names', None ) or None ) if not metadata_column_names: raise KeyError( 'No column_names found for ' + 'datatype: %s, dataset: %s' % ( str( self.dataset.datatype ), str( self.dataset ) ) ) indeces = [] # if indeces and column_names: # pull using indeces and re-name with given names - no need to alter (does as super would) # pass for column_name in list_of_column_names: indeces.append( metadata_column_names.index( column_name ) ) return indeces def get_metadata_column_index_by_name( self, name ): """ Return the 1-based index of a source's column with the given `name`. """ # metadata columns are 1-based indeces column = getattr( self.dataset.metadata, name ) return ( column - 1 ) if ( isinstance( column, int ) and column > 0 ) else None def get_genomic_region_indeces( self, check=False ): """ Return a list of column indeces for 'chromCol', 'startCol', 'endCol' from a source representing a genomic region. :param check: if True will raise a ValueError if any were not found. :type check: bool :raises ValueError: if check is `True` and one or more indeces were not found. :returns: list of column indeces for the named columns. """ region_column_names = ( 'chromCol', 'startCol', 'endCol' ) region_indices = [ self.get_metadata_column_index_by_name( name ) for name in region_column_names ] if check and not all( map( lambda i: i is not None, region_indices) ): raise ValueError( "Could not determine proper column indices for chrom, start, end: %s" % ( str( region_indices ) ) ) return region_indices class ConvertedDatasetDataProvider( DatasetDataProvider ): """ Class that uses the file contents of a dataset after conversion to a different format. """ def __init__( self, dataset, **kwargs ): raise NotImplementedError( 'Abstract class' ) self.original_dataset = dataset self.converted_dataset = self.convert_dataset( dataset, **kwargs ) super( ConvertedDatasetDataProvider, self ).__init__( self.converted_dataset, **kwargs ) # NOTE: now self.converted_dataset == self.dataset def convert_dataset( self, dataset, **kwargs ): """ Convert the given dataset in some way. """ return dataset # ----------------------------------------------------------------------------- uses metadata for settings class DatasetColumnarDataProvider( column.ColumnarDataProvider ): """ Data provider that uses a DatasetDataProvider as its source and the dataset's metadata to build settings for the ColumnarDataProvider it's inherited from. """ def __init__( self, dataset, **kwargs ): """ All kwargs are inherited from ColumnarDataProvider. ..
seealso:: column.ColumnarDataProvider If no kwargs are given, this class will attempt to get those kwargs from the dataset source's metadata. If any kwarg is given, it will override and be used in place of any metadata available. """ dataset_source = DatasetDataProvider( dataset ) if not kwargs.get( 'column_types', None ): indeces = kwargs.get( 'indeces', None ) kwargs[ 'column_types' ] = dataset_source.get_metadata_column_types( indeces=indeces ) super( DatasetColumnarDataProvider, self ).__init__( dataset_source, **kwargs ) class DatasetDictDataProvider( column.DictDataProvider ): """ Data provider that uses a DatasetDataProvider as its source and the dataset's metadata to build settings for the DictDataProvider it's inherited from. """ def __init__( self, dataset, **kwargs ): """ All kwargs are inherited from DictDataProvider. .. seealso:: column.DictDataProvider If no kwargs are given, this class will attempt to get those kwargs from the dataset source's metadata. If any kwarg is given, it will override and be used in place of any metadata available. The relationship between column_names and indeces is more complex:

+-----------------+-------------------------------+-----------------------+
|                 | Indeces given                 | Indeces NOT given     |
+=================+===============================+=======================+
| Names given     | pull indeces, rename w/ names | pull by name          |
+-----------------+-------------------------------+-----------------------+
| Names NOT given | pull indeces, name w/ meta    | pull all, name w/meta |
+-----------------+-------------------------------+-----------------------+

""" dataset_source = DatasetDataProvider( dataset ) # TODO: getting too complicated - simplify at some lvl, somehow # if no column_types given, get column_types from indeces (or all if indeces == None) indeces = kwargs.get( 'indeces', None ) column_names = kwargs.get( 'column_names', None ) if not indeces and column_names: # pull columns by name indeces = kwargs[ 'indeces' ] = dataset_source.get_indeces_by_column_names( column_names ) elif indeces and not column_names: # pull using indeces, name with meta column_names = kwargs[ 'column_names' ] = dataset_source.get_metadata_column_names( indeces=indeces ) elif not indeces and not column_names: # pull all indeces and name using metadata column_names = kwargs[ 'column_names' ] = dataset_source.get_metadata_column_names( indeces=indeces ) # if no column_types given, use metadata column_types if not kwargs.get( 'column_types', None ): kwargs[ 'column_types' ] = dataset_source.get_metadata_column_types( indeces=indeces ) super( DatasetDictDataProvider, self ).__init__( dataset_source, **kwargs ) # ----------------------------------------------------------------------------- provides a bio-relevant datum class GenomicRegionDataProvider( column.ColumnarDataProvider ): """ Data provider that parses chromosome, start, and end data from a file using the dataset's metadata settings. Is a ColumnarDataProvider that uses a DatasetDataProvider as its source. If `named_columns` is true, will return dictionaries with the keys 'chrom', 'start', 'end'.
""" # dictionary keys when named_columns=True COLUMN_NAMES = [ 'chrom', 'start', 'end' ] settings = { 'chrom_column' : 'int', 'start_column' : 'int', 'end_column' : 'int', 'named_columns' : 'bool', } def __init__( self, dataset, chrom_column=None, start_column=None, end_column=None, named_columns=False, **kwargs ): """ :param dataset: the Galaxy dataset whose file will be the source :type dataset: model.DatasetInstance :param chrom_column: optionally specify the chrom column index :type chrom_column: int :param start_column: optionally specify the start column index :type start_column: int :param end_column: optionally specify the end column index :type end_column: int :param named_columns: optionally return dictionaries keying each column with 'chrom', 'start', or 'end'. Optional: defaults to False :type named_columns: bool """ # TODO: allow passing in a string format e.g. "{chrom}:{start}-{end}" dataset_source = DatasetDataProvider( dataset ) if chrom_column is None: chrom_column = dataset_source.get_metadata_column_index_by_name( 'chromCol' ) if start_column is None: start_column = dataset_source.get_metadata_column_index_by_name( 'startCol' ) if end_column is None: end_column = dataset_source.get_metadata_column_index_by_name( 'endCol' ) indeces = [ chrom_column, start_column, end_column ] if not all( map( lambda i: i is not None, indeces ) ): raise ValueError( "Could not determine proper column indeces for" + " chrom, start, end: %s" % ( str( indeces ) ) ) kwargs.update({ 'indeces' : indeces }) if not kwargs.get( 'column_types', None ): kwargs.update({ 'column_types' : dataset_source.get_metadata_column_types( indeces=indeces ) }) self.named_columns = named_columns if self.named_columns: self.column_names = self.COLUMN_NAMES super( GenomicRegionDataProvider, self ).__init__( dataset_source, **kwargs ) def __iter__( self ): parent_gen = super( GenomicRegionDataProvider, self ).__iter__() for column_values in parent_gen: if self.named_columns: yield dict( zip( self.column_names, column_values ) ) else: yield column_values # TODO: this optionally provides the same data as the above and makes GenomicRegionDataProvider redundant # GenomicRegionDataProvider is a better name, tho class IntervalDataProvider( column.ColumnarDataProvider ): """ Data provider that parses chromosome, start, and end data (as well as strand and name if set in the metadata) using the dataset's metadata settings. If `named_columns` is true, will return dictionaries with the keys 'chrom', 'start', 'end' (and 'strand' and 'name' if available). """ COLUMN_NAMES = [ 'chrom', 'start', 'end', 'strand', 'name' ] settings = { 'chrom_column' : 'int', 'start_column' : 'int', 'end_column' : 'int', 'strand_column' : 'int', 'name_column' : 'int', 'named_columns' : 'bool', } def __init__( self, dataset, chrom_column=None, start_column=None, end_column=None, strand_column=None, name_column=None, named_columns=False, **kwargs ): """ :param dataset: the Galaxy dataset whose file will be the source :type dataset: model.DatasetInstance :param named_columns: optionally return dictionaries keying each column with 'chrom', 'start', 'end', 'strand', or 'name'. Optional: defaults to False :type named_columns: bool """ # TODO: allow passing in a string format e.g. 
"{chrom}:{start}-{end}" dataset_source = DatasetDataProvider( dataset ) # get genomic indeces and add strand and name self.column_names = [] indeces = [] # TODO: this is sort of involved and oogly if chrom_column is None: chrom_column = dataset_source.get_metadata_column_index_by_name( 'chromCol' ) if chrom_column is not None: self.column_names.append( 'chrom' ) indeces.append( chrom_column ) if start_column is None: start_column = dataset_source.get_metadata_column_index_by_name( 'startCol' ) if start_column is not None: self.column_names.append( 'start' ) indeces.append( start_column ) if end_column is None: end_column = dataset_source.get_metadata_column_index_by_name( 'endCol' ) if end_column is not None: self.column_names.append( 'end' ) indeces.append( end_column ) if strand_column is None: strand_column = dataset_source.get_metadata_column_index_by_name( 'strandCol' ) if strand_column is not None: self.column_names.append( 'strand' ) indeces.append( strand_column ) if name_column is None: name_column = dataset_source.get_metadata_column_index_by_name( 'nameCol' ) if name_column is not None: self.column_names.append( 'name' ) indeces.append( name_column ) kwargs.update({ 'indeces' : indeces }) if not kwargs.get( 'column_types', None ): kwargs.update({ 'column_types' : dataset_source.get_metadata_column_types( indeces=indeces ) }) self.named_columns = named_columns super( IntervalDataProvider, self ).__init__( dataset_source, **kwargs ) def __iter__( self ): parent_gen = super( IntervalDataProvider, self ).__iter__() for column_values in parent_gen: if self.named_columns: yield dict( zip( self.column_names, column_values ) ) else: yield column_values # TODO: ideally with these next two - you'd allow pulling some region from the sequence # WITHOUT reading the entire seq into memory - possibly apply some version of limit/offset class FastaDataProvider( base.FilteredDataProvider ): """ Class that returns fasta format data in a list of maps of the form: { id: <fasta header id>, sequence: <joined lines of nucleotide/amino data> } """ settings = { 'ids' : 'list:str', } def __init__( self, source, ids=None, **kwargs ): """ :param ids: optionally return only ids (and sequences) that are in this list. Optional: defaults to None (provide all ids) :type ids: list or None """ source = bx_seq.fasta.FastaReader( source ) # TODO: validate is a fasta super( FastaDataProvider, self ).__init__( source, **kwargs ) self.ids = ids # how to do ids? def __iter__( self ): parent_gen = super( FastaDataProvider, self ).__iter__() for fasta_record in parent_gen: yield { 'id' : fasta_record.name, 'seq' : fasta_record.text } class TwoBitFastaDataProvider( DatasetDataProvider ): """ Class that returns fasta format data in a list of maps of the form: { id: <fasta header id>, sequence: <joined lines of nucleotide/amino data> } """ settings = { 'ids' : 'list:str', } def __init__( self, source, ids=None, **kwargs ): """ :param ids: optionally return only ids (and sequences) that are in this list. Optional: defaults to None (provide all ids) :type ids: list or None """ source = bx_seq.twobit.TwoBitFile( source ) # TODO: validate is a 2bit super( FastaDataProvider, self ).__init__( source, **kwargs ) # could do in order provided with twobit self.ids = ids or self.source.keys() def __iter__( self ): for id_ in self.ids: yield { 'id': id_, 'seq': self.source[ id_ ] } # TODO: class WiggleDataProvider( base.LimitedOffsetDataProvider ): """ Class that returns chrom, pos, data from a wiggle source. 
""" COLUMN_NAMES = [ 'chrom', 'pos', 'value' ] settings = { 'named_columns' : 'bool', 'column_names' : 'list:str', } def __init__( self, source, named_columns=False, column_names=None, **kwargs ): """ :param named_columns: optionally return dictionaries keying each column with 'chrom', 'start', 'end', 'strand', or 'name'. Optional: defaults to False :type named_columns: bool :param column_names: an ordered list of strings that will be used as the keys for each column in the returned dictionaries. The number of key, value pairs each returned dictionary has will be as short as the number of column names provided. :type column_names: """ # TODO: validate is a wig # still good to maintain a ref to the raw source bc Reader won't self.raw_source = source self.parser = bx_wig.Reader( source ) super( WiggleDataProvider, self ).__init__( self.parser, **kwargs ) self.named_columns = named_columns self.column_names = column_names or self.COLUMN_NAMES def __iter__( self ): parent_gen = super( WiggleDataProvider, self ).__iter__() for three_tuple in parent_gen: if self.named_columns: yield dict( zip( self.column_names, three_tuple ) ) else: # list is not strictly necessary - but consistent yield list( three_tuple ) class BigWigDataProvider( base.LimitedOffsetDataProvider ): """ Class that returns chrom, pos, data from a wiggle source. """ COLUMN_NAMES = [ 'chrom', 'pos', 'value' ] settings = { 'named_columns' : 'bool', 'column_names' : 'list:str', } def __init__( self, source, chrom, start, end, named_columns=False, column_names=None, **kwargs ): """ :param chrom: which chromosome within the bigbed file to extract data for :type chrom: str :param start: the start of the region from which to extract data :type start: int :param end: the end of the region from which to extract data :type end: int :param named_columns: optionally return dictionaries keying each column with 'chrom', 'start', 'end', 'strand', or 'name'. Optional: defaults to False :type named_columns: bool :param column_names: an ordered list of strings that will be used as the keys for each column in the returned dictionaries. The number of key, value pairs each returned dictionary has will be as short as the number of column names provided. :type column_names: """ raise NotImplementedError( 'Work in progress' ) # TODO: validate is a wig # still good to maintain a ref to the raw source bc Reader won't self.raw_source = source self.parser = bx_bbi.bigwig_file.BigWigFile( source ) super( BigWigDataProvider, self ).__init__( self.parser, **kwargs ) self.named_columns = named_columns self.column_names = column_names or self.COLUMN_NAMES def __iter__( self ): parent_gen = super( BigWigDataProvider, self ).__iter__() for three_tuple in parent_gen: if self.named_columns: yield dict( zip( self.column_names, three_tuple ) ) else: # list is not strictly necessary - but consistent yield list( three_tuple ) # ----------------------------------------------------------------------------- binary, external conversion or tool class DatasetSubprocessDataProvider( external.SubprocessDataProvider ): """ Create a source from running a subprocess on a dataset's file. Uses a subprocess as its source and has a dataset (gen. as an input file for the process). """ # TODO: below should be a subclass of this and not RegexSubprocess def __init__( self, dataset, *args, **kwargs ): """ :param args: the list of strings used to build commands. 
:type args: variadic function args """ raise NotImplementedError( 'Abstract class' ) super( DatasetSubprocessDataProvider, self ).__init__( *args, **kwargs ) self.dataset = dataset class SamtoolsDataProvider( line.RegexLineDataProvider ): """ Data provider that uses samtools on a Sam or Bam file as its source. This can be piped through other providers (column, map, genome region, etc.). .. note:: that only the samtools 'view' command is currently implemented. """ FLAGS_WO_ARGS = 'bhHSu1xXcB' FLAGS_W_ARGS = 'fFqlrs' VALID_FLAGS = FLAGS_WO_ARGS + FLAGS_W_ARGS def __init__( self, dataset, options_string='', options_dict=None, regions=None, **kwargs ): """ :param options_string: samtools options in string form (flags separated by spaces) Optional: defaults to '' :type options_string: str :param options_dict: dictionary of samtools options Optional: defaults to None :type options_dict: dict or None :param regions: list of samtools regions strings Optional: defaults to None :type regions: list of str or None """ # TODO: into validate_source # precondition: dataset.datatype is a tabular.Sam or binary.Bam self.dataset = dataset options_dict = options_dict or {} # ensure regions are strings regions = [ str( r ) for r in regions ] if regions else [] # TODO: view only for now # TODO: not properly using overriding super's validate_opts, command here subcommand = 'view' # TODO:?? do we need a path to samtools? subproc_args = self.build_command_list( subcommand, options_string, options_dict, regions ) # TODO: the composition/inheritance here doesn't make a lot of sense subproc_provider = external.SubprocessDataProvider( *subproc_args ) super( SamtoolsDataProvider, self ).__init__( subproc_provider, **kwargs ) def build_command_list( self, subcommand, options_string, options_dict, regions ): """ Convert all init args to list form. """ command = [ 'samtools', subcommand ] # add options and switches, input file, regions list (if any) command.extend( self.to_options_list( options_string, options_dict ) ) command.append( self.dataset.file_name ) command.extend( regions ) return command def to_options_list( self, options_string, options_dict ): """ Convert both options_string and options_dict to list form while filtering out non-'valid' options. """ opt_list = [] # strip out any user supplied bash switch formatting -> string of option chars # then compress to single option string of unique, VALID flags with prefixed bash switch char '-' options_string = options_string.strip( '- ' ) validated_flag_list = list( set( flag for flag in options_string if flag in self.FLAGS_WO_ARGS ) ) # if sam add -S # TODO: not the best test in the world... if( ( self.dataset.ext == 'sam' ) and ( 'S' not in validated_flag_list ) ): validated_flag_list.append( 'S' ) if validated_flag_list: opt_list.append( '-' + ''.join( validated_flag_list ) ) for flag, arg in options_dict.items(): if flag in self.FLAGS_W_ARGS: opt_list.extend([ '-' + flag, str( arg ) ]) return opt_list @classmethod def extract_options_from_dict( cls, dictionary ): """ Separates valid samtools key/value pair options from a dictionary and returns both as a 2-tuple. """ # handy for extracting options from kwargs - but otherwise...
# TODO: could be abstracted to util.extract( dict, valid_keys_list ) options_dict = {} new_kwargs = {} for key, value in dictionary.items(): if key in cls.FLAGS_W_ARGS: options_dict[ key ] = value else: new_kwargs[ key ] = value return options_dict, new_kwargs class BcftoolsDataProvider( line.RegexLineDataProvider ): """ Data provider that uses bcftools on a bcf (or vcf?) file as its source. This can be piped through other providers (column, map, genome region, etc.). """ def __init__( self, dataset, **kwargs ): # TODO: as samtools raise NotImplementedError() super( BcftoolsDataProvider, self ).__init__( dataset, **kwargs ) class BGzipTabixDataProvider( base.DataProvider ): """ Data provider that uses g(un)zip on a file as its source. This can be piped through other providers (column, map, genome region, etc.). """ def __init__( self, dataset, **kwargs ): # TODO: as samtools - need more info on output format raise NotImplementedError() super( BGzipTabixDataProvider, self ).__init__( dataset, **kwargs ) class SQliteDataProvider( base.DataProvider ): """ Data provider that uses a sqlite database file as its source. Allows any query to be run and returns the resulting rows as sqlite3 row objects """ settings = { 'query': 'str' } def __init__( self, source, query=None, **kwargs ): self.query = query self.connection = sqlite.connect(source.dataset.file_name) super( SQliteDataProvider, self ).__init__( source, **kwargs ) def __iter__( self ): if (self.query is not None) and sqlite.is_read_only_query(self.query): for row in self.connection.cursor().execute(self.query): yield row else: yield class SQliteDataTableProvider( base.DataProvider ): """ Data provider that uses a sqlite database file as its source. Allows any query to be run and returns the resulting rows as arrays of arrays """ settings = { 'query': 'str', 'headers': 'bool', 'limit': 'int' } def __init__( self, source, query=None, headers=False, limit=sys.maxsize, **kwargs ): self.query = query self.headers = headers self.limit = limit self.connection = sqlite.connect(source.dataset.file_name) super( SQliteDataTableProvider, self ).__init__( source, **kwargs ) def __iter__( self ): if (self.query is not None) and sqlite.is_read_only_query(self.query): cur = self.connection.cursor() results = cur.execute(self.query) if self.headers: yield [col[0] for col in cur.description] for i, row in enumerate(results): if i >= self.limit: break yield [val for val in row] else: yield class SQliteDataDictProvider( base.DataProvider ): """ Data provider that uses a sqlite database file as its source. Allows any query to be run and returns the resulting rows as arrays of dicts """ settings = { 'query': 'str' } def __init__( self, source, query=None, **kwargs ): self.query = query self.connection = sqlite.connect(source.dataset.file_name) super( SQliteDataDictProvider, self ).__init__( source, **kwargs ) def __iter__( self ): if (self.query is not None) and sqlite.is_read_only_query(self.query): cur = self.connection.cursor() for row in cur.execute(self.query): yield [dict((cur.description[i][0], value) for i, value in enumerate(row))] else: yield
icaoberg/cellorganizer-galaxy-tools
datatypes/dataproviders/dataset.py
Python
gpl-3.0
31,600
[ "Galaxy", "pysam" ]
1b1a9e301543632b4e245923b4f85cfd1db0204ab83c57834f6d12bbf639c580
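The three SQlite*Provider classes above share one pattern: open the dataset's sqlite file, refuse anything that is not a read-only query, then stream rows. Here is a self-contained sketch of that pattern using only the standard-library sqlite3 module; the is_read_only() guard below is a naive stand-in for galaxy.util.sqlite.is_read_only_query, not Galaxy's implementation:

import sqlite3

def is_read_only(query):
    # Crude guard: accept plain SELECT statements only.
    return query.strip().lower().startswith("select")

def iter_rows(db_path, query):
    """Yield rows for a read-only query, nothing otherwise."""
    connection = sqlite3.connect(db_path)
    if query is not None and is_read_only(query):
        for row in connection.cursor().execute(query):
            yield row

# usage sketch:
#   for row in iter_rows("dataset.sqlite", "SELECT * FROM t"):
#       print(row)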
import params from neuron import h from neuron import numpy import mkmitral pc = h.ParallelContext() nhost = int(pc.nhost()) rank = int(pc.id()) matrank = (41, 61) domain = ((-500, 1500, 50), (-500, 2500, 50)) def f(ii): i = int(ii) density = numpy.zeros(matrank) for gid in range(i, params.Nmitral, nhost): m = mkmitral.mkmitral(gid) for sec in m.secdens: accumulate_density(sec, density, domain) print gid return density def accumulate_density(sec, density, domain): sec.push() for i in range(int(h.n3d())): x,y = (h.x3d(i), h.y3d(i)) r = (round(x, domain[0]),round(y, domain[1])) if False not in r: density[r] += 1 h.pop_section() def round(x, d): # map x onto a bin index for the domain d = (min, max, inc); return False if x falls outside [min, max] if (x < d[0] or x > d[1]): return False return int((x - d[0])/d[2]) def compute(): for i in range(nhost): pc.submit(f, i) den = numpy.zeros(matrank) while(pc.working()): den += pc.pyret() return den if __name__ == '__main__': pc.runworker() density = compute() pc.done() print "density max = ", density.max() density = density * (20/density.max()) import pickle pickle.dump(density, open('density.dat', 'w')) # the following works on linux if using openmpi from mayavi.mlab import barchart,show barchart(density) show()
JustasB/MitralSuite
Models/Migliore2014/mitral_dend_density.py
Python
mit
1,354
[ "Mayavi", "NEURON" ]
84e60ef0d5ffe6aad327bbf686455cd4586f54ed9f37ea2d2dd905fc25a5765f
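accumulate_density() and round() above implement 2-D binning by hand. For comparison, numpy's histogram2d does the same bookkeeping in one call; the coordinates below are random stand-ins for the h.x3d()/h.y3d() points walked by the NEURON code, and 40 x 60 bins are chosen for illustration (the script's matrank of (41, 61) is one larger per axis because its round() gives the upper domain edge its own index):

import numpy as np

# Stand-in coordinates over the same domain as the script.
x = np.random.uniform(-500, 1500, 1000)
y = np.random.uniform(-500, 2500, 1000)

# domain = ((-500, 1500, 50), (-500, 2500, 50)) -> bins of width 50
density, xedges, yedges = np.histogram2d(
    x, y, bins=[40, 60], range=[(-500, 1500), (-500, 2500)])
print(density.shape, density.max())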
#!/usr/bin/env python # Original provided by Mark Olesen import os,sys from platform import uname # from __future__ import print_function try: from PyFoam.ThirdParty.six import print_ except ImportError: def print_(*args): # Simple workaround to report the failure for a in args: sys.stdout.write(str(a)) sys.stdout.write(" ") sys.stdout.write("\n") print_("PROBLEM:") print_("'from PyFoam.ThirdParty.six import print_' did not work. Seems that this is not the correct PyFoam-library\n") description=""" print_ setup """ print_("Machine info:"," | ".join(uname())) print_() print_("Python version:",sys.version) print_() print_("Python executable:",sys.executable) print_() if sys.version_info<(2,3): print_("\nUnsupported Python-version (at least 2.3). Recommended is 2.6 or 2.7") elif sys.version_info<(2,4): print_("\nThis Python version does not support all features needed by PyFoam (get at least 2.4. Recommended is 2.6 or 2.7") elif sys.version_info<(2,6): print_("This version may not work anymore due to the port of PyFoam to Python 3") elif sys.version_info<(3,): print_("Python 2.7 is one development platform for PyFoam (along with Python 3)") elif sys.version_info>=(3,): print_("Python 3 is supported with PyFoam") try: print_("PYTHONPATH:", os.environ["PYTHONPATH"]) except KeyError: print_("PYTHONPATH is not set") print_() print_("Location of this utility:",sys.argv[0]) print_() try: import PyFoam import PyFoam.FoamInformation except ImportError: print_("PyFoam not in PYTHONPATH or regular search path. Don't see no sense in continuing") print_("Regular Python search-path:",sys.path) print_() sys.exit(-1) installed=PyFoam.FoamInformation.foamInstalledVersions() print_("Version", PyFoam.FoamInformation.foamVersion(), "Fork",PyFoam.FoamInformation.foamFork(), "of the installed",len(installed),"versions:") installedKeys=list(installed.keys()) installedKeys.sort() try: formatString="%%%ds : %%s" % max([1+len(a[0])+len(a[1]) for a in installedKeys]) except ValueError: # max failing because list is empty formatString="%s : %s" for k in installedKeys: print_(formatString % (k[0]+"-"+k[1],installed[k])) if PyFoam.FoamInformation.oldAppConvention(): print_(" This version of OpenFOAM uses the old calling convention") print_() print_("pyFoam-Version:",PyFoam.versionString()) # hardcodedVersion=(0,6,6,"development") hardcodedVersion=(0,6,5) if PyFoam.version()!=hardcodedVersion: print_("ALERT: Reported version",PyFoam.version(), "is different from hardcoded version", hardcodedVersion,"-> probably inconsistent library installation") print_() print_("Path where PyFoam was found (PyFoam.__path__) is",PyFoam.__path__) print_() print_("Configuration search path:",PyFoam.configuration().configSearchPath()) print_("Configuration files (used):",PyFoam.configuration().configFiles()) libLoc={} def testLibrary(name, textMissing=None, subModule=None, textThere=None, minVersion=None, versionAttribute="__version__"): global libLoc print_("%-20s : " % name, end=' ') try: module=name exec("import "+name) if subModule: exec("from "+name+" import "+subModule) module=subModule print_("Yes", end=' ') version=None try: version=eval(module+"."+versionAttribute) except AttributeError: pass if version: print_("\t version:",version, end=' ') if minVersion: if version<minVersion: print_("Insufficient version. 
At least",minVersion, "recommended for all features",end=' ') else: print_("Matches required version",minVersion,end=' ') if textThere: print_("\t",textThere, end=' ') print_() libLoc[name]=eval(name+'.__file__') return True except ImportError: print_("No", end=' ') if textMissing: print_("\t",textMissing, end=' ') print_() return False except RuntimeError: print_("Problem", end=' ') if textMissing: print_("\t",textMissing, end=' ') print_() return False except SyntaxError: print_("Syntax Error", end=' ') if textMissing: print_("\t",textMissing, end=' ') print_() return False except ValueError: print_("Value Error", end=' ') if textMissing: print_("\t",textMissing, end=' ') print_() return False except AttributeError: print_("Attribute Error", end=' ') if textMissing: print_("\t",textMissing, end=' ') print_() return False print_("\nInstalled libraries:") testLibrary("cython","Not used. Maybe will by used later to spped up parts of PyFoam") testLibrary("cProfile","Not a problem. Can't profile using this library") testLibrary("docutils","Not necessary. Needed for RestructuredText to HTML conversion") testLibrary("Gnuplot","Not a problem. Version from ThirdParty is used") testLibrary("hotshot","Not a problem. Can't profile using this library") testLibrary("line_profiler","Not a problem. Can't profile using this library") testLibrary("ipdb","Not necessary. Only makes debugging more comfortable") testLibrary("IPython", "Not necessary. But the interactive shell may be more comfortable", minVersion="2.0.0") testLibrary("matplotlib","Only Gnuplot-plotting possible") # testLibrary("matplotlib.pyplot","Only Gnuplot-plotting possible") testLibrary("mercurial","Not a problem. Used for experimental case handling", subModule="config",versionAttribute="util.version()") # testLibrary("nose","Only needed for running the unit-tests (developers only)") testLibrary("pytest","Only needed for running the unit-tests (developers only)") numpyPresent=testLibrary("numpy","Plotting and data comparison won't work") if not numpyPresent: numpypyPresent=testLibrary("numpypy","This workaround for PyPy does not work","This seems to by PyPy") if numpypyPresent: numpyPresent=testLibrary("numpy","Does not work in pypy","Numpy works with workaround") testLibrary("openpyxl","Not a problem. Only used for exporting pandas-data to Excel-files (advanced)") testLibrary("pandas","Not a problem. Only used for handling of advanced data-handling") testLibrary("ply","Not a problem. Version from ThirdParty is used") testLibrary("profile","Not a problem. Can't profile using this library") testLibrary("psyco","Not a problem. Acceleration not possible") testLibrary("PyQt4","Only some experimental GUI-stuff relies on this", subModule="Qt",versionAttribute="QT_VERSION_STR") testLibrary("PyQt4.Qwt5","Only an alternate plotting back-end") testLibrary("scipy","Not yet used. Possibly use signal-fitting etc") testLibrary("Tkinter","Not a problem. Used for the old version of DisplayBlockmesh and some matplotlib-implementations") testLibrary("twisted","Not yet used. Possibly reimplement MetaServer with it") testLibrary("vtk","Not a problem. Only used for some utilities", versionAttribute="VTK_VERSION") testLibrary("xlwt","Not a problem. Only used for exporting pandas-data to Excel-files", versionAttribute="__VERSION__") testLibrary("xlrd","Not a problem. 
Only used for importing Excel-files to pandas-data", versionAttribute="__VERSION__") print_() print_("Library locations") for l in sorted(libLoc.keys(),key=lambda a:a.lower()): print_("%-20s : %s" % (l,libLoc[l])) from os import path print_() print_("Checking additional environment variables") def checkVar(name,description,additionalCheck): print_("\nChecking for",name,":",description) if name in os.environ: print_(name,"set to",os.environ[name]) if not path.isdir(os.environ[name]): print_("MISCONFIGURATION:",os.environ[name],"is not a directory") else: additionalCheck(name) else: print_(name,"missing from environment") def checkPyFoamLocation(name): expectedPath=path.split(path.split(path.abspath(sys.argv[0]))[0])[0] if not path.samefile(expectedPath,os.environ[name]): print_("MISCONFIGURATION: PYFOAM_DIR expected to be",expectedPath) checkVar("PYFOAM_DIR", "Location of the PyFoam-installation. Not strictly necessary", checkPyFoamLocation) def checkPyFoamSiteLocation(name): binDir=path.join(os.environ[name],"bin") etcDir=path.join(os.environ[name],"etc") libDir=path.join(os.environ[name],"lib") if not path.isdir(binDir): print_("MISCONFIGURATION: no directory",binDir,"for site-specific scripts") else: found=False for p in os.environ["PATH"].split(":"): if path.isdir(p): if path.samefile(p,binDir): found=True break if not found: print_("MISCONFIGURATION:",binDir,"is not in the PATH",os.environ["PATH"]) else: print_("Site-specific scripts should be added to",binDir) if not path.isdir(etcDir): print_("MISCONFIGURATION: no directory",etcDir,"for site-specific configurations") else: print_("Site-specific configurations can be added to",etcDir) if not path.isdir(libDir): print_("MISCONFIGURATION: no directory",libDir,"for site-specific library files") else: print_("Site-specific library files can be added to",libDir, "Do NOT add to PYTHONPATH but import as PyFoam.Site") checkVar("PYFOAM_SITE_DIR", "Location of non-PyFoam-distribution scripts. Set and used by some Foam-distributions", checkPyFoamSiteLocation) # Should work with Python3 and Python2
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam
bin/pyFoamVersion.py
Python
gpl-2.0
10,018
[ "VTK" ]
329b92fb1841079951a98427c6b65bdca3b8a68deddce2fcf82245bb70c9ed82
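testLibrary() above boils down to: try the import, then read a version attribute if one exists. A compact sketch of that probe using importlib (the module names in the loop are arbitrary examples):

import importlib

def probe(name, version_attribute="__version__"):
    """Return (present, version) for a module name."""
    try:
        module = importlib.import_module(name)
    except ImportError:
        return (False, None)
    return (True, getattr(module, version_attribute, None))

for lib in ("numpy", "matplotlib", "no_such_library"):
    present, version = probe(lib)
    print("%-20s : %s %s" % (lib, "Yes" if present else "No", version or ""))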
''' Steps for problem.feature lettuce tests ''' # pylint: disable=C0111 # pylint: disable=W0621 from lettuce import world, step from lettuce.django import django_url from common import i_am_registered_for_the_course from problems_setup import PROBLEM_DICT, answer_problem, problem_has_answer, add_problem_to_course from nose.tools import assert_equal @step(u'I am viewing a "([^"]*)" problem with "([^"]*)" attempt') def view_problem_with_attempts(step, problem_type, attempts): i_am_registered_for_the_course(step, 'model_course') # Ensure that the course has this problem type add_problem_to_course(world.scenario_dict['COURSE'].number, problem_type, {'max_attempts': attempts}) # Go to the one section in the factory-created course # which should be loaded with the correct problem chapter_name = world.scenario_dict['SECTION'].display_name.replace(" ", "_") section_name = chapter_name url = django_url('/courses/%s/%s/%s/courseware/%s/%s' % (world.scenario_dict['COURSE'].org, world.scenario_dict['COURSE'].number, world.scenario_dict['COURSE'].display_name.replace(' ', '_'), chapter_name, section_name,)) world.browser.visit(url) @step(u'I am viewing a "([^"]*)" that shows the answer "([^"]*)"') def view_problem_with_show_answer(step, problem_type, answer): i_am_registered_for_the_course(step, 'model_course') # Ensure that the course has this problem type add_problem_to_course('model_course', problem_type, {'showanswer': answer}) # Go to the one section in the factory-created course # which should be loaded with the correct problem chapter_name = world.scenario_dict['SECTION'].display_name.replace(" ", "_") section_name = chapter_name url = django_url('/courses/%s/%s/%s/courseware/%s/%s' % (world.scenario_dict['COURSE'].org, world.scenario_dict['COURSE'].number, world.scenario_dict['COURSE'].display_name.replace(' ', '_'), chapter_name, section_name,)) world.browser.visit(url) @step(u'I am viewing a "([^"]*)" problem') def view_problem(step, problem_type): i_am_registered_for_the_course(step, 'model_course') # Ensure that the course has this problem type add_problem_to_course('model_course', problem_type) # Go to the one section in the factory-created course # which should be loaded with the correct problem chapter_name = world.scenario_dict['SECTION'].display_name.replace(" ", "_") section_name = chapter_name url = django_url('/courses/%s/%s/%s/courseware/%s/%s' % (world.scenario_dict['COURSE'].org, world.scenario_dict['COURSE'].number, world.scenario_dict['COURSE'].display_name.replace(' ', '_'), chapter_name, section_name,)) world.browser.visit(url) @step(u'External graders respond "([^"]*)"') def set_external_grader_response(step, correctness): assert(correctness in ['correct', 'incorrect']) response_dict = {'correct': True if correctness == 'correct' else False, 'score': 1 if correctness == 'correct' else 0, 'msg': 'Your problem was graded %s' % correctness} # Set the fake xqueue server to always respond # correct/incorrect when asked to grade a problem world.xqueue_server.set_grade_response(response_dict) @step(u'I answer a "([^"]*)" problem "([^"]*)ly"') def answer_problem_step(step, problem_type, correctness): """ Mark a given problem type correct or incorrect, then submit it. *problem_type* is a string representing the type of problem (e.g. 
'drop down') *correctness* is in ['correct', 'incorrect'] """ # Change the answer on the page input_problem_answer(step, problem_type, correctness) # Submit the problem check_problem(step) @step(u'I input an answer on a "([^"]*)" problem "([^"]*)ly"') def input_problem_answer(_, problem_type, correctness): """ Have the browser input an answer (either correct or incorrect) """ assert(correctness in ['correct', 'incorrect']) assert(problem_type in PROBLEM_DICT) answer_problem(problem_type, correctness) @step(u'I check a problem') def check_problem(step): # first scroll down so the loading mathjax button does not # cover up the Check button world.browser.execute_script("window.scrollTo(0,1024)") world.css_click("input.check") # Wait for the problem to finish re-rendering world.wait_for_ajax_complete() @step(u'The "([^"]*)" problem displays a "([^"]*)" answer') def assert_problem_has_answer(step, problem_type, answer_class): ''' Assert that the problem is displaying a particular answer. These correspond to the same correct/incorrect answers we set in answer_problem() We can also check that a problem has been left blank by setting answer_class='blank' ''' assert answer_class in ['correct', 'incorrect', 'blank'] assert problem_type in PROBLEM_DICT problem_has_answer(problem_type, answer_class) @step(u'I reset the problem') def reset_problem(_step): world.css_click('input.reset') # Wait for the problem to finish re-rendering world.wait_for_ajax_complete() @step(u'I press the button with the label "([^"]*)"$') def press_the_button_with_label(_step, buttonname): button_css = 'button span.show-label' elem = world.css_find(button_css).first world.css_has_text(button_css, elem) world.css_click(button_css) @step(u'The "([^"]*)" button does( not)? appear') def action_button_present(_step, buttonname, doesnt_appear): button_css = 'section.action input[value*="%s"]' % buttonname if bool(doesnt_appear): assert world.is_css_not_present(button_css) else: assert world.is_css_present(button_css) @step(u'the Show/Hide button label is "([^"]*)"$') def show_hide_label_is(_step, label_name): # The label text is changed by static/xmodule_js/src/capa/display.js # so give it some time to change on the page. label_css = 'button.show span.show-label' world.wait_for(lambda _: world.css_has_text(label_css, label_name)) @step(u'I should see a score of "([^"]*)"$') def see_score(_step, score): # The problem progress is changed by # cms/static/xmodule_js/src/capa/display.js # so give it some time to render on the page. score_css = 'section.problem-progress' expected_text = '({})'.format(score) world.wait_for(lambda _: world.css_has_text(score_css, expected_text)) @step(u'[Mm]y "([^"]*)" answer is( NOT)? marked "([^"]*)"') def assert_answer_mark(_step, problem_type, isnt_marked, correctness): """ Assert that the expected answer mark is visible for a given problem type. *problem_type* is a string identifying the type of problem (e.g. 
'drop down') *correctness* is in ['correct', 'incorrect', 'unanswered'] """ # Determine which selector(s) to look for based on correctness assert(correctness in ['correct', 'incorrect', 'unanswered']) assert(problem_type in PROBLEM_DICT) # At least one of the correct selectors should be present for sel in PROBLEM_DICT[problem_type][correctness]: if bool(isnt_marked): has_expected = world.is_css_not_present(sel) else: has_expected = world.is_css_present(sel) # As soon as we find the selector, break out of the loop if has_expected: break # Expect that we found the expected selector assert(has_expected)
TsinghuaX/edx-platform
lms/djangoapps/courseware/features/problems.py
Python
agpl-3.0
7,541
[ "VisIt" ]
b5d0441bea9a98fc577d03d02c1b5b6d11cd2cf3fabc4be8ac9958b5572ef016
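The three view_problem* steps above rebuild the same courseware URL by hand. A hypothetical helper (not part of the original module) that factors out that repetition could look like the sketch below; it assumes the world.scenario_dict layout the steps already rely on:

def courseware_url(scenario_dict, django_url):
    """Build the courseware URL for the factory-created course."""
    course = scenario_dict['COURSE']
    chapter_name = scenario_dict['SECTION'].display_name.replace(" ", "_")
    section_name = chapter_name
    return django_url('/courses/%s/%s/%s/courseware/%s/%s' % (
        course.org,
        course.number,
        course.display_name.replace(' ', '_'),
        chapter_name,
        section_name))

# inside a step:
#   world.browser.visit(courseware_url(world.scenario_dict, django_url))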
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Tests for the TreeKDE. """ import numpy as np from KDEpy.TreeKDE import TreeKDE import itertools import pytest args = list(itertools.product([[-1, 0, 1, 10], [1, 2, 3, 4], [1, 1, 1, 2]], [1, 2, 3])) @pytest.mark.parametrize("data, split_index", args) def test_additivity(data, split_index): """ Test the additive property of the KDE. """ x = np.linspace(-10, 10) # Fit to all data y = TreeKDE("epa").fit(data).evaluate(x) # Fit to splits, and compensate for smaller data using weights weight_1 = split_index / len(data) y_1 = TreeKDE("epa").fit(data[:split_index]).evaluate(x) * weight_1 weight_2 = (len(data) - split_index) / len(data) y_2 = TreeKDE("epa").fit(data[split_index:]).evaluate(x) * weight_2 # Additive property of the functions assert np.allclose(y, y_1 + y_2) @pytest.mark.parametrize("data, split_index", args) def test_additivity_with_weights(data, split_index): """ Test the additive property of the KDE. """ x = np.linspace(-10, 15) weights = np.arange(len(data)) + 1 weights = weights / np.sum(weights) # Fit to all data y = TreeKDE("epa").fit(data, weights).evaluate(x) # Split up the data and the weights data = list(data) weights = list(weights) data_first_split = data[:split_index] data_second_split = data[split_index:] weights_first_split = weights[:split_index] weights_second_split = weights[split_index:] # Fit to splits, and compensate for smaller data using weights y_1 = TreeKDE("epa").fit(data_first_split, weights_first_split).evaluate(x) * sum(weights_first_split) y_2 = TreeKDE("epa").fit(data_second_split, weights_second_split).evaluate(x) * sum(weights_second_split) # Additive property of the functions assert np.allclose(y, y_1 + y_2) @pytest.mark.parametrize( "kernel, bw, n, expected_result", [ ( "box", 0.1, 5, np.array([2.101278e-19, 3.469447e-18, 1.924501e00, 0.000000e00, 9.622504e-01]), ), ( "box", 0.2, 5, np.array([3.854941e-18, 2.929755e-17, 9.622504e-01, 0.000000e00, 4.811252e-01]), ), ("box", 0.6, 3, np.array([0.1603751, 0.4811252, 0.4811252])), ("tri", 0.6, 3, np.array([0.1298519, 0.5098009, 0.3865535])), ( "epa", 0.1, 6, np.array( [ 0.000000e00, 7.285839e-17, 2.251871e-01, 1.119926e00, 0.000000e00, 1.118034e00, ] ), ), ( "biweight", 2, 5, np.array([0.1524078, 0.1655184, 0.1729870, 0.1743973, 0.1696706]), ), ], ) def test_against_R_density(kernel, bw, n, expected_result): """ Test against the following function call in R: d <- density(c(0, 0.1, 1), kernel="{kernel}", bw={bw}, n={n}, from=-1, to=1); d$y """ data = np.array([0, 0.1, 1]) x = np.linspace(-1, 1, num=n) y = TreeKDE(kernel, bw=bw).fit(data).evaluate(x) assert np.allclose(y, expected_result, atol=10 ** (-2.7)) @pytest.mark.parametrize( "bw, n, expected_result", [ (1, 3, np.array([0.17127129, 0.34595518, 0.30233275])), ( 0.1, 5, np.array( [ 2.56493684e-22, 4.97598466e-06, 2.13637668e00, 4.56012216e-04, 1.32980760e00, ] ), ), (0.01, 3, np.array([0.0, 13.29807601, 13.29807601])), ], ) def test_against_scipy_density(bw, n, expected_result): """ Test against the following function call in SciPy: data = np.array([0, 0.1, 1]) x = np.linspace(-1, 1, {n}) bw = {bw}/np.asarray(data).std(ddof=1) density_estimate = gaussian_kde(dataset = data, bw_method = bw) y = density_estimate.evaluate(x) # Note that scipy weights its bandwidth by the covariance of the # input data. To make the results comparable to the other methods, # we divide the bandwidth by the sample standard deviation here.
""" data = np.array([0, 0.1, 1]) x = np.linspace(-1, 1, num=n) y = TreeKDE(kernel="gaussian", bw=bw).fit(data).evaluate(x) error = np.mean((y - expected_result) ** 2) assert error < 1e-10 if __name__ == "__main__": # --durations=10 <- May be used to show potentially slow tests pytest.main(args=[".", "--doctest-modules", "-v"])
tommyod/KDEpy
KDEpy/tests/test_TreeKDE.py
Python
gpl-3.0
4,770
[ "Gaussian" ]
daa55cf7d2a3adb3667907d1c90a578c5db4e73cefb3d25852f22304653fb5d3
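The additivity property these tests exercise -- a KDE over the full sample equals the weighted sum of KDEs over disjoint splits -- can be checked directly. A standalone version of the first test, using the same TreeKDE API as above:

import numpy as np
from KDEpy.TreeKDE import TreeKDE

data = np.array([-1.0, 0.0, 1.0, 10.0])
x = np.linspace(-10, 10)
split = 2

# KDE over all data.
y = TreeKDE("epa").fit(data).evaluate(x)

# KDEs over the two halves, weighted by their share of the sample.
w1 = split / len(data)
w2 = (len(data) - split) / len(data)
y_split = (TreeKDE("epa").fit(data[:split]).evaluate(x) * w1
           + TreeKDE("epa").fit(data[split:]).evaluate(x) * w2)

assert np.allclose(y, y_split)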
# Lint as: python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for builder_layers.""" from absl.testing import parameterized from lingvo import compat as tf from lingvo.core import builder_layers as layers from lingvo.core import cluster_factory from lingvo.core import hyperparams from lingvo.core import layers as lingvo_layers from lingvo.core import py_utils from lingvo.core import test_utils from lingvo.core import tshape import numpy as np class FCLayerTestNestedMapFPropInput(lingvo_layers.FCLayer): """lingvo_layers.FCLayer with nested map as input signature in FProp. This is for testing compliance of RepeatLayer with NestedMap inputs in FProp. """ def FProp(self, theta, in_nmap): """Overriding FProp input signature for FCLayer. Args: theta: NestedMap containing weights of layer. in_nmap: NestedMap containing at least the following: - features: The inputs tensor. Shaped [..., input_dim]. - paddings: The paddings tensor. Shaped [..., 1], where all but the last dimension match. Returns: out_nmap: NestedMap containing the following: - features: Output after applying projection (see super() for details). - paddings: Output (unused) paddings. 
""" outputs = super().FProp(theta, in_nmap.features, in_nmap.paddings) in_nmap.features = outputs return in_nmap class BuilderLayerTest(test_utils.TestCase, parameterized.TestCase): def testCreateNestedMapLayerFProp(self): with self.session(): x = tf.constant(1) y = tf.constant(2) params = layers.CreateNestedMapLayer.Params().Set( name='map', keys=['x', 'y']) layer = params.Instantiate() layer_out = self.evaluate(layer.FPropDefaultTheta(x, y)) self.assertEqual(1, layer_out.x) self.assertEqual(2, layer_out.y) def testFirstNLayerFProp(self): with self.session(): params = layers.FirstNLayer.Params() params.name = 'fn' params.n = 2 fn_layer = layers.FirstNLayer(params) a = tf.constant(1) b = tf.constant(2) c = tf.constant(3) fn_out = self.evaluate(fn_layer.FPropDefaultTheta(a, b, c)) self.assertEqual((1, 2), fn_out) def testArgIndexLayerFProp(self): with self.session(): params = layers.ArgIndexLayer.Params().Set(name='argidx', idx=[1, 3]) argidx_layer = layers.ArgIndexLayer(params) args = [tf.constant(i) for i in range(5)] argidx_out = self.evaluate(argidx_layer.FPropDefaultTheta(*args)) self.assertEqual((1, 3), argidx_out) def testSequentialLayer(self): g = tf.Graph() with g.as_default(), self.SetEval(True): tf.random.set_seed(24332) p = layers.SequentialLayer.Params().Set( name='seq', repeat=2, sub=[ lingvo_layers.FCLayer.Params().Set( name='foo', input_dim=32, output_dim=8), lingvo_layers.FCLayer.Params().Set( name='bar', input_dim=8, output_dim=8), lingvo_layers.FCLayer.Params().Set( name='baz', input_dim=8, output_dim=32), lingvo_layers.DropoutLayer.Params().Set( name='dropout', keep_prob=0.5) ]) l = p.Instantiate() x = tf.random.normal(shape=[2, 32]) y = l.FPropDefaultTheta(x) l.vars.Transform(lambda x: x.shape).VLog(0, 'vars: ') with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) x_val, y_val, w = self.evaluate([x, y, l.vars]) act = x_val # relu(act \dot w + b) for i in range(2): act = np.maximum(0, np.dot(act, w.rep[i].foo.w) + w.rep[i].foo.b) act = np.maximum(0, np.dot(act, w.rep[i].bar.w) + w.rep[i].bar.b) act = np.maximum(0, np.dot(act, w.rep[i].baz.w) + w.rep[i].baz.b) self.assertAllClose(act, y_val) def testEmptySequentialLayer(self): g = tf.Graph() with g.as_default(): tf.random.set_seed(24332) p = layers.SequentialLayer.Params().Set(name='seq') l = p.Instantiate() x = tf.random.normal(shape=[2, 32]) y = l.FPropDefaultTheta(x) self.assertIsInstance(y, tf.Tensor) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) x_val, y_val = self.evaluate([x, y]) self.assertAllEqual(x_val, y_val) def testEmptySequentialLayerFPropMeta(self): g = tf.Graph() with g.as_default(): p = layers.SequentialLayer.Params().Set(name='seq') l = p.Instantiate() x = py_utils.NestedMap(val=tf.random.normal(shape=[2, 32])) y = l.FPropDefaultTheta(x) self.assertIsInstance(y.val, tf.Tensor) y_shape = l.FPropMeta( p, py_utils.Transform(lambda t: tshape.Shape(t.shape), x)).out_shapes[0] self.assertEqual(y.val.shape.as_list(), y_shape.val.ToTensorShape().as_list()) def testUnarySequentialLayer(self): g = tf.Graph() with g.as_default(), self.SetEval(True): tf.random.set_seed(24332) p = layers.UnarySequentialLayer.Params().Set( name='seq', sub=[ lingvo_layers.FCLayer.Params().Set( name='foo', input_dim=32, output_dim=8), lingvo_layers.FCLayer.Params().Set( name='bar', input_dim=8, output_dim=8), lingvo_layers.FCLayer.Params().Set( name='baz', input_dim=8, output_dim=32), lingvo_layers.DropoutLayer.Params().Set( name='dropout', keep_prob=0.5) ]) l = p.Instantiate() x = 
tf.random.normal(shape=[2, 32]) y = l.FPropDefaultTheta(x) l.vars.Transform(lambda x: x.shape).VLog(0, 'vars: ') with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) x_val, y_val, w = self.evaluate([x, y, l.vars]) act = x_val # relu(act \dot w + b) act = np.maximum(0, np.dot(act, w.foo.w) + w.foo.b) act = np.maximum(0, np.dot(act, w.bar.w) + w.bar.b) act = np.maximum(0, np.dot(act, w.baz.w) + w.baz.b) self.assertAllClose(act, y_val) def testParallelLayer(self): g = tf.Graph() with g.as_default(), self.SetEval(True): tf.random.set_seed(24332) p = layers.ParallelLayer.Params().Set( name='test', merge=lambda xs: tuple([tf.add_n(x) for x in zip(*xs)]), sub=[ lingvo_layers.FCLayer.Params().Set( name='foo', input_dim=32, output_dim=4), lingvo_layers.FCLayer.Params().Set( name='bar', input_dim=32, output_dim=4), layers.SequentialLayer.Params().Set( name='seq', sub=[ lingvo_layers.FCLayer.Params().Set( name='baz', input_dim=32, output_dim=4), lingvo_layers.DropoutLayer.Params().Set( name='dropout', keep_prob=0.5) ]) ]) l = p.Instantiate() x = tf.random.normal(shape=[2, 32]) y = l.FPropDefaultTheta(x) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) x_val, y_val, w = self.evaluate([x, y, l.vars]) out = [] act = x_val # relu(act \dot w + b) out += [np.maximum(0, np.matmul(act, w.foo.w) + w.foo.b)] self.assertEqual(out[-1].shape, (2, 4)) out += [np.maximum(0, np.matmul(act, w.bar.w) + w.bar.b)] self.assertEqual(out[-1].shape, (2, 4)) out += [np.maximum(0, np.matmul(act, w.seq.baz.w) + w.seq.baz.b)] self.assertEqual(out[-1].shape, (2, 4)) np_result = out[0] for v in out[1:]: np_result = np.add(np_result, v) self.assertAllClose(np_result, y_val) def testParallelMatmulLayer(self): g = tf.Graph() with g.as_default(): tf.random.set_seed(24332) def MergeFn(xs): result = [] for x in zip(*xs): val = x[0] for v in x[1:]: val = tf.matmul(val, v) result.append(val) return tuple(result) p = layers.ParallelLayer.Params().Set( name='parallel', merge=MergeFn, sub=[ lingvo_layers.FCLayer.Params().Set( name='foo', input_dim=32, output_dim=4), lingvo_layers.FCLayer.Params().Set( name='bar', input_dim=32, output_dim=4), lingvo_layers.FCLayer.Params().Set( name='baz', input_dim=32, output_dim=4) ]) l = p.Instantiate() x = tf.random.normal(shape=[2, 4, 32]) y = l.FPropDefaultTheta(x) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) x_val, y_val, w = self.evaluate([x, y, l.vars]) out = [] act = x_val # relu(act \dot w + b) out += [np.maximum(0, np.matmul(act, w.foo.w) + w.foo.b)] self.assertEqual(out[-1].shape, (2, 4, 4)) out += [np.maximum(0, np.matmul(act, w.bar.w) + w.bar.b)] self.assertEqual(out[-1].shape, (2, 4, 4)) out += [np.maximum(0, np.matmul(act, w.baz.w) + w.baz.b)] self.assertEqual(out[-1].shape, (2, 4, 4)) np_result = out[0] for v in out[1:]: np_result = np.matmul(np_result, v) self.assertAllClose(np_result, y_val, atol=1e-5, rtol=1e-5) def testParalellMultiOutputsLayer(self): g = tf.Graph() with g.as_default(): tf.random.set_seed(24332) def Merge(xs): rets = [] for x in zip(*xs): if x[0] is None: rets.append(None) else: rets.append(tf.add_n(list(x))) return tuple(rets) p = layers.ParallelLayer.Params().Set( name='parallel', merge=Merge, sub=[ lingvo_layers.ConvLayer.Params().Set( name='p%d' % i, filter_shape=(3, 3, 3, 5), filter_stride=(1, 1), batch_norm=False) for i in range(3) ]) l = p.Instantiate() x = tf.zeros(shape=[2, 32, 32, 3]) y0, y1 = l.FPropDefaultTheta(x) y_sum = tf.reduce_sum(y0) # Ensures the 2nd return value 
(None) are handled properly. self.assertEqual(None, y1) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) y_sum_val = self.evaluate(y_sum) self.assertEqual(y_sum_val, 0.) def testMapLayer(self): g = tf.Graph() with g.as_default(): tf.random.set_seed(24332) p = layers.MapLayer.Params().Set( name='map', fn=tf.reduce_max, kwargs={'axis': 1}) l = p.Instantiate() x0, x1 = [tf.random.normal(shape=[2, 3, 5])] * 2 y0, y1 = l.FPropDefaultTheta(x0, x1) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) vx0, vx1, vy0, vy1 = self.evaluate([x0, x1, y0, y1]) self.assertAllClose(np.max(vx0, 1), vy0) self.assertAllClose(np.max(vx1, 1), vy1) def testLinearLayer(self): g = tf.Graph() with g.as_default(): tf.random.set_seed(24332) p = layers.LinearLayer.Params().Set( name='test', input_dims=10, output_dims=5) l = p.Instantiate() xs = [] ys = [] for shape in ([2, 10], [2, 3, 10], [2, 3, 5, 10], [2, 3, 5, 7, 10]): x = tf.random.normal(shape=shape) y = l.FPropDefaultTheta(x) xs += [x] ys += [y] with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) xs_val, ys_val, w_val = self.evaluate([xs, ys, l.vars]) self.assertEqual(w_val.w.shape, (10, 5)) for (xv, yv) in zip(xs_val, ys_val): self.assertAllClose(np.matmul(xv, w_val.w), yv) def testBiasLayer(self): g = tf.Graph() with g.as_default(): tf.random.set_seed(24332) p = layers.BiasLayer.Params().Set(name='test', dims=10) l = p.Instantiate() x = tf.random.normal(shape=[2, 10]) y = l.FPropDefaultTheta(x) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) x_val, y_val, w_val = self.evaluate([x, y, l.vars]) self.assertEqual(w_val.b.shape, (10,)) self.assertAllClose(x_val + w_val.b, y_val) def testGraphTensors(self): graph_tensors = layers.GraphTensors() graph_tensors.StoreTensor('t', py_utils.NestedMap(a=py_utils.NestedMap(b='c'))) self.assertEqual('c', graph_tensors.GetTensor('t.a.b')) def testSignatureParsing(self): sig = layers.GraphSignature('a,b->c') self.assertEqual(['a', 'b'], sig.inputs) self.assertEqual(['c'], sig.outputs) sig = layers.GraphSignature('[a,b],d->c') self.assertEqual([['a', 'b'], 'd'], sig.inputs) self.assertEqual(['c'], sig.outputs) # also test nested structures, like nested lists and dicts. sig = layers.GraphSignature('(x=a,y=b)->c') self.assertEqual([{'x': 'a', 'y': 'b'}], sig.inputs) self.assertEqual(['c'], sig.outputs) # Make sure that empty lists and dicts work. 
sig = layers.GraphSignature('(x=[]),()->d') self.assertEqual([{'x': []}, {}], sig.inputs) sig = layers.GraphSignature('(x=a,y=[f,(z=g.h)]),[d,e]->j') self.assertEqual([{ 'x': 'a', 'y': ['f', { 'z': 'g.h' }] }, ['d', 'e']], sig.inputs) self.assertEqual(['j'], sig.outputs) def testGraphLayer(self): g = tf.Graph() with g.as_default(), self.SetEval(True): tf.random.set_seed(24332) def _FnMeta(*shapes): return py_utils.NestedMap(flops=1, out_shapes=shapes) p = layers.GraphLayer.Params().Set( name='graph', input_endpoints=['x'], output_endpoints=['y'], sub=[ ('x.a->y.c', layers.FnLayer.Params().Set(fn=lambda x: 2 * x, fn_meta=_FnMeta)), ('x.b->y.d', layers.FnLayer.Params().Set( name='bar', fn=lambda x: x + 2, fn_meta=_FnMeta)), ('y.c,y.d->y.e, y.f', layers.FnLayer.Params().Set( name='baz', fn=lambda x, y: (x + y, x - y), fn_meta=_FnMeta)), ]) l = p.Instantiate() x = py_utils.NestedMap(a=tf.constant(1.0), b=tf.constant(2.0)) y = l.FProp(l.theta, x) y_shape = l.FPropMeta( p, py_utils.Transform(lambda t: tshape.Shape(t.shape), x)).out_shapes[0] self.assertDictEqual( py_utils.Transform(lambda t: t.shape.as_list(), y), py_utils.Transform(lambda t: t.ToTensorShape().as_list(), y_shape)) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) y_val = self.evaluate(y) print(y_val) self.assertEqual(py_utils.NestedMap(c=2.0, d=4.0, e=6.0, f=-2.0), y_val) def testGraphLayerReusedLayers(self): g = tf.Graph() with g.as_default(), self.SetEval(True): tf.random.set_seed(24332) p = layers.GraphLayer.Params().Set( name='graph', input_endpoints=['x'], output_endpoints=['y'], sub=[ # These three lines are the same as testGraphLayer. ('x.a->y.c', 0), ('x.b->y.d', 1), ('y.c,y.d->y.e,y.f', 2), # But here, we run layer 0 again to generate y.g. ('y.f->y.g', 0), ], sub_layers=[ layers.FnLayer.Params().Set(fn=lambda x: 2 * x), layers.FnLayer.Params().Set(name='bar', fn=lambda x: x + 2), layers.FnLayer.Params().Set( name='baz', fn=lambda x, y: (x + y, x - y)), ]) l = p.Instantiate() x = py_utils.NestedMap(a=tf.constant(1.0), b=tf.constant(2.0)) y = l.FProp(l.theta, x) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) y_val = self.evaluate(y) print(y_val) self.assertEqual( py_utils.NestedMap(c=2.0, d=4.0, e=6.0, f=-2.0, g=-4.0), y_val) def testSoftCondLayer(self): num_experts = 100 with self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(24332) p = layers.SoftCondLayer.Params().Set( name='soft_cond', cond_dim=2, num_experts=num_experts, body=lingvo_layers.FCLayer.Params().Set(input_dim=2, output_dim=2)) l = p.Instantiate() x = tf.random.normal(shape=[1, 2, 2]) y = l.FPropDefaultTheta(x) self.evaluate(tf.global_variables_initializer()) x_val, y_val, vars_val = self.evaluate([x, y, l.vars]) p_nz = layers.SoftCondLayer.Params().Set( name='soft_cond_nonzeros', cond_dim=2, num_experts=num_experts, nonzeros_mean=True, body=lingvo_layers.FCLayer.Params().Set(input_dim=2, output_dim=2)) l_nz = p_nz.Instantiate() x_nz = tf.random.normal(shape=[1, 2, 2]) y_nz = l_nz.FPropDefaultTheta(x_nz) self.evaluate(tf.global_variables_initializer()) x_nz_val, y_nz_val, vars_nz_val = self.evaluate([x_nz, y_nz, l_nz.vars]) np_val = x_val[0] np_nz_val = x_nz_val[0] taks_weight = np.exp(-1.0 * np.dot(np.mean(np_val, 0), vars_val.w)) taks_weight = 1.0 / (1.0 + taks_weight) nzs = np.count_nonzero(np_nz_val, 0).astype('float32') + 1e-10 taks_weight_nz = np.exp(-1.0 * np.dot(np.sum(np_nz_val, 0) / nzs, vars_nz_val.w)) taks_weight_nz = 1.0 / (1.0 + taks_weight_nz) weighted_weight = 
np.einsum('i,ijk->jk', taks_weight, vars_val.body.w) weighted_weight_nz = np.einsum('i,ijk->jk', taks_weight_nz, vars_nz_val.body.w) weighted_bias = np.einsum('i,ij->j', taks_weight, vars_val.body.b) weighted_bias_nz = np.einsum('i,ij->j', taks_weight_nz, vars_nz_val.body.b) np_val_out = np.maximum(0, np.dot(np_val, weighted_weight) + weighted_bias) np_val_out_nz = np.maximum( 0, np.dot(np_nz_val, weighted_weight_nz) + weighted_bias_nz) self.assertAllClose(np_val_out, y_val[0]) self.assertAllClose(np_val_out_nz, y_nz_val[0]) def testRepeatLayer(self): repeat = 100 with self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(24332) p = layers.RepeatLayer.Params().Set( name='recurrent', repeat=repeat, body=lingvo_layers.FCLayer.Params().Set(input_dim=2, output_dim=2)) l = p.Instantiate() x = tf.random.normal(shape=[2, 2]) y = l.FPropDefaultTheta(x) self.evaluate(tf.global_variables_initializer()) x_val, y_val, w = self.evaluate([x, y, l.vars]) np_val = x_val # relu(act \dot w + b) for i in range(repeat): np_val = np.maximum(0, np.dot(np_val, w.body.w[i]) + w.body.b[i]) self.assertAllClose(np_val, y_val) @parameterized.parameters(('eval_only', True), ('always', False)) def testRepeatLayerUnrolledEval(self, unroll, do_eval): repeat = 100 with cluster_factory.ForTestingWorker( mode='sync', job='trainer_client', do_eval=do_eval): tf.random.set_seed(24332) p = layers.RepeatLayer.Params().Set( name='recurrent', repeat=repeat, per_layer_vars=True, unroll=unroll, body=lingvo_layers.FCLayer.Params().Set(input_dim=2, output_dim=2)) l = p.Instantiate() x = tf.random.normal(shape=[2, 2]) y = l.FPropDefaultTheta(x) self.evaluate(tf.global_variables_initializer()) x_val, y_val, w = self.evaluate([x, y, l.vars]) np_val = x_val # relu(act \dot w + b) for i in range(repeat): body_i = w['body_iter_%05d' % i] np_val = np.maximum(0, np.dot(np_val, body_i.w) + body_i.b) self.assertAllClose(np_val, y_val) def testRepeatLayerNestedMapFPropInputSignature(self): """Tests RepeatLayer having body layer with NestedMap in FProp signature.""" repeat = 100 input_dim, output_dim = 2, 2 # Reference RepeatLayer. ref_p = layers.RepeatLayer.Params().Set( name='ref_recurrent', repeat=repeat, body=lingvo_layers.FCLayer.Params().Set( input_dim=input_dim, output_dim=output_dim)) # RepeatLayer with NestedMap in `body` FProp input signature. new_p = layers.RepeatLayer.Params().Set( name='nested_map_recurrent', repeat=repeat, body=FCLayerTestNestedMapFPropInput.Params().Set( input_dim=input_dim, output_dim=output_dim)) # Verify FProp output equality for both layers. ref_layer = ref_p.Instantiate() new_layer = new_p.Instantiate() assign_op = [ tf.assign(dst, src) for (src, dst) in zip(ref_layer.vars.Flatten(), new_layer.vars.Flatten()) ] with self.session() as sess: tf.random.set_seed(24332) sess.run(tf.global_variables_initializer()) sess.run(assign_op) inputs = tf.random.normal(shape=[2, 2]) paddings = tf.zeros((2, 1)) ref_outputs = ref_layer.FPropDefaultTheta(inputs) new_out_nmap = new_layer.FPropDefaultTheta( py_utils.NestedMap(features=inputs, paddings=paddings)) ref_out_vals = sess.run(ref_outputs) new_out_vals = sess.run(new_out_nmap.features) self.assertAllClose(ref_out_vals, new_out_vals) def testRepeatLayerNestedMapBProp(self): """Tests RepeatLayer having body layer with mutable NestedMap.""" repeat = 3 input_dim, output_dim = 2, 2 # RepeatLayer with NestedMap in `body` FProp input signature. 
p = layers.RepeatLayer.Params().Set( name='nested_map_recurrent', repeat=repeat, body=FCLayerTestNestedMapFPropInput.Params().Set( input_dim=input_dim, output_dim=output_dim)) # Verify FProp output equality for both layers. layer = p.Instantiate() with self.session() as sess: tf.random.set_seed(24332) sess.run(tf.global_variables_initializer()) inputs = tf.random.normal(shape=[2, 5, 2]) paddings = tf.zeros((2, 5, 1)) args = py_utils.NestedMap(features=inputs, paddings=paddings) outputs = layer.FPropDefaultTheta(args) # Mutate 'args' before the bprop. args.features = tf.transpose(args.features, [1, 0, 2]) args.paddings = tf.transpose(args.paddings, [1, 0, 2]) in_grads = tf.gradients(ys=tf.nest.flatten(outputs), xs=[inputs]) sess.run(in_grads) def testRepeatLayerNestedMapFPropInputRaisesErrorWithNoneInput(self): """Tests RepeatLayer raise ValueError with None values in input map.""" repeat = 100 # RpeatLayer with NestedMap in FProp input signature. p = layers.RepeatLayer.Params().Set( name='nested_map_recurrent', repeat=repeat, body=FCLayerTestNestedMapFPropInput.Params().Set( input_dim=2, output_dim=2)) layer = p.Instantiate() with self.session() as sess: tf.random.set_seed(24332) sess.run(tf.global_variables_initializer()) inputs = tf.random.normal(shape=[2, 2]) # Set paddings to None. paddings = None with self.assertRaisesRegex( ValueError, 'Each value in the input NestedMap must be a tensor.'): layer.FPropDefaultTheta( py_utils.NestedMap(features=inputs, paddings=paddings)) def testParallelRepeatLayerLayer(self): repeat = 100 body_p = layers.SequentialLayer.Params().Set( name='body', sub=[ layers.LinearLayer.Params().Set( name='ln1', input_dims=2, output_dims=4), layers.FnLayer.Params().Set( name='relu', fn=tf.nn.relu, fn_meta=lambda x: py_utils.NestedMap(flops=1, out_shapes=(x,))), layers.LinearLayer.Params().Set( name='ln2', input_dims=4, output_dims=2) ]) with self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(24332) p = layers.ParallelRepeatLayer.Params().Set( name='moe', repeat=repeat, body=body_p) l = p.Instantiate() x = tf.random.normal(shape=[repeat, 2, 2]) y = l.FPropDefaultTheta(x) self.evaluate(tf.global_variables_initializer()) x_val, y_val, w = self.evaluate([x, y, l.vars]) np_val = [] for i in range(repeat): # relu(act \dot w_1) \dot w_2 np_val.append( np.dot( np.maximum(0, np.dot(x_val[i], w.body.ln1.w[i])), w.body.ln2.w[i])) np_val = np.stack(np_val) self.assertAllClose(np_val, y_val) def testRematerializationLayer(self): with self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(24332) def MulSumFnMeta(x): return py_utils.NestedMap(flops=2, out_shapes=(x,)) def AddFnMeta(x, y): del y return py_utils.NestedMap(flops=2, out_shapes=(x,)) p = layers.GraphLayer.Params().Set( name='graph', input_endpoints=['a', 'b'], output_endpoints=['e'], sub=[ ('a->c', layers.FnLayer.Params().Set( fn=lambda x: 2 * x, fn_meta=MulSumFnMeta)), ('b->d', layers.FnLayer.Params().Set( name='bar', fn=lambda x: x + 2, fn_meta=MulSumFnMeta)), ('c,d->e', layers.FnLayer.Params().Set( name='baz', fn=lambda x, y: x + y, fn_meta=AddFnMeta)), ]) p = layers.RematerializationLayer.Params().Set(name='remat', body=p) l = p.Instantiate() x = tf.constant(1.0) y = tf.constant(2.0) z = l.FProp(l.theta, x, y) self.evaluate(tf.global_variables_initializer()) z_val = self.evaluate(z) print(z_val) self.assertAllClose(6.0, z_val) def testPrintShapeLayer(self): g = tf.Graph() with g.as_default(): p = layers.PrintShapeLayer.Params().Set(name='test') l = p.Instantiate() x = tf.constant(1.0) y = 
l.FPropDefaultTheta(x) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) x_val, y_val = self.evaluate([x, y]) self.assertEqual(x_val, y_val) def testReshapeLayer(self): g = tf.Graph() with g.as_default(): p = layers.ReshapeLayer.Params().Set(name='test', shape=[-1, 2, 1]) l = p.Instantiate() x = tf.constant([[1.0, 2.0], [3.0, 2.0]]) y = l.FPropDefaultTheta(x) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) self.assertAllEqual(self.evaluate(tf.shape(x)), [2, 2]) self.assertAllEqual(self.evaluate(tf.shape(y)), [2, 2, 1]) def testConcatLayer(self): g = tf.Graph() with g.as_default(): p = layers.ConcatLayer.Params().Set(name='test', axis=1) l = p.Instantiate() x = tf.constant([[1.0, 2.0], [3.0, 4.0]]) y = tf.constant([[41.0, 42.0], [43.0, 44.0]]) y = l.FPropDefaultTheta(x, y) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) self.assertAllEqual( self.evaluate(y), [[1.0, 2.0, 41.0, 42.0], [3.0, 4.0, 43.0, 44.0]]) def testSliceLayer(self): g = tf.Graph() with g.as_default(): p1 = layers.SliceHelper()[:, 0].Set(name='test1') l1 = p1.Instantiate() p2 = layers.SliceHelper()[:, 1:3].Set(name='test2') # Test serialization/deserialization. rebuilt_p2 = hyperparams.InstantiableParams.FromProto(p2.ToProto()) l2 = rebuilt_p2.Instantiate() x = tf.constant([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0]]) y1 = l1.FPropDefaultTheta(x) y2 = l2.FPropDefaultTheta(x) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) self.assertAllEqual(self.evaluate(y1), [1.0, 3.0]) self.assertAllEqual(self.evaluate(y2), [[2.0, 3.0], [2.0, 1.0]]) if __name__ == '__main__': tf.test.main()
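# Illustrative sketch (editor's addition, not part of the original test file):
# every test above exercises the same lingvo idiom -- build a Params object,
# Instantiate() it, then call FPropDefaultTheta on input tensors. The layer
# names and dims below are arbitrary examples mirroring that usage:
#
#     p = layers.SequentialLayer.Params().Set(
#         name='seq',
#         sub=[lingvo_layers.FCLayer.Params().Set(
#             name='fc', input_dim=32, output_dim=8)])
#     layer = p.Instantiate()
#     out = layer.FPropDefaultTheta(tf.random.normal(shape=[2, 32]))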
tensorflow/lingvo
lingvo/core/builder_layers_test.py
Python
apache-2.0
28,447
[ "MOE" ]
73a9bdbdf1c293a882c8b52bbac653e30dfa738ef3e7e38d9322a60342123450
# -*- coding: utf8 -*- import os import bitcoin from secp256k1 import PrivateKey import pytest import serpent from rlp.utils import decode_hex from ethereum import tester, utils, abi from ethereum.utils import safe_ord, big_endian_to_int # Test EVM contracts serpent_code = ''' def main(a,b): return(a ^ b) ''' def test_evm(): evm_code = serpent.compile(serpent_code) translator = abi.ContractTranslator(serpent.mk_full_signature( serpent_code)) data = translator.encode('main', [2, 5]) s = tester.state() c = s.evm(evm_code) o = translator.decode('main', s.send(tester.k0, c, 0, data)) assert o == [32] # Test serpent compilation of variables using _with_, doing a simple # arithmetic calculation 20 * 30 + 10 = 610 sixten_code =\ ''' (with 'x 10 (with 'y 20 (with 'z 30 (seq (set 'a (add (mul (get 'y) (get 'z)) (get 'x))) (return (ref 'a) 32) ) ) ) ) ''' def test_sixten(): s = tester.state() c = decode_hex('1231231231231234564564564564561231231231') s.block.set_code(c, serpent.compile_lll(sixten_code)) o1 = s.send(tester.k0, c, 0) assert utils.big_endian_to_int(o1) == 610 with_code = \ """ def f1(): o = array(4) with x = 5: o[0] = x with y = 7: o[1] = y with x = 8: o[2] = x o[3] = x return(o:arr) def f2(): with x = 5: with y = 7: x = 2 return(x) def f3(): with x = 5: with y = seq(x = 7, 2): return(x) def f4(): o = array(4) with x = 5: o[0] = x with y = 7: o[1] = y with x = x: o[2] = x with y = x: o[3] = y return(o:arr) """ def test_with(): s = tester.state() c = s.abi_contract(with_code) assert c.f1() == [5, 7, 8, 5] assert c.f2() == 2 assert c.f3() == 7 assert c.f4() == [5, 7, 5, 5] # Test Serpent's import mechanism mul2_code = \ ''' def double(v): log(v) return(v*2) ''' filename = "mul2_qwertyuioplkjhgfdsa.se" returnten_code = \ ''' extern mul2: [double:i] x = create("%s") log(x) return(x.double(5)) ''' % filename def test_returnten(): s = tester.state() open(filename, 'w').write(mul2_code) c = s.contract(returnten_code) o1 = s.send(tester.k0, c, 0) os.remove(filename) assert utils.big_endian_to_int(o1) == 10 # Test inset inset_inner_code = \ ''' def g(n): return(n + 10) def f(n): return n*2 ''' filename2 = "inner_qwertyuioplkjhgfdsa.se" inset_outer_code = \ ''' inset("%s") def foo(): res = self.g(12) return res ''' % filename2 def test_inset(): s = tester.state() open(filename2, 'w').write(inset_inner_code) c = s.abi_contract(inset_outer_code) assert c.foo() == 22 os.remove(filename2) # Inset at the end instead inset_inner_code2 = \ ''' def g(n): return(n + 10) def f(n): return n*2 ''' filename25 = "inner_qwertyuioplkjhgfdsa.se" inset_outer_code2 = \ ''' def foo(): res = self.g(12) return res inset("%s") ''' % filename25 def test_inset2(): s = tester.state() open(filename25, 'w').write(inset_inner_code2) c = s.abi_contract(inset_outer_code2) assert c.foo() == 22 os.remove(filename25) # Test a simple namecoin implementation namecoin_code =\ ''' def main(k, v): if !self.storage[k]: self.storage[k] = v return(1) else: return(0) ''' def test_namecoin(): s = tester.state() c = s.abi_contract(namecoin_code) o1 = c.main("george", 45) assert o1 == 1 o2 = c.main("george", 20) assert o2 == 0 o3 = c.main("harry", 60) assert o3 == 1 assert s.block.to_dict() # Test a simple currency implementation currency_code = ''' data balances[2^160] def init(): self.balances[msg.sender] = 1000 def query(addr): return(self.balances[addr]) def send(to, value): from = msg.sender fromvalue = self.balances[from] if fromvalue >= value: self.balances[from] = fromvalue - value self.balances[to] = self.balances[to] + value 
log(from, to, value) return(1) else: return(0) ''' def test_currency(): s = tester.state() c = s.abi_contract(currency_code, sender=tester.k0) o1 = c.send(tester.a2, 200) assert o1 == 1 o2 = c.send(tester.a2, 900) assert o2 == 0 o3 = c.query(tester.a0) assert o3 == 800 o4 = c.query(tester.a2) assert o4 == 200 # Test a data feed data_feed_code = ''' data creator data values[] def init(): self.creator = msg.sender def set(k, v): if msg.sender == self.creator: self.values[k] = v return(1) else: return(0) def get(k): return(self.values[k]) ''' def test_data_feeds(): s = tester.state() c = s.abi_contract(data_feed_code, sender=tester.k0) o2 = c.get(500) assert o2 == 0 o3 = c.set(500, 19) assert o3 == 1 o4 = c.get(500) assert o4 == 19 o5 = c.set(500, 726, sender=tester.k1) assert o5 == 0 o6 = c.set(500, 726) assert o6 == 1 return s, c # Test an example hedging contract, using the data feed. This tests # contracts calling other contracts hedge_code = ''' extern datafeed: [set:ii, get:i] data partyone data partytwo data hedgeValue data datafeed data index data fiatValue data maturity def main(datafeed, index): if !self.partyone: self.partyone = msg.sender self.hedgeValue = msg.value self.datafeed = datafeed self.index = index return(1) elif !self.partytwo: ethvalue = self.hedgeValue if msg.value >= ethvalue: self.partytwo = msg.sender c = self.datafeed.get(self.index) othervalue = ethvalue * c self.fiatValue = othervalue self.maturity = block.timestamp + 500 return(othervalue) else: othervalue = self.fiatValue ethvalue = othervalue / self.datafeed.get(self.index) if ethvalue >= self.balance: send(self.partyone, self.balance) return(3) elif block.timestamp > self.maturity: send(self.partytwo, self.balance - ethvalue) send(self.partyone, ethvalue) return(4) else: return(5) ''' def test_hedge(): s, c = test_data_feeds() c2 = s.abi_contract(hedge_code, sender=tester.k0) # Have the first party register, sending 10^16 wei and # asking for a hedge using currency code 500 o1 = c2.main(c.address, 500, value=10 ** 16) assert o1 == 1 # Have the second party register. It should receive the # amount of units of the second currency that it is # entitled to. Note that from the previous test this is # set to 726 o2 = c2.main(0, 0, value=10 ** 16, sender=tester.k2) assert o2 == 7260000000000000000 snapshot = s.snapshot() # Set the price of the asset down to 300 wei o3 = c.set(500, 300) assert o3 == 1 # Finalize the contract. Expect code 3, meaning a margin call o4 = c2.main(0, 0) assert o4 == 3 s.revert(snapshot) # Don't change the price. Finalize, and expect code 5, meaning # the time has not expired yet o5 = c2.main(0, 0) assert o5 == 5 s.mine(100, tester.a3) # Mine ten blocks, and try. 
Expect code 4, meaning a normal execution # where both get their share o6 = c2.main(0, 0) assert o6 == 4 # Test the LIFO nature of call arither_code = ''' def init(): self.storage[0] = 10 def f1(): self.storage[0] += 1 def f2(): self.storage[0] *= 10 self.f1() self.storage[0] *= 10 def f3(): return(self.storage[0]) ''' def test_lifo(): s = tester.state() c = s.abi_contract(arither_code) c.f2() assert c.f3() == 1010 # Test suicides and suicide reverts suicider_code = ''' def mainloop(rounds): self.storage[15] = 40 self.suicide() i = 0 while i < rounds: i += 1 self.storage[i] = i def entry(rounds): self.storage[15] = 20 self.mainloop(rounds, gas=msg.gas - 600) def ping_ten(): return(10) def suicide(): suicide(0) def ping_storage15(): return(self.storage[15]) ''' def test_suicider(): s = tester.state() c = s.abi_contract(suicider_code) prev_gas_limit = tester.gas_limit tester.gas_limit = 200000 # Run normally: suicide processes, so the attempt to ping the # contract fails c.entry(5) o2 = c.ping_ten() assert o2 is None c = s.abi_contract(suicider_code) # Run the suicider in such a way that it suicides in a sub-call, # then runs out of gas, leading to a revert of the suicide and the # storage mutation c.entry(8000) # Check that the suicide got reverted o2 = c.ping_ten() assert o2 == 10 # Check that the storage op got reverted o3 = c.ping_storage15() assert o3 == 20 tester.gas_limit = prev_gas_limit # Test reverts reverter_code = ''' def entry(): self.non_recurse(gas=100000) self.recurse(gas=100000) def non_recurse(): send(7, 9) self.storage[8080] = 4040 self.storage[160160] = 2020 def recurse(): send(8, 9) self.storage[8081] = 4039 self.storage[160161] = 2019 self.recurse() while msg.gas > 0: self.storage["waste_some_gas"] = 0 ''' def test_reverter(): s = tester.state() c = s.abi_contract(reverter_code, endowment=10 ** 15) c.entry() assert s.block.get_storage_data(c.address, 8080) == 4040 assert s.block.get_balance(decode_hex('0' * 39 + '7')) == 9 assert s.block.get_storage_data(c.address, 8081) == 0 assert s.block.get_balance(decode_hex('0' * 39 + '8')) == 0 # Test stateless contracts add1_code = \ ''' def main(x): self.storage[1] += x ''' filename3 = "stateless_qwertyuioplkjhgfdsa.se" callcode_test_code = \ ''' extern add1: [main:i] x = create("%s") x.main(6) x.main(4, call=code) x.main(60, call=code) x.main(40) return(self.storage[1]) ''' % filename3 def test_callcode(): s = tester.state() open(filename3, 'w').write(add1_code) c = s.contract(callcode_test_code) o1 = s.send(tester.k0, c, 0) os.remove(filename3) assert utils.big_endian_to_int(o1) == 64 # https://github.com/ethereum/serpent/issues/8 array_code = ''' def main(): a = array(1) a[0] = 1 return(a, items=1) ''' def test_array(): s = tester.state() c = s.abi_contract(array_code) assert c.main() == [1] array_code2 = ''' def main(): a = array(1) something = 2 a[0] = 1 return(a, items=1) ''' def test_array2(): s = tester.state() c = s.abi_contract(array_code2) assert c.main() == [1] array_code3 = """ def main(): a = array(3) return(a, items=3) """ def test_array3(): s = tester.state() c = s.abi_contract(array_code3) assert c.main() == [0, 0, 0] calltest_code = """ def main(): self.first(1, 2, 3, 4, 5) self.second(2, 3, 4, 5, 6) self.third(3, 4, 5, 6, 7) def first(a, b, c, d, e): self.storage[1] = a * 10000 + b * 1000 + c * 100 + d * 10 + e def second(a, b, c, d, e): self.storage[2] = a * 10000 + b * 1000 + c * 100 + d * 10 + e def third(a, b, c, d, e): self.storage[3] = a * 10000 + b * 1000 + c * 100 + d * 10 + e def get(k): 
return(self.storage[k]) """ def test_calls(): s = tester.state() c = s.abi_contract(calltest_code) c.main() assert 12345 == c.get(1) assert 23456 == c.get(2) assert 34567 == c.get(3) c.first(4, 5, 6, 7, 8) assert 45678 == c.get(1) c.second(5, 6, 7, 8, 9) assert 56789 == c.get(2) storage_object_test_code = """ extern moo: [ping, query_chessboard:ii, query_items:ii, query_person, query_stats:i, testping:ii, testping2:i] data chessboard[8][8] data users[100](health, x, y, items[5]) data person(head, arms[2](elbow, fingers[5]), legs[2]) def ping(): self.chessboard[0][0] = 1 self.chessboard[0][1] = 2 self.chessboard[3][0] = 3 self.users[0].health = 100 self.users[1].x = 15 self.users[1].y = 12 self.users[1].items[2] = 9 self.users[80].health = self self.users[80].items[3] = self self.person.head = 555 self.person.arms[0].elbow = 556 self.person.arms[0].fingers[0] = 557 self.person.arms[0].fingers[4] = 558 self.person.legs[0] = 559 self.person.arms[1].elbow = 656 self.person.arms[1].fingers[0] = 657 self.person.arms[1].fingers[4] = 658 self.person.legs[1] = 659 self.person.legs[1] += 1000 def query_chessboard(x, y): return(self.chessboard[x][y]) def query_stats(u): return([self.users[u].health, self.users[u].x, self.users[u].y]:arr) def query_items(u, i): return(self.users[u].items[i]) def query_person(): a = array(15) a[0] = self.person.head a[1] = self.person.arms[0].elbow a[2] = self.person.arms[1].elbow a[3] = self.person.legs[0] a[4] = self.person.legs[1] i = 0 while i < 5: a[5 + i] = self.person.arms[0].fingers[i] a[10 + i] = self.person.arms[1].fingers[i] i += 1 return(a:arr) def testping(x, y): return([self.users[80].health.testping2(x), self.users[80].items[3].testping2(y)]:arr) def testping2(x): return(x*x) """ def test_storage_objects(): s = tester.state() c = s.abi_contract(storage_object_test_code) c.ping() assert 1 == c.query_chessboard(0, 0) assert 2 == c.query_chessboard(0, 1) assert 3 == c.query_chessboard(3, 0) assert [100, 0, 0] == c.query_stats(0) assert [0, 15, 12] == c.query_stats(1) assert 0 == c.query_items(1, 3) assert 0 == c.query_items(0, 2) assert 9 == c.query_items(1, 2) assert [555, 556, 656, 559, 1659, 557, 0, 0, 0, 558, 657, 0, 0, 0, 658] == c.query_person() assert [361, 441] == c.testping(19, 21) infinite_storage_object_test_code = """ data chessboard[][8] data users[100](health, x, y, items[]) data person(head, arms[](elbow, fingers[5]), legs[2]) def ping(): self.chessboard[0][0] = 1 self.chessboard[0][1] = 2 self.chessboard[3][0] = 3 self.users[0].health = 100 self.users[1].x = 15 self.users[1].y = 12 self.users[1].items[2] = 9 self.person.head = 555 self.person.arms[0].elbow = 556 self.person.arms[0].fingers[0] = 557 self.person.arms[0].fingers[4] = 558 self.person.legs[0] = 559 self.person.arms[1].elbow = 656 self.person.arms[1].fingers[0] = 657 self.person.arms[1].fingers[4] = 658 self.person.legs[1] = 659 self.person.legs[1] += 1000 def query_chessboard(x, y): return(self.chessboard[x][y]) def query_stats(u): return([self.users[u].health, self.users[u].x, self.users[u].y]:arr) def query_items(u, i): return(self.users[u].items[i]) def query_person(): a = array(15) a[0] = self.person.head a[1] = self.person.arms[0].elbow a[2] = self.person.arms[1].elbow a[3] = self.person.legs[0] a[4] = self.person.legs[1] i = 0 while i < 5: a[5 + i] = self.person.arms[0].fingers[i] a[10 + i] = self.person.arms[1].fingers[i] i += 1 return(a:arr) """ def test_infinite_storage_objects(): s = tester.state() c = s.abi_contract(infinite_storage_object_test_code) c.ping() assert 1 
== c.query_chessboard(0, 0) assert 2 == c.query_chessboard(0, 1) assert 3 == c.query_chessboard(3, 0) assert [100, 0, 0] == c.query_stats(0) assert [0, 15, 12] == c.query_stats(1) assert 0 == c.query_items(1, 3) assert 0 == c.query_items(0, 2) assert 9 == c.query_items(1, 2) assert [555, 556, 656, 559, 1659, 557, 0, 0, 0, 558, 657, 0, 0, 0, 658] == c.query_person() fail1 = """ data person(head, arms[2](elbow, fingers[5]), legs[2]) x = self.person.arms[0] """ fail2 = """ data person(head, arms[2](elbow, fingers[5]), legs[2]) x = self.person.arms[0].fingers """ fail3 = """ data person(head, arms[2](elbow, fingers[5]), legs[2]) x = self.person.arms[0].fingers[4][3] """ fail4 = """ data person(head, arms[2](elbow, fingers[5]), legs[2]) x = self.person.arms.elbow[0].fingers[4] """ fail5 = """ data person(head, arms[2](elbow, fingers[5]), legs[2]) x = self.person.arms[0].fingers[4].nail """ fail6 = """ data person(head, arms[2](elbow, fingers[5]), legs[2]) x = self.person.arms[0].elbow.skin """ fail7 = """ def return_array(): return([1,2,3], items=3) def main(): return(self.return_array()) """ def test_storagevar_fails(): s = tester.state() success1, success2, success3, success4, success5, success6 = \ 0, 0, 0, 0, 0, 0 try: s.contract(fail1) except Exception as e: success1 = "Storage variable access not deep enough" in str(e) assert success1, e try: s.contract(fail2) except Exception as e: success2 = "Too few array index lookups" in str(e) assert success2, e try: s.contract(fail3) except Exception as e: success3 = "Too many array index lookups" in str(e) assert success3, e try: s.contract(fail4) except Exception as e: success4 = "Too few array index lookups" in str(e) assert success4, e try: s.contract(fail5) except Exception as e: success5 = "Invalid object member" in str(e) assert success5, e try: s.contract(fail6) except Exception as e: success6 = "Invalid object member" in str(e) assert success6, e def test_type_system_fails(): s = tester.state() success7 = False try: s.contract(fail7) except Exception as e: success7 = "Please specify maximum" in str(e) assert success7, e working_returnarray_code = """ def return_array(): return([1,2,3], items=3) def main(): return(self.return_array(outitems=3):arr) """ def test_returnarray_code(): s = tester.state() c = s.abi_contract(working_returnarray_code) assert c.main() == [1, 2, 3] crowdfund_code = """ data campaigns[2^80](recipient, goal, deadline, contrib_total, contrib_count, contribs[2^50](sender, value)) def create_campaign(id, recipient, goal, timelimit): if self.campaigns[id].recipient: return(0) self.campaigns[id].recipient = recipient self.campaigns[id].goal = goal self.campaigns[id].deadline = block.timestamp + timelimit def contribute(id): # Update contribution total total_contributed = self.campaigns[id].contrib_total + msg.value self.campaigns[id].contrib_total = total_contributed # Record new contribution sub_index = self.campaigns[id].contrib_count self.campaigns[id].contribs[sub_index].sender = msg.sender self.campaigns[id].contribs[sub_index].value = msg.value self.campaigns[id].contrib_count = sub_index + 1 # Enough funding? if total_contributed >= self.campaigns[id].goal: send(self.campaigns[id].recipient, total_contributed) self.clear(id) return(1) # Expired? 
if block.timestamp > self.campaigns[id].deadline: i = 0 c = self.campaigns[id].contrib_count while i < c: send(self.campaigns[id].contribs[i].sender, self.campaigns[id].contribs[i].value) i += 1 self.clear(id) return(2) # Progress report [2, id] def progress_report(id): return(self.campaigns[id].contrib_total) # Clearing function for internal use def clear(self, id): if self == msg.sender: self.campaigns[id].recipient = 0 self.campaigns[id].goal = 0 self.campaigns[id].deadline = 0 c = self.campaigns[id].contrib_count self.campaigns[id].contrib_count = 0 self.campaigns[id].contrib_total = 0 i = 0 while i < c: self.campaigns[id].contribs[i].sender = 0 self.campaigns[id].contribs[i].value = 0 i += 1 """ def test_crowdfund(): s = tester.state() c = s.abi_contract(crowdfund_code) # Create a campaign with id 100 c.create_campaign(100, 45, 100000, 2) # Create a campaign with id 200 c.create_campaign(200, 48, 100000, 2) # Make some contributions c.contribute(100, value=1, sender=tester.k1) assert 1 == c.progress_report(100) c.contribute(200, value=30000, sender=tester.k2) c.contribute(100, value=59049, sender=tester.k3) assert 59050 == c.progress_report(100) c.contribute(200, value=70001, sender=tester.k4) # Expect the 100001 units to be delivered to the destination # account for campaign 2 assert 100001 == s.block.get_balance(utils.int_to_addr(48)) mida1 = s.block.get_balance(tester.a1) mida3 = s.block.get_balance(tester.a3) # Mine 5 blocks to expire the campaign s.mine(5) # Ping the campaign after expiry c.contribute(100, value=1) # Expect refunds assert mida1 + 1 == s.block.get_balance(tester.a1) assert mida3 + 59049 == s.block.get_balance(tester.a3) saveload_code = """ data store[1000] def kall(): a = text("sir bobalot to the rescue !!1!1!!1!1") save(self.store[0], a, chars=60) b = load(self.store[0], chars=60) c = load(self.store[0], chars=33) return([a[0], a[1], b[0], b[1], c[0], c[1]]:arr) """ def test_saveload(): s = tester.state() c = s.abi_contract(saveload_code) o = c.kall() assert o[0] == 0x73697220626f62616c6f7420746f207468652072657363756520212131213121, bitcoin.encode(o[0], 16) assert o[1] == 0x2131213100000000000000000000000000000000000000000000000000000000, bitcoin.encode(o[1], 16) assert o[2] == 0x73697220626f62616c6f7420746f207468652072657363756520212131213121, bitcoin.encode(o[2], 16) assert o[3] == 0x2131213100000000000000000000000000000000000000000000000000000000, bitcoin.encode(o[3], 16) assert o[4] == 0x73697220626f62616c6f7420746f207468652072657363756520212131213121, bitcoin.encode(o[4], 16) assert o[5] == 0x2100000000000000000000000000000000000000000000000000000000000000, bitcoin.encode(o[5], 16) saveload_code2 = """ data buf data buf2 mystr = text("01ab") save(self.buf, mystr:str) save(self.buf2, mystr, chars=4) """ def test_saveload2(): s = tester.state() c = s.contract(saveload_code2) s.send(tester.k0, c, 0) assert bitcoin.encode(s.block.get_storage_data(c, 0), 256) == b'01ab' + b'\x00' * 28 assert bitcoin.encode(s.block.get_storage_data(c, 1), 256) == b'01ab' + b'\x00' * 28 sdiv_code = """ def kall(): return([2^255 / 2^253, 2^255 % 3]:arr) """ def test_sdiv(): s = tester.state() c = s.abi_contract(sdiv_code) assert [-4, -2] == c.kall() basic_argcall_code = """ def argcall(args:arr): log(1) o = (args[0] + args[1] * 10 + args[2] * 100) log(4) return o def argkall(args:arr): log(2) o = self.argcall(args) log(3) return o """ def test_argcall(): s = tester.state() c = s.abi_contract(basic_argcall_code) assert 375 == c.argcall([5, 7, 3]) assert 376 == c.argkall([6, 7, 3]) 
more_complex_argcall_code = """ def argcall(args:arr): args[0] *= 2 args[1] *= 2 return(args:arr) def argkall(args:arr): return(self.argcall(args, outsz=2):arr) """ def test_argcall2(): s = tester.state() c = s.abi_contract(more_complex_argcall_code) assert [4, 8] == c.argcall([2, 4]) assert [6, 10] == c.argkall([3, 5]) sort_code = """ def sort(args:arr): if len(args) < 2: return(args:arr) h = array(len(args)) hpos = 0 l = array(len(args)) lpos = 0 i = 1 while i < len(args): if args[i] < args[0]: l[lpos] = args[i] lpos += 1 else: h[hpos] = args[i] hpos += 1 i += 1 x = slice(h, items=0, items=hpos) h = self.sort(x, outsz=hpos) l = self.sort(slice(l, items=0, items=lpos), outsz=lpos) o = array(len(args)) i = 0 while i < lpos: o[i] = l[i] i += 1 o[lpos] = args[0] i = 0 while i < hpos: o[lpos + 1 + i] = h[i] i += 1 return(o:arr) """ @pytest.mark.timeout(100) def test_sort(): s = tester.state() c = s.abi_contract(sort_code) assert c.sort([9]) == [9] assert c.sort([9, 5]) == [5, 9] assert c.sort([9, 3, 5]) == [3, 5, 9] assert c.sort([80, 234, 112, 112, 29]) == [29, 80, 112, 112, 234] filename9 = "mul2_qwertyuioplkjhgfdsabarbar.se" sort_tester_code = \ ''' extern sorter: [sort:a] data sorter def init(): self.sorter = create("%s") def test(args:arr): return(self.sorter.sort(args, outsz=len(args)):arr) ''' % filename9 @pytest.mark.timeout(100) def test_indirect_sort(): s = tester.state() open(filename9, 'w').write(sort_code) c = s.abi_contract(sort_tester_code) os.remove(filename9) assert c.test([80, 234, 112, 112, 29]) == [29, 80, 112, 112, 234] multiarg_code = """ def kall(a:arr, b, c:arr, d:str, e): x = a[0] + 10 * b + 100 * c[0] + 1000 * a[1] + 10000 * c[1] + 100000 * e return([x, getch(d, 0) + getch(d, 1) + getch(d, 2), len(d)]:arr) """ def test_multiarg_code(): s = tester.state() c = s.abi_contract(multiarg_code) o = c.kall([1, 2, 3], 4, [5, 6, 7], "doge", 8) assert o == [862541, safe_ord('d') + safe_ord('o') + safe_ord('g'), 4] peano_code = """ macro padd($x, psuc($y)): psuc(padd($x, $y)) macro padd($x, z()): $x macro dec(psuc($x)): dec($x) + 1 macro dec(z()): 0 macro pmul($x, z()): z() macro pmul($x, psuc($y)): padd(pmul($x, $y), $x) macro pexp($x, z()): one() macro pexp($x, psuc($y)): pmul($x, pexp($x, $y)) macro fac(z()): one() macro fac(psuc($x)): pmul(psuc($x), fac($x)) macro one(): psuc(z()) macro two(): psuc(psuc(z())) macro three(): psuc(psuc(psuc(z()))) macro five(): padd(three(), two()) def main(): return([dec(pmul(three(), pmul(three(), three()))), dec(fac(five()))]:arr) """ def test_macros(): s = tester.state() c = s.abi_contract(peano_code) assert c.main() == [27, 120] type_code = """ type f: [a, b, c, d, e] macro f($a) + f($b): f(add($a, $b)) macro f($a) - f($b): f(sub($a, $b)) macro f($a) * f($b): f(mul($a, $b) / 10000) macro f($a) / f($b): f(sdiv($a * 10000, $b)) macro f($a) % f($b): f(smod($a, $b)) macro f($v) = f($w): $v = $w macro(10) f($a): $a / 10000 macro fify($a): f($a * 10000) a = fify(5) b = fify(2) c = a / b e = c + (a / b) return(e) """ def test_types(): s = tester.state() c = s.contract(type_code) assert utils.big_endian_to_int(s.send(tester.k0, c, 0)) == 5 ecrecover_code = """ def test_ecrecover(h:uint256, v:uint256, r:uint256, s:uint256): return(ecrecover(h, v, r, s)) """ def test_ecrecover(): s = tester.state() c = s.abi_contract(ecrecover_code) priv = utils.sha3('some big long brainwallet password') pub = bitcoin.privtopub(priv) msghash = utils.sha3('the quick brown fox jumps over the lazy dog') pk = PrivateKey(priv, raw=True) signature = 
pk.ecdsa_recoverable_serialize( pk.ecdsa_sign_recoverable(msghash, raw=True) ) signature = signature[0] + chr(signature[1]) V = ord(signature[64]) + 27 R = big_endian_to_int(signature[0:32]) S = big_endian_to_int(signature[32:64]) assert bitcoin.ecdsa_raw_verify(msghash, (V, R, S), pub) addr = utils.big_endian_to_int(utils.sha3(bitcoin.encode_pubkey(pub, 'bin')[1:])[12:]) assert utils.big_endian_to_int(utils.privtoaddr(priv)) == addr result = c.test_ecrecover(utils.big_endian_to_int(msghash), V, R, S) assert result == addr sha256_code = """ def main(): return([sha256(0, chars=0), sha256(3), sha256(text("doge"), chars=3), sha256(text("dog"):str), sha256([0,0,0,0,0]:arr), sha256([0,0,0,0,0,0], items=5)]:arr) """ def test_sha256(): s = tester.state() c = s.abi_contract(sha256_code) assert c.main() == [ 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - 2 ** 256, 0xd9147961436944f43cd99d28b2bbddbf452ef872b30c8279e255e7daafc7f946 - 2 ** 256, 0xcd6357efdd966de8c0cb2f876cc89ec74ce35f0968e11743987084bd42fb8944 - 2 ** 256, 0xcd6357efdd966de8c0cb2f876cc89ec74ce35f0968e11743987084bd42fb8944 - 2 ** 256, 0xb393978842a0fa3d3e1470196f098f473f9678e72463cb65ec4ab5581856c2e4 - 2 ** 256, 0xb393978842a0fa3d3e1470196f098f473f9678e72463cb65ec4ab5581856c2e4 - 2 ** 256 ] ripemd160_code = """ def main(): return([ripemd160(0, chars=0), ripemd160(3), ripemd160(text("doge"), chars=3), ripemd160(text("dog"):str), ripemd160([0,0,0,0,0]:arr), ripemd160([0,0,0,0,0,0], items=5)]:arr) """ def test_ripemd160(): s = tester.state() c = s.abi_contract(ripemd160_code) assert c.main() == [ 0x9c1185a5c5e9fc54612808977ee8f548b2258d31, 0x44d90e2d3714c8663b632fcf0f9d5f22192cc4c8, 0x2a5756a3da3bc6e4c66a65028f43d31a1290bb75, 0x2a5756a3da3bc6e4c66a65028f43d31a1290bb75, 0x9164cab7f680fd7a790080f2e76e049811074349, 0x9164cab7f680fd7a790080f2e76e049811074349] sha3_code = """ def main(): return([sha3(0, chars=0), sha3(3), sha3(text("doge"), chars=3), sha3(text("dog"):str), sha3([0,0,0,0,0]:arr), sha3([0,0,0,0,0,0], items=5)]:arr) """ def test_sha3(): s = tester.state() c = s.abi_contract(sha3_code) assert c.main() == [ 0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470 - 2 ** 256, 0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b - 2 ** 256, 0x41791102999c339c844880b23950704cc43aa840f3739e365323cda4dfa89e7a, 0x41791102999c339c844880b23950704cc43aa840f3739e365323cda4dfa89e7a, 0xdfded4ed5ac76ba7379cfe7b3b0f53e768dca8d45a34854e649cfc3c18cbd9cd - 2 ** 256, 0xdfded4ed5ac76ba7379cfe7b3b0f53e768dca8d45a34854e649cfc3c18cbd9cd - 2 ** 256 ] types_in_functions_code = """ type fixedp: [a, b] macro fixedp($x) * fixedp($y): fixedp($x * $y / 2^64) macro fixedp($x) / fixedp($y): fixedp($x * 2^64 / $y) macro raw_unfixedp(fixedp($x)): $x / 2^64 macro set(fixedp($x), $y): $x = 2^64 * $y macro fixedp($x) = fixedp($y): $x = $y def sqrdiv(a, b): return(raw_unfixedp((a / b) * (a / b))) """ def test_types_in_functions(): s = tester.state() c = s.abi_contract(types_in_functions_code) assert c.sqrdiv(25, 2) == 156 more_infinites_code = """ data a[](b, c) def testVerifyTx(): self.a[0].b = 33 self.a[0].c = 55 return(self.a[0].b) """ def test_more_infinites(): s = tester.state() c = s.abi_contract(more_infinites_code) assert c.testVerifyTx() == 33 prevhashes_code = """ def get_prevhashes(k): o = array(k) i = 0 while i < k: o[i] = block.prevhash(i) i += 1 return(o:arr) """ @pytest.mark.timeout(100) def test_prevhashes(): s = tester.state() c = s.abi_contract(prevhashes_code) s.mine(7) # Hashes of last 14 blocks 
including existing one o1 = [x % 2 ** 256 for x in c.get_prevhashes(14)] # hash of self = 0, hash of blocks back to genesis block as is, hash of # blocks before genesis block = 0 t1 = [0] + [utils.big_endian_to_int(b.hash) for b in s.blocks[-2::-1]] \ + [0] * 6 assert o1 == t1 s.mine(256) # Test 256 limit: only 1 <= g <= 256 generation ancestors get hashes shown o2 = [x % 2 ** 256 for x in c.get_prevhashes(270)] t2 = [0] + [utils.big_endian_to_int(b.hash) for b in s.blocks[-2:-258:-1]] \ + [0] * 13 assert o2 == t2 abi_contract_code = """ def mul2(a): return(a * 2) def returnten(): return(10) """ def test_abi_contract(): s = tester.state() c = s.abi_contract(abi_contract_code) assert c.mul2(3) == 6 assert c.returnten() == 10 mcopy_code = """ def mcopy_test(foo:str, a, b, c): info = string(32*3 + len(foo)) info[0] = a info[1] = b info[2] = c mcopy(info+(items=3), foo, len(foo)) return(info:str) """ def test_mcopy(): s = tester.state() c = s.abi_contract(mcopy_code) assert c.mcopy_test("123", 5, 6, 259) == \ b'\x00'*31+b'\x05'+b'\x00'*31+b'\x06'+b'\x00'*30+b'\x01\x03'+b'123' mcopy_code_2 = """ def mcopy_test(): myarr = array(3) myarr[0] = 99 myarr[1] = 111 myarr[2] = 119 mystr = string(96) mcopy(mystr, myarr, items=3) return(mystr:str) """ def test_mcopy2(): s = tester.state() c = s.abi_contract(mcopy_code_2) assert c.mcopy_test() == \ b''.join([utils.zpad(utils.int_to_big_endian(x), 32) for x in [99, 111, 119]]) array_saveload_code = """ data a[5] def array_saveload(): a = [1,2,3,4,5] save(self.a[0], a, items=5) a = load(self.a[0], items=4) log(len(a)) return(load(self.a[0], items=4):arr) """ def test_saveload3(): s = tester.state() c = s.abi_contract(array_saveload_code) assert c.array_saveload() == [1, 2, 3, 4] string_manipulation_code = """ def f1(istring:str): setch(istring, 0, "a") setch(istring, 1, "b") return(istring:str) def t1(): istring = text("cd") res = self.f1(istring, outchars=2) return([getch(res,0), getch(res,1)]:arr) # should return [97,98] """ def test_string_manipulation(): s = tester.state() c = s.abi_contract(string_manipulation_code) assert c.t1() == [97, 98] more_infinite_storage_object_code = """ data block[2^256](_blockHeader(_prevBlock)) data numAncestorDepths data logs[2] def initAncestorDepths(): self.numAncestorDepths = 2 def testStoreB(number, blockHash, hashPrevBlock, i): self.block[blockHash]._blockHeader._prevBlock = hashPrevBlock self.logs[i] = self.numAncestorDepths def test2(): self.initAncestorDepths() self.testStoreB(45, 45, 44, 0) self.testStoreB(46, 46, 45, 1) return ([self.logs[0], self.logs[1]]:arr) """ def test_more_infinite_storage(): s = tester.state() c = s.abi_contract(more_infinite_storage_object_code) assert c.test2() == [2, 2] double_array_code = """ def foo(a:arr, b:arr): i = 0 tot = 0 while i < len(a): tot = tot * 10 + a[i] i += 1 j = 0 tot2 = 0 while j < len(b): tot2 = tot2 * 10 + b[j] j += 1 return ([tot, tot2]:arr) def bar(a:arr, m:str, b:arr): return(self.foo(a, b, outitems=2):arr) """ def test_double_array(): s = tester.state() c = s.abi_contract(double_array_code) assert c.foo([1, 2, 3], [4, 5, 6, 7]) == [123, 4567] assert c.bar([1, 2, 3], "moo", [4, 5, 6, 7]) == [123, 4567] abi_logging_code = """ event rabbit(x) event frog(y:indexed) event moose(a, b:str, c:indexed, d:arr) event chicken(m:address:indexed) def test_rabbit(eks): log(type=rabbit, eks) def test_frog(why): log(type=frog, why) def test_moose(eh, bee:str, see, dee:arr): log(type=moose, eh, bee, see, dee) def test_chicken(em:address): log(type=chicken, em) """ def 
test_abi_logging(): s = tester.state() c = s.abi_contract(abi_logging_code) o = [] s.block.log_listeners.append(lambda x: o.append(c.translator.listen(x))) c.test_rabbit(3) assert o == [{"_event_type": b"rabbit", "x": 3}] o.pop() c.test_frog(5) assert o == [{"_event_type": b"frog", "y": 5}] o.pop() c.test_moose(7, "nine", 11, [13, 15, 17]) assert o == [{"_event_type": b"moose", "a": 7, "b": b"nine", "c": 11, "d": [13, 15, 17]}] o.pop() c.test_chicken(tester.a0) assert o == [{"_event_type": b"chicken", "m": utils.encode_hex(tester.a0)}] o.pop() new_format_inner_test_code = """ def foo(a, b:arr, c:str): return a * 10 + b[1] """ filename4 = "nfitc2635987162498621846198246.se" new_format_outer_test_code = """ extern blah: [foo:[int256,int256[],bytes]:int256] def bar(): x = create("%s") return x.foo(17, [3, 5, 7], text("dog")) """ % filename4 def test_new_format(): s = tester.state() open(filename4, 'w').write(new_format_inner_test_code) c = s.abi_contract(new_format_outer_test_code) assert c.bar() == 175 abi_address_output_test_code = """ data addrs[] def get_address(key): return(self.addrs[key]:address) def register(key, addr:address): if not self.addrs[key]: self.addrs[key] = addr """ def test_abi_address_output(): s = tester.state() c = s.abi_contract(abi_address_output_test_code) c.register(123, b'1212121212121212121212121212121212121212') c.register(123, b'3434343434343434343434343434343434343434') c.register(125, b'5656565656565656565656565656565656565656') assert c.get_address(123) == b'1212121212121212121212121212121212121212' assert c.get_address(125) == b'5656565656565656565656565656565656565656' filename5 = 'abi_output_tester_1264876521746198724124' abi_address_caller_code = """ extern foo: [get_address:[int256]:address, register:[int256,address]:_] data sub def init(): self.sub = create("%s") def get_address(key): return(self.sub.get_address(key):address) def register(key, addr:address): self.sub.register(key, addr) """ % filename5 def test_inner_abi_address_output(): s = tester.state() open(filename5, 'w').write(abi_address_output_test_code) c = s.abi_contract(abi_address_caller_code) c.register(123, b'1212121212121212121212121212121212121212') c.register(123, b'3434343434343434343434343434343434343434') c.register(125, b'5656565656565656565656565656565656565656') assert c.get_address(123) == b'1212121212121212121212121212121212121212' assert c.get_address(125) == b'5656565656565656565656565656565656565656' string_logging_code = """ event foo(x:string:indexed, y:bytes:indexed, z:str:indexed) def moo(): log(type=foo, text("bob"), text("cow"), text("dog")) """ def test_string_logging(): s = tester.state() c = s.abi_contract(string_logging_code) o = [] s.block.log_listeners.append(lambda x: o.append(c.translator.listen(x))) c.moo() assert o == [{"_event_type": "foo", "x": "bob", "__hash_x": utils.sha3("bob"), "y": "cow", "__hash_y": utils.sha3("cow"), "z": "dog", "__hash_z": utils.sha3("dog")}] params_code = """ data blah def init(): self.blah = $FOO def garble(): return(self.blah) def marble(): return(text($BAR):str) """ def test_params_contract(): s = tester.state() c = s.abi_contract(params_code, FOO=4, BAR='horse') assert c.garble() == 4 assert c.marble() == 'horse' prefix_types_in_functions_code = """ type fixedp: fp_ macro fixedp($x) * fixedp($y): fixedp($x * $y / 2^64) macro fixedp($x) / fixedp($y): fixedp($x * 2^64 / $y) macro raw_unfixedp(fixedp($x)): $x / 2^64 macro set(fixedp($x), $y): $x = 2^64 * $y macro fixedp($x) = fixedp($y): $x = $y def sqrdiv(fp_a, fp_b): 
return(raw_unfixedp((fp_a / fp_b) * (fp_a / fp_b))) """ def test_prefix_types_in_functions(): s = tester.state() c = s.abi_contract(prefix_types_in_functions_code) assert c.sqrdiv(25, 2) == 156 # test_evm = None # test_sixten = None # test_with = None # test_returnten = None # test_namecoin = None # test_inset = None # test_currency = None # test_data_feeds = None # test_hedge = None # test_lifo = None # test_suicider = None # test_reverter = None # test_callcode = None # test_array = None # test_array2 = None # test_array3 = None # test_calls = None # test_storage_objects = None # test_infinite_storage_objects = None # test_storagevar_fails = None # test_type_system_fails = None # test_returnarray_code = None # test_saveload = None # test_saveload2 = None # test_crowdfund = None # test_sdiv = None # test_argcall = None # test_argcall2 = None # test_sort = None # test_indirect_sort = None # test_multiarg_code = None # test_macros = None # test_types = None # test_sha256 = None # test_sha3 = None # test_types_in_functions = None # test_more_infinites = None # test_prevhashes = None # test_abi_contract = None # test_mcopy = None # test_saveload3 = None # test_string_manipulation = None # test_more_infinite_storage = None # test_double_array = None # test_abi_logging = None # test_new_format = None # test_abi_address_output = None # test_string_logging = None # test_params_contract = None # test_prefix_types_in_functions = None
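# Illustrative sketch (editor's addition, not part of the original test module):
# every test above follows the same ethereum.tester harness pattern; the serpent
# source in this example is made up purely for demonstration:
#
#     s = tester.state()
#     c = s.abi_contract('def double(v):\n    return(v * 2)')
#     assert c.double(21) == 42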
pipermerriam/pyethereum
ethereum/tests/test_contracts.py
Python
mit
40,537
[ "MOOSE" ]
c2153fd9eb56405558c1bd5e837e4d18d7671466b9b311201ee51f5612928ba6
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#

import numpy as np  # required by the np.integer isinstance check below


def convert_to_gcisd(myci):
    """Convert a restricted CISD object into the equivalent GCISD object
    built on a generalized (GHF) mean-field reference."""
    from pyscf import scf
    from pyscf.ci import gcisd
    if isinstance(myci, gcisd.GCISD):
        return myci

    mf = scf.addons.convert_to_ghf(myci._scf)
    gci = gcisd.GCISD(mf)
    assert(myci._nocc is None)
    assert(myci._nmo is None)
    gci.__dict__.update(myci.__dict__)
    gci._scf = mf
    gci.mo_coeff = mf.mo_coeff
    gci.mo_occ = mf.mo_occ
    if isinstance(myci.frozen, (int, np.integer)):
        # each frozen spatial orbital maps to two spin orbitals
        gci.frozen = myci.frozen * 2
    else:
        raise NotImplementedError
    gci.ci = gcisd.from_rcisdvec(myci.ci, myci.nocc, mf.mo_coeff.orbspin)
    return gci
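# Minimal usage sketch (editor's addition, not part of pyscf itself); the
# molecule, basis and method chain below are assumptions chosen only for
# demonstration:
#
#     from pyscf import gto, scf, ci
#     from pyscf.ci.addons import convert_to_gcisd
#
#     mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
#     mf = scf.RHF(mol).run()
#     myci = ci.CISD(mf).run()
#     gci = convert_to_gcisd(myci)  # same wavefunction on a GHF reference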
gkc1000/pyscf
pyscf/ci/addons.py
Python
apache-2.0
1,295
[ "PySCF" ]
7808252997c6416265f98709443aeb1dd3c5bf759892e8c90a5afb2499544d24
""" Record a video using Selenium + <canvas> toDataURI """ import traceback import urlparse import datetime import os import sys import subprocess import time import base64 import json import tempfile import urllib import urllib2 from cStringIO import StringIO from PIL import Image from selenium import webdriver from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.common.keys import Keys # TODO: change perhaps? TMP_PATH = os.path.dirname(os.path.dirname(__file__)) class Recorder(object): # The recording speed FRAMES_PER_SECOND = 30.0 # Selenium instance browser = None # Output FIFO handle used for saving stream = None # music file music = 'josh.ogg' # the encoder process encoder = None # the xvfb process xvfb = None # maximum length of recording in seconds max_length = 10 # the pingback url pingback = None def __init__(self): pass def start_encoding_process(self): blocksize = self.width * self.height * 4 self.encoder = subprocess.Popen([ 'gst-launch-0.10', 'filesrc', 'location=' + self.fifo_path, 'blocksize=' + str(blocksize), '!', 'video/x-raw-rgb,bpp=32,endianness=4321,depth=24,red_mask=-16777216,green_mask=16711680,blue_mask=65280,width=%d,height=%d,framerate=%d/1' % (self.width, self.height, int(self.FRAMES_PER_SECOND)), '!', 'queue', '!', 'ffmpegcolorspace', '!', 'videorate', '!', 'x264enc', 'profile=baseline', '!', 'mux.', 'filesrc', 'location=' + self.music, '!', 'decodebin', '!', 'audiorate', '!', 'audioconvert', '!' , 'faac', '!', 'mux.', 'mp4mux', 'name=mux', '!', 'filesink', 'location=' + self.output ], stdout=sys.stdout, stderr=sys.stderr) def check_ready(self): """ Interacting with Javascript http://stackoverflow.com/questions/5585343/getting-the-return-value-of-javascript-code-in-selenium """ val = self.browser.execute_script("return window.recorder.isReady()") return bool(val) def set_filename(self, filename): filename = os.path.abspath(filename) self.browser.execute_script("window.recorder.setOutputFilename(%s)" % json.dumps(filename)) def check_done(self): """ Interacting with Javascript http://stackoverflow.com/questions/5585343/getting-the-return-value-of-javascript-code-in-selenium """ return bool(self.browser.execute_script("return window.recorder.isDone()")) def prepare_frame(self, clock): """ Render one video frame at a given moment. 
""" # Rember to convert to milliseconds clock *= 1000 val = self.browser.execute_script("return window.recorder.prepareFrame(%f);" % clock) return val def get_resolution(self): res = self.browser.execute_script("return window.recorder.getResolution();") self.width = int(res['width']) self.height = int(res['height']) assert self.width % 8 == 0, "Width is not divisible by 8" assert self.height % 8 == 0, "Height is not divisible by 8" return self.width, self.height def grab_frame(self): self.browser.execute_script("window.recorder.grabFrame();") def create_fifo(self): self.fifo_path = os.path.join(self.temp_dir, 'encoding_fifo') os.mkfifo(self.fifo_path) return self.fifo_path def close_stream(self): self.browser.execute_script("window.recorder.closeStream();"); def get_pingback(self): rv = self.browser.execute_script("return (window.recorder.getPingbackUri && window.recorder.getPingbackUri()) || null;") if not rv: return None return rv def send_pingback(self): location = 'file://' + self.output values = { 'location' : location } data = urllib.urlencode(values) req = urllib2.Request(location, data) response = urllib2.urlopen(req) response.read() def do_encode(self, source_uri, target_file): print "Starting Xvfb at :5" self.xvfb = subprocess.Popen(["Xvfb", ":5", "-screen", "0", "1024x768x24"]) os.environ['DISPLAY'] = ':5' self.output = target_file self.create_fifo() profile = webdriver.firefox.firefox_profile.FirefoxProfile() parsed = urlparse.urlparse(source_uri) host_base = "%s://%s" % (parsed.scheme, parsed.netloc) print "Enabling XPCOM for codebase", host_base set_pref = profile.set_preference set_pref("signed.applets.codebase_principal_support", True) set_pref("capability.principal.codebase.p0.granted", "UniversalXPConnect"); set_pref("capability.principal.codebase.p0.id", host_base); set_pref("capability.principal.codebase.p0.subjectName", ""); self.browser = webdriver.Firefox(firefox_profile=profile) self.browser.get(source_uri) # Load page assert "Slideshow Recorder" in self.browser.title print "Preparing media assets" while not self.check_ready(): time.sleep(1) self.get_resolution() print "Resolution %dx%d" % (self.width, self.height) print "Getting pingback URI" self.pingback = self.get_pingback() if self.pingback: print "Got pingback URI", self.pingback else: print "Alert, no pingback URI given!" print "Starting encoder" self.start_encoding_process() # Assume we are running Pyramid on localhost print "Setting Javascript output path to", self.set_filename(self.fifo_path) self.set_filename(self.fifo_path) print "Starting recording loop" clock = 0.0 while not self.check_done(): clock += (1.0 / self.FRAMES_PER_SECOND) print "Rendering frame: %f" % clock self.prepare_frame(clock) self.grab_frame() if clock > self.max_length: break self.close_stream() def encode(self, source_uri): self.temp_dir = tempfile.mkdtemp(dir=TMP_PATH) output_file = os.path.join(self.temp_dir, "out.mp4") def timeprint(msg): now = datetime.datetime.now().isoformat(' ') print "[%s] %s" % (now, msg) start = time.time() timeprint("Starting recording") try: success = True self.do_encode(source_uri, output_file) except Exception, e: traceback.print_exc() success = False if self.browser: timeprint("Shutting down browser") self.browser.close() if self.encoder: timeprint("Waiting for encoder to finish") rc = self.encoder.wait() timeprint("Encoder exited with code %d - exciting!" 
% rc) if self.xvfb: self.xvfb.terminate() if self.pingback: self.send_pingback() now = datetime.datetime.now().isoformat(' ') delta = time.time() - start timeprint("Encoding ended - time %f seconds" % delta) if __name__ == '__main__': recorder = Recorder() recorder.encode(sys.argv[1])
miohtama/slideshow9000
slideshow/recorder.py
Python
agpl-3.0
7,743
[ "exciting" ]
a3b5cc159a45899983df283a1b1677844ceede1904b59fd9672432e84647c9b5
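The recorder above drives a browser <canvas>, pushes raw RGBA frames through a named pipe, and lets an external process mux them into MP4. Below is a minimal, self-contained Python 3 sketch of that FIFO-to-encoder pattern; ffmpeg stands in for the obsolete gst-launch-0.10 pipeline and synthetic frames stand in for Selenium, both substitutions being assumptions rather than the original design.

# fifo_encode_sketch.py -- assumes a POSIX system and an ffmpeg binary
# with an H.264 encoder on PATH (not part of the original project).
import os
import subprocess
import tempfile

WIDTH, HEIGHT, FPS, SECONDS = 320, 240, 30, 2

tmp = tempfile.mkdtemp()
fifo = os.path.join(tmp, 'encoding_fifo')
out = os.path.join(tmp, 'out.mp4')
os.mkfifo(fifo)

# Start the reader first: opening the FIFO for writing blocks until
# the encoder has opened it for reading.
encoder = subprocess.Popen([
    'ffmpeg', '-y',
    '-f', 'rawvideo', '-pix_fmt', 'rgba',
    '-s', '%dx%d' % (WIDTH, HEIGHT), '-r', str(FPS),
    '-i', fifo,
    out,
])

with open(fifo, 'wb') as stream:
    for n in range(FPS * SECONDS):
        shade = (n * 4) % 256
        # One frame = width * height * 4 bytes of raw RGBA, the same
        # "blocksize" the recorder hands to its GStreamer filesrc.
        stream.write(bytes([shade, 0, 255 - shade, 255]) * (WIDTH * HEIGHT))

encoder.wait()
print('wrote', out)

Closing the write end signals EOF, so the encoder finalizes the container on its own; the recorder's closeStream() plays the same role on the browser side.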
#!/usr/bin/python #Audio Tools, a module and set of tools for manipulating audio data #Copyright (C) 2007-2012 Brian Langenberger #This program is free software; you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation; either version 2 of the License, or #(at your option) any later version. #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. #You should have received a copy of the GNU General Public License #along with this program; if not, write to the Free Software #Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA from . import (AudioFile, MetaData) #takes a pair of integers (or None) for the current and total values #returns a unicode string of their combined pair #for example, __number_pair__(2,3) returns u"2/3" #whereas __number_pair__(4,0) returns u"4" def __number_pair__(current, total): def empty(i): return i is None unslashed_format = u"%d" slashed_format = u"%d/%d" if (empty(current) and empty(total)): return unslashed_format % (0,) elif ((not empty(current)) and empty(total)): return unslashed_format % (current,) elif (empty(current) and (not empty(total))): return slashed_format % (0, total) else: #neither current or total are empty return slashed_format % (current, total) def limited_transfer_data(from_function, to_function, max_bytes): """transfers up to max_bytes from from_function to to_function or as many bytes as from_function generates as strings""" BUFFER_SIZE = 0x100000 s = from_function(BUFFER_SIZE) while ((len(s) > 0) and (max_bytes > 0)): if (len(s) > max_bytes): s = s[0:max_bytes] to_function(s) max_bytes -= len(s) s = from_function(BUFFER_SIZE) ####################### #MONKEY'S AUDIO ####################### class ApeTagItem: """a single item in the ApeTag, typically a unicode value""" FORMAT = "32u [ 1u 2u 29p ]" def __init__(self, item_type, read_only, key, data): """fields are as follows: item_type is 0 = UTF-8, 1 = binary, 2 = external, 3 = reserved read_only is 1 if the item is read only key is an ASCII string data is a binary string of the data itself """ self.type = item_type self.read_only = read_only self.key = key self.data = data def __eq__(self, item): for attr in ["type", "read_only", "key", "data"]: if ((not hasattr(item, attr)) or (getattr(self, attr) != getattr(item, attr))): return False else: return True def total_size(self): """returns total size of item in bytes""" return 4 + 4 + len(self.key) + 1 + len(self.data) def copy(self): """returns a duplicate ApeTagItem""" return ApeTagItem(self.type, self.read_only, self.key, self.data) def __repr__(self): return "ApeTagItem(%s,%s,%s,%s)" % \ (repr(self.type), repr(self.read_only), repr(self.key), repr(self.data)) def raw_info_pair(self): """returns a human-readable key/value pair of item data""" if (self.type == 0): # text if (self.read_only): return (self.key.decode('ascii'), u"(read only) %s" % (self.data.decode('utf-8'))) else: return (self.key.decode('ascii'), self.data.decode('utf-8')) elif (self.type == 1): # binary return (self.key.decode('ascii'), u"(binary) %d bytes" % (len(self.data))) elif (self.type == 2): # external return (self.key.decode('ascii'), u"(external) %d bytes" % (len(self.data))) else: # reserved return (self.key.decode('ascii'), u"(reserved) %d bytes" % (len(self.data))) def 
__str__(self): return self.data def __unicode__(self): return self.data.rstrip(chr(0)).decode('utf-8', 'replace') @classmethod def parse(cls, reader): """returns an ApeTagItem parsed from the given BitstreamReader""" (item_value_length, read_only, encoding) = reader.parse(cls.FORMAT) key = [] c = reader.read(8) while (c != 0): key.append(chr(c)) c = reader.read(8) value = reader.read_bytes(item_value_length) return cls(encoding, read_only, "".join(key), value) def build(self, writer): """writes the ApeTagItem values to the given BitstreamWriter""" writer.build("%s %db 8u %db" % (self.FORMAT, len(self.key), len(self.data)), (len(self.data), self.read_only, self.type, self.key, 0, self.data)) @classmethod def binary(cls, key, data): """returns an ApeTagItem of binary data key is an ASCII string, data is a binary string""" return cls(1, 0, key, data) @classmethod def external(cls, key, data): """returns an ApeTagItem of external data key is an ASCII string, data is a binary string""" return cls(2, 0, key, data) @classmethod def string(cls, key, data): """returns an ApeTagItem of text data key is an ASCII string, data is a unicode string""" return cls(0, 0, key, data.encode('utf-8', 'replace')) class ApeTag(MetaData): """a complete APEv2 tag""" HEADER_FORMAT = "8b 32u 32u 32u [ 1u 2u 26p 1u 1u 1u ] 64p" ITEM = ApeTagItem ATTRIBUTE_MAP = {'track_name': 'Title', 'track_number': 'Track', 'track_total': 'Track', 'album_number': 'Media', 'album_total': 'Media', 'album_name': 'Album', 'artist_name': 'Artist', #"Performer" is not a defined APEv2 key #it would be nice to have, yet would not be standard 'performer_name': 'Performer', 'composer_name': 'Composer', 'conductor_name': 'Conductor', 'ISRC': 'ISRC', 'catalog': 'Catalog', 'copyright': 'Copyright', 'publisher': 'Publisher', 'year': 'Year', 'date': 'Record Date', 'comment': 'Comment'} INTEGER_ITEMS = ('Track', 'Media') def __init__(self, tags, contains_header=True, contains_footer=True): """constructs an ApeTag from a list of ApeTagItem objects""" for tag in tags: if (not isinstance(tag, ApeTagItem)): raise ValueError("%s is not ApeTag" % (repr(tag))) self.__dict__["tags"] = list(tags) self.__dict__["contains_header"] = contains_header self.__dict__["contains_footer"] = contains_footer def __repr__(self): return "ApeTag(%s, %s, %s)" % (repr(self.tags), repr(self.contains_header), repr(self.contains_footer)) def total_size(self): """returns the minimum size of the total ApeTag, in bytes""" size = 0 if (self.contains_header): size += 32 for tag in self.tags: size += tag.total_size() if (self.contains_footer): size += 32 return size def __eq__(self, metadata): if (isinstance(metadata, ApeTag)): if (set(self.keys()) != set(metadata.keys())): return False for tag in self.tags: try: if (tag.data != metadata[tag.key].data): return False except KeyError: return False else: return True elif (isinstance(metadata, MetaData)): return MetaData.__eq__(self, metadata) else: return False def keys(self): return [tag.key for tag in self.tags] def __contains__(self, key): for tag in self.tags: if (tag.key == key): return True else: return False def __getitem__(self, key): for tag in self.tags: if (tag.key == key): return tag else: raise KeyError(key) def get(self, key, default): try: return self[key] except KeyError: return default def __setitem__(self, key, value): for i in xrange(len(self.tags)): if (self.tags[i].key == key): self.tags[i] = value return else: self.tags.append(value) def index(self, key): for (i, tag) in enumerate(self.tags): if (tag.key == key): 
return i else: raise ValueError(key) def __delitem__(self, key): old_tag_count = len(self.tags) self.tags = [tag for tag in self.tags if tag.key != key] if (len(self.tags) == old_tag_count): raise KeyError(key) def __getattr__(self, attr): import re if (attr == 'track_number'): try: track_text = unicode(self["Track"]) track = re.search(r'\d+', track_text) if (track is not None): track_number = int(track.group(0)) if ((track_number == 0) and (re.search(r'/.*?(\d+)', track_text) is not None)): #if track_total is nonzero and track_number is 0 #track_number is a placeholder #so treat track_number as None return None else: return track_number else: #"Track" isn't an integer return None except KeyError: #no "Track" in list of items return None elif (attr == 'track_total'): try: track = re.search(r'/.*?(\d+)', unicode(self["Track"])) if (track is not None): return int(track.group(1)) else: #no slashed integer field in "Track" return None except KeyError: #no "Track" in list of items return None elif (attr == 'album_number'): try: media_text = unicode(self["Media"]) media = re.search(r'\d+', media_text) if (media is not None): album_number = int(media.group(0)) if ((album_number == 0) and (re.search(r'/.*?(\d+)', media_text) is not None)): #if album_total is nonzero and album_number is 0 #album_number is a placeholder #so treat album_number as None return None else: return album_number else: #"Media" isn't an integer return None except KeyError: #no "Media" in list of items return None elif (attr == 'album_total'): try: media = re.search(r'/.*?(\d+)', unicode(self["Media"])) if (media is not None): return int(media.group(1)) else: #no slashed integer field in "Media" return None except KeyError: #no "Media" in list of items return None elif (attr in self.ATTRIBUTE_MAP): try: return unicode(self[self.ATTRIBUTE_MAP[attr]]) except KeyError: return None elif (attr in MetaData.FIELDS): return None else: try: return self.__dict__[attr] except AttrError: raise AttributeError(attr) #if an attribute is updated (e.g. 
self.track_name) #make sure to update the corresponding dict pair def __setattr__(self, attr, value): if (attr in self.ATTRIBUTE_MAP): if (value is not None): import re if (attr == 'track_number'): try: self['Track'].data = re.sub(r'\d+', str(int(value)), self['Track'].data, 1) except KeyError: self['Track'] = self.ITEM.string( 'Track', __number_pair__(value, self.track_total)) elif (attr == 'track_total'): try: if (re.search(r'/\D*\d+', self['Track'].data) is not None): self['Track'].data = re.sub( r'(/\D*)(\d+)', "\\g<1>" + str(int(value)), self['Track'].data, 1) else: self['Track'].data = "%s/%d" % ( self['Track'].data, value) except KeyError: self['Track'] = self.ITEM.string( 'Track', __number_pair__(self.track_number, value)) elif (attr == 'album_number'): try: self['Media'].data = re.sub(r'\d+', str(int(value)), self['Media'].data, 1) except KeyError: self['Media'] = self.ITEM.string( 'Media', __number_pair__(value, self.album_total)) elif (attr == 'album_total'): try: if (re.search(r'/\D*\d+', self['Media'].data) is not None): self['Media'].data = re.sub( r'(/\D*)(\d+)', "\\g<1>" + str(int(value)), self['Media'].data, 1) else: self['Media'].data = "%s/%d" % ( self['Media'].data, value) except KeyError: self['Media'] = self.ITEM.string( 'Media', __number_pair__(self.album_number, value)) else: self[self.ATTRIBUTE_MAP[attr]] = self.ITEM.string( self.ATTRIBUTE_MAP[attr], value) else: delattr(self, attr) else: self.__dict__[attr] = value def __delattr__(self, attr): import re if (attr == 'track_number'): try: #if "Track" field contains a slashed total if (re.search(r'\d+.*?/.*?\d+', self['Track'].data) is not None): #replace unslashed portion with 0 self['Track'].data = re.sub(r'\d+', str(int(0)), self['Track'].data, 1) else: #otherwise, remove "Track" field del(self['Track']) except KeyError: pass elif (attr == 'track_total'): try: track_number = re.search(r'\d+', self["Track"].data.split("/")[0]) #if track number is nonzero if (((track_number is not None) and (int(track_number.group(0)) != 0))): #if "Track" field contains a slashed total #remove slashed total from "Track" field self['Track'].data = re.sub(r'\s*/.*', "", self['Track'].data) else: #if "Track" field contains a slashed total if (re.search(r'/\D*?\d+', self['Track'].data) is not None): #remove "Track" field entirely del(self['Track']) except KeyError: pass elif (attr == 'album_number'): try: #if "Media" field contains a slashed total if (re.search(r'\d+.*?/.*?\d+', self['Media'].data) is not None): #replace unslashed portion with 0 self['Media'].data = re.sub(r'\d+', str(int(0)), self['Media'].data, 1) else: #otherwise, remove "Media" field del(self['Media']) except KeyError: pass elif (attr == 'album_total'): try: album_number = re.search(r'\d+', self["Media"].data.split("/")[0]) #if album number is nonzero if (((album_number is not None) and (int(album_number.group(0)) != 0))): #if "Media" field contains a slashed total #remove slashed total from "Media" field self['Media'].data = re.sub(r'\s*/.*', "", self['Media'].data) else: #if "Media" field contains a slashed total if (re.search(r'/\D*?\d+', self['Media'].data) is not None): #remove "Media" field entirely del(self['Media']) except KeyError: pass elif (attr in self.ATTRIBUTE_MAP): try: del(self[self.ATTRIBUTE_MAP[attr]]) except KeyError: pass elif (attr in MetaData.FIELDS): pass else: try: del(self.__dict__[attr]) except AttrError: raise AttributeError(attr) @classmethod def converted(cls, metadata): """converts a MetaData object to an ApeTag object""" if (metadata 
is None): return None elif (isinstance(metadata, ApeTag)): return ApeTag([tag.copy() for tag in metadata.tags], contains_header=metadata.contains_header, contains_footer=metadata.contains_footer) else: tags = cls([]) for (field, key) in cls.ATTRIBUTE_MAP.items(): if (((field not in cls.INTEGER_FIELDS) and (getattr(metadata, field) is not None))): tags[key] = cls.ITEM.string( key, unicode(getattr(metadata, field))) if (((metadata.track_number is not None) or (metadata.track_total is not None))): tags["Track"] = cls.ITEM.string( "Track", __number_pair__(metadata.track_number, metadata.track_total)) if (((metadata.album_number is not None) or (metadata.album_total is not None))): tags["Media"] = cls.ITEM.string( "Media", __number_pair__(metadata.album_number, metadata.album_total)) for image in metadata.images(): tags.add_image(image) return tags def raw_info(self): """returns the ApeTag as a human-readable unicode string""" from os import linesep from . import display_unicode #align tag values on the "=" sign if (len(self.tags) > 0): max_indent = max([len(display_unicode(tag.raw_info_pair()[0])) for tag in self.tags]) tag_strings = [u"%s%s = %s" % (u" " * (max_indent - len(display_unicode(key))), key, value) for (key, value) in [tag.raw_info_pair() for tag in self.tags]] else: tag_strings = [] return linesep.decode('ascii').join([u"APEv2:"] + tag_strings) @classmethod def supports_images(cls): """returns True""" return True def __parse_image__(self, key, type): from . import Image import cStringIO data = cStringIO.StringIO(self[key].data) description = [] c = data.read(1) while (c != '\x00'): description.append(c) c = data.read(1) return Image.new(data.read(), "".join(description).decode('utf-8', 'replace'), type) def add_image(self, image): """embeds an Image object in this metadata""" if (image.type == 0): self['Cover Art (front)'] = self.ITEM.binary( 'Cover Art (front)', image.description.encode('utf-8', 'replace') + chr(0) + image.data) elif (image.type == 1): self['Cover Art (back)'] = self.ITEM.binary( 'Cover Art (back)', image.description.encode('utf-8', 'replace') + chr(0) + image.data) def delete_image(self, image): """deletes an Image object from this metadata""" if ((image.type == 0) and 'Cover Art (front)' in self.keys()): del(self['Cover Art (front)']) elif ((image.type == 1) and 'Cover Art (back)' in self.keys()): del(self['Cover Art (back)']) def images(self): """returns a list of embedded Image objects""" #APEv2 supports only one value per key #so a single front and back cover are all that is possible img = [] if ('Cover Art (front)' in self.keys()): img.append(self.__parse_image__('Cover Art (front)', 0)) if ('Cover Art (back)' in self.keys()): img.append(self.__parse_image__('Cover Art (back)', 1)) return img @classmethod def read(cls, apefile): """returns an ApeTag object from an APEv2 tagged file object may return None if the file object has no tag""" from .bitstream import BitstreamReader apefile.seek(-32, 2) reader = BitstreamReader(apefile, 1) (preamble, version, tag_size, item_count, read_only, item_encoding, is_header, no_footer, has_header) = reader.parse(cls.HEADER_FORMAT) if ((preamble != "APETAGEX") or (version != 2000)): return None apefile.seek(-tag_size, 2) return cls([ApeTagItem.parse(reader) for i in xrange(item_count)], contains_header=has_header, contains_footer=True) def build(self, writer): """outputs an APEv2 tag to writer""" from .bitstream import BitstreamRecorder tags = BitstreamRecorder(1) for tag in self.tags: tag.build(tags) if 
(self.contains_header): writer.build(ApeTag.HEADER_FORMAT, ("APETAGEX", # preamble 2000, # version tags.bytes() + 32, # tag size len(self.tags), # item count 0, # read only 0, # encoding 1, # is header not self.contains_footer, # no footer self.contains_header)) # has header tags.copy(writer) if (self.contains_footer): writer.build(ApeTag.HEADER_FORMAT, ("APETAGEX", # preamble 2000, # version tags.bytes() + 32, # tag size len(self.tags), # item count 0, # read only 0, # encoding 0, # is header not self.contains_footer, # no footer self.contains_header)) # has header def clean(self, fixes_applied): import re from .text import (CLEAN_REMOVE_DUPLICATE_TAG, CLEAN_REMOVE_TRAILING_WHITESPACE, CLEAN_REMOVE_LEADING_WHITESPACE, CLEAN_FIX_TAG_FORMATTING, CLEAN_REMOVE_EMPTY_TAG) used_tags = set([]) tag_items = [] for tag in self.tags: if (tag.key.upper() in used_tags): fixes_applied.append( CLEAN_REMOVE_DUPLICATE_TAG % {"field": tag.key.decode('ascii')}) elif (tag.type == 0): used_tags.add(tag.key.upper()) text = unicode(tag) #check trailing whitespace fix1 = text.rstrip() if (fix1 != text): fixes_applied.append( CLEAN_REMOVE_TRAILING_WHITESPACE % {"field": tag.key.decode('ascii')}) #check leading whitespace fix2 = fix1.lstrip() if (fix2 != fix1): fixes_applied.append( CLEAN_REMOVE_LEADING_WHITESPACE % {"field": tag.key.decode('ascii')}) if (tag.key in self.INTEGER_ITEMS): if (u"/" in fix2): #item is a slashed field of some sort (current, total) = fix2.split(u"/", 1) current_int = re.search(r'\d+', current) total_int = re.search(r'\d+', total) if ((current_int is None) and (total_int is None)): #neither side contains an integer value #so ignore it altogether fix3 = fix2 elif ((current_int is not None) and (total_int is None)): fix3 = u"%d" % (int(current_int.group(0))) elif ((current_int is None) and (total_int is not None)): fix3 = u"%d/%d" % (0, int(total_int.group(0))) else: #both sides contain an int fix3 = u"%d/%d" % (int(current_int.group(0)), int(total_int.group(0))) else: #item contains no slash current_int = re.search(r'\d+', fix2) if (current_int is not None): #item contains an integer fix3 = unicode(int(current_int.group(0))) else: #item contains no integer value so ignore it #(although 'Track' should only contain # integers, 'Media' may contain strings # so it may be best to simply ignore that case) fix3 = fix2 if (fix3 != fix2): fixes_applied.append( CLEAN_FIX_TAG_FORMATTING % {"field": tag.key.decode('ascii')}) else: fix3 = fix2 if (len(fix3) > 0): tag_items.append(ApeTagItem.string(tag.key, fix3)) else: fixes_applied.append( CLEAN_REMOVE_EMPTY_TAG % {"field": tag.key.decode('ascii')}) else: used_tags.add(tag.key.upper()) tag_items.append(tag) return self.__class__(tag_items, self.contains_header, self.contains_footer) class ApeTaggedAudio: """a class for handling audio formats with APEv2 tags this class presumes there will be a filename attribute which can be opened and checked for tags, or written if necessary""" def get_metadata(self): """returns an ApeTag object, or None raises IOError if unable to read the file""" f = file(self.filename, 'rb') try: return ApeTag.read(f) finally: f.close() def update_metadata(self, metadata): """takes this track's current MetaData object as returned by get_metadata() and sets this track's metadata with any fields updated in that object raises IOError if unable to write the file """ if (metadata is None): return elif (not isinstance(metadata, ApeTag)): from .text import ERR_FOREIGN_METADATA raise ValueError(ERR_FOREIGN_METADATA) from .bitstream 
import BitstreamReader, BitstreamWriter from . import transfer_data f = file(self.filename, "r+b") f.seek(-32, 2) (preamble, version, tag_size, item_count, read_only, item_encoding, is_header, no_footer, has_header) = BitstreamReader(f, 1).parse(ApeTag.HEADER_FORMAT) if ((preamble == 'APETAGEX') and (version == 2000)): if (has_header): old_tag_size = 32 + tag_size else: old_tag_size = tag_size if (metadata.total_size() >= old_tag_size): #metadata has grown #so append it to existing file f.seek(-old_tag_size, 2) metadata.build(BitstreamWriter(f, 1)) else: #metadata has shrunk #so rewrite file with smaller metadata import tempfile from os.path import getsize rewritten = tempfile.TemporaryFile() #copy everything but the last "old_tag_size" bytes #from existing file to rewritten file f = open(self.filename, "rb") limited_transfer_data(f.read, rewritten.write, getsize(self.filename) - old_tag_size) f.close() #append new tag to rewritten file metadata.build(BitstreamWriter(rewritten, 1)) #finally, overwrite current file with rewritten file rewritten.seek(0, 0) f = open(self.filename, "wb") transfer_data(rewritten.read, f.write) f.close() rewritten.close() else: #no existing metadata, so simply append a fresh tag f = file(self.filename, "ab") metadata.build(BitstreamWriter(f, 1)) f.close() def set_metadata(self, metadata): """takes a MetaData object and sets this track's metadata raises IOError if unable to write the file""" if (metadata is None): return from .bitstream import BitstreamWriter old_metadata = self.get_metadata() new_metadata = ApeTag.converted(metadata) if (old_metadata is not None): #transfer ReplayGain tags from old metadata to new metadata for tag in ["replaygain_track_gain", "replaygain_track_peak", "replaygain_album_gain", "replaygain_album_peak"]: try: #if old_metadata has tag, shift it over new_metadata[tag] = old_metadata[tag] except KeyError: try: #otherwise, if new_metadata has tag, delete it del(new_metadata[tag]) except KeyError: #if neither has tag, ignore it continue #transfer Cuesheet from old metadata to new metadata if ("Cuesheet" in old_metadata): new_metadata["Cuesheet"] = old_metadata["Cuesheet"] elif ("Cuesheet" in new_metadata): del(new_metadata["Cuesheet"]) self.update_metadata(new_metadata) else: #delete ReplayGain tags from new metadata for tag in ["replaygain_track_gain", "replaygain_track_peak", "replaygain_album_gain", "replaygain_album_peak"]: try: del(new_metadata[tag]) except KeyError: continue #delete Cuesheet from new metadata if ("Cuesheet" in new_metadata): del(new_metadata["Cuesheet"]) #no existing metadata, so simply append a fresh tag f = file(self.filename, "ab") new_metadata.build(BitstreamWriter(f, 1)) f.close() def delete_metadata(self): """deletes the track's MetaData raises IOError if unable to write the file""" from .bitstream import BitstreamReader, BitstreamWriter from . 
import transfer_data f = file(self.filename, "r+b") f.seek(-32, 2) (preamble, version, tag_size, item_count, read_only, item_encoding, is_header, no_footer, has_header) = BitstreamReader(f, 1).parse(ApeTag.HEADER_FORMAT) if ((preamble == 'APETAGEX') and (version == 2000)): #there's existing metadata to delete #so rewrite file without trailing metadata tag if (has_header): old_tag_size = 32 + tag_size else: old_tag_size = tag_size import tempfile from os.path import getsize rewritten = tempfile.TemporaryFile() #copy everything but the last "old_tag_size" bytes #from existing file to rewritten file f = open(self.filename, "rb") limited_transfer_data(f.read, rewritten.write, getsize(self.filename) - old_tag_size) f.close() #finally, overwrite current file with rewritten file rewritten.seek(0, 0) f = open(self.filename, "wb") transfer_data(rewritten.read, f.write) f.close() rewritten.close() class ApeAudio(ApeTaggedAudio, AudioFile): """a Monkey's Audio file""" SUFFIX = "ape" NAME = SUFFIX DEFAULT_COMPRESSION = "5000" COMPRESSION_MODES = tuple([str(x * 1000) for x in range(1, 6)]) BINARIES = ("mac",) # FILE_HEAD = Con.Struct("ape_head", # Con.String('id', 4), # Con.ULInt16('version')) # #version >= 3.98 # APE_DESCRIPTOR = Con.Struct("ape_descriptor", # Con.ULInt16('padding'), # Con.ULInt32('descriptor_bytes'), # Con.ULInt32('header_bytes'), # Con.ULInt32('seektable_bytes'), # Con.ULInt32('header_data_bytes'), # Con.ULInt32('frame_data_bytes'), # Con.ULInt32('frame_data_bytes_high'), # Con.ULInt32('terminating_data_bytes'), # Con.String('md5', 16)) # APE_HEADER = Con.Struct("ape_header", # Con.ULInt16('compression_level'), # Con.ULInt16('format_flags'), # Con.ULInt32('blocks_per_frame'), # Con.ULInt32('final_frame_blocks'), # Con.ULInt32('total_frames'), # Con.ULInt16('bits_per_sample'), # Con.ULInt16('number_of_channels'), # Con.ULInt32('sample_rate')) # #version <= 3.97 # APE_HEADER_OLD = Con.Struct("ape_header_old", # Con.ULInt16('compression_level'), # Con.ULInt16('format_flags'), # Con.ULInt16('number_of_channels'), # Con.ULInt32('sample_rate'), # Con.ULInt32('header_bytes'), # Con.ULInt32('terminating_bytes'), # Con.ULInt32('total_frames'), # Con.ULInt32('final_frame_blocks')) def __init__(self, filename): """filename is a plain string""" AudioFile.__init__(self, filename) (self.__samplespersec__, self.__channels__, self.__bitspersample__, self.__totalsamples__) = ApeAudio.__ape_info__(filename) @classmethod def is_type(cls, file): """returns True if the given file object describes this format takes a seekable file pointer rewound to the start of the file""" return file.read(4) == "MAC " def lossless(self): """returns True""" return True @classmethod def supports_foreign_riff_chunks(cls): """returns True""" return True def has_foreign_riff_chunks(self): """returns True""" #FIXME - this isn't strictly true #I'll need a way to detect foreign chunks in APE's stream #without decoding it first, #but since I'm not supporting APE anyway, I'll take the lazy way out return True def bits_per_sample(self): """returns an integer number of bits-per-sample this track contains""" return self.__bitspersample__ def channels(self): """returns an integer number of channels this track contains""" return self.__channels__ def total_frames(self): """returns the total PCM frames of the track as an integer""" return self.__totalsamples__ def sample_rate(self): """returns the rate of the track's audio as an integer number of Hz""" return self.__samplespersec__ @classmethod def __ape_info__(cls, filename): f = 
file(filename, 'rb') try: file_head = cls.FILE_HEAD.parse_stream(f) if (file_head.id != 'MAC '): from .text import ERR_APE_INVALID_HEADER raise InvalidFile(ERR_APE_INVALID_HEADER) if (file_head.version >= 3980): # the latest APE file type descriptor = cls.APE_DESCRIPTOR.parse_stream(f) header = cls.APE_HEADER.parse_stream(f) return (header.sample_rate, header.number_of_channels, header.bits_per_sample, ((header.total_frames - 1) * header.blocks_per_frame) + header.final_frame_blocks) else: # old-style APE file (obsolete) header = cls.APE_HEADER_OLD.parse_stream(f) if (file_head.version >= 3950): blocks_per_frame = 0x48000 elif ((file_head.version >= 3900) or ((file_head.version >= 3800) and (header.compression_level == 4000))): blocks_per_frame = 0x12000 else: blocks_per_frame = 0x2400 if (header.format_flags & 0x01): bits_per_sample = 8 elif (header.format_flags & 0x08): bits_per_sample = 24 else: bits_per_sample = 16 return (header.sample_rate, header.number_of_channels, bits_per_sample, ((header.total_frames - 1) * blocks_per_frame) + header.final_frame_blocks) finally: f.close() def to_wave(self, wave_filename): """writes the contents of this file to the given .wav filename string raises EncodingError if some error occurs during decoding""" from . import BIN from . import transfer_data import subprocess import os if (self.filename.endswith(".ape")): devnull = file(os.devnull, "wb") sub = subprocess.Popen([BIN['mac'], self.filename, wave_filename, '-d'], stdout=devnull, stderr=devnull, creationflags=0x08000000) sub.wait() devnull.close() else: devnull = file(os.devnull, 'ab') import tempfile ape = tempfile.NamedTemporaryFile(suffix='.ape') f = file(self.filename, 'rb') transfer_data(f.read, ape.write) f.close() ape.flush() sub = subprocess.Popen([BIN['mac'], ape.name, wave_filename, '-d'], stdout=devnull, stderr=devnull, creationflags=0x08000000) sub.wait() ape.close() devnull.close() @classmethod def from_wave(cls, filename, wave_filename, compression=None): """encodes a new AudioFile from an existing .wav file takes a filename string, wave_filename string of an existing WaveAudio file and an optional compression level string encodes a new audio file from the wave's data at the given filename with the specified compression level and returns a new ApeAudio object""" from . import BIN import subprocess import os if (str(compression) not in cls.COMPRESSION_MODES): compression = cls.DEFAULT_COMPRESSION devnull = file(os.devnull, "wb") sub = subprocess.Popen([BIN['mac'], wave_filename, filename, "-c%s" % (compression)], stdout=devnull, stderr=devnull, creationflags=0x08000000) sub.wait() devnull.close() return ApeAudio(filename)
R-a-dio/python-audio-tools
audiotools/ape.py
Python
gpl-2.0
45,026
[ "Brian" ]
45a71021658aa5adac4d787f5da9032ce5f866237488ff235cf5871e7eb0f49d
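ApeTag.read() in the record above locates a tag by seeking to the last 32 bytes of the file and checking the APEv2 footer. The same probe in stdlib-only Python, following the published APEv2 footer layout (little-endian integers) rather than audiotools' BitstreamReader; a sketch, not the library's API:

import struct

def probe_apev2(path):
    """Return basic APEv2 footer fields, or None if no tag is present."""
    with open(path, 'rb') as f:
        f.seek(-32, 2)                 # footer occupies the final 32 bytes
        footer = f.read(32)
    preamble, version, tag_size, item_count, flags = struct.unpack(
        '<8s4I8x', footer)             # 8-byte preamble, four uint32s, 8 reserved
    if preamble != b'APETAGEX' or version != 2000:
        return None                    # same bail-out as ApeTag.read()
    return {
        'tag_size': tag_size,          # items plus footer; header excluded
        'item_count': item_count,
        'has_header': bool(flags & (1 << 31)),
        'read_only': bool(flags & 1),
    }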
from django.utils.translation import ugettext_lazy as _ from .constants import ( ALIVE, DEAD, DECLINED, DWTA, FEMALE, IND, MALE, NAIVE, NEG, NEVER, NO, NOT_APPLICABLE, OMANG, OTHER, POS, REFUSED, UNKNOWN, YES, MORNING, AFTERNOON, EVENING, ANYTIME, WEEKDAYS, WEEKENDS, NOT_SURE, NORMAL, ABNORMAL, NOT_DONE) BLANK_CHOICE_DASH = [('', '---------')] """ Try to keep these in alphabetical order """ ACU_EST = ( ('Acute', 'Acute'), ('Established', 'Established'), ) ACU_EST_NEG = ( ('Acute', 'Acute'), ('Established', 'Established'), ('Negative', 'Negative'), ) ALIVE_DEAD = ( (ALIVE, 'Alive'), (DEAD, 'Dead'), ) ALIVE_DEAD_UNKNOWN = ( (ALIVE, 'Alive'), (DEAD, 'Deceased'), (UNKNOWN, 'Unknown'), ) ART_STATUS = ( ('ON', 'Yes, on ART'), ('STOPPED', 'No, stopped ART'), (NAIVE, 'No, have never taken ART'), ) ART_STATUS_UNKNOWN = ( ('ON', 'ON ART'), ('STOPPED', 'Stopped'), (NAIVE, 'Naive'), (UNKNOWN, 'Unknown'), ) ART_STATUS_CONFIRM = ( ('OPD', '1. Show OPD/IDCC card'), ('Pills', '2. Show pills'), ('Pic', '3. Identify pictorial'), ) CONFIRMED_SUSPECTED = ( ('CONFIRMED', 'Confirmed'), ('SUSPECTED', 'Suspected'), ) COUNTRY = ( ('botswana', 'Botswana'), ('zimbabwe', 'Zimbabwe'), ('rsa', 'South Africa'), ('zambia', 'Zambia'), ('namibia', 'Namibia'), ('nigeria', 'Nigeria'), ('china', 'China'), ('india', 'India'), ('OTHER', 'Other'), ) DAYS_OF_WEEK = ( ('Monday', 'Monday'), ('Tuesday', 'Tuesday'), ('Wednesday', 'Wednesday'), ('Thursday', 'Thursday'), ('Friday', 'Friday'), ('Saturday', 'Saturday'), ('Sunday', 'Sunday'), ('AnyDay', 'Any day'), ) DATE_ESTIMATED = ( ('-', 'No'), ('D', 'Yes, estimated the Day'), ('MD', 'Yes, estimated Month and Day'), ('YMD', 'Yes, estimated Year, Month and Day'), ) DEATH_RELATIONSIP_TO_STUDY = ( ('Definitely not related', 'Definitely not related'), ('Probably not related', 'Probably not related'), ('Possible related', 'Possible related'), ('Probably related', 'Probably related'), ('Definitely related', 'Definitely related') ) FEEDING = ( ('BF', 'Breast feed'), ('FF', 'Formula feed'), ) GENDER = ( (MALE, _('Male')), (FEMALE, _('Female')), ) GENDER_UNDETERMINED = ( (MALE, _('Male')), (FEMALE, _('Female')), ('U', _('Undetermined')), ) GRADING_SCALE = ( (1, 'Grade 1'), (2, 'Grade 2'), (3, 'Grade 3'), (4, 'Grade 4'), (5, 'Grade 5'), ) GRADING_SCALE_234 = ( (2, 'Grade 2'), (3, 'Grade 3'), (4, 'Grade 4'), ) GRADING_SCALE_34 = ( (3, 'Grade 3'), (4, 'Grade 4'), ) HIV_RESULT = ( (POS, 'HIV Positive (Reactive)'), (NEG, 'HIV Negative (Non-reactive)'), (IND, 'Indeterminate'), (DECLINED, 'Participant declined testing'), ('Not performed', 'Test could not be performed (e.g. 
supply outage, technical problem)'), ) """do not change without inspecting implication to check_omang_field() in utils.py""" IDENTITY_TYPE = ( (OMANG, 'Omang'), ('DRIVERS', 'Driver\'s License'), ('PASSPORT', 'Passport'), ('OMANG_RCPT', 'Omang Receipt'), (OTHER, 'Other'), ) NORMAL_ABNORMAL = ( (NORMAL, 'Normal'), (ABNORMAL, 'Abnormal'), ) NORMAL_ABNORMAL_NOEXAM = ( (NORMAL, 'Normal'), (ABNORMAL, 'Abnormal'), ('NO_EXAM', 'No exam performed'), ) NORMAL_ABNORMAL_NOTEVALUATED = ( (NORMAL, 'Normal'), (ABNORMAL, 'Abnormal'), ('NOT_EVAL', 'Not evaluated'), ) POS_NEG = ( (POS, 'Positive'), (NEG, 'Negative'), (IND, 'Indeterminate'), ) POS_NEG_REFUSED = ( (POS, 'Positive'), (NEG, 'Negative'), (IND, 'Indeterminate'), ('REF', 'Refused to disclose'), ) POS_NEG_ANY = ( (POS, 'Positive'), (NEG, 'Negative'), ('ANY', 'Any'), ) POS_NEG_ONLY = ( (POS, _('Positive')), (NEG, _('Negative')), ) POS_NEG_UNKNOWN = ( (POS, _('Positive')), (NEG, _('Negative')), (UNKNOWN, _('Unknown')), ) POS_NEG_IND_UNKNOWN = ( (POS, _('Positive')), (NEG, _('Negative')), (IND, 'Indeterminate'), (UNKNOWN, _('Unknown')), ) POS_NEG_ACU = ( ('Positive', 'Positive'), ('Negative', 'Negative'), ('Possible Acute', 'Possible acute'), ('Indeterminate', 'Indeterminate'), ) POS_NEG_NOTESTED = ( (POS, 'Positive'), (NEG, 'Negative'), (NEVER, 'Never tested for HIV'), ) POS_NEG_UNTESTED_REFUSAL = ( (POS, 'Positive'), (NEG, 'Negative'), (IND, 'Indeterminate'), (NEVER, 'Never tested for HIV'), (UNKNOWN, 'Unknown'), (DWTA, 'Don\'t want to answer'), ) REFUSAL_STATUS = ( (REFUSED, 'Refused'), ('NOT_REFUSED', 'No longer refusing'), ) SEVERITY_LEVEL = ( ('mild', 'Mild'), ('moderate', 'Moderate'), ('severe', 'Severe'), ) SEXUAL_DEBUT = ( ('<=14', '14 or under'), ('15-17', ' 15 - 17'), ('>=18', '18 or above'), ) TIME_OF_WEEK = ( (WEEKDAYS, 'Weekdays'), (WEEKENDS, 'Weekends'), (ANYTIME, 'Anytime') ) TIME_OF_DAY = ( (MORNING, 'Morning'), (AFTERNOON, 'Afternoon'), (EVENING, 'Evening'), (ANYTIME, 'Anytime') ) TIME_UNITS = ( ('TODAY', 'Today'), ('DAYS', 'Days'), ('WEEKS', 'Weeks'), ('MONTHS', 'Months'), ('YEARS', 'Years'), ) URINALYSIS = ( ('NAD', 'NAD'), ('Sugar Neg', 'Sugar Neg'), ('Sugar +', 'Sugar +'), ('Sugar ++', 'Sugar ++'), ('Sugar +++', 'Sugar +++'), ('Blood', 'Blood'), ('Protein', 'Protein'), ('Cells', 'Cells'), ) YES_NO = ( (YES, _(YES)), (NO, _(NO)), ) YES_NO_DECLINED = ( (YES, YES), (NO, NO), (DECLINED, 'Yes, but subject declined copy'), ) YES_NO_OPTIONAL = ( (YES, YES), (NO, NO), ('Optional', 'Optional'), ) YES_NO_REFUSED = ( (YES, _(YES)), (NO, _(NO)), (REFUSED, _('Refused to answer')), ) YES_NO_DWTA = ( (YES, _(YES)), (NO, _(NO)), (DWTA, _('Don\'t want to answer')), ) YES_NO_NA_SPECIFY = ( (YES, 'Yes, (Specify below)'), (NO, NO), (NOT_APPLICABLE, 'Not applicable'), ) YES_NO_NA = ( (YES, YES), (NO, NO), (NOT_APPLICABLE, 'Not applicable'), ) YES_NO_NA_DWTA = ( (YES, _(YES)), (NO, _(NO)), (DWTA, _('Don\'t want to answer')), (NOT_APPLICABLE, 'Not applicable'), ) YES_NO_NA_DWTA_DNK = ( (YES, _(YES)), (NO, _(NO)), (DWTA, _('Don\'t want to answer')), ('cant_remember', 'Cannot remember'), ) YES_NO_NOT_EVALUATED = ( (YES, YES), (NO, NO), ('Not_evaluated', 'Not evaluated'), ) YES_NO_NOT_EVALUATED_NA = ( (YES, YES), (NO, NO), ('Not_evaluated', 'Not evaluated'), (NOT_APPLICABLE, 'Not applicable'), ) YES_NO_NOT_DONE = ( (YES, YES), (NO, NO), (NOT_DONE, 'Not done'), ) YES_NO_DOESNT_WORK = ( (YES, YES), (NO, NO), ('DontWork', 'Doesn\'t work'), ) YES_NO_UNKNOWN = ( (YES, YES), (NO, NO), (UNKNOWN, 'Unknown'), ) YES_NO_NA_DWTA_DNK = ( (YES, _(YES)), (NO, _(NO)), 
(DWTA, _('Don\'t want to answer')), ('cant_remember', 'Cannot remember')) YES_NO_UNKNOWN_NA = ( (YES, YES), (NO, NO), (UNKNOWN, 'Unknown'), (NOT_APPLICABLE, 'Not applicable'), ) YES_NO_UNSURE = ( (YES, YES), (NO, NO), (NOT_SURE, 'Not sure'), ) YES_NO_UNSURE_DWTA = ( (YES, YES), (NO, NO), (NOT_SURE, 'Not sure'), (DWTA, 'Don\'t want to answer') ) YES_NO_UNSURE_NA = ( (YES, YES), (NO, NO), (NOT_SURE, 'Not sure'), (NOT_APPLICABLE, 'Not applicable'), ) YES_NO_DONT_KNOW = ( (YES, YES), (NO, NO), ('Dont_know', 'Do not know'), ) YES_NO_DONT_KNOW_NA = ( (YES, YES), (NO, NO), ('Dont_know', 'Do not know'), (NOT_APPLICABLE, 'Not applicable'), ) YES_NO_DOESNT_WORK = ( (YES, YES), (NO, NO), ('Doesnt_work', 'Doesn\'t work'), ) ARV_DRUG_LIST = ( ('Nevirapine', 'NVP'), ('Kaletra', 'KAL'), ('Aluvia', 'ALU'), ('Truvada', 'TRV'), ('Tenoforvir', 'TDF',), ('Zidovudine', 'AZT'), ('Lamivudine', '3TC'), ('Efavirenz', 'EFV'), ('Didanosine', 'DDI'), ('Stavudine', 'D4T'), ('Nelfinavir', 'NFV'), ('Abacavir', 'ABC'), ('Combivir', 'CBV'), ('Ritonavir', 'RTV'), ('Trizivir', 'TZV'), ('Raltegravir', 'RAL'), ('Saquinavir,soft gel capsule', 'FOR'), ('Saquinavir,hard capsule', 'INV'), ('Kaletra or Aluvia', 'KAL or ALU'), ('Atripla', 'ATR'), ('HAART,unknown', 'HAART,unknown'), ) ARV_MODIFICATION_REASON = ( ('Initial dose', 'Initial dose'), ('Never started', 'Never started'), ('Toxicity decreased_resolved', 'Toxicity decreased/resolved'), ('Completed PMTCT intervention', 'Completed PMTCT intervention'), ('Completed postpartum tail', 'Completed post-partum "tail"'), ('Scheduled dose increase', 'Scheduled dose increase'), ('Confirmed infant HIV infection, ending study drug', 'Confirmed infant HIV infection, ending study drug'), ('completed protocol', 'Completion of protocol-required period of study treatment'), ('HAART not available', 'HAART not available'), ('Anemia', 'Anemia'), ('Bleeding', 'Bleeding'), ('CNS symptoms', 'CNS symptoms (sleep, psych, etc)'), ('Diarrhea', 'Diarrhea'), ('Fatigue', 'Fatigue'), ('Headache', 'Headache'), ('Hepatotoxicity', 'Hepatotoxicity'), ('Nausea', 'Nausea'), ('Neutropenia', 'Neutropenia'), ('Thrombocytopenia', 'Thrombocytopenia'), ('Vomiting', 'Vomiting'), ('Rash', 'Rash'), ('Rash resolved', 'Rash resolved'), ('Neuropathy', 'Neuropathy'), ('Hypersensitivity_allergic reaction', 'Hypersensitivity / allergic reaction'), ('Pancreatitis', 'Pancreatitis'), ('Lactic Acidiosis', 'Lactic Acidiosis'), ('Pancytopenia', 'Pancytopenia'), ('Virologic failure', 'Virologic failure'), ('Immunologic failure', 'Immunologic failure(CD4)'), ('Clinical failure', 'Clinical failure'), ('Clinician request', 'Clinician request, other reason (including convenience)'), ('Subject request', 'Subject request, other reason (including convenience)'), ('Non-adherence with clinic visits', 'Non-adherence with clinic visits'), ('Non-adherence with ARVs', 'Non-adherence with ARVs'), ('Death', 'Death'), (OTHER, 'Other'), ) ARV_STATUS = ( ('no_mod', '1. No modifications made to existing HAART treatment',), ('start', '2. Started antriretroviral treatment since last attended scheduled visit(including today)',), ('discontinued', '3. Permanently discontinued antiretroviral treatment at or before last study visit',), ('modified', ('4. Change in at least one antiretroviral medication since last ' 'attended scheduled visit (including today)(dose modification, ' 'permanent discontinuation, temporary hold, resumption / initiation ' 'after temporary hold)'),), ) ARV_STATUS_WITH_NEVER = ( ('no_mod', '1. 
No modifications made since the last attended scheduled visit or today'), ('start', '2. Starting today or has started since last attended scheduled visit'), ('discontinued', '3. Permanently discontinued at or before the last attended scheduled visit'), ('never started', '4. Never started'), ('modified', '5. Change in at least one medication since the last attended scheduled visit or today'), (NOT_APPLICABLE, 'Not applicable'), ) DOSE_STATUS = ( ('New', 'New'), ('Permanently discontinued', 'Permanently discontinued'), ('Temporarily held', 'Temporarily held'), ('Dose modified', 'Dose modified'), ('Resumed', 'Resumed'), ('Not initiated', 'Not initiated'), ) WHYNOPARTICIPATE_CHOICE = ( ('I don\'t have time', _('I don\'t have time')), ('I don\'t want to answer the questions', _('I don\'t want to answer the questions')), ('I don\'t want to have the blood drawn', _('I don\'t want to have the blood drawn')), ('I am afraid my information will not be private', _('I am afraid my information will not be private')), ('Fear of needles', _('Fear of needles')), ('Illiterate does not want a witness', _('Illiterate does not want a witness')), ('I don\'t want to take part', _('I don\'t want to take part')), ('I haven\'t had a chance to think about it', _('I haven\'t had a chance to think about it')), ('Have a newly born baby, not permitted', _('Have a newly born baby, not permitted')), ('The appointment was not honoured', _('The appointment was not honoured')), ('not_sure', _('I am not sure')), ('OTHER', _('Other, specify:')), ('not_answering', _('Don\'t want to answer')), )
botswana-harvard/edc-constants
edc_constants/choices.py
Python
gpl-2.0
12,706
[ "VisIt" ]
f40258816d3f388f4ec2d59340ad869a3970cbefd85e18b30a6b48c676bc3e25
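These tuples are plain Django choices: each pair is (stored value, human-readable label), with the stored value usually drawn from edc_constants.constants. A sketch of typical consumption; the SubjectScreening model is hypothetical, not part of edc-constants:

from django.db import models

from edc_constants.choices import YES_NO_UNKNOWN
from edc_constants.constants import UNKNOWN


class SubjectScreening(models.Model):  # hypothetical example model
    has_diabetes = models.CharField(
        max_length=15,
        choices=YES_NO_UNKNOWN,  # first tuple element is what gets stored
        default=UNKNOWN,
    )

Django renders the label in forms and exposes it through the generated get_has_diabetes_display() accessor, which is why the labels above carry ugettext_lazy wrappers while the stored constants stay untranslated.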
""" Feedforward layers. TODO: write more documentation """ __docformat__ = 'restructedtext en' __authors__ = ("Razvan Pascanu " "KyungHyun Cho " "Caglar Gulcehre ") __contact__ = "Razvan Pascanu <r.pascanu@gmail>" import numpy import copy import theano import theano.tensor as TT from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams from groundhog import utils from groundhog.utils import sample_weights, \ sample_weights_classic,\ init_bias, \ constant_shape from basic import Layer class MultiLayer(Layer): """ Implementing a standard feed forward MLP """ def __init__(self, rng, n_in, n_hids=[500,500], activation='TT.tanh', scale=0.01, sparsity=-1, rank_n_approx=0, rank_n_activ='lambda x: x', weight_noise=False, dropout = 1., init_fn='sample_weights_classic', bias_fn='init_bias', bias_scale = 0., learn_bias = True, grad_scale = 1., name=None): """ :type rng: numpy random generator :param rng: numpy random generator :type n_in: int :param n_in: number of inputs units :type n_hids: list of ints :param n_hids: Number of hidden units on each layer of the MLP :type activation: string/function or list of :param activation: Activation function for the embedding layers. If a list it needs to have a value for each layer. If not, the same activation will be applied to all layers :type scale: float or list of :param scale: depending on the initialization function, it can be the standard deviation of the Gaussian from which the weights are sampled or the largest singular value. If a single value it will be used for each layer, otherwise it has to have one value for each layer :type sparsity: int or list of :param sparsity: if a single value, it will be used for each layer, otherwise it has to be a list with as many values as layers. If negative, it means the weight matrix is dense. Otherwise it means this many randomly selected input units are connected to an output unit :type rank_n_approx: int :param rank_n_approx: It applies to the first layer only. If positive and larger than 0, the first weight matrix is factorized into two matrices. The first one goes from input to `rank_n_approx` hidden units, the second from `rank_n_approx` to the number of units on the second layer :type rank_n_activ: string or function :param rank_n_activ: Function that is applied on on the intermediary layer formed from factorizing the first weight matrix (Q: do we need this?) :type weight_noise: bool :param weight_noise: If true, the model is used with weight noise (and the right shared variable are constructed, to keep track of the noise) :type dropout: float :param dropout: the probability with which hidden units are dropped from the hidden layer. If set to 1, dropout is not used :type init_fn: string or function :param init_fn: function used to initialize the weights of the layer. We recommend using either `sample_weights_classic` or `sample_weights` defined in the utils :type bias_fn: string or function :param bias_fn: function used to initialize the biases. We recommend using `init_bias` defined in the utils :type bias_scale: float :param bias_scale: argument passed to `bias_fn`, depicting the scale of the initial bias :type learn_bias: bool :param learn_bias: flag, saying if we should learn the bias or keep it constant :type grad_scale: float or theano scalar :param grad_scale: factor with which the gradients with respect to the parameters of this layer are scaled. It is used for differentiating between the different parameters of a model. 
:type name: string :param name: name of the layer (used to name parameters). NB: in this library names are very important because certain parts of the code relies on name to disambiguate between variables, therefore each layer should have a unique name. """ assert rank_n_approx >= 0, "Please enter a valid rank_n_approx" self.rank_n_approx = rank_n_approx if isinstance(rank_n_activ, (str, unicode)): rank_n_activ = eval(rank_n_activ) self.rank_n_activ = rank_n_activ if type(n_hids) not in (list, tuple): n_hids = [n_hids] n_layers = len(n_hids) self.n_layers = n_layers if type(scale) not in (list, tuple): scale = [scale] * n_layers if type(sparsity) not in (list, tuple): sparsity = [sparsity] * n_layers for idx, sp in enumerate(sparsity): if sp < 0: sparsity[idx] = n_hids[idx] if type(activation) not in (list, tuple): activation = [activation] * n_layers if type(bias_scale) not in (list, tuple): bias_scale = [bias_scale] * n_layers if bias_fn not in (list, tuple): bias_fn = [bias_fn] * n_layers if init_fn not in (list, tuple): init_fn = [init_fn] * n_layers for dx in xrange(n_layers): if isinstance(bias_fn[dx], (str, unicode)): bias_fn[dx] = eval(bias_fn[dx]) if isinstance(init_fn[dx], (str, unicode)): init_fn[dx] = eval(init_fn[dx]) if isinstance(activation[dx], (str, unicode)): activation[dx] = eval(activation[dx]) super(MultiLayer, self).__init__(n_in, n_hids[-1], rng, name) self.trng = RandomStreams(self.rng.randint(int(1e6))) self.activation = activation self.scale = scale self.sparsity = sparsity self.bias_scale = bias_scale self.bias_fn = bias_fn self.init_fn = init_fn self._grad_scale = grad_scale self.weight_noise = weight_noise self.dropout = dropout self.n_hids = n_hids self.learn_bias = learn_bias self._init_params() def _init_params(self): """ Initialize the parameters of the layer, either by using sparse initialization or small isotropic noise. 
""" self.W_ems = [] self.b_ems = [] if self.rank_n_approx: W_em1 = self.init_fn[0](self.n_in, self.rank_n_approx, self.sparsity[0], self.scale[0], self.rng) W_em2 = self.init_fn[0](self.rank_n_approx, self.n_hids[0], self.sparsity[0], self.scale[0], self.rng) self.W_em1 = theano.shared(W_em1, name='W1_0_%s'%self.name) self.W_em2 = theano.shared(W_em2, name='W2_0_%s'%self.name) self.W_ems = [self.W_em1, self.W_em2] else: W_em = self.init_fn[0](self.n_in, self.n_hids[0], self.sparsity[0], self.scale[0], self.rng) self.W_em = theano.shared(W_em, name='W_0_%s'%self.name) self.W_ems = [self.W_em] self.b_em = theano.shared( self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng), name='b_0_%s'%self.name) self.b_ems = [self.b_em] for dx in xrange(1, self.n_layers): W_em = self.init_fn[dx](self.n_hids[dx-1] / self.pieces[dx], self.n_hids[dx], self.sparsity[dx], self.scale[dx], self.rng) W_em = theano.shared(W_em, name='W_%d_%s'%(dx,self.name)) self.W_ems += [W_em] b_em = theano.shared( self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng), name='b_%d_%s'%(dx,self.name)) self.b_ems += [b_em] self.params = [x for x in self.W_ems] if self.learn_bias and self.learn_bias!='last': self.params = [x for x in self.W_ems] + [x for x in self.b_ems] elif self.learn_bias == 'last': self.params = [x for x in self.W_ems] + [x for x in self.b_ems][:-1] self.params_grad_scale = [self._grad_scale for x in self.params] if self.weight_noise: self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems] self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems] self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems] self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params] def fprop(self, state_below, use_noise=True, no_noise_bias=False, first_only = False): """ Constructs the computational graph of this layer. If the input is ints, we assume is an index, otherwise we assume is a set of floats. 
""" if self.weight_noise and use_noise and self.noise_params: W_ems = [(x+y) for x, y in zip(self.W_ems, self.nW_ems)] if not no_noise_bias: b_ems = [(x+y) for x, y in zip(self.b_ems, self.nb_ems)] else: b_ems = self.b_ems else: W_ems = self.W_ems b_ems = self.b_ems if self.rank_n_approx: if first_only: emb_val = self.rank_n_activ(utils.dot(state_below, W_ems[0])) self.out = emb_val return emb_val emb_val = TT.dot( self.rank_n_activ(utils.dot(state_below, W_ems[0])), W_ems[1]) if b_ems: emb_val += b_ems[0] st_pos = 1 else: emb_val = utils.dot(state_below, W_ems[0]) if b_ems: emb_val += b_ems[0] st_pos = 0 emb_val = self.activation[0](emb_val) if self.dropout < 1.: if use_noise: emb_val = emb_val * self.trng.binomial(emb_val.shape, n=1, p=self.dropout, dtype=emb_val.dtype) else: emb_val = emb_val * self.dropout for dx in xrange(1, self.n_layers): emb_val = utils.dot(emb_val, W_ems[st_pos+dx]) if b_ems: emb_val = self.activation[dx](emb_val+ b_ems[dx]) else: emb_val = self.activation[dx](emb_val) if self.dropout < 1.: if use_noise: emb_val = emb_val * self.trng.binomial(emb_val.shape, n=1, p=self.dropout, dtype=emb_val.dtype) else: emb_val = emb_val * self.dropout self.out = emb_val return emb_val class LastState(Layer): """ This layer is used to construct the embedding of the encoder by taking the last state of the recurrent model """ def __init__(self, ntimes = False, n = TT.constant(0)): """ :type ntimes: bool :param ntimes: If the last state needs to be repeated `n` times :type n: int, theano constant, None :param n: how many times the last state is repeated """ self.ntimes = ntimes self.n = n super(LastState, self).__init__(0, 0, None) def fprop(self, all_states): if self.ntimes: stateshape0 = all_states.shape[0] shape0 = TT.switch(TT.gt(self.n, 0), self.n, all_states.shape[0]) single_frame = TT.shape_padleft(all_states[stateshape0-1]) mask = TT.alloc(numpy.float32(1), shape0, *[1 for k in xrange(all_states.ndim-1)]) rval = single_frame * mask self.out = rval return rval single_frame = all_states[all_states.shape[0]-1] self.out = single_frame return single_frame last = LastState() last_ntimes = LastState(ntimes=True) class GaussianNoise(Layer): """ This layer is used to construct the embedding of the encoder by taking the last state of the recurrent model """ def __init__(self, rng, std = 0.1, ndim=0, avg =0, shape_fn=None): """ """ assert rng is not None, "random number generator should not be empty!" 
super(GaussianNoise, self).__init__(0, 0, rng) self.std = scale self.avg = self.avg self.ndim = ndim self.shape_fn = shape_fn if self.shape_fn: # Name is not important as it is not a parameter of the model self.noise_term = theano.shared(numpy.zeros((2,)*ndim, dtype=theano.config.floatX), name='ndata') self.noise_params += [self.noise_term] self.noise_params_shape_fn += [shape_fn] self.trng = RandomStreams(rng.randint(1e5)) def fprop(self, x): self.out = x if self.scale: if self.shape_fn: self.out += self.noise_term else: self.out += self.trng.normal(self.out.shape, std=self.std, avg = self.avg, dtype=self.out.dtype) return self.out class BinaryOp(Layer): """ This layer is used to construct the embedding of the encoder by taking the last state of the recurrent model """ def __init__(self, op = 'lambda x,y: x+y', name=None): if type(op) is str: op = eval(op) self.op = op super(BinaryOp, self).__init__(0, 0, None, name) def fprop(self, x, y): self.out = self.op(x, y) return self.out class DropOp(Layer): """ This layers randomly drops elements of the input by multiplying with a mask sampled from a binomial distribution """ def __init__(self, rng = None, name=None, dropout=1.): super(DropOp, self).__init__(0, 0, None, name) self.dropout = dropout if dropout < 1.: self.trng = RandomStreams(rng.randint(1e5)) def fprop(self, state_below, use_noise = True): self.out = state_below if self.dropout < 1.: if use_noise: self.out = self.out * self.trng.binomial(self.out.shape, n=1, p=self.dropout, dtype=self.out.dtype) else: self.out = self.out * self.dropout return self.out class UnaryOp(Layer): """ This layer is used to construct an embedding of the encoder by doing a max pooling over the hidden state """ def __init__(self, activation = 'lambda x: x', name=None): if type(activation) is str: activation = eval(activation) self.activation = activation super(UnaryOp, self).__init__(0, 0, None, name) def fprop(self, state_below): self.out = self.activation(state_below) return self.out tanh = UnaryOp('lambda x: TT.tanh(x)') sigmoid = UnaryOp('lambda x: TT.nnet.sigmoid(x)') rectifier = UnaryOp('lambda x: x*(x>0)') hard_sigmoid = UnaryOp('lambda x: x*(x>0)*(x<1)') hard_tanh = UnaryOp('lambda x: x*(x>-1)*(x<1)') class Shift(Layer): """ This layer is used to construct the embedding of the encoder by taking the last state of the recurrent model """ def __init__(self, n=1, name=None): self.n = n super(Shift, self).__init__(0, 0, None, name) def fprop(self, var): rval = TT.zeros_like(var) if self.n >0: rval = TT.set_subtensor(rval[self.n:], var[:-self.n]) elif self.n<0: rval = TT.set_subtensor(rval[:self.n], var[-self.n:]) self.out = rval return rval class MinPooling(Layer): """ This layer is used to construct an embedding of the encoder by doing a max pooling over the hidden state """ def __init__(self, ntimes=False, name=None): self.ntimes = ntimes super(MinPooling, self).__init__(0, 0, None, name) def fprop(self, all_states): shape0 = all_states.shape[0] single_frame = all_states.min(0) if self.ntimes: single_frame = TT.shape_padleft(all_states.max(0)) mask = TT.alloc(numpy.float32(1), shape0, *[1 for k in xrange(all_states.ndim-1)]) rval = single_frame * mask self.out = rval return rval self.out = single_frame return single_frame minpool = MinPooling() minpool_ntimes = MinPooling(ntimes=True) class MaxPooling(Layer): """ This layer is used to construct an embedding of the encoder by doing a max pooling over the hidden state """ def __init__(self, ntimes=False, name=None): self.ntimes = ntimes super(MaxPooling, 
self).__init__(0, 0, None, name) def fprop(self, all_states): shape0 = all_states.shape[0] single_frame = all_states.max(0) if self.ntimes: single_frame = TT.shape_padleft(all_states.max(0)) mask = TT.alloc(numpy.float32(1), shape0, *[1 for k in xrange(all_states.ndim-1)]) rval = single_frame * mask self.out = rval return rval self.out = single_frame return single_frame maxpool = MaxPooling() maxpool_ntimes = MaxPooling(ntimes=True)
tomsbergmanis/gh_rnn_lm
groundhog/layers/ff_layers.py
Python
bsd-3-clause
18,627
[ "Gaussian" ]
a8fb7697dd951e048b2ec89409e1766c7ac7bcd1cc3945b5563d08cd3e2e817b
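MultiLayer.fprop and DropOp in the record above implement non-inverted dropout: activations are multiplied by a binomial keep-mask during training and by the keep probability itself at evaluation time. The same convention reduced to NumPy (a sketch only; array shapes and the seed are arbitrary):

import numpy as np

rng = np.random.default_rng(0)

def dropout(x, keep_prob, use_noise):
    """Mirror of the p=self.dropout convention above: mask while
    training, scale by E[mask] = keep_prob at test time."""
    if keep_prob >= 1.0:          # dropout disabled, as in the layers above
        return x
    if use_noise:                 # training path
        mask = rng.binomial(n=1, p=keep_prob, size=x.shape)
        return x * mask
    return x * keep_prob          # evaluation path

h = np.tanh(rng.standard_normal((4, 8)))
print(dropout(h, keep_prob=0.5, use_noise=True))   # masked activations
print(dropout(h, keep_prob=0.5, use_noise=False))  # scaled activations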
# -*- coding: utf-8 -*- """ List of registered IEEE 24-bit Organizationally Unique IDentifiers. Original data file: http://standards.ieee.org/regauth/oui/oui.txt """ REGISTERED_OUID = { 0x000000: u'XEROX CORPORATION', 0x000001: u'XEROX CORPORATION', 0x000002: u'XEROX CORPORATION', 0x000003: u'XEROX CORPORATION', 0x000004: u'XEROX CORPORATION', 0x000005: u'XEROX CORPORATION', 0x000006: u'XEROX CORPORATION', 0x000007: u'XEROX CORPORATION', 0x000008: u'XEROX CORPORATION', 0x000009: u'XEROX CORPORATION', 0x00000A: u'OMRON TATEISI ELECTRONICS CO.', 0x00000B: u'MATRIX CORPORATION', 0x00000C: u'CISCO SYSTEMS, INC.', 0x00000D: u'FIBRONICS LTD.', 0x00000E: u'FUJITSU LIMITED', 0x00000F: u'NEXT, INC.', 0x000010: u'SYTEK INC.', 0x000011: u'NORMEREL SYSTEMES', 0x000012: u'INFORMATION TECHNOLOGY LIMITED', 0x000013: u'CAMEX', 0x000014: u'NETRONIX', 0x000015: u'DATAPOINT CORPORATION', 0x000016: u'DU PONT PIXEL SYSTEMS.', 0x000017: u'TEKELEC', 0x000018: u'WEBSTER COMPUTER CORPORATION', 0x000019: u'APPLIED DYNAMICS INTERNATIONAL', 0x00001A: u'ADVANCED MICRO DEVICES', 0x00001B: u'NOVELL INC.', 0x00001C: u'BELL TECHNOLOGIES', 0x00001D: u'CABLETRON SYSTEMS, INC.', 0x00001E: u'TELSIST INDUSTRIA ELECTRONICA', 0x00001F: u'Telco Systems, Inc.', 0x000020: u'DATAINDUSTRIER DIAB AB', 0x000021: u'SUREMAN COMP. & COMMUN. CORP.', 0x000022: u'VISUAL TECHNOLOGY INC.', 0x000023: u'ABB INDUSTRIAL SYSTEMS AB', 0x000024: u'CONNECT AS', 0x000025: u'RAMTEK CORP.', 0x000026: u'SHA-KEN CO., LTD.', 0x000027: u'JAPAN RADIO COMPANY', 0x000028: u'PRODIGY SYSTEMS CORPORATION', 0x000029: u'IMC NETWORKS CORP.', 0x00002A: u'TRW - SEDD/INP', 0x00002B: u'CRISP AUTOMATION, INC', 0x00002C: u'AUTOTOTE LIMITED', 0x00002D: u'CHROMATICS INC', 0x00002E: u'SOCIETE EVIRA', 0x00002F: u'TIMEPLEX INC.', 0x000030: u'VG LABORATORY SYSTEMS LTD', 0x000031: u'QPSX COMMUNICATIONS PTY LTD', 0x000032: u'Marconi plc', 0x000033: u'EGAN MACHINERY COMPANY', 0x000034: u'NETWORK RESOURCES CORPORATION', 0x000035: u'SPECTRAGRAPHICS CORPORATION', 0x000036: u'ATARI CORPORATION', 0x000037: u'OXFORD METRICS LIMITED', 0x000038: u'CSS LABS', 0x000039: u'TOSHIBA CORPORATION', 0x00003A: u'CHYRON CORPORATION', 0x00003B: u'i Controls, Inc.', 0x00003C: u'AUSPEX SYSTEMS INC.', 0x00003D: u'UNISYS', 0x00003E: u'SIMPACT', 0x00003F: u'SYNTREX, INC.', 0x000040: u'APPLICON, INC.', 0x000041: u'ICE CORPORATION', 0x000042: u'METIER MANAGEMENT SYSTEMS LTD.', 0x000043: u'MICRO TECHNOLOGY', 0x000044: u'CASTELLE CORPORATION', 0x000045: u'FORD AEROSPACE & COMM. CORP.', 0x000046: u'OLIVETTI NORTH AMERICA', 0x000047: u'NICOLET INSTRUMENTS CORP.', 0x000048: u'SEIKO EPSON CORPORATION', 0x000049: u'APRICOT COMPUTERS, LTD', 0x00004A: u'ADC CODENOLL TECHNOLOGY CORP.', 0x00004B: u'ICL DATA OY', 0x00004C: u'NEC CORPORATION', 0x00004D: u'DCI CORPORATION', 0x00004E: u'AMPEX CORPORATION', 0x00004F: u'LOGICRAFT, INC.', 0x000050: u'RADISYS CORPORATION', 0x000051: u'HOB ELECTRONIC GMBH & CO. KG', 0x000052: u'Intrusion.com, Inc.', 0x000053: u'COMPUCORP', 0x000054: u'MODICON, INC.', 0x000055: u'COMMISSARIAT A L`ENERGIE ATOM.', 0x000056: u'DR. B. 
STRUCK', 0x000057: u'SCITEX CORPORATION LTD.', 0x000058: u'RACORE COMPUTER PRODUCTS INC.', 0x000059: u'HELLIGE GMBH', 0x00005A: u'SysKonnect GmbH', 0x00005B: u'ELTEC ELEKTRONIK AG', 0x00005C: u'TELEMATICS INTERNATIONAL INC.', 0x00005D: u'CS TELECOM', 0x00005E: u'USC INFORMATION SCIENCES INST', 0x00005F: u'SUMITOMO ELECTRIC IND., LTD.', 0x000060: u'KONTRON ELEKTRONIK GMBH', 0x000061: u'GATEWAY COMMUNICATIONS', 0x000062: u'BULL HN INFORMATION SYSTEMS', 0x000063: u'BARCO CONTROL ROOMS GMBH', 0x000064: u'YOKOGAWA DIGITAL COMPUTER CORP', 0x000065: u'Network General Corporation', 0x000066: u'TALARIS SYSTEMS, INC.', 0x000067: u'SOFT * RITE, INC.', 0x000068: u'ROSEMOUNT CONTROLS', 0x000069: u'CONCORD COMMUNICATIONS INC', 0x00006A: u'COMPUTER CONSOLES INC.', 0x00006B: u'SILICON GRAPHICS INC./MIPS', 0x00006C: u'PRIVATE', 0x00006D: u'CRAY COMMUNICATIONS, LTD.', 0x00006E: u'ARTISOFT, INC.', 0x00006F: u'Madge Ltd.', 0x000070: u'HCL LIMITED', 0x000071: u'ADRA SYSTEMS INC.', 0x000072: u'MINIWARE TECHNOLOGY', 0x000073: u'SIECOR CORPORATION', 0x000074: u'RICOH COMPANY LTD.', 0x000075: u'Nortel Networks', 0x000076: u'ABEKAS VIDEO SYSTEM', 0x000077: u'INTERPHASE CORPORATION', 0x000078: u'LABTAM LIMITED', 0x000079: u'NETWORTH INCORPORATED', 0x00007A: u'DANA COMPUTER INC.', 0x00007B: u'RESEARCH MACHINES', 0x00007C: u'AMPERE INCORPORATED', 0x00007D: u'SUN MICROSYSTEMS, INC.', 0x00007E: u'CLUSTRIX CORPORATION', 0x00007F: u'LINOTYPE-HELL AG', 0x000080: u'CRAY COMMUNICATIONS A/S', 0x000081: u'BAY NETWORKS', 0x000082: u'LECTRA SYSTEMES SA', 0x000083: u'TADPOLE TECHNOLOGY PLC', 0x000084: u'SUPERNET', 0x000085: u'CANON INC.', 0x000086: u'MEGAHERTZ CORPORATION', 0x000087: u'HITACHI, LTD.', 0x000088: u'COMPUTER NETWORK TECH. CORP.', 0x000089: u'CAYMAN SYSTEMS INC.', 0x00008A: u'DATAHOUSE INFORMATION SYSTEMS', 0x00008B: u'INFOTRON', 0x00008C: u'Alloy Computer Products (Australia) Pty Ltd', 0x00008D: u'VERDIX CORPORATION', 0x00008E: u'SOLBOURNE COMPUTER, INC.', 0x00008F: u'RAYTHEON COMPANY', 0x000090: u'MICROCOM', 0x000091: u'ANRITSU CORPORATION', 0x000092: u'COGENT DATA TECHNOLOGIES', 0x000093: u'PROTEON INC.', 0x000094: u'ASANTE TECHNOLOGIES', 0x000095: u'SONY TEKTRONIX CORP.', 0x000096: u'MARCONI ELECTRONICS LTD.', 0x000097: u'EPOCH SYSTEMS', 0x000098: u'CROSSCOMM CORPORATION', 0x000099: u'MTX, INC.', 0x00009A: u'RC COMPUTER A/S', 0x00009B: u'INFORMATION INTERNATIONAL, INC', 0x00009C: u'ROLM MIL-SPEC COMPUTERS', 0x00009D: u'LOCUS COMPUTING CORPORATION', 0x00009E: u'MARLI S.A.', 0x00009F: u'AMERISTAR TECHNOLOGIES INC.', 0x0000A0: u'SANYO Electric Co., Ltd.', 0x0000A1: u'MARQUETTE ELECTRIC CO.', 0x0000A2: u'BAY NETWORKS', 0x0000A3: u'NETWORK APPLICATION TECHNOLOGY', 0x0000A4: u'ACORN COMPUTERS LIMITED', 0x0000A5: u'COMPATIBLE SYSTEMS CORP.', 0x0000A6: u'NETWORK GENERAL CORPORATION', 0x0000A7: u'NETWORK COMPUTING DEVICES INC.', 0x0000A8: u'STRATUS COMPUTER INC.', 0x0000A9: u'NETWORK SYSTEMS CORP.', 0x0000AA: u'XEROX CORPORATION', 0x0000AB: u'LOGIC MODELING CORPORATION', 0x0000AC: u'CONWARE COMPUTER CONSULTING', 0x0000AD: u'BRUKER INSTRUMENTS INC.', 0x0000AE: u'DASSAULT ELECTRONIQUE', 0x0000AF: u'NUCLEAR DATA INSTRUMENTATION', 0x0000B0: u'RND-RAD NETWORK DEVICES', 0x0000B1: u'ALPHA MICROSYSTEMS INC.', 0x0000B2: u'TELEVIDEO SYSTEMS, INC.', 0x0000B3: u'CIMLINC INCORPORATED', 0x0000B4: u'EDIMAX COMPUTER COMPANY', 0x0000B5: u'DATABILITY SOFTWARE SYS. 
INC.', 0x0000B6: u'MICRO-MATIC RESEARCH', 0x0000B7: u'DOVE COMPUTER CORPORATION', 0x0000B8: u'SEIKOSHA CO., LTD.', 0x0000B9: u'MCDONNELL DOUGLAS COMPUTER SYS', 0x0000BA: u'SIIG, INC.', 0x0000BB: u'TRI-DATA', 0x0000BC: u'ALLEN-BRADLEY CO. INC.', 0x0000BD: u'MITSUBISHI CABLE COMPANY', 0x0000BE: u'THE NTI GROUP', 0x0000BF: u'SYMMETRIC COMPUTER SYSTEMS', 0x0000C0: u'WESTERN DIGITAL CORPORATION', 0x0000C1: u'Madge Ltd.', 0x0000C2: u'INFORMATION PRESENTATION TECH.', 0x0000C3: u'HARRIS CORP COMPUTER SYS DIV', 0x0000C4: u'WATERS DIV. OF MILLIPORE', 0x0000C5: u'FARALLON COMPUTING/NETOPIA', 0x0000C6: u'EON SYSTEMS', 0x0000C7: u'ARIX CORPORATION', 0x0000C8: u'ALTOS COMPUTER SYSTEMS', 0x0000C9: u'EMULEX CORPORATION', 0x0000CA: u'ARRIS International', 0x0000CB: u'COMPU-SHACK ELECTRONIC GMBH', 0x0000CC: u'DENSAN CO., LTD.', 0x0000CD: u'Allied Telesyn Research Ltd.', 0x0000CE: u'MEGADATA CORP.', 0x0000CF: u'HAYES MICROCOMPUTER PRODUCTS', 0x0000D0: u'DEVELCON ELECTRONICS LTD.', 0x0000D1: u'ADAPTEC INCORPORATED', 0x0000D2: u'SBE, INC.', 0x0000D3: u'WANG LABORATORIES INC.', 0x0000D4: u'PURE DATA LTD.', 0x0000D5: u'MICROGNOSIS INTERNATIONAL', 0x0000D6: u'PUNCH LINE HOLDING', 0x0000D7: u'DARTMOUTH COLLEGE', 0x0000D8: u'NOVELL, INC.', 0x0000D9: u'NIPPON TELEGRAPH & TELEPHONE', 0x0000DA: u'ATEX', 0x0000DB: u'BRITISH TELECOMMUNICATIONS PLC', 0x0000DC: u'HAYES MICROCOMPUTER PRODUCTS', 0x0000DD: u'TCL INCORPORATED', 0x0000DE: u'CETIA', 0x0000DF: u'BELL & HOWELL PUB SYS DIV', 0x0000E0: u'QUADRAM CORP.', 0x0000E1: u'GRID SYSTEMS', 0x0000E2: u'ACER TECHNOLOGIES CORP.', 0x0000E3: u'INTEGRATED MICRO PRODUCTS LTD', 0x0000E4: u'IN2 GROUPE INTERTECHNIQUE', 0x0000E5: u'SIGMEX LTD.', 0x0000E6: u'APTOR PRODUITS DE COMM INDUST', 0x0000E7: u'STAR GATE TECHNOLOGIES', 0x0000E8: u'ACCTON TECHNOLOGY CORP.', 0x0000E9: u'ISICAD, INC.', 0x0000EA: u'UPNOD AB', 0x0000EB: u'MATSUSHITA COMM. IND. CO. 
LTD.', 0x0000EC: u'MICROPROCESS', 0x0000ED: u'APRIL', 0x0000EE: u'NETWORK DESIGNERS, LTD.', 0x0000EF: u'KTI', 0x0000F0: u'SAMSUNG ELECTRONICS CO., LTD.', 0x0000F1: u'MAGNA COMPUTER CORPORATION', 0x0000F2: u'SPIDER COMMUNICATIONS', 0x0000F3: u'GANDALF DATA LIMITED', 0x0000F4: u'ALLIED TELESYN INTERNATIONAL', 0x0000F5: u'DIAMOND SALES LIMITED', 0x0000F6: u'APPLIED MICROSYSTEMS CORP.', 0x0000F7: u'YOUTH KEEP ENTERPRISE CO LTD', 0x0000F8: u'DIGITAL EQUIPMENT CORPORATION', 0x0000F9: u'QUOTRON SYSTEMS INC.', 0x0000FA: u'MICROSAGE COMPUTER SYSTEMS INC', 0x0000FB: u'RECHNER ZUR KOMMUNIKATION', 0x0000FC: u'MEIKO', 0x0000FD: u'HIGH LEVEL HARDWARE', 0x0000FE: u'ANNAPOLIS MICRO SYSTEMS', 0x0000FF: u'CAMTEC ELECTRONICS LTD.', 0x000100: u'EQUIP\'TRANS', 0x000101: u'PRIVATE', 0x000102: u'3COM CORPORATION', 0x000103: u'3COM CORPORATION', 0x000104: u'DVICO Co., Ltd.', 0x000105: u'BECKHOFF GmbH', 0x000106: u'Tews Datentechnik GmbH', 0x000107: u'Leiser GmbH', 0x000108: u'AVLAB Technology, Inc.', 0x000109: u'Nagano Japan Radio Co., Ltd.', 0x00010A: u'CIS TECHNOLOGY INC.', 0x00010B: u'Space CyberLink, Inc.', 0x00010C: u'System Talks Inc.', 0x00010D: u'CORECO, INC.', 0x00010E: u'Bri-Link Technologies Co., Ltd', 0x00010F: u'McDATA Corporation', 0x000110: u'Gotham Networks', 0x000111: u'iDigm Inc.', 0x000112: u'Shark Multimedia Inc.', 0x000113: u'OLYMPUS CORPORATION', 0x000114: u'KANDA TSUSHIN KOGYO CO., LTD.', 0x000115: u'EXTRATECH CORPORATION', 0x000116: u'Netspect Technologies, Inc.', 0x000117: u'CANAL +', 0x000118: u'EZ Digital Co., Ltd.', 0x000119: u'RTUnet (Australia)', 0x00011A: u'EEH DataLink GmbH', 0x00011B: u'Unizone Technologies, Inc.', 0x00011C: u'Universal Talkware Corporation', 0x00011D: u'Centillium Communications', 0x00011E: u'Precidia Technologies, Inc.', 0x00011F: u'RC Networks, Inc.', 0x000120: u'OSCILLOQUARTZ S.A.', 0x000121: u'Watchguard Technologies, Inc.', 0x000122: u'Trend Communications, Ltd.', 0x000123: u'DIGITAL ELECTRONICS CORP.', 0x000124: u'Acer Incorporated', 0x000125: u'YAESU MUSEN CO., LTD.', 0x000126: u'PAC Labs', 0x000127: u'OPEN Networks Pty Ltd', 0x000128: u'EnjoyWeb, Inc.', 0x000129: u'DFI Inc.', 0x00012A: u'Telematica Sistems Inteligente', 0x00012B: u'TELENET Co., Ltd.', 0x00012C: u'Aravox Technologies, Inc.', 0x00012D: u'Komodo Technology', 0x00012E: u'PC Partner Ltd.', 0x00012F: u'Twinhead International Corp', 0x000130: u'Extreme Networks', 0x000131: u'Detection Systems, Inc.', 0x000132: u'Dranetz - BMI', 0x000133: u'KYOWA Electronic Instruments C', 0x000134: u'SIG Positec Systems AG', 0x000135: u'KDC Corp.', 0x000136: u'CyberTAN Technology, Inc.', 0x000137: u'IT Farm Corporation', 0x000138: u'XAVi Technologies Corp.', 0x000139: u'Point Multimedia Systems', 0x00013A: u'SHELCAD COMMUNICATIONS, LTD.', 0x00013B: u'BNA SYSTEMS', 0x00013C: u'TIW SYSTEMS', 0x00013D: u'RiscStation Ltd.', 0x00013E: u'Ascom Tateco AB', 0x00013F: u'Neighbor World Co., Ltd.', 0x000140: u'Sendtek Corporation', 0x000141: u'CABLE PRINT', 0x000142: u'Cisco Systems, Inc.', 0x000143: u'Cisco Systems, Inc.', 0x000144: u'EMC Corporation', 0x000145: u'WINSYSTEMS, INC.', 0x000146: u'Tesco Controls, Inc.', 0x000147: u'Zhone Technologies', 0x000148: u'X-traWeb Inc.', 0x000149: u'T.D.T. 
Transfer Data Test GmbH', 0x00014A: u'Sony Corporation', 0x00014B: u'Ennovate Networks, Inc.', 0x00014C: u'Berkeley Process Control', 0x00014D: u'Shin Kin Enterprises Co., Ltd', 0x00014E: u'WIN Enterprises, Inc.', 0x00014F: u'ADTRAN INC', 0x000150: u'GILAT COMMUNICATIONS, LTD.', 0x000151: u'Ensemble Communications', 0x000152: u'CHROMATEK INC.', 0x000153: u'ARCHTEK TELECOM CORPORATION', 0x000154: u'G3M Corporation', 0x000155: u'Promise Technology, Inc.', 0x000156: u'FIREWIREDIRECT.COM, INC.', 0x000157: u'SYSWAVE CO., LTD', 0x000158: u'Electro Industries/Gauge Tech', 0x000159: u'S1 Corporation', 0x00015A: u'Digital Video Broadcasting', 0x00015B: u'ITALTEL S.p.A/RF-UP-I', 0x00015C: u'CADANT INC.', 0x00015D: u'Sun Microsystems, Inc', 0x00015E: u'BEST TECHNOLOGY CO., LTD.', 0x00015F: u'DIGITAL DESIGN GmbH', 0x000160: u'ELMEX Co., LTD.', 0x000161: u'Meta Machine Technology', 0x000162: u'Cygnet Technologies, Inc.', 0x000163: u'Cisco Systems, Inc.', 0x000164: u'Cisco Systems, Inc.', 0x000165: u'AirSwitch Corporation', 0x000166: u'TC GROUP A/S', 0x000167: u'HIOKI E.E. CORPORATION', 0x000168: u'VITANA CORPORATION', 0x000169: u'Celestix Networks Pte Ltd.', 0x00016A: u'ALITEC', 0x00016B: u'LightChip, Inc.', 0x00016C: u'FOXCONN', 0x00016D: u'CarrierComm Inc.', 0x00016E: u'Conklin Corporation', 0x00016F: u'HAITAI ELECTRONICS CO., LTD.', 0x000170: u'ESE Embedded System Engineer\'g', 0x000171: u'Allied Data Technologies', 0x000172: u'TechnoLand Co., LTD.', 0x000173: u'AMCC', 0x000174: u'CyberOptics Corporation', 0x000175: u'Radiant Communications Corp.', 0x000176: u'Orient Silver Enterprises', 0x000177: u'EDSL', 0x000178: u'MARGI Systems, Inc.', 0x000179: u'WIRELESS TECHNOLOGY, INC.', 0x00017A: u'Chengdu Maipu Electric Industrial Co., Ltd.', 0x00017B: u'Heidelberger Druckmaschinen AG', 0x00017C: u'AG-E GmbH', 0x00017D: u'ThermoQuest', 0x00017E: u'ADTEK System Science Co., Ltd.', 0x00017F: u'Experience Music Project', 0x000180: u'AOpen, Inc.', 0x000181: u'Nortel Networks', 0x000182: u'DICA TECHNOLOGIES AG', 0x000183: u'ANITE TELECOMS', 0x000184: u'SIEB & MEYER AG', 0x000185: u'Aloka Co., Ltd.', 0x000186: u'Uwe Disch', 0x000187: u'i2SE GmbH', 0x000188: u'LXCO Technologies ag', 0x000189: u'Refraction Technology, Inc.', 0x00018A: u'ROI COMPUTER AG', 0x00018B: u'NetLinks Co., Ltd.', 0x00018C: u'Mega Vision', 0x00018D: u'AudeSi Technologies', 0x00018E: u'Logitec Corporation', 0x00018F: u'Kenetec, Inc.', 0x000190: u'SMK-M', 0x000191: u'SYRED Data Systems', 0x000192: u'Texas Digital Systems', 0x000193: u'Hanbyul Telecom Co., Ltd.', 0x000194: u'Capital Equipment Corporation', 0x000195: u'Sena Technologies, Inc.', 0x000196: u'Cisco Systems, Inc.', 0x000197: u'Cisco Systems, Inc.', 0x000198: u'Darim Vision', 0x000199: u'HeiSei Electronics', 0x00019A: u'LEUNIG GmbH', 0x00019B: u'Kyoto Microcomputer Co., Ltd.', 0x00019C: u'JDS Uniphase Inc.', 0x00019D: u'E-Control Systems, Inc.', 0x00019E: u'ESS Technology, Inc.', 0x00019F: u'Phonex Broadband', 0x0001A0: u'Infinilink Corporation', 0x0001A1: u'Mag-Tek, Inc.', 0x0001A2: u'Logical Co., Ltd.', 0x0001A3: u'GENESYS LOGIC, INC.', 0x0001A4: u'Microlink Corporation', 0x0001A5: u'Nextcomm, Inc.', 0x0001A6: u'Scientific-Atlanta Arcodan A/S', 0x0001A7: u'UNEX TECHNOLOGY CORPORATION', 0x0001A8: u'Welltech Computer Co., Ltd.', 0x0001A9: u'BMW AG', 0x0001AA: u'Airspan Communications, Ltd.', 0x0001AB: u'Main Street Networks', 0x0001AC: u'Sitara Networks, Inc.', 0x0001AD: u'Coach Master International d.b.a. 
CMI Worldwide, Inc.', 0x0001AE: u'Trex Enterprises', 0x0001AF: u'Motorola Computer Group', 0x0001B0: u'Fulltek Technology Co., Ltd.', 0x0001B1: u'General Bandwidth', 0x0001B2: u'Digital Processing Systems, Inc.', 0x0001B3: u'Precision Electronic Manufacturing', 0x0001B4: u'Wayport, Inc.', 0x0001B5: u'Turin Networks, Inc.', 0x0001B6: u'SAEJIN T&M Co., Ltd.', 0x0001B7: u'Centos, Inc.', 0x0001B8: u'Netsensity, Inc.', 0x0001B9: u'SKF Condition Monitoring', 0x0001BA: u'IC-Net, Inc.', 0x0001BB: u'Frequentis', 0x0001BC: u'Brains Corporation', 0x0001BD: u'Peterson Electro-Musical Products, Inc.', 0x0001BE: u'Gigalink Co., Ltd.', 0x0001BF: u'Teleforce Co., Ltd.', 0x0001C0: u'CompuLab, Ltd.', 0x0001C1: u'Vitesse Semiconductor Corporation', 0x0001C2: u'ARK Research Corp.', 0x0001C3: u'Acromag, Inc.', 0x0001C4: u'NeoWave, Inc.', 0x0001C5: u'Simpler Networks', 0x0001C6: u'Quarry Technologies', 0x0001C7: u'Cisco Systems, Inc.', 0x0001C8: u'CONRAD CORP.', 0x0001C9: u'Cisco Systems, Inc.', 0x0001CA: u'Geocast Network Systems, Inc.', 0x0001CB: u'EVR', 0x0001CC: u'Japan Total Design Communication Co., Ltd.', 0x0001CD: u'ARtem', 0x0001CE: u'Custom Micro Products, Ltd.', 0x0001CF: u'Alpha Data Parallel Systems, Ltd.', 0x0001D0: u'VitalPoint, Inc.', 0x0001D1: u'CoNet Communications, Inc.', 0x0001D2: u'MacPower Peripherals, Ltd.', 0x0001D3: u'PAXCOMM, Inc.', 0x0001D4: u'Leisure Time, Inc.', 0x0001D5: u'HAEDONG INFO & COMM CO., LTD', 0x0001D6: u'MAN Roland Druckmaschinen AG', 0x0001D7: u'F5 Networks, Inc.', 0x0001D8: u'Teltronics, Inc.', 0x0001D9: u'Sigma, Inc.', 0x0001DA: u'WINCOMM Corporation', 0x0001DB: u'Freecom Technologies GmbH', 0x0001DC: u'Activetelco', 0x0001DD: u'Avail Networks', 0x0001DE: u'Trango Systems, Inc.', 0x0001DF: u'ISDN Communications, Ltd.', 0x0001E0: u'Fast Systems, Inc.', 0x0001E1: u'Kinpo Electronics, Inc.', 0x0001E2: u'Ando Electric Corporation', 0x0001E3: u'Siemens AG', 0x0001E4: u'Sitera, Inc.', 0x0001E5: u'Supernet, Inc.', 0x0001E6: u'Hewlett-Packard Company', 0x0001E7: u'Hewlett-Packard Company', 0x0001E8: u'Force10 Networks, Inc.', 0x0001E9: u'Litton Marine Systems B.V.', 0x0001EA: u'Cirilium Corp.', 0x0001EB: u'C-COM Corporation', 0x0001EC: u'Ericsson Group', 0x0001ED: u'SETA Corp.', 0x0001EE: u'Comtrol Europe, Ltd.', 0x0001EF: u'Camtel Technology Corp.', 0x0001F0: u'Tridium, Inc.', 0x0001F1: u'Innovative Concepts, Inc.', 0x0001F2: u'Mark of the Unicorn, Inc.', 0x0001F3: u'QPS, Inc.', 0x0001F4: u'Enterasys Networks', 0x0001F5: u'ERIM S.A.', 0x0001F6: u'Association of Musical Electronics Industry', 0x0001F7: u'Image Display Systems, Inc.', 0x0001F8: u'Adherent Systems, Ltd.', 0x0001F9: u'TeraGlobal Communications Corp.', 0x0001FA: u'HOROSCAS', 0x0001FB: u'DoTop Technology, Inc.', 0x0001FC: u'Keyence Corporation', 0x0001FD: u'Digital Voice Systems, Inc.', 0x0001FE: u'DIGITAL EQUIPMENT CORPORATION', 0x0001FF: u'Data Direct Networks, Inc.', 0x000200: u'Net & Sys Co., Ltd.', 0x000201: u'IFM Electronic gmbh', 0x000202: u'Amino Communications, Ltd.', 0x000203: u'Woonsang Telecom, Inc.', 0x000204: u'Bodmann Industries Elektronik GmbH', 0x000205: u'Hitachi Denshi, Ltd.', 0x000206: u'Telital R&D Denmark A/S', 0x000207: u'VisionGlobal Network Corp.', 0x000208: u'Unify Networks, Inc.', 0x000209: u'Shenzhen SED Information Technology Co., Ltd.', 0x00020A: u'Gefran Spa', 0x00020B: u'Native Networks, Inc.', 0x00020C: u'Metro-Optix', 0x00020D: u'Micronpc.com', 0x00020E: u'Laurel Networks, Inc.', 0x00020F: u'AATR', 0x000210: u'Fenecom', 0x000211: u'Nature 
Worldwide Technology Corp.', 0x000212: u'SierraCom', 0x000213: u'S.D.E.L.', 0x000214: u'DTVRO', 0x000215: u'Cotas Computer Technology A/B', 0x000216: u'Cisco Systems, Inc.', 0x000217: u'Cisco Systems, Inc.', 0x000218: u'Advanced Scientific Corp', 0x000219: u'Paralon Technologies', 0x00021A: u'Zuma Networks', 0x00021B: u'Kollmorgen-Servotronix', 0x00021C: u'Network Elements, Inc.', 0x00021D: u'Data General Communication Ltd.', 0x00021E: u'SIMTEL S.R.L.', 0x00021F: u'Aculab PLC', 0x000220: u'Canon Aptex, Inc.', 0x000221: u'DSP Application, Ltd.', 0x000222: u'Chromisys, Inc.', 0x000223: u'ClickTV', 0x000224: u'C-COR', 0x000225: u'Certus Technology, Inc.', 0x000226: u'XESystems, Inc.', 0x000227: u'ESD GmbH', 0x000228: u'Necsom, Ltd.', 0x000229: u'Adtec Corporation', 0x00022A: u'Asound Electronic', 0x00022B: u'SAXA, Inc.', 0x00022C: u'ABB Bomem, Inc.', 0x00022D: u'Agere Systems', 0x00022E: u'TEAC Corp. R& D', 0x00022F: u'P-Cube, Ltd.', 0x000230: u'Intersoft Electronics', 0x000231: u'Ingersoll-Rand', 0x000232: u'Avision, Inc.', 0x000233: u'Mantra Communications, Inc.', 0x000234: u'Imperial Technology, Inc.', 0x000235: u'Paragon Networks International', 0x000236: u'INIT GmbH', 0x000237: u'Cosmo Research Corp.', 0x000238: u'Serome Technology, Inc.', 0x000239: u'Visicom', 0x00023A: u'ZSK Stickmaschinen GmbH', 0x00023B: u'Redback Networks', 0x00023C: u'Creative Technology, Ltd.', 0x00023D: u'NuSpeed, Inc.', 0x00023E: u'Selta Telematica S.p.a', 0x00023F: u'Compal Electronics, Inc.', 0x000240: u'Seedek Co., Ltd.', 0x000241: u'Amer.com', 0x000242: u'Videoframe Systems', 0x000243: u'Raysis Co., Ltd.', 0x000244: u'SURECOM Technology Co.', 0x000245: u'Lampus Co, Ltd.', 0x000246: u'All-Win Tech Co., Ltd.', 0x000247: u'Great Dragon Information Technology (Group) Co., Ltd.', 0x000248: u'Pilz GmbH & Co.', 0x000249: u'Aviv Infocom Co, Ltd.', 0x00024A: u'Cisco Systems, Inc.', 0x00024B: u'Cisco Systems, Inc.', 0x00024C: u'SiByte, Inc.', 0x00024D: u'Mannesman Dematic Colby Pty. Ltd.', 0x00024E: u'Datacard Group', 0x00024F: u'IPM Datacom S.R.L.', 0x000250: u'Geyser Networks, Inc.', 0x000251: u'Soma Networks, Inc.', 0x000252: u'Carrier Corporation', 0x000253: u'Televideo, Inc.', 0x000254: u'WorldGate', 0x000255: u'IBM Corporation', 0x000256: u'Alpha Processor, Inc.', 0x000257: u'Microcom Corp.', 0x000258: u'Flying Packets Communications', 0x000259: u'Tsann Kuen China (Shanghai)Enterprise Co., Ltd. IT Group', 0x00025A: u'Catena Networks', 0x00025B: u'Cambridge Silicon Radio', 0x00025C: u'SCI Systems (Kunshan) Co., Ltd.', 0x00025D: u'Calix Networks', 0x00025E: u'High Technology Ltd', 0x00025F: u'Nortel Networks', 0x000260: u'Accordion Networks, Inc.', 0x000261: u'Tilgin AB', 0x000262: u'Soyo Group Soyo Com Tech Co., Ltd', 0x000263: u'UPS Manufacturing SRL', 0x000264: u'AudioRamp.com', 0x000265: u'Virditech Co. 
Ltd.', 0x000266: u'Thermalogic Corporation', 0x000267: u'NODE RUNNER, INC.', 0x000268: u'Harris Government Communications', 0x000269: u'Nadatel Co., Ltd', 0x00026A: u'Cocess Telecom Co., Ltd.', 0x00026B: u'BCM Computers Co., Ltd.', 0x00026C: u'Philips CFT', 0x00026D: u'Adept Telecom', 0x00026E: u'NeGeN Access, Inc.', 0x00026F: u'Senao International Co., Ltd.', 0x000270: u'Crewave Co., Ltd.', 0x000271: u'Vpacket Communications', 0x000272: u'CC&C Technologies, Inc.', 0x000273: u'Coriolis Networks', 0x000274: u'Tommy Technologies Corp.', 0x000275: u'SMART Technologies, Inc.', 0x000276: u'Primax Electronics Ltd.', 0x000277: u'Cash Systemes Industrie', 0x000278: u'Samsung Electro-Mechanics Co., Ltd.', 0x000279: u'Control Applications, Ltd.', 0x00027A: u'IOI Technology Corporation', 0x00027B: u'Amplify Net, Inc.', 0x00027C: u'Trilithic, Inc.', 0x00027D: u'Cisco Systems, Inc.', 0x00027E: u'Cisco Systems, Inc.', 0x00027F: u'ask-technologies.com', 0x000280: u'Mu Net, Inc.', 0x000281: u'Madge Ltd.', 0x000282: u'ViaClix, Inc.', 0x000283: u'Spectrum Controls, Inc.', 0x000284: u'AREVA T&D', 0x000285: u'Riverstone Networks', 0x000286: u'Occam Networks', 0x000287: u'Adapcom', 0x000288: u'GLOBAL VILLAGE COMMUNICATION', 0x000289: u'DNE Technologies', 0x00028A: u'Ambit Microsystems Corporation', 0x00028B: u'VDSL Systems OY', 0x00028C: u'Micrel-Synergy Semiconductor', 0x00028D: u'Movita Technologies, Inc.', 0x00028E: u'Rapid 5 Networks, Inc.', 0x00028F: u'Globetek, Inc.', 0x000290: u'Woorigisool, Inc.', 0x000291: u'Open Network Co., Ltd.', 0x000292: u'Logic Innovations, Inc.', 0x000293: u'Solid Data Systems', 0x000294: u'Tokyo Sokushin Co., Ltd.', 0x000295: u'IP.Access Limited', 0x000296: u'Lectron Co,. Ltd.', 0x000297: u'C-COR.net', 0x000298: u'Broadframe Corporation', 0x000299: u'Apex, Inc.', 0x00029A: u'Storage Apps', 0x00029B: u'Kreatel Communications AB', 0x00029C: u'3COM', 0x00029D: u'Merix Corp.', 0x00029E: u'Information Equipment Co., Ltd.', 0x00029F: u'L-3 Communication Aviation Recorders', 0x0002A0: u'Flatstack Ltd.', 0x0002A1: u'World Wide Packets', 0x0002A2: u'Hilscher GmbH', 0x0002A3: u'ABB Power Automation', 0x0002A4: u'AddPac Technology Co., Ltd.', 0x0002A5: u'Compaq Computer Corporation', 0x0002A6: u'Effinet Systems Co., Ltd.', 0x0002A7: u'Vivace Networks', 0x0002A8: u'Air Link Technology', 0x0002A9: u'RACOM, s.r.o.', 0x0002AA: u'PLcom Co., Ltd.', 0x0002AB: u'CTC Union Technologies Co., Ltd.', 0x0002AC: u'3PAR data', 0x0002AD: u'Pentax Corpotation', 0x0002AE: u'Scannex Electronics Ltd.', 0x0002AF: u'TeleCruz Technology, Inc.', 0x0002B0: u'Hokubu Communication & Industrial Co., Ltd.', 0x0002B1: u'Anritsu, Ltd.', 0x0002B2: u'Cablevision', 0x0002B3: u'Intel Corporation', 0x0002B4: u'DAPHNE', 0x0002B5: u'Avnet, Inc.', 0x0002B6: u'Acrosser Technology Co., Ltd.', 0x0002B7: u'Watanabe Electric Industry Co., Ltd.', 0x0002B8: u'WHI KONSULT AB', 0x0002B9: u'Cisco Systems, Inc.', 0x0002BA: u'Cisco Systems, Inc.', 0x0002BB: u'Continuous Computing', 0x0002BC: u'LVL 7 Systems, Inc.', 0x0002BD: u'Bionet Co., Ltd.', 0x0002BE: u'Totsu Engineering, Inc.', 0x0002BF: u'dotRocket, Inc.', 0x0002C0: u'Bencent Tzeng Industry Co., Ltd.', 0x0002C1: u'Innovative Electronic Designs, Inc.', 0x0002C2: u'Net Vision Telecom', 0x0002C3: u'Arelnet Ltd.', 0x0002C4: u'Vector International BUBA', 0x0002C5: u'Evertz Microsystems Ltd.', 0x0002C6: u'Data Track Technology PLC', 0x0002C7: u'ALPS ELECTRIC Co., Ltd.', 0x0002C8: u'Technocom Communications Technology (pte) Ltd', 0x0002C9: u'Mellanox Technologies', 0x0002CA: u'EndPoints, 
Inc.', 0x0002CB: u'TriState Ltd.', 0x0002CC: u'M.C.C.I', 0x0002CD: u'TeleDream, Inc.', 0x0002CE: u'FoxJet, Inc.', 0x0002CF: u'ZyGate Communications, Inc.', 0x0002D0: u'Comdial Corporation', 0x0002D1: u'Vivotek, Inc.', 0x0002D2: u'Workstation AG', 0x0002D3: u'NetBotz, Inc.', 0x0002D4: u'PDA Peripherals, Inc.', 0x0002D5: u'ACR', 0x0002D6: u'NICE Systems', 0x0002D7: u'EMPEG Ltd', 0x0002D8: u'BRECIS Communications Corporation', 0x0002D9: u'Reliable Controls', 0x0002DA: u'ExiO Communications, Inc.', 0x0002DB: u'NETSEC', 0x0002DC: u'Fujitsu General Limited', 0x0002DD: u'Bromax Communications, Ltd.', 0x0002DE: u'Astrodesign, Inc.', 0x0002DF: u'Net Com Systems, Inc.', 0x0002E0: u'ETAS GmbH', 0x0002E1: u'Integrated Network Corporation', 0x0002E2: u'NDC Infared Engineering', 0x0002E3: u'LITE-ON Communications, Inc.', 0x0002E4: u'JC HYUN Systems, Inc.', 0x0002E5: u'Timeware Ltd.', 0x0002E6: u'Gould Instrument Systems, Inc.', 0x0002E7: u'CAB GmbH & Co KG', 0x0002E8: u'E.D.&A.', 0x0002E9: u'CS Systemes De Securite - C3S', 0x0002EA: u'Focus Enhancements', 0x0002EB: u'Pico Communications', 0x0002EC: u'Maschoff Design Engineering', 0x0002ED: u'DXO Telecom Co., Ltd.', 0x0002EE: u'Nokia Danmark A/S', 0x0002EF: u'CCC Network Systems Group Ltd.', 0x0002F0: u'AME Optimedia Technology Co., Ltd.', 0x0002F1: u'Pinetron Co., Ltd.', 0x0002F2: u'eDevice, Inc.', 0x0002F3: u'Media Serve Co., Ltd.', 0x0002F4: u'PCTEL, Inc.', 0x0002F5: u'VIVE Synergies, Inc.', 0x0002F6: u'Equipe Communications', 0x0002F7: u'ARM', 0x0002F8: u'SEAKR Engineering, Inc.', 0x0002F9: u'Mimos Semiconductor SDN BHD', 0x0002FA: u'DX Antenna Co., Ltd.', 0x0002FB: u'Baumuller Aulugen-Systemtechnik GmbH', 0x0002FC: u'Cisco Systems, Inc.', 0x0002FD: u'Cisco Systems, Inc.', 0x0002FE: u'Viditec, Inc.', 0x0002FF: u'Handan BroadInfoCom', 0x000300: u'NetContinuum, Inc.', 0x000301: u'Avantas Networks Corporation', 0x000302: u'Charles Industries, Ltd.', 0x000303: u'JAMA Electronics Co., Ltd.', 0x000304: u'Pacific Broadband Communications', 0x000305: u'Smart Network Devices GmbH', 0x000306: u'Fusion In Tech Co., Ltd.', 0x000307: u'Secure Works, Inc.', 0x000308: u'AM Communications, Inc.', 0x000309: u'Texcel Technology PLC', 0x00030A: u'Argus Technologies', 0x00030B: u'Hunter Technology, Inc.', 0x00030C: u'Telesoft Technologies Ltd.', 0x00030D: u'Uniwill Computer Corp.', 0x00030E: u'Core Communications Co., Ltd.', 0x00030F: u'Digital China (Shanghai) Networks Ltd.', 0x000310: u'Link Evolution Corp.', 0x000311: u'Micro Technology Co., Ltd.', 0x000312: u'TR-Systemtechnik GmbH', 0x000313: u'Access Media SPA', 0x000314: u'Teleware Network Systems', 0x000315: u'Cidco Incorporated', 0x000316: u'Nobell Communications, Inc.', 0x000317: u'Merlin Systems, Inc.', 0x000318: u'Cyras Systems, Inc.', 0x000319: u'Infineon AG', 0x00031A: u'Beijing Broad Telecom Ltd., China', 0x00031B: u'Cellvision Systems, Inc.', 0x00031C: u'Svenska Hardvarufabriken AB', 0x00031D: u'Taiwan Commate Computer, Inc.', 0x00031E: u'Optranet, Inc.', 0x00031F: u'Condev Ltd.', 0x000320: u'Xpeed, Inc.', 0x000321: u'Reco Research Co., Ltd.', 0x000322: u'IDIS Co., Ltd.', 0x000323: u'Cornet Technology, Inc.', 0x000324: u'SANYO Multimedia Tottori Co., Ltd.', 0x000325: u'Arima Computer Corp.', 0x000326: u'Iwasaki Information Systems Co., Ltd.', 0x000327: u'ACT\'L', 0x000328: u'Mace Group, Inc.', 0x000329: u'F3, Inc.', 0x00032A: u'UniData Communication Systems, Inc.', 0x00032B: u'GAI Datenfunksysteme GmbH', 0x00032C: u'ABB Industrie AG', 0x00032D: u'IBASE Technology, Inc.', 0x00032E: u'Scope Information 
Management, Ltd.', 0x00032F: u'Global Sun Technology, Inc.', 0x000330: u'Imagenics, Co., Ltd.', 0x000331: u'Cisco Systems, Inc.', 0x000332: u'Cisco Systems, Inc.', 0x000333: u'Digitel Co., Ltd.', 0x000334: u'Newport Electronics', 0x000335: u'Mirae Technology', 0x000336: u'Zetes Technologies', 0x000337: u'Vaone, Inc.', 0x000338: u'Oak Technology', 0x000339: u'Eurologic Systems, Ltd.', 0x00033A: u'Silicon Wave, Inc.', 0x00033B: u'TAMI Tech Co., Ltd.', 0x00033C: u'Daiden Co., Ltd.', 0x00033D: u'ILSHin Lab', 0x00033E: u'Tateyama System Laboratory Co., Ltd.', 0x00033F: u'BigBand Networks, Ltd.', 0x000340: u'Floware Wireless Systems, Ltd.', 0x000341: u'Axon Digital Design', 0x000342: u'Nortel Networks', 0x000343: u'Martin Professional A/S', 0x000344: u'Tietech.Co., Ltd.', 0x000345: u'Routrek Networks Corporation', 0x000346: u'Hitachi Kokusai Electric, Inc.', 0x000347: u'Intel Corporation', 0x000348: u'Norscan Instruments, Ltd.', 0x000349: u'Vidicode Datacommunicatie B.V.', 0x00034A: u'RIAS Corporation', 0x00034B: u'Nortel Networks', 0x00034C: u'Shanghai DigiVision Technology Co., Ltd.', 0x00034D: u'Chiaro Networks, Ltd.', 0x00034E: u'Pos Data Company, Ltd.', 0x00034F: u'Sur-Gard Security', 0x000350: u'BTICINO SPA', 0x000351: u'Diebold, Inc.', 0x000352: u'Colubris Networks', 0x000353: u'Mitac, Inc.', 0x000354: u'Fiber Logic Communications', 0x000355: u'TeraBeam Internet Systems', 0x000356: u'Wincor Nixdorf GmbH & Co KG', 0x000357: u'Intervoice-Brite, Inc.', 0x000358: u'Hanyang Digitech Co., Ltd.', 0x000359: u'DigitalSis', 0x00035A: u'Photron Limited', 0x00035B: u'BridgeWave Communications', 0x00035C: u'Saint Song Corp.', 0x00035D: u'Bosung Hi-Net Co., Ltd.', 0x00035E: u'Metropolitan Area Networks, Inc.', 0x00035F: u'Prueftechnik Condition Monitoring GmbH & Co. 
KG', 0x000360: u'PAC Interactive Technology, Inc.', 0x000361: u'Widcomm, Inc.', 0x000362: u'Vodtel Communications, Inc.', 0x000363: u'Miraesys Co., Ltd.', 0x000364: u'Scenix Semiconductor, Inc.', 0x000365: u'Kira Information & Communications, Ltd.', 0x000366: u'ASM Pacific Technology', 0x000367: u'Jasmine Networks, Inc.', 0x000368: u'Embedone Co., Ltd.', 0x000369: u'Nippon Antenna Co., Ltd.', 0x00036A: u'Mainnet, Ltd.', 0x00036B: u'Cisco Systems, Inc.', 0x00036C: u'Cisco Systems, Inc.', 0x00036D: u'Runtop, Inc.', 0x00036E: u'Nicon Systems (Pty) Limited', 0x00036F: u'Telsey SPA', 0x000370: u'NXTV, Inc.', 0x000371: u'Acomz Networks Corp.', 0x000372: u'ULAN', 0x000373: u'Aselsan A.S', 0x000374: u'Hunter Watertech', 0x000375: u'NetMedia, Inc.', 0x000376: u'Graphtec Technology, Inc.', 0x000377: u'Gigabit Wireless', 0x000378: u'HUMAX Co., Ltd.', 0x000379: u'Proscend Communications, Inc.', 0x00037A: u'Taiyo Yuden Co., Ltd.', 0x00037B: u'IDEC IZUMI Corporation', 0x00037C: u'Coax Media', 0x00037D: u'Stellcom', 0x00037E: u'PORTech Communications, Inc.', 0x00037F: u'Atheros Communications, Inc.', 0x000380: u'SSH Communications Security Corp.', 0x000381: u'Ingenico International', 0x000382: u'A-One Co., Ltd.', 0x000383: u'Metera Networks, Inc.', 0x000384: u'AETA', 0x000385: u'Actelis Networks, Inc.', 0x000386: u'Ho Net, Inc.', 0x000387: u'Blaze Network Products', 0x000388: u'Fastfame Technology Co., Ltd.', 0x000389: u'Plantronics', 0x00038A: u'America Online, Inc.', 0x00038B: u'PLUS-ONE I&T, Inc.', 0x00038C: u'Total Impact', 0x00038D: u'PCS Revenue Control Systems, Inc.', 0x00038E: u'Atoga Systems, Inc.', 0x00038F: u'Weinschel Corporation', 0x000390: u'Digital Video Communications, Inc.', 0x000391: u'Advanced Digital Broadcast, Ltd.', 0x000392: u'Hyundai Teletek Co., Ltd.', 0x000393: u'Apple Computer, Inc.', 0x000394: u'Connect One', 0x000395: u'California Amplifier', 0x000396: u'EZ Cast Co., Ltd.', 0x000397: u'Watchfront Electronics', 0x000398: u'WISI', 0x000399: u'Dongju Informations & Communications Co., Ltd.', 0x00039A: u'SiConnect', 0x00039B: u'NetChip Technology, Inc.', 0x00039C: u'OptiMight Communications, Inc.', 0x00039D: u'BENQ CORPORATION', 0x00039E: u'Tera System Co., Ltd.', 0x00039F: u'Cisco Systems, Inc.', 0x0003A0: u'Cisco Systems, Inc.', 0x0003A1: u'HIPER Information & Communication, Inc.', 0x0003A2: u'Catapult Communications', 0x0003A3: u'MAVIX, Ltd.', 0x0003A4: u'Data Storage and Information Management', 0x0003A5: u'Medea Corporation', 0x0003A6: u'Traxit Technology, Inc.', 0x0003A7: u'Unixtar Technology, Inc.', 0x0003A8: u'IDOT Computers, Inc.', 0x0003A9: u'AXCENT Media AG', 0x0003AA: u'Watlow', 0x0003AB: u'Bridge Information Systems', 0x0003AC: u'Fronius Schweissmaschinen', 0x0003AD: u'Emerson Energy Systems AB', 0x0003AE: u'Allied Advanced Manufacturing Pte, Ltd.', 0x0003AF: u'Paragea Communications', 0x0003B0: u'Xsense Technology Corp.', 0x0003B1: u'Hospira Inc.', 0x0003B2: u'Radware', 0x0003B3: u'IA Link Systems Co., Ltd.', 0x0003B4: u'Macrotek International Corp.', 0x0003B5: u'Entra Technology Co.', 0x0003B6: u'QSI Corporation', 0x0003B7: u'ZACCESS Systems', 0x0003B8: u'NetKit Solutions, LLC', 0x0003B9: u'Hualong Telecom Co., Ltd.', 0x0003BA: u'Sun Microsystems', 0x0003BB: u'Signal Communications Limited', 0x0003BC: u'COT GmbH', 0x0003BD: u'OmniCluster Technologies, Inc.', 0x0003BE: u'Netility', 0x0003BF: u'Centerpoint Broadband Technologies, Inc.', 0x0003C0: u'RFTNC Co., Ltd.', 0x0003C1: u'Packet Dynamics Ltd', 0x0003C2: u'Solphone K.K.', 0x0003C3: u'Micronik Multimedia', 
0x0003C4: u'Tomra Systems ASA', 0x0003C5: u'Mobotix AG', 0x0003C6: u'ICUE Systems, Inc.', 0x0003C7: u'hopf Elektronik GmbH', 0x0003C8: u'CML Emergency Services', 0x0003C9: u'TECOM Co., Ltd.', 0x0003CA: u'MTS Systems Corp.', 0x0003CB: u'Nippon Systems Development Co., Ltd.', 0x0003CC: u'Momentum Computer, Inc.', 0x0003CD: u'Clovertech, Inc.', 0x0003CE: u'ETEN Technologies, Inc.', 0x0003CF: u'Muxcom, Inc.', 0x0003D0: u'KOANKEISO Co., Ltd.', 0x0003D1: u'Takaya Corporation', 0x0003D2: u'Crossbeam Systems, Inc.', 0x0003D3: u'Internet Energy Systems, Inc.', 0x0003D4: u'Alloptic, Inc.', 0x0003D5: u'Advanced Communications Co., Ltd.', 0x0003D6: u'RADVision, Ltd.', 0x0003D7: u'NextNet Wireless, Inc.', 0x0003D8: u'iMPath Networks, Inc.', 0x0003D9: u'Secheron SA', 0x0003DA: u'Takamisawa Cybernetics Co., Ltd.', 0x0003DB: u'Apogee Electronics Corp.', 0x0003DC: u'Lexar Media, Inc.', 0x0003DD: u'Comark Corp.', 0x0003DE: u'OTC Wireless', 0x0003DF: u'Desana Systems', 0x0003E0: u'RadioFrame Networks, Inc.', 0x0003E1: u'Winmate Communication, Inc.', 0x0003E2: u'Comspace Corporation', 0x0003E3: u'Cisco Systems, Inc.', 0x0003E4: u'Cisco Systems, Inc.', 0x0003E5: u'Hermstedt SG', 0x0003E6: u'Entone Technologies, Inc.', 0x0003E7: u'Logostek Co. Ltd.', 0x0003E8: u'Wavelength Digital Limited', 0x0003E9: u'Akara Canada, Inc.', 0x0003EA: u'Mega System Technologies, Inc.', 0x0003EB: u'Atrica', 0x0003EC: u'ICG Research, Inc.', 0x0003ED: u'Shinkawa Electric Co., Ltd.', 0x0003EE: u'MKNet Corporation', 0x0003EF: u'Oneline AG', 0x0003F0: u'Redfern Broadband Networks', 0x0003F1: u'Cicada Semiconductor, Inc.', 0x0003F2: u'Seneca Networks', 0x0003F3: u'Dazzle Multimedia, Inc.', 0x0003F4: u'NetBurner', 0x0003F5: u'Chip2Chip', 0x0003F6: u'Allegro Networks, Inc.', 0x0003F7: u'Plast-Control GmbH', 0x0003F8: u'SanCastle Technologies, Inc.', 0x0003F9: u'Pleiades Communications, Inc.', 0x0003FA: u'TiMetra Networks', 0x0003FB: u'Toko Seiki Company, Ltd.', 0x0003FC: u'Intertex Data AB', 0x0003FD: u'Cisco Systems, Inc.', 0x0003FE: u'Cisco Systems, Inc.', 0x0003FF: u'Microsoft Corporation', 0x000400: u'LEXMARK INTERNATIONAL, INC.', 0x000401: u'Osaki Electric Co., Ltd.', 0x000402: u'Nexsan Technologies, Ltd.', 0x000403: u'Nexsi Corporation', 0x000404: u'Makino Milling Machine Co., Ltd.', 0x000405: u'ACN Technologies', 0x000406: u'Fa. 
Metabox AG', 0x000407: u'Topcon Positioning Systems, Inc.', 0x000408: u'Sanko Electronics Co., Ltd.', 0x000409: u'Cratos Networks', 0x00040A: u'Sage Systems', 0x00040B: u'3com Europe Ltd.', 0x00040C: u'KANNO Work\'s Ltd.', 0x00040D: u'Avaya, Inc.', 0x00040E: u'AVM GmbH', 0x00040F: u'Asus Network Technologies, Inc.', 0x000410: u'Spinnaker Networks, Inc.', 0x000411: u'Inkra Networks, Inc.', 0x000412: u'WaveSmith Networks, Inc.', 0x000413: u'SNOM Technology AG', 0x000414: u'Umezawa Musen Denki Co., Ltd.', 0x000415: u'Rasteme Systems Co., Ltd.', 0x000416: u'Parks S/A Comunicacoes Digitais', 0x000417: u'ELAU AG', 0x000418: u'Teltronic S.A.U.', 0x000419: u'Fibercycle Networks, Inc.', 0x00041A: u'ines GmbH', 0x00041B: u'Digital Interfaces Ltd.', 0x00041C: u'ipDialog, Inc.', 0x00041D: u'Corega of America', 0x00041E: u'Shikoku Instrumentation Co., Ltd.', 0x00041F: u'Sony Computer Entertainment, Inc.', 0x000420: u'Slim Devices, Inc.', 0x000421: u'Ocular Networks', 0x000422: u'Gordon Kapes, Inc.', 0x000423: u'Intel Corporation', 0x000424: u'TMC s.r.l.', 0x000425: u'Atmel Corporation', 0x000426: u'Autosys', 0x000427: u'Cisco Systems, Inc.', 0x000428: u'Cisco Systems, Inc.', 0x000429: u'Pixord Corporation', 0x00042A: u'Wireless Networks, Inc.', 0x00042B: u'IT Access Co., Ltd.', 0x00042C: u'Minet, Inc.', 0x00042D: u'Sarian Systems, Ltd.', 0x00042E: u'Netous Technologies, Ltd.', 0x00042F: u'International Communications Products, Inc.', 0x000430: u'Netgem', 0x000431: u'GlobalStreams, Inc.', 0x000432: u'Voyetra Turtle Beach, Inc.', 0x000433: u'Cyberboard A/S', 0x000434: u'Accelent Systems, Inc.', 0x000435: u'Comptek International, Inc.', 0x000436: u'ELANsat Technologies, Inc.', 0x000437: u'Powin Information Technology, Inc.', 0x000438: u'Nortel Networks', 0x000439: u'Rosco Entertainment Technology, Inc.', 0x00043A: u'Intelligent Telecommunications, Inc.', 0x00043B: u'Lava Computer Mfg., Inc.', 0x00043C: u'SONOS Co., Ltd.', 0x00043D: u'INDEL AG', 0x00043E: u'Telencomm', 0x00043F: u'Electronic Systems Technology, Inc.', 0x000440: u'cyberPIXIE, Inc.', 0x000441: u'Half Dome Systems, Inc.', 0x000442: u'NACT', 0x000443: u'Agilent Technologies, Inc.', 0x000444: u'Western Multiplex Corporation', 0x000445: u'LMS Skalar Instruments GmbH', 0x000446: u'CYZENTECH Co., Ltd.', 0x000447: u'Acrowave Systems Co., Ltd.', 0x000448: u'Polaroid Professional Imaging', 0x000449: u'Mapletree Networks', 0x00044A: u'iPolicy Networks, Inc.', 0x00044B: u'NVIDIA', 0x00044C: u'JENOPTIK', 0x00044D: u'Cisco Systems, Inc.', 0x00044E: u'Cisco Systems, Inc.', 0x00044F: u'Leukhardt Systemelektronik GmbH', 0x000450: u'DMD Computers SRL', 0x000451: u'Medrad, Inc.', 0x000452: u'RocketLogix, Inc.', 0x000453: u'YottaYotta, Inc.', 0x000454: u'Quadriga UK', 0x000455: u'ANTARA.net', 0x000456: u'PipingHot Networks', 0x000457: u'Universal Access Technology, Inc.', 0x000458: u'Fusion X Co., Ltd.', 0x000459: u'Veristar Corporation', 0x00045A: u'The Linksys Group, Inc.', 0x00045B: u'Techsan Electronics Co., Ltd.', 0x00045C: u'Mobiwave Pte Ltd', 0x00045D: u'BEKA Elektronik', 0x00045E: u'PolyTrax Information Technology AG', 0x00045F: u'Evalue Technology, Inc.', 0x000460: u'Knilink Technology, Inc.', 0x000461: u'EPOX Computer Co., Ltd.', 0x000462: u'DAKOS Data & Communication Co., Ltd.', 0x000463: u'Bosch Security Systems', 0x000464: u'Fantasma Networks, Inc.', 0x000465: u'i.s.t isdn-support technik GmbH', 0x000466: u'ARMITEL Co.', 0x000467: u'Wuhan Research Institute of MII', 0x000468: u'Vivity, Inc.', 0x000469: u'Innocom, Inc.', 0x00046A: u'Navini 
Networks', 0x00046B: u'Palm Wireless, Inc.', 0x00046C: u'Cyber Technology Co., Ltd.', 0x00046D: u'Cisco Systems, Inc.', 0x00046E: u'Cisco Systems, Inc.', 0x00046F: u'Digitel S/A Industria Eletronica', 0x000470: u'ipUnplugged AB', 0x000471: u'IPrad', 0x000472: u'Telelynx, Inc.', 0x000473: u'Photonex Corporation', 0x000474: u'LEGRAND', 0x000475: u'3 Com Corporation', 0x000476: u'3 Com Corporation', 0x000477: u'Scalant Systems, Inc.', 0x000478: u'G. Star Technology Corporation', 0x000479: u'Radius Co., Ltd.', 0x00047A: u'AXXESSIT ASA', 0x00047B: u'Schlumberger', 0x00047C: u'Skidata AG', 0x00047D: u'Pelco', 0x00047E: u'Optelecom=NKF', 0x00047F: u'Chr. Mayr GmbH & Co. KG', 0x000480: u'Foundry Networks, Inc.', 0x000481: u'Econolite Control Products, Inc.', 0x000482: u'Medialogic Corp.', 0x000483: u'Deltron Technology, Inc.', 0x000484: u'Amann GmbH', 0x000485: u'PicoLight', 0x000486: u'ITTC, University of Kansas', 0x000487: u'Cogency Semiconductor, Inc.', 0x000488: u'Eurotherm Controls', 0x000489: u'YAFO Networks, Inc.', 0x00048A: u'Temia Vertriebs GmbH', 0x00048B: u'Poscon Corporation', 0x00048C: u'Nayna Networks, Inc.', 0x00048D: u'Tone Commander Systems, Inc.', 0x00048E: u'Ohm Tech Labs, Inc.', 0x00048F: u'TD Systems Corp.', 0x000490: u'Optical Access', 0x000491: u'Technovision, Inc.', 0x000492: u'Hive Internet, Ltd.', 0x000493: u'Tsinghua Unisplendour Co., Ltd.', 0x000494: u'Breezecom, Ltd.', 0x000495: u'Tejas Networks', 0x000496: u'Extreme Networks', 0x000497: u'MacroSystem Digital Video AG', 0x000498: u'Mahi Networks', 0x000499: u'Chino Corporation', 0x00049A: u'Cisco Systems, Inc.', 0x00049B: u'Cisco Systems, Inc.', 0x00049C: u'Surgient Networks, Inc.', 0x00049D: u'Ipanema Technologies', 0x00049E: u'Wirelink Co., Ltd.', 0x00049F: u'Freescale Semiconductor', 0x0004A0: u'Verity Instruments, Inc.', 0x0004A1: u'Pathway Connectivity', 0x0004A2: u'L.S.I. Japan Co., Ltd.', 0x0004A3: u'Microchip Technology, Inc.', 0x0004A4: u'NetEnabled, Inc.', 0x0004A5: u'Barco Projection Systems NV', 0x0004A6: u'SAF Tehnika Ltd.', 0x0004A7: u'FabiaTech Corporation', 0x0004A8: u'Broadmax Technologies, Inc.', 0x0004A9: u'SandStream Technologies, Inc.', 0x0004AA: u'Jetstream Communications', 0x0004AB: u'Comverse Network Systems, Inc.', 0x0004AC: u'IBM CORP.', 0x0004AD: u'Malibu Networks', 0x0004AE: u'Liquid Metronics', 0x0004AF: u'Digital Fountain, Inc.', 0x0004B0: u'ELESIGN Co., Ltd.', 0x0004B1: u'Signal Technology, Inc.', 0x0004B2: u'ESSEGI SRL', 0x0004B3: u'Videotek, Inc.', 0x0004B4: u'CIAC', 0x0004B5: u'Equitrac Corporation', 0x0004B6: u'Stratex Networks, Inc.', 0x0004B7: u'AMB i.t. Holding', 0x0004B8: u'Kumahira Co., Ltd.', 0x0004B9: u'S.I. 
Soubou, Inc.', 0x0004BA: u'KDD Media Will Corporation', 0x0004BB: u'Bardac Corporation', 0x0004BC: u'Giantec, Inc.', 0x0004BD: u'Motorola BCS', 0x0004BE: u'OptXCon, Inc.', 0x0004BF: u'VersaLogic Corp.', 0x0004C0: u'Cisco Systems, Inc.', 0x0004C1: u'Cisco Systems, Inc.', 0x0004C2: u'Magnipix, Inc.', 0x0004C3: u'CASTOR Informatique', 0x0004C4: u'Allen & Heath Limited', 0x0004C5: u'ASE Technologies, USA', 0x0004C6: u'Yamaha Motor Co., Ltd.', 0x0004C7: u'NetMount', 0x0004C8: u'LIBA Maschinenfabrik GmbH', 0x0004C9: u'Micro Electron Co., Ltd.', 0x0004CA: u'FreeMs Corp.', 0x0004CB: u'Tdsoft Communication, Ltd.', 0x0004CC: u'Peek Traffic B.V.', 0x0004CD: u'Informedia Research Group', 0x0004CE: u'Patria Ailon', 0x0004CF: u'Seagate Technology', 0x0004D0: u'Softlink s.r.o.', 0x0004D1: u'Drew Technologies, Inc.', 0x0004D2: u'Adcon Telemetry GmbH', 0x0004D3: u'Toyokeiki Co., Ltd.', 0x0004D4: u'Proview Electronics Co., Ltd.', 0x0004D5: u'Hitachi Communication Systems, Inc.', 0x0004D6: u'Takagi Industrial Co., Ltd.', 0x0004D7: u'Omitec Instrumentation Ltd.', 0x0004D8: u'IPWireless, Inc.', 0x0004D9: u'Titan Electronics, Inc.', 0x0004DA: u'Relax Technology, Inc.', 0x0004DB: u'Tellus Group Corp.', 0x0004DC: u'Nortel Networks', 0x0004DD: u'Cisco Systems, Inc.', 0x0004DE: u'Cisco Systems, Inc.', 0x0004DF: u'Teracom Telematica Ltda.', 0x0004E0: u'Procket Networks', 0x0004E1: u'Infinior Microsystems', 0x0004E2: u'SMC Networks, Inc.', 0x0004E3: u'Accton Technology Corp.', 0x0004E4: u'Daeryung Ind., Inc.', 0x0004E5: u'Glonet Systems, Inc.', 0x0004E6: u'Banyan Network Private Limited', 0x0004E7: u'Lightpointe Communications, Inc', 0x0004E8: u'IER, Inc.', 0x0004E9: u'Infiniswitch Corporation', 0x0004EA: u'Hewlett-Packard Company', 0x0004EB: u'Paxonet Communications, Inc.', 0x0004EC: u'Memobox SA', 0x0004ED: u'Billion Electric Co., Ltd.', 0x0004EE: u'Lincoln Electric Company', 0x0004EF: u'Polestar Corp.', 0x0004F0: u'International Computers, Ltd', 0x0004F1: u'WhereNet', 0x0004F2: u'Polycom', 0x0004F3: u'FS FORTH-SYSTEME GmbH', 0x0004F4: u'Infinite Electronics Inc.', 0x0004F5: u'SnowShore Networks, Inc.', 0x0004F6: u'Amphus', 0x0004F7: u'Omega Band, Inc.', 0x0004F8: u'QUALICABLE TV Industria E Com., Ltda', 0x0004F9: u'Xtera Communications, Inc.', 0x0004FA: u'NBS Technologies Inc.', 0x0004FB: u'Commtech, Inc.', 0x0004FC: u'Stratus Computer (DE), Inc.', 0x0004FD: u'Japan Control Engineering Co., Ltd.', 0x0004FE: u'Pelago Networks', 0x0004FF: u'Acronet Co., Ltd.', 0x000500: u'Cisco Systems, Inc.', 0x000501: u'Cisco Systems, Inc.', 0x000502: u'APPLE COMPUTER', 0x000503: u'ICONAG', 0x000504: u'Naray Information & Communication Enterprise', 0x000505: u'Systems Integration Solutions, Inc.', 0x000506: u'Reddo Networks AB', 0x000507: u'Fine Appliance Corp.', 0x000508: u'Inetcam, Inc.', 0x000509: u'AVOC Nishimura Ltd.', 0x00050A: u'ICS Spa', 0x00050B: u'SICOM Systems, Inc.', 0x00050C: u'Network Photonics, Inc.', 0x00050D: u'Midstream Technologies, Inc.', 0x00050E: u'3ware, Inc.', 0x00050F: u'Tanaka S/S Ltd.', 0x000510: u'Infinite Shanghai Communication Terminals Ltd.', 0x000511: u'Complementary Technologies Ltd', 0x000512: u'MeshNetworks, Inc.', 0x000513: u'VTLinx Multimedia Systems, Inc.', 0x000514: u'KDT Systems Co., Ltd.', 0x000515: u'Nuark Co., Ltd.', 0x000516: u'SMART Modular Technologies', 0x000517: u'Shellcomm, Inc.', 0x000518: u'Jupiters Technology', 0x000519: u'Siemens Building Technologies AG,', 0x00051A: u'3Com Europe Ltd.', 0x00051B: u'Magic Control Technology Corporation', 0x00051C: u'Xnet Technology Corp.', 
0x00051D: u'Airocon, Inc.', 0x00051E: u'Brocade Communications Systems, Inc.', 0x00051F: u'Taijin Media Co., Ltd.', 0x000520: u'Smartronix, Inc.', 0x000521: u'Control Microsystems', 0x000522: u'LEA*D Corporation, Inc.', 0x000523: u'AVL List GmbH', 0x000524: u'BTL System (HK) Limited', 0x000525: u'Puretek Industrial Co., Ltd.', 0x000526: u'IPAS GmbH', 0x000527: u'SJ Tek Co. Ltd', 0x000528: u'New Focus, Inc.', 0x000529: u'Shanghai Broadan Communication Technology Co., Ltd', 0x00052A: u'Ikegami Tsushinki Co., Ltd.', 0x00052B: u'HORIBA, Ltd.', 0x00052C: u'Supreme Magic Corporation', 0x00052D: u'Zoltrix International Limited', 0x00052E: u'Cinta Networks', 0x00052F: u'Leviton Voice and Data', 0x000530: u'Andiamo Systems, Inc.', 0x000531: u'Cisco Systems, Inc.', 0x000532: u'Cisco Systems, Inc.', 0x000533: u'Sanera Systems, Inc.', 0x000534: u'Northstar Engineering Ltd.', 0x000535: u'Chip PC Ltd.', 0x000536: u'Danam Communications, Inc.', 0x000537: u'Nets Technology Co., Ltd.', 0x000538: u'Merilus, Inc.', 0x000539: u'A Brand New World in Sweden AB', 0x00053A: u'Willowglen Services Pte Ltd', 0x00053B: u'Harbour Networks Ltd., Co. Beijing', 0x00053C: u'Xircom', 0x00053D: u'Agere Systems', 0x00053E: u'KID Systeme GmbH', 0x00053F: u'VisionTek, Inc.', 0x000540: u'FAST Corporation', 0x000541: u'Advanced Systems Co., Ltd.', 0x000542: u'Otari, Inc.', 0x000543: u'IQ Wireless GmbH', 0x000544: u'Valley Technologies, Inc.', 0x000545: u'Internet Photonics', 0x000546: u'KDDI Network & Solultions Inc.', 0x000547: u'Starent Networks', 0x000548: u'Disco Corporation', 0x000549: u'Salira Optical Network Systems', 0x00054A: u'Ario Data Networks, Inc.', 0x00054B: u'Micro Innovation AG', 0x00054C: u'RF Innovations Pty Ltd', 0x00054D: u'Brans Technologies, Inc.', 0x00054E: u'Philips Components', 0x00054F: u'PRIVATE', 0x000550: u'Vcomms Limited', 0x000551: u'F & S Elektronik Systeme GmbH', 0x000552: u'Xycotec Computer GmbH', 0x000553: u'DVC Company, Inc.', 0x000554: u'Rangestar Wireless', 0x000555: u'Japan Cash Machine Co., Ltd.', 0x000556: u'360 Systems', 0x000557: u'Agile TV Corporation', 0x000558: u'Synchronous, Inc.', 0x000559: u'Intracom S.A.', 0x00055A: u'Power Dsine Ltd.', 0x00055B: u'Charles Industries, Ltd.', 0x00055C: u'Kowa Company, Ltd.', 0x00055D: u'D-Link Systems, Inc.', 0x00055E: u'Cisco Systems, Inc.', 0x00055F: u'Cisco Systems, Inc.', 0x000560: u'LEADER COMM.CO., LTD', 0x000561: u'nac Image Technology, Inc.', 0x000562: u'Digital View Limited', 0x000563: u'J-Works, Inc.', 0x000564: u'Tsinghua Bitway Co., Ltd.', 0x000565: u'Tailyn Communication Company Ltd.', 0x000566: u'Secui.com Corporation', 0x000567: u'Etymonic Design, Inc.', 0x000568: u'Piltofish Networks AB', 0x000569: u'VMWARE, Inc.', 0x00056A: u'Heuft Systemtechnik GmbH', 0x00056B: u'C.P. Technology Co., Ltd.', 0x00056C: u'Hung Chang Co., Ltd.', 0x00056D: u'Pacific Corporation', 0x00056E: u'National Enhance Technology, Inc.', 0x00056F: u'Innomedia Technologies Pvt. 
Ltd.', 0x000570: u'Baydel Ltd.', 0x000571: u'Seiwa Electronics Co.', 0x000572: u'Deonet Co., Ltd.', 0x000573: u'Cisco Systems, Inc.', 0x000574: u'Cisco Systems, Inc.', 0x000575: u'CDS-Electronics BV', 0x000576: u'NSM Technology Ltd.', 0x000577: u'SM Information & Communication', 0x000578: u'PRIVATE', 0x000579: u'Universal Control Solution Corp.', 0x00057A: u'Hatteras Networks', 0x00057B: u'Chung Nam Electronic Co., Ltd.', 0x00057C: u'RCO Security AB', 0x00057D: u'Sun Communications, Inc.', 0x00057E: u'Eckelmann Steuerungstechnik GmbH', 0x00057F: u'Acqis Technology', 0x000580: u'Fibrolan Ltd.', 0x000581: u'Snell & Wilcox Ltd.', 0x000582: u'ClearCube Technology', 0x000583: u'ImageCom Limited', 0x000584: u'AbsoluteValue Systems, Inc.', 0x000585: u'Juniper Networks, Inc.', 0x000586: u'Lucent Technologies', 0x000587: u'Locus, Incorporated', 0x000588: u'Sensoria Corp.', 0x000589: u'National Datacomputer', 0x00058A: u'Netcom Co., Ltd.', 0x00058B: u'IPmental, Inc.', 0x00058C: u'Opentech Inc.', 0x00058D: u'Lynx Photonic Networks, Inc.', 0x00058E: u'Flextronics International GmbH & Co. Nfg. KG', 0x00058F: u'CLCsoft co.', 0x000590: u'Swissvoice Ltd.', 0x000591: u'Active Silicon Ltd.', 0x000592: u'Pultek Corp.', 0x000593: u'Grammar Engine Inc.', 0x000594: u'IXXAT Automation GmbH', 0x000595: u'Alesis Corporation', 0x000596: u'Genotech Co., Ltd.', 0x000597: u'Eagle Traffic Control Systems', 0x000598: u'CRONOS S.r.l.', 0x000599: u'DRS Test and Energy Management or DRS-TEM', 0x00059A: u'Cisco Systems, Inc.', 0x00059B: u'Cisco Systems, Inc.', 0x00059C: u'Kleinknecht GmbH, Ing. Buero', 0x00059D: u'Daniel Computing Systems, Inc.', 0x00059E: u'Zinwell Corporation', 0x00059F: u'Yotta Networks, Inc.', 0x0005A0: u'MOBILINE Kft.', 0x0005A1: u'Zenocom', 0x0005A2: u'CELOX Networks', 0x0005A3: u'QEI, Inc.', 0x0005A4: u'Lucid Voice Ltd.', 0x0005A5: u'KOTT', 0x0005A6: u'Extron Electronics', 0x0005A7: u'Hyperchip, Inc.', 0x0005A8: u'WYLE ELECTRONICS', 0x0005A9: u'Princeton Networks, Inc.', 0x0005AA: u'Moore Industries International Inc.', 0x0005AB: u'Cyber Fone, Inc.', 0x0005AC: u'Northern Digital, Inc.', 0x0005AD: u'Topspin Communications, Inc.', 0x0005AE: u'Mediaport USA', 0x0005AF: u'InnoScan Computing A/S', 0x0005B0: u'Korea Computer Technology Co., Ltd.', 0x0005B1: u'ASB Technology BV', 0x0005B2: u'Medison Co., Ltd.', 0x0005B3: u'Asahi-Engineering Co., Ltd.', 0x0005B4: u'Aceex Corporation', 0x0005B5: u'Broadcom Technologies', 0x0005B6: u'INSYS Microelectronics GmbH', 0x0005B7: u'Arbor Technology Corp.', 0x0005B8: u'Electronic Design Associates, Inc.', 0x0005B9: u'Airvana, Inc.', 0x0005BA: u'Area Netwoeks, Inc.', 0x0005BB: u'Myspace AB', 0x0005BC: u'Resorsys Ltd.', 0x0005BD: u'ROAX BV', 0x0005BE: u'Kongsberg Seatex AS', 0x0005BF: u'JustEzy Technology, Inc.', 0x0005C0: u'Digital Network Alacarte Co., Ltd.', 0x0005C1: u'A-Kyung Motion, Inc.', 0x0005C2: u'Soronti, Inc.', 0x0005C3: u'Pacific Instruments, Inc.', 0x0005C4: u'Telect, Inc.', 0x0005C5: u'Flaga HF', 0x0005C6: u'Triz Communications', 0x0005C7: u'I/F-COM A/S', 0x0005C8: u'VERYTECH', 0x0005C9: u'LG Innotek', 0x0005CA: u'Hitron Technology, Inc.', 0x0005CB: u'ROIS Technologies, Inc.', 0x0005CC: u'Sumtel Communications, Inc.', 0x0005CD: u'Denon, Ltd.', 0x0005CE: u'Prolink Microsystems Corporation', 0x0005CF: u'Thunder River Technologies, Inc.', 0x0005D0: u'Solinet Systems', 0x0005D1: u'Metavector Technologies', 0x0005D2: u'DAP Technologies', 0x0005D3: u'eProduction Solutions, Inc.', 0x0005D4: u'FutureSmart Networks, Inc.', 0x0005D5: u'Speedcom Wireless', 0x0005D6: 
u'Titan Wireless', 0x0005D7: u'Vista Imaging, Inc.', 0x0005D8: u'Arescom, Inc.', 0x0005D9: u'Techno Valley, Inc.', 0x0005DA: u'Apex Automationstechnik', 0x0005DB: u'Nentec GmbH', 0x0005DC: u'Cisco Systems, Inc.', 0x0005DD: u'Cisco Systems, Inc.', 0x0005DE: u'Gi Fone Korea, Inc.', 0x0005DF: u'Electronic Innovation, Inc.', 0x0005E0: u'Empirix Corp.', 0x0005E1: u'Trellis Photonics, Ltd.', 0x0005E2: u'Creativ Network Technologies', 0x0005E3: u'LightSand Communications, Inc.', 0x0005E4: u'Red Lion Controls L.P.', 0x0005E5: u'Renishaw PLC', 0x0005E6: u'Egenera, Inc.', 0x0005E7: u'Netrake Corp.', 0x0005E8: u'TurboWave, Inc.', 0x0005E9: u'Unicess Network, Inc.', 0x0005EA: u'Rednix', 0x0005EB: u'Blue Ridge Networks, Inc.', 0x0005EC: u'Mosaic Systems Inc.', 0x0005ED: u'Technikum Joanneum GmbH', 0x0005EE: u'BEWATOR Group', 0x0005EF: u'ADOIR Digital Technology', 0x0005F0: u'SATEC', 0x0005F1: u'Vrcom, Inc.', 0x0005F2: u'Power R, Inc.', 0x0005F3: u'Weboyn', 0x0005F4: u'System Base Co., Ltd.', 0x0005F5: u'OYO Geospace Corp.', 0x0005F6: u'Young Chang Co. Ltd.', 0x0005F7: u'Analog Devices, Inc.', 0x0005F8: u'Real Time Access, Inc.', 0x0005F9: u'TOA Corporation', 0x0005FA: u'IPOptical, Inc.', 0x0005FB: u'ShareGate, Inc.', 0x0005FC: u'Schenck Pegasus Corp.', 0x0005FD: u'PacketLight Networks Ltd.', 0x0005FE: u'Traficon N.V.', 0x0005FF: u'SNS Solutions, Inc.', 0x000600: u'Toshiba Teli Corporation', 0x000601: u'Otanikeiki Co., Ltd.', 0x000602: u'Cirkitech Electronics Co.', 0x000603: u'Baker Hughes Inc.', 0x000604: u'@Track Communications, Inc.', 0x000605: u'Inncom International, Inc.', 0x000606: u'RapidWAN, Inc.', 0x000607: u'Omni Directional Control Technology Inc.', 0x000608: u'At-Sky SAS', 0x000609: u'Crossport Systems', 0x00060A: u'Blue2space', 0x00060B: u'Paceline Systems Corporation', 0x00060C: u'Melco Industries, Inc.', 0x00060D: u'Wave7 Optics', 0x00060E: u'IGYS Systems, Inc.', 0x00060F: u'Narad Networks Inc', 0x000610: u'Abeona Networks Inc', 0x000611: u'Zeus Wireless, Inc.', 0x000612: u'Accusys, Inc.', 0x000613: u'Kawasaki Microelectronics Incorporated', 0x000614: u'Prism Holdings', 0x000615: u'Kimoto Electric Co., Ltd.', 0x000616: u'Tel Net Co., Ltd.', 0x000617: u'Redswitch Inc.', 0x000618: u'DigiPower Manufacturing Inc.', 0x000619: u'Connection Technology Systems', 0x00061A: u'Zetari Inc.', 0x00061B: u'Portable Systems, IBM Japan Co, Ltd', 0x00061C: u'Hoshino Metal Industries, Ltd.', 0x00061D: u'MIP Telecom, Inc.', 0x00061E: u'Maxan Systems', 0x00061F: u'Vision Components GmbH', 0x000620: u'Serial System Ltd.', 0x000621: u'Hinox, Co., Ltd.', 0x000622: u'Chung Fu Chen Yeh Enterprise Corp.', 0x000623: u'MGE UPS Systems France', 0x000624: u'Gentner Communications Corp.', 0x000625: u'The Linksys Group, Inc.', 0x000626: u'MWE GmbH', 0x000627: u'Uniwide Technologies, Inc.', 0x000628: u'Cisco Systems, Inc.', 0x000629: u'IBM CORPORATION', 0x00062A: u'Cisco Systems, Inc.', 0x00062B: u'INTRASERVER TECHNOLOGY', 0x00062C: u'Network Robots, Inc.', 0x00062D: u'TouchStar Technologies, L.L.C.', 0x00062E: u'Aristos Logic Corp.', 0x00062F: u'Pivotech Systems Inc.', 0x000630: u'Adtranz Sweden', 0x000631: u'Optical Solutions, Inc.', 0x000632: u'Mesco Engineering GmbH', 0x000633: u'Smiths Heimann Biometric Systems', 0x000634: u'GTE Airfone Inc.', 0x000635: u'PacketAir Networks, Inc.', 0x000636: u'Jedai Broadband Networks', 0x000637: u'Toptrend-Meta Information (ShenZhen) Inc.', 0x000638: u'Sungjin C&C Co., Ltd.', 0x000639: u'Newtec', 0x00063A: u'Dura Micro, Inc.', 0x00063B: u'Arcturus Networks, Inc.', 0x00063C: u'NMI 
Electronics Ltd', 0x00063D: u'Microwave Data Systems Inc.', 0x00063E: u'Opthos Inc.', 0x00063F: u'Everex Communications Inc.', 0x000640: u'White Rock Networks', 0x000641: u'ITCN', 0x000642: u'Genetel Systems Inc.', 0x000643: u'SONO Computer Co., Ltd.', 0x000644: u'NEIX Inc.', 0x000645: u'Meisei Electric Co. Ltd.', 0x000646: u'ShenZhen XunBao Network Technology Co Ltd', 0x000647: u'Etrali S.A.', 0x000648: u'Seedsware, Inc.', 0x000649: u'Quante', 0x00064A: u'Honeywell Co., Ltd. (KOREA)', 0x00064B: u'Alexon Co., Ltd.', 0x00064C: u'Invicta Networks, Inc.', 0x00064D: u'Sencore', 0x00064E: u'Broad Net Technology Inc.', 0x00064F: u'PRO-NETS Technology Corporation', 0x000650: u'Tiburon Networks, Inc.', 0x000651: u'Aspen Networks Inc.', 0x000652: u'Cisco Systems, Inc.', 0x000653: u'Cisco Systems, Inc.', 0x000654: u'Maxxio Technologies', 0x000655: u'Yipee, Inc.', 0x000656: u'Tactel AB', 0x000657: u'Market Central, Inc.', 0x000658: u'Helmut Fischer GmbH & Co. KG', 0x000659: u'EAL (Apeldoorn) B.V.', 0x00065A: u'Strix Systems', 0x00065B: u'Dell Computer Corp.', 0x00065C: u'Malachite Technologies, Inc.', 0x00065D: u'Heidelberg Web Systems', 0x00065E: u'Photuris, Inc.', 0x00065F: u'ECI Telecom - NGTS Ltd.', 0x000660: u'NADEX Co., Ltd.', 0x000661: u'NIA Home Technologies Corp.', 0x000662: u'MBM Technology Ltd.', 0x000663: u'Human Technology Co., Ltd.', 0x000664: u'Fostex Corporation', 0x000665: u'Sunny Giken, Inc.', 0x000666: u'Roving Networks', 0x000667: u'Tripp Lite', 0x000668: u'Vicon Industries Inc.', 0x000669: u'Datasound Laboratories Ltd', 0x00066A: u'InfiniCon Systems, Inc.', 0x00066B: u'Sysmex Corporation', 0x00066C: u'Robinson Corporation', 0x00066D: u'Compuprint S.P.A.', 0x00066E: u'Delta Electronics, Inc.', 0x00066F: u'Korea Data Systems', 0x000670: u'Upponetti Oy', 0x000671: u'Softing AG', 0x000672: u'Netezza', 0x000673: u'Optelecom-nkf', 0x000674: u'Spectrum Control, Inc.', 0x000675: u'Banderacom, Inc.', 0x000676: u'Novra Technologies Inc.', 0x000677: u'SICK AG', 0x000678: u'Marantz Japan, Inc.', 0x000679: u'Konami Corporation', 0x00067A: u'JMP Systems', 0x00067B: u'Toplink C&C Corporation', 0x00067C: u'CISCO SYSTEMS, INC.', 0x00067D: u'Takasago Ltd.', 0x00067E: u'WinCom Systems, Inc.', 0x00067F: u'Rearden Steel Technologies', 0x000680: u'Card Access, Inc.', 0x000681: u'Goepel Electronic GmbH', 0x000682: u'Convedia', 0x000683: u'Bravara Communications, Inc.', 0x000684: u'Biacore AB', 0x000685: u'NetNearU Corporation', 0x000686: u'ZARDCOM Co., Ltd.', 0x000687: u'Omnitron Systems Technology, Inc.', 0x000688: u'Telways Communication Co., Ltd.', 0x000689: u'yLez Technologies Pte Ltd', 0x00068A: u'NeuronNet Co. Ltd. 
R&D Center', 0x00068B: u'AirRunner Technologies, Inc.', 0x00068C: u'3Com Corporation', 0x00068D: u'SEPATON, Inc.', 0x00068E: u'HID Corporation', 0x00068F: u'Telemonitor, Inc.', 0x000690: u'Euracom Communication GmbH', 0x000691: u'PT Inovacao', 0x000692: u'Intruvert Networks, Inc.', 0x000693: u'Flexus Computer Technology, Inc.', 0x000694: u'Mobillian Corporation', 0x000695: u'Ensure Technologies, Inc.', 0x000696: u'Advent Networks', 0x000697: u'R & D Center', 0x000698: u'egnite Software GmbH', 0x000699: u'Vida Design Co.', 0x00069A: u'e & Tel', 0x00069B: u'AVT Audio Video Technologies GmbH', 0x00069C: u'Transmode Systems AB', 0x00069D: u'Petards Mobile Intelligence', 0x00069E: u'UNIQA, Inc.', 0x00069F: u'Kuokoa Networks', 0x0006A0: u'Mx Imaging', 0x0006A1: u'Celsian Technologies, Inc.', 0x0006A2: u'Microtune, Inc.', 0x0006A3: u'Bitran Corporation', 0x0006A4: u'INNOWELL Corp.', 0x0006A5: u'PINON Corp.', 0x0006A6: u'Artistic Licence (UK) Ltd', 0x0006A7: u'Primarion', 0x0006A8: u'KC Technology, Inc.', 0x0006A9: u'Universal Instruments Corp.', 0x0006AA: u'Miltope Corporation', 0x0006AB: u'W-Link Systems, Inc.', 0x0006AC: u'Intersoft Co.', 0x0006AD: u'KB Electronics Ltd.', 0x0006AE: u'Himachal Futuristic Communications Ltd', 0x0006AF: u'PRIVATE', 0x0006B0: u'Comtech EF Data Corp.', 0x0006B1: u'Sonicwall', 0x0006B2: u'Linxtek Co.', 0x0006B3: u'Diagraph Corporation', 0x0006B4: u'Vorne Industries, Inc.', 0x0006B5: u'Luminent, Inc.', 0x0006B6: u'Nir-Or Israel Ltd.', 0x0006B7: u'TELEM GmbH', 0x0006B8: u'Bandspeed Pty Ltd', 0x0006B9: u'A5TEK Corp.', 0x0006BA: u'Westwave Communications', 0x0006BB: u'ATI Technologies Inc.', 0x0006BC: u'Macrolink, Inc.', 0x0006BD: u'BNTECHNOLOGY Co., Ltd.', 0x0006BE: u'Baumer Optronic GmbH', 0x0006BF: u'Accella Technologies Co., Ltd.', 0x0006C0: u'United Internetworks, Inc.', 0x0006C1: u'CISCO SYSTEMS, INC.', 0x0006C2: u'Smartmatic Corporation', 0x0006C3: u'Schindler Elevators Ltd.', 0x0006C4: u'Piolink Inc.', 0x0006C5: u'INNOVI Technologies Limited', 0x0006C6: u'lesswire AG', 0x0006C7: u'RFNET Technologies Pte Ltd (S)', 0x0006C8: u'Sumitomo Metal Micro Devices, Inc.', 0x0006C9: u'Technical Marketing Research, Inc.', 0x0006CA: u'American Computer & Digital Components, Inc. (ACDC)', 0x0006CB: u'Jotron Electronics A/S', 0x0006CC: u'JMI Electronics Co., Ltd.', 0x0006CD: u'Kodak IL Ltd.', 0x0006CE: u'DATENO', 0x0006CF: u'Thales Avionics In-Flight Systems, LLC', 0x0006D0: u'Elgar Electronics Corp.', 0x0006D1: u'Tahoe Networks, Inc.', 0x0006D2: u'Tundra Semiconductor Corp.', 0x0006D3: u'Alpha Telecom, Inc. U.S.A.', 0x0006D4: u'Interactive Objects, Inc.', 0x0006D5: u'Diamond Systems Corp.', 0x0006D6: u'Cisco Systems, Inc.', 0x0006D7: u'Cisco Systems, Inc.', 0x0006D8: u'Maple Optical Systems', 0x0006D9: u'IPM-Net S.p.A.', 0x0006DA: u'ITRAN Communications Ltd.', 0x0006DB: u'ICHIPS Co., Ltd.', 0x0006DC: u'Syabas Technology (Amquest)', 0x0006DD: u'AT & T Laboratories - Cambridge Ltd', 0x0006DE: u'Flash Technology', 0x0006DF: u'AIDONIC Corporation', 0x0006E0: u'MAT Co., Ltd.', 0x0006E1: u'Techno Trade s.a', 0x0006E2: u'Ceemax Technology Co., Ltd.', 0x0006E3: u'Quantitative Imaging Corporation', 0x0006E4: u'Citel Technologies Ltd.', 0x0006E5: u'Fujian Newland Computer Ltd. Co.', 0x0006E6: u'DongYang Telecom Co., Ltd.', 0x0006E7: u'Bit Blitz Communications Inc.', 0x0006E8: u'Optical Network Testing, Inc.', 0x0006E9: u'Intime Corp.', 0x0006EA: u'ELZET80 Mikrocomputer GmbH&Co. 
KG', 0x0006EB: u'Global Data', 0x0006EC: u'M/A COM Private Radio System Inc.', 0x0006ED: u'Inara Networks', 0x0006EE: u'Shenyang Neu-era Information & Technology Stock Co., Ltd', 0x0006EF: u'Maxxan Systems, Inc.', 0x0006F0: u'Digeo, Inc.', 0x0006F1: u'Optillion', 0x0006F2: u'Platys Communications', 0x0006F3: u'AcceLight Networks', 0x0006F4: u'Prime Electronics & Satellitics Inc.', 0x0006F8: u'CPU Technology, Inc.', 0x0006F9: u'Mitsui Zosen Systems Research Inc.', 0x0006FA: u'IP SQUARE Co, Ltd.', 0x0006FB: u'Hitachi Printing Solutions, Ltd.', 0x0006FC: u'Fnet Co., Ltd.', 0x0006FD: u'Comjet Information Systems Corp.', 0x0006FE: u'Celion Networks, Inc.', 0x0006FF: u'Sheba Systems Co., Ltd.', 0x000700: u'Zettamedia Korea', 0x000701: u'RACAL-DATACOM', 0x000702: u'Varian Medical Systems', 0x000703: u'CSEE Transport', 0x000705: u'Endress & Hauser GmbH & Co', 0x000706: u'Sanritz Corporation', 0x000707: u'Interalia Inc.', 0x000708: u'Bitrage Inc.', 0x000709: u'Westerstrand Urfabrik AB', 0x00070A: u'Unicom Automation Co., Ltd.', 0x00070B: u'Octal, SA', 0x00070C: u'SVA-Intrusion.com Co. Ltd.', 0x00070D: u'Cisco Systems Inc.', 0x00070E: u'Cisco Systems Inc.', 0x00070F: u'Fujant, Inc.', 0x000710: u'Adax, Inc.', 0x000711: u'Acterna', 0x000712: u'JAL Information Technology', 0x000713: u'IP One, Inc.', 0x000714: u'Brightcom', 0x000715: u'General Research of Electronics, Inc.', 0x000716: u'J & S Marine Ltd.', 0x000717: u'Wieland Electric GmbH', 0x000718: u'iCanTek Co., Ltd.', 0x000719: u'Mobiis Co., Ltd.', 0x00071A: u'Finedigital Inc.', 0x00071B: u'Position Technology Inc.', 0x00071C: u'AT&T Fixed Wireless Services', 0x00071D: u'Satelsa Sistemas Y Aplicaciones De Telecomunicaciones, S.A.', 0x00071E: u'Tri-M Engineering / Nupak Dev. Corp.', 0x00071F: u'European Systems Integration', 0x000720: u'Trutzschler GmbH & Co. KG', 0x000721: u'Formac Elektronik GmbH', 0x000722: u'Nielsen Media Research', 0x000723: u'ELCON Systemtechnik GmbH', 0x000724: u'Telemax Co., Ltd.', 0x000725: u'Bematech International Corp.', 0x000727: u'Zi Corporation (HK) Ltd.', 0x000728: u'Neo Telecom', 0x000729: u'Kistler Instrumente AG', 0x00072A: u'Innovance Networks', 0x00072B: u'Jung Myung Telecom Co., Ltd.', 0x00072C: u'Fabricom', 0x00072D: u'CNSystems', 0x00072E: u'North Node AB', 0x00072F: u'Intransa, Inc.', 0x000730: u'Hutchison OPTEL Telecom Technology Co., Ltd.', 0x000731: u'Spiricon, Inc.', 0x000732: u'AAEON Technology Inc.', 0x000733: u'DANCONTROL Engineering', 0x000734: u'ONStor, Inc.', 0x000735: u'Flarion Technologies, Inc.', 0x000736: u'Data Video Technologies Co., Ltd.', 0x000737: u'Soriya Co. Ltd.', 0x000738: u'Young Technology Co., Ltd.', 0x000739: u'Motion Media Technology Ltd.', 0x00073A: u'Inventel Systemes', 0x00073B: u'Tenovis GmbH & Co KG', 0x00073C: u'Telecom Design', 0x00073D: u'Nanjing Postel Telecommunications Co., Ltd.', 0x00073E: u'China Great-Wall Computer Shenzhen Co., Ltd.', 0x00073F: u'Woojyun Systec Co., Ltd.', 0x000740: u'Melco Inc.', 0x000741: u'Sierra Automated Systems', 0x000742: u'Current Technologies', 0x000743: u'Chelsio Communications', 0x000744: u'Unico, Inc.', 0x000745: u'Radlan Computer Communications Ltd.', 0x000746: u'TURCK, Inc.', 0x000747: u'Mecalc', 0x000748: u'The Imaging Source Europe', 0x000749: u'CENiX Inc.', 0x00074A: u'Carl Valentin GmbH', 0x00074B: u'Daihen Corporation', 0x00074C: u'Beicom Inc.', 0x00074D: u'Zebra Technologies Corp.', 0x00074E: u'Naughty boy co., Ltd.', 0x00074F: u'Cisco Systems, Inc.', 0x000750: u'Cisco Systems, Inc.', 0x000751: u'm.u.t. 
- GmbH', 0x000752: u'Rhythm Watch Co., Ltd.', 0x000753: u'Beijing Qxcomm Technology Co., Ltd.', 0x000754: u'Xyterra Computing, Inc.', 0x000755: u'Lafon SA', 0x000756: u'Juyoung Telecom', 0x000757: u'Topcall International AG', 0x000758: u'Dragonwave', 0x000759: u'Boris Manufacturing Corp.', 0x00075A: u'Air Products and Chemicals, Inc.', 0x00075B: u'Gibson Guitars', 0x00075C: u'Eastman Kodak Company', 0x00075D: u'Celleritas Inc.', 0x00075E: u'Ametek Power Instruments', 0x00075F: u'VCS Video Communication Systems AG', 0x000760: u'TOMIS Information & Telecom Corp.', 0x000761: u'Logitech SA', 0x000762: u'Group Sense Limited', 0x000763: u'Sunniwell Cyber Tech. Co., Ltd.', 0x000764: u'YoungWoo Telecom Co. Ltd.', 0x000765: u'Jade Quantum Technologies, Inc.', 0x000766: u'Chou Chin Industrial Co., Ltd.', 0x000767: u'Yuxing Electronics Company Limited', 0x000768: u'Danfoss A/S', 0x000769: u'Italiana Macchi SpA', 0x00076A: u'NEXTEYE Co., Ltd.', 0x00076B: u'Stralfors AB', 0x00076C: u'Daehanet, Inc.', 0x00076D: u'Flexlight Networks', 0x00076E: u'Sinetica Corporation Limited', 0x00076F: u'Synoptics Limited', 0x000770: u'Locusnetworks Corporation', 0x000771: u'Embedded System Corporation', 0x000772: u'Alcatel Shanghai Bell Co., Ltd.', 0x000773: u'Ascom Powerline Communications Ltd.', 0x000774: u'GuangZhou Thinker Technology Co. Ltd.', 0x000775: u'Valence Semiconductor, Inc.', 0x000776: u'Federal APD', 0x000777: u'Motah Ltd.', 0x000778: u'GERSTEL GmbH & Co. KG', 0x000779: u'Sungil Telecom Co., Ltd.', 0x00077A: u'Infoware System Co., Ltd.', 0x00077B: u'Millimetrix Broadband Networks', 0x00077C: u'OnTime Networks', 0x00077E: u'Elrest GmbH', 0x00077F: u'J Communications Co., Ltd.', 0x000780: u'Bluegiga Technologies OY', 0x000781: u'Itron Inc.', 0x000782: u'Nauticus Networks, Inc.', 0x000783: u'SynCom Network, Inc.', 0x000784: u'Cisco Systems Inc.', 0x000785: u'Cisco Systems Inc.', 0x000786: u'Wireless Networks Inc.', 0x000787: u'Idea System Co., Ltd.', 0x000788: u'Clipcomm, Inc.', 0x000789: u'Eastel Systems Corporation', 0x00078A: u'Mentor Data System Inc.', 0x00078B: u'Wegener Communications, Inc.', 0x00078C: u'Elektronikspecialisten i Borlange AB', 0x00078D: u'NetEngines Ltd.', 0x00078E: u'Garz & Friche GmbH', 0x00078F: u'Emkay Innovative Products', 0x000790: u'Tri-M Technologies (s) Limited', 0x000791: u'International Data Communications, Inc.', 0x000792: u'Suetron Electronic GmbH', 0x000793: u'Shin Satellite Public Company Limited', 0x000794: u'Simple Devices, Inc.', 0x000795: u'Elitegroup Computer System Co. (ECS)', 0x000796: u'LSI Systems, Inc.', 0x000797: u'Netpower Co., Ltd.', 0x000798: u'Selea SRL', 0x000799: u'Tipping Point Technologies, Inc.', 0x00079A: u'SmartSight Networks Inc.', 0x00079B: u'Aurora Networks', 0x00079C: u'Golden Electronics Technology Co., Ltd.', 0x00079D: u'Musashi Co., Ltd.', 0x00079E: u'Ilinx Co., Ltd.', 0x00079F: u'Action Digital Inc.', 0x0007A0: u'e-Watch Inc.', 0x0007A1: u'VIASYS Healthcare GmbH', 0x0007A2: u'Opteon Corporation', 0x0007A3: u'Ositis Software, Inc.', 0x0007A4: u'GN Netcom Ltd.', 0x0007A5: u'Y.D.K Co. 
Ltd.', 0x0007A6: u'Home Automation, Inc.', 0x0007A7: u'A-Z Inc.', 0x0007A8: u'Haier Group Technologies Ltd.', 0x0007A9: u'Novasonics', 0x0007AA: u'Quantum Data Inc.', 0x0007AC: u'Eolring', 0x0007AD: u'Pentacon GmbH Foto-und Feinwerktechnik', 0x0007AE: u'Britestream Networks, Inc.', 0x0007AF: u'N-Tron Corp.', 0x0007B0: u'Office Details, Inc.', 0x0007B1: u'Equator Technologies', 0x0007B2: u'Transaccess S.A.', 0x0007B3: u'Cisco Systems Inc.', 0x0007B4: u'Cisco Systems Inc.', 0x0007B5: u'Any One Wireless Ltd.', 0x0007B6: u'Telecom Technology Ltd.', 0x0007B7: u'Samurai Ind. Prods Eletronicos Ltda', 0x0007B8: u'American Predator Corp.', 0x0007B9: u'Ginganet Corporation', 0x0007BA: u'UTStarcom, Inc.', 0x0007BB: u'Candera Inc.', 0x0007BC: u'Identix Inc.', 0x0007BD: u'Radionet Ltd.', 0x0007BE: u'DataLogic SpA', 0x0007BF: u'Armillaire Technologies, Inc.', 0x0007C0: u'NetZerver Inc.', 0x0007C1: u'Overture Networks, Inc.', 0x0007C2: u'Netsys Telecom', 0x0007C3: u'Cirpack', 0x0007C4: u'JEAN Co. Ltd.', 0x0007C5: u'Gcom, Inc.', 0x0007C6: u'VDS Vosskuhler GmbH', 0x0007C7: u'Synectics Systems Limited', 0x0007C8: u'Brain21, Inc.', 0x0007C9: u'Technol Seven Co., Ltd.', 0x0007CA: u'Creatix Polymedia Ges Fur Kommunikaitonssysteme', 0x0007CB: u'Freebox SA', 0x0007CC: u'Kaba Benzing GmbH', 0x0007CD: u'NMTEL Co., Ltd.', 0x0007CE: u'Cabletime Limited', 0x0007CF: u'Anoto AB', 0x0007D0: u'Automat Engenharia de Automaoa Ltda.', 0x0007D1: u'Spectrum Signal Processing Inc.', 0x0007D2: u'Logopak Systeme', 0x0007D3: u'Stork Digital Imaging B.V.', 0x0007D4: u'Zhejiang Yutong Network Communication Co Ltd.', 0x0007D5: u'3e Technologies Int;., Inc.', 0x0007D6: u'Commil Ltd.', 0x0007D7: u'Caporis Networks AG', 0x0007D8: u'Hitron Systems Inc.', 0x0007D9: u'Splicecom', 0x0007DA: u'Neuro Telecom Co., Ltd.', 0x0007DB: u'Kirana Networks, Inc.', 0x0007DC: u'Atek Co, Ltd.', 0x0007DD: u'Cradle Technologies', 0x0007DE: u'eCopilt AB', 0x0007DF: u'Vbrick Systems Inc.', 0x0007E0: u'Palm Inc.', 0x0007E1: u'WIS Communications Co. Ltd.', 0x0007E2: u'Bitworks, Inc.', 0x0007E3: u'Navcom Technology, Inc.', 0x0007E4: u'SoftRadio Co., Ltd.', 0x0007E5: u'Coup Corporation', 0x0007E6: u'edgeflow Canada Inc.', 0x0007E7: u'FreeWave Technologies', 0x0007E8: u'St. 
Bernard Software', 0x0007E9: u'Intel Corporation', 0x0007EA: u'Massana, Inc.', 0x0007EB: u'Cisco Systems Inc.', 0x0007EC: u'Cisco Systems Inc.', 0x0007ED: u'Altera Corporation', 0x0007EE: u'telco Informationssysteme GmbH', 0x0007EF: u'Lockheed Martin Tactical Systems', 0x0007F0: u'LogiSync Corporation', 0x0007F1: u'TeraBurst Networks Inc.', 0x0007F2: u'IOA Corporation', 0x0007F3: u'Thinkengine Networks', 0x0007F4: u'Eletex Co., Ltd.', 0x0007F5: u'Bridgeco Co AG', 0x0007F6: u'Qqest Software Systems', 0x0007F7: u'Galtronics', 0x0007F8: u'ITDevices, Inc.', 0x0007F9: u'Phonetics, Inc.', 0x0007FA: u'ITT Co., Ltd.', 0x0007FB: u'Giga Stream UMTS Technologies GmbH', 0x0007FC: u'Adept Systems Inc.', 0x0007FD: u'LANergy Ltd.', 0x0007FE: u'Rigaku Corporation', 0x0007FF: u'Gluon Networks', 0x000800: u'MULTITECH SYSTEMS, INC.', 0x000801: u'HighSpeed Surfing Inc.', 0x000802: u'Compaq Computer Corporation', 0x000803: u'Cos Tron', 0x000804: u'ICA Inc.', 0x000805: u'Techno-Holon Corporation', 0x000806: u'Raonet Systems, Inc.', 0x000807: u'Access Devices Limited', 0x000808: u'PPT Vision, Inc.', 0x000809: u'Systemonic AG', 0x00080A: u'Espera-Werke GmbH', 0x00080B: u'Birka BPA Informationssystem AB', 0x00080C: u'VDA elettronica SrL', 0x00080D: u'Toshiba', 0x00080E: u'Motorola, BCS', 0x00080F: u'Proximion Fiber Optics AB', 0x000810: u'Key Technology, Inc.', 0x000811: u'VOIX Corporation', 0x000812: u'GM-2 Corporation', 0x000813: u'Diskbank, Inc.', 0x000814: u'TIL Technologies', 0x000815: u'CATS Co., Ltd.', 0x000816: u'Bluetags A/S', 0x000817: u'EmergeCore Networks LLC', 0x000818: u'Pixelworks, Inc.', 0x000819: u'Banksys', 0x00081A: u'Sanrad Intelligence Storage Communications (2000) Ltd.', 0x00081B: u'Windigo Systems', 0x00081C: u'@pos.com', 0x00081D: u'Ipsil, Incorporated', 0x00081E: u'Repeatit AB', 0x00081F: u'Pou Yuen Tech Corp. Ltd.', 0x000820: u'Cisco Systems Inc.', 0x000821: u'Cisco Systems Inc.', 0x000822: u'InPro Comm', 0x000823: u'Texa Corp.', 0x000824: u'Promatek Industries Ltd.', 0x000825: u'Acme Packet', 0x000826: u'Colorado Med Tech', 0x000827: u'Pirelli Broadband Solutions', 0x000828: u'Koei Engineering Ltd.', 0x000829: u'Aval Nagasaki Corporation', 0x00082A: u'Powerwallz Network Security', 0x00082B: u'Wooksung Electronics, Inc.', 0x00082C: u'Homag AG', 0x00082D: u'Indus Teqsite Private Limited', 0x00082E: u'Multitone Electronics PLC', 0x00084E: u'DivergeNet, Inc.', 0x00084F: u'Qualstar Corporation', 0x000850: u'Arizona Instrument Corp.', 0x000851: u'Canadian Bank Note Company, Ltd.', 0x000852: u'Davolink Co. Inc.', 0x000853: u'Schleicher GmbH & Co. Relaiswerke KG', 0x000854: u'Netronix, Inc.', 0x000855: u'NASA-Goddard Space Flight Center', 0x000856: u'Gamatronic Electronic Industries Ltd.', 0x000857: u'Polaris Networks, Inc.', 0x000858: u'Novatechnology Inc.', 0x000859: u'ShenZhen Unitone Electronics Co., Ltd.', 0x00085A: u'IntiGate Inc.', 0x00085B: u'Hanbit Electronics Co., Ltd.', 0x00085C: u'Shanghai Dare Technologies Co. 
Ltd.', 0x00085D: u'Aastra', 0x00085E: u'PCO AG', 0x00085F: u'Picanol N.V.', 0x000860: u'LodgeNet Entertainment Corp.', 0x000861: u'SoftEnergy Co., Ltd.', 0x000862: u'NEC Eluminant Technologies, Inc.', 0x000863: u'Entrisphere Inc.', 0x000864: u'Fasy S.p.A.', 0x000865: u'JASCOM CO., LTD', 0x000866: u'DSX Access Systems, Inc.', 0x000867: u'Uptime Devices', 0x000868: u'PurOptix', 0x000869: u'Command-e Technology Co.,Ltd.', 0x00086A: u'Industrie Technik IPS GmbH', 0x00086B: u'MIPSYS', 0x00086C: u'Plasmon LMS', 0x00086D: u'Missouri FreeNet', 0x00086E: u'Hyglo AB', 0x00086F: u'Resources Computer Network Ltd.', 0x000870: u'Rasvia Systems, Inc.', 0x000871: u'NORTHDATA Co., Ltd.', 0x000872: u'Sorenson Technologies, Inc.', 0x000873: u'DAP Design B.V.', 0x000874: u'Dell Computer Corp.', 0x000875: u'Acorp Electronics Corp.', 0x000876: u'SDSystem', 0x000877: u'Liebert HIROSS S.p.A.', 0x000878: u'Benchmark Storage Innovations', 0x000879: u'CEM Corporation', 0x00087A: u'Wipotec GmbH', 0x00087B: u'RTX Telecom A/S', 0x00087C: u'Cisco Systems, Inc.', 0x00087D: u'Cisco Systems Inc.', 0x00087E: u'Bon Electro-Telecom Inc.', 0x00087F: u'SPAUN electronic GmbH & Co. KG', 0x000880: u'BroadTel Canada Communications inc.', 0x000881: u'DIGITAL HANDS CO.,LTD.', 0x000882: u'SIGMA CORPORATION', 0x000883: u'Hewlett-Packard Company', 0x000884: u'Index Braille AB', 0x000885: u'EMS Dr. Thomas Wuensche', 0x000886: u'Hansung Teliann, Inc.', 0x000887: u'Maschinenfabrik Reinhausen GmbH', 0x000888: u'OULLIM Information Technology Inc,.', 0x000889: u'Echostar Technologies Corp', 0x00088A: u'Minds@Work', 0x00088B: u'Tropic Networks Inc.', 0x00088C: u'Quanta Network Systems Inc.', 0x00088D: u'Sigma-Links Inc.', 0x00088E: u'Nihon Computer Co., Ltd.', 0x00088F: u'ADVANCED DIGITAL TECHNOLOGY', 0x000890: u'AVILINKS SA', 0x000891: u'Lyan Inc.', 0x000892: u'EM Solutions', 0x000893: u'LE INFORMATION COMMUNICATION INC.', 0x000894: u'InnoVISION Multimedia Ltd.', 0x000895: u'DIRC Technologie GmbH & Co.KG', 0x000896: u'Printronix, Inc.', 0x000897: u'Quake Technologies', 0x000898: u'Gigabit Optics Corporation', 0x000899: u'Netbind, Inc.', 0x00089A: u'Alcatel Microelectronics', 0x00089B: u'ICP Electronics Inc.', 0x00089C: u'Elecs Industry Co., Ltd.', 0x00089D: u'UHD-Elektronik', 0x00089E: u'Beijing Enter-Net co.LTD', 0x00089F: u'EFM Networks', 0x0008A0: u'Stotz Feinmesstechnik GmbH', 0x0008A1: u'CNet Technology Inc.', 0x0008A2: u'ADI Engineering, Inc.', 0x0008A3: u'Cisco Systems', 0x0008A4: u'Cisco Systems', 0x0008A5: u'Peninsula Systems Inc.', 0x0008A6: u'Multiware & Image Co., Ltd.', 0x0008A7: u'iLogic Inc.', 0x0008A8: u'Systec Co., Ltd.', 0x0008A9: u'SangSang Technology, Inc.', 0x0008AA: u'KARAM', 0x0008AB: u'EnerLinx.com, Inc.', 0x0008AC: u'PRIVATE', 0x0008AD: u'Toyo-Linx Co., Ltd.', 0x0008AE: u'PacketFront Sweden AB', 0x0008AF: u'Novatec Corporation', 0x0008B0: u'BKtel communications GmbH', 0x0008B1: u'ProQuent Systems', 0x0008B2: u'SHENZHEN COMPASS TECHNOLOGY DEVELOPMENT CO.,LTD', 0x0008B3: u'Fastwel', 0x0008B4: u'SYSPOL', 0x0008B5: u'TAI GUEN ENTERPRISE CO., LTD', 0x0008B6: u'RouteFree, Inc.', 0x0008B7: u'HIT Incorporated', 0x0008B8: u'E.F. 
Johnson', 0x0008B9: u'KAON MEDIA Co., Ltd.', 0x0008BA: u'Erskine Systems Ltd', 0x0008BB: u'NetExcell', 0x0008BC: u'Ilevo AB', 0x0008BD: u'TEPG-US', 0x0008BE: u'XENPAK MSA Group', 0x0008BF: u'Aptus Elektronik AB', 0x0008C0: u'ASA SYSTEMS', 0x0008C1: u'Avistar Communications Corporation', 0x0008C2: u'Cisco Systems', 0x0008C3: u'Contex A/S', 0x0008C4: u'Hikari Co.,Ltd.', 0x0008C5: u'Liontech Co., Ltd.', 0x0008C6: u'Philips Consumer Communications', 0x0008C7: u'COMPAQ COMPUTER CORPORATION', 0x0008C8: u'Soneticom, Inc.', 0x0008C9: u'TechniSat Digital GmbH', 0x0008CA: u'TwinHan Technology Co.,Ltd', 0x0008CB: u'Zeta Broadband Inc.', 0x0008CC: u'Remotec, Inc.', 0x0008CD: u'With-Net Inc', 0x0008CE: u'IPMobileNet Inc.', 0x0008CF: u'Nippon Koei Power Systems Co., Ltd.', 0x0008D0: u'Musashi Engineering Co., LTD.', 0x0008D1: u'KAREL INC.', 0x0008D2: u'ZOOM Networks Inc.', 0x0008D3: u'Hercules Technologies S.A.', 0x0008D4: u'IneoQuest Technologies, Inc', 0x0008D5: u'Vanguard Managed Solutions', 0x0008D6: u'HASSNET Inc.', 0x0008D7: u'HOW CORPORATION', 0x0008D8: u'Dowkey Microwave', 0x0008D9: u'Mitadenshi Co.,LTD', 0x0008DA: u'SofaWare Technologies Ltd.', 0x0008DB: u'Corrigent Systems', 0x0008DC: u'Wiznet', 0x0008DD: u'Telena Communications, Inc.', 0x0008DE: u'3UP Systems', 0x0008DF: u'Alistel Inc.', 0x0008E0: u'ATO Technology Ltd.', 0x0008E1: u'Barix AG', 0x0008E2: u'Cisco Systems', 0x0008E3: u'Cisco Systems', 0x0008E4: u'Envenergy Inc', 0x0008E5: u'IDK Corporation', 0x0008E6: u'Littlefeet', 0x0008E7: u'SHI ControlSystems,Ltd.', 0x0008E8: u'Excel Master Ltd.', 0x0008E9: u'NextGig', 0x0008EA: u'Motion Control Engineering, Inc', 0x0008EB: u'ROMWin Co.,Ltd.', 0x0008EC: u'Zonu, Inc.', 0x0008ED: u'ST&T Instrument Corp.', 0x0008EE: u'Logic Product Development', 0x0008EF: u'DIBAL,S.A.', 0x0008F0: u'Next Generation Systems, Inc.', 0x0008F1: u'Voltaire', 0x0008F2: u'C&S Technology', 0x0008F3: u'WANY', 0x0008F4: u'Bluetake Technology Co., Ltd.', 0x0008F5: u'YESTECHNOLOGY Co.,Ltd.', 0x0008F6: u'SUMITOMO ELECTRIC HIGHTECHS.co.,ltd.', 0x0008F7: u'Hitachi Ltd, Semiconductor & Integrated Circuits Gr', 0x0008F8: u'Guardall Ltd', 0x0008F9: u'Padcom, Inc.', 0x0008FA: u'Karl E.Brinkmann GmbH', 0x0008FB: u'SonoSite, Inc.', 0x0008FC: u'Gigaphoton Inc.', 0x0008FD: u'BlueKorea Co., Ltd.', 0x0008FE: u'UNIK C&C Co.,Ltd.', 0x0008FF: u'Trilogy Communications Ltd', 0x000900: u'TMT', 0x000901: u'Shenzhen Shixuntong Information & Technoligy Co', 0x000902: u'Redline Communications Inc.', 0x000903: u'Panasas, Inc', 0x000904: u'MONDIAL electronic', 0x000905: u'iTEC Technologies Ltd.', 0x000906: u'Esteem Networks', 0x000907: u'Chrysalis Development', 0x000908: u'VTech Technology Corp.', 0x000909: u'Telenor Connect A/S', 0x00090A: u'SnedFar Technology Co., Ltd.', 0x00090B: u'MTL Instruments PLC', 0x00090C: u'Mayekawa Mfg. Co. 
Ltd.', 0x00090D: u'LEADER ELECTRONICS CORP.', 0x00090E: u'Helix Technology Inc.', 0x00090F: u'Fortinet Inc.', 0x000910: u'Simple Access Inc.', 0x000911: u'Cisco Systems', 0x000912: u'Cisco Systems', 0x000913: u'SystemK Corporation', 0x000914: u'COMPUTROLS INC.', 0x000915: u'CAS Corp.', 0x000916: u'Listman Home Technologies, Inc.', 0x000917: u'WEM Technology Inc', 0x000918: u'SAMSUNG TECHWIN CO.,LTD', 0x000919: u'MDS Gateways', 0x00091A: u'Macat Optics & Electronics Co., Ltd.', 0x00091B: u'Digital Generation Inc.', 0x00091C: u'CacheVision, Inc', 0x00091D: u'Proteam Computer Corporation', 0x00091E: u'Firstech Technology Corp.', 0x00091F: u'A&D Co., Ltd.', 0x000920: u'EpoX COMPUTER CO.,LTD.', 0x000921: u'Planmeca Oy', 0x000922: u'Touchless Sensor Technology AG', 0x000923: u'Heaman System Co., Ltd', 0x000924: u'Telebau GmbH', 0x000925: u'VSN Systemen BV', 0x000926: u'YODA COMMUNICATIONS, INC.', 0x000927: u'TOYOKEIKI CO.,LTD.', 0x000928: u'Telecore Inc', 0x000929: u'Sanyo Industries (UK) Limited', 0x00092A: u'MYTECS Co.,Ltd.', 0x00092B: u'iQstor Networks, Inc.', 0x00092C: u'Hitpoint Inc.', 0x00092D: u'High Tech Computer, Corp.', 0x00092E: u'B&Tech System Inc.', 0x00092F: u'Akom Technology Corporation', 0x000930: u'AeroConcierge Inc.', 0x000931: u'Future Internet, Inc.', 0x000932: u'Omnilux', 0x000933: u'OPTOVALLEY Co. Ltd.', 0x000934: u'Dream-Multimedia-Tv GmbH', 0x000935: u'Sandvine Incorporated', 0x000936: u'Ipetronik GmbH & Co.KG', 0x000937: u'Inventec Appliance Corp', 0x000938: u'Allot Communications', 0x000939: u'ShibaSoku Co.,Ltd.', 0x00093A: u'Molex Fiber Optics', 0x00093B: u'HYUNDAI NETWORKS INC.', 0x00093C: u'Jacques Technologies P/L', 0x00093D: u'Newisys,Inc.', 0x00093E: u'C&I Technologies', 0x00093F: u'Double-Win Enterpirse CO., LTD', 0x000940: u'AGFEO GmbH & Co. KG', 0x000941: u'Allied Telesis K.K.', 0x000942: u'CRESCO, LTD.', 0x000943: u'Cisco Systems', 0x000944: u'Cisco Systems', 0x000945: u'Palmmicro Communications Inc', 0x000946: u'Cluster Labs GmbH', 0x000947: u'Aztek, Inc.', 0x000948: u'Vista Control Systems, Corp.', 0x000949: u'Glyph Technologies Inc.', 0x00094A: u'Homenet Communications', 0x00094B: u'FillFactory NV', 0x00094C: u'Communication Weaver Co.,Ltd.', 0x00094D: u'Braintree Communications Pty Ltd', 0x00094E: u'BARTECH SYSTEMS INTERNATIONAL, INC', 0x00094F: u'elmegt GmbH & Co. KG', 0x000950: u'Independent Storage Corporation', 0x000951: u'Apogee Instruments, Inc', 0x000952: u'Auerswald GmbH & Co. KG', 0x000953: u'Linkage System Integration Co.Ltd.', 0x000954: u'AMiT spol. s. r. o.', 0x000955: u'Young Generation International Corp.', 0x000956: u'Network Systems Group, Ltd. (NSG)', 0x000957: u'Supercaller, Inc.', 0x000958: u'INTELNET S.A.', 0x000959: u'Sitecsoft', 0x00095A: u'RACEWOOD TECHNOLOGY', 0x00095B: u'Netgear, Inc.', 0x00095C: u'Philips Medical Systems - Cardiac and Monitoring Systems (CM', 0x00095D: u'Dialogue Technology Corp.', 0x00095E: u'Masstech Group Inc.', 0x00095F: u'Telebyte, Inc.', 0x000960: u'YOZAN Inc.', 0x000961: u'Switchgear and Instrumentation Ltd', 0x000962: u'Filetrac AS', 0x000963: u'Dominion Lasercom Inc.', 0x000964: u'Hi-Techniques', 0x000965: u'PRIVATE', 0x000966: u'Thales Navigation', 0x000967: u'Tachyon, Inc', 0x000968: u'TECHNOVENTURE, INC.', 0x000969: u'Meret Optical Communications', 0x00096A: u'Cloverleaf Communications Inc.', 0x00096B: u'IBM Corporation', 0x00096C: u'Imedia Semiconductor Corp.', 0x00096D: u'Powernet Technologies Corp.', 0x00096E: u'GIANT ELECTRONICS LTD.', 0x00096F: u'Beijing Zhongqing Elegant Tech. 
Corp.,Limited', 0x000970: u'Vibration Research Corporation', 0x000971: u'Time Management, Inc.', 0x000972: u'Securebase,Inc', 0x000973: u'Lenten Technology Co., Ltd.', 0x000974: u'Innopia Technologies, Inc.', 0x000975: u'fSONA Communications Corporation', 0x000976: u'Datasoft ISDN Systems GmbH', 0x000977: u'Brunner Elektronik AG', 0x000978: u'AIJI System Co., Ltd.', 0x000979: u'Advanced Television Systems Committee, Inc.', 0x00097A: u'Louis Design Labs.', 0x00097B: u'Cisco Systems', 0x00097C: u'Cisco Systems', 0x00097D: u'SecWell Networks Oy', 0x00097E: u'IMI TECHNOLOGY CO., LTD', 0x00097F: u'Vsecure 2000 LTD.', 0x000980: u'Power Zenith Inc.', 0x000981: u'Newport Networks', 0x000982: u'Loewe Opta GmbH', 0x000983: u'Gvision Incorporated', 0x000984: u'MyCasa Network Inc.', 0x000985: u'Auto Telecom Company', 0x000986: u'Metalink LTD.', 0x000987: u'NISHI NIPPON ELECTRIC WIRE & CABLE CO.,LTD.', 0x000988: u'Nudian Electron Co., Ltd.', 0x000989: u'VividLogic Inc.', 0x00098A: u'EqualLogic Inc', 0x00098B: u'Entropic Communications, Inc.', 0x00098C: u'Option Wireless Sweden', 0x00098D: u'Velocity Semiconductor', 0x00098E: u'ipcas GmbH', 0x00098F: u'Cetacean Networks', 0x000990: u'ACKSYS Communications & systems', 0x000991: u'GE Fanuc Automation Manufacturing, Inc.', 0x000992: u'InterEpoch Technology,INC.', 0x000993: u'Visteon Corporation', 0x000994: u'Cronyx Engineering', 0x000995: u'Castle Technology Ltd', 0x000996: u'RDI', 0x000997: u'Nortel Networks', 0x000998: u'Capinfo Company Limited', 0x000999: u'CP GEORGES RENAULT', 0x00099A: u'ELMO COMPANY, LIMITED', 0x00099B: u'Western Telematic Inc.', 0x00099C: u'Naval Research Laboratory', 0x00099D: u'Haliplex Communications', 0x00099E: u'Testech, Inc.', 0x00099F: u'VIDEX INC.', 0x0009A0: u'Microtechno Corporation', 0x0009A1: u'Telewise Communications, Inc.', 0x0009A2: u'Interface Co., Ltd.', 0x0009A3: u'Leadfly Techologies Corp. Ltd.', 0x0009A4: u'HARTEC Corporation', 0x0009A5: u'HANSUNG ELETRONIC INDUSTRIES DEVELOPMENT CO., LTD', 0x0009A6: u'Ignis Optics, Inc.', 0x0009A7: u'Bang & Olufsen A/S', 0x0009A8: u'Eastmode Pte Ltd', 0x0009A9: u'Ikanos Communications', 0x0009AA: u'Data Comm for Business, Inc.', 0x0009AB: u'Netcontrol Oy', 0x0009AC: u'LANVOICE', 0x0009AD: u'HYUNDAI SYSCOMM, INC.', 0x0009AE: u'OKANO ELECTRIC CO.,LTD', 0x0009AF: u'e-generis', 0x0009B0: u'Onkyo Corporation', 0x0009B1: u'Kanematsu Electronics, Ltd.', 0x0009B2: u'L&F Inc.', 0x0009B3: u'MCM Systems Ltd', 0x0009B4: u'KISAN TELECOM CO., LTD.', 0x0009B5: u'3J Tech. 
Co., Ltd.', 0x0009B6: u'Cisco Systems', 0x0009B7: u'Cisco Systems', 0x0009B8: u'Entise Systems', 0x0009B9: u'Action Imaging Solutions', 0x0009BA: u'MAKU Informationstechik GmbH', 0x0009BB: u'MathStar, Inc.', 0x0009BC: u'Integrian, Inc.', 0x0009BD: u'Epygi Technologies, Ltd.', 0x0009BE: u'Mamiya-OP Co.,Ltd.', 0x0009BF: u'Nintendo Co.,Ltd.', 0x0009C0: u'6WIND', 0x0009C1: u'PROCES-DATA A/S', 0x0009C2: u'PRIVATE', 0x0009C3: u'NETAS', 0x0009C4: u'Medicore Co., Ltd', 0x0009C5: u'KINGENE Technology Corporation', 0x0009C6: u'Visionics Corporation', 0x0009C7: u'Movistec', 0x0009C8: u'SINAGAWA TSUSHIN KEISOU SERVICE', 0x0009C9: u'BlueWINC Co., Ltd.', 0x0009CA: u'iMaxNetworks(Shenzhen)Limited.', 0x0009CB: u'HBrain', 0x0009CC: u'Moog GmbH', 0x0009CD: u'HUDSON SOFT CO.,LTD.', 0x0009CE: u'SpaceBridge Semiconductor Corp.', 0x0009CF: u'iAd GmbH', 0x0009D0: u'Versatel Networks', 0x0009D1: u'SERANOA NETWORKS INC', 0x0009D2: u'Mai Logic Inc.', 0x0009D3: u'Western DataCom Co., Inc.', 0x0009D4: u'Transtech Networks', 0x0009D5: u'Signal Communication, Inc.', 0x0009D6: u'KNC One GmbH', 0x0009D7: u'DC Security Products', 0x0009D8: u'PRIVATE', 0x0009D9: u'Neoscale Systems, Inc', 0x0009DA: u'Control Module Inc.', 0x0009DB: u'eSpace', 0x0009DC: u'Galaxis Technology AG', 0x0009DD: u'Mavin Technology Inc.', 0x0009DE: u'Samjin Information & Communications Co., Ltd.', 0x0009DF: u'Vestel Komunikasyon Sanayi ve Ticaret A.S.', 0x0009E0: u'XEMICS S.A.', 0x0009E1: u'Gemtek Technology Co., Ltd.', 0x0009E2: u'Sinbon Electronics Co., Ltd.', 0x0009E3: u'Angel Iglesias S.A.', 0x0009E4: u'K Tech Infosystem Inc.', 0x0009E5: u'Hottinger Baldwin Messtechnik GmbH', 0x0009E6: u'Cyber Switching Inc.', 0x0009E7: u'ADC Techonology', 0x0009E8: u'Cisco Systems', 0x0009E9: u'Cisco Systems', 0x0009EA: u'YEM Inc.', 0x0009EB: u'HuMANDATA LTD.', 0x0009EC: u'Daktronics, Inc.', 0x0009ED: u'CipherOptics', 0x0009EE: u'MEIKYO ELECTRIC CO.,LTD', 0x0009EF: u'Vocera Communications', 0x0009F0: u'Shimizu Technology Inc.', 0x0009F1: u'Yamaki Electric Corporation', 0x0009F2: u'Cohu, Inc., Electronics Division', 0x0009F3: u'WELL Communication Corp.', 0x0009F4: u'Alcon Laboratories, Inc.', 0x0009F5: u'Emerson Network Power Co.,Ltd', 0x0009F6: u'Shenzhen Eastern Digital Tech Ltd.', 0x0009F7: u'SED, a division of Calian', 0x0009F8: u'UNIMO TECHNOLOGY CO., LTD.', 0x0009F9: u'ART JAPAN CO., LTD.', 0x0009FB: u'Philips Medizinsysteme Boeblingen GmbH', 0x0009FC: u'IPFLEX Inc.', 0x0009FD: u'Ubinetics Limited', 0x0009FE: u'Daisy Technologies, Inc.', 0x0009FF: u'X.net 2000 GmbH', 0x000A00: u'Mediatek Corp.', 0x000A01: u'SOHOware, Inc.', 0x000A02: u'ANNSO CO., LTD.', 0x000A03: u'ENDESA SERVICIOS, S.L.', 0x000A04: u'3Com Europe Ltd', 0x000A05: u'Widax Corp.', 0x000A06: u'Teledex LLC', 0x000A07: u'WebWayOne Ltd', 0x000A08: u'ALPINE ELECTRONICS, INC.', 0x000A09: u'TaraCom Integrated Products, Inc.', 0x000A0A: u'SUNIX Co., Ltd.', 0x000A0B: u'Sealevel Systems, Inc.', 0x000A0C: u'Scientific Research Corporation', 0x000A0D: u'MergeOptics GmbH', 0x000A0E: u'Invivo Research Inc.', 0x000A0F: u'Ilryung Telesys, Inc', 0x000A10: u'FAST media integrations AG', 0x000A11: u'ExPet Technologies, Inc', 0x000A12: u'Azylex Technology, Inc', 0x000A13: u'Silent Witness', 0x000A14: u'TECO a.s.', 0x000A15: u'Silicon Data, Inc', 0x000A16: u'Lassen Research', 0x000A17: u'NESTAR COMMUNICATIONS, INC', 0x000A18: u'Vichel Inc.', 0x000A19: u'Valere Power, Inc.', 0x000A1A: u'Imerge Ltd', 0x000A1B: u'Stream Labs', 0x000A1C: u'Bridge Information Co., Ltd.', 0x000A1D: u'Optical Communications Products 
Inc.', 0x000A1E: u'Red-M Products Limited', 0x000A1F: u'ART WARE Telecommunication Co., Ltd.', 0x000A20: u'SVA Networks, Inc.', 0x000A21: u'Integra Telecom Co. Ltd', 0x000A22: u'Amperion Inc', 0x000A23: u'Parama Networks Inc', 0x000A24: u'Octave Communications', 0x000A25: u'CERAGON NETWORKS', 0x000A26: u'CEIA S.p.A.', 0x000A27: u'Apple Computer, Inc.', 0x000A28: u'Motorola', 0x000A29: u'Pan Dacom Networking AG', 0x000A2A: u'QSI Systems Inc.', 0x000A2B: u'Etherstuff', 0x000A2C: u'Active Tchnology Corporation', 0x000A2D: u'PRIVATE', 0x000A2E: u'MAPLE NETWORKS CO., LTD', 0x000A2F: u'Artnix Inc.', 0x000A30: u'Johnson Controls-ASG', 0x000A31: u'HCV Wireless', 0x000A32: u'Xsido Corporation', 0x000A33: u'Emulex Corporation', 0x000A34: u'Identicard Systems Incorporated', 0x000A35: u'Xilinx', 0x000A36: u'Synelec Telecom Multimedia', 0x000A37: u'Procera Networks, Inc.', 0x000A38: u'Netlock Technologies, Inc.', 0x000A39: u'LoPA Information Technology', 0x000A3A: u'J-THREE INTERNATIONAL Holding Co., Ltd.', 0x000A3B: u'GCT Semiconductor, Inc', 0x000A3C: u'Enerpoint Ltd.', 0x000A3D: u'Elo Sistemas Eletronicos S.A.', 0x000A3E: u'EADS Telecom', 0x000A3F: u'Data East Corporation', 0x000A40: u'Crown Audio', 0x000A41: u'Cisco Systems', 0x000A42: u'Cisco Systems', 0x000A43: u'Chunghwa Telecom Co., Ltd.', 0x000A44: u'Avery Dennison Deutschland GmbH', 0x000A45: u'Audio-Technica Corp.', 0x000A46: u'ARO Controls SAS', 0x000A47: u'Allied Vision Technologies', 0x000A48: u'Albatron Technology', 0x000A49: u'Acopia Networks', 0x000A4A: u'Targa Systems Ltd.', 0x000A4B: u'DataPower Technology, Inc.', 0x000A4C: u'Molecular Devices Corporation', 0x000A4D: u'Noritz Corporation', 0x000A4E: u'UNITEK Electronics INC.', 0x000A4F: u'Brain Boxes Limited', 0x000A50: u'REMOTEK CORPORATION', 0x000A51: u'GyroSignal Technology Co., Ltd.', 0x000A52: u'AsiaRF Ltd.', 0x000A53: u'Intronics, Incorporated', 0x000A54: u'Laguna Hills, Inc.', 0x000A55: u'MARKEM Corporation', 0x000A56: u'HITACHI Maxell Ltd.', 0x000A57: u'Hewlett-Packard Company - Standards', 0x000A58: u'Ingenieur-Buero Freyer & Siegel', 0x000A59: u'HW server', 0x000A5A: u'GreenNET Technologies Co.,Ltd.', 0x000A5B: u'Power-One as', 0x000A5C: u'Carel s.p.a.', 0x000A5D: u'PUC Founder (MSC) Berhad', 0x000A5E: u'3COM Corporation', 0x000A5F: u'almedio inc.', 0x000A60: u'Autostar Technology Pte Ltd', 0x000A61: u'Cellinx Systems Inc.', 0x000A62: u'Crinis Networks, Inc.', 0x000A63: u'DHD GmbH', 0x000A64: u'Eracom Technologies', 0x000A65: u'GentechMedia.co.,ltd.', 0x000A66: u'MITSUBISHI ELECTRIC SYSTEM & SERVICE CO.,LTD.', 0x000A67: u'OngCorp', 0x000A68: u'SolarFlare Communications, Inc.', 0x000A69: u'SUNNY bell Technology Co., Ltd.', 0x000A6A: u'SVM Microwaves s.r.o.', 0x000A6B: u'Tadiran Telecom Business Systems LTD', 0x000A6C: u'Walchem Corporation', 0x000A6D: u'EKS Elektronikservice GmbH', 0x000A6E: u'Broadcast Technology Limited', 0x000A6F: u'ZyFLEX Technologies Inc', 0x000A70: u'MPLS Forum', 0x000A71: u'Avrio Technologies, Inc', 0x000A72: u'SimpleTech, Inc.', 0x000A73: u'Scientific Atlanta', 0x000A74: u'Manticom Networks Inc.', 0x000A75: u'Cat Electronics', 0x000A76: u'Beida Jade Bird Huaguang Technology Co.,Ltd', 0x000A77: u'Bluewire Technologies LLC', 0x000A78: u'OLITEC', 0x000A79: u'corega K.K.', 0x000A7A: u'Kyoritsu Electric Co., Ltd.', 0x000A7B: u'Cornelius Consult', 0x000A7C: u'Tecton Ltd', 0x000A7D: u'Valo, Inc.', 0x000A7E: u'The Advantage Group', 0x000A7F: u'Teradon Industries, Inc', 0x000A80: u'Telkonet Inc.', 0x000A81: u'TEIMA Audiotex S.L.', 0x000A82: u'TATSUTA SYSTEM 
ELECTRONICS CO.,LTD.', 0x000A83: u'SALTO SYSTEMS S.L.', 0x000A84: u'Rainsun Enterprise Co., Ltd.', 0x000A85: u'PLAT\'C2,Inc', 0x000A86: u'Lenze', 0x000A87: u'Integrated Micromachines Inc.', 0x000A88: u'InCypher S.A.', 0x000A89: u'Creval Systems, Inc.', 0x000A8A: u'Cisco Systems', 0x000A8B: u'Cisco Systems', 0x000A8C: u'Guardware Systems Ltd.', 0x000A8D: u'EUROTHERM LIMITED', 0x000A8E: u'Invacom Ltd', 0x000A8F: u'Aska International Inc.', 0x000A90: u'Bayside Interactive, Inc.', 0x000A91: u'HemoCue AB', 0x000A92: u'Presonus Corporation', 0x000A93: u'W2 Networks, Inc.', 0x000A94: u'ShangHai cellink CO., LTD', 0x000A95: u'Apple Computer, Inc.', 0x000A96: u'MEWTEL TECHNOLOGY INC.', 0x000A97: u'SONICblue, Inc.', 0x000A98: u'M+F Gwinner GmbH & Co', 0x000A99: u'Dataradio Inc.', 0x000A9A: u'Aiptek International Inc', 0x000A9B: u'Towa Meccs Corporation', 0x000A9C: u'Server Technology, Inc.', 0x000A9D: u'King Young Technology Co. Ltd.', 0x000A9E: u'BroadWeb Corportation', 0x000A9F: u'Pannaway Technologies, Inc.', 0x000AA0: u'Cedar Point Communications', 0x000AA1: u'V V S Limited', 0x000AA2: u'SYSTEK INC.', 0x000AA3: u'SHIMAFUJI ELECTRIC CO.,LTD.', 0x000AA4: u'SHANGHAI SURVEILLANCE TECHNOLOGY CO,LTD', 0x000AA5: u'MAXLINK INDUSTRIES LIMITED', 0x000AA6: u'Hochiki Corporation', 0x000AA7: u'FEI Company', 0x000AA8: u'ePipe Pty. Ltd.', 0x000AA9: u'Brooks Automation GmbH', 0x000AAA: u'AltiGen Communications Inc.', 0x000AAB: u'TOYOTA MACS, INC.', 0x000AAC: u'TerraTec Electronic GmbH', 0x000AAD: u'Stargames Corporation', 0x000AAE: u'Rosemount Process Analytical', 0x000AAF: u'Pipal Systems', 0x000AB0: u'LOYTEC electronics GmbH', 0x000AB1: u'GENETEC Corporation', 0x000AB2: u'Fresnel Wireless Systems', 0x000AB3: u'Fa. GIRA', 0x000AB4: u'ETIC Telecommunications', 0x000AB5: u'Digital Electronic Network', 0x000AB6: u'COMPUNETIX, INC', 0x000AB7: u'Cisco Systems', 0x000AB8: u'Cisco Systems', 0x000AB9: u'Astera Technologies Corp.', 0x000ABA: u'Arcon Technology Limited', 0x000ABB: u'Taiwan Secom Co,. Ltd', 0x000ABC: u'Seabridge Ltd.', 0x000ABD: u'Rupprecht & Patashnick Co.', 0x000ABE: u'OPNET Technologies CO., LTD.', 0x000ABF: u'HIROTA SS', 0x000AC0: u'Fuyoh Video Industry CO., LTD.', 0x000AC1: u'Futuretel', 0x000AC2: u'FiberHome Telecommunication Technologies CO.,LTD', 0x000AC3: u'eM Technics Co., Ltd.', 0x000AC4: u'Daewoo Teletech Co., Ltd', 0x000AC5: u'Color Kinetics', 0x000AC6: u'Ceterus Networks, Inc.', 0x000AC7: u'Unication Group', 0x000AC8: u'ZPSYS CO.,LTD. (Planning&Management)', 0x000AC9: u'Zambeel Inc', 0x000ACA: u'YOKOYAMA SHOKAI CO.,Ltd.', 0x000ACB: u'XPAK MSA Group', 0x000ACC: u'Winnow Networks, Inc.', 0x000ACD: u'Sunrich Technology Limited', 0x000ACE: u'RADIANTECH, INC.', 0x000ACF: u'PROVIDEO Multimedia Co. Ltd.', 0x000AD0: u'Niigata Develoment Center, F.I.T. 
Co., Ltd.', 0x000AD1: u'MWS', 0x000AD2: u'JEPICO Corporation', 0x000AD3: u'INITECH Co., Ltd', 0x000AD4: u'CoreBell Systems Inc.', 0x000AD5: u'Brainchild Electronic Co., Ltd.', 0x000AD6: u'BeamReach Networks', 0x000AD7: u'Origin ELECTRIC CO.,LTD.', 0x000AD8: u'IPCserv Technology Corp.', 0x000AD9: u'Sony Ericsson Mobile Communications AB', 0x000ADA: u'PRIVATE', 0x000ADB: u'SkyPilot Network, Inc', 0x000ADC: u'RuggedCom Inc.', 0x000ADD: u'InSciTek Microsystems, Inc.', 0x000ADE: u'Happy Communication Co., Ltd.', 0x000ADF: u'Gennum Corporation', 0x000AE0: u'Fujitsu Softek', 0x000AE1: u'EG Technology', 0x000AE2: u'Binatone Electronics International, Ltd', 0x000AE3: u'YANG MEI TECHNOLOGY CO., LTD', 0x000AE4: u'Wistron Corp.', 0x000AE5: u'ScottCare Corporation', 0x000AE6: u'Elitegroup Computer System Co. (ECS)', 0x000AE7: u'ELIOP S.A.', 0x000AE8: u'Cathay Roxus Information Technology Co. LTD', 0x000AE9: u'AirVast Technology Inc.', 0x000AEA: u'ADAM ELEKTRONIK LTD.STI.', 0x000AEB: u'Shenzhen Tp-Link Technology Co; Ltd.', 0x000AEC: u'Koatsu Gas Kogyo Co., Ltd.', 0x000AED: u'HARTING Vending G.m.b.H. & CO KG', 0x000AEE: u'GCD Hard- & Software GmbH', 0x000AEF: u'OTRUM ASA', 0x000AF0: u'SHIN-OH ELECTRONICS CO., LTD. R&D', 0x000AF1: u'Clarity Design, Inc.', 0x000AF2: u'NeoAxiom Corp.', 0x000AF3: u'Cisco Systems', 0x000AF4: u'Cisco Systems', 0x000AF5: u'Airgo Networks, Inc.', 0x000AF6: u'Computer Process Controls', 0x000AF7: u'Broadcom Corp.', 0x000AF8: u'American Telecare Inc.', 0x000AF9: u'HiConnect, Inc.', 0x000AFA: u'Traverse Technologies Australia', 0x000AFB: u'Ambri Limited', 0x000AFC: u'Core Tec Communications, LLC', 0x000AFD: u'Viking Electronic Services', 0x000AFE: u'NovaPal Ltd', 0x000AFF: u'Kilchherr Elektronik AG', 0x000B00: u'FUJIAN START COMPUTER EQUIPMENT CO.,LTD', 0x000B01: u'DAIICHI ELECTRONICS CO., LTD.', 0x000B02: u'Dallmeier electronic', 0x000B03: u'Taekwang Industrial Co., Ltd', 0x000B04: u'Volktek Corporation', 0x000B05: u'Pacific Broadband Networks', 0x000B06: u'Motorola BCS', 0x000B07: u'Voxpath Networks', 0x000B08: u'Pillar Data Systems', 0x000B09: u'Ifoundry Systems Singapore', 0x000B0A: u'dBm Optics', 0x000B0B: u'Corrent Corporation', 0x000B0C: u'Agile Systems Inc.', 0x000B0D: u'Air2U, Inc.', 0x000B0E: u'Trapeze Networks', 0x000B0F: u'Nyquist Industrial Control BV', 0x000B10: u'11wave Technonlogy Co.,Ltd', 0x000B11: u'HIMEJI ABC TRADING CO.,LTD.', 0x000B12: u'NURI Telecom Co., Ltd.', 0x000B13: u'ZETRON INC', 0x000B14: u'ViewSonic Corporation', 0x000B15: u'Platypus Technology', 0x000B16: u'Communication Machinery Corporation', 0x000B17: u'MKS Instruments', 0x000B18: u'PRIVATE', 0x000B19: u'Vernier Networks, Inc.', 0x000B1A: u'Teltone Corporation', 0x000B1B: u'Systronix, Inc.', 0x000B1C: u'SIBCO bv', 0x000B1D: u'LayerZero Power Systems, Inc.', 0x000B1E: u'KAPPA opto-electronics GmbH', 0x000B1F: u'I CON Computer Co.', 0x000B20: u'Hirata corporation', 0x000B21: u'G-Star Communications Inc.', 0x000B22: u'Environmental Systems and Services', 0x000B23: u'Siemens Subscriber Networks', 0x000B24: u'AirLogic', 0x000B25: u'Aeluros', 0x000B26: u'Wetek Corporation', 0x000B27: u'Scion Corporation', 0x000B28: u'Quatech Inc.', 0x000B29: u'LG Industrial Systems Co.,Ltd.', 0x000B2A: u'HOWTEL Co., Ltd.', 0x000B2B: u'HOSTNET CORPORATION', 0x000B2C: u'Eiki Industrial Co. 
Ltd.', 0x000B2D: u'Danfoss Inc.', 0x000B2E: u'Cal-Comp Electronics (Thailand) Public Company Limited Taipe', 0x000B2F: u'bplan GmbH', 0x000B30: u'Beijing Gongye Science & Technology Co.,Ltd', 0x000B31: u'Yantai ZhiYang Scientific and technology industry CO., LTD', 0x000B32: u'VORMETRIC, INC.', 0x000B33: u'Vivato', 0x000B34: u'ShangHai Broadband Technologies CO.LTD', 0x000B35: u'Quad Bit System co., Ltd.', 0x000B36: u'Productivity Systems, Inc.', 0x000B37: u'MANUFACTURE DES MONTRES ROLEX SA', 0x000B38: u'Knuerr AG', 0x000B39: u'Keisoku Giken Co.,Ltd.', 0x000B3A: u'QuStream Corporation', 0x000B3B: u'devolo AG', 0x000B3C: u'Cygnal Integrated Products, Inc.', 0x000B3D: u'CONTAL OK Ltd.', 0x000B3E: u'BittWare, Inc', 0x000B3F: u'Anthology Solutions Inc.', 0x000B40: u'OpNext Inc.', 0x000B41: u'Ing. Buero Dr. Beutlhauser', 0x000B42: u'commax Co., Ltd.', 0x000B43: u'Microscan Systems, Inc.', 0x000B44: u'Concord IDea Corp.', 0x000B45: u'Cisco', 0x000B46: u'Cisco', 0x000B47: u'Advanced Energy', 0x000B48: u'sofrel', 0x000B49: u'RF-Link System Inc.', 0x000B4A: u'Visimetrics (UK) Ltd', 0x000B4B: u'VISIOWAVE SA', 0x000B4C: u'Clarion (M) Sdn Bhd', 0x000B4D: u'Emuzed', 0x000B4E: u'VertexRSI Antenna Products Division', 0x000B4F: u'Verifone, INC.', 0x000B50: u'Oxygnet', 0x000B51: u'Micetek International Inc.', 0x000B52: u'JOYMAX ELECTRONICS CORP.', 0x000B53: u'INITIUM Co., Ltd.', 0x000B54: u'BiTMICRO Networks, Inc.', 0x000B55: u'ADInstruments', 0x000B56: u'Cybernetics', 0x000B57: u'Silicon Laboratories', 0x000B58: u'Astronautics C.A LTD', 0x000B59: u'ScriptPro, LLC', 0x000B5A: u'HyperEdge', 0x000B5B: u'Rincon Research Corporation', 0x000B5C: u'Newtech Co.,Ltd', 0x000B5D: u'FUJITSU LIMITED', 0x000B5E: u'Audio Engineering Society Inc.', 0x000B5F: u'Cisco Systems', 0x000B60: u'Cisco Systems', 0x000B61: u'Friedrich Lütze GmbH &Co.', 0x000B62: u'Ingenieurbüro Ingo Mohnen', 0x000B63: u'Kaleidescape', 0x000B64: u'Kieback & Peter GmbH & Co KG', 0x000B65: u'Sy.A.C. srl', 0x000B66: u'Teralink Communications', 0x000B67: u'Topview Technology Corporation', 0x000B68: u'Addvalue Communications Pte Ltd', 0x000B69: u'Franke Finland Oy', 0x000B6A: u'Asiarock Incorporation', 0x000B6B: u'Wistron Neweb Corp.', 0x000B6C: u'Sychip Inc.', 0x000B6D: u'SOLECTRON JAPAN NAKANIIDA', 0x000B6E: u'Neff Instrument Corp.', 0x000B6F: u'Media Streaming Networks Inc', 0x000B70: u'Load Technology, Inc.', 0x000B71: u'Litchfield Communications Inc.', 0x000B72: u'Lawo AG', 0x000B73: u'Kodeos Communications', 0x000B74: u'Kingwave Technology Co., Ltd.', 0x000B75: u'Iosoft Ltd.', 0x000B76: u'ET&T Co. 
Ltd.', 0x000B77: u'Cogent Systems, Inc.', 0x000B78: u'TAIFATECH INC.', 0x000B79: u'X-COM, Inc.', 0x000B7A: u'Wave Science Inc.', 0x000B7B: u'Test-Um Inc.', 0x000B7C: u'Telex Communications', 0x000B7D: u'SOLOMON EXTREME INTERNATIONAL LTD.', 0x000B7E: u'SAGINOMIYA Seisakusho Inc.', 0x000B7F: u'OmniWerks', 0x000B80: u'Lycium Networks', 0x000B81: u'Kaparel Corporation', 0x000B82: u'Grandstream Networks, Inc.', 0x000B83: u'DATAWATT B.V.', 0x000B84: u'BODET', 0x000B85: u'Airespace, Inc.', 0x000B86: u'Aruba Networks', 0x000B87: u'American Reliance Inc.', 0x000B88: u'Vidisco ltd.', 0x000B89: u'Top Global Technology, Ltd.', 0x000B8A: u'MITEQ Inc.', 0x000B8B: u'KERAJET, S.A.', 0x000B8C: u'flextronics israel', 0x000B8D: u'Avvio Networks', 0x000B8E: u'Ascent Corporation', 0x000B8F: u'AKITA ELECTRONICS SYSTEMS CO.,LTD.', 0x000B90: u'Covaro Networks, Inc.', 0x000B91: u'Aglaia Gesellschaft für Bildverarbeitung und Kommunikation m', 0x000B92: u'Ascom Danmark A/S', 0x000B93: u'Barmag Electronic', 0x000B94: u'Digital Monitoring Products, Inc.', 0x000B95: u'eBet Gaming Systems Pty Ltd', 0x000B96: u'Innotrac Diagnostics Oy', 0x000B97: u'Matsushita Electric Industrial Co.,Ltd.', 0x000B98: u'NiceTechVision', 0x000B99: u'SensAble Technologies, Inc.', 0x000B9A: u'Shanghai Ulink Telecom Equipment Co. Ltd.', 0x000B9B: u'Sirius System Co, Ltd.', 0x000B9C: u'TriBeam Technologies, Inc.', 0x000B9D: u'TwinMOS Technologies Inc.', 0x000B9E: u'Yasing Technology Corp.', 0x000B9F: u'Neue ELSA GmbH', 0x000BA0: u'T&L Information Inc.', 0x000BA1: u'SYSCOM Ltd.', 0x000BA2: u'Sumitomo Electric Networks, Inc', 0x000BA3: u'Siemens AG, I&S', 0x000BA4: u'Shiron Satellite Communications Ltd. (1996)', 0x000BA5: u'Quasar Cipta Mandiri, PT', 0x000BA6: u'Miyakawa Electric Works Ltd.', 0x000BA7: u'Maranti Networks', 0x000BA8: u'HANBACK ELECTRONICS CO., LTD.', 0x000BA9: u'CloudShield Technologies, Inc.', 0x000BAA: u'Aiphone co.,Ltd', 0x000BAB: u'Advantech Technology (CHINA) Co., Ltd.', 0x000BAC: u'3Com Europe Ltd.', 0x000BAD: u'PC-PoS Inc.', 0x000BAE: u'Vitals System Inc.', 0x000BAF: u'WOOJU COMMUNICATIONS Co,.Ltd', 0x000BB0: u'Sysnet Telematica srl', 0x000BB1: u'Super Star Technology Co., Ltd.', 0x000BB2: u'SMALLBIG TECHNOLOGY', 0x000BB3: u'RiT technologies Ltd.', 0x000BB4: u'RDC Semiconductor Inc.,', 0x000BB5: u'nStor Technologies, Inc.', 0x000BB6: u'Mototech Inc.', 0x000BB7: u'Micro Systems Co.,Ltd.', 0x000BB8: u'Kihoku Electronic Co.', 0x000BB9: u'Imsys AB', 0x000BBA: u'Harmonic Broadband Access Networks', 0x000BBB: u'Etin Systems Co., Ltd', 0x000BBC: u'En Garde Systems, Inc.', 0x000BBD: u'Connexionz Limited', 0x000BBE: u'Cisco Systems', 0x000BBF: u'Cisco Systems', 0x000BC0: u'China IWNComm Co., Ltd.', 0x000BC1: u'Bay Microsystems, Inc.', 0x000BC2: u'Corinex Communication Corp.', 0x000BC3: u'Multiplex, Inc.', 0x000BC4: u'BIOTRONIK GmbH & Co', 0x000BC5: u'SMC Networks, Inc.', 0x000BC6: u'ISAC, Inc.', 0x000BC7: u'ICET S.p.A.', 0x000BC8: u'AirFlow Networks', 0x000BC9: u'Electroline Equipment', 0x000BCA: u'DATAVAN International Corporation', 0x000BCB: u'Fagor Automation , S. 
Coop', 0x000BCC: u'JUSAN, S.A.', 0x000BCD: u'Compaq (HP)', 0x000BCE: u'Free2move AB', 0x000BCF: u'AGFA NDT INC.', 0x000BD0: u'XiMeta Technology Americas Inc.', 0x000BD1: u'Aeronix, Inc.', 0x000BD2: u'Remopro Technology Inc.', 0x000BD3: u'cd3o', 0x000BD4: u'Beijing Wise Technology & Science Development Co.Ltd', 0x000BD5: u'Nvergence, Inc.', 0x000BD6: u'Paxton Access Ltd', 0x000BD7: u'MBB Gelma GmbH', 0x000BD8: u'Industrial Scientific Corp.', 0x000BD9: u'General Hydrogen', 0x000BDA: u'EyeCross Co.,Inc.', 0x000BDB: u'Dell ESG PCBA Test', 0x000BDC: u'AKCP', 0x000BDD: u'TOHOKU RICOH Co., LTD.', 0x000BDE: u'TELDIX GmbH', 0x000BDF: u'Shenzhen RouterD Networks Limited', 0x000BE0: u'SercoNet Ltd.', 0x000BE1: u'Nokia NET Product Operations', 0x000BE2: u'Lumenera Corporation', 0x000BE3: u'Key Stream Co., Ltd.', 0x000BE4: u'Hosiden Corporation', 0x000BE5: u'HIMS Korea Co., Ltd.', 0x000BE6: u'Datel Electronics', 0x000BE7: u'COMFLUX TECHNOLOGY INC.', 0x000BE8: u'AOIP', 0x000BE9: u'Actel Corporation', 0x000BEA: u'Zultys Technologies', 0x000BEB: u'Systegra AG', 0x000BEC: u'NIPPON ELECTRIC INSTRUMENT, INC.', 0x000BED: u'ELM Inc.', 0x000BEE: u'inc.jet, Incorporated', 0x000BEF: u'Code Corporation', 0x000BF0: u'MoTEX Products Co., Ltd.', 0x000BF1: u'LAP Laser Applikations', 0x000BF2: u'Chih-Kan Technology Co., Ltd.', 0x000BF3: u'BAE SYSTEMS', 0x000BF4: u'PRIVATE', 0x000BF5: u'Shanghai Sibo Telecom Technology Co.,Ltd', 0x000BF6: u'Nitgen Co., Ltd', 0x000BF7: u'NIDEK CO.,LTD', 0x000BF8: u'Infinera', 0x000BF9: u'Gemstone communications, Inc.', 0x000BFA: u'EXEMYS SRL', 0x000BFB: u'D-NET International Corporation', 0x000BFC: u'Cisco Systems', 0x000BFD: u'Cisco Systems', 0x000BFE: u'CASTEL Broadband Limited', 0x000BFF: u'Berkeley Camera Engineering', 0x000C00: u'BEB Industrie-Elektronik AG', 0x000C01: u'Abatron AG', 0x000C02: u'ABB Oy', 0x000C03: u'HDMI Licensing, LLC', 0x000C04: u'Tecnova', 0x000C05: u'RPA Reserch Co., Ltd.', 0x000C06: u'Nixvue Systems Pte Ltd', 0x000C07: u'Iftest AG', 0x000C08: u'HUMEX Technologies Corp.', 0x000C09: u'Hitachi IE Systems Co., Ltd', 0x000C0A: u'Guangdong Province Electronic Technology Research Institute', 0x000C0B: u'Broadbus Technologies', 0x000C0C: u'APPRO TECHNOLOGY INC.', 0x000C0D: u'Communications & Power Industries / Satcom Division', 0x000C0E: u'XtremeSpectrum, Inc.', 0x000C0F: u'Techno-One Co., Ltd', 0x000C10: u'PNI Corporation', 0x000C11: u'NIPPON DEMPA CO.,LTD.', 0x000C12: u'Micro-Optronic-Messtechnik GmbH', 0x000C13: u'MediaQ', 0x000C14: u'Diagnostic Instruments, Inc.', 0x000C15: u'CyberPower Systems, Inc.', 0x000C16: u'Concorde Microsystems Inc.', 0x000C17: u'AJA Video Systems Inc', 0x000C18: u'Zenisu Keisoku Inc.', 0x000C19: u'Telio Communications GmbH', 0x000C1A: u'Quest Technical Solutions Inc.', 0x000C1B: u'ORACOM Co, Ltd.', 0x000C1C: u'MicroWeb Co., Ltd.', 0x000C1D: u'Mettler & Fuchs AG', 0x000C1E: u'Global Cache', 0x000C1F: u'Glimmerglass Networks', 0x000C20: u'Fi WIn, Inc.', 0x000C21: u'Faculty of Science and Technology, Keio University', 0x000C22: u'Double D Electronics Ltd', 0x000C23: u'Beijing Lanchuan Tech. Co., Ltd.', 0x000C24: u'ANATOR', 0x000C25: u'Allied Telesyn Networks', 0x000C26: u'Weintek Labs. 
Inc.', 0x000C27: u'Sammy Corporation', 0x000C28: u'RIFATRON', 0x000C29: u'VMware, Inc.', 0x000C2A: u'OCTTEL Communication Co., Ltd.', 0x000C2B: u'ELIAS Technology, Inc.', 0x000C2C: u'Enwiser Inc.', 0x000C2D: u'FullWave Technology Co., Ltd.', 0x000C2E: u'Openet information technology(shenzhen) Co., Ltd.', 0x000C2F: u'SeorimTechnology Co.,Ltd.', 0x000C30: u'Cisco', 0x000C31: u'Cisco', 0x000C32: u'Avionic Design Development GmbH', 0x000C33: u'Compucase Enterprise Co. Ltd.', 0x000C34: u'Vixen Co., Ltd.', 0x000C35: u'KaVo Dental GmbH & Co. KG', 0x000C36: u'SHARP TAKAYA ELECTRONICS INDUSTRY CO.,LTD.', 0x000C37: u'Geomation, Inc.', 0x000C38: u'TelcoBridges Inc.', 0x000C39: u'Sentinel Wireless Inc.', 0x000C3A: u'Oxance', 0x000C3B: u'Orion Electric Co., Ltd.', 0x000C3C: u'MediaChorus, Inc.', 0x000C3D: u'Glsystech Co., Ltd.', 0x000C3E: u'Crest Audio', 0x000C3F: u'Cogent Defence & Security Networks,', 0x000C40: u'Altech Controls', 0x000C41: u'The Linksys Group, Inc.', 0x000C42: u'Routerboard.com', 0x000C43: u'Ralink Technology, Corp.', 0x000C44: u'Automated Interfaces, Inc.', 0x000C45: u'Animation Technologies Inc.', 0x000C46: u'Allied Telesyn Inc.', 0x000C47: u'SK Teletech(R&D Planning Team)', 0x000C48: u'QoStek Corporation', 0x000C49: u'Dangaard Telecom RTC Division A/S', 0x000C4A: u'Cygnus Microsystems Private Limited', 0x000C4B: u'Cheops Elektronik', 0x000C4C: u'Arcor AG&Co.', 0x000C4D: u'ACRA CONTROL', 0x000C4E: u'Winbest Technology CO,LT', 0x000C4F: u'UDTech Japan Corporation', 0x000C50: u'Seagate Technology', 0x000C51: u'Scientific Technologies Inc.', 0x000C52: u'Roll Systems Inc.', 0x000C53: u'PRIVATE', 0x000C54: u'Pedestal Networks, Inc', 0x000C55: u'Microlink Communications Inc.', 0x000C56: u'Megatel Computer (1986) Corp.', 0x000C57: u'MACKIE Engineering Services Belgium BVBA', 0x000C58: u'M&S Systems', 0x000C59: u'Indyme Electronics, Inc.', 0x000C5A: u'IBSmm Industrieelektronik Multimedia', 0x000C5B: u'HANWANG TECHNOLOGY CO.,LTD', 0x000C5C: u'GTN Systems B.V.', 0x000C5D: u'CHIC TECHNOLOGY (CHINA) CORP.', 0x000C5E: u'Calypso Medical', 0x000C5F: u'Avtec, Inc.', 0x000C60: u'ACM Systems', 0x000C61: u'AC Tech corporation DBA Advanced Digital', 0x000C62: u'ABB Automation Technology Products AB, Control', 0x000C63: u'Zenith Electronics Corporation', 0x000C64: u'X2 MSA Group', 0x000C65: u'Sunin Telecom', 0x000C66: u'Pronto Networks Inc', 0x000C67: u'OYO ELECTRIC CO.,LTD', 0x000C68: u'SigmaTel, Inc.', 0x000C69: u'National Radio Astronomy Observatory', 0x000C6A: u'MBARI', 0x000C6B: u'Kurz Industrie-Elektronik GmbH', 0x000C6C: u'Elgato Systems LLC', 0x000C6D: u'BOC Edwards', 0x000C6E: u'ASUSTEK COMPUTER INC.', 0x000C6F: u'Amtek system co.,LTD.', 0x000C70: u'ACC GmbH', 0x000C71: u'Wybron, Inc', 0x000C72: u'Tempearl Industrial Co., Ltd.', 0x000C73: u'TELSON ELECTRONICS CO., LTD', 0x000C74: u'RIVERTEC CORPORATION', 0x000C75: u'Oriental integrated electronics. LTD', 0x000C76: u'MICRO-STAR INTERNATIONAL CO., LTD.', 0x000C77: u'Life Racing Ltd', 0x000C78: u'In-Tech Electronics Limited', 0x000C79: u'Extel Communications P/L', 0x000C7A: u'DaTARIUS Technologies GmbH', 0x000C7B: u'ALPHA PROJECT Co.,Ltd.', 0x000C7C: u'Internet Information Image Inc.', 0x000C7D: u'TEIKOKU ELECTRIC MFG. 
CO., LTD', 0x000C7E: u'Tellium Incorporated', 0x000C7F: u'synertronixx GmbH', 0x000C80: u'Opelcomm Inc.', 0x000C81: u'Nulec Industries Pty Ltd', 0x000C82: u'NETWORK TECHNOLOGIES INC', 0x000C83: u'Logical Solutions', 0x000C84: u'Eazix, Inc.', 0x000C85: u'Cisco Systems', 0x000C86: u'Cisco Systems', 0x000C87: u'ATI', 0x000C88: u'Apache Micro Peripherals, Inc.', 0x000C89: u'AC Electric Vehicles, Ltd.', 0x000C8A: u'Bose Corporation', 0x000C8B: u'Connect Tech Inc', 0x000C8C: u'KODICOM CO.,LTD.', 0x000C8D: u'MATRIX VISION GmbH', 0x000C8E: u'Mentor Engineering Inc', 0x000C8F: u'Nergal s.r.l.', 0x000C90: u'Octasic Inc.', 0x000C91: u'Riverhead Networks Inc.', 0x000C92: u'WolfVision Gmbh', 0x000C93: u'Xeline Co., Ltd.', 0x000C94: u'United Electronic Industries, Inc.', 0x000C95: u'PrimeNet', 0x000C96: u'OQO, Inc.', 0x000C97: u'NV ADB TTV Technologies SA', 0x000C98: u'LETEK Communications Inc.', 0x000C99: u'HITEL LINK Co.,Ltd', 0x000C9A: u'Hitech Electronics Corp.', 0x000C9B: u'EE Solutions, Inc', 0x000C9C: u'Chongho information & communications', 0x000C9D: u'AirWalk Communications, Inc.', 0x000C9E: u'MemoryLink Corp.', 0x000C9F: u'NKE Corporation', 0x000CA0: u'StorCase Technology, Inc.', 0x000CA1: u'SIGMACOM Co., LTD.', 0x000CA2: u'Scopus Network Technologies Ltd', 0x000CA3: u'Rancho Technology, Inc.', 0x000CA4: u'Prompttec Product Management GmbH', 0x000CA5: u'Naman NZ LTd', 0x000CA6: u'Mintera Corporation', 0x000CA7: u'Metro (Suzhou) Technologies Co., Ltd.', 0x000CA8: u'Garuda Networks Corporation', 0x000CA9: u'Ebtron Inc.', 0x000CAA: u'Cubic Transportation Systems Inc', 0x000CAB: u'COMMEND International', 0x000CAC: u'Citizen Watch Co., Ltd.', 0x000CAD: u'BTU International', 0x000CAE: u'Ailocom Oy', 0x000CAF: u'TRI TERM CO.,LTD.', 0x000CB0: u'Star Semiconductor Corporation', 0x000CB1: u'Salland Engineering (Europe) BV', 0x000CB2: u'safei Co., Ltd.', 0x000CB3: u'ROUND Co.,Ltd.', 0x000CB4: u'AutoCell Laboratories, Inc.', 0x000CB5: u'Premier Technolgies, Inc', 0x000CB6: u'NANJING SEU MOBILE & INTERNET TECHNOLOGY CO.,LTD', 0x000CB7: u'Nanjing Huazhuo Electronics Co., Ltd.', 0x000CB8: u'MEDION AG', 0x000CB9: u'LEA', 0x000CBA: u'Jamex', 0x000CBB: u'ISKRAEMECO', 0x000CBC: u'Iscutum', 0x000CBD: u'Interface Masters, Inc', 0x000CBE: u'PRIVATE', 0x000CBF: u'Holy Stone Ent. Co., Ltd.', 0x000CC0: u'Genera Oy', 0x000CC1: u'Cooper Industries Inc.', 0x000CC2: u'PRIVATE', 0x000CC3: u'BeWAN systems', 0x000CC4: u'Tiptel AG', 0x000CC5: u'Nextlink Co., Ltd.', 0x000CC6: u'Ka-Ro electronics GmbH', 0x000CC7: u'Intelligent Computer Solutions Inc.', 0x000CC8: u'Xytronix Research & Design, Inc.', 0x000CC9: u'ILWOO DATA & TECHNOLOGY CO.,LTD', 0x000CCA: u'Hitachi Global Storage Technologies', 0x000CCB: u'Design Combus Ltd', 0x000CCC: u'Aeroscout Ltd.', 0x000CCD: u'IEC - TC57', 0x000CCE: u'Cisco Systems', 0x000CCF: u'Cisco Systems', 0x000CD0: u'Symetrix', 0x000CD1: u'SFOM Technology Corp.', 0x000CD2: u'Schaffner EMV AG', 0x000CD3: u'Prettl Elektronik Radeberg GmbH', 0x000CD4: u'Positron Public Safety Systems inc.', 0x000CD5: u'Passave Inc.', 0x000CD6: u'PARTNER TECH', 0x000CD7: u'Nallatech Ltd', 0x000CD8: u'M. K. 
Juchheim GmbH & Co', 0x000CD9: u'Itcare Co., Ltd', 0x000CDA: u'FreeHand Systems, Inc.', 0x000CDB: u'Foundry Networks', 0x000CDC: u'BECS Technology, Inc', 0x000CDD: u'AOS Technologies AG', 0x000CDE: u'ABB STOTZ-KONTAKT GmbH', 0x000CDF: u'PULNiX America, Inc', 0x000CE0: u'Trek Diagnostics Inc.', 0x000CE1: u'The Open Group', 0x000CE2: u'Rolls-Royce', 0x000CE3: u'Option International N.V.', 0x000CE4: u'NeuroCom International, Inc.', 0x000CE5: u'Motorola BCS', 0x000CE6: u'Meru Networks Inc', 0x000CE7: u'MediaTek Inc.', 0x000CE8: u'GuangZhou AnJuBao Co., Ltd', 0x000CE9: u'BLOOMBERG L.P.', 0x000CEA: u'aphona Kommunikationssysteme', 0x000CEB: u'CNMP Networks, Inc.', 0x000CEC: u'Spectracom Corp.', 0x000CED: u'Real Digital Media', 0x000CEE: u'jp-embedded', 0x000CEF: u'Open Networks Engineering Ltd', 0x000CF0: u'M & N GmbH', 0x000CF1: u'Intel Corporation', 0x000CF2: u'GAMESA EÓLICA', 0x000CF3: u'CALL IMAGE SA', 0x000CF4: u'AKATSUKI ELECTRIC MFG.CO.,LTD.', 0x000CF5: u'InfoExpress', 0x000CF6: u'Sitecom Europe BV', 0x000CF7: u'Nortel Networks', 0x000CF8: u'Nortel Networks', 0x000CF9: u'ITT Flygt AB', 0x000CFA: u'Digital Systems Corp', 0x000CFB: u'Korea Network Systems', 0x000CFC: u'S2io Technologies Corp', 0x000CFD: u'PRIVATE', 0x000CFE: u'Grand Electronic Co., Ltd', 0x000CFF: u'MRO-TEK LIMITED', 0x000D00: u'Seaway Networks Inc.', 0x000D01: u'P&E Microcomputer Systems, Inc.', 0x000D02: u'NEC AccessTechnica,Ltd', 0x000D03: u'Matrics, Inc.', 0x000D04: u'Foxboro Eckardt Development GmbH', 0x000D05: u'cybernet manufacturing inc.', 0x000D06: u'Compulogic Limited', 0x000D07: u'Calrec Audio Ltd', 0x000D08: u'AboveCable, Inc.', 0x000D09: u'Yuehua(Zhuhai) Electronic CO. LTD', 0x000D0A: u'Projectiondesign as', 0x000D0B: u'Buffalo Inc.', 0x000D0C: u'MDI Security Systems', 0x000D0D: u'ITSupported, LLC', 0x000D0E: u'Inqnet Systems, Inc.', 0x000D0F: u'Finlux Ltd', 0x000D10: u'Embedtronics Oy', 0x000D11: u'DENTSPLY - Gendex', 0x000D12: u'AXELL Corporation', 0x000D13: u'Wilhelm Rutenbeck GmbH&Co.', 0x000D14: u'Vtech Innovation LP dba Advanced American Telephones', 0x000D15: u'Voipac s.r.o.', 0x000D16: u'UHS Systems Pty Ltd', 0x000D17: u'Turbo Networks Co.Ltd', 0x000D18: u'Sunitec Enterprise Co., Ltd.', 0x000D19: u'ROBE Show lighting', 0x000D1A: u'Mustek System Inc.', 0x000D1B: u'Kyoto Electronics Manufacturing Co., Ltd.', 0x000D1C: u'I2E TELECOM', 0x000D1D: u'HIGH-TEK HARNESS ENT. 
CO., LTD.', 0x000D1E: u'Control Techniques', 0x000D1F: u'AV Digital', 0x000D20: u'ASAHIKASEI TECHNOSYSTEM CO.,LTD.', 0x000D21: u'WISCORE Inc.', 0x000D22: u'Unitronics', 0x000D23: u'Smart Solution, Inc', 0x000D24: u'SENTEC E&E CO., LTD.', 0x000D25: u'SANDEN CORPORATION', 0x000D26: u'Primagraphics Limited', 0x000D27: u'MICROPLEX Printware AG', 0x000D28: u'Cisco', 0x000D29: u'Cisco', 0x000D2A: u'Scanmatic AS', 0x000D2B: u'Racal Instruments', 0x000D2C: u'Patapsco Designs Ltd', 0x000D2D: u'NCT Deutschland GmbH', 0x000D2E: u'Matsushita Avionics Systems Corporation', 0x000D2F: u'AIN Comm.Tech.Co., LTD', 0x000D30: u'IceFyre Semiconductor', 0x000D31: u'Compellent Technologies, Inc.', 0x000D32: u'DispenseSource, Inc.', 0x000D33: u'Prediwave Corp.', 0x000D34: u'Shell International Exploration and Production, Inc.', 0x000D35: u'PAC International Ltd', 0x000D36: u'Wu Han Routon Electronic Co., Ltd', 0x000D37: u'WIPLUG', 0x000D38: u'NISSIN INC.', 0x000D39: u'Network Electronics', 0x000D3A: u'Microsoft Corp.', 0x000D3B: u'Microelectronics Technology Inc.', 0x000D3C: u'i.Tech Dynamic Ltd', 0x000D3D: u'Hammerhead Systems, Inc.', 0x000D3E: u'APLUX Communications Ltd.', 0x000D3F: u'VXI Technology', 0x000D40: u'Verint Loronix Video Solutions', 0x000D41: u'Siemens AG ICM MP UC RD IT KLF1', 0x000D42: u'Newbest Development Limited', 0x000D43: u'DRS Tactical Systems Inc.', 0x000D44: u'PRIVATE', 0x000D45: u'Tottori SANYO Electric Co., Ltd.', 0x000D46: u'SSD Drives, Inc.', 0x000D47: u'Collex', 0x000D48: u'AEWIN Technologies Co., Ltd.', 0x000D49: u'Triton Systems of Delaware, Inc.', 0x000D4A: u'Steag ETA-Optik', 0x000D4B: u'Roku, LLC', 0x000D4C: u'Outline Electronics Ltd.', 0x000D4D: u'Ninelanes', 0x000D4E: u'NDR Co.,LTD.', 0x000D4F: u'Kenwood Corporation', 0x000D50: u'Galazar Networks', 0x000D51: u'DIVR Systems, Inc.', 0x000D52: u'Comart system', 0x000D53: u'Beijing 5w Communication Corp.', 0x000D54: u'3Com Europe Ltd', 0x000D55: u'SANYCOM Technology Co.,Ltd', 0x000D56: u'Dell PCBA Test', 0x000D57: u'Fujitsu I-Network Systems Limited.', 0x000D58: u'PRIVATE', 0x000D59: u'Amity Systems, Inc.', 0x000D5A: u'Tiesse SpA', 0x000D5B: u'Smart Empire Investments Limited', 0x000D5C: u'Robert Bosch GmbH, VT-ATMO', 0x000D5D: u'Raritan Computer, Inc', 0x000D5E: u'NEC CustomTechnica, Ltd.', 0x000D5F: u'Minds Inc', 0x000D60: u'IBM Corporation', 0x000D61: u'Giga-Byte Technology Co., Ltd.', 0x000D62: u'Funkwerk Dabendorf GmbH', 0x000D63: u'DENT Instruments, Inc.', 0x000D64: u'COMAG Handels AG', 0x000D65: u'Cisco Systems', 0x000D66: u'Cisco Systems', 0x000D67: u'BelAir Networks Inc.', 0x000D68: u'Vinci Systems, Inc.', 0x000D69: u'TMT&D Corporation', 0x000D6A: u'Redwood Technologies LTD', 0x000D6B: u'Mita-Teknik A/S', 0x000D6C: u'M-Audio', 0x000D6D: u'K-Tech Devices Corp.', 0x000D6E: u'K-Patents Oy', 0x000D6F: u'Ember Corporation', 0x000D70: u'Datamax Corporation', 0x000D71: u'boca systems', 0x000D72: u'2Wire, Inc', 0x000D73: u'Technical Support, Inc.', 0x000D74: u'Sand Network Systems, Inc.', 0x000D75: u'Kobian Pte Ltd - Taiwan Branch', 0x000D76: u'Hokuto Denshi Co,. 
Ltd.', 0x000D77: u'FalconStor Software', 0x000D78: u'Engineering & Security', 0x000D79: u'Dynamic Solutions Co,.Ltd.', 0x000D7A: u'DiGATTO Asia Pacific Pte Ltd', 0x000D7B: u'Consensys Computers Inc.', 0x000D7C: u'Codian Ltd', 0x000D7D: u'Afco Systems', 0x000D7E: u'Axiowave Networks, Inc.', 0x000D7F: u'MIDAS COMMUNICATION TECHNOLOGIES PTE LTD ( Foreign Branch)', 0x000D80: u'Online Development Inc', 0x000D81: u'Pepperl+Fuchs GmbH', 0x000D82: u'PHS srl', 0x000D83: u'Sanmina-SCI Hungary Ltd.', 0x000D84: u'Makus Inc.', 0x000D85: u'Tapwave, Inc.', 0x000D86: u'Huber + Suhner AG', 0x000D87: u'Elitegroup Computer System Co. (ECS)', 0x000D88: u'D-Link Corporation', 0x000D89: u'Bils Technology Inc', 0x000D8A: u'Winners Electronics Co., Ltd.', 0x000D8B: u'T&D Corporation', 0x000D8C: u'Shanghai Wedone Digital Ltd. CO.', 0x000D8D: u'ProLinx Communication Gateways, Inc.', 0x000D8E: u'Koden Electronics Co., Ltd.', 0x000D8F: u'King Tsushin Kogyo Co., LTD.', 0x000D90: u'Factum Electronics AB', 0x000D91: u'Eclipse (HQ Espana) S.L.', 0x000D92: u'Arima Communication Corporation', 0x000D93: u'Apple Computer', 0x000D94: u'AFAR Communications,Inc', 0x000D95: u'Opti-cell, Inc.', 0x000D96: u'Vtera Technology Inc.', 0x000D97: u'Tropos Networks, Inc.', 0x000D98: u'S.W.A.C. Schmitt-Walter Automation Consult GmbH', 0x000D99: u'Orbital Sciences Corp.; Launch Systems Group', 0x000D9A: u'INFOTEC LTD', 0x000D9B: u'Heraeus Electro-Nite International N.V.', 0x000D9C: u'Elan GmbH & Co KG', 0x000D9D: u'Hewlett Packard', 0x000D9E: u'TOKUDEN OHIZUMI SEISAKUSYO Co.,Ltd.', 0x000D9F: u'RF Micro Devices', 0x000DA0: u'NEDAP N.V.', 0x000DA1: u'MIRAE ITS Co.,LTD.', 0x000DA2: u'Infrant Technologies, Inc.', 0x000DA3: u'Emerging Technologies Limited', 0x000DA4: u'DOSCH & AMAND SYSTEMS AG', 0x000DA5: u'Fabric7 Systems, Inc', 0x000DA6: u'Universal Switching Corporation', 0x000DA7: u'PRIVATE', 0x000DA8: u'Teletronics Technology Corporation', 0x000DA9: u'T.E.A.M. 
S.L.', 0x000DAA: u'S.A.Tehnology co.,Ltd.', 0x000DAB: u'Parker Hannifin GmbH Electromechanical Division Europe', 0x000DAC: u'Japan CBM Corporation', 0x000DAD: u'Dataprobe Inc', 0x000DAE: u'SAMSUNG HEAVY INDUSTRIES CO., LTD.', 0x000DAF: u'Plexus Corp (UK) Ltd', 0x000DB0: u'Olym-tech Co.,Ltd.', 0x000DB1: u'Japan Network Service Co., Ltd.', 0x000DB2: u'Ammasso, Inc.', 0x000DB3: u'SDO Communication Corperation', 0x000DB4: u'NETASQ', 0x000DB5: u'GLOBALSAT TECHNOLOGY CORPORATION', 0x000DB6: u'Teknovus, Inc.', 0x000DB7: u'SANKO ELECTRIC CO,.LTD', 0x000DB8: u'SCHILLER AG', 0x000DB9: u'PC Engines GmbH', 0x000DBA: u'Océ Document Technologies GmbH', 0x000DBB: u'Nippon Dentsu Co.,Ltd.', 0x000DBC: u'Cisco Systems', 0x000DBD: u'Cisco Systems', 0x000DBE: u'Bel Fuse Europe Ltd.,UK', 0x000DBF: u'TekTone Sound & Signal Mfg., Inc.', 0x000DC0: u'Spagat AS', 0x000DC1: u'SafeWeb Inc', 0x000DC2: u'PRIVATE', 0x000DC3: u'First Communication, Inc.', 0x000DC4: u'Emcore Corporation', 0x000DC5: u'EchoStar International Corporation', 0x000DC6: u'DigiRose Technology Co., Ltd.', 0x000DC7: u'COSMIC ENGINEERING INC.', 0x000DC8: u'AirMagnet, Inc', 0x000DC9: u'THALES Elektronik Systeme GmbH', 0x000DCA: u'Tait Electronics', 0x000DCB: u'Petcomkorea Co., Ltd.', 0x000DCC: u'NEOSMART Corp.', 0x000DCD: u'GROUPE TXCOM', 0x000DCE: u'Dynavac Technology Pte Ltd', 0x000DCF: u'Cidra Corp.', 0x000DD0: u'TetraTec Instruments GmbH', 0x000DD1: u'Stryker Corporation', 0x000DD2: u'Simrad Optronics ASA', 0x000DD3: u'SAMWOO Telecommunication Co.,Ltd.', 0x000DD4: u'Revivio Inc.', 0x000DD5: u'O\'RITE TECHNOLOGY CO.,LTD', 0x000DD6: u'ITI LTD', 0x000DD7: u'Bright', 0x000DD8: u'BBN', 0x000DD9: u'Anton Paar GmbH', 0x000DDA: u'ALLIED TELESIS K.K.', 0x000DDB: u'AIRWAVE TECHNOLOGIES INC.', 0x000DDC: u'VAC', 0x000DDD: u'PROFİLO TELRA ELEKTRONİK SANAYİ VE TİCARET A.Ş.', 0x000DDE: u'Joyteck Co., Ltd.', 0x000DDF: u'Japan Image & Network Inc.', 0x000DE0: u'ICPDAS Co.,LTD', 0x000DE1: u'Control Products, Inc.', 0x000DE2: u'CMZ Sistemi Elettronici', 0x000DE3: u'AT Sweden AB', 0x000DE4: u'DIGINICS, Inc.', 0x000DE5: u'Samsung Thales', 0x000DE6: u'YOUNGBO ENGINEERING CO.,LTD', 0x000DE7: u'Snap-on OEM Group', 0x000DE8: u'Nasaco Electronics Pte. Ltd', 0x000DE9: u'Napatech Aps', 0x000DEA: u'Kingtel Telecommunication Corp.', 0x000DEB: u'CompXs Limited', 0x000DEC: u'Cisco Systems', 0x000DED: u'Cisco Systems', 0x000DEE: u'Andrew RF Power Amplifier Group', 0x000DEF: u'Soc. Coop. Bilanciai', 0x000DF0: u'QCOM TECHNOLOGY INC.', 0x000DF1: u'IONIX INC.', 0x000DF2: u'PRIVATE', 0x000DF3: u'Asmax Solutions', 0x000DF4: u'Watertek Co.', 0x000DF5: u'Teletronics International Inc.', 0x000DF6: u'Technology Thesaurus Corp.', 0x000DF7: u'Space Dynamics Lab', 0x000DF8: u'ORGA Kartensysteme GmbH', 0x000DF9: u'NDS Limited', 0x000DFA: u'Micro Control Systems Ltd.', 0x000DFB: u'Komax AG', 0x000DFC: u'ITFOR Inc. 
resarch and development', 0x000DFD: u'Huges Hi-Tech Inc.,', 0x000DFE: u'Hauppauge Computer Works, Inc.', 0x000DFF: u'CHENMING MOLD INDUSTRY CORP.', 0x000E00: u'Atrie', 0x000E01: u'ASIP Technologies Inc.', 0x000E02: u'Advantech AMT Inc.', 0x000E03: u'Emulex', 0x000E04: u'CMA/Microdialysis AB', 0x000E05: u'WIRELESS MATRIX CORP.', 0x000E06: u'Team Simoco Ltd', 0x000E07: u'Sony Ericsson Mobile Communications AB', 0x000E08: u'Sipura Technology, Inc.', 0x000E09: u'Shenzhen Coship Software Co.,LTD.', 0x000E0A: u'SAKUMA DESIGN OFFICE', 0x000E0B: u'Netac Technology Co., Ltd.', 0x000E0C: u'Intel Corporation', 0x000E0D: u'HESCH Schröder GmbH', 0x000E0E: u'ESA elettronica S.P.A.', 0x000E0F: u'ERMME', 0x000E10: u'PRIVATE', 0x000E11: u'BDT Büro- und Datentechnik GmbH & Co. KG', 0x000E12: u'Adaptive Micro Systems Inc.', 0x000E13: u'Accu-Sort Systems inc.', 0x000E14: u'Visionary Solutions, Inc.', 0x000E15: u'Tadlys LTD', 0x000E16: u'SouthWing', 0x000E17: u'PRIVATE', 0x000E18: u'MyA Technology', 0x000E19: u'LogicaCMG Pty Ltd', 0x000E1A: u'JPS Communications', 0x000E1B: u'IAV GmbH', 0x000E1C: u'Hach Company', 0x000E1D: u'ARION Technology Inc.', 0x000E1E: u'PRIVATE', 0x000E1F: u'TCL Networks Equipment Co., Ltd.', 0x000E20: u'PalmSource, Inc.', 0x000E21: u'MTU Friedrichshafen GmbH', 0x000E22: u'PRIVATE', 0x000E23: u'Incipient, Inc.', 0x000E24: u'Huwell Technology Inc.', 0x000E25: u'Hannae Technology Co., Ltd', 0x000E26: u'Gincom Technology Corp.', 0x000E27: u'Crere Networks, Inc.', 0x000E28: u'Dynamic Ratings P/L', 0x000E29: u'Shester Communications Inc', 0x000E2A: u'PRIVATE', 0x000E2B: u'Safari Technologies', 0x000E2C: u'Netcodec co.', 0x000E2D: u'Hyundai Digital Technology Co.,Ltd.', 0x000E2E: u'Edimax Technology Co., Ltd.', 0x000E2F: u'Disetronic Medical Systems AG', 0x000E30: u'AERAS Networks, Inc.', 0x000E31: u'Olympus BioSystems GmbH', 0x000E32: u'Kontron Medical', 0x000E33: u'Shuko Electronics Co.,Ltd', 0x000E34: u'NexGen City, LP', 0x000E35: u'Intel Corp', 0x000E36: u'HEINESYS, Inc.', 0x000E37: u'Harms & Wende GmbH & Co.KG', 0x000E38: u'Cisco Systems', 0x000E39: u'Cisco Systems', 0x000E3A: u'Cirrus Logic', 0x000E3B: u'Hawking Technologies, Inc.', 0x000E3C: u'TransAct Technoloiges Inc.', 0x000E3D: u'Televic N.V.', 0x000E3E: u'Sun Optronics Inc', 0x000E3F: u'Soronti, Inc.', 0x000E40: u'Nortel Networks', 0x000E41: u'NIHON MECHATRONICS CO.,LTD.', 0x000E42: u'Motic Incoporation Ltd.', 0x000E43: u'G-Tek Electronics Sdn. Bhd.', 0x000E44: u'Digital 5, Inc.', 0x000E45: u'Beijing Newtry Electronic Technology Ltd', 0x000E46: u'Niigata Seimitsu Co.,Ltd.', 0x000E47: u'NCI System Co.,Ltd.', 0x000E48: u'Lipman TransAction Solutions', 0x000E49: u'Forsway Scandinavia AB', 0x000E4A: u'Changchun Huayu WEBPAD Co.,LTD', 0x000E4B: u'atrium c and i', 0x000E4C: u'Bermai Inc.', 0x000E4D: u'Numesa Inc.', 0x000E4E: u'Waveplus Technology Co., Ltd.', 0x000E4F: u'Trajet GmbH', 0x000E50: u'Thomson Telecom Belgium', 0x000E51: u'tecna elettronica srl', 0x000E52: u'Optium Corporation', 0x000E53: u'AV TECH CORPORATION', 0x000E54: u'AlphaCell Wireless Ltd.', 0x000E55: u'AUVITRAN', 0x000E56: u'4G Systems GmbH', 0x000E57: u'Iworld Networking, Inc.', 0x000E58: u'Sonos, Inc.', 0x000E59: u'SAGEM SA', 0x000E5A: u'TELEFIELD inc.', 0x000E5B: u'ParkerVision - Direct2Data', 0x000E5C: u'Motorola BCS', 0x000E5D: u'Triple Play Technologies A/S', 0x000E5E: u'Beijing Raisecom Science & Technology Development Co.,Ltd', 0x000E5F: u'activ-net GmbH & Co. 
KG', 0x000E60: u'360SUN Digital Broadband Corporation', 0x000E61: u'MICROTROL LIMITED', 0x000E62: u'Nortel Networks', 0x000E63: u'Lemke Diagnostics GmbH', 0x000E64: u'Elphel, Inc', 0x000E65: u'TransCore', 0x000E66: u'Hitachi Advanced Digital, Inc.', 0x000E67: u'Eltis Microelectronics Ltd.', 0x000E68: u'E-TOP Network Technology Inc.', 0x000E69: u'China Electric Power Research Institute', 0x000E6A: u'3COM EUROPE LTD', 0x000E6B: u'Janitza electronics GmbH', 0x000E6C: u'Device Drivers Limited', 0x000E6D: u'Murata Manufacturing Co., Ltd.', 0x000E6E: u'MICRELEC ELECTRONICS S.A', 0x000E6F: u'IRIS Corporation Berhad', 0x000E70: u'in2 Networks', 0x000E71: u'Gemstar Technology Development Ltd.', 0x000E72: u'CTS electronics', 0x000E73: u'Tpack A/S', 0x000E74: u'Solar Telecom. Tech', 0x000E75: u'New York Air Brake Corp.', 0x000E76: u'GEMSOC INNOVISION INC.', 0x000E77: u'Decru, Inc.', 0x000E78: u'Amtelco', 0x000E79: u'Ample Communications Inc.', 0x000E7A: u'GemWon Communications Co., Ltd.', 0x000E7B: u'Toshiba', 0x000E7C: u'Televes S.A.', 0x000E7D: u'Electronics Line 3000 Ltd.', 0x000E7E: u'Comprog Oy', 0x000E7F: u'Hewlett Packard', 0x000E80: u'Thomson Technology Inc', 0x000E81: u'Devicescape Software, Inc.', 0x000E82: u'Commtech Wireless', 0x000E83: u'Cisco Systems', 0x000E84: u'Cisco Systems', 0x000E85: u'Catalyst Enterprises, Inc.', 0x000E86: u'Alcatel North America', 0x000E87: u'adp Gauselmann GmbH', 0x000E88: u'VIDEOTRON CORP.', 0x000E89: u'CLEMATIC', 0x000E8A: u'Avara Technologies Pty. Ltd.', 0x000E8B: u'Astarte Technology Co, Ltd.', 0x000E8C: u'Siemens AG A&D ET', 0x000E8D: u'Systems in Progress Holding GmbH', 0x000E8E: u'SparkLAN Communications, Inc.', 0x000E8F: u'Sercomm Corp.', 0x000E90: u'PONICO CORP.', 0x000E91: u'Northstar Technologies', 0x000E92: u'Millinet Co., Ltd.', 0x000E93: u'Milénio 3 Sistemas Electrónicos, Lda.', 0x000E94: u'Maas International BV', 0x000E95: u'Fujiya Denki Seisakusho Co.,Ltd.', 0x000E96: u'Cubic Defense Applications, Inc.', 0x000E97: u'Ultracker Technology CO., Inc', 0x000E98: u'Vitec CC, INC.', 0x000E99: u'Spectrum Digital, Inc', 0x000E9A: u'BOE TECHNOLOGY GROUP CO.,LTD', 0x000E9B: u'Ambit Microsystems Corporation', 0x000E9C: u'Pemstar', 0x000E9D: u'Video Networks Ltd', 0x000E9E: u'Topfield Co., Ltd', 0x000E9F: u'TEMIC SDS GmbH', 0x000EA0: u'NetKlass Technology Inc.', 0x000EA1: u'Formosa Teletek Corporation', 0x000EA2: u'CyberGuard Corporation', 0x000EA3: u'CNCR-IT CO.,LTD,HangZhou P.R.CHINA', 0x000EA4: u'Certance Inc.', 0x000EA5: u'BLIP Systems', 0x000EA6: u'ASUSTEK COMPUTER INC.', 0x000EA7: u'Endace Inc Ltd.', 0x000EA8: u'United Technologists Europe Limited', 0x000EA9: u'Shanghai Xun Shi Communications Equipment Ltd. 
Co.', 0x000EAA: u'Scalent Systems, Inc.', 0x000EAB: u'OctigaBay Systems Corporation', 0x000EAC: u'MINTRON ENTERPRISE CO., LTD.', 0x000EAD: u'Metanoia Technologies, Inc.', 0x000EAE: u'GAWELL TECHNOLOGIES CORP.', 0x000EAF: u'CASTEL', 0x000EB0: u'Solutions Radio BV', 0x000EB1: u'Newcotech,Ltd', 0x000EB2: u'Micro-Research Finland Oy', 0x000EB3: u'LeftHand Networks', 0x000EB4: u'GUANGZHOU GAOKE COMMUNICATIONS TECHNOLOGY CO.LTD.', 0x000EB5: u'Ecastle Electronics Co., Ltd.', 0x000EB6: u'Riverbed Technology, Inc.', 0x000EB7: u'Knovative, Inc.', 0x000EB8: u'Iiga co.,Ltd', 0x000EB9: u'HASHIMOTO Electronics Industry Co.,Ltd.', 0x000EBA: u'HANMI SEMICONDUCTOR CO., LTD.', 0x000EBB: u'Everbee Networks', 0x000EBC: u'Cullmann GmbH', 0x000EBD: u'Burdick, a Quinton Compny', 0x000EBE: u'B&B Electronics Manufacturing Co.', 0x000EBF: u'Remsdaq Limited', 0x000EC0: u'Nortel Networks', 0x000EC1: u'MYNAH Technologies', 0x000EC2: u'Lowrance Electronics, Inc.', 0x000EC3: u'Logic Controls, Inc.', 0x000EC4: u'Iskra Transmission d.d.', 0x000EC5: u'Digital Multitools Inc', 0x000EC6: u'ASIX ELECTRONICS CORP.', 0x000EC7: u'Motorola Korea', 0x000EC8: u'Zoran Corporation', 0x000EC9: u'YOKO Technology Corp.', 0x000ECA: u'WTSS Inc', 0x000ECB: u'VineSys Technology', 0x000ECC: u'Tableau', 0x000ECD: u'SKOV A/S', 0x000ECE: u'S.I.T.T.I. S.p.A.', 0x000ECF: u'PROFIBUS Nutzerorganisation e.V.', 0x000ED0: u'Privaris, Inc.', 0x000ED1: u'Osaka Micro Computer.', 0x000ED2: u'Filtronic plc', 0x000ED3: u'Epicenter, Inc.', 0x000ED4: u'CRESITT INDUSTRIE', 0x000ED5: u'COPAN Systems Inc.', 0x000ED6: u'Cisco Systems', 0x000ED7: u'Cisco Systems', 0x000ED8: u'Aktino, Inc.', 0x000ED9: u'Aksys, Ltd.', 0x000EDA: u'C-TECH UNITED CORP.', 0x000EDB: u'XiNCOM Corp.', 0x000EDC: u'Tellion INC.', 0x000EDD: u'SHURE INCORPORATED', 0x000EDE: u'REMEC, Inc.', 0x000EDF: u'PLX Technology', 0x000EE0: u'Mcharge', 0x000EE1: u'ExtremeSpeed Inc.', 0x000EE2: u'Custom Engineering S.p.A.', 0x000EE3: u'Chiyu Technology Co.,Ltd', 0x000EE4: u'BOE TECHNOLOGY GROUP CO.,LTD', 0x000EE5: u'bitWallet, Inc.', 0x000EE6: u'Adimos Systems LTD', 0x000EE7: u'AAC ELECTRONICS CORP.', 0x000EE8: u'zioncom', 0x000EE9: u'WayTech Development, Inc.', 0x000EEA: u'Shadong Luneng Jicheng Electronics,Co.,Ltd', 0x000EEB: u'Sandmartin(zhong shan)Electronics Co.,Ltd', 0x000EEC: u'Orban', 0x000EED: u'Nokia Danmark A/S', 0x000EEE: u'Muco Industrie BV', 0x000EEF: u'PRIVATE', 0x000EF0: u'Festo AG & Co. 
KG', 0x000EF1: u'EZQUEST INC.', 0x000EF2: u'Infinico Corporation', 0x000EF3: u'Smarthome', 0x000EF4: u'Shenzhen Kasda Digital Technology Co.,Ltd', 0x000EF5: u'iPAC Technology Co., Ltd.', 0x000EF6: u'E-TEN Information Systems Co., Ltd.', 0x000EF7: u'Vulcan Portals Inc', 0x000EF8: u'SBC ASI', 0x000EF9: u'REA Elektronik GmbH', 0x000EFA: u'Optoway Technology Incorporation', 0x000EFB: u'Macey Enterprises', 0x000EFC: u'JTAG Technologies B.V.', 0x000EFD: u'FUJI PHOTO OPTICAL CO., LTD.', 0x000EFE: u'EndRun Technologies LLC', 0x000EFF: u'Megasolution,Inc.', 0x000F00: u'Legra Systems, Inc.', 0x000F01: u'DIGITALKS INC', 0x000F02: u'Digicube Technology Co., Ltd', 0x000F03: u'COM&C CO., LTD', 0x000F04: u'cim-usa inc', 0x000F05: u'3B SYSTEM INC.', 0x000F06: u'Nortel Networks', 0x000F07: u'Mangrove Systems, Inc.', 0x000F08: u'Indagon Oy', 0x000F09: u'PRIVATE', 0x000F0A: u'Clear Edge Networks', 0x000F0B: u'Kentima Technologies AB', 0x000F0C: u'SYNCHRONIC ENGINEERING', 0x000F0D: u'Hunt Electronic Co., Ltd.', 0x000F0E: u'WaveSplitter Technologies, Inc.', 0x000F0F: u'Real ID Technology Co., Ltd.', 0x000F10: u'RDM Corporation', 0x000F11: u'Prodrive B.V.', 0x000F12: u'Panasonic AVC Networks Germany GmbH', 0x000F13: u'Nisca corporation', 0x000F14: u'Mindray Co., Ltd.', 0x000F15: u'Kjaerulff1 A/S', 0x000F16: u'JAY HOW TECHNOLOGY CO.,', 0x000F17: u'Insta Elektro GmbH', 0x000F18: u'Industrial Control Systems', 0x000F19: u'Guidant Corporation', 0x000F1A: u'Gaming Support B.V.', 0x000F1B: u'Ego Systems Inc.', 0x000F1C: u'DigitAll World Co., Ltd', 0x000F1D: u'Cosmo Techs Co., Ltd.', 0x000F1E: u'Chengdu KT Electric Co.of High & New Technology', 0x000F1F: u'WW PCBA Test', 0x000F20: u'Hewlett Packard', 0x000F21: u'Scientific Atlanta, Inc', 0x000F22: u'Helius, Inc.', 0x000F23: u'Cisco Systems', 0x000F24: u'Cisco Systems', 0x000F25: u'AimValley B.V.', 0x000F26: u'WorldAccxx LLC', 0x000F27: u'TEAL Electronics, Inc.', 0x000F28: u'Itronix Corporation', 0x000F29: u'Augmentix Corporation', 0x000F2A: u'Cableware Electronics', 0x000F2B: u'GREENBELL SYSTEMS', 0x000F2C: u'Uplogix, Inc.', 0x000F2D: u'CHUNG-HSIN ELECTRIC & MACHINERY MFG.CORP.', 0x000F2E: u'Megapower International Corp.', 0x000F2F: u'W-LINX TECHNOLOGY CO., LTD.', 0x000F30: u'Raza Microelectronics Inc', 0x000F31: u'Prosilica', 0x000F32: u'LuTong Electronic Technology Co.,Ltd', 0x000F33: u'DUALi Inc.', 0x000F34: u'Cisco Systems', 0x000F35: u'Cisco Systems', 0x000F36: u'Accurate Techhnologies, Inc.', 0x000F37: u'Xambala Incorporated', 0x000F38: u'Netstar', 0x000F39: u'IRIS SENSORS', 0x000F3A: u'HISHARP', 0x000F3B: u'Fuji System Machines Co., Ltd.', 0x000F3C: u'Endeleo Limited', 0x000F3D: u'D-Link Corporation', 0x000F3E: u'CardioNet, Inc', 0x000F3F: u'Big Bear Networks', 0x000F40: u'Optical Internetworking Forum', 0x000F41: u'Zipher Ltd', 0x000F42: u'Xalyo Systems', 0x000F43: u'Wasabi Systems Inc.', 0x000F44: u'Tivella Inc.', 0x000F45: u'Stretch, Inc.', 0x000F46: u'SINAR AG', 0x000F47: u'ROBOX SPA', 0x000F48: u'Polypix Inc.', 0x000F49: u'Northover Solutions Limited', 0x000F4A: u'Kyushu-kyohan co.,ltd', 0x000F4B: u'Katana Technology', 0x000F4C: u'Elextech INC', 0x000F4D: u'Centrepoint Technologies Inc.', 0x000F4E: u'Cellink', 0x000F4F: u'Cadmus Technology Ltd', 0x000F50: u'Baxall Limited', 0x000F51: u'Azul Systems, Inc.', 0x000F52: u'YORK Refrigeration, Marine & Controls', 0x000F53: u'Solarflare Communications Inc', 0x000F54: u'Entrelogic Corporation', 0x000F55: u'Datawire Communication Networks Inc.', 0x000F56: u'Continuum Photonics Inc', 0x000F57: u'CABLELOGIC Co., 
Ltd.', 0x000F58: u'Adder Technology Limited', 0x000F59: u'Phonak Communications AG', 0x000F5A: u'Peribit Networks', 0x000F5B: u'Delta Information Systems, Inc.', 0x000F5C: u'Day One Digital Media Limited', 0x000F5D: u'42Networks AB', 0x000F5E: u'Veo', 0x000F5F: u'Nicety Technologies Inc. (NTS)', 0x000F60: u'Lifetron Co.,Ltd', 0x000F61: u'Kiwi Networks', 0x000F62: u'Alcatel Bell Space N.V.', 0x000F63: u'Obzerv Technologies', 0x000F64: u'D&R Electronica Weesp BV', 0x000F65: u'icube Corp.', 0x000F66: u'Cisco-Linksys', 0x000F67: u'West Instruments', 0x000F68: u'Vavic Network Technology, Inc.', 0x000F69: u'SEW Eurodrive GmbH & Co. KG', 0x000F6A: u'Nortel Networks', 0x000F6B: u'GateWare Communications GmbH', 0x000F6C: u'ADDI-DATA GmbH', 0x000F6D: u'Midas Engineering', 0x000F6E: u'BBox', 0x000F6F: u'FTA Communication Technologies', 0x000F70: u'Wintec Industries, inc.', 0x000F71: u'Sanmei Electronics Co.,Ltd', 0x000F72: u'Sandburst', 0x000F73: u'Rockwell Samsung Automation', 0x000F74: u'Qamcom Technology AB', 0x000F75: u'First Silicon Solutions', 0x000F76: u'Digital Keystone, Inc.', 0x000F77: u'DENTUM CO.,LTD', 0x000F78: u'Datacap Systems Inc', 0x000F79: u'Bluetooth Interest Group Inc.', 0x000F7A: u'BeiJing NuQX Technology CO.,LTD', 0x000F7B: u'Arce Sistemas, S.A.', 0x000F7C: u'ACTi Corporation', 0x000F7D: u'Xirrus', 0x000F7E: u'Ablerex Electronics Co., LTD', 0x000F7F: u'UBSTORAGE Co.,Ltd.', 0x000F80: u'Trinity Security Systems,Inc.', 0x000F81: u'Secure Info Imaging', 0x000F82: u'Mortara Instrument, Inc.', 0x000F83: u'Brainium Technologies Inc.', 0x000F84: u'Astute Networks, Inc.', 0x000F85: u'ADDO-Japan Corporation', 0x000F86: u'Research In Motion Limited', 0x000F87: u'Maxcess International', 0x000F88: u'AMETEK, Inc.', 0x000F89: u'Winnertec System Co., Ltd.', 0x000F8A: u'WideView', 0x000F8B: u'Orion MultiSystems Inc', 0x000F8C: u'Gigawavetech Pte Ltd', 0x000F8D: u'FAST TV-Server AG', 0x000F8E: u'DONGYANG TELECOM CO.,LTD.', 0x000F8F: u'Cisco Systems', 0x000F90: u'Cisco Systems', 0x000F91: u'Aerotelecom Co.,Ltd.', 0x000F92: u'Microhard Systems Inc.', 0x000F93: u'Landis+Gyr Ltd.', 0x000F94: u'Genexis', 0x000F95: u'ELECOM Co.,LTD Laneed Division', 0x000F96: u'Critical Telecom Corp.', 0x000F97: u'Avanex Corporation', 0x000F98: u'Avamax Co. Ltd.', 0x000F99: u'APAC opto Electronics Inc.', 0x000F9A: u'Synchrony, Inc.', 0x000F9B: u'Ross Video Limited', 0x000F9C: u'Panduit Corp', 0x000F9D: u'Newnham Research Ltd', 0x000F9E: u'Murrelektronik GmbH', 0x000F9F: u'Motorola BCS', 0x000FA0: u'CANON KOREA BUSINESS SOLUTIONS INC.', 0x000FA1: u'Gigabit Systems Inc.', 0x000FA2: u'Digital Path Networks', 0x000FA3: u'Alpha Networks Inc.', 0x000FA4: u'Sprecher Automation GmbH', 0x000FA5: u'SMP / BWA Technology GmbH', 0x000FA6: u'S2 Security Corporation', 0x000FA7: u'Raptor Networks Technology', 0x000FA8: u'Photometrics, Inc.', 0x000FA9: u'PC Fabrik', 0x000FAA: u'Nexus Technologies', 0x000FAB: u'Kyushu Electronics Systems Inc.', 0x000FAC: u'IEEE 802.11', 0x000FAD: u'FMN communications GmbH', 0x000FAE: u'E2O Communications', 0x000FAF: u'Dialog Inc.', 0x000FB0: u'Compal Electronics,INC.', 0x000FB1: u'Cognio Inc.', 0x000FB2: u'Broadband Pacenet (India) Pvt. Ltd.', 0x000FB3: u'Actiontec Electronics, Inc', 0x000FB4: u'Timespace Technology', 0x000FB5: u'NETGEAR Inc', 0x000FB6: u'Europlex Technologies', 0x000FB7: u'Cavium Networks', 0x000FB8: u'CallURL Inc.', 0x000FB9: u'Adaptive Instruments', 0x000FBA: u'Tevebox AB', 0x000FBB: u'Siemens Networks GmbH & Co. 
KG', 0x000FBC: u'Onkey Technologies, Inc.', 0x000FBD: u'MRV Communications (Networks) LTD', 0x000FBE: u'e-w/you Inc.', 0x000FBF: u'DGT Sp. z o.o.', 0x000FC0: u'DELCOMp', 0x000FC1: u'WAVE Corporation', 0x000FC2: u'Uniwell Corporation', 0x000FC3: u'PalmPalm Technology, Inc.', 0x000FC4: u'NST co.,LTD.', 0x000FC5: u'KeyMed Ltd', 0x000FC6: u'Eurocom Industries A/S', 0x000FC7: u'Dionica R&D Ltd.', 0x000FC8: u'Chantry Networks', 0x000FC9: u'Allnet GmbH', 0x000FCA: u'A-JIN TECHLINE CO, LTD', 0x000FCB: u'3COM EUROPE LTD', 0x000FCC: u'Netopia, Inc.', 0x000FCD: u'Nortel Networks', 0x000FCE: u'Kikusui Electronics Corp.', 0x000FCF: u'Datawind Research', 0x000FD0: u'ASTRI', 0x000FD1: u'Applied Wireless Identifications Group, Inc.', 0x000FD2: u'EWA Technologies, Inc.', 0x000FD3: u'Digium', 0x000FD4: u'Soundcraft', 0x000FD5: u'Schwechat - RISE', 0x000FD6: u'Sarotech Co., Ltd', 0x000FD7: u'Harman Music Group', 0x000FD8: u'Force, Inc.', 0x000FD9: u'FlexDSL Telecommunications AG', 0x000FDA: u'YAZAKI CORPORATION', 0x000FDB: u'Westell Technologies', 0x000FDC: u'Ueda Japan Radio Co., Ltd.', 0x000FDD: u'SORDIN AB', 0x000FDE: u'Sony Ericsson Mobile Communications AB', 0x000FDF: u'SOLOMON Technology Corp.', 0x000FE0: u'NComputing Co.,Ltd.', 0x000FE1: u'ID DIGITAL CORPORATION', 0x000FE2: u'Hangzhou Huawei-3Com Tech. Co., Ltd.', 0x000FE3: u'Damm Cellular Systems A/S', 0x000FE4: u'Pantech Co.,Ltd', 0x000FE5: u'MERCURY SECURITY CORPORATION', 0x000FE6: u'MBTech Systems, Inc.', 0x000FE7: u'Lutron Electronics Co., Inc.', 0x000FE8: u'Lobos, Inc.', 0x000FE9: u'GW TECHNOLOGIES CO.,LTD.', 0x000FEA: u'Giga-Byte Technology Co.,LTD.', 0x000FEB: u'Cylon Controls', 0x000FEC: u'Arkus Inc.', 0x000FED: u'Anam Electronics Co., Ltd', 0x000FEE: u'XTec, Incorporated', 0x000FEF: u'Thales e-Transactions GmbH', 0x000FF0: u'Sunray Enterprise', 0x000FF1: u'nex-G Systems Pte.Ltd', 0x000FF2: u'Loud Technologies Inc.', 0x000FF3: u'Jung Myoung Communications&Technology', 0x000FF4: u'Guntermann & Drunck GmbH', 0x000FF5: u'GN&S company', 0x000FF6: u'Darfon Electronics Corp.', 0x000FF7: u'Cisco Systems', 0x000FF8: u'Cisco Systems', 0x000FF9: u'Valcretec, Inc.', 0x000FFA: u'Optinel Systems, Inc.', 0x000FFB: u'Nippon Denso Industry Co., Ltd.', 0x000FFC: u'Merit Li-Lin Ent.', 0x000FFD: u'Glorytek Network Inc.', 0x000FFE: u'G-PRO COMPUTER', 0x000FFF: u'Control4', 0x001000: u'CABLE TELEVISION LABORATORIES, INC.', 0x001001: u'MCK COMMUNICATIONS', 0x001002: u'ACTIA', 0x001003: u'IMATRON, INC.', 0x001004: u'THE BRANTLEY COILE COMPANY,INC', 0x001005: u'UEC COMMERCIAL', 0x001006: u'Thales Contact Solutions Ltd.', 0x001007: u'CISCO SYSTEMS, INC.', 0x001008: u'VIENNA SYSTEMS CORPORATION', 0x001009: u'HORO QUARTZ', 0x00100A: u'WILLIAMS COMMUNICATIONS GROUP', 0x00100B: u'CISCO SYSTEMS, INC.', 0x00100C: u'ITO CO., LTD.', 0x00100D: u'CISCO SYSTEMS, INC.', 0x00100E: u'MICRO LINEAR COPORATION', 0x00100F: u'INDUSTRIAL CPU SYSTEMS', 0x001010: u'INITIO CORPORATION', 0x001011: u'CISCO SYSTEMS, INC.', 0x001012: u'PROCESSOR SYSTEMS (I) PVT LTD', 0x001013: u'Kontron', 0x001014: u'CISCO SYSTEMS, INC.', 0x001015: u'OOmon Inc.', 0x001016: u'T.SQWARE', 0x001017: u'MICOS GmbH', 0x001018: u'BROADCOM CORPORATION', 0x001019: u'SIRONA DENTAL SYSTEMS GmbH & Co. 
KG', 0x00101A: u'PictureTel Corp.', 0x00101B: u'CORNET TECHNOLOGY, INC.', 0x00101C: u'OHM TECHNOLOGIES INTL, LLC', 0x00101D: u'WINBOND ELECTRONICS CORP.', 0x00101E: u'MATSUSHITA ELECTRONIC INSTRUMENTS CORP.', 0x00101F: u'CISCO SYSTEMS, INC.', 0x001020: u'WELCH ALLYN, DATA COLLECTION', 0x001021: u'ENCANTO NETWORKS, INC.', 0x001022: u'SatCom Media Corporation', 0x001023: u'FLOWWISE NETWORKS, INC.', 0x001024: u'NAGOYA ELECTRIC WORKS CO., LTD', 0x001025: u'GRAYHILL INC.', 0x001026: u'ACCELERATED NETWORKS, INC.', 0x001027: u'L-3 COMMUNICATIONS EAST', 0x001028: u'COMPUTER TECHNICA, INC.', 0x001029: u'CISCO SYSTEMS, INC.', 0x00102A: u'ZF MICROSYSTEMS, INC.', 0x00102B: u'UMAX DATA SYSTEMS, INC.', 0x00102C: u'Lasat Networks A/S', 0x00102D: u'HITACHI SOFTWARE ENGINEERING', 0x00102E: u'NETWORK SYSTEMS & TECHNOLOGIES PVT. LTD.', 0x00102F: u'CISCO SYSTEMS, INC.', 0x001030: u'EION Inc.', 0x001031: u'OBJECTIVE COMMUNICATIONS, INC.', 0x001032: u'ALTA TECHNOLOGY', 0x001033: u'ACCESSLAN COMMUNICATIONS, INC.', 0x001034: u'GNP Computers', 0x001035: u'ELITEGROUP COMPUTER SYSTEMS CO., LTD', 0x001036: u'INTER-TEL INTEGRATED SYSTEMS', 0x001037: u'CYQ\'ve Technology Co., Ltd.', 0x001038: u'MICRO RESEARCH INSTITUTE, INC.', 0x001039: u'Vectron Systems AG', 0x00103A: u'DIAMOND NETWORK TECH', 0x00103B: u'HIPPI NETWORKING FORUM', 0x00103C: u'IC ENSEMBLE, INC.', 0x00103D: u'PHASECOM, LTD.', 0x00103E: u'NETSCHOOLS CORPORATION', 0x00103F: u'TOLLGRADE COMMUNICATIONS, INC.', 0x001040: u'INTERMEC CORPORATION', 0x001041: u'BRISTOL BABCOCK, INC.', 0x001042: u'AlacriTech', 0x001043: u'A2 CORPORATION', 0x001044: u'InnoLabs Corporation', 0x001045: u'Nortel Networks', 0x001046: u'ALCORN MCBRIDE INC.', 0x001047: u'ECHO ELETRIC CO. LTD.', 0x001048: u'HTRC AUTOMATION, INC.', 0x001049: u'SHORELINE TELEWORKS, INC.', 0x00104A: u'THE PARVUC CORPORATION', 0x00104B: u'3COM CORPORATION', 0x00104C: u'COMPUTER ACCESS TECHNOLOGY', 0x00104D: u'SURTEC INDUSTRIES, INC.', 0x00104E: u'CEOLOGIC', 0x00104F: u'STORAGE TECHNOLOGY CORPORATION', 0x001050: u'RION CO., LTD.', 0x001051: u'CMICRO CORPORATION', 0x001052: u'METTLER-TOLEDO (ALBSTADT) GMBH', 0x001053: u'COMPUTER TECHNOLOGY CORP.', 0x001054: u'CISCO SYSTEMS, INC.', 0x001055: u'FUJITSU MICROELECTRONICS, INC.', 0x001056: u'SODICK CO., LTD.', 0x001057: u'Rebel.com, Inc.', 0x001058: u'ArrowPoint Communications', 0x001059: u'DIABLO RESEARCH CO. LLC', 0x00105A: u'3COM CORPORATION', 0x00105B: u'NET INSIGHT AB', 0x00105C: u'QUANTUM DESIGNS (H.K.) LTD.', 0x00105D: u'Draeger Medical', 0x00105E: u'HEKIMIAN LABORATORIES, INC.', 0x00105F: u'IN-SNEC', 0x001060: u'BILLIONTON SYSTEMS, INC.', 0x001061: u'HOSTLINK CORP.', 0x001062: u'NX SERVER, ILNC.', 0x001063: u'STARGUIDE DIGITAL NETWORKS', 0x001064: u'DNPG, LLC', 0x001065: u'RADYNE CORPORATION', 0x001066: u'ADVANCED CONTROL SYSTEMS, INC.', 0x001067: u'REDBACK NETWORKS, INC.', 0x001068: u'COMOS TELECOM', 0x001069: u'HELIOSS COMMUNICATIONS, INC.', 0x00106A: u'DIGITAL MICROWAVE CORPORATION', 0x00106B: u'SONUS NETWORKS, INC.', 0x00106C: u'INFRATEC PLUS GmbH', 0x00106D: u'Axxcelera Broadband Wireless', 0x00106E: u'TADIRAN COM. 
LTD.', 0x00106F: u'TRENTON TECHNOLOGY INC.', 0x001070: u'CARADON TREND LTD.', 0x001071: u'ADVANET INC.', 0x001072: u'GVN TECHNOLOGIES, INC.', 0x001073: u'TECHNOBOX, INC.', 0x001074: u'ATEN INTERNATIONAL CO., LTD.', 0x001075: u'Maxtor Corporation', 0x001076: u'EUREM GmbH', 0x001077: u'SAF DRIVE SYSTEMS, LTD.', 0x001078: u'NUERA COMMUNICATIONS, INC.', 0x001079: u'CISCO SYSTEMS, INC.', 0x00107A: u'AmbiCom, Inc.', 0x00107B: u'CISCO SYSTEMS, INC.', 0x00107C: u'P-COM, INC.', 0x00107D: u'AURORA COMMUNICATIONS, LTD.', 0x00107E: u'BACHMANN ELECTRONIC GmbH', 0x00107F: u'CRESTRON ELECTRONICS, INC.', 0x001080: u'METAWAVE COMMUNICATIONS', 0x001081: u'DPS, INC.', 0x001082: u'JNA TELECOMMUNICATIONS LIMITED', 0x001083: u'HEWLETT-PACKARD COMPANY', 0x001084: u'K-BOT COMMUNICATIONS', 0x001085: u'POLARIS COMMUNICATIONS, INC.', 0x001086: u'ATTO TECHNOLOGY, INC.', 0x001087: u'Xstreamis PLC', 0x001088: u'AMERICAN NETWORKS INC.', 0x001089: u'WebSonic', 0x00108A: u'TeraLogic, Inc.', 0x00108B: u'LASERANIMATION SOLLINGER GmbH', 0x00108C: u'FUJITSU TELECOMMUNICATIONS EUROPE, LTD.', 0x00108D: u'JOHNSON CONTROLS, INC.', 0x00108E: u'HUGH SYMONS CONCEPT Technologies Ltd.', 0x00108F: u'RAPTOR SYSTEMS', 0x001090: u'CIMETRICS, INC.', 0x001091: u'NO WIRES NEEDED BV', 0x001092: u'NETCORE INC.', 0x001093: u'CMS COMPUTERS, LTD.', 0x001094: u'Performance Analysis Broadband, Spirent plc', 0x001095: u'Thomson Inc.', 0x001096: u'TRACEWELL SYSTEMS, INC.', 0x001097: u'WinNet Metropolitan Communications Systems, Inc.', 0x001098: u'STARNET TECHNOLOGIES, INC.', 0x001099: u'InnoMedia, Inc.', 0x00109A: u'NETLINE', 0x00109B: u'Emulex Corporation', 0x00109C: u'M-SYSTEM CO., LTD.', 0x00109D: u'CLARINET SYSTEMS, INC.', 0x00109E: u'AWARE, INC.', 0x00109F: u'PAVO, INC.', 0x0010A0: u'INNOVEX TECHNOLOGIES, INC.', 0x0010A1: u'KENDIN SEMICONDUCTOR, INC.', 0x0010A2: u'TNS', 0x0010A3: u'OMNITRONIX, INC.', 0x0010A4: u'XIRCOM', 0x0010A5: u'OXFORD INSTRUMENTS', 0x0010A6: u'CISCO SYSTEMS, INC.', 0x0010A7: u'UNEX TECHNOLOGY CORPORATION', 0x0010A8: u'RELIANCE COMPUTER CORP.', 0x0010A9: u'ADHOC TECHNOLOGIES', 0x0010AA: u'MEDIA4, INC.', 0x0010AB: u'KOITO INDUSTRIES, LTD.', 0x0010AC: u'IMCI TECHNOLOGIES', 0x0010AD: u'SOFTRONICS USB, INC.', 0x0010AE: u'SHINKO ELECTRIC INDUSTRIES CO.', 0x0010AF: u'TAC SYSTEMS, INC.', 0x0010B0: u'MERIDIAN TECHNOLOGY CORP.', 0x0010B1: u'FOR-A CO., LTD.', 0x0010B2: u'COACTIVE AESTHETICS', 0x0010B3: u'NOKIA MULTIMEDIA TERMINALS', 0x0010B4: u'ATMOSPHERE NETWORKS', 0x0010B5: u'ACCTON TECHNOLOGY CORPORATION', 0x0010B6: u'ENTRATA COMMUNICATIONS CORP.', 0x0010B7: u'COYOTE TECHNOLOGIES, LLC', 0x0010B8: u'ISHIGAKI COMPUTER SYSTEM CO.', 0x0010B9: u'MAXTOR CORP.', 0x0010BA: u'MARTINHO-DAVIS SYSTEMS, INC.', 0x0010BB: u'DATA & INFORMATION TECHNOLOGY', 0x0010BC: u'Aastra Telecom', 0x0010BD: u'THE TELECOMMUNICATION TECHNOLOGY COMMITTEE', 0x0010BE: u'TELEXIS CORP.', 0x0010BF: u'InterAir Wireless', 0x0010C0: u'ARMA, INC.', 0x0010C1: u'OI ELECTRIC CO., LTD.', 0x0010C2: u'WILLNET, INC.', 0x0010C3: u'CSI-CONTROL SYSTEMS', 0x0010C4: u'MEDIA LINKS CO., LTD.', 0x0010C5: u'PROTOCOL TECHNOLOGIES, INC.', 0x0010C6: u'USI', 0x0010C7: u'DATA TRANSMISSION NETWORK', 0x0010C8: u'COMMUNICATIONS ELECTRONICS SECURITY GROUP', 0x0010C9: u'MITSUBISHI ELECTRONICS LOGISTIC SUPPORT CO.', 0x0010CA: u'INTEGRAL ACCESS', 0x0010CB: u'FACIT K.K.', 0x0010CC: u'CLP COMPUTER LOGISTIK PLANUNG GmbH', 0x0010CD: u'INTERFACE CONCEPT', 0x0010CE: u'VOLAMP, LTD.', 0x0010CF: u'FIBERLANE COMMUNICATIONS', 0x0010D0: u'WITCOM, LTD.', 0x0010D1: u'Top Layer Networks, Inc.', 0x0010D2: u'NITTO 
TSUSHINKI CO., LTD', 0x0010D3: u'GRIPS ELECTRONIC GMBH', 0x0010D4: u'STORAGE COMPUTER CORPORATION', 0x0010D5: u'IMASDE CANARIAS, S.A.', 0x0010D6: u'ITT - A/CD', 0x0010D7: u'ARGOSY RESEARCH INC.', 0x0010D8: u'CALISTA', 0x0010D9: u'IBM JAPAN, FUJISAWA MT+D', 0x0010DA: u'MOTION ENGINEERING, INC.', 0x0010DB: u'Juniper Networks, Inc.', 0x0010DC: u'MICRO-STAR INTERNATIONAL CO., LTD.', 0x0010DD: u'ENABLE SEMICONDUCTOR, INC.', 0x0010DE: u'INTERNATIONAL DATACASTING CORPORATION', 0x0010DF: u'RISE COMPUTER INC.', 0x0010E0: u'COBALT MICROSERVER, INC.', 0x0010E1: u'S.I. TECH, INC.', 0x0010E2: u'ArrayComm, Inc.', 0x0010E3: u'COMPAQ COMPUTER CORPORATION', 0x0010E4: u'NSI CORPORATION', 0x0010E5: u'SOLECTRON TEXAS', 0x0010E6: u'APPLIED INTELLIGENT SYSTEMS, INC.', 0x0010E7: u'BreezeCom', 0x0010E8: u'TELOCITY, INCORPORATED', 0x0010E9: u'RAIDTEC LTD.', 0x0010EA: u'ADEPT TECHNOLOGY', 0x0010EB: u'SELSIUS SYSTEMS, INC.', 0x0010EC: u'RPCG, LLC', 0x0010ED: u'SUNDANCE TECHNOLOGY, INC.', 0x0010EE: u'CTI PRODUCTS, INC.', 0x0010EF: u'DBTEL INCORPORATED', 0x0010F1: u'I-O CORPORATION', 0x0010F2: u'ANTEC', 0x0010F3: u'Nexcom International Co., Ltd.', 0x0010F4: u'VERTICAL NETWORKS, INC.', 0x0010F5: u'AMHERST SYSTEMS, INC.', 0x0010F6: u'CISCO SYSTEMS, INC.', 0x0010F7: u'IRIICHI TECHNOLOGIES Inc.', 0x0010F8: u'TEXIO CORPORATION', 0x0010F9: u'UNIQUE SYSTEMS, INC.', 0x0010FA: u'ZAYANTE, INC.', 0x0010FB: u'ZIDA TECHNOLOGIES LIMITED', 0x0010FC: u'BROADBAND NETWORKS, INC.', 0x0010FD: u'COCOM A/S', 0x0010FE: u'DIGITAL EQUIPMENT CORPORATION', 0x0010FF: u'CISCO SYSTEMS, INC.', 0x001100: u'RAM Industries, LLC', 0x001101: u'CET Technologies Pte Ltd', 0x001102: u'Aurora Multimedia Corp.', 0x001103: u'kawamura electric inc.', 0x001104: u'TELEXY', 0x001105: u'Sunplus Technology Co., Ltd.', 0x001106: u'Siemens NV (Belgium)', 0x001107: u'RGB Networks Inc.', 0x001108: u'Orbital Data Corporation', 0x001109: u'Micro-Star International', 0x00110A: u'Hewlett Packard', 0x00110B: u'Franklin Technology Systems', 0x00110C: u'Atmark Techno, Inc.', 0x00110D: u'SANBlaze Technology, Inc.', 0x00110E: u'Tsurusaki Sealand Transportation Co. Ltd.', 0x00110F: u'netplat,Inc.', 0x001110: u'Maxanna Technology Co., Ltd.', 0x001111: u'Intel Corporation', 0x001112: u'Honeywell CMSS', 0x001113: u'Fraunhofer FOKUS', 0x001114: u'EverFocus Electronics Corp.', 0x001115: u'EPIN Technologies, Inc.', 0x001116: u'COTEAU VERT CO., LTD.', 0x001117: u'CESNET', 0x001118: u'BLX IC Design Corp., Ltd.', 0x001119: u'Solteras, Inc.', 0x00111A: u'Motorola BCS', 0x00111B: u'Targa Systems Div L-3 Communications Canada', 0x00111C: u'Pleora Technologies Inc.', 0x00111D: u'Hectrix Limited', 0x00111E: u'EPSG (Ethernet Powerlink Standardization Group)', 0x00111F: u'Doremi Labs, Inc.', 0x001120: u'Cisco Systems', 0x001121: u'Cisco Systems', 0x001122: u'CIMSYS Inc', 0x001123: u'Appointech, Inc.', 0x001124: u'Apple Computer', 0x001125: u'IBM Corporation', 0x001126: u'Venstar Inc.', 0x001127: u'TASI, Inc', 0x001128: u'Streamit', 0x001129: u'Paradise Datacom Ltd.', 0x00112A: u'Niko NV', 0x00112B: u'NetModule', 0x00112C: u'IZT GmbH', 0x00112D: u'Guys Without Ties', 0x00112E: u'CEICOM', 0x00112F: u'ASUSTek Computer Inc.', 0x001130: u'Allied Telesis (Hong Kong) Ltd.', 0x001131: u'UNATECH. CO.,LTD', 0x001132: u'Synology Incorporated', 0x001133: u'Siemens Austria SIMEA', 0x001134: u'MediaCell, Inc.', 0x001135: u'Grandeye Ltd', 0x001136: u'Goodrich Sensor Systems', 0x001137: u'AICHI ELECTRIC CO., LTD.', 0x001138: u'TAISHIN CO., LTD.', 0x001139: u'STOEBER ANTRIEBSTECHNIK GmbH + Co. 
KG.', 0x00113A: u'SHINBORAM', 0x00113B: u'Micronet Communications Inc.', 0x00113C: u'Micronas GmbH', 0x00113D: u'KN SOLTEC CO.,LTD.', 0x00113E: u'JL Corporation', 0x00113F: u'Alcatel DI', 0x001140: u'Nanometrics Inc.', 0x001141: u'GoodMan Corporation', 0x001142: u'e-SMARTCOM INC.', 0x001143: u'DELL INC.', 0x001144: u'Assurance Technology Corp', 0x001145: u'ValuePoint Networks', 0x001146: u'Telecard-Pribor Ltd', 0x001147: u'Secom-Industry co.LTD.', 0x001148: u'Prolon Control Systems', 0x001149: u'Proliphix LLC', 0x00114A: u'KAYABA INDUSTRY Co,.Ltd.', 0x00114B: u'Francotyp-Postalia AG & Co. KG', 0x00114C: u'caffeina applied research ltd.', 0x00114D: u'Atsumi Electric Co.,LTD.', 0x00114E: u'690885 Ontario Inc.', 0x00114F: u'US Digital Television, Inc', 0x001150: u'Belkin Corporation', 0x001151: u'Mykotronx', 0x001152: u'Eidsvoll Electronics AS', 0x001153: u'Trident Tek, Inc.', 0x001154: u'Webpro Technologies Inc.', 0x001155: u'Sevis Systems', 0x001156: u'Pharos Systems NZ', 0x001157: u'OF Networks Co., Ltd.', 0x001158: u'Nortel Networks', 0x001159: u'MATISSE NETWORKS INC', 0x00115A: u'Ivoclar Vivadent AG', 0x00115B: u'Elitegroup Computer System Co. (ECS)', 0x00115C: u'Cisco', 0x00115D: u'Cisco', 0x00115E: u'ProMinent Dosiertechnik GmbH', 0x00115F: u'Intellix Co., Ltd.', 0x001160: u'ARTDIO Company Co., LTD', 0x001161: u'NetStreams, LLC', 0x001162: u'STAR MICRONICS CO.,LTD.', 0x001163: u'SYSTEM SPA DEPT. ELECTRONICS', 0x001164: u'ACARD Technology Corp.', 0x001165: u'Znyx Networks', 0x001166: u'Taelim Electronics Co., Ltd.', 0x001167: u'Integrated System Solution Corp.', 0x001168: u'HomeLogic LLC', 0x001169: u'EMS Satcom', 0x00116A: u'Domo Ltd', 0x00116B: u'Digital Data Communications Asia Co.,Ltd', 0x00116C: u'Nanwang Multimedia Inc.,Ltd', 0x00116D: u'American Time and Signal', 0x00116E: u'PePLink Ltd.', 0x00116F: u'Netforyou Co., LTD.', 0x001170: u'GSC SRL', 0x001171: u'DEXTER Communications, Inc.', 0x001172: u'COTRON CORPORATION', 0x001173: u'Adtron Corporation', 0x001174: u'Wibhu Technologies, Inc.', 0x001175: u'PathScale, Inc.', 0x001176: u'Intellambda Systems, Inc.', 0x001177: u'COAXIAL NETWORKS, INC.', 0x001178: u'Chiron Technology Ltd', 0x001179: u'Singular Technology Co. Ltd.', 0x00117A: u'Singim International Corp.', 0x00117B: u'Büchi Labortechnik AG', 0x00117C: u'e-zy.net', 0x00117D: u'ZMD America, Inc.', 0x00117E: u'Progeny Inc.', 0x00117F: u'Neotune Information Technology Corporation,.LTD', 0x001180: u'Motorola BCS', 0x001181: u'InterEnergy Co.Ltd,', 0x001182: u'IMI Norgren Ltd', 0x001183: u'PSC Scanning, Inc', 0x001184: u'Humo Laboratory,Ltd.', 0x001185: u'Hewlett Packard', 0x001186: u'Prime Systems, Inc.', 0x001187: u'Category Solutions, Inc', 0x001188: u'Enterasys', 0x001189: u'Aerotech Inc', 0x00118A: u'Viewtran Technology Limited', 0x00118B: u'NetDevices Inc.', 0x00118C: u'Missouri Department of Transportation', 0x00118D: u'Hanchang System Corp.', 0x00118E: u'Halytech Mace', 0x00118F: u'EUTECH INSTRUMENTS PTE. 
LTD.', 0x001190: u'Digital Design Corporation', 0x001191: u'CTS-Clima Temperatur Systeme GmbH', 0x001192: u'Cisco Systems', 0x001193: u'Cisco Systems', 0x001194: u'Chi Mei Communication Systems, Inc.', 0x001195: u'D-Link Corporation', 0x001196: u'Actuality Systems, Inc.', 0x001197: u'Monitoring Technologies Limited', 0x001198: u'Prism Media Products Limited', 0x001199: u'2wcom GmbH', 0x00119A: u'Alkeria srl', 0x00119B: u'Telesynergy Research Inc.', 0x00119C: u'EP&T Energy', 0x00119D: u'Diginfo Technology Corporation', 0x00119E: u'Solectron Brazil', 0x00119F: u'Nokia Danmark A/S', 0x0011A0: u'Vtech Engineering Canada Ltd', 0x0011A1: u'VISION NETWARE CO.,LTD', 0x0011A2: u'Manufacturing Technology Inc', 0x0011A3: u'LanReady Technologies Inc.', 0x0011A4: u'JStream Technologies Inc.', 0x0011A5: u'Fortuna Electronic Corp.', 0x0011A6: u'Sypixx Networks', 0x0011A7: u'Infilco Degremont Inc.', 0x0011A8: u'Quest Technologies', 0x0011A9: u'MOIMSTONE Co., LTD', 0x0011AA: u'Uniclass Technology, Co., LTD', 0x0011AB: u'TRUSTABLE TECHNOLOGY CO.,LTD.', 0x0011AC: u'Simtec Electronics', 0x0011AD: u'Shanghai Ruijie Technology', 0x0011AE: u'Motorola BCS', 0x0011AF: u'Medialink-i,Inc', 0x0011B0: u'Fortelink Inc.', 0x0011B1: u'BlueExpert Technology Corp.', 0x0011B2: u'2001 Technology Inc.', 0x0011B3: u'YOSHIMIYA CO.,LTD.', 0x0011B4: u'Westermo Teleindustri AB', 0x0011B5: u'Shenzhen Powercom Co.,Ltd', 0x0011B6: u'Open Systems International', 0x0011B7: u'Melexis Nederland B.V.', 0x0011B8: u'Liebherr - Elektronik GmbH', 0x0011B9: u'Inner Range Pty. Ltd.', 0x0011BA: u'Elexol Pty Ltd', 0x0011BB: u'Cisco Systems', 0x0011BC: u'Cisco Systems', 0x0011BD: u'Bombardier Transportation', 0x0011BE: u'AGP Telecom Co. Ltd', 0x0011BF: u'AESYS S.p.A.', 0x0011C0: u'Aday Technology Inc', 0x0011C1: u'4P MOBILE DATA PROCESSING', 0x0011C2: u'United Fiber Optic Communication', 0x0011C3: u'Transceiving System Technology Corporation', 0x0011C4: u'Terminales de Telecomunicacion Terrestre, S.L.', 0x0011C5: u'TEN Technology', 0x0011C6: u'Seagate Technology LLC', 0x0011C7: u'RAYMARINE Group Ltd.', 0x0011C8: u'Powercom Co., Ltd.', 0x0011C9: u'MTT Corporation', 0x0011CA: u'Long Range Systems, Inc.', 0x0011CB: u'Jacobsons RKH AB', 0x0011CC: u'Guangzhou Jinpeng Group Co.,Ltd.', 0x0011CD: u'Axsun Technologies', 0x0011CE: u'Ubisense Limited', 0x0011CF: u'Thrane & Thrane A/S', 0x0011D0: u'Tandberg Data ASA', 0x0011D1: u'Soft Imaging System GmbH', 0x0011D2: u'Perception Digital Ltd', 0x0011D3: u'NextGenTel Holding ASA', 0x0011D4: u'NetEnrich, Inc', 0x0011D5: u'Hangzhou Sunyard System Engineering Co.,Ltd.', 0x0011D6: u'HandEra, Inc.', 0x0011D7: u'eWerks Inc', 0x0011D8: u'ASUSTek Computer Inc.', 0x0011D9: u'TiVo', 0x0011DA: u'Vivaas Technology Inc.', 0x0011DB: u'Land-Cellular Corporation', 0x0011DC: u'Glunz & Jensen', 0x0011DD: u'FROMUS TEC. 
Co., Ltd.', 0x0011DE: u'EURILOGIC', 0x0011DF: u'Arecont Systems', 0x0011E0: u'U-MEDIA Communications, Inc.', 0x0011E1: u'BEKO Electronics Co.', 0x0011E2: u'Hua Jung Components Co., Ltd.', 0x0011E3: u'Thomson, Inc.', 0x0011E4: u'Danelec Electronics A/S', 0x0011E5: u'KCodes Corporation', 0x0011E6: u'Scientific Atlanta', 0x0011E7: u'WORLDSAT - Texas de France', 0x0011E8: u'Tixi.Com', 0x0011E9: u'STARNEX CO., LTD.', 0x0011EA: u'IWICS Inc.', 0x0011EB: u'Innovative Integration', 0x0011EC: u'AVIX INC.', 0x0011ED: u'802 Global', 0x0011EE: u'Estari, Inc.', 0x0011EF: u'Conitec Datensysteme GmbH', 0x0011F0: u'Wideful Limited', 0x0011F1: u'QinetiQ Ltd', 0x0011F2: u'Institute of Network Technologies', 0x0011F3: u'Gavitec AG- mobile digit', 0x0011F4: u'woori-net', 0x0011F5: u'ASKEY COMPUTER CORP.', 0x0011F6: u'Asia Pacific Microsystems , Inc.', 0x0011F7: u'Shenzhen Forward Industry Co., Ltd', 0x0011F8: u'AIRAYA Corp', 0x0011F9: u'Nortel Networks', 0x0011FA: u'Rane Corporation', 0x0011FB: u'Heidelberg Engineering GmbH', 0x0011FC: u'HARTING Electric Gmbh & Co.KG', 0x0011FD: u'KORG INC.', 0x0011FE: u'Keiyo System Research, Inc.', 0x0011FF: u'Digitro Tecnologia Ltda', 0x001200: u'Cisco', 0x001201: u'Cisco', 0x001202: u'Audio International Inc.', 0x001203: u'Activ Networks', 0x001204: u'u10 Networks, Inc.', 0x001205: u'Terrasat Communications, Inc.', 0x001206: u'iQuest (NZ) Ltd', 0x001207: u'Head Strong International Limited', 0x001208: u'Gantner Electronic GmbH', 0x001209: u'Fastrax Ltd', 0x00120A: u'Emerson Electric GmbH & Co. OHG', 0x00120B: u'Chinasys Technologies Limited', 0x00120C: u'CE-Infosys Pte Ltd', 0x00120D: u'Advanced Telecommunication Technologies, Inc.', 0x00120E: u'AboCom', 0x00120F: u'IEEE 802.3', 0x001210: u'WideRay Corp', 0x001211: u'Protechna Herbst GmbH & Co. 
KG', 0x001212: u'PLUS Vision Corporation', 0x001213: u'Metrohm AG', 0x001214: u'Koenig & Bauer AG', 0x001215: u'iStor Networks, Inc.', 0x001216: u'ICP Internet Communication Payment AG', 0x001217: u'Cisco-Linksys, LLC', 0x001218: u'ARUZE Corporation', 0x001219: u'Ahead Communication Systems Inc', 0x00121A: u'Techno Soft Systemnics Inc.', 0x00121B: u'Sound Devices, LLC', 0x00121C: u'PARROT S.A.', 0x00121D: u'Netfabric Corporation', 0x00121E: u'Juniper Networks, Inc.', 0x00121F: u'Harding Intruments', 0x001220: u'Cadco Systems', 0x001221: u'B.Braun Melsungen AG', 0x001222: u'Skardin (UK) Ltd', 0x001223: u'Pixim', 0x001224: u'NexQL Corporation', 0x001225: u'Motorola BCS', 0x001226: u'Japan Direx Corporation', 0x001227: u'Franklin Electric Co., Inc.', 0x001228: u'Data Ltd.', 0x001229: u'BroadEasy Technologies Co.,Ltd', 0x00122A: u'VTech Telecommunications Ltd.', 0x00122B: u'Virbiage Pty Ltd', 0x00122C: u'Soenen Controls N.V.', 0x00122D: u'SiNett Corporation', 0x00122E: u'Signal Technology - AISD', 0x00122F: u'Sanei Electric Inc.', 0x001230: u'Picaso Infocommunication CO., LTD.', 0x001231: u'Motion Control Systems, Inc.', 0x001232: u'LeWiz Communications Inc.', 0x001233: u'JRC TOKKI Co.,Ltd.', 0x001234: u'Camille Bauer', 0x001235: u'Andrew Corporation', 0x001236: u'ConSentry Networks', 0x001237: u'Texas Instruments', 0x001238: u'SetaBox Technology Co., Ltd.', 0x001239: u'S Net Systems Inc.', 0x00123A: u'Posystech Inc., Co.', 0x00123B: u'KeRo Systems ApS', 0x00123C: u'IP3 Networks, Inc.', 0x00123D: u'GES', 0x00123E: u'ERUNE technology Co., Ltd.', 0x00123F: u'Dell Inc', 0x001240: u'AMOI ELECTRONICS CO.,LTD', 0x001241: u'a2i marketing center', 0x001242: u'Millennial Net', 0x001243: u'Cisco', 0x001244: u'Cisco', 0x001245: u'Zellweger Analytics, Inc.', 0x001246: u'T.O.M TECHNOLOGY INC..', 0x001247: u'Samsung Electronics Co., Ltd.', 0x001248: u'Kashya Inc.', 0x001249: u'Delta Elettronica S.p.A.', 0x00124A: u'Dedicated Devices, Inc.', 0x00124B: u'Chipcon AS', 0x00124C: u'BBWM Corporation', 0x00124D: u'Inducon BV', 0x00124E: u'XAC AUTOMATION CORP.', 0x00124F: u'Tyco Thermal Controls LLC.', 0x001250: u'Tokyo Aircaft Instrument Co., Ltd.', 0x001251: u'SILINK', 0x001252: u'Citronix, LLC', 0x001253: u'AudioDev AB', 0x001254: u'Spectra Technologies Holdings Company Ltd', 0x001255: u'NetEffect Incorporated', 0x001256: u'LG INFORMATION & COMM.', 0x001257: u'LeapComm Communication Technologies Inc.', 0x001258: u'Activis Polska', 0x001259: u'THERMO ELECTRON KARLSRUHE', 0x00125A: u'Microsoft Corporation', 0x00125B: u'KAIMEI ELECTRONI', 0x00125C: u'Green Hills Software, Inc.', 0x00125D: u'CyberNet Inc.', 0x00125E: u'CAEN', 0x00125F: u'AWIND Inc.', 0x001260: u'Stanton Magnetics,inc.', 0x001261: u'Adaptix, Inc', 0x001262: u'Nokia Danmark A/S', 0x001263: u'Data Voice Technologies GmbH', 0x001264: u'daum electronic gmbh', 0x001265: u'Enerdyne Technologies, Inc.', 0x001266: u'PRIVATE', 0x001267: u'Matsushita Electronic Components Co., Ltd.', 0x001268: u'IPS d.o.o.', 0x001269: u'Value Electronics', 0x00126A: u'OPTOELECTRONICS Co., Ltd.', 0x00126B: u'Ascalade Communications Limited', 0x00126C: u'Visonic Ltd.', 0x00126D: u'University of California, Berkeley', 0x00126E: u'Seidel Elektronik GmbH Nfg.KG', 0x00126F: u'Rayson Technology Co., Ltd.', 0x001270: u'NGES Denro Systems', 0x001271: u'Measurement Computing Corp', 0x001272: u'Redux Communications Ltd.', 0x001273: u'Stoke Inc', 0x001274: u'NIT lab', 0x001275: u'Moteiv Corporation', 0x001276: u'Microsol Holdings Ltd.', 0x001277: u'Korenix Technologies Co., Ltd.', 
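# NOTE (illustrative comment, not registry data): each key in this mapping is
# a 24-bit IEEE OUI -- the first three octets of a MAC address -- and each
# value is the vendor name as registered with the IEEE at the time this table
# was generated. A minimal lookup sketch, assuming the dict is bound to a
# hypothetical name `oui_registry`:
#
#     def vendor_for_mac(mac):
#         # '00:12:F0:12:34:56' -> 0x0012F0 -> u'Intel Corporate'
#         oui = int(mac.replace(':', '').replace('-', '')[:6], 16)
#         return oui_registry.get(oui)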
0x001278: u'International Bar Code', 0x001279: u'Hewlett Packard', 0x00127A: u'Sanyu Industry Co.,Ltd.', 0x00127B: u'VIA Networking Technologies, Inc.', 0x00127C: u'SWEGON AB', 0x00127D: u'MobileAria', 0x00127E: u'Digital Lifestyles Group, Inc.', 0x00127F: u'Cisco', 0x001280: u'Cisco', 0x001281: u'CIEFFE srl', 0x001282: u'Qovia', 0x001283: u'Nortel Networks', 0x001284: u'Lab33 Srl', 0x001285: u'Gizmondo Europe Ltd', 0x001286: u'ENDEVCO CORP', 0x001287: u'Digital Everywhere Unterhaltungselektronik GmbH', 0x001288: u'2Wire, Inc', 0x001289: u'Advance Sterilization Products', 0x00128A: u'Motorola PCS', 0x00128B: u'Sensory Networks Inc', 0x00128C: u'Woodward Governor', 0x00128D: u'STB Datenservice GmbH', 0x00128E: u'Q-Free ASA', 0x00128F: u'Montilio', 0x001290: u'KYOWA Electric & Machinery Corp.', 0x001291: u'KWS Computersysteme GmbH', 0x001292: u'Griffin Technology', 0x001293: u'GE Energy', 0x001294: u'Eudyna Devices Inc.', 0x001295: u'Aiware Inc.', 0x001296: u'Addlogix', 0x001297: u'O2Micro, Inc.', 0x001298: u'MICO ELECTRIC(SHENZHEN) LIMITED', 0x001299: u'Ktech Telecommunications Inc', 0x00129A: u'IRT Electronics Pty Ltd', 0x00129B: u'E2S Electronic Engineering Solutions, S.L.', 0x00129C: u'Yulinet', 0x00129D: u'FIRST INTERNATIONAL COMPUTER DO BRASIL LTDA', 0x00129E: u'Surf Communications Inc.', 0x00129F: u'RAE Systems, Inc.', 0x0012A0: u'NeoMeridian Sdn Bhd', 0x0012A1: u'BluePacket Communications Co., Ltd.', 0x0012A2: u'VITA', 0x0012A3: u'Trust International B.V.', 0x0012A4: u'ThingMagic, LLC', 0x0012A5: u'Stargen, Inc.', 0x0012A6: u'Lake Technology Ltd', 0x0012A7: u'ISR TECHNOLOGIES Inc', 0x0012A8: u'intec GmbH', 0x0012A9: u'3COM EUROPE LTD', 0x0012AA: u'IEE, Inc.', 0x0012AB: u'WiLife, Inc.', 0x0012AC: u'ONTIMETEK INC.', 0x0012AD: u'IDS GmbH', 0x0012AE: u'HLS HARD-LINE Solutions Inc.', 0x0012AF: u'ELPRO Technologies', 0x0012B0: u'Efore Oyj (Plc)', 0x0012B1: u'Dai Nippon Printing Co., Ltd', 0x0012B2: u'AVOLITES LTD.', 0x0012B3: u'Advance Wireless Technology Corp.', 0x0012B4: u'Work GmbH', 0x0012B5: u'Vialta, Inc.', 0x0012B6: u'Santa Barbara Infrared, Inc.', 0x0012B7: u'PTW Freiburg', 0x0012B8: u'G2 Microsystems', 0x0012B9: u'Fusion Digital Technology', 0x0012BA: u'FSI Systems, Inc.', 0x0012BB: u'Telecommunications Industry Association TR-41 Committee', 0x0012BC: u'Echolab LLC', 0x0012BD: u'Avantec Manufacturing Limited', 0x0012BE: u'Astek Corporation', 0x0012BF: u'Arcadyan Technology Corporation', 0x0012C0: u'HotLava Systems, Inc.', 0x0012C1: u'Check Point Software Technologies', 0x0012C2: u'Apex Electronics Factory', 0x0012C3: u'WIT S.A.', 0x0012C4: u'Viseon, Inc.', 0x0012C5: u'V-Show Technology Co.Ltd', 0x0012C6: u'TGC America, Inc', 0x0012C7: u'SECURAY Technologies Ltd.Co.', 0x0012C8: u'Perfect tech', 0x0012C9: u'Motorola BCS', 0x0012CA: u'Hansen Telecom', 0x0012CB: u'CSS Inc.', 0x0012CC: u'Bitatek CO., LTD', 0x0012CD: u'ASEM SpA', 0x0012CE: u'Advanced Cybernetics Group', 0x0012CF: u'Accton Technology Corporation', 0x0012D0: u'Gossen-Metrawatt-GmbH', 0x0012D1: u'Texas Instruments Inc', 0x0012D2: u'Texas Instruments', 0x0012D3: u'Zetta Systems, Inc.', 0x0012D4: u'Princeton Technology, Ltd', 0x0012D5: u'Motion Reality Inc.', 0x0012D6: u'Jiangsu Yitong High-Tech Co.,Ltd', 0x0012D7: u'Invento Networks, Inc.', 0x0012D8: u'International Games System Co., Ltd.', 0x0012D9: u'Cisco Systems', 0x0012DA: u'Cisco Systems', 0x0012DB: u'ZIEHL industrie-elektronik GmbH + Co KG', 0x0012DC: u'SunCorp Industrial Limited', 0x0012DD: u'Shengqu Information Technology (Shanghai) Co., Ltd.', 0x0012DE: u'Radio 
Components Sweden AB', 0x0012DF: u'Novomatic AG', 0x0012E0: u'Codan Limited', 0x0012E1: u'Alliant Networks, Inc', 0x0012E2: u'ALAXALA Networks Corporation', 0x0012E3: u'Agat-RT, Ltd.', 0x0012E4: u'ZIEHL industrie-electronik GmbH + Co KG', 0x0012E5: u'Time America, Inc.', 0x0012E6: u'SPECTEC COMPUTER CO., LTD.', 0x0012E7: u'Projectek Networking Electronics Corp.', 0x0012E8: u'Fraunhofer IMS', 0x0012E9: u'Abbey Systems Ltd', 0x0012EA: u'Trane', 0x0012EB: u'R2DI, LLC', 0x0012EC: u'Movacolor b.v.', 0x0012ED: u'AVG Advanced Technologies', 0x0012EE: u'Sony Ericsson Mobile Communications AB', 0x0012EF: u'OneAccess SA', 0x0012F0: u'Intel Corporate', 0x0012F1: u'IFOTEC', 0x0012F2: u'Foundry Networks', 0x0012F3: u'connectBlue AB', 0x0012F4: u'Belco International Co.,Ltd.', 0x0012F5: u'Prolificx Ltd', 0x0012F6: u'MDK CO.,LTD.', 0x0012F7: u'Xiamen Xinglian Electronics Co., Ltd.', 0x0012F8: u'WNI Resources, LLC', 0x0012F9: u'URYU SEISAKU, LTD.', 0x0012FA: u'THX LTD', 0x0012FB: u'Samsung Electronics', 0x0012FC: u'PLANET System Co.,LTD', 0x0012FD: u'OPTIMUS IC S.A.', 0x0012FE: u'Lenovo Mobile Communication Technology Ltd.', 0x0012FF: u'Lely Industries N.V.', 0x001300: u'IT-FACTORY, INC.', 0x001301: u'IronGate S.L.', 0x001302: u'Intel Corporate', 0x001303: u'GateConnect Technologies GmbH', 0x001304: u'Flaircomm Technologies Co. LTD', 0x001305: u'Epicom, Inc.', 0x001306: u'Always On Wireless', 0x001307: u'Paravirtual Corporation', 0x001308: u'Nuvera Fuel Cells', 0x001309: u'Ocean Broadband Networks', 0x00130A: u'Nortel', 0x00130B: u'Mextal B.V.', 0x00130C: u'HF System Corporation', 0x00130D: u'GALILEO AVIONICA', 0x00130E: u'Focusrite Audio Engineering Limited', 0x00130F: u'EGEMEN Bilgisayar Muh San ve Tic LTD STI', 0x001310: u'Cisco-Linksys, LLC', 0x001311: u'ARRIS International', 0x001312: u'Amedia Networks Inc.', 0x001313: u'GuangZhou Post & Telecom Equipment ltd', 0x001314: u'Asiamajor Inc.', 0x001315: u'SONY Computer Entertainment inc,', 0x001316: u'L-S-B GmbH', 0x001317: u'GN Netcom as', 0x001318: u'DGSTATION Co., Ltd.', 0x001319: u'Cisco Systems', 0x00131A: u'Cisco Systems', 0x00131B: u'BeCell Innovations Corp.', 0x00131C: u'LiteTouch, Inc.', 0x00131D: u'Scanvaegt International A/S', 0x00131E: u'Peiker acustic GmbH & Co. KG', 0x00131F: u'NxtPhase T&D, Corp.', 0x001320: u'Intel Corporate', 0x001321: u'Hewlett Packard', 0x001322: u'DAQ Electronics, Inc.', 0x001323: u'Cap Co., Ltd.', 0x001324: u'Schneider Electric Ultra Terminal', 0x001325: u'ImmenStar Inc.', 0x001326: u'ECM Systems Ltd', 0x001327: u'Data Acquisitions limited', 0x001328: u'Westech Korea Inc.,', 0x001329: u'VSST Co., LTD', 0x00132A: u'STROM telecom, s. r. 
o.', 0x00132B: u'Phoenix Digital', 0x00132C: u'MAZ Brandenburg GmbH', 0x00132D: u'iWise Communications Pty Ltd', 0x00132E: u'ITian Coporation', 0x00132F: u'Interactek', 0x001330: u'EURO PROTECTION SURVEILLANCE', 0x001331: u'CellPoint Connect', 0x001332: u'Beijing Topsec Network Security Technology Co., Ltd.', 0x001333: u'Baud Technology Inc.', 0x001334: u'Arkados, Inc.', 0x001335: u'VS Industry Berhad', 0x001336: u'Tianjin 712 Communication Broadcasting co., ltd.', 0x001337: u'Orient Power Home Network Ltd.', 0x001338: u'FRESENIUS-VIAL', 0x001339: u'EL-ME AG', 0x00133A: u'VadaTech Inc.', 0x00133B: u'Speed Dragon Multimedia Limited', 0x00133C: u'QUINTRON SYSTEMS INC.', 0x00133D: u'Micro Memory LLC', 0x00133E: u'MetaSwitch', 0x00133F: u'Eppendorf Instrumente GmbH', 0x001340: u'AD.EL s.r.l.', 0x001341: u'Shandong New Beiyang Information Technology Co.,Ltd', 0x001342: u'Vision Research, Inc.', 0x001343: u'Matsushita Electronic Components (Europe) GmbH', 0x001344: u'Fargo Electronics Inc.', 0x001345: u'Eaton Corporation', 0x001346: u'D-Link Corporation', 0x001347: u'BlueTree Wireless Data Inc.', 0x001348: u'Artila Electronics Co., Ltd.', 0x001349: u'ZyXEL Communications Corporation', 0x00134A: u'Engim, Inc.', 0x00134B: u'ToGoldenNet Technology Inc.', 0x00134C: u'YDT Technology International', 0x00134D: u'IPC systems', 0x00134E: u'Valox Systems, Inc.', 0x00134F: u'Tranzeo Wireless Technologies Inc.', 0x001350: u'Silver Spring Networks, Inc', 0x001351: u'Niles Audio Corporation', 0x001352: u'Naztec, Inc.', 0x001353: u'HYDAC Filtertechnik GMBH', 0x001354: u'Zcomax Technologies, Inc.', 0x001355: u'TOMEN Cyber-business Solutions, Inc.', 0x001356: u'target systemelectronic gmbh', 0x001357: u'Soyal Technology Co., Ltd.', 0x001358: u'Realm Systems, Inc.', 0x001359: u'ProTelevision Technologies A/S', 0x00135A: u'Project T&E Limited', 0x00135B: u'PanelLink Cinema, LLC', 0x00135C: u'OnSite Systems, Inc.', 0x00135D: u'NTTPC Communications, Inc.', 0x00135E: u'EAB/RWI/K', 0x00135F: u'Cisco Systems', 0x001360: u'Cisco Systems', 0x001361: u'Biospace Co., Ltd.', 0x001362: u'ShinHeung Precision Co., Ltd.', 0x001363: u'Verascape, Inc.', 0x001364: u'Paradigm Technology Inc..', 0x001365: u'Nortel', 0x001366: u'Neturity Technologies Inc.', 0x001367: u'Narayon. 
Co., Ltd.', 0x001368: u'Maersk Data Defence', 0x001369: u'Honda Electron Co., LED.', 0x00136A: u'Hach Ultra Analytics', 0x00136B: u'E-TEC', 0x00136C: u'PRIVATE', 0x00136D: u'Tentaculus AB', 0x00136E: u'Techmetro Corp.', 0x00136F: u'PacketMotion, Inc.', 0x001370: u'Nokia Danmark A/S', 0x001371: u'Motorola CHS', 0x001372: u'Dell Inc.', 0x001373: u'BLwave Electronics Co., Ltd', 0x001374: u'Attansic Technology Corp.', 0x001375: u'American Security Products Co.', 0x001376: u'Tabor Electronics Ltd.', 0x001377: u'Samsung Electronics CO., LTD', 0x001378: u'QSAN Technology, Inc.', 0x001379: u'PONDER INFORMATION INDUSTRIES LTD.', 0x00137A: u'Netvox Technology Co., Ltd.', 0x00137B: u'Movon Corporation', 0x00137C: u'Kaicom co., Ltd.', 0x00137D: u'Dynalab, Inc.', 0x00137E: u'CorEdge Networks, Inc.', 0x00137F: u'Cisco Systems', 0x001380: u'Cisco Systems', 0x001381: u'CHIPS & Systems, Inc.', 0x001382: u'Cetacea Networks Corporation', 0x001383: u'Application Technologies and Engineering Research Laboratory', 0x001384: u'Advanced Motion Controls', 0x001385: u'Add-On Technology Co., LTD.', 0x001386: u'ABB Inc./Totalflow', 0x001387: u'27M Technologies AB', 0x001388: u'WiMedia Alliance', 0x001389: u'Redes de Telefonía Móvil S.A.', 0x00138A: u'QINGDAO GOERTEK ELECTRONICS CO.,LTD.', 0x00138B: u'Phantom Technologies LLC', 0x00138C: u'Kumyoung.Co.Ltd', 0x00138D: u'Kinghold', 0x00138E: u'FOAB Elektronik AB', 0x00138F: u'Asiarock Incorporation', 0x001390: u'Termtek Computer Co., Ltd', 0x001391: u'OUEN CO.,LTD.', 0x001392: u'Ruckus Wireless', 0x001393: u'Panta Systems, Inc.', 0x001394: u'Infohand Co.,Ltd', 0x001395: u'congatec AG', 0x001396: u'Acbel Polytech Inc.', 0x001397: u'Xsigo Systems, Inc.', 0x001398: u'TrafficSim Co.,Ltd', 0x001399: u'STAC Corporation.', 0x00139A: u'K-ubique ID Corp.', 0x00139B: u'ioIMAGE Ltd.', 0x00139C: u'Exavera Technologies, Inc.', 0x00139D: u'Design of Systems on Silicon S.A.', 0x00139E: u'Ciara Technologies Inc.', 0x00139F: u'Electronics Design Services, Co., Ltd.', 0x0013A0: u'ALGOSYSTEM Co., Ltd.', 0x0013A1: u'Crow Electronic Engeneering', 0x0013A2: u'MaxStream, Inc', 0x0013A3: u'Siemens Com CPE Devices', 0x0013A4: u'KeyEye Communications', 0x0013A5: u'General Solutions, LTD.', 0x0013A6: u'Extricom Ltd', 0x0013A7: u'BATTELLE MEMORIAL INSTITUTE', 0x0013A8: u'Tanisys Technology', 0x0013A9: u'Sony Corporation', 0x0013AA: u'ALS & TEC Ltd.', 0x0013AB: u'Telemotive AG', 0x0013AC: u'Sunmyung Electronics Co., LTD', 0x0013AD: u'Sendo Ltd', 0x0013AE: u'Radiance Technologies', 0x0013AF: u'NUMA Technology,Inc.', 0x0013B0: u'Jablotron', 0x0013B1: u'Intelligent Control Systems (Asia) Pte Ltd', 0x0013B2: u'Carallon Limited', 0x0013B3: u'Beijing Ecom Communications Technology Co., Ltd.', 0x0013B4: u'Appear TV', 0x0013B5: u'Wavesat', 0x0013B6: u'Sling Media, Inc.', 0x0013B7: u'Scantech ID', 0x0013B8: u'RyCo Electronic Systems Limited', 0x0013B9: u'BM SPA', 0x0013BA: u'ReadyLinks Inc', 0x0013BB: u'PRIVATE', 0x0013BC: u'Artimi Ltd', 0x0013BD: u'HYMATOM SA', 0x0013BE: u'Virtual Conexions', 0x0013BF: u'Media System Planning Corp.', 0x0013C0: u'Trix Tecnologia Ltda.', 0x0013C1: u'Asoka USA Corporation', 0x0013C2: u'WACOM Co.,Ltd', 0x0013C3: u'Cisco Systems', 0x0013C4: u'Cisco Systems', 0x0013C5: u'LIGHTRON FIBER-OPTIC DEVICES INC.', 0x0013C6: u'OpenGear, Inc', 0x0013C7: u'IONOS Co.,Ltd.', 0x0013C8: u'PIRELLI BROADBAND SOLUTIONS S.P.A.', 0x0013C9: u'Beyond Achieve Enterprises Ltd.', 0x0013CA: u'X-Digital Systems, Inc.', 0x0013CB: u'Zenitel Norway AS', 0x0013CC: u'Tall Maple Systems', 0x0013CD: u'MTI co. 
LTD', 0x0013CE: u'Intel Corporate', 0x0013CF: u'4Access Communications', 0x0013D0: u'e-San Limited', 0x0013D1: u'KIRK telecom A/S', 0x0013D2: u'PAGE IBERICA, S.A.', 0x0013D3: u'MICRO-STAR INTERNATIONAL CO., LTD.', 0x0013D4: u'ASUSTek COMPUTER INC.', 0x0013D5: u'WiNetworks LTD', 0x0013D6: u'TII NETWORK TECHNOLOGIES, INC.', 0x0013D7: u'SPIDCOM Technologies SA', 0x0013D8: u'Princeton Instruments', 0x0013D9: u'Matrix Product Development, Inc.', 0x0013DA: u'Diskware Co., Ltd', 0x0013DB: u'SHOEI Electric Co.,Ltd', 0x0013DC: u'IBTEK INC.', 0x0013DD: u'Abbott Diagnostics', 0x0013DE: u'Adapt4', 0x0013DF: u'Ryvor Corp.', 0x0013E0: u'Murata Manufacturing Co., Ltd.', 0x0013E1: u'Iprobe', 0x0013E2: u'GeoVision Inc.', 0x0013E3: u'CoVi Technologies, Inc.', 0x0013E4: u'YANGJAE SYSTEMS CORP.', 0x0013E5: u'TENOSYS, INC.', 0x0013E6: u'Technolution', 0x0013E7: u'Minelab Electronics Pty Limited', 0x0013E8: u'Intel Corporate', 0x0013E9: u'VeriWave, Inc.', 0x0013EA: u'Kamstrup A/S', 0x0013EB: u'Sysmaster Corporation', 0x0013EC: u'Sunbay Software AG', 0x0013ED: u'PSIA', 0x0013EE: u'JBX Designs Inc.', 0x0013EF: u'Kingjon Digital Technology Co.,Ltd', 0x0013F0: u'Wavefront Semiconductor', 0x0013F1: u'AMOD Technology Co., Ltd.', 0x0013F2: u'Klas Ltd', 0x0013F3: u'Giga-byte Communications Inc.', 0x0013F4: u'Psitek (Pty) Ltd', 0x0013F5: u'Akimbi Systems', 0x0013F6: u'Cintech', 0x0013F7: u'SMC Networks, Inc.', 0x0013F8: u'Dex Security Solutions', 0x0013F9: u'Cavera Systems', 0x0013FA: u'LifeSize Communications, Inc', 0x0013FB: u'RKC INSTRUMENT INC.', 0x0013FC: u'SiCortex, Inc', 0x0013FD: u'Nokia Danmark A/S', 0x0013FE: u'GRANDTEC ELECTRONIC CORP.', 0x0013FF: u'Dage-MTI of MC, Inc.', 0x001400: u'MINERVA KOREA CO., LTD', 0x001401: u'Rivertree Networks Corp.', 0x001402: u'kk-electronic a/s', 0x001403: u'Renasis, LLC', 0x001404: u'Motorola CHS', 0x001405: u'OpenIB, Inc.', 0x001406: u'Go Networks', 0x001407: u'Biosystems', 0x001408: u'Eka Systems Inc.', 0x001409: u'MAGNETI MARELLI S.E. S.p.A.', 0x00140A: u'WEPIO Co., Ltd.', 0x00140B: u'FIRST INTERNATIONAL COMPUTER, INC.', 0x00140C: u'GKB CCTV CO., LTD.', 0x00140D: u'Nortel', 0x00140E: u'Nortel', 0x00140F: u'Federal State Unitary Enterprise Leningrad R&D Institute of', 0x001410: u'Suzhou Keda Technology CO.,Ltd', 0x001411: u'Deutschmann Automation GmbH & Co. KG', 0x001412: u'S-TEC electronics AG', 0x001413: u'Trebing & Himstedt Prozessautomation GmbH & Co. KG', 0x001414: u'Jumpnode Systems LLC.', 0x001415: u'Intec Automation Inc.', 0x001416: u'Scosche Industries, Inc.', 0x001417: u'RSE Informations Technologie GmbH', 0x001418: u'C4Line', 0x001419: u'SIDSA', 0x00141A: u'DEICY CORPORATION', 0x00141B: u'Cisco Systems', 0x00141C: u'Cisco Systems', 0x00141D: u'Lust Antriebstechnik GmbH', 0x00141E: u'P.A. Semi, Inc.', 0x00141F: u'SunKwang Electronics Co., Ltd', 0x001420: u'G-Links networking company', 0x001421: u'Total Wireless Technologies Pte. Ltd.', 0x001422: u'Dell Inc.', 0x001423: u'J-S Co. 
NEUROCOM', 0x001424: u'Merry Electrics CO., LTD.', 0x001425: u'Galactic Computing Corp.', 0x001426: u'NL Technology', 0x001427: u'JazzMutant', 0x001428: u'Vocollect, Inc', 0x001429: u'V Center Technologies Co., Ltd.', 0x00142A: u'Elitegroup Computer System Co., Ltd', 0x00142B: u'Edata Technologies Inc.', 0x00142C: u'Koncept International, Inc.', 0x00142D: u'Toradex AG', 0x00142E: u'77 Elektronika Kft.', 0x00142F: u'WildPackets', 0x001430: u'ViPowER, Inc', 0x001431: u'PDL Electronics Ltd', 0x001432: u'Tarallax Wireless, Inc.', 0x001433: u'Empower Technologies(Canada) Inc.', 0x001434: u'Keri Systems, Inc', 0x001435: u'CityCom Corp.', 0x001436: u'Qwerty Elektronik AB', 0x001437: u'GSTeletech Co.,Ltd.', 0x001438: u'Hewlett Packard', 0x001439: u'Blonder Tongue Laboratories, Inc.', 0x00143A: u'RAYTALK INTERNATIONAL SRL', 0x00143B: u'Sensovation AG', 0x00143C: u'Oerlikon Contraves Inc.', 0x00143D: u'Aevoe Inc.', 0x00143E: u'AirLink Communications, Inc.', 0x00143F: u'Hotway Technology Corporation', 0x001440: u'ATOMIC Corporation', 0x001441: u'Innovation Sound Technology Co., LTD.', 0x001442: u'ATTO CORPORATION', 0x001443: u'Consultronics Europe Ltd', 0x001444: u'Grundfos Electronics', 0x001445: u'Telefon-Gradnja d.o.o.', 0x001446: u'KidMapper, Inc.', 0x001447: u'BOAZ Inc.', 0x001448: u'Inventec Multimedia & Telecom Corporation', 0x001449: u'Sichuan Changhong Electric Ltd.', 0x00144A: u'Taiwan Thick-Film Ind. Corp.', 0x00144B: u'Hifn, Inc.', 0x00144C: u'General Meters Corp.', 0x00144D: u'Intelligent Systems', 0x00144E: u'SRISA', 0x00144F: u'Sun Microsystems, Inc.', 0x001450: u'Heim Systems GmbH', 0x001451: u'Apple Computer Inc.', 0x001452: u'CALCULEX,INC.', 0x001453: u'ADVANTECH TECHNOLOGIES CO.,LTD', 0x001454: u'Symwave', 0x001455: u'Coder Electronics Corporation', 0x001456: u'Edge Products', 0x001457: u'T-VIPS AS', 0x001458: u'HS Automatic ApS', 0x001459: u'Moram Co., Ltd.', 0x00145A: u'Elektrobit AG', 0x00145B: u'SeekerNet Inc.', 0x00145C: u'Intronics B.V.', 0x00145D: u'WJ Communications, Inc.', 0x00145E: u'IBM', 0x00145F: u'ADITEC CO. LTD', 0x001460: u'Kyocera Wireless Corp.', 0x001461: u'CORONA CORPORATION', 0x001462: u'Digiwell Technology, inc', 0x001463: u'IDCS N.V.', 0x001464: u'Cryptosoft', 0x001465: u'Novo Nordisk A/S', 0x001466: u'Kleinhenz Elektronik GmbH', 0x001467: u'ArrowSpan Inc.', 0x001468: u'CelPlan International, Inc.', 0x001469: u'Cisco Systems', 0x00146A: u'Cisco Systems', 0x00146B: u'Anagran, Inc.', 0x00146C: u'Netgear Inc.', 0x00146D: u'RF Technologies', 0x00146E: u'H. Stoll GmbH & Co. 
KG', 0x00146F: u'Kohler Co', 0x001470: u'Prokom Software SA', 0x001471: u'Eastern Asia Technology Limited', 0x001472: u'China Broadband Wireless IP Standard Group', 0x001473: u'Bookham Inc', 0x001474: u'K40 Electronics', 0x001475: u'Wiline Networks, Inc.', 0x001476: u'MultiCom Industries Limited', 0x001477: u'Nertec Inc.', 0x001478: u'ShenZhen TP-LINK Technologies Co., Ltd.', 0x001479: u'NEC Magnus Communications,Ltd.', 0x00147A: u'Eubus GmbH', 0x00147B: u'Iteris, Inc.', 0x00147C: u'3Com Europe Ltd', 0x00147D: u'Aeon Digital International', 0x00147E: u'PanGo Networks, Inc.', 0x00147F: u'Thomson Telecom Belgium', 0x001480: u'Hitachi-LG Data Storage Korea, Inc', 0x001481: u'Multilink Inc', 0x001482: u'GoBackTV, Inc', 0x001483: u'eXS Inc.', 0x001484: u'CERMATE TECHNOLOGIES INC', 0x001485: u'Giga-Byte', 0x001486: u'Echo Digital Audio Corporation', 0x001487: u'American Technology Integrators', 0x001488: u'Akorri Networks', 0x001489: u'B15402100 - JANDEI, S.L.', 0x00148A: u'Elin Ebg Traction Gmbh', 0x00148B: u'Globo Electronic GmbH & Co. KG', 0x00148C: u'Fortress Technologies', 0x00148D: u'Cubic Defense Simulation Systems', 0x00148E: u'Tele Power Inc.', 0x00148F: u'Protronic (Far East) Ltd.', 0x001490: u'ASP Corporation', 0x001491: u'Daniels Electronics Ltd.', 0x001492: u'Liteon, Mobile Media Solution SBU', 0x001493: u'Systimax Solutions', 0x001494: u'ESU AG', 0x001495: u'2Wire, Inc.', 0x001496: u'Phonic Corp.', 0x001497: u'ZHIYUAN Eletronics co.,ltd.', 0x001498: u'Viking Design Technology', 0x001499: u'Helicomm Inc', 0x00149A: u'Motorola Mobile Devices Business', 0x00149B: u'Nokota Communications, LLC', 0x00149C: u'HF Company', 0x00149D: u'Sound ID Inc.', 0x00149E: u'UbONE Co., Ltd', 0x00149F: u'System and Chips, Inc.', 0x0014A0: u'RFID Asset Track, Inc.', 0x0014A1: u'Synchronous Communication Corp', 0x0014A2: u'Core Micro Systems Inc.', 0x0014A3: u'Vitelec BV', 0x0014A4: u'Hon Hai Precision Ind. Co., Ltd.', 0x0014A5: u'Gemtek Technology Co., Ltd.', 0x0014A6: u'Teranetics, Inc.', 0x0014A7: u'Nokia Danmark A/S', 0x0014A8: u'Cisco Systems', 0x0014A9: u'Cisco Systems', 0x0014AA: u'Ashly Audio, Inc.', 0x0014AB: u'Senhai Electronic Technology Co., Ltd.', 0x0014AC: u'Bountiful WiFi', 0x0014AD: u'Gassner Wiege- u. Meßtechnik GmbH', 0x0014AE: u'Wizlogics Co., Ltd.', 0x0014AF: u'Datasym Inc.', 0x0014B0: u'Naeil Community', 0x0014B1: u'Avitec AB', 0x0014B2: u'mCubelogics Corporation', 0x0014B3: u'CoreStar International Corp', 0x0014B4: u'General Dynamics United Kingdom Ltd', 0x0014B5: u'PRIVATE', 0x0014B6: u'Enswer Technology Inc.', 0x0014B7: u'AR Infotek Inc.', 0x0014B8: u'Hill-Rom', 0x0014B9: u'STEPMIND', 0x0014BA: u'Carvers SA de CV', 0x0014BB: u'Open Interface North America', 0x0014BC: u'SYNECTIC TELECOM EXPORTS PVT. LTD.', 0x0014BD: u'incNETWORKS, Inc', 0x0014BE: u'Wink communication technology CO.LTD', 0x0014BF: u'Cisco-Linksys LLC', 0x0014C0: u'Symstream Technology Group Ltd', 0x0014C1: u'U.S. 
Robotics Corporation', 0x0014C2: u'Hewlett Packard', 0x0014C3: u'Seagate Technology LLC', 0x0014C4: u'Vitelcom Mobile Technology', 0x0014C5: u'Alive Technologies Pty Ltd', 0x0014C6: u'Quixant Ltd', 0x0014C7: u'Nortel', 0x0014C8: u'Contemporary Research Corp', 0x0014C9: u'Silverback Systems, Inc.', 0x0014CA: u'Key Radio Systems Limited', 0x0014CB: u'LifeSync Corporation', 0x0014CC: u'Zetec, Inc.', 0x0014CD: u'DigitalZone Co., Ltd.', 0x0014CE: u'NF CORPORATION', 0x0014CF: u'Nextlink.to A/S', 0x0014D0: u'BTI Photonics', 0x0014D1: u'TRENDware International, Inc.', 0x0014D2: u'KYUKI CORPORATION', 0x0014D3: u'SEPSA', 0x0014D4: u'K Technology Corporation', 0x0014D5: u'Datang Telecom Technology CO. , LCD,Optical Communication Br', 0x0014D6: u'Jeongmin Electronics Co.,Ltd.', 0x0014D7: u'DataStor Technology Inc.', 0x0014D8: u'bio-logic SA', 0x0014D9: u'IP Fabrics, Inc.', 0x0014DA: u'Huntleigh Healthcare', 0x0014DB: u'Elma Trenew Electronic GmbH', 0x0014DC: u'Communication System Design & Manufacturing (CSDM)', 0x0014DD: u'Covergence Inc.', 0x0014DE: u'Sage Instruments Inc.', 0x0014DF: u'HI-P Tech Corporation', 0x0014E0: u'LET\'S Corporation', 0x0014E1: u'Data Display AG', 0x0014E2: u'datacom systems inc.', 0x0014E3: u'mm-lab GmbH', 0x0014E4: u'Integral Technologies', 0x0014E5: u'Alticast', 0x0014E6: u'AIM Infrarotmodule GmbH', 0x0014E7: u'Stolinx,. Inc', 0x0014E8: u'Motorola CHS', 0x0014E9: u'Nortech International', 0x0014EA: u'S Digm Inc. (Safe Paradigm Inc.)', 0x0014EB: u'AwarePoint Corporation', 0x0014EC: u'Acro Telecom', 0x0014ED: u'Airak, Inc.', 0x0014EE: u'Western Digital Technologies, Inc.', 0x0014EF: u'TZero Technologies, Inc.', 0x0014F0: u'Business Security OL AB', 0x0014F1: u'Cisco Systems', 0x0014F2: u'Cisco Systems', 0x0014F3: u'ViXS Systems Inc', 0x0014F4: u'DekTec Digital Video B.V.', 0x0014F5: u'OSI Security Devices', 0x0014F6: u'Juniper Networks, Inc.', 0x0014F7: u'Crevis', 0x0014F8: u'Scientific Atlanta', 0x0014F9: u'Vantage Controls', 0x0014FA: u'AsGa S.A.', 0x0014FB: u'Technical Solutions Inc.', 0x0014FC: u'Extandon, Inc.', 0x0014FD: u'Thecus Technology Corp.', 0x0014FE: u'Artech Electronics', 0x0014FF: u'Precise Automation, LLC', 0x001500: u'Intel Corporate', 0x001501: u'LexBox', 0x001502: u'BETA tech', 0x001503: u'PROFIcomms s.r.o.', 0x001504: u'GAME PLUS CO., LTD.', 0x001505: u'Actiontec Electronics, Inc', 0x001506: u'BeamExpress, Inc', 0x001507: u'Renaissance Learning Inc', 0x001508: u'Global Target Enterprise Inc', 0x001509: u'Plus Technology Co., Ltd', 0x00150A: u'Sonoa Systems, Inc', 0x00150B: u'SAGE INFOTECH LTD.', 0x00150C: u'AVM GmbH', 0x00150D: u'Hoana Medical, Inc.', 0x00150E: u'OPENBRAIN TECHNOLOGIES CO., LTD.', 0x00150F: u'mingjong', 0x001510: u'Techsphere Co., Ltd', 0x001511: u'Data Center Systems', 0x001512: u'Zurich University of Applied Sciences', 0x001513: u'EFS sas', 0x001514: u'Hu Zhou NAVA Networks&Electronics Ltd.', 0x001515: u'Leipold+Co.GmbH', 0x001516: u'URIEL SYSTEMS INC.', 0x001517: u'Intel Corporate', 0x001518: u'Shenzhen 10MOONS Technology Development CO.,Ltd', 0x001519: u'StoreAge Networking Technologies', 0x00151A: u'Hunter Engineering Company', 0x00151B: u'Isilon Systems Inc.', 0x00151C: u'LENECO', 0x00151D: u'M2I CORPORATION', 0x00151E: u'Metaware Co., Ltd.', 0x00151F: u'Multivision Intelligent Surveillance (Hong Kong) Ltd', 0x001520: u'Radiocrafts AS', 0x001521: u'Horoquartz', 0x001522: u'Dea Security', 0x001523: u'Meteor Communications Corporation', 0x001524: u'Numatics, Inc.', 0x001525: u'PTI Integrated Systems, Inc.', 0x001526: u'Remote 
Technologies Inc', 0x001527: u'Balboa Instruments', 0x001528: u'Beacon Medical Products LLC d.b.a. BeaconMedaes', 0x001529: u'N3 Corporation', 0x00152A: u'Nokia GmbH', 0x00152B: u'Cisco Systems', 0x00152C: u'Cisco Systems', 0x00152D: u'TenX Networks, LLC', 0x00152E: u'PacketHop, Inc.', 0x00152F: u'Motorola CHS', 0x001530: u'Bus-Tech, Inc.', 0x001531: u'KOCOM', 0x001532: u'Consumer Technologies Group, LLC', 0x001533: u'NADAM.CO.,LTD', 0x001534: u'A BELTRÓNICA, Companhia de Comunicações, Lda', 0x001535: u'OTE Spa', 0x001536: u'Powertech co.,Ltd', 0x001537: u'Ventus Networks', 0x001538: u'RFID, Inc.', 0x001539: u'Technodrive SRL', 0x00153A: u'Shenzhen Syscan Technology Co.,Ltd.', 0x00153B: u'EMH Elektrizitätszähler GmbH & CoKG', 0x00153C: u'Kprotech Co., Ltd.', 0x00153D: u'ELIM PRODUCT CO.', 0x00153E: u'Q-Matic Sweden AB', 0x00153F: u'Alcatel Alenia Space Italia', 0x001540: u'Nortel', 0x001541: u'StrataLight Communications, Inc.', 0x001542: u'MICROHARD S.R.L.', 0x001543: u'Aberdeen Test Center', 0x001544: u'coM.s.a.t. AG', 0x001545: u'SEECODE Co., Ltd.', 0x001546: u'ITG Worldwide Sdn Bhd', 0x001547: u'AiZen Solutions Inc.', 0x001548: u'CUBE TECHNOLOGIES', 0x001549: u'Dixtal Biomedica Ind. Com. Ltda', 0x00154A: u'WANSHIH ELECTRONIC CO., LTD', 0x00154B: u'Wonde Proud Technology Co., Ltd', 0x00154C: u'Saunders Electronics', 0x00154D: u'Netronome Systems, Inc.', 0x00154E: u'Hirschmann Automation and Control GmbH', 0x00154F: u'one RF Technology', 0x001550: u'Nits Technology Inc', 0x001551: u'RadioPulse Inc.', 0x001552: u'Wi-Gear Inc.', 0x001553: u'Cytyc Corporation', 0x001554: u'Atalum Wireless S.A.', 0x001555: u'DFM GmbH', 0x001556: u'SAGEM SA', 0x001557: u'Olivetti', 0x001558: u'FOXCONN', 0x001559: u'Securaplane Technologies, Inc.', 0x00155A: u'DAINIPPON PHARMACEUTICAL CO., LTD.', 0x00155B: u'Sampo Corporation', 0x00155C: u'Dresser Wayne', 0x00155D: u'Microsoft Corporation', 0x00155E: u'Morgan Stanley', 0x00155F: u'Ubiwave', 0x001560: u'Hewlett Packard', 0x001561: u'JJPlus Corporation', 0x001562: u'Cisco Systems', 0x001563: u'Cisco Systems', 0x001564: u'BEHRINGER Spezielle Studiotechnik GmbH', 0x001565: u'XIAMEN YEALINK NETWORK TECHNOLOGY CO.,LTD', 0x001566: u'A-First Technology Co., Ltd.', 0x001567: u'RADWIN Inc.', 0x001568: u'Dilithium Networks', 0x001569: u'PECO II, Inc.', 0x00156A: u'DG2L Technologies Pvt. Ltd.', 0x00156B: u'Perfisans Networks Corp.', 0x00156C: u'SANE SYSTEM CO., LTD', 0x00156D: u'Ubiquiti Networks', 0x00156E: u'A. W. Communication Systems Ltd', 0x00156F: u'Xiranet Communications GmbH', 0x001570: u'Symbol Technologies', 0x001571: u'Nolan Systems', 0x001572: u'Red-Lemon', 0x001573: u'NewSoft Technology Corporation', 0x001574: u'Horizon Semiconductors Ltd.', 0x001575: u'Nevis Networks Inc.', 0x001576: u'scil animal care company GmbH', 0x001577: u'Allied Telesyn, Inc.', 0x001578: u'Audio / Video Innovations', 0x001579: u'Lunatone Industrielle Elektronik GmbH', 0x00157A: u'Telefin S.p.A.', 0x00157B: u'Leuze electronic GmbH + Co. KG', 0x00157C: u'Dave Networks, Inc.', 0x00157D: u'POSDATA CO., LTD.', 0x00157E: u'HEYFRA ELECTRONIC gmbH', 0x00157F: u'ChuanG International Holding CO.,LTD.', 0x001580: u'U-WAY CORPORATION', 0x001581: u'MAKUS Inc.', 0x001582: u'TVonics Ltd', 0x001583: u'IVT corporation', 0x001584: u'Schenck Process GmbH', 0x001585: u'Aonvision Technolopy Corp.', 0x001586: u'Xiamen Overseas Chinese Electronic Co., Ltd.', 0x001587: u'Takenaka Seisakusho Co.,Ltd', 0x001588: u'Balda-Thong Fook Solutions Sdn. 
Bhd.', 0x001589: u'D-MAX Technology Co.,Ltd', 0x00158A: u'SURECOM Technology Corp.', 0x00158B: u'Park Air Systems Ltd', 0x00158C: u'Liab ApS', 0x00158D: u'Jennic Ltd', 0x00158E: u'Plustek.INC', 0x00158F: u'NTT Advanced Technology Corporation', 0x001590: u'Hectronic GmbH', 0x001591: u'RLW Inc.', 0x001592: u'Facom UK Ltd (Melksham)', 0x001593: u'U4EA Technologies Inc.', 0x001594: u'BIXOLON CO.,LTD', 0x001595: u'Quester Tangent Corporation', 0x001596: u'ARRIS International', 0x001597: u'AETA AUDIO SYSTEMS', 0x001598: u'Kolektor group', 0x001599: u'Samsung Electronics Co., LTD', 0x00159A: u'Motorola CHS', 0x00159B: u'Nortel', 0x00159C: u'B-KYUNG SYSTEM Co.,Ltd.', 0x00159D: u'Minicom Advanced Systems ltd', 0x00159E: u'Saitek plc', 0x00159F: u'Terascala, Inc.', 0x0015A0: u'Nokia Danmark A/S', 0x0015A1: u'SINTERS SAS', 0x0015A2: u'ARRIS International', 0x0015A3: u'ARRIS International', 0x0015A4: u'ARRIS International', 0x0015A5: u'DCI Co., Ltd.', 0x0015A6: u'Digital Electronics Products Ltd.', 0x0015A7: u'Robatech AG', 0x0015A8: u'Motorola Mobile Devices', 0x0015A9: u'KWANG WOO I&C CO.,LTD', 0x0015AA: u'Rextechnik International Co.,', 0x0015AB: u'PRO CO SOUND INC', 0x0015AC: u'Capelon AB', 0x0015AD: u'Accedian Networks', 0x0015AE: u'kyung il', 0x0015AF: u'AzureWave Technologies, Inc.', 0x0015B0: u'AUTOTELENET CO.,LTD', 0x0015B1: u'Ambient Corporation', 0x0015B2: u'Advanced Industrial Computer, Inc.', 0x0015B3: u'Caretech AB', 0x0015B4: u'Polymap Wireless LLC', 0x0015B5: u'CI Network Corp.', 0x0015B6: u'ShinMaywa Industries, Ltd.', 0x0015B7: u'Toshiba', 0x0015B8: u'Tahoe', 0x0015B9: u'Samsung Electronics Co., Ltd.', 0x0015BA: u'iba AG', 0x0015BB: u'SMA Technologie AG', 0x0015BC: u'Develco', 0x0015BD: u'Group 4 Technology Ltd', 0x0015BE: u'Iqua Ltd.', 0x0015BF: u'technicob', 0x0015C0: u'DIGITAL TELEMEDIA CO.,LTD.', 0x0015C1: u'SONY Computer Entertainment inc,', 0x0015C2: u'3M Germany', 0x0015C3: u'Ruf Telematik AG', 0x0015C4: u'FLOVEL CO., LTD.', 0x0015C5: u'Dell Inc', 0x0015C6: u'Cisco Systems', 0x0015C7: u'Cisco Systems', 0x0015C8: u'FlexiPanel Ltd', 0x0015C9: u'Gumstix, Inc', 0x0015CA: u'TeraRecon, Inc.', 0x0015CB: u'Surf Communication Solutions Ltd.', 0x0015CC: u'TEPCO UQUEST, LTD.', 0x0015CD: u'Exartech International Corp.', 0x0015CE: u'ARRIS International', 0x0015CF: u'ARRIS International', 0x0015D0: u'ARRIS International', 0x0015D1: u'ARRIS International', 0x0015D2: u'Xantech Corporation', 0x0015D3: u'Pantech&Curitel Communications, Inc.', 0x0015D4: u'Emitor AB', 0x0015D5: u'NICEVT', 0x0015D6: u'OSLiNK Sp. z o.o.', 0x0015D7: u'Reti Corporation', 0x0015D8: u'Interlink Electronics', 0x0015D9: u'PKC Electronics Oy', 0x0015DA: u'IRITEL A.D.', 0x0015DB: u'Canesta Inc.', 0x0015DC: u'KT&C Co., Ltd.', 0x0015DD: u'IP Control Systems Ltd.', 0x0015DE: u'Nokia Danmark A/S', 0x0015DF: u'Clivet S.p.A.', 0x0015E0: u'Ericsson Mobile Platforms', 0x0015E1: u'picoChip Designs Ltd', 0x0015E2: u'Wissenschaftliche Geraetebau Dr. Ing. H. 
Knauer GmbH', 0x0015E3: u'Dream Technologies Corporation', 0x0015E4: u'Zimmer Elektromedizin', 0x0015E5: u'Cheertek Inc.', 0x0015E6: u'MOBILE TECHNIKA Inc.', 0x0015E7: u'Quantec ProAudio', 0x0015E8: u'Nortel', 0x0015E9: u'D-Link Corporation', 0x0015EA: u'Tellumat (Pty) Ltd', 0x0015EB: u'ZTE CORPORATION', 0x0015EC: u'Boca Devices LLC', 0x0015ED: u'Fulcrum Microsystems, Inc.', 0x0015EE: u'Omnex Control Systems', 0x0015EF: u'NEC TOKIN Corporation', 0x0015F0: u'EGO BV', 0x0015F1: u'KYLINK Communications Corp.', 0x0015F2: u'ASUSTek COMPUTER INC.', 0x0015F3: u'PELTOR AB', 0x0015F4: u'Eventide', 0x0015F5: u'Sustainable Energy Systems', 0x0015F6: u'SCIENCE AND ENGINEERING SERVICES, INC.', 0x0015F7: u'Wintecronics Ltd.', 0x0015F8: u'Kingtronics Industrial Co. Ltd.', 0x0015F9: u'Cisco Systems', 0x0015FA: u'Cisco Systems', 0x0015FB: u'setex schermuly textile computer gmbh', 0x0015FC: u'Startco Engineering Ltd.', 0x0015FD: u'Complete Media Systems', 0x0015FE: u'SCHILLING ROBOTICS LLC', 0x0015FF: u'Novatel Wireless, Inc.', 0x001600: u'CelleBrite Mobile Synchronization', 0x001601: u'Buffalo Inc.', 0x001602: u'CEYON TECHNOLOGY CO.,LTD.', 0x001603: u'PRIVATE', 0x001604: u'Sigpro', 0x001605: u'YORKVILLE SOUND INC.', 0x001606: u'Ideal Industries', 0x001607: u'Curves International Inc.', 0x001608: u'Sequans Communications', 0x001609: u'Unitech electronics co., ltd.', 0x00160A: u'SWEEX Europe BV', 0x00160B: u'TVWorks LLC', 0x00160C: u'LPL DEVELOPMENT S.A. DE C.V', 0x00160D: u'Be Here Corporation', 0x00160E: u'Optica Technologies Inc.', 0x00160F: u'BADGER METER INC', 0x001610: u'Carina Technology', 0x001611: u'Altecon Srl', 0x001612: u'Otsuka Electronics Co., Ltd.', 0x001613: u'LibreStream Technologies Inc.', 0x001614: u'Picosecond Pulse Labs', 0x001615: u'Nittan Company, Limited', 0x001616: u'BROWAN COMMUNICATION INC.', 0x001617: u'MSI', 0x001618: u'HIVION Co., Ltd.', 0x001619: u'La Factoría de Comunicaciones Aplicadas,S.L.', 0x00161A: u'Dametric AB', 0x00161B: u'Micronet Corporation', 0x00161C: u'e:cue', 0x00161D: u'Innovative Wireless Technologies, Inc.', 0x00161E: u'Woojinnet', 0x00161F: u'SUNWAVETEC Co., Ltd.', 0x001620: u'Sony Ericsson Mobile Communications AB', 0x001621: u'Colorado Vnet', 0x001622: u'BBH SYSTEMS GMBH', 0x001623: u'Interval Media', 0x001624: u'PRIVATE', 0x001625: u'Impinj, Inc.', 0x001626: u'Motorola CHS', 0x001627: u'embedded-logic DESIGN AND MORE GmbH', 0x001628: u'Ultra Electronics Manufacturing and Card Systems', 0x001629: u'Nivus GmbH', 0x00162A: u'Antik computers & communications s.r.o.', 0x00162B: u'Togami Electric Mfg.co.,Ltd.', 0x00162C: u'Xanboo', 0x00162D: u'STNet Co., Ltd.', 0x00162E: u'Space Shuttle Hi-Tech Co., Ltd.', 0x00162F: u'Geutebrück GmbH', 0x001630: u'Vativ Technologies', 0x001631: u'Xteam', 0x001632: u'SAMSUNG ELECTRONICS CO., LTD.', 0x001633: u'Oxford Diagnostics Ltd.', 0x001634: u'Mathtech, Inc.', 0x001635: u'Hewlett Packard', 0x001636: u'Quanta Computer Inc.', 0x001637: u'Citel Srl', 0x001638: u'TECOM Co., Ltd.', 0x001639: u'UBIQUAM Co.,Ltd', 0x00163A: u'YVES TECHNOLOGY CO., LTD.', 0x00163B: u'VertexRSI/General Dynamics', 0x00163C: u'Rebox B.V.', 0x00163D: u'Tsinghua Tongfang Legend Silicon Tech. 
Co., Ltd.', 0x00163E: u'Xensource, Inc.', 0x00163F: u'CReTE SYSTEMS Inc.', 0x001640: u'Asmobile Communication Inc.', 0x001641: u'USI', 0x001642: u'Pangolin', 0x001643: u'Sunhillo Corproation', 0x001644: u'LITE-ON Technology Corp.', 0x001645: u'Power Distribution, Inc.', 0x001646: u'Cisco Systems', 0x001647: u'Cisco Systems', 0x001648: u'SSD Company Limited', 0x001649: u'SetOne GmbH', 0x00164A: u'Vibration Technology Limited', 0x00164B: u'Quorion Data Systems GmbH', 0x00164C: u'PLANET INT Co., Ltd', 0x00164D: u'Alcatel North America IP Division', 0x00164E: u'Nokia Danmark A/S', 0x00164F: u'World Ethnic Broadcastin Inc.', 0x001650: u'EYAL MICROWAVE', 0x001651: u'PRIVATE', 0x001652: u'Hoatech Technologies, Inc.', 0x001653: u'LEGO System A/S IE Electronics Division', 0x001654: u'Flex-P Industries Sdn. Bhd.', 0x001655: u'FUHO TECHNOLOGY Co., LTD', 0x001656: u'Nintendo Co., Ltd.', 0x001657: u'Aegate Ltd', 0x001658: u'Fusiontech Technologies Inc.', 0x001659: u'Z.M.P. RADWAG', 0x00165A: u'Harman Specialty Group', 0x00165B: u'Grip Audio', 0x00165C: u'Trackflow Ltd', 0x00165D: u'AirDefense, Inc.', 0x00165E: u'Precision I/O', 0x00165F: u'Fairmount Automation', 0x001660: u'Nortel', 0x001661: u'Novatium Solutions (P) Ltd', 0x001662: u'Liyuh Technology Ltd.', 0x001663: u'KBT Mobile', 0x001664: u'Prod-El SpA', 0x001665: u'Cellon France', 0x001666: u'Quantier Communication Inc.', 0x001667: u'A-TEC Subsystem INC.', 0x001668: u'Eishin Electronics', 0x001669: u'MRV Communication (Networks) LTD', 0x00166A: u'TPS', 0x00166B: u'Samsung Electronics', 0x00166C: u'Samsung Electonics Digital Video System Division', 0x00166D: u'Yulong Computer Telecommunication Scientific(shenzhen)Co.,Lt', 0x00166E: u'Arbitron Inc.', 0x00166F: u'Intel Corporation', 0x001670: u'SKNET Corporation', 0x001671: u'Symphox Information Co.', 0x001672: u'Zenway enterprise ltd', 0x001673: u'PRIVATE', 0x001674: u'EuroCB (Phils.), Inc.', 0x001675: u'Motorola MDb', 0x001676: u'Intel Corporation', 0x001677: u'Bihl+Wiedemann GmbH', 0x001678: u'SHENZHEN BAOAN GAOKE ELECTRONICS CO., LTD', 0x001679: u'eOn Communications', 0x00167A: u'Skyworth Overseas Dvelopment Ltd.', 0x00167B: u'Haver&Boecker', 0x00167C: u'iRex Technologies BV', 0x00167D: u'Sky-Line', 0x00167E: u'DIBOSS.CO.,LTD', 0x00167F: u'Bluebird Soft Inc.', 0x001680: u'Bally Gaming + Systems', 0x001681: u'Vector Informatik GmbH', 0x001682: u'Pro Dex, Inc', 0x001683: u'WEBIO International Co.,.Ltd.', 0x001684: u'Donjin Co.,Ltd.', 0x001685: u'FRWD Technologies Ltd.', 0x001686: u'Karl Storz Imaging', 0x001687: u'Chubb CSC-Vendor AP', 0x001688: u'ServerEngines LLC', 0x001689: u'Pilkor Electronics Co., Ltd', 0x00168A: u'id-Confirm Inc', 0x00168B: u'Paralan Corporation', 0x00168C: u'DSL Partner AS', 0x00168D: u'KORWIN CO., Ltd.', 0x00168E: u'Vimicro corporation', 0x00168F: u'GN Netcom as', 0x001690: u'J-TEK INCORPORATION', 0x001691: u'Moser-Baer AG', 0x001692: u'Scientific-Atlanta, Inc.', 0x001693: u'PowerLink Technology Inc.', 0x001694: u'Sennheiser Communications A/S', 0x001695: u'AVC Technology Limited', 0x001696: u'QDI Technology (H.K.) 
Limited', 0x001697: u'NEC Corporation', 0x001698: u'T&A Mobile Phones SAS', 0x001699: u'PRIVATE', 0x00169A: u'Quadrics Ltd', 0x00169B: u'Alstom Transport', 0x00169C: u'Cisco Systems', 0x00169D: u'Cisco Systems', 0x00169E: u'TV One Ltd', 0x00169F: u'Vimtron Electronics Co., Ltd.', 0x0016A0: u'Auto-Maskin', 0x0016A1: u'3Leaf Networks', 0x0016A2: u'CentraLite Systems, Inc.', 0x0016A3: u'TEAM ARTECHE, S.A.', 0x0016A4: u'Ezurio Ltd', 0x0016A5: u'Tandberg Storage ASA', 0x0016A6: u'Dovado FZ-LLC', 0x0016A7: u'AWETA G&P', 0x0016A8: u'CWT CO., LTD.', 0x0016A9: u'2EI', 0x0016AA: u'Kei Communication Technology Inc.', 0x0016AB: u'PBI-Dansensor A/S', 0x0016AC: u'Toho Technology Corp.', 0x0016AD: u'BT-Links Company Limited', 0x0016AE: u'INVENTEL', 0x0016AF: u'Shenzhen Union Networks Equipment Co.,Ltd.', 0x0016B0: u'VK Corporation', 0x0016B1: u'KBS', 0x0016B2: u'DriveCam Inc', 0x0016B3: u'Photonicbridges (China) Co., Ltd.', 0x0016B4: u'PRIVATE', 0x0016B5: u'Motorola CHS', 0x0016B6: u'Cisco-Linksys', 0x0016B7: u'Seoul Commtech', 0x0016B8: u'Sony Ericsson Mobile Communications', 0x0016B9: u'ProCurve Networking', 0x0016BA: u'WEATHERNEWS INC.', 0x0016BB: u'Law-Chain Computer Technology Co Ltd', 0x0016BC: u'Nokia Danmark A/S', 0x0016BD: u'ATI Industrial Automation', 0x0016BE: u'INFRANET, Inc.', 0x0016BF: u'PaloDEx Group Oy', 0x0016C0: u'Semtech Corporation', 0x0016C1: u'Eleksen Ltd', 0x0016C2: u'Avtec Systems Inc', 0x0016C3: u'BA Systems Inc', 0x0016C4: u'SiRF Technology, Inc.', 0x0016C5: u'Shenzhen Xing Feng Industry Co.,Ltd', 0x0016C6: u'North Atlantic Industries', 0x0016C7: u'Cisco Systems', 0x0016C8: u'Cisco Systems', 0x0016C9: u'NAT Seattle, Inc.', 0x0016CA: u'Nortel', 0x0016CB: u'Apple Computer', 0x0016CC: u'Xcute Mobile Corp.', 0x0016CD: u'HIJI HIGH-TECH CO., LTD.', 0x0016CE: u'Hon Hai Precision Ind. Co., Ltd.', 0x0016CF: u'Hon Hai Precision Ind. Co., Ltd.', 0x0016D0: u'ATech elektronika d.o.o.', 0x0016D1: u'ZAT a.s.', 0x0016D2: u'Caspian', 0x0016D3: u'Wistron Corporation', 0x0016D4: u'Compal Communications, Inc.', 0x0016D5: u'Synccom Co., Ltd', 0x0016D6: u'TDA Tech Pty Ltd', 0x0016D7: u'Sunways AG', 0x0016D8: u'Senea AB', 0x0016D9: u'NINGBO BIRD CO.,LTD.', 0x0016DA: u'Futronic Technology Co. 
Ltd.', 0x0016DB: u'Samsung Electronics Co., Ltd.', 0x0016DC: u'ARCHOS', 0x0016DD: u'Gigabeam Corporation', 0x0016DE: u'FAST Inc', 0x0016DF: u'Lundinova AB', 0x0016E0: u'3Com Europe Ltd', 0x0016E1: u'SiliconStor, Inc.', 0x0016E2: u'American Fibertek, Inc.', 0x0016E3: u'ASKEY COMPUTER CORP.', 0x0016E4: u'VANGUARD SECURITY ENGINEERING CORP.', 0x0016E5: u'FORDLEY DEVELOPMENT LIMITED', 0x0016E6: u'GIGA-BYTE TECHNOLOGY CO.,LTD.', 0x0016E7: u'Dynamix Promotions Limited', 0x0016E8: u'Sigma Designs, Inc.', 0x0016E9: u'Tiba Medical Inc', 0x0016EA: u'Intel Corporation', 0x0016EB: u'Intel Corporation', 0x0016EC: u'Elitegroup Computer Systems Co., Ltd.', 0x0016ED: u'Integrian, Inc.', 0x0016EE: u'RoyalDigital Inc.', 0x0016EF: u'Koko Fitness, Inc.', 0x0016F0: u'Zermatt Systems, Inc', 0x0016F1: u'OmniSense, LLC', 0x0016F2: u'Dmobile System Co., Ltd.', 0x0016F3: u'CAST Information Co., Ltd', 0x0016F4: u'Eidicom Co., Ltd.', 0x0016F5: u'Dalian Golden Hualu Digital Technology Co.,Ltd', 0x0016F6: u'Video Products Group', 0x0016F7: u'L-3 Communications, Electrodynamics, Inc.', 0x0016F8: u'AVIQTECH TECHNOLOGY CO., LTD.', 0x0016F9: u'CETRTA POT, d.o.o., Kranj', 0x0016FA: u'ECI Telecom Ltd.', 0x0016FB: u'SHENZHEN MTC CO.,LTD.', 0x0016FC: u'TOHKEN CO.,LTD.', 0x0016FD: u'Jaty Electronics', 0x0016FE: u'Alps Electric Co., Ltd', 0x0016FF: u'Wamin Optocomm Mfg Corp', 0x001700: u'Motorola MDb', 0x001701: u'KDE, Inc.', 0x001702: u'Osung Midicom Co., Ltd', 0x001703: u'MOSDAN Internation Co.,Ltd', 0x001704: u'Shinco Electronics Group Co.,Ltd', 0x001705: u'Methode Electronics', 0x001706: u'Techfaith Wireless Communication Technology Limited.', 0x001707: u'InGrid, Inc', 0x001708: u'Hewlett Packard', 0x001709: u'Exalt Communications', 0x00170A: u'INEW DIGITAL COMPANY', 0x00170B: u'Contela, Inc.', 0x00170C: u'Benefon Oyj', 0x00170D: u'Dust Networks Inc.', 0x00170E: u'Cisco Systems', 0x00170F: u'Cisco Systems', 0x001710: u'Casa Systems Inc.', 0x001711: u'GE Healthcare Bio-Sciences AB', 0x001712: u'ISCO International', 0x001713: u'Tiger NetCom', 0x001714: u'BR Controls Nederland bv', 0x001715: u'Qstik', 0x001716: u'Qno Technology Inc.', 0x001717: u'Leica Geosystems AG', 0x001718: u'Vansco Electronics Oy', 0x001719: u'AudioCodes USA, Inc', 0x00171A: u'Winegard Company', 0x00171B: u'Innovation Lab Corp.', 0x00171C: u'NT MicroSystems, Inc.', 0x00171D: u'DIGIT', 0x00171E: u'Theo Benning GmbH & Co. 
KG', 0x00171F: u'IMV Corporation', 0x001720: u'Image Sensing Systems, Inc.', 0x001721: u'FITRE S.p.A.', 0x001722: u'Hanazeder Electronic GmbH', 0x001723: u'Summit Data Communications', 0x001724: u'Studer Professional Audio GmbH', 0x001725: u'Liquid Computing', 0x001726: u'm2c Electronic Technology Ltd.', 0x001727: u'Thermo Ramsey Italia s.r.l.', 0x001728: u'Selex Communications', 0x001729: u'Ubicod Co.LTD', 0x00172A: u'Proware Technology Corp.', 0x00172B: u'Global Technologies Inc.', 0x00172C: u'TAEJIN INFOTECH', 0x00172D: u'Axcen Photonics Corporation', 0x00172E: u'FXC Inc.', 0x00172F: u'NeuLion Incorporated', 0x001730: u'Automation Electronics', 0x001731: u'ASUSTek COMPUTER INC.', 0x001732: u'Science-Technical Center "RISSA"', 0x001733: u'neuf cegetel', 0x001734: u'LGC Wireless Inc.', 0x001735: u'PRIVATE', 0x001736: u'iiTron Inc.', 0x001737: u'Industrie Dial Face S.p.A.', 0x001738: u'XIV', 0x001739: u'Bright Headphone Electronics Company', 0x00173A: u'Edge Integration Systems Inc.', 0x00173B: u'Arched Rock Corporation', 0x00173C: u'Extreme Engineering Solutions', 0x00173D: u'Neology', 0x00173E: u'LeucotronEquipamentos Ltda.', 0x00173F: u'Belkin Corporation', 0x001740: u'Technologies Labtronix', 0x001741: u'DEFIDEV', 0x001742: u'FUJITSU LIMITED', 0x001743: u'Deck Srl', 0x001744: u'Araneo Ltd.', 0x001745: u'INNOTZ CO., Ltd', 0x001746: u'Freedom9 Inc.', 0x001747: u'Trimble', 0x001748: u'Neokoros Brasil Ltda', 0x001749: u'HYUNDAE YONG-O-SA CO.,LTD', 0x00174A: u'SOCOMEC', 0x00174B: u'Nokia Danmark A/S', 0x00174C: u'Millipore', 0x00174D: u'DYNAMIC NETWORK FACTORY, INC.', 0x00174E: u'Parama-tech Co.,Ltd.', 0x00174F: u'iCatch Inc.', 0x001750: u'GSI Group, MicroE Systems', 0x001751: u'Online Corporation', 0x001752: u'DAGS, Inc', 0x001753: u'nFore Technology Inc.', 0x001754: u'Arkino Corporation., Ltd', 0x001755: u'GE Security', 0x001756: u'Vinci Labs Oy', 0x001757: u'RIX TECHNOLOGY LIMITED', 0x001758: u'ThruVision Ltd', 0x001759: u'Cisco Systems', 0x00175A: u'Cisco Systems', 0x00175B: u'ACS Solutions Switzerland Ltd.', 0x00175C: u'SHARP CORPORATION', 0x00175D: u'Dongseo system.', 0x00175E: u'Anta Systems, Inc.', 0x00175F: u'XENOLINK Communications Co., Ltd.', 0x001760: u'Naito Densei Machida MFG.CO.,LTD', 0x001761: u'ZKSoftware Inc.', 0x001762: u'Solar Technology, Inc.', 0x001763: u'Essentia S.p.A.', 0x001764: u'ATMedia GmbH', 0x001765: u'Nortel', 0x001766: u'Accense Technology, Inc.', 0x001767: u'Earforce AS', 0x001768: u'Zinwave Ltd', 0x001769: u'Cymphonix Corp', 0x00176A: u'Avago Technologies', 0x00176B: u'Kiyon, Inc.', 0x00176C: u'Pivot3, Inc.', 0x00176D: u'CORE CORPORATION', 0x00176E: u'DUCATI SISTEMI', 0x00176F: u'PAX Computer Technology(Shenzhen) Ltd.', 0x001770: u'Arti Industrial Electronics Ltd.', 0x001771: u'APD Communications Ltd', 0x001772: u'ASTRO Strobel Kommunikationssysteme GmbH', 0x001773: u'Laketune Technologies Co. Ltd', 0x001774: u'Elesta GmbH', 0x001775: u'TTE Germany GmbH', 0x001776: u'Meso Scale Diagnostics, LLC', 0x001777: u'Obsidian Research Corporation', 0x001778: u'Central Music Co.', 0x001779: u'QuickTel', 0x00177A: u'ASSA ABLOY AB', 0x00177B: u'Azalea Networks inc', 0x00177C: u'D-Link India Ltd', 0x00177D: u'IDT International Limited', 0x00177E: u'Meshcom Technologies Inc.', 0x00177F: u'Worldsmart Retech', 0x001780: u'Applera Holding B.V. 
Singapore Operations', 0x001781: u'Greystone Data System, Inc.', 0x001782: u'LoBenn Inc.', 0x001783: u'Texas Instruments', 0x001784: u'Motorola Mobile Devices', 0x001785: u'Sparr Electronics Ltd', 0x001786: u'wisembed', 0x001787: u'Brother, Brother & Sons ApS', 0x001788: u'Philips Lighting BV', 0x001789: u'Zenitron Corporation', 0x00178A: u'DARTS TECHNOLOGIES CORP.', 0x00178B: u'Teledyne Technologies Incorporated', 0x00178C: u'Independent Witness, Inc', 0x00178D: u'Checkpoint Systems, Inc.', 0x00178E: u'Gunnebo Cash Automation AB', 0x00178F: u'NINGBO YIDONG ELECTRONIC CO.,LTD.', 0x001790: u'HYUNDAI DIGITECH Co, Ltd.', 0x001791: u'LinTech GmbH', 0x001792: u'Falcom Wireless Comunications Gmbh', 0x001793: u'Tigi Corporation', 0x001794: u'Cisco Systems', 0x001795: u'Cisco Systems', 0x001796: u'Rittmeyer AG', 0x001797: u'Telsy Elettronica S.p.A.', 0x001798: u'Azonic Technology Co., LTD', 0x001799: u'SmarTire Systems Inc.', 0x00179A: u'D-Link Corporation', 0x00179B: u'Chant Sincere CO., LTD.', 0x00179C: u'DEPRAG SCHULZ GMBH u. CO.', 0x00179D: u'Kelman Limited', 0x00179E: u'Sirit Inc', 0x00179F: u'Apricorn', 0x0017A0: u'RoboTech srl', 0x0017A1: u'3soft inc.', 0x0017A2: u'Camrivox Ltd.', 0x0017A3: u'MIX s.r.l.', 0x0017A4: u'Global Data Services', 0x0017A5: u'TrendChip Technologies Corp.', 0x0017A6: u'YOSIN ELECTRONICS CO., LTD.', 0x0017A7: u'Mobile Computing Promotion Consortium', 0x0017A8: u'EDM Corporation', 0x0017A9: u'Sentivision', 0x0017AA: u'elab-experience inc.', 0x0017AB: u'Nintendo Co., Ltd.', 0x0017AC: u'O\'Neil Product Development Inc.', 0x0017AD: u'AceNet Corporation', 0x0017AE: u'GAI-Tronics', 0x0017AF: u'Enermet', 0x0017B0: u'Nokia Danmark A/S', 0x0017B1: u'ACIST Medical Systems, Inc.', 0x0017B2: u'SK Telesys', 0x0017B3: u'Aftek Infosys Limited', 0x0017B4: u'Remote Security Systems, LLC', 0x0017B5: u'Peerless Systems Corporation', 0x0017B6: u'Aquantia', 0x0017B7: u'Tonze Technology Co.', 0x0017B8: u'NOVATRON CO., LTD.', 0x0017B9: u'Gambro Lundia AB', 0x0017BA: u'SEDO CO., LTD.', 0x0017BB: u'Syrinx Industrial Electronics', 0x0017BC: u'Touchtunes Music Corporation', 0x0017BD: u'Tibetsystem', 0x0017BE: u'Tratec Telecom B.V.', 0x0017BF: u'Coherent Research Limited', 0x0017C0: u'PureTech Systems, Inc.', 0x0017C1: u'CM Precision Technology LTD.', 0x0017C2: u'Pirelli Broadband Solutions', 0x0017C3: u'KTF Technologies Inc.', 0x0017C4: u'Quanta Microsystems, INC.', 0x0017C5: u'SonicWALL', 0x0017C6: u'Labcal Technologies', 0x0017C7: u'MARA Systems Consulting AB', 0x0017C8: u'Kyocera Mita Corporation', 0x0017C9: u'Samsung Electronics Co., Ltd.', 0x0017CA: u'BenQ Corporation', 0x0017CB: u'Juniper Networks', 0x0017CC: u'Alcatel USA Sourcing LP', 0x0017CD: u'CEC Wireless R&D Ltd.', 0x0017CE: u'MB International Telecom Labs srl', 0x0017CF: u'iMCA-GmbH', 0x0017D0: u'Opticom Communications, LLC', 0x0017D1: u'Nortel', 0x0017D2: u'THINLINX PTY LTD', 0x0017D3: u'Etymotic Research, Inc.', 0x0017D4: u'Monsoon Multimedia, Inc', 0x0017D5: u'Samsung Electronics Co., Ltd.', 0x0017D6: u'Bluechips Microhouse Co.,Ltd.', 0x0017D7: u'Input/Output Inc.', 0x0017D8: u'Magnum Semiconductor, Inc.', 0x0017D9: u'AAI Corporation', 0x0017DA: u'Spans Logic', 0x0017DB: u'PRIVATE', 0x0017DC: u'DAEMYUNG ZERO1', 0x0017DD: u'Clipsal Australia', 0x0017DE: u'Advantage Six Ltd', 0x0017DF: u'Cisco Systems', 0x0017E0: u'Cisco Systems', 0x0017E1: u'DACOS Technologies Co., Ltd.', 0x0017E2: u'Motorola Mobile Devices', 0x0017E3: u'Texas Instruments', 0x0017E4: u'Texas Instruments', 0x0017E5: u'Texas Instruments', 0x0017E6: u'Texas 
Instruments', 0x0017E7: u'Texas Instruments', 0x0017E8: u'Texas Instruments', 0x0017E9: u'Texas Instruments', 0x0017EA: u'Texas Instruments', 0x0017EB: u'Texas Instruments', 0x0017EC: u'Texas Instruments', 0x0017ED: u'WooJooIT Ltd.', 0x0017EE: u'Motorola CHS', 0x0017EF: u'Blade Network Technologies, Inc.', 0x0017F0: u'SZCOM Broadband Network Technology Co.,Ltd', 0x0017F1: u'Renu Electronics Pvt Ltd', 0x0017F2: u'Apple Computer', 0x0017F3: u'M/A-COM Wireless Systems', 0x0017F4: u'ZERON ALLIANCE', 0x0017F5: u'NEOPTEK', 0x0017F6: u'Pyramid Meriden Inc.', 0x0017F7: u'CEM Solutions Pvt Ltd', 0x0017F8: u'Motech Industries Inc.', 0x0017F9: u'Forcom Sp. z o.o.', 0x0017FA: u'Microsoft Corporation', 0x0017FB: u'FA', 0x0017FC: u'Suprema Inc.', 0x0017FD: u'Amulet Hotkey', 0x0017FE: u'TALOS SYSTEM INC.', 0x0017FF: u'PLAYLINE Co.,Ltd.', 0x001800: u'UNIGRAND LTD', 0x001801: u'Actiontec Electronics, Inc', 0x001802: u'Alpha Networks Inc.', 0x001803: u'ArcSoft Shanghai Co. LTD', 0x001804: u'E-TEK DIGITAL TECHNOLOGY LIMITED', 0x001805: u'Beijing InHand Networking', 0x001806: u'Hokkei Industries Co., Ltd.', 0x001807: u'Fanstel Corp.', 0x001808: u'SightLogix, Inc.', 0x001809: u'CRESYN', 0x00180A: u'Meraki Networks, Inc.', 0x00180B: u'Brilliant Telecommunications', 0x00180C: u'Optelian Access Networks Corporation', 0x00180D: u'Terabytes Server Storage Tech Corp', 0x00180E: u'Avega Systems', 0x00180F: u'Nokia Danmark A/S', 0x001810: u'IPTrade S.A.', 0x001811: u'Neuros Technology International, LLC.', 0x001812: u'Beijing Xinwei Telecom Technology Co., Ltd.', 0x001813: u'Sony Ericsson Mobile Communications', 0x001814: u'Mitutoyo Corporation', 0x001815: u'GZ Technologies, Inc.', 0x001816: u'Ubixon Co., Ltd.', 0x001817: u'D. E. Shaw Research, LLC', 0x001818: u'Cisco Systems', 0x001819: u'Cisco Systems', 0x00181A: u'AVerMedia Technologies Inc.', 0x00181B: u'TaiJin Metal Co., Ltd.', 0x00181C: u'Exterity Limited', 0x00181D: u'ASIA ELECTRONICS CO.,LTD', 0x00181E: u'GDX Technologies Ltd.', 0x00181F: u'Palmmicro Communications', 0x001820: u'w5networks', 0x001821: u'SINDORICOH', 0x001822: u'CEC TELECOM CO.,LTD.', 0x001823: u'Delta Electronics, Inc.', 0x001824: u'Kimaldi Electronics, S.L.', 0x001825: u'Wavion LTD', 0x001826: u'Cale Access AB', 0x001827: u'NEC PHILIPS UNIFIED SOLUTIONS NEDERLAND BV', 0x001828: u'e2v technologies (UK) ltd.', 0x001829: u'Gatsometer', 0x00182A: u'Taiwan Video & Monitor', 0x00182B: u'Softier', 0x00182C: u'Ascend Networks, Inc.', 0x00182D: u'Artec Group OÜ', 0x00182E: u'Wireless Ventures USA', 0x00182F: u'Texas Instruments', 0x001830: u'Texas Instruments', 0x001831: u'Texas Instruments', 0x001832: u'Texas Instruments', 0x001833: u'Texas Instruments', 0x001834: u'Texas Instruments', 0x001835: u'ITC', 0x001836: u'Reliance Electric Limited', 0x001837: u'Universal ABIT Co., Ltd.', 0x001838: u'PanAccess Communications,Inc.', 0x001839: u'Cisco-Linksys LLC', 0x00183A: u'Westell Technologies', 0x00183B: u'CENITS Co., Ltd.', 0x00183C: u'Encore Software Limited', 0x00183D: u'Vertex Link Corporation', 0x00183E: u'Digilent, Inc', 0x00183F: u'2Wire, Inc', 0x001840: u'3 Phoenix, Inc.', 0x001841: u'High Tech Computer Corp', 0x001842: u'Nokia Danmark A/S', 0x001843: u'Dawevision Ltd', 0x001844: u'Heads Up Technologies, Inc.', 0x001845: u'NPL Pulsar Ltd.', 0x001846: u'Crypto S.A.', 0x001847: u'AceNet Technology Inc.', 0x001848: u'Vecima Networks Inc.', 0x001849: u'Pigeon Point Systems', 0x00184A: u'Catcher, Inc.', 0x00184B: u'Las Vegas Gaming, Inc.', 0x00184C: u'Bogen Communications', 0x00184D: u'Netgear Inc.', 
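# Editor's sketch (assumptions flagged): each key in this mapping is a 24-bit
# IEEE OUI -- the first three octets of a MAC address packed as an integer --
# and each value is the vendor string as registered. Assuming the enclosing
# assignment binds this dict to a name such as `oui_registry` (hypothetical;
# the assignment lies outside this excerpt), a vendor lookup would be:
#
#     mac = '00:13:46:ab:cd:ef'
#     prefix = int(mac.replace(':', '')[:6], 16)     # -> 0x001346
#     vendor = oui_registry.get(prefix, u'unknown')  # -> u'D-Link Corporation'
#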
0x00184E: u'Lianhe Technologies, Inc.', 0x00184F: u'8 Ways Technology Corp.', 0x001850: u'Secfone Kft', 0x001851: u'SWsoft', 0x001852: u'StorLink Semiconductors, Inc.', 0x001853: u'Atera Networks LTD.', 0x001854: u'Argard Co., Ltd', 0x001855: u'Aeromaritime Systembau GmbH', 0x001856: u'EyeFi, Inc', 0x001857: u'Unilever R&D', 0x001858: u'TagMaster AB', 0x001859: u'Strawberry Linux Co.,Ltd.', 0x00185A: u'uControl, Inc.', 0x00185B: u'Network Chemistry, Inc', 0x00185C: u'EDS Lab Pte Ltd', 0x00185D: u'TAIGUEN TECHNOLOGY (SHEN-ZHEN) CO., LTD.', 0x00185E: u'Nexterm Inc.', 0x00185F: u'TAC Inc.', 0x001860: u'SIM Technology Group Shanghai Simcom Ltd.,', 0x001861: u'Ooma, Inc.', 0x001862: u'Seagate Technology', 0x001863: u'Veritech Electronics Limited', 0x001864: u'Cybectec Inc.', 0x001865: u'Bayer Diagnostics Sudbury Ltd', 0x001866: u'Leutron Vision', 0x001867: u'Evolution Robotics Retail', 0x001868: u'Scientific Atlanta, A Cisco Company', 0x001869: u'KINGJIM', 0x00186A: u'Global Link Digital Technology Co,.LTD', 0x00186B: u'Sambu Communics CO., LTD.', 0x00186C: u'Neonode AB', 0x00186D: u'Zhenjiang Sapphire Electronic Industry CO.', 0x00186E: u'3COM Europe Ltd', 0x00186F: u'Setha Industria Eletronica LTDA', 0x001870: u'E28 Shanghai Limited', 0x001871: u'Global Data Services', 0x001872: u'Expertise Engineering', 0x001873: u'Cisco Systems', 0x001874: u'Cisco Systems', 0x001875: u'AnaCise Testnology Pte Ltd', 0x001876: u'WowWee Ltd.', 0x001877: u'Amplex A/S', 0x001878: u'Mackware GmbH', 0x001879: u'dSys', 0x00187A: u'Wiremold', 0x00187B: u'4NSYS Co. Ltd.', 0x00187C: u'INTERCROSS, LLC', 0x00187D: u'Armorlink shanghai Co. Ltd', 0x00187E: u'RGB Spectrum', 0x00187F: u'ZODIANET', 0x001880: u'Mobilygen', 0x001881: u'Buyang Electronics Industrial Co., Ltd', 0x001882: u'Huawei Technologies Co., Ltd.', 0x001883: u'FORMOSA21 INC.', 0x001884: u'FON', 0x001885: u'Avigilon Corporation', 0x001886: u'EL-TECH, INC.', 0x001887: u'Metasystem SpA', 0x001888: u'GOTIVE a.s.', 0x001889: u'WinNet Solutions Limited', 0x00188A: u'Infinova LLC', 0x00188B: u'Dell', 0x00188C: u'Mobile Action Technology Inc.', 0x00188D: u'Nokia Danmark A/S', 0x00188E: u'Ekahau, Inc.', 0x00188F: u'Montgomery Technology, Inc.', 0x001890: u'RadioCOM, s.r.o.', 0x001891: u'Zhongshan General K-mate Electronics Co., Ltd', 0x001892: u'ads-tec GmbH', 0x001893: u'SHENZHEN PHOTON BROADBAND TECHNOLOGY CO.,LTD', 0x001894: u'zimocom', 0x001895: u'Hansun Technologies Inc.', 0x001896: u'Great Well Electronic LTD', 0x001897: u'JESS-LINK PRODUCTS Co., LTD', 0x001898: u'KINGSTATE ELECTRONICS CORPORATION', 0x001899: u'ShenZhen jieshun Science&Technology Industry CO,LTD.', 0x00189A: u'HANA Micron Inc.', 0x00189B: u'Thomson Inc.', 0x00189C: u'Weldex Corporation', 0x00189D: u'Navcast Inc.', 0x00189E: u'OMNIKEY GmbH.', 0x00189F: u'Lenntek Corporation', 0x0018A0: u'Cierma Ascenseurs', 0x0018A1: u'Tiqit Computers, Inc.', 0x0018A2: u'XIP Technology AB', 0x0018A3: u'ZIPPY TECHNOLOGY CORP.', 0x0018A4: u'Motorola Mobile Devices', 0x0018A5: u'ADigit Technologies Corp.', 0x0018A6: u'Persistent Systems, LLC', 0x0018A7: u'Yoggie Security Systems LTD.', 0x0018A8: u'AnNeal Technology Inc.', 0x0018A9: u'Ethernet Direct Corporation', 0x0018AA: u'PRIVATE', 0x0018AB: u'BEIJING LHWT MICROELECTRONICS INC.', 0x0018AC: u'Shanghai Jiao Da HISYS Technology Co. 
Ltd.', 0x0018AD: u'NIDEC SANKYO CORPORATION', 0x0018AE: u'Tongwei Video Technology CO.,LTD', 0x0018AF: u'Samsung Electronics Co., Ltd.', 0x0018B0: u'Nortel', 0x0018B1: u'Blade Network Technologies', 0x0018B2: u'ADEUNIS RF', 0x0018B3: u'TEC WizHome Co., Ltd.', 0x0018B4: u'Dawon Media Inc.', 0x0018B5: u'Magna Carta', 0x0018B6: u'S3C, Inc.', 0x0018B7: u'D3 LED, LLC', 0x0018B8: u'New Voice International AG', 0x0018B9: u'Cisco Systems', 0x0018BA: u'Cisco Systems', 0x0018BB: u'Eliwell Controls srl', 0x0018BC: u'ZAO NVP Bolid', 0x0018BD: u'SHENZHEN DVBWORLD TECHNOLOGY CO., LTD.', 0x0018BE: u'ANSA Corporation', 0x0018BF: u'Essence Technology Solution, Inc.', 0x0018C0: u'Motorola CHS', 0x0018C1: u'Almitec Informática e Comércio Ltda.', 0x0018C2: u'Firetide, Inc', 0x0018C3: u'C&S Microwave', 0x0018C4: u'Raba Technologies LLC', 0x0018C5: u'Nokia Danmark A/S', 0x0018C6: u'OPW Fuel Management Systems', 0x0018C7: u'Real Time Automation', 0x0018C8: u'ISONAS Inc.', 0x0018C9: u'EOps Technology Limited', 0x0018CA: u'Viprinet GmbH', 0x0018CB: u'Tecobest Technology Limited', 0x0018CC: u'AXIOHM SAS', 0x0018CD: u'Erae Electronics Industry Co., Ltd', 0x0018CE: u'Dreamtech Co., Ltd', 0x0018CF: u'Baldor Electric Company', 0x0018D0: u'@ROAD Inc', 0x0018D1: u'Siemens Home & Office Comm. Devices', 0x0018D2: u'High-Gain Antennas LLC', 0x0018D3: u'TEAMCAST', 0x0018D4: u'Unified Display Interface SIG', 0x0018D5: u'REIGNCOM', 0x0018D6: u'Swirlnet A/S', 0x0018D7: u'Javad Navigation Systems Inc.', 0x0018D8: u'ARCH METER Corporation', 0x0018D9: u'Santosha Internatonal, Inc', 0x0018DA: u'AMBER wireless GmbH', 0x0018DB: u'EPL Technology Ltd', 0x0018DC: u'Prostar Co., Ltd.', 0x0018DD: u'Silicondust Engineering Ltd', 0x0018DE: u'Intel Corporation', 0x0018DF: u'The Morey Corporation', 0x0018E0: u'ANAVEO', 0x0018E1: u'Verkerk Service Systemen', 0x0018E2: u'Topdata Sistemas de Automacao Ltda', 0x0018E3: u'Visualgate Systems, Inc.', 0x0018E4: u'YIGUANG', 0x0018E5: u'Adhoco AG', 0x0018E6: u'Computer Hardware Design SIA', 0x0018E7: u'Cameo Communications, INC.', 0x0018E8: u'Hacetron Corporation', 0x0018E9: u'Numata Corporation', 0x0018EA: u'Alltec GmbH', 0x0018EB: u'BroVis Wireless Networks', 0x0018EC: u'Welding Technology Corporation', 0x0018ED: u'ACCUTECH INTERNATIONAL CO., LTD.', 0x0018EE: u'Videology Imaging Solutions, Inc.', 0x0018EF: u'Escape Communications, Inc.', 0x0018F0: u'JOYTOTO Co., Ltd.', 0x0018F1: u'Chunichi Denshi Co.,LTD.', 0x0018F2: u'Beijing Tianyu Communication Equipment Co., Ltd', 0x0018F3: u'ASUSTek COMPUTER INC.', 0x0018F4: u'EO TECHNICS Co., Ltd.', 0x0018F5: u'Shenzhen Streaming Video Technology Company Limited', 0x0018F6: u'Thomson Telecom Belgium', 0x0018F7: u'Kameleon Technologies', 0x0018F8: u'Cisco-Linksys LLC', 0x0018F9: u'VVOND, Inc.', 0x0018FA: u'Yushin Precision Equipment Co.,Ltd.', 0x0018FB: u'Compro Technology', 0x0018FC: u'Altec Electronic AG', 0x0018FD: u'Optimal Technologies International Inc.', 0x0018FE: u'Hewlett Packard', 0x0018FF: u'PowerQuattro Co.', 0x001900: u'Intelliverese - DBA Voicecom', 0x001901: u'F1MEDIA', 0x001902: u'Cambridge Consultants Ltd', 0x001903: u'Bigfoot Networks Inc', 0x001904: u'WB Electronics Sp. 
z o.o.', 0x001905: u'SCHRACK Seconet AG', 0x001906: u'Cisco Systems', 0x001907: u'Cisco Systems', 0x001908: u'Duaxes Corporation', 0x001909: u'Devi A/S', 0x00190A: u'HASWARE INC.', 0x00190B: u'Southern Vision Systems, Inc.', 0x00190C: u'Encore Electronics, Inc.', 0x00190D: u'IEEE 1394c', 0x00190E: u'Atech Technology Co., Ltd.', 0x00190F: u'Advansus Corp.', 0x001910: u'Knick Elektronische Messgeraete GmbH & Co. KG', 0x001911: u'Just In Mobile Information Technologies (Shanghai) Co., Ltd.', 0x001912: u'Welcat Inc', 0x001913: u'Chuang-Yi Network Equipment Co.Ltd.', 0x001914: u'Winix Co., Ltd', 0x001915: u'TECOM Co., Ltd.', 0x001916: u'PayTec AG', 0x001917: u'Posiflex Inc.', 0x001918: u'Interactive Wear AG', 0x001919: u'ASTEL Inc.', 0x00191A: u'IRLINK', 0x00191B: u'Sputnik Engineering AG', 0x00191C: u'Sensicast Systems', 0x00191D: u'Nintendo Co.,Ltd.', 0x00191E: u'Beyondwiz Co., Ltd.', 0x00191F: u'Microlink communications Inc.', 0x001920: u'KUME electric Co.,Ltd.', 0x001921: u'Elitegroup Computer System Co.', 0x001922: u'CM Comandos Lineares', 0x001923: u'Phonex Korea Co., LTD.', 0x001924: u'LBNL Engineering', 0x001925: u'Intelicis Corporation', 0x001926: u'BitsGen Co., Ltd.', 0x001927: u'ImCoSys Ltd', 0x001928: u'Siemens AG, Transportation Systems', 0x001929: u'2M2B Montadora de Maquinas Bahia Brasil LTDA', 0x00192A: u'Antiope Associates', 0x00192B: u'Hexagram, Inc.', 0x00192C: u'Motorola Mobile Devices', 0x00192D: u'Nokia Corporation', 0x00192E: u'Spectral Instruments, Inc.', 0x00192F: u'Cisco Systems', 0x001930: u'Cisco Systems', 0x001931: u'Balluff GmbH', 0x001932: u'Gude Analog- und Digialsysteme GmbH', 0x001933: u'Strix Systems, Inc.', 0x001934: u'TRENDON TOUCH TECHNOLOGY CORP.', 0x001935: u'Duerr Dental GmbH & Co. KG', 0x001936: u'STERLITE OPTICAL TECHNOLOGIES LIMITED', 0x001937: u'CommerceGuard AB', 0x001938: u'UMB Communications Co., Ltd.', 0x001939: u'Gigamips', 0x00193A: u'OESOLUTIONS', 0x00193B: u'Deliberant LLC', 0x00193C: u'HighPoint Technologies Incorporated', 0x00193D: u'GMC Guardian Mobility Corp.', 0x00193E: u'PIRELLI BROADBAND SOLUTIONS', 0x00193F: u'RDI technology(Shenzhen) Co.,LTD', 0x001940: u'Rackable Systems', 0x001941: u'Pitney Bowes, Inc', 0x001942: u'ON SOFTWARE INTERNATIONAL LIMITED', 0x001943: u'Belden', 0x001944: u'Fossil Partners, L.P.', 0x001945: u'Ten-Tec Inc.', 0x001946: u'Cianet Industria e Comercio S/A', 0x001947: u'Scientific Atlanta, A Cisco Company', 0x001948: u'AireSpider Networks', 0x001949: u'TENTEL COMTECH CO., LTD.', 0x00194A: u'TESTO AG', 0x00194B: u'SAGEM COMMUNICATION', 0x00194C: u'Fujian Stelcom information & Technology CO.,Ltd', 0x00194D: u'Avago Technologies Sdn Bhd', 0x00194E: u'Ultra Electronics - TCS (Tactical Communication Systems)', 0x00194F: u'Nokia Danmark A/S', 0x001950: u'Harman Multimedia', 0x001951: u'NETCONS, s.r.o.', 0x001952: u'ACOGITO Co., Ltd', 0x001953: u'Chainleader Communications Corp.', 0x001954: u'Leaf Corporation.', 0x001955: u'Cisco Systems', 0x001956: u'Cisco Systems', 0x001957: u'Saafnet Canada Inc.', 0x001958: u'Bluetooth SIG, Inc.', 0x001959: u'Staccato Communications Inc.', 0x00195A: u'Jenaer Antriebstechnik GmbH', 0x00195B: u'D-Link Corporation', 0x00195C: u'Innotech Corporation', 0x00195D: u'ShenZhen XinHuaTong Opto Electronics Co.,Ltd', 0x00195E: u'Motorola CHS', 0x00195F: u'Valemount Networks Corporation', 0x001960: u'DoCoMo Systems, Inc.', 0x001961: u'Blaupunkt GmbH', 0x001962: u'Commerciant, LP', 0x001963: u'Sony Ericsson Mobile Communications AB', 0x001964: u'Doorking Inc.', 0x001965: u'YuHua TelTech 
(ShangHai) Co., Ltd.', 0x001966: u'Asiarock Technology Limited', 0x001967: u'TELDAT Sp.J.', 0x001968: u'Digital Video Networks(Shanghai) CO. LTD.', 0x001969: u'Nortel', 0x00196A: u'MikroM GmbH', 0x00196B: u'Danpex Corporation', 0x00196C: u'ETROVISION TECHNOLOGY', 0x00196D: u'Raybit Systems Korea, Inc', 0x00196E: u'Metacom (Pty) Ltd.', 0x00196F: u'SensoPart GmbH', 0x001970: u'Z-Com, Inc.', 0x001971: u'Guangzhou Unicomp Technology Co.,Ltd', 0x001972: u'Plexus (Xiamen) Co.,ltd', 0x001973: u'Zeugma Systems', 0x001974: u'AboCom Systems, Inc.', 0x001975: u'Beijing Huisen networks technology Inc', 0x001976: u'Xipher Technologies, LLC', 0x001977: u'Aerohive Networks, Inc.', 0x001978: u'Datum Systems, Inc.', 0x001979: u'Nokia Danmark A/S', 0x00197A: u'MAZeT GmbH', 0x00197B: u'Picotest Corp.', 0x00197C: u'Riedel Communications GmbH', 0x00197D: u'Hon Hai Precision Ind. Co., Ltd', 0x00197E: u'Hon Hai Precision Ind. Co., Ltd', 0x00197F: u'PLANTRONICS, INC.', 0x001980: u'Gridpoint Systems', 0x001981: u'Vivox Inc', 0x001982: u'SmarDTV', 0x001983: u'CCT R&D Limited', 0x001984: u'ESTIC Corporation', 0x001985: u'IT Watchdogs, Inc', 0x001986: u'Cheng Hongjian', 0x001987: u'Panasonic Mobile Communications Co., Ltd.', 0x001988: u'Wi2Wi, Inc', 0x001989: u'Sonitrol Corporation', 0x00198A: u'Northrop Grumman Systems Corp.', 0x00198B: u'Novera Optics Korea, Inc.', 0x00198C: u'iXSea', 0x00198D: u'Ocean Optics, Inc.', 0x00198E: u'Oticon A/S', 0x00198F: u'Alcatel Bell N.V.', 0x001990: u'ELM DATA Co., Ltd.', 0x001991: u'avinfo', 0x001992: u'Bluesocket, Inc', 0x001993: u'Changshu Switchgear MFG. Co.,Ltd. (Former Changshu Switchgea', 0x001994: u'Jorjin technologies inc.', 0x001995: u'Jurong Hi-Tech (Suzhou)Co.ltd', 0x001996: u'TurboChef Technologies Inc.', 0x001997: u'Soft Device Sdn Bhd', 0x001998: u'SATO CORPORATION', 0x001999: u'Fujitsu Siemens Computers', 0x00199A: u'EDO-EVI', 0x00199B: u'Diversified Technical Systems, Inc.', 0x00199C: u'CTRING', 0x00199D: u'V, Inc.', 0x00199E: u'SHOWADENSHI ELECTRONICS,INC.', 0x00199F: u'DKT A/S', 0x0019A0: u'NIHON DATA SYSTENS, INC.', 0x0019A1: u'LG INFORMATION & COMM.', 0x0019A2: u'ORION TELE-EQUIPMENTS PVT LTD', 0x0019A3: u'asteel electronique atlantique', 0x0019A4: u'Austar Technology (hang zhou) Co.,Ltd', 0x0019A5: u'RadarFind Corporation', 0x0019A6: u'Motorola CHS', 0x0019A7: u'ITU-T', 0x0019A8: u'WiQuest Communications, Inc', 0x0019A9: u'Cisco Systems', 0x0019AA: u'Cisco Systems', 0x0019AB: u'Raycom CO ., LTD', 0x0019AC: u'GSP SYSTEMS Inc.', 0x0019AD: u'BOBST SA', 0x0019AE: u'Hopling Technologies b.v.', 0x0019AF: u'Rigol Technologies, Inc.', 0x0019B0: u'HanYang System', 0x0019B1: u'Arrow7 Corporation', 0x0019B2: u'XYnetsoft Co.,Ltd', 0x0019B3: u'Stanford Research Systems', 0x0019B4: u'VideoCast Ltd.', 0x0019B5: u'Famar Fueguina S.A.', 0x0019B6: u'Euro Emme s.r.l.', 0x0019B7: u'Nokia Danmark A/S', 0x0019B8: u'Boundary Devices', 0x0019B9: u'Dell Inc.', 0x0019BA: u'Paradox Security Systems Ltd', 0x0019BB: u'Hewlett Packard', 0x0019BC: u'ELECTRO CHANCE SRL', 0x0019BD: u'New Media Life', 0x0019BE: u'Altai Technologies Limited', 0x0019BF: u'Citiway technology Co.,ltd', 0x0019C0: u'Motorola Mobile Devices', 0x0019C1: u'Alps Electric Co., Ltd', 0x0019C2: u'Equustek Solutions, Inc.', 0x0019C3: u'Qualitrol', 0x0019C4: u'Infocrypt Inc.', 0x0019C5: u'SONY Computer Entertainment inc,', 0x0019C6: u'ZTE Corporation', 0x0019C7: u'Cambridge Industries(Group) Co.,Ltd.', 0x0019C8: u'AnyDATA Corporation', 0x0019C9: u'S&C ELECTRIC COMPANY', 0x0019CA: u'Broadata Communications, Inc', 0x0019CB: 
u'ZyXEL Communications Corporation', 0x0019CC: u'RCG (HK) Ltd', 0x0019CD: u'Chengdu ethercom information technology Ltd.', 0x0019CE: u'Progressive Gaming International', 0x0019CF: u'SALICRU, S.A.', 0x0019D0: u'Cathexis', 0x0019D1: u'Intel Corporation', 0x0019D2: u'Intel Corporation', 0x0019D3: u'TRAK Microwave', 0x0019D4: u'ICX Technologies', 0x0019D5: u'IP Innovations, Inc.', 0x0019D6: u'LS Cable Ltd.', 0x0019D7: u'FORTUNETEK CO., LTD', 0x0019D8: u'MAXFOR', 0x0019D9: u'Zeutschel GmbH', 0x0019DA: u'Welltrans O&E Technology Co. , Ltd.', 0x0019DB: u'MICRO-STAR INTERNATIONAL CO., LTD.', 0x0019DC: u'ENENSYS Technologies', 0x0019DD: u'FEI-Zyfer, Inc.', 0x0019DE: u'MOBITEK', 0x0019DF: u'THOMSON APDG', 0x0019E0: u'TP-LINK Technologies Co., Ltd.', 0x0019E1: u'Nortel', 0x0019E2: u'Juniper Networks', 0x0019E3: u'Apple Computers', 0x0019E4: u'2Wire, Inc', 0x0019E5: u'Lynx Studio Technology, Inc.', 0x0019E6: u'TOYO MEDIC CO.,LTD.', 0x0019E7: u'Cisco Systems', 0x0019E8: u'Cisco Systems', 0x0019E9: u'S-Information Technolgy, Co., Ltd.', 0x0019EA: u'TeraMage Technologies Co., Ltd.', 0x0019EB: u'Pyronix Ltd', 0x0019EC: u'Sagamore Systems, Inc.', 0x0019ED: u'Axesstel Inc.', 0x0019EE: u'CARLO GAVAZZI CONTROLS SPA-Controls Division', 0x0019EF: u'SHENZHEN LINNKING ELECTRONICS CO.,LTD', 0x0019F0: u'UNIONMAN TECHNOLOGY CO.,LTD', 0x0019F1: u'Star Communication Network Technology Co.,Ltd', 0x0019F2: u'Teradyne K.K.', 0x0019F3: u'Telematrix, Inc', 0x0019F4: u'Convergens Oy Ltd', 0x0019F5: u'Imagination Technologies Ltd', 0x0019F6: u'Acconet (PTE) Ltd', 0x0019F7: u'Onset Computer Corporation', 0x0019F8: u'Embedded Systems Design, Inc.', 0x0019F9: u'Lambda', 0x0019FA: u'Cable Vision Electronics CO., LTD.', 0x0019FB: u'AMSTRAD PLC', 0x0019FC: u'PT. Ufoakses Sukses Luarbiasa', 0x0019FD: u'Nintendo Co., Ltd.', 0x0019FE: u'SHENZHEN SEECOMM TECHNOLOGY CO.,LTD.', 0x0019FF: u'Finnzymes', 0x001A00: u'MATRIX INC.', 0x001A01: u'Smiths Medical', 0x001A02: u'SECURE CARE PRODUCTS, INC', 0x001A03: u'Angel Electronics Co., Ltd.', 0x001A04: u'Interay Solutions BV', 0x001A05: u'OPTIBASE LTD', 0x001A06: u'OpVista, Inc.', 0x001A07: u'Arecont Vision', 0x001A08: u'Dalman Technical Services', 0x001A09: u'Wayfarer Transit Systems Ltd', 0x001A0A: u'Adaptive Micro-Ware Inc.', 0x001A0B: u'BONA TECHNOLOGY INC.', 0x001A0C: u'Swe-Dish Satellite Systems AB', 0x001A0D: u'HandHeld entertainment, Inc.', 0x001A0E: u'Cheng Uei Precision Industry Co.,Ltd', 0x001A0F: u'Sistemas Avanzados de Control, S.A.', 0x001A10: u'LUCENT TRANS ELECTRONICS CO.,LTD', 0x001A11: u'Google Inc.', 0x001A12: u'PRIVATE', 0x001A13: u'Wanlida Group Co., LTD', 0x001A14: u'Xin Hua Control Engineering Co.,Ltd.', 0x001A15: u'gemalto e-Payment', 0x001A16: u'Nokia Danmark A/S', 0x001A17: u'Teak Technologies, Inc.', 0x001A18: u'Advanced Simulation Technology inc.', 0x001A19: u'Computer Engineering Limited', 0x001A1A: u'Gentex Corporation/Electro-Acoustic Products', 0x001A1B: u'Motorola Mobile Devices', 0x001A1C: u'GT&T Engineering Pte Ltd', 0x001A1D: u'PChome Online Inc.', 0x001A1E: u'Aruba Networks', 0x001A1F: u'Coastal Environmental Systems', 0x001A20: u'CMOTECH Co. Ltd.', 0x001A21: u'Indac B.V.', 0x001A22: u'eq-3 GmbH', 0x001A23: u'Ice Qube, Inc', 0x001A24: u'Galaxy Telecom Technologies Ltd', 0x001A25: u'DELTA DORE', 0x001A26: u'Deltanode Solutions AB', 0x001A27: u'Ubistar', 0x001A28: u'ASWT Co., LTD. Taiwan Branch H.K.',
0x001A29: u'Techsonic Industries d/b/a Humminbird', 0x001A2A: u'Arcadyan Technology Corporation', 0x001A2B: u'Ayecom Technology Co., Ltd.', 0x001A2C: u'SATEC Co.,LTD', 0x001A2D: u'The Navvo Group', 0x001A2E: u'Ziova Coporation', 0x001A2F: u'Cisco Systems', 0x001A30: u'Cisco Systems', 0x001A31: u'SCAN COIN Industries AB', 0x001A32: u'ACTIVA MULTIMEDIA', 0x001A33: u'ASI Communications, Inc.', 0x001A34: u'Konka Group Co., Ltd.', 0x001A35: u'BARTEC GmbH', 0x001A36: u'Actimon GmbH & Co. KG', 0x001A37: u'Lear Corporation', 0x001A38: u'SCI Technology', 0x001A39: u'Merten GmbH&CoKG', 0x001A3A: u'Dongahelecomm', 0x001A3B: u'Doah Elecom Inc.', 0x001A3C: u'Technowave Ltd.', 0x001A3D: u'Ajin Vision Co.,Ltd', 0x001A3E: u'Faster Technology LLC', 0x001A3F: u'intelbras', 0x001A40: u'A-FOUR TECH CO., LTD.', 0x001A41: u'INOCOVA Co.,Ltd', 0x001A42: u'Techcity Technology co., Ltd.', 0x001A43: u'Logical Link Communications', 0x001A44: u'JWTrading Co., Ltd', 0x001A45: u'GN Netcom as', 0x001A46: u'Digital Multimedia Technology Co., Ltd', 0x001A47: u'Agami Systems, Inc.', 0x001A48: u'Takacom Corporation', 0x001A49: u'Micro Vision Co.,LTD', 0x001A4A: u'Qumranet Inc.', 0x001A4B: u'Hewlett Packard', 0x001A4C: u'Crossbow Technology, Inc', 0x001A4D: u'GIGABYTE TECHNOLOGY CO.,LTD.', 0x001A4E: u'NTI AG / LinMot', 0x001A4F: u'AVM GmbH', 0x001A50: u'PheeNet Technology Corp.', 0x001A51: u'Alfred Mann Foundation', 0x001A52: u'Meshlinx Wireless Inc.', 0x001A53: u'Zylaya', 0x001A54: u'Hip Shing Electronics Ltd.', 0x001A55: u'ACA-Digital Corporation', 0x001A56: u'ViewTel Co,. Ltd.', 0x001A57: u'Matrix Design Group, LLC', 0x001A58: u'Celectronic GmbH', 0x001A59: u'Ircona', 0x001A5A: u'Korea Electric Power Data Network (KDN) Co., Ltd', 0x001A5B: u'NetCare Service Co., Ltd.', 0x001A5C: u'Euchner GmbH+Co. KG',
0x001A5D: u'Mobinnova Corp.', 0x001A5E: u'Thincom Technology Co.,Ltd', 0x001A5F: u'KitWorks.fi Ltd.', 0x001A60: u'Wave Electronics Co.,Ltd.', 0x001A61: u'PacStar Corp.', 0x001A62: u'trusted data', 0x001A63: u'Elster Electricity, LLC', 0x001A64: u'IBM Corp.', 0x001A65: u'Seluxit', 0x001A66: u'Motorola CHS', 0x001A67: u'Infinite QL Sdn Bhd', 0x001A68: u'Weltec Enterprise Co., Ltd.', 0x001A69: u'Wuhan Yangtze Optical Technology CO.,Ltd.', 0x001A6A: u'Tranzas, Inc.', 0x001A6B: u'USI', 0x001A6C: u'Cisco Systems', 0x001A6D: u'Cisco Systems', 0x001A6E: u'Impro Technologies', 0x001A6F: u'MI.TEL s.r.l.', 0x001A70: u'Cisco-Linksys, LLC', 0x001A71: u'Diostech Co., Ltd.', 0x001A72: u'Mosart Semiconductor Corp.', 0x001A73: u'Gemtek Technology Co., Ltd.', 0x001A74: u'Procare International Co', 0x001A75: u'Sony Ericsson Mobile Communications', 0x001A76: u'SDT information Technology Co.,LTD.', 0x001A77: u'Motorola Mobile Devices', 0x001A78: u'ubtos', 0x001A79: u'TELECOMUNICATION TECHNOLOGIES LTD.', 0x001A7A: u'Lismore Instruments Limited', 0x001A7B: u'Teleco, Inc.', 0x001A7C: u'Hirschmann Automation and Control B.V.', 0x001A7D: u'cyber-blue(HK)Ltd', 0x001A7E: u'LN Srithai Comm Ltd.', 0x001A7F: u'GCI Science&Technology Co.,Ltd.', 0x001A80: u'Sony Corporation', 0x001A81: u'Zelax', 0x001A82: u'PROBA Building Automation Co.,LTD', 0x001A83: u'Pegasus Technologies Inc.', 0x001A84: u'V One Multimedia Pte Ltd', 0x001A85: u'NV Michel Van de Wiele', 0x001A86: u'AdvancedIO Systems Inc', 0x001A87: u'Canhold International Limited', 0x001A88: u'Venergy,Co,Ltd', 0x001A89: u'Nokia Danmark A/S', 0x001A8A: u'Samsung Electronics Co., Ltd.', 0x001A8B: u'CHUNIL ELECTRIC IND., CO.', 0x001A8C: u'Astaro AG', 0x001A8D: u'AVECS Bergen GmbH', 0x001A8E: u'3Way Networks Ltd', 0x001A8F: u'Nortel', 0x001A90: u'Trópico Sistemas e Telecomunicações da Amazônia LTDA.', 0x001A91: u'FusionDynamic Ltd.', 0x001A92: u'ASUSTek COMPUTER INC.', 0x001A93: u'ERCO Leuchten GmbH', 0x001A94: u'Votronic GmbH', 0x001A95: u'Hisense Mobile Communications Technoligy Co.,Ltd.', 0x001A96: u'ECLER S.A.', 0x001A97: u'fitivision technology Inc.', 0x001A98: u'Asotel Communication Limited Taiwan Branch', 0x001A99: u'Smarty (HZ) Information Electronics Co., Ltd', 0x001A9A: u'Skyworth Digital technology(shenzhen)co.ltd.', 0x001A9B: u'ADEC & Parter AG', 0x001A9C: u'RightHand Technologies, Inc.', 0x001A9D: u'Skipper Wireless, Inc.', 0x001A9E: u'ICON Digital International Limited', 0x001A9F: u'A-Link Europe Ltd', 0x001AA0: u'Dell Inc', 0x001AA1: u'Cisco Systems', 0x001AA2: u'Cisco Systems', 0x001AA3: u'DELORME', 0x001AA4: u'Future University-Hakodate', 0x001AA5: u'BRN Phoenix', 0x001AA6: u'Telefunken Radio Communication Systems GmbH &CO.KG', 0x001AA7: u'Torian Wireless', 0x001AA8: u'Mamiya Digital Imaging Co., Ltd.', 0x001AA9: u'FUJIAN STAR-NET COMMUNICATION CO.,LTD', 0x001AAA: u'Analogic Corp.', 0x001AAB: u'eWings s.r.l.', 0x001AAC: u'Corelatus AB', 0x001AAD: u'Motorola CHS', 0x001AAE: u'Savant Systems LLC', 0x001AAF: u'BLUSENS TECHNOLOGY', 0x001AB0: u'Signal Networks Pvt. Ltd.,',
0x001AB1: u'Asia Pacific Satellite Industries Co., Ltd.', 0x001AB2: u'Cyber Solutions Inc.', 0x001AB3: u'VISIONITE INC.', 0x001AB4: u'FFEI Ltd.', 0x001AB5: u'Home Network System', 0x001AB6: u'Luminary Micro Inc', 0x001AB7: u'Ethos Networks LTD.', 0x001AB8: u'Anseri Corporation', 0x001AB9: u'PMC', 0x001ABA: u'Caton Overseas Limited', 0x001ABB: u'Fontal Technology Incorporation', 0x001ABC: u'U4EA Technologies Ltd', 0x001ABD: u'Impatica Inc.', 0x001ABE: u'COMPUTER HI-TECH INC.', 0x001ABF: u'TRUMPF Laser Marking Systems AG', 0x001AC0: u'JOYBIEN TECHNOLOGIES CO., LTD.', 0x001AC1: u'3COM EUROPE', 0x001AC2: u'YEC Co.,Ltd.', 0x001AC3: u'Scientific-Atlanta, Inc', 0x001AC4: u'2Wire, Inc', 0x001AC5: u'BreakingPoint Systems, Inc.', 0x001AC6: u'Micro Control Designs', 0x001AC7: u'UNIPOINT', 0x001AC8: u'ISL (Instrumentation Scientifique de Laboratoire)', 0x001AC9: u'SUZUKEN CO.,LTD', 0x001ACA: u'Tilera Corporation', 0x001ACB: u'Autocom Products Ltd', 0x001ACC: u'Celestial Semiconductor, Ltd', 0x001ACD: u'Tidel Engineering LP', 0x001ACE: u'YUPITERU INDUSTRIES CO., LTD.', 0x001ACF: u'C.T. ELETTRONICA', 0x001AD0: u'Siemens Schweiz AG', 0x001AD1: u'FARGO CO., LTD.', 0x001AD2: u'Eletronica Nitron Ltda', 0x001AD3: u'Vamp Ltd.', 0x001AD4: u'iPOX Technology Co., Ltd.', 0x001AD5: u'KMC CHAIN INDUSTRIAL CO., LTD.', 0x001AD6: u'JIAGNSU AETNA ELECTRIC CO.,LTD', 0x001AD7: u'Christie Digital Systems, Inc.', 0x001AD8: u'AlsterAero GmbH', 0x001AD9: u'International Broadband Electric Communications, Inc.', 0x001ADA: u'Biz-2-Me Inc.', 0x001ADB: u'Motorola Mobile Devices', 0x001ADC: u'Nokia Danmark A/S', 0x001ADD: u'PePWave Ltd', 0x001ADE: u'Motorola CHS', 0x001ADF: u'Interactivetv Pty Limited', 0x001AE0: u'Mythology Tech Express Inc.', 0x001AE1: u'EDGE ACCESS INC', 0x001AE2: u'Cisco Systems', 0x001AE3: u'Cisco Systems', 0x001AE4: u'Liposonix Inc,', 0x001AE5: u'Mvox Technologies Inc.', 0x001AE6: u'Atlanta Advanced Communications Holdings Limited', 0x001AE7: u'Aztek Networks, Inc.', 0x001AE8: u'Siemens Enterprise Communications GmbH & Co. KG', 0x001AE9: u'Nintendo Co., Ltd.', 0x001AEA: u'Radio Terminal Systems Pty Ltd', 0x001AEB: u'Allied Telesis K.K.', 0x001AEC: u'Keumbee Electronics Co.,Ltd.', 0x001AED: u'INCOTEC GmbH', 0x001AEE: u'Shenztech Ltd', 0x001AEF: u'Loopcomm Technology, Inc.', 0x001AF0: u'Alcatel - IPD', 0x001AF1: u'Embedded Artists AB', 0x001AF2: u'Dynavisions GmbH', 0x001AF3: u'Samyoung Electronics', 0x001AF4: u'Handreamnet', 0x001AF5: u'PENTAONE. CO., LTD.', 0x001AF6: u'Woven Systems, Inc.', 0x001AF7: u'dataschalt e+a GmbH', 0x001AF8: u'Copley Controls Corporation', 0x001AF9: u'AeroVIronment (AV Inc)', 0x001AFA: u'Welch Allyn, Inc.', 0x001AFB: u'Joby Inc.', 0x001AFC: u'ModusLink Corporation', 0x001AFD: u'EVOLIS', 0x001AFE: u'SOFACREAL', 0x001AFF: u'Wizyoung Tech.', 0x001B00: u'Neopost Technologies', 0x001B01: u'Applied Radio Technologies', 0x001B02: u'ED Co.Ltd', 0x001B03: u'Action Technology (SZ) Co., Ltd', 0x001B04: u'Affinity International S.p.a', 0x001B05: u'Young Media Concepts GmbH', 0x001B06: u'Ateliers R. LAUMONIER', 0x001B07: u'Mendocino Software', 0x001B08: u'Danfoss Drives A/S', 0x001B09: u'Matrix Telecom Pvt. Ltd.',
0x001B0A: u'Intelligent Distributed Controls Ltd', 0x001B0B: u'Phidgets Inc.', 0x001B0C: u'Cisco Systems', 0x001B0D: u'Cisco Systems', 0x001B0E: u'InoTec GmbH Organisationssysteme', 0x001B0F: u'Petratec', 0x001B10: u'ShenZhen Kang Hui Technology Co.,ltd', 0x001B11: u'D-Link Corporation', 0x001B12: u'Apprion', 0x001B13: u'Icron Technologies Corporation', 0x001B14: u'Carex Lighting Equipment Factory', 0x001B15: u'Voxtel, Inc.', 0x001B16: u'Celtro Ltd.', 0x001B17: u'Palo Alto Networks', 0x001B18: u'Tsuken Electric Ind. Co.,Ltd', 0x001B19: u'IEEE 1588 Standard', 0x001B1A: u'e-trees Japan, Inc.', 0x001B1B: u'Siemens AG, A&D AS EWK PU1', 0x001B1C: u'Coherent', 0x001B1D: u'Phoenix International Co., Ltd', 0x001B1E: u'HART Communication Foundation', 0x001B1F: u'DELTA - Danish Electronics, Light & Acoustics', 0x001B20: u'TPine Technology', 0x001B21: u'Intel Corporate', 0x001B22: u'Palit Microsystems ( H.K.) Ltd.', 0x001B23: u'SimpleComTools', 0x001B24: u'Quanta Computer Inc.', 0x001B25: u'Nortel', 0x001B26: u'RON-Telecom ZAO', 0x001B27: u'Merlin CSI', 0x001B28: u'POLYGON, JSC', 0x001B29: u'Avantis.Co.,Ltd', 0x001B2A: u'Cisco Systems', 0x001B2B: u'Cisco Systems', 0x001B2C: u'ATRON electronic GmbH', 0x001B2D: u'PRIVATE', 0x001B2E: u'Sinkyo Electron Inc', 0x001B2F: u'NETGEAR Inc.', 0x001B30: u'Solitech Inc.', 0x001B31: u'Neural Image. Co. Ltd.', 0x001B32: u'QLogic Corporation', 0x001B33: u'Nokia Danmark A/S', 0x001B34: u'Focus System Inc.', 0x001B35: u'ChongQing JINOU Science & Technology Development CO.,Ltd', 0x001B36: u'Tsubata Engineering Co.,Ltd. (Head Office)', 0x001B37: u'Computec Oy', 0x001B38: u'COMPAL ELECTRONICS TECHNOLOGIC CO., LTD.', 0x001B39: u'Proxicast', 0x001B3A: u'SIMS Corp.', 0x001B3B: u'Yi-Qing CO., LTD', 0x001B3C: u'Software Technologies Group,Inc.', 0x001B3D: u'EuroTel Spa', 0x001B3E: u'Curtis, Inc.', 0x001B3F: u'ProCurve Networking by HP', 0x001B40: u'Network Automation mxc AB', 0x001B41: u'General Infinity Co.,Ltd.', 0x001B42: u'Wise & Blue', 0x001B43: u'Beijing DG Telecommunications equipment Co.,Ltd', 0x001B44: u'SanDisk Corporation', 0x001B45: u'ABB AS, Division Automation Products', 0x001B46: u'Blueone Technology Co.,Ltd', 0x001B47: u'Futarque A/S', 0x001B48: u'Shenzhen Lantech Electronics Co., Ltd.', 0x001B49: u'Roberts Radio limited', 0x001B4A: u'W&W Communications, Inc.', 0x001B4B: u'SANION Co., Ltd.', 0x001B4C: u'Signtech', 0x001B4D: u'Areca Technology Corporation', 0x001B4E: u'Navman New Zealand', 0x001B4F: u'Avaya Inc.', 0x001B50: u'Nizhny Novgorod Factory named after M.Frunze, FSUE (NZiF)', 0x001B51: u'Vector Technology Corp.', 0x001B52: u'Motorola Mobile Devices', 0x001B53: u'Cisco Systems', 0x001B54: u'Cisco Systems', 0x001B55: u'Hurco Automation Ltd.', 0x001B56: u'Tehuti Networks Ltd.', 0x001B57: u'SEMINDIA SYSTEMS PRIVATE LIMITED', 0x001B58: u'PRIVATE', 0x001B59: u'Sony Ericsson Mobile Communications AB', 0x001B5A: u'Apollo Imaging Technologies, Inc.', 0x001B5B: u'2Wire, Inc.', 0x001B5C: u'Azuretec Co., Ltd.', 0x001B5D: u'Vololink Pty Ltd', 0x001B5E: u'BPL Limited', 0x001B5F: u'Alien Technology', 0x001B60: u'NAVIGON AG', 0x001B61: u'Digital Acoustics, LLC', 0x001B62: u'JHT Optoelectronics Co.,Ltd.', 0x001B63: u'Apple Inc.', 0x001B64: u'IsaacLandKorea', 0x001B65: u'China Gridcom Co., Ltd', 0x001B66: u'Sennheiser electronic GmbH & Co. KG',
0x001B67: u'Ubiquisys Ltd', 0x001B68: u'Modnnet Co., Ltd', 0x001B69: u'Equaline Corporation', 0x001B6A: u'Powerwave UK Ltd', 0x001B6B: u'Swyx Solutions AG', 0x001B6C: u'LookX Digital Media BV', 0x001B6D: u'Midtronics, Inc.', 0x001B6E: u'Anue Systems, Inc.', 0x001B6F: u'Teletrak Ltd', 0x001B70: u'IRI Ubiteq, INC.', 0x001B71: u'Telular Corp.', 0x001B72: u'Sicep s.p.a.', 0x001B73: u'DTL Broadcast Ltd', 0x001B74: u'MiraLink Corporation', 0x001B75: u'Hypermedia Systems', 0x001B76: u'Ripcode, Inc.', 0x001B77: u'Intel Corporate', 0x001B78: u'Hewlett Packard', 0x001B79: u'FAIVELEY TRANSPORT', 0x001B7A: u'Nintendo Co., Ltd.', 0x001B7B: u'The Tintometer Ltd', 0x001B7C: u'A & R Cambridge', 0x001B7D: u'CXR Anderson Jacobson', 0x001B7E: u'Beckmann GmbH', 0x001B7F: u'TMN Technologies Telecomunicacoes Ltda', 0x001B80: u'LORD Corporation', 0x001B81: u'DATAQ Instruments, Inc.', 0x001B82: u'Taiwan Semiconductor Co., Ltd.', 0x001B83: u'Finsoft Ltd', 0x001B84: u'Scan Engineering Telecom', 0x001B85: u'MAN Diesel A/S', 0x001B86: u'Bosch Access Systems GmbH', 0x001B87: u'Deepsound Tech. Co., Ltd', 0x001B88: u'Divinet Access Technologies Ltd', 0x001B89: u'EMZA Visual Sense Ltd.', 0x001B8A: u'2M Electronic A/S', 0x001B8B: u'NEC AccessTechnica,Ltd.', 0x001B8C: u'JMicron Technology Corp.', 0x001B8D: u'Electronic Computer Systems, Inc.', 0x001B8E: u'Hulu Sweden AB', 0x001B8F: u'Cisco Systems', 0x001B90: u'Cisco Systems', 0x001B91: u'EFKON AG', 0x001B92: u'l-acoustics', 0x001B93: u'JC Decaux SA DNT', 0x001B94: u'T.E.M.A. S.p.A.', 0x001B95: u'VIDEO SYSTEMS SRL', 0x001B96: u'Snif Labs, Inc.', 0x001B97: u'Violin Technologies', 0x001B98: u'Samsung Electronics Co., Ltd.', 0x001B99: u'KS System GmbH', 0x001B9A: u'Apollo Fire Detectors Ltd', 0x001B9B: u'Hose-McCann Communications', 0x001B9C: u'SATEL sp. z o.o.', 0x001B9D: u'Novus Security Sp. z o.o.', 0x001B9E: u'ASKEY COMPUTER CORP', 0x001B9F: u'Calyptech Pty Ltd', 0x001BA0: u'Awox', 0x001BA1: u'Åmic AB', 0x001BA2: u'IDS Imaging Development Systems GmbH', 0x001BA3: u'Flexit Group GmbH', 0x001BA4: u'S.A.E Afikim', 0x001BA5: u'MyungMin Systems, Inc.', 0x001BA6: u'intotech inc.', 0x001BA7: u'Lorica Solutions', 0x001BA8: u'UBI&MOBI,.Inc', 0x001BA9: u'BROTHER INDUSTRIES, LTD. Printing & Solutions Company', 0x001BAA: u'XenICs nv', 0x001BAB: u'Telchemy, Incorporated', 0x001BAC: u'Curtiss Wright Controls Embedded Computing', 0x001BAD: u'iControl Incorporated', 0x001BAE: u'Micro Control Systems, Inc', 0x001BAF: u'Nokia Danmark A/S', 0x001BB0: u'BHARAT ELECTRONICS', 0x001BB1: u'Wistron Neweb Corp.', 0x001BB2: u'Intellect International NV', 0x001BB3: u'Condalo GmbH', 0x001BB4: u'Airvod Limited', 0x001BB5: u'Cherry GmbH', 0x001BB6: u'Bird Electronic Corp.', 0x001BB7: u'Alta Heights Technology Corp.', 0x001BB8: u'BLUEWAY ELECTRONIC CO;LTD', 0x001BB9: u'Elitegroup Computer System Co.', 0x001C7C: u'PERQ SYSTEMS CORPORATION', 0x002000: u'LEXMARK INTERNATIONAL, INC.', 0x002001: u'DSP SOLUTIONS, INC.', 0x002002: u'SERITECH ENTERPRISE CO., LTD.', 0x002003: u'PIXEL POWER LTD.', 0x002004: u'YAMATAKE-HONEYWELL CO., LTD.', 0x002005: u'SIMPLE TECHNOLOGY', 0x002006: u'GARRETT COMMUNICATIONS, INC.', 0x002007: u'SFA, INC.', 0x002008: u'CABLE & COMPUTER TECHNOLOGY', 0x002009: u'PACKARD BELL ELEC., INC.', 0x00200A: u'SOURCE-COMM CORP.', 0x00200B: u'OCTAGON SYSTEMS CORP.', 0x00200C: u'ADASTRA SYSTEMS CORP.', 0x00200D: u'CARL ZEISS', 0x00200E: u'SATELLITE TECHNOLOGY MGMT, INC', 0x00200F: u'TANBAC CO., LTD.', 0x002010: u'JEOL SYSTEM TECHNOLOGY CO. LTD',
0x002011: u'CANOPUS CO., LTD.', 0x002012: u'CAMTRONICS MEDICAL SYSTEMS', 0x002013: u'DIVERSIFIED TECHNOLOGY, INC.', 0x002014: u'GLOBAL VIEW CO., LTD.', 0x002015: u'ACTIS COMPUTER SA', 0x002016: u'SHOWA ELECTRIC WIRE & CABLE CO', 0x002017: u'ORBOTECH', 0x002018: u'CIS TECHNOLOGY INC.', 0x002019: u'OHLER GmbH', 0x00201A: u'MRV Communications, Inc.', 0x00201B: u'NORTHERN TELECOM/NETWORK', 0x00201C: u'EXCEL, INC.', 0x00201D: u'KATANA PRODUCTS', 0x00201E: u'NETQUEST CORPORATION', 0x00201F: u'BEST POWER TECHNOLOGY, INC.', 0x002020: u'MEGATRON COMPUTER INDUSTRIES PTY, LTD.', 0x002021: u'ALGORITHMS SOFTWARE PVT. LTD.', 0x002022: u'NMS Communications', 0x002023: u'T.C. TECHNOLOGIES PTY. LTD', 0x002024: u'PACIFIC COMMUNICATION SCIENCES', 0x002025: u'CONTROL TECHNOLOGY, INC.', 0x002026: u'AMKLY SYSTEMS, INC.', 0x002027: u'MING FORTUNE INDUSTRY CO., LTD', 0x002028: u'WEST EGG SYSTEMS, INC.', 0x002029: u'TELEPROCESSING PRODUCTS, INC.', 0x00202A: u'N.V. DZINE', 0x00202B: u'ADVANCED TELECOMMUNICATIONS MODULES, LTD.', 0x00202C: u'WELLTRONIX CO., LTD.', 0x00202D: u'TAIYO CORPORATION', 0x00202E: u'DAYSTAR DIGITAL', 0x00202F: u'ZETA COMMUNICATIONS, LTD.', 0x002030: u'ANALOG & DIGITAL SYSTEMS', 0x002031: u'ERTEC GmbH', 0x002032: u'ALCATEL TAISEL', 0x002033: u'SYNAPSE TECHNOLOGIES, INC.', 0x002034: u'ROTEC INDUSTRIEAUTOMATION GMBH', 0x002035: u'IBM CORPORATION', 0x002036: u'BMC SOFTWARE', 0x002037: u'SEAGATE TECHNOLOGY', 0x002038: u'VME MICROSYSTEMS INTERNATIONAL CORPORATION', 0x002039: u'SCINETS', 0x00203A: u'DIGITAL BI0METRICS INC.', 0x00203B: u'WISDM LTD.', 0x00203C: u'EUROTIME AB', 0x00203D: u'NOVAR ELECTRONICS CORPORATION', 0x00203E: u'LogiCan Technologies, Inc.', 0x00203F: u'JUKI CORPORATION', 0x002040: u'Motorola Broadband Communications Sector', 0x002041: u'DATA NET', 0x002042: u'DATAMETRICS CORP.', 0x002043: u'NEURON COMPANY LIMITED', 0x002044: u'GENITECH PTY LTD', 0x002045: u'ION Networks, Inc.', 0x002046: u'CIPRICO, INC.', 0x002047: u'STEINBRECHER CORP.', 0x002048: u'Marconi Communications', 0x002049: u'COMTRON, INC.', 0x00204A: u'PRONET GMBH', 0x00204B: u'AUTOCOMPUTER CO., LTD.', 0x00204C: u'MITRON COMPUTER PTE LTD.', 0x00204D: u'INOVIS GMBH', 0x00204E: u'NETWORK SECURITY SYSTEMS, INC.', 0x00204F: u'DEUTSCHE AEROSPACE AG', 0x002050: u'KOREA COMPUTER INC.', 0x002051: u'Verilink Corporation', 0x002052: u'RAGULA SYSTEMS', 0x002053: u'HUNTSVILLE MICROSYSTEMS, INC.', 0x002054: u'EASTERN RESEARCH, INC.', 0x002055: u'ALTECH CO., LTD.', 0x002056: u'NEOPRODUCTS', 0x002057: u'TITZE DATENTECHNIK GmbH', 0x002058: u'ALLIED SIGNAL INC.', 0x002059: u'MIRO COMPUTER PRODUCTS AG', 0x00205A: u'COMPUTER IDENTICS', 0x00205B: u'Kentrox, LLC', 0x00205C: u'InterNet Systems of Florida, Inc.', 0x00205D: u'NANOMATIC OY', 0x00205E: u'CASTLE ROCK, INC.', 0x00205F: u'GAMMADATA COMPUTER GMBH', 0x002060: u'ALCATEL ITALIA S.p.A.', 0x002061: u'DYNATECH COMMUNICATIONS, INC.', 0x002062: u'SCORPION LOGIC, LTD.', 0x002063: u'WIPRO INFOTECH LTD.', 0x002064: u'PROTEC MICROSYSTEMS, INC.', 0x002065: u'SUPERNET NETWORKING INC.', 0x002066: u'GENERAL MAGIC, INC.', 0x002067: u'PRIVATE', 0x002068: u'ISDYNE', 0x002069: u'ISDN SYSTEMS CORPORATION', 0x00206A: u'OSAKA COMPUTER CORP.', 0x00206B: u'KONICA MINOLTA HOLDINGS, INC.', 0x00206C: u'EVERGREEN TECHNOLOGY CORP.', 0x00206D: u'DATA RACE, INC.', 0x00206E: u'XACT, INC.', 0x00206F: u'FLOWPOINT CORPORATION', 0x002070: u'HYNET, LTD.', 0x002071: u'IBR GMBH', 0x002072: u'WORKLINK INNOVATIONS', 0x002073: u'FUSION SYSTEMS CORPORATION', 0x002074: u'SUNGWOON SYSTEMS', 0x002075: u'MOTOROLA COMMUNICATION ISRAEL',
0x002076: u'REUDO CORPORATION', 0x002077: u'KARDIOS SYSTEMS CORP.', 0x002078: u'RUNTOP, INC.', 0x002079: u'MIKRON GMBH', 0x00207A: u'WiSE Communications, Inc.', 0x00207B: u'Intel Corporation', 0x00207C: u'AUTEC GmbH', 0x00207D: u'ADVANCED COMPUTER APPLICATIONS', 0x00207E: u'FINECOM Co., Ltd.', 0x00207F: u'KYOEI SANGYO CO., LTD.', 0x002080: u'SYNERGY (UK) LTD.', 0x002081: u'TITAN ELECTRONICS', 0x002082: u'ONEAC CORPORATION', 0x002083: u'PRESTICOM INCORPORATED', 0x002084: u'OCE PRINTING SYSTEMS, GMBH', 0x002085: u'EXIDE ELECTRONICS', 0x002086: u'MICROTECH ELECTRONICS LIMITED', 0x002087: u'MEMOTEC COMMUNICATIONS CORP.', 0x002088: u'GLOBAL VILLAGE COMMUNICATION', 0x002089: u'T3PLUS NETWORKING, INC.', 0x00208A: u'SONIX COMMUNICATIONS, LTD.', 0x00208B: u'LAPIS TECHNOLOGIES, INC.', 0x00208C: u'GALAXY NETWORKS, INC.', 0x00208D: u'CMD TECHNOLOGY', 0x00208E: u'CHEVIN SOFTWARE ENG. LTD.', 0x00208F: u'ECI TELECOM LTD.', 0x002090: u'ADVANCED COMPRESSION TECHNOLOGY, INC.', 0x002091: u'J125, NATIONAL SECURITY AGENCY', 0x002092: u'CHESS ENGINEERING B.V.', 0x002093: u'LANDINGS TECHNOLOGY CORP.', 0x002094: u'CUBIX CORPORATION', 0x002095: u'RIVA ELECTRONICS', 0x002096: u'Invensys', 0x002097: u'APPLIED SIGNAL TECHNOLOGY', 0x002098: u'HECTRONIC AB', 0x002099: u'BON ELECTRIC CO., LTD.', 0x00209A: u'THE 3DO COMPANY', 0x00209B: u'ERSAT ELECTRONIC GMBH', 0x00209C: u'PRIMARY ACCESS CORP.', 0x00209D: u'LIPPERT AUTOMATIONSTECHNIK', 0x00209E: u'BROWN\'S OPERATING SYSTEM SERVICES, LTD.', 0x00209F: u'MERCURY COMPUTER SYSTEMS, INC.', 0x0020A0: u'OA LABORATORY CO., LTD.', 0x0020A1: u'DOVATRON', 0x0020A2: u'GALCOM NETWORKING LTD.', 0x0020A3: u'DIVICOM INC.', 0x0020A4: u'MULTIPOINT NETWORKS', 0x0020A5: u'API ENGINEERING', 0x0020A6: u'PROXIM, INC.', 0x0020A7: u'PAIRGAIN TECHNOLOGIES, INC.', 0x0020A8: u'SAST TECHNOLOGY CORP.', 0x0020A9: u'WHITE HORSE INDUSTRIAL', 0x0020AA: u'DIGIMEDIA VISION LTD.', 0x0020AB: u'MICRO INDUSTRIES CORP.', 0x0020AC: u'INTERFLEX DATENSYSTEME GMBH', 0x0020AD: u'LINQ SYSTEMS', 0x0020AE: u'ORNET DATA COMMUNICATION TECH.', 0x0020AF: u'3COM CORPORATION', 0x0020B0: u'GATEWAY DEVICES, INC.', 0x0020B1: u'COMTECH RESEARCH INC.', 0x0020B2: u'GKD Gesellschaft Fur Kommunikation Und Datentechnik', 0x0020B3: u'SCLTEC COMMUNICATIONS SYSTEMS', 0x0020B4: u'TERMA ELEKTRONIK AS', 0x0020B5: u'YASKAWA ELECTRIC CORPORATION', 0x0020B6: u'AGILE NETWORKS, INC.', 0x0020B7: u'NAMAQUA COMPUTERWARE', 0x0020B8: u'PRIME OPTION, INC.', 0x0020B9: u'METRICOM, INC.', 0x0020BA: u'CENTER FOR HIGH PERFORMANCE', 0x0020BB: u'ZAX CORPORATION', 0x0020BC: u'Long Reach Networks Pty Ltd', 0x0020BD: u'NIOBRARA R & D CORPORATION', 0x0020BE: u'LAN ACCESS CORP.', 0x0020BF: u'AEHR TEST SYSTEMS', 0x0020C0: u'PULSE ELECTRONICS, INC.', 0x0020C1: u'SAXA, Inc.', 0x0020C2: u'TEXAS MEMORY SYSTEMS, INC.', 0x0020C3: u'COUNTER SOLUTIONS LTD.', 0x0020C4: u'INET,INC.', 0x0020C5: u'EAGLE TECHNOLOGY', 0x0020C6: u'NECTEC', 0x0020C7: u'AKAI Professional M.I. Corp.',
0x0020C8: u'LARSCOM INCORPORATED', 0x0020C9: u'VICTRON BV', 0x0020CA: u'DIGITAL OCEAN', 0x0020CB: u'PRETEC ELECTRONICS CORP.', 0x0020CC: u'DIGITAL SERVICES, LTD.', 0x0020CD: u'HYBRID NETWORKS, INC.', 0x0020CE: u'LOGICAL DESIGN GROUP, INC.', 0x0020CF: u'TEST & MEASUREMENT SYSTEMS INC', 0x0020D0: u'VERSALYNX CORPORATION', 0x0020D1: u'MICROCOMPUTER SYSTEMS (M) SDN.', 0x0020D2: u'RAD DATA COMMUNICATIONS, LTD.', 0x0020D3: u'OST (OUEST STANDARD TELEMATIQU', 0x0020D4: u'CABLETRON - ZEITTNET INC.', 0x0020D5: u'VIPA GMBH', 0x0020D6: u'BREEZECOM', 0x0020D7: u'JAPAN MINICOMPUTER SYSTEMS CO., Ltd.', 0x0020D8: u'Nortel Networks', 0x0020D9: u'PANASONIC TECHNOLOGIES, INC./MIECO-US', 0x0020DA: u'Alcatel North America ESD', 0x0020DB: u'XNET TECHNOLOGY, INC.', 0x0020DC: u'DENSITRON TAIWAN LTD.', 0x0020DD: u'Cybertec Pty Ltd', 0x0020DE: u'JAPAN DIGITAL LABORAT\'Y CO.LTD', 0x0020DF: u'KYOSAN ELECTRIC MFG. CO., LTD.', 0x0020E0: u'Actiontec Electronics, Inc.', 0x0020E1: u'ALAMAR ELECTRONICS', 0x0020E2: u'INFORMATION RESOURCE ENGINEERING', 0x0020E3: u'MCD KENCOM CORPORATION', 0x0020E4: u'HSING TECH ENTERPRISE CO., LTD', 0x0020E5: u'APEX DATA, INC.', 0x0020E6: u'LIDKOPING MACHINE TOOLS AB', 0x0020E7: u'B&W NUCLEAR SERVICE COMPANY', 0x0020E8: u'DATATREK CORPORATION', 0x0020E9: u'DANTEL', 0x0020EA: u'EFFICIENT NETWORKS, INC.', 0x0020EB: u'CINCINNATI MICROWAVE, INC.', 0x0020EC: u'TECHWARE SYSTEMS CORP.', 0x0020ED: u'GIGA-BYTE TECHNOLOGY CO., LTD.', 0x0020EE: u'GTECH CORPORATION', 0x0020EF: u'USC CORPORATION', 0x0020F0: u'UNIVERSAL MICROELECTRONICS CO.', 0x0020F1: u'ALTOS INDIA LIMITED', 0x0020F2: u'SUN MICROSYSTEMS, INC.', 0x0020F3: u'RAYNET CORPORATION', 0x0020F4: u'SPECTRIX CORPORATION', 0x0020F5: u'PANDATEL AG', 0x0020F6: u'NET TEK AND KARLNET, INC.', 0x0020F7: u'CYBERDATA', 0x0020F8: u'CARRERA COMPUTERS, INC.', 0x0020F9: u'PARALINK NETWORKS, INC.', 0x0020FA: u'GDE SYSTEMS, INC.', 0x0020FB: u'OCTEL COMMUNICATIONS CORP.', 0x0020FC: u'MATROX', 0x0020FD: u'ITV TECHNOLOGIES, INC.', 0x0020FE: u'TOPWARE INC. / GRAND COMPUTER', 0x0020FF: u'SYMMETRICAL TECHNOLOGIES', 0x002654: u'3Com Corporation', 0x003000: u'ALLWELL TECHNOLOGY CORP.', 0x003001: u'SMP', 0x003002: u'Expand Networks', 0x003003: u'Phasys Ltd.', 0x003004: u'LEADTEK RESEARCH INC.', 0x003005: u'Fujitsu Siemens Computers', 0x003006: u'SUPERPOWER COMPUTER', 0x003007: u'OPTI, INC.', 0x003008: u'AVIO DIGITAL, INC.', 0x003009: u'Tachion Networks, Inc.', 0x00300A: u'AZTECH SYSTEMS LTD.', 0x00300B: u'mPHASE Technologies, Inc.', 0x00300C: u'CONGRUENCY, LTD.', 0x00300D: u'MMC Technology, Inc.', 0x00300E: u'Klotz Digital AG', 0x00300F: u'IMT - Information Management T', 0x003010: u'VISIONETICS INTERNATIONAL', 0x003011: u'HMS FIELDBUS SYSTEMS AB', 0x003012: u'DIGITAL ENGINEERING LTD.', 0x003013: u'NEC Corporation', 0x003014: u'DIVIO, INC.', 0x003015: u'CP CLARE CORP.', 0x003016: u'ISHIDA CO., LTD.', 0x003017: u'BlueArc UK Ltd', 0x003018: u'Jetway Information Co., Ltd.', 0x003019: u'CISCO SYSTEMS, INC.', 0x00301A: u'SMARTBRIDGES PTE. LTD.', 0x00301B: u'SHUTTLE, INC.', 0x00301C: u'ALTVATER AIRDATA SYSTEMS', 0x00301D: u'SKYSTREAM, INC.', 0x00301E: u'3COM Europe Ltd.', 0x00301F: u'OPTICAL NETWORKS, INC.', 0x003020: u'TSI, Inc..', 0x003021: u'HSING TECH. ENTERPRISE CO.,LTD',
0x003022: u'Fong Kai Industrial Co., Ltd.', 0x003023: u'COGENT COMPUTER SYSTEMS, INC.', 0x003024: u'CISCO SYSTEMS, INC.', 0x003025: u'CHECKOUT COMPUTER SYSTEMS, LTD', 0x003026: u'HeiTel Digital Video GmbH', 0x003027: u'KERBANGO, INC.', 0x003028: u'FASE Saldatura srl', 0x003029: u'OPICOM', 0x00302A: u'SOUTHERN INFORMATION', 0x00302B: u'INALP NETWORKS, INC.', 0x00302C: u'SYLANTRO SYSTEMS CORPORATION', 0x00302D: u'QUANTUM BRIDGE COMMUNICATIONS', 0x00302E: u'Hoft & Wessel AG', 0x00302F: u'Smiths Industries', 0x003030: u'HARMONIX CORPORATION', 0x003031: u'LIGHTWAVE COMMUNICATIONS, INC.', 0x003032: u'MagicRam, Inc.', 0x003033: u'ORIENT TELECOM CO., LTD.', 0x003034: u'SET ENGINEERING', 0x003035: u'Corning Incorporated', 0x003036: u'RMP ELEKTRONIKSYSTEME GMBH', 0x003037: u'Packard Bell Nec Services', 0x003038: u'XCP, INC.', 0x003039: u'SOFTBOOK PRESS', 0x00303A: u'MAATEL', 0x00303B: u'PowerCom Technology', 0x00303C: u'ONNTO CORP.', 0x00303D: u'IVA CORPORATION', 0x00303E: u'Radcom Ltd.', 0x00303F: u'TurboComm Tech Inc.', 0x003040: u'CISCO SYSTEMS, INC.', 0x003041: u'SAEJIN T & M CO., LTD.', 0x003042: u'DeTeWe-Deutsche Telephonwerke', 0x003043: u'IDREAM TECHNOLOGIES, PTE. LTD.', 0x003044: u'Portsmith LLC', 0x003045: u'Village Networks, Inc. (VNI)', 0x003046: u'Controlled Electronic Manageme', 0x003047: u'NISSEI ELECTRIC CO., LTD.', 0x003048: u'Supermicro Computer, Inc.', 0x003049: u'BRYANT TECHNOLOGY, LTD.', 0x00304A: u'Fraunhofer IPMS', 0x00304B: u'ORBACOM SYSTEMS, INC.', 0x00304C: u'APPIAN COMMUNICATIONS, INC.', 0x00304D: u'ESI', 0x00304E: u'BUSTEC PRODUCTION LTD.', 0x00304F: u'PLANET Technology Corporation', 0x003050: u'Versa Technology', 0x003051: u'ORBIT AVIONIC & COMMUNICATION', 0x003052: u'ELASTIC NETWORKS', 0x003053: u'Basler AG', 0x003054: u'CASTLENET TECHNOLOGY, INC.', 0x003055: u'Hitachi Semiconductor America,', 0x003056: u'Beck IPC GmbH', 0x003057: u'QTelNet, Inc.', 0x003058: u'API MOTION', 0x003059: u'DIGITAL-LOGIC AG', 0x00305A: u'TELGEN CORPORATION', 0x00305B: u'MODULE DEPARTMENT', 0x00305C: u'SMAR Laboratories Corp.', 0x00305D: u'DIGITRA SYSTEMS, INC.', 0x00305E: u'Abelko Innovation', 0x00305F: u'IMACON APS', 0x003060: u'Powerfile, Inc.', 0x003061: u'MobyTEL', 0x003062: u'PATH 1 NETWORK TECHNOL\'S INC.', 0x003063: u'SANTERA SYSTEMS, INC.', 0x003064: u'ADLINK TECHNOLOGY, INC.', 0x003065: u'APPLE COMPUTER, INC.', 0x003066: u'DIGITAL WIRELESS CORPORATION', 0x003067: u'BIOSTAR MICROTECH INT\'L CORP.', 0x003068: u'CYBERNETICS TECH. CO., LTD.', 0x003069: u'IMPACCT TECHNOLOGY CORP.', 0x00306A: u'PENTA MEDIA CO., LTD.', 0x00306B: u'CMOS SYSTEMS, INC.', 0x00306C: u'Hitex Holding GmbH', 0x00306D: u'LUCENT TECHNOLOGIES', 0x00306E: u'HEWLETT PACKARD', 0x00306F: u'SEYEON TECH. CO., LTD.',
0x003070: u'1Net Corporation', 0x003071: u'Cisco Systems, Inc.', 0x003072: u'INTELLIBYTE INC.', 0x003073: u'International Microsystems, In', 0x003074: u'EQUIINET LTD.', 0x003075: u'ADTECH', 0x003076: u'Akamba Corporation', 0x003077: u'ONPREM NETWORKS', 0x003078: u'Cisco Systems, Inc.', 0x003079: u'CQOS, INC.', 0x00307A: u'Advanced Technology & Systems', 0x00307B: u'Cisco Systems, Inc.', 0x00307C: u'ADID SA', 0x00307D: u'GRE AMERICA, INC.', 0x00307E: u'Redflex Communication Systems', 0x00307F: u'IRLAN LTD.', 0x003080: u'CISCO SYSTEMS, INC.', 0x003081: u'ALTOS C&C', 0x003082: u'TAIHAN ELECTRIC WIRE CO., LTD.', 0x003083: u'Ivron Systems', 0x003084: u'ALLIED TELESYN INTERNAIONAL', 0x003085: u'CISCO SYSTEMS, INC.', 0x003086: u'Transistor Devices, Inc.', 0x003087: u'VEGA GRIESHABER KG', 0x003088: u'Siara Systems, Inc.', 0x003089: u'Spectrapoint Wireless, LLC', 0x00308A: u'NICOTRA SISTEMI S.P.A', 0x00308B: u'Brix Networks', 0x00308C: u'ADVANCED DIGITAL INFORMATION', 0x00308D: u'PINNACLE SYSTEMS, INC.', 0x00308E: u'CROSS MATCH TECHNOLOGIES, INC.', 0x00308F: u'MICRILOR, Inc.', 0x003090: u'CYRA TECHNOLOGIES, INC.', 0x003091: u'TAIWAN FIRST LINE ELEC. CORP.', 0x003092: u'ModuNORM GmbH', 0x003093: u'SONNET TECHNOLOGIES, INC.', 0x003094: u'Cisco Systems, Inc.', 0x003095: u'Procomp Informatics, Ltd.', 0x003096: u'CISCO SYSTEMS, INC.', 0x003097: u'EXOMATIC AB', 0x003098: u'Global Converging Technologies', 0x003099: u'BOENIG UND KALLENBACH OHG', 0x00309A: u'ASTRO TERRA CORP.', 0x00309B: u'Smartware', 0x00309C: u'Timing Applications, Inc.', 0x00309D: u'Nimble Microsystems, Inc.', 0x00309E: u'WORKBIT CORPORATION.', 0x00309F: u'AMBER NETWORKS', 0x0030A0: u'TYCO SUBMARINE SYSTEMS, LTD.', 0x0030A1: u'WEBGATE Inc.', 0x0030A2: u'Lightner Engineering', 0x0030A3: u'CISCO SYSTEMS, INC.', 0x0030A4: u'Woodwind Communications System', 0x0030A5: u'ACTIVE POWER', 0x0030A6: u'VIANET TECHNOLOGIES, LTD.', 0x0030A7: u'SCHWEITZER ENGINEERING', 0x0030A8: u'OL\'E COMMUNICATIONS, INC.', 0x0030A9: u'Netiverse, Inc.', 0x0030AA: u'AXUS MICROSYSTEMS, INC.', 0x0030AB: u'DELTA NETWORKS, INC.', 0x0030AC: u'Systeme Lauer GmbH & Co., Ltd.', 0x0030AD: u'SHANGHAI COMMUNICATION', 0x0030AE: u'Times N System, Inc.', 0x0030AF: u'Honeywell GmbH', 0x0030B0: u'Convergenet Technologies', 0x0030B1: u'aXess-pro networks GmbH', 0x0030B2: u'L-3 Sonoma EO', 0x0030B3: u'San Valley Systems, Inc.', 0x0030B4: u'INTERSIL CORP.', 0x0030B5: u'Tadiran Microwave Networks', 0x0030B6: u'CISCO SYSTEMS, INC.', 0x0030B7: u'Teletrol Systems, Inc.', 0x0030B8: u'RiverDelta Networks', 0x0030B9: u'ECTEL', 0x0030BA: u'AC&T SYSTEM CO., LTD.', 0x0030BB: u'CacheFlow, Inc.', 0x0030BC: u'Optronic AG', 0x0030BD: u'BELKIN COMPONENTS', 0x0030BE: u'City-Net Technology, Inc.', 0x0030BF: u'MULTIDATA GMBH', 0x0030C0: u'Lara Technology, Inc.', 0x0030C1: u'HEWLETT-PACKARD', 0x0030C2: u'COMONE', 0x0030C3: u'FLUECKIGER ELEKTRONIK AG', 0x0030C4: u'Canon Imaging System Technologies, Inc.', 0x0030C5: u'CADENCE DESIGN SYSTEMS', 0x0030C6: u'CONTROL SOLUTIONS, INC.', 0x0030C7: u'MACROMATE CORP.', 0x0030C8: u'GAD LINE, LTD.', 0x0030C9: u'LuxN, N', 0x0030CA: u'Discovery Com', 0x0030CB: u'OMNI FLOW COMPUTERS, INC.', 0x0030CC: u'Tenor Networks, Inc.', 0x0030CD: u'CONEXANT SYSTEMS, INC.', 0x0030CE: u'Zaffire', 0x0030CF: u'TWO TECHNOLOGIES, INC.', 0x0030D0: u'Tellabs', 0x0030D1: u'INOVA CORPORATION', 0x0030D2: u'WIN TECHNOLOGIES, CO., LTD.', 0x0030D3: u'Agilent Technologies', 0x0030D4: u'AAE Systems, Inc', 0x0030D5: u'DResearch GmbH', 0x0030D6: u'MSC VERTRIEBS GMBH', 0x0030D7: u'Innovative Systems, L.L.C.',
0x0030D8: u'SITEK', 0x0030D9: u'DATACORE SOFTWARE CORP.', 0x0030DA: u'COMTREND CO.', 0x0030DB: u'Mindready Solutions, Inc.', 0x0030DC: u'RIGHTECH CORPORATION', 0x0030DD: u'INDIGITA CORPORATION', 0x0030DE: u'WAGO Kontakttechnik GmbH', 0x0030DF: u'KB/TEL TELECOMUNICACIONES', 0x0030E0: u'OXFORD SEMICONDUCTOR LTD.', 0x0030E1: u'ACROTRON SYSTEMS, INC.', 0x0030E2: u'GARNET SYSTEMS CO., LTD.', 0x0030E3: u'SEDONA NETWORKS CORP.', 0x0030E4: u'CHIYODA SYSTEM RIKEN', 0x0030E5: u'Amper Datos S.A.', 0x0030E6: u'Draeger Medical Systems, Inc.', 0x0030E7: u'CNF MOBILE SOLUTIONS, INC.', 0x0030E8: u'ENSIM CORP.', 0x0030E9: u'GMA COMMUNICATION MANUFACT\'G', 0x0030EA: u'TeraForce Technology Corporation', 0x0030EB: u'TURBONET COMMUNICATIONS, INC.', 0x0030EC: u'BORGARDT', 0x0030ED: u'Expert Magnetics Corp.', 0x0030EE: u'DSG Technology, Inc.', 0x0030EF: u'NEON TECHNOLOGY, INC.', 0x0030F0: u'Uniform Industrial Corp.', 0x0030F1: u'Accton Technology Corp.', 0x0030F2: u'CISCO SYSTEMS, INC.', 0x0030F3: u'At Work Computers', 0x0030F4: u'STARDOT TECHNOLOGIES', 0x0030F5: u'Wild Lab. Ltd.', 0x0030F6: u'SECURELOGIX CORPORATION', 0x0030F7: u'RAMIX INC.', 0x0030F8: u'Dynapro Systems, Inc.', 0x0030F9: u'Sollae Systems Co., Ltd.', 0x0030FA: u'TELICA, INC.', 0x0030FB: u'AZS Technology AG', 0x0030FC: u'Terawave Communications, Inc.', 0x0030FD: u'INTEGRATED SYSTEMS DESIGN', 0x0030FE: u'DSA GmbH', 0x0030FF: u'DATAFAB SYSTEMS, INC.', 0x004000: u'PCI COMPONENTES DA AMZONIA LTD', 0x004001: u'ZYXEL COMMUNICATIONS, INC.', 0x004002: u'PERLE SYSTEMS LIMITED', 0x004003: u'Emerson Process Management Power & Water Solutions, Inc.', 0x004004: u'ICM CO. LTD.', 0x004005: u'ANI COMMUNICATIONS INC.', 0x004006: u'SAMPO TECHNOLOGY CORPORATION', 0x004007: u'TELMAT INFORMATIQUE', 0x004008: u'A PLUS INFO CORPORATION', 0x004009: u'TACHIBANA TECTRON CO., LTD.', 0x00400A: u'PIVOTAL TECHNOLOGIES, INC.', 0x00400B: u'CISCO SYSTEMS, INC.', 0x00400C: u'GENERAL MICRO SYSTEMS, INC.', 0x00400D: u'LANNET DATA COMMUNICATIONS,LTD', 0x00400E: u'MEMOTEC COMMUNICATIONS, INC.', 0x00400F: u'DATACOM TECHNOLOGIES', 0x004010: u'SONIC SYSTEMS, INC.', 0x004011: u'ANDOVER CONTROLS CORPORATION', 0x004012: u'WINDATA, INC.', 0x004013: u'NTT DATA COMM. SYSTEMS CORP.',
0x004014: u'COMSOFT GMBH', 0x004015: u'ASCOM INFRASYS AG', 0x004016: u'HADAX ELECTRONICS, INC.', 0x004017: u'Silex Technology America', 0x004018: u'ADOBE SYSTEMS, INC.', 0x004019: u'AEON SYSTEMS, INC.', 0x00401A: u'FUJI ELECTRIC CO., LTD.', 0x00401B: u'PRINTER SYSTEMS CORP.', 0x00401C: u'AST RESEARCH, INC.', 0x00401D: u'INVISIBLE SOFTWARE, INC.', 0x00401E: u'ICC', 0x00401F: u'COLORGRAPH LTD', 0x004020: u'PINACL COMMUNICATION', 0x004021: u'RASTER GRAPHICS', 0x004022: u'KLEVER COMPUTERS, INC.', 0x004023: u'LOGIC CORPORATION', 0x004024: u'COMPAC INC.', 0x004025: u'MOLECULAR DYNAMICS', 0x004026: u'MELCO, INC.', 0x004027: u'SMC MASSACHUSETTS, INC.', 0x004028: u'NETCOMM LIMITED', 0x004029: u'COMPEX', 0x00402A: u'CANOGA-PERKINS', 0x00402B: u'TRIGEM COMPUTER, INC.', 0x00402C: u'ISIS DISTRIBUTED SYSTEMS, INC.', 0x00402D: u'HARRIS ADACOM CORPORATION', 0x00402E: u'PRECISION SOFTWARE, INC.', 0x00402F: u'XLNT DESIGNS INC.', 0x004030: u'GK COMPUTER', 0x004031: u'KOKUSAI ELECTRIC CO., LTD', 0x004032: u'DIGITAL COMMUNICATIONS', 0x004033: u'ADDTRON TECHNOLOGY CO., LTD.', 0x004034: u'BUSTEK CORPORATION', 0x004035: u'OPCOM', 0x004036: u'TRIBE COMPUTER WORKS, INC.', 0x004037: u'SEA-ILAN, INC.', 0x004038: u'TALENT ELECTRIC INCORPORATED', 0x004039: u'OPTEC DAIICHI DENKO CO., LTD.', 0x00403A: u'IMPACT TECHNOLOGIES', 0x00403B: u'SYNERJET INTERNATIONAL CORP.', 0x00403C: u'FORKS, INC.', 0x00403D: u'TERADATA', 0x00403E: u'RASTER OPS CORPORATION', 0x00403F: u'SSANGYONG COMPUTER SYSTEMS', 0x004040: u'RING ACCESS, INC.', 0x004041: u'FUJIKURA LTD.', 0x004042: u'N.A.T. GMBH', 0x004043: u'NOKIA TELECOMMUNICATIONS', 0x004044: u'QNIX COMPUTER CO., LTD.', 0x004045: u'TWINHEAD CORPORATION', 0x004046: u'UDC RESEARCH LIMITED', 0x004047: u'WIND RIVER SYSTEMS', 0x004048: u'SMD INFORMATICA S.A.', 0x004049: u'TEGIMENTA AG', 0x00404A: u'WEST AUSTRALIAN DEPARTMENT', 0x00404B: u'MAPLE COMPUTER SYSTEMS', 0x00404C: u'HYPERTEC PTY LTD.', 0x00404D: u'TELECOMMUNICATIONS TECHNIQUES', 0x00404E: u'FLUENT, INC.', 0x00404F: u'SPACE & NAVAL WARFARE SYSTEMS', 0x004050: u'IRONICS, INCORPORATED', 0x004051: u'GRACILIS, INC.', 0x004052: u'STAR TECHNOLOGIES, INC.', 0x004053: u'AMPRO COMPUTERS', 0x004054: u'CONNECTION MACHINES SERVICES', 0x004055: u'METRONIX GMBH', 0x004056: u'MCM JAPAN LTD.', 0x004057: u'LOCKHEED - SANDERS', 0x004058: u'KRONOS, INC.', 0x004059: u'YOSHIDA KOGYO K. K.',
0x00405A: u'GOLDSTAR INFORMATION & COMM.', 0x00405B: u'FUNASSET LIMITED', 0x00405C: u'FUTURE SYSTEMS, INC.', 0x00405D: u'STAR-TEK, INC.', 0x00405E: u'NORTH HILLS ISRAEL', 0x00405F: u'AFE COMPUTERS LTD.', 0x004060: u'COMENDEC LTD', 0x004061: u'DATATECH ENTERPRISES CO., LTD.', 0x004062: u'E-SYSTEMS, INC./GARLAND DIV.', 0x004063: u'VIA TECHNOLOGIES, INC.', 0x004064: u'KLA INSTRUMENTS CORPORATION', 0x004065: u'GTE SPACENET', 0x004066: u'HITACHI CABLE, LTD.', 0x004067: u'OMNIBYTE CORPORATION', 0x004068: u'EXTENDED SYSTEMS', 0x004069: u'LEMCOM SYSTEMS, INC.', 0x00406A: u'KENTEK INFORMATION SYSTEMS,INC', 0x00406B: u'SYSGEN', 0x00406C: u'COPERNIQUE', 0x00406D: u'LANCO, INC.', 0x00406E: u'COROLLARY, INC.', 0x00406F: u'SYNC RESEARCH INC.', 0x004070: u'INTERWARE CO., LTD.', 0x004071: u'ATM COMPUTER GMBH', 0x004072: u'Applied Innovation Inc.', 0x004073: u'BASS ASSOCIATES', 0x004074: u'CABLE AND WIRELESS', 0x004075: u'M-TRADE (UK) LTD', 0x004076: u'Sun Conversion Technologies', 0x004077: u'MAXTON TECHNOLOGY CORPORATION', 0x004078: u'WEARNES AUTOMATION PTE LTD', 0x004079: u'JUKO MANUFACTURE COMPANY, LTD.', 0x00407A: u'SOCIETE D\'EXPLOITATION DU CNIT', 0x00407B: u'SCIENTIFIC ATLANTA', 0x00407C: u'QUME CORPORATION', 0x00407D: u'EXTENSION TECHNOLOGY CORP.', 0x00407E: u'EVERGREEN SYSTEMS, INC.', 0x00407F: u'FLIR Systems', 0x004080: u'ATHENIX CORPORATION', 0x004081: u'MANNESMANN SCANGRAPHIC GMBH', 0x004082: u'LABORATORY EQUIPMENT CORP.', 0x004083: u'TDA INDUSTRIA DE PRODUTOS', 0x004084: u'HONEYWELL INC.', 0x004085: u'SAAB INSTRUMENTS AB', 0x004086: u'MICHELS & KLEBERHOFF COMPUTER', 0x004087: u'UBITREX CORPORATION', 0x004088: u'MOBIUS TECHNOLOGIES, INC.', 0x004089: u'MEIDENSHA CORPORATION', 0x00408A: u'TPS TELEPROCESSING SYS. GMBH', 0x00408B: u'RAYLAN CORPORATION', 0x00408C: u'AXIS COMMUNICATIONS AB', 0x00408D: u'THE GOODYEAR TIRE & RUBBER CO.', 0x00408E: u'DIGILOG, INC.', 0x00408F: u'WM-DATA MINFO AB', 0x004090: u'ANSEL COMMUNICATIONS', 0x004091: u'PROCOMP INDUSTRIA ELETRONICA', 0x004092: u'ASP COMPUTER PRODUCTS, INC.', 0x004093: u'PAXDATA NETWORKS LTD.', 0x004094: u'SHOGRAPHICS, INC.', 0x004095: u'R.P.T. INTERGROUPS INT\'L LTD.',
0x004096: u'Cisco Systems, Inc.', 0x004097: u'DATEX DIVISION OF', 0x004098: u'DRESSLER GMBH & CO.', 0x004099: u'NEWGEN SYSTEMS CORP.', 0x00409A: u'NETWORK EXPRESS, INC.', 0x00409B: u'HAL COMPUTER SYSTEMS INC.', 0x00409C: u'TRANSWARE', 0x00409D: u'DIGIBOARD, INC.', 0x00409E: u'CONCURRENT TECHNOLOGIES LTD.', 0x00409F: u'LANCAST/CASAT TECHNOLOGY, INC.', 0x0040A0: u'GOLDSTAR CO., LTD.', 0x0040A1: u'ERGO COMPUTING', 0x0040A2: u'KINGSTAR TECHNOLOGY INC.', 0x0040A3: u'MICROUNITY SYSTEMS ENGINEERING', 0x0040A4: u'ROSE ELECTRONICS', 0x0040A5: u'CLINICOMP INTL.', 0x0040A6: u'Cray, Inc.', 0x0040A7: u'ITAUTEC PHILCO S.A.', 0x0040A8: u'IMF INTERNATIONAL LTD.', 0x0040A9: u'DATACOM INC.', 0x0040AA: u'VALMET AUTOMATION INC.', 0x0040AB: u'ROLAND DG CORPORATION', 0x0040AC: u'SUPER WORKSTATION, INC.', 0x0040AD: u'SMA REGELSYSTEME GMBH', 0x0040AE: u'DELTA CONTROLS, INC.', 0x0040AF: u'DIGITAL PRODUCTS, INC.', 0x0040B0: u'BYTEX CORPORATION, ENGINEERING', 0x0040B1: u'CODONICS INC.', 0x0040B2: u'SYSTEMFORSCHUNG', 0x0040B3: u'PAR MICROSYSTEMS CORPORATION', 0x0040B4: u'NEXTCOM K.K.', 0x0040B5: u'VIDEO TECHNOLOGY COMPUTERS LTD', 0x0040B6: u'COMPUTERM CORPORATION', 0x0040B7: u'STEALTH COMPUTER SYSTEMS', 0x0040B8: u'IDEA ASSOCIATES', 0x0040B9: u'MACQ ELECTRONIQUE SA', 0x0040BA: u'ALLIANT COMPUTER SYSTEMS CORP.', 0x0040BB: u'GOLDSTAR CABLE CO., LTD.', 0x0040BC: u'ALGORITHMICS LTD.', 0x0040BD: u'STARLIGHT NETWORKS, INC.', 0x0040BE: u'BOEING DEFENSE & SPACE', 0x0040BF: u'CHANNEL SYSTEMS INTERN\'L INC.', 0x0040C0: u'VISTA CONTROLS CORPORATION', 0x0040C1: u'BIZERBA-WERKE WILHEIM KRAUT', 0x0040C2: u'APPLIED COMPUTING DEVICES', 0x0040C3: u'FISCHER AND PORTER CO.', 0x0040C4: u'KINKEI SYSTEM CORPORATION', 0x0040C5: u'MICOM COMMUNICATIONS INC.', 0x0040C6: u'FIBERNET RESEARCH, INC.', 0x0040C7: u'RUBY TECH CORPORATION', 0x0040C8: u'MILAN TECHNOLOGY CORPORATION', 0x0040C9: u'NCUBE', 0x0040CA: u'FIRST INTERNAT\'L COMPUTER, INC', 0x0040CB: u'LANWAN TECHNOLOGIES', 0x0040CC: u'SILCOM MANUF\'G TECHNOLOGY INC.', 0x0040CD: u'TERA MICROSYSTEMS, INC.', 0x0040CE: u'NET-SOURCE, INC.', 0x0040CF: u'STRAWBERRY TREE, INC.', 0x0040D0: u'MITAC INTERNATIONAL CORP.', 0x0040D1: u'FUKUDA DENSHI CO., LTD.', 0x0040D2: u'PAGINE CORPORATION', 0x0040D3: u'KIMPSION INTERNATIONAL CORP.', 0x0040D4: u'GAGE TALKER CORP.', 0x0040D5: u'SARTORIUS AG', 0x0040D6: u'LOCAMATION B.V.', 0x0040D7: u'STUDIO GEN INC.', 0x0040D8: u'OCEAN OFFICE AUTOMATION LTD.', 0x0040D9: u'AMERICAN MEGATRENDS INC.', 0x0040DA: u'TELSPEC LTD', 0x0040DB: u'ADVANCED TECHNICAL SOLUTIONS', 0x0040DC: u'TRITEC ELECTRONIC GMBH', 0x0040DD: u'HONG TECHNOLOGIES', 0x0040DE: u'ELETTRONICA SAN GIORGIO', 0x0040DF: u'DIGALOG SYSTEMS, INC.', 0x0040E0: u'ATOMWIDE LTD.', 0x0040E1: u'MARNER INTERNATIONAL, INC.', 0x0040E2: u'MESA RIDGE TECHNOLOGIES, INC.', 0x0040E3: u'QUIN SYSTEMS LTD', 0x0040E4: u'E-M TECHNOLOGY, INC.', 0x0040E5: u'SYBUS CORPORATION', 0x0040E6: u'C.A.E.N.', 0x0040E7: u'ARNOS INSTRUMENTS & COMPUTER', 0x0040E8: u'CHARLES RIVER DATA SYSTEMS,INC', 0x0040E9: u'ACCORD SYSTEMS, INC.', 0x0040EA: u'PLAIN TREE SYSTEMS INC', 0x0040EB: u'MARTIN MARIETTA CORPORATION', 0x0040EC: u'MIKASA SYSTEM ENGINEERING', 0x0040ED: u'NETWORK CONTROLS INT\'NATL INC.', 0x0040EE: u'OPTIMEM', 0x0040EF: u'HYPERCOM, INC.', 0x0040F0: u'MICRO SYSTEMS, INC.', 0x0040F1: u'CHUO ELECTRONICS CO., LTD.', 0x0040F2: u'JANICH & KLASS COMPUTERTECHNIK', 0x0040F3: u'NETCOR', 0x0040F4: u'CAMEO COMMUNICATIONS, INC.', 0x0040F5: u'OEM ENGINES', 0x0040F6: u'KATRON COMPUTERS INC.', 0x0040F7: u'POLAROID MEDICAL IMAGING SYS.',
0x0040F8: u'SYSTEMHAUS DISCOM', 0x0040F9: u'COMBINET', 0x0040FA: u'MICROBOARDS, INC.', 0x0040FB: u'CASCADE COMMUNICATIONS CORP.', 0x0040FC: u'IBR COMPUTER TECHNIK GMBH', 0x0040FD: u'LXE', 0x0040FE: u'SYMPLEX COMMUNICATIONS', 0x0040FF: u'TELEBIT CORPORATION', 0x004252: u'RLX Technologies', 0x004501: u'Versus Technology, Inc.', 0x005000: u'NEXO COMMUNICATIONS, INC.', 0x005001: u'YAMASHITA SYSTEMS CORP.', 0x005002: u'OMNISEC AG', 0x005003: u'GRETAG MACBETH AG', 0x005004: u'3COM CORPORATION', 0x005006: u'TAC AB', 0x005007: u'SIEMENS TELECOMMUNICATION SYSTEMS LIMITED', 0x005008: u'TIVA MICROCOMPUTER CORP. (TMC)', 0x005009: u'PHILIPS BROADBAND NETWORKS', 0x00500A: u'IRIS TECHNOLOGIES, INC.', 0x00500B: u'CISCO SYSTEMS, INC.', 0x00500C: u'e-Tek Labs, Inc.', 0x00500D: u'SATORI ELECTORIC CO., LTD.', 0x00500E: u'CHROMATIS NETWORKS, INC.', 0x00500F: u'CISCO SYSTEMS, INC.', 0x005010: u'NovaNET Learning, Inc.', 0x005012: u'CBL - GMBH', 0x005013: u'Chaparral Network Storage', 0x005014: u'CISCO SYSTEMS, INC.', 0x005015: u'BRIGHT STAR ENGINEERING', 0x005016: u'SST/WOODHEAD INDUSTRIES', 0x005017: u'RSR S.R.L.', 0x005018: u'AMIT, Inc.', 0x005019: u'SPRING TIDE NETWORKS, INC.', 0x00501A: u'UISIQN', 0x00501B: u'ABL CANADA, INC.', 0x00501C: u'JATOM SYSTEMS, INC.', 0x00501E: u'Miranda Technologies, Inc.', 0x00501F: u'MRG SYSTEMS, LTD.', 0x005020: u'MEDIASTAR CO., LTD.', 0x005021: u'EIS INTERNATIONAL, INC.', 0x005022: u'ZONET TECHNOLOGY, INC.', 0x005023: u'PG DESIGN ELECTRONICS, INC.', 0x005024: u'NAVIC SYSTEMS, INC.', 0x005026: u'COSYSTEMS, INC.', 0x005027: u'GENICOM CORPORATION', 0x005028: u'AVAL COMMUNICATIONS', 0x005029: u'1394 PRINTER WORKING GROUP', 0x00502A: u'CISCO SYSTEMS, INC.', 0x00502B: u'GENRAD LTD.', 0x00502C: u'SOYO COMPUTER, INC.', 0x00502D: u'ACCEL, INC.', 0x00502E: u'CAMBEX CORPORATION', 0x00502F: u'TollBridge Technologies, Inc.', 0x005030: u'FUTURE PLUS SYSTEMS', 0x005031: u'AEROFLEX LABORATORIES, INC.', 0x005032: u'PICAZO COMMUNICATIONS, INC.', 0x005033: u'MAYAN NETWORKS', 0x005036: u'NETCAM, LTD.', 0x005037: u'KOGA ELECTRONICS CO.', 0x005038: u'DAIN TELECOM CO., LTD.', 0x005039: u'MARINER NETWORKS', 0x00503A: u'DATONG ELECTRONICS LTD.', 0x00503B: u'MEDIAFIRE CORPORATION', 0x00503C: u'TSINGHUA NOVEL ELECTRONICS', 0x00503E: u'CISCO SYSTEMS, INC.', 0x00503F: u'ANCHOR GAMES', 0x005040: u'Matsushita Electric Works, Ltd.', 0x005041: u'Coretronic Corporation', 0x005042: u'SCI MANUFACTURING SINGAPORE PTE, LTD.', 0x005043: u'MARVELL SEMICONDUCTOR, INC.', 0x005044: u'ASACA CORPORATION', 0x005045: u'RIOWORKS SOLUTIONS, INC.', 0x005046: u'MENICX INTERNATIONAL CO., LTD.', 0x005047: u'PRIVATE', 0x005048: u'INFOLIBRIA', 0x005049: u'ELLACOYA NETWORKS, INC.', 0x00504A: u'ELTECO A.S.', 0x00504B: u'BARCONET N.V.', 0x00504C: u'GALIL MOTION CONTROL, INC.', 0x00504D: u'TOKYO ELECTRON DEVICE LTD.', 0x00504E: u'SIERRA MONITOR CORP.', 0x00504F: u'OLENCOM ELECTRONICS', 0x005050: u'CISCO SYSTEMS, INC.', 0x005051: u'IWATSU ELECTRIC CO., LTD.', 0x005052: u'TIARA NETWORKS, INC.', 0x005053: u'CISCO SYSTEMS, INC.', 0x005054: u'CISCO SYSTEMS, INC.', 0x005055: u'DOMS A/S', 0x005056: u'VMWare, Inc.', 0x005057: u'BROADBAND ACCESS SYSTEMS', 0x005058: u'VegaStream Limted', 0x005059: u'iBAHN', 0x00505A: u'NETWORK ALCHEMY, INC.', 0x00505B: u'KAWASAKI LSI U.S.A., INC.', 0x00505C: u'TUNDO CORPORATION', 0x00505E: u'DIGITEK MICROLOGIC S.A.', 0x00505F: u'BRAND INNOVATORS', 0x005060: u'TANDBERG TELECOM AS', 0x005062: u'KOUWELL ELECTRONICS CORP. **',
0x005063: u'OY COMSEL SYSTEM AB', 0x005064: u'CAE ELECTRONICS', 0x005065: u'DENSEI-LAMBAD Co., Ltd.', 0x005066: u'AtecoM GmbH advanced telecomunication modules', 0x005067: u'AEROCOMM, INC.', 0x005068: u'ELECTRONIC INDUSTRIES ASSOCIATION', 0x005069: u'PixStream Incorporated', 0x00506A: u'EDEVA, INC.', 0x00506B: u'SPX-ATEG', 0x00506C: u'G & L BEIJER ELECTRONICS AB', 0x00506D: u'VIDEOJET SYSTEMS', 0x00506E: u'CORDER ENGINEERING CORPORATION', 0x00506F: u'G-CONNECT', 0x005070: u'CHAINTECH COMPUTER CO., LTD.', 0x005071: u'AIWA CO., LTD.', 0x005072: u'CORVIS CORPORATION', 0x005073: u'CISCO SYSTEMS, INC.', 0x005074: u'ADVANCED HI-TECH CORP.', 0x005075: u'KESTREL SOLUTIONS', 0x005076: u'IBM', 0x005077: u'PROLIFIC TECHNOLOGY, INC.', 0x005078: u'MEGATON HOUSE, LTD.', 0x005079: u'PRIVATE', 0x00507A: u'XPEED, INC.', 0x00507B: u'MERLOT COMMUNICATIONS', 0x00507C: u'VIDEOCON AG', 0x00507D: u'IFP', 0x00507E: u'NEWER TECHNOLOGY', 0x00507F: u'DrayTek Corp.', 0x005080: u'CISCO SYSTEMS, INC.', 0x005081: u'MURATA MACHINERY, LTD.', 0x005082: u'FORESSON CORPORATION', 0x005083: u'GILBARCO, INC.', 0x005084: u'ATL PRODUCTS', 0x005086: u'TELKOM SA, LTD.', 0x005087: u'TERASAKI ELECTRIC CO., LTD.', 0x005088: u'AMANO CORPORATION', 0x005089: u'SAFETY MANAGEMENT SYSTEMS', 0x00508B: u'COMPAQ COMPUTER CORPORATION', 0x00508C: u'RSI SYSTEMS', 0x00508D: u'ABIT COMPUTER CORPORATION', 0x00508E: u'OPTIMATION, INC.', 0x00508F: u'ASITA TECHNOLOGIES INT\'L LTD.', 0x005090: u'DCTRI', 0x005091: u'NETACCESS, INC.', 0x005092: u'RIGAKU INDUSTRIAL CORPORATION', 0x005093: u'BOEING', 0x005094: u'PACE MICRO TECHNOLOGY PLC', 0x005095: u'PERACOM NETWORKS', 0x005096: u'SALIX TECHNOLOGIES, INC.', 0x005097: u'MMC-EMBEDDED COMPUTERTECHNIK GmbH', 0x005098: u'GLOBALOOP, LTD.', 0x005099: u'3COM EUROPE, LTD.', 0x00509A: u'TAG ELECTRONIC SYSTEMS', 0x00509B: u'SWITCHCORE AB', 0x00509C: u'BETA RESEARCH', 0x00509D: u'THE INDUSTREE B.V.', 0x00509E: u'Les Technologies SoftAcoustik Inc.', 0x00509F: u'HORIZON COMPUTER', 0x0050A0: u'DELTA COMPUTER SYSTEMS, INC.', 0x0050A1: u'CARLO GAVAZZI, INC.', 0x0050A2: u'CISCO SYSTEMS, INC.', 0x0050A3: u'TransMedia Communications, Inc.', 0x0050A4: u'IO TECH, INC.', 0x0050A5: u'CAPITOL BUSINESS SYSTEMS, LTD.', 0x0050A6: u'OPTRONICS', 0x0050A7: u'CISCO SYSTEMS, INC.', 0x0050A8: u'OpenCon Systems, Inc.', 0x0050A9: u'MOLDAT WIRELESS TECHNOLGIES', 0x0050AA: u'KONICA MINOLTA HOLDINGS, INC.', 0x0050AB: u'NALTEC, INC.', 0x0050AC: u'MAPLE COMPUTER CORPORATION', 0x0050AD: u'CommUnique Wireless Corp.', 0x0050AE: u'IWAKI ELECTRONICS CO., LTD.', 0x0050AF: u'INTERGON, INC.', 0x0050B0: u'TECHNOLOGY ATLANTA CORPORATION', 0x0050B1: u'GIDDINGS & LEWIS', 0x0050B2: u'BRODEL AUTOMATION', 0x0050B3: u'VOICEBOARD CORPORATION', 0x0050B4: u'SATCHWELL CONTROL SYSTEMS, LTD', 0x0050B5: u'FICHET-BAUCHE', 0x0050B6: u'GOOD WAY IND. CO., LTD.', 0x0050B7: u'BOSER TECHNOLOGY CO., LTD.', 0x0050B8: u'INOVA COMPUTERS GMBH & CO. KG',
0x0050B9: u'XITRON TECHNOLOGIES, INC.', 0x0050BA: u'D-LINK', 0x0050BB: u'CMS TECHNOLOGIES', 0x0050BC: u'HAMMER STORAGE SOLUTIONS', 0x0050BD: u'CISCO SYSTEMS, INC.', 0x0050BE: u'FAST MULTIMEDIA AG', 0x0050BF: u'MOTOTECH INC.', 0x0050C0: u'GATAN, INC.', 0x0050C1: u'GEMFLEX NETWORKS, LTD.', 0x0050C2: u'IEEE REGISTRATION AUTHORITY', 0x0050C4: u'IMD', 0x0050C5: u'ADS TECHNOLOGIES, INC.', 0x0050C6: u'LOOP TELECOMMUNICATION INTERNATIONAL, INC.', 0x0050C8: u'ADDONICS COMMUNICATIONS, INC.', 0x0050C9: u'MASPRO DENKOH CORP.', 0x0050CA: u'NET TO NET TECHNOLOGIES', 0x0050CB: u'JETTER', 0x0050CC: u'XYRATEX', 0x0050CD: u'DIGIANSWER A/S', 0x0050CE: u'LG INTERNATIONAL CORP.', 0x0050CF: u'VANLINK COMMUNICATION TECHNOLOGY RESEARCH INSTITUTE', 0x0050D0: u'MINERVA SYSTEMS', 0x0050D1: u'CISCO SYSTEMS, INC.', 0x0050D2: u'CMC Electronics Inc', 0x0050D3: u'DIGITAL AUDIO PROCESSING PTY. LTD.', 0x0050D4: u'JOOHONG INFORMATION &', 0x0050D5: u'AD SYSTEMS CORP.', 0x0050D6: u'ATLAS COPCO TOOLS AB', 0x0050D7: u'TELSTRAT', 0x0050D8: u'UNICORN COMPUTER CORP.', 0x0050D9: u'ENGETRON-ENGENHARIA ELETRONICA IND. e COM. LTDA', 0x0050DA: u'3COM CORPORATION', 0x0050DB: u'CONTEMPORARY CONTROL', 0x0050DC: u'TAS TELEFONBAU A. SCHWABE GMBH & CO. KG', 0x0050DD: u'SERRA SOLDADURA, S.A.', 0x0050DE: u'SIGNUM SYSTEMS CORP.', 0x0050DF: u'AirFiber, Inc.', 0x0050E1: u'NS TECH ELECTRONICS SDN BHD', 0x0050E2: u'CISCO SYSTEMS, INC.', 0x0050E3: u'Terayon Communications Systems', 0x0050E4: u'APPLE COMPUTER, INC.', 0x0050E6: u'HAKUSAN CORPORATION', 0x0050E7: u'PARADISE INNOVATIONS (ASIA)', 0x0050E8: u'NOMADIX INC.', 0x0050EA: u'XEL COMMUNICATIONS, INC.', 0x0050EB: u'ALPHA-TOP CORPORATION', 0x0050EC: u'OLICOM A/S', 0x0050ED: u'ANDA NETWORKS', 0x0050EE: u'TEK DIGITEL CORPORATION', 0x0050EF: u'SPE Systemhaus GmbH', 0x0050F0: u'CISCO SYSTEMS, INC.', 0x0050F1: u'LIBIT SIGNAL PROCESSING, LTD.', 0x0050F2: u'MICROSOFT CORP.', 0x0050F3: u'GLOBAL NET INFORMATION CO., Ltd.', 0x0050F4: u'SIGMATEK GMBH & CO. KG',
0x0050F6: u'PAN-INTERNATIONAL INDUSTRIAL CORP.', 0x0050F7: u'VENTURE MANUFACTURING (SINGAPORE) LTD.', 0x0050F8: u'ENTREGA TECHNOLOGIES, INC.', 0x0050F9: u'SENSORMATIC ACD', 0x0050FA: u'OXTEL, LTD.', 0x0050FB: u'VSK ELECTRONICS', 0x0050FC: u'EDIMAX TECHNOLOGY CO., LTD.', 0x0050FD: u'VISIONCOMM CO., LTD.', 0x0050FE: u'PCTVnet ASA', 0x0050FF: u'HAKKO ELECTRONICS CO., LTD.', 0x006000: u'XYCOM INC.', 0x006001: u'InnoSys, Inc.', 0x006002: u'SCREEN SUBTITLING SYSTEMS, LTD', 0x006003: u'TERAOKA WEIGH SYSTEM PTE, LTD.', 0x006004: u'COMPUTADORES MODULARES SA', 0x006005: u'FEEDBACK DATA LTD.', 0x006006: u'SOTEC CO., LTD', 0x006007: u'ACRES GAMING, INC.', 0x006008: u'3COM CORPORATION', 0x006009: u'CISCO SYSTEMS, INC.', 0x00600A: u'SORD COMPUTER CORPORATION', 0x00600B: u'LOGWARE GmbH', 0x00600C: u'APPLIED DATA SYSTEMS, INC.', 0x00600D: u'Digital Logic GmbH', 0x00600E: u'WAVENET INTERNATIONAL, INC.', 0x00600F: u'WESTELL, INC.', 0x006010: u'NETWORK MACHINES, INC.', 0x006011: u'CRYSTAL SEMICONDUCTOR CORP.', 0x006012: u'POWER COMPUTING CORPORATION', 0x006013: u'NETSTAL MASCHINEN AG', 0x006014: u'EDEC CO., LTD.', 0x006015: u'NET2NET CORPORATION', 0x006016: u'CLARIION', 0x006017: u'TOKIMEC INC.', 0x006018: u'STELLAR ONE CORPORATION', 0x006019: u'Roche Diagnostics', 0x00601A: u'KEITHLEY INSTRUMENTS', 0x00601B: u'MESA ELECTRONICS', 0x00601C: u'TELXON CORPORATION', 0x00601D: u'LUCENT TECHNOLOGIES', 0x00601E: u'SOFTLAB, INC.', 0x00601F: u'STALLION TECHNOLOGIES', 0x006020: u'PIVOTAL NETWORKING, INC.', 0x006021: u'DSC CORPORATION', 0x006022: u'VICOM SYSTEMS, INC.', 0x006023: u'PERICOM SEMICONDUCTOR CORP.', 0x006024: u'GRADIENT TECHNOLOGIES, INC.', 0x006025: u'ACTIVE IMAGING PLC', 0x006026: u'VIKING COMPONENTS, INC.', 0x006027: u'Superior Modular Products', 0x006028: u'MACROVISION CORPORATION', 0x006029: u'CARY PERIPHERALS INC.', 0x00602A: u'SYMICRON COMPUTER COMMUNICATIONS, LTD.', 0x00602B: u'PEAK AUDIO', 0x00602C: u'LINX Data Terminals, Inc.', 0x00602D: u'ALERTON TECHNOLOGIES, INC.', 0x00602E: u'CYCLADES CORPORATION', 0x00602F: u'CISCO SYSTEMS, INC.', 0x006030: u'VILLAGE TRONIC ENTWICKLUNG', 0x006031: u'HRK SYSTEMS', 0x006032: u'I-CUBE, INC.', 0x006033: u'ACUITY IMAGING, INC.', 0x006034: u'ROBERT BOSCH GmbH', 0x006035: u'DALLAS SEMICONDUCTOR, INC.', 0x006036: u'AUSTRIAN RESEARCH CENTER SEIBERSDORF', 0x006037: u'NXP Semiconductors', 0x006038: u'Nortel Networks', 0x006039: u'SanCom Technology, Inc.', 0x00603A: u'QUICK CONTROLS LTD.', 0x00603B: u'AMTEC spa', 0x00603C: u'HAGIWARA SYS-COM CO., LTD.', 0x00603D: u'3CX', 0x00603E: u'CISCO SYSTEMS, INC.', 0x00603F: u'PATAPSCO DESIGNS', 0x006040: u'NETRO CORP.', 0x006041: u'Yokogawa Electric Corporation', 0x006042: u'TKS (USA), INC.', 0x006043: u'ComSoft Systems, Inc.', 0x006044: u'LITTON/POLY-SCIENTIFIC', 0x006045: u'PATHLIGHT TECHNOLOGIES', 0x006046: u'VMETRO, INC.', 0x006047: u'CISCO SYSTEMS, INC.', 0x006048: u'EMC CORPORATION', 0x006049: u'VINA TECHNOLOGIES', 0x00604A: u'SAIC IDEAS GROUP', 0x00604B: u'Safe-com GmbH & Co. KG', 0x00604C: u'SAGEM SA', 0x00604D: u'MMC NETWORKS, INC.', 0x00604E: u'CYCLE COMPUTER CORPORATION, INC.', 0x00604F: u'SUZUKI MFG. CO., LTD.',
0x006050: u'INTERNIX INC.', 0x006051: u'QUALITY SEMICONDUCTOR', 0x006052: u'PERIPHERALS ENTERPRISE CO., Ltd.', 0x006053: u'TOYODA MACHINE WORKS, LTD.', 0x006054: u'CONTROLWARE GMBH', 0x006055: u'CORNELL UNIVERSITY', 0x006056: u'NETWORK TOOLS, INC.', 0x006057: u'MURATA MANUFACTURING CO., LTD.', 0x006058: u'COPPER MOUNTAIN COMMUNICATIONS, INC.', 0x006059: u'TECHNICAL COMMUNICATIONS CORP.', 0x00605A: u'CELCORE, INC.', 0x00605B: u'IntraServer Technology, Inc.', 0x00605C: u'CISCO SYSTEMS, INC.', 0x00605D: u'SCANIVALVE CORP.', 0x00605E: u'LIBERTY TECHNOLOGY NETWORKING', 0x00605F: u'NIPPON UNISOFT CORPORATION', 0x006060: u'DAWNING TECHNOLOGIES, INC.', 0x006061: u'WHISTLE COMMUNICATIONS CORP.', 0x006062: u'TELESYNC, INC.', 0x006063: u'PSION DACOM PLC.', 0x006064: u'NETCOMM LIMITED', 0x006065: u'BERNECKER & RAINER INDUSTRIE-ELEKTRONIC GmbH', 0x006066: u'LACROIX TECHNOLGIE', 0x006067: u'ACER NETXUS INC.', 0x006068: u'EICON TECHNOLOGY CORPORATION', 0x006069: u'BROCADE COMMUNICATIONS SYSTEMS, Inc.', 0x00606A: u'MITSUBISHI WIRELESS COMMUNICATIONS. INC.', 0x00606B: u'Synclayer Inc.', 0x00606C: u'ARESCOM', 0x00606D: u'DIGITAL EQUIPMENT CORP.', 0x00606E: u'DAVICOM SEMICONDUCTOR, INC.', 0x00606F: u'CLARION CORPORATION OF AMERICA', 0x006070: u'CISCO SYSTEMS, INC.', 0x006071: u'MIDAS LAB, INC.', 0x006072: u'VXL INSTRUMENTS, LIMITED', 0x006073: u'REDCREEK COMMUNICATIONS, INC.', 0x006074: u'QSC AUDIO PRODUCTS', 0x006075: u'PENTEK, INC.', 0x006076: u'SCHLUMBERGER TECHNOLOGIES RETAIL PETROLEUM SYSTEMS', 0x006077: u'PRISA NETWORKS', 0x006078: u'POWER MEASUREMENT LTD.', 0x006079: u'Mainstream Data, Inc.', 0x00607A: u'DVS GmbH', 0x00607B: u'FORE SYSTEMS, INC.', 0x00607C: u'WaveAccess, Ltd.', 0x00607D: u'SENTIENT NETWORKS INC.', 0x00607E: u'GIGALABS, INC.', 0x00607F: u'AURORA TECHNOLOGIES, INC.', 0x006080: u'MICROTRONIX DATACOM LTD.', 0x006081: u'TV/COM INTERNATIONAL', 0x006082: u'NOVALINK TECHNOLOGIES, INC.', 0x006083: u'CISCO SYSTEMS, INC.', 0x006084: u'DIGITAL VIDEO', 0x006085: u'Storage Concepts', 0x006086: u'LOGIC REPLACEMENT TECH. LTD.', 0x006087: u'KANSAI ELECTRIC CO., LTD.', 0x006088: u'WHITE MOUNTAIN DSP, INC.', 0x006089: u'XATA', 0x00608A: u'CITADEL COMPUTER', 0x00608B: u'ConferTech International', 0x00608C: u'3COM CORPORATION', 0x00608D: u'UNIPULSE CORP.', 0x00608E: u'HE ELECTRONICS, TECHNOLOGIE & SYSTEMTECHNIK GmbH', 0x00608F: u'TEKRAM TECHNOLOGY CO., LTD.', 0x006090: u'ABLE COMMUNICATIONS, INC.', 0x006091: u'FIRST PACIFIC NETWORKS, INC.', 0x006092: u'MICRO/SYS, INC.', 0x006093: u'VARIAN', 0x006094: u'IBM CORP.', 0x006095: u'ACCU-TIME SYSTEMS, INC.', 0x006096: u'T.S. MICROTECH INC.', 0x006097: u'3COM CORPORATION', 0x006098: u'HT COMMUNICATIONS', 0x006099: u'SBE, Inc.', 0x00609A: u'NJK TECHNO CO.', 0x00609B: u'ASTRO-MED, INC.', 0x00609C: u'Perkin-Elmer Incorporated', 0x00609D: u'PMI FOOD EQUIPMENT GROUP', 0x00609E: u'ASC X3 - INFORMATION TECHNOLOGY STANDARDS SECRETARIATS', 0x00609F: u'PHAST CORPORATION', 0x0060A0: u'SWITCHED NETWORK TECHNOLOGIES, INC.', 0x0060A1: u'VPNet, Inc.', 0x0060A2: u'NIHON UNISYS LIMITED CO.', 0x0060A3: u'CONTINUUM TECHNOLOGY CORP.', 0x0060A4: u'GRINAKER SYSTEM TECHNOLOGIES', 0x0060A5: u'PERFORMANCE TELECOM CORP.', 0x0060A6: u'PARTICLE MEASURING SYSTEMS', 0x0060A7: u'MICROSENS GmbH & CO. KG', 0x0060A8: u'TIDOMAT AB', 0x0060A9: u'GESYTEC MbH', 0x0060AA: u'INTELLIGENT DEVICES INC. (IDI)',
0x0060AB: u'LARSCOM INCORPORATED', 0x0060AC: u'RESILIENCE CORPORATION', 0x0060AD: u'MegaChips Corporation', 0x0060AE: u'TRIO INFORMATION SYSTEMS AB', 0x0060AF: u'PACIFIC MICRO DATA, INC.', 0x0060B0: u'HEWLETT-PACKARD CO.', 0x0060B1: u'INPUT/OUTPUT, INC.', 0x0060B2: u'PROCESS CONTROL CORP.', 0x0060B3: u'Z-COM, INC.', 0x0060B4: u'GLENAYRE R&D INC.', 0x0060B5: u'KEBA GmbH', 0x0060B6: u'LAND COMPUTER CO., LTD.', 0x0060B7: u'CHANNELMATIC, INC.', 0x0060B8: u'CORELIS INC.', 0x0060B9: u'NITSUKO CORPORATION', 0x0060BA: u'SAHARA NETWORKS, INC.', 0x0060BB: u'CABLETRON - NETLINK, INC.', 0x0060BC: u'KeunYoung Electronics & Communication Co., Ltd.', 0x0060BD: u'HUBBELL-PULSECOM', 0x0060BE: u'WEBTRONICS', 0x0060BF: u'MACRAIGOR SYSTEMS, INC.', 0x0060C0: u'NERA AS', 0x0060C1: u'WaveSpan Corporation', 0x0060C2: u'MPL AG', 0x0060C3: u'NETVISION CORPORATION', 0x0060C4: u'SOLITON SYSTEMS K.K.', 0x0060C5: u'ANCOT CORP.', 0x0060C6: u'DCS AG', 0x0060C7: u'AMATI COMMUNICATIONS CORP.', 0x0060C8: u'KUKA WELDING SYSTEMS & ROBOTS', 0x0060C9: u'ControlNet, Inc.', 0x0060CA: u'HARMONIC SYSTEMS INCORPORATED', 0x0060CB: u'HITACHI ZOSEN CORPORATION', 0x0060CC: u'EMTRAK, INCORPORATED', 0x0060CD: u'VideoServer, Inc.', 0x0060CE: u'ACCLAIM COMMUNICATIONS', 0x0060CF: u'ALTEON NETWORKS, INC.', 0x0060D0: u'SNMP RESEARCH INCORPORATED', 0x0060D1: u'CASCADE COMMUNICATIONS', 0x0060D2: u'LUCENT TECHNOLOGIES TAIWAN TELECOMMUNICATIONS CO., LTD.', 0x0060D3: u'AT&T', 0x0060D4: u'ELDAT COMMUNICATION LTD.', 0x0060D5: u'MIYACHI TECHNOS CORP.', 0x0060D6: u'NovAtel Wireless Technologies Ltd.', 0x0060D7: u'ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (EPFL)', 0x0060D8: u'ELMIC SYSTEMS, INC.', 0x0060D9: u'TRANSYS NETWORKS INC.', 0x0060DA: u'JBM ELECTRONICS CO.', 0x0060DB: u'NTP ELEKTRONIK A/S', 0x0060DC: u'Toyo Network Systems Co, Ltd.', 0x0060DD: u'MYRICOM, INC.', 0x0060DE: u'KAYSER-THREDE GmbH', 0x0060DF: u'CNT Corporation', 0x0060E0: u'AXIOM TECHNOLOGY CO., LTD.', 0x0060E1: u'ORCKIT COMMUNICATIONS LTD.', 0x0060E2: u'QUEST ENGINEERING & DEVELOPMENT', 0x0060E3: u'ARBIN INSTRUMENTS', 0x0060E4: u'COMPUSERVE, INC.', 0x0060E5: u'FUJI AUTOMATION CO., LTD.', 0x0060E6: u'SHOMITI SYSTEMS INCORPORATED', 0x0060E7: u'RANDATA', 0x0060E8: u'HITACHI COMPUTER PRODUCTS (AMERICA), INC.', 0x0060E9: u'ATOP TECHNOLOGIES, INC.', 0x0060EA: u'StreamLogic', 0x0060EB: u'FOURTHTRACK SYSTEMS', 0x0060EC: u'HERMARY OPTO ELECTRONICS INC.', 0x0060ED: u'RICARDO TEST AUTOMATION LTD.', 0x0060EE: u'APOLLO', 0x0060EF: u'FLYTECH TECHNOLOGY CO., LTD.', 0x0060F0: u'JOHNSON & JOHNSON MEDICAL, INC', 0x0060F1: u'EXP COMPUTER, INC.', 0x0060F2: u'LASERGRAPHICS, INC.', 0x0060F3: u'Performance Analysis Broadband, Spirent plc', 0x0060F4: u'ADVANCED COMPUTER SOLUTIONS, Inc.', 0x0060F5: u'ICON WEST, INC.', 0x0060F6: u'NEXTEST COMMUNICATIONS PRODUCTS, INC.', 0x0060F7: u'DATAFUSION SYSTEMS', 0x0060F8: u'Loran International Technologies Inc.', 0x0060F9: u'DIAMOND LANE COMMUNICATIONS', 0x0060FA: u'EDUCATIONAL TECHNOLOGY RESOURCES, INC.', 0x0060FB: u'PACKETEER, INC.', 0x0060FC: u'CONSERVATION THROUGH INNOVATION LTD.', 0x0060FD: u'NetICs, Inc.', 0x0060FE: u'LYNX SYSTEM DEVELOPERS, INC.', 0x0060FF: u'QuVis, Inc.', 0x0070B0: u'M/A-COM INC. COMPANIES',
0x0070B3: u'DATA RECALL LTD.', 0x008000: u'MULTITECH SYSTEMS, INC.', 0x008001: u'PERIPHONICS CORPORATION', 0x008002: u'SATELCOM (UK) LTD', 0x008003: u'HYTEC ELECTRONICS LTD.', 0x008004: u'ANTLOW COMMUNICATIONS, LTD.', 0x008005: u'CACTUS COMPUTER INC.', 0x008006: u'COMPUADD CORPORATION', 0x008007: u'DLOG NC-SYSTEME', 0x008008: u'DYNATECH COMPUTER SYSTEMS', 0x008009: u'JUPITER SYSTEMS, INC.', 0x00800A: u'JAPAN COMPUTER CORP.', 0x00800B: u'CSK CORPORATION', 0x00800C: u'VIDECOM LIMITED', 0x00800D: u'VOSSWINKEL F.U.', 0x00800E: u'ATLANTIX CORPORATION', 0x00800F: u'STANDARD MICROSYSTEMS', 0x008010: u'COMMODORE INTERNATIONAL', 0x008011: u'DIGITAL SYSTEMS INT\'L. INC.', 0x008012: u'INTEGRATED MEASUREMENT SYSTEMS', 0x008013: u'THOMAS-CONRAD CORPORATION', 0x008014: u'ESPRIT SYSTEMS', 0x008015: u'SEIKO SYSTEMS, INC.', 0x008016: u'WANDEL AND GOLTERMANN', 0x008017: u'PFU LIMITED', 0x008018: u'KOBE STEEL, LTD.', 0x008019: u'DAYNA COMMUNICATIONS, INC.', 0x00801A: u'BELL ATLANTIC', 0x00801B: u'KODIAK TECHNOLOGY', 0x00801C: u'NEWPORT SYSTEMS SOLUTIONS', 0x00801D: u'INTEGRATED INFERENCE MACHINES', 0x00801E: u'XINETRON, INC.', 0x00801F: u'KRUPP ATLAS ELECTRONIK GMBH', 0x008020: u'NETWORK PRODUCTS', 0x008021: u'Alcatel Canada Inc.', 0x008022: u'SCAN-OPTICS', 0x008023: u'INTEGRATED BUSINESS NETWORKS', 0x008024: u'KALPANA, INC.', 0x008025: u'STOLLMANN GMBH', 0x008026: u'NETWORK PRODUCTS CORPORATION', 0x008027: u'ADAPTIVE SYSTEMS, INC.', 0x008028: u'TRADPOST (HK) LTD', 0x008029: u'EAGLE TECHNOLOGY, INC.', 0x00802A: u'TEST SYSTEMS & SIMULATIONS INC', 0x00802B: u'INTEGRATED MARKETING CO', 0x00802C: u'THE SAGE GROUP PLC', 0x00802D: u'XYLOGICS INC', 0x00802E: u'CASTLE ROCK COMPUTING', 0x00802F: u'NATIONAL INSTRUMENTS CORP.', 0x008030: u'NEXUS ELECTRONICS', 0x008031: u'BASYS, CORP.', 0x008032: u'ACCESS CO., LTD.', 0x008033: u'FORMATION, INC.', 0x008034: u'SMT GOUPIL', 0x008035: u'TECHNOLOGY WORKS, INC.', 0x008036: u'REFLEX MANUFACTURING SYSTEMS', 0x008037: u'Ericsson Group', 0x008038: u'DATA RESEARCH & APPLICATIONS', 0x008039: u'ALCATEL STC AUSTRALIA', 0x00803A: u'VARITYPER, INC.', 0x00803B: u'APT COMMUNICATIONS, INC.', 0x00803C: u'TVS ELECTRONICS LTD', 0x00803D: u'SURIGIKEN CO., LTD.', 0x00803E: u'SYNERNETICS', 0x00803F: u'TATUNG COMPANY', 0x008040: u'JOHN FLUKE MANUFACTURING CO.', 0x008041: u'VEB KOMBINAT ROBOTRON', 0x008042: u'FORCE COMPUTERS', 0x008043: u'NETWORLD, INC.', 0x008044: u'SYSTECH COMPUTER CORP.', 0x008045: u'MATSUSHITA ELECTRIC IND. CO',
0x008046: u'UNIVERSITY OF TORONTO', 0x008047: u'IN-NET CORP.', 0x008048: u'COMPEX INCORPORATED', 0x008049: u'NISSIN ELECTRIC CO., LTD.', 0x00804A: u'PRO-LOG', 0x00804B: u'EAGLE TECHNOLOGIES PTY.LTD.', 0x00804C: u'CONTEC CO., LTD.', 0x00804D: u'CYCLONE MICROSYSTEMS, INC.', 0x00804E: u'APEX COMPUTER COMPANY', 0x00804F: u'DAIKIN INDUSTRIES, LTD.', 0x008050: u'ZIATECH CORPORATION', 0x008051: u'FIBERMUX', 0x008052: u'TECHNICALLY ELITE CONCEPTS', 0x008053: u'INTELLICOM, INC.', 0x008054: u'FRONTIER TECHNOLOGIES CORP.', 0x008055: u'FERMILAB', 0x008056: u'SPHINX ELEKTRONIK GMBH', 0x008057: u'ADSOFT, LTD.', 0x008058: u'PRINTER SYSTEMS CORPORATION', 0x008059: u'STANLEY ELECTRIC CO., LTD', 0x00805A: u'TULIP COMPUTERS INTERNAT\'L B.V', 0x00805B: u'CONDOR SYSTEMS, INC.', 0x00805C: u'AGILIS CORPORATION', 0x00805D: u'CANSTAR', 0x00805E: u'LSI LOGIC CORPORATION', 0x00805F: u'COMPAQ COMPUTER CORPORATION', 0x008060: u'NETWORK INTERFACE CORPORATION', 0x008061: u'LITTON SYSTEMS, INC.', 0x008062: u'INTERFACE CO.', 0x008063: u'RICHARD HIRSCHMANN GMBH & CO.', 0x008064: u'WYSE TECHNOLOGY', 0x008065: u'CYBERGRAPHIC SYSTEMS PTY LTD.', 0x008066: u'ARCOM CONTROL SYSTEMS, LTD.', 0x008067: u'SQUARE D COMPANY', 0x008068: u'YAMATECH SCIENTIFIC LTD.', 0x008069: u'COMPUTONE SYSTEMS', 0x00806A: u'ERI (EMPAC RESEARCH INC.)', 0x00806B: u'SCHMID TELECOMMUNICATION', 0x00806C: u'CEGELEC PROJECTS LTD', 0x00806D: u'CENTURY SYSTEMS CORP.', 0x00806E: u'NIPPON STEEL CORPORATION', 0x00806F: u'ONELAN LTD.', 0x008070: u'COMPUTADORAS MICRON', 0x008071: u'SAI TECHNOLOGY', 0x008072: u'MICROPLEX SYSTEMS LTD.', 0x008073: u'DWB ASSOCIATES', 0x008074: u'FISHER CONTROLS', 0x008075: u'PARSYTEC GMBH', 0x008076: u'MCNC', 0x008077: u'BROTHER INDUSTRIES, LTD.', 0x008078: u'PRACTICAL PERIPHERALS, INC.', 0x008079: u'MICROBUS DESIGNS LTD.', 0x00807A: u'AITECH SYSTEMS LTD.', 0x00807B: u'ARTEL COMMUNICATIONS CORP.', 0x00807C: u'FIBERCOM, INC.', 0x00807D: u'EQUINOX SYSTEMS INC.', 0x00807E: u'SOUTHERN PACIFIC LTD.', 0x00807F: u'DY-4 INCORPORATED', 0x008080: u'DATAMEDIA CORPORATION', 0x008081: u'KENDALL SQUARE RESEARCH CORP.', 0x008082: u'PEP MODULAR COMPUTERS GMBH', 0x008083: u'AMDAHL', 0x008084: u'THE CLOUD INC.', 0x008085: u'H-THREE SYSTEMS CORPORATION', 0x008086: u'COMPUTER GENERATION INC.', 0x008087: u'OKI ELECTRIC INDUSTRY CO., LTD', 0x008088: u'VICTOR COMPANY OF JAPAN, LTD.', 0x008089: u'TECNETICS (PTY) LTD.', 0x00808A: u'SUMMIT MICROSYSTEMS CORP.', 0x00808B: u'DACOLL LIMITED', 0x00808C: u'NetScout Systems, Inc.', 0x00808D: u'WESTCOAST TECHNOLOGY B.V.', 0x00808E: u'RADSTONE TECHNOLOGY', 0x00808F: u'C. 
ITOH ELECTRONICS, INC.', 0x008090: u'MICROTEK INTERNATIONAL, INC.', 0x008091: u'TOKYO ELECTRIC CO.,LTD', 0x008092: u'JAPAN COMPUTER INDUSTRY, INC.', 0x008093: u'XYRON CORPORATION', 0x008094: u'ALFA LAVAL AUTOMATION AB', 0x008095: u'BASIC MERTON HANDELSGES.M.B.H.', 0x008096: u'HUMAN DESIGNED SYSTEMS, INC.', 0x008097: u'CENTRALP AUTOMATISMES', 0x008098: u'TDK CORPORATION', 0x008099: u'KLOCKNER MOELLER IPC', 0x00809A: u'NOVUS NETWORKS LTD', 0x00809B: u'JUSTSYSTEM CORPORATION', 0x00809C: u'LUXCOM, INC.', 0x00809D: u'Commscraft Ltd.', 0x00809E: u'DATUS GMBH', 0x00809F: u'ALCATEL BUSINESS SYSTEMS', 0x0080A0: u'EDISA HEWLETT PACKARD S/A', 0x0080A1: u'MICROTEST, INC.', 0x0080A2: u'CREATIVE ELECTRONIC SYSTEMS', 0x0080A3: u'LANTRONIX', 0x0080A4: u'LIBERTY ELECTRONICS', 0x0080A5: u'SPEED INTERNATIONAL', 0x0080A6: u'REPUBLIC TECHNOLOGY, INC.', 0x0080A7: u'MEASUREX CORP.', 0x0080A8: u'VITACOM CORPORATION', 0x0080A9: u'CLEARPOINT RESEARCH', 0x0080AA: u'MAXPEED', 0x0080AB: u'DUKANE NETWORK INTEGRATION', 0x0080AC: u'IMLOGIX, DIVISION OF GENESYS', 0x0080AD: u'CNET TECHNOLOGY, INC.', 0x0080AE: u'HUGHES NETWORK SYSTEMS', 0x0080AF: u'ALLUMER CO., LTD.', 0x0080B0: u'ADVANCED INFORMATION', 0x0080B1: u'SOFTCOM A/S', 0x0080B2: u'NETWORK EQUIPMENT TECHNOLOGIES', 0x0080B3: u'AVAL DATA CORPORATION', 0x0080B4: u'SOPHIA SYSTEMS', 0x0080B5: u'UNITED NETWORKS INC.', 0x0080B6: u'THEMIS COMPUTER', 0x0080B7: u'STELLAR COMPUTER', 0x0080B8: u'BUG, INCORPORATED', 0x0080B9: u'ARCHE TECHNOLIGIES INC.', 0x0080BA: u'SPECIALIX (ASIA) PTE, LTD', 0x0080BB: u'HUGHES LAN SYSTEMS', 0x0080BC: u'HITACHI ENGINEERING CO., LTD', 0x0080BD: u'THE FURUKAWA ELECTRIC CO., LTD', 0x0080BE: u'ARIES RESEARCH', 0x0080BF: u'TAKAOKA ELECTRIC MFG. CO. LTD.', 0x0080C0: u'PENRIL DATACOMM', 0x0080C1: u'LANEX CORPORATION', 0x0080C2: u'IEEE 802.1 COMMITTEE', 0x0080C3: u'BICC INFORMATION SYSTEMS & SVC', 0x0080C4: u'DOCUMENT TECHNOLOGIES, INC.', 0x0080C5: u'NOVELLCO DE MEXICO', 0x0080C6: u'NATIONAL DATACOMM CORPORATION', 0x0080C7: u'XIRCOM', 0x0080C8: u'D-LINK SYSTEMS, INC.', 0x0080C9: u'ALBERTA MICROELECTRONIC CENTRE', 0x0080CA: u'NETCOM RESEARCH INCORPORATED', 0x0080CB: u'FALCO DATA PRODUCTS', 0x0080CC: u'MICROWAVE BYPASS SYSTEMS', 0x0080CD: u'MICRONICS COMPUTER, INC.', 0x0080CE: u'BROADCAST TELEVISION SYSTEMS', 0x0080CF: u'EMBEDDED PERFORMANCE INC.', 0x0080D0: u'COMPUTER PERIPHERALS, INC.', 0x0080D1: u'KIMTRON CORPORATION', 0x0080D2: u'SHINNIHONDENKO CO., LTD.', 0x0080D3: u'SHIVA CORP.', 0x0080D4: u'CHASE RESEARCH LTD.', 0x0080D5: u'CADRE TECHNOLOGIES', 0x0080D6: u'NUVOTECH, INC.', 0x0080D7: u'Fantum Engineering', 0x0080D8: u'NETWORK PERIPHERALS INC.', 0x0080D9: u'EMK ELEKTRONIK', 0x0080DA: u'BRUEL & KJAER', 0x0080DB: u'GRAPHON CORPORATION', 0x0080DC: u'PICKER INTERNATIONAL', 0x0080DD: u'GMX INC/GIMIX', 0x0080DE: u'GIPSI S.A.', 0x0080DF: u'ADC CODENOLL TECHNOLOGY CORP.', 0x0080E0: u'XTP SYSTEMS, INC.', 0x0080E1: u'STMICROELECTRONICS', 0x0080E2: u'T.D.I. CO., LTD.', 0x0080E3: u'CORAL NETWORK CORPORATION', 0x0080E4: u'NORTHWEST DIGITAL SYSTEMS, INC', 0x0080E5: u'LSI Logic Corporation', 0x0080E6: u'PEER NETWORKS, INC.', 0x0080E7: u'LYNWOOD SCIENTIFIC DEV. 
LTD.', 0x0080E8: u'CUMULUS CORPORATIION', 0x0080E9: u'Madge Ltd.', 0x0080EA: u'ADVA Optical Networking Ltd.', 0x0080EB: u'COMPCONTROL B.V.', 0x0080EC: u'SUPERCOMPUTING SOLUTIONS, INC.', 0x0080ED: u'IQ TECHNOLOGIES, INC.', 0x0080EE: u'THOMSON CSF', 0x0080EF: u'RATIONAL', 0x0080F0: u'Panasonic Communications Co., Ltd.', 0x0080F1: u'OPUS SYSTEMS', 0x0080F2: u'RAYCOM SYSTEMS INC', 0x0080F3: u'SUN ELECTRONICS CORP.', 0x0080F4: u'TELEMECANIQUE ELECTRIQUE', 0x0080F5: u'QUANTEL LTD', 0x0080F6: u'SYNERGY MICROSYSTEMS', 0x0080F7: u'ZENITH ELECTRONICS', 0x0080F8: u'MIZAR, INC.', 0x0080F9: u'HEURIKON CORPORATION', 0x0080FA: u'RWT GMBH', 0x0080FB: u'BVM LIMITED', 0x0080FC: u'AVATAR CORPORATION', 0x0080FD: u'EXSCEED CORPRATION', 0x0080FE: u'AZURE TECHNOLOGIES, INC.', 0x0080FF: u'SOC. DE TELEINFORMATIQUE RTC', 0x009000: u'DIAMOND MULTIMEDIA', 0x009001: u'NISHIMU ELECTRONICS INDUSTRIES CO., LTD.', 0x009002: u'ALLGON AB', 0x009003: u'APLIO', 0x009004: u'3COM EUROPE LTD.', 0x009005: u'PROTECH SYSTEMS CO., LTD.', 0x009006: u'HAMAMATSU PHOTONICS K.K.', 0x009007: u'DOMEX TECHNOLOGY CORP.', 0x009008: u'HanA Systems Inc.', 0x009009: u'i Controls, Inc.', 0x00900A: u'PROTON ELECTRONIC INDUSTRIAL CO., LTD.', 0x00900B: u'LANNER ELECTRONICS, INC.', 0x00900C: u'CISCO SYSTEMS, INC.', 0x00900D: u'Overland Storage Inc.', 0x00900E: u'HANDLINK TECHNOLOGIES, INC.', 0x00900F: u'KAWASAKI HEAVY INDUSTRIES, LTD', 0x009010: u'SIMULATION LABORATORIES, INC.', 0x009011: u'WAVTrace, Inc.', 0x009012: u'GLOBESPAN SEMICONDUCTOR, INC.', 0x009013: u'SAMSAN CORP.', 0x009014: u'ROTORK INSTRUMENTS, LTD.', 0x009015: u'CENTIGRAM COMMUNICATIONS CORP.', 0x009016: u'ZAC', 0x009017: u'ZYPCOM, INC.', 0x009018: u'ITO ELECTRIC INDUSTRY CO, LTD.', 0x009019: u'HERMES ELECTRONICS CO., LTD.', 0x00901A: u'UNISPHERE SOLUTIONS', 0x00901B: u'DIGITAL CONTROLS', 0x00901C: u'mps Software Gmbh', 0x00901D: u'PEC (NZ) LTD.', 0x00901E: u'SELESTA INGEGNE RIA S.P.A.', 0x00901F: u'ADTEC PRODUCTIONS, INC.', 0x009020: u'PHILIPS ANALYTICAL X-RAY B.V.', 0x009021: u'CISCO SYSTEMS, INC.', 0x009022: u'IVEX', 0x009023: u'ZILOG INC.', 0x009024: u'PIPELINKS, INC.', 0x009025: u'VISION SYSTEMS LTD. PTY', 0x009026: u'ADVANCED SWITCHING COMMUNICATIONS, INC.', 0x009027: u'INTEL CORPORATION', 0x009028: u'NIPPON SIGNAL CO., LTD.', 0x009029: u'CRYPTO AG', 0x00902A: u'COMMUNICATION DEVICES, INC.', 0x00902B: u'CISCO SYSTEMS, INC.', 0x00902C: u'DATA & CONTROL EQUIPMENT LTD.', 0x00902D: u'DATA ELECTRONICS (AUST.) PTY, LTD.', 0x00902E: u'NAMCO LIMITED', 0x00902F: u'NETCORE SYSTEMS, INC.', 0x009030: u'HONEYWELL-DATING', 0x009031: u'MYSTICOM, LTD.', 0x009032: u'PELCOMBE GROUP LTD.', 0x009033: u'INNOVAPHONE AG', 0x009034: u'IMAGIC, INC.', 0x009035: u'ALPHA TELECOM, INC.', 0x009036: u'ens, inc.', 0x009037: u'ACUCOMM, INC.', 0x009038: u'FOUNTAIN TECHNOLOGIES, INC.', 0x009039: u'SHASTA NETWORKS', 0x00903A: u'NIHON MEDIA TOOL INC.', 0x00903B: u'TriEMS Research Lab, Inc.', 0x00903C: u'ATLANTIC NETWORK SYSTEMS', 0x00903D: u'BIOPAC SYSTEMS, INC.', 0x00903E: u'N.V. PHILIPS INDUSTRIAL ACTIVITIES', 0x00903F: u'AZTEC RADIOMEDIA', 0x009040: u'Siemens Network Convergence LLC', 0x009041: u'APPLIED DIGITAL ACCESS', 0x009042: u'ECCS, Inc.', 0x009043: u'NICHIBEI DENSHI CO., LTD.', 0x009044: u'ASSURED DIGITAL, INC.', 0x009045: u'Marconi Communications', 0x009046: u'DEXDYNE, LTD.', 0x009047: u'GIGA FAST E. 
LTD.', 0x009048: u'ZEAL CORPORATION', 0x009049: u'ENTRIDIA CORPORATION', 0x00904A: u'CONCUR SYSTEM TECHNOLOGIES', 0x00904B: u'GemTek Technology Co., Ltd.', 0x00904C: u'EPIGRAM, INC.', 0x00904D: u'SPEC S.A.', 0x00904E: u'DELEM BV', 0x00904F: u'ABB POWER T&D COMPANY, INC.', 0x009050: u'TELESTE OY', 0x009051: u'ULTIMATE TECHNOLOGY CORP.', 0x009052: u'SELCOM ELETTRONICA S.R.L.', 0x009053: u'DAEWOO ELECTRONICS CO., LTD.', 0x009054: u'INNOVATIVE SEMICONDUCTORS, INC', 0x009055: u'PARKER HANNIFIN CORPORATION COMPUMOTOR DIVISION', 0x009056: u'TELESTREAM, INC.', 0x009057: u'AANetcom, Inc.', 0x009058: u'Ultra Electronics Ltd., Command and Control Systems', 0x009059: u'TELECOM DEVICE K.K.', 0x00905A: u'DEARBORN GROUP, INC.', 0x00905B: u'RAYMOND AND LAE ENGINEERING', 0x00905C: u'EDMI', 0x00905D: u'NETCOM SICHERHEITSTECHNIK GmbH', 0x00905E: u'RAULAND-BORG CORPORATION', 0x00905F: u'CISCO SYSTEMS, INC.', 0x009060: u'SYSTEM CREATE CORP.', 0x009061: u'PACIFIC RESEARCH & ENGINEERING CORPORATION', 0x009062: u'ICP VORTEX COMPUTERSYSTEME GmbH', 0x009063: u'COHERENT COMMUNICATIONS SYSTEMS CORPORATION', 0x009064: u'THOMSON BROADCAST SYSTEMS', 0x009065: u'FINISAR CORPORATION', 0x009066: u'Troika Networks, Inc.', 0x009067: u'WalkAbout Computers, Inc.', 0x009068: u'DVT CORP.', 0x009069: u'JUNIPER NETWORKS, INC.', 0x00906A: u'TURNSTONE SYSTEMS, INC.', 0x00906B: u'APPLIED RESOURCES, INC.', 0x00906C: u'Sartorius Hamburg GmbH', 0x00906D: u'CISCO SYSTEMS, INC.', 0x00906E: u'PRAXON, INC.', 0x00906F: u'CISCO SYSTEMS, INC.', 0x009070: u'NEO NETWORKS, INC.', 0x009071: u'Applied Innovation Inc.', 0x009072: u'SIMRAD AS', 0x009073: u'GAIO TECHNOLOGY', 0x009074: u'ARGON NETWORKS, INC.', 0x009075: u'NEC DO BRASIL S.A.', 0x009076: u'FMT AIRCRAFT GATE SUPPORT SYSTEMS AB', 0x009077: u'ADVANCED FIBRE COMMUNICATIONS', 0x009078: u'MER TELEMANAGEMENT SOLUTIONS, LTD.', 0x009079: u'ClearOne, Inc.', 0x00907A: u'SPECTRALINK CORP.', 0x00907B: u'E-TECH, INC.', 0x00907C: u'DIGITALCAST, INC.', 0x00907D: u'Lake Communications', 0x00907E: u'VETRONIX CORP.', 0x00907F: u'WatchGuard Technologies, Inc.', 0x009080: u'NOT LIMITED, INC.', 0x009081: u'ALOHA NETWORKS, INC.', 0x009082: u'FORCE INSTITUTE', 0x009083: u'TURBO COMMUNICATION, INC.', 0x009084: u'ATECH SYSTEM', 0x009085: u'GOLDEN ENTERPRISES, INC.', 0x009086: u'CISCO SYSTEMS, INC.', 0x009087: u'ITIS', 0x009088: u'BAXALL SECURITY LTD.', 0x009089: u'SOFTCOM MICROSYSTEMS, INC.', 0x00908A: u'BAYLY COMMUNICATIONS, INC.', 0x00908B: u'PFU Systems, Inc.', 0x00908C: u'ETREND ELECTRONICS, INC.', 0x00908D: u'VICKERS ELECTRONICS SYSTEMS', 0x00908E: u'Nortel Networks Broadband Access', 0x00908F: u'AUDIO CODES LTD.', 0x009090: u'I-BUS', 0x009091: u'DigitalScape, Inc.', 0x009092: u'CISCO SYSTEMS, INC.', 0x009093: u'NANAO CORPORATION', 0x009094: u'OSPREY TECHNOLOGIES, INC.', 0x009095: u'UNIVERSAL AVIONICS', 0x009096: u'ASKEY COMPUTER CORP.', 0x009097: u'SYCAMORE NETWORKS', 0x009098: u'SBC DESIGNS, INC.', 0x009099: u'ALLIED TELESIS, K.K.', 0x00909A: u'ONE WORLD SYSTEMS, INC.', 0x00909B: u'MARKPOINT AB', 0x00909C: u'Terayon Communications Systems', 0x00909D: u'NovaTech Process Solutions, LLC', 0x00909E: u'Critical IO, LLC', 0x00909F: u'DIGI-DATA CORPORATION', 0x0090A0: u'8X8 INC.', 0x0090A1: u'FLYING PIG SYSTEMS, LTD.', 0x0090A2: u'CYBERTAN TECHNOLOGY, INC.', 0x0090A3: u'Corecess Inc.', 0x0090A4: u'ALTIGA NETWORKS', 0x0090A5: u'SPECTRA LOGIC', 0x0090A6: u'CISCO SYSTEMS, INC.', 0x0090A7: u'CLIENTEC CORPORATION', 0x0090A8: u'NineTiles Networks, Ltd.', 0x0090A9: u'WESTERN DIGITAL', 0x0090AA: u'INDIGO ACTIVE VISION 
SYSTEMS LIMITED', 0x0090AB: u'CISCO SYSTEMS, INC.', 0x0090AC: u'OPTIVISION, INC.', 0x0090AD: u'ASPECT ELECTRONICS, INC.', 0x0090AE: u'ITALTEL S.p.A.', 0x0090AF: u'J. MORITA MFG. CORP.', 0x0090B0: u'VADEM', 0x0090B1: u'CISCO SYSTEMS, INC.', 0x0090B2: u'AVICI SYSTEMS INC.', 0x0090B3: u'AGRANAT SYSTEMS', 0x0090B4: u'WILLOWBROOK TECHNOLOGIES', 0x0090B5: u'NIKON CORPORATION', 0x0090B6: u'FIBEX SYSTEMS', 0x0090B7: u'DIGITAL LIGHTWAVE, INC.', 0x0090B8: u'ROHDE & SCHWARZ GMBH & CO. KG', 0x0090B9: u'BERAN INSTRUMENTS LTD.', 0x0090BA: u'VALID NETWORKS, INC.', 0x0090BB: u'TAINET COMMUNICATION SYSTEM Corp.', 0x0090BC: u'TELEMANN CO., LTD.', 0x0090BD: u'OMNIA COMMUNICATIONS, INC.', 0x0090BE: u'IBC/INTEGRATED BUSINESS COMPUTERS', 0x0090BF: u'CISCO SYSTEMS, INC.', 0x0090C0: u'K.J. LAW ENGINEERS, INC.', 0x0090C1: u'Peco II, Inc.', 0x0090C2: u'JK microsystems, Inc.', 0x0090C3: u'TOPIC SEMICONDUCTOR CORP.', 0x0090C4: u'JAVELIN SYSTEMS, INC.', 0x0090C5: u'INTERNET MAGIC, INC.', 0x0090C6: u'OPTIM SYSTEMS, INC.', 0x0090C7: u'ICOM INC.', 0x0090C8: u'WAVERIDER COMMUNICATIONS (CANADA) INC.', 0x0090C9: u'DPAC Technologies', 0x0090CA: u'ACCORD VIDEO TELECOMMUNICATIONS, LTD.', 0x0090CB: u'Wireless OnLine, Inc.', 0x0090CC: u'PLANET COMMUNICATIONS, INC.', 0x0090CD: u'ENT-EMPRESA NACIONAL DE TELECOMMUNICACOES, S.A.', 0x0090CE: u'TETRA GmbH', 0x0090CF: u'NORTEL', 0x0090D0: u'Thomson Telecom Belgium', 0x0090D1: u'LEICHU ENTERPRISE CO., LTD.', 0x0090D2: u'ARTEL VIDEO SYSTEMS', 0x0090D3: u'GIESECKE & DEVRIENT GmbH', 0x0090D4: u'BindView Development Corp.', 0x0090D5: u'EUPHONIX, INC.', 0x0090D6: u'CRYSTAL GROUP', 0x0090D7: u'NetBoost Corp.', 0x0090D8: u'WHITECROSS SYSTEMS', 0x0090D9: u'CISCO SYSTEMS, INC.', 0x0090DA: u'DYNARC, INC.', 0x0090DB: u'NEXT LEVEL COMMUNICATIONS', 0x0090DC: u'TECO INFORMATION SYSTEMS', 0x0090DD: u'THE MIHARU COMMUNICATIONS CO., LTD.', 0x0090DE: u'CARDKEY SYSTEMS, INC.', 0x0090DF: u'MITSUBISHI CHEMICAL AMERICA, INC.', 0x0090E0: u'SYSTRAN CORP.', 0x0090E1: u'TELENA S.P.A.', 0x0090E2: u'DISTRIBUTED PROCESSING TECHNOLOGY', 0x0090E3: u'AVEX ELECTRONICS INC.', 0x0090E4: u'NEC AMERICA, INC.', 0x0090E5: u'TEKNEMA, INC.', 0x0090E6: u'ACER LABORATORIES, INC.', 0x0090E7: u'HORSCH ELEKTRONIK AG', 0x0090E8: u'MOXA TECHNOLOGIES CORP., LTD.', 0x0090E9: u'JANZ COMPUTER AG', 0x0090EA: u'ALPHA TECHNOLOGIES, INC.', 0x0090EB: u'SENTRY TELECOM SYSTEMS', 0x0090EC: u'PYRESCOM', 0x0090ED: u'CENTRAL SYSTEM RESEARCH CO., LTD.', 0x0090EE: u'PERSONAL COMMUNICATIONS TECHNOLOGIES', 0x0090EF: u'INTEGRIX, INC.', 0x0090F0: u'Harmonic Video Systems Ltd.', 0x0090F1: u'DOT HILL SYSTEMS CORPORATION', 0x0090F2: u'CISCO SYSTEMS, INC.', 0x0090F3: u'ASPECT COMMUNICATIONS', 0x0090F4: u'LIGHTNING INSTRUMENTATION', 0x0090F5: u'CLEVO CO.', 0x0090F6: u'ESCALATE NETWORKS, INC.', 0x0090F7: u'NBASE COMMUNICATIONS LTD.', 0x0090F8: u'MEDIATRIX TELECOM', 0x0090F9: u'LEITCH', 0x0090FA: u'EMULEX Corp', 0x0090FB: u'PORTWELL, INC.', 0x0090FC: u'NETWORK COMPUTING DEVICES', 0x0090FD: u'CopperCom, Inc.', 0x0090FE: u'ELECOM CO., LTD. 
(LANEED DIV.)', 0x0090FF: u'TELLUS TECHNOLOGY INC.', 0x0091D6: u'Crystal Group, Inc.', 0x009D8E: u'CARDIAC RECORDERS, INC.', 0x00A000: u'CENTILLION NETWORKS, INC.', 0x00A001: u'DRS Signal Solutions', 0x00A002: u'LEEDS & NORTHRUP AUSTRALIA PTY LTD', 0x00A003: u'STAEFA CONTROL SYSTEM', 0x00A004: u'NETPOWER, INC.', 0x00A005: u'DANIEL INSTRUMENTS, LTD.', 0x00A006: u'IMAGE DATA PROCESSING SYSTEM GROUP', 0x00A007: u'APEXX TECHNOLOGY, INC.', 0x00A008: u'NETCORP', 0x00A009: u'WHITETREE NETWORK', 0x00A00A: u'Airspan', 0x00A00B: u'COMPUTEX CO., LTD.', 0x00A00C: u'KINGMAX TECHNOLOGY, INC.', 0x00A00D: u'THE PANDA PROJECT', 0x00A00E: u'VISUAL NETWORKS, INC.', 0x00A00F: u'Broadband Technologies', 0x00A010: u'SYSLOGIC DATENTECHNIK AG', 0x00A011: u'MUTOH INDUSTRIES LTD.', 0x00A012: u'B.A.T.M. ADVANCED TECHNOLOGIES', 0x00A013: u'TELTREND LTD.', 0x00A014: u'CSIR', 0x00A015: u'WYLE', 0x00A016: u'MICROPOLIS CORP.', 0x00A017: u'J B M CORPORATION', 0x00A018: u'CREATIVE CONTROLLERS, INC.', 0x00A019: u'NEBULA CONSULTANTS, INC.', 0x00A01A: u'BINAR ELEKTRONIK AB', 0x00A01B: u'PREMISYS COMMUNICATIONS, INC.', 0x00A01C: u'NASCENT NETWORKS CORPORATION', 0x00A01D: u'SIXNET', 0x00A01E: u'EST CORPORATION', 0x00A01F: u'TRICORD SYSTEMS, INC.', 0x00A020: u'CITICORP/TTI', 0x00A021: u'General Dynamics', 0x00A022: u'CENTRE FOR DEVELOPMENT OF ADVANCED COMPUTING', 0x00A023: u'APPLIED CREATIVE TECHNOLOGY, INC.', 0x00A024: u'3COM CORPORATION', 0x00A025: u'REDCOM LABS INC.', 0x00A026: u'TELDAT, S.A.', 0x00A027: u'FIREPOWER SYSTEMS, INC.', 0x00A028: u'CONNER PERIPHERALS', 0x00A029: u'COULTER CORPORATION', 0x00A02A: u'TRANCELL SYSTEMS', 0x00A02B: u'TRANSITIONS RESEARCH CORP.', 0x00A02C: u'interWAVE Communications', 0x00A02D: u'1394 Trade Association', 0x00A02E: u'BRAND COMMUNICATIONS, LTD.', 0x00A02F: u'PIRELLI CAVI', 0x00A030: u'CAPTOR NV/SA', 0x00A031: u'HAZELTINE CORPORATION, MS 1-17', 0x00A032: u'GES SINGAPORE PTE. LTD.', 0x00A033: u'imc MeBsysteme GmbH', 0x00A034: u'AXEL', 0x00A035: u'CYLINK CORPORATION', 0x00A036: u'APPLIED NETWORK TECHNOLOGY', 0x00A037: u'DATASCOPE CORPORATION', 0x00A038: u'EMAIL ELECTRONICS', 0x00A039: u'ROSS TECHNOLOGY, INC.', 0x00A03A: u'KUBOTEK CORPORATION', 0x00A03B: u'TOSHIN ELECTRIC CO., LTD.', 0x00A03C: u'EG&G NUCLEAR INSTRUMENTS', 0x00A03D: u'OPTO-22', 0x00A03E: u'ATM FORUM', 0x00A03F: u'COMPUTER SOCIETY MICROPROCESSOR & MICROPROCESSOR STANDARDS C', 0x00A040: u'APPLE COMPUTER', 0x00A041: u'INFICON', 0x00A042: u'SPUR PRODUCTS CORP.', 0x00A043: u'AMERICAN TECHNOLOGY LABS, INC.', 0x00A044: u'NTT IT CO., LTD.', 0x00A045: u'PHOENIX CONTACT GMBH & CO.', 0x00A046: u'SCITEX CORP. LTD.', 0x00A047: u'INTEGRATED FITNESS CORP.', 0x00A048: u'QUESTECH, LTD.', 0x00A049: u'DIGITECH INDUSTRIES, INC.', 0x00A04A: u'NISSHIN ELECTRIC CO., LTD.', 0x00A04B: u'TFL LAN INC.', 0x00A04C: u'INNOVATIVE SYSTEMS & TECHNOLOGIES, INC.', 0x00A04D: u'EDA INSTRUMENTS, INC.', 0x00A04E: u'VOELKER TECHNOLOGIES, INC.', 0x00A04F: u'AMERITEC CORP.', 0x00A050: u'CYPRESS SEMICONDUCTOR', 0x00A051: u'ANGIA COMMUNICATIONS. INC.', 0x00A052: u'STANILITE ELECTRONICS PTY. 
LTD', 0x00A053: u'COMPACT DEVICES, INC.', 0x00A054: u'PRIVATE', 0x00A055: u'Data Device Corporation', 0x00A056: u'MICROPROSS', 0x00A057: u'LANCOM Systems GmbH', 0x00A058: u'GLORY, LTD.', 0x00A059: u'HAMILTON HALLMARK', 0x00A05A: u'KOFAX IMAGE PRODUCTS', 0x00A05B: u'MARQUIP, INC.', 0x00A05C: u'INVENTORY CONVERSION, INC./', 0x00A05D: u'CS COMPUTER SYSTEME GmbH', 0x00A05E: u'MYRIAD LOGIC INC.', 0x00A05F: u'BTG ENGINEERING BV', 0x00A060: u'ACER PERIPHERALS, INC.', 0x00A061: u'PURITAN BENNETT', 0x00A062: u'AES PRODATA', 0x00A063: u'JRL SYSTEMS, INC.', 0x00A064: u'KVB/ANALECT', 0x00A065: u'Symantec Corporation', 0x00A066: u'ISA CO., LTD.', 0x00A067: u'NETWORK SERVICES GROUP', 0x00A068: u'BHP LIMITED', 0x00A069: u'Symmetricom, Inc.', 0x00A06A: u'Verilink Corporation', 0x00A06B: u'DMS DORSCH MIKROSYSTEM GMBH', 0x00A06C: u'SHINDENGEN ELECTRIC MFG. CO., LTD.', 0x00A06D: u'MANNESMANN TALLY CORPORATION', 0x00A06E: u'AUSTRON, INC.', 0x00A06F: u'THE APPCON GROUP, INC.', 0x00A070: u'COASTCOM', 0x00A071: u'VIDEO LOTTERY TECHNOLOGIES,INC', 0x00A072: u'OVATION SYSTEMS LTD.', 0x00A073: u'COM21, INC.', 0x00A074: u'PERCEPTION TECHNOLOGY', 0x00A075: u'MICRON TECHNOLOGY, INC.', 0x00A076: u'CARDWARE LAB, INC.', 0x00A077: u'FUJITSU NEXION, INC.', 0x00A078: u'Marconi Communications', 0x00A079: u'ALPS ELECTRIC (USA), INC.', 0x00A07A: u'ADVANCED PERIPHERALS TECHNOLOGIES, INC.', 0x00A07B: u'DAWN COMPUTER INCORPORATION', 0x00A07C: u'TONYANG NYLON CO., LTD.', 0x00A07D: u'SEEQ TECHNOLOGY, INC.', 0x00A07E: u'AVID TECHNOLOGY, INC.', 0x00A07F: u'GSM-SYNTEL, LTD.', 0x00A080: u'SBE, Inc.', 0x00A081: u'ALCATEL DATA NETWORKS', 0x00A082: u'NKT ELEKTRONIK A/S', 0x00A083: u'ASIMMPHONY TURKEY', 0x00A084: u'DATAPLEX PTY. LTD.', 0x00A085: u'PRIVATE', 0x00A086: u'AMBER WAVE SYSTEMS, INC.', 0x00A087: u'Zarlink Semiconductor Ltd.', 0x00A088: u'ESSENTIAL COMMUNICATIONS', 0x00A089: u'XPOINT TECHNOLOGIES, INC.', 0x00A08A: u'BROOKTROUT TECHNOLOGY, INC.', 0x00A08B: u'ASTON ELECTRONIC DESIGNS LTD.', 0x00A08C: u'MultiMedia LANs, Inc.', 0x00A08D: u'JACOMO CORPORATION', 0x00A08E: u'Nokia Internet Communications', 0x00A08F: u'DESKNET SYSTEMS, INC.', 0x00A090: u'TimeStep Corporation', 0x00A091: u'APPLICOM INTERNATIONAL', 0x00A092: u'H. BOLLMANN MANUFACTURERS, LTD', 0x00A093: u'B/E AEROSPACE, Inc.', 0x00A094: u'COMSAT CORPORATION', 0x00A095: u'ACACIA NETWORKS, INC.', 0x00A096: u'MITUMI ELECTRIC CO., LTD.', 0x00A097: u'JC INFORMATION SYSTEMS', 0x00A098: u'NETWORK APPLIANCE CORP.', 0x00A099: u'K-NET LTD.', 0x00A09A: u'NIHON KOHDEN AMERICA', 0x00A09B: u'QPSX COMMUNICATIONS, LTD.', 0x00A09C: u'Xyplex, Inc.', 0x00A09D: u'JOHNATHON FREEMAN TECHNOLOGIES', 0x00A09E: u'ICTV', 0x00A09F: u'COMMVISION CORP.', 0x00A0A0: u'COMPACT DATA, LTD.', 0x00A0A1: u'EPIC DATA INC.', 0x00A0A2: u'DIGICOM S.P.A.', 0x00A0A3: u'RELIABLE POWER METERS', 0x00A0A4: u'MICROS SYSTEMS, INC.', 0x00A0A5: u'TEKNOR MICROSYSTEME, INC.', 0x00A0A6: u'M.I. 
SYSTEMS, K.K.', 0x00A0A7: u'VORAX CORPORATION', 0x00A0A8: u'RENEX CORPORATION', 0x00A0A9: u'NAVTEL COMMUNICATIONS INC.', 0x00A0AA: u'SPACELABS MEDICAL', 0x00A0AB: u'NETCS INFORMATIONSTECHNIK GMBH', 0x00A0AC: u'GILAT SATELLITE NETWORKS, LTD.', 0x00A0AD: u'MARCONI SPA', 0x00A0AE: u'NUCOM SYSTEMS, INC.', 0x00A0AF: u'WMS INDUSTRIES', 0x00A0B0: u'I-O DATA DEVICE, INC.', 0x00A0B1: u'FIRST VIRTUAL CORPORATION', 0x00A0B2: u'SHIMA SEIKI', 0x00A0B3: u'ZYKRONIX', 0x00A0B4: u'TEXAS MICROSYSTEMS, INC.', 0x00A0B5: u'3H TECHNOLOGY', 0x00A0B6: u'SANRITZ AUTOMATION CO., LTD.', 0x00A0B7: u'CORDANT, INC.', 0x00A0B8: u'SYMBIOS LOGIC INC.', 0x00A0B9: u'EAGLE TECHNOLOGY, INC.', 0x00A0BA: u'PATTON ELECTRONICS CO.', 0x00A0BB: u'HILAN GMBH', 0x00A0BC: u'VIASAT, INCORPORATED', 0x00A0BD: u'I-TECH CORP.', 0x00A0BE: u'INTEGRATED CIRCUIT SYSTEMS, INC. COMMUNICATIONS GROUP', 0x00A0BF: u'WIRELESS DATA GROUP MOTOROLA', 0x00A0C0: u'DIGITAL LINK CORP.', 0x00A0C1: u'ORTIVUS MEDICAL AB', 0x00A0C2: u'R.A. SYSTEMS CO., LTD.', 0x00A0C3: u'UNICOMPUTER GMBH', 0x00A0C4: u'CRISTIE ELECTRONICS LTD.', 0x00A0C5: u'ZYXEL COMMUNICATION', 0x00A0C6: u'QUALCOMM INCORPORATED', 0x00A0C7: u'TADIRAN TELECOMMUNICATIONS', 0x00A0C8: u'ADTRAN INC.', 0x00A0C9: u'INTEL CORPORATION - HF1-06', 0x00A0CA: u'FUJITSU DENSO LTD.', 0x00A0CB: u'ARK TELECOMMUNICATIONS, INC.', 0x00A0CC: u'LITE-ON COMMUNICATIONS, INC.', 0x00A0CD: u'DR. JOHANNES HEIDENHAIN GmbH', 0x00A0CE: u'ASTROCOM CORPORATION', 0x00A0CF: u'SOTAS, INC.', 0x00A0D0: u'TEN X TECHNOLOGY, INC.', 0x00A0D1: u'INVENTEC CORPORATION', 0x00A0D2: u'ALLIED TELESIS INTERNATIONAL CORPORATION', 0x00A0D3: u'INSTEM COMPUTER SYSTEMS, LTD.', 0x00A0D4: u'RADIOLAN, INC.', 0x00A0D5: u'SIERRA WIRELESS INC.', 0x00A0D6: u'SBE, INC.', 0x00A0D7: u'KASTEN CHASE APPLIED RESEARCH', 0x00A0D8: u'SPECTRA - TEK', 0x00A0D9: u'CONVEX COMPUTER CORPORATION', 0x00A0DA: u'INTEGRATED SYSTEMS Technology, Inc.', 0x00A0DB: u'FISHER & PAYKEL PRODUCTION', 0x00A0DC: u'O.N. 
ELECTRONIC CO., LTD.', 0x00A0DD: u'AZONIX CORPORATION', 0x00A0DE: u'YAMAHA CORPORATION', 0x00A0DF: u'STS TECHNOLOGIES, INC.', 0x00A0E0: u'TENNYSON TECHNOLOGIES PTY LTD', 0x00A0E1: u'WESTPORT RESEARCH ASSOCIATES, INC.', 0x00A0E2: u'KEISOKU GIKEN CORP.', 0x00A0E3: u'XKL SYSTEMS CORP.', 0x00A0E4: u'OPTIQUEST', 0x00A0E5: u'NHC COMMUNICATIONS', 0x00A0E6: u'DIALOGIC CORPORATION', 0x00A0E7: u'CENTRAL DATA CORPORATION', 0x00A0E8: u'REUTERS HOLDINGS PLC', 0x00A0E9: u'ELECTRONIC RETAILING SYSTEMS INTERNATIONAL', 0x00A0EA: u'ETHERCOM CORP.', 0x00A0EB: u'Encore Networks', 0x00A0EC: u'TRANSMITTON LTD.', 0x00A0ED: u'Brooks Automation, Inc.', 0x00A0EE: u'NASHOBA NETWORKS', 0x00A0EF: u'LUCIDATA LTD.', 0x00A0F0: u'TORONTO MICROELECTRONICS INC.', 0x00A0F1: u'MTI', 0x00A0F2: u'INFOTEK COMMUNICATIONS, INC.', 0x00A0F3: u'STAUBLI', 0x00A0F4: u'GE', 0x00A0F5: u'RADGUARD LTD.', 0x00A0F6: u'AutoGas Systems Inc.', 0x00A0F7: u'V.I COMPUTER CORP.', 0x00A0F8: u'SYMBOL TECHNOLOGIES, INC.', 0x00A0F9: u'BINTEC COMMUNICATIONS GMBH', 0x00A0FA: u'Marconi Communication GmbH', 0x00A0FB: u'TORAY ENGINEERING CO., LTD.', 0x00A0FC: u'IMAGE SCIENCES, INC.', 0x00A0FD: u'SCITEX DIGITAL PRINTING, INC.', 0x00A0FE: u'BOSTON TECHNOLOGY, INC.', 0x00A0FF: u'TELLABS OPERATIONS, INC.', 0x00AA00: u'INTEL CORPORATION', 0x00AA01: u'INTEL CORPORATION', 0x00AA02: u'INTEL CORPORATION', 0x00AA3C: u'OLIVETTI TELECOM SPA (OLTECO)', 0x00B009: u'Grass Valley Group', 0x00B017: u'InfoGear Technology Corp.', 0x00B019: u'Casi-Rusco', 0x00B01C: u'Westport Technologies', 0x00B01E: u'Rantic Labs, Inc.', 0x00B02A: u'ORSYS GmbH', 0x00B02D: u'ViaGate Technologies, Inc.', 0x00B03B: u'HiQ Networks', 0x00B048: u'Marconi Communications Inc.', 0x00B04A: u'Cisco Systems, Inc.', 0x00B052: u'Intellon Corporation', 0x00B064: u'Cisco Systems, Inc.', 0x00B069: u'Honewell Oy', 0x00B06D: u'Jones Futurex Inc.', 0x00B080: u'Mannesmann Ipulsys B.V.', 0x00B086: u'LocSoft Limited', 0x00B08E: u'Cisco Systems, Inc.', 0x00B091: u'Transmeta Corp.', 0x00B094: u'Alaris, Inc.', 0x00B09A: u'Morrow Technologies Corp.', 0x00B09D: u'Point Grey Research Inc.', 0x00B0AC: u'SIAE-Microelettronica S.p.A.', 0x00B0AE: u'Symmetricom', 0x00B0B3: u'Xstreamis PLC', 0x00B0C2: u'Cisco Systems, Inc.', 0x00B0C7: u'Tellabs Operations, Inc.', 0x00B0CE: u'TECHNOLOGY RESCUE', 0x00B0D0: u'Dell Computer Corp.', 0x00B0DB: u'Nextcell, Inc.', 0x00B0DF: u'Reliable Data Technology, Inc.', 0x00B0E7: u'British Federal Ltd.', 0x00B0EC: u'EACEM', 0x00B0EE: u'Ajile Systems, Inc.', 0x00B0F0: u'CALY NETWORKS', 0x00B0F5: u'NetWorth Technologies, Inc.', 0x00BAC0: u'Biometric Access Company', 0x00BB01: u'OCTOTHORPE CORP.', 0x00BBF0: u'UNGERMANN-BASS INC.', 0x00C000: u'LANOPTICS, LTD.', 0x00C001: u'DIATEK PATIENT MANAGMENT', 0x00C002: u'SERCOMM CORPORATION', 0x00C003: u'GLOBALNET COMMUNICATIONS', 0x00C004: u'JAPAN BUSINESS COMPUTER CO.LTD', 0x00C005: u'LIVINGSTON ENTERPRISES, INC.', 0x00C006: u'NIPPON AVIONICS CO., LTD.', 0x00C007: u'PINNACLE DATA SYSTEMS, INC.', 0x00C008: u'SECO SRL', 0x00C009: u'KT TECHNOLOGY (S) PTE LTD', 0x00C00A: u'MICRO CRAFT', 0x00C00B: u'NORCONTROL A.S.', 0x00C00C: u'RELIA TECHNOLGIES', 0x00C00D: u'ADVANCED LOGIC RESEARCH, INC.', 0x00C00E: u'PSITECH, INC.', 0x00C00F: u'QUANTUM SOFTWARE SYSTEMS LTD.', 0x00C010: u'HIRAKAWA HEWTECH CORP.', 0x00C011: u'INTERACTIVE COMPUTING DEVICES', 0x00C012: u'NETSPAN CORPORATION', 0x00C013: u'NETRIX', 0x00C014: u'TELEMATICS CALABASAS INT\'L,INC', 0x00C015: u'NEW MEDIA CORPORATION', 0x00C016: u'ELECTRONIC THEATRE CONTROLS', 0x00C017: u'FORTE NETWORKS', 0x00C018: 
u'LANART CORPORATION', 0x00C019: u'LEAP TECHNOLOGY, INC.', 0x00C01A: u'COROMETRICS MEDICAL SYSTEMS', 0x00C01B: u'SOCKET COMMUNICATIONS, INC.', 0x00C01C: u'INTERLINK COMMUNICATIONS LTD.', 0x00C01D: u'GRAND JUNCTION NETWORKS, INC.', 0x00C01E: u'LA FRANCAISE DES JEUX', 0x00C01F: u'S.E.R.C.E.L.', 0x00C020: u'ARCO ELECTRONIC, CONTROL LTD.', 0x00C021: u'NETEXPRESS', 0x00C022: u'LASERMASTER TECHNOLOGIES, INC.', 0x00C023: u'TUTANKHAMON ELECTRONICS', 0x00C024: u'EDEN SISTEMAS DE COMPUTACAO SA', 0x00C025: u'DATAPRODUCTS CORPORATION', 0x00C026: u'LANS TECHNOLOGY CO., LTD.', 0x00C027: u'CIPHER SYSTEMS, INC.', 0x00C028: u'JASCO CORPORATION', 0x00C029: u'Nexans Deutschland AG - ANS', 0x00C02A: u'OHKURA ELECTRIC CO., LTD.', 0x00C02B: u'GERLOFF GESELLSCHAFT FUR', 0x00C02C: u'CENTRUM COMMUNICATIONS, INC.', 0x00C02D: u'FUJI PHOTO FILM CO., LTD.', 0x00C02E: u'NETWIZ', 0x00C02F: u'OKUMA CORPORATION', 0x00C030: u'INTEGRATED ENGINEERING B. V.', 0x00C031: u'DESIGN RESEARCH SYSTEMS, INC.', 0x00C032: u'I-CUBED LIMITED', 0x00C033: u'TELEBIT COMMUNICATIONS APS', 0x00C034: u'TRANSACTION NETWORK', 0x00C035: u'QUINTAR COMPANY', 0x00C036: u'RAYTECH ELECTRONIC CORP.', 0x00C037: u'DYNATEM', 0x00C038: u'RASTER IMAGE PROCESSING SYSTEM', 0x00C039: u'Teridian Semiconductor Corporation', 0x00C03A: u'MEN-MIKRO ELEKTRONIK GMBH', 0x00C03B: u'MULTIACCESS COMPUTING CORP.', 0x00C03C: u'TOWER TECH S.R.L.', 0x00C03D: u'WIESEMANN & THEIS GMBH', 0x00C03E: u'FA. GEBR. HELLER GMBH', 0x00C03F: u'STORES AUTOMATED SYSTEMS, INC.', 0x00C040: u'ECCI', 0x00C041: u'DIGITAL TRANSMISSION SYSTEMS', 0x00C042: u'DATALUX CORP.', 0x00C043: u'STRATACOM', 0x00C044: u'EMCOM CORPORATION', 0x00C045: u'ISOLATION SYSTEMS, LTD.', 0x00C046: u'KEMITRON LTD.', 0x00C047: u'UNIMICRO SYSTEMS, INC.', 0x00C048: u'BAY TECHNICAL ASSOCIATES', 0x00C049: u'U.S. ROBOTICS, INC.', 0x00C04A: u'GROUP 2000 AG', 0x00C04B: u'CREATIVE MICROSYSTEMS', 0x00C04C: u'DEPARTMENT OF FOREIGN AFFAIRS', 0x00C04D: u'MITEC, INC.', 0x00C04E: u'COMTROL CORPORATION', 0x00C04F: u'DELL COMPUTER CORPORATION', 0x00C050: u'TOYO DENKI SEIZO K.K.', 0x00C051: u'ADVANCED INTEGRATION RESEARCH', 0x00C052: u'BURR-BROWN', 0x00C053: u'Concerto Software', 0x00C054: u'NETWORK PERIPHERALS, LTD.', 0x00C055: u'MODULAR COMPUTING TECHNOLOGIES', 0x00C056: u'SOMELEC', 0x00C057: u'MYCO ELECTRONICS', 0x00C058: u'DATAEXPERT CORP.', 0x00C059: u'NIPPON DENSO CO., LTD.', 0x00C05A: u'SEMAPHORE COMMUNICATIONS CORP.', 0x00C05B: u'NETWORKS NORTHWEST, INC.', 0x00C05C: u'ELONEX PLC', 0x00C05D: u'L&N TECHNOLOGIES', 0x00C05E: u'VARI-LITE, INC.', 0x00C05F: u'FINE-PAL COMPANY LIMITED', 0x00C060: u'ID SCANDINAVIA AS', 0x00C061: u'SOLECTEK CORPORATION', 0x00C062: u'IMPULSE TECHNOLOGY', 0x00C063: u'MORNING STAR TECHNOLOGIES, INC', 0x00C064: u'GENERAL DATACOMM IND. INC.', 0x00C065: u'SCOPE COMMUNICATIONS, INC.', 0x00C066: u'DOCUPOINT, INC.', 0x00C067: u'UNITED BARCODE INDUSTRIES', 0x00C068: u'PHILIP DRAKE ELECTRONICS LTD.', 0x00C069: u'Axxcelera Broadband Wireless', 0x00C06A: u'ZAHNER-ELEKTRIK GMBH & CO. 
KG', 0x00C06B: u'OSI PLUS CORPORATION', 0x00C06C: u'SVEC COMPUTER CORP.', 0x00C06D: u'BOCA RESEARCH, INC.', 0x00C06E: u'HAFT TECHNOLOGY, INC.', 0x00C06F: u'KOMATSU LTD.', 0x00C070: u'SECTRA SECURE-TRANSMISSION AB', 0x00C071: u'AREANEX COMMUNICATIONS, INC.', 0x00C072: u'KNX LTD.', 0x00C073: u'XEDIA CORPORATION', 0x00C074: u'TOYODA AUTOMATIC LOOM', 0x00C075: u'XANTE CORPORATION', 0x00C076: u'I-DATA INTERNATIONAL A-S', 0x00C077: u'DAEWOO TELECOM LTD.', 0x00C078: u'COMPUTER SYSTEMS ENGINEERING', 0x00C079: u'FONSYS CO.,LTD.', 0x00C07A: u'PRIVA B.V.', 0x00C07B: u'ASCEND COMMUNICATIONS, INC.', 0x00C07C: u'HIGHTECH INFORMATION', 0x00C07D: u'RISC DEVELOPMENTS LTD.', 0x00C07E: u'KUBOTA CORPORATION ELECTRONIC', 0x00C07F: u'NUPON COMPUTING CORP.', 0x00C080: u'NETSTAR, INC.', 0x00C081: u'METRODATA LTD.', 0x00C082: u'MOORE PRODUCTS CO.', 0x00C083: u'TRACE MOUNTAIN PRODUCTS, INC.', 0x00C084: u'DATA LINK CORP. LTD.', 0x00C085: u'ELECTRONICS FOR IMAGING, INC.', 0x00C086: u'THE LYNK CORPORATION', 0x00C087: u'UUNET TECHNOLOGIES, INC.', 0x00C088: u'EKF ELEKTRONIK GMBH', 0x00C089: u'TELINDUS DISTRIBUTION', 0x00C08A: u'LAUTERBACH DATENTECHNIK GMBH', 0x00C08B: u'RISQ MODULAR SYSTEMS, INC.', 0x00C08C: u'PERFORMANCE TECHNOLOGIES, INC.', 0x00C08D: u'TRONIX PRODUCT DEVELOPMENT', 0x00C08E: u'NETWORK INFORMATION TECHNOLOGY', 0x00C08F: u'Matsushita Electric Works, Ltd.', 0x00C090: u'PRAIM S.R.L.', 0x00C091: u'JABIL CIRCUIT, INC.', 0x00C092: u'MENNEN MEDICAL INC.', 0x00C093: u'ALTA RESEARCH CORP.', 0x00C094: u'VMX INC.', 0x00C095: u'ZNYX', 0x00C096: u'TAMURA CORPORATION', 0x00C097: u'ARCHIPEL SA', 0x00C098: u'CHUNTEX ELECTRONIC CO., LTD.', 0x00C099: u'YOSHIKI INDUSTRIAL CO.,LTD.', 0x00C09A: u'PHOTONICS CORPORATION', 0x00C09B: u'RELIANCE COMM/TEC, R-TEC', 0x00C09C: u'TOA ELECTRONIC LTD.', 0x00C09D: u'DISTRIBUTED SYSTEMS INT\'L, INC', 0x00C09E: u'CACHE COMPUTERS, INC.', 0x00C09F: u'QUANTA COMPUTER, INC.', 0x00C0A0: u'ADVANCE MICRO RESEARCH, INC.', 0x00C0A1: u'TOKYO DENSHI SEKEI CO.', 0x00C0A2: u'INTERMEDIUM A/S', 0x00C0A3: u'DUAL ENTERPRISES CORPORATION', 0x00C0A4: u'UNIGRAF OY', 0x00C0A5: u'DICKENS DATA SYSTEMS', 0x00C0A6: u'EXICOM AUSTRALIA PTY. LTD', 0x00C0A7: u'SEEL LTD.', 0x00C0A8: u'GVC CORPORATION', 0x00C0A9: u'BARRON MCCANN LTD.', 0x00C0AA: u'SILICON VALLEY COMPUTER', 0x00C0AB: u'Telco Systems, Inc.', 0x00C0AC: u'GAMBIT COMPUTER COMMUNICATIONS', 0x00C0AD: u'MARBEN COMMUNICATION SYSTEMS', 0x00C0AE: u'TOWERCOM CO. INC. DBA PC HOUSE', 0x00C0AF: u'TEKLOGIX INC.', 0x00C0B0: u'GCC TECHNOLOGIES,INC.', 0x00C0B1: u'GENIUS NET CO.', 0x00C0B2: u'NORAND CORPORATION', 0x00C0B3: u'COMSTAT DATACOMM CORPORATION', 0x00C0B4: u'MYSON TECHNOLOGY, INC.', 0x00C0B5: u'CORPORATE NETWORK SYSTEMS,INC.', 0x00C0B6: u'Adaptec, Inc.', 0x00C0B7: u'AMERICAN POWER CONVERSION CORP', 0x00C0B8: u'FRASER\'S HILL LTD.', 0x00C0B9: u'FUNK SOFTWARE, INC.', 0x00C0BA: u'NETVANTAGE', 0x00C0BB: u'FORVAL CREATIVE, INC.', 0x00C0BC: u'TELECOM AUSTRALIA/CSSC', 0x00C0BD: u'INEX TECHNOLOGIES, INC.', 0x00C0BE: u'ALCATEL - SEL', 0x00C0BF: u'TECHNOLOGY CONCEPTS, LTD.', 0x00C0C0: u'SHORE MICROSYSTEMS, INC.', 0x00C0C1: u'QUAD/GRAPHICS, INC.', 0x00C0C2: u'INFINITE NETWORKS LTD.', 0x00C0C3: u'ACUSON COMPUTED SONOGRAPHY', 0x00C0C4: u'COMPUTER OPERATIONAL', 0x00C0C5: u'SID INFORMATICA', 0x00C0C6: u'PERSONAL MEDIA CORP.', 0x00C0C7: u'SPARKTRUM MICROSYSTEMS, INC.', 0x00C0C8: u'MICRO BYTE PTY. 
LTD.', 0x00C0C9: u'ELSAG BAILEY PROCESS', 0x00C0CA: u'ALFA, INC.', 0x00C0CB: u'CONTROL TECHNOLOGY CORPORATION', 0x00C0CC: u'TELESCIENCES CO SYSTEMS, INC.', 0x00C0CD: u'COMELTA, S.A.', 0x00C0CE: u'CEI SYSTEMS & ENGINEERING PTE', 0x00C0CF: u'IMATRAN VOIMA OY', 0x00C0D0: u'RATOC SYSTEM INC.', 0x00C0D1: u'COMTREE TECHNOLOGY CORPORATION', 0x00C0D2: u'SYNTELLECT, INC.', 0x00C0D3: u'OLYMPUS IMAGE SYSTEMS, INC.', 0x00C0D4: u'AXON NETWORKS, INC.', 0x00C0D5: u'QUANCOM ELECTRONIC GMBH', 0x00C0D6: u'J1 SYSTEMS, INC.', 0x00C0D7: u'TAIWAN TRADING CENTER DBA', 0x00C0D8: u'UNIVERSAL DATA SYSTEMS', 0x00C0D9: u'QUINTE NETWORK CONFIDENTIALITY', 0x00C0DA: u'NICE SYSTEMS LTD.', 0x00C0DB: u'IPC CORPORATION (PTE) LTD.', 0x00C0DC: u'EOS TECHNOLOGIES, INC.', 0x00C0DD: u'QLogic Corporation', 0x00C0DE: u'ZCOMM, INC.', 0x00C0DF: u'KYE Systems Corp.', 0x00C0E0: u'DSC COMMUNICATION CORP.', 0x00C0E1: u'SONIC SOLUTIONS', 0x00C0E2: u'CALCOMP, INC.', 0x00C0E3: u'OSITECH COMMUNICATIONS, INC.', 0x00C0E4: u'SIEMENS BUILDING', 0x00C0E5: u'GESPAC, S.A.', 0x00C0E6: u'Verilink Corporation', 0x00C0E7: u'FIBERDATA AB', 0x00C0E8: u'PLEXCOM, INC.', 0x00C0E9: u'OAK SOLUTIONS, LTD.', 0x00C0EA: u'ARRAY TECHNOLOGY LTD.', 0x00C0EB: u'SEH COMPUTERTECHNIK GMBH', 0x00C0EC: u'DAUPHIN TECHNOLOGY', 0x00C0ED: u'US ARMY ELECTRONIC', 0x00C0EE: u'KYOCERA CORPORATION', 0x00C0EF: u'ABIT CORPORATION', 0x00C0F0: u'KINGSTON TECHNOLOGY CORP.', 0x00C0F1: u'SHINKO ELECTRIC CO., LTD.', 0x00C0F2: u'TRANSITION NETWORKS', 0x00C0F3: u'NETWORK COMMUNICATIONS CORP.', 0x00C0F4: u'INTERLINK SYSTEM CO., LTD.', 0x00C0F5: u'METACOMP, INC.', 0x00C0F6: u'CELAN TECHNOLOGY INC.', 0x00C0F7: u'ENGAGE COMMUNICATION, INC.', 0x00C0F8: u'ABOUT COMPUTING INC.', 0x00C0F9: u'Motorola Embedded Computing Group', 0x00C0FA: u'CANARY COMMUNICATIONS, INC.', 0x00C0FB: u'ADVANCED TECHNOLOGY LABS', 0x00C0FC: u'ELASTIC REALITY, INC.', 0x00C0FD: u'PROSUM', 0x00C0FE: u'APTEC COMPUTER SYSTEMS, INC.', 0x00C0FF: u'DOT HILL SYSTEMS CORPORATION', 0x00CBBD: u'Cambridge Broadband Ltd.', 0x00CF1C: u'COMMUNICATION MACHINERY CORP.', 0x00D000: u'FERRAN SCIENTIFIC, INC.', 0x00D001: u'VST TECHNOLOGIES, INC.', 0x00D002: u'DITECH CORPORATION', 0x00D003: u'COMDA ENTERPRISES CORP.', 0x00D004: u'PENTACOM LTD.', 0x00D005: u'ZHS ZEITMANAGEMENTSYSTEME', 0x00D006: u'CISCO SYSTEMS, INC.', 0x00D007: u'MIC ASSOCIATES, INC.', 0x00D008: u'MACTELL CORPORATION', 0x00D009: u'HSING TECH. ENTERPRISE CO. LTD', 0x00D00A: u'LANACCESS TELECOM S.A.', 0x00D00B: u'RHK TECHNOLOGY, INC.', 0x00D00C: u'SNIJDER MICRO SYSTEMS', 0x00D00D: u'MICROMERITICS INSTRUMENT', 0x00D00E: u'PLURIS, INC.', 0x00D00F: u'SPEECH DESIGN GMBH', 0x00D010: u'CONVERGENT NETWORKS, INC.', 0x00D011: u'PRISM VIDEO, INC.', 0x00D012: u'GATEWORKS CORP.', 0x00D013: u'PRIMEX AEROSPACE COMPANY', 0x00D014: u'ROOT, INC.', 0x00D015: u'UNIVEX MICROTECHNOLOGY CORP.', 0x00D016: u'SCM MICROSYSTEMS, INC.', 0x00D017: u'SYNTECH INFORMATION CO., LTD.', 0x00D018: u'QWES. COM, INC.', 0x00D019: u'DAINIPPON SCREEN CORPORATE', 0x00D01A: u'URMET TLC S.P.A.', 0x00D01B: u'MIMAKI ENGINEERING CO., LTD.', 0x00D01C: u'SBS TECHNOLOGIES,', 0x00D01D: u'FURUNO ELECTRIC CO., LTD.', 0x00D01E: u'PINGTEL CORP.', 0x00D01F: u'CTAM PTY. 
LTD.', 0x00D020: u'AIM SYSTEM, INC.', 0x00D021: u'REGENT ELECTRONICS CORP.', 0x00D022: u'INCREDIBLE TECHNOLOGIES, INC.', 0x00D023: u'INFORTREND TECHNOLOGY, INC.', 0x00D024: u'Cognex Corporation', 0x00D025: u'XROSSTECH, INC.', 0x00D026: u'HIRSCHMANN AUSTRIA GMBH', 0x00D027: u'APPLIED AUTOMATION, INC.', 0x00D028: u'OMNEON VIDEO NETWORKS', 0x00D029: u'WAKEFERN FOOD CORPORATION', 0x00D02A: u'Voxent Systems Ltd.', 0x00D02B: u'JETCELL, INC.', 0x00D02C: u'CAMPBELL SCIENTIFIC, INC.', 0x00D02D: u'ADEMCO', 0x00D02E: u'COMMUNICATION AUTOMATION CORP.', 0x00D02F: u'VLSI TECHNOLOGY INC.', 0x00D030: u'SAFETRAN SYSTEMS CORP.', 0x00D031: u'INDUSTRIAL LOGIC CORPORATION', 0x00D032: u'YANO ELECTRIC CO., LTD.', 0x00D033: u'DALIAN DAXIAN NETWORK', 0x00D034: u'ORMEC SYSTEMS CORP.', 0x00D035: u'BEHAVIOR TECH. COMPUTER CORP.', 0x00D036: u'TECHNOLOGY ATLANTA CORP.', 0x00D037: u'PHILIPS-DVS-LO BDR', 0x00D038: u'FIVEMERE, LTD.', 0x00D039: u'UTILICOM, INC.', 0x00D03A: u'ZONEWORX, INC.', 0x00D03B: u'VISION PRODUCTS PTY. LTD.', 0x00D03C: u'Vieo, Inc.', 0x00D03D: u'GALILEO TECHNOLOGY, LTD.', 0x00D03E: u'ROCKETCHIPS, INC.', 0x00D03F: u'AMERICAN COMMUNICATION', 0x00D040: u'SYSMATE CO., LTD.', 0x00D041: u'AMIGO TECHNOLOGY CO., LTD.', 0x00D042: u'MAHLO GMBH & CO. UG', 0x00D043: u'ZONAL RETAIL DATA SYSTEMS', 0x00D044: u'ALIDIAN NETWORKS, INC.', 0x00D045: u'KVASER AB', 0x00D046: u'DOLBY LABORATORIES, INC.', 0x00D047: u'XN TECHNOLOGIES', 0x00D048: u'ECTON, INC.', 0x00D049: u'IMPRESSTEK CO., LTD.', 0x00D04A: u'PRESENCE TECHNOLOGY GMBH', 0x00D04B: u'LA CIE GROUP S.A.', 0x00D04C: u'EUROTEL TELECOM LTD.', 0x00D04D: u'DIV OF RESEARCH & STATISTICS', 0x00D04E: u'LOGIBAG', 0x00D04F: u'BITRONICS, INC.', 0x00D050: u'ISKRATEL', 0x00D051: u'O2 MICRO, INC.', 0x00D052: u'ASCEND COMMUNICATIONS, INC.', 0x00D053: u'CONNECTED SYSTEMS', 0x00D054: u'SAS INSTITUTE INC.', 0x00D055: u'KATHREIN-WERKE KG', 0x00D056: u'SOMAT CORPORATION', 0x00D057: u'ULTRAK, INC.', 0x00D058: u'CISCO SYSTEMS, INC.', 0x00D059: u'AMBIT MICROSYSTEMS CORP.', 0x00D05A: u'SYMBIONICS, LTD.', 0x00D05B: u'ACROLOOP MOTION CONTROL', 0x00D05C: u'TECHNOTREND SYSTEMTECHNIK GMBH', 0x00D05D: u'INTELLIWORXX, INC.', 0x00D05E: u'STRATABEAM TECHNOLOGY, INC.', 0x00D05F: u'VALCOM, INC.', 0x00D060: u'PANASONIC EUROPEAN', 0x00D061: u'TREMON ENTERPRISES CO., LTD.', 0x00D062: u'DIGIGRAM', 0x00D063: u'CISCO SYSTEMS, INC.', 0x00D064: u'MULTITEL', 0x00D065: u'TOKO ELECTRIC', 0x00D066: u'WINTRISS ENGINEERING CORP.', 0x00D067: u'CAMPIO COMMUNICATIONS', 0x00D068: u'IWILL CORPORATION', 0x00D069: u'TECHNOLOGIC SYSTEMS', 0x00D06A: u'LINKUP SYSTEMS CORPORATION', 0x00D06B: u'SR TELECOM INC.', 0x00D06C: u'SHAREWAVE, INC.', 0x00D06D: u'ACRISON, INC.', 0x00D06E: u'TRENDVIEW RECORDERS LTD.', 0x00D06F: u'KMC CONTROLS', 0x00D070: u'LONG WELL ELECTRONICS CORP.', 0x00D071: u'ECHELON CORP.', 0x00D072: u'BROADLOGIC', 0x00D073: u'ACN ADVANCED COMMUNICATIONS', 0x00D074: u'TAQUA SYSTEMS, INC.', 0x00D075: u'ALARIS MEDICAL SYSTEMS, INC.', 0x00D076: u'Merrill Lynch & Co., Inc.', 0x00D077: u'LUCENT TECHNOLOGIES', 0x00D078: u'ELTEX OF SWEDEN AB', 0x00D079: u'CISCO SYSTEMS, INC.', 0x00D07A: u'AMAQUEST COMPUTER CORP.', 0x00D07B: u'COMCAM INTERNATIONAL LTD.', 0x00D07C: u'KOYO ELECTRONICS INC. 
CO.,LTD.', 0x00D07D: u'COSINE COMMUNICATIONS', 0x00D07E: u'KEYCORP LTD.', 0x00D07F: u'STRATEGY & TECHNOLOGY, LIMITED', 0x00D080: u'EXABYTE CORPORATION', 0x00D081: u'REAL TIME DEVICES USA, INC.', 0x00D082: u'IOWAVE INC.', 0x00D083: u'INVERTEX, INC.', 0x00D084: u'NEXCOMM SYSTEMS, INC.', 0x00D085: u'OTIS ELEVATOR COMPANY', 0x00D086: u'FOVEON, INC.', 0x00D087: u'MICROFIRST INC.', 0x00D088: u'Terayon Communications Systems', 0x00D089: u'DYNACOLOR, INC.', 0x00D08A: u'PHOTRON USA', 0x00D08B: u'ADVA Limited', 0x00D08C: u'GENOA TECHNOLOGY, INC.', 0x00D08D: u'PHOENIX GROUP, INC.', 0x00D08E: u'NVISION INC.', 0x00D08F: u'ARDENT TECHNOLOGIES, INC.', 0x00D090: u'CISCO SYSTEMS, INC.', 0x00D091: u'SMARTSAN SYSTEMS, INC.', 0x00D092: u'GLENAYRE WESTERN MULTIPLEX', 0x00D093: u'TQ - COMPONENTS GMBH', 0x00D094: u'TIMELINE VISTA, INC.', 0x00D095: u'Alcatel North America ESD', 0x00D096: u'3COM EUROPE LTD.', 0x00D097: u'CISCO SYSTEMS, INC.', 0x00D098: u'Photon Dynamics Canada Inc.', 0x00D099: u'ELCARD OY', 0x00D09A: u'FILANET CORPORATION', 0x00D09B: u'SPECTEL LTD.', 0x00D09C: u'KAPADIA COMMUNICATIONS', 0x00D09D: u'VERIS INDUSTRIES', 0x00D09E: u'2WIRE, INC.', 0x00D09F: u'NOVTEK TEST SYSTEMS', 0x00D0A0: u'MIPS DENMARK', 0x00D0A1: u'OSKAR VIERLING GMBH + CO. KG', 0x00D0A2: u'INTEGRATED DEVICE', 0x00D0A3: u'VOCAL DATA, INC.', 0x00D0A4: u'ALANTRO COMMUNICATIONS', 0x00D0A5: u'AMERICAN ARIUM', 0x00D0A6: u'LANBIRD TECHNOLOGY CO., LTD.', 0x00D0A7: u'TOKYO SOKKI KENKYUJO CO., LTD.', 0x00D0A8: u'NETWORK ENGINES, INC.', 0x00D0A9: u'SHINANO KENSHI CO., LTD.', 0x00D0AA: u'CHASE COMMUNICATIONS', 0x00D0AB: u'DELTAKABEL TELECOM CV', 0x00D0AC: u'GRAYSON WIRELESS', 0x00D0AD: u'TL INDUSTRIES', 0x00D0AE: u'ORESIS COMMUNICATIONS, INC.', 0x00D0AF: u'CUTLER-HAMMER, INC.', 0x00D0B0: u'BITSWITCH LTD.', 0x00D0B1: u'OMEGA ELECTRONICS SA', 0x00D0B2: u'XIOTECH CORPORATION', 0x00D0B3: u'DRS FLIGHT SAFETY AND', 0x00D0B4: u'KATSUJIMA CO., LTD.', 0x00D0B5: u'IPricot formerly DotCom', 0x00D0B6: u'CRESCENT NETWORKS, INC.', 0x00D0B7: u'INTEL CORPORATION', 0x00D0B8: u'Iomega Corporation', 0x00D0B9: u'MICROTEK INTERNATIONAL, INC.', 0x00D0BA: u'CISCO SYSTEMS, INC.', 0x00D0BB: u'CISCO SYSTEMS, INC.', 0x00D0BC: u'CISCO SYSTEMS, INC.', 0x00D0BD: u'SICAN GMBH', 0x00D0BE: u'EMUTEC INC.', 0x00D0BF: u'PIVOTAL TECHNOLOGIES', 0x00D0C0: u'CISCO SYSTEMS, INC.', 0x00D0C1: u'HARMONIC DATA SYSTEMS, LTD.', 0x00D0C2: u'BALTHAZAR TECHNOLOGY AB', 0x00D0C3: u'VIVID TECHNOLOGY PTE, LTD.', 0x00D0C4: u'TERATECH CORPORATION', 0x00D0C5: u'COMPUTATIONAL SYSTEMS, INC.', 0x00D0C6: u'THOMAS & BETTS CORP.', 0x00D0C7: u'PATHWAY, INC.', 0x00D0C8: u'I/O CONSULTING A/S', 0x00D0C9: u'ADVANTECH CO., LTD.', 0x00D0CA: u'INTRINSYC SOFTWARE INC.', 0x00D0CB: u'DASAN CO., LTD.', 0x00D0CC: u'TECHNOLOGIES LYRE INC.', 0x00D0CD: u'ATAN TECHNOLOGY INC.', 0x00D0CE: u'ASYST ELECTRONIC', 0x00D0CF: u'MORETON BAY', 0x00D0D0: u'ZHONGXING TELECOM LTD.', 0x00D0D1: u'SIROCCO SYSTEMS, INC.', 0x00D0D2: u'EPILOG CORPORATION', 0x00D0D3: u'CISCO SYSTEMS, INC.', 0x00D0D4: u'V-BITS, INC.', 0x00D0D5: u'GRUNDIG AG', 0x00D0D6: u'AETHRA TELECOMUNICAZIONI', 0x00D0D7: u'B2C2, INC.', 0x00D0D8: u'3Com Corporation', 0x00D0D9: u'DEDICATED MICROCOMPUTERS', 0x00D0DA: u'TAICOM DATA SYSTEMS CO., LTD.', 0x00D0DB: u'MCQUAY INTERNATIONAL', 0x00D0DC: u'MODULAR MINING SYSTEMS, INC.', 0x00D0DD: u'SUNRISE TELECOM, INC.', 0x00D0DE: u'PHILIPS MULTIMEDIA NETWORK', 0x00D0DF: u'KUZUMI ELECTRONICS, INC.', 0x00D0E0: u'DOOIN ELECTRONICS CO.', 0x00D0E1: u'AVIONITEK ISRAEL INC.', 0x00D0E2: u'MRT MICRO, INC.', 0x00D0E3: u'ELE-CHEM ENGINEERING 
CO., LTD.', 0x00D0E4: u'CISCO SYSTEMS, INC.', 0x00D0E5: u'SOLIDUM SYSTEMS CORP.', 0x00D0E6: u'IBOND INC.', 0x00D0E7: u'VCON TELECOMMUNICATION LTD.', 0x00D0E8: u'MAC SYSTEM CO., LTD.', 0x00D0E9: u'ADVANTAGE CENTURY', 0x00D0EA: u'NEXTONE COMMUNICATIONS, INC.', 0x00D0EB: u'LIGHTERA NETWORKS, INC.', 0x00D0EC: u'NAKAYO TELECOMMUNICATIONS, INC', 0x00D0ED: u'XIOX', 0x00D0EE: u'DICTAPHONE CORPORATION', 0x00D0EF: u'IGT', 0x00D0F0: u'CONVISION TECHNOLOGY GMBH', 0x00D0F1: u'SEGA ENTERPRISES, LTD.', 0x00D0F2: u'MONTEREY NETWORKS', 0x00D0F3: u'SOLARI DI UDINE SPA', 0x00D0F4: u'CARINTHIAN TECH INSTITUTE', 0x00D0F5: u'ORANGE MICRO, INC.', 0x00D0F6: u'Alcatel Canada', 0x00D0F7: u'NEXT NETS CORPORATION', 0x00D0F8: u'FUJIAN STAR TERMINAL', 0x00D0F9: u'ACUTE COMMUNICATIONS CORP.', 0x00D0FA: u'RACAL GUARDATA', 0x00D0FB: u'TEK MICROSYSTEMS, INCORPORATED', 0x00D0FC: u'GRANITE MICROSYSTEMS', 0x00D0FD: u'OPTIMA TELE.COM, INC.', 0x00D0FE: u'ASTRAL POINT', 0x00D0FF: u'CISCO SYSTEMS, INC.', 0x00DD00: u'UNGERMANN-BASS INC.', 0x00DD01: u'UNGERMANN-BASS INC.', 0x00DD02: u'UNGERMANN-BASS INC.', 0x00DD03: u'UNGERMANN-BASS INC.', 0x00DD04: u'UNGERMANN-BASS INC.', 0x00DD05: u'UNGERMANN-BASS INC.', 0x00DD06: u'UNGERMANN-BASS INC.', 0x00DD07: u'UNGERMANN-BASS INC.', 0x00DD08: u'UNGERMANN-BASS INC.', 0x00DD09: u'UNGERMANN-BASS INC.', 0x00DD0A: u'UNGERMANN-BASS INC.', 0x00DD0B: u'UNGERMANN-BASS INC.', 0x00DD0C: u'UNGERMANN-BASS INC.', 0x00DD0D: u'UNGERMANN-BASS INC.', 0x00DD0E: u'UNGERMANN-BASS INC.', 0x00DD0F: u'UNGERMANN-BASS INC.', 0x00E000: u'FUJITSU, LTD', 0x00E001: u'STRAND LIGHTING LIMITED', 0x00E002: u'CROSSROADS SYSTEMS, INC.', 0x00E003: u'NOKIA WIRELESS BUSINESS COMMUN', 0x00E004: u'PMC-SIERRA, INC.', 0x00E005: u'TECHNICAL CORP.', 0x00E006: u'SILICON INTEGRATED SYS. CORP.', 0x00E007: u'NETWORK ALCHEMY LTD.', 0x00E008: u'AMAZING CONTROLS! INC.', 0x00E009: u'MARATHON TECHNOLOGIES CORP.', 0x00E00A: u'DIBA, INC.', 0x00E00B: u'ROOFTOP COMMUNICATIONS CORP.', 0x00E00C: u'MOTOROLA', 0x00E00D: u'RADIANT SYSTEMS', 0x00E00E: u'AVALON IMAGING SYSTEMS, INC.', 0x00E00F: u'SHANGHAI BAUD DATA', 0x00E010: u'HESS SB-AUTOMATENBAU GmbH', 0x00E011: u'UNIDEN SAN DIEGO R&D CENTER, INC.', 0x00E012: u'PLUTO TECHNOLOGIES INTERNATIONAL INC.', 0x00E013: u'EASTERN ELECTRONIC CO., LTD.', 0x00E014: u'CISCO SYSTEMS, INC.', 0x00E015: u'HEIWA CORPORATION', 0x00E016: u'RAPID CITY COMMUNICATIONS', 0x00E017: u'EXXACT GmbH', 0x00E018: u'ASUSTEK COMPUTER INC.', 0x00E019: u'ING. GIORDANO ELETTRONICA', 0x00E01A: u'COMTEC SYSTEMS. CO., LTD.', 0x00E01B: u'SPHERE COMMUNICATIONS, INC.', 0x00E01C: u'MOBILITY ELECTRONICSY', 0x00E01D: u'WebTV NETWORKS, INC.', 0x00E01E: u'CISCO SYSTEMS, INC.', 0x00E01F: u'AVIDIA Systems, Inc.', 0x00E020: u'TECNOMEN OY', 0x00E021: u'FREEGATE CORP.', 0x00E022: u'Analog Devices Inc.', 0x00E023: u'TELRAD', 0x00E024: u'GADZOOX NETWORKS', 0x00E025: u'dit CO., LTD.', 0x00E026: u'Redlake MASD LLC', 0x00E027: u'DUX, INC.', 0x00E028: u'APTIX CORPORATION', 0x00E029: u'STANDARD MICROSYSTEMS CORP.', 0x00E02A: u'TANDBERG TELEVISION AS', 0x00E02B: u'EXTREME NETWORKS', 0x00E02C: u'AST COMPUTER', 0x00E02D: u'InnoMediaLogic, Inc.', 0x00E02E: u'SPC ELECTRONICS CORPORATION', 0x00E02F: u'MCNS HOLDINGS, L.P.', 0x00E030: u'MELITA INTERNATIONAL CORP.', 0x00E031: u'HAGIWARA ELECTRIC CO., LTD.', 0x00E032: u'MISYS FINANCIAL SYSTEMS, LTD.', 0x00E033: u'E.E.P.D. 
GmbH', 0x00E034: u'CISCO SYSTEMS, INC.', 0x00E035: u'LOUGHBOROUGH SOUND IMAGES, PLC', 0x00E036: u'PIONEER CORPORATION', 0x00E037: u'CENTURY CORPORATION', 0x00E038: u'PROXIMA CORPORATION', 0x00E039: u'PARADYNE CORP.', 0x00E03A: u'CABLETRON SYSTEMS, INC.', 0x00E03B: u'PROMINET CORPORATION', 0x00E03C: u'AdvanSys', 0x00E03D: u'FOCON ELECTRONIC SYSTEMS A/S', 0x00E03E: u'ALFATECH, INC.', 0x00E03F: u'JATON CORPORATION', 0x00E040: u'DeskStation Technology, Inc.', 0x00E041: u'CSPI', 0x00E042: u'Pacom Systems Ltd.', 0x00E043: u'VitalCom', 0x00E044: u'LSICS CORPORATION', 0x00E045: u'TOUCHWAVE, INC.', 0x00E046: u'BENTLY NEVADA CORP.', 0x00E047: u'INFOCUS SYSTEMS', 0x00E048: u'SDL COMMUNICATIONS, INC.', 0x00E049: u'MICROWI ELECTRONIC GmbH', 0x00E04A: u'ENHANCED MESSAGING SYSTEMS, INC', 0x00E04B: u'JUMP INDUSTRIELLE COMPUTERTECHNIK GmbH', 0x00E04C: u'REALTEK SEMICONDUCTOR CORP.', 0x00E04D: u'INTERNET INITIATIVE JAPAN, INC', 0x00E04E: u'SANYO DENKI CO., LTD.', 0x00E04F: u'CISCO SYSTEMS, INC.', 0x00E050: u'EXECUTONE INFORMATION SYSTEMS, INC.', 0x00E051: u'TALX CORPORATION', 0x00E052: u'FOUNDRY NETWORKS, INC.', 0x00E053: u'CELLPORT LABS, INC.', 0x00E054: u'KODAI HITEC CO., LTD.', 0x00E055: u'INGENIERIA ELECTRONICA COMERCIAL INELCOM S.A.', 0x00E056: u'HOLONTECH CORPORATION', 0x00E057: u'HAN MICROTELECOM. CO., LTD.', 0x00E058: u'PHASE ONE DENMARK A/S', 0x00E059: u'CONTROLLED ENVIRONMENTS, LTD.', 0x00E05A: u'GALEA NETWORK SECURITY', 0x00E05B: u'WEST END SYSTEMS CORP.', 0x00E05C: u'MATSUSHITA KOTOBUKI ELECTRONICS INDUSTRIES, LTD.', 0x00E05D: u'UNITEC CO., LTD.', 0x00E05E: u'JAPAN AVIATION ELECTRONICS INDUSTRY, LTD.', 0x00E05F: u'e-Net, Inc.', 0x00E060: u'SHERWOOD', 0x00E061: u'EdgePoint Networks, Inc.', 0x00E062: u'HOST ENGINEERING', 0x00E063: u'CABLETRON - YAGO SYSTEMS, INC.', 0x00E064: u'SAMSUNG ELECTRONICS', 0x00E065: u'OPTICAL ACCESS INTERNATIONAL', 0x00E066: u'ProMax Systems, Inc.', 0x00E067: u'eac AUTOMATION-CONSULTING GmbH', 0x00E068: u'MERRIMAC SYSTEMS INC.', 0x00E069: u'JAYCOR', 0x00E06A: u'KAPSCH AG', 0x00E06B: u'W&G SPECIAL PRODUCTS', 0x00E06C: u'AEP Systems International Ltd', 0x00E06D: u'COMPUWARE CORPORATION', 0x00E06E: u'FAR SYSTEMS S.p.A.', 0x00E06F: u'Terayon Communications Systems', 0x00E070: u'DH TECHNOLOGY', 0x00E071: u'EPIS MICROCOMPUTER', 0x00E072: u'LYNK', 0x00E073: u'NATIONAL AMUSEMENT NETWORK, INC.', 0x00E074: u'TIERNAN COMMUNICATIONS, INC.', 0x00E075: u'Verilink Corporation', 0x00E076: u'DEVELOPMENT CONCEPTS, INC.', 0x00E077: u'WEBGEAR, INC.', 0x00E078: u'BERKELEY NETWORKS', 0x00E079: u'A.T.N.R.', 0x00E07A: u'MIKRODIDAKT AB', 0x00E07B: u'BAY NETWORKS', 0x00E07C: u'METTLER-TOLEDO, INC.', 0x00E07D: u'NETRONIX, INC.', 0x00E07E: u'WALT DISNEY IMAGINEERING', 0x00E07F: u'LOGISTISTEM s.r.l.', 0x00E080: u'CONTROL RESOURCES CORPORATION', 0x00E081: u'TYAN COMPUTER CORP.', 0x00E082: u'ANERMA', 0x00E083: u'JATO TECHNOLOGIES, INC.', 0x00E084: u'COMPULITE R&D', 0x00E085: u'GLOBAL MAINTECH, INC.', 0x00E086: u'CYBEX COMPUTER PRODUCTS', 0x00E087: u'LeCroy - Networking Productions Division', 0x00E088: u'LTX CORPORATION', 0x00E089: u'ION Networks, Inc.', 0x00E08A: u'GEC AVERY, LTD.', 0x00E08B: u'QLogic Corp.', 0x00E08C: u'NEOPARADIGM LABS, INC.', 0x00E08D: u'PRESSURE SYSTEMS, INC.', 0x00E08E: u'UTSTARCOM', 0x00E08F: u'CISCO SYSTEMS, INC.', 0x00E090: u'BECKMAN LAB. 
AUTOMATION DIV.', 0x00E091: u'LG ELECTRONICS, INC.', 0x00E092: u'ADMTEK INCORPORATED', 0x00E093: u'ACKFIN NETWORKS', 0x00E094: u'OSAI SRL', 0x00E095: u'ADVANCED-VISION TECHNOLGIES CORP.', 0x00E096: u'SHIMADZU CORPORATION', 0x00E097: u'CARRIER ACCESS CORPORATION', 0x00E098: u'AboCom Systems, Inc.', 0x00E099: u'SAMSON AG', 0x00E09A: u'POSITRON INDUSTRIES, INC.', 0x00E09B: u'ENGAGE NETWORKS, INC.', 0x00E09C: u'MII', 0x00E09D: u'SARNOFF CORPORATION', 0x00E09E: u'QUANTUM CORPORATION', 0x00E09F: u'PIXEL VISION', 0x00E0A0: u'WILTRON CO.', 0x00E0A1: u'HIMA PAUL HILDEBRANDT GmbH Co. KG', 0x00E0A2: u'MICROSLATE INC.', 0x00E0A3: u'CISCO SYSTEMS, INC.', 0x00E0A4: u'ESAOTE S.p.A.', 0x00E0A5: u'ComCore Semiconductor, Inc.', 0x00E0A6: u'TELOGY NETWORKS, INC.', 0x00E0A7: u'IPC INFORMATION SYSTEMS, INC.', 0x00E0A8: u'SAT GmbH & Co.', 0x00E0A9: u'FUNAI ELECTRIC CO., LTD.', 0x00E0AA: u'ELECTROSONIC LTD.', 0x00E0AB: u'DIMAT S.A.', 0x00E0AC: u'MIDSCO, INC.', 0x00E0AD: u'EES TECHNOLOGY, LTD.', 0x00E0AE: u'XAQTI CORPORATION', 0x00E0AF: u'GENERAL DYNAMICS INFORMATION SYSTEMS', 0x00E0B0: u'CISCO SYSTEMS, INC.', 0x00E0B1: u'Alcatel North America ESD', 0x00E0B2: u'TELMAX COMMUNICATIONS CORP.', 0x00E0B3: u'EtherWAN Systems, Inc.', 0x00E0B4: u'TECHNO SCOPE CO., LTD.', 0x00E0B5: u'ARDENT COMMUNICATIONS CORP.', 0x00E0B6: u'Entrada Networks', 0x00E0B7: u'PI GROUP, LTD.', 0x00E0B8: u'GATEWAY 2000', 0x00E0B9: u'BYAS SYSTEMS', 0x00E0BA: u'BERGHOF AUTOMATIONSTECHNIK GmbH', 0x00E0BB: u'NBX CORPORATION', 0x00E0BC: u'SYMON COMMUNICATIONS, INC.', 0x00E0BD: u'INTERFACE SYSTEMS, INC.', 0x00E0BE: u'GENROCO INTERNATIONAL, INC.', 0x00E0BF: u'TORRENT NETWORKING TECHNOLOGIES CORP.', 0x00E0C0: u'SEIWA ELECTRIC MFG. CO., LTD.', 0x00E0C1: u'MEMOREX TELEX JAPAN, LTD.', 0x00E0C2: u'NECSY S.p.A.', 0x00E0C3: u'SAKAI SYSTEM DEVELOPMENT CORP.', 0x00E0C4: u'HORNER ELECTRIC, INC.', 0x00E0C5: u'BCOM ELECTRONICS INC.', 0x00E0C6: u'LINK2IT, L.L.C.', 0x00E0C7: u'EUROTECH SRL', 0x00E0C8: u'VIRTUAL ACCESS, LTD.', 0x00E0C9: u'AutomatedLogic Corporation', 0x00E0CA: u'BEST DATA PRODUCTS', 0x00E0CB: u'RESON, INC.', 0x00E0CC: u'HERO SYSTEMS, LTD.', 0x00E0CD: u'SENSIS CORPORATION', 0x00E0CE: u'ARN', 0x00E0CF: u'INTEGRATED DEVICE TECHNOLOGY, INC.', 0x00E0D0: u'NETSPEED, INC.', 0x00E0D1: u'TELSIS LIMITED', 0x00E0D2: u'VERSANET COMMUNICATIONS, INC.', 0x00E0D3: u'DATENTECHNIK GmbH', 0x00E0D4: u'EXCELLENT COMPUTER', 0x00E0D5: u'ARCXEL TECHNOLOGIES, INC.', 0x00E0D6: u'COMPUTER & COMMUNICATION RESEARCH LAB.', 0x00E0D7: u'SUNSHINE ELECTRONICS, INC.', 0x00E0D8: u'LANBit Computer, Inc.', 0x00E0D9: u'TAZMO CO., LTD.', 0x00E0DA: u'Alcatel North America ESD', 0x00E0DB: u'ViaVideo Communications, Inc.', 0x00E0DC: u'NEXWARE CORP.', 0x00E0DD: u'ZENITH ELECTRONICS CORPORATION', 0x00E0DE: u'DATAX NV', 0x00E0DF: u'KE KOMMUNIKATIONS-ELECTRONIK', 0x00E0E0: u'SI ELECTRONICS, LTD.', 0x00E0E1: u'G2 NETWORKS, INC.', 0x00E0E2: u'INNOVA CORP.', 0x00E0E3: u'SK-ELEKTRONIK GmbH', 0x00E0E4: u'FANUC ROBOTICS NORTH AMERICA, Inc.', 0x00E0E5: u'CINCO NETWORKS, INC.', 0x00E0E6: u'INCAA DATACOM B.V.', 0x00E0E7: u'RAYTHEON E-SYSTEMS, INC.', 0x00E0E8: u'GRETACODER Data Systems AG', 0x00E0E9: u'DATA LABS, INC.', 0x00E0EA: u'INNOVAT COMMUNICATIONS, INC.', 0x00E0EB: u'DIGICOM SYSTEMS, INCORPORATED', 0x00E0EC: u'CELESTICA INC.', 0x00E0ED: u'SILICOM, LTD.', 0x00E0EE: u'MAREL HF', 0x00E0EF: u'DIONEX', 0x00E0F0: u'ABLER TECHNOLOGY, INC.', 0x00E0F1: u'THAT CORPORATION', 0x00E0F2: u'ARLOTTO COMNET, INC.', 0x00E0F3: u'WebSprint Communications, Inc.', 0x00E0F4: u'INSIDE Technology A/S', 0x00E0F5: u'TELES 
AG', 0x00E0F6: u'DECISION EUROPE', 0x00E0F7: u'CISCO SYSTEMS, INC.', 0x00E0F8: u'DICNA CONTROL AB', 0x00E0F9: u'CISCO SYSTEMS, INC.', 0x00E0FA: u'TRL TECHNOLOGY, LTD.', 0x00E0FB: u'LEIGHTRONIX, INC.', 0x00E0FC: u'HUAWEI TECHNOLOGIES CO., LTD.', 0x00E0FD: u'A-TREND TECHNOLOGY CO., LTD.', 0x00E0FE: u'CISCO SYSTEMS, INC.', 0x00E0FF: u'SECURITY DYNAMICS TECHNOLOGIES, Inc.', 0x00E6D3: u'NIXDORF COMPUTER CORP.', 0x020701: u'RACAL-DATACOM', 0x021C7C: u'PERQ SYSTEMS CORPORATION', 0x026086: u'LOGIC REPLACEMENT TECH. LTD.', 0x02608C: u'3COM CORPORATION', 0x027001: u'RACAL-DATACOM', 0x0270B0: u'M/A-COM INC. COMPANIES', 0x0270B3: u'DATA RECALL LTD', 0x029D8E: u'CARDIAC RECORDERS INC.', 0x02AA3C: u'OLIVETTI TELECOMM SPA (OLTECO)', 0x02BB01: u'OCTOTHORPE CORP.', 0x02C08C: u'3COM CORPORATION', 0x02CF1C: u'COMMUNICATION MACHINERY CORP.', 0x02E6D3: u'NIXDORF COMPUTER CORPORATION', 0x040AE0: u'XMIT AG COMPUTER NETWORKS', 0x04E0C4: u'TRIUMPH-ADLER AG', 0x080001: u'COMPUTERVISION CORPORATION', 0x080002: u'BRIDGE COMMUNICATIONS INC.', 0x080003: u'ADVANCED COMPUTER COMM.', 0x080004: u'CROMEMCO INCORPORATED', 0x080005: u'SYMBOLICS INC.', 0x080006: u'SIEMENS AG', 0x080007: u'APPLE COMPUTER INC.', 0x080008: u'BOLT BERANEK AND NEWMAN INC.', 0x080009: u'HEWLETT PACKARD', 0x08000A: u'NESTAR SYSTEMS INCORPORATED', 0x08000B: u'UNISYS CORPORATION', 0x08000C: u'MIKLYN DEVELOPMENT CO.', 0x08000D: u'INTERNATIONAL COMPUTERS LTD.', 0x08000E: u'NCR CORPORATION', 0x08000F: u'MITEL CORPORATION', 0x080011: u'TEKTRONIX INC.', 0x080012: u'BELL ATLANTIC INTEGRATED SYST.', 0x080013: u'EXXON', 0x080014: u'EXCELAN', 0x080015: u'STC BUSINESS SYSTEMS', 0x080016: u'BARRISTER INFO SYS CORP', 0x080017: u'NATIONAL SEMICONDUCTOR', 0x080018: u'PIRELLI FOCOM NETWORKS', 0x080019: u'GENERAL ELECTRIC CORPORATION', 0x08001A: u'TIARA/ 10NET', 0x08001B: u'DATA GENERAL', 0x08001C: u'KDD-KOKUSAI DEBNSIN DENWA CO.', 0x08001D: u'ABLE COMMUNICATIONS INC.', 0x08001E: u'APOLLO COMPUTER INC.', 0x08001F: u'SHARP CORPORATION', 0x080020: u'SUN MICROSYSTEMS INC.', 0x080021: u'3M COMPANY', 0x080022: u'NBI INC.', 0x080023: u'Panasonic Communications Co., Ltd.', 0x080024: u'10NET COMMUNICATIONS/DCA', 0x080025: u'CONTROL DATA', 0x080026: u'NORSK DATA A.S.', 0x080027: u'CADMUS COMPUTER SYSTEMS', 0x080028: u'Texas Instruments', 0x080029: u'MEGATEK CORPORATION', 0x08002A: u'MOSAIC TECHNOLOGIES INC.', 0x08002B: u'DIGITAL EQUIPMENT CORPORATION', 0x08002C: u'BRITTON LEE INC.', 0x08002D: u'LAN-TEC INC.', 0x08002E: u'METAPHOR COMPUTER SYSTEMS', 0x08002F: u'PRIME COMPUTER INC.', 0x080030: u'NETWORK RESEARCH CORPORATION', 0x080030: u'CERN', 0x080030: u'ROYAL MELBOURNE INST OF TECH', 0x080031: u'LITTLE MACHINES INC.', 0x080032: u'TIGAN INCORPORATED', 0x080033: u'BAUSCH & LOMB', 0x080034: u'FILENET CORPORATION', 0x080035: u'MICROFIVE CORPORATION', 0x080036: u'INTERGRAPH CORPORATION', 0x080037: u'FUJI-XEROX CO. LTD.', 0x080038: u'CII HONEYWELL BULL', 0x080039: u'SPIDER SYSTEMS LIMITED', 0x08003A: u'ORCATECH INC.', 0x08003B: u'TORUS SYSTEMS LIMITED', 0x08003C: u'SCHLUMBERGER WELL SERVICES', 0x08003D: u'CADNETIX CORPORATIONS', 0x08003E: u'CODEX CORPORATION', 0x08003F: u'FRED KOSCHARA ENTERPRISES', 0x080040: u'FERRANTI COMPUTER SYS. 
LIMITED', 0x080041: u'RACAL-MILGO INFORMATION SYS..', 0x080042: u'JAPAN MACNICS CORP.', 0x080043: u'PIXEL COMPUTER INC.', 0x080044: u'DAVID SYSTEMS INC.', 0x080045: u'CONCURRENT COMPUTER CORP.', 0x080046: u'SONY CORPORATION LTD.', 0x080047: u'SEQUENT COMPUTER SYSTEMS INC.', 0x080048: u'EUROTHERM GAUGING SYSTEMS', 0x080049: u'UNIVATION', 0x08004A: u'BANYAN SYSTEMS INC.', 0x08004B: u'PLANNING RESEARCH CORP.', 0x08004C: u'HYDRA COMPUTER SYSTEMS INC.', 0x08004D: u'CORVUS SYSTEMS INC.', 0x08004E: u'3COM EUROPE LTD.', 0x08004F: u'CYGNET SYSTEMS', 0x080050: u'DAISY SYSTEMS CORP.', 0x080051: u'EXPERDATA', 0x080052: u'INSYSTEC', 0x080053: u'MIDDLE EAST TECH. UNIVERSITY', 0x080055: u'STANFORD TELECOMM. INC.', 0x080056: u'STANFORD LINEAR ACCEL. CENTER', 0x080057: u'EVANS & SUTHERLAND', 0x080058: u'SYSTEMS CONCEPTS', 0x080059: u'A/S MYCRON', 0x08005A: u'IBM CORPORATION', 0x08005B: u'VTA TECHNOLOGIES INC.', 0x08005C: u'FOUR PHASE SYSTEMS', 0x08005D: u'GOULD INC.', 0x08005E: u'COUNTERPOINT COMPUTER INC.', 0x08005F: u'SABER TECHNOLOGY CORP.', 0x080060: u'INDUSTRIAL NETWORKING INC.', 0x080061: u'JAROGATE LTD.', 0x080062: u'GENERAL DYNAMICS', 0x080063: u'PLESSEY', 0x080064: u'AUTOPHON AG', 0x080065: u'GENRAD INC.', 0x080066: u'AGFA CORPORATION', 0x080067: u'COMDESIGN', 0x080068: u'RIDGE COMPUTERS', 0x080069: u'SILICON GRAPHICS INC.', 0x08006A: u'ATT BELL LABORATORIES', 0x08006B: u'ACCEL TECHNOLOGIES INC.', 0x08006C: u'SUNTEK TECHNOLOGY INT\'L', 0x08006D: u'WHITECHAPEL COMPUTER WORKS', 0x08006E: u'MASSCOMP', 0x08006F: u'PHILIPS APELDOORN B.V.', 0x080070: u'MITSUBISHI ELECTRIC CORP.', 0x080071: u'MATRA (DSIE)', 0x080072: u'XEROX CORP UNIV GRANT PROGRAM', 0x080073: u'TECMAR INC.', 0x080074: u'CASIO COMPUTER CO. LTD.', 0x080075: u'DANSK DATA ELECTRONIK', 0x080076: u'PC LAN TECHNOLOGIES', 0x080077: u'TSL COMMUNICATIONS LTD.', 0x080078: u'ACCELL CORPORATION', 0x080079: u'THE DROID WORKS', 0x08007A: u'INDATA', 0x08007B: u'SANYO ELECTRIC CO. LTD.', 0x08007C: u'VITALINK COMMUNICATIONS CORP.', 0x08007E: u'AMALGAMATED WIRELESS(AUS) LTD', 0x08007F: u'CARNEGIE-MELLON UNIVERSITY', 0x080080: u'AES DATA INC.', 0x080081: u'ASTECH INC.', 0x080082: u'VERITAS SOFTWARE', 0x080083: u'Seiko Instruments Inc.', 0x080084: u'TOMEN ELECTRONICS CORP.', 0x080085: u'ELXSI', 0x080086: u'KONICA MINOLTA HOLDINGS, INC.', 0x080087: u'XYPLEX', 0x080088: u'MCDATA CORPORATION', 0x080089: u'KINETICS', 0x08008A: u'PERFORMANCE TECHNOLOGY', 0x08008B: u'PYRAMID TECHNOLOGY CORP.', 0x08008C: u'NETWORK RESEARCH CORPORATION', 0x08008D: u'XYVISION INC.', 0x08008E: u'TANDEM COMPUTERS', 0x08008F: u'CHIPCOM CORPORATION', 0x080090: u'SONOMA SYSTEMS', 0x081443: u'UNIBRAIN S.A.', 0x08BBCC: u'AK-NORD EDV VERTRIEBSGES. mbH', 0x100000: u'PRIVATE', 0x10005A: u'IBM CORPORATION', 0x1000E8: u'NATIONAL SEMICONDUCTOR', 0x1100AA: u'PRIVATE', 0x800010: u'ATT BELL LABORATORIES', 0xA06A00: u'Verilink Corporation', 0xAA0000: u'DIGITAL EQUIPMENT CORPORATION', 0xAA0001: u'DIGITAL EQUIPMENT CORPORATION', 0xAA0002: u'DIGITAL EQUIPMENT CORPORATION', 0xAA0003: u'DIGITAL EQUIPMENT CORPORATION', 0xAA0004: u'DIGITAL EQUIPMENT CORPORATION', 0xACDE48: u'PRIVATE', }
goofwear/raspberry_pwn
src/pentest/metagoofil/hachoir_parser/network/ouid.py
Python
gpl-3.0
385,658
[ "ASE", "Amber", "BWA", "CRYSTAL", "Galaxy", "NEURON" ]
6671aa0858e3b89aa39b1da9528b96880aaef931974e425b8618ccb4e3ba46a5
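The table above maps 24-bit IEEE OUI prefixes to manufacturer names. A minimal sketch of how such a table can be queried for a MAC address (the helper name and the oui_table parameter are illustrative; the dictionary's actual variable name is defined earlier in ouid.py and is not shown here):

def mac_vendor(mac, oui_table):
    # Take the first three octets of the MAC, parse them as the 24-bit
    # OUI key used in the table above, and look the vendor up.
    prefix = int(mac.replace(':', '').replace('-', '')[:6], 16)
    return oui_table.get(prefix)

For example, 0x00E0FC is registered above to HUAWEI TECHNOLOGIES CO., LTD., so mac_vendor('00:E0:FC:12:34:56', table) would return that name.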
from numpy import *
from plotting.myplot import *
from plotting.plot_images import *
from mystat.correlate_cov import *
import scipy.stats as st


def running_mean(x, N):
    # Length-N running mean, computed via a cumulative sum.
    cs = cumsum(insert(x, 0, 0))
    return (cs[N:] - cs[:-N]) / N


def bin_means(x, y, binnr=10):
    # Mean of y within binnr bins of x; returns the bin means and bin centres.
    vals, bins, binnr = st.binned_statistic(x, y, bins=binnr)
    for i in range(0, len(bins) - 1):
        # de-NaN-ing hack: an empty bin inherits the previous bin's value.
        if isnan(vals[i]):
            if i == 0:
                vals[i] = 0
            else:
                vals[i] = vals[i - 1]
        bins[i] = (bins[i] + bins[i + 1]) * 0.5
    bins = bins[0:len(bins) - 1]
    return vals, bins


def spk_contr_mean(spk, contr, fn, tl, make=1):
    # Mean spiking rate against inferred contrast, with a smoothed trend line.
    # make=1 recomputes the statistics and caches them under txt/; otherwise
    # the cached values are loaded.
    if make == 1:
        mn = mean(mean(spk, axis=1) * 1000, axis=1)
        savetxt("txt/spk_contr_mean_mn.txt", mn)
        savetxt("txt/spk_contr_mean_contr.txt", contr)
    else:
        mn = loadtxt("txt/spk_contr_mean_mn.txt")
        contr = loadtxt("txt/spk_contr_mean_contr.txt")
    bnr = 100
    l = 10
    vals, bins = bin_means(contr, mn, bnr)
    run_avg = running_mean(vals, l)
    print(run_avg)
    d = len(run_avg)
    print(d)
    plt.clf()
    plt.plot(contr, mn, 'go', alpha=.4)
    plt.plot(bins[l // 2:d + l // 2], run_avg, lw=8, color="red")
    myplot("inferred contrast", "mean spiking rate", tl, fn,
           xlog=False, legend=False)


def spk_contr_var(spk, contr, fn, tl, make=1, mn=0):
    # Spike count variance against inferred contrast (or against the mean
    # spiking rate when mn=1), with a smoothed trend line.
    if make == 1:
        vr = var(spk, axis=1) * 1000
        vr = mean(vr, axis=1)
        if mn == 1:
            spk_avg = mean(mean(spk, axis=1), axis=1)
            contr = spk_avg * 1000
        savetxt("txt/spk_contr_var_vr.txt", vr)
        savetxt("txt/spk_contr_var_contr.txt", contr)
    else:
        vr = loadtxt("txt/spk_contr_var_vr.txt")
        contr = loadtxt("txt/spk_contr_var_contr.txt")
    bnr = 100
    l = 10
    vals, bins = bin_means(contr, vr, bnr)
    run_avg = running_mean(vals, l)
    print(run_avg)
    d = len(run_avg)
    print(d)
    plt.clf()
    plt.plot(contr, vr, 'go', alpha=.4)
    plt.plot(bins[l // 2:d + l // 2], run_avg, lw=8, color="red")
    # plt.ylim([0,300])
    if mn == 1:
        myplot("mean spiking rate", "spike count variance", tl, fn,
               xlog=False, legend=False)
    else:
        myplot("inferred contrast", "spike count variance", tl, fn,
               xlog=False, legend=False)


def spk_contr_fano(spk, contr, fn, tl, make=1, mn=0):
    # Fano factor (spike count variance / mean) against the mean rate.
    if make == 1:
        vr = var(spk, axis=1) * 1000
        vr = mean(vr, axis=1)
        spk_avg = mean(mean(spk, axis=1), axis=1) * 1000
        savetxt("txt/vr.txt", vr)
        savetxt("txt/spk_avg.txt", spk_avg)
    else:
        vr = loadtxt("txt/vr.txt")
        spk_avg = loadtxt("txt/spk_avg.txt")
    fan = vr / spk_avg
    bnr = 100
    l = 10
    vals, bins = bin_means(spk_avg, fan, bnr)
    run_avg = running_mean(vals, l)
    print(run_avg)
    d = len(run_avg)
    print(d)
    plt.clf()
    plt.plot(spk_avg, fan, 'go', alpha=.4)
    plt.plot(bins[l // 2:d + l // 2], run_avg, lw=8, color="red")
    # was started from 1, first was buggy
    # plt.ylim([0.7,1.1])
    if mn == 1:
        myplot("mean spiking rate", "Fano-factor of spiketrains", tl, fn,
               xlog=False, legend=False)
    else:
        myplot("inferred contrast", "Fano-factor of spiketrains", tl, fn,
               xlog=False, legend=False)


def spk_contr_corr(spk, contr, fn, tl, make=1, mn=0):
    # Average absolute off-diagonal correlation between units, per image.
    if make == 1:
        imgNr = size(spk, 0)
        avg_cv = zeros(imgNr)
        if mn == 1:
            spk_avg = mean(mean(spk, axis=1), axis=1)
            contr = spk_avg * 1000
        for i in range(0, imgNr):
            cv = cov(spk[i], rowvar=0)
            corr = nan_to_num(correlate_cov(cv))
            a = abs(corr - diag(diag(corr)))
            avg_cv[i] = mean(a)
        savetxt("txt/spk_contr_corr_avg_cv.txt", avg_cv)
        savetxt("txt/spk_contr_corr_contr_cv.txt", contr)
    else:
        avg_cv = loadtxt("txt/spk_contr_corr_avg_cv.txt")
        contr = loadtxt("txt/spk_contr_corr_contr_cv.txt")
    bnr = 100
    l = 10
    vals, bins = bin_means(contr, avg_cv, bnr)
    run_avg = running_mean(vals, l)
    d = len(run_avg)
    plt.clf()
    plt.plot(contr, avg_cv, 'go', alpha=.4)
    plt.plot(bins[l // 2:d + l // 2], run_avg, lw=8, color="red")
    # plt.ylim([0,0.035])
    if mn == 1:
        myplot("mean spiking rate", "average correlations", tl, fn,
               xlog=False, legend=False)
    else:
        myplot("inferred contrast", "average correlations", tl, fn,
               xlog=False, legend=False)


def spk_contr_cov(spk, contr, fn, tl, make=1, mn=0):
    # Average absolute off-diagonal covariance between units, per image.
    if make == 1:
        imgNr = size(spk, 0)
        avg_cv = zeros(imgNr)
        if mn == 1:
            spk_avg = mean(mean(spk, axis=1), axis=1)
            contr = spk_avg * 1000
        for i in range(0, imgNr):
            cv = cov(spk[i], rowvar=0)
            a = abs(cv - diag(diag(cv)))
            avg_cv[i] = mean(a)
        savetxt("txt/spk_contr_cov_avg_cv.txt", avg_cv)
        savetxt("txt/spk_contr_cov_contr_cv.txt", contr)
    else:
        avg_cv = loadtxt("txt/spk_contr_cov_avg_cv.txt")
        contr = loadtxt("txt/spk_contr_cov_contr_cv.txt")
    bnr = 100
    l = 10
    vals, bins = bin_means(contr, avg_cv, bnr)
    run_avg = running_mean(vals, l)
    d = len(run_avg)
    plt.clf()
    plt.plot(contr, avg_cv, 'go', alpha=.4)
    plt.plot(bins[l // 2:d + l // 2], run_avg, lw=8, color="red")
    # plt.ylim([0,0.005])
    if mn == 1:
        myplot("mean spiking rate", "average covariance", tl, fn,
               xlog=False, legend=False)
    else:
        myplot("inferred contrast", "average covariance", tl, fn,
               xlog=False, legend=False)


def pairs_samples(samp_arr, fn, tl, b1, b2):
    # Scatter of membrane potential samples for a pair of neurons.
    bs1 = samp_arr[:, b1]
    bs2 = samp_arr[:, b2]
    plt.clf()
    plt.plot(bs1, bs2, "b.", ms=10)
    plt.xlim([-6, 9])
    plt.ylim([-6, 9])
    myplot("potential of neuron 1", "potential of neuron 2", tl, fn,
           xlog=False, legend=False)


def pairs_rates(rate_arr, fn, tl, b1, b2):
    # Scatter of firing rates for a pair of neurons.
    br1 = rate_arr[:, b1]
    br2 = rate_arr[:, b2]
    plt.clf()
    plt.plot(br1, br2, "b.", ms=10)
    plt.xlim([0, 80])
    plt.ylim([0, 90])
    myplot("rate of neuron 1", "rate of neuron 2", tl, fn,
           xlog=False, legend=False)


def new_pairs_spikec(contrasts, pcoeffs, icoeffs, fn, tl):
    # Trial-to-trial correlation coefficients under the Poisson and
    # integration spiking models, as a function of the potential means.
    plt.plot(contrasts, pcoeffs, label="Poisson")
    plt.plot(contrasts, icoeffs, label="Integration")
    myplot("potential means", "Trial to trial correlations", tl, fn,
           xlog=False, legend=True)


def pairs_spikec(spike_arr, fn, tl, b1, b2, l):
    # Joint histogram of binned spike counts for a pair of neurons, drawn
    # as a bubble plot whose marker sizes reflect the 2D histogram counts.
    bs1 = spike_arr[:, b1].astype(float)
    bs2 = spike_arr[:, b2].astype(float)
    # sl = len(bs1)
    # vals1, bins1 = bin_means(range(0,sl), bs1, l)
    # vals2, bins2 = bin_means(range(0,sl), bs2, l)
    # vals1 = vals1*sl/l
    # vals2 = vals2*sl/l
    # plt.clf()
    # plt.plot(vals1, vals2, "o")
    print(sum(bs1), sum(bs2))
    sl = len(bs1)
    delta = 0.23
    vals1, bins1 = bin_means(range(0, sl), bs1, l)
    vals2, bins2 = bin_means(range(0, sl), bs2, l)
    vals1 = vals1 * sl / l
    vals2 = vals2 * sl / l
    xedges = arange(min(vals1),
                    min(vals1) + int((max(vals1) - min(vals1)) / delta) * delta
                    + 2 * delta, delta) - delta / 2
    yedges = arange(min(vals2),
                    min(vals2) + int((max(vals2) - min(vals2)) / delta) * delta
                    + 2 * delta, delta) - delta / 2
    h, xedges, yedges = histogram2d(vals1, vals2, bins=[xedges, yedges])
    xmids = arange(min(vals1),
                   min(vals1) + int((max(vals1) - min(vals1)) / delta) * delta
                   + delta, delta)
    ymids = arange(min(vals2),
                   min(vals2) + int((max(vals2) - min(vals2)) / delta) * delta
                   + delta, delta)
    X, Y = meshgrid(xmids, ymids)
    plt.clf()
    plt.scatter(X, Y, s=h * 20)
    # plt.xlim([0, 24])
    # plt.ylim([0, 28])
    print(corrcoef(vals1, vals2))
    # print(sum(bs1), sum(bs2))
    # sl = len(bs1)
    # vals1, bins1 = bin_means(range(0,sl), bs1, l)
    # vals2, bins2 = bin_means(range(0,sl), bs2, l)
    # vals1 = (vals1*sl/l).astype(int)
    # vals2 = (vals2*sl/l).astype(int)
    # xedges = arange(int(min(vals1)), int(max(vals1))+2)-0.5
    # yedges = arange(int(min(vals2)), int(max(vals2))+2)-0.5
    # h, xedges, yedges = histogram2d(vals1, vals2, bins = [xedges, yedges])
    # xmids = arange(int(min(vals1)), int(max(vals1))+1)
    # ymids = arange(int(min(vals2)), int(max(vals2))+1)
    # X, Y = meshgrid(xmids, ymids)
    # plt.clf()
    # plt.scatter(X, Y, s = h*20)
    # plt.xlim([0, 21])
    # plt.ylim([0, 21])
    # plt.xlim([int(min(vals1))-0.1, int(max(vals1))+0.1])
    # plt.ylim([int(min(vals2))-0.1, int(max(vals2))+0.1])
    myplot("spike count of neuron 1", "spike count of neuron 2", tl, fn,
           xlog=False, legend=False)
zsomko/visualcortex
python/spikes/analyze_spikes.py
Python
gpl-2.0
8,294
[ "NEURON" ]
37565a6f9636e8161fe5ea7cc7aeda512b1d653609d02824f6ff80857a6dcfac
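bin_means and running_mean above implement a simple bin-then-smooth estimate of how one trial statistic varies with another. Below is a self-contained sketch of that pipeline on synthetic data, using only numpy and scipy (the plotting.myplot helpers are private to the repo and omitted; all variable names and the zero-filling of empty bins are illustrative simplifications):

import numpy as np
import scipy.stats as st

rng = np.random.default_rng(0)
x = rng.uniform(0, 1, 1000)            # e.g. inferred contrast per trial
y = 20 * x + rng.normal(0, 2.0, 1000)  # e.g. mean spiking rate per trial

# Bin y by x into 100 bins (as bin_means does), then smooth the per-bin
# means with a length-10 running mean (as running_mean does).
vals, edges, _ = st.binned_statistic(x, y, bins=100)
mids = 0.5 * (edges[:-1] + edges[1:])
vals = np.nan_to_num(vals)
cs = np.cumsum(np.insert(vals, 0, 0))
run_avg = (cs[10:] - cs[:-10]) / 10.0

# Align bin centres with the smoothed values, mirroring the
# bins[l // 2:d + l // 2] slice used by the plotting code above.
print(mids[5:len(run_avg) + 5].shape, run_avg.shape)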
#!/usr/bin/env python

# written by: Oliver Cordes 2016-05-30
# changed by: Oliver Cordes 2016-05-30

import os, sys
import getopt

try:
    import numpy as np
except:
    print('Install the numpy python module!')
    sys.exit(-1)

try:
    from astropy.io import fits
except:
    print('Install the astropy python modules!')
    sys.exit(-1)

mean = 5.0
stddev = 0.1
lam = 1.0
noise_model = 'normal'
infile = None
outfile = None


def syntax(exitcode=0):
    print('SYNTAX: %s' % sys.argv[0])
    print(' -h|-?          : this help page')
    print(' -i input_file  : use this input image')
    print(' -o output_file : write this output image')
    print('')
    print(' --mean=val     : use this mean value (default: 5.0)')
    print(' --stddev=val   : use this stddev value (default: 0.1)')
    print(' --lambda=val   : use this lambda value (default: 1)')
    print(' --model=val    : noise models (default: normal)')
    print('     normal  : gaussian distribution')
    print('     poisson : poisson distribution')
    print('     uniform : uniform distribution')
    sys.exit(exitcode)


long_options = ['mean=', 'stddev=', 'lambda=', 'model=']

try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'h?i:o:', long_options)
except getopt.GetoptError:
    print('Error while parsing command parameters!')
    syntax(1)

for key, val in opts:
    if key in ('-?', '-h'):
        syntax()
    elif key == '-i':
        infile = val
    elif key == '-o':
        outfile = val
    elif key == '--mean':
        mean = float(val)
    elif key == '--stddev':
        stddev = float(val)
    elif key == '--model':
        noise_model = val
    elif key == '--lambda':
        lam = float(val)

# check parameters
if infile is None:
    print('No infile given! Program aborted!')
    sys.exit(-1)

if outfile is None:
    print('No outfile given! Program aborted!')
    sys.exit(-1)

if noise_model not in ('normal', 'poisson', 'uniform'):
    print('Noise model unknown! Program aborted!')
    sys.exit(-1)


def add_noise_to_file(infile, outfile):
    inf = fits.open(infile, 'readonly')
    imdata = inf[0].data

    if noise_model == 'normal':
        print('using gaussian distribution')
        noise = np.random.normal(mean, stddev, imdata.shape)
    elif noise_model == 'poisson':
        print('using poisson distribution')
        noise = np.random.poisson(lam, imdata.shape)
    else:
        # 'uniform' is accepted on the command line but not implemented:
        # it falls through to adding no noise at all.
        noise = np.zeros(imdata.shape)

    imdata += noise

    #hdu = fits.PrimaryHDU( imdata, header=inf[0].header )
    hdu = fits.PrimaryHDU(imdata)
    print('Saving %s ...' % outfile)
    hdu.writeto(outfile, clobber=True)


# main
print('Add noise to file...')
add_noise_to_file(infile, outfile)
print('Done.')
ocordes/arctic
test_suite/bin/add_noise.py
Python
lgpl-3.0
2,942
[ "Gaussian" ]
6c0163bd10d434201d2db576c9a4e693818df5cefd5f156f528f047afb3e6c75
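Given the option parsing above, a typical invocation of the script looks like this (file names are illustrative; in.fits must be an existing FITS image):

  python add_noise.py -i in.fits -o out_gauss.fits --mean=0.0 --stddev=2.5
  python add_noise.py -i in.fits -o out_poisson.fits --model=poisson --lambda=3

Note that --lambda only takes effect with --model=poisson; the 'normal' model uses the mean/stddev pair, and 'uniform', as noted in the code, currently adds no noise.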
#!/usr/bin/env python from __future__ import print_function, unicode_literals import getpass import imp import os import pkg_resources import platform import re import shutil import sys import textwrap import subprocess import warnings from optparse import OptionGroup, OptionParser from random import choice as random_choice from django.db.utils import OperationalError from django.utils import six from django.utils.encoding import force_str from django.utils.six.moves import input from django.utils.six.moves.urllib.request import urlopen from reviewboard import get_manual_url, get_version_string from reviewboard.rb_platform import (SITELIST_FILE_UNIX, DEFAULT_FS_CACHE_PATH, INSTALLED_SITE_PATH) # Ignore the PendingDeprecationWarnings that we'll get from Django. # See bug 1683. warnings.filterwarnings("ignore", category=PendingDeprecationWarning) VERSION = get_version_string() DEBUG = False # Global State options = None args = None site = None ui = None class Dependencies(object): """An object which queries and caches dependency information.""" memcached_modules = ["memcache"] sqlite_modules = ["pysqlite2", "sqlite3"] mysql_modules = ["MySQLdb"] postgresql_modules = ["psycopg2"] cache_dependency_info = { 'required': False, 'title': 'Server Cache', 'dependencies': [ ("memcached", memcached_modules), ], } db_dependency_info = { 'required': True, 'title': 'Databases', 'dependencies': [ ("sqlite3", sqlite_modules), ("MySQL", mysql_modules), ("PostgreSQL", postgresql_modules) ], } @classmethod def get_support_memcached(cls): """Return whether memcached is supported.""" return cls.has_modules(cls.memcached_modules) @classmethod def get_support_mysql(cls): """Return whether mysql is supported.""" return cls.has_modules(cls.mysql_modules) @classmethod def get_support_postgresql(cls): """Return whether postgresql is supported.""" return cls.has_modules(cls.postgresql_modules) @classmethod def get_support_sqlite(cls): """Return whether sqlite is supported.""" return cls.has_modules(cls.sqlite_modules) @classmethod def get_missing(cls): """Return any missing dependencies. This will return a two-tuple, where the first item is a boolean indicating if any missing dependencies are fatal, and the second is a list of missing dependency groups. 
""" fatal = False missing_groups = [] for dep_info in [cls.cache_dependency_info, cls.db_dependency_info]: missing_deps = [] for desc, modules in dep_info['dependencies']: if not cls.has_modules(modules): missing_deps.append("%s (%s)" % (desc, ", ".join(modules))) if missing_deps: if (dep_info['required'] and len(missing_deps) == len(dep_info['dependencies'])): fatal = True text = "%s (required)" % dep_info['title'] else: text = "%s (optional)" % dep_info['title'] missing_groups.append({ 'title': text, 'dependencies': missing_deps, }) return fatal, missing_groups @classmethod def has_modules(cls, names): """Return True if one of the specified modules is installed.""" for name in names: try: __import__(name) return True except ImportError: continue return False class Site(object): """An object which contains the configuration for a Review Board site.""" CACHE_BACKENDS = { 'memcached': 'django.core.cache.backends.memcached.MemcachedCache', 'file': 'django.core.cache.backends.filebased.FileBasedCache', } def __init__(self, install_dir, options): """Initialize the site.""" self.install_dir = self.get_default_site_path(install_dir) self.abs_install_dir = os.path.abspath(self.install_dir) self.site_id = \ os.path.basename(install_dir).replace(" ", "_").replace(".", "_") self.options = options # State saved during installation self.company = None self.domain_name = None self.web_server_port = None self.site_root = None self.static_url = None self.media_url = None self.db_type = None self.db_name = None self.db_host = None self.db_port = None self.db_user = None self.db_pass = None self.reenter_db_pass = None self.cache_type = None self.cache_info = None self.web_server_type = None self.python_loader = None self.admin_user = None self.admin_password = None self.reenter_admin_password = None self.send_support_usage_stats = True def get_default_site_path(self, install_dir): """Return the default site path.""" if os.path.isabs(install_dir): return install_dir return os.path.join(INSTALLED_SITE_PATH, install_dir) def rebuild_site_directory(self): """Rebuild the site hierarchy.""" htdocs_dir = os.path.join(self.install_dir, "htdocs") media_dir = os.path.join(htdocs_dir, "media") static_dir = os.path.join(htdocs_dir, "static") self.mkdir(self.install_dir) self.mkdir(os.path.join(self.install_dir, "logs")) self.mkdir(os.path.join(self.install_dir, "conf")) self.mkdir(os.path.join(self.install_dir, "tmp")) os.chmod(os.path.join(self.install_dir, "tmp"), 0o777) self.mkdir(os.path.join(self.install_dir, "data")) self.mkdir(htdocs_dir) self.mkdir(media_dir) self.mkdir(static_dir) uploaded_dir = os.path.join(media_dir, 'uploaded') self.mkdir(uploaded_dir) # Assuming this is an upgrade, the 'uploaded' directory should # already have the right permissions for writing, so use that as a # template for all the new directories. writable_st = os.stat(uploaded_dir) writable_dirs = [ os.path.join(uploaded_dir, 'images'), os.path.join(uploaded_dir, 'files'), os.path.join(media_dir, 'ext'), os.path.join(static_dir, 'ext'), ] for writable_dir in writable_dirs: self.mkdir(writable_dir) try: if hasattr(os, 'chown'): os.chown(writable_dir, writable_st.st_uid, writable_st.st_gid) except OSError: # The user didn't have permission to change the ownership, # they'll have to do this manually later. 
pass self.link_pkg_dir( "reviewboard", "htdocs/errordocs", os.path.join(self.install_dir, "htdocs", "errordocs")) self.link_pkg_dir("reviewboard", "htdocs/static/lib", os.path.join(static_dir, 'lib')) self.link_pkg_dir("reviewboard", "htdocs/static/rb", os.path.join(static_dir, 'rb')) self.link_pkg_dir("reviewboard", "htdocs/static/admin", os.path.join(static_dir, 'admin')) self.link_pkg_dir("djblets", "htdocs/static/djblets", os.path.join(static_dir, 'djblets')) # Remove any old media directories from old sites self.unlink_media_dir(os.path.join(media_dir, 'admin')) self.unlink_media_dir(os.path.join(media_dir, 'djblets')) self.unlink_media_dir(os.path.join(media_dir, 'rb')) # Generate .htaccess files that enable compression and # never expires various file types. htaccess = '\n'.join([ '<IfModule mod_expires.c>', ' <FilesMatch "\.(jpg|gif|png|css|js|htc)">', ' ExpiresActive on', ' ExpiresDefault "access plus 1 year"', ' </FilesMatch>', '</IfModule>', '', '<IfModule mod_deflate.c>', ] + [ ' AddOutputFilterByType DEFLATE %s' % mimetype for mimetype in [ 'text/html', 'text/plain', 'text/xml', 'text/css', 'text/javascript', 'application/javascript', 'application/x-javascript', ] ] + [ '</IfModule>', ]) for dirname in (static_dir, media_dir): with open(os.path.join(dirname, '.htaccess'), 'w') as fp: fp.write(htaccess) def setup_settings(self): """Set up the environment for running django management commands.""" # Make sure that we have our settings_local.py in our path for when # we need to run manager commands. sys.path.insert(0, os.path.join(self.abs_install_dir, "conf")) os.environ[b'DJANGO_SETTINGS_MODULE'] = b'reviewboard.settings' def get_apache_version(self): """Return the version of the installed apache.""" try: apache_version = subprocess.check_output(['httpd', '-v']) # Extract the major and minor version from the string m = re.search('Apache\/(\d+).(\d+)', apache_version) if m: return m.group(1, 2) else: # Raise a generic regex error so we go to the # exception handler to pick a default raise re.error except: # Version check returned an error or the regular # expression did not match. Guess 2.2 for historic # compatibility return (2, 2) def generate_cron_files(self): """Generate sample crontab for this site.""" self.process_template("cmdline/conf/cron.conf.in", os.path.join(self.install_dir, "conf", "cron.conf")) def generate_config_files(self): """Generate the configuration files for this site.""" web_conf_filename = "" enable_fastcgi = False enable_wsgi = False if self.web_server_type == "apache": if self.python_loader == "fastcgi": web_conf_filename = "apache-fastcgi.conf" enable_fastcgi = True elif self.python_loader == "wsgi": web_conf_filename = "apache-wsgi.conf" enable_wsgi = True else: # Should never be reached. assert False # Get the Apache version so we know which # authorization directive to use apache_version = self.get_apache_version() if apache_version[0] >= 2 and apache_version[1] >= 4: self.apache_auth = "Require all granted" else: self.apache_auth = "Allow from all" elif self.web_server_type == "lighttpd": web_conf_filename = "lighttpd.conf" enable_fastcgi = True else: # Should never be reached. 
assert False conf_dir = os.path.join(self.install_dir, "conf") htdocs_dir = os.path.join(self.install_dir, "htdocs") self.process_template("cmdline/conf/%s.in" % web_conf_filename, os.path.join(conf_dir, web_conf_filename)) self.generate_cron_files() if enable_fastcgi: fcgi_filename = os.path.join(htdocs_dir, "reviewboard.fcgi") self.process_template("cmdline/conf/reviewboard.fcgi.in", fcgi_filename) os.chmod(fcgi_filename, 0o755) elif enable_wsgi: wsgi_filename = os.path.join(htdocs_dir, "reviewboard.wsgi") self.process_template("cmdline/conf/reviewboard.wsgi.in", wsgi_filename) os.chmod(wsgi_filename, 0o755) # Generate a secret key based on Django's code. secret_key = ''.join([ random_choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50) ]) # Generate the settings_local.py fp = open(os.path.join(conf_dir, "settings_local.py"), "w") fp.write("# Site-specific configuration settings for Review Board\n") fp.write("# Definitions of these settings can be found at\n") fp.write("# http://docs.djangoproject.com/en/dev/ref/settings/\n") fp.write("\n") fp.write("# Database configuration\n") db_engine = self.db_type if db_engine == "postgresql": db_engine = "postgresql_psycopg2" fp.write("DATABASES = {\n") fp.write(" 'default': {\n") fp.write(" 'ENGINE': 'django.db.backends.%s',\n" % db_engine) fp.write(" 'NAME': '%s',\n" % self.db_name.replace("\\", "\\\\")) if self.db_type != "sqlite3": if ':' in self.db_host: self.db_host, self.db_port = self.db_host.split(':', 1) fp.write(" 'USER': '%s',\n" % (self.db_user or "")) fp.write(" 'PASSWORD': '%s',\n" % (self.db_pass or "")) fp.write(" 'HOST': '%s',\n" % (self.db_host or "")) fp.write(" 'PORT': '%s',\n" % (self.db_port or "")) fp.write(" },\n") fp.write("}\n") fp.write("\n") fp.write("# Unique secret key. Don't share this with anybody.\n") fp.write("SECRET_KEY = '%s'\n" % secret_key) fp.write("\n") fp.write("# Cache backend settings.\n") fp.write("CACHES = {\n") fp.write(" 'default': {\n") fp.write(" 'BACKEND': '%s',\n" % self.CACHE_BACKENDS[self.cache_type]) fp.write(" 'LOCATION': '%s',\n" % self.cache_info) fp.write(" },\n") fp.write("}\n") fp.write("\n") fp.write("# Extra site information.\n") fp.write("SITE_ID = 1\n") fp.write("SITE_ROOT = '%s'\n" % self.site_root) fp.write("FORCE_SCRIPT_NAME = ''\n") fp.write("DEBUG = False\n") fp.write("ALLOWED_HOSTS = ['%s']\n" % (self.domain_name or '*')) fp.close() self.setup_settings() def sync_database(self, allow_input=False): """Synchronize the database.""" params = [] if not allow_input: params.append("--noinput") while True: try: self.run_manage_command("syncdb", params) break except OperationalError as e: ui.error('There was an error synchronizing the database. ' 'Make sure the database is created and has the ' 'appropriate permissions, and then continue.' '\n' 'Details: %s' % e, force_wait=True) except Exception: # This is an unexpected error, and we don't know how to # handle this. Bubble it up. raise self.run_manage_command("registerscmtools") def migrate_database(self): """Perform a database migration.""" self.run_manage_command("evolve", ["--noinput", "--execute"]) def encrypt_passwords(self): """Harden any password storage. Any legacy plain-text passwords will be encrypted. 
""" from reviewboard.scmtools.models import Repository Repository.objects.encrypt_plain_text_passwords() def get_static_media_upgrade_needed(self): """Determine if a static media config upgrade is needed.""" from djblets.siteconfig.models import SiteConfiguration siteconfig = SiteConfiguration.objects.get_current() manual_updates = siteconfig.settings.get('manual-updates', {}) resolved_update = manual_updates.get('static-media', False) return (not resolved_update and (pkg_resources.parse_version(siteconfig.version) < pkg_resources.parse_version("1.7"))) def get_diff_dedup_needed(self): """Determine if there's likely duplicate diff data stored.""" from reviewboard.diffviewer.models import FileDiff try: return FileDiff.objects.unmigrated().count() > 0 except: # Very likely, there was no diffviewer_filediff.diff_hash_id # column, indicating a pre-1.7 database. We want to assume # a dedup is needed. return True def get_settings_upgrade_needed(self): """Determine if a settings upgrade is needed.""" try: import settings_local if (hasattr(settings_local, 'DATABASE_ENGINE') or hasattr(settings_local, 'CACHE_BACKEND')): return True if hasattr(settings_local, 'DATABASES'): engine = settings_local.DATABASES['default']['ENGINE'] if not engine.startswith('django.db.backends'): return True except ImportError: sys.stderr.write("Unable to import settings_local. " "Cannot determine if upgrade is needed.\n") return False def upgrade_settings(self): """Perform a settings upgrade.""" settings_file = os.path.join(self.abs_install_dir, "conf", "settings_local.py") perform_upgrade = False buf = [] database_info = {} database_keys = ('ENGINE', 'NAME', 'USER', 'PASSWORD', 'HOST', 'PORT') backend_info = {} from django.core.cache import (parse_backend_uri, InvalidCacheBackendError) try: import settings_local if hasattr(settings_local, 'DATABASE_ENGINE'): engine = settings_local.DATABASE_ENGINE # Don't convert anything other than the ones we know about, # or third parties with custom databases may have problems. if engine in ('sqlite3', 'mysql', 'postgresql', 'postgresql_psycopg2'): engine = 'django.db.backends.' 
+ engine database_info['ENGINE'] = engine for key in database_keys: if key != 'ENGINE': database_info[key] = getattr(settings_local, 'DATABASE_%s' % key, '') perform_upgrade = True if hasattr(settings_local, 'DATABASES'): engine = settings_local.DATABASES['default']['ENGINE'] if engine == 'postgresql_psycopg2': perform_upgrade = True if hasattr(settings_local, 'CACHE_BACKEND'): try: backend_info = parse_backend_uri( settings_local.CACHE_BACKEND) perform_upgrade = True except InvalidCacheBackendError: pass except ImportError: sys.stderr.write("Unable to import settings_local for upgrade.\n") return if not perform_upgrade: return fp = open(settings_file, 'r') found_database = False found_cache = False for line in fp.readlines(): if line.startswith('DATABASE_'): if not found_database: found_database = True buf.append("DATABASES = {\n") buf.append(" 'default': {\n") for key in database_keys: if database_info[key]: buf.append(" '%s': '%s',\n" % (key, database_info[key])) buf.append(" },\n") buf.append("}\n") elif line.startswith('CACHE_BACKEND') and backend_info: if not found_cache: found_cache = True buf.append("CACHES = {\n") buf.append(" 'default': {\n") buf.append(" 'BACKEND': '%s',\n" % self.CACHE_BACKENDS[backend_info[0]]) buf.append(" 'LOCATION': '%s',\n" % backend_info[1]) buf.append(" },\n") buf.append("}\n") elif line.strip().startswith("'ENGINE': 'postgresql_psycopg2'"): buf.append(" 'ENGINE': '" "django.db.backends.postgresql_psycopg2',\n") else: buf.append(line) fp.close() fp = open(settings_file, 'w') fp.writelines(buf) fp.close() # Reload the settings module del sys.modules['settings_local'] del sys.modules['reviewboard.settings'] import django.conf django.conf.settings = django.conf.LazySettings() def create_admin_user(self): """Create an administrator user account.""" cwd = os.getcwd() os.chdir(self.abs_install_dir) from django.contrib.auth.models import User User.objects.create_superuser(self.admin_user, self.admin_email, self.admin_password) os.chdir(cwd) def register_support_page(self): """Register this installation with the support data tracker.""" from reviewboard.admin.support import get_register_support_url url = get_register_support_url(force_is_admin=True) try: urlopen(url, timeout=5).read() except: # There may be a number of issues preventing this from working, # such as a restricted network environment or a server issue on # our side. This isn't a catastrophic issue, so don't bother them # about it. pass def run_manage_command(self, cmd, params=None): """Run a given django management command.""" cwd = os.getcwd() os.chdir(self.abs_install_dir) try: from django.core.management import (execute_from_command_line, get_commands) os.environ.setdefault(b'DJANGO_SETTINGS_MODULE', b'reviewboard.settings') if not params: params = [] if DEBUG: params.append("--verbosity=0") commands_dir = os.path.join(self.abs_install_dir, 'commands') if os.path.exists(commands_dir): # Pre-fetch all the available management commands. get_commands() # Insert our own management commands into this list. # Yes, this is a bit of a hack. 
from django.core.management import _commands for command in os.listdir(commands_dir): module_globals = {} filename = os.path.join(commands_dir, command) with open(filename) as f: code = compile(f.read(), filename, 'exec') exec(code, module_globals) if 'Command' in module_globals: name = os.path.splitext(f)[0] _commands[name] = module_globals['Command']() execute_from_command_line([__file__, cmd] + params) except ImportError as e: ui.error("Unable to execute the manager command %s: %s" % (cmd, e)) os.chdir(cwd) def mkdir(self, dirname): """Create a directory, but only if it doesn't already exist.""" if not os.path.exists(dirname): os.mkdir(dirname) def link_pkg_dir(self, pkgname, src_path, dest_dir, replace=True): """Create the package directory.""" src_dir = pkg_resources.resource_filename(pkgname, src_path) if os.path.islink(dest_dir) and not os.path.exists(dest_dir): os.unlink(dest_dir) if os.path.exists(dest_dir): if not replace: return self.unlink_media_dir(dest_dir) if self.options.copy_media: shutil.copytree(src_dir, dest_dir) else: os.symlink(src_dir, dest_dir) def unlink_media_dir(self, path): """Delete the given media directory and all contents.""" if os.path.exists(path): if os.path.islink(path): os.unlink(path) else: shutil.rmtree(path) def process_template(self, template_path, dest_filename): """Generate a file from a template.""" domain_name = self.domain_name or '' domain_name_escaped = domain_name.replace(".", "\\.") template = pkg_resources.resource_string("reviewboard", template_path) sitedir = os.path.abspath(self.install_dir).replace("\\", "/") if self.site_root: site_root = self.site_root site_root_noslash = site_root[1:-1] else: site_root = '/' site_root_noslash = '' # Check if this is a .exe. if (hasattr(sys, "frozen") or # new py2exe hasattr(sys, "importers") or # new py2exe imp.is_frozen("__main__")): # tools/freeze rbsite_path = sys.executable else: rbsite_path = '"%s" "%s"' % (sys.executable, sys.argv[0]) data = { 'rbsite': rbsite_path, 'port': self.web_server_port, 'sitedir': sitedir, 'sitedomain': domain_name, 'sitedomain_escaped': domain_name_escaped, 'siteid': self.site_id, 'siteroot': site_root, 'siteroot_noslash': site_root_noslash, } if hasattr(self, 'apache_auth'): data['apache_auth'] = self.apache_auth template = re.sub(r"@([a-z_]+)@", lambda m: data.get(m.group(1)), template) fp = open(dest_filename, "w") fp.write(template) fp.close() class SiteList(object): """Maintains the list of sites installed on the system.""" def __init__(self, path): """Initialize the site list.""" self.path = path # Read the list in as a unique set. # This way, we can easily eliminate duplicates. self.sites = set() if os.path.exists(self.path): f = open(self.path, 'r') for line in f: site = line.strip() # Verify that this path exists on the system # And add it to the dictionary. if os.path.exists(site): self.sites.add(site) f.close() def add_site(self, site_path): """Add a site to the site list.""" self.sites.add(site_path) # Write all of the sites back to the file. # Sort keys to ensure consistent order. ordered_sites = list(self.sites) ordered_sites.sort() # Create the parent directory of the site # if it doesn't already exist if not os.path.exists(os.path.dirname(self.path)): # Create the parent directory with read-write # permissions for user but read and execute # only for others. 
try: os.makedirs(os.path.dirname(self.path), 0o755) except: # We shouldn't consider this an abort-worthy error # We'll warn the user and just complete setup print("WARNING: Could not save site to sitelist %s" % self.path) return with open(self.path, 'w') as f: for site in ordered_sites: f.write("%s\n" % site) class UIToolkit(object): """An abstract class that forms the basis for all UI interaction. Subclasses can override this to provide new ways of representing the UI to the user. """ def run(self): """Run the UI.""" pass def page(self, text, allow_back=True, is_visible_func=None, on_show_func=None): """Add a new "page" to display to the user. Input and text are associated with this page and may be displayed immediately or later, depending on the toolkit. If is_visible_func is specified and returns False, this page will be skipped. """ return None def prompt_input(self, page, prompt, default=None, password=False, normalize_func=None, save_obj=None, save_var=None): """Prompt the user for some text. This may contain a default value.""" raise NotImplementedError def prompt_choice(self, page, prompt, choices, save_obj=None, save_var=None): """Prompt the user for an item amongst a list of choices.""" raise NotImplementedError def text(self, page, text): """Display a block of text to the user.""" raise NotImplementedError def disclaimer(self, page, text): """Display a block of disclaimer text to the user.""" raise NotImplementedError def urllink(self, page, url): """Display a URL to the user.""" raise NotImplementedError def itemized_list(self, page, title, items): """Display an itemized list.""" raise NotImplementedError def step(self, page, text, func): """Add a step of a multi-step operation. This will indicate when it's starting and when it's complete. """ raise NotImplementedError def error(self, text, force_wait=False, done_func=None): """Display a block of error text to the user.""" raise NotImplementedError class ConsoleUI(UIToolkit): """A UI toolkit that simply prints to the console.""" def __init__(self): """Initialize the UI toolkit.""" super(UIToolkit, self).__init__() self.header_wrapper = textwrap.TextWrapper(initial_indent="* ", subsequent_indent=" ") indent_str = " " * 4 self.text_wrapper = textwrap.TextWrapper(initial_indent=indent_str, subsequent_indent=indent_str, break_long_words=False) self.error_wrapper = textwrap.TextWrapper(initial_indent="[!] ", subsequent_indent=" ", break_long_words=False) def page(self, text, allow_back=True, is_visible_func=None, on_show_func=None): """Add a new "page" to display to the user. In the console UI, we only care if we need to display or ask questions for this page. Our representation of a page in this case is simply a boolean value. If False, nothing associated with this page will be displayed to the user. """ visible = not is_visible_func or is_visible_func() if not visible: return False if on_show_func: on_show_func() print() print() print(self.header_wrapper.fill(text)) return True def prompt_input(self, page, prompt, default=None, password=False, yes_no=False, optional=False, normalize_func=None, save_obj=None, save_var=None): """Prompt the user for some text. 
This may contain a default value.""" assert save_obj assert save_var if not page: return if yes_no: if default: prompt = '%s [Y/n]' % prompt else: prompt = '%s [y/N]' % prompt default = False elif default: self.text(page, "The default is %s" % default) prompt = "%s [%s]" % (prompt, default) elif optional: prompt = '%s (optional)' % prompt print() prompt += ": " value = None while not value: if password: temp_value = getpass.getpass(force_str(prompt)) if save_var.startswith('reenter'): if not self.confirm_reentry(save_obj, save_var, temp_value): self.error("Passwords must match.") continue value = temp_value else: value = input(prompt) if not value: if default: value = default elif optional: break if yes_no: if isinstance(value, bool): # This came from the 'default' value. norm_value = value else: assert isinstance(value, six.string_types) norm_value = value.lower() if norm_value not in (True, False, 'y', 'n', 'yes', 'no'): self.error('Must specify one of Y/y/yes or N/n/no.') value = None continue else: value = norm_value in (True, 'y', 'yes') break elif not value: self.error("You must answer this question.") if normalize_func: value = normalize_func(value) setattr(save_obj, save_var, value) def confirm_reentry(self, obj, reenter_var, value): """Confirm whether a re-entered piece of data matches. This is used to ensure that secrets and passwords are what the user intended to type. """ first_var = reenter_var.replace('reenter_', '') first_entry = getattr(site, first_var) return first_entry == value def prompt_choice(self, page, prompt, choices, save_obj=None, save_var=None): """Prompt the user for an item amongst a list of choices.""" assert save_obj assert save_var if not page: return self.text(page, "You can type either the name or the number " "from the list below.") valid_choices = [] i = 0 for choice in choices: description = '' enabled = True if isinstance(choice, six.string_types): text = choice elif len(choice) == 2: text, enabled = choice else: text, description, enabled = choice if enabled: self.text(page, "(%d) %s %s\n" % (i + 1, text, description), leading_newline=(i == 0)) valid_choices.append(text) i += 1 print() prompt += ": " choice = None while not choice: choice = input(prompt) if choice not in valid_choices: try: i = int(choice) - 1 if 0 <= i < len(valid_choices): choice = valid_choices[i] break except ValueError: pass self.error("'%s' is not a valid option." % choice) choice = None setattr(save_obj, save_var, choice) def text(self, page, text, leading_newline=True, wrap=True): """Display a block of text to the user. This will wrap the block to fit on the user's screen. """ if not page: return if leading_newline: print() if wrap: print(self.text_wrapper.fill(text)) else: print(' %s' % text) def disclaimer(self, page, text): """Display a disclaimer to the user.""" self.text(page, 'NOTE: %s' % text) def urllink(self, page, url): """Display a URL to the user.""" self.text(page, url, wrap=False) def itemized_list(self, page, title, items): """Display an itemized list.""" if title: self.text(page, "%s:" % title) for item in items: self.text(page, " * %s" % item, False) def step(self, page, text, func): """Add a step of a multi-step operation. This will indicate when it's starting and when it's complete. """ sys.stdout.write("%s ... 
" % text) func() print("OK") def error(self, text, force_wait=False, done_func=None): """Display a block of error text to the user.""" print() for text_block in text.split('\n'): print(self.error_wrapper.fill(text_block)) if force_wait: print() input('Press Enter to continue') if done_func: done_func() class Command(object): """An abstract command.""" needs_ui = False def add_options(self, parser): """Add any command-specific options to the parser.""" pass def run(self): """Run the command.""" pass class InstallCommand(Command): """Installer command. This command installs a new Review Board site tree and generates web server configuration files. This will ask several questions about the site before performing the installation. """ needs_ui = True def add_options(self, parser): """Add any command-specific options to the parser.""" is_windows = platform.system() == "Windows" group = OptionGroup(parser, "'install' command", self.__doc__.strip()) group.add_option('--advanced', action='store_true', dest='advanced', default=False, help='provide more advanced configuration options') group.add_option("--copy-media", action="store_true", dest="copy_media", default=is_windows, help="copy media files instead of symlinking") group.add_option("--noinput", action="store_true", default=False, help="run non-interactively using configuration " "provided in command-line options") group.add_option('--opt-out-support-data', action='store_false', default=True, dest='send_support_usage_stats', help='opt out of sending data and stats for ' 'improved user and admin support') group.add_option("--company", help="the name of the company or organization that " "owns the server") group.add_option("--domain-name", help="fully-qualified host name of the site, " "excluding the http://, port or path") group.add_option("--site-root", default="/", help="path to the site relative to the domain name") group.add_option("--static-url", default="static/", help="the URL containing the static (shipped) " "media files") group.add_option("--media-url", default="media/", help="the URL containing the uploaded media files") group.add_option("--db-type", help="database type (mysql, postgresql or sqlite3)") group.add_option("--db-name", default="reviewboard", help="database name (not for sqlite3)") group.add_option("--db-host", default="localhost", help="database host (not for sqlite3)") group.add_option("--db-user", help="database user (not for sqlite3)") group.add_option("--db-pass", help="password for the database user " "(not for sqlite3)") group.add_option("--cache-type", default='memcached', help="cache server type (memcached or file)") group.add_option("--cache-info", default='localhost:11211', help="cache identifier (memcached connection string " "or file cache directory)") group.add_option("--web-server-type", default='apache', help="web server (apache or lighttpd)") group.add_option("--web-server-port", help="port that the web server should listen on", default='80') group.add_option("--python-loader", default='wsgi', help="python loader for apache (fastcgi or wsgi)") group.add_option("--admin-user", default="admin", help="the site administrator's username") group.add_option("--admin-password", help="the site administrator's password") group.add_option("--admin-email", help="the site administrator's e-mail address") # UNIX-specific arguments if not is_windows: group.add_option("--sitelist", default=SITELIST_FILE_UNIX, help="the path to a file storing a list of " "installed sites") parser.add_option_group(group) def run(self): 
"""Run the command.""" if not self.check_permissions(): return site.__dict__.update(options.__dict__) self.print_introduction() if self.print_missing_dependencies(): # There were required dependencies missing. Don't show any more # pages. return if not options.noinput: self.ask_domain() self.ask_site_root() if options.advanced: self.ask_shipped_media_url() self.ask_uploaded_media_url() self.ask_database_type() self.ask_database_name() self.ask_database_host() self.ask_database_login() if options.advanced: self.ask_cache_type() self.ask_cache_info() if options.advanced: self.ask_web_server_type() self.ask_python_loader() self.ask_admin_user() self.ask_support_data() # Do not ask for sitelist file, it should not be common. self.show_install_status() self.show_finished() self.show_get_more() def normalize_root_url_path(self, path): """Convert user-specified root URL paths to a normal format.""" if not path.endswith("/"): path += "/" if not path.startswith("/"): path = "/" + path return path def normalize_media_url_path(self, path): """Convert user-specified media URLs to a normal format.""" if not path.endswith("/"): path += "/" if path.startswith("/"): path = path[1:] return path def check_permissions(self): """Check that permissions are usable. If not, this will show an error to the user. """ # Make sure we can create the directory first. try: # TODO: Do some chown tests too. if os.path.exists(site.install_dir): # Remove it first, to see if we own it and to handle the # case where the directory is empty as a result of a # previously canceled install. os.rmdir(site.install_dir) os.mkdir(site.install_dir) # Don't leave a mess. We'll actually do this at the end. os.rmdir(site.install_dir) return True except OSError: # Likely a permission error. ui.error("Unable to create the %s directory. Make sure " "you're running as an administrator and that the " "directory does not contain any files." % site.install_dir, done_func=lambda: sys.exit(1)) return False def print_introduction(self): """Print an introduction to the site installer.""" page = ui.page("Welcome to the Review Board site installation wizard") ui.text(page, "This will prepare a Review Board site installation in:") ui.text(page, site.abs_install_dir) ui.text(page, "We need to know a few things before we can prepare " "your site for installation. This will only take a few " "minutes.") def print_missing_dependencies(self): """Print information on any missing dependencies.""" fatal, missing_dep_groups = Dependencies.get_missing() if missing_dep_groups: if fatal: page = ui.page("Required modules are missing") ui.text(page, "You are missing Python modules that are " "needed before the installation process. 
" "You will need to install the necessary " "modules and restart the install.") else: page = ui.page("Make sure you have the modules you need") ui.text(page, "Depending on your installation, you may need " "certain Python modules and servers that are " "missing.") ui.text(page, "If you need support for any of the following, " "you will need to install the necessary " "modules and restart the install.") for group in missing_dep_groups: ui.itemized_list(page, group['title'], group['dependencies']) return fatal def ask_domain(self): """Ask the user what domain Review Board will be served from.""" page = ui.page("What's the host name for this site?") ui.text(page, "This should be the fully-qualified host name without " "the http://, port or path.") ui.prompt_input(page, "Domain Name", site.domain_name, save_obj=site, save_var="domain_name") def ask_site_root(self): """Ask the user what site root they'd like.""" page = ui.page("What URL path points to Review Board?") ui.text(page, "Typically, Review Board exists at the root of a URL. " "For example, http://reviews.example.com/. In this " "case, you would specify \"/\".") ui.text(page, "However, if you want to listen to, say, " "http://example.com/reviews/, you can specify " '"/reviews/".') ui.text(page, "Note that this is the path relative to the domain and " "should not include the domain name.") ui.prompt_input(page, "Root Path", site.site_root, normalize_func=self.normalize_root_url_path, save_obj=site, save_var="site_root") def ask_shipped_media_url(self): """Ask the user the URL where shipped media files are served.""" page = ui.page("What URL will point to the shipped media files?") ui.text(page, "While most installations distribute media files on " "the same server as the rest of Review Board, some " "custom installs may instead have a separate server " "for this purpose.") ui.text(page, "If unsure, don't change the default.") ui.prompt_input(page, "Shipped Media URL", site.static_url, normalize_func=self.normalize_media_url_path, save_obj=site, save_var="static_url") def ask_uploaded_media_url(self): """Ask the user the URL where uploaded media files are served.""" page = ui.page("What URL will point to the uploaded media files?") ui.text(page, "Note that this is different from shipped media. This " "is where all uploaded screenshots, file attachments, " "and extension media will go. It must be a different " "location from the shipped media.") ui.text(page, "If unsure, don't change the default.") ui.prompt_input(page, "Uploaded Media URL", site.media_url, normalize_func=self.normalize_media_url_path, save_obj=site, save_var="media_url") def ask_database_type(self): """Ask the user for the database type.""" page = ui.page("What database type will you be using?") ui.prompt_choice( page, "Database Type", [ ("mysql", Dependencies.get_support_mysql()), ("postgresql", Dependencies.get_support_postgresql()), ("sqlite3", "(not supported for production use)", Dependencies.get_support_sqlite()) ], save_obj=site, save_var="db_type") def ask_database_name(self): """Ask the user for the database name.""" def determine_sqlite_path(): site.db_name = sqlite_db_name sqlite_db_name = os.path.join(site.abs_install_dir, "data", "reviewboard.db") # Appears only if using sqlite. 
page = ui.page("Determining database file path", is_visible_func=lambda: site.db_type == "sqlite3", on_show_func=determine_sqlite_path) ui.text(page, "The sqlite database file will be stored in %s" % sqlite_db_name) ui.text(page, "If you are migrating from an existing " "installation, you can move your existing " "database there, or edit settings_local.py to " "point to your old location.") # Appears only if not using sqlite. page = ui.page("What database name should Review Board use?", is_visible_func=lambda: site.db_type != "sqlite3") ui.disclaimer(page, "You need to create this database and grant " "user modification rights before continuing. " "See your database documentation for more " "information.") ui.prompt_input(page, "Database Name", site.db_name, save_obj=site, save_var="db_name") def ask_database_host(self): """Ask the user for the database host.""" page = ui.page("What is the database server's address?", is_visible_func=lambda: site.db_type != "sqlite3") ui.text(page, "This should be specified in hostname:port form. " "The port is optional if you're using a standard " "port for the database type.") ui.prompt_input(page, "Database Server", site.db_host, save_obj=site, save_var="db_host") def ask_database_login(self): """Ask the user for database login credentials.""" page = ui.page("What is the login and password for this database?", is_visible_func=lambda: site.db_type != "sqlite3") ui.text(page, "This must be a user that has table creation and " "modification rights on the database you already " "specified.") ui.prompt_input(page, "Database Username", site.db_user, save_obj=site, save_var="db_user") ui.prompt_input(page, "Database Password", site.db_pass, password=True, save_obj=site, save_var="db_pass") ui.prompt_input(page, "Confirm Database Password", password=True, save_obj=site, save_var="reenter_db_pass") def ask_cache_type(self): """Ask the user what type of caching they'd like to use.""" page = ui.page("What cache mechanism should be used?") ui.text(page, "memcached is strongly recommended. Use it unless " "you have a good reason not to.") ui.prompt_choice(page, "Cache Type", [("memcached", "(recommended)", Dependencies.get_support_memcached()), "file"], save_obj=site, save_var="cache_type") def ask_cache_info(self): """Ask the user for caching configuration.""" # Appears only if using memcached. page = ui.page("What memcached host should be used?", is_visible_func=lambda: site.cache_type == "memcached") ui.text(page, "This is in the format of hostname:port") ui.prompt_input(page, "Memcache Server", site.cache_info, save_obj=site, save_var="cache_info") # Appears only if using file caching. 
page = ui.page("Where should the temporary cache files be stored?", is_visible_func=lambda: site.cache_type == "file") ui.prompt_input(page, "Cache Directory", site.cache_info or DEFAULT_FS_CACHE_PATH, save_obj=site, save_var="cache_info") def ask_web_server_type(self): """Ask the user which web server they're using.""" page = ui.page("What web server will you be using?") ui.prompt_choice(page, "Web Server", ["apache", "lighttpd"], save_obj=site, save_var="web_server_type") def ask_python_loader(self): """Ask the user which Python loader they're using.""" page = ui.page("What Python loader module will you be using?", is_visible_func=lambda: (site.web_server_type == "apache")) ui.text(page, "Based on our experiences, we recommend using " "wsgi with Review Board.") ui.prompt_choice(page, "Python Loader", [ ("wsgi", "(recommended)", True), "fastcgi", ], save_obj=site, save_var="python_loader") def ask_admin_user(self): """Ask the user to create an admin account.""" page = ui.page("Create an administrator account") ui.text(page, "To configure Review Board, you'll need an " "administrator account. It is advised to have one " "administrator and then use that account to grant " "administrator permissions to your personal user " "account.") ui.text(page, "If you plan to use NIS or LDAP, use an account name " "other than your NIS/LDAP account so as to prevent " "conflicts.") ui.prompt_input(page, "Username", site.admin_user, save_obj=site, save_var="admin_user") ui.prompt_input(page, "Password", site.admin_password, password=True, save_obj=site, save_var="admin_password") ui.prompt_input(page, "Confirm Password", password=True, save_obj=site, save_var="reenter_admin_password") ui.prompt_input(page, "E-Mail Address", site.admin_email, save_obj=site, save_var="admin_email") ui.prompt_input(page, "Company/Organization Name", site.company, save_obj=site, save_var="company", optional=True) def ask_support_data(self): """Ask the user if they'd like to enable support data collection.""" page = ui.page('Enable collection of data for better support') ui.text(page, 'We would like to periodically collect data and ' 'statistics about your installation to provide a ' 'better support experience for you and your users.') ui.text(page, 'The data collected includes basic information such as ' 'your company name, the version of Review Board, and ' 'the size of your install. It does NOT include ' 'confidential data such as source code. Data collected ' 'never leaves our server and is never given to any ' 'third parties for any purposes.') ui.text(page, 'We use this to provide a user support page that\'s ' 'more specific to your server. 
We also use it to ' 'determine which versions to continue to support, and ' 'to help track how upgrades affect our number of bug ' 'reports and support incidents.') ui.text(page, 'You can choose to turn this off at any time in ' 'Support Settings in Review Board.') ui.prompt_input(page, 'Allow us to collect support data?', site.send_support_usage_stats, yes_no=True, save_obj=site, save_var='send_support_usage_stats') def show_install_status(self): """Show the install status page.""" page = ui.page("Installing the site...", allow_back=False) ui.step(page, "Building site directories", site.rebuild_site_directory) ui.step(page, "Building site configuration files", site.generate_config_files) ui.step(page, "Creating database", site.sync_database) ui.step(page, "Performing migrations", site.migrate_database) ui.step(page, "Creating administrator account", site.create_admin_user) ui.step(page, "Saving site settings", self.save_settings) ui.step(page, "Setting up support", self.setup_support) def show_finished(self): """Show the finished page.""" page = ui.page("The site has been installed", allow_back=False) ui.text(page, "The site has been installed in %s" % site.abs_install_dir) ui.text(page, "Sample configuration files for web servers and " "cron are available in the conf/ directory.") ui.text(page, "You need to modify the ownership of the " "following directories and their contents to be owned " "by the web server:") ui.itemized_list(page, None, [ os.path.join(site.abs_install_dir, 'htdocs', 'media', 'uploaded'), os.path.join(site.abs_install_dir, 'htdocs', 'media', 'ext'), os.path.join(site.abs_install_dir, 'htdocs', 'static', 'ext'), os.path.join(site.abs_install_dir, 'data'), ]) ui.text(page, "For more information, visit:") ui.urllink(page, "%sadmin/installation/creating-sites/" % get_manual_url()) def show_get_more(self): """Show the "Get More out of Review Board" page.""" from reviewboard.admin.support import get_install_key page = ui.page('Get more out of Review Board', allow_back=False) ui.text(page, 'To enable PDF document review, enhanced scalability, ' 'GitHub Enterprise support, and more, download ' 'Power Pack at:') ui.urllink(page, 'https://www.reviewboard.org/powerpack/') ui.text(page, 'Your install key for Power Pack is: %s' % get_install_key()) ui.text(page, 'Support contracts for Review Board are also available:') ui.urllink(page, 'https://www.beanbaginc.com/support/contracts/') def save_settings(self): """Save some settings in the database.""" from django.contrib.sites.models import Site from djblets.siteconfig.models import SiteConfiguration cur_site = Site.objects.get_current() cur_site.domain = site.domain_name cur_site.save() if site.static_url.startswith("http"): site_static_url = site.static_url else: site_static_url = site.site_root + site.static_url if site.media_url.startswith("http"): site_media_url = site.media_url else: site_media_url = site.site_root + site.media_url htdocs_path = os.path.join(site.abs_install_dir, 'htdocs') site_media_root = os.path.join(htdocs_path, "media") site_static_root = os.path.join(htdocs_path, "static") siteconfig = SiteConfiguration.objects.get_current() siteconfig.set("company", site.company) siteconfig.set("send_support_usage_stats", site.send_support_usage_stats) siteconfig.set("site_static_url", site_static_url) siteconfig.set("site_static_root", site_static_root) siteconfig.set("site_media_url", site_media_url) siteconfig.set("site_media_root", site_media_root) siteconfig.set("site_admin_name", site.admin_user) 
siteconfig.set("site_admin_email", site.admin_email) siteconfig.save() if platform.system() != 'Windows': abs_sitelist = os.path.abspath(site.sitelist) # Add the site to the sitelist file. print("Saving site %s to the sitelist %s\n" % ( site.install_dir, abs_sitelist)) sitelist = SiteList(abs_sitelist) sitelist.add_site(site.install_dir) def setup_support(self): """Set up the support page for the installation.""" if site.send_support_usage_stats: site.register_support_page() class UpgradeCommand(Command): """Upgrades an existing site installation. This will synchronize media trees and upgrade the database, unless otherwise specified. """ def add_options(self, parser): """Add any command-specific options to the parser.""" group = OptionGroup(parser, "'upgrade' command", self.__doc__.strip()) group.add_option("--no-db-upgrade", action="store_false", dest="upgrade_db", default=True, help="don't upgrade the database") group.add_option("--all-sites", action="store_true", dest="all_sites", default=False, help="Upgrade all installed sites") parser.add_option_group(group) def run(self): """Run the command.""" site.setup_settings() diff_dedup_needed = site.get_diff_dedup_needed() static_media_upgrade_needed = site.get_static_media_upgrade_needed() data_dir_exists = os.path.exists( os.path.join(site.install_dir, "data")) print("Rebuilding directory structure") site.rebuild_site_directory() site.generate_cron_files() if site.get_settings_upgrade_needed(): print("Upgrading site settings_local.py") site.upgrade_settings() if options.upgrade_db: print("Updating database. This may take a while.\n" "\n" "The log output below, including warnings and errors,\n" "can be ignored unless upgrade fails.\n" "\n" "------------------ <begin log output> ------------------") site.sync_database() site.migrate_database() print("------------------- <end log output> -------------------\n" "\n" "Resetting in-database caches.") site.run_manage_command("fixreviewcounts") site.encrypt_passwords() from djblets.siteconfig.models import SiteConfiguration siteconfig = SiteConfiguration.objects.get_current() if siteconfig.get('send_support_usage_stats'): site.register_support_page() print() print("Upgrade complete!") if not data_dir_exists: # This is an upgrade of a site that pre-dates the new $HOME # directory ($sitedir/data). Tell the user how to upgrade things. print() print("A new 'data' directory has been created inside of your " "site") print("directory. This will act as the home directory for " "programs") print("invoked by Review Board.") print() print("You need to change the ownership of this directory so that") print("the web server can write to it.") if static_media_upgrade_needed: from django.conf import settings if 'manual-updates' not in siteconfig.settings: siteconfig.settings['manual-updates'] = {} siteconfig.settings['manual-updates']['static-media'] = False siteconfig.save() static_dir = "%s/htdocs/static" % \ site.abs_install_dir.replace('\\', '/') print() print("The location of static media files (CSS, JavaScript, " "images)") print("has changed. 
You will need to make manual changes to ") print("your web server configuration.") print() print("For Apache, you will need to add:") print() print(" <Location \"%sstatic\">" % settings.SITE_ROOT) print(" SetHandler None") print(" </Location>") print() print(" Alias %sstatic \"%s\"" % (settings.SITE_ROOT, static_dir)) print() print("For lighttpd:") print() print(" alias.url = (") print(" ...") print(" \"%sstatic\" => \"%s\"," % (settings.SITE_ROOT, static_dir)) print(" ...") print(" )") print() print(" url.rewrite-once = (") print(" ...") print(" \"^(%sstatic/.*)$\" => \"$1\"," % settings.SITE_ROOT) print(" ...") print(" )") print() print("Once you have made these changes, type the following ") print("to resolve this:") print() print(" $ rb-site manage %s resolve-check static-media" % site.abs_install_dir) if diff_dedup_needed: print() print('There are duplicate copies of diffs in your database that ' 'can be condensed.') print('These are the result of posting several iterations of a ' 'change for review on') print('older versions of Review Board.') print() print('Removing duplicate diff data will save space in your ' 'database and speed up') print('future upgrades.') print() print('To condense duplicate diffs, type the following:') print() print(' $ rb-site manage %s condensediffs' % site.abs_install_dir) class ManageCommand(Command): """Runs a Django management command on the site.""" help_text = ( 'Runs a Django management command on the site. ' 'Usage: `rb-site manage <path> <command> -- <arguments>.` ' 'Run `manage -- --help` for the list of commands.' ) def add_options(self, parser): """Add any command-specific options to the parser.""" group = OptionGroup(parser, "'manage' command", self.help_text) parser.add_option_group(group) def run(self): """Run the command.""" site.setup_settings() from reviewboard import initialize initialize() if len(args) == 0: ui.error("A manage command is needed.", done_func=lambda: sys.exit(1)) else: site.run_manage_command(args[0], args[1:]) sys.exit(0) # A list of all commands supported by rb-site. COMMANDS = { "install": InstallCommand(), "upgrade": UpgradeCommand(), "manage": ManageCommand(), } def parse_options(args): """Parse the given options.""" global options parser = OptionParser(usage="%prog command [options] path", version="%prog " + VERSION) parser.add_option("-d", "--debug", action="store_true", dest="debug", default=DEBUG, help="display debug output") sorted_commands = list(COMMANDS.keys()) sorted_commands.sort() for cmd_name in sorted_commands: command = COMMANDS[cmd_name] command.add_options(parser) (options, args) = parser.parse_args(args) if len(args) < 1: parser.print_help() sys.exit(1) command = args[0] # Check whether we've been asked to upgrade all installed sites # by 'rb-site upgrade' with no path specified. 
if command == 'upgrade' and options.all_sites: sitelist = SiteList(options.sitelist) site_paths = sitelist.sites if len(site_paths) == 0: print("No Review Board sites listed in %s" % sitelist.path) sys.exit(0) elif len(args) >= 2 and command in COMMANDS: site_paths = [args[1]] else: parser.print_help() sys.exit(1) globals()["args"] = args[2:] return (command, site_paths) def main(): """Main application loop.""" global site global ui command_name, site_paths = parse_options(sys.argv[1:]) command = COMMANDS[command_name] ui = ConsoleUI() for install_dir in site_paths: site = Site(install_dir, options) os.putenv(b'HOME', os.path.join(site.install_dir, 'data').encode('utf-8')) command.run() ui.run() if __name__ == "__main__": main()
beol/reviewboard
reviewboard/cmdline/rbsite.py
Python
mit
71263
[ "VisIt" ]
72dd6f2595ce518c5f4ba8d6811fdf6154fd15c234543743ae7836574ae9417c
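The rb-site driver above wires its subcommands through a plain dict of command objects, each contributing an OptionGroup to one shared OptionParser before dispatch. Below is a minimal, self-contained sketch of that pattern; the GreetCommand name and --shout flag are made up for illustration and are not part of Review Board.

from optparse import OptionParser, OptionGroup
import sys

class GreetCommand(object):
    """Greets the given site path."""

    def add_options(self, parser):
        # Each command registers its own flags under a named group,
        # mirroring how InstallCommand/UpgradeCommand do it above.
        group = OptionGroup(parser, "'greet' command", self.__doc__.strip())
        group.add_option("--shout", action="store_true", dest="shout",
                         default=False, help="greet loudly")
        parser.add_option_group(group)

    def run(self, path, options):
        greeting = "Hello, %s" % path
        print(greeting.upper() if options.shout else greeting)

COMMANDS = {"greet": GreetCommand()}

def main(argv):
    parser = OptionParser(usage="%prog command [options] path")
    for command in COMMANDS.values():
        command.add_options(parser)
    options, args = parser.parse_args(argv)
    if len(args) < 2 or args[0] not in COMMANDS:
        parser.print_help()
        sys.exit(1)
    COMMANDS[args[0]].run(args[1], options)

if __name__ == "__main__":
    main(sys.argv[1:])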
#!/usr/bin/env python

"""
Utilities to help with grid io

NOTE: this isn't used yet, but should be useful for loading
non UGRID-compliant files.

Ideally, we'll add loading from shape files or what have you in the future
"""

from __future__ import (absolute_import, division, print_function)

import netCDF4
import numpy as np

from gridded.pyugrid.ugrid import UGrid
from gridded.utilities import get_dataset


def load_from_varnames(filename, names_mapping, attribute_check=None,
                       post_process=None):
    """
    Load a UGrid from a netcdf file where the roles are defined by the
    names of the variables.

    :param filename: name of the file to load (or OPeNDAP URL).

    :param names_mapping: dict that maps the variable names to UGrid components

    :param attribute_check=None: list of global attributes that are expected
    :type attribute_check: list of tuples to check. Example:
                           [('grid_type', 'triangular'), ] will check if the
                           grid_type attribute is set to "triangular"

    :param post_process: function to call to do some custom post processing.
                         it should be a callable that takes (Dataset, UGrid)

    The names_mapping dict has to contain at least:

    'nodes_lon', 'nodes_lat'

    Optionally (and mostly required), it can contain:

    'face_face_connectivity', 'face_coordinates_lon',
    'face_coordinates_lat', and 'faces'
    """
    ug = UGrid()
    attribute_check = {} if attribute_check is None else attribute_check

    nc = get_dataset(filename)
    # nc = netCDF4.Dataset(filename)

    # Check for the specified attributes.
    for name, value in attribute_check:
        if nc.getncattr(name).lower() != value:
            raise ValueError('This does not appear to be a valid file:\n'
                             'It does not have the "{}"="{}"'
                             'global attribute set'.format(name, value))

    # Nodes.
    lon = nc.variables[names_mapping['nodes_lon']]
    lat = nc.variables[names_mapping['nodes_lat']]

    num_nodes = lon.size
    ug.nodes = np.zeros((num_nodes, 2), dtype=lon.dtype)
    ug.nodes[:, 0] = lon[:]
    ug.nodes[:, 1] = lat[:]

    # Faces.
    faces = nc.variables[names_mapping['faces']]

    # FIXME: This logic assumes there are more than three triangles.
    if faces.shape[0] <= faces.shape[1]:
        # Fortran order.
        faces = faces[:].T
    else:
        faces = faces[:]

    # One-indexed?
    if faces.min() == 1:
        one_indexed = True
    else:
        one_indexed = False

    if one_indexed:
        faces -= 1
    ug.faces = faces

    # Connectivity (optional).
    if 'face_face_connectivity' in names_mapping:
        face_face_connectivity = nc.variables[names_mapping['face_face_connectivity']]  # noqa
        # FIXME: This logic assumes there are more than three triangles.
        if face_face_connectivity.shape[0] <= face_face_connectivity.shape[1]:
            # Fortran order.
            face_face_connectivity = face_face_connectivity[:].T
        else:
            face_face_connectivity = face_face_connectivity[:]
        if one_indexed:
            face_face_connectivity -= 1
        ug.face_face_connectivity = face_face_connectivity

    # Center (optional).
    if ('face_coordinates_lon' in names_mapping and
            'face_coordinates_lat' in names_mapping):
        ug.face_coordinates = np.zeros((len(ug.faces), 2), dtype=lon.dtype)
        ug.face_coordinates[:, 0] = nc.variables[names_mapping['face_coordinates_lon']][:]  # noqa
        ug.face_coordinates[:, 1] = nc.variables[names_mapping['face_coordinates_lat']][:]  # noqa

    # Boundaries (optional).
    if 'boundaries' in names_mapping:
        # FIXME: this one is weird and non-conforming!
        # Ignoring the second two fields. What are they?
        boundaries = nc.variables[names_mapping['boundaries']][:, :2]
        if one_indexed:
            boundaries -= 1
        ug.boundaries = boundaries

    if post_process is not None:
        post_process(nc, ug)

    return ug
NOAA-ORR-ERD/gridded
gridded/pyugrid/grid_io/utils.py
Python
unlicense
4122
[ "NetCDF" ]
234b1c5c360cc0c5ec10e74fea8b9b36fc486a996cb9e79c02c16af04f051f4f
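The faces handling in load_from_varnames above leans on two heuristics: transpose arrays that look Fortran-ordered (fewer rows than columns), and shift one-indexed connectivity to zero-based. A standalone sketch of just those two steps; the 3x4 sample array is made up for illustration, not read from a real UGRID file.

import numpy as np

def normalize_faces(faces):
    faces = np.asarray(faces)
    # Heuristic from the loader: assumes there are more faces
    # than nodes-per-face, so a wide array must be transposed.
    if faces.shape[0] <= faces.shape[1]:
        faces = faces.T
    # One-indexed connectivity (common in Fortran-produced files)
    # becomes zero-indexed.
    if faces.min() == 1:
        faces = faces - 1
    return faces

raw = np.array([[1, 2, 3, 4],   # 3 x 4: looks Fortran-ordered
                [2, 3, 4, 5],
                [3, 4, 5, 1]])
print(normalize_faces(raw))     # 4 triangles, zero-based indices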
# $Id$ # # Copyright (C) 2003 Rational Discovery LLC # All Rights Reserved # from rdkit import RDConfig import sys,os,types from rdkit import Chem from rdkit.VLib.Filter import FilterNode class SmartsFilter(FilterNode): """ filter out molecules matching one or more SMARTS patterns There is a count associated with each pattern. Molecules are allowed to match the pattern up to this number of times. Assumptions: - inputs are molecules Sample Usage: >>> smis = ['C1CCC1','C1CCC1C=O','CCCC','CCC=O','CC(=O)C','CCN','NCCN','NCC=O'] >>> mols = [Chem.MolFromSmiles(x) for x in smis] >>> from rdkit.VLib.Supply import SupplyNode >>> suppl = SupplyNode(contents=mols) >>> ms = [x for x in suppl] >>> len(ms) 8 We can pass in SMARTS strings: >>> smas = ['C=O','CN'] >>> counts = [1,2] >>> filt = SmartsFilter(patterns=smas,counts=counts) >>> filt.AddParent(suppl) >>> ms = [x for x in filt] >>> len(ms) 5 Alternatively, we can pass in molecule objects: >>> mols =[Chem.MolFromSmarts(x) for x in smas] >>> counts = [1,2] >>> filt.Destroy() >>> filt = SmartsFilter(patterns=mols,counts=counts) >>> filt.AddParent(suppl) >>> ms = [x for x in filt] >>> len(ms) 5 Negation does what you'd expect: >>> filt.SetNegate(1) >>> ms = [x for x in filt] >>> len(ms) 3 """ def __init__(self,patterns=[],counts=[],**kwargs): FilterNode.__init__(self,func=self.filter,**kwargs) self._initPatterns(patterns,counts) def _initPatterns(self,patterns,counts): nPatts = len(patterns) if len(counts) and len(counts)!=nPatts: raise ValueError,'if counts is specified, it must match patterns in length' if not len(counts): counts = [1]*nPatts targets = [None]*nPatts for i in range(nPatts): p = patterns[i] c = counts[i] if type(p) in types.StringTypes: m = Chem.MolFromSmarts(p) if not m: raise ValueError,'bad smarts: %s'%(p) p = m targets[i] = p,c self._patterns = tuple(targets) def filter(self,cmpd): neg = self.Negate() res = 0 #sys.stderr.write('\tFILTER: %s\n'%(Chem.MolToSmiles(cmpd))) for patt,count in self._patterns: ms = cmpd.GetSubstructMatches(patt) nMatches = len(ms) if nMatches >= count: # this query is an or, so we short circuit true: res = 1 break return res #------------------------------------ # # doctest boilerplate # def _test(): import doctest,sys return doctest.testmod(sys.modules["__main__"]) if __name__ == '__main__': import sys failed,tried = _test() sys.exit(failed)
rdkit/rdkit-orig
rdkit/VLib/NodeLib/SmartsMolFilter.py
Python
bsd-3-clause
2725
[ "RDKit" ]
2af8d7b799bc0a933d1d48576492245144606cfcabf1eb7835d29e2cb927b166
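The core of SmartsFilter.filter above is an OR over (pattern, count) pairs using GetSubstructMatches. Here is a reduced sketch of that rule outside the VLib node machinery; the molecules and thresholds come from the doctest above, while the matches_any helper is a made-up name for this sketch.

from rdkit import Chem

def matches_any(mol, patterns):
    """patterns: list of (SMARTS string, minimum match count)."""
    for smarts, count in patterns:
        patt = Chem.MolFromSmarts(smarts)
        # A molecule passes as soon as one pattern reaches its count,
        # i.e. the query is an OR with per-pattern thresholds.
        if patt is not None and len(mol.GetSubstructMatches(patt)) >= count:
            return True
    return False

mols = [Chem.MolFromSmiles(s) for s in ('CCC=O', 'CCCC', 'NCCN')]
patterns = [('C=O', 1), ('CN', 2)]
print([matches_any(m, patterns) for m in mols])  # [True, False, True]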
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from auryntools import *

# This code snippet assumes that you have run the example simulation
# sim_coba_binmon with default parameters.
# This generates spk output files under /tmp/

filename = "/tmp/coba.0.e.spk"
seconds = 0.1

sf = AurynBinarySpikeFile(filename)
spikes = np.array(sf.get_last(seconds))
plt.scatter(spikes[:,0], spikes[:,1])
plt.xlabel("Time [s]")
plt.ylabel("Neuron ID")
plt.show()
fzenke/auryn
tools/python/simple_spike_raster.py
Python
gpl-3.0
484
[ "NEURON" ]
72329f864af9954c9cd9da5819e79cadfab367cf70a27d9e173ccf112ecabeaa
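The raster above needs the .spk output of an Auryn run. The same two-column (time, neuron id) plotting convention can be tried without a simulation by substituting synthetic spikes for AurynBinarySpikeFile; the random data below is purely illustrative.

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
n_spikes, n_neurons = 2000, 100
# Column 0: spike times in seconds; column 1: neuron ids,
# matching the layout AurynBinarySpikeFile.get_last() returns.
spikes = np.column_stack([rng.uniform(0.0, 0.1, n_spikes),
                          rng.integers(0, n_neurons, n_spikes)])

plt.scatter(spikes[:, 0], spikes[:, 1], s=2)
plt.xlabel("Time [s]")
plt.ylabel("Neuron ID")
plt.show()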
#########
# getNavData.py
# This program is part of the online PS-Drone-API-tutorial on www.playsheep.de/drone.
# It shows how to read out selected NavData-values from a Parrot AR.Drone 2.0 using the PS-Drone-API. The drone will stay on the ground.
# Dependencies: a POSIX OS, PS-Drone-API 2.0 beta or higher.
# (w) J. Philipp de Graaff, www.playsheep.de, 2014
##########
# LICENCE:
# Artistic License 2.0 as seen on http://opensource.org/licenses/artistic-license-2.0 (retrieved December 2014)
# Visit www.playsheep.de/drone or see the PS-Drone-API-documentation for an abstract from the Artistic License 2.0.
###########

##### Suggested clean drone startup sequence #####
import time, sys
import api.ps_drone as ps_drone      # Import PS-Drone-API

drone = ps_drone.Drone()             # Start using drone
drone.startup()                      # Connects to drone and starts subprocesses
drone.reset()                        # Sets drone's status to good (LEDs turn green when red)
while (drone.getBattery()[0] == -1):  time.sleep(0.1)  # Wait until the drone has done its reset
print "Battery: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1])  # Gives a battery-status
drone.useDemoMode(False)             # Give me everything...fast
drone.getNDpackage(["demo","pressure_raw","altitude","magneto","wifi"])  # Packets, which shall be decoded
time.sleep(1.0)                      # Give it some time to awake fully after reset

##### Mainprogram begin #####
NDC = drone.NavDataCount
end = False
while not end:
    while drone.NavDataCount == NDC:  time.sleep(0.001)  # Wait until next time-unit
    if drone.getKey():  end = True                       # Stop if any key is pressed
    NDC = drone.NavDataCount
    print "-----------"
    print "Attitude [X,Y,Z] : "+str(drone.NavData["demo"][2])
    print "Altitude / sensor / pressure: "+str(drone.NavData["altitude"][3])+" / "+str(drone.State[21])+" / "+str(drone.NavData["pressure_raw"][0])
    print "Magnetometer [X,Y,Z]: "+str(drone.NavData["magneto"][0])
    print "Wifi link quality: "+str(drone.NavData["wifi"])
reixd/ps-drone
tutorials/getNavData.py
Python
artistic-2.0
2454
[ "VisIt" ]
bbe834f00ac9ce5e71a2ec8f8127d636c85b6f11ed35e8b08e49ed37625830e8
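The main loop of getNavData.py above hinges on one idiom: remember the last NavDataCount and block until the drone publishes a newer packet. Below is a hardware-free sketch of that change-detection loop; FakeDrone is a made-up stand-in for ps_drone.Drone, not part of the PS-Drone API.

import time, itertools

class FakeDrone(object):
    def __init__(self):
        self._counter = itertools.count(1)
        self.NavDataCount = 0
        self.NavData = {}

    def poll(self):
        # Stands in for the background thread that updates NavData.
        time.sleep(0.01)
        self.NavDataCount = next(self._counter)
        self.NavData = {"altitude": [0, 0, 0, 0.5 + 0.01 * self.NavDataCount]}

drone = FakeDrone()
NDC = drone.NavDataCount
for _ in range(5):
    while drone.NavDataCount == NDC:  # wait for the next NavData packet
        drone.poll()                  # with the real API: time.sleep(0.001)
    NDC = drone.NavDataCount
    print("Altitude: %.2f m" % drone.NavData["altitude"][3])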
#!/usr/bin/python # # Copyright 2007 Google Inc. # Licensed to PSF under a Contributor Agreement. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for ipaddr module.""" import unittest import time import ipaddr # Compatibility function to cast str to bytes objects if ipaddr._compat_has_real_bytes: _cb = lambda bytestr: bytes(bytestr, 'charmap') else: _cb = str class IpaddrUnitTest(unittest.TestCase): def setUp(self): self.ipv4 = ipaddr.IPv4Network('1.2.3.4/24') self.ipv4_hostmask = ipaddr.IPv4Network('10.0.0.1/0.255.255.255') self.ipv6 = ipaddr.IPv6Network('2001:658:22a:cafe:200:0:0:1/64') def tearDown(self): del(self.ipv4) del(self.ipv4_hostmask) del(self.ipv6) del(self) def testRepr(self): self.assertEqual("IPv4Network('1.2.3.4/32')", repr(ipaddr.IPv4Network('1.2.3.4'))) self.assertEqual("IPv6Network('::1/128')", repr(ipaddr.IPv6Network('::1'))) def testAutoMasking(self): addr1 = ipaddr.IPv4Network('1.1.1.255/24') addr1_masked = ipaddr.IPv4Network('1.1.1.0/24') self.assertEqual(addr1_masked, addr1.masked()) addr2 = ipaddr.IPv6Network('2000:cafe::efac:100/96') addr2_masked = ipaddr.IPv6Network('2000:cafe::/96') self.assertEqual(addr2_masked, addr2.masked()) # issue57 def testAddressIntMath(self): self.assertEqual(ipaddr.IPv4Address('1.1.1.1') + 255, ipaddr.IPv4Address('1.1.2.0')) self.assertEqual(ipaddr.IPv4Address('1.1.1.1') - 256, ipaddr.IPv4Address('1.1.0.1')) self.assertEqual(ipaddr.IPv6Address('::1') + (2**16 - 2), ipaddr.IPv6Address('::ffff')) self.assertEqual(ipaddr.IPv6Address('::ffff') - (2**16 - 2), ipaddr.IPv6Address('::1')) def testInvalidStrings(self): self.assertRaises(ValueError, ipaddr.IPNetwork, '') self.assertRaises(ValueError, ipaddr.IPNetwork, 'www.google.com') self.assertRaises(ValueError, ipaddr.IPNetwork, '1.2.3') self.assertRaises(ValueError, ipaddr.IPNetwork, '1.2.3.4.5') self.assertRaises(ValueError, ipaddr.IPNetwork, '301.2.2.2') self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7') self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:') self.assertRaises(ValueError, ipaddr.IPNetwork, ':2:3:4:5:6:7:8') self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:8:9') self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:8:') self.assertRaises(ValueError, ipaddr.IPNetwork, '1::3:4:5:6::8') self.assertRaises(ValueError, ipaddr.IPNetwork, 'a:') self.assertRaises(ValueError, ipaddr.IPNetwork, ':') self.assertRaises(ValueError, ipaddr.IPNetwork, ':::') self.assertRaises(ValueError, ipaddr.IPNetwork, '::a:') self.assertRaises(ValueError, ipaddr.IPNetwork, '1ffff::') self.assertRaises(ValueError, ipaddr.IPNetwork, '0xa::') self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:1a.2.3.4') self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:1.2.3.4:8') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, '') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, 'google.com') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, '::1.2.3.4') self.assertRaises(ipaddr.AddressValueError, 
ipaddr.IPv6Network, '') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, 'google.com') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, '1.2.3.4') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, 'cafe:cafe::/128/190') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, '1234:axy::b') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address, '1234:axy::b') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address, '2001:db8:::1') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address, '2001:888888::1') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Address(1)._ip_int_from_string, '1.a.2.3') self.assertEqual(False, ipaddr.IPv4Network(1)._is_hostmask('1.a.2.3')) def testGetNetwork(self): self.assertEqual(int(self.ipv4.network), 16909056) self.assertEqual(str(self.ipv4.network), '1.2.3.0') self.assertEqual(str(self.ipv4_hostmask.network), '10.0.0.0') self.assertEqual(int(self.ipv6.network), 42540616829182469433403647294022090752) self.assertEqual(str(self.ipv6.network), '2001:658:22a:cafe::') self.assertEqual(str(self.ipv6.hostmask), '::ffff:ffff:ffff:ffff') def testBadVersionComparison(self): # These should always raise TypeError v4addr = ipaddr.IPAddress('1.1.1.1') v4net = ipaddr.IPNetwork('1.1.1.1') v6addr = ipaddr.IPAddress('::1') v6net = ipaddr.IPAddress('::1') self.assertRaises(TypeError, v4addr.__lt__, v6addr) self.assertRaises(TypeError, v4addr.__gt__, v6addr) self.assertRaises(TypeError, v4net.__lt__, v6net) self.assertRaises(TypeError, v4net.__gt__, v6net) self.assertRaises(TypeError, v6addr.__lt__, v4addr) self.assertRaises(TypeError, v6addr.__gt__, v4addr) self.assertRaises(TypeError, v6net.__lt__, v4net) self.assertRaises(TypeError, v6net.__gt__, v4net) def testMixedTypeComparison(self): v4addr = ipaddr.IPAddress('1.1.1.1') v4net = ipaddr.IPNetwork('1.1.1.1/32') v6addr = ipaddr.IPAddress('::1') v6net = ipaddr.IPNetwork('::1/128') self.assertFalse(v4net.__contains__(v6net)) self.assertFalse(v6net.__contains__(v4net)) self.assertRaises(TypeError, lambda: v4addr < v4net) self.assertRaises(TypeError, lambda: v4addr > v4net) self.assertRaises(TypeError, lambda: v4net < v4addr) self.assertRaises(TypeError, lambda: v4net > v4addr) self.assertRaises(TypeError, lambda: v6addr < v6net) self.assertRaises(TypeError, lambda: v6addr > v6net) self.assertRaises(TypeError, lambda: v6net < v6addr) self.assertRaises(TypeError, lambda: v6net > v6addr) # with get_mixed_type_key, you can sort addresses and network. 
self.assertEqual([v4addr, v4net], sorted([v4net, v4addr], key=ipaddr.get_mixed_type_key)) self.assertEqual([v6addr, v6net], sorted([v6net, v6addr], key=ipaddr.get_mixed_type_key)) def testIpFromInt(self): self.assertEqual(self.ipv4.ip, ipaddr.IPv4Network(16909060).ip) self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, 2**32) self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, -1) ipv4 = ipaddr.IPNetwork('1.2.3.4') ipv6 = ipaddr.IPNetwork('2001:658:22a:cafe:200:0:0:1') self.assertEqual(ipv4, ipaddr.IPNetwork(int(ipv4))) self.assertEqual(ipv6, ipaddr.IPNetwork(int(ipv6))) v6_int = 42540616829182469433547762482097946625 self.assertEqual(self.ipv6.ip, ipaddr.IPv6Network(v6_int).ip) self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, 2**128) self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, -1) self.assertEqual(ipaddr.IPNetwork(self.ipv4.ip).version, 4) self.assertEqual(ipaddr.IPNetwork(self.ipv6.ip).version, 6) if ipaddr._compat_has_real_bytes: # on python3+ def testIpFromPacked(self): ip = ipaddr.IPNetwork self.assertEqual(self.ipv4.ip, ip(_cb('\x01\x02\x03\x04')).ip) self.assertEqual(ip('255.254.253.252'), ip(_cb('\xff\xfe\xfd\xfc'))) self.assertRaises(ValueError, ipaddr.IPNetwork, _cb('\x00' * 3)) self.assertRaises(ValueError, ipaddr.IPNetwork, _cb('\x00' * 5)) self.assertEqual(self.ipv6.ip, ip(_cb('\x20\x01\x06\x58\x02\x2a\xca\xfe' '\x02\x00\x00\x00\x00\x00\x00\x01')).ip) self.assertEqual(ip('ffff:2:3:4:ffff::'), ip(_cb('\xff\xff\x00\x02\x00\x03\x00\x04' + '\xff\xff' + '\x00' * 6))) self.assertEqual(ip('::'), ip(_cb('\x00' * 16))) self.assertRaises(ValueError, ip, _cb('\x00' * 15)) self.assertRaises(ValueError, ip, _cb('\x00' * 17)) def testGetIp(self): self.assertEqual(int(self.ipv4.ip), 16909060) self.assertEqual(str(self.ipv4.ip), '1.2.3.4') self.assertEqual(str(self.ipv4_hostmask.ip), '10.0.0.1') self.assertEqual(int(self.ipv6.ip), 42540616829182469433547762482097946625) self.assertEqual(str(self.ipv6.ip), '2001:658:22a:cafe:200::1') def testGetNetmask(self): self.assertEqual(int(self.ipv4.netmask), 4294967040L) self.assertEqual(str(self.ipv4.netmask), '255.255.255.0') self.assertEqual(str(self.ipv4_hostmask.netmask), '255.0.0.0') self.assertEqual(int(self.ipv6.netmask), 340282366920938463444927863358058659840) self.assertEqual(self.ipv6.prefixlen, 64) def testZeroNetmask(self): ipv4_zero_netmask = ipaddr.IPv4Network('1.2.3.4/0') self.assertEqual(int(ipv4_zero_netmask.netmask), 0) self.assert_(ipv4_zero_netmask._is_valid_netmask(str(0))) ipv6_zero_netmask = ipaddr.IPv6Network('::1/0') self.assertEqual(int(ipv6_zero_netmask.netmask), 0) self.assert_(ipv6_zero_netmask._is_valid_netmask(str(0))) def testGetBroadcast(self): self.assertEqual(int(self.ipv4.broadcast), 16909311L) self.assertEqual(str(self.ipv4.broadcast), '1.2.3.255') self.assertEqual(int(self.ipv6.broadcast), 42540616829182469451850391367731642367) self.assertEqual(str(self.ipv6.broadcast), '2001:658:22a:cafe:ffff:ffff:ffff:ffff') def testGetPrefixlen(self): self.assertEqual(self.ipv4.prefixlen, 24) self.assertEqual(self.ipv6.prefixlen, 64) def testGetSupernet(self): self.assertEqual(self.ipv4.supernet().prefixlen, 23) self.assertEqual(str(self.ipv4.supernet().network), '1.2.2.0') self.assertEqual(ipaddr.IPv4Network('0.0.0.0/0').supernet(), ipaddr.IPv4Network('0.0.0.0/0')) self.assertEqual(self.ipv6.supernet().prefixlen, 63) self.assertEqual(str(self.ipv6.supernet().network), '2001:658:22a:cafe::') self.assertEqual(ipaddr.IPv6Network('::0/0').supernet(), 
ipaddr.IPv6Network('::0/0')) def testGetSupernet3(self): self.assertEqual(self.ipv4.supernet(3).prefixlen, 21) self.assertEqual(str(self.ipv4.supernet(3).network), '1.2.0.0') self.assertEqual(self.ipv6.supernet(3).prefixlen, 61) self.assertEqual(str(self.ipv6.supernet(3).network), '2001:658:22a:caf8::') def testGetSupernet4(self): self.assertRaises(ValueError, self.ipv4.supernet, prefixlen_diff=2, new_prefix=1) self.assertRaises(ValueError, self.ipv4.supernet, new_prefix=25) self.assertEqual(self.ipv4.supernet(prefixlen_diff=2), self.ipv4.supernet(new_prefix=22)) self.assertRaises(ValueError, self.ipv6.supernet, prefixlen_diff=2, new_prefix=1) self.assertRaises(ValueError, self.ipv6.supernet, new_prefix=65) self.assertEqual(self.ipv6.supernet(prefixlen_diff=2), self.ipv6.supernet(new_prefix=62)) def testIterSubnets(self): self.assertEqual(self.ipv4.subnet(), list(self.ipv4.iter_subnets())) self.assertEqual(self.ipv6.subnet(), list(self.ipv6.iter_subnets())) def testFancySubnetting(self): self.assertEqual(sorted(self.ipv4.subnet(prefixlen_diff=3)), sorted(self.ipv4.subnet(new_prefix=27))) self.assertRaises(ValueError, self.ipv4.subnet, new_prefix=23) self.assertRaises(ValueError, self.ipv4.subnet, prefixlen_diff=3, new_prefix=27) self.assertEqual(sorted(self.ipv6.subnet(prefixlen_diff=4)), sorted(self.ipv6.subnet(new_prefix=68))) self.assertRaises(ValueError, self.ipv6.subnet, new_prefix=63) self.assertRaises(ValueError, self.ipv6.subnet, prefixlen_diff=4, new_prefix=68) def testGetSubnet(self): self.assertEqual(self.ipv4.subnet()[0].prefixlen, 25) self.assertEqual(str(self.ipv4.subnet()[0].network), '1.2.3.0') self.assertEqual(str(self.ipv4.subnet()[1].network), '1.2.3.128') self.assertEqual(self.ipv6.subnet()[0].prefixlen, 65) def testGetSubnetForSingle32(self): ip = ipaddr.IPv4Network('1.2.3.4/32') subnets1 = [str(x) for x in ip.subnet()] subnets2 = [str(x) for x in ip.subnet(2)] self.assertEqual(subnets1, ['1.2.3.4/32']) self.assertEqual(subnets1, subnets2) def testGetSubnetForSingle128(self): ip = ipaddr.IPv6Network('::1/128') subnets1 = [str(x) for x in ip.subnet()] subnets2 = [str(x) for x in ip.subnet(2)] self.assertEqual(subnets1, ['::1/128']) self.assertEqual(subnets1, subnets2) def testSubnet2(self): ips = [str(x) for x in self.ipv4.subnet(2)] self.assertEqual( ips, ['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26']) ipsv6 = [str(x) for x in self.ipv6.subnet(2)] self.assertEqual( ipsv6, ['2001:658:22a:cafe::/66', '2001:658:22a:cafe:4000::/66', '2001:658:22a:cafe:8000::/66', '2001:658:22a:cafe:c000::/66']) def testSubnetFailsForLargeCidrDiff(self): self.assertRaises(ValueError, self.ipv4.subnet, 9) self.assertRaises(ValueError, self.ipv6.subnet, 65) def testSupernetFailsForLargeCidrDiff(self): self.assertRaises(ValueError, self.ipv4.supernet, 25) self.assertRaises(ValueError, self.ipv6.supernet, 65) def testSubnetFailsForNegativeCidrDiff(self): self.assertRaises(ValueError, self.ipv4.subnet, -1) self.assertRaises(ValueError, self.ipv6.subnet, -1) def testGetNumHosts(self): self.assertEqual(self.ipv4.numhosts, 256) self.assertEqual(self.ipv4.subnet()[0].numhosts, 128) self.assertEqual(self.ipv4.supernet().numhosts, 512) self.assertEqual(self.ipv6.numhosts, 18446744073709551616) self.assertEqual(self.ipv6.subnet()[0].numhosts, 9223372036854775808) self.assertEqual(self.ipv6.supernet().numhosts, 36893488147419103232) def testContains(self): self.assertTrue(ipaddr.IPv4Network('1.2.3.128/25') in self.ipv4) self.assertFalse(ipaddr.IPv4Network('1.2.4.1/24') in self.ipv4) 
self.assertTrue(self.ipv4 in self.ipv4) self.assertTrue(self.ipv6 in self.ipv6) # We can test addresses and string as well. addr1 = ipaddr.IPv4Address('1.2.3.37') self.assertTrue(addr1 in self.ipv4) # issue 61, bad network comparison on like-ip'd network objects # with identical broadcast addresses. self.assertFalse(ipaddr.IPv4Network('1.1.0.0/16').__contains__( ipaddr.IPv4Network('1.0.0.0/15'))) def testBadAddress(self): self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, 'poop') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, '1.2.3.256') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, 'poopv6') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, '1.2.3.4/32/24') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, '10/8') self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, '10/8') def testBadNetMask(self): self.assertRaises(ipaddr.NetmaskValueError, ipaddr.IPv4Network, '1.2.3.4/') self.assertRaises(ipaddr.NetmaskValueError, ipaddr.IPv4Network, '1.2.3.4/33') self.assertRaises(ipaddr.NetmaskValueError, ipaddr.IPv4Network, '1.2.3.4/254.254.255.256') self.assertRaises(ipaddr.NetmaskValueError, ipaddr.IPv4Network, '1.1.1.1/240.255.0.0') self.assertRaises(ipaddr.NetmaskValueError, ipaddr.IPv6Network, '::1/') self.assertRaises(ipaddr.NetmaskValueError, ipaddr.IPv6Network, '::1/129') def testNth(self): self.assertEqual(str(self.ipv4[5]), '1.2.3.5') self.assertRaises(IndexError, self.ipv4.__getitem__, 256) self.assertEqual(str(self.ipv6[5]), '2001:658:22a:cafe::5') def testGetitem(self): # http://code.google.com/p/ipaddr-py/issues/detail?id=15 addr = ipaddr.IPv4Network('172.31.255.128/255.255.255.240') self.assertEqual(28, addr.prefixlen) addr_list = list(addr) self.assertEqual('172.31.255.128', str(addr_list[0])) self.assertEqual('172.31.255.128', str(addr[0])) self.assertEqual('172.31.255.143', str(addr_list[-1])) self.assertEqual('172.31.255.143', str(addr[-1])) self.assertEqual(addr_list[-1], addr[-1]) def testEquals(self): self.assertTrue(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/24')) self.assertFalse(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/23')) self.assertFalse(self.ipv4 == ipaddr.IPv6Network('::1.2.3.4/24')) self.assertFalse(self.ipv4 == '') self.assertFalse(self.ipv4 == []) self.assertFalse(self.ipv4 == 2) self.assertTrue(ipaddr.IPNetwork('1.1.1.1/32') == ipaddr.IPAddress('1.1.1.1')) self.assertTrue(ipaddr.IPNetwork('1.1.1.1/24') == ipaddr.IPAddress('1.1.1.1')) self.assertFalse(ipaddr.IPNetwork('1.1.1.0/24') == ipaddr.IPAddress('1.1.1.1')) self.assertTrue(self.ipv6 == ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64')) self.assertTrue(ipaddr.IPNetwork('::1/128') == ipaddr.IPAddress('::1')) self.assertTrue(ipaddr.IPNetwork('::1/127') == ipaddr.IPAddress('::1')) self.assertFalse(ipaddr.IPNetwork('::0/127') == ipaddr.IPAddress('::1')) self.assertFalse(self.ipv6 == ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63')) self.assertFalse(self.ipv6 == ipaddr.IPv4Network('1.2.3.4/23')) self.assertFalse(self.ipv6 == '') self.assertFalse(self.ipv6 == []) self.assertFalse(self.ipv6 == 2) def testNotEquals(self): self.assertFalse(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/24')) self.assertTrue(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/23')) self.assertTrue(self.ipv4 != ipaddr.IPv6Network('::1.2.3.4/24')) self.assertTrue(self.ipv4 != '') self.assertTrue(self.ipv4 != []) self.assertTrue(self.ipv4 != 2) addr2 = ipaddr.IPAddress('2001:658:22a:cafe:200::1') self.assertFalse(self.ipv6 != 
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64')) self.assertTrue(self.ipv6 != ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63')) self.assertTrue(self.ipv6 != ipaddr.IPv4Network('1.2.3.4/23')) self.assertTrue(self.ipv6 != '') self.assertTrue(self.ipv6 != []) self.assertTrue(self.ipv6 != 2) def testSlash32Constructor(self): self.assertEquals(str(ipaddr.IPv4Network('1.2.3.4/255.255.255.255')), '1.2.3.4/32') def testSlash128Constructor(self): self.assertEquals(str(ipaddr.IPv6Network('::1/128')), '::1/128') def testSlash0Constructor(self): self.assertEquals(str(ipaddr.IPv4Network('1.2.3.4/0.0.0.0')), '1.2.3.4/0') def testCollapsing(self): # test only IP addresses including some duplicates ip1 = ipaddr.IPv4Address('1.1.1.0') ip2 = ipaddr.IPv4Address('1.1.1.1') ip3 = ipaddr.IPv4Address('1.1.1.2') ip4 = ipaddr.IPv4Address('1.1.1.3') ip5 = ipaddr.IPv4Address('1.1.1.4') ip6 = ipaddr.IPv4Address('1.1.1.0') # check that addresses are subsumed properly. collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6]) self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/30'), ipaddr.IPv4Network('1.1.1.4/32')]) # test a mix of IP addresses and networks including some duplicates ip1 = ipaddr.IPv4Address('1.1.1.0') ip2 = ipaddr.IPv4Address('1.1.1.1') ip3 = ipaddr.IPv4Address('1.1.1.2') ip4 = ipaddr.IPv4Address('1.1.1.3') ip5 = ipaddr.IPv4Network('1.1.1.4/30') ip6 = ipaddr.IPv4Network('1.1.1.4/30') # check that addresses are subsumed properly. collapsed = ipaddr.collapse_address_list([ip5, ip1, ip2, ip3, ip4, ip6]) self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/29')]) # test only IP networks ip1 = ipaddr.IPv4Network('1.1.0.0/24') ip2 = ipaddr.IPv4Network('1.1.1.0/24') ip3 = ipaddr.IPv4Network('1.1.2.0/24') ip4 = ipaddr.IPv4Network('1.1.3.0/24') ip5 = ipaddr.IPv4Network('1.1.4.0/24') # stored in no particular order b/c we want CollapseAddr to call [].sort ip6 = ipaddr.IPv4Network('1.1.0.0/22') # check that addresses are subsumed properly. collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6]) self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/22'), ipaddr.IPv4Network('1.1.4.0/24')]) # test that two addresses are supernet'ed properly collapsed = ipaddr.collapse_address_list([ip1, ip2]) self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/23')]) # test same IP networks ip_same1 = ip_same2 = ipaddr.IPv4Network('1.1.1.1/32') self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]), [ip_same1]) # test same IP addresses ip_same1 = ip_same2 = ipaddr.IPv4Address('1.1.1.1') self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]), [ipaddr.IPNetwork('1.1.1.1/32')]) ip1 = ipaddr.IPv6Network('::2001:1/100') ip2 = ipaddr.IPv6Network('::2002:1/120') ip3 = ipaddr.IPv6Network('::2001:1/96') # test that ipv6 addresses are subsumed properly. 
collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3]) self.assertEqual(collapsed, [ip3]) # the toejam test ip1 = ipaddr.IPAddress('1.1.1.1') ip2 = ipaddr.IPAddress('::1') self.assertRaises(TypeError, ipaddr.collapse_address_list, [ip1, ip2]) def testSummarizing(self): #ip = ipaddr.IPAddress #ipnet = ipaddr.IPNetwork summarize = ipaddr.summarize_address_range ip1 = ipaddr.IPAddress('1.1.1.0') ip2 = ipaddr.IPAddress('1.1.1.255') # test a /24 is summarized properly self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1.1.1.0/24')) # test an IPv4 range that isn't on a network byte boundary ip2 = ipaddr.IPAddress('1.1.1.8') self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1.1.1.0/29'), ipaddr.IPNetwork('1.1.1.8')]) ip1 = ipaddr.IPAddress('1::') ip2 = ipaddr.IPAddress('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff') # test an IPv6 is summarized properly self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1::/16')) # test an IPv6 range that isn't on a network byte boundary ip2 = ipaddr.IPAddress('2::') self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1::/16'), ipaddr.IPNetwork('2::/128')]) # test exception raised when first is greater than last self.assertRaises(ValueError, summarize, ipaddr.IPAddress('1.1.1.0'), ipaddr.IPAddress('1.1.0.0')) # test exception raised when first and last aren't IP addresses self.assertRaises(TypeError, summarize, ipaddr.IPNetwork('1.1.1.0'), ipaddr.IPNetwork('1.1.0.0')) self.assertRaises(TypeError, summarize, ipaddr.IPNetwork('1.1.1.0'), ipaddr.IPNetwork('1.1.0.0')) # test exception raised when first and last are not same version self.assertRaises(TypeError, summarize, ipaddr.IPAddress('::'), ipaddr.IPNetwork('1.1.0.0')) def testAddressComparison(self): self.assertTrue(ipaddr.IPAddress('1.1.1.1') <= ipaddr.IPAddress('1.1.1.1')) self.assertTrue(ipaddr.IPAddress('1.1.1.1') <= ipaddr.IPAddress('1.1.1.2')) self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::1')) self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::2')) def testNetworkComparison(self): # ip1 and ip2 have the same network address ip1 = ipaddr.IPv4Network('1.1.1.0/24') ip2 = ipaddr.IPv4Network('1.1.1.1/24') ip3 = ipaddr.IPv4Network('1.1.2.0/24') self.assertTrue(ip1 < ip3) self.assertTrue(ip3 > ip2) self.assertEquals(ip1.compare_networks(ip2), 0) self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key()) self.assertEquals(ip1.compare_networks(ip3), -1) self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key()) ip1 = ipaddr.IPv6Network('2001::2000/96') ip2 = ipaddr.IPv6Network('2001::2001/96') ip3 = ipaddr.IPv6Network('2001:ffff::2000/96') self.assertTrue(ip1 < ip3) self.assertTrue(ip3 > ip2) self.assertEquals(ip1.compare_networks(ip2), 0) self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key()) self.assertEquals(ip1.compare_networks(ip3), -1) self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key()) # Test comparing different protocols. # Should always raise a TypeError. ipv6 = ipaddr.IPv6Network('::/0') ipv4 = ipaddr.IPv4Network('0.0.0.0/0') self.assertRaises(TypeError, ipv4.__lt__, ipv6) self.assertRaises(TypeError, ipv4.__gt__, ipv6) self.assertRaises(TypeError, ipv6.__lt__, ipv4) self.assertRaises(TypeError, ipv6.__gt__, ipv4) # Regression test for issue 19. 
ip1 = ipaddr.IPNetwork('10.1.2.128/25') self.assertFalse(ip1 < ip1) self.assertFalse(ip1 > ip1) ip2 = ipaddr.IPNetwork('10.1.3.0/24') self.assertTrue(ip1 < ip2) self.assertFalse(ip2 < ip1) self.assertFalse(ip1 > ip2) self.assertTrue(ip2 > ip1) ip3 = ipaddr.IPNetwork('10.1.3.0/25') self.assertTrue(ip2 < ip3) self.assertFalse(ip3 < ip2) self.assertFalse(ip2 > ip3) self.assertTrue(ip3 > ip2) # Regression test for issue 28. ip1 = ipaddr.IPNetwork('10.10.10.0/31') ip2 = ipaddr.IPNetwork('10.10.10.0') ip3 = ipaddr.IPNetwork('10.10.10.2/31') ip4 = ipaddr.IPNetwork('10.10.10.2') sorted = [ip1, ip2, ip3, ip4] unsorted = [ip2, ip4, ip1, ip3] unsorted.sort() self.assertEqual(sorted, unsorted) unsorted = [ip4, ip1, ip3, ip2] unsorted.sort() self.assertEqual(sorted, unsorted) self.assertRaises(TypeError, ip1.__lt__, ipaddr.IPAddress('10.10.10.0')) self.assertRaises(TypeError, ip2.__lt__, ipaddr.IPAddress('10.10.10.0')) # <=, >= self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <= ipaddr.IPNetwork('1.1.1.1')) self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <= ipaddr.IPNetwork('1.1.1.2')) self.assertFalse(ipaddr.IPNetwork('1.1.1.2') <= ipaddr.IPNetwork('1.1.1.1')) self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::1')) self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::2')) self.assertFalse(ipaddr.IPNetwork('::2') <= ipaddr.IPNetwork('::1')) def testStrictNetworks(self): self.assertRaises(ValueError, ipaddr.IPNetwork, '192.168.1.1/24', strict=True) self.assertRaises(ValueError, ipaddr.IPNetwork, '::1/120', strict=True) def testOverlaps(self): other = ipaddr.IPv4Network('1.2.3.0/30') other2 = ipaddr.IPv4Network('1.2.2.0/24') other3 = ipaddr.IPv4Network('1.2.2.64/26') self.assertTrue(self.ipv4.overlaps(other)) self.assertFalse(self.ipv4.overlaps(other2)) self.assertTrue(other2.overlaps(other3)) def testEmbeddedIpv4(self): ipv4_string = '192.168.0.1' ipv4 = ipaddr.IPv4Network(ipv4_string) v4compat_ipv6 = ipaddr.IPv6Network('::%s' % ipv4_string) self.assertEquals(int(v4compat_ipv6.ip), int(ipv4.ip)) v4mapped_ipv6 = ipaddr.IPv6Network('::ffff:%s' % ipv4_string) self.assertNotEquals(v4mapped_ipv6.ip, ipv4.ip) self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, '2001:1.1.1.1:1.1.1.1') # Issue 67: IPv6 with embedded IPv4 address not recognized. 
def testIPv6AddressTooLarge(self): # RFC4291 2.5.5.2 self.assertEquals(ipaddr.IPAddress('::FFFF:192.0.2.1'), ipaddr.IPAddress('::FFFF:c000:201')) # RFC4291 2.2 (part 3) x::d.d.d.d self.assertEquals(ipaddr.IPAddress('FFFF::192.0.2.1'), ipaddr.IPAddress('FFFF::c000:201')) def testIPVersion(self): self.assertEqual(self.ipv4.version, 4) self.assertEqual(self.ipv6.version, 6) def testMaxPrefixLength(self): self.assertEqual(self.ipv4.max_prefixlen, 32) self.assertEqual(self.ipv6.max_prefixlen, 128) def testPacked(self): self.assertEqual(self.ipv4.packed, _cb('\x01\x02\x03\x04')) self.assertEqual(ipaddr.IPv4Network('255.254.253.252').packed, _cb('\xff\xfe\xfd\xfc')) self.assertEqual(self.ipv6.packed, _cb('\x20\x01\x06\x58\x02\x2a\xca\xfe' '\x02\x00\x00\x00\x00\x00\x00\x01')) self.assertEqual(ipaddr.IPv6Network('ffff:2:3:4:ffff::').packed, _cb('\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff' + '\x00' * 6)) self.assertEqual(ipaddr.IPv6Network('::1:0:0:0:0').packed, _cb('\x00' * 6 + '\x00\x01' + '\x00' * 8)) def testIpStrFromPrefixlen(self): ipv4 = ipaddr.IPv4Network('1.2.3.4/24') self.assertEquals(ipv4._ip_string_from_prefix(), '255.255.255.0') self.assertEquals(ipv4._ip_string_from_prefix(28), '255.255.255.240') def testIpType(self): ipv4net = ipaddr.IPNetwork('1.2.3.4') ipv4addr = ipaddr.IPAddress('1.2.3.4') ipv6net = ipaddr.IPNetwork('::1.2.3.4') ipv6addr = ipaddr.IPAddress('::1.2.3.4') self.assertEquals(ipaddr.IPv4Network, type(ipv4net)) self.assertEquals(ipaddr.IPv4Address, type(ipv4addr)) self.assertEquals(ipaddr.IPv6Network, type(ipv6net)) self.assertEquals(ipaddr.IPv6Address, type(ipv6addr)) def testReservedIpv4(self): # test networks self.assertEquals(True, ipaddr.IPNetwork('224.1.1.1/31').is_multicast) self.assertEquals(False, ipaddr.IPNetwork('240.0.0.0').is_multicast) self.assertEquals(True, ipaddr.IPNetwork('192.168.1.1/17').is_private) self.assertEquals(False, ipaddr.IPNetwork('192.169.0.0').is_private) self.assertEquals(True, ipaddr.IPNetwork('10.255.255.255').is_private) self.assertEquals(False, ipaddr.IPNetwork('11.0.0.0').is_private) self.assertEquals(True, ipaddr.IPNetwork('172.31.255.255').is_private) self.assertEquals(False, ipaddr.IPNetwork('172.32.0.0').is_private) self.assertEquals(True, ipaddr.IPNetwork('169.254.100.200/24').is_link_local) self.assertEquals(False, ipaddr.IPNetwork('169.255.100.200/24').is_link_local) self.assertEquals(True, ipaddr.IPNetwork('127.100.200.254/32').is_loopback) self.assertEquals(True, ipaddr.IPNetwork('127.42.0.0/16').is_loopback) self.assertEquals(False, ipaddr.IPNetwork('128.0.0.0').is_loopback) # test addresses self.assertEquals(True, ipaddr.IPAddress('224.1.1.1').is_multicast) self.assertEquals(False, ipaddr.IPAddress('240.0.0.0').is_multicast) self.assertEquals(True, ipaddr.IPAddress('192.168.1.1').is_private) self.assertEquals(False, ipaddr.IPAddress('192.169.0.0').is_private) self.assertEquals(True, ipaddr.IPAddress('10.255.255.255').is_private) self.assertEquals(False, ipaddr.IPAddress('11.0.0.0').is_private) self.assertEquals(True, ipaddr.IPAddress('172.31.255.255').is_private) self.assertEquals(False, ipaddr.IPAddress('172.32.0.0').is_private) self.assertEquals(True, ipaddr.IPAddress('169.254.100.200').is_link_local) self.assertEquals(False, ipaddr.IPAddress('169.255.100.200').is_link_local) self.assertEquals(True, ipaddr.IPAddress('127.100.200.254').is_loopback) self.assertEquals(True, ipaddr.IPAddress('127.42.0.0').is_loopback) self.assertEquals(False, ipaddr.IPAddress('128.0.0.0').is_loopback) self.assertEquals(True, 
ipaddr.IPNetwork('0.0.0.0').is_unspecified) def testReservedIpv6(self): self.assertEquals(True, ipaddr.IPNetwork('ffff::').is_multicast) self.assertEquals(True, ipaddr.IPNetwork(2**128-1).is_multicast) self.assertEquals(True, ipaddr.IPNetwork('ff00::').is_multicast) self.assertEquals(False, ipaddr.IPNetwork('fdff::').is_multicast) self.assertEquals(True, ipaddr.IPNetwork('fecf::').is_site_local) self.assertEquals(True, ipaddr.IPNetwork( 'feff:ffff:ffff:ffff::').is_site_local) self.assertEquals(False, ipaddr.IPNetwork('fbf:ffff::').is_site_local) self.assertEquals(False, ipaddr.IPNetwork('ff00::').is_site_local) self.assertEquals(True, ipaddr.IPNetwork('fc00::').is_private) self.assertEquals(True, ipaddr.IPNetwork( 'fc00:ffff:ffff:ffff::').is_private) self.assertEquals(False, ipaddr.IPNetwork('fbff:ffff::').is_private) self.assertEquals(False, ipaddr.IPNetwork('fe00::').is_private) self.assertEquals(True, ipaddr.IPNetwork('fea0::').is_link_local) self.assertEquals(True, ipaddr.IPNetwork('febf:ffff::').is_link_local) self.assertEquals(False, ipaddr.IPNetwork('fe7f:ffff::').is_link_local) self.assertEquals(False, ipaddr.IPNetwork('fec0::').is_link_local) self.assertEquals(True, ipaddr.IPNetwork('0:0::0:01').is_loopback) self.assertEquals(False, ipaddr.IPNetwork('::1/127').is_loopback) self.assertEquals(False, ipaddr.IPNetwork('::').is_loopback) self.assertEquals(False, ipaddr.IPNetwork('::2').is_loopback) self.assertEquals(True, ipaddr.IPNetwork('0::0').is_unspecified) self.assertEquals(False, ipaddr.IPNetwork('::1').is_unspecified) self.assertEquals(False, ipaddr.IPNetwork('::/127').is_unspecified) # test addresses self.assertEquals(True, ipaddr.IPAddress('ffff::').is_multicast) self.assertEquals(True, ipaddr.IPAddress(2**128-1).is_multicast) self.assertEquals(True, ipaddr.IPAddress('ff00::').is_multicast) self.assertEquals(False, ipaddr.IPAddress('fdff::').is_multicast) self.assertEquals(True, ipaddr.IPAddress('fecf::').is_site_local) self.assertEquals(True, ipaddr.IPAddress( 'feff:ffff:ffff:ffff::').is_site_local) self.assertEquals(False, ipaddr.IPAddress('fbf:ffff::').is_site_local) self.assertEquals(False, ipaddr.IPAddress('ff00::').is_site_local) self.assertEquals(True, ipaddr.IPAddress('fc00::').is_private) self.assertEquals(True, ipaddr.IPAddress( 'fc00:ffff:ffff:ffff::').is_private) self.assertEquals(False, ipaddr.IPAddress('fbff:ffff::').is_private) self.assertEquals(False, ipaddr.IPAddress('fe00::').is_private) self.assertEquals(True, ipaddr.IPAddress('fea0::').is_link_local) self.assertEquals(True, ipaddr.IPAddress('febf:ffff::').is_link_local) self.assertEquals(False, ipaddr.IPAddress('fe7f:ffff::').is_link_local) self.assertEquals(False, ipaddr.IPAddress('fec0::').is_link_local) self.assertEquals(True, ipaddr.IPAddress('0:0::0:01').is_loopback) self.assertEquals(True, ipaddr.IPAddress('::1').is_loopback) self.assertEquals(False, ipaddr.IPAddress('::2').is_loopback) self.assertEquals(True, ipaddr.IPAddress('0::0').is_unspecified) self.assertEquals(False, ipaddr.IPAddress('::1').is_unspecified) # some generic IETF reserved addresses self.assertEquals(True, ipaddr.IPAddress('100::').is_reserved) self.assertEquals(True, ipaddr.IPNetwork('4000::1/128').is_reserved) def testIpv4Mapped(self): self.assertEqual(ipaddr.IPAddress('::ffff:192.168.1.1').ipv4_mapped, ipaddr.IPAddress('192.168.1.1')) self.assertEqual(ipaddr.IPAddress('::c0a8:101').ipv4_mapped, None) self.assertEqual(ipaddr.IPAddress('::ffff:c0a8:101').ipv4_mapped, ipaddr.IPAddress('192.168.1.1')) def testAddrExclude(self): 
addr1 = ipaddr.IPNetwork('10.1.1.0/24') addr2 = ipaddr.IPNetwork('10.1.1.0/26') addr3 = ipaddr.IPNetwork('10.2.1.0/24') addr4 = ipaddr.IPAddress('10.1.1.0') self.assertEqual(addr1.address_exclude(addr2), [ipaddr.IPNetwork('10.1.1.64/26'), ipaddr.IPNetwork('10.1.1.128/25')]) self.assertRaises(ValueError, addr1.address_exclude, addr3) self.assertRaises(TypeError, addr1.address_exclude, addr4) self.assertEqual(addr1.address_exclude(addr1), []) def testHash(self): self.assertEquals(hash(ipaddr.IPNetwork('10.1.1.0/24')), hash(ipaddr.IPNetwork('10.1.1.0/24'))) self.assertEquals(hash(ipaddr.IPAddress('10.1.1.0')), hash(ipaddr.IPAddress('10.1.1.0'))) # i70 self.assertEquals(hash(ipaddr.IPAddress('1.2.3.4')), hash(ipaddr.IPAddress( long(ipaddr.IPAddress('1.2.3.4')._ip)))) ip1 = ipaddr.IPAddress('10.1.1.0') ip2 = ipaddr.IPAddress('1::') dummy = {} dummy[self.ipv4] = None dummy[self.ipv6] = None dummy[ip1] = None dummy[ip2] = None self.assertTrue(self.ipv4 in dummy) self.assertTrue(ip2 in dummy) def testCopyConstructor(self): addr1 = ipaddr.IPNetwork('10.1.1.0/24') addr2 = ipaddr.IPNetwork(addr1) addr3 = ipaddr.IPNetwork('2001:658:22a:cafe:200::1/64') addr4 = ipaddr.IPNetwork(addr3) addr5 = ipaddr.IPv4Address('1.1.1.1') addr6 = ipaddr.IPv6Address('2001:658:22a:cafe:200::1') self.assertEqual(addr1, addr2) self.assertEqual(addr3, addr4) self.assertEqual(addr5, ipaddr.IPv4Address(addr5)) self.assertEqual(addr6, ipaddr.IPv6Address(addr6)) def testCompressIPv6Address(self): test_addresses = { '1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128', '2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128', '2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128', '2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128', '2001:0::3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128', '0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128', '0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128', '0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128', '1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128', '0:0:0:0:0:0:0:0': '::/128', '0:0:0:0:0:0:0:0/0': '::/0', '0:0:0:0:0:0:0:1': '::1/128', '2001:0658:022a:cafe:0000:0000:0000:0000/66': '2001:658:22a:cafe::/66', } for uncompressed, compressed in test_addresses.items(): self.assertEquals(compressed, str(ipaddr.IPv6Network(uncompressed))) def testExplodeShortHandIpStr(self): addr1 = ipaddr.IPv6Network('2001::1') self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001', addr1._explode_shorthand_ip_string(str(addr1.ip))) self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001', ipaddr.IPv6Network('::1/128').exploded) def testIntRepresentation(self): self.assertEqual(16909060, int(self.ipv4)) self.assertEqual(42540616829182469433547762482097946625, int(self.ipv6)) def testHexRepresentation(self): self.assertEqual(hex(0x1020304), hex(self.ipv4)) self.assertEqual(hex(0x20010658022ACAFE0200000000000001), hex(self.ipv6)) # backwards compatibility def testBackwardsCompability(self): self.assertEqual(ipaddr.CollapseAddrList( [ipaddr.IPNetwork('1.1.0.0/24'), ipaddr.IPNetwork('1.1.1.0/24')]), [ipaddr.IPNetwork('1.1.0.0/23')]) self.assertEqual(ipaddr.IPNetwork('::42:0/112').AddressExclude( ipaddr.IPNetwork('::42:8000/113')), [ipaddr.IPNetwork('::42:0/113')]) self.assertTrue(ipaddr.IPNetwork('1::/8').CompareNetworks( ipaddr.IPNetwork('2::/9')) < 0) self.assertEqual(ipaddr.IPNetwork('1::/16').Contains( ipaddr.IPNetwork('2::/16')), False) self.assertEqual(ipaddr.IPNetwork('0.0.0.0/0').Subnet(), [ipaddr.IPNetwork('0.0.0.0/1'), ipaddr.IPNetwork('128.0.0.0/1')]) self.assertEqual(ipaddr.IPNetwork('::/127').Subnet(), [ipaddr.IPNetwork('::/128'), ipaddr.IPNetwork('::1/128')]) 
self.assertEqual(ipaddr.IPNetwork('1.0.0.0/32').Supernet(), ipaddr.IPNetwork('1.0.0.0/31')) self.assertEqual(ipaddr.IPNetwork('::/121').Supernet(), ipaddr.IPNetwork('::/120')) self.assertEqual(ipaddr.IPNetwork('10.0.0.02').IsRFC1918(), True) self.assertEqual(ipaddr.IPNetwork('10.0.0.0').IsMulticast(), False) self.assertEqual(ipaddr.IPNetwork('127.255.255.255').IsLoopback(), True) self.assertEqual(ipaddr.IPNetwork('169.255.255.255').IsLinkLocal(), False) def testForceVersion(self): self.assertEqual(ipaddr.IPNetwork(1).version, 4) self.assertEqual(ipaddr.IPNetwork(1, version=6).version, 6) def testWithStar(self): self.assertEqual(str(self.ipv4.with_prefixlen), "1.2.3.4/24") self.assertEqual(str(self.ipv4.with_netmask), "1.2.3.4/255.255.255.0") self.assertEqual(str(self.ipv4.with_hostmask), "1.2.3.4/0.0.0.255") self.assertEqual(str(self.ipv6.with_prefixlen), '2001:658:22a:cafe:200::1/64') # rfc3513 sec 2.3 says that ipv6 only uses cidr notation for # subnets self.assertEqual(str(self.ipv6.with_netmask), '2001:658:22a:cafe:200::1/64') # this probably doesn't make much sense, but it's included for # compatibility with ipv4 self.assertEqual(str(self.ipv6.with_hostmask), '2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff') def testNetworkElementCaching(self): # V4 - make sure we're empty self.assertFalse(self.ipv4._cache.has_key('network')) self.assertFalse(self.ipv4._cache.has_key('broadcast')) self.assertFalse(self.ipv4._cache.has_key('hostmask')) # V4 - populate and test self.assertEqual(self.ipv4.network, ipaddr.IPv4Address('1.2.3.0')) self.assertEqual(self.ipv4.broadcast, ipaddr.IPv4Address('1.2.3.255')) self.assertEqual(self.ipv4.hostmask, ipaddr.IPv4Address('0.0.0.255')) # V4 - check we're cached self.assertTrue(self.ipv4._cache.has_key('network')) self.assertTrue(self.ipv4._cache.has_key('broadcast')) self.assertTrue(self.ipv4._cache.has_key('hostmask')) # V6 - make sure we're empty self.assertFalse(self.ipv6._cache.has_key('network')) self.assertFalse(self.ipv6._cache.has_key('broadcast')) self.assertFalse(self.ipv6._cache.has_key('hostmask')) # V6 - populate and test self.assertEqual(self.ipv6.network, ipaddr.IPv6Address('2001:658:22a:cafe::')) self.assertEqual(self.ipv6.broadcast, ipaddr.IPv6Address( '2001:658:22a:cafe:ffff:ffff:ffff:ffff')) self.assertEqual(self.ipv6.hostmask, ipaddr.IPv6Address('::ffff:ffff:ffff:ffff')) # V6 - check we're cached self.assertTrue(self.ipv6._cache.has_key('network')) self.assertTrue(self.ipv6._cache.has_key('broadcast')) self.assertTrue(self.ipv6._cache.has_key('hostmask')) def testIsValidIp(self): ip = ipaddr.IPv6Address('::') self.assertTrue(ip._is_valid_ip('2001:658:22a:cafe:200::1')) self.assertTrue(ip._is_valid_ip('::ffff:10.10.0.0')) self.assertTrue(ip._is_valid_ip('::ffff:192.168.0.0')) self.assertFalse(ip._is_valid_ip('2001:658:22a::::1')) self.assertFalse(ip._is_valid_ip(':658:22a:cafe:200::1')) self.assertFalse(ip._is_valid_ip('2001:658:22a:cafe:200:')) self.assertFalse(ip._is_valid_ip('2001:658:22a:cafe:200:127.0.0.1::1')) self.assertFalse(ip._is_valid_ip('2001:658:22a:cafe:200::127.0.1')) self.assertFalse(ip._is_valid_ip('2001:658:22a:zzzz:200::1')) self.assertFalse(ip._is_valid_ip('2001:658:22a:cafe1:200::1')) def testTeredo(self): # stolen from wikipedia server = ipaddr.IPv4Address('65.54.227.120') client = ipaddr.IPv4Address('192.0.2.45') teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2' self.assertEqual((server, client), ipaddr.IPAddress(teredo_addr).teredo) bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2' 
self.assertFalse(ipaddr.IPAddress(bad_addr).teredo) def testsixtofour(self): sixtofouraddr = ipaddr.IPAddress('2002:ac1d:2d64::1') bad_addr = ipaddr.IPAddress('2000:ac1d:2d64::1') self.assertEqual(ipaddr.IPv4Address('172.29.45.100'), sixtofouraddr.sixtofour) self.assertFalse(bad_addr.sixtofour) if __name__ == '__main__': unittest.main()
nouiz/fredericbastien-ipaddr-py-speed-up
tags/2.1.8/ipaddr_test.py
Python
apache-2.0
49013
[ "FEFF" ]
7fefe031897f073d5308c9b1eeb903bec7ae9aae4c945d7eff7736bf4dbb2cda
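The suite above targets the legacy Google ipaddr package (note the Python 2 long literals and assertEquals idioms). For orientation, the modern stdlib ipaddress module covers the same ground; a few direct counterparts, assuming only the standard library:

import ipaddress

net = ipaddress.ip_network('1.2.3.0/24')
print(net.supernet())                       # 1.2.2.0/23
print(list(net.subnets(prefixlen_diff=1)))  # [1.2.3.0/25, 1.2.3.128/25]
print(ipaddress.ip_address('1.2.3.37') in net)  # True

# Equivalent of ipaddr.collapse_address_list:
collapsed = ipaddress.collapse_addresses(
    [ipaddress.ip_network('1.1.0.0/24'), ipaddress.ip_network('1.1.1.0/24')])
print(list(collapsed))                      # [1.1.0.0/23]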
""" JobRunningWaitingRatioPolicy Policy that calculates the efficiency following the formula:: ( running ) / ( running + waiting + staging ) if the denominator is smaller than 10, it does not take any decision. """ from DIRAC import S_OK from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase __RCSID__ = '$Id: JobRunningWaitingRatioPolicy.py 60769 2013-01-18 11:50:36Z ubeda $' class JobRunningWaitingRatioPolicy( PolicyBase ): """ The JobRunningWaitingRatioPolicy class is a policy that checks the efficiency of the jobs according to what is on JobDB. Evaluates the JobRunningWaitingRatioPolicy results given by the JobCommand.JobCommand """ @staticmethod def _evaluate( commandResult ): """ _evaluate efficiency < 0.5 :: Banned efficiency < 0.9 :: Degraded """ result = { 'Status' : None, 'Reason' : None } if not commandResult[ 'OK' ]: result[ 'Status' ] = 'Error' result[ 'Reason' ] = commandResult[ 'Message' ] return S_OK( result ) commandResult = commandResult[ 'Value' ] if not commandResult: result[ 'Status' ] = 'Unknown' result[ 'Reason' ] = 'No values to take a decision' return S_OK( result ) commandResult = commandResult[ 0 ] if not commandResult: result[ 'Status' ] = 'Unknown' result[ 'Reason' ] = 'No values to take a decision' return S_OK( result ) running = float( commandResult[ 'Running' ] ) waiting = float( commandResult[ 'Waiting' ] ) staging = float( commandResult[ 'Staging' ] ) total = running + waiting + staging #we want a minimum amount of jobs to take a decision ( at least 10 pilots ) if total < 10: result[ 'Status' ] = 'Unknown' result[ 'Reason' ] = 'Not enough jobs to take a decision' return S_OK( result ) efficiency = running / total if efficiency <= 0.4: result[ 'Status' ] = 'Banned' elif efficiency <= 0.65: result[ 'Status' ] = 'Degraded' else: result[ 'Status' ] = 'Active' result[ 'Reason' ] = 'Job Running / Waiting ratio of %.2f' % efficiency return S_OK( result ) #............................................................................... #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
andresailer/DIRAC
ResourceStatusSystem/Policy/JobRunningWaitingRatioPolicy.py
Python
gpl-3.0
2,410
[ "DIRAC" ]
f72e1e6a45b28ec8142fd620ebf389ab643a80f4f4702ae75f7c74e4c76be720
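The thresholds in _evaluate are easy to misread, so here is the same decision logic as a standalone sketch (the function name is illustrative, not part of DIRAC's API):

def job_ratio_status(running, waiting, staging):
    # Mirrors JobRunningWaitingRatioPolicy._evaluate once a valid result arrives.
    total = float(running + waiting + staging)
    if total < 10:          # too few jobs to take a decision
        return 'Unknown'
    efficiency = running / total
    if efficiency <= 0.4:
        return 'Banned'
    elif efficiency <= 0.65:
        return 'Degraded'
    return 'Active'

print(job_ratio_status(5, 4, 1))  # Degraded (efficiency 0.50)
print(job_ratio_status(9, 1, 0))  # Active   (efficiency 0.90)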
#!/bin/python """ Program Monte-Carlo by Aksyonov Dmitry, Skoltech, Moscow """ import sys, json, os, glob, copy print('Python version is', sys.version) from shutil import copyfile import random from os.path import expanduser home = expanduser("~") sys.path.append(home+'/tools/') # for numpy libraries import numpy as np from siman import header from siman.monte_functions import metropolis from siman.header import runBash, printlog from siman.header import ALKALI_ION_ELEMENTS as AM from siman.header import TRANSITION_ELEMENTS as TM from siman.classes import CalculationVasp, Structure from siman.inout import read_poscar from siman.functions import invert from siman.analysis import suf_en debug2 = 0 def check(cl, exit = 0): # return 0 if ok, return 1 if failed if hasattr(cl, 'e0'): printlog('outcar is ok', imp = 'y') out = 0 else: printlog('outcar is broken ', imp = 'y') out = 1 if exit: printlog('exiting...', imp = 'y') sys.exit() return out def check_poscar(filename): # return 0 if ok, return 1 if failed try: cl = CalculationVasp() cl.read_poscar(filename) print(cl.init.natom) status = 0 # good except: status = 1 return status def vasp_run(n, des, vasprun_command = None): #allows to run vasp several times, here fireworks can be used #n - number of attempts #des - description of run for i in range(n): # max three attempts printlog(des, 'attempt', i) if not debug2: out = runBash(vasprun_command) printlog('out is', out) cl = CalculationVasp(output = 'OUTCAR') out = cl.read_results(show = 'fo') printlog('Results are', imp = 'y') printlog(out, imp = 'y') status = check(cl) if status == 0: break else: if os.path.exists('CONTCAR'): if check_poscar('CONTCAR') == 0: copyfile('CONTCAR', 'POSCAR') else: printlog('CONTCAR is broken. No further attempts to run VASP', imp = 'y') break else: printlog('No CONTCAR was found. No further attempts to run VASP', imp = 'y') break return cl def initial_run(xcart_voids, ): """1. Run initial calculation""" if debug: cl = CalculationVasp() cl.read_poscar('1.POSCAR') cl.end = cl.init if xcart_voids: # read void coordinates cl.end = cl.end.add_atoms(xcart_voids, 'void') last_number = 0 else: files_yes = glob.glob('*yes.pickle') #get list of calculated yes files files_all = glob.glob('*.pickle') #get list of all calculated files """Find last yes pickle file""" if files_yes: yes_numbers = [int(file.split('-')[0]) for file in files_yes] all_numbers = [int(file.split('-')[0]) for file in files_all] last_yes_n = max(yes_numbers) last_number = max(all_numbers) last_yes_file = str(last_yes_n)+'-yes.pickle' printlog('Last calculation file is ', last_yes_file, imp = 'y') else: last_number = 0 last_yes_file = None """Read last pickle file or run vasp """ if last_yes_file: cl = CalculationVasp().deserialize(last_yes_file) printlog('Successfully deserialized') xcart_voids = cl.end.get_specific_elements([300], fmt = 'x') # extract voids form the last calculation else: cl = vasp_run(3, 'first run', vasprun_command = vasprun_command) if xcart_voids: # read void coordinates cl.end = cl.end.add_atoms(xcart_voids, 'void') printlog('I found', len(xcart_voids), 'voids in config file. 
Added to structure.') if check(cl) == 0: cl.serialize('0-yes') copyfile('OUTCAR', 'OUTCAR-0') copyfile('OSZICAR', 'OSZICAR-0') copyfile('CONTCAR', 'CONTCAR-0') copyfile('OUTCAR', 'OUTCAR_last') copyfile('CONTCAR', 'CONTCAR_last') with open('ENERGIES', 'w') as f: f.write('{:5d} {:.5f}\n'.format(0, cl.e0)) else: printlog('Calculation is broken, no data was saved, exiting ...', imp = 'y') sys.exit() if debug2: sys.exit() return cl, last_number def get_zr_range(st, thickness, zr): red_thick = thickness/np.linalg.norm(st.rprimd[2]) # z_range = [z2 - thickness, z2] zr_range = [zr - red_thick, zr] printlog('zr_range is ', zr_range) return zr_range def exchange_atoms(st, xcart_voids, z2, thickness, zr = None, condition = None, ): """ Swap two atoms xcart_voids - list of xcart of voids voidz - list with z voids; actually either None or [300] zr - position of surface condition (str) - possible additional conditions 'no_surface_TM' - do not make swaps which reduce oxygen coordination of transition metals max_avdist_increase - maximum allowed increase of TM-O distance after swapping; (for example larger than 0.5 A allows to exclude swaps to surface) """ if xcart_voids: voidz = [300] printlog('Voids were extracted from st, adding them to z group for Monte-Carlo', xcart_voids) else: voidz = None z_groups = [AM, TM] if voidz: z_groups.append(voidz) printlog('All Z groups are ', z_groups) # sys.exit() zr_range = get_zr_range(st, thickness, zr) for i in range(100): # try 100 attempts until the condition is satisfied, otherwise terminate z_groups_cp = copy.deepcopy(z_groups) # print('z_groups_cp', z_groups_cp) gr1 = random.choice(z_groups_cp) z_groups_cp.remove(gr1) gr2 = random.choice(z_groups_cp) printlog('Chosen Z groups are ', gr1, gr2) # print(st.get_elements_z()) # sys.exit() # nn1 = st.get_specific_elements(gr1, z_range = z_range) # atom numbers # nn2 = st.get_specific_elements(gr2, z_range = z_range) nn1 = st.get_specific_elements(gr1, zr_range = zr_range) # atom numbers nn2 = st.get_specific_elements(gr2, zr_range = zr_range) if len(nn1) == 0 or len(nn2) == 0: printlog('Attention, nn1 or nn2 are too small:', nn1, nn2, 'trying another') # print(st.get_elements()) print(gr1, gr2, zr_range) print([st.xred[i] for i in st.get_specific_elements([300]) ]) # sys.exit() continue printlog('Two groups of atom numbers to swap are', nn1, nn2) # sys.exit() at1 = random.choice(nn1) if at1 in nn1: nn1.remove(at1) at2 = random.choice(nn2) els = st.get_elements() st_new_init = st.swap_atoms(at1, at2) printlog('I swapped', at1+1, els[at1], 'and', at2+1, els[at2], imp = 'y' ) #condition check-up if condition == 'no_surface_TM': elsz = st_new_init.get_elements_z() z1 = elsz[at1] z2 = elsz[at2] if (z1 in TM and z2 in TM) or (z1 not in TM and z2 not in TM): break # nothing to do # elif z1 in TM or z2 in TM: if z1 in TM: atTM = at1 else: atTM = at2 printlog('I found that one swapping atom is transition metal', atTM, els[atTM], 'checking coordination') # nO1 = st.nn(atTM, 6, only = [8], from_one = 0)['el'].count('O') # nO2 = st_new_init.nn(atTM, 6, only = [8], from_one = 0)['el'].count('O') av1 = st.nn(atTM, 6, only = [8], from_one = 0, silent = 1)['av(A-O,F)'] av2 = st_new_init.nn(atTM, 6, only = [8], from_one = 0, silent = 1)['av(A-O,F)'] # printlog('The oxygen-TM average', av1, av2, imp = 'y') if av2 > av1+0.5: printlog('Surface TM detected, the TM-O average distances before and after are {:.2f} {:.2f} A. Trying another swap.'.format(av1, av2), imp = 'y') else: printlog('TM-O av. 
dist before and after are {:.2f} {:.2f} A. Good, accepted'.format(av1, av2), imp = 'y') break # if nO1 == nO2: # printlog('The oxygen coordination of TM after swap is the same, accepting', nO1, nO2) # break # else: # printlog('Warning! The oxygen coordination of TM was reduced, trying another step:', nO1, nO2) else: printlog('exchange_atoms(): The given condition on atom swapping cannot be satisfied! exiting', imp = 'y' ) sys.exit() return st_new_init def exchange_with_external(st, zr, thickness, external = None): """ external (dict) - {'Ni':['Li']} - atoms from external reservoir which can replace existing elements, in this example Ni can replace Li """ printlog('Starting exchange with external') zr_range = get_zr_range(st, thickness, zr) # get external # print(external.keys()) el_ext = random.choice(list(external.keys())) # get element to change el_int = random.choice(external[el_ext]) nn1 = st.get_specific_elements([invert(el_int)], zr_range = zr_range) nn2 = st.get_specific_elements([invert(el_ext)], zr_range = zr_range) # just to know good TM-O distance j = random.choice(nn2) av2 = st.nn(j, 6, only = [8], from_one = 0, silent = 1)['av(A-O,F)'] # in slab if len(nn1) == 0: printlog('All atoms were replaced, exiting ', imp = 'y') sys.exit() for k in range(10): i = random.choice(nn1) av1 = st.nn(i, 6, only = [8], from_one = 0, silent = 1)['av(A-O,F)'] if av1 < av2+0.4: printlog('The chosen position is compared to the existing in slab for {:s}: {:.2f} {:.2f} A'.format(el_ext, av1, av2), imp = 'y') break else: printlog('Trying ', i) else: printlog('No more good options, trying all', imp = 'y') st_new = st.replace_atoms([i], el_ext) printlog('Atom', el_int, i, 'was replaced by ', el_ext, 'from external reservoir') return st_new if __name__ == "__main__": debug = 0 header.warnings = 'yY' # header.warnings = 'neyY' header.verbose_log = 1 printlog('\n\n\nStarting Monte-Carlo script!') """0. Read configuration file """ # params = read_monte_params() if os.path.exists('monte.json'): with open('monte.json', 'r') as fp: params = json.load(fp) else: printlog('Warning! no configuration file monte.json, exiting') sys.exit() vasprun_command = params.get('vasp_run') or 'vasp' nmcstep = params.get('mcsteps') or 2 # minimum two steps are done thickness = params.get('thickness') or 6 # minimum layer temperature = params.get('temp') or 1 xcart_voids = params.get('xvoid') if params.get('external'): if not params.get('chem_pot'): printlog('Error! no chem_pot parameter detected in external mode') cl_bulk = CalculationVasp().deserialize_json('bulk.json') printlog('Mode with external chemical potential. Reading of bulk structure is OK', cl_bulk.e0) printlog('Command to run vasp', vasprun_command) printlog('Total number of steps is', nmcstep) printlog('Thickness is ', thickness) printlog('Temperature is ', temperature, 'K') cl, last_number = initial_run(xcart_voids, ) st = cl.end """Determine surface position""" z2 = st.get_surface_pos()[1] zr2 = st.get_surface_pos(reduced = True)[1] printlog('Position of top surface is {:3.2f} {:3.2f}'.format(z2, zr2) ) # printlog """Start Monte-Carlo""" for i_mcstep in range(1+last_number, 1+last_number+nmcstep): printlog('---------------------------------', imp = 'y') printlog('\n\n\n\nMonte-Carlo step = ', i_mcstep, imp = 'y') """3. 
The section where changes are done """ if params.get('external'): st_new_init = exchange_with_external(st, zr2, thickness, external = params.get('external')) else: st_new_init = exchange_atoms(st, xcart_voids, z2, thickness, zr = zr2, condition = 'no_surface_TM') if xcart_voids: xcart_voids = st_new_init.get_specific_elements([300], fmt = 'x') printlog('The following voids after changes were extracted from st:', xcart_voids) if debug: st_new_init.write_poscar('POSCAR-'+str(i_mcstep)) st_new_init = read_poscar(st_new_init, 'POSCAR-'+str(i_mcstep)) # print(xcart_voids) # sys.exit() if xcart_voids: st_new_init = st_new_init.add_atoms(xcart_voids, 'void') # xcart_voids = st_new_init.get_specific_elements([300], fmt = 'x') # print('After writing and reading the voids are ', xcart_voids) st = st_new_init cl = CalculationVasp() cl_new = CalculationVasp() cl.end = st cl_new.end = st_new_init cl.e0 = random.random() cl_new.e0 = random.random() else: """4. Write new structure and calculate energy """ st_new_init.write_poscar('POSCAR') #here voids are lost cl_new = vasp_run(3, 'mcstep '+str(i_mcstep), vasprun_command = vasprun_command) if check(cl_new): printlog('{:5d} is unlucky configuration, trying another ... '.format(i_mcstep), imp = 'y') continue """5. Check if to accept new structure """ if params.get('external'): # print('') gamma, E1 = suf_en(cl, cl_bulk, chem_pot = params.get('chem_pot'), return_diff_energy =1) gamma_new, E2 = suf_en(cl_new, cl_bulk, chem_pot = params.get('chem_pot'), return_diff_energy =1) else: E1 = cl.e0 E2 = cl_new.e0 printlog('Energies before and after are {:3.3f} and {:3.3f}, dE = {:3.3f}'.format(E1, E2, E2 - E1), imp = 'y') with open('ENERGIES', 'a') as f: f.write('{:5d} {:.5f}\n'.format(i_mcstep, cl_new.e0)) if metropolis(E1, E2, temperature): cl = cl_new if xcart_voids: #insert voids cl_new.end = cl_new.end.add_atoms(xcart_voids, 'void') # here voids are inserted back if not debug: cl.serialize(str(i_mcstep)+'-yes') copyfile('CONTCAR', 'CONTCAR_last') copyfile('OUTCAR', 'OUTCAR_last') if os.path.exists('LOCPOT'): copyfile('LOCPOT', 'LOCPOT_last') st = cl_new.end printlog('The step was accepted', imp = 'y') else: printlog('The step was rejected', imp = 'y') if not debug: cl_new.serialize(str(i_mcstep)+'-no') if not debug: copyfile('OSZICAR', 'OSZICAR-'+str(i_mcstep)) copyfile('CONTCAR', 'CONTCAR-'+str(i_mcstep)) copyfile('OUTCAR', 'OUTCAR-'+str(i_mcstep)) if os.path.exists('LOCPOT'): copyfile('LOCPOT', 'LOCPOT-'+str(i_mcstep)) if not debug: copyfile('OUTCAR_last', 'OUTCAR') copyfile('CONTCAR_last', 'CONTCAR') if os.path.exists('LOCPOT_last'): copyfile('LOCPOT_last', 'LOCPOT') printlog('MC simulation finished!', imp = 'y')
dimonaks/siman
siman/monte.py
Python
gpl-2.0
16,446
[ "VASP" ]
4e9b01ace1bee62feb19552e0656018cef0e4436f01b88537b6bc6f144c110e7
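The script above delegates the accept/reject decision to siman.monte_functions.metropolis(E1, E2, temperature). A sketch of the textbook Metropolis criterion it presumably implements; the constant and the exact form are assumptions, not read from siman:

import math
import random

KB_EV = 8.617333262e-5  # Boltzmann constant in eV/K (VASP energies are in eV)

def metropolis_accept(E1, E2, T):
    # Always accept downhill moves; accept uphill moves with probability
    # exp(-dE / kT), which satisfies detailed balance for the target ensemble.
    dE = E2 - E1
    if dE <= 0:
        return True
    return random.random() < math.exp(-dE / (KB_EV * T))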
# (c) 2014, Brian Coca, Josh Drake, et al # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import time import errno from abc import ABCMeta, abstractmethod from ansible import constants as C from ansible.compat.six import with_metaclass from ansible.errors import AnsibleError from ansible.module_utils._text import to_bytes try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class BaseCacheModule(with_metaclass(ABCMeta, object)): # Backwards compat only. Just import the global display instead _display = display @abstractmethod def get(self, key): pass @abstractmethod def set(self, key, value): pass @abstractmethod def keys(self): pass @abstractmethod def contains(self, key): pass @abstractmethod def delete(self, key): pass @abstractmethod def flush(self): pass @abstractmethod def copy(self): pass class BaseFileCacheModule(BaseCacheModule): """ A caching module backed by file based storage. """ def __init__(self, *args, **kwargs): self.plugin_name = self.__module__.split('.')[-1] self._timeout = float(C.CACHE_PLUGIN_TIMEOUT) self._cache = {} self._cache_dir = None if C.CACHE_PLUGIN_CONNECTION: # expects a dir path self._cache_dir = os.path.expanduser(os.path.expandvars(C.CACHE_PLUGIN_CONNECTION)) if not self._cache_dir: raise AnsibleError("error, '%s' cache plugin requires the 'fact_caching_connection' config option" " to be set (to a writeable directory path)" % self.plugin_name) if not os.path.exists(self._cache_dir): try: os.makedirs(self._cache_dir) except (OSError,IOError) as e: display.warning("error in '%s' cache plugin while trying to create cache dir %s : %s" % (self.plugin_name, self._cache_dir, to_bytes(e))) return None def get(self, key): """ This checks the in memory cache first as the fact was not expired at 'gather time' and it would be problematic if the key did expire after some long running tasks and user gets 'undefined' error in the same play """ if key in self._cache: return self._cache.get(key) if self.has_expired(key) or key == "": raise KeyError cachefile = "%s/%s" % (self._cache_dir, key) try: try: value = self._load(cachefile) self._cache[key] = value return value except ValueError as e: display.warning("error in '%s' cache plugin while trying to read %s : %s." " Most likely a corrupt file, so erasing and failing." % (self.plugin_name, cachefile, to_bytes(e))) self.delete(key) raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data." " It has been removed, so you can re-run your command now." 
% cachefile) except (OSError,IOError) as e: display.warning("error in '%s' cache plugin while trying to read %s : %s" % (self.plugin_name, cachefile, to_bytes(e))) raise KeyError except Exception as e: raise AnsibleError("Error while decoding the cache file %s: %s" % (cachefile, to_bytes(e))) def set(self, key, value): self._cache[key] = value cachefile = "%s/%s" % (self._cache_dir, key) try: self._dump(value, cachefile) except (OSError,IOError) as e: display.warning("error in '%s' cache plugin while trying to write to %s : %s" % (self.plugin_name, cachefile, to_bytes(e))) def has_expired(self, key): if self._timeout == 0: return False cachefile = "%s/%s" % (self._cache_dir, key) try: st = os.stat(cachefile) except (OSError,IOError) as e: if e.errno == errno.ENOENT: return False else: display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e))) return False if time.time() - st.st_mtime <= self._timeout: return False if key in self._cache: del self._cache[key] return True def keys(self): keys = [] for k in os.listdir(self._cache_dir): if not (k.startswith('.') or self.has_expired(k)): keys.append(k) return keys def contains(self, key): cachefile = "%s/%s" % (self._cache_dir, key) if key in self._cache: return True if self.has_expired(key): return False try: os.stat(cachefile) return True except (OSError,IOError) as e: if e.errno == errno.ENOENT: return False else: display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e))) return False def delete(self, key): try: del self._cache[key] except KeyError: pass try: os.remove("%s/%s" % (self._cache_dir, key)) except (OSError, IOError): pass #TODO: only pass on non existing? def flush(self): self._cache = {} for key in self.keys(): self.delete(key) def copy(self): ret = dict() for key in self.keys(): ret[key] = self.get(key) return ret @abstractmethod def _load(self, filepath): """ Read data from a filepath and return it as a value :arg filepath: The filepath to read from. :returns: The value stored in the filepath This method reads from the file on disk and takes care of any parsing and transformation of the data before returning it. The value returned should be what Ansible would expect if it were uncached data. .. note:: Filehandles have advantages but calling code doesn't know whether this file is text or binary, should be decoded, or accessed via a library function. Therefore the API uses a filepath and opens the file inside of the method. """ pass @abstractmethod def _dump(self, value, filepath): """ Write data to a filepath :arg value: The value to store :arg filepath: The filepath to store it at """ pass
grimmjow8/ansible
lib/ansible/plugins/cache/base.py
Python
gpl-3.0
7,445
[ "Brian" ]
2c11d6043986efa44114b8b7f1dfb8ddddc2fd3bc8c0b088a009930a1e288cf3
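A concrete file-backed cache only has to supply _load and _dump; expiry handling, the in-memory layer, and key listing all come from BaseFileCacheModule. A minimal JSON-based sketch (the class name is hypothetical; Ansible ships real plugins such as jsonfile):

import json

class JsonFileCacheExample(BaseFileCacheModule):  # hypothetical subclass
    def _load(self, filepath):
        # Parse the on-disk representation back into a plain value; a corrupt
        # file raises ValueError, which get() above catches and reports.
        with open(filepath, 'r') as f:
            return json.load(f)

    def _dump(self, value, filepath):
        with open(filepath, 'w') as f:
            json.dump(value, f)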
#!/usr/bin/env python # # Copyright 2012 The Plaso Project Authors. # Please see the AUTHORS file for details on individual authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This file contains tests for the object filter.""" import unittest from plaso.lib import objectfilter class DummyObject(object): def __init__(self, key, value): setattr(self, key, value) class HashObject(object): def __init__(self, hash_value=None): self.value = hash_value @property def md5(self): return self.value def __eq__(self, y): return self.value == y def __lt__(self, y): return self.value < y class Dll(object): def __init__(self, name, imported_functions=None, exported_functions=None): self.name = name self._imported_functions = imported_functions or [] self.num_imported_functions = len(self._imported_functions) self.exported_functions = exported_functions or [] self.num_exported_functions = len(self.exported_functions) @property def imported_functions(self): for fn in self._imported_functions: yield fn class DummyFile(object): _FILENAME = 'boot.ini' ATTR1 = 'Backup' ATTR2 = 'Archive' HASH1 = '123abc' HASH2 = '456def' non_callable_leaf = 'yoda' def __init__(self): self.non_callable = HashObject(self.HASH1) self.non_callable_repeated = [ DummyObject('desmond', ['brotha', 'brotha']), DummyObject('desmond', ['brotha', 'sista'])] self.imported_dll1 = Dll('a.dll', ['FindWindow', 'CreateFileA']) self.imported_dll2 = Dll('b.dll', ['RegQueryValueEx']) @property def name(self): return self._FILENAME @property def attributes(self): return [self.ATTR1, self.ATTR2] @property def hash(self): return [HashObject(self.HASH1), HashObject(self.HASH2)] @property def size(self): return 10 @property def deferred_values(self): for v in ['a', 'b']: yield v @property def novalues(self): return [] @property def imported_dlls(self): return [self.imported_dll1, self.imported_dll2] def Callable(self): raise RuntimeError(u'This can not be called.') @property def float(self): return 123.9823 class ObjectFilterTest(unittest.TestCase): def setUp(self): self.file = DummyFile() self.filter_imp = objectfilter.LowercaseAttributeFilterImplementation self.value_expander = self.filter_imp.FILTERS['ValueExpander'] operator_tests = { objectfilter.Less: [ (True, ['size', 1000]), (True, ['size', 11]), (False, ['size', 10]), (False, ['size', 0]), (False, ['float', 1.0]), (True, ['float', 123.9824])], objectfilter.LessEqual: [ (True, ['size', 1000]), (True, ['size', 11]), (True, ['size', 10]), (False, ['size', 9]), (False, ['float', 1.0]), (True, ['float', 123.9823])], objectfilter.Greater: [ (True, ['size', 1]), (True, ['size', 9.23]), (False, ['size', 10]), (False, ['size', 1000]), (True, ['float', 122]), (True, ['float', 1.0])], objectfilter.GreaterEqual: [ (False, ['size', 1000]), (False, ['size', 11]), (True, ['size', 10]), (True, ['size', 0]), # Floats work fine too. (True, ['float', 122]), (True, ['float', 123.9823]), # Comparisons works with strings, although it might be a bit silly. 
(True, ['name', 'aoot.ini'])], objectfilter.Contains: [ # Contains works with strings. (True, ['name', 'boot.ini']), (True, ['name', 'boot']), (False, ['name', 'meh']), # Works with generators. (True, ['imported_dlls.imported_functions', 'FindWindow']), # But not with numbers. (False, ['size', 12])], objectfilter.Equals: [ (True, ['name', 'boot.ini']), (False, ['name', 'foobar']), (True, ['float', 123.9823])], objectfilter.NotEquals: [ (False, ['name', 'boot.ini']), (True, ['name', 'foobar']), (True, ['float', 25])], objectfilter.InSet: [ (True, ['name', ['boot.ini', 'autoexec.bat']]), (True, ['name', 'boot.ini']), (False, ['name', 'NOPE']), # All values of attributes are within these. (True, ['attributes', ['Archive', 'Backup', 'Nonexisting']]), # Not all values of attributes are within these. (False, ['attributes', ['Executable', 'Sparse']])], objectfilter.Regexp: [ (True, ['name', '^boot.ini$']), (True, ['name', 'boot.ini']), (False, ['name', '^$']), (True, ['attributes', 'Archive']), # One can regexp numbers if he's inclined to. (True, ['size', 0]), # But regexp doesn't work with lists or generators for the moment. (False, ['imported_dlls.imported_functions', 'FindWindow'])], } def testBinaryOperators(self): for operator, test_data in self.operator_tests.items(): for test_unit in test_data: # TODO: why is there a print statement here? print (u'Testing {0:s} with {1!s} and {2!s}'.format( operator, test_unit[0], test_unit[1])) kwargs = {'arguments': test_unit[1], 'value_expander': self.value_expander} ops = operator(**kwargs) self.assertEqual(test_unit[0], ops.Matches(self.file)) if hasattr(ops, 'FlipBool'): ops.FlipBool() # TODO: why is there a print statement here? print u'Testing negative matching.' self.assertEqual(not test_unit[0], ops.Matches(self.file)) def testExpand(self): # Case insensitivity. values_lowercase = self.value_expander().Expand(self.file, 'size') values_uppercase = self.value_expander().Expand(self.file, 'Size') self.assertListEqual(list(values_lowercase), list(values_uppercase)) # Existing, non-repeated, leaf is a value. values = self.value_expander().Expand(self.file, 'size') self.assertListEqual(list(values), [10]) # Existing, non-repeated, leaf is iterable. values = self.value_expander().Expand(self.file, 'attributes') self.assertListEqual(list(values), [[DummyFile.ATTR1, DummyFile.ATTR2]]) # Existing, repeated, leaf is value. values = self.value_expander().Expand(self.file, 'hash.md5') self.assertListEqual(list(values), [DummyFile.HASH1, DummyFile.HASH2]) # Existing, repeated, leaf is iterable. values = self.value_expander().Expand( self.file, 'non_callable_repeated.desmond') self.assertListEqual( list(values), [['brotha', 'brotha'], ['brotha', 'sista']]) # Now with an iterator. values = self.value_expander().Expand(self.file, 'deferred_values') self.assertListEqual([list(value) for value in values], [['a', 'b']]) # Iterator > generator. values = self.value_expander().Expand( self.file, 'imported_dlls.imported_functions') expected = [['FindWindow', 'CreateFileA'], ['RegQueryValueEx']] self.assertListEqual([list(value) for value in values], expected) # Non-existing first path. values = self.value_expander().Expand(self.file, 'nonexistant') self.assertListEqual(list(values), []) # Non-existing in the middle. values = self.value_expander().Expand(self.file, 'hash.mink.boo') self.assertListEqual(list(values), []) # Non-existing as a leaf. values = self.value_expander().Expand(self.file, 'hash.mink') self.assertListEqual(list(values), []) # Non-callable leaf. 
values = self.value_expander().Expand(self.file, 'non_callable_leaf') self.assertListEqual(list(values), [DummyFile.non_callable_leaf]) # callable. values = self.value_expander().Expand(self.file, 'Callable') self.assertListEqual(list(values), []) # leaf under a callable. Will return nothing. values = self.value_expander().Expand(self.file, 'Callable.a') self.assertListEqual(list(values), []) def testGenericBinaryOperator(self): class TestBinaryOperator(objectfilter.GenericBinaryOperator): values = list() def Operation(self, x, _): return self.values.append(x) # Test a common binary operator. tbo = TestBinaryOperator( arguments=['whatever', 0], value_expander=self.value_expander) self.assertEqual(tbo.right_operand, 0) self.assertEqual(tbo.args[0], 'whatever') tbo.Matches(DummyObject('whatever', 'id')) tbo.Matches(DummyObject('whatever', 'id2')) tbo.Matches(DummyObject('whatever', 'bg')) tbo.Matches(DummyObject('whatever', 'bg2')) self.assertListEqual(tbo.values, ['id', 'id2', 'bg', 'bg2']) def testContext(self): self.assertRaises( objectfilter.InvalidNumberOfOperands, objectfilter.Context, arguments=['context'], value_expander=self.value_expander) self.assertRaises( objectfilter.InvalidNumberOfOperands, objectfilter.Context, arguments=[ 'context', objectfilter.Equals( arguments=['path', 'value'], value_expander=self.value_expander), objectfilter.Equals( arguments=['another_path', 'value'], value_expander=self.value_expander)], value_expander=self.value_expander) # One imported_dll imports 2 functions AND one imported_dll imports # function RegQueryValueEx. arguments = [ objectfilter.Equals( arguments=['imported_dlls.num_imported_functions', 1], value_expander=self.value_expander), objectfilter.Contains( arguments=['imported_dlls.imported_functions', 'RegQueryValueEx'], value_expander=self.value_expander)] condition = objectfilter.AndFilter(arguments=arguments) # Without context, it matches because both filters match separately. self.assertEqual(True, condition.Matches(self.file)) arguments = [ objectfilter.Equals( arguments=['num_imported_functions', 2], value_expander=self.value_expander), objectfilter.Contains( arguments=['imported_functions', 'RegQueryValueEx'], value_expander=self.value_expander)] condition = objectfilter.AndFilter(arguments=arguments) # The same DLL imports 2 functions AND one of these is RegQueryValueEx. context = objectfilter.Context(arguments=['imported_dlls', condition], value_expander=self.value_expander) # With context, it doesn't match because both don't match in the same dll. self.assertEqual(False, context.Matches(self.file)) # One imported_dll imports only 1 function AND one imported_dll imports # function RegQueryValueEx. condition = objectfilter.AndFilter(arguments=[ objectfilter.Equals( arguments=['num_imported_functions', 1], value_expander=self.value_expander), objectfilter.Contains( arguments=['imported_functions', 'RegQueryValueEx'], value_expander=self.value_expander)]) # The same DLL imports 1 function AND it's RegQueryValueEx. context = objectfilter.Context(['imported_dlls', condition], value_expander=self.value_expander) self.assertEqual(True, context.Matches(self.file)) # Now test the context with a straight query. 
query = u'\n'.join([ '@imported_dlls', '(', ' imported_functions contains "RegQueryValueEx"', ' AND num_imported_functions == 1', ')']) filter_ = objectfilter.Parser(query).Parse() filter_ = filter_.Compile(self.filter_imp) self.assertEqual(True, filter_.Matches(self.file)) def testRegexpRaises(self): with self.assertRaises(ValueError): objectfilter.Regexp( arguments=['name', 'I [dont compile'], value_expander=self.value_expander) def testEscaping(self): parser = objectfilter.Parser(r'a is "\n"').Parse() self.assertEqual(parser.args[0], '\n') # Invalid escape sequence. parser = objectfilter.Parser(r'a is "\z"') with self.assertRaises(objectfilter.ParseError): parser.Parse() # Can escape the backslash. parser = objectfilter.Parser(r'a is "\\"').Parse() self.assertEqual(parser.args[0], '\\') # Test hexadecimal escaping. # This fails as it's not really a hex escaped string. parser = objectfilter.Parser(r'a is "\xJZ"') with self.assertRaises(objectfilter.ParseError): parser.Parse() # Instead, this is what one should write. parser = objectfilter.Parser(r'a is "\\xJZ"').Parse() self.assertEqual(parser.args[0], r'\xJZ') # Standard hex-escape. parser = objectfilter.Parser(r'a is "\x41\x41\x41"').Parse() self.assertEqual(parser.args[0], 'AAA') # Hex-escape + a character. parser = objectfilter.Parser(r'a is "\x414"').Parse() self.assertEqual(parser.args[0], r'A4') # How to include r'\x41'. parser = objectfilter.Parser(r'a is "\\x41"').Parse() self.assertEqual(parser.args[0], r'\x41') def testParse(self): # Arguments are either int, float or quoted string. objectfilter.Parser('attribute == 1').Parse() objectfilter.Parser('attribute == 0x10').Parse() parser = objectfilter.Parser('attribute == 1a') with self.assertRaises(objectfilter.ParseError): parser.Parse() objectfilter.Parser('attribute == 1.2').Parse() objectfilter.Parser('attribute == \'bla\'').Parse() objectfilter.Parser('attribute == "bla"').Parse() parser = objectfilter.Parser('something == red') self.assertRaises(objectfilter.ParseError, parser.Parse) # Can't start with AND. parser = objectfilter.Parser('and something is \'Blue\'') with self.assertRaises(objectfilter.ParseError): parser.Parse() # Test negative filters. parser = objectfilter.Parser('attribute not == \'dancer\'') with self.assertRaises(objectfilter.ParseError): parser.Parse() parser = objectfilter.Parser('attribute == not \'dancer\'') with self.assertRaises(objectfilter.ParseError): parser.Parse() parser = objectfilter.Parser('attribute not not equals \'dancer\'') with self.assertRaises(objectfilter.ParseError): parser.Parse() parser = objectfilter.Parser('attribute not > 23') with self.assertRaises(objectfilter.ParseError): parser.Parse() # Need to close braces. objectfilter.Parser('(a is 3)').Parse() parser = objectfilter.Parser('(a is 3') self.assertRaises(objectfilter.ParseError, parser.Parse) # Need to open braces to close them. parser = objectfilter.Parser('a is 3)') self.assertRaises(objectfilter.ParseError, parser.Parse) # Context Operator alone is not accepted. parser = objectfilter.Parser('@attributes') with self.assertRaises(objectfilter.ParseError): parser.Parse() # Accepted only with braces. objectfilter.Parser('@attributes( name is \'adrien\')').Parse() # Not without them. parser = objectfilter.Parser('@attributes name is \'adrien\'') with self.assertRaises(objectfilter.ParseError): parser.Parse() # Can nest context operators. 
query = '@imported_dlls( @imported_function( name is \'OpenFileA\'))' objectfilter.Parser(query).Parse() # Can nest context operators and mix braces without it messing up. query = '@imported_dlls( @imported_function( name is \'OpenFileA\'))' parser = objectfilter.Parser(query).Parse() query = u'\n'.join([ '@imported_dlls', '(', ' @imported_function', ' (', ' name is "OpenFileA" and ordinal == 12', ' )', ')']) parser = objectfilter.Parser(query).Parse() # Mix context and binary operators. query = u'\n'.join([ '@imported_dlls', '(', ' @imported_function', ' (', ' name is "OpenFileA"', ' ) AND num_functions == 2', ')']) parser = objectfilter.Parser(query).Parse() # Also on the right. query = u'\n'.join([ '@imported_dlls', '(', ' num_functions == 2 AND', ' @imported_function', ' (', ' name is "OpenFileA"', ' )', ')']) # Altogether. # There's an imported dll that imports OpenFileA AND # an imported DLL matching advapi32.dll that imports RegQueryValueExA AND # and it exports a symbol called 'inject'. query = u'\n'.join([ '@imported_dlls( @imported_function ( name is "OpenFileA" ) )', 'AND', '@imported_dlls (', ' name regexp "(?i)advapi32.dll"', ' AND @imported_function ( name is "RegQueryValueEx" )', ')', 'AND @exported_symbols(name is "inject")']) def testCompile(self): obj = DummyObject('something', 'Blue') parser = objectfilter.Parser('something == \'Blue\'').Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), True) parser = objectfilter.Parser('something == \'Red\'').Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), False) parser = objectfilter.Parser('something == "Red"').Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), False) obj = DummyObject('size', 4) parser = objectfilter.Parser('size < 3').Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), False) parser = objectfilter.Parser('size == 4').Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), True) query = 'something is \'Blue\' and size not contains 3' parser = objectfilter.Parser(query).Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), False) if __name__ == '__main__': unittest.main()
iwm911/plaso
plaso/lib/objectfilter_test.py
Python
apache-2.0
18,566
[ "Desmond" ]
5aaa0e64a483e1f3b9324a4203043181312a5a08d036ee89aead51c8297f4600
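Pulling the pieces these tests exercise into one end-to-end flow: a filter string is parsed into a tree, compiled against an implementation, then matched against arbitrary objects. A short usage sketch with the same classes and the DummyFile fixture:

filter_imp = objectfilter.LowercaseAttributeFilterImplementation
tree = objectfilter.Parser('name contains "boot" and size == 10').Parse()
matcher = tree.Compile(filter_imp)
print(matcher.Matches(DummyFile()))  # True: name is 'boot.ini' and size is 10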
from io import BytesIO from threading import Lock import contextlib import itertools import os.path import pickle import shutil import tempfile import unittest import sys import numpy as np import pandas as pd import xray from xray import Dataset, open_dataset, open_mfdataset, backends, save_mfdataset from xray.backends.common import robust_getitem from xray.core.pycompat import iteritems, PY3 from . import (TestCase, requires_scipy, requires_netCDF4, requires_pydap, requires_scipy_or_netCDF4, requires_dask, requires_h5netcdf, has_netCDF4, has_scipy) from .test_dataset import create_test_data try: import netCDF4 as nc4 except ImportError: pass try: import dask import dask.array as da except ImportError: pass def open_example_dataset(name, *args, **kwargs): return open_dataset(os.path.join(os.path.dirname(__file__), 'data', name), *args, **kwargs) def create_masked_and_scaled_data(): x = np.array([np.nan, np.nan, 10, 10.1, 10.2]) encoding = {'_FillValue': -1, 'add_offset': 10, 'scale_factor': np.float32(0.1), 'dtype': 'i2'} return Dataset({'x': ('t', x, {}, encoding)}) def create_encoded_masked_and_scaled_data(): attributes = {'_FillValue': -1, 'add_offset': 10, 'scale_factor': np.float32(0.1)} return Dataset({'x': ('t', [-1, -1, 0, 1, 2], attributes)}) class TestCommon(TestCase): def test_robust_getitem(self): class UnreliableArrayFailure(Exception): pass class UnreliableArray(object): def __init__(self, array, failures=1): self.array = array self.failures = failures def __getitem__(self, key): if self.failures > 0: self.failures -= 1 raise UnreliableArrayFailure return self.array[key] array = UnreliableArray([0]) with self.assertRaises(UnreliableArrayFailure): array[0] self.assertEqual(array[0], 0) actual = robust_getitem(array, 0, catch=UnreliableArrayFailure, initial_delay=0) self.assertEqual(actual, 0) class Only32BitTypes(object): pass class DatasetIOTestCases(object): def create_store(self): raise NotImplementedError def roundtrip(self, data, **kwargs): raise NotImplementedError def test_zero_dimensional_variable(self): expected = create_test_data() expected['float_var'] = ([], 1.0e9, {'units': 'units of awesome'}) expected['string_var'] = ([], np.array('foobar', dtype='S')) with self.roundtrip(expected) as actual: self.assertDatasetAllClose(expected, actual) def test_write_store(self): expected = create_test_data() with self.create_store() as store: expected.dump_to_store(store) # we need to cf decode the store because it has time and # non-dimension coordinates actual = xray.decode_cf(store) self.assertDatasetAllClose(expected, actual) def test_roundtrip_test_data(self): expected = create_test_data() with self.roundtrip(expected) as actual: self.assertDatasetAllClose(expected, actual) def test_load(self): expected = create_test_data() @contextlib.contextmanager def assert_loads(vars=None): if vars is None: vars = expected with self.roundtrip(expected) as actual: for v in actual.values(): self.assertFalse(v._in_memory) yield actual for k, v in actual.items(): if k in vars: self.assertTrue(v._in_memory) self.assertDatasetAllClose(expected, actual) with self.assertRaises(AssertionError): # make sure the contextmanager works! 
with assert_loads() as ds: pass with assert_loads() as ds: ds.load() with assert_loads(['var1', 'dim1', 'dim2']) as ds: ds['var1'].load() # verify we can read data even after closing the file with self.roundtrip(expected) as ds: actual = ds.load() self.assertDatasetAllClose(expected, actual) def test_roundtrip_None_variable(self): expected = Dataset({None: (('x', 'y'), [[0, 1], [2, 3]])}) with self.roundtrip(expected) as actual: self.assertDatasetAllClose(expected, actual) def test_roundtrip_object_dtype(self): floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object) floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object) letters = np.array(['ab', 'cdef', 'g'], dtype=object) letters_nans = np.array(['ab', 'cdef', np.nan], dtype=object) all_nans = np.array([np.nan, np.nan], dtype=object) original = Dataset({'floats': ('a', floats), 'floats_nans': ('a', floats_nans), 'letters': ('b', letters), 'letters_nans': ('b', letters_nans), 'all_nans': ('c', all_nans), 'nan': ([], np.nan)}) expected = original.copy(deep=True) if isinstance(self, Only32BitTypes): # for netCDF3 tests, expect the results to come back as characters expected['letters_nans'] = expected['letters_nans'].astype('S') expected['letters'] = expected['letters'].astype('S') with self.roundtrip(original) as actual: try: self.assertDatasetIdentical(expected, actual) except AssertionError: # Most stores use '' for nans in strings, but some don't # first try the ideal case (where the store returns exactly) # the original Dataset), then try a more realistic case. # ScipyDataTest, NetCDF3ViaNetCDF4DataTest and NetCDF4DataTest # all end up using this case. expected['letters_nans'][-1] = '' self.assertDatasetIdentical(expected, actual) def test_roundtrip_string_data(self): expected = Dataset({'x': ('t', ['ab', 'cdef'])}) with self.roundtrip(expected) as actual: if isinstance(self, Only32BitTypes): expected['x'] = expected['x'].astype('S') self.assertDatasetIdentical(expected, actual) def test_roundtrip_datetime_data(self): times = pd.to_datetime(['2000-01-01', '2000-01-02', 'NaT']) expected = Dataset({'t': ('t', times), 't0': times[0]}) with self.roundtrip(expected) as actual: self.assertDatasetIdentical(expected, actual) def test_roundtrip_timedelta_data(self): time_deltas = pd.to_timedelta(['1h', '2h', 'NaT']) expected = Dataset({'td': ('td', time_deltas), 'td0': time_deltas[0]}) with self.roundtrip(expected) as actual: self.assertDatasetIdentical(expected, actual) def test_roundtrip_float64_data(self): expected = Dataset({'x': ('y', np.array([1.0, 2.0, np.pi], dtype='float64'))}) with self.roundtrip(expected) as actual: self.assertDatasetIdentical(expected, actual) def test_roundtrip_example_1_netcdf(self): expected = open_example_dataset('example_1.nc') with self.roundtrip(expected) as actual: # we allow the attributes to differ since that # will depend on the encoding used. For example, # without CF encoding 'actual' will end up with # a dtype attribute. 
self.assertDatasetEqual(expected, actual) def test_roundtrip_coordinates(self): original = Dataset({'foo': ('x', [0, 1])}, {'x': [2, 3], 'y': ('a', [42]), 'z': ('x', [4, 5])}) with self.roundtrip(original) as actual: self.assertDatasetIdentical(original, actual) expected = original.drop('foo') with self.roundtrip(expected) as actual: self.assertDatasetIdentical(expected, actual) expected = original.copy() expected.attrs['coordinates'] = 'something random' with self.assertRaisesRegexp(ValueError, 'cannot serialize'): with self.roundtrip(expected): pass expected = original.copy(deep=True) expected['foo'].attrs['coordinates'] = 'something random' with self.assertRaisesRegexp(ValueError, 'cannot serialize'): with self.roundtrip(expected): pass def test_orthogonal_indexing(self): in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: indexers = {'dim1': np.arange(3), 'dim2': np.arange(4), 'dim3': np.arange(5)} expected = in_memory.isel(**indexers) actual = on_disk.isel(**indexers) self.assertDatasetAllClose(expected, actual) # do it twice, to make sure we're switched from orthogonal -> numpy # when we cached the values actual = on_disk.isel(**indexers) self.assertDatasetAllClose(expected, actual) def test_pickle(self): on_disk = open_example_dataset('bears.nc') unpickled = pickle.loads(pickle.dumps(on_disk)) self.assertDatasetIdentical(on_disk, unpickled) class CFEncodedDataTest(DatasetIOTestCases): def test_roundtrip_strings_with_fill_value(self): values = np.array(['ab', 'cdef', np.nan], dtype=object) encoding = {'_FillValue': np.string_('X'), 'dtype': np.dtype('S1')} original = Dataset({'x': ('t', values, {}, encoding)}) expected = original.copy(deep=True) expected['x'][:2] = values[:2].astype('S') with self.roundtrip(original) as actual: self.assertDatasetIdentical(expected, actual) original = Dataset({'x': ('t', values, {}, {'_FillValue': '\x00'})}) if not isinstance(self, Only32BitTypes): # these stores can save unicode strings expected = original.copy(deep=True) if isinstance(self, BaseNetCDF4Test): # netCDF4 can't keep track of an empty _FillValue for VLEN # variables expected['x'][-1] = '' elif (isinstance(self, (NetCDF3ViaNetCDF4DataTest, NetCDF4ClassicViaNetCDF4DataTest)) or (has_netCDF4 and type(self) is GenericNetCDFDataTest)): # netCDF4 can't keep track of an empty _FillValue for nc3, either: # https://github.com/Unidata/netcdf4-python/issues/273 expected['x'][-1] = np.string_('') with self.roundtrip(original) as actual: self.assertDatasetIdentical(expected, actual) def test_roundtrip_mask_and_scale(self): decoded = create_masked_and_scaled_data() encoded = create_encoded_masked_and_scaled_data() with self.roundtrip(decoded) as actual: self.assertDatasetAllClose(decoded, actual) with self.roundtrip(decoded, decode_cf=False) as actual: # TODO: this assumes that all roundtrips will first # encode. Is that something we want to test for? self.assertDatasetAllClose(encoded, actual) with self.roundtrip(encoded, decode_cf=False) as actual: self.assertDatasetAllClose(encoded, actual) # make sure roundtrip encoding didn't change the # original dataset. 
self.assertDatasetIdentical(encoded, create_encoded_masked_and_scaled_data()) with self.roundtrip(encoded) as actual: self.assertDatasetAllClose(decoded, actual) with self.roundtrip(encoded, decode_cf=False) as actual: self.assertDatasetAllClose(encoded, actual) def test_coordinates_encoding(self): def equals_latlon(obj): return obj == 'lat lon' or obj == 'lon lat' original = Dataset({'temp': ('x', [0, 1]), 'precip': ('x', [0, -1])}, {'lat': ('x', [2, 3]), 'lon': ('x', [4, 5])}) with self.roundtrip(original) as actual: self.assertDatasetIdentical(actual, original) with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with open_dataset(tmp_file, decode_coords=False) as ds: self.assertTrue(equals_latlon(ds['temp'].attrs['coordinates'])) self.assertTrue(equals_latlon(ds['precip'].attrs['coordinates'])) self.assertNotIn('coordinates', ds.attrs) self.assertNotIn('coordinates', ds['lat'].attrs) self.assertNotIn('coordinates', ds['lon'].attrs) modified = original.drop(['temp', 'precip']) with self.roundtrip(modified) as actual: self.assertDatasetIdentical(actual, modified) with create_tmp_file() as tmp_file: modified.to_netcdf(tmp_file) with open_dataset(tmp_file, decode_coords=False) as ds: self.assertTrue(equals_latlon(ds.attrs['coordinates'])) self.assertNotIn('coordinates', ds['lat'].attrs) self.assertNotIn('coordinates', ds['lon'].attrs) def test_roundtrip_endian(self): ds = Dataset({'x': np.arange(3, 10, dtype='>i2'), 'y': np.arange(3, 20, dtype='<i4'), 'z': np.arange(3, 30, dtype='=i8'), 'w': ('x', np.arange(3, 10, dtype=np.float))}) with self.roundtrip(ds) as actual: # technically these datasets are slightly different, # one hold mixed endian data (ds) the other should be # all big endian (actual). assertDatasetIdentical # should still pass though. 
self.assertDatasetIdentical(ds, actual) if type(self) is NetCDF4DataTest: ds['z'].encoding['endian'] = 'big' with self.assertRaises(NotImplementedError): with self.roundtrip(ds) as actual: pass def test_invalid_dataarray_names_raise(self): te = (TypeError, 'string or None') ve = (ValueError, 'string must be length 1 or') data = np.random.random((2, 2)) da = xray.DataArray(data) for name, e in zip([0, (4, 5), True, ''], [te, te, te, ve]): ds = Dataset({name: da}) with self.assertRaisesRegexp(*e): with self.roundtrip(ds) as actual: pass _counter = itertools.count() @contextlib.contextmanager def create_tmp_file(suffix='.nc'): temp_dir = tempfile.mkdtemp() path = os.path.join(temp_dir, 'temp-%s%s' % (next(_counter), suffix)) try: yield path finally: shutil.rmtree(temp_dir) class BaseNetCDF4Test(CFEncodedDataTest): def test_open_group(self): # Create a netCDF file with a dataset stored within a group with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, 'w') as rootgrp: foogrp = rootgrp.createGroup('foo') ds = foogrp ds.createDimension('time', size=10) x = np.arange(10) ds.createVariable('x', np.int32, dimensions=('time',)) ds.variables['x'][:] = x expected = Dataset() expected['x'] = ('time', x) # check equivalent ways to specify group for group in 'foo', '/foo', 'foo/', '/foo/': with open_dataset(tmp_file, group=group) as actual: self.assertVariableEqual(actual['x'], expected['x']) # check that missing group raises appropriate exception with self.assertRaises(IOError): open_dataset(tmp_file, group='bar') with self.assertRaisesRegexp(ValueError, 'must be a string'): open_dataset(tmp_file, group=(1, 2, 3)) def test_open_subgroup(self): # Create a netCDF file with a dataset stored within a group within a group with create_tmp_file() as tmp_file: rootgrp = nc4.Dataset(tmp_file, 'w') foogrp = rootgrp.createGroup('foo') bargrp = foogrp.createGroup('bar') ds = bargrp ds.createDimension('time', size=10) x = np.arange(10) ds.createVariable('x', np.int32, dimensions=('time',)) ds.variables['x'][:] = x rootgrp.close() expected = Dataset() expected['x'] = ('time', x) # check equivalent ways to specify group for group in 'foo/bar', '/foo/bar', 'foo/bar/', '/foo/bar/': with open_dataset(tmp_file, group=group) as actual: self.assertVariableEqual(actual['x'], expected['x']) def test_write_groups(self): data1 = create_test_data() data2 = data1 * 2 with create_tmp_file() as tmp_file: data1.to_netcdf(tmp_file, group='data/1') data2.to_netcdf(tmp_file, group='data/2', mode='a') with open_dataset(tmp_file, group='data/1') as actual1: self.assertDatasetIdentical(data1, actual1) with open_dataset(tmp_file, group='data/2') as actual2: self.assertDatasetIdentical(data2, actual2) def test_roundtrip_character_array(self): with create_tmp_file() as tmp_file: values = np.array([['a', 'b', 'c'], ['d', 'e', 'f']], dtype='S') with nc4.Dataset(tmp_file, mode='w') as nc: nc.createDimension('x', 2) nc.createDimension('string3', 3) v = nc.createVariable('x', np.dtype('S1'), ('x', 'string3')) v[:] = values values = np.array(['abc', 'def'], dtype='S') expected = Dataset({'x': ('x', values)}) with open_dataset(tmp_file) as actual: self.assertDatasetIdentical(expected, actual) # regression test for #157 with self.roundtrip(actual) as roundtripped: self.assertDatasetIdentical(expected, roundtripped) def test_default_to_char_arrays(self): data = Dataset({'x': np.array(['foo', 'zzzz'], dtype='S')}) with self.roundtrip(data) as actual: self.assertDatasetIdentical(data, actual) self.assertEqual(actual['x'].dtype, 
np.dtype('S4')) def test_open_encodings(self): # Create a netCDF file with explicit time units # and make sure it makes it into the encodings # and survives a round trip with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, 'w') as ds: ds.createDimension('time', size=10) ds.createVariable('time', np.int32, dimensions=('time',)) units = 'days since 1999-01-01' ds.variables['time'].setncattr('units', units) ds.variables['time'][:] = np.arange(10) + 4 expected = Dataset() time = pd.date_range('1999-01-05', periods=10) encoding = {'units': units, 'dtype': np.dtype('int32')} expected['time'] = ('time', time, {}, encoding) with open_dataset(tmp_file) as actual: self.assertVariableEqual(actual['time'], expected['time']) actual_encoding = dict((k, v) for k, v in iteritems(actual['time'].encoding) if k in expected['time'].encoding) self.assertDictEqual(actual_encoding, expected['time'].encoding) def test_dump_and_open_encodings(self): # Create a netCDF file with explicit time units # and make sure it makes it into the encodings # and survives a round trip with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, 'w') as ds: ds.createDimension('time', size=10) ds.createVariable('time', np.int32, dimensions=('time',)) units = 'days since 1999-01-01' ds.variables['time'].setncattr('units', units) ds.variables['time'][:] = np.arange(10) + 4 with open_dataset(tmp_file) as xray_dataset: with create_tmp_file() as tmp_file2: xray_dataset.to_netcdf(tmp_file2) with nc4.Dataset(tmp_file2, 'r') as ds: self.assertEqual(ds.variables['time'].getncattr('units'), units) self.assertArrayEqual(ds.variables['time'], np.arange(10) + 4) def test_compression_encoding(self): data = create_test_data() data['var2'].encoding.update({'zlib': True, 'chunksizes': (5, 5), 'fletcher32': True}) with self.roundtrip(data) as actual: for k, v in iteritems(data['var2'].encoding): self.assertEqual(v, actual['var2'].encoding[k]) # regression test for #156 expected = data.isel(dim1=0) with self.roundtrip(expected) as actual: self.assertDatasetEqual(expected, actual) def test_mask_and_scale(self): with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode='w') as nc: nc.createDimension('t', 5) nc.createVariable('x', 'int16', ('t',), fill_value=-1) v = nc.variables['x'] v.set_auto_maskandscale(False) v.add_offset = 10 v.scale_factor = 0.1 v[:] = np.array([-1, -1, 0, 1, 2]) # first make sure netCDF4 reads the masked and scaled data correctly with nc4.Dataset(tmp_file, mode='r') as nc: expected = np.ma.array([-1, -1, 10, 10.1, 10.2], mask=[True, True, False, False, False]) actual = nc.variables['x'][:] self.assertArrayEqual(expected, actual) # now check xray with open_dataset(tmp_file) as ds: expected = create_masked_and_scaled_data() self.assertDatasetIdentical(expected, ds) def test_0dimensional_variable(self): # This fix verifies our work-around to this netCDF4-python bug: # https://github.com/Unidata/netcdf4-python/pull/220 with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode='w') as nc: v = nc.createVariable('x', 'int16') v[...] 
= 123 with open_dataset(tmp_file) as ds: expected = Dataset({'x': ((), 123)}) self.assertDatasetIdentical(expected, ds) def test_variable_len_strings(self): with create_tmp_file() as tmp_file: values = np.array(['foo', 'bar', 'baz'], dtype=object) with nc4.Dataset(tmp_file, mode='w') as nc: nc.createDimension('x', 3) v = nc.createVariable('x', str, ('x',)) v[:] = values expected = Dataset({'x': ('x', values)}) for kwargs in [{}, {'decode_cf': True}]: with open_dataset(tmp_file, **kwargs) as actual: self.assertDatasetIdentical(expected, actual) @requires_netCDF4 class NetCDF4DataTest(BaseNetCDF4Test, TestCase): @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.NetCDF4DataStore(tmp_file, mode='w') as store: yield store @contextlib.contextmanager def roundtrip(self, data, **kwargs): with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file) with open_dataset(tmp_file, **kwargs) as ds: yield ds def test_variable_order(self): # doesn't work with scipy or h5py :( ds = Dataset() ds['a'] = 1 ds['z'] = 2 ds['b'] = 3 ds.coords['c'] = 4 with self.roundtrip(ds) as actual: self.assertEqual(list(ds), list(actual)) @requires_netCDF4 @requires_dask class NetCDF4ViaDaskDataTest(NetCDF4DataTest): @contextlib.contextmanager def roundtrip(self, data, **kwargs): with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file) with open_dataset(tmp_file, **kwargs) as ds: yield ds.chunk() @requires_scipy class ScipyInMemoryDataTest(CFEncodedDataTest, Only32BitTypes, TestCase): @contextlib.contextmanager def create_store(self): fobj = BytesIO() yield backends.ScipyDataStore(fobj, 'w') @contextlib.contextmanager def roundtrip(self, data, **kwargs): serialized = data.to_netcdf() with open_dataset(BytesIO(serialized), **kwargs) as ds: yield ds @requires_scipy class ScipyOnDiskDataTest(CFEncodedDataTest, Only32BitTypes, TestCase): @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.ScipyDataStore(tmp_file, mode='w') as store: yield store @contextlib.contextmanager def roundtrip(self, data, **kwargs): with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, engine='scipy') with open_dataset(tmp_file, engine='scipy', **kwargs) as ds: yield ds def test_array_attrs(self): ds = Dataset(attrs={'foo': [[1, 2], [3, 4]]}) with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'): with self.roundtrip(ds) as roundtripped: pass def test_roundtrip_example_1_netcdf_gz(self): if sys.version_info[:2] < (2, 7): with self.assertRaisesRegexp(ValueError, 'gzipped netCDF not supported'): open_example_dataset('example_1.nc.gz') else: with open_example_dataset('example_1.nc.gz') as expected: with open_example_dataset('example_1.nc') as actual: self.assertDatasetIdentical(expected, actual) def test_netcdf3_endianness(self): # regression test for GH416 expected = open_example_dataset('bears.nc', engine='scipy') for var in expected.values(): self.assertTrue(var.dtype.isnative) @requires_netCDF4 class NetCDF3ViaNetCDF4DataTest(CFEncodedDataTest, Only32BitTypes, TestCase): @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.NetCDF4DataStore(tmp_file, mode='w', format='NETCDF3_CLASSIC') as store: yield store @contextlib.contextmanager def roundtrip(self, data, **kwargs): with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, format='NETCDF3_CLASSIC', engine='netcdf4') with open_dataset(tmp_file, engine='netcdf4', **kwargs) as ds: yield ds @requires_netCDF4 class 
NetCDF4ClassicViaNetCDF4DataTest(CFEncodedDataTest, Only32BitTypes, TestCase): @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.NetCDF4DataStore(tmp_file, mode='w', format='NETCDF4_CLASSIC') as store: yield store @contextlib.contextmanager def roundtrip(self, data, **kwargs): with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, format='NETCDF4_CLASSIC', engine='netcdf4') with open_dataset(tmp_file, engine='netcdf4', **kwargs) as ds: yield ds @requires_scipy_or_netCDF4 class GenericNetCDFDataTest(CFEncodedDataTest, Only32BitTypes, TestCase): # verify that we can read and write netCDF3 files as long as we have scipy # or netCDF4-python installed def test_write_store(self): # there's no specific store to test here pass @contextlib.contextmanager def roundtrip(self, data, **kwargs): with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, format='netcdf3_64bit') with open_dataset(tmp_file, **kwargs) as ds: yield ds def test_engine(self): data = create_test_data() with self.assertRaisesRegexp(ValueError, 'unrecognized engine'): data.to_netcdf('foo.nc', engine='foobar') with self.assertRaisesRegexp(ValueError, 'invalid engine'): data.to_netcdf(engine='netcdf4') with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file) with self.assertRaisesRegexp(ValueError, 'unrecognized engine'): open_dataset(tmp_file, engine='foobar') netcdf_bytes = data.to_netcdf() with self.assertRaisesRegexp(ValueError, 'can only read'): open_dataset(BytesIO(netcdf_bytes), engine='foobar') def test_cross_engine_read_write_netcdf3(self): data = create_test_data() valid_engines = set() if has_netCDF4: valid_engines.add('netcdf4') if has_scipy: valid_engines.add('scipy') for write_engine in valid_engines: for format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']: with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, format=format, engine=write_engine) for read_engine in valid_engines: with open_dataset(tmp_file, engine=read_engine) as actual: self.assertDatasetAllClose(data, actual) @requires_h5netcdf @requires_netCDF4 class H5NetCDFDataTest(BaseNetCDF4Test, TestCase): @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: yield backends.H5NetCDFStore(tmp_file, 'w') @contextlib.contextmanager def roundtrip(self, data, **kwargs): with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, engine='h5netcdf') with open_dataset(tmp_file, engine='h5netcdf', **kwargs) as ds: yield ds def test_orthogonal_indexing(self): # doesn't work for h5py (without using dask as an intermediate layer) pass def test_complex(self): expected = Dataset({'x': ('y', np.ones(5) + 1j * np.ones(5))}) with self.roundtrip(expected) as actual: self.assertDatasetEqual(expected, actual) def test_cross_engine_read_write_netcdf4(self): # Drop dim3, because its labels include strings. These appear to be # not properly read with python-netCDF4, which converts them into # unicode instead of leaving them as bytes. 
if PY3: raise unittest.SkipTest('see https://github.com/xray/xray/issues/535') data = create_test_data().drop('dim3') data.attrs['foo'] = 'bar' valid_engines = ['netcdf4', 'h5netcdf'] for write_engine in valid_engines: with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, engine=write_engine) for read_engine in valid_engines: with open_dataset(tmp_file, engine=read_engine) as actual: self.assertDatasetIdentical(data, actual) def test_read_byte_attrs_as_unicode(self): with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, 'w') as nc: nc.foo = b'bar' actual = open_dataset(tmp_file) expected = Dataset(attrs={'foo': 'bar'}) self.assertDatasetIdentical(expected, actual) @requires_dask @requires_scipy @requires_netCDF4 class DaskTest(TestCase): def test_open_mfdataset(self): original = Dataset({'foo': ('x', np.random.randn(10))}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with open_mfdataset([tmp1, tmp2]) as actual: self.assertIsInstance(actual.foo.variable.data, da.Array) self.assertEqual(actual.foo.variable.data.chunks, ((5, 5),)) self.assertDatasetAllClose(original, actual) with open_mfdataset([tmp1, tmp2], chunks={'x': 3}) as actual: self.assertEqual(actual.foo.variable.data.chunks, ((3, 2, 3, 2),)) with self.assertRaisesRegexp(IOError, 'no files to open'): open_mfdataset('foo-bar-baz-*.nc') def test_preprocess_mfdataset(self): original = Dataset({'foo': ('x', np.random.randn(10))}) with create_tmp_file() as tmp: original.to_netcdf(tmp) preprocess = lambda ds: ds.assign_coords(z=0) expected = preprocess(original) with open_mfdataset(tmp, preprocess=preprocess) as actual: self.assertDatasetIdentical(expected, actual) def test_lock(self): original = Dataset({'foo': ('x', np.random.randn(10))}) with create_tmp_file() as tmp: original.to_netcdf(tmp, format='NETCDF3_CLASSIC') with open_dataset(tmp, chunks=10) as ds: task = ds.foo.data.dask[ds.foo.data.name, 0] self.assertIsInstance(task[-1], type(Lock())) with open_mfdataset(tmp) as ds: task = ds.foo.data.dask[ds.foo.data.name, 0] self.assertIsInstance(task[-1], type(Lock())) with open_mfdataset(tmp, engine='scipy') as ds: task = ds.foo.data.dask[ds.foo.data.name, 0] self.assertNotIsInstance(task[-1], type(Lock())) def test_save_mfdataset_roundtrip(self): original = Dataset({'foo': ('x', np.random.randn(10))}) datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))] with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: save_mfdataset(datasets, [tmp1, tmp2]) with open_mfdataset([tmp1, tmp2]) as actual: self.assertDatasetIdentical(actual, original) def test_save_mfdataset_invalid(self): ds = Dataset() with self.assertRaisesRegexp(ValueError, 'cannot use mode'): save_mfdataset([ds, ds], ['same', 'same']) with self.assertRaisesRegexp(ValueError, 'same length'): save_mfdataset([ds, ds], ['only one path']) def test_open_and_do_math(self): original = Dataset({'foo': ('x', np.random.randn(10))}) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_mfdataset(tmp) as ds: actual = 1.0 * ds self.assertDatasetAllClose(original, actual) def test_open_dataset(self): original = Dataset({'foo': ('x', np.random.randn(10))}) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_dataset(tmp, chunks={'x': 5}) as actual: self.assertIsInstance(actual.foo.variable.data, da.Array) self.assertEqual(actual.foo.variable.data.chunks, ((5, 5),)) self.assertDatasetIdentical(original, actual) with 
open_dataset(tmp, chunks=5) as actual: self.assertDatasetIdentical(original, actual) with open_dataset(tmp) as actual: self.assertIsInstance(actual.foo.variable.data, np.ndarray) self.assertDatasetIdentical(original, actual) def test_dask_roundtrip(self): with create_tmp_file() as tmp: data = create_test_data() data.to_netcdf(tmp) chunks = {'dim1': 4, 'dim2': 4, 'dim3': 4, 'time': 10} with open_dataset(tmp, chunks=chunks) as dask_ds: self.assertDatasetIdentical(data, dask_ds) with create_tmp_file() as tmp2: dask_ds.to_netcdf(tmp2) with open_dataset(tmp2) as on_disk: self.assertDatasetIdentical(data, on_disk) def test_deterministic_names(self): with create_tmp_file() as tmp: data = create_test_data() data.to_netcdf(tmp) with open_mfdataset(tmp) as ds: original_names = dict((k, v.data.name) for k, v in ds.items()) with open_mfdataset(tmp) as ds: repeat_names = dict((k, v.data.name) for k, v in ds.items()) for var_name, dask_name in original_names.items(): self.assertIn(var_name, dask_name) self.assertIn(tmp, dask_name) self.assertEqual(original_names, repeat_names) @requires_scipy_or_netCDF4 @requires_pydap class PydapTest(TestCase): def test_cmp_local_file(self): url = 'http://test.opendap.org/opendap/hyrax/data/nc/bears.nc' @contextlib.contextmanager def create_datasets(): actual = open_dataset(url, engine='pydap') with open_example_dataset('bears.nc') as expected: # don't check attributes since pydap doesn't serialize them # correctly also skip the "bears" variable since the test DAP # server incorrectly concatenates it. actual = actual.drop('bears') expected = expected.drop('bears') yield actual, expected with create_datasets() as (actual, expected): self.assertDatasetEqual(actual, expected) with create_datasets() as (actual, expected): self.assertDatasetEqual(actual.isel(l=2), expected.isel(l=2)) with create_datasets() as (actual, expected): self.assertDatasetEqual(actual.isel(i=0, j=-1), expected.isel(i=0, j=-1)) with create_datasets() as (actual, expected): self.assertDatasetEqual(actual.isel(j=slice(1, 2)), expected.isel(j=slice(1, 2)))
cpaulik/xray
xray/test/test_backends.py
Python
apache-2.0
38,439
[ "NetCDF" ]
50e91f751b0f69ab32c8832f1e585ecdea71e918e4ab90f938e51c8c356f593c
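# The backend test classes above all exercise the same roundtrip contract:
# serialize a Dataset to a temporary netCDF file with a given engine, reopen
# it, and compare. A minimal standalone sketch of that pattern, assuming the
# xray package (later renamed xarray) and at least one netCDF backend are
# installed; the temporary path is illustrative.
import os
import tempfile

import numpy as np
import xray

original = xray.Dataset({'foo': ('x', np.random.randn(10))})

tmp_file = os.path.join(tempfile.mkdtemp(), 'roundtrip.nc')
original.to_netcdf(tmp_file)  # engine is picked from whatever is installed

with xray.open_dataset(tmp_file) as actual:
    # identical() compares values, dimensions, and attributes, which is what
    # assertDatasetIdentical checks in the tests above
    assert original.identical(actual)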
def extractAdamantineDragonintheCrystalWorld(item):
	""" """
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	if not (chp or vol or frag) or 'preview' in item['title'].lower():
		return None
	if 'Crystal World' in item['tags']:
		return buildReleaseMessageWithType(item, 'Adamantine Dragon in the Crystal World',
		                                   vol, chp, frag=frag, postfix=postfix, tl_type='oel')
	return False
fake-name/ReadableWebProxy
WebMirror/management/rss_parser_funcs/feed_parse_extractAdamantineDragonintheCrystalWorld.py
Python
bsd-3-clause
409
[ "CRYSTAL" ]
a63024c7fc5ec429714f402ab2fdf559c636defaccace61f83e51a3a61ae971f
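# The extractor above follows the repository's standard contract: return None
# when the title has no parsable volume/chapter/fragment or is a preview, a
# release message when a known tag matches, and False otherwise. A rough
# sketch of that flow with stand-ins for the project helpers
# (extractVolChapterFragmentPostfix and buildReleaseMessageWithType are
# assumed interfaces here, not faithful reimplementations).
import re

def extractVolChapterFragmentPostfix(title):
    # Stand-in: pull a "c<number>" chapter marker out of the title; the real
    # helper in ReadableWebProxy handles far more title formats.
    m = re.search(r'c(?:hapter)?\s*(\d+)', title, re.IGNORECASE)
    chp = int(m.group(1)) if m else None
    return None, chp, None, ''

def buildReleaseMessageWithType(item, series, vol, chp, frag=None,
                                postfix='', tl_type='oel'):
    # Stand-in: the real helper builds a structured release record.
    return {'series': series, 'vol': vol, 'chp': chp, 'frag': frag,
            'postfix': postfix, 'tl_type': tl_type}

item = {'title': 'Adamantine Dragon in the Crystal World c12',
        'tags': ['Crystal World']}
print(extractAdamantineDragonintheCrystalWorld(item))
# -> {'series': 'Adamantine Dragon in the Crystal World', 'vol': None,
#     'chp': 12, ...}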
# drizzle/base.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # Copyright (C) 2010-2011 Monty Taylor <mordred@inaugust.com> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: drizzle :name: Drizzle Drizzle is a variant of MySQL. Unlike MySQL, Drizzle's default storage engine is InnoDB (transactions, foreign-keys) rather than MyISAM. For more `Notable Differences <http://docs.drizzle.org/mysql_differences.html>`_, visit the `Drizzle Documentation <http://docs.drizzle.org/index.html>`_. The SQLAlchemy Drizzle dialect leans heavily on the MySQL dialect, so much of the :doc:`SQLAlchemy MySQL <mysql>` documentation is also relevant. """ from sqlalchemy import exc from sqlalchemy import log from sqlalchemy import types as sqltypes from sqlalchemy.engine import reflection from sqlalchemy.dialects.mysql import base as mysql_dialect from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \ BLOB, BINARY, VARBINARY class _NumericType(object): """Base for Drizzle numeric types.""" def __init__(self, **kw): super(_NumericType, self).__init__(**kw) class _FloatType(_NumericType, sqltypes.Float): def __init__(self, precision=None, scale=None, asdecimal=True, **kw): if isinstance(self, (REAL, DOUBLE)) and \ ( (precision is None and scale is not None) or (precision is not None and scale is None) ): raise exc.ArgumentError( "You must specify both precision and scale or omit " "both altogether.") super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw) self.scale = scale class _StringType(mysql_dialect._StringType): """Base for Drizzle string types.""" def __init__(self, collation=None, binary=False, **kw): kw['national'] = False super(_StringType, self).__init__(collation=collation, binary=binary, **kw) class NUMERIC(_NumericType, sqltypes.NUMERIC): """Drizzle NUMERIC type.""" __visit_name__ = 'NUMERIC' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a NUMERIC. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(NUMERIC, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class DECIMAL(_NumericType, sqltypes.DECIMAL): """Drizzle DECIMAL type.""" __visit_name__ = 'DECIMAL' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a DECIMAL. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(DECIMAL, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class DOUBLE(_FloatType): """Drizzle DOUBLE type.""" __visit_name__ = 'DOUBLE' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a DOUBLE. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(DOUBLE, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class REAL(_FloatType, sqltypes.REAL): """Drizzle REAL type.""" __visit_name__ = 'REAL' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a REAL. :param precision: Total digits in this number. 
If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(REAL, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class FLOAT(_FloatType, sqltypes.FLOAT): """Drizzle FLOAT type.""" __visit_name__ = 'FLOAT' def __init__(self, precision=None, scale=None, asdecimal=False, **kw): """Construct a FLOAT. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(FLOAT, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) def bind_processor(self, dialect): return None class INTEGER(sqltypes.INTEGER): """Drizzle INTEGER type.""" __visit_name__ = 'INTEGER' def __init__(self, **kw): """Construct an INTEGER.""" super(INTEGER, self).__init__(**kw) class BIGINT(sqltypes.BIGINT): """Drizzle BIGINTEGER type.""" __visit_name__ = 'BIGINT' def __init__(self, **kw): """Construct a BIGINTEGER.""" super(BIGINT, self).__init__(**kw) class TIME(mysql_dialect.TIME): """Drizzle TIME type.""" class TIMESTAMP(sqltypes.TIMESTAMP): """Drizzle TIMESTAMP type.""" __visit_name__ = 'TIMESTAMP' class TEXT(_StringType, sqltypes.TEXT): """Drizzle TEXT type, for text up to 2^16 characters.""" __visit_name__ = 'TEXT' def __init__(self, length=None, **kw): """Construct a TEXT. :param length: Optional, if provided the server may optimize storage by substituting the smallest TEXT type sufficient to store ``length`` characters. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super(TEXT, self).__init__(length=length, **kw) class VARCHAR(_StringType, sqltypes.VARCHAR): """Drizzle VARCHAR type, for variable-length character data.""" __visit_name__ = 'VARCHAR' def __init__(self, length=None, **kwargs): """Construct a VARCHAR. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super(VARCHAR, self).__init__(length=length, **kwargs) class CHAR(_StringType, sqltypes.CHAR): """Drizzle CHAR type, for fixed-length character data.""" __visit_name__ = 'CHAR' def __init__(self, length=None, **kwargs): """Construct a CHAR. :param length: Maximum data length, in characters. :param binary: Optional, use the default binary collation for the national character set. This does not affect the type of data stored, use a BINARY type for binary data. :param collation: Optional, request a particular collation. Must be compatible with the national character set. """ super(CHAR, self).__init__(length=length, **kwargs) class ENUM(mysql_dialect.ENUM): """Drizzle ENUM type.""" def __init__(self, *enums, **kw): """Construct an ENUM. Example: Column('myenum', ENUM("foo", "bar", "baz")) :param enums: The range of valid values for this ENUM. Values will be quoted when generating the schema according to the quoting flag (see below). 
:param strict: Defaults to False: ensure that a given value is in this ENUM's range of permissible values when inserting or updating rows. Note that Drizzle will not raise a fatal error if you attempt to store an out of range value- an alternate value will be stored instead. (See Drizzle ENUM documentation.) :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. :param quoting: Defaults to 'auto': automatically determine enum value quoting. If all enum values are surrounded by the same quoting character, then use 'quoted' mode. Otherwise, use 'unquoted' mode. 'quoted': values in enums are already quoted, they will be used directly when generating the schema - this usage is deprecated. 'unquoted': values in enums are not quoted, they will be escaped and surrounded by single quotes when generating the schema. Previous versions of this type always required manually quoted values to be supplied; future versions will always quote the string literals for you. This is a transitional option. """ super(ENUM, self).__init__(*enums, **kw) class _DrizzleBoolean(sqltypes.Boolean): def get_dbapi_type(self, dbapi): return dbapi.NUMERIC colspecs = { sqltypes.Numeric: NUMERIC, sqltypes.Float: FLOAT, sqltypes.Time: TIME, sqltypes.Enum: ENUM, sqltypes.Boolean: _DrizzleBoolean, } # All the types we have in Drizzle ischema_names = { 'BIGINT': BIGINT, 'BINARY': BINARY, 'BLOB': BLOB, 'BOOLEAN': BOOLEAN, 'CHAR': CHAR, 'DATE': DATE, 'DATETIME': DATETIME, 'DECIMAL': DECIMAL, 'DOUBLE': DOUBLE, 'ENUM': ENUM, 'FLOAT': FLOAT, 'INT': INTEGER, 'INTEGER': INTEGER, 'NUMERIC': NUMERIC, 'TEXT': TEXT, 'TIME': TIME, 'TIMESTAMP': TIMESTAMP, 'VARBINARY': VARBINARY, 'VARCHAR': VARCHAR, } class DrizzleCompiler(mysql_dialect.MySQLCompiler): def visit_typeclause(self, typeclause): type_ = typeclause.type.dialect_impl(self.dialect) if isinstance(type_, sqltypes.Integer): return 'INTEGER' else: return super(DrizzleCompiler, self).visit_typeclause(typeclause) def visit_cast(self, cast, **kwargs): type_ = self.process(cast.typeclause) if type_ is None: return self.process(cast.clause) return 'CAST(%s AS %s)' % (self.process(cast.clause), type_) class DrizzleDDLCompiler(mysql_dialect.MySQLDDLCompiler): pass class DrizzleTypeCompiler(mysql_dialect.MySQLTypeCompiler): def _extend_numeric(self, type_, spec): return spec def _extend_string(self, type_, defaults, spec): """Extend a string-type declaration with standard SQL COLLATE annotations and Drizzle specific extensions. 
""" def attr(name): return getattr(type_, name, defaults.get(name)) if attr('collation'): collation = 'COLLATE %s' % type_.collation elif attr('binary'): collation = 'BINARY' else: collation = None return ' '.join([c for c in (spec, collation) if c is not None]) def visit_NCHAR(self, type): raise NotImplementedError("Drizzle does not support NCHAR") def visit_NVARCHAR(self, type): raise NotImplementedError("Drizzle does not support NVARCHAR") def visit_FLOAT(self, type_): if type_.scale is not None and type_.precision is not None: return "FLOAT(%s, %s)" % (type_.precision, type_.scale) else: return "FLOAT" def visit_BOOLEAN(self, type_): return "BOOLEAN" def visit_BLOB(self, type_): return "BLOB" class DrizzleExecutionContext(mysql_dialect.MySQLExecutionContext): pass class DrizzleIdentifierPreparer(mysql_dialect.MySQLIdentifierPreparer): pass @log.class_logger class DrizzleDialect(mysql_dialect.MySQLDialect): """Details of the Drizzle dialect. Not used directly in application code. """ name = 'drizzle' _supports_cast = True supports_sequences = False supports_native_boolean = True supports_views = False default_paramstyle = 'format' colspecs = colspecs statement_compiler = DrizzleCompiler ddl_compiler = DrizzleDDLCompiler type_compiler = DrizzleTypeCompiler ischema_names = ischema_names preparer = DrizzleIdentifierPreparer def on_connect(self): """Force autocommit - Drizzle Bug#707842 doesn't set this properly""" def connect(conn): conn.autocommit(False) return connect @reflection.cache def get_table_names(self, connection, schema=None, **kw): """Return a Unicode SHOW TABLES from a given schema.""" if schema is not None: current_schema = schema else: current_schema = self.default_schema_name charset = 'utf8' rp = connection.execute("SHOW TABLES FROM %s" % self.identifier_preparer.quote_identifier(current_schema)) return [row[0] for row in self._compat_fetchall(rp, charset=charset)] @reflection.cache def get_view_names(self, connection, schema=None, **kw): raise NotImplementedError def _detect_casing(self, connection): """Sniff out identifier case sensitivity. Cached per-connection. This value can not change without a server restart. """ return 0 def _detect_collations(self, connection): """Pull the active COLLATIONS list from the server. Cached per-connection. """ collations = {} charset = self._connection_charset rs = connection.execute( 'SELECT CHARACTER_SET_NAME, COLLATION_NAME FROM' ' data_dictionary.COLLATIONS') for row in self._compat_fetchall(rs, charset): collations[row[0]] = row[1] return collations def _detect_ansiquotes(self, connection): """Detect and adjust for the ANSI_QUOTES sql mode.""" self._server_ansiquotes = False self._backslash_escapes = False
jessekl/flixr
venv/lib/python2.7/site-packages/sqlalchemy/dialects/drizzle/base.py
Python
mit
14,993
[ "VisIt" ]
97c8e18989d79038dbc71b65a95ec44070bf4863dbdbc0222c67b8662af275fe
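# A short sketch of how the Drizzle types above are exercised: define a table
# with Drizzle column types and compile its DDL against the dialect. This
# assumes a SQLAlchemy release that still ships the drizzle dialect (it was
# removed in SQLAlchemy 1.0); no live Drizzle server is needed to compile.
from sqlalchemy import Table, Column, MetaData
from sqlalchemy.schema import CreateTable
from sqlalchemy.dialects.drizzle import base as drizzle

metadata = MetaData()

measurements = Table(
    'measurements', metadata,
    Column('id', drizzle.INTEGER, primary_key=True),
    Column('value', drizzle.DOUBLE(precision=10, scale=4)),
    Column('label', drizzle.VARCHAR(length=64, collation='utf8_bin')),
)

# DrizzleTypeCompiler._extend_string() renders the COLLATE annotation that
# appears in the emitted DDL.
print(CreateTable(measurements).compile(dialect=drizzle.DrizzleDialect()))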
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import division, print_function, absolute_import import math import numpy from . import _ni_support from . import _nd_image from scipy.misc import doccer from scipy._lib._version import NumpyVersion __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace', 'laplace', 'gaussian_laplace', 'generic_gradient_magnitude', 'gaussian_gradient_magnitude', 'correlate', 'convolve', 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', 'maximum_filter1d', 'minimum_filter', 'maximum_filter', 'rank_filter', 'median_filter', 'percentile_filter', 'generic_filter1d', 'generic_filter'] _input_doc = \ """input : array_like Input array to filter.""" _axis_doc = \ """axis : int, optional The axis of `input` along which to calculate. Default is -1.""" _output_doc = \ """output : array, optional The `output` parameter passes an array in which to store the filter output. Output array should have different name as compared to input array to avoid aliasing errors.""" _size_foot_doc = \ """size : scalar or tuple, optional See footprint, below footprint : array, optional Either `size` or `footprint` must be defined. `size` gives the shape that is taken from the input array, at every element position, to define the input to the filter function. `footprint` is a boolean array that specifies (implicitly) a shape, but also which of the elements within this shape will get passed to the filter function. Thus ``size=(n,m)`` is equivalent to ``footprint=np.ones((n,m))``. We adjust `size` to the number of dimensions of the input array, so that, if the input array is shape (10,10,10), and `size` is 2, then the actual size used is (2,2,2). """ _mode_doc = \ """mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect'""" _mode_multiple_doc = \ """mode : str or sequence, optional The `mode` parameter determines how the array borders are handled. 
Valid modes are {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}. `cval` is the value used when mode is equal to 'constant'. A list of modes with length equal to the number of axes can be provided to specify different modes for different axes. Default is 'reflect'""" _cval_doc = \ """cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0""" _origin_doc = \ """origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0.0.""" _extra_arguments_doc = \ """extra_arguments : sequence, optional Sequence of extra positional arguments to pass to passed function""" _extra_keywords_doc = \ """extra_keywords : dict, optional dict of extra keyword arguments to pass to passed function""" docdict = { 'input': _input_doc, 'axis': _axis_doc, 'output': _output_doc, 'size_foot': _size_foot_doc, 'mode': _mode_doc, 'mode_multiple': _mode_multiple_doc, 'cval': _cval_doc, 'origin': _origin_doc, 'extra_arguments': _extra_arguments_doc, 'extra_keywords': _extra_keywords_doc, } docfiller = doccer.filldoc(docdict) @docfiller def correlate1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a one-dimensional correlation along the given axis. The lines of the array along the given axis are correlated with the given weights. Parameters ---------- %(input)s weights : array One-dimensional sequence of numbers. %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Examples -------- >>> from scipy.ndimage import correlate1d >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([ 8, 26, 8, 12, 7, 28, 36, 9]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output, return_value = _ni_support._get_output(output, input) weights = numpy.asarray(weights, dtype=numpy.float64) if weights.ndim != 1 or weights.shape[0] < 1: raise RuntimeError('no filter weights given') if not weights.flags.contiguous: weights = weights.copy() axis = _ni_support._check_axis(axis, input.ndim) if (len(weights) // 2 + origin < 0) or (len(weights) // 2 + origin > len(weights)): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate1d(input, weights, axis, output, mode, cval, origin) return return_value @docfiller def convolve1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a one-dimensional convolution along the given axis. The lines of the array along the given axis are convolved with the given weights. Parameters ---------- %(input)s weights : ndarray One-dimensional sequence of numbers. %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Returns ------- convolve1d : ndarray Convolved array with same shape as input Examples -------- >>> from scipy.ndimage import convolve1d >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([14, 24, 4, 13, 12, 36, 27, 0]) """ weights = weights[::-1] origin = -origin if not len(weights) & 1: origin -= 1 return correlate1d(input, weights, axis, output, mode, cval, origin) def _gaussian_kernel1d(sigma, order, radius): """ Computes a 1D Gaussian convolution kernel. 
""" if order < 0: raise ValueError('order must be non-negative') p = numpy.polynomial.Polynomial([0, 0, -0.5 / (sigma * sigma)]) x = numpy.arange(-radius, radius + 1) phi_x = numpy.exp(p(x), dtype=numpy.double) phi_x /= phi_x.sum() if order > 0: q = numpy.polynomial.Polynomial([1]) p_deriv = p.deriv() for _ in range(order): # f(x) = q(x) * phi(x) = q(x) * exp(p(x)) # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x) q = q.deriv() + q * p_deriv phi_x *= q(x) return phi_x @docfiller def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, mode="reflect", cval=0.0, truncate=4.0): """One-dimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar standard deviation for Gaussian kernel %(axis)s order : int, optional An order of 0 corresponds to convolution with a Gaussian kernel. A positive order corresponds to convolution with that derivative of a Gaussian. %(output)s %(mode)s %(cval)s truncate : float, optional Truncate the filter at this many standard deviations. Default is 4.0. Returns ------- gaussian_filter1d : ndarray Examples -------- >>> from scipy.ndimage import gaussian_filter1d >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1) array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905]) >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4) array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657]) >>> import matplotlib.pyplot as plt >>> np.random.seed(280490) >>> x = np.random.randn(101).cumsum() >>> y3 = gaussian_filter1d(x, 3) >>> y6 = gaussian_filter1d(x, 6) >>> plt.plot(x, 'k', label='original data') >>> plt.plot(y3, '--', label='filtered, sigma=3') >>> plt.plot(y6, ':', label='filtered, sigma=6') >>> plt.legend() >>> plt.grid() >>> plt.show() """ sd = float(sigma) # make the radius of the filter equal to truncate standard deviations lw = int(truncate * sd + 0.5) # Since we are calling correlate, not convolve, revert the kernel weights = _gaussian_kernel1d(sigma, order, lw)[::-1] return correlate1d(input, weights, axis, output, mode, cval, 0) @docfiller def gaussian_filter(input, sigma, order=0, output=None, mode="reflect", cval=0.0, truncate=4.0): """Multidimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar or sequence of scalars Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. order : int or sequence of ints, optional The order of the filter along each axis is given as a sequence of integers, or as a single number. An order of 0 corresponds to convolution with a Gaussian kernel. A positive order corresponds to convolution with that derivative of a Gaussian. %(output)s %(mode_multiple)s %(cval)s truncate : float Truncate the filter at this many standard deviations. Default is 4.0. Returns ------- gaussian_filter : ndarray Returned array of same shape as `input`. Notes ----- The multidimensional filter is implemented as a sequence of one-dimensional convolution filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. 
Examples -------- >>> from scipy.ndimage import gaussian_filter >>> a = np.arange(50, step=2).reshape((5,5)) >>> a array([[ 0, 2, 4, 6, 8], [10, 12, 14, 16, 18], [20, 22, 24, 26, 28], [30, 32, 34, 36, 38], [40, 42, 44, 46, 48]]) >>> gaussian_filter(a, sigma=1) array([[ 4, 6, 8, 9, 11], [10, 12, 14, 15, 17], [20, 22, 24, 25, 27], [29, 31, 33, 34, 36], [35, 37, 39, 40, 42]]) >>> from scipy import misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = gaussian_filter(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) orders = _ni_support._normalize_sequence(order, input.ndim) sigmas = _ni_support._normalize_sequence(sigma, input.ndim) modes = _ni_support._normalize_sequence(mode, input.ndim) axes = list(range(input.ndim)) axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii]) for ii in range(len(axes)) if sigmas[ii] > 1e-15] if len(axes) > 0: for axis, sigma, order, mode in axes: gaussian_filter1d(input, sigma, axis, order, output, mode, cval, truncate) input = output else: output[...] = input[...] return return_value @docfiller def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0): """Calculate a Prewitt filter. Parameters ---------- %(input)s %(axis)s %(output)s %(mode_multiple)s %(cval)s Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.prewitt(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) axis = _ni_support._check_axis(axis, input.ndim) output, return_value = _ni_support._get_output(output, input) modes = _ni_support._normalize_sequence(mode, input.ndim) correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,) return return_value @docfiller def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0): """Calculate a Sobel filter. Parameters ---------- %(input)s %(axis)s %(output)s %(mode_multiple)s %(cval)s Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.sobel(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) axis = _ni_support._check_axis(axis, input.ndim) output, return_value = _ni_support._get_output(output, input) modes = _ni_support._normalize_sequence(mode, input.ndim) correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0) return return_value @docfiller def generic_laplace(input, derivative2, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords=None): """ N-dimensional Laplace filter using a provided second derivative function. 
Parameters ---------- %(input)s derivative2 : callable Callable with the following signature:: derivative2(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. %(output)s %(mode_multiple)s %(cval)s %(extra_keywords)s %(extra_arguments)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) axes = list(range(input.ndim)) if len(axes) > 0: modes = _ni_support._normalize_sequence(mode, len(axes)) derivative2(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords) for ii in range(1, len(axes)): tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords) output += tmp else: output[...] = input[...] return return_value @docfiller def laplace(input, output=None, mode="reflect", cval=0.0): """N-dimensional Laplace filter based on approximate second derivatives. Parameters ---------- %(input)s %(output)s %(mode_multiple)s %(cval)s Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.laplace(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ def derivative2(input, axis, output, mode, cval): return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) return generic_laplace(input, derivative2, output, mode, cval) @docfiller def gaussian_laplace(input, sigma, output=None, mode="reflect", cval=0.0, **kwargs): """Multidimensional Laplace filter using gaussian second derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. %(output)s %(mode_multiple)s %(cval)s Extra keyword arguments will be passed to gaussian_filter(). Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> ascent = misc.ascent() >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> result = ndimage.gaussian_laplace(ascent, sigma=1) >>> ax1.imshow(result) >>> result = ndimage.gaussian_laplace(ascent, sigma=3) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) def derivative2(input, axis, output, mode, cval, sigma, **kwargs): order = [0] * input.ndim order[axis] = 2 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) return generic_laplace(input, derivative2, output, mode, cval, extra_arguments=(sigma,), extra_keywords=kwargs) @docfiller def generic_gradient_magnitude(input, derivative, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords=None): """Gradient magnitude using a provided gradient function. Parameters ---------- %(input)s derivative : callable Callable with the following signature:: derivative(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. `derivative` can assume that `input` and `output` are ndarrays. Note that the output from `derivative` is modified inplace; be careful to copy important inputs before returning them. 
%(output)s %(mode_multiple)s %(cval)s %(extra_keywords)s %(extra_arguments)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) axes = list(range(input.ndim)) if len(axes) > 0: modes = _ni_support._normalize_sequence(mode, len(axes)) derivative(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords) numpy.multiply(output, output, output) for ii in range(1, len(axes)): tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords) numpy.multiply(tmp, tmp, tmp) output += tmp # This allows the sqrt to work with a different default casting numpy.sqrt(output, output, casting='unsafe') else: output[...] = input[...] return return_value @docfiller def gaussian_gradient_magnitude(input, sigma, output=None, mode="reflect", cval=0.0, **kwargs): """Multidimensional gradient magnitude using Gaussian derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.. %(output)s %(mode_multiple)s %(cval)s Extra keyword arguments will be passed to gaussian_filter(). Returns ------- gaussian_gradient_magnitude : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) def derivative(input, axis, output, mode, cval, sigma, **kwargs): order = [0] * input.ndim order[axis] = 1 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) return generic_gradient_magnitude(input, derivative, output, mode, cval, extra_arguments=(sigma,), extra_keywords=kwargs) def _correlate_or_convolve(input, weights, output, mode, cval, origin, convolution): input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) weights = numpy.asarray(weights, dtype=numpy.float64) wshape = [ii for ii in weights.shape if ii > 0] if len(wshape) != input.ndim: raise RuntimeError('filter weights array has incorrect shape.') if convolution: weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] for ii in range(len(origins)): origins[ii] = -origins[ii] if not weights.shape[ii] & 1: origins[ii] -= 1 for origin, lenw in zip(origins, wshape): if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw): raise ValueError('invalid origin') if not weights.flags.contiguous: weights = weights.copy() output, return_value = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate(input, weights, output, mode, cval, origins) return return_value @docfiller def correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0): """ Multi-dimensional correlation. The array is correlated with the given kernel. 
Parameters ---------- input : array-like input array to filter weights : ndarray array of weights, same number of dimensions as input output : array, optional The ``output`` parameter passes an array in which to store the filter output. Output array should have different name as compared to input array to avoid aliasing errors. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The ``mode`` parameter determines how the array borders are handled, where ``cval`` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if ``mode`` is 'constant'. Default is 0.0 origin : scalar, optional The ``origin`` parameter controls the placement of the filter. Default 0 See Also -------- convolve : Convolve an image with a kernel. """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, False) @docfiller def convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0): """ Multidimensional convolution. The array is convolved with the given kernel. Parameters ---------- input : array_like Input array to filter. weights : array_like Array of weights, same number of dimensions as input output : ndarray, optional The `output` parameter passes an array in which to store the filter output. Output array should have different name as compared to input array to avoid aliasing errors. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional the `mode` parameter determines how the array borders are handled. For 'constant' mode, values beyond borders are set to be `cval`. Default is 'reflect'. cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 origin : array_like, optional The `origin` parameter controls the placement of the filter, relative to the centre of the current element of the input. Default of 0 is equivalent to ``(0,)*input.ndim``. Returns ------- result : ndarray The result of convolution of `input` with `weights`. See Also -------- correlate : Correlate an image with a kernel. Notes ----- Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where W is the `weights` kernel, j is the n-D spatial index over :math:`W`, I is the `input` and k is the coordinate of the center of W, specified by `origin` in the input parameters. Examples -------- Perhaps the simplest case to understand is ``mode='constant', cval=0.0``, because in this case borders (i.e. where the `weights` kernel, centered on any one value, extends beyond an edge of `input`. >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]]) >>> from scipy import ndimage >>> ndimage.convolve(a, k, mode='constant', cval=0.0) array([[11, 10, 7, 4], [10, 3, 11, 11], [15, 12, 14, 7], [12, 3, 7, 0]]) Setting ``cval=1.0`` is equivalent to padding the outer edge of `input` with 1.0's (and then extracting only the original region of the result). >>> ndimage.convolve(a, k, mode='constant', cval=1.0) array([[13, 11, 8, 7], [11, 3, 11, 14], [16, 12, 14, 10], [15, 6, 10, 5]]) With ``mode='reflect'`` (the default), outer values are reflected at the edge of `input` to fill in missing values. >>> b = np.array([[2, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]]) >>> ndimage.convolve(b, k, mode='reflect') array([[5, 0, 0], [3, 0, 0], [1, 0, 0]]) This includes diagonally at the corners. 
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]]) >>> ndimage.convolve(b, k) array([[4, 2, 0], [3, 2, 0], [1, 1, 0]]) With ``mode='nearest'``, the single nearest value in to an edge in `input` is repeated as many times as needed to match the overlapping `weights`. >>> c = np.array([[2, 0, 1], ... [1, 0, 0], ... [0, 0, 0]]) >>> k = np.array([[0, 1, 0], ... [0, 1, 0], ... [0, 1, 0], ... [0, 1, 0], ... [0, 1, 0]]) >>> ndimage.convolve(c, k, mode='nearest') array([[7, 0, 3], [5, 0, 2], [3, 0, 1]]) """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, True) @docfiller def uniform_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a one-dimensional uniform filter along the given axis. The lines of the array along the given axis are filtered with a uniform filter of given size. Parameters ---------- %(input)s size : int length of uniform filter %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Examples -------- >>> from scipy.ndimage import uniform_filter1d >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([4, 3, 4, 1, 4, 6, 6, 3]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output, return_value = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.uniform_filter1d(input, size, axis, output, mode, cval, origin) return return_value @docfiller def uniform_filter(input, size=3, output=None, mode="reflect", cval=0.0, origin=0): """Multi-dimensional uniform filter. Parameters ---------- %(input)s size : int or sequence of ints, optional The sizes of the uniform filter are given for each axis as a sequence, or as a single number, in which case the size is equal for all axes. %(output)s %(mode_multiple)s %(cval)s %(origin)s Returns ------- uniform_filter : ndarray Filtered array. Has the same shape as `input`. Notes ----- The multi-dimensional filter is implemented as a sequence of one-dimensional uniform filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.uniform_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) sizes = _ni_support._normalize_sequence(size, input.ndim) origins = _ni_support._normalize_sequence(origin, input.ndim) modes = _ni_support._normalize_sequence(mode, input.ndim) axes = list(range(input.ndim)) axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if len(axes) > 0: for axis, size, origin, mode in axes: uniform_filter1d(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] 
return return_value @docfiller def minimum_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a one-dimensional minimum filter along the given axis. The lines of the array along the given axis are filtered with a minimum filter of given size. Parameters ---------- %(input)s size : int length along which to calculate 1D minimum %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Notes ----- This function implements the MINLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being the `input` length, regardless of filter size. References ---------- .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html Examples -------- >>> from scipy.ndimage import minimum_filter1d >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([2, 0, 0, 0, 1, 1, 0, 0]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output, return_value = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 1) return return_value @docfiller def maximum_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a one-dimensional maximum filter along the given axis. The lines of the array along the given axis are filtered with a maximum filter of given size. Parameters ---------- %(input)s size : int Length along which to calculate the 1-D maximum. %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Returns ------- maximum1d : ndarray, None Maximum-filtered array with same shape as input. None if `output` is not None Notes ----- This function implements the MAXLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being the `input` length, regardless of filter size. References ---------- .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 .. 
[2] http://www.richardhartersworld.com/cri/2001/slidingmin.html Examples -------- >>> from scipy.ndimage import maximum_filter1d >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([8, 8, 8, 4, 9, 9, 9, 9]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output, return_value = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 0) return return_value def _min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, minimum): if structure is None: if footprint is None: if size is None: raise RuntimeError("no footprint provided") separable = True else: footprint = numpy.asarray(footprint, dtype=bool) if not footprint.any(): raise ValueError("All-zero footprint is not supported.") if footprint.all(): size = footprint.shape footprint = None separable = True else: separable = False else: structure = numpy.asarray(structure, dtype=numpy.float64) separable = False if footprint is None: footprint = numpy.ones(structure.shape, bool) else: footprint = numpy.asarray(footprint, dtype=bool) input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output, return_value = _ni_support._get_output(output, input) origins = _ni_support._normalize_sequence(origin, input.ndim) if separable: sizes = _ni_support._normalize_sequence(size, input.ndim) modes = _ni_support._normalize_sequence(mode, input.ndim) axes = list(range(input.ndim)) axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if minimum: filter_ = minimum_filter1d else: filter_ = maximum_filter1d if len(axes) > 0: for axis, size, origin, mode in axes: filter_(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] else: fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() if structure is not None: if len(structure.shape) != input.ndim: raise RuntimeError('structure array has incorrect shape') if not structure.flags.contiguous: structure = structure.copy() mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter(input, footprint, structure, output, mode, cval, origins, minimum) return return_value @docfiller def minimum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multi-dimensional minimum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin)s Returns ------- minimum_filter : ndarray Filtered array. Has the same shape as `input`. 
Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.minimum_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 1) @docfiller def maximum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multi-dimensional maximum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin)s Returns ------- maximum_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.maximum_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 0) @docfiller def _rank_filter(input, rank, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, operation='rank'): input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint, dtype=bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() filter_size = numpy.where(footprint, 1, 0).sum() if operation == 'median': rank = filter_size // 2 elif operation == 'percentile': percentile = rank if percentile < 0.0: percentile += 100.0 if percentile < 0 or percentile > 100: raise RuntimeError('invalid percentile') if percentile == 100.0: rank = filter_size - 1 else: rank = int(float(filter_size) * percentile / 100.0) if rank < 0: rank += filter_size if rank < 0 or rank >= filter_size: raise RuntimeError('rank not within filter footprint size') if rank == 0: return minimum_filter(input, None, footprint, output, mode, cval, origins) elif rank == filter_size - 1: return maximum_filter(input, None, footprint, output, mode, cval, origins) else: output, return_value = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.rank_filter(input, rank, footprint, output, mode, cval, origins) return return_value @docfiller def rank_filter(input, rank, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multi-dimensional rank filter. Parameters ---------- %(input)s rank : int The rank parameter may be less then zero, i.e., rank = -1 indicates the largest element. %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s Returns ------- rank_filter : ndarray Filtered array. 
Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.rank_filter(ascent, rank=42, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _rank_filter(input, rank, size, footprint, output, mode, cval, origin, 'rank') @docfiller def median_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """ Calculate a multidimensional median filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s Returns ------- median_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.median_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _rank_filter(input, 0, size, footprint, output, mode, cval, origin, 'median') @docfiller def percentile_filter(input, percentile, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multi-dimensional percentile filter. Parameters ---------- %(input)s percentile : scalar The percentile parameter may be less then zero, i.e., percentile = -20 equals percentile = 80 %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s Returns ------- percentile_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _rank_filter(input, percentile, size, footprint, output, mode, cval, origin, 'percentile') @docfiller def generic_filter1d(input, function, filter_size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0, extra_arguments=(), extra_keywords=None): """Calculate a one-dimensional filter along the given axis. `generic_filter1d` iterates over the lines of the array, calling the given function at each line. The arguments of the line are the input line, and the output line. The input and output lines are 1D double arrays. The input line is extended appropriately according to the filter size and origin. The output line must be modified in-place with the result. Parameters ---------- %(input)s function : {callable, scipy.LowLevelCallable} Function to apply along given axis. filter_size : scalar Length of the filter. %(axis)s %(output)s %(mode)s %(cval)s %(origin)s %(extra_arguments)s %(extra_keywords)s Notes ----- This function also accepts low-level callback functions with one of the following signatures and wrapped in `scipy.LowLevelCallable`: .. 
code:: c int function(double *input_line, npy_intp input_length, double *output_line, npy_intp output_length, void *user_data) int function(double *input_line, intptr_t input_length, double *output_line, intptr_t output_length, void *user_data) The calling function iterates over the lines of the input and output arrays, calling the callback function at each line. The current line is extended according to the border conditions set by the calling function, and the result is copied into the array that is passed through ``input_line``. The length of the input line (after extension) is passed through ``input_length``. The callback function should apply the filter and store the result in the array passed through ``output_line``. The length of the output line is passed through ``output_length``. ``user_data`` is the data pointer provided to `scipy.LowLevelCallable` as-is. The callback function must return an integer error status that is zero if something went wrong and one otherwise. If an error occurs, you should normally set the python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output, return_value = _ni_support._get_output(output, input) if filter_size < 1: raise RuntimeError('invalid filter size') axis = _ni_support._check_axis(axis, input.ndim) if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >= filter_size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter1d(input, function, filter_size, axis, output, mode, cval, origin, extra_arguments, extra_keywords) return return_value @docfiller def generic_filter(input, function, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, extra_arguments=(), extra_keywords=None): """Calculate a multi-dimensional filter using the given function. At each element the provided function is called. The input values within the filter footprint at that element are passed to the function as a 1D array of double values. Parameters ---------- %(input)s function : {callable, scipy.LowLevelCallable} Function to apply at each element. %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s %(extra_arguments)s %(extra_keywords)s Notes ----- This function also accepts low-level callback functions with one of the following signatures and wrapped in `scipy.LowLevelCallable`: .. code:: c int callback(double *buffer, npy_intp filter_size, double *return_value, void *user_data) int callback(double *buffer, intptr_t filter_size, double *return_value, void *user_data) The calling function iterates over the elements of the input and output arrays, calling the callback function at each element. The elements within the footprint of the filter at the current element are passed through the ``buffer`` parameter, and the number of elements within the footprint through ``filter_size``. The calculated value is returned in ``return_value``. ``user_data`` is the data pointer provided to `scipy.LowLevelCallable` as-is. The callback function must return an integer error status that is zero if something went wrong and one otherwise. 
If an error occurs, you should normally set the python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint, dtype=bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() output, return_value = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter(input, function, footprint, output, mode, cval, origins, extra_arguments, extra_keywords) return return_value
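A brief usage sketch of the callback contract documented above: generic_filter passes the input values under the footprint to the callable as a flat 1-D array of doubles and stores the returned scalar at the corresponding output element. The helper name and input values below are illustrative, not part of this module.

import numpy
from scipy import ndimage

def local_range(values):
    # `values` is the neighbourhood selected by size/footprint, flattened
    return values.max() - values.min()

a = numpy.arange(25, dtype=float).reshape(5, 5)
# 3x3 neighbourhood around every element; edges use the default 'reflect' mode
result = ndimage.generic_filter(a, local_range, size=3)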
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/scipy/ndimage/filters.py
Python
mit
52,520
[ "Gaussian" ]
8ababcfef262e72f16c509775e0675fb942c51f7f4252715ed427741b306a109
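Two identities implied by the _rank_filter dispatch above are worth noting: median_filter is rank_filter at rank filter_size // 2, and a negative rank counts back from the largest element. A small sketch (array values illustrative) that checks both:

import numpy
from scipy import ndimage

a = numpy.random.rand(8, 8)
# a 3x3 footprint has filter_size 9, so the median sits at rank 9 // 2 == 4
assert numpy.allclose(ndimage.median_filter(a, size=3),
                      ndimage.rank_filter(a, rank=4, size=3))
# rank=-1 wraps to filter_size - 1, i.e. the neighbourhood maximum
assert numpy.allclose(ndimage.rank_filter(a, rank=-1, size=3),
                      ndimage.maximum_filter(a, size=3))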
#!/usr/bin/env python # Install.py tool to build the CSlib library # used to automate the steps described in the README file in this dir from __future__ import print_function import sys,os,re,subprocess # help message help = """ Syntax from src dir: make lib-message args="-m" or: make lib-message args="-s -z" Syntax from lib dir: python Install.py -m or: python Install.py -s -z specify zero or more options, order does not matter -m = parallel build of CSlib library -s = serial build of CSlib library -z = build CSlib library with ZMQ socket support, default = no ZMQ support Example: make lib-message args="-m -z" # build parallel CSlib with ZMQ support make lib-message args="-s" # build serial CSlib with no ZMQ support """ # print error message or help def error(str=None): if not str: print(help) else: print("ERROR",str) sys.exit() # expand to full path name # process leading '~' or relative path def fullpath(path): return os.path.abspath(os.path.expanduser(path)) def which(program): def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None # parse args args = sys.argv[1:] nargs = len(args) if nargs == 0: error() mpiflag = False serialflag = False zmqflag = False iarg = 0 while iarg < nargs: if args[iarg] == "-m": mpiflag = True iarg += 1 elif args[iarg] == "-s": serialflag = True iarg += 1 elif args[iarg] == "-z": zmqflag = True iarg += 1 else: error() if (not mpiflag and not serialflag): error("Must use either -m or -s flag") if (mpiflag and serialflag): error("Cannot use -m and -s flag at the same time") # build CSlib # copy resulting lib to cslib/src/libmessage.a # copy appropriate Makefile.lammps.* to Makefile.lammps print("Building CSlib ...") srcdir = fullpath("./cslib/src") if mpiflag and zmqflag: cmd = "cd %s; make lib_parallel" % srcdir elif mpiflag and not zmqflag: cmd = "cd %s; make lib_parallel zmq=no" % srcdir elif not mpiflag and zmqflag: cmd = "cd %s; make lib_serial" % srcdir elif not mpiflag and not zmqflag: cmd = "cd %s; make lib_serial zmq=no" % srcdir print(cmd) txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True) print(txt.decode('UTF-8')) if mpiflag: cmd = "cd %s; cp libcsmpi.a libmessage.a" % srcdir else: cmd = "cd %s; cp libcsnompi.a libmessage.a" % srcdir print(cmd) txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True) print(txt.decode('UTF-8')) if zmqflag: cmd = "cp Makefile.lammps.zmq Makefile.lammps" else: cmd = "cp Makefile.lammps.nozmq Makefile.lammps" print(cmd) txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True) print(txt.decode('UTF-8'))
quang-ha/lammps
lib/message/Install.py
Python
gpl-2.0
3,018
[ "LAMMPS" ]
058fe3095555879b0d01023602af3dcf040b20b35aeb910d83cb956631ff359a
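The build steps in Install.py above run each shell command through subprocess.check_output with no error handling, so a failed compile surfaces as a bare CalledProcessError traceback. A minimal sketch of how each step could report the captured build log instead; the run_step helper is hypothetical, not part of the script:

import subprocess
import sys

def run_step(cmd):
    # Echo the command, run it through the shell, and print its output;
    # on failure, show the captured build log before exiting.
    print(cmd)
    try:
        txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as err:
        print(err.output.decode('UTF-8'))
        sys.exit("build step failed: %s" % cmd)
    print(txt.decode('UTF-8'))

run_step("cd cslib/src; make lib_serial zmq=no")  # same command the -s path runs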
#!/usr/local/bin/python """ setup.py file for LAMMPS with system MPICH library """ from distutils.core import setup, Extension import os, glob path = os.path.dirname(os.getcwd()) # list of src files for LAMMPS libfiles = glob.glob("%s/src/*.cpp" % path) lammps_library = Extension("_lammps", sources = libfiles, define_macros = [("MPICH_IGNORE_CXX_SEEK",1), ("LAMMPS_GZIP",1), ("FFT_NONE",1),], # src files for LAMMPS include_dirs = ["../src"], # additional libs for MPICH on Linux libraries = ["mpich","mpl","pthread"], # where to find the MPICH lib on Linux library_dirs = ["/usr/local/lib"], # additional libs for MPI on Mac # libraries = ["mpi"], ) setup(name = "lammps", version = "28Nov11", author = "Steve Plimpton", author_email = "sjplimp@sandia.gov", url = "http://lammps.sandia.gov", description = """LAMMPS molecular dynamics library - parallel""", py_modules = ["lammps"], ext_modules = [lammps_library] )
gladk/LIGGGHTS-PUBLIC
python/setup.py
Python
gpl-2.0
1,359
[ "LAMMPS" ]
3d880a208bc79eeae4f5a9fa8ff0d9838cde91ae2fdb348c6582519370d3ad39
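The commented-out Mac alternative in the setup script above suggests a platform switch for the MPI link options. A sketch of how those options could be selected automatically; the variable names are illustrative and the library lists are taken from the script's own comments:

import sys

# Linux defaults from the script vs. its commented-out Mac alternative.
if sys.platform == "darwin":
    mpi_libraries, mpi_library_dirs = ["mpi"], []
else:
    mpi_libraries, mpi_library_dirs = ["mpich", "mpl", "pthread"], ["/usr/local/lib"]

# These would then be passed to Extension(..., libraries=mpi_libraries,
# library_dirs=mpi_library_dirs) in place of the hard-coded values above.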
paraview_plugin_version = '2.1.0' # This is module to import. It provides VTKPythonAlgorithmBase, the base class # for all python-based vtkAlgorithm subclasses in VTK and decorators used to # 'register' the algorithm with ParaView along with information about UI. from paraview.util.vtkAlgorithm import smdomain, smhint, smproperty, smproxy # Classes to Decorate from PVGeo.model_build import ( CreateEvenRectilinearGrid, CreateTensorMesh, CreateUniformGrid, GlobeSource, OutlineContinents, ) MENU_CAT = 'PVGeo: Model Building' ############################################################################### @smproxy.source( name='PVGeoCreateEvenRectilinearGrid', label='Create Even Rectilinear Grid' ) @smhint.xml( '''<ShowInMenu category="%s"/> <RepresentationType view="RenderView" type="Surface With Edges" />''' % MENU_CAT ) class PVGeoCreateEvenRectilinearGrid(CreateEvenRectilinearGrid): def __init__(self): CreateEvenRectilinearGrid.__init__(self) #### Setters / Getters #### @smproperty.intvector(name="Extent", default_values=[10, 10, 10]) def set_extent(self, nx, ny, nz): CreateEvenRectilinearGrid.set_extent(self, nx, ny, nz) @smproperty.doublevector(name="X Range", default_values=[-1.0, 1.0]) def set_x_range(self, start, stop): CreateEvenRectilinearGrid.set_x_range(self, start, stop) @smproperty.doublevector(name="Y Range", default_values=[-1.0, 1.0]) def set_y_range(self, start, stop): CreateEvenRectilinearGrid.set_y_range(self, start, stop) @smproperty.doublevector(name="Z Range", default_values=[-1.0, 1.0]) def set_z_range(self, start, stop): CreateEvenRectilinearGrid.set_z_range(self, start, stop) ############################################################################### @smproxy.source(name='PVGeoCreateTensorMesh', label='Create Tensor Mesh') @smhint.xml( '''<ShowInMenu category="%s"/> <RepresentationType view="RenderView" type="Surface With Edges" />''' % MENU_CAT ) class PVGeoCreateTensorMesh(CreateTensorMesh): def __init__(self): CreateTensorMesh.__init__(self) @smproperty.stringvector( name="X Cells", default_values='200 100 50 20*50.0 50 100 200' ) def set_x_cells_str(self, xcellstr): CreateTensorMesh.set_x_cells_str(self, xcellstr) @smproperty.stringvector( name="Y Cells", default_values='200 100 50 21*50.0 50 100 200' ) def set_y_cells_str(self, ycellstr): CreateTensorMesh.set_y_cells_str(self, ycellstr) @smproperty.stringvector(name="Z Cells", default_values='20*25.0 50 100 200') def set_z_cells_str(self, zcellstr): CreateTensorMesh.set_z_cells_str(self, zcellstr) @smproperty.doublevector(name="Origin", default_values=[-350.0, -400.0, 0.0]) def set_origin(self, x0, y0, z0): CreateTensorMesh.set_origin(self, x0, y0, z0) ############################################################################### @smproxy.source(name='PVGeoCreateUniformGrid', label='Create Uniform Grid') @smhint.xml( '''<ShowInMenu category="%s"/> <RepresentationType view="RenderView" type="Surface With Edges" />''' % MENU_CAT ) class PVGeoCreateUniformGrid(CreateUniformGrid): def __init__(self): CreateUniformGrid.__init__(self) #### Setters / Getters #### @smproperty.intvector(name="Extent", default_values=[10, 10, 10]) def set_extent(self, nx, ny, nz): CreateUniformGrid.set_extent(self, nx, ny, nz) @smproperty.doublevector(name="Spacing", default_values=[1.0, 1.0, 1.0]) def set_spacing(self, dx, dy, dz): CreateUniformGrid.set_spacing(self, dx, dy, dz) @smproperty.doublevector(name="Origin", default_values=[0.0, 0.0, 0.0]) def set_origin(self, x0, y0, z0): CreateUniformGrid.set_origin(self, x0, y0, z0) 
############################################################################### @smproxy.source(name='PVGeoOutlineContinents', label=OutlineContinents.__displayname__) @smhint.xml('<ShowInMenu category="%s"/>' % MENU_CAT) class PVGeoOutlineContinents(OutlineContinents): def __init__(self): OutlineContinents.__init__(self) @smproperty.doublevector(name="Radius", default_values=6371.0e6) def set_radius(self, radius): OutlineContinents.set_radius(self, radius) ############################################################################### @smproxy.source(name='PVGeoGlobeSource', label=GlobeSource.__displayname__) @smhint.xml('<ShowInMenu category="%s"/>' % MENU_CAT) class PVGeoGlobeSource(GlobeSource): def __init__(self): GlobeSource.__init__(self) @smproperty.doublevector(name="Radius", default_values=6371.0e6) def set_radius(self, radius): GlobeSource.set_radius(self, radius) @smproperty.intvector(name="Meridians", default_values=36) @smdomain.intrange(min=2, max=100) def set_n_meridians(self, n): GlobeSource.set_n_meridians(self, n) @smproperty.intvector(name="Parallels", default_values=15) @smdomain.intrange(min=2, max=100) def set_n_parallels(self, n): GlobeSource.set_n_parallels(self, n) ###############################################################################
banesullivan/ParaViewGeophysics
PVPlugins/PVGeo_Model_Builder.py
Python
bsd-3-clause
5,281
[ "ParaView", "VTK" ]
9792327652c857afb19854e04a4907f5cc24e16985cc500732b33c5cb2d263b3
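Every proxy in the plugin module above follows one pattern: subclass a PVGeo algorithm, re-expose each setter through an smproperty decorator so ParaView can generate UI for it, and register the class with smproxy.source plus a menu hint. A minimal sketch of that pattern reusing the GlobeSource base shown above; the proxy name and label are illustrative:

from paraview.util.vtkAlgorithm import smhint, smproperty, smproxy
from PVGeo.model_build import GlobeSource

@smproxy.source(name='PVGeoGlobeSourceExample', label='Globe Source (Example)')
@smhint.xml('<ShowInMenu category="PVGeo: Model Building"/>')
class PVGeoGlobeSourceExample(GlobeSource):
    def __init__(self):
        GlobeSource.__init__(self)

    # Each decorated setter becomes an editable property in the ParaView UI.
    @smproperty.doublevector(name="Radius", default_values=6371.0e6)
    def set_radius(self, radius):
        GlobeSource.set_radius(self, radius)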
#!/usr/bin/env python # # Restriction Analysis Libraries. # Copyright (C) 2004. Frederic Sohm. # # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. # """ Notes about the diverse classes of the restriction enzyme implementation. RestrictionType is the type of all restriction enzymes. ---------------------------------------------------------------------------- AbstractCut implements some methods that are common to all enzymes. ---------------------------------------------------------------------------- NoCut, OneCut, TwoCuts represent the number of double strand cuts produced by the enzyme. They correspond to the 4th field of the rebase record emboss_e.NNN. 0->NoCut : the enzyme is not characterised. 2->OneCut : the enzyme produces one double strand cut. 4->TwoCuts : two double strand cuts. ---------------------------------------------------------------------------- Meth_Dep, Meth_Undep represent the methylation susceptibility to the enzyme. Not implemented yet. ---------------------------------------------------------------------------- Palindromic, if the site is palindromic or not. NotPalindromic allows some optimisations of the code. No need to check the reverse strand with palindromic sites. ---------------------------------------------------------------------------- Unknown, Blunt, represent the overhang. Ov5, Ov3 Unknown is here for symmetry reasons and corresponds to enzymes that are not characterised in rebase. ---------------------------------------------------------------------------- Defined, Ambiguous, represent the sequence of the overhang. NotDefined NotDefined is for enzymes not characterised in rebase. Defined corresponds to enzymes that display a constant overhang whatever the sequence. ex : EcoRI. G^AATTC -> overhang :AATT CTTAA^G Ambiguous : the overhang varies with the sequence restricted. Typically enzymes which cut outside their restriction site or (but not always) inside an ambiguous site. ex: AcuI CTGAAG(22/20) -> overhang : NN AasI GACNNN^NNNGTC -> overhang : NN CTGN^NNNNNCAG note : these 3 classes refer to the overhang not the site. So the enzyme ApoI (RAATTY) is defined even if its restriction site is ambiguous. ApoI R^AATTY -> overhang : AATT -> Defined YTTAA^R Accordingly, blunt enzymes are always Defined even when they cut outside their restriction site. ---------------------------------------------------------------------------- Not_available, as found in rebase file emboss_r.NNN files. Commercially_available allows the selection of the enzymes according to their suppliers to reduce the quantity of results. It will also allow the implementation of buffer compatibility tables. Not implemented yet. The list of suppliers is extracted from emboss_s.NNN ---------------------------------------------------------------------------- """ import re import itertools from Bio.Seq import Seq, MutableSeq from Bio.Alphabet import IUPAC from Bio.Restriction.Restriction_Dictionary import rest_dict as enzymedict from Bio.Restriction.Restriction_Dictionary import typedict from Bio.Restriction.Restriction_Dictionary import suppliers as suppliers_dict from Bio.Restriction.RanaConfig import * from Bio.Restriction.PrintFormat import PrintFormat #Used to use Bio.Restriction.DNAUtils.check_bases (and expose it under this #namespace), but have deprecated that module. def _check_bases(seq_string): """Check characters in a string (PRIVATE).
Remove digits and white space present in string. Allows any valid ambiguous IUPAC DNA single letters codes (ABCDGHKMNRSTVWY, lower case are converted). Other characters (e.g. symbols) trigger a TypeError. Returns the string WITH A LEADING SPACE (!). This is for backwards compatibility, and may in part be explained by the fact that Bio.Restriction doesn't use zero based counting. """ #Remove white space and make upper case: seq_string = "".join(seq_string.split()).upper() #Remove digits for c in "0123456789" : seq_string = seq_string.replace(c,"") #Check only allowed IUPAC letters if not set(seq_string).issubset(set("ABCDGHKMNRSTVWY")) : raise TypeError("Invalid character found in %s" % repr(seq_string)) return " " + seq_string matching = {'A' : 'ARWMHVDN', 'C' : 'CYSMHBVN', 'G' : 'GRSKBVDN', 'T' : 'TYWKHBDN', 'R' : 'ABDGHKMNSRWV', 'Y' : 'CBDHKMNSTWVY', 'W' : 'ABDHKMNRTWVY', 'S' : 'CBDGHKMNSRVY', 'M' : 'ACBDHMNSRWVY', 'K' : 'BDGHKNSRTWVY', 'H' : 'ACBDHKMNSRTWVY', 'B' : 'CBDGHKMNSRTWVY', 'V' : 'ACBDGHKMNSRWVY', 'D' : 'ABDGHKMNSRTWVY', 'N' : 'ACBDGHKMNSRTWVY'} DNA = Seq class FormattedSeq(object): """FormattedSeq(seq, [linear=True])-> new FormattedSeq. Translate a Bio.Seq into a formatted sequence to be used with Restriction. Roughly: remove anything which is not IUPAC alphabet and then add a space in front of the sequence to get a biological index instead of a python index (i.e. index of the first base is 1 not 0). Retains information about the shape of the molecule linear (default) or circular. Restriction sites are search over the edges of circular sequence.""" def __init__(self, seq, linear = True): """FormattedSeq(seq, [linear=True])-> new FormattedSeq. seq is either a Bio.Seq, Bio.MutableSeq or a FormattedSeq. if seq is a FormattedSeq, linear will have no effect on the shape of the sequence.""" if isinstance(seq, Seq) or isinstance(seq, MutableSeq): stringy = seq.tostring() self.lower = stringy.islower() #Note this adds a leading space to the sequence (!) self.data = _check_bases(stringy) self.linear = linear self.klass = seq.__class__ self.alphabet = seq.alphabet elif isinstance(seq, FormattedSeq): self.lower = seq.lower self.data = seq.data self.linear = seq.linear self.alphabet = seq.alphabet self.klass = seq.klass else: raise TypeError('expected Seq or MutableSeq, got %s' % type(seq)) def __len__(self): return len(self.data) - 1 def __repr__(self): return 'FormattedSeq(%s, linear=%s)' %(repr(self[1:]), repr(self.linear)) def __eq__(self, other): if isinstance(other, FormattedSeq): if repr(self) == repr(other): return True else: return False return False def circularise(self): """FS.circularise() -> circularise FS""" self.linear = False return def linearise(self): """FS.linearise() -> linearise FS""" self.linear = True return def to_linear(self): """FS.to_linear() -> new linear FS instance""" new = self.__class__(self) new.linear = True return new def to_circular(self): """FS.to_circular() -> new circular FS instance""" new = self.__class__(self) new.linear = False return new def is_linear(self): """FS.is_linear() -> bool. True if the sequence will analysed as a linear sequence.""" return self.linear def finditer(self, pattern, size): """FS.finditer(pattern, size) -> list. return a list of pattern into the sequence. the list is made of tuple (location, pattern.group). the latter is used with non palindromic sites. pattern is the regular expression pattern corresponding to the enzyme restriction site. 
size is the size of the restriction enzyme recognition-site size.""" if self.is_linear(): data = self.data else: data = self.data + self.data[1:size] return [(i.start(), i.group) for i in re.finditer(pattern, data)] def __getitem__(self, i): if self.lower: return self.klass((self.data[i]).lower(), self.alphabet) return self.klass(self.data[i], self.alphabet) class RestrictionType(type): """RestrictionType. Type from which derives all enzyme classes. Implement the operator methods.""" def __init__(cls, name='', bases=(), dct={}): """RE(name, bases, dct) -> RestrictionType instance. Not intended to be used in normal operation. The enzymes are instantiated when importing the module. see below.""" if "-" in name : raise ValueError("Problem with hyphen in %s as enzyme name" \ % repr(name)) # 2011/11/26 - Nobody knows what this call was supposed to accomplish, # but all unit tests seem to pass without it. # super(RestrictionType, cls).__init__(cls, name, bases, dct) try : cls.compsite = re.compile(cls.compsite) except Exception, err : raise ValueError("Problem with regular expression, re.compiled(%s)" \ % repr(cls.compsite)) def __add__(cls, other): """RE.__add__(other) -> RestrictionBatch(). if other is an enzyme returns a batch of the two enzymes. if other is already a RestrictionBatch add enzyme to it.""" if isinstance(other, RestrictionType): return RestrictionBatch([cls, other]) elif isinstance(other, RestrictionBatch): return other.add_nocheck(cls) else: raise TypeError def __div__(cls, other): """RE.__div__(other) -> list. RE/other returns RE.search(other).""" return cls.search(other) def __rdiv__(cls, other): """RE.__rdiv__(other) -> list. other/RE returns RE.search(other).""" return cls.search(other) def __truediv__(cls, other): """RE.__truediv__(other) -> list. RE/other returns RE.search(other).""" return cls.search(other) def __rtruediv__(cls, other): """RE.__rtruediv__(other) -> list. other/RE returns RE.search(other).""" return cls.search(other) def __floordiv__(cls, other): """RE.__floordiv__(other) -> list. RE//other returns RE.catalyse(other).""" return cls.catalyse(other) def __rfloordiv__(cls, other): """RE.__rfloordiv__(other) -> list. other//RE returns RE.catalyse(other).""" return cls.catalyse(other) def __str__(cls): """RE.__str__() -> str. return the name of the enzyme.""" return cls.__name__ def __repr__(cls): """RE.__repr__() -> str. used with eval or exec will instantiate the enzyme.""" return "%s" % cls.__name__ def __len__(cls): """RE.__len__() -> int. length of the recognition site.""" return cls.size def __hash__(cls): #Python default is to use id(...) #This is consistent with the __eq__ implementation return id(cls) def __eq__(cls, other): """RE == other -> bool True if RE and other are the same enzyme. Specifically this checks they are the same Python object. """ #assert (id(cls)==id(other)) == (other is cls) == (cls is other) return id(cls)==id(other) def __ne__(cls, other): """RE != other -> bool. isoschizomer strict, same recognition site, same restriction -> False all the other-> True WARNING - This is not the inverse of the __eq__ method. """ if not isinstance(other, RestrictionType): return True elif cls.charac == other.charac: return False else: return True def __rshift__(cls, other): """RE >> other -> bool. neoschizomer : same recognition site, different restriction. 
-> True all the others : -> False""" if not isinstance(other, RestrictionType): return False elif cls.site == other.site and cls.charac != other.charac: return True else: return False def __mod__(cls, other): """a % b -> bool. Test compatibility of the overhang of a and b. True if a and b have compatible overhang.""" if not isinstance(other, RestrictionType): raise TypeError( \ 'expected RestrictionType, got %s instead' % type(other)) return cls._mod1(other) def __ge__(cls, other): """a >= b -> bool. a is greater or equal than b if the a site is longer than b site. if their site have the same length sort by alphabetical order of their names.""" if not isinstance(other, RestrictionType): raise NotImplementedError if len(cls) > len(other): return True elif cls.size == len(other) and cls.__name__ >= other.__name__: return True else: return False def __gt__(cls, other): """a > b -> bool. sorting order: 1. size of the recognition site. 2. if equal size, alphabetical order of the names.""" if not isinstance(other, RestrictionType): raise NotImplementedError if len(cls) > len(other): return True elif cls.size == len(other) and cls.__name__ > other.__name__: return True else: return False def __le__(cls, other): """a <= b -> bool. sorting order: 1. size of the recognition site. 2. if equal size, alphabetical order of the names.""" if not isinstance(other, RestrictionType): raise NotImplementedError elif len(cls) < len(other): return True elif len(cls) == len(other) and cls.__name__ <= other.__name__: return True else: return False def __lt__(cls, other): """a < b -> bool. sorting order: 1. size of the recognition site. 2. if equal size, alphabetical order of the names.""" if not isinstance(other, RestrictionType): raise NotImplementedError elif len(cls) < len(other): return True elif len(cls) == len(other) and cls.__name__ < other.__name__: return True else: return False class AbstractCut(RestrictionType): """Implement the methods that are common to all restriction enzymes. All the methods are classmethod. For internal use only. Not meant to be instantiate.""" @classmethod def search(cls, dna, linear=True): """RE.search(dna, linear=True) -> list. return a list of all the site of RE in dna. Compensate for circular sequences and so on. dna must be a Bio.Seq.Seq instance or a Bio.Seq.MutableSeq instance. if linear is False, the restriction sites than span over the boundaries will be included. The positions are the first base of the 3' fragment, i.e. the first base after the position the enzyme will cut. """ # # Separating search from _search allow a (very limited) optimisation # of the search when using a batch of restriction enzymes. # in this case the DNA is tested once by the class which implements # the batch instead of being tested by each enzyme single. # see RestrictionBatch.search() for example. # if isinstance(dna, FormattedSeq): cls.dna = dna return cls._search() else : cls.dna = FormattedSeq(dna, linear) return cls._search() @classmethod def all_suppliers(self): """RE.all_suppliers -> print all the suppliers of R""" supply = [x[0] for x in suppliers_dict.itervalues()] supply.sort() print ",\n".join(supply) return @classmethod def is_equischizomer(self, other): """RE.is_equischizomers(other) -> bool. True if other is an isoschizomer of RE. False else. equischizomer <=> same site, same position of restriction.""" return not self != other @classmethod def is_neoschizomer(self, other): """RE.is_neoschizomers(other) -> bool. True if other is an isoschizomer of RE. False else. 
neoschizomer <=> same site, different position of restriction.""" return self >> other @classmethod def is_isoschizomer(self, other): """RE.is_isoschizomers(other) -> bool. True if other is an isoschizomer of RE. False else. isoschizomer <=> same site.""" return (not self != other) or self >> other @classmethod def equischizomers(self, batch=None): """RE.equischizomers([batch]) -> list. return a list of all the equischizomers of RE. if batch is supplied it is used instead of the default AllEnzymes. equischizomer <=> same site, same position of restriction.""" if not batch : batch = AllEnzymes r = [x for x in batch if not self != x] i = r.index(self) del r[i] r.sort() return r @classmethod def neoschizomers(self, batch=None): """RE.neoschizomers([batch]) -> list. return a list of all the neoschizomers of RE. if batch is supplied it is used instead of the default AllEnzymes. neoschizomer <=> same site, different position of restriction.""" if not batch : batch = AllEnzymes r = [x for x in batch if self >> x] r.sort() return r @classmethod def isoschizomers(self, batch=None): """RE.isoschizomers([batch]) -> list. return a list of all the equischizomers and neoschizomers of RE. if batch is supplied it is used instead of the default AllEnzymes.""" if not batch : batch = AllEnzymes r = [x for x in batch if (self >> x) or (not self != x)] i = r.index(self) del r[i] r.sort() return r @classmethod def frequency(self): """RE.frequency() -> int. frequency of the site.""" return self.freq class NoCut(AbstractCut): """Implement the methods specific to the enzymes that do not cut. These enzymes are generally enzymes that have been only partially characterised and the way they cut the DNA is unknown or enzymes for which the pattern of cut is too complex to be recorded in Rebase (ncuts values of 0 in emboss_e.###). When using search() with these enzymes the values returned are at the start of the restriction site. Their catalyse() method raises a NotImplementedError. Unknown and NotDefined are also part of the base classes of these enzymes. Internal use only. Not meant to be instantiated.""" @classmethod def cut_once(self): """RE.cut_once() -> bool. True if the enzyme cut the sequence one time on each strand.""" return False @classmethod def cut_twice(self): """RE.cut_twice() -> bool. True if the enzyme cut the sequence twice on each strand.""" return False @classmethod def _modify(self, location): """RE._modify(location) -> int. for internal use only. location is an integer corresponding to the location of the match for the enzyme pattern in the sequence. _modify returns the real place where the enzyme will cut. example: EcoRI pattern : GAATTC EcoRI will cut after the G. so in the sequence: ______ GAATACACGGAATTCGA | 10 dna.finditer(GAATTC, 6) will return 10 as G is the 10th base EcoRI cut after the G so: EcoRI._modify(10) -> 11. if the enzyme cuts twice, _modify will yield two integers corresponding to each cutting site. """ yield location @classmethod def _rev_modify(self, location): """RE._rev_modify(location) -> generator of int. for internal use only. as _modify for site situated on the antiparallel strand when the enzyme is not palindromic """ yield location @classmethod def characteristic(self): """RE.characteristic() -> tuple.
the tuple contains the attributes: fst5 -> first 5' cut (current strand) or None fst3 -> first 3' cut (complementary strand) or None scd5 -> second 5' cut (current strand) or None scd3 -> second 3' cut (complementary strand) or None site -> recognition site.""" return None, None, None, None, self.site class OneCut(AbstractCut): """Implement the methods specific to the enzymes that cut the DNA only once. Corresponds to ncuts values of 2 in emboss_e.### Internal use only. Not meant to be instantiated.""" @classmethod def cut_once(self): """RE.cut_once() -> bool. True if the enzyme cut the sequence one time on each strand.""" return True @classmethod def cut_twice(self): """RE.cut_twice() -> bool. True if the enzyme cut the sequence twice on each strand.""" return False @classmethod def _modify(self, location): """RE._modify(location) -> int. for internal use only. location is an integer corresponding to the location of the match for the enzyme pattern in the sequence. _modify returns the real place where the enzyme will cut. example: EcoRI pattern : GAATTC EcoRI will cut after the G. so in the sequence: ______ GAATACACGGAATTCGA | 10 dna.finditer(GAATTC, 6) will return 10 as G is the 10th base EcoRI cut after the G so: EcoRI._modify(10) -> 11. if the enzyme cuts twice, _modify will yield two integers corresponding to each cutting site. """ yield location + self.fst5 @classmethod def _rev_modify(self, location): """RE._rev_modify(location) -> generator of int. for internal use only. as _modify for site situated on the antiparallel strand when the enzyme is not palindromic """ yield location - self.fst3 @classmethod def characteristic(self): """RE.characteristic() -> tuple. the tuple contains the attributes: fst5 -> first 5' cut (current strand) or None fst3 -> first 3' cut (complementary strand) or None scd5 -> second 5' cut (current strand) or None scd3 -> second 3' cut (complementary strand) or None site -> recognition site.""" return self.fst5, self.fst3, None, None, self.site class TwoCuts(AbstractCut): """Implement the methods specific to the enzymes that cut the DNA twice. Corresponds to ncuts values of 4 in emboss_e.### Internal use only. Not meant to be instantiated.""" @classmethod def cut_once(self): """RE.cut_once() -> bool. True if the enzyme cut the sequence one time on each strand.""" return False @classmethod def cut_twice(self): """RE.cut_twice() -> bool. True if the enzyme cut the sequence twice on each strand.""" return True @classmethod def _modify(self, location): """RE._modify(location) -> int. for internal use only. location is an integer corresponding to the location of the match for the enzyme pattern in the sequence. _modify returns the real place where the enzyme will cut. example: EcoRI pattern : GAATTC EcoRI will cut after the G. so in the sequence: ______ GAATACACGGAATTCGA | 10 dna.finditer(GAATTC, 6) will return 10 as G is the 10th base EcoRI cut after the G so: EcoRI._modify(10) -> 11. if the enzyme cuts twice, _modify will yield two integers corresponding to each cutting site. """ yield location + self.fst5 yield location + self.scd5 @classmethod def _rev_modify(self, location): """RE._rev_modify(location) -> generator of int. for internal use only. as _modify for site situated on the antiparallel strand when the enzyme is not palindromic """ yield location - self.fst3 yield location - self.scd3 @classmethod def characteristic(self): """RE.characteristic() -> tuple.
the tuple contains the attributes: fst5 -> first 5' cut (current strand) or None fst3 -> first 3' cut (complementary strand) or None scd5 -> second 5' cut (current strand) or None scd3 -> second 3' cut (complementary strand) or None site -> recognition site.""" return self.fst5, self.fst3, self.scd5, self.scd3, self.site class Meth_Dep(AbstractCut): """Implement the information about methylation. Enzymes of this class possess a site which is methylable.""" @classmethod def is_methylable(self): """RE.is_methylable() -> bool. True if the recognition site is methylable.""" return True class Meth_Undep(AbstractCut): """Implement information about methylation sensitivity. Enzymes of this class are not sensitive to methylation.""" @classmethod def is_methylable(self): """RE.is_methylable() -> bool. True if the recognition site is methylable.""" return False class Palindromic(AbstractCut): """Implement the methods specific to the enzymes which are palindromic. Palindromic means : the recognition site and its reverse complement are identical. Remarks : an enzyme with a site CGNNCG is palindromic even if some of the sites that it will recognise are not. For example here : CGAACG Internal use only. Not meant to be instantiated.""" @classmethod def _search(self): """RE._search() -> list. for internal use only. implement the search method for palindromic and non palindromic enzymes. """ siteloc = self.dna.finditer(self.compsite,self.size) self.results = [r for s,g in siteloc for r in self._modify(s)] if self.results : self._drop() return self.results @classmethod def is_palindromic(self): """RE.is_palindromic() -> bool. True if the recognition site is a palindrome.""" return True class NonPalindromic(AbstractCut): """Implement the methods specific to the enzymes which are not palindromic. Palindromic means : the recognition site and its reverse complement are identical. Internal use only. Not meant to be instantiated.""" @classmethod def _search(self): """RE._search() -> list. for internal use only. implement the search method for palindromic and non palindromic enzymes. """ iterator = self.dna.finditer(self.compsite, self.size) self.results = [] modif = self._modify revmodif = self._rev_modify s = str(self) self.on_minus = [] for start, group in iterator: if group(s): self.results += [r for r in modif(start)] else: self.on_minus += [r for r in revmodif(start)] self.results += self.on_minus if self.results: self.results.sort() self._drop() return self.results @classmethod def is_palindromic(self): """RE.is_palindromic() -> bool. True if the recognition site is a palindrome.""" return False class Unknown(AbstractCut): """Implement the methods specific to the enzymes for which the overhang is unknown. These enzymes are also NotDefined and NoCut. Internal use only. Not meant to be instantiated.""" @classmethod def catalyse(self, dna, linear=True): """RE.catalyse(dna, linear=True) -> tuple of DNA. RE.catalyze(dna, linear=True) -> tuple of DNA. return a tuple of dna as will be produced by using RE to restrict the dna. dna must be a Bio.Seq.Seq instance or a Bio.Seq.MutableSeq instance. if linear is False, the sequence is considered to be circular and the output will be modified accordingly.""" raise NotImplementedError('%s restriction is unknown.' \ % self.__name__) catalyze = catalyse @classmethod def is_blunt(self): """RE.is_blunt() -> bool. True if the enzyme produces blunt end.
see also: RE.is_3overhang() RE.is_5overhang() RE.is_unknown()""" return False @classmethod def is_5overhang(self): """RE.is_5overhang() -> bool. True if the enzyme produces 5' overhang sticky end. see also: RE.is_3overhang() RE.is_blunt() RE.is_unknown()""" return False @classmethod def is_3overhang(self): """RE.is_3overhang() -> bool. True if the enzyme produces 3' overhang sticky end. see also: RE.is_5overhang() RE.is_blunt() RE.is_unknown()""" return False @classmethod def overhang(self): """RE.overhang() -> str. type of overhang of the enzyme., can be "3' overhang", "5' overhang", "blunt", "unknown" """ return 'unknown' @classmethod def compatible_end(self): """RE.compatible_end() -> list. list of all the enzymes that share compatible end with RE.""" return [] @classmethod def _mod1(self, other): """RE._mod1(other) -> bool. for internal use only test for the compatibility of restriction ending of RE and other.""" return False class Blunt(AbstractCut): """Implement the methods specific to the enzymes for which the overhang is blunt. The enzyme cuts the + strand and the - strand of the DNA at the same place. Internal use only. Not meant to be instantiated.""" @classmethod def catalyse(self, dna, linear=True): """RE.catalyse(dna, linear=True) -> tuple of DNA. RE.catalyze(dna, linear=True) -> tuple of DNA. return a tuple of dna as will be produced by using RE to restrict the dna. dna must be a Bio.Seq.Seq instance or a Bio.Seq.MutableSeq instance. if linear is False, the sequence is considered to be circular and the output will be modified accordingly.""" r = self.search(dna, linear) d = self.dna if not r : return d[1:], fragments = [] length = len(r)-1 if d.is_linear(): # # START of the sequence to FIRST site. # fragments.append(d[1:r[0]]) if length: # # if more than one site add them. # fragments += [d[r[x]:r[x+1]] for x in xrange(length)] # # LAST site to END of the sequence. # fragments.append(d[r[-1]:]) else: # # circular : bridge LAST site to FIRST site. # fragments.append(d[r[-1]:]+d[1:r[0]]) if not length: # # one site we finish here. # return tuple(fragments) # # add the others. # fragments += [d[r[x]:r[x+1]] for x in xrange(length)] return tuple(fragments) catalyze = catalyse @classmethod def is_blunt(self): """RE.is_blunt() -> bool. True if the enzyme produces blunt end. see also: RE.is_3overhang() RE.is_5overhang() RE.is_unknown()""" return True @classmethod def is_5overhang(self): """RE.is_5overhang() -> bool. True if the enzyme produces 5' overhang sticky end. see also: RE.is_3overhang() RE.is_blunt() RE.is_unknown()""" return False @classmethod def is_3overhang(self): """RE.is_3overhang() -> bool. True if the enzyme produces 3' overhang sticky end. see also: RE.is_5overhang() RE.is_blunt() RE.is_unknown()""" return False @classmethod def overhang(self): """RE.overhang() -> str. type of overhang of the enzyme., can be "3' overhang", "5' overhang", "blunt", "unknown" """ return 'blunt' @classmethod def compatible_end(self, batch=None): """RE.compatible_end() -> list. list of all the enzymes that share compatible end with RE.""" if not batch : batch = AllEnzymes r = [x for x in iter(AllEnzymes) if x.is_blunt()] r.sort() return r @staticmethod def _mod1(other): """RE._mod1(other) -> bool. for internal use only test for the compatibility of restriction ending of RE and other.""" return issubclass(other, Blunt) class Ov5(AbstractCut): """Implement the methods specific to the enzymes for which the overhang is recessed in 3'. 
The enzyme cuts the + strand after the - strand of the DNA. Internal use only. Not meant to be instantiated.""" @classmethod def catalyse(self, dna, linear=True): """RE.catalyse(dna, linear=True) -> tuple of DNA. RE.catalyze(dna, linear=True) -> tuple of DNA. return a tuple of dna as will be produced by using RE to restrict the dna. dna must be a Bio.Seq.Seq instance or a Bio.Seq.MutableSeq instance. if linear is False, the sequence is considered to be circular and the output will be modified accordingly.""" r = self.search(dna, linear) d = self.dna if not r : return d[1:], length = len(r)-1 fragments = [] if d.is_linear(): # # START of the sequence to FIRST site. # fragments.append(d[1:r[0]]) if length: # # if more than one site add them. # fragments += [d[r[x]:r[x+1]] for x in xrange(length)] # # LAST site to END of the sequence. # fragments.append(d[r[-1]:]) else: # # circular : bridge LAST site to FIRST site. # fragments.append(d[r[-1]:]+d[1:r[0]]) if not length: # # one site we finish here. # return tuple(fragments) # # add the others. # fragments += [d[r[x]:r[x+1]] for x in xrange(length)] return tuple(fragments) catalyze = catalyse @classmethod def is_blunt(self): """RE.is_blunt() -> bool. True if the enzyme produces blunt end. see also: RE.is_3overhang() RE.is_5overhang() RE.is_unknown()""" return False @classmethod def is_5overhang(self): """RE.is_5overhang() -> bool. True if the enzyme produces 5' overhang sticky end. see also: RE.is_3overhang() RE.is_blunt() RE.is_unknown()""" return True @classmethod def is_3overhang(self): """RE.is_3overhang() -> bool. True if the enzyme produces 3' overhang sticky end. see also: RE.is_5overhang() RE.is_blunt() RE.is_unknown()""" return False @classmethod def overhang(self): """RE.overhang() -> str. type of overhang of the enzyme., can be "3' overhang", "5' overhang", "blunt", "unknown" """ return "5' overhang" @classmethod def compatible_end(self, batch=None): """RE.compatible_end() -> list. list of all the enzymes that share compatible end with RE.""" if not batch : batch = AllEnzymes r = [x for x in iter(AllEnzymes) if x.is_5overhang() and x % self] r.sort() return r @classmethod def _mod1(self, other): """RE._mod1(other) -> bool. for internal use only test for the compatibility of restriction ending of RE and other.""" if issubclass(other, Ov5) : return self._mod2(other) else : return False class Ov3(AbstractCut): """Implement the methods specific to the enzymes for which the overhang is recessed in 5'. The enzyme cuts the - strand after the + strand of the DNA. Internal use only. Not meant to be instantiated.""" @classmethod def catalyse(self, dna, linear=True): """RE.catalyse(dna, linear=True) -> tuple of DNA. RE.catalyze(dna, linear=True) -> tuple of DNA. return a tuple of dna as will be produced by using RE to restrict the dna. dna must be a Bio.Seq.Seq instance or a Bio.Seq.MutableSeq instance. if linear is False, the sequence is considered to be circular and the output will be modified accordingly.""" r = self.search(dna, linear) d = self.dna if not r : return d[1:], fragments = [] length = len(r)-1 if d.is_linear(): # # START of the sequence to FIRST site. # fragments.append(d[1:r[0]]) if length: # # if more than one site add them. # fragments += [d[r[x]:r[x+1]] for x in xrange(length)] # # LAST site to END of the sequence. # fragments.append(d[r[-1]:]) else: # # circular : bridge LAST site to FIRST site. # fragments.append(d[r[-1]:]+d[1:r[0]]) if not length: # # one site we finish here. # return tuple(fragments) # # add the others. 
# fragments += [d[r[x]:r[x+1]] for x in xrange(length)] return tuple(fragments) catalyze = catalyse @classmethod def is_blunt(self): """RE.is_blunt() -> bool. True if the enzyme produces blunt end. see also: RE.is_3overhang() RE.is_5overhang() RE.is_unknown()""" return False @classmethod def is_5overhang(self): """RE.is_5overhang() -> bool. True if the enzyme produces 5' overhang sticky end. see also: RE.is_3overhang() RE.is_blunt() RE.is_unknown()""" return False @classmethod def is_3overhang(self): """RE.is_3overhang() -> bool. True if the enzyme produces 3' overhang sticky end. see also: RE.is_5overhang() RE.is_blunt() RE.is_unknown()""" return True @classmethod def overhang(self): """RE.overhang() -> str. type of overhang of the enzyme., can be "3' overhang", "5' overhang", "blunt", "unknown" """ return "3' overhang" @classmethod def compatible_end(self, batch=None): """RE.compatible_end() -> list. list of all the enzymes that share compatible end with RE.""" if not batch : batch = AllEnzymes r = [x for x in iter(AllEnzymes) if x.is_3overhang() and x % self] r.sort() return r @classmethod def _mod1(self, other): """RE._mod1(other) -> bool. for internal use only test for the compatibility of restriction ending of RE and other.""" # # called by RE._mod1(other) when the one of the enzyme is ambiguous # if issubclass(other, Ov3) : return self._mod2(other) else : return False class Defined(AbstractCut): """Implement the methods specific to the enzymes for which the overhang and the cut are not variable. Typical example : EcoRI -> G^AATT_C The overhang will always be AATT Notes: Blunt enzymes are always defined. even if there site is GGATCCNNN^_N There overhang is always the same : blunt! Internal use only. Not meant to be instantiated.""" @classmethod def _drop(self): """RE._drop() -> list. for internal use only. drop the site that are situated outside the sequence in linear sequence. modify the index for site in circular sequences.""" # # remove or modify the results that are outside the sequence. # This is necessary since after finding the site we add the distance # from the site to the cut with the _modify and _rev_modify methods. # For linear we will remove these sites altogether. # For circular sequence, we modify the result rather than _drop it # since the site is in the sequence. # length = len(self.dna) drop = itertools.dropwhile take = itertools.takewhile if self.dna.is_linear(): self.results = [x for x in drop(lambda x:x<1, self.results)] self.results = [x for x in take(lambda x:x<length, self.results)] else: for index, location in enumerate(self.results): if location < 1: self.results[index] += length else: break for index, location in enumerate(self.results[::-1]): if location > length: self.results[-(index+1)] -= length else: break return @classmethod def is_defined(self): """RE.is_defined() -> bool. True if the sequence recognised and cut is constant, i.e. the recognition site is not degenerated AND the enzyme cut inside the site. see also: RE.is_ambiguous() RE.is_unknown()""" return True @classmethod def is_ambiguous(self): """RE.is_ambiguous() -> bool. True if the sequence recognised and cut is ambiguous, i.e. the recognition site is degenerated AND/OR the enzyme cut outside the site. see also: RE.is_defined() RE.is_unknown()""" return False @classmethod def is_unknown(self): """RE.is_unknown() -> bool. True if the sequence is unknown, i.e. the recognition site has not been characterised yet. 
see also: RE.is_defined() RE.is_ambiguous()""" return False @classmethod def elucidate(self): """RE.elucidate() -> str return a representation of the site with the cut on the (+) strand represented as '^' and the cut on the (-) strand as '_'. ie: >>> EcoRI.elucidate() # 5' overhang 'G^AATT_C' >>> KpnI.elucidate() # 3' overhang 'G_GTAC^C' >>> EcoRV.elucidate() # blunt 'GAT^_ATC' >>> SnaI.elucidate() # NotDefined, cut profile unknown. '? GTATAC ?' >>> """ f5 = self.fst5 f3 = self.fst3 site = self.site if self.cut_twice() : re = 'cut twice, not yet implemented sorry.' elif self.is_5overhang(): if f5 == f3 == 0 : re = 'N^'+ self.site + '_N' elif f3 == 0 : re = site[:f5] + '^' + site[f5:] + '_N' else : re = site[:f5] + '^' + site[f5:f3] + '_' + site[f3:] elif self.is_blunt(): re = site[:f5] + '^_' + site[f5:] else: if f5 == f3 == 0 : re = 'N_'+ site + '^N' else : re = site[:f3] + '_' + site[f3:f5] +'^'+ site[f5:] return re @classmethod def _mod2(self, other): """RE._mod2(other) -> bool. for internal use only test for the compatibility of restriction ending of RE and other.""" # # called by RE._mod1(other) when the one of the enzyme is ambiguous # if other.ovhgseq == self.ovhgseq: return True elif issubclass(other, Ambiguous): return other._mod2(self) else: return False class Ambiguous(AbstractCut): """Implement the methods specific to the enzymes for which the overhang is variable. Typical example : BstXI -> CCAN_NNNN^NTGG The overhang can be any sequence of 4 bases. Notes: Blunt enzymes are always defined. even if there site is GGATCCNNN^_N There overhang is always the same : blunt! Internal use only. Not meant to be instantiated.""" @classmethod def _drop(self): """RE._drop() -> list. for internal use only. drop the site that are situated outside the sequence in linear sequence. modify the index for site in circular sequences.""" length = len(self.dna) drop = itertools.dropwhile take = itertools.takewhile if self.dna.is_linear(): self.results = [x for x in drop(lambda x : x < 1, self.results)] self.results = [x for x in take(lambda x : x <length, self.results)] else: for index, location in enumerate(self.results): if location < 1: self.results[index] += length else: break for index, location in enumerate(self.results[::-1]): if location > length: self.results[-(index+1)] -= length else: break return @classmethod def is_defined(self): """RE.is_defined() -> bool. True if the sequence recognised and cut is constant, i.e. the recognition site is not degenerated AND the enzyme cut inside the site. see also: RE.is_ambiguous() RE.is_unknown()""" return False @classmethod def is_ambiguous(self): """RE.is_ambiguous() -> bool. True if the sequence recognised and cut is ambiguous, i.e. the recognition site is degenerated AND/OR the enzyme cut outside the site. see also: RE.is_defined() RE.is_unknown()""" return True @classmethod def is_unknown(self): """RE.is_unknown() -> bool. True if the sequence is unknown, i.e. the recognition site has not been characterised yet. see also: RE.is_defined() RE.is_ambiguous()""" return False @classmethod def _mod2(self, other): """RE._mod2(other) -> bool. 
for internal use only test for the compatibility of restriction ending of RE and other.""" # # called by RE._mod1(other) when the one of the enzyme is ambiguous # if len(self.ovhgseq) != len(other.ovhgseq): return False else: se = self.ovhgseq for base in se: if base in 'ATCG': pass if base in 'N': se = '.'.join(se.split('N')) if base in 'RYWMSKHDBV': expand = '['+ matching[base] + ']' se = expand.join(se.split(base)) if re.match(se, other.ovhgseq): return True else: return False @classmethod def elucidate(self): """RE.elucidate() -> str return a representation of the site with the cut on the (+) strand represented as '^' and the cut on the (-) strand as '_'. ie: >>> EcoRI.elucidate() # 5' overhang 'G^AATT_C' >>> KpnI.elucidate() # 3' overhang 'G_GTAC^C' >>> EcoRV.elucidate() # blunt 'GAT^_ATC' >>> SnaI.elucidate() # NotDefined, cut profile unknown. '? GTATAC ?' >>> """ f5 = self.fst5 f3 = self.fst3 length = len(self) site = self.site if self.cut_twice() : re = 'cut twice, not yet implemented sorry.' elif self.is_5overhang(): if f3 == f5 == 0: re = 'N^' + site +'_N' elif 0 <= f5 <= length and 0 <= f3+length <= length: re = site[:f5] + '^' + site[f5:f3] + '_' + site[f3:] elif 0 <= f5 <= length: re = site[:f5] + '^' + site[f5:] + f3*'N' + '_N' elif 0 <= f3+length <= length: re = 'N^' + abs(f5) * 'N' + site[:f3] + '_' + site[f3:] elif f3+length < 0: re = 'N^' + abs(f5)*'N' + '_' + abs(length+f3)*'N' + site elif f5 > length: re = site + (f5-length)*'N'+'^'+(length+f3-f5)*'N'+'_N' else: re = 'N^' + abs(f5) * 'N' + site + f3*'N' + '_N' elif self.is_blunt(): if f5 < 0: re = 'N^_' + abs(f5)*'N' + site elif f5 > length: re = site + (f5-length)*'N' + '^_N' else: raise ValueError('%s.easyrepr() : error f5=%i' \ % (self.name,f5)) else: if f3 == 0: if f5 == 0 : re = 'N_' + site + '^N' else : re = site + '_' + (f5-length)*'N' + '^N' elif 0 < f3+length <= length and 0 <= f5 <= length: re = site[:f3] + '_' + site[f3:f5] + '^' + site[f5:] elif 0 < f3+length <= length: re = site[:f3] + '_' + site[f3:] + (f5-length)*'N' + '^N' elif 0 <= f5 <= length: re = 'N_' +'N'*(f3+length) + site[:f5] + '^' + site[f5:] elif f3 > 0: re = site + f3*'N' + '_' + (f5-f3-length)*'N' + '^N' elif f5 < 0: re = 'N_' + abs(f3-f5+length)*'N' + '^' + abs(f5)*'N' + site else: re = 'N_' + abs(f3+length)*'N' + site + (f5-length)*'N' + '^N' return re class NotDefined(AbstractCut): """Implement the methods specific to the enzymes for which the overhang is not characterised. Correspond to NoCut and Unknown. Internal use only. Not meant to be instantiated.""" @classmethod def _drop(self): """RE._drop() -> list. for internal use only. drop the site that are situated outside the sequence in linear sequence. modify the index for site in circular sequences.""" if self.dna.is_linear(): return else: length = len(self.dna) for index, location in enumerate(self.results): if location < 1: self.results[index] += length else: break for index, location in enumerate(self.results[::-1]): if location > length: self.results[-(index+1)] -= length else: break return @classmethod def is_defined(self): """RE.is_defined() -> bool. True if the sequence recognised and cut is constant, i.e. the recognition site is not degenerated AND the enzyme cut inside the site. see also: RE.is_ambiguous() RE.is_unknown()""" return False @classmethod def is_ambiguous(self): """RE.is_ambiguous() -> bool. True if the sequence recognised and cut is ambiguous, i.e. the recognition site is degenerated AND/OR the enzyme cut outside the site.
see also: RE.is_defined() RE.is_unknown()""" return False @classmethod def is_unknown(self): """RE.is_unknown() -> bool. True if the sequence is unknown, i.e. the recognition site has not been characterised yet. see also: RE.is_defined() RE.is_ambiguous()""" return True @classmethod def _mod2(self, other): """RE._mod2(other) -> bool. for internal use only test for the compatibility of restriction ending of RE and other.""" # # Normally we should not arrive here. But well better safe than sorry. # the overhang is not defined we are compatible with nobody. # could raise an Error may be rather than return quietly. # #return False raise ValueError("%s.mod2(%s), %s : NotDefined. pas glop pas glop!" \ % (str(self), str(other), str(self))) @classmethod def elucidate(self): """RE.elucidate() -> str return a representation of the site with the cut on the (+) strand represented as '^' and the cut on the (-) strand as '_'. ie: >>> EcoRI.elucidate() # 5' overhang 'G^AATT_C' >>> KpnI.elucidate() # 3' overhang 'G_GTAC^C' >>> EcoRV.elucidate() # blunt 'GAT^_ATC' >>> SnaI.elucidate() # NotDefined, cut profile unknown. '? GTATAC ?' >>> """ return '? %s ?' % self.site class Commercially_available(AbstractCut): # # Recent addition to Rebase make this naming convention uncertain. # May be better to says enzymes which have a supplier. # """Implement the methods specific to the enzymes which are commercially available. Internal use only. Not meant to be instantiated.""" @classmethod def suppliers(self): """RE.suppliers() -> print the suppliers of RE.""" supply = suppliers_dict.items() for k,v in supply: if k in self.suppl: print v[0]+',' return @classmethod def supplier_list(self): """RE.supplier_list() -> list. list of the supplier names for RE.""" return [v[0] for k,v in suppliers_dict.items() if k in self.suppl] @classmethod def buffers(self, supplier): """RE.buffers(supplier) -> string. not implemented yet.""" return @classmethod def is_comm(self): """RE.iscomm() -> bool. True if RE has suppliers.""" return True class Not_available(AbstractCut): """Implement the methods specific to the enzymes which are not commercially available. Internal use only. Not meant to be instantiated.""" @staticmethod def suppliers(): """RE.suppliers() -> print the suppliers of RE.""" return None @classmethod def supplier_list(self): """RE.supplier_list() -> list. list of the supplier names for RE.""" return [] @classmethod def buffers(self, supplier): """RE.buffers(supplier) -> string. not implemented yet.""" raise TypeError("Enzyme not commercially available.") @classmethod def is_comm(self): """RE.iscomm() -> bool. 
True if RE has suppliers.""" return False ############################################################################### # # # Restriction Batch # # # ############################################################################### class RestrictionBatch(set): def __init__(self, first=[], suppliers=[]): """RestrictionBatch([sequence]) -> new RestrictionBatch.""" first = [self.format(x) for x in first] first += [eval(x) for n in suppliers for x in suppliers_dict[n][1]] set.__init__(self, first) self.mapping = dict.fromkeys(self) self.already_mapped = None def __str__(self): if len(self) < 5: return '+'.join(self.elements()) else: return '...'.join(('+'.join(self.elements()[:2]),\ '+'.join(self.elements()[-2:]))) def __repr__(self): return 'RestrictionBatch(%s)' % self.elements() def __contains__(self, other): try: other = self.format(other) except ValueError : # other is not a restriction enzyme return False return set.__contains__(self, other) def __div__(self, other): return self.search(other) def __rdiv__(self, other): return self.search(other) def get(self, enzyme, add=False): """B.get(enzyme[, add]) -> enzyme class. if add is True and enzyme is not in B add enzyme to B. if add is False (which is the default) only return enzyme. if enzyme is not a RestrictionType or can not be evaluated to a RestrictionType, raise a ValueError.""" e = self.format(enzyme) if e in self: return e elif add: self.add(e) return e else: raise ValueError('enzyme %s is not in RestrictionBatch' \ % e.__name__) def lambdasplit(self, func): """B.lambdasplit(func) -> RestrictionBatch . the new batch will contains only the enzymes for which func return True.""" d = [x for x in itertools.ifilter(func, self)] new = RestrictionBatch() new._data = dict(zip(d, [True]*len(d))) return new def add_supplier(self, letter): """B.add_supplier(letter) -> add a new set of enzyme to B. letter represents the suppliers as defined in the dictionary RestrictionDictionary.suppliers return None. raise a KeyError if letter is not a supplier code.""" supplier = suppliers_dict[letter] self.suppliers.append(letter) for x in supplier[1]: self.add_nocheck(eval(x)) return def current_suppliers(self): """B.current_suppliers() -> add a new set of enzyme to B. return a sorted list of the suppliers which have been used to create the batch.""" suppl_list = [suppliers_dict[x][0] for x in self.suppliers] suppl_list.sort() return suppl_list def __iadd__(self, other): """ b += other -> add other to b, check the type of other.""" self.add(other) return self def __add__(self, other): """ b + other -> new RestrictionBatch.""" new = self.__class__(self) new.add(other) return new def remove(self, other): """B.remove(other) -> remove other from B if other is a RestrictionType. Safe set.remove method. Verify that other is a RestrictionType or can be evaluated to a RestrictionType. raise a ValueError if other can not be evaluated to a RestrictionType. raise a KeyError if other is not in B.""" return set.remove(self, self.format(other)) def add(self, other): """B.add(other) -> add other to B if other is a RestrictionType. Safe set.add method. Verify that other is a RestrictionType or can be evaluated to a RestrictionType. raise a ValueError if other can not be evaluated to a RestrictionType. """ return set.add(self, self.format(other)) def add_nocheck(self, other): """B.add_nocheck(other) -> add other to B. don't check type of other. """ return set.add(self, other) def format(self, y): """B.format(y) -> RestrictionType or raise ValueError. 
if y is a RestrictionType return y if y can be evaluated to a RestrictionType return eval(y) raise a Value Error in all other case.""" try: if isinstance(y, RestrictionType): return y elif isinstance(eval(str(y)), RestrictionType): return eval(y) else: pass except (NameError, SyntaxError): pass raise ValueError('%s is not a RestrictionType' % y.__class__) def is_restriction(self, y): """B.is_restriction(y) -> bool. True is y or eval(y) is a RestrictionType.""" return isinstance(y, RestrictionType) or \ isinstance(eval(str(y)), RestrictionType) def split(self, *classes, **bool): """B.split(class, [class.__name__ = True]) -> new RestrictionBatch. it works but it is slow, so it has really an interest when splitting over multiple conditions.""" def splittest(element): for klass in classes: b = bool.get(klass.__name__, True) if issubclass(element, klass): if b: continue else: return False elif b: return False else: continue return True d = [k for k in itertools.ifilter(splittest, self)] new = RestrictionBatch() new._data = dict(zip(d, [True]*len(d))) return new def elements(self): """B.elements() -> tuple. give all the names of the enzymes in B sorted alphabetically.""" l = [str(e) for e in self] l.sort() return l def as_string(self): """B.as_string() -> list. return a list of the name of the elements of B.""" return [str(e) for e in self] @classmethod def suppl_codes(self): """B.suppl_codes() -> dict letter code for the suppliers""" supply = dict([(k,v[0]) for k,v in suppliers_dict.iteritems()]) return supply @classmethod def show_codes(self): "B.show_codes() -> letter codes for the suppliers""" supply = [' = '.join(i) for i in self.suppl_codes().iteritems()] print '\n'.join(supply) return def search(self, dna, linear=True): """B.search(dna) -> dict.""" # # here we replace the search method of the individual enzymes # with one unique testing method. # if not hasattr(self, "already_mapped") : #TODO - Why does this happen! #Try the "doctest" at the start of PrintFormat.py self.already_mapped = None if isinstance(dna, DNA): # For the searching, we just care about the sequence as a string, # if that is the same we can use the cached search results. # At the time of writing, Seq == method isn't implemented, # and therefore does object identity which is stricter. if (str(dna), linear) == self.already_mapped: return self.mapping else: self.already_mapped = str(dna), linear fseq = FormattedSeq(dna, linear) self.mapping = dict([(x, x.search(fseq)) for x in self]) return self.mapping elif isinstance(dna, FormattedSeq): if (str(dna), dna.linear) == self.already_mapped: return self.mapping else: self.already_mapped = str(dna), dna.linear self.mapping = dict([(x, x.search(dna)) for x in self]) return self.mapping raise TypeError("Expected Seq or MutableSeq instance, got %s instead"\ %type(dna)) ############################################################################### # # # Restriction Analysis # # # ############################################################################### class Analysis(RestrictionBatch, PrintFormat): def __init__(self, restrictionbatch=RestrictionBatch(),sequence=DNA(''), linear=True): """Analysis([restrictionbatch [, sequence] linear=True]) -> New Analysis class. For most of the method of this class if a dictionary is given it will be used as the base to calculate the results. 
If no dictionary is given, a new analysis is run using the
        RestrictionBatch which was given when the Analysis class was
        instantiated."""
        RestrictionBatch.__init__(self, restrictionbatch)
        self.rb = restrictionbatch
        self.sequence = sequence
        self.linear = linear
        if self.sequence:
            self.search(self.sequence, self.linear)

    def __repr__(self):
        return 'Analysis(%s,%s,%s)' % \
               (repr(self.rb), repr(self.sequence), self.linear)

    def _sub_set(self, wanted):
        """A._sub_set(other_set) -> dict.

        Internal use only. Screen the results through the wanted set.
        Keep only the results for which the enzyme is in the wanted set.
        """
        return dict([(k, v) for k, v in self.mapping.iteritems()
                     if k in wanted])

    def _boundaries(self, start, end):
        """A._boundaries(start, end) -> tuple.

        Format the boundaries for use with the methods that limit the
        search to only part of the sequence given to analyse.
        """
        if not isinstance(start, int):
            raise TypeError('expected int, got %s instead' % type(start))
        if not isinstance(end, int):
            raise TypeError('expected int, got %s instead' % type(end))
        if start < 1:
            start += len(self.sequence)
        if end < 1:
            end += len(self.sequence)
        # NB: on circular sequences start may legitimately be greater than
        # end; _test_reverse handles the wrap-around, so the two values are
        # deliberately not swapped here.
        if start < end:
            return start, end, self._test_normal
        else:
            return start, end, self._test_reverse

    def _test_normal(self, start, end, site):
        """A._test_normal(start, end, site) -> bool.

        Internal use only. Test if site is in between start and end.
        """
        return start <= site < end

    def _test_reverse(self, start, end, site):
        """A._test_reverse(start, end, site) -> bool.

        Internal use only. Test if site is in between end and start
        (for circular sequences).
        """
        return start <= site <= len(self.sequence) or 1 <= site < end

    def print_that(self, dct=None, title='', s1=''):
        """A.print_that([dct[, title[, s1]]]) -> print the results from dct.

        If dct is not given the full dictionary is used.
        """
        if not dct:
            dct = self.mapping
        print
        return PrintFormat.print_that(self, dct, title, s1)

    def change(self, **what):
        """A.change(**attribute_name) -> Change attribute of Analysis.

        It is possible to change the width of the shell by setting
        self.ConsoleWidth to what you want. self.NameWidth refers to the
        maximal length of the enzyme name.

        Changing one of these parameters here might not give the results
        you expect. In which case, you can settle back to a 80 columns
        shell or try to change self.Cmodulo and self.PrefWidth in
        PrintFormat until you get it right."""
        for k, v in what.iteritems():
            if k in ('NameWidth', 'ConsoleWidth'):
                setattr(self, k, v)
                self.Cmodulo = self.ConsoleWidth % self.NameWidth
                self.PrefWidth = self.ConsoleWidth - self.Cmodulo
            elif k == 'sequence':
                setattr(self, 'sequence', v)
                self.search(self.sequence, self.linear)
            elif k == 'rb':
                Analysis.__init__(self, v, self.sequence, self.linear)
            elif k == 'linear':
                setattr(self, 'linear', v)
                self.search(self.sequence, v)
            elif k in ('Indent', 'Maxsize'):
                setattr(self, k, v)
            elif k in ('Cmodulo', 'PrefWidth'):
                raise AttributeError(
                    'To change %s, change NameWidth and/or ConsoleWidth' % k)
            else:
                raise AttributeError('Analysis has no attribute %s' % k)
        return

    def full(self, linear=True):
        """A.full() -> dict.

        Full Restriction Map of the sequence."""
        return self.mapping

    def blunt(self, dct=None):
        """A.blunt([dct]) -> dict.

        Only the enzymes which have a blunt restriction site."""
        if not dct:
            dct = self.mapping
        return dict([(k, v) for k, v in dct.iteritems() if k.is_blunt()])

    def overhang5(self, dct=None):
        """A.overhang5([dct]) -> dict.

        Only the enzymes which have a 5' overhang restriction site."""
        if not dct:
            dct = self.mapping
        return dict([(k, v) for k, v in dct.iteritems()
                     if k.is_5overhang()])

    def overhang3(self, dct=None):
        """A.overhang3([dct]) -> dict.

        Only the enzymes which have a 3' overhang restriction site."""
        if not dct:
            dct = self.mapping
        return dict([(k, v) for k, v in dct.iteritems()
                     if k.is_3overhang()])

    def defined(self, dct=None):
        """A.defined([dct]) -> dict.

        Only the enzymes that have a defined restriction site in Rebase."""
        if not dct:
            dct = self.mapping
        return dict([(k, v) for k, v in dct.iteritems() if k.is_defined()])

    def with_sites(self, dct=None):
        """A.with_sites([dct]) -> dict.

        Enzymes which have at least one site in the sequence."""
        if not dct:
            dct = self.mapping
        return dict([(k, v) for k, v in dct.iteritems() if v])

    def without_site(self, dct=None):
        """A.without_site([dct]) -> dict.

        Enzymes which have no site in the sequence."""
        if not dct:
            dct = self.mapping
        return dict([(k, v) for k, v in dct.iteritems() if not v])

    def with_N_sites(self, N, dct=None):
        """A.with_N_sites(N [, dct]) -> dict.

        Enzymes which cut the sequence N times."""
        if not dct:
            dct = self.mapping
        return dict([(k, v) for k, v in dct.iteritems() if len(v) == N])

    def with_number_list(self, list, dct=None):
        """A.with_number_list(list [, dct]) -> dict.

        Enzymes whose number of sites is in the given list."""
        if not dct:
            dct = self.mapping
        return dict([(k, v) for k, v in dct.iteritems() if len(v) in list])

    def with_name(self, names, dct=None):
        """A.with_name(list_of_names [, dct]) ->

        Limit the search to the enzymes named in list_of_names."""
        for enzyme in names:
            if not enzyme in AllEnzymes:
                print "no data for the enzyme:", str(enzyme)
        names = [enzyme for enzyme in names if enzyme in AllEnzymes]
        if not dct:
            return RestrictionBatch(names).search(self.sequence)
        return dict([(n, dct[n]) for n in names if n in dct])

    def with_site_size(self, site_size, dct=None):
        """A.with_site_size(site_size [, dct]) ->

        Limit the search to the enzymes whose site is of size
        <site_size>."""
        sites = [name for name in self if name.size == site_size]
        if not dct:
            return RestrictionBatch(sites).search(self.sequence)
        return dict([(k, v) for k, v in dct.iteritems() if k in sites])

    def only_between(self, start, end, dct=None):
        """A.only_between(start, end[, dct]) -> dict.

        Enzymes that cut the sequence only in between start and end."""
        start, end, test = self._boundaries(start, end)
        if not dct:
            dct = self.mapping
        d = dict(dct)
        for key, sites in dct.iteritems():
            if not sites:
                del d[key]
                continue
            for site in sites:
                if test(start, end, site):
                    continue
                else:
                    del d[key]
                    break
        return d

    def between(self, start, end, dct=None):
        """A.between(start, end [, dct]) -> dict.

        Enzymes that cut the sequence at least in between start and end.
        They may cut outside as well."""
        start, end, test = self._boundaries(start, end)
        d = {}
        if not dct:
            dct = self.mapping
        for key, sites in dct.iteritems():
            for site in sites:
                if test(start, end, site):
                    d[key] = sites
                    break
                continue
        return d

    def show_only_between(self, start, end, dct=None):
        """A.show_only_between(start, end [, dct]) -> dict.

        Enzymes that cut the sequence in between start and end; only the
        sites that fall in between start and end are reported."""
        d = []
        if start <= end:
            d = [(k, [vv for vv in v if start <= vv <= end])
                 for k, v in self.between(start, end, dct).iteritems()]
        else:
            d = [(k, [vv for vv in v if start <= vv or vv <= end])
                 for k, v in self.between(start, end, dct).iteritems()]
        return dict(d)

    def only_outside(self, start, end, dct=None):
        """A.only_outside(start, end [, dct]) -> dict.
Enzymes that cut the sequence outside of the region in between start and end but do not cut inside.""" start, end, test = self._boundaries(start, end) if not dct : dct = self.mapping d = dict(dct) for key, sites in dct.iteritems(): if not sites: del d[key] continue for site in sites: if test(start, end, site): del d[key] break else: continue return d def outside(self, start, end, dct=None): """A.outside((start, end [, dct]) -> dict. Enzymes that cut outside the region in between start and end. No test is made to know if they cut or not inside this region.""" start, end, test = self._boundaries(start, end) if not dct: dct = self.mapping d = {} for key, sites in dct.iteritems(): for site in sites: if test(start, end, site): continue else: d[key] = sites break return d def do_not_cut(self, start, end, dct = None): """A.do_not_cut(start, end [, dct]) -> dict. Enzymes that do not cut the region in between start and end.""" if not dct: dct = self.mapping d = self.without_site() d.update(self.only_outside(start, end, dct)) return d # # The restriction enzyme classes are created dynamically when the module is # imported. Here is the magic which allow the creation of the # restriction-enzyme classes. # # The reason for the two dictionaries in Restriction_Dictionary # one for the types (which will be called pseudo-type as they really # correspond to the values that instances of RestrictionType can take) # and one for the enzymes is efficiency as the bases are evaluated # once per pseudo-type. # # However Restriction is still a very inefficient module at import. But # remember that around 660 classes (which is more or less the size of Rebase) # have to be created dynamically. However, this processing take place only # once. # This inefficiency is however largely compensated by the use of metaclass # which provide a very efficient layout for the class themselves mostly # alleviating the need of if/else loops in the class methods. # # It is essential to run Restriction with doc string optimisation (-OO switch) # as the doc string of 660 classes take a lot of processing. # CommOnly = RestrictionBatch() # commercial enzymes NonComm = RestrictionBatch() # not available commercially for TYPE, (bases, enzymes) in typedict.iteritems(): # # The keys are the pseudo-types TYPE (stored as type1, type2...) # The names are not important and are only present to differentiate # the keys in the dict. All the pseudo-types are in fact RestrictionType. # These names will not be used after and the pseudo-types are not # kept in the locals() dictionary. It is therefore impossible to # import them. # Now, if you have look at the dictionary, you will see that not all the # types are present as those without corresponding enzymes have been # removed by Dictionary_Builder(). # # The values are tuples which contain # as first element a tuple of bases (as string) and # as second element the names of the enzymes. # # First eval the bases. # bases = tuple([eval(x) for x in bases]) # # now create the particular value of RestrictionType for the classes # in enzymes. # T = type.__new__(RestrictionType, 'RestrictionType', bases, {}) for k in enzymes: # # Now, we go through all the enzymes and assign them their type. # enzymedict[k] contains the values of the attributes for this # particular class (self.site, self.ovhg,....). # newenz = T(k, bases, enzymedict[k]) # # we add the enzymes to the corresponding batch. 
# # No need to verify the enzyme is a RestrictionType -> add_nocheck # if newenz.is_comm() : CommOnly.add_nocheck(newenz) else : NonComm.add_nocheck(newenz) # # AllEnzymes is a RestrictionBatch with all the enzymes from Rebase. # AllEnzymes = CommOnly | NonComm # # Now, place the enzymes in locals so they can be imported. # names = [str(x) for x in AllEnzymes] try: del x except NameError: #Scoping changed in Python 3, the variable isn't leaked pass locals().update(dict(zip(names, AllEnzymes))) __all__=['FormattedSeq', 'Analysis', 'RestrictionBatch','AllEnzymes','CommOnly','NonComm']+names del k, enzymes, TYPE, bases, names
bryback/quickseq
genescript/Bio/Restriction/Restriction.py
Python
mit
81,213
[ "Biopython" ]
786c242fd937c13ab61c7984d90a4dccf26fe657441eeb3fb6f8abe9feb912af
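Before moving on, a minimal usage sketch for the Restriction module above. It assumes this vendored copy behaves like Biopython's public Bio.Restriction API (enzyme classes such as EcoRI are generated at import time by the module-level machinery shown above); the sequence and the annotated outputs are illustrative only, and the code is Python 2 to match the module.

from Bio.Seq import Seq
from Bio.Restriction import EcoRI, KpnI, EcoRV, RestrictionBatch, Analysis

seq = Seq("GAATTCACGGTACCGGATATCT")   # hypothetical sequence with one site per enzyme
print EcoRI.elucidate()               # 'G^AATT_C', per the docstring above
rb = RestrictionBatch([EcoRI, KpnI, EcoRV])
print rb.search(seq)                  # dict mapping each enzyme to its cut positions
ana = Analysis(rb, seq, linear=True)
print ana.blunt()                     # only the blunt cutter (EcoRV) survives the filter
ana.print_that()                      # formatted report via PrintFormat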
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting field 'PreviousObstetricHistory.year' db.delete_column(u'patient_previousobstetrichistory', 'year') # Adding field 'PreviousObstetricHistory.dob' db.add_column(u'patient_previousobstetrichistory', 'dob', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 10, 19, 0, 0)), keep_default=False) def backwards(self, orm): # Adding field 'PreviousObstetricHistory.year' db.add_column(u'patient_previousobstetrichistory', 'year', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 10, 19, 0, 0)), keep_default=False) # Deleting field 'PreviousObstetricHistory.dob' db.delete_column(u'patient_previousobstetrichistory', 'dob') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'patient.additionalpatientinformation': { 'Meta': {'object_name': 'AdditionalPatientInformation'}, 'alcohol': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'cigarettes': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'cooking_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'literate': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'other_harmful_substances': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'psychological_stress': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'toilet_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, u'patient.familymedicalhistory': { 'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'Meta': {'object_name': 'FamilyMedicalHistory'}, 'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateField', [], {}), 'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, u'patient.guardian': { 'Meta': {'object_name': 'Guardian'}, 
'contact_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'home_address': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'job': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'relation': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, u'patient.gynaecologicalhistory': { 'Meta': {'object_name': 'GynaecologicalHistory'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_of_last_pap_smear': ('django.db.models.fields.DateField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'method_of_birth_control': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'result_pap_smear': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, u'patient.immunizationhistory': { 'Meta': {'object_name': 'ImmunizationHistory'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'others_injection': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'tetanus_toxoid1': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'tetanus_toxoid2': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'tetanus_toxoid3': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'vaccination': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, u'patient.laboratorytest': { 'Meta': {'object_name': 'LaboratoryTest'}, 'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateField', [], {}), 'hemoglobin': ('django.db.models.fields.CharField', [], {'max_length': '1'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'serological_test_for_syphilis': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'urinalysis': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, u'patient.medicalhistory': { 'Meta': {'object_name': 'MedicalHistory'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 
'True'}), 'family_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.FamilyMedicalHistory']"}), 'gynaecological_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.GynaecologicalHistory']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'immunization_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ImmunizationHistory']"}), 'menstrual_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.MenstrualHistory']"}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ObstetricHistory']"}), 'past_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PastMedicalHistory']"}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'present_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PresentMedicalHistory']"}) }, u'patient.menstrualhistory': { 'Meta': {'object_name': 'MenstrualHistory'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'day_of_visit': ('django.db.models.fields.DateField', [], {}), 'expected_date_of_delivery': ('django.db.models.fields.DateField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_menstrual_periods': ('django.db.models.fields.DateField', [], {}), 'menstrual_cycle': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'poa_by_lmp': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'patient.obstetrichistory': { 'Meta': {'object_name': 'ObstetricHistory'}, 'check_if_you_have_been_miscarriages': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '2'}), 'check_if_you_have_been_pregnant': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '2'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}) }, u'patient.pastmedicalhistory': { 'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'Meta': {'object_name': 'PastMedicalHistory'}, 'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateField', [], {}), 'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'hypertension': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, u'patient.patientinformation': { 'Meta': {'object_name': 'PatientInformation'}, 'address': ('django.db.models.fields.TextField', [], {}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_of_birth': ('django.db.models.fields.DateField', [], {}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}) }, u'patient.prescription': { 'Meta': {'object_name': 'Prescription'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name_of_prescription': ('django.db.models.fields.TextField', [], {}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}) }, u'patient.presentmedicalhistory': { 'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'Meta': {'object_name': 'PresentMedicalHistory'}, 'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateField', [], {}), 'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, u'patient.previousobstetrichistory': { 'Meta': {'object_name': 'PreviousObstetricHistory'}, 'age_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'birth_weight': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'dob': ('django.db.models.fields.DateField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'length_of_pregnancy': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'obstetrical_operation': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'periods_of_exclusive_feeding': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'problems': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'types_of_delivery': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, u'patient.previoussurgery': { 'Meta': {'object_name': 'PreviousSurgery'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'endometriosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'fibrocystic_breasts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'ovarian_cysts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'patient': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'uterine_fibroids': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, u'patient.report': { 'Meta': {'object_name': 'Report'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'diabetis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'hiv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'pregnancy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, u'patient.routinecheckup': { 'Meta': {'object_name': 'Routinecheckup'}, 'abdominal_changes': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'blood_pressure': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'chest_and_heart_auscultation': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateField', [], {}), 'fetal_movement': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'height': ('django.db.models.fields.CharField', [], {'max_length': '200'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name_of_examiner': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'symptom_events': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'uterine_height': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'vaginal_examination': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'visit': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'weight': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'patient.signanaemia': { 'Meta': {'object_name': 'Signanaemia'}, 'conjunctiva': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'fingernails': ('django.db.models.fields.CharField', [], {'max_length': '200'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'oral_mucosa': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'pale_complexion': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'shortness_of_breath': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'tip_of_tongue': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'patient.ultrasoundscanning': { 'AC': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'BPD': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'CRL': ('django.db.models.fields.CharField', 
[], {'max_length': '10'}), 'FL': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'HC': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'Meta': {'object_name': 'UltrasoundScanning'}, 'amount_of_amniotic_fluid': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateField', [], {}), 'gestation_age': ('django.db.models.fields.CharField', [], {'max_length': '40'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name_examiner': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}), 'position_of_the_baby': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'position_of_the_placenta': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'saved_ultrasound_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}) } } complete_apps = ['patient']
aazhbd/medical_info01
patient/migrations/0025_auto__del_field_previousobstetrichistory_year__add_field_previousobste.py
Python
bsd-3-clause
30,003
[ "VisIt" ]
c57f09124ec1a5b97e545bd592218f62c37568fd3cc243a403a6588b83931371
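The migration above drops the old 'year' column before creating 'dob', so any existing 'year' values are lost. Below is a hedged sketch of a data-preserving forwards(), assuming the old 'year' column actually held full dates worth copying; the copy loop is hypothetical, while db.add_column, db.delete_column, db.dry_run and the frozen orm accessor are standard South facilities used exactly as in the file.

    def forwards(self, orm):
        # Create the new column first so existing rows can be copied into it
        db.add_column(u'patient_previousobstetrichistory', 'dob',
                      self.gf('django.db.models.fields.DateField')(
                          default=datetime.datetime(2013, 10, 19, 0, 0)),
                      keep_default=False)
        if not db.dry_run:
            # Hypothetical copy step: assumes the old 'year' values are usable dates
            for row in orm['patient.PreviousObstetricHistory'].objects.all():
                row.dob = row.year
                row.save()
        # Only now drop the old column
        db.delete_column(u'patient_previousobstetrichistory', 'year')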
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RAffyrnadegradation(RPackage):
    """The package helps with the assessment and correction of RNA
    degradation effects in Affymetrix 3' expression arrays. The parameter d
    gives a robust and accurate measure of RNA integrity. The correction
    removes the probe positional bias, and thus improves comparability of
    samples that are affected by RNA degradation."""

    homepage = "https://www.bioconductor.org/packages/AffyRNADegradation/"
    git      = "https://git.bioconductor.org/packages/AffyRNADegradation.git"

    version('1.22.0', commit='0fa78f8286494711a239ded0ba587b0de47c15d3')

    depends_on('r@3.4.0:3.4.9', when='@1.22.0')
    depends_on('r-affy', type=('build', 'run'))
mfherbst/spack
var/spack/repos/builtin/packages/r-affyrnadegradation/package.py
Python
lgpl-2.1
1,966
[ "Bioconductor" ]
b078d2570cd2118897ee6b94de35b76b46f05157b3a31de365e6fc87aa895f7a
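The Spack recipe above follows the Bioconductor convention: each package version is pinned to a commit in the Bioconductor git mirror and constrained to a matching R series via when=. A sketch of how a later release would be added inside the same class body; the version number, commit hash and R range below are placeholders, not real release data.

    # Hypothetical additions to the class body above:
    version('1.24.0', commit='0000000000000000000000000000000000000000')  # placeholder sha
    depends_on('r@3.5.0:3.5.9', when='@1.24.0')  # placeholder R series for that release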
from .sets import *
import math
import random
import numpy as np


class TimeBiasSet(BoxSet):
    """A set for an integration time variable that samples more-or-less
    uniformly from the *outcome* state, given a maximum integration duration
    and a space of controls uspace.

    It assumes the next state is obtained by the integral
    int[0,T] f(x(t),u) dt and the function f(x,u) is not degenerate.
    With this assumption, the volume of the reachable set grows
    proportionally to T^d where d is the control dimension.  Hence, the
    sampler samples T from the range [0,tmax] according to the distribution
    U(0,1)^(1/d)*tmax.  In practice, this places more samples toward the
    tail end of the integration region.
    """
    def __init__(self, tmax, uspace):
        BoxSet.__init__(self, [0], [tmax])
        self.tmax = tmax
        self.controlDimension = len(uspace.sample())
        #if self.controlDimension == 1:
        #    self.controlDimension = 2

    def sample(self):
        #plain time sampling
        #return [random.uniform(0,self.tmax)]
        #sampling from upper half
        #return [random.uniform(self.tmax*0.5,self.tmax)]
        #sampling with a power law bias
        return [math.pow(random.random(), 1.0/self.controlDimension)*self.tmax]


class BoxBiasSet(BoxSet):
    """A set that samples a box near its extrema, helpful for bang-bang
    control.

    Assume the box is [-1,1]^d.  A dimension k is picked, and a variable s
    is sampled by s = rand()^(1/c) where rand() samples uniformly from
    [0,1].  Then its sign is randomly flipped with probability 0.5.  u[k]
    is then set to s.  The remaining dimensions are sampled as usual.

    To get back to an arbitrary box the range [-1,1]^d is simply scaled.
    """
    def __init__(self, bmin, bmax, concentration=3):
        BoxSet.__init__(self, bmin, bmax)
        if math.isinf(concentration):
            self.power = 0
        else:
            self.power = 1.0/concentration

    def sample(self):
        res = BoxSet.sample(self)
        d = random.randint(0, len(res)-1)
        (a, b) = (self.bmin[d], self.bmax[d])
        sign = random.choice([-1, 1])
        s = math.pow(random.random(), self.power)
        res[d] = (a+b)*0.5 + sign*(b-a)*0.5*s
        return res


class InfiniteBiasSet(Set):
    """An infinite set of dimension d.  The variable is sampled from a
    multivariate gaussian distribution with the given concentration
    parameter.
    """
    def __init__(self, d, concentration=1.0):
        self.d = d
        self.concentration = concentration

    def __str__(self):
        return self.__class__.__name__+" of dim "+str(self.d)

    def dimension(self):
        return self.d

    def bounds(self):
        return None

    def sample(self):
        return (np.random.randn(self.d)/self.concentration).tolist()

    def contains(self, x):
        assert len(x) == self.d
        return True

    def project(self, x):
        return x

    def signedDistance(self, x):
        return -float('inf')

    def signedDistance_gradient(self, x):
        return np.zeros(len(x))
krishauser/pyOptimalMotionPlanning
pomp/spaces/biassets.py
Python
apache-2.0
3,155
[ "Gaussian" ]
df512f40d599911cdb7ae2df77ec02bbeece97a214d234b98ea27c307f3438ae
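TimeBiasSet's sampler above is a textbook inverse-CDF construction: if the reachable volume grows like T^d, then drawing T with cumulative distribution (t/tmax)^d spreads the samples uniformly over outcomes, and tmax*U^(1/d) with U uniform on [0,1] realizes exactly that CDF. A small self-contained check of this claim, using nothing beyond the standard library; the particular d, tmax and t values are arbitrary.

import math
import random

d, tmax, n = 3, 2.0, 200000   # control dimension, time horizon, sample count
draws = [math.pow(random.random(), 1.0 / d) * tmax for _ in range(n)]
t = 1.2
empirical = sum(1 for s in draws if s <= t) / float(n)
exact = (t / tmax) ** d
print("empirical %.4f vs exact %.4f" % (empirical, exact))  # should nearly agree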
#!/usr/bin/env python3 # # Copyright (c) 2017 Weitian LI <weitian@aaronly.me> # MIT License # """ Create new randomized coordinates by adding random offset to the existing OSKAR sky model (i.e., osm), and replace original coordinates with the specified new ones. """ import os import argparse import numpy as np class OskarSkyModel: """ OSKAR sky model """ def __init__(self, infile): self.filename = infile self.header = self.get_header(infile) self.load_data(infile) @staticmethod def get_header(infile): """ Read the OSM header lines """ header = [] with open(infile) as f: for line in f.readlines(): if line[0] == "#": header.append(line) else: break print("Read OSM header:\n%s" % "".join(header)) return header def load_data(self, infile): try: data = np.loadtxt(infile) except ValueError: data = np.loadtxt(infile, delimiter=",") self.ra = data[:, 0] # [deg] self.dec = data[:, 1] # [deg] self.flux = data[:, 2] # [Jy] self.number = len(self.flux) print("Loaded OSM data from file: %s" % infile) def randomize_coord(self, sigma): """ Randomize the coordinates by adding an offset sampling from a Gaussian of sigma. """ self.offset_sigma = sigma/3600.0 # [arcsec] -> [deg] print("Random offset: %.1f [arcsec] == %.6f [deg]" % (sigma, self.offset_sigma)) self.ra += np.random.normal(loc=0.0, scale=self.offset_sigma, size=self.number) self.dec += np.random.normal(loc=0.0, scale=self.offset_sigma, size=self.number) print("Generated randomized coordinates") def replace_coord(self, coordfile): """ Replace the coordinates with the data from the given coordinate file. """ try: coord_new = np.loadtxt(coordfile) except ValueError: coord_new = np.loadtxt(coordfile, delimiter=",") ra_new = coord_new[:, 0] dec_new = coord_new[:, 1] if self.number != len(ra_new): raise RuntimeError("invalid coordinate file: %s" % coordfile) self.ra = ra_new self.dec = dec_new print("Replaced coordinates") def save_data(self, data, outfile, clobber=False): if os.path.exists(outfile) and not clobber: raise OSError("file already exists: %s" % outfile) with open(outfile, "wb") as fb: for line in self.header: fb.write(line.encode("utf-8")) np.savetxt(fb, data) def save_coord(self, outfile, clobber=False): data = np.column_stack([self.ra, self.dec]) self.save_data(data=data, outfile=outfile, clobber=clobber) print("Wrote coordinates to file: %s" % outfile) def save_osm(self, outfile, clobber=False): data = np.column_stack([self.ra, self.dec, self.flux]) self.save_data(data=data, outfile=outfile, clobber=clobber) print("Wrote OSM to file: %s" % outfile) def cmd_create(args): """ sub-command: create - create randomized coordinates """ osm = OskarSkyModel(args.infile) osm.randomize_coord(sigma=args.sigma) osm.save_coord(outfile=args.coordfile, clobber=args.clobber) def cmd_replace(args): """ sub-command: create - create randomized coordinates """ osm = OskarSkyModel(args.infile) osm.replace_coord(coordfile=args.coordfile) if not args.nobackup: backfile = args.infile + ".bak" os.rename(args.infile, backfile) print("Backed up OSM as: %s" % backfile) osm.save_osm(outfile=args.infile, clobber=True) def main(): parser = argparse.ArgumentParser( description="Randomize OSKAR sky model source coordinates") subparsers = parser.add_subparsers(dest="subparser_name", title="sub-commands", help="additional help") # sub-command: "create" parser_create = subparsers.add_parser( "create", help="create randomized coordinates") parser_create.add_argument("-C", "--clobber", dest="clobber", action="store_true", help="overwrite existing output file") 
parser_create.add_argument("-s", "--sigma", dest="sigma", required=True, type=float, help="random offset sigma [arcsec]") parser_create.add_argument("-c", "--coord-file", dest="coordfile", required=True, help="output coordinate file") parser_create.add_argument("infile", help="input OSKAR sky model") parser_create.set_defaults(func=cmd_create) # sub-command: "replace" parser_replace = subparsers.add_parser( "replace", help="replace coordinates of OSM") parser_replace.add_argument("-B", "--no-backup", dest="nobackup", action="store_true", help="do NOT backup original OSM") parser_replace.add_argument("-c", "--coord-file", dest="coordfile", required=True, help="file of new coordinates") parser_replace.add_argument("infile", help="input OSKAR sky model") parser_replace.set_defaults(func=cmd_replace) # args = parser.parse_args() args.func(args) if __name__ == "__main__": main()
liweitianux/atoolbox
astro/oskar/osm-randomize-coord.py
Python
mit
5,722
[ "Gaussian" ]
5b0c798c9ccaf43dd762ec3985251e954830da6e1b65b3fa5f680a81c74eebdf
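The OSKAR script above is written as a CLI, but its class can also be driven directly, which is handy inside a larger pipeline. A minimal sketch using only methods defined in the file; the file names are hypothetical and the 2-arcsec sigma is just an example value.

osm = OskarSkyModel("sky.osm")              # hypothetical input sky model
osm.randomize_coord(sigma=2.0)              # Gaussian offsets, sigma in arcsec
osm.save_coord("coords_random.txt", clobber=True)
osm.replace_coord("coords_random.txt")      # or any (ra, dec) table of equal length
osm.save_osm("sky_randomized.osm", clobber=True)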
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase

import os
import pytest
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca.data.pandas import read_csv
from zoo.orca.data import SparkXShards
from zoo.orca.learn.pytorch import Estimator
from zoo.orca.learn.metrics import Accuracy
from zoo.orca.learn.trigger import EveryEpoch
from zoo.orca.learn.optimizers import SGD
from zoo.orca.learn.optimizers.schedule import Default
from zoo.orca import OrcaContext
import tempfile

resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")


class TestEstimatorForSpark(TestCase):
    def setUp(self):
        """
        Set up any state tied to the execution of the given method in a class.
        setup_method is invoked for every test method of a class.
        """
        self.sc = init_orca_context(cores=4)

    def tearDown(self):
        """
        Tear down any state that was previously set up with a setup_method call.
        """
        stop_orca_context()

    def test_bigdl_pytorch_estimator_shard(self):
        class SimpleModel(nn.Module):
            def __init__(self):
                super(SimpleModel, self).__init__()
                self.fc = nn.Linear(2, 2)

            def forward(self, x):
                x = self.fc(x)
                return F.log_softmax(x, dim=1)

        model = SimpleModel()

        def loss_func(input, target):
            return nn.CrossEntropyLoss().forward(input, target.flatten().long())

        def transform(df):
            result = {
                "x": np.stack([df['user'].to_numpy(), df['item'].to_numpy()], axis=1),
                "y": df['label'].to_numpy()
            }
            return result

        def transform_del_y(d):
            result = {"x": d["x"]}
            return result

        OrcaContext.pandas_read_backend = "pandas"
        file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
        data_shard = read_csv(file_path)
        data_shard = data_shard.transform_shard(transform)

        with tempfile.TemporaryDirectory() as temp_dir_name:
            estimator = Estimator.from_torch(model=model, loss=loss_func,
                                             metrics=[Accuracy()],
                                             optimizer=SGD(learningrate_schedule=Default()),
                                             model_dir=temp_dir_name)
            estimator.fit(data=data_shard, epochs=4, batch_size=2,
                          validation_data=data_shard,
                          checkpoint_trigger=EveryEpoch())
            state_dict1 = estimator.get_model().state_dict()

            estimator.evaluate(data_shard, batch_size=2)

            est2 = Estimator.from_torch(model=model, loss=loss_func,
                                        metrics=[Accuracy()],
                                        optimizer=None)
            est2.load_orca_checkpoint(temp_dir_name)
            state_dict2 = est2.get_model().state_dict()

            for name in state_dict1:
                para1 = state_dict1[name]
                para2 = state_dict2[name]
                assert torch.all(torch.eq(para1, para2)), "After reloading the model, " \
                                                          "%r does not match" % name

            est2.fit(data=data_shard, epochs=8, batch_size=2,
                     validation_data=data_shard,
                     checkpoint_trigger=EveryEpoch())
            est2.evaluate(data_shard, batch_size=2)
            pred_result = est2.predict(data_shard)
            pred_c = pred_result.collect()
            # predict should return an XShards; the original bare
            # assert(pred_result, SparkXShards) asserted a non-empty tuple
            # and therefore always passed.
            assert isinstance(pred_result, SparkXShards)
            pred_shard = data_shard.transform_shard(transform_del_y)
            pred_result2 = est2.predict(pred_shard)
            pred_c_2 = pred_result2.collect()
            assert (pred_c[0]["prediction"] == pred_c_2[0]["prediction"]).all()

    def test_bigdl_pytorch_estimator_pandas_dataframe(self):
        class SimpleModel(nn.Module):
            def __init__(self):
                super(SimpleModel, self).__init__()
                self.fc = nn.Linear(1, 10)

            def forward(self, x):
                x = torch.unsqueeze(x, dim=1)
                x = self.fc(x)
                return F.log_softmax(x, dim=1)

        def loss_func(input, target):
            return nn.CrossEntropyLoss().forward(input, target.flatten().long())

        model = SimpleModel()

        OrcaContext.pandas_read_backend = "pandas"
        file_path = os.path.join(resource_path, "orca/learn/simple_feature_label.csv")
        data_shard = read_csv(file_path)

        with tempfile.TemporaryDirectory() as temp_dir_name:
            estimator = Estimator.from_torch(model=model, loss=loss_func,
                                             metrics=[Accuracy()],
                                             optimizer=SGD(learningrate_schedule=Default()),
                                             model_dir=temp_dir_name)
            estimator.fit(data=data_shard, epochs=1, batch_size=4,
                          feature_cols=['feature'], label_cols=['label'],
                          validation_data=data_shard,
                          checkpoint_trigger=EveryEpoch())
            estimator.evaluate(data_shard, batch_size=4,
                               feature_cols=['feature'], label_cols=['label'])
            est2 = Estimator.from_torch(model=model, loss=loss_func,
                                        metrics=[Accuracy()], optimizer=None)
            est2.load_orca_checkpoint(temp_dir_name)
            est2.predict(data_shard, batch_size=4, feature_cols=['feature'])


if __name__ == "__main__":
    pytest.main([__file__])
intel-analytics/analytics-zoo
pyzoo/test/zoo/orca/learn/jep/test_pytorch_estimator_for_spark.py
Python
apache-2.0
6,309
[ "ORCA" ]
e55d4d7fe18fad01b0f7e8daecfd6bc641e9f8e584f90a4a870a2d1adcbe2601
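For readers unfamiliar with the XShards layout the test above depends on: transform_shard expects a function that maps each pandas partition to a dict of numpy arrays keyed "x" and "y". Below is a minimal, Spark-free sketch of that contract; the column names match the ncf.csv fixture the test reads, and the sample DataFrame is invented for illustration.

# Minimal sketch (plain pandas/numpy, no Spark needed) of the dict layout
# that transform_shard produces for the NCF test above. The DataFrame
# contents here are made-up sample values.
import numpy as np
import pandas as pd

df = pd.DataFrame({"user": [1, 2], "item": [3, 4], "label": [0, 1]})

def transform(df):
    return {
        "x": np.stack([df["user"].to_numpy(), df["item"].to_numpy()], axis=1),
        "y": df["label"].to_numpy(),
    }

shard = transform(df)
assert shard["x"].shape == (2, 2)   # one (user, item) pair per row
assert shard["y"].shape == (2,)     # one label per row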
""" View for Courseware Index """ # pylint: disable=attribute-defined-outside-init from datetime import datetime from django.conf import settings from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from django.core.context_processors import csrf from django.core.urlresolvers import reverse from django.http import Http404 from django.utils.decorators import method_decorator from django.utils.timezone import UTC from django.views.decorators.cache import cache_control from django.views.decorators.csrf import ensure_csrf_cookie from django.views.generic import View from django.shortcuts import redirect from courseware.url_helpers import get_redirect_url_for_global_staff from edxmako.shortcuts import render_to_response, render_to_string import logging import newrelic.agent import urllib from lang_pref import LANGUAGE_KEY from xblock.fragment import Fragment from opaque_keys.edx.keys import CourseKey from openedx.core.lib.gating import api as gating_api from openedx.core.lib.time_zone_utils import get_user_time_zone from openedx.core.djangoapps.user_api.preferences.api import get_user_preference from shoppingcart.models import CourseRegistrationCode from student.models import CourseEnrollment from student.views import is_course_blocked from student.roles import GlobalStaff from util.views import ensure_valid_course_key from xmodule.modulestore.django import modulestore from xmodule.x_module import STUDENT_VIEW from survey.utils import must_answer_survey from ..access import has_access, _adjust_start_date_for_beta_testers from ..access_utils import in_preview_mode from ..courses import get_studio_url, get_course_with_access from ..entrance_exams import ( course_has_entrance_exam, get_entrance_exam_content, get_entrance_exam_score, user_has_passed_entrance_exam, user_must_complete_entrance_exam, ) from ..exceptions import Redirect from ..masquerade import setup_masquerade from ..model_data import FieldDataCache from ..module_render import toc_for_course, get_module_for_descriptor from .views import get_current_child, registered_for_course log = logging.getLogger("edx.courseware.views.index") TEMPLATE_IMPORTS = {'urllib': urllib} CONTENT_DEPTH = 2 class CoursewareIndex(View): """ View class for the Courseware page. """ @method_decorator(login_required) @method_decorator(ensure_csrf_cookie) @method_decorator(cache_control(no_cache=True, no_store=True, must_revalidate=True)) @method_decorator(ensure_valid_course_key) def get(self, request, course_id, chapter=None, section=None, position=None): """ Displays courseware accordion and associated content. If course, chapter, and section are all specified, renders the page, or returns an error if they are invalid. If section is not specified, displays the accordion opened to the right chapter. If neither chapter or section are specified, displays the user's most recent chapter, or the first chapter if this is the user's first visit. 
Arguments: request: HTTP request course_id (unicode): course id chapter (unicode): chapter url_name section (unicode): section url_name position (unicode): position in module, eg of <sequential> module """ self.course_key = CourseKey.from_string(course_id) self.request = request self.original_chapter_url_name = chapter self.original_section_url_name = section self.chapter_url_name = chapter self.section_url_name = section self.position = position self.chapter, self.section = None, None self.url = request.path try: self._init_new_relic() self._clean_position() with modulestore().bulk_operations(self.course_key): self.course = get_course_with_access(request.user, 'load', self.course_key, depth=CONTENT_DEPTH) self.is_staff = has_access(request.user, 'staff', self.course) self._setup_masquerade_for_effective_user() return self._get() except Redirect as redirect_error: return redirect(redirect_error.url) except UnicodeEncodeError: raise Http404("URL contains Unicode characters") except Http404: # let it propagate raise except Exception: # pylint: disable=broad-except return self._handle_unexpected_error() def _setup_masquerade_for_effective_user(self): """ Setup the masquerade information to allow the request to be processed for the requested effective user. """ self.real_user = self.request.user self.masquerade, self.effective_user = setup_masquerade( self.request, self.course_key, self.is_staff, reset_masquerade_data=True ) # Set the user in the request to the effective user. self.request.user = self.effective_user def _get(self): """ Render the index page. """ self._redirect_if_needed_to_access_course() self._prefetch_and_bind_course() if self.course.has_children_at_depth(CONTENT_DEPTH): self._reset_section_to_exam_if_required() self.chapter = self._find_chapter() self.section = self._find_section() if self.chapter and self.section: self._redirect_if_not_requested_section() self._verify_section_not_gated() self._save_positions() self._prefetch_and_bind_section() return render_to_response('courseware/courseware.html', self._create_courseware_context()) def _redirect_if_not_requested_section(self): """ If the resulting section and chapter are different from what was initially requested, redirect back to the index page, but with an updated URL that includes the correct section and chapter values. We do this so that our analytics events and error logs have the appropriate URLs. """ if ( self.chapter.url_name != self.original_chapter_url_name or (self.original_section_url_name and self.section.url_name != self.original_section_url_name) ): raise Redirect( reverse( 'courseware_section', kwargs={ 'course_id': unicode(self.course_key), 'chapter': self.chapter.url_name, 'section': self.section.url_name, }, ) ) def _init_new_relic(self): """ Initialize metrics for New Relic so we can slice data in New Relic Insights """ newrelic.agent.add_custom_parameter('course_id', unicode(self.course_key)) newrelic.agent.add_custom_parameter('org', unicode(self.course_key.org)) def _clean_position(self): """ Verify that the given position is an integer. If it is not positive, set it to 1. """ if self.position is not None: try: self.position = max(int(self.position), 1) except ValueError: raise Http404(u"Position {} is not an integer!".format(self.position)) def _redirect_if_needed_to_access_course(self): """ Verifies that the user can enter the course. 
""" self._redirect_if_needed_to_pay_for_course() self._redirect_if_needed_to_register() self._redirect_if_needed_for_prereqs() self._redirect_if_needed_for_course_survey() def _redirect_if_needed_to_pay_for_course(self): """ Redirect to dashboard if the course is blocked due to non-payment. """ self.real_user = User.objects.prefetch_related("groups").get(id=self.real_user.id) redeemed_registration_codes = CourseRegistrationCode.objects.filter( course_id=self.course_key, registrationcoderedemption__redeemed_by=self.real_user ) if is_course_blocked(self.request, redeemed_registration_codes, self.course_key): # registration codes may be generated via Bulk Purchase Scenario # we have to check only for the invoice generated registration codes # that their invoice is valid or not log.warning( u'User %s cannot access the course %s because payment has not yet been received', self.real_user, unicode(self.course_key), ) raise Redirect(reverse('dashboard')) def _redirect_if_needed_to_register(self): """ Verify that the user is registered in the course. """ if not registered_for_course(self.course, self.effective_user): log.debug( u'User %s tried to view course %s but is not enrolled', self.effective_user, unicode(self.course.id) ) user_is_global_staff = GlobalStaff().has_user(self.effective_user) user_is_enrolled = CourseEnrollment.is_enrolled(self.effective_user, self.course_key) if user_is_global_staff and not user_is_enrolled: redirect_url = get_redirect_url_for_global_staff(self.course_key, _next=self.url) raise Redirect(redirect_url) raise Redirect(reverse('about_course', args=[unicode(self.course.id)])) def _redirect_if_needed_for_prereqs(self): """ See if all pre-requisites (as per the milestones app feature) have been fulfilled. Note that if the pre-requisite feature flag has been turned off (default) then this check will always pass. """ if not has_access(self.effective_user, 'view_courseware_with_prerequisites', self.course): # Prerequisites have not been fulfilled. # Therefore redirect to the Dashboard. log.info( u'User %d tried to view course %s ' u'without fulfilling prerequisites', self.effective_user.id, unicode(self.course.id)) raise Redirect(reverse('dashboard')) def _redirect_if_needed_for_course_survey(self): """ Check to see if there is a required survey that must be taken before the user can access the course. """ if must_answer_survey(self.course, self.effective_user): raise Redirect(reverse('course_survey', args=[unicode(self.course.id)])) def _reset_section_to_exam_if_required(self): """ Check to see if an Entrance Exam is required for the user. """ if ( course_has_entrance_exam(self.course) and user_must_complete_entrance_exam(self.request, self.effective_user, self.course) ): exam_chapter = get_entrance_exam_content(self.effective_user, self.course) if exam_chapter and exam_chapter.get_children(): exam_section = exam_chapter.get_children()[0] if exam_section: self.chapter_url_name = exam_chapter.url_name self.section_url_name = exam_section.url_name def _verify_section_not_gated(self): """ Verify whether the section is gated and accessible to the user. """ gated_content = gating_api.get_gated_content(self.course, self.effective_user) if gated_content: if unicode(self.section.location) in gated_content: raise Http404 def _get_language_preference(self): """ Returns the preferred language for the actual user making the request. 
""" language_preference = get_user_preference(self.real_user, LANGUAGE_KEY) if not language_preference: language_preference = settings.LANGUAGE_CODE return language_preference def _is_masquerading_as_student(self): """ Returns whether the current request is masquerading as a student. """ return self.masquerade and self.masquerade.role == 'student' def _is_masquerading_as_specific_student(self): """ Returns whether the current request is masqueurading as a specific student. """ return self._is_masquerading_as_student() and self.masquerade.user_name def _find_block(self, parent, url_name, block_type, min_depth=None): """ Finds the block in the parent with the specified url_name. If not found, calls get_current_child on the parent. """ child = None if url_name: child = parent.get_child_by(lambda m: m.location.name == url_name) if not child: # User may be trying to access a child that isn't live yet if not self._is_masquerading_as_student(): raise Http404('No {block_type} found with name {url_name}'.format( block_type=block_type, url_name=url_name, )) elif min_depth and not child.has_children_at_depth(min_depth - 1): child = None if not child: child = get_current_child(parent, min_depth=min_depth, requested_child=self.request.GET.get("child")) return child def _find_chapter(self): """ Finds the requested chapter. """ return self._find_block(self.course, self.chapter_url_name, 'chapter', CONTENT_DEPTH - 1) def _find_section(self): """ Finds the requested section. """ if self.chapter: return self._find_block(self.chapter, self.section_url_name, 'section') def _prefetch_and_bind_course(self): """ Prefetches all descendant data for the requested section and sets up the runtime, which binds the request user to the section. """ self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents( self.course_key, self.effective_user, self.course, depth=CONTENT_DEPTH, ) self.course = get_module_for_descriptor( self.effective_user, self.request, self.course, self.field_data_cache, self.course_key, course=self.course, ) def _prefetch_and_bind_section(self): """ Prefetches all descendant data for the requested section and sets up the runtime, which binds the request user to the section. """ # Pre-fetch all descendant data self.section = modulestore().get_item(self.section.location, depth=None) self.field_data_cache.add_descriptor_descendents(self.section, depth=None) # Bind section to user self.section = get_module_for_descriptor( self.effective_user, self.request, self.section, self.field_data_cache, self.course_key, self.position, course=self.course, ) def _save_positions(self): """ Save where we are in the course and chapter. """ save_child_position(self.course, self.chapter_url_name) save_child_position(self.chapter, self.section_url_name) def _create_courseware_context(self): """ Returns and creates the rendering context for the courseware. Also returns the table of contents for the courseware. 
""" courseware_context = { 'csrf': csrf(self.request)['csrf_token'], 'COURSE_TITLE': self.course.display_name_with_default_escaped, 'course': self.course, 'init': '', 'fragment': Fragment(), 'staff_access': self.is_staff, 'studio_url': get_studio_url(self.course, 'course'), 'masquerade': self.masquerade, 'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"), 'bookmarks_api_url': reverse('bookmarks'), 'language_preference': self._get_language_preference(), 'disable_optimizely': True, } table_of_contents = toc_for_course( self.effective_user, self.request, self.course, self.chapter_url_name, self.section_url_name, self.field_data_cache, ) courseware_context['accordion'] = render_accordion(self.request, self.course, table_of_contents['chapters']) # entrance exam data if course_has_entrance_exam(self.course): if getattr(self.chapter, 'is_entrance_exam', False): courseware_context['entrance_exam_current_score'] = get_entrance_exam_score(self.request, self.course) courseware_context['entrance_exam_passed'] = user_has_passed_entrance_exam(self.request, self.course) # staff masquerading data now = datetime.now(UTC()) effective_start = _adjust_start_date_for_beta_testers(self.effective_user, self.course, self.course_key) if not in_preview_mode() and self.is_staff and now < effective_start: # Disable student view button if user is staff and # course is not yet visible to students. courseware_context['disable_student_access'] = True if self.section: # chromeless data if self.section.chrome: chrome = [s.strip() for s in self.section.chrome.lower().split(",")] if 'accordion' not in chrome: courseware_context['disable_accordion'] = True if 'tabs' not in chrome: courseware_context['disable_tabs'] = True # default tab if self.section.default_tab: courseware_context['default_tab'] = self.section.default_tab # section data courseware_context['section_title'] = self.section.display_name_with_default_escaped section_context = self._create_section_context( table_of_contents['previous_of_active_section'], table_of_contents['next_of_active_section'], ) courseware_context['fragment'] = self.section.render(STUDENT_VIEW, section_context) return courseware_context def _create_section_context(self, previous_of_active_section, next_of_active_section): """ Returns and creates the rendering context for the section. """ def _compute_section_url(section_info, requested_child): """ Returns the section URL for the given section_info with the given child parameter. """ return "{url}?child={requested_child}".format( url=reverse( 'courseware_section', args=[unicode(self.course.id), section_info['chapter_url_name'], section_info['url_name']], ), requested_child=requested_child, ) section_context = { 'activate_block_id': self.request.GET.get('activate_block_id'), 'requested_child': self.request.GET.get("child"), } if previous_of_active_section: section_context['prev_url'] = _compute_section_url(previous_of_active_section, 'last') if next_of_active_section: section_context['next_url'] = _compute_section_url(next_of_active_section, 'first') # sections can hide data that masquerading staff should see when debugging issues with specific students section_context['specific_masquerade'] = self._is_masquerading_as_specific_student() return section_context def _handle_unexpected_error(self): """ Handle unexpected exceptions raised by View. 
""" # In production, don't want to let a 500 out for any reason if settings.DEBUG: raise log.exception( u"Error in index view: user=%s, effective_user=%s, course=%s, chapter=%s section=%s position=%s", self.real_user, self.effective_user, unicode(self.course_key), self.chapter_url_name, self.section_url_name, self.position, ) try: return render_to_response('courseware/courseware-error.html', { 'staff_access': self.is_staff, 'course': self.course }) except: # Let the exception propagate, relying on global config to # at least return a nice error message log.exception("Error while rendering courseware-error page") raise def render_accordion(request, course, table_of_contents): """ Returns the HTML that renders the navigation for the given course. Expects the table_of_contents to have data on each chapter and section, including which ones are active. """ context = dict( [ ('toc', table_of_contents), ('course_id', unicode(course.id)), ('csrf', csrf(request)['csrf_token']), ('due_date_display_format', course.due_date_display_format), ('time_zone', get_user_time_zone(request.user).zone), ] + TEMPLATE_IMPORTS.items() ) return render_to_string('courseware/accordion.html', context) def save_child_position(seq_module, child_name): """ child_name: url_name of the child """ for position, child in enumerate(seq_module.get_display_items(), start=1): if child.location.name == child_name: # Only save if position changed if position != seq_module.position: seq_module.position = position # Save this new position to the underlying KeyValueStore seq_module.save() def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None): """ Recurses up the course tree starting from a leaf Saving the position property based on the previous node as it goes """ current_module = xmodule while current_module: parent_location = modulestore().get_parent_location(current_module.location) parent = None if parent_location: parent_descriptor = modulestore().get_item(parent_location) parent = get_module_for_descriptor( user, request, parent_descriptor, field_data_cache, current_module.location.course_key, course=course ) if parent and hasattr(parent, 'position'): save_child_position(parent, current_module.location.name) current_module = parent
shabab12/edx-platform
lms/djangoapps/courseware/views/index.py
Python
agpl-3.0
22,960
[ "VisIt" ]
48138fde1968e188abc31f14159fdfe48edf75e63d7a6b191147a1201c91840e
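The CoursewareIndex view above funnels every early exit (payment, enrollment, prerequisites, surveys) through a single Redirect exception that get() converts into an HTTP redirect. Here is a minimal sketch of that control-flow pattern; the Redirect class below is a stand-in for courseware.exceptions.Redirect, not the actual implementation, and the helper and return strings are invented for illustration.

# Sketch of the raise-Redirect / catch-at-the-top pattern used by
# CoursewareIndex.get(). Redirect here is a hypothetical stand-in.
class Redirect(Exception):
    def __init__(self, url):
        super().__init__(url)
        self.url = url

def check_enrollment(enrolled):
    # Deeply nested helpers can abort rendering from anywhere...
    if not enrolled:
        raise Redirect("/dashboard")

def get(enrolled):
    # ...and a single handler at the top turns the exception into a response.
    try:
        check_enrollment(enrolled)
        return "200 courseware page"
    except Redirect as redirect_error:
        return "302 -> " + redirect_error.url

assert get(True) == "200 courseware page"
assert get(False) == "302 -> /dashboard"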
#!/usr/bin/env python
#
# Wrapper script for starting the biopet-bamstats JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.

import os
import subprocess
import sys
import shutil

from os import access
from os import getenv
from os import X_OK

jar_file = 'BamStats-assembly-1.0.jar'

default_jvm_mem_opts = []

# !!! End of parameter section. No user-serviceable code below this line !!!


def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    return os.path.dirname(os.path.realpath(path))


def java_executable():
    """Return the executable name of the Java interpreter."""
    java_home = getenv('JAVA_HOME')
    java_bin = os.path.join('bin', 'java')

    if java_home and access(os.path.join(java_home, java_bin), X_OK):
        return os.path.join(java_home, java_bin)
    else:
        return 'java'


def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of lists of strings of the form:
      (memory_options, prop_options, passthrough_options, exec_dir)
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None

    for arg in argv:
        if arg.startswith('-D'):
            prop_opts.append(arg)
        elif arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
        else:
            pass_args.append(arg)

    # In the original shell script the test coded below read:
    # if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args, exec_dir)


def main():
    """
    PeptideShaker updates files relative to the path of the jar file.
    In a multiuser setting, the option --exec_dir="exec_dir"
    can be used as the location for the peptide-shaker distribution.
    If the exec_dir does not exist,
    we copy the jar file, lib, and resources to the exec_dir directory.
    """
    java = java_executable()
    (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])

    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'

    jar_path = os.path.join(jar_dir, jar_file)
    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args

    sys.exit(subprocess.call(java_args))


if __name__ == '__main__':
    main()
CGATOxford/bioconda-recipes
recipes/biopet-bamstats/biopet-bamstats.py
Python
mit
3,367
[ "Bioconda" ]
c3c49fd6a432b61267914b08342a14f1a69cb1fb56571cabf4601f8c77ffaa3d
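The heart of the wrapper above is jvm_opts(), which routes -D and -XX flags to JVM property options, -Xm* flags to memory options, and everything else through to the JAR. A quick self-contained illustration of that partitioning; the trailing tool arguments are made up for the example.

# Mirrors the outcome of the jvm_opts() elif chain above on a sample
# command line. The tool arguments ("Stats", "-b", "sample.bam") are
# invented for illustration only.
argv = ["-Xmx4g", "-Dsamjdk.compression_level=1", "-XX:+UseSerialGC",
        "Stats", "-b", "sample.bam"]

mem  = [a for a in argv if a.startswith("-Xm")]
prop = [a for a in argv if a.startswith(("-D", "-XX"))]
rest = [a for a in argv if a not in mem + prop]

assert mem  == ["-Xmx4g"]
assert prop == ["-Dsamjdk.compression_level=1", "-XX:+UseSerialGC"]
assert rest == ["Stats", "-b", "sample.bam"]   # passed through to the JAR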
# -*- coding: utf-8 -*- #!/usr/bin/env python3 #------------------------------------------------------------------------------- # Name: # Purpose: This .py file extracts adjacency lists and detects communities # from the corresponding timeslots. # # Required libs: python-dateutil,pyparsing,numpy,matplotlib,networkx # Author: konkonst # # Created: 20/08/2013 # Copyright: (c) ITI (CERTH) 2013 # Licence: <apache licence 2.0> #------------------------------------------------------------------------------- import json, codecs, os, glob, time, dateutil.parser, collections, datetime, pickle, itertools, math, requests import numpy as np import matplotlib.pyplot as plt from matplotlib import interactive from operator import itemgetter import re class communityranking: @classmethod def from_json(cls, dataset_path, timeSeg, timeMin, timeMax): #Get filenames from json dataset path if not os.path.exists(dataset_path + './data/tmp/tweetDict.pck'): tweetDict = {'files':[],'tweets':{}} tweetDict['tweets'] = {} userDict = {} alltime, tweetIds = [], [] totTweets, totMentTws, totNonMentTws, totMents, hashes, urlCount, mediaCount = 0, 0, 0, 0, 0, 0, 0 stats = [0, 0, 0, 0, 0, 0, 0] else: tweetDict = pickle.load(open(dataset_path + './data/tmp/tweetDict.pck','rb')) userDict = tweetDict['userDict'] alltime, tweetIds = tweetDict['alltime'], tweetDict['tweetIds'] stats = tweetDict['stats'] totTweets, totMentTws, totNonMentTws, totMents, hashes, urlCount, mediaCount = stats[0],stats[1],stats[2],stats[3],stats[4],stats[5],stats[6] files = glob.glob(dataset_path + '/data/json/*.json') tweetDict['files'] = list(set(tweetDict['files'])) files = [x for x in files if x not in tweetDict['files']] '''Parse the json files into authors/mentions/alltime/hashtags/tweetIds/text lists''' for filename in files: flag = False # with codecs.open(filename, 'r', 'utf8') as f: with open(filename, 'r') as f: for line in f: # read_line = line.strip().encode('utf-8') # json_line = json.loads(read_line.decode('utf-8')) json_line = json.loads(line.strip()) try: dt = dateutil.parser.parse(json_line['created_at'],dayfirst=True) mytime = int(time.mktime(dt.timetuple())) if mytime >= timeMin and mytime <= timeMax: try: json_line['entities']['user_mentions'][0] len_ment = len(json_line['entities']['user_mentions']) tmpMents = [] for i in range(len_ment): tmpMents.append(json_line['entities']['user_mentions'][i]['screen_name']) if json_line['entities']['user_mentions'][i]['screen_name'] not in userDict: userDict[json_line['entities']['user_mentions'][i]['screen_name']] = {'id':json_line['entities']['user_mentions'][i]['id'],'followers_count':0, 'listed_count':0,'friends_count':0,'description':'','name':json_line['entities']['user_mentions'][i]['name'],'location':'','statuses_count':0} totMents += 1 tweetDict['tweets'][json_line['id_str']] = {} tweetDict['tweets'][json_line['id_str']]['user_mentions'] = tmpMents totMentTws += 1 alltime.append(mytime) tweetDict['tweets'][json_line['id_str']]['time'] = mytime tweetIds.append(json_line['id_str']) tweetDict['tweets'][json_line['id_str']]['authors'] = json_line['user']['screen_name'] userDict[json_line['user']['screen_name']] = {'id':json_line['user']['id'],'followers_count':json_line['user']['followers_count'], 'listed_count':json_line['user']['listed_count'],'friends_count':json_line['user']['friends_count'],'description':json_line['user']['description'], 'name':json_line['user']['name'],'location':json_line['user']['location'],'statuses_count':json_line['user']['statuses_count']} 
tweetDict['tweets'][json_line['id_str']]['text'] = json_line['text'] try: tmp = [] for textIdx in json_line['entities']['hashtags']: hashes += 1 tmp.append(textIdx['text']) tweetDict['tweets'][json_line['id_str']]['hashtags'] = tmp except: pass try: tmp = [] for textIdx in json_line['entities']['urls']: urlCount += 1 tmp.append(textIdx['expanded_url']) tweetDict['tweets'][json_line['id_str']]['urls'] = tmp except: pass try: tmp = [] for textIdx in json_line['extended_entities']['media']: mediaCount +=1 tmp.append(textIdx['type']) except: tweetDict['tweets'][json_line['id_str']]['media'] = [] pass except: totNonMentTws += 1 pass totTweets += 1 else: if mytime > timeMax: flag = True continue except: print('bad tweet') pass f.close() tweetDict['userDict'] = userDict zippedall=zip(alltime,tweetIds) zippedall=sorted(zippedall) alltime, tweetIds = zip(*zippedall) alltime, tweetIds = list(alltime), list(tweetIds) tweetDict['alltime'], tweetDict['tweetIds'] = alltime, tweetIds stats[0],stats[1],stats[2],stats[3],stats[4],stats[5],stats[6] = totTweets, totMentTws, totNonMentTws, totMents, hashes, urlCount, mediaCount tweetDict['stats'] = stats pickle.dump(tweetDict, open(dataset_path + './data/tmp/tweetDict.pck','wb'), protocol = 2) statsfile = open(dataset_path + '/data/results/basicstats.txt','w') statement = ('Total # of Tweets= ' + str(totTweets) + '\nTotal # of Tweets with mentions: ' + str(totMentTws) + '\nTotal # of Tweets without mentions: ' + str(totNonMentTws) + '\nTotal # of edges: ' + str(totMents) + '\nTotal # of hashtags: ' + str(hashes) + '\nTotal # of urls: ' + str(urlCount) + '\nTotal # of media: ' + str(mediaCount) + '\n') print(statement) statsfile.write(statement) statsfile.close() return cls(alltime, tweetIds, dataset_path, timeSeg),tweetDict def __init__(self, alltime, tweetIds, dataset_path, timeSeg): self.alltime = alltime self.tweetIds = tweetIds self.dataset_path = dataset_path self.timeSeg = timeSeg self.usersPerTmsl = {} self.userPgRnkBag = {} self.commBag = {} self.urlBag = {} self.adjListBag = {} self.commBetweenessBag = {} def timeslotselection(self): ###Parsing commences### # Create time segments a human can understand humanTimeSegs=[] for idx,seg in enumerate(self.timeSeg): if seg <3600: timeNum = seg / 60 timeTitle = ' mins' humanTimeSegs.append(str(idx+1)+'> '+str(round(timeNum))+timeTitle) elif seg >= 3600 and seg < 86400: timeNum = seg / 3600 timeTitle = ' hours' humanTimeSegs.append( str(idx+1)+'> '+str(round(timeNum))+timeTitle) elif seg >= 86400 and seg < 604800: timeNum = seg / 86400 timeTitle = ' days' humanTimeSegs.append(str(idx+1)+'> '+str(round(timeNum))+timeTitle) elif seg / 86400 == 1: timeTitle = ' day' humanTimeSegs.append(str(idx+1)+'> '+str(round(timeNum))+timeTitle) elif seg >= 604800 and seg < 2592000: timeNum = seg / 604800 timeTitle = ' weeks' humanTimeSegs.append(str(idx+1)+'> '+str(round(timeNum))+timeTitle) else: timeNum = seg / 2592000 timeTitle = ' months' humanTimeSegs.append(str(idx+1)+'> '+str(round(timeNum))+timeTitle) #Find time distance between posts# time2 = np.append(self.alltime[0], self.alltime) time2 = time2[0:len(time2) - 1] timeDif = self.alltime - time2 lT = len(self.alltime) '''Extract the first derivative''' font = {'size': 14} plt.rc('font', **font) fig = plt.figure()#figsize=(10,8) plotcount, globfirstderiv, globmentionLimit = 0, {}, {} for seg in self.timeSeg: if seg <3600: timeNum = seg / 60 timeTitle = ' mins' labelstr = '%H:%M' elif seg >= 3600 and seg < 86400: timeNum = seg / 3600 timeTitle = ' hours' labelstr = 
'%Hh/%d' elif seg >= 86400 and seg < 604800: timeNum = seg / 86400 timeTitle = ' days' labelstr = '%d/%b' elif seg / 86400 == 1: timeTitle = ' day' labelstr = '%d/%b' elif seg >= 604800 and seg < 2592000: timeNum = seg / 604800 timeTitle = ' weeks' labelstr = '%d/%b' else: timeNum = seg / 2592000 timeTitle = ' months' labelstr = '%b/%y' curTime, bin, freqStat, mentionLimit, timeLabels = 0, 0, [0], [], [] for i in range(lT): curTime += timeDif[i] if curTime <= seg: freqStat[bin] += 1 else: curTime = 0 mentionLimit = np.append(mentionLimit, i) timeLabels = np.append(timeLabels, datetime.datetime.fromtimestamp(self.alltime[i]).strftime(labelstr)) bin += 1 freqStat = np.append(freqStat, 0) mentionLimit = np.append(mentionLimit, i) timeLabels = np.append(timeLabels, datetime.datetime.fromtimestamp(self.alltime[-1]).strftime(labelstr)) freqStatIni = np.zeros(len(freqStat) + 1) freqStatMoved = np.zeros(len(freqStat) + 1) freqStatIni[0:len(freqStat)] = freqStat freqStatMoved[1:len(freqStat) + 1] = freqStat firstderiv = freqStatIni - freqStatMoved firstderiv[len(firstderiv) - 1] = 0 globfirstderiv[seg] = firstderiv globmentionLimit[seg] = mentionLimit plotcount += 1 if len(self.timeSeg) < 3: ax = fig.add_subplot(2, int(np.ceil(len(self.timeSeg) / 2)), plotcount, autoscale_on=True) else: ax = fig.add_subplot(int(np.ceil(len(self.timeSeg) / 2)), 2, plotcount, autoscale_on=True) plt.grid(axis='x') plt.plot(freqStat, 'b-', hold=True) plt.ylabel('User activity (mentions)') plt.xlabel('Init. time: ' + datetime.datetime.fromtimestamp(int(self.alltime[0])).strftime('%H:%M-%d/%m/%y')+ ', Last point:'+ datetime.datetime.fromtimestamp(int(self.alltime[-1])).strftime('%H:%M-%d/%m/%y') + ' (Ts:' + str(round(timeNum)) + timeTitle + ')') poi = [] for k in range(len(mentionLimit)): if firstderiv[k] < 0 <= firstderiv[k + 1]: poi = np.append(poi, k) poi = np.int32(poi) plt.plot(poi, freqStat[poi], 'ro', hold=True) pertick=np.ceil(len(freqStat)/self.xLablNum) ax.set_xticks(np.arange(0, len(freqStat), pertick))#, minor=False) ax.set_xticklabels(timeLabels[0::pertick], minor=False, fontsize = 14, rotation = 35) plt.xlim(xmax=(len(freqStat))) mng = plt.get_current_fig_manager() mng.resize(*mng.window.maxsize()) interactive(True) plt.show() plt.savefig(self.dataset_path + '/data/results/tweet_activity.pdf', bbox_inches='tight', format='pdf') if len(self.timeSeg) > 1: timeSegInput = int(input('Please select sampling time: \n' + str(humanTimeSegs))) else: timeSegInput = 1 timeSegInput=self.timeSeg[timeSegInput-1] plt.close() del(fig,self.timeSeg) if timeSegInput < 3600: timeNum = timeSegInput / 60 timeTitle = 'per' + str(int(timeNum)) + 'mins' labelstr = '%H:%M' elif timeSegInput >= 3600 and timeSegInput < 86400: timeNum = timeSegInput / 3600 timeTitle = 'per' + str(int(timeNum)) + 'hours' labelstr = '%Hh/%d' elif timeSegInput >= 86400 and timeSegInput < 604800: timeNum = timeSegInput / 86400 timeTitle = 'per' + str(int(timeNum)) + 'days' labelstr = '%d/%b' elif timeSegInput>= 604800 and timeSegInput < 2592000: timeNum = timeSegInput / 604800 timeTitle = 'per' + str(int(timeNum)) + 'weeks' labelstr = '%d/%b' else: timeNum = timeSegInput / 2592000 timeTitle = 'per' + str(int(timeNum)) + 'months' labelstr = '%b/%y' self.fileTitle = timeTitle self.labelstr = labelstr firstderiv = globfirstderiv[timeSegInput] mentionLimit = globmentionLimit[timeSegInput] return firstderiv, mentionLimit def extraction(self): '''Extract adjacency lists,mats,user and community centrality and communities bags''' import igraph #Compute the first 
derivative and the point of timeslot separation firstderiv, mentionLimit = self.timeslotselection() t = time.time() #Extract unique users globally and construct dictionary authors, mentions = [], [] for x in self.tweetDict['tweets'].keys(): authors.append(self.tweetDict['tweets'][x]['authors']) mentions.append(self.tweetDict['tweets'][x]['user_mentions']) mentions = list(itertools.chain.from_iterable(mentions)) usrs = authors.copy() usrs.extend(mentions) usrs = list(set(usrs)) usrs.sort() self.uniqueUsers = {x:num for num,x in enumerate(usrs)} statement = 'Total # of unique users: %s\n' %len(self.uniqueUsers) statsfile = open(self.dataset_path + '/data/results/basicstats.txt','a') print(statement) statsfile.write(statement) statsfile.close() #Split time according to the first derivative of the users' activity# sesStart, timeslot, timeLimit,commCount = 0, 0, [], 0 self.commPgRnkBag, self.commPgRnkBagNormed, self.authorTwIdPerTmslDict = {}, {}, {} print('Forming timeslots') for k in range(len(mentionLimit)): if self.adaptive: locMin = firstderiv[k] < 0 and firstderiv[k + 1] >= 0 if locMin or k == len(mentionLimit)-1: del(locMin) else: continue #make timeslot timelimit array timeLimit.append(self.alltime[int(mentionLimit[k])]) fileNum = '{0}'.format(str(timeslot).zfill(2)) sesEnd = int(mentionLimit[k] + 1) tweetTempList = self.tweetIds[sesStart:sesEnd] #Make pairs of users with weights authors, mentions = [], [] self.authorTwIdPerTmslDict[timeslot] = {} for twId in tweetTempList: if self.tweetDict['tweets'][twId]['authors'] not in self.authorTwIdPerTmslDict[timeslot]: self.authorTwIdPerTmslDict[timeslot][self.tweetDict['tweets'][twId]['authors']] = [twId] else: self.authorTwIdPerTmslDict[timeslot][self.tweetDict['tweets'][twId]['authors']].append(twId) for m in self.tweetDict['tweets'][twId]['user_mentions']: authors.append(self.tweetDict['tweets'][twId]['authors']) mentions.append(m) usersPair = list(zip(authors,mentions)) #Create weighted adjacency list weighted = collections.Counter(usersPair) weighted = list(weighted.items()) adjusrs, weights = zip(*weighted) adjauthors, adjments = zip(*adjusrs) adjList = list(zip(adjauthors, adjments, weights)) print('For Timeslot: '+str(fileNum)+' comprising '+str(len(adjList))+' edges.') self.usersPerTmsl[timeslot] = list(set(itertools.chain.from_iterable([authors,mentions]))) '''Write pairs of users to txt file for Gephi''' if not os.path.exists(self.dataset_path + '/data/results/'+self.adaptStr+'/forGephi/'+str(self.fileTitle)): os.makedirs(self.dataset_path + '/data/results/'+self.adaptStr+'/forGephi/'+str(self.fileTitle)) my_txt = open(self.dataset_path + '/data/results/'+self.adaptStr+'/forGephi/'+str(self.fileTitle)+'/usersPairs_' + fileNum +'.txt', 'w')# my_txt.write('Source,Target,Weight' + '\n') for line in adjList: my_txt.write(','.join(str(x) for x in line) + '\n') my_txt.close() self.adjListBag[timeslot] = adjList #Construct igraph graph # edgelist = [(uniqueIds[u], uniqueIds[v]) for u, v, _ in adjList] # weights = [w for _, _, w in adjList] gDirected=igraph.Graph.TupleList(adjList, directed = True, weights=True) gDirected.simplify(multiple=False, loops=True, combine_edges=False) # gUndirected=igraph.Graph.TupleList(adjList, weights=True) #Extract the centrality of each user using the PageRank algorithm igraphUserPgRnk = gDirected.pagerank(weights = 'weight') pgRnkMax = max(igraphUserPgRnk) usrlist = gDirected.vs['name'] tempUserPgRnk = {} for i,k in enumerate(usrlist): tempUserPgRnk[k] = igraphUserPgRnk[i]#/pgRnkMax 
self.userPgRnkBag[timeslot] = tempUserPgRnk #Detect Communities using the louvain algorithm# # louvComms = gUndirected.community_multilevel(weights = 'weight') extractedComms = gDirected.community_infomap(edge_weights = 'weight') strCommDict, numCommDict, twIdCommDict = {}, {}, {} for k, v in enumerate(extractedComms.membership): strCommDict[v] = strCommDict.get(v, []) strCommDict[v].append(usrlist[k]) strCommDict[v].sort() numCommDict[v] = numCommDict.get(v, []) numCommDict[v].append(self.uniqueUsers[usrlist[k]]) numCommDict[v].sort() try: self.authorTwIdPerTmslDict[timeslot][usrlist[k]] twIdCommDict[v] = twIdCommDict.get(v, []) twIdCommDict[v].extend(self.authorTwIdPerTmslDict[timeslot][usrlist[k]]) twIdCommDict[v].sort() except: pass commCount+=len(strCommDict) self.commBag[timeslot] = {} self.commBag[timeslot]['strComms'] = strCommDict self.commBag[timeslot]['numComms'] = numCommDict self.commBag[timeslot]['tweetIds'] = twIdCommDict #Construct a graph using the communities as users tempCommGraph = extractedComms.cluster_graph(combine_edges = False) tempAllCommGraphs = extractedComms.subgraphs() tmprecipr = [] for x in tempAllCommGraphs: if math.isnan(x.reciprocity()): tmprecipr.append(0) else: tmprecipr.append(x.reciprocity()) tempCommGraph.simplify(multiple=False, loops=True, combine_edges=False) self.commBag[timeslot]['commEdgesOut'],self.commBag[timeslot]['commEdgesIn'] = {},{} for idx, commAdj in enumerate(tempCommGraph.get_adjlist(mode='ALL')): self.commBag[timeslot]['commEdgesOut'][idx] = [] self.commBag[timeslot]['commEdgesIn'][idx] = [] for x in commAdj: if x!=idx: self.commBag[timeslot]['commEdgesOut'][idx].append(x) else: self.commBag[timeslot]['commEdgesIn'][idx].append(x) # self.commBag[timeslot]['Similarity_Jaccard'] = tempCommGraph.similarity_jaccard() self.commBag[timeslot]['indegree'] = tempCommGraph.indegree() self.commBag[timeslot]['outdegree'] = tempCommGraph.outdegree() self.commBag[timeslot]['reciprocity'] = tmprecipr #Detect the centrality of each community using the PageRank algorithm commPgRnk = tempCommGraph.pagerank(weights = 'weight') minCPGR = min(commPgRnk) self.commPgRnkBag[timeslot] = commPgRnk self.commPgRnkBagNormed[timeslot] = [v/minCPGR for v in commPgRnk] #Detect the centrality of each community using the betweeness centrality algorithm commBetweeness = tempCommGraph.betweenness(weights = 'weight', directed = True) self.commBetweenessBag[timeslot] = commBetweeness #Extract community degree degreelist= tempCommGraph.degree(loops = False) self.commBag[timeslot]['alldegree'] = degreelist sesStart = sesEnd timeslot += 1 self.timeslots=timeslot day_month = [datetime.datetime.fromtimestamp(int(self.alltime[0])).strftime(self.labelstr)] day_month.extend([datetime.datetime.fromtimestamp(int(x)).strftime(self.labelstr) for x in timeLimit]) self.day_month = day_month self.timeLimit = timeLimit statement = '\nTotal # of communities is '+str(commCount) + '\n' statsfile = open(self.dataset_path + '/data/results/'+self.adaptStr+'/commstats_'+self.fileTitle+'.txt','w') print(statement) statsfile.write(statement) statsfile.close() dataCommPck = open(self.dataset_path + '/data/tmp/'+self.adaptStr+'/dataComm_'+str(self.fileTitle)+'.pck','wb') pickle.dump(self, dataCommPck , protocol = 2) dataCommPck.close() elapsed = time.time() - t print('Stage 2 took: %.2f seconds' % elapsed) def evol_detect(self, prevTimeslots, xLablNum, adaptive): import random self.xLablNum=xLablNum self.adaptive = adaptive if adaptive: self.adaptStr = 'adaptive' else: self.adaptStr = '' try: 
self.commPgRnkBag print('Comms have already been extracted. Moving to Stage 3...') except: print('Comms have not been extracted. Moving to Stage 2...') self.extraction() pass timeslots = self.timeslots '''find out the users that appear in more that one timestamps''' countedTmslUsers = collections.Counter(list(itertools.chain.from_iterable(self.usersPerTmsl.values()))) '''Construct Community Dictionary''' # print('Constructing Community Dictionary') commSizeBag = {} lC = [] #Number of communities>2people for each timeslot for cBlen in range(timeslots): commStrBag2 = dict(self.commBag[cBlen]['strComms']) commSizeBag[cBlen] = {} for k,v in commStrBag2.items(): croppedv = [x for x in v if countedTmslUsers[x] > 1] lenV = len(croppedv) if lenV < 3: del(self.commBag[cBlen]['strComms'][k]) del(self.commBag[cBlen]['numComms'][k]) del(self.commBag[cBlen]['commEdgesOut'][k])#cut out communities that contain users that do not appear in more than one timeslots del(self.commBag[cBlen]['commEdgesIn'][k]) try: del(self.commBag[cBlen]['tweetIds'][k]) except: pass else: commSizeBag[cBlen][k] = len(v) self.commPgRnkBag[cBlen] lC.append(len(self.commBag[cBlen]['strComms'])) # '''Fix Borda count ''' # bordaCentralityBag = {} # for cBlen in range(timeslots): statement = '\nTotal # of reduced communities is '+str(sum(lC)) + '\n' statsfile = open(self.dataset_path + '/data/results/'+self.adaptStr+'/commstats_'+self.fileTitle+'.txt','a') print(statement) statsfile.write(statement) statsfile.close() self.commPerTmslt=lC #Detect any evolution and name the evolving communities #uniCommIdsEvol is structured as such {'Id':[rowAppearence],[commCentrality],[commSize],[users]} self.commTweetBag, self.commHashtagBag, self.commTweetIdBag, self.commUrlBag = {}, {}, {}, {} evolcounter, uniCommIdsEvol, commCntr, dynCommCount, commIds = 0, {}, 0, 0, [] thres = 0.2 print('Community similarity search...') t = time.time() for rows in range(1, timeslots): print('...for timeslot: '+str(rows)+' of '+str(timeslots-1)) t2 = time.time() for clmns,bag1 in self.commBag[rows]['numComms'].items(): # idx = str(rows) + ',' + str(clmns) tempcommSize = len(bag1) for invrow in range(1, prevTimeslots + 1): prevrow = rows - invrow tmpsim = {} if prevrow >= 0: for clmns2,prevComms in self.commBag[prevrow]['numComms'].items(): lenprevComms = len(prevComms) # tmpratio = lenprevComms / tempcommSize tmpratio = min(tempcommSize,lenprevComms)/max(tempcommSize,lenprevComms) if thres >= tmpratio or thres >= 1/tmpratio: continue else: sim = len(set(bag1).intersection(prevComms)) / len(set(np.append(bag1, prevComms))) if sim > thres: tmpsim[clmns2] = sim if tmpsim: tmpsim = {x:v+round(random.random()/10000,5) for x,v in tmpsim.items()} maxval = max(tmpsim.values()) else: maxval = 0 if maxval >= thres: dynCommCountList = [] for idx, val in tmpsim.items(): if str(prevrow) + ',' + str(idx) not in commIds: evolcounter += 1 uniCommIdsEvol[dynCommCount] = [[], [], [], [], [], [], [], [], [], [], [], [], [], []] uniCommIdsEvol[dynCommCount][0].append(prevrow)#timeslot num for first evolution uniCommIdsEvol[dynCommCount][1].append(self.commPgRnkBag[prevrow][idx])#community pagerank for first evolution uniCommIdsEvol[dynCommCount][2].append(commSizeBag[prevrow][idx])#community size per timeslot for first evolution uniCommIdsEvol[dynCommCount][3].append(self.commBag[prevrow]['strComms'][idx])#users in each community for first evolution uniCommIdsEvol[dynCommCount][4].append(self.commBag[prevrow]['alldegree'][idx])#community degree for first evolution 
uniCommIdsEvol[dynCommCount][5].append(self.commPgRnkBagNormed[prevrow][idx])#normed community pagerank for first evolution uniCommIdsEvol[dynCommCount][6].append(self.commBetweenessBag[prevrow][idx])#community betweeness centrality for first evolution #uniCommIdsEvol[dynCommCount][7].append(0) uniCommIdsEvol[dynCommCount][8].append(str(prevrow) + ',' + str(idx))#community names in between uniCommIdsEvol[dynCommCount][9].append(self.commBag[prevrow]['commEdgesOut'][idx]) uniCommIdsEvol[dynCommCount][10].append(self.commBag[prevrow]['indegree'][idx])#indegree of community uniCommIdsEvol[dynCommCount][11].append(self.commBag[prevrow]['outdegree'][idx])#outdegree of community uniCommIdsEvol[dynCommCount][12].append(self.commBag[prevrow]['reciprocity'][idx])#reciprocity of community uniCommIdsEvol[dynCommCount][13].append(self.commBag[prevrow]['commEdgesIn'][idx]) commIds.append(str(prevrow) + ',' + str(idx)) dynCommCountList.append(dynCommCount) tmpTw, tmpHa, tmptwId, tmpUrl = [], [], [], [] self.commTweetBag[dynCommCount], self.commHashtagBag[dynCommCount], self.commTweetIdBag[dynCommCount], self.commUrlBag[dynCommCount] = [], [], [], [] try: for twId in self.commBag[prevrow]['tweetIds'][idx]: tmptwId.append(twId) tmpTw.append(self.tweetDict['tweets'][twId]['text']) tmpHa.extend(self.tweetDict['tweets'][twId]['hashtags']) tmpUrl.extend(self.tweetDict['tweets'][twId]['urls']) except: pass self.commTweetBag[dynCommCount].append(tmpTw) self.commHashtagBag[dynCommCount].append(tmpHa) self.commTweetIdBag[dynCommCount].append(tmptwId) self.commUrlBag[dynCommCount].append(tmpUrl) dynCommCount += 1 commCntr += 1 else: for dyn, innerDict in uniCommIdsEvol.items(): if str(prevrow) + ',' + str(idx) in innerDict[8]: dynCommCountList.append(dyn) for d in dynCommCountList: uniCommIdsEvol[d][0].append(rows)#timeslot num uniCommIdsEvol[d][1].append(self.commPgRnkBag[rows][clmns])#community pagerank per timeslot uniCommIdsEvol[d][2].append(commSizeBag[rows][clmns])#community size per timeslot uniCommIdsEvol[d][3].append(self.commBag[rows]['strComms'][clmns])#users in each community uniCommIdsEvol[d][4].append(self.commBag[rows]['alldegree'][clmns])#community degree per timeslot uniCommIdsEvol[d][5].append(self.commPgRnkBagNormed[rows][clmns])#normed community pagerank per timeslot uniCommIdsEvol[d][6].append(self.commBetweenessBag[rows][clmns])#community betweeness centrality per timeslot uniCommIdsEvol[d][7].append(val)#similarity between the two communities in evolving timesteps uniCommIdsEvol[d][8].append(str(rows) + ',' + str(clmns))#community names in between uniCommIdsEvol[d][9].append(self.commBag[rows]['commEdgesOut'][clmns]) uniCommIdsEvol[d][10].append(self.commBag[rows]['indegree'][clmns])#indegree of community uniCommIdsEvol[d][11].append(self.commBag[rows]['outdegree'][clmns])#outdegree of community uniCommIdsEvol[d][12].append(self.commBag[rows]['reciprocity'][clmns])#reciprocity of community uniCommIdsEvol[d][13].append(self.commBag[rows]['commEdgesIn'][clmns]) commIds.append(str(rows) + ',' + str(clmns)) tmpTw, tmpHa, tmptwId, tmpUrl = [], [], [], [] try: for twId in self.commBag[rows]['tweetIds'][clmns]: tmptwId.append(twId) tmpTw.append(self.tweetDict['tweets'][twId]['text']) tmpHa.extend(self.tweetDict['tweets'][twId]['hashtags']) tmpUrl.extend(self.tweetDict['tweets'][twId]['urls']) except: pass self.commTweetBag[d].append(tmpTw) self.commHashtagBag[d].append(tmpHa) self.commTweetIdBag[d].append(tmptwId) self.commUrlBag[d].append(tmpUrl) commCntr += 1 break elapsed = time.time() - t2 
print('Elapsed: %.2f seconds' % elapsed) uniCommIds = list(uniCommIdsEvol.keys()) uniCommIds.sort() elapsed = time.time() - t print('Elapsed: %.2f seconds' % elapsed) self.uniCommIds,self.uniCommIdsEvol=uniCommIds,uniCommIdsEvol del(commIds,self.alltime,self.commBetweenessBag,commSizeBag)#,self.commPgRnkBag,self.commBag,) statement = (str(evolcounter) + ' evolutions and ' + str(len(uniCommIds)) + ' dynamic communities and ' + str(commCntr)+' evolving communities' + '\n') statsfile = open(self.dataset_path + '/data/results/'+self.adaptStr+'/commstats_'+self.fileTitle+'.txt','a') print(statement) statsfile.write(statement) statsfile.close() return self def commRanking(self,numTopComms, prevTimeslots,xLablNum): import tfidf, random, twython, nltk, urllib.parse from pymongo import MongoClient from nltk.corpus import stopwords regex1 = re.compile("(?:\@|#|https?\://)\S+",re.UNICODE) regex2 = re.compile("\w+'?\w+",re.UNICODE) '''Detect the evolving communities''' uniCommIdsEvol=self.uniCommIdsEvol timeslots=self.timeslots tempcommRanking = {} #structure: tempcommRanking={Id:[persistence,stability,commCentrality,degreeness]} definiteStop = ['gt','amp','rt','via'] commRanking, rankingDict, lifetime, simpleEntropyDict, bigramEntropyDict = {}, {},0, {}, {} for Id in self.uniCommIds: tempcommRanking[Id] = [] rankingDict[Id] = {} uniqueTimeSlLen = len(set(uniCommIdsEvol[Id][0])) timeSlLen=len(uniCommIdsEvol[Id][0]) # '''Checking Theseus Ship''' rankingDict[Id]['theseus'] = 1+len(set(uniCommIdsEvol[Id][3][0]).intersection(uniCommIdsEvol[Id][3][-1])) #/ len(set(np.append(uniCommIdsEvol[Id][3][0], uniCommIdsEvol[Id][3][-1]))) '''text entropy extraction''' tmptextlist = [[i for i in regex2.findall(regex1.sub('',' '.join(x).lower())) if i and not i.startswith(('rt','htt','(@','\'@','t.co')) and len(i)>2 and i not in definiteStop] for x in self.commTweetBag[Id]] simpleEntropyDict[Id] = [myentropy(x) for x in tmptextlist] bigramList = [[' '.join(x) for x in list(nltk.bigrams(tmpTopic))] for tmpTopic in tmptextlist] bigramEntropyDict[Id] = [myentropy(x) for x in bigramList] rankingDict[Id]['avgBigramTextentropy'] = sum(bigramEntropyDict[Id])/timeSlLen rankingDict[Id]['textentropy'] = sum(simpleEntropyDict[Id])/timeSlLen rankingDict[Id]['size'] = sum(uniCommIdsEvol[Id][2]) / uniqueTimeSlLen rankingDict[Id]['persistence'] = uniqueTimeSlLen / timeslots #persistence) rankingDict[Id]['stability'] = (sum(np.diff(list(set(uniCommIdsEvol[Id][0]))) == 1) + 1) / (timeslots + 1) #stability rankingDict[Id]['perstability'] = rankingDict[Id]['stability']*rankingDict[Id]['persistence'] #perstability) rankingDict[Id]['commCentrality'] = sum(uniCommIdsEvol[Id][1]) / uniqueTimeSlLen #commCentrality rankingDict[Id]['commCentralityNormed'] = sum(uniCommIdsEvol[Id][5]) / uniqueTimeSlLen #normed commCentrality rankingDict[Id]['commMaxCentralityNormed'] = max(uniCommIdsEvol[Id][5]) #max normed commCentrality rankingDict[Id]['betweeness'] = sum(uniCommIdsEvol[Id][6])#/ uniqueTimeSlLen #betweeness rankingDict[Id]['connections'] = sum([len(y) for y in [set(x) for x in uniCommIdsEvol[Id][9]]])/ uniqueTimeSlLen #connections to other communities rankingDict[Id]['urlAvg'] = sum([len(set(y)) for y in self.commUrlBag[Id]]) / uniqueTimeSlLen #average number of unique urls in every community rankingDict[Id]['similarityAvg'] = sum(uniCommIdsEvol[Id][7]) / uniqueTimeSlLen #average jaccardian between timeslots for each dyn comm rankingDict[Id]['reciprocity'] = sum(uniCommIdsEvol[Id][12]) / uniqueTimeSlLen #average reciprocity between timeslots 
for each dyn comm

        '''Comms ranked in order of features'''
        rankedPersistence = sorted(rankingDict, key=lambda k: [rankingDict[k]['persistence'], rankingDict[k]['stability'], rankingDict[k]['connections'], rankingDict[k]['commCentralityNormed']], reverse=True)
        rankedStability = sorted(rankingDict, key=lambda k: [rankingDict[k]['stability'], rankingDict[k]['persistence'], rankingDict[k]['connections'], rankingDict[k]['commCentralityNormed']], reverse=True)
        # The next two sort keys originally indexed rankingDict[Id] instead of
        # rankingDict[k], so the key was constant and the sort was a no-op.
        rankedPerstability = sorted(rankingDict, key=lambda k: [rankingDict[k]['perstability'], rankingDict[k]['connections'], rankingDict[k]['commCentralityNormed']], reverse=True)
        rankedcommCentrality = sorted(rankingDict, key=lambda k: [rankingDict[k]['commCentrality'], rankingDict[k]['connections'], rankingDict[k]['size']], reverse=True)
        rankedcommBetweeness = sorted(rankingDict, key=lambda k: [rankingDict[k]['betweeness'], rankingDict[k]['size'], rankingDict[k]['connections']], reverse=True)
        rankedcommCentralityNormed = sorted(rankingDict, key=lambda k: [rankingDict[k]['commCentralityNormed'], rankingDict[k]['connections'], rankingDict[k]['size']], reverse=True)
        rankedcommMaxCentralityNormed = sorted(rankingDict, key=lambda k: [rankingDict[k]['commMaxCentralityNormed'], rankingDict[k]['connections'], rankingDict[k]['size']], reverse=True)
        rankedTheseus = sorted(rankingDict, key=lambda k: [rankingDict[k]['theseus'], rankingDict[k]['connections'], rankingDict[k]['commCentralityNormed']], reverse=True)
        rankedConnections = sorted(rankingDict, key=lambda k: [rankingDict[k]['connections'], rankingDict[k]['size'], rankingDict[k]['commCentralityNormed']], reverse=True)
        rankedcommSize = sorted(rankingDict, key=lambda k: [rankingDict[k]['size'], rankingDict[k]['connections'], rankingDict[k]['commCentralityNormed']], reverse=True)
        rankedtextentropy = sorted(rankingDict, key=lambda k: [rankingDict[k]['textentropy'], rankingDict[k]['size'], rankingDict[k]['commMaxCentralityNormed']], reverse=True)
        rankedUrlAvg = sorted(rankingDict, key=lambda k: [rankingDict[k]['urlAvg'], rankingDict[k]['size'], rankingDict[k]['commMaxCentralityNormed']], reverse=True)
        rankedSimilarityAvg = sorted(rankingDict, key=lambda k: [rankingDict[k]['similarityAvg'], rankingDict[k]['commMaxCentralityNormed']], reverse=True)
        rankedReciprocity = sorted(rankingDict, key=lambda k: [rankingDict[k]['reciprocity'], rankingDict[k]['connections']], reverse=True)

        commRanking = {}
        whichmethod = 'TISCI'  # size centrality perstability TISCI
        for Id in self.uniCommIds:  # rankedPersistence.index(Id),rankedStability.index(Id),rankedcommBetweeness.index(Id),rankedcommMaxCentralityNormed.index(Id),rankedUrlAvg.index(Id),rankedConnections.index(Id)
            if whichmethod == 'TISCI':
                commRanking[Id] = recRank([rankedSimilarityAvg.index(Id), rankedReciprocity.index(Id), rankedUrlAvg.index(Id), rankedtextentropy.index(Id), rankedPerstability.index(Id), rankedcommCentralityNormed.index(Id), rankedcommSize.index(Id)])
            if whichmethod == 'size':
                commRanking[Id] = 1 / (1 + rankedcommSize.index(Id))
            if whichmethod == 'perstability':
                commRanking[Id] = 1 / (1 + rankedPerstability.index(Id))
            if whichmethod == 'centrality':
                commRanking[Id] = 1 / (1 + rankedcommCentralityNormed.index(Id))
            if whichmethod == 'diversity':
                commRanking[Id] = 1 / (1 + rankedtextentropy.index(Id))
            # commRanking[Id] = recRank([rankedUrlAvg.index(Id), rankedtextentropy.index(Id)])
        self.rankingDict = rankingDict

        '''All the communities ranked in order of combined importance'''
        rankedCommunities = sorted(commRanking, key=commRanking.get, reverse=True)
        if numTopComms >
len(rankedCommunities): numTopComms = len(rankedCommunities) '''Fix url dictionary''' print('Fixing urls...') self.commUrlCategory = self.urlDictionaryUpdate(rankedCommunities[0:numTopComms]) '''Constructing community size heatmap data''' commSizeHeatData = np.zeros([numTopComms, timeslots]) commUrlCategoryHeatmap = {} catsPerComm = {} for rCIdx, comms in enumerate(rankedCommunities[0:numTopComms]): commUrlCategoryHeatmap[rCIdx]={} for sizeIdx, timesteps in enumerate(uniCommIdsEvol[comms][0]): if commSizeHeatData[rCIdx, timesteps] != 0: commSizeHeatData[rCIdx, timesteps] = np.sum([np.log(uniCommIdsEvol[comms][2][sizeIdx]),commSizeHeatData[rCIdx, timesteps]]) if self.commUrlCategory[comms][sizeIdx]: addCategory = self.commUrlCategory[comms][sizeIdx][0] if comms in catsPerComm: catsPerComm[comms].append(self.commUrlCategory[comms][sizeIdx][0].lower()) else: catsPerComm[comms] = [self.commUrlCategory[comms][sizeIdx][0].lower()] if timesteps in commUrlCategoryHeatmap[rCIdx] and commUrlCategoryHeatmap[rCIdx][timesteps]: commUrlCategoryHeatmap[rCIdx][timesteps] = '\n'.join(list(set([commUrlCategoryHeatmap[rCIdx][timesteps],addCategory]))) else: commUrlCategoryHeatmap[rCIdx][timesteps] = addCategory else: commSizeHeatData[rCIdx, timesteps] = np.log(uniCommIdsEvol[comms][2][sizeIdx]) if self.commUrlCategory[comms][sizeIdx]: commUrlCategoryHeatmap[rCIdx][timesteps] = self.commUrlCategory[comms][sizeIdx][0] if comms in catsPerComm: catsPerComm[comms].append(self.commUrlCategory[comms][sizeIdx][0].lower()) else: catsPerComm[comms] = [self.commUrlCategory[comms][sizeIdx][0]] # print('single'+commUrlCategoryHeatmap[rCIdx, timesteps]) normedHeatdata = commSizeHeatData/commSizeHeatData.max() '''Retrieve profile images from usernames''' CONS_KEY = 'AvLwOrpwRUQ8lGTNmZmPA' CONS_SECRET = '9PxFSwG6DiiAOOCZ5oLHi649gxK3iwf8Q9czNZXFE' OAUTH_TOKEN = '1161058188-vlXu5zNTP3SZfubVFWJBMQd4Dq7YBBSYOQPMSyP' OAUTH_TOKEN_SECRET = '6sR2NpNGcVkPJsiI1oG0xGKrvssL9O9ARnMycHLV54' twitter1 = twython.Twython(CONS_KEY, CONS_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) CONS_KEY = 'H0gzu5Y4JNKtQ6TwkmyOg' CONS_SECRET = 'iLZo1hU7052Nnacj3vRUy974rxastZVzXYuJRKw' OAUTH_TOKEN = '545997015-Tl9IQc22jBOBXWxOO0Ysu4oAkzYrN1AkGzBvl4u3' OAUTH_TOKEN_SECRET = 'YP4Vng1T4oEHrUODTnMXePIEvidlGtnqAshlu8U2M' twitter2 = twython.Twython(CONS_KEY, CONS_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) CONS_KEY = 'uQIvzF5Staqpn33Yi4SfYA' CONS_SECRET = 'i9khJfAMTxu8dLJaGmWCyPkLYDrieUDRxMxyhGWBW8' OAUTH_TOKEN = '545997015-AU26dstIdSD5vi0JtV111Z5ZIjNQ2tSs8SBrB3on' OAUTH_TOKEN_SECRET = 'vP9OzlHlatuztwvVbQdtytQxcwFrMB6RzbHnY2h0' twitter3 = twython.Twython(CONS_KEY, CONS_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) mytwitter = [twitter1,twitter2,twitter3] try: self.usernameProfPicDict = pickle.load(open(self.dataset_path + '/data/tmp/usernameProfPicDict.pck', 'rb')) print('Old avatar dictionary has '+str(len(self.usernameProfPicDict))+' users; '+str(len(set(list(self.usernameProfPicDict.values()))))+' of which are active.') profPicDicKeys = list(self.usernameProfPicDict.keys()) for k in profPicDicKeys: if not self.usernameProfPicDict[k]: del self.usernameProfPicDict[k] except: self.usernameProfPicDict = {} print('New avatar dictionary is created') pass '''Create corpus and stopwords''' stop = stopwords.words('english') # stop = [] if self.dirName.startswith('greek'): session = requests.Session() grstopwords = session.get('https://www.dropbox.com/s/d6rvcmfu6c5jlsp/greek_stopwords.txt?raw=1').content.decode('ISO-8859-7').split('\r\n') stop.extend(grstopwords) definiteStop = 
['gt','amp','rt','via'] stop.extend(definiteStop) if not os.path.exists(self.dataset_path + '/data/tmp/'+self.adaptStr+'/datasetCorpus_prev'+str(prevTimeslots)+self.fileTitle +'.pck'): idf,idfBigram = self.corpusExtraction(prevTimeslots)#rankedCommunities[:numTopComms]) else: idf = pickle.load(open(self.dataset_path + '/data/tmp/'+self.adaptStr+'/datasetCorpus_prev'+str(prevTimeslots)+self.fileTitle +'.pck', 'rb')) idfBigram = pickle.load(open(self.dataset_path + '/data/tmp/'+self.adaptStr+'/datasetBigramsCorpus_prev'+str(prevTimeslots)+self.fileTitle +'.pck', 'rb')) print('loaded word corpus from file') if not os.path.exists(self.dataset_path + '/data/tmp/'+self.adaptStr+'/datasetHashtagCorpus_prev'+str(prevTimeslots)+self.fileTitle +'.pck'): idfHashtag = self.hashtagCorpusExtraction(prevTimeslots)#rankedCommunities[:numTopComms]) else: idfHashtag = pickle.load(open(self.dataset_path + '/data/tmp/'+self.adaptStr+'/datasetHashtagCorpus_prev'+str(prevTimeslots)+self.fileTitle +'.pck', 'rb')) print('loaded hashtag corpus from file') #------------------------- '''Writing ranked communities to json files + MongoDB''' dataset_name=self.dataset_path.split('/') dataset_name=dataset_name[-1]+self.adaptStr #Mongo-------------------- # try: # client = MongoClient('160.40.50.236') # db = client[dataset_name] # dyccos=db.dyccos # except: # print('mongo client is dead') # pass #------------------------- jsondata = dict() jsondata['ranked_communities'] = [] jsondata['datasetInfo'] = {'allTimeslots':self.timeLimit, 'limits':{'min':10000,'max':45000,'usersmin':10,'usersmax':1000,'centmin':1,'centmax':30,'conmin':20,'conmax':200,'fixed':2}} ''' min - distance from left border max - distance from right border usersmin - min population in comms usersmax - max population in comms centmin - centrality minimum centmax - centrality max conmin - min num of connections/edges conmax - max num of connections/edges fixed - centrality accuracy in digits ''' rankedCommunitiesFinal = {} bigramEntropy = {} for rank, rcomms in enumerate(rankedCommunities[:numTopComms]): tmslUsrsCentral, tmslUsrsProfPics, hashtagList, keywordList, bigramList, tmptweetids, commTwText, urlList, domainList, topic, tmpkeywrds = [], [], [], [], [], [], [], [], [], [], [] strRank = str(rank)#'{0}'.format(str(rank).zfill(2)) rankedCommunitiesFinal[strRank] = [rcomms] rankedCommunitiesFinal[strRank].append(commRanking[rcomms]) # rankedCommunitiesFinal[strRank].append(uniCommIdsEvol[rcomms][3]) timeSlotApp = [self.timeLimit[x] for x in uniCommIdsEvol[rcomms][0]] timeStmp_Centrality_Dict = {str(k):0 for k in self.timeLimit} communitySizePerSlot = {str(k):0 for k in self.timeLimit} communityEdgesPerSlot = {str(k):0 for k in self.timeLimit} communityKeywordsPerSlot = {str(k):[] for k in self.timeLimit} communityBigramsPerSlot = {str(k):[] for k in self.timeLimit} communityTagsPerSlot = {str(k):[] for k in self.timeLimit} communityUrlsPerSlot = {str(k):[] for k in self.timeLimit} communityDomainsPerSlot = {str(k):[] for k in self.timeLimit} communityTweetsPerSlot = {str(k):[] for k in self.timeLimit} usersCentralityPerSlot = {str(k):[] for k in self.timeLimit} commUserDict = {k:[] for k in range(len(self.timeLimit))} print('Building json for dynComm: '+str(rcomms)+' ranked '+str(strRank)+' via value '+str(commRanking[rcomms])) for tmsl, users in enumerate(uniCommIdsEvol[rcomms][3]): if tmsl>0 and uniCommIdsEvol[rcomms][0][tmsl] == uniCommIdsEvol[rcomms][0][tmsl-1] and uniCommIdsEvol[rcomms][2][tmsl] < uniCommIdsEvol[rcomms][2][tmsl-1]: continue#ensure 
that the community with the biggest size goes to print... timeStmp_Centrality_Dict[str(self.timeLimit[uniCommIdsEvol[rcomms][0][tmsl]])] = uniCommIdsEvol[rcomms][5][tmsl] communitySizePerSlot[str(self.timeLimit[uniCommIdsEvol[rcomms][0][tmsl]])] = uniCommIdsEvol[rcomms][2][tmsl] '''tmp script for edge computation. normally it would result straight from the extraction def''' lines = self.adjListBag[uniCommIdsEvol[rcomms][0][tmsl]] tmpNumEdges = 0 for l in lines: if l[0] in users and l[1] in users: tmpNumEdges += int(l[2]) communityEdgesPerSlot[str(self.timeLimit[uniCommIdsEvol[rcomms][0][tmsl]])] = tmpNumEdges tmpHashtagBag = self.commHashtagBag[rcomms][tmsl]#hashtags for only this slot if tmpHashtagBag: tmppopHashtags = [x.lower() for x in tmpHashtagBag] tmppopHashtags = collections.Counter(tmppopHashtags) communityTagsPerSlot[str(self.timeLimit[uniCommIdsEvol[rcomms][0][tmsl]])] = tfidf.comm_tfidf(tmppopHashtags,idfHashtag,10) else: tmppopHashtags = {} hashtagList.append(list(tmppopHashtags.keys()))#hashtags for each slot tmpURLBagAll = [re.sub('^https?://','',x).rstrip('/') for x in self.commUrlBag[rcomms][tmsl] if x]#urls for only this slot; strip the scheme with a regex (str.lstrip removes arbitrary leading characters, not a prefix) if tmpURLBagAll: # tmppopUrls = [x for x in list(itertools.chain.from_iterable(tmpURLBag))] tmpURLBag = collections.Counter(tmpURLBagAll) communityUrlsPerSlot[str(self.timeLimit[uniCommIdsEvol[rcomms][0][tmsl]])] = tmpURLBag.most_common(10) else: tmpURLBag = {} tmpDomainBagAll = [urllib.parse.urlparse(x).netloc.lower() for x in self.commUrlBag[rcomms][tmsl] if x]#urls for only this slot if tmpDomainBagAll: # tmppopUrls = [x for x in list(itertools.chain.from_iterable(tmpURLBag))] tmpDomainBag = collections.Counter(tmpDomainBagAll) communityDomainsPerSlot[str(self.timeLimit[uniCommIdsEvol[rcomms][0][tmsl]])] = tmpDomainBag.most_common(10) else: tmpDomainBag = {} # urlList.append(list(tmpURLBag.keys())) # uncomment this to find pop urls over timeslots urlList.append(tmpURLBagAll) # uncomment this to find pop urls overall domainList.append(tmpDomainBagAll) # uncomment this to find pop urls overall commUserDict[uniCommIdsEvol[rcomms][0][tmsl]] = users croppedUsers = list(set(users).difference(list(self.usernameProfPicDict.keys()))) userbatches = [croppedUsers[x:x+100] for x in range(0, len(croppedUsers), 100)] #Retrieve user avatars for screenNameList in userbatches: comma_separated_string = ','.join(screenNameList) error = '429' while '429' in error: try: output = mytwitter[random.randint(0,2)].lookup_user(screen_name=comma_separated_string) for user in output: self.usernameProfPicDict[user['screen_name']] = user['profile_image_url'].replace('_normal','') error = 'ok' except twython.exceptions.TwythonError as er: error = str(er) if '429' in error: print('delaying for batch api...') time.sleep(5*60+2) pass uscentr = [] good = 0 for us in users: if us not in self.usernameProfPicDict or not self.usernameProfPicDict[us]: self.usernameProfPicDict[us] = '' else: good +=1 uscentr.append([us, self.userPgRnkBag[uniCommIdsEvol[rcomms][0][tmsl]][us]]) uscentr = sorted(uscentr, key=itemgetter(1), reverse=True) usersCentralityPerSlot[str(self.timeLimit[uniCommIdsEvol[rcomms][0][tmsl]])] = uscentr[:10] tmptweetText = [' '.join([i for i in regex2.findall(regex1.sub('',x.lower())) if i and not i.startswith(('rt','htt','(@','\'@','t.co')) and i not in definiteStop]) for x in self.commTweetBag[rcomms][tmsl]] tmptweetText = [x for x in tmptweetText if x] seen = set() seen_add = seen.add tmptweetText2 = [x for x in tmptweetText if x not in seen and not seen_add(x)]
popTweets = collections.Counter(tmptweetText) communityTweetsPerSlot[str(self.timeLimit[uniCommIdsEvol[rcomms][0][tmsl]])] = popTweets.most_common(10) #topic extraction topicList = ' '.join(tmptweetText) topicList = topicList.lower() # topicList = regex1.sub('', topicList) topicList = regex2.findall(topicList) if len(set(topicList)) > 5: for i in list(topicList): if len(i)<=2 or i in stop: del topicList[topicList.index(i)] else: for i in list(topicList): if i in definiteStop or not i: del topicList[topicList.index(i)] if not topicList: topicList = ['noText','OnlyRefs'] topicBigrams = [' '.join(x) for x in list(nltk.bigrams(topicList))] topicListCounted = collections.Counter(topicList) topicBigramsCounted = collections.Counter(topicBigrams) timeSlLen=len(uniCommIdsEvol[rcomms][0]) tmpTopic=tfidf.comm_tfidf(topicListCounted,idf,10) tmpBigrams=tfidf.comm_tfidf(topicBigramsCounted,idfBigram,10) communityKeywordsPerSlot[str(self.timeLimit[uniCommIdsEvol[rcomms][0][tmsl]])] = tmpTopic communityBigramsPerSlot[str(self.timeLimit[uniCommIdsEvol[rcomms][0][tmsl]])] = tmpBigrams keywordList.append(list(topicListCounted.keys())) bigramList.append(list(topicBigramsCounted.keys())) try: hashtagList = list(itertools.chain.from_iterable(hashtagList)) except: pass if hashtagList: popHashtags = [x.lower() for x in hashtagList] popHashtags = collections.Counter(popHashtags) popHashtags=tfidf.comm_tfidf(popHashtags,idfHashtag,10) # popHashtags = popHashtags.most_common(10) else: popHashtags=[] if urlList: urlList=[x for x in list(itertools.chain.from_iterable(urlList)) if x] popUrls = collections.Counter(urlList) popUrls = popUrls.most_common(10) else: popUrls=[] if domainList: domainList=[x for x in list(itertools.chain.from_iterable(domainList)) if x] popDomains = collections.Counter(domainList) popDomains = popDomains.most_common(10) else: popDomains=[] # commTweetIds = list(set(tmptweetids)) try: keywordList = list(itertools.chain.from_iterable(keywordList)) except: pass if keywordList: # popKeywords = [x.lower() for x in keywordList] popKeywords = collections.Counter(keywordList) popKeywords=tfidf.comm_tfidf(popKeywords,idf,10) else: popKeywords=[] try: bigramList = list(itertools.chain.from_iterable(bigramList)) except: pass if bigramList: popBigrams = [x.lower() for x in bigramList] popBigrams = collections.Counter(popBigrams) popBigrams=tfidf.comm_tfidf(popBigrams,idfBigram,10) else: popBigrams=[] dyccoDict = [{ 'timestamp':str(k), 'commCentrality':timeStmp_Centrality_Dict[str(k)], 'commSize':communitySizePerSlot[str(k)], 'commKeywords':communityKeywordsPerSlot[str(k)], 'connectionsNum':communityEdgesPerSlot[str(k)], 'communityBigramsPerSlot':communityBigramsPerSlot[str(k)], 'usersCentrality':usersCentralityPerSlot[str(k)], 'commHashTags':communityTagsPerSlot[str(k)], 'commUrls':communityUrlsPerSlot[str(k)], 'commDomains':communityDomainsPerSlot[str(k)], 'commTweets':communityTweetsPerSlot[str(k)]} for k in self.timeLimit] self.buildDynCommGraphFiles(strRank, commUserDict,prevTimeslots) dycco={ 'communityLabels': uniCommIdsEvol[rcomms][8], 'DyCContainer': dyccoDict, 'avgDyccoCentrality': rankingDict[rcomms]['commCentralityNormed'], 'dyccoPopHashtags': popHashtags, 'dyccoPopUrls': popUrls, 'dyccoPopDomains': popDomains, 'dyccoPopKeywords': popKeywords, 'dyccoPopBigrams': popBigrams} jsondycco=dycco.copy() # dyccos.insert(dycco) jsondata['ranked_communities'].append(jsondycco) twitterDataFile = open(self.dataset_path +
'/data/results/'+self.adaptStr+'/'+self.dirName+'communities_prev'+str(prevTimeslots)+self.fileTitle+'.json', 'w')#, encoding='utf-8-sig') twitterDataFile.write(json.dumps(jsondata, sort_keys=True))#,ensure_ascii=False).replace('\u200f','')) twitterDataFile.close() webDrawFile = open('./Com_Graph/web/jsons/'+self.dirName+'communities.json', 'w') webDrawFile.write(json.dumps(jsondata, sort_keys=True)) webDrawFile.close() dataEvolPck = open(self.dataset_path + '/data/tmp/'+self.adaptStr+'/dataEvol_prev'+str(prevTimeslots)+self.fileTitle+'.pck', 'wb') pickle.dump(self, dataEvolPck, protocol = 2) dataEvolPck.close() usernameProfPicDictPck = open(self.dataset_path + '/data/tmp/usernameProfPicDict.pck', 'wb') # store the dictionary, for future reference pickle.dump(self.usernameProfPicDict, usernameProfPicDictPck, protocol = 2) usernameProfPicDictPck.close() self.simpleEntropyDict = simpleEntropyDict makefigures(whichmethod,commSizeHeatData,self.fileTitle,self.day_month,commRanking,numTopComms,timeslots,uniCommIdsEvol,rankedCommunities,self.commPerTmslt,self.uniCommIds,prevTimeslots,self.dataset_path,self.xLablNum, self.adaptStr,commUrlCategoryHeatmap,catsPerComm,simpleEntropyDict,bigramEntropyDict) return rankedCommunitiesFinal def buildDynCommGraphFiles(self, strRank, commUserDict,prevTimeslots): # print('Creating a json containing the graphs for dynamic community: '+str(int(strRank)+1)) '''make and save dynamic community json files''' if not os.path.exists(self.dataset_path + '/data/results/' + self.adaptStr +'/partialGraphs/prev'+str(prevTimeslots)+ self.fileTitle): os.makedirs(self.dataset_path + '/data/results/' + self.adaptStr +'/partialGraphs/prev'+str(prevTimeslots)+ self.fileTitle) allUsers = list(set(itertools.chain.from_iterable(list(commUserDict.values())))) allUsers.sort() allUsernames = [] for name in allUsers: if name in self.tweetDict['userDict']: allUsernames.append({'screen_name':name,'avatar':self.usernameProfPicDict[name],'id':self.tweetDict['userDict'][name]['id'], 'followers_count':self.tweetDict['userDict'][name]['followers_count'],'listed_count':self.tweetDict['userDict'][name]['listed_count'], 'friends_count':self.tweetDict['userDict'][name]['friends_count'],'description':self.tweetDict['userDict'][name]['description'], 'name':self.tweetDict['userDict'][name]['name'],'location':self.tweetDict['userDict'][name]['location'], 'statuses_count':self.tweetDict['userDict'][name]['statuses_count']}) else: allUsernames.append({'screen_name':name,'avatar':self.usernameProfPicDict[name],'id':'','followers_count':'','listed_count':'','friends_count':'','description':'','name':''}) jsondata = {'datasetInfo':{'allUsernames':allUsernames},'connections':[]} allTmsls = sorted(list(commUserDict.keys())) appearingTmsls = [x for x in list(commUserDict.keys()) if commUserDict[x]] for tmsl in allTmsls: if tmsl in appearingTmsls: lines = self.adjListBag[tmsl] tmpConnections = [] # tmpNumEdges = 0 for l in lines: if l[0] in commUserDict[tmsl] and l[1] in commUserDict[tmsl] and l[0]!=l[1]: tmpConnections.append(l[0]+';'+l[1]+';'+str(l[2])) # tmpNumEdges += l[2] jsondata['connections'].append({'timestamp_connections':tmpConnections}) # jsondata['edges'].append({'timestamp_connections':tmpNumEdges}) else: jsondata['connections'].append({'timestamp_connections':[]}) # jsondata['edges'].append({'timestamp_connections':0}) twitterDataFile = open(self.dataset_path + '/data/results/' + self.adaptStr +'/partialGraphs/prev'+str(prevTimeslots)+ self.fileTitle + '/'+self.dirName+'users' + 
str(int(strRank)+1) +'.json', 'w')#, encoding='utf-8-sig') twitterDataFile.write(json.dumps(jsondata, sort_keys=True))#,ensure_ascii=False).replace('\u200f','')) twitterDataFile.close() webDrawDataFile = open('./Com_Graph/web/jsons/'+self.dirName+'users' + str(int(strRank)+1) +'.json', 'w') webDrawDataFile.write(json.dumps(jsondata, sort_keys=True))#,ensure_ascii=False).replace('\u200f','')) webDrawDataFile.close() def corpusExtraction(self,prevTimeslots): from nltk.corpus import stopwords from math import log import nltk print('Extracting dataset corpus') stop = stopwords.words('english') if self.dirName.startswith('greek'): grstopwords=pickle.load(open('./globalDics/greek_stopwords.pck', 'rb')) stop.extend(grstopwords) stop.extend(['gt','amp','rt','via']) stop.sort() textList, bigramList = [], [] # cntr=0 regex1 = re.compile("(?:\@|#|https?\://)\S+",re.UNICODE) regex2 = re.compile("\w+'?\w+",re.UNICODE) for k,v in self.commTweetBag.items(): bagitems = [regex2.findall(regex1.sub('',' '.join(list(set(x))).lower())) for x in v] for commWords in bagitems: tmpTopicCC = [i for i in commWords if len(i)>2 and not i.startswith(('htt','(@','\'@','t.co')) and i not in stop] textList.append(list(set(tmpTopicCC))) bigramTopicCC = [' '.join(x) for x in list(nltk.bigrams(tmpTopicCC))] bigramList.append(list(set(bigramTopicCC))) # print(cntr) allWords=list(itertools.chain.from_iterable(textList)) countAllWords = collections.Counter(allWords) allBigrams = list(itertools.chain.from_iterable(bigramList)) countAllBigrams = collections.Counter(allBigrams) dictTokens, dictBigramTokens = {},{} textListLength = len(textList) for word in set(allWords): dictTokens[word]=log(textListLength/(1+countAllWords[word])) for bigr in set(allBigrams): dictBigramTokens[bigr]=log(textListLength/(1+countAllBigrams[bigr])) dictTokensPck = open(self.dataset_path + '/data/tmp/'+self.adaptStr +'/datasetCorpus_prev'+str(prevTimeslots)+ self.fileTitle +'.pck', 'wb') # store the dictionary, for future reference pickle.dump(dictTokens, dictTokensPck, protocol = 2) dictTokensPck.close() dictTokensPck = open(self.dataset_path + '/data/tmp/'+self.adaptStr +'/datasetBigramsCorpus_prev'+str(prevTimeslots)+ self.fileTitle +'.pck', 'wb') # store the dictionary, for future reference pickle.dump(dictBigramTokens, dictTokensPck, protocol = 2) dictTokensPck.close() print('Extracted %s words and %s bigrams' %(len(dictTokens),len(dictBigramTokens))) return dictTokens, dictBigramTokens def hashtagCorpusExtraction(self,prevTimeslots): from math import log print('Extracting hashtag corpus') fullList = [] for k,v in self.commHashtagBag.items(): listofcomms = [set([y.lower() for y in x if len(y)>2]) for x in v] fullList.extend(listofcomms) # print(cntr) allTags=set(list(itertools.chain.from_iterable(fullList))) dictTokens={} for word in allTags: wordCount=0 for tmptextlist in fullList: if word in tmptextlist: wordCount+=1 dictTokens[word]=log(len(fullList)/(1+wordCount)) dictTokensPck = open(self.dataset_path + '/data/tmp/'+self.adaptStr +'/datasetHashtagCorpus_prev'+str(prevTimeslots)+ self.fileTitle +'.pck', 'wb') # store the dictionary, for future reference pickle.dump(dictTokens, dictTokensPck, protocol = 2) dictTokensPck.close() print('Extracted %s hashtags' %len(dictTokens)) return dictTokens def urlDictionaryUpdate(self,rankedCommunities): import urllib.parse import goslate from urllib.request import urlopen import unshortenCommUrls import xml.etree.ElementTree as et t=time.time() try: postsForQueue = pickle.load(open(self.dataset_path + 
'/data/tmp/commsUrls.pck','rb')) except: postsForQueue = {} pass try: urlDict = pickle.load(open(self.dataset_path + '/data/tmp/urlDict.pck','rb')) except: urlDict = {} pass for Id in rankedCommunities: for commUrls in self.commUrlBag[Id]: for url in set(commUrls): if url and url not in postsForQueue: if url in urlDict: postsForQueue[url] = {'trueUrl':urlDict[url],'domain':urllib.parse.urlparse(urlDict[url]).netloc.lower()} else: postsForQueue[url] = {'trueUrl':url,'domain':urllib.parse.urlparse(url).netloc.lower()} postsForQueue = unshortenCommUrls.unshrinkUrlsInParallel(postsForQueue,self.dataset_path) gs = goslate.Goslate() #Get shrinked urls shrinkedUrls = codecs.open('./globalDics/allShrinks.txt','r','utf-8').readlines() shrinkedUrls = [x.strip().lower() for x in shrinkedUrls] shrinkedUrls = list(set(shrinkedUrls)) shrinkedUrls.sort() try: domainDict = pickle.load(open('./globalDics/catCommDomDict.pck','rb'))#load domain dictionary if '' in domainDict: del(domainDict['']) except: domainDict = {} print('no domainDict') pass try: urlCategoryDict = pickle.load(open(self.dataset_path + '/data/tmp/urlCategoryDict.pck','rb')) except: urlCategoryDict = {} print('no urlCategoryDict') pass try: wordTranslator = pickle.load(open('./globalDics/wordTranslator.pck','rb')) except: wordTranslator = {} print('no wordTranslator') pass #Make lists of categories from category files catfiles = [f[:-4] for f in os.listdir('./url_corpus/categories/') if f.endswith('.txt') and not f.startswith('shrinks')] for catnames in catfiles: vars()[catnames] = codecs.open('./url_corpus/categories/'+catnames+'.txt','r').readlines() # vars()[filed+'Full'] = [] vars()[catnames] = [x.strip() for x in vars()[catnames]] elapsed = time.time() - t print('Elapsed: %.2f seconds' % elapsed) t=time.time() commUrlCategory = {} for Id in rankedCommunities: commUrlCategory[Id] = [] for idxC,commUrls in enumerate(self.commUrlBag[Id]): commUrls = [x for x in commUrls if x] tmpCats = [] for idxU,url in enumerate(commUrls): try: self.commUrlBag[Id][idxC][idxU] = postsForQueue[url]['trueUrl'] trueUrl = postsForQueue[url]['trueUrl'] domain = postsForQueue[url]['domain'] except: trueUrl = url domain = urllib.parse.urlparse(url).netloc.lower() print(url +' not in dictionary') pass if trueUrl not in urlCategoryDict: for catnames in catfiles: for cat in vars()[catnames]: if cat.lower() in trueUrl.lower(): if trueUrl not in urlCategoryDict: urlCategoryDict[trueUrl] = [catnames] else: urlCategoryDict[trueUrl].append(catnames) if trueUrl not in urlCategoryDict: if domain not in domainDict: try: dataFromDom = urlopen('http://data.alexa.com/data?cli=10&url='+domain).read() data = et.fromstring(dataFromDom) domainDict[domain] = [] # initialize before extending, otherwise the KeyError is silently swallowed below for cat in data.iter('CAT'): # print (cat.attrib['ID']) domainDict[domain].extend(cat.attrib['ID'].split('/')) tmpDom = [x.lower().replace('_',' ').replace('-',' ') for x in list(set(domainDict[domain])) if len(x)>2] for widx, w in enumerate(tmpDom.copy()): if w not in wordTranslator: wordTranslator[w] = gs.translate(w, 'en') tmpDom[widx] = wordTranslator[w] domainDict[domain] = tmpDom except:# UnicodeEncodeError: print('errored alexa') pass if domain in domainDict: if trueUrl not in urlCategoryDict: urlCategoryDict[trueUrl] = domainDict[domain] else: urlCategoryDict[trueUrl].extend(domainDict[domain]) try: tmpCats.extend(urlCategoryDict[trueUrl]) except: pass tmpCats = [x for x in tmpCats if x] countTmpCats = collections.Counter(tmpCats) sortedTmpCats = sorted(countTmpCats, key=lambda k: [countTmpCats[k],len(k)], reverse = True) if
sortedTmpCats: commUrlCategory[Id].append(sortedTmpCats) else: commUrlCategory[Id].append(['none']) elapsed = time.time() - t print('Elapsed: %.2f seconds' % elapsed) urlCategoryDictfile = open(self.dataset_path + '/data/tmp/urlCategoryDict.pck', 'wb') pickle.dump(urlCategoryDict, urlCategoryDictfile, protocol = 2) urlCategoryDictfile.close() wordTranslatorDictfile = open('./globalDics/wordTranslator.pck', 'wb') pickle.dump(wordTranslator, wordTranslatorDictfile, protocol = 2) wordTranslatorDictfile.close() domCatPck = open('./globalDics/catCommDomDict.pck','wb') pickle.dump(domainDict, domCatPck) domCatPck.close() return commUrlCategory def makefigures(whichmethod,commSizeHeatData,fileTitle,day_month,commRanking,numTopComms,timeslots,uniCommIdsEvol,rankedCommunities,commPerTmslt,uniCommIds,prevTimeslots,dataset_path,xLablNum, adaptStr,commUrlCategoryHeatmap,catsPerComm,simpleEntropyDict,bigramEntropyDict): print('method selected is: '+whichmethod) if not os.path.exists(dataset_path + '/data/results/figs/'+adaptStr): os.makedirs(dataset_path + '/data/results/figs/'+adaptStr) if not os.path.exists(dataset_path + '/data/tmp/figs/'+adaptStr): os.makedirs(dataset_path + '/data/tmp/figs/'+adaptStr) '''Label parameters''' pertick=int(np.ceil(timeslots/xLablNum)) if numTopComms>len(rankedCommunities): numTopComms=len(rankedCommunities) row_labels = day_month#(range(timeslots)) column_labels = list(range(numTopComms)) column_labels2 = rankedCommunities[:numTopComms] #line styles style, color = ['*', '+', 'o','d','h','p'], ['g','r','m','c', 'y', 'k'] '''Categories per all communities''' allCats = list(itertools.chain.from_iterable(list(catsPerComm.values()))) countCats = collections.Counter(allCats) sortCats = sorted(countCats, key=countCats.get, reverse=True) for cat in sortCats: if countCats[cat] > 1: print(cat+'\t'+str(countCats[cat])) allCats = list(itertools.chain.from_iterable(list(catsPerComm.values()))) countCats = collections.Counter(allCats) sortCats = sorted(countCats, key=countCats.get, reverse=True) sortedVals = sorted(list(countCats.values()), reverse=True) # fig6, ax6 = plt.subplots() # ax6.stem(sortedVals, 'b-') # ax6.set_xticks(range(len(sortedVals))) # ax6.set_xticklabels(sortCats,fontsize=7, rotation = 30) # for tick in ax6.yaxis.get_major_ticks(): # tick.label.set_fontsize(7) # xmin, xmax = plt.xlim() # plt.xlim( -1, xmax+1 ) # plt.ylabel('Frequency') # plt.xlabel('Categories') # plt.tight_layout() # fig6 = plt.gcf() # plt.draw() # fig6.savefig(dataset_path + '/data/results/figs/'+adaptStr +'/categsIn'+str(numTopComms)+'FirstComms' + str(prevTimeslots) + fileTitle + '_' + whichmethod + '.pdf',bbox_inches='tight', format='pdf') # plt.close() # del(fig6) # print('Finished with category frequency fig') '''Number of communities/timeslot''' fig3, ax3 = plt.subplots() ax3.plot(commPerTmslt, 'b-') ax3.set_xticks(np.arange(0,len(commPerTmslt),pertick), minor=False) ax3.set_xticklabels(row_labels[0::pertick], minor=False, fontsize=7, rotation = 30) for tick in ax3.yaxis.get_major_ticks(): tick.label.set_fontsize(7) xmin, xmax = plt.xlim() # plt.xlim( 0, xmax+1 ) plt.ylabel('Community Number Fluctuation') plt.xlabel('Timeslots') plt.tight_layout() fig3 = plt.gcf() plt.draw() fig3.savefig(dataset_path + '/data/results/figs/'+adaptStr +'/commNumberFlux_prev' + str(prevTimeslots) + fileTitle + '.pdf',bbox_inches='tight', format='pdf') plt.close() del(fig3) print('Finished with number of communities\' fluctuation fig') '''Make community size evolution heatmap''' fig2, ax = plt.subplots() 
heatmap = ax.pcolormesh(commSizeHeatData, cmap=plt.cm.gist_gray_r) ax.set_xticks(np.arange(0,commSizeHeatData.shape[1],pertick), minor=False) plt.xlim(xmax=timeslots) ax.xaxis.tick_top() ax.set_xticklabels(row_labels[0::pertick], minor=False,fontsize=8) ax.set_yticks(np.arange(commSizeHeatData.shape[0]), minor=False) plt.ylim(ymax=numTopComms) ax.invert_yaxis() ax.set_yticklabels(column_labels, minor=False, fontsize=7) plt.ylabel('Ranked Communities (Best ' + str(numTopComms) + ')') ax2 = ax.twinx() ax2.set_yticks(np.arange(commSizeHeatData.shape[0]), minor=False) plt.ylim(ymax=numTopComms) ax2.invert_yaxis() ax2.set_yticklabels(column_labels2, minor=False, fontsize=7) plt.xlabel('Timeslot', {'verticalalignment': 'top'}) if numTopComms < 101: plt.grid(axis='y') fig2 = plt.gcf() plt.tight_layout() mng = plt.get_current_fig_manager() mng.resize(*mng.window.maxsize()) plt.draw() fig2.savefig(dataset_path + '/data/results/figs/'+adaptStr +'/communitySizeHeatmap_prev' + str(prevTimeslots) + fileTitle + '_' + whichmethod + '.pdf',bbox_inches='tight', format='pdf') plt.close() print('Finished with heat map fig') '''Make community size evolution color heatmap''' fig5, ax5a = plt.subplots() heatmap = ax5a.pcolor(commSizeHeatData,cmap=plt.cm.Blues) for y in range(commSizeHeatData.shape[0]): for x in range(commSizeHeatData.shape[1]): try: plt.text(x + 0.5, y + 0.5, '%s' % commUrlCategoryHeatmap[y][x].lower(), horizontalalignment='center',verticalalignment='center',fontsize = 5, rotation = 35)#.decode('utf-8') .encode('utf-8').decode('utf-8-sig') # print(commUrlCategoryHeatmap[y][x].lower()) except KeyError: plt.text(x + 0.5, y + 0.5, '',horizontalalignment='center',verticalalignment='center',fontsize = 5, rotation = 35) pass # plt.colorbar(heatmap) ax5a.set_xticks(np.arange(0,commSizeHeatData.shape[1],pertick), minor=False) plt.xlim(xmax=timeslots) ax5a.xaxis.tick_top() ax5a.set_xticklabels(row_labels[0::pertick], minor=False,fontsize=8) ax5a.set_yticks(np.arange(commSizeHeatData.shape[0]), minor=False) plt.ylim(ymax=numTopComms) ax5a.invert_yaxis() ax5a.set_yticklabels(column_labels, minor=False, fontsize=7) plt.ylabel('Ranked Communities (Best ' + str(numTopComms) + ')') ax5b = ax5a.twinx() ax5b.set_yticks(np.arange(commSizeHeatData.shape[0]), minor=False) plt.ylim(ymax=numTopComms) ax5b.invert_yaxis() ax5b.set_yticklabels(column_labels2, minor=False, fontsize=7) plt.xlabel('Timeslot', {'verticalalignment': 'top'}) if numTopComms < 101: plt.grid(axis='y') fig5 = plt.gcf() plt.tight_layout() mng = plt.get_current_fig_manager() mng.resize(*mng.window.maxsize()) plt.draw() fig5.savefig(dataset_path + '/data/results/figs/'+adaptStr +'/communitySizeColorHeatmap_prev' + str(prevTimeslots) + fileTitle + '_' + whichmethod + '.pdf',bbox_inches='tight', format='pdf') plt.close() print('Finished with heat colormap fig') '''Text entropy flux''' font = {'size': 12} plt.rc('font', **font) fig6, ax6 = plt.subplots() colormap = plt.cm.gist_ncar plt.gca().set_color_cycle([colormap(i) for i in np.linspace(0, 0.9, len(rankedCommunities[:numTopComms]))]) entropySum = [0]*timeslots for Id in rankedCommunities[:numTopComms]: commEntropy = [0]*timeslots myxaxis=[] for idx,timesteps in enumerate(uniCommIdsEvol[Id][0]): if not commEntropy[timesteps]: commEntropy[timesteps] = bigramEntropyDict[Id][idx]+0.000001 myxaxis.append(timesteps) else: commEntropy[timesteps] = max((bigramEntropyDict[Id][idx]),commEntropy[timesteps]) commEntropyNew=[] for x in commEntropy: if x: commEntropyNew.append(x) entropySum = [x + y for x, y in 
zip(entropySum, commEntropy)] # print(Id) # print(myxaxis) # print(commEntropyNew) plt.plot(myxaxis,commEntropyNew)#, hold=True) ax6.set_xticks(np.arange(0,len(commPerTmslt),pertick), minor=False) ax6.set_xticklabels(row_labels[0::pertick], minor=False, fontsize=12, rotation = 30) plt.ylabel('Community text entropy') plt.xlabel('Timeslots') plt.tight_layout() fig6 = plt.gcf() plt.draw() fig6.savefig(dataset_path + '/data/results/figs/'+adaptStr +'/commEntropyFlux_prev' + str(prevTimeslots) + fileTitle + '_' + whichmethod + '.pdf',bbox_inches='tight', format='pdf') plt.close() del(fig6) print('Finished with community entropy fluctuation fig') '''sum of entropies ''' methodfiles = [f[:-4] for f in os.listdir(dataset_path + '/data/tmp/figs/'+adaptStr) if f.endswith('.pck')] entropySum = [x/numTopComms for x in entropySum] font = {'size': 12} plt.rc('font', **font) fig7, ax7 = plt.subplots() ymax = 0 sumofSumsofEntropy = {} for styleIdx, methodname in enumerate(methodfiles): if methodname == whichmethod: continue oldentropy = pickle.load(open(dataset_path + '/data/tmp/figs/'+adaptStr +'/' + methodname + '.pck','rb')) plt.plot(oldentropy,linestyle='None', marker=style[styleIdx], color=color[styleIdx], label = methodname) #linestyles[styleIdx] ymax = max([ymax,max(oldentropy)]) sumofSumsofEntropy[methodname] = sum(oldentropy) plt.plot(entropySum,linestyle='None', marker=r'$\bowtie$', color='b', label = whichmethod) plt.ylim(ymax = int(np.ceil(max([max(entropySum),ymax])))) plt.xlim(xmin = -1 , xmax = len(entropySum)) ax7.set_xticks(np.arange(0,len(commPerTmslt),pertick), minor=False) ax7.set_xticklabels(row_labels[0::pertick], minor=False, fontsize=12, rotation = 30) plt.ylabel('Sum of entropy') plt.xlabel('Timeslots') plt.legend() plt.tight_layout() fig7 = plt.gcf() plt.draw() fig7.savefig(dataset_path + '/data/results/figs/'+adaptStr +'/dyCCoEntropyFlux_prev' + str(prevTimeslots) + fileTitle + '.pdf',bbox_inches='tight', format='pdf') plt.close() del(fig7) pickle.dump(entropySum, open(dataset_path + '/data/tmp/figs/'+adaptStr +'/' + whichmethod + '.pck', 'wb'), protocol = 2) sumofSumsofEntropy[whichmethod] = sum(entropySum) for k in sumofSumsofEntropy.keys(): print(k + "'s sum of entropy is: "+str(sumofSumsofEntropy[k])) def product(mylist): p = 1 for i in mylist: p *= i return p def recRank(mylist):#Perform the Reciprocal Rank Fusion for a list of rank values finscore = [] mylist=[x+1 for x in mylist] for rank in mylist: finscore.append(1/(rank)) return sum(finscore) def intersectComms(clmns2, prevComms, tempcommSize, bag1, thres): # clmns2, prevComms = clmns2prevComms[0], clmns2prevComms[1] # print('tempcommSize '+str(tempcommSize)+'\n') # print('bag1 '+','.join([str(x) for x in bag1])+'\n') # print('clmns2 '+str(clmns2)+'\n') # print('prevComms '+','.join([str(x) for x in prevComms])+'\n') # print('thres '+str(thres)+'\n') # time.sleep(60) if thres > (len(prevComms) / tempcommSize) or thres > (tempcommSize / len(prevComms)): interResult = False else: sim = len(set(bag1).intersection(prevComms)) / len(set(np.append(bag1, prevComms))) if sim >= thres: interResult = sim else: interResult = False return clmns2, interResult def rankdata(a): n = len(a) ivec=sorted(range(len(a)), key=a.__getitem__) svec=[a[rank] for rank in ivec] sumranks = 0 dupcount = 0 newarray = [0]*n for i in range(n): sumranks += i dupcount += 1 if i==n-1 or svec[i] != svec[i+1]: averank = sumranks / (dupcount) + 1 for j in range(i-dupcount+1,i+1): newarray[ivec[j]] = averank sumranks = 0 dupcount = 0 return newarray def 
myentropy(data): if not data: return 0 entropy = 0 for x in set(data): p_x = float(data.count(x))/len(data) if p_x > 0: entropy += -p_x*math.log(p_x, 2) return entropy def myselection(url,dataList): datadict={} for idx,i in enumerate(dataList): datadict[idx] = i print(url) print(datadict) myinput = int(input('Which should i select?')) return datadict[myinput]
dinos66/commRankingMine
CommunityRanking_v3.py
Python
apache-2.0
88,525
[ "Bowtie" ]
09e05126d083f8f1a102440b30d6701eb352a9dee064e706883f51c9d918bd9a
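The TISCI ranking in the file above fuses several per-feature rank lists (similarity, reciprocity, URL average, text entropy, perstability, normalized centrality, size) with reciprocal rank fusion via recRank. A minimal self-contained sketch of that fusion, using toy community ids and hypothetical feature rankings rather than anything taken from this dataset:

# Minimal sketch of the reciprocal rank fusion used by recRank above.
# The community ids and the three feature rankings are toy values;
# only the fusion formula (sum of 1/rank over 1-based ranks) mirrors the source.

def rec_rank(rank_positions):
    """Reciprocal Rank Fusion over 0-based list positions."""
    return sum(1.0 / (pos + 1) for pos in rank_positions)

# Three hypothetical per-feature rankings of four communities, best first.
ranked_by_size = ['c2', 'c0', 'c3', 'c1']
ranked_by_centrality = ['c0', 'c2', 'c1', 'c3']
ranked_by_entropy = ['c3', 'c0', 'c2', 'c1']

fused = {}
for comm in ['c0', 'c1', 'c2', 'c3']:
    fused[comm] = rec_rank([lst.index(comm) for lst in
                            (ranked_by_size, ranked_by_centrality, ranked_by_entropy)])

# A higher fused score means a more important community overall.
for comm in sorted(fused, key=fused.get, reverse=True):
    print(comm, round(fused[comm], 3))

A community that sits near the top of every list dominates the fused score, while a single first place cannot outweigh consistently poor ranks elsewhere, which is the usual motivation for choosing RRF over plain score averaging.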
# Copyright (C) 2015 Samuel Owen, Ivan Pechenezhskiy # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ This module could be used to create the waveforms that are used to populate the DAC boards. See the __main__ section of this file for examples. """ import collections import collections.abc import itertools import warnings import numpy as np import scipy.signal as ss import matplotlib import matplotlib.pyplot as plt import matplotlib.cbook warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation) from win32api import SetConsoleCtrlHandler import labrad.units as units def _flatten(iterable): """ De-nest a list of _WavePulses for convenience. Input: iterable: an iterable object. Output: list: de-nested list of _WavePulses. """ remainder = iter(iterable) while True: try: first = next(remainder) except StopIteration: return # PEP 479: do not let StopIteration escape a generator if (isinstance(first, collections.abc.Iterable) and not isinstance(first, _WavePulse)): remainder = itertools.chain(first, remainder) else: yield first class _WavePulse(): """ Base pulse class that contains shared methods. """ def _ns(self, time): """ Convert time to nanoseconds. Return an integer without any units attached. Input: time: physical or numerical (in ns) time value. Output: time: numerical time value in ns. """ if isinstance(time, units.Value): time = time['ns'] return int(np.round(time)) def _init_times(self, start=None, duration=None, end=None): """ Define the pulse start, end, and duration attributes. Inputs: start: start time of the pulse. duration: duration of the pulse. end: end time of the pulse. Output: None. """ if [start, duration, end].count(None) > 1: raise ValueError("A pair of time parameters is required " + "to define a pulse. These possible time " + "parameters are 'start', 'duration', and 'end'.") if start is not None: self.start = self._ns(start) if duration is not None: self.duration = self._ns(duration) if end is not None: self.end = self._ns(end) if start is None: self.start = self.end - self.duration + 1 if duration is None: self.duration = self.end - self.start + 1 if end is None: self.end = self.start + self.duration - 1 if self.start > self.end + 1: raise ValueError("The pulse ends before it starts: " + "the pulse starts at " + str(self.start) + " ns " + "and ends at " + str(self.end) + " ns.") if self.end - self.start + 1 != self.duration: raise ValueError("Inconsistent time parameters: the pulse" + " starts at " + str(self.start) + " ns, its " + "duration is " + str(self.duration) + " ns, while" + " the pulse is expected to end at " + str(self.end) + " ns.") def _amplitude(self, amplitude): """ Process the amplitude (strip units from the amplitude value). Input: amplitude: amplitude of the pulse. Output: amplitude: amplitude of the pulse. """ if isinstance(amplitude, units.Value): return amplitude[units.Unit(amplitude)] else: return float(amplitude) def _harmonic(self, frequency, phase): """ Process the pulse frequency and phase. Inputs: frequency: frequency of the harmonic pulse.
phase: phase of the harmonic pulse. Outputs: frequency: frequency of the harmonic pulse. phase: phase of the harmonic pulse. """ if isinstance(frequency, units.Value): frequency = frequency['GHz'] else: frequency = float(frequency) if isinstance(phase, units.Value): phase = phase['rad'] else: phase = float(phase) return frequency, phase def _check_pulse(self): """ Check whether the pulse amplitudes are in -1.0 to 1.0 range. Input: None. Output: None. """ if any(abs(self.pulse) > 1): raise ValueError('The pulse amplitude should not exceed 1.') def after(self, time=0): """ Time point after the pulse. Input: time: time delay after this pulse in ns. Output: time: absolute time. """ return self.end + 1 + self._ns(time) def before(self, time=0): """ Time point before the pulse. Input: time: time delay before this pulse in ns. Output: time: absolute time. """ return self.start - 1 - self._ns(time) class DC(_WavePulse): """ DC pulse. Inputs: amplitude: amplitude of the dc pulse. start: starting time of the dc pulse. duration: length of the dc pulse. end: ending time of the dc pulse. """ def __init__(self, amplitude=0, start=None, duration=None, end=None): self._init_times(start, duration, end) amplitude = self._amplitude(amplitude) self.pulse = np.full(self.duration, amplitude) self._check_pulse() class Sine(_WavePulse): """ Sine pulse. Inputs: amplitude: amplitude of the sine pulse (default: 0). frequency: frequency of the sine pulse (default: 0 Hz). phase: phase of the sine pulse (default: 0 rad). offset: constant dc offset of the sine pulse (default: 0). start: starting time of the sine pulse. duration: length of the sine pulse. end: ending time of the sine pulse. phase_ref: point in time that should have the specified phase (default: start pulse time). """ def __init__(self, amplitude=0, frequency=0, phase=0, offset=0, start=None, duration=None, end=None, phase_ref=None): self._init_times(start, duration, end) amplitude = self._amplitude(amplitude) frequency, phase = self._harmonic(frequency, phase) offset = self._amplitude(offset) if phase_ref is None: t0 = 0 else: t0 = self.start - self._ns(phase_ref) t = np.linspace(t0, t0 + self.duration - 1, self.duration) self.pulse = (offset + amplitude * np.sin(2 * np.pi * frequency * t + phase)) self._check_pulse() class Cosine(_WavePulse): """ Cosine pulse. Inputs: amplitude: amplitude of the cosine pulse (default: 0). frequency: frequency of the cosine pulse (default: 0 Hz). phase: phase of the cosine pulse (default: 0 rad). offset: constant dc offset of the cosine pulse (default: 0). start: starting time of the cosine pulse. duration: length of the cosine pulse. end: ending time of the cosine pulse. phase_ref: point in time that should have the specified phase (default: start pulse time). """ def __init__(self, amplitude=0, frequency=0, phase=0, offset=0, start=None, duration=None, end=None, phase_ref=None): self._init_times(start, duration, end) amplitude = self._amplitude(amplitude) frequency, phase = self._harmonic(frequency, phase) offset = self._amplitude(offset) if phase_ref is None: t0 = 0 else: t0 = self.start - self._ns(phase_ref) t = np.linspace(t0, t0 + self.duration - 1, self.duration) self.pulse = (offset + amplitude * np.cos(2 * np.pi * frequency * t + phase)) self._check_pulse() class Gaussian(_WavePulse): """ Gaussian window pulse. The pulse is truncated at about 1 per 2^14 level since the DACs have 14-bit resolution. Inputs: amplitude: amplitude of the gaussian pulse. start: starting time of the gaussian pulse. 
duration: length of the gaussian pulse. end: ending time of the gaussian pulse. """ def __init__(self, amplitude=0, start=None, duration=None, end=None): self._init_times(start, duration, end) amplitude = self._amplitude(amplitude) sigma = (float(self.duration) - 1) / np.sqrt(112 * np.log(2)) self.pulse = amplitude * ss.gaussian(self.duration, sigma) self._check_pulse() class FromArray(_WavePulse): """ Generate a pulse from a numpy array. The start or end times can be arbitrary, and the duration is derived automatically from the length of the array. Inputs: pulse_data: numpy array containing the pulse data in 1 ns chunks. start: starting time of the pulse. end: ending time of the pulse. """ def __init__(self, pulse_data=[], start=None, end=None): duration = len(pulse_data) self._init_times(start, duration, end) if isinstance(pulse_data, list): pulse_data = np.array(pulse_data) self.pulse = pulse_data self._check_pulse() class Waveform(): """ Create a waveform from pulses. The start of one pulse is expected to be one unit (i.e. one nanosecond) after the end of the previous pulse (i.e. pulse2.start - pulse1.end >= 1). Therefore, to make pulse B start immediately after another pulse A initialize B.start to (A.end + 1), or simply assign A.after() to B.start. Inputs: label: waveform label string. args: arbitrarily long set of _WavePulses to create the waveform from. To create a _WavePulse use one of the "public" classes such as DC, Sine, Cosine, etc. """ def __init__(self, label='None', *args): if not isinstance(label, str): raise ValueError('Invalid waveform label.') self.label = label args = list(_flatten(args)) pulses = [arg for arg in args if isinstance(arg, _WavePulse)] if len(pulses) > 0: # Sort based on the start times. for i in range(len(pulses))[::-1]: for j in range(i): if pulses[j].start > pulses[j + 1].start: tmp = pulses[j + 1] pulses[j + 1] = pulses[j] pulses[j] = tmp # Ensure there are no overlaps (coincident samples count as overlap). for i in range(len(pulses) - 1): if pulses[i].end >= pulses[i + 1].start: raise ValueError("There are overlaps between " + "the waveform pulses.") # Loop through and fill unused spots with zeros. pulses_filled = [] for i in range(len(pulses) - 1): pulses_filled.append(pulses[i].pulse) gap = pulses[i + 1].start - pulses[i].end if gap > 1: pulses_filled.append(np.zeros(gap - 1)) pulses_filled.append(pulses[len(pulses) - 1].pulse) self.pulses = np.hstack(pulses_filled) self.start = pulses[0].start self.end = pulses[-1].end else: # An empty argument list yields a trivial single-sample waveform. self.pulses = np.array([0]) self.start = 0 self.end = 0 self.duration = self.end - self.start + 1 def ECLDuringPulses(*args, **kwargs): """ Return _WavePulse to make ECL outputs go high during a set of specified _WavePulses Inputs: args: set (or list) of _WavePulses during which an ECL pulse should be generated. pad_length: time before and after the pulses (default: 8 ns). Output: ECL: list of ECL _WavePulses. """ if 'pad_length' in kwargs: if isinstance(kwargs['pad_length'], units.Value): pad_length = kwargs['pad_length']['ns'] else: pad_length = kwargs['pad_length'] try: pad_length = int(np.round(pad_length)) except: raise Exception("Invalid ECL pad length value.") else: pad_length = 8 args = list(_flatten(args)) pulses = [arg for arg in args if isinstance(arg, _WavePulse)] ECL = [] for pulse in pulses: ECL.append(DC(amplitude = 1, start = pulse.before(pad_length), end = pulse.after(pad_length))) return ECL def Harmonic(amplitude=0, frequency=0, phase=0, cosine_offset=0, sine_offset=0, start=None, duration=None, end=None, phase_ref=None): """ Return cosine and sine pulses.
Inputs: amplitude: amplitude of the pulses (default: 0). frequency: frequency of the pulses (default: 0 Hz). phase: phase of the pulses (default: 0 rad). cosine_offset: constant dc offset of the cosine pulse (default: 0). sine_offset: constant dc offset of the sine pulse (default: 0). start: starting time of the pulses. duration: length of the pulses. end: ending time of the pulses. phase_ref: point in time that should have the specified phase (default: start pulse time). Outputs: cosine: Cosine pulse object. sine: Sine pulse object. """ return (Cosine(amplitude, frequency, phase, cosine_offset, start, duration, end, phase_ref), Sine(amplitude, frequency, phase, sine_offset, start, duration, end, phase_ref)) def wfs_dict(*args, **kwargs): """ Return a waveform dictionary with the waveform labels as the keys. Align the waveforms using the waveform starting time. Ensure that the waveforms are of an equal length. The waveforms are zero-padded at the start and the end to ensure that they are not shorter than the minimum allowed length. Inputs: *args: arbitrarily long set of the Waveforms (instances of class Waveform). *kwargs: min_length: minimum allowed length of the final waveform. Short waveforms are padded with zeros at the end to increase their length (default: 20). start_zeros: number of zeros to add to the start of each waveform (default: 4). end_zeros: number of zeros to add to the end of each waveform (default: 4). Actual number of zeros added may be higher if the waveform length does not satisfy the min_length requirement. Outputs: waveforms: dictionary with the processed waveforms. offset: difference between the corresponding index values of the waveform numpy ndarrays and the time values that specify the start and end times for the waveforms: offset = ndarray_index - assigned_time_value, i.e. ndarray_index = assigned_time_value + offset. """ defaults = {'min_length': 20, 'start_zeros': 4, 'end_zeros': 4} for key in kwargs: if isinstance(kwargs[key], units.Value): kwargs[key] = kwargs[key]['ns'] try: kwargs[key] = int(np.round(kwargs[key])) except: raise Exception("Invalid parameter '%s' value." %key) defaults.update(kwargs) min_len = defaults['min_length'] start, end = defaults['start_zeros'], defaults['end_zeros'] wfs = [arg for arg in args if isinstance(arg, Waveform)] # Align the waveforms. if wfs: start_offset = min([wf.start for wf in wfs]) for wf in wfs: wf.pulses = np.hstack([np.zeros(wf.start - start_offset), wf.pulses]) else: start_offset = 0 # Create an empty waveform 'None'. wfs.append(Waveform('None', DC(start=start_offset, duration=1))) # Ensure that the waveforms are long enough and of an equal length. max_len = max([wf.pulses.size for wf in wfs]) + start + end total_len = max(min_len, max_len) for wf in wfs: fin = max(total_len - start - wf.pulses.size, end) wf.pulses = np.hstack([np.zeros(start), wf.pulses, np.zeros(fin)]) return {wf.label: wf.pulses for wf in wfs}, start - start_offset def check_wfs(waveforms): """ Check that all waveforms have the same length. Input: waveforms: dictionary with the processed waveforms. Output: None. """ lengths = [waveforms[wf].size for wf in waveforms] if lengths.count(lengths[0]) != len(lengths): raise Exception('The waveforms have different lengths.') def _close_figure(self, signal=None): """ Close the waveform figure. Input: None. Output: None. """ plt.close(2) def plot_wfs(waveforms, wf_labels, wf_colors=['r', 'g', 'm', 'b', 'k', 'c']): """ Plot waveforms. Input: waveforms: dictionary with the processed waveforms.
wf_labels: waveform labels to plot. wf_colors: colors for waveform colorcoding. Output: None. """ if not isinstance(wf_colors, list): wf_colors = list(wf_colors) if not isinstance(wf_labels, list): wf_labels = list(wf_labels) time = waveforms[wf_labels[0]].size time = np.linspace(0, time - 1, time) plt.figure(2) plt.ioff() plt.clf() for idx, wf in enumerate(wf_labels): plt.plot(time, waveforms[wf], wf_colors[idx % 6], label=wf_labels[idx]) plt.xlim(time[0], time[-1]) plt.legend() plt.xlabel('Time [ns]') plt.ylabel('Waveforms') plt.draw() plt.pause(0.05) if __name__ == "__main__": """ Tests and examples. Add your test/example! """ # Explicitly close the waveform figure when the terminal is closed. SetConsoleCtrlHandler(_close_figure, True) # Cosine pulse with amplitude of 1 and frequency of 0.25 GHz # starting at t = 2 ns and ending at t = 8 ns. pulseA1 = Cosine(amplitude=1, frequency=0.25, start=2, end=8) # Sine pulse with amplitude of 0.5 and frequency of 0.25 GHz # starting at the start of pulseA1 and ending at the end of pulseA1. pulseB1 = Sine(amplitude=0.5, frequency=0.25, start=pulseA1.start, end=pulseA1.end) # DC pulse with amplitude of -1 starting after the end of pulseA1. # The pulse duration is 10 ns. pulseB2 = DC(amplitude=-1, start=pulseA1.after(), duration=10) # Combine the two pulses into one waveform. The waveform class # automatically puts the wave pulses in the correct order. waveformB = Waveform('B', pulseB1, pulseB2) # Specifying the start, duration and end times at the same time will # work only if these parameters are consistent, i.e. if the equation # self.duration = self.end - self.start + 1 is satisfied. pulseA2 = DC(start=pulseB2.start, duration=10, end=pulseB2.end) try: # Inconsistent specifications. pulseA2 = DC(start=pulseB2.after(-1), duration=12, end=pulseB2.end) except ValueError: print('The inconsistent time error has been correctly caught.') try: # Amplitude should not exceed 1. pulseA2 = Sine(amplitude=1, frequency=.25, offset=.1, start=pulseB2.after(-1), duration=12) except ValueError: print('The amplitude error has been correctly caught.') # Sine pulse with amplitude of 1 and frequency of 0.1 GHz # starting 2 ns after pulseB1 and ending at the same time as # pulseB2. pulseA2 = Sine(amplitude=1, phase=np.pi/2, frequency=0.1, start=pulseB1.after(2), end=pulseB2.end) # Combine the two pulses into one waveform. The waveform class # automatically puts the wave pulses in the correct order. waveformA = Waveform('A', pulseA1, pulseA2) # Create a waveform dictionary with the waveform labels as the keys. # The waveforms will be aligned based on their start times. They # will be zero-padded to ensure equal length that is longer than # a minimum length, which is 20 in this example. wfs, time_offset = wfs_dict(waveformA, waveformB, min_length=20) print(wfs) check_wfs(wfs) print('Time offset = %d ns.' %time_offset) # Gaussian pulse with amplitude of 1 starting at t = 0 ns and # ending at t = 14 ns (duration is equal to 15 ns). pulseC = Gaussian(amplitude=1, start=0, duration=15, end=14) waveformC = Waveform('C', pulseC) wfs, time_offset = wfs_dict(waveformA, waveformB, waveformC, min_length=100) print(wfs) check_wfs(wfs) print('Time offset = %d ns.' %time_offset) # Create an in-phase and quadrature components of a harmonic pulse. I, Q = Harmonic(amplitude=0.25, frequency=0.05, start=0, duration=150) wfs, time_offset = wfs_dict(Waveform('I', I), Waveform('Q', Q)) print(wfs) check_wfs(wfs) print('Time offset = %d ns.' %time_offset) # Plot the waveforms for inspection. 
plot_wfs(wfs, ['I', 'Q'], ['r', 'b']) # Some animation. for x in range(100): # Create an in-phase and quadrature components of a harmonic # pulse. I, Q = Harmonic(amplitude=0.25, frequency=0.03, phase= x / 20, start=0, duration=150) wfs, time_offset = wfs_dict(Waveform('I', I), Waveform('Q', Q)) # Plot the waveforms for inspection. plot_wfs(wfs, ['I', 'Q'], ['r', 'b'])
McDermott-Group/LabRAD
LabRAD/Measurements/General/waveform.py
Python
gpl-2.0
22,660
[ "Gaussian" ]
3436b911112993f41d993bb1f83d9241e3f5e3df84a2e25c084d901aed0bcaaf
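The Gaussian pulse class in the file above truncates its window at roughly one part in 2^14 (the stated 14-bit DAC resolution), which is where the formula sigma = (duration - 1) / sqrt(112 * ln 2) comes from: requiring the edge samples to sit at 2^-14 of the peak gives exactly that sigma. A standalone numerical check of the relation, using an illustrative duration of 15 samples that matches the pulseC example; only numpy is assumed:

# Verify that sigma = (N - 1) / sqrt(112 * ln 2) puts the Gaussian
# window edges at 2**-14 of the peak value. N is illustrative.
import numpy as np

N = 15  # pulse duration in samples
sigma = (N - 1) / np.sqrt(112 * np.log(2))

# Window value at the first sample, relative to the unit peak at the center.
edge = np.exp(-((0 - (N - 1) / 2) ** 2) / (2 * sigma ** 2))
print(edge, 2.0 ** -14)  # both ~6.1e-5, i.e. one part in 2^14

The algebra behind the check: the exponent at the edge is ((N-1)/2)^2 / (2 sigma^2) = 112 ln 2 / 8 = 14 ln 2, so the edge value is exp(-14 ln 2) = 2^-14, independent of N.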
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = r''' --- module: acl version_added: '1.4' short_description: Set and retrieve file ACL information. description: - Set and retrieve file ACL information. options: path: description: - The full path of the file or object. type: path required: yes aliases: [ name ] state: description: - Define whether the ACL should be present or not. - The C(query) state gets the current ACL without changing it, for use in C(register) operations. choices: [ absent, present, query ] default: query follow: description: - Whether to follow symlinks on the path if a symlink is encountered. type: bool default: yes default: description: - If the target is a directory, setting this to C(yes) will make it the default ACL for entities created inside the directory. - Setting C(default) to C(yes) causes an error if the path is a file. type: bool default: no version_added: '1.5' entity: description: - The actual user or group that the ACL applies to when matching entity types user or group are selected. version_added: '1.5' etype: description: - The entity type of the ACL to apply, see C(setfacl) documentation for more info. choices: [ group, mask, other, user ] version_added: '1.5' permissions: description: - The permissions to apply/remove can be any combination of C(r), C(w) and C(x) (read, write and execute respectively) version_added: '1.5' entry: description: - DEPRECATED. - The ACL to set or remove. - This must always be quoted in the form of C(<etype>:<qualifier>:<perms>). - The qualifier may be empty for some types, but the type and perms are always required. - C(-) can be used as placeholder when you do not care about permissions. - This is now superseded by entity, type and permissions fields. recursive: description: - Recursively sets the specified ACL. - Incompatible with C(state=query). type: bool default: no version_added: '2.0' use_nfsv4_acls: description: - Use NFSv4 ACLs instead of POSIX ACLs. type: bool default: no version_added: '2.2' recalculate_mask: description: - Select if and when to recalculate the effective right masks of the files. - See C(setfacl) documentation for more info. - Incompatible with C(state=query). choices: [ default, mask, no_mask ] default: default version_added: '2.7' author: - Brian Coca (@bcoca) - Jérémie Astori (@astorije) notes: - The C(acl) module requires that ACLs are enabled on the target filesystem and that the C(setfacl) and C(getfacl) binaries are installed. - As of Ansible 2.0, this module only supports Linux distributions. - As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well. 
''' EXAMPLES = r''' - name: Grant user Joe read access to a file acl: path: /etc/foo.conf entity: joe etype: user permissions: r state: present - name: Removes the ACL for Joe on a specific file acl: path: /etc/foo.conf entity: joe etype: user state: absent - name: Sets default ACL for joe on /etc/foo.d/ acl: path: /etc/foo.d/ entity: joe etype: user permissions: rw default: yes state: present - name: Same as previous but using entry shorthand acl: path: /etc/foo.d/ entry: default:user:joe:rw- state: present - name: Obtain the ACL for a specific file acl: path: /etc/foo.conf register: acl_info ''' RETURN = r''' acl: description: Current ACL on provided path (after changes, if any) returned: success type: list sample: [ "user::rwx", "group::rwx", "other::rwx" ] ''' import os from ansible.module_utils.basic import AnsibleModule, get_platform from ansible.module_utils._text import to_native def split_entry(entry): ''' splits entry and ensures normalized return''' a = entry.split(':') d = None if entry.lower().startswith("d"): d = True a.pop(0) if len(a) == 2: a.append(None) t, e, p = a t = t.lower() if t.startswith("u"): t = "user" elif t.startswith("g"): t = "group" elif t.startswith("m"): t = "mask" elif t.startswith("o"): t = "other" else: t = None return [d, t, e, p] def build_entry(etype, entity, permissions=None, use_nfsv4_acls=False): '''Builds and returns an entry string. Does not include the permissions bit if they are not provided.''' if use_nfsv4_acls: return ':'.join([etype, entity, permissions, 'allow']) if permissions: return etype + ':' + entity + ':' + permissions return etype + ':' + entity def build_command(module, mode, path, follow, default, recursive, recalculate_mask, entry=''): '''Builds and returns a getfacl/setfacl command.''' if mode == 'set': cmd = [module.get_bin_path('setfacl', True)] cmd.append('-m "%s"' % entry) elif mode == 'rm': cmd = [module.get_bin_path('setfacl', True)] cmd.append('-x "%s"' % entry) else: # mode == 'get' cmd = [module.get_bin_path('getfacl', True)] # prevents absolute path warnings and removes headers if get_platform().lower() == 'linux': cmd.append('--omit-header') cmd.append('--absolute-names') if recursive: cmd.append('--recursive') if recalculate_mask == 'mask' and mode in ['set', 'rm']: cmd.append('--mask') elif recalculate_mask == 'no_mask' and mode in ['set', 'rm']: cmd.append('--no-mask') if not follow: if get_platform().lower() == 'linux': cmd.append('--physical') elif get_platform().lower() == 'freebsd': cmd.append('-h') if default: cmd.insert(1, '-d') cmd.append(path) return cmd def acl_changed(module, cmd): '''Returns true if the provided command affects the existing ACLs, false otherwise.''' # FreeBSD do not have a --test flag, so by default, it is safer to always say "true" if get_platform().lower() == 'freebsd': return True cmd = cmd[:] # lists are mutables so cmd would be overwritten without this cmd.insert(1, '--test') lines = run_acl(module, cmd) for line in lines: if not line.endswith('*,*'): return True return False def run_acl(module, cmd, check_rc=True): try: (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) except Exception as e: module.fail_json(msg=to_native(e)) lines = [] for l in out.splitlines(): if not l.startswith('#'): lines.append(l.strip()) if lines and not lines[-1].split(): # trim last line only when it is empty return lines[:-1] return lines def main(): module = AnsibleModule( argument_spec=dict( path=dict(type='path', required=True, aliases=['name']), entry=dict(type='str'), 
            entity=dict(type='str', default=''),
            etype=dict(
                type='str',
                choices=['group', 'mask', 'other', 'user'],
            ),
            permissions=dict(type='str'),
            state=dict(
                type='str',
                default='query',
                choices=['absent', 'present', 'query'],
            ),
            follow=dict(type='bool', default=True),
            default=dict(type='bool', default=False),
            recursive=dict(type='bool', default=False),
            recalculate_mask=dict(
                type='str',
                default='default',
                choices=['default', 'mask', 'no_mask'],
            ),
            use_nfsv4_acls=dict(type='bool', default=False)
        ),
        supports_check_mode=True,
    )

    if get_platform().lower() not in ['linux', 'freebsd']:
        module.fail_json(msg="The acl module is not available on this system.")

    path = module.params.get('path')
    entry = module.params.get('entry')
    entity = module.params.get('entity')
    etype = module.params.get('etype')
    permissions = module.params.get('permissions')
    state = module.params.get('state')
    follow = module.params.get('follow')
    default = module.params.get('default')
    recursive = module.params.get('recursive')
    recalculate_mask = module.params.get('recalculate_mask')
    use_nfsv4_acls = module.params.get('use_nfsv4_acls')

    if not os.path.exists(path):
        module.fail_json(msg="Path not found or not accessible.")

    if state == 'query':
        if recursive:
            module.fail_json(msg="'recursive' MUST NOT be set when 'state=query'.")
        if recalculate_mask in ['mask', 'no_mask']:
            module.fail_json(msg="'recalculate_mask' MUST NOT be set to 'mask' or 'no_mask' when 'state=query'.")

    if not entry:
        if state == 'absent' and permissions:
            module.fail_json(msg="'permissions' MUST NOT be set when 'state=absent'.")

        if state == 'absent' and not entity:
            module.fail_json(msg="'entity' MUST be set when 'state=absent'.")

        if state in ['present', 'absent'] and not etype:
            module.fail_json(msg="'etype' MUST be set when 'state=%s'." % state)

    if entry:
        if etype or entity or permissions:
            module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.")

        if state == 'present' and not entry.count(":") in [2, 3]:
            module.fail_json(msg="'entry' MUST have 3 or 4 sections divided by ':' when 'state=present'.")

        if state == 'absent' and not entry.count(":") in [1, 2]:
            module.fail_json(msg="'entry' MUST have 2 or 3 sections divided by ':' when 'state=absent'.")

        if state == 'query':
            module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.")

        default_flag, etype, entity, permissions = split_entry(entry)
        if default_flag is not None:
            default = default_flag

    if get_platform().lower() == 'freebsd':
        if recursive:
            module.fail_json(msg="recursive is not supported on this platform.")

    changed = False
    msg = ""

    if state == 'present':
        entry = build_entry(etype, entity, permissions, use_nfsv4_acls)
        command = build_command(
            module, 'set', path, follow,
            default, recursive, recalculate_mask, entry
        )
        changed = acl_changed(module, command)

        if changed and not module.check_mode:
            run_acl(module, command)
        msg = "%s is present" % entry

    elif state == 'absent':
        # Pass use_nfsv4_acls as a keyword argument; passing it positionally
        # would land in the 'permissions' parameter of build_entry().
        entry = build_entry(etype, entity, use_nfsv4_acls=use_nfsv4_acls)
        command = build_command(
            module, 'rm', path, follow,
            default, recursive, recalculate_mask, entry
        )
        changed = acl_changed(module, command)

        if changed and not module.check_mode:
            run_acl(module, command, False)
        msg = "%s is absent" % entry

    elif state == 'query':
        msg = "current acl"

    acl = run_acl(
        module,
        build_command(module, 'get', path, follow, default, recursive, recalculate_mask)
    )

    module.exit_json(changed=changed, msg=msg, acl=acl)


if __name__ == '__main__':
    main()
resmo/ansible
lib/ansible/modules/files/acl.py
Python
gpl-3.0
11,789
[ "Brian" ]
c2a1580d56de0552b5318ed381a79a93720c0c2ce04ab1020f140ce789aa9051
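The `entry` shorthand documented in the module above follows the `[d[efault]:]<etype>:<qualifier>:<perms>` convention that `split_entry` normalizes. A minimal standalone sketch of that convention; `parse_acl_entry` is a hypothetical helper, not part of the Ansible module, and it omits the prefix-to-type mapping the module also performs:

# Standalone sketch of the entry convention parsed by split_entry() above.
def parse_acl_entry(entry):
    parts = entry.split(':')
    is_default = False
    if parts[0].lower().startswith('d'):  # leading "default:" section
        is_default = True
        parts.pop(0)
    if len(parts) == 2:  # e.g. "user:joe" when removing an entry, no perms
        parts.append(None)
    etype, qualifier, perms = parts
    return is_default, etype, qualifier, perms

assert parse_acl_entry('default:user:joe:rw-') == (True, 'user', 'joe', 'rw-')
assert parse_acl_entry('user:joe') == (False, 'user', 'joe', None)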
""" View for Courseware Index """ # pylint: disable=attribute-defined-outside-init import logging import six from six.moves import urllib from django.conf import settings from django.contrib.auth.views import redirect_to_login from django.db import transaction from django.http import Http404 from django.template.context_processors import csrf from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.functional import cached_property from django.utils.translation import ugettext as _ from django.views.decorators.cache import cache_control from django.views.decorators.csrf import ensure_csrf_cookie from django.views.generic import View from edx_django_utils.monitoring import set_custom_metrics_for_course_key from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey, UsageKey from web_fragments.fragment import Fragment from edxmako.shortcuts import render_to_response, render_to_string from lms.djangoapps.courseware.exceptions import CourseAccessRedirect, Redirect from lms.djangoapps.experiments.utils import get_experiment_user_metadata_context from lms.djangoapps.gating.api import get_entrance_exam_score_ratio, get_entrance_exam_usage_key from lms.djangoapps.grades.api import CourseGradeFactory from openedx.core.djangoapps.content.course_overviews.models import CourseOverview from openedx.core.djangoapps.crawlers.models import CrawlersConfig from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY from openedx.core.djangoapps.user_api.preferences.api import get_user_preference from openedx.core.djangoapps.util.user_messages import PageLevelMessages from openedx.core.djangoapps.waffle_utils import WaffleSwitchNamespace from openedx.core.djangolib.markup import HTML, Text from openedx.features.course_experience import ( COURSE_ENABLE_UNENROLLED_ACCESS_FLAG, COURSE_OUTLINE_PAGE_FLAG, default_course_url_name, RELATIVE_DATES_FLAG, ) from openedx.features.course_experience.urls import COURSE_HOME_VIEW_NAME from openedx.features.course_experience.views.course_sock import CourseSockFragmentView from openedx.features.enterprise_support.api import data_sharing_consent_required from student.models import CourseEnrollment from util.views import ensure_valid_course_key from xmodule.course_module import COURSE_VISIBILITY_PUBLIC from xmodule.modulestore.django import modulestore from xmodule.x_module import PUBLIC_VIEW, STUDENT_VIEW from ..access import has_access from ..access_utils import check_public_access from ..courses import ( check_course_access_with_redirect, get_course_with_access, get_current_child, get_studio_url ) from ..entrance_exams import ( course_has_entrance_exam, get_entrance_exam_content, user_can_skip_entrance_exam, user_has_passed_entrance_exam ) from ..masquerade import check_content_start_date_for_masquerade_user, setup_masquerade from ..model_data import FieldDataCache from ..module_render import get_module_for_descriptor, toc_for_course from ..permissions import MASQUERADE_AS_STUDENT from ..toggles import ( COURSEWARE_MICROFRONTEND_COURSE_TEAM_PREVIEW, REDIRECT_TO_COURSEWARE_MICROFRONTEND, ) from ..url_helpers import get_microfrontend_url from .views import CourseTabView log = logging.getLogger("edx.courseware.views.index") TEMPLATE_IMPORTS = {'urllib': urllib} CONTENT_DEPTH = 2 @method_decorator(transaction.non_atomic_requests, name='dispatch') class CoursewareIndex(View): """ View class for the Courseware page. 
""" @cached_property def enable_unenrolled_access(self): return COURSE_ENABLE_UNENROLLED_ACCESS_FLAG.is_enabled(self.course_key) @method_decorator(ensure_csrf_cookie) @method_decorator(cache_control(no_cache=True, no_store=True, must_revalidate=True)) @method_decorator(ensure_valid_course_key) @method_decorator(data_sharing_consent_required) def get(self, request, course_id, chapter=None, section=None, position=None): """ Displays courseware accordion and associated content. If course, chapter, and section are all specified, renders the page, or returns an error if they are invalid. If section is not specified, displays the accordion opened to the right chapter. If neither chapter or section are specified, displays the user's most recent chapter, or the first chapter if this is the user's first visit. Arguments: request: HTTP request course_id (unicode): course id chapter (unicode): chapter url_name section (unicode): section url_name position (unicode): position in module, eg of <sequential> module """ self.course_key = CourseKey.from_string(course_id) if not (request.user.is_authenticated or self.enable_unenrolled_access): return redirect_to_login(request.get_full_path()) self.original_chapter_url_name = chapter self.original_section_url_name = section self.chapter_url_name = chapter self.section_url_name = section self.position = position self.chapter, self.section = None, None self.course = None self.url = request.path try: set_custom_metrics_for_course_key(self.course_key) self._clean_position() with modulestore().bulk_operations(self.course_key): self.view = STUDENT_VIEW self.course = get_course_with_access( request.user, 'load', self.course_key, depth=CONTENT_DEPTH, check_if_enrolled=True, check_if_authenticated=True ) self.course_overview = CourseOverview.get_from_id(self.course.id) self.is_staff = has_access(request.user, 'staff', self.course) # There's only one situation where we want to show the public view if ( not self.is_staff and self.enable_unenrolled_access and self.course.course_visibility == COURSE_VISIBILITY_PUBLIC and not CourseEnrollment.is_enrolled(request.user, self.course_key) ): self.view = PUBLIC_VIEW self.can_masquerade = request.user.has_perm(MASQUERADE_AS_STUDENT, self.course) self._setup_masquerade_for_effective_user() return self.render(request) except Exception as exception: # pylint: disable=broad-except return CourseTabView.handle_exceptions(request, self.course_key, self.course, exception) def _setup_masquerade_for_effective_user(self): """ Setup the masquerade information to allow the request to be processed for the requested effective user. """ self.real_user = self.request.user self.masquerade, self.effective_user = setup_masquerade( self.request, self.course_key, self.can_masquerade, reset_masquerade_data=True ) # Set the user in the request to the effective user. self.request.user = self.effective_user def _redirect_to_learning_mfe(self): """ Redirect to the new courseware micro frontend, unless this is a time limited exam. 
""" # DENY: feature disabled globally if not settings.FEATURES.get('ENABLE_COURSEWARE_MICROFRONTEND'): return # DENY: staff access if self.is_staff: return # DENY: Old Mongo courses, until removed from platform if self.course_key.deprecated: return # DENY: Timed Exams, until supported if getattr(self.section, 'is_time_limited', False): return # ALLOW: when flag set for course if REDIRECT_TO_COURSEWARE_MICROFRONTEND.is_enabled(self.course_key): raise Redirect(self.microfrontend_url) @property def microfrontend_url(self): """ Return absolute URL to this section in the courseware micro-frontend. """ try: unit_key = UsageKey.from_string(self.request.GET.get('activate_block_id', '')) # `activate_block_id` is typically a Unit (a.k.a. Vertical), # but it can technically be any block type. Do a check to # make sure it's really a Unit before we use it for the MFE. if unit_key.block_type != 'vertical': unit_key = None except InvalidKeyError: unit_key = None url = get_microfrontend_url( self.course_key, self.section.location if self.section else None, unit_key ) return url def render(self, request): """ Render the index page. """ self._prefetch_and_bind_course(request) if self.course.has_children_at_depth(CONTENT_DEPTH): self._reset_section_to_exam_if_required() self.chapter = self._find_chapter() self.section = self._find_section() if self.chapter and self.section: self._redirect_if_not_requested_section() self._save_positions() self._prefetch_and_bind_section() self._redirect_to_learning_mfe() check_content_start_date_for_masquerade_user(self.course_key, self.effective_user, request, self.course.start, self.chapter.start, self.section.start) if not request.user.is_authenticated: qs = six.moves.urllib.parse.urlencode({ 'course_id': self.course_key, 'enrollment_action': 'enroll', 'email_opt_in': False, }) allow_anonymous = check_public_access(self.course, [COURSE_VISIBILITY_PUBLIC]) if not allow_anonymous: PageLevelMessages.register_warning_message( request, Text(_(u"You are not signed in. To see additional course content, {sign_in_link} or " u"{register_link}, and enroll in this course.")).format( sign_in_link=HTML(u'<a href="{url}">{sign_in_label}</a>').format( sign_in_label=_('sign in'), url='{}?{}'.format(reverse('signin_user'), qs), ), register_link=HTML(u'<a href="/{url}">{register_label}</a>').format( register_label=_('register'), url=u'{}?{}'.format(reverse('register_user'), qs), ), ) ) return render_to_response('courseware/courseware.html', self._create_courseware_context(request)) def _redirect_if_not_requested_section(self): """ If the resulting section and chapter are different from what was initially requested, redirect back to the index page, but with an updated URL that includes the correct section and chapter values. We do this so that our analytics events and error logs have the appropriate URLs. """ if ( self.chapter.url_name != self.original_chapter_url_name or (self.original_section_url_name and self.section.url_name != self.original_section_url_name) ): raise CourseAccessRedirect( reverse( 'courseware_section', kwargs={ 'course_id': six.text_type(self.course_key), 'chapter': self.chapter.url_name, 'section': self.section.url_name, }, ) ) def _clean_position(self): """ Verify that the given position is an integer. If it is not positive, set it to 1. 
""" if self.position is not None: try: self.position = max(int(self.position), 1) except ValueError: raise Http404(u"Position {} is not an integer!".format(self.position)) def _reset_section_to_exam_if_required(self): """ Check to see if an Entrance Exam is required for the user. """ if not user_can_skip_entrance_exam(self.effective_user, self.course): exam_chapter = get_entrance_exam_content(self.effective_user, self.course) if exam_chapter and exam_chapter.get_children(): exam_section = exam_chapter.get_children()[0] if exam_section: self.chapter_url_name = exam_chapter.url_name self.section_url_name = exam_section.url_name def _get_language_preference(self): """ Returns the preferred language for the actual user making the request. """ language_preference = settings.LANGUAGE_CODE if self.request.user.is_authenticated: language_preference = get_user_preference(self.real_user, LANGUAGE_KEY) return language_preference def _is_masquerading_as_student(self): """ Returns whether the current request is masquerading as a student. """ return self.masquerade and self.masquerade.role == 'student' def _is_masquerading_as_specific_student(self): """ Returns whether the current request is masqueurading as a specific student. """ return self._is_masquerading_as_student() and self.masquerade.user_name def _find_block(self, parent, url_name, block_type, min_depth=None): """ Finds the block in the parent with the specified url_name. If not found, calls get_current_child on the parent. """ child = None if url_name: child = parent.get_child_by(lambda m: m.location.block_id == url_name) if not child: # User may be trying to access a child that isn't live yet if not self._is_masquerading_as_student(): raise Http404(u'No {block_type} found with name {url_name}'.format( block_type=block_type, url_name=url_name, )) elif min_depth and not child.has_children_at_depth(min_depth - 1): child = None if not child: child = get_current_child(parent, min_depth=min_depth, requested_child=self.request.GET.get("child")) return child def _find_chapter(self): """ Finds the requested chapter. """ return self._find_block(self.course, self.chapter_url_name, 'chapter', CONTENT_DEPTH - 1) def _find_section(self): """ Finds the requested section. """ if self.chapter: return self._find_block(self.chapter, self.section_url_name, 'section') def _prefetch_and_bind_course(self, request): """ Prefetches all descendant data for the requested section and sets up the runtime, which binds the request user to the section. """ self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents( self.course_key, self.effective_user, self.course, depth=CONTENT_DEPTH, read_only=CrawlersConfig.is_crawler(request), ) self.course = get_module_for_descriptor( self.effective_user, self.request, self.course, self.field_data_cache, self.course_key, course=self.course, will_recheck_access=True, ) def _prefetch_and_bind_section(self): """ Prefetches all descendant data for the requested section and sets up the runtime, which binds the request user to the section. """ # Pre-fetch all descendant data self.section = modulestore().get_item(self.section.location, depth=None, lazy=False) self.field_data_cache.add_descriptor_descendents(self.section, depth=None) # Bind section to user self.section = get_module_for_descriptor( self.effective_user, self.request, self.section, self.field_data_cache, self.course_key, self.position, course=self.course, will_recheck_access=True, ) def _save_positions(self): """ Save where we are in the course and chapter. 
""" save_child_position(self.course, self.chapter_url_name) save_child_position(self.chapter, self.section_url_name) def _create_courseware_context(self, request): """ Returns and creates the rendering context for the courseware. Also returns the table of contents for the courseware. """ course_url_name = default_course_url_name(self.course.id) course_url = reverse(course_url_name, kwargs={'course_id': six.text_type(self.course.id)}) show_search = ( settings.FEATURES.get('ENABLE_COURSEWARE_SEARCH') or (settings.FEATURES.get('ENABLE_COURSEWARE_SEARCH_FOR_COURSE_STAFF') and self.is_staff) ) staff_access = self.is_staff courseware_context = { 'csrf': csrf(self.request)['csrf_token'], 'course': self.course, 'course_url': course_url, 'chapter': self.chapter, 'section': self.section, 'init': '', 'fragment': Fragment(), 'staff_access': staff_access, 'can_masquerade': self.can_masquerade, 'masquerade': self.masquerade, 'supports_preview_menu': True, 'studio_url': get_studio_url(self.course, 'course'), 'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"), 'bookmarks_api_url': reverse('bookmarks'), 'language_preference': self._get_language_preference(), 'disable_optimizely': not WaffleSwitchNamespace('RET').is_enabled('enable_optimizely_in_courseware'), 'section_title': None, 'sequence_title': None, 'disable_accordion': COURSE_OUTLINE_PAGE_FLAG.is_enabled(self.course.id), 'show_search': show_search, } courseware_context.update( get_experiment_user_metadata_context( self.course, self.effective_user, ) ) table_of_contents = toc_for_course( self.effective_user, self.request, self.course, self.chapter_url_name, self.section_url_name, self.field_data_cache, ) courseware_context['accordion'] = render_accordion( self.request, self.course, table_of_contents['chapters'], ) courseware_context['course_sock_fragment'] = CourseSockFragmentView().render_to_fragment( request, course=self.course) # entrance exam data self._add_entrance_exam_to_context(courseware_context) if self.section: # chromeless data if self.section.chrome: chrome = [s.strip() for s in self.section.chrome.lower().split(",")] if 'accordion' not in chrome: courseware_context['disable_accordion'] = True if 'tabs' not in chrome: courseware_context['disable_tabs'] = True # default tab if self.section.default_tab: courseware_context['default_tab'] = self.section.default_tab # section data courseware_context['section_title'] = self.section.display_name_with_default section_context = self._create_section_context( table_of_contents['previous_of_active_section'], table_of_contents['next_of_active_section'], ) courseware_context['fragment'] = self.section.render(self.view, section_context) if self.section.position and self.section.has_children: self._add_sequence_title_to_context(courseware_context) # Courseware MFE link if show_courseware_mfe_link(request.user, staff_access, self.course.id): courseware_context['microfrontend_link'] = self.microfrontend_url else: courseware_context['microfrontend_link'] = None return courseware_context def _add_sequence_title_to_context(self, courseware_context): """ Adds sequence title to the given context. If we're rendering a section with some display items, but position exceeds the length of the displayable items, default the position to the first element. 
""" display_items = self.section.get_display_items() if not display_items: return if self.section.position > len(display_items): self.section.position = 1 courseware_context['sequence_title'] = display_items[self.section.position - 1].display_name_with_default def _add_entrance_exam_to_context(self, courseware_context): """ Adds entrance exam related information to the given context. """ if course_has_entrance_exam(self.course) and getattr(self.chapter, 'is_entrance_exam', False): courseware_context['entrance_exam_passed'] = user_has_passed_entrance_exam(self.effective_user, self.course) courseware_context['entrance_exam_current_score'] = get_entrance_exam_score_ratio( CourseGradeFactory().read(self.effective_user, self.course), get_entrance_exam_usage_key(self.course), ) def _create_section_context(self, previous_of_active_section, next_of_active_section): """ Returns and creates the rendering context for the section. """ def _compute_section_url(section_info, requested_child): """ Returns the section URL for the given section_info with the given child parameter. """ return "{url}?child={requested_child}".format( url=reverse( 'courseware_section', args=[six.text_type(self.course_key), section_info['chapter_url_name'], section_info['url_name']], ), requested_child=requested_child, ) # NOTE (CCB): Pull the position from the URL for un-authenticated users. Otherwise, pull the saved # state from the data store. position = None if self.request.user.is_authenticated else self.position section_context = { 'activate_block_id': self.request.GET.get('activate_block_id'), 'requested_child': self.request.GET.get("child"), 'progress_url': reverse('progress', kwargs={'course_id': six.text_type(self.course_key)}), 'user_authenticated': self.request.user.is_authenticated, 'position': position, } if previous_of_active_section: section_context['prev_url'] = _compute_section_url(previous_of_active_section, 'last') if next_of_active_section: section_context['next_url'] = _compute_section_url(next_of_active_section, 'first') # sections can hide data that masquerading staff should see when debugging issues with specific students section_context['specific_masquerade'] = self._is_masquerading_as_specific_student() return section_context def render_accordion(request, course, table_of_contents): """ Returns the HTML that renders the navigation for the given course. Expects the table_of_contents to have data on each chapter and section, including which ones are active. 
""" context = dict( [ ('toc', table_of_contents), ('course_id', six.text_type(course.id)), ('csrf', csrf(request)['csrf_token']), ('due_date_display_format', course.due_date_display_format), ] + list(TEMPLATE_IMPORTS.items()) ) return render_to_string('courseware/accordion.html', context) def save_child_position(seq_module, child_name): """ child_name: url_name of the child """ for position, child in enumerate(seq_module.get_display_items(), start=1): if child.location.block_id == child_name: # Only save if position changed if position != seq_module.position: seq_module.position = position # Save this new position to the underlying KeyValueStore seq_module.save() def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None): """ Recurses up the course tree starting from a leaf Saving the position property based on the previous node as it goes """ current_module = xmodule while current_module: parent_location = modulestore().get_parent_location(current_module.location) parent = None if parent_location: parent_descriptor = modulestore().get_item(parent_location) parent = get_module_for_descriptor( user, request, parent_descriptor, field_data_cache, current_module.location.course_key, course=course ) if parent and hasattr(parent, 'position'): save_child_position(parent, current_module.location.block_id) current_module = parent def show_courseware_mfe_link(user, staff_access, course_key): """ Return whether to display the button to switch to the Courseware MFE. """ # The MFE isn't enabled at all, so don't show the button. if not settings.FEATURES.get('ENABLE_COURSEWARE_MICROFRONTEND'): return False # MFE does not work for Old Mongo courses. if course_key.deprecated: return False # Global staff members always get to see the courseware MFE button if the # platform and course are capable, regardless of rollout waffle flags. if user.is_staff: return True # If you have course staff access, you see this link if we've enabled the # course team preview CourseWaffleFlag for this course *or* if we've turned # on the redirect for your students. mfe_enabled_for_course_team = COURSEWARE_MICROFRONTEND_COURSE_TEAM_PREVIEW.is_enabled(course_key) mfe_experiment_enabled_for_course = REDIRECT_TO_COURSEWARE_MICROFRONTEND.is_experiment_on(course_key) if staff_access and (mfe_enabled_for_course_team or mfe_experiment_enabled_for_course): return True return False
msegado/edx-platform
lms/djangoapps/courseware/views/index.py
Python
agpl-3.0
26,965
[ "VisIt" ]
ead21751d885e7e5f1e12c9b2521762e75df43f617d368aaf4cdac56ba3aa4a9
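Position handling in the view above is split across two rules: `_clean_position` rejects non-integers with a 404 and floors at 1, while `_add_sequence_title_to_context` falls back to the first item when the saved position runs past the end of the sequence. A standalone distillation of the combined behaviour; `clamp_position` is a hypothetical name, not an edx-platform API:

# Hypothetical helper distilling the two position rules above; not edx code.
def clamp_position(raw, n_items):
    """Return a 1-based position: non-integers raise (mapped to HTTP 404
    above), values below 1 are floored, and values past the end of the
    sequence fall back to the first item."""
    position = max(int(raw), 1)
    if position > n_items:
        position = 1
    return position

assert clamp_position('3', 5) == 3
assert clamp_position('-2', 5) == 1  # floored at the first item
assert clamp_position('9', 5) == 1   # past the end falls back to the start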
# Test of the handling of degenerate bands in response code from ase import Atoms from gpaw import GPAW, PW from gpaw.response.df import DielectricFunction from gpaw.test import findpeak, equal import numpy as np def get_hydrogen_chain_dielectric_function(NH, NK): a = Atoms('H', cell=[1, 1, 1], pbc=True) a.center() a = a.repeat((1, 1, NH)) a.calc = GPAW(mode=PW(200), kpts={'size': (1, 1, NK), 'gamma': True}, parallel={'band': 1}, dtype=complex, gpts=(10, 10, 10 * NH)) a.get_potential_energy() a.calc.diagonalize_full_hamiltonian(nbands=2 * NH) a.calc.write('H_chain.gpw', 'all') DF = DielectricFunction('H_chain.gpw', ecut=1e-3, hilbert=False, omega2=np.inf, intraband=False) eps_NLF, eps_LF = DF.get_dielectric_function(direction='z') omega_w = DF.get_frequencies() return omega_w, eps_LF NH_i = [2**n for n in [0, 4]] NK_i = [2**n for n in [6, 2]] opeak_old = np.nan peak_old = np.nan for NH, NK in zip(NH_i, NK_i): omega_w, eps_w = get_hydrogen_chain_dielectric_function(NH, NK) eels_w = -(1. / eps_w).imag opeak, peak = findpeak(omega_w, eels_w) # Test for consistency if not np.isnan(opeak_old): equal(opeak, opeak_old, tolerance=1e-3) equal(peak, peak_old, tolerance=1e-3) opeak_old = opeak peak_old = peak
robwarm/gpaw-symm
gpaw/test/hyd_chain_response.py
Python
gpl-3.0
1,359
[ "ASE", "GPAW" ]
b987489e53333b01594505a644b264f63718f8b210ef0705c90c12227b766f44
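The consistency check in the test above hinges on `findpeak` locating the EELS maximum on a sampled frequency grid. A generic sketch of one common way to do this, three-point parabolic refinement around the discrete maximum; this illustrates the technique only and is an assumption, not GPAW's exact implementation:

import numpy as np

def refine_peak(x, y):
    # Fit a parabola through the three samples around the discrete maximum
    # and return the interpolated peak position and height. Assumes a
    # uniform grid and an interior maximum.
    i = int(np.argmax(y))
    y0, y1, y2 = y[i - 1], y[i], y[i + 1]
    h = x[i] - x[i - 1]
    p = 0.5 * (y0 - y2) / (y0 - 2.0 * y1 + y2)
    return x[i] + p * h, y1 - 0.25 * (y0 - y2) * p

x = np.linspace(0.0, 4.0, 9)
xp, yp = refine_peak(x, -(x - 1.7) ** 2)
assert abs(xp - 1.7) < 1e-12  # parabolic refinement is exact for a parabola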
# -*- coding: utf-8 -*- # # Copyright 2015 AirPlug Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Created on 2012. 7. 19. @author: springchoi """ import sys from datatype import * import worker import re import traceback import time import datetime import collections import MySQLdb try: from worker import log except ImportError: import logging as log Priority = collections.namedtuple('Priority', 'LOW NORMAL HIGH')._make(range(3)) class UniqueList(list): key = lambda x: x def setKey(self, key=lambda x: x): if not callable(key): raise RuntimeError("Key is not callable") self.key = key def addSet(self, item, key=None): if not key: key = self.key elif not callable(key): raise RuntimeError("Key is not callable") if len(filter(lambda x: key(x) == key(item), self)) > 0: return False self.append(item) return True class GeoInfo(collections.namedtuple('_GeoInfo', 'lat, lng, acc, geosrc, from_cell')): def __new__(cls, lat=-9999, lng=-9999, acc=50000, geosrc='unknown', from_cell=False): # add default values return super(GeoInfo, cls).__new__(cls, lat, lng, acc, geosrc, from_cell) class WiFiNode(collections.namedtuple('_WiFiNode', 'state, bssid, ssid, rssi, regdtm, bregap, bmap, optrcom, geoloc, priority')): __registerdApSSIDPattern = {'LGT':('U\+',), 'KT':('olleh_GiGA_WiFi', 'ollehWiFi', 'NESPOT', 'QOOKnSHOW'), 'SKT':('T wifi zone',)} __hotspotApSSIDPattern = ('AndroidHotspot', 'AndroidAP', 'HTC-', 'Galaxy ', 'SKY A') __mobileApSSIDPattern = ('WibroEgg', 'ollehEgg', 'KWI-B', 'SHOW_JAPAN_EGG', 'egg\Z') def __new__(cls, state, bssid, ssid='', regdtm='19000101000000', rssi=-200, bregap=False, bmap=False, optrcom='none', geoloc=None, priority=Priority.NORMAL): # Classify WiFi try: if ssid not in ('', None): ssid = re.sub(r'^\s*"(.*)"\s*$', r'\1', unicode(ssid)) if ssid.find('"') >= 0: log.error("!!! SSID - %s" % ssid) if cls.isHotspot(ssid): priority = Priority.LOW else: optrcom = cls.getWiFiOperator(ssid) bregap = True if optrcom != 'none' else False if not bregap: bmap = cls.isMobile(ssid) try: ssid = MySQLdb.escape_string(unicode(ssid).encode('utf-8')) except Exception, e: # Non-ascii data. 
log.warn("SSID MySQLdb.escape_string Error - %s, %s" % (ssid, e)) if not geoloc: geoloc = GeoInfo() except Exception, e: log.error(e) log.error('BSSID - %s, SSID - %s' % (bssid, ssid)) exc_type, exc_value, exc_traceback = sys.exc_info() log.error(traceback.format_exception(exc_type, exc_value, exc_traceback)) raise e return super(WiFiNode, cls).__new__(cls, state, bssid, ssid, rssi, regdtm, bregap, bmap, optrcom, geoloc, priority) @classmethod def isHotspot(cls, ssid): patt = r'%s' % '|'.join(cls.__hotspotApSSIDPattern) if re.match(patt, ssid, re.IGNORECASE): #log.info("%s - Hotspot SSID, drop this AP" % ssid) return True @classmethod def getWiFiOperator(cls, ssid): for provider in cls.__registerdApSSIDPattern.keys(): patt = r'%s' % '|'.join(cls.__registerdApSSIDPattern[provider]) if re.match(patt, ssid, re.IGNORECASE): #log.info("Registered SSID - %s" % ssid) return provider return 'none' @classmethod def isMobile(cls, ssid): patt = r'%s' % '|'.join(cls.__mobileApSSIDPattern) if re.search(patt, ssid, re.IGNORECASE): #log.info("Mobile AP - %s" % ssid) return True return False class CellNode(collections.namedtuple('_CellNode', 'state, cellid, plmnid, cid, lac, celltype, regdtm, geoloc, priority')): def __new__(cls, state, cellid, celltype=0, regdtm='19000101000000', geoloc=None, priority=Priority.NORMAL): # add default values try: plmnid, cid, lac = cellid.split('_') # guard from invalid data if len(plmnid) > 6 or int(plmnid) == 0: plmnid = '0' if not geoloc: geoloc = GeoInfo() except Exception, e: raise e return super(CellNode, cls).__new__(cls, state, cellid, plmnid, cid, lac, celltype, regdtm, geoloc, priority) def addWiFi(cursor, node): strSql = """INSERT INTO apmain.apinfo (bssid, ssid, regdtm, bregap, bmap, lat, lng, acc, geosrc, optrcom, seq) VALUES('%s','%s','%s','%d','%d','%f','%f','%d','%s','%s','%s') ON DUPLICATED UPDATE lat = IF(VALUES(seq) > seq, VALUES(lat), lat), lng = IF(VALUES(seq) > seq, VALUES(lng), lng), seq = IF(VALUES(seq) > seq, VALUES(seq), seq), acc = IF(VALUES(seq) > seq, VALUES(acc), acc), geosrc=VALUES(geosrc)""" try: strSql = strSql % (node.bssid, node.ssid, node.regdtm, int(node.bregap), int(node.bmap), node.geoloc.lat, node.geoloc.lng, node.geoloc.acc, node.geoloc.geosrc, node.optrcom, node.rssi) except Exception, e: log.error("SQL GEN ERR - %s" % bytes(node.ssid)) strSql = strSql % (node.bssid, '', node.regdtm, int(node.bregap), int(node.bmap), node.geoloc.lat, node.geoloc.lng, node.geoloc.acc, node.geoloc.geosrc, node.optrcom, node.rssi) try: cursor.execute(strSql) log.debug("INSERT - %s" % node.bssid) except Exception, e: # Duplicate entry error if e[0] != 1062: log.error(e) log.error(strSql) return False return True netTypeCode = {'gsm':1, 'cdma':2, 'lte':3} def addCellTower(cursor, node): strSql = """INSERT INTO apmain.cellinfo (fullid, plmnid, cellid, lac, celltype, regdtm, lat, lng, acc, geosrc, seq) VALUES('%s','%s','%s','%s','%d','%s','%s','%f','%f','%s', '1') ON DUPLICATED UPDATE lat=((lat*seq)+VALUES(lat))/(seq+1), lng=((lng*seq)+VALUES(lng))/(seq+1), seq=seq+1, geosrc=VALUES(geosrc)""" try: strSql = strSql % (node.cellid, node.plmnid, node.cid, node.lac, 0, node.regdtm, node.geoloc.lat, node.geoloc.lng, node.geoloc.acc, 'cellLoc' if node.geoloc.from_cell else node.geoloc.geosrc) cursor.execute(strSql) log.debug("INSERT - %s" % node.cellid) except Exception, e: # Duplicate entry error if e[0] != 1062: log.error(e) log.error(strSql) return False return True class ProcessNetworkNode(object): OW_TASK_SUBSCRIBE_EVENTS = ['evtPlayerLog', 'evtNetworkLog'] 
OW_TASK_PUBLISH_EVENTS = [] OW_USE_HASHING = False OW_HASH_KEY = None OW_NUM_WORKER = 8 def publishEvent(self, event, params): # THIS METHOD WILL BE OVERRIDE # DO NOT EDIT THIS METHOD pass def __makeCellId(self, plmnid, cid, lac): try: cellId = map(lambda x: str(x) if str(x).isdigit() else '0', [plmnid, cid, lac]) if 0 not in map(int, cellId) and len(cellId[0]) < 7: return '_'.join(cellId) except Exception, e: log.error(e) return None def extractNetworkNode(self, params): # net info structure # wifi : 'wifi', status, ssid, bssid # cell : 'cell', status, celltower id # status : 'active' for current network, 'inactive' for logged network timestamp = time.strftime('%Y%m%d%H%M%S', time.gmtime(params['tTM'])) logType = params.get('log_type', 'unknown') netList = UniqueList() netList.setKey(key=lambda x: x.cellid if isinstance(x, CellNode) else x.bssid) if 'lat' in params and 'lng' in params: geoloc = GeoInfo(lat=params.get('lat'), lng=params.get('lng'), acc=params.get('accuracy', 500), geosrc='device') else: geoloc = None # APAT Header fields try: if 'pwf' in params: pwf = params['pwf'] if 'bssid' in pwf and EthAddrType.isEthAddr(pwf['bssid']): node = WiFiNode(state='active', bssid=pwf['bssid'], ssid=pwf.get('ssid', ''), regdtm=timestamp, geoloc=geoloc) netList.addSet(node) except Exception, e: log.error(e) log.error(params) try: if 'pcell_list' in params and isinstance(params['pcell_list'], list) and len(params['pcell_list']) > 0: pcell = params['pcell_list'][0] if 'cid' in pcell and 'lac' in pcell: cellId = self.__makeCellId(int("%03d%02d" % (pcell.get('mcc', 0), pcell.get('mnc', 0))), pcell.get('cid'), pcell.get('lac')) if cellId: if 'ctype' in pcell and str(pcell['ctype']).isdigit(): ctype = int(pcell.get('ctype', -1)) + 1 # -1 : Unknown cellType = ctype if ctype in netTypeCode.values() else 0 else: cellType = 0 node = CellNode(state='active', cellid=cellId, celltype=cellType, regdtm=timestamp, geoloc=geoloc, priority=Priority.HIGH) netList.addSet(node) except Exception, e: log.error(e) log.error(params) return netList def handler(self, params): # Event Key/Value 인 경우, 무시 if 'evtKey' in params.keys(): return # Extract network nodes from logging try: networks = self.extractNetworkNode(params) except Exception, e: log.error(e) log.error(params) return cursor = worker.dbmanager.allocDictCursor('myapmain') try: for node in networks: nodeType = 'wifi' if isinstance(node, WiFiNode) else 'cell' if nodeType == 'wifi' and node.priority > Priority.LOW: try: addWiFi(cursor, node) except Exception, e: log.error(e) elif nodeType == 'cell': try: addCellTower(cursor, node) except Exception, e: log.error(e) except Exception, e: log.error(e) exc_type, exc_value, exc_traceback = sys.exc_info() log.error(traceback.format_exception(exc_type, exc_value, exc_traceback)) worker.dbmanager.freeCursor(cursor) class CheckCellPCIField(object): OW_TASK_SUBSCRIBE_EVENTS = ['evtPlayerLog', 'evtNetworkLog'] OW_TASK_PUBLISH_EVENTS = [] OW_USE_HASHING = False OW_HASH_KEY = None OW_NUM_WORKER = 8 def __init__(self): pass def publishEvent(self, event, params): # THIS METHOD WILL BE OVERRIDE # DO NOT EDIT THIS METHOD pass def handler(self, params): if not isinstance(params, dict): return pcell = params.get('pcell') pcellList = params.get('pcell_list') if not pcell or not pcellList: return try: plmnid = pcell.get('mcc') + pcell.get('mnc') except Exception, e: return pciInfo = {} for cell in pcellList: if 'pci' not in cell: continue try: cellId = '%s_%s_%s' % (plmnid, cell['cid'], cell['tac']) except Exception, e: continue 
            pciInfo[cellId] = cell['pci']

        cursor = worker.dbmanager.allocDictCursor('myapmain')

        # MySQLdb quotes parameters itself, so the placeholders must not be
        # wrapped in quotes inside the statement.
        strSql = "UPDATE apmain.cellinfo SET pci=%s WHERE fullid=%s AND pci IS NULL"
        try:
            cursor.executemany(strSql, [(v[1], v[0]) for v in pciInfo.items()])
        except Exception, e:
            log.error(e)

        worker.dbmanager.freeCursor(cursor)
ddinsight/dd-streamworks
stream_worker/devmodule/production/networklog/__init__.py
Python
apache-2.0
12,862
[ "Galaxy" ]
6ce8cd03a7b442f811af28c23ab15a3c31c0215a327b1a314387bc972e84cf40
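A note on the `UniqueList` helper in the module above: its class-level `key = lambda x: x` becomes a bound method when accessed through an instance, so the default key only works after `setKey` has installed an instance attribute. A standalone sketch of the same dedupe-by-key idea that sidesteps the binding pitfall; the names here are illustrative, not the module's API:

# Hypothetical stand-in for the UniqueList/addSet pattern above.
class UniqueKeyList(list):
    def __init__(self, key=lambda x: x):
        super(UniqueKeyList, self).__init__()
        self._key = key  # stored as an instance attribute, so no method binding

    def add_set(self, item):
        if any(self._key(x) == self._key(item) for x in self):
            return False  # an element with the same key already exists
        self.append(item)
        return True

nodes = UniqueKeyList(key=lambda n: n['bssid'])
assert nodes.add_set({'bssid': 'aa:bb', 'rssi': -40}) is True
assert nodes.add_set({'bssid': 'aa:bb', 'rssi': -60}) is False  # same bssid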
import sys import shlex import pysam import subprocess import multiprocessing import os def is_nonempty_file(path): return os.path.isfile(path) and os.path.getsize(path) def submit_mdist(tup): ss, ks, sketchfns, ofn = tup subprocess.check_call("mash dist -p 1 %s > %s" % (" ".join(sketchfns), ofn), shell=True) def submit_mash_sketch(tup): fa, fn, ss, ks = tup assert ss < 32 ss = 1 << ss if not is_nonempty_file(fn + ".msh"): assert not os.path.isfile(fn + ".msh") print("%s does not exist" % fn, file=sys.stderr) cstr = "mash sketch -s {ss} -k {ks} -o {fn} {fa}".format( **locals()) print("About to call '%s'." % cstr, file=sys.stderr) subprocess.check_call(shlex.split(cstr)) print("Successfully called command for fn %s" % fn, file=sys.stderr) ESTIMS = ["original", "improved", "ertl_ml", "ertl_joint"] ESTIM_DICT = dict(zip(ESTIMS, ["-E", "-I", "-J", ""])) def submit_distcmp_call(tup): k, n, opath, paths, redo, estim_method = tup estim_flag = ESTIM_DICT[estim_method] if any(not os.path.isfile(path) for path in paths): raise Exception("The files are NOT in the computer! %s" % ", ".join(paths)) if not redo: if is_nonempty_file(opath): print("%s has run and redo is set to false. " "Continuing" % opath, file=sys.stderr) return else: print("%s has run, but the file is empty." " Redoing!" % opath, file=sys.stderr) cstr = "distcmp %s -o%s -s %s -n%i -k%i %s" % ( estim_flag, opath, opath + ".sum", n, k, ' '.join(paths)) print("Calling '%s'" % cstr, file=sys.stderr) subprocess.check_call(shlex.split(cstr)) def submit_call(cstr): print(f"submitting command-string {cstr}", file=sys.stderr) subprocess.check_call(cstr, shell=True) def perform_sourmash(krange, ss, opath, paths, nthreads): assert isinstance(paths, list) assert isinstance(opath, str) ss = 1 << ss Spooooool = multiprocessing.Pool(nthreads) cstrs = ("sourmash compute -n %i -k %s %s" % ( ss, ",".join(map(str, krange)), path) for path in paths) Spooooool.map(submit_call, cstrs) sigpaths = [path.split("/")[-1] + ".sig" for path in paths] tmppaths = ["%s.n%i.k%i" % (opath, ss, k) for k in krange] cstrs = ("sourmash compare -o %s -k %i %s" % (tp, k, " ".join(sigpaths)) for tp, k in zip(tmppaths, krange)) Spooooool.map(submit_call, cstrs) Spooooool.map(submit_call, (f"rm {sig}" for sig in sigpaths)) def make_sketch_fn(fn): return fn + ".mash" def x31_hash(x): ret = ord(x[0]) for i in x[1:]: ret = (ret << 5) - ret + ord(i) ret = ret & 18446744073709551615 # (1 << 64) - 1 return ret def make_hash(x): from functools import reduce from operator import xor print("paths = %s" % x, file=sys.stderr) return reduce(xor, map(x31_hash, x)) def makefn(x, y, z, sketcher, estim): hashval = make_hash(x) if sketcher == "flashdans": return "experiment_%s_%i_%x_genomes.k%i.n%i.out" % ( estim, len(x), hashval, y, z) elif sketcher == "sourmash": return "sourmashed_experiment_%i_%x_genomes.k_all.n%i.out" % ( len(x), hashval, z) return "mashed_experiment_%i_%x_genomes.k%i.n%i." % ( len(x), hashval, y, z) def main(): sketch_range = range(10, 24, 1) import argparse argv = sys.argv # Handle args superparser = argparse.ArgumentParser( description=("This calculates all pairwise distances between " "genomes for all combinations of parameters." 
"This does take a while.")) sp = superparser.add_subparsers() shell_parser = sp.add_parser("sketch") shell_parser.add_argument("--no-redo", "-n", action="store_true") shell_parser.add_argument("--threads", "-p", default=multiprocessing.cpu_count(), type=int) shell_parser.add_argument("--sketcher", "-S", help=("Use Mash to calculate distances " "rather than 'flashdans dist'"), choices=("mash", "flashdans", "sourmash"), default="flashdans") shell_parser.add_argument('genomes', metavar='paths', type=str, nargs='+', help=('paths to genomes or a path to a file' ' with one genome per line.')) shell_parser.add_argument("--range-start", default=24, type=int) shell_parser.add_argument("--range-end", default=32, type=int) py_parser = sp.add_parser( "exact", description=("Calculates distances natively in Python " "slowly but with obviously no errors")) py_parser.add_argument('genomes', metavar='paths', type=str, nargs='+', help=('paths to genomes or a path to a file' ' with one genome per line.')) py_parser.add_argument("--threads", "-p", default=multiprocessing.cpu_count(), type=int) py_parser.add_argument("--range-start", default=24, type=int) py_parser.add_argument("--range-end", default=32, type=int) py_parser.add_argument("--outfile", "-o", default="-") if not sys.argv[1:]: sys.argv.append("-h") args = superparser.parse_args() if argv[1] == "sketch": return sketch_main(args) elif argv[1] == "exact": return exact_main(args) else: raise Exception("Subcommand required. sketch or exact supported, " "for sketching or exact calculation.") def jaccard_index(set1, set2): return len(set1 & set2) / float(len(set1 | set2)) def build_kmer_set(tup): ks, path = tup return {read.sequence[i:i + ks] for read in pysam.FastxFile(path) for i in range(len(read.sequence) - ks + 1) if all(nuc in 'acgtACGT' for nuc in read.sequence[i:i + ks])} def exact_main(args): kmer_range = range(args.range_start, args.range_end + 1) threads = args.threads paths = genomes = args.genomes if len(genomes) == 1 and os.path.isfile(next(open(genomes[0])).strip()): paths = [i.strip() for i in open(genomes[0])] Spooooool = multiprocessing.Pool(threads) # With apologies to Beckett. ofp = sys.stdout if args.outfile in ["-", "stdout", "/dev/stdout"] \ else open(args.outfile, "w") ofw = ofp.write # Cache to reduce the number of lookups def pair2tup(path1, path2, *, ks): return tuple(sorted([path1, path2]) + [ks]) ofw("#Path1\tPath2\tKmer Size\tExact Jaccard\n") for ks in kmer_range: genome_sets = Spooooool.map(build_kmer_set, ((ks, path) for path in paths)) kdict = {} for i in range(len(genome_sets)): for j in range(i+1, len(genome_sets)): kdict[tuple(sorted((paths[i], paths[j])) + [ks])] = \ jaccard_index(genome_sets[i], genome_sets[j]) assert len(kdict) == (len(genome_sets) * (len(genome_sets) - 1)) >> 1 set(not ofw("%s\t%s\t%i\t%f\n" % (*k, v)) for k, v in kdict.items()) def sketch_main(args): sketch_range = range(10, 24, 1) kmer_range = range(args.range_start, args.range_end + 1) threads = args.threads redo = not args.no_redo paths = genomes = args.genomes if len(genomes) == 1 and os.path.isfile(next(open(genomes[0])).strip()): paths = [i.strip() for i in open(genomes[0])] sketcher = args.sketcher if any(not os.path.isfile(path) for path in paths): raise Exception("The files are NOT in the computer: %s" % ' '.join(path for path in paths if os.path.isfile(path))) Spooooool = multiprocessing.Pool(threads) # With apologies to Beckett. 
if sketcher == "mash": for ss in sketch_range: for ks in kmer_range: sketchfns = list(map(lambda x: "%s.k%i.n%i" % (x[0], x[1], x[2]), ((path, ks, ss) for path in paths)) ) gen = ((path, sketch, ss, ks) for path, sketch in zip(paths, sketchfns)) while True: # DO IT # # JUST # # DO IT try: Spooooool.map(submit_mash_sketch, gen) break except BlockingIOError: pass sketchfns = [i + ".msh" for i in sketchfns] fn = makefn(paths, ks, ss, sketcher, None) print("Now about to submit dist comparisons", file=sys.stderr) todo = [] thisfns = [] for i in range(len(paths) - 1): identifier = os.path.basename(paths[i]).split(".")[0] thisfn = "%s.%s.out" % (fn, identifier) if not redo and is_nonempty_file(thisfn): print("Nonempty file, skipping %s" % thisfn, file=sys.stderr) continue else: print("Missing file %s. Generating." % thisfn, file=sys.stderr) todo.append(i) thisfns.append(thisfn) gen = ((ss, ks, sketchfns[i:], thisfn) for i, thisfn in zip(todo, thisfns)) while True: # IF YOU'RE TIRED OF STARTING OVER # # STOP # # GIVING # # UP try: Spooooool.map(submit_mdist, gen) break except BlockingIOError: pass elif sketcher == "flashdans": submission_sets = ((ks, ss, makefn(paths, ks, ss, sketcher, estim), paths, redo, estim) for ss in sketch_range for ks in kmer_range for estim in ESTIMS) Spooooool.map(submit_distcmp_call, submission_sets) elif sketcher == "sourmash": set(perform_sourmash(kmer_range, ss, makefn(paths, -1, ss, sketcher, None), paths, threads) for ss in sketch_range) else: raise Exception("Whoa, what?") return 0 if __name__ == "__main__": sys.exit(main())
dnbaker/emp
python/run_dist.py
Python
gpl-3.0
10,939
[ "pysam" ]
5899d0e6df5f7221e0ea4535975bf210854e103426aa34b8f5dfb4df3d1a79e2
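The `x31_hash` in the script above is the classic 31x rolling string hash truncated to 64 bits (`(ret << 5) - ret` is just `ret * 31`). A self-contained restatement with a worked check; `x31` here is a sketch mirroring the script's logic, with an added guard for the empty string:

# Restatement of the x31 rolling hash above: h = h*31 + ord(c), mod 2**64.
def x31(s):
    h = ord(s[0]) if s else 0
    for c in s[1:]:
        h = ((h << 5) - h + ord(c)) & ((1 << 64) - 1)
    return h

# Worked check: x31("ab") = ord('a')*31 + ord('b') = 97*31 + 98 = 3105.
assert x31("ab") == 3105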
''' Created on Jan 20, 2016 @author: rch ''' from traits.api import \ Float, Property, cached_property, Int, \ Instance, Array, Bool, List import numpy as np from oricreate.api import YoshimuraCPFactory, \ fix, link, r_, s_, t_, MapToSurface,\ GuConstantLength, GuDofConstraints, \ GuPsiConstraints, SimulationConfig, SimulationTask, \ FTV, FTA from oricreate.export import \ InfoCadMeshExporter from oricreate.forming_tasks.forming_process import FormingProcess from oricreate.forming_tasks.forming_task import FormingTask from oricreate.fu import \ FuPotEngTotal from oricreate.mapping_tasks.mask_task import MaskTask from oricreate.simulation_tasks.simulation_history import \ SimulationHistory from oricreate.util.einsum_utils import \ DELTA, EPS from oricreate.view.window.main_window import MainWindow import sympy as sp a_, b_ = sp.symbols('a,b') class WBShellFormingProcess(FormingProcess): ''' Define the simulation task prescribing the boundary conditions, target surfaces and configuration of the algorithm itself. ''' L_x = Property def _get_L_x(self): return (self.a + 2 * self.c) * self.n_cell_x L_y = Property def _get_L_y(self): return self.h * 2 * self.n_cell_y n_fold_steps = Int(30, auto_set=False, enter_set=True, input=True) n_load_steps = Int(30, auto_set=False, enter_set=True, input=True) n_cell_x = Int(1, auto_set=False, enter_set=True, input=True) n_cell_y = Int(1, auto_set=False, enter_set=True, input=True) d_r = Float(0.1, auto_set=False, enter_set=True, input=True) a = Float(1.0, auto_set=False, enter_set=True, input=True) h = Float(1.0, auto_set=False, enter_set=True, input=True) c = Float(1.0, auto_set=False, enter_set=True, input=True) n_x = Property '''Number of cross cells in x direction ''' def _get_n_x(self): return self.n_cell_x * 3 n_y = Property '''Number of cross cells in x direction ''' def _get_n_y(self): return self.n_cell_y * 2 stiffening_boundary = Bool(False) ctf = Property(depends_on='+input') '''control target surface''' @cached_property def _get_ctf(self): d_r = self.d_r return [r_, s_, d_r * t_ * s_ * (1 - s_ / float(self.L_y)) - d_r * t_] factory_task = Property(Instance(FormingTask)) '''Factory task generating the crease pattern. 
''' @cached_property def _get_factory_task(self): yf = YoshimuraCPFactory(L_x=self.L_x, L_y=self.L_y, n_x=self.n_x, n_y=self.n_y, node='Waterbomb shell factory') cp = yf.formed_object N_h = yf.N_h N_i = yf.N_i N_v = yf.N_v e = (self.a + 2 * self.c) / 3.0 d_x = e - self.c cp.X[N_h[1::3, :].flatten(), 0] -= d_x cp.X[N_h[2::3, :].flatten(), 0] += d_x cp.X[N_i[0::3, :].flatten(), 0] += d_x cp.X[N_i[2::3, :].flatten(), 0] -= d_x cp.X[N_v[0, :], 0] -= 0.05 cp.X[N_v[-1, :], 0] += 0.05 n_N = cp.n_N X_add_left = np.copy(cp.X[N_h[0, :], :]) X_add_right = np.copy(cp.X[N_h[-1, :], :]) X_add_left[:, 0] -= 0.05 X_add_right[:, 0] += 0.05 n_added = 2 * len(X_add_left) N_add = n_N + np.arange(n_added).reshape(2, -1) cp.X = np.vstack([cp.X, X_add_left, X_add_right]) L_add_HL = np.vstack([N_h[0, :], N_add[0, :]]).T L_add_HR = np.vstack([N_h[-1, :], N_add[-1, :]]).T L_add_VD = np.vstack([N_v[:, :].reshape(1, -1), N_add[:, :-1].reshape(1, -1)]).T L_add_VU = np.vstack([N_v[:, :].reshape(1, -1), N_add[:, 1:].reshape(1, -1)]).T cp.L = np.vstack([cp.L, L_add_HL, L_add_HR, L_add_VD, L_add_VU]) F_add_L = np.c_[N_h[(0, -1), :-1].flatten(), N_v[(0, -1), :].flatten(), N_add[(0, -1), :-1].flatten()] F_add_U = np.c_[N_h[(0, -1), 1:].flatten(), N_v[(0, -1), :].flatten(), N_add[(0, -1), 1:].flatten()] n_F = len(cp.F) cp.F = np.vstack([cp.F, F_add_L, F_add_U]) print(cp.F_L[n_F:]) return yf init_displ_task = Property(Instance(FormingTask)) '''Initialization to render the desired folding branch. ''' @cached_property def _get_init_displ_task(self): cp = self.factory_task.formed_object return MapToSurface(node='Trigger fold mode', previous_task=self.factory_task, target_faces=[(self.ctf, cp.N)]) t_max = Float(1.0) fold_angle_cntl = Property(Instance(FormingTask)) '''Configure the simulation task. 
''' @cached_property def _get_fold_angle_cntl(self): self.init_displ_task.x_1 corner2_i_x = np.array([0, 0, -1, -1], dtype=np.int_) corner2_i_y = np.array([0, -1, 0, -1], dtype=np.int_) corner2_h_x = np.array([0, 0, -1, -1], dtype=np.int_) corner2_h_y = np.array([0, -1, 0, -1], dtype=np.int_) tb2_i_x = np.array([1, 1, 1], dtype=np.int_) tb2_i_y = np.array([0, -1, -1], dtype=np.int_) tb2_h_x = np.array([1, 1, 2], dtype=np.int_) tb2_h_y = np.array([0, -1, -1], dtype=np.int_) up2_i_x = np.array([0, 0, -1, -1], dtype=np.int_) up2_i_y = np.array([0, 1, 0, 1], dtype=np.int_) up2_h_x = np.array([0, 0, -1, -1], dtype=np.int_) up2_h_y = np.array([1, 1, 1, 1], dtype=np.int_) right2_i_x = np.array([2, 2, 3], dtype=np.int_) right2_i_y = np.array([0, 0, 0], dtype=np.int_) right2_h_x = np.array([3, 3, 3], dtype=np.int_) right2_h_y = np.array([0, 1, 0], dtype=np.int_) base_i_x = corner2_i_x base_i_y = corner2_i_y base_h_x = corner2_h_x base_h_y = corner2_h_y for c_x in range(0, self.n_cell_x): base_i_x = np.hstack([base_i_x, 3 * c_x + tb2_i_x]) base_i_y = np.hstack([base_i_y, tb2_i_y]) base_h_x = np.hstack([base_h_x, 3 * c_x + tb2_h_x]) base_h_y = np.hstack([base_h_y, tb2_h_y]) for c_x in range(0, self.n_cell_x - 1): base_i_x = np.hstack([base_i_x, 3 * c_x + right2_i_x]) base_i_y = np.hstack([base_i_y, right2_i_y]) base_h_x = np.hstack([base_h_x, 3 * c_x + right2_h_x]) base_h_y = np.hstack([base_h_y, right2_h_y]) for c_y in range(0, self.n_cell_y - 1): print('c_y', c_y) base_i_x = np.hstack([base_i_x, up2_i_x]) base_i_y = np.hstack([base_i_y, c_y + up2_i_y]) base_h_x = np.hstack([base_h_x, up2_h_x]) base_h_y = np.hstack([base_h_y, c_y + up2_h_y]) f = self.factory_task cp = f.formed_object m_nodes = f.N_i[base_i_x, base_i_y] n_nodes = f.N_h[base_h_x, base_h_y] psi_lines = cp.NN_L[[m_nodes], n_nodes].flatten() print('psi_lines', psi_lines) cm_node = f.N_i[0, 0] cn_node = f.N_h[1, 1] cpsi_line = cp.NN_L[cm_node, cn_node] print('cpsi_lines', cpsi_line) N_h = f.N_h N_i = f.N_i N_v = f.N_v y_mid = N_i.shape[1] / 2 fixed_nodes_x = fix( N_h[0, 0], (0)) fixed_nodes_y = fix( N_h[(0, -1), 0], (1)) fixed_nodes_z = fix( [N_h[0, 0], N_h[-1, 0], N_h[0, -1]], (2)) u_max = (1.999 * self.c * self.t_max) link_mid = link( N_i[0, 0], (0), 1.0, N_i[2, 0], (0), -1.0, lambda t: t * u_max ) print('--------------------------') print(N_i[0, 0].flatten()) print(N_i[2, 0].flatten()) print('--------------------------') dof_constraints = fixed_nodes_x + fixed_nodes_z + fixed_nodes_y # + \ # link_mid gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints) def FN(psi): return lambda t: psi * t cpsi_constr = [([(cpsi_line, 1.0)], FN(0.99 * np.pi))] lpsi_constr = [([(psi_lines[0], 1.0), (i, -1.0)], 0.0) for i in psi_lines[1:]] gu_psi_constraints = \ GuPsiConstraints(forming_task=self.init_displ_task, psi_constraints=lpsi_constr + cpsi_constr) gu_constant_length = GuConstantLength() sim_config = SimulationConfig(goal_function_type='none', gu={'cl': gu_constant_length, 'dofs': gu_dof_constraints, 'psi': gu_psi_constraints}, acc=1e-5, MAX_ITER=500, debug_level=0) st = SimulationTask(node='Fold angle control', previous_task=self.init_displ_task, config=sim_config, n_steps=self.n_fold_steps) cp = st.formed_object N_down = np.hstack([N_h[::3, :].flatten(), N_i[1::3, :].flatten() ]) print('N_down', N_down) N_up = np.hstack([N_i[::3, :].flatten(), N_i[2::3, :].flatten(), N_v[:, :].flatten()]) print('N_up', N_up) cp.u[N_down, 2] -= self.d_down cp.u[N_up, 2] += self.d_up cp.u[:, 2] += self.d_down return st fold_kinem_cntl = 
Property(Instance(FormingTask)) '''Configure the simulation task. ''' @cached_property def _get_fold_kinem_cntl(self): self.init_displ_task.x_1 base_i_x = np.array([0, 0, -1, -1], dtype=np.int_) base_i_y = np.array([0, -1, 0, -1], dtype=np.int_) base_h_x = np.array([0, 0, -1, -1], dtype=np.int_) base_h_y = np.array([0, -1, 0, -1], dtype=np.int_) f = self.factory_task cp = f.formed_object m_n = f.N_h[(0, 0, -1, -1), (0, -1, 0, -1)] n_n = f.N_v[(0, 0, -1, -1), (0, -1, 0, -1)] print(m_n.flatten()) print(n_n.flatten()) fix_psi_lines = cp.NN_L[m_n, n_n].flatten() print('psi_lines', fix_psi_lines) cm_nodes = [f.N_i[0, 0], f.N_i[-1, 1]] cn_nodes = [f.N_h[1, 1], f.N_h[2, 1]] cpsi_lines = cp.NN_L[cm_nodes, cn_nodes] print('cpsi_lines', cpsi_lines) N_h = f.N_h N_i = f.N_i N_v = f.N_v fixed_nodes_x = fix( N_i[1, (0, 1)], (0)) fixed_nodes_y = fix( [N_h[1, 1].flatten()], (1)) fixed_nodes_z = fix( list(N_v[[0, 0, -1], [0, -1, 0]].flatten()), (2) ) link_nodes_yz = link( list(N_v[[0, 0], [0, -1]].flatten()) + list(N_h[[0, 0], [0, -1]].flatten()), (1, 2), 1.0, list(N_i[[0, 0], [0, -1]].flatten()) + list(N_h[[-1, -1], [0, -1]].flatten()), (1, 2), -1.0, ) link_nodes_z = link( N_h[1, 1], 2, 1.0, N_h[2, 1], 2, -1.0, ) dof_constraints = fixed_nodes_x + fixed_nodes_z + fixed_nodes_y + \ link_nodes_yz + link_nodes_z gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints) def FN(psi): return lambda t: psi * t cpsi_constr = [([(cpsi_line, 1.0)], FN(0.502 * np.pi)) for cpsi_line in cpsi_lines] fix_psi_constr = [([(i, 1.0)], 0.0) for i in fix_psi_lines] print('fix_psi_lines', fix_psi_lines) gu_psi_constraints = \ GuPsiConstraints(forming_task=self.init_displ_task, psi_constraints=fix_psi_constr + cpsi_constr) gu_constant_length = GuConstantLength() sim_config = SimulationConfig(goal_function_type='none', gu={'cl': gu_constant_length, 'dofs': gu_dof_constraints, 'psi': gu_psi_constraints}, acc=1e-5, MAX_ITER=500, debug_level=0) st = SimulationTask(previous_task=self.init_displ_task, config=sim_config, n_steps=self.n_fold_steps) cp = st.formed_object N_down = np.hstack([N_h[::3, :].flatten(), N_i[1::3, :].flatten(), [23, 26] ]) print('N_down', N_down) N_up = np.hstack([N_i[::3, :].flatten(), N_i[2::3, :].flatten(), N_v[:, :].flatten()]) print('N_up', N_up) cp.u[N_down, 2] -= self.d_down cp.u[N_up, 2] += self.d_up cp.u[:, 2] += self.d_down return st fold_self_weight_cntl = Property(Instance(FormingTask)) '''Configure the simulation task. 
''' @cached_property def _get_fold_self_weight_cntl(self): self.init_displ_task.x_1 f = self.factory_task cp = f.formed_object N_h = f.N_h N_i = f.N_i N_v = f.N_v fixed_nodes_x = fix( N_i[1, (0, 1)], (0)) fixed_nodes_y = fix( [N_h[1, 1].flatten()], (1)) fixed_nodes_z = fix( list(N_v[[0, 0, -1, -1], [0, -1, 0, -1]].flatten()) + list(N_i[[0, 0, -1, -1], [0, -1, 0, -1]].flatten()), (2) ) cntl_z = fix(N_h[(1, 2), 1], 2, lambda t: 0.1 * t) link_nodes_y = link( list(N_v[[0, -1], [0, -1]].flatten()), 1, 1.0, list(N_i[[0, -1], [0, -1]].flatten()), 1, -1.0, ) link_nodes_z = link( list(N_h[[1, 1, 1], [0, 1, -1]].flatten()), 2, 1.0, list(N_h[[2, 2, 2], [0, 1, -1]].flatten()), 2, -1.0, ) # link_nodes_z = link( # N_h[1, 1], 2, 1.0, # N_h[2, 1], 2, -1.0, # ) dof_constraints = fixed_nodes_x + fixed_nodes_z + fixed_nodes_y + \ link_nodes_z + \ link_nodes_y # link_nodes_yz + link_nodes_z gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints) gu_constant_length = GuConstantLength() sim_config = SimulationConfig(goal_function_type='total potential energy', gu={'cl': gu_constant_length, 'dofs': gu_dof_constraints, }, acc=1e-5, MAX_ITER=1000, debug_level=0) def FN(F): return lambda t: t * F H = 0 P = 0.1 F_ext_list = [(N_i[1, 1], 2, FN(-P)), (N_i[1, -1], 2, FN(-P)), (N_h[(0, -1), 0], 2, FN(-P)), (N_h[(0, -1), -1], 2, FN(-P)) ] fu_tot_poteng = FuPotEngTotal(kappa=0.0, thickness=0.01, rho=23.6, F_ext_list=F_ext_list) sim_config._fu = fu_tot_poteng st = SimulationTask(previous_task=self.init_displ_task, config=sim_config, n_steps=self.n_fold_steps) fu_tot_poteng.forming_task = st cp = st.formed_object N_down = np.hstack([N_h[::3, :].flatten(), N_i[1::3, :].flatten() ]) print('N_down', N_down) N_up = np.hstack([N_i[::3, :].flatten(), N_i[2::3, :].flatten(), N_v[:, :].flatten()]) print('N_up', N_up) cp.u[N_down, 2] -= self.d_down cp.u[N_up, 2] += self.d_up cp.u[:, 2] += self.d_down return st load_self_weight = Property(Instance(FormingTask)) '''Configure the simulation task. 
    '''
    @cached_property
    def _get_load_self_weight(self):
        self.fold_kinem_cntl.x_1
        f = self.factory_task
        exclude_lines = np.arange(9, 17)
        cp = f.formed_object
        N_h = f.N_h
        N_i = f.N_i
        N_v = f.N_v
        fixed_nodes = np.hstack([N_h[[-1, -1], [0, -1]]])
        fixed_nodes = [25, 27]
        fixed_nodes_xyz = fix(
            list(N_h[[0, 0, -1, -1], [0, -1, 0, -1]].flatten()), (2)
            #fixed_nodes, (0, 2)
        )
        slided_nodes = np.hstack([N_h[[0, -1], [0, 0]]])  # , [22, 25]])
        #slide_y = fix(slided_nodes, [1], lambda t: 1.8 * self.L_y * t)
        slide_x = fix([22, 24], [2], lambda t: 0.95 * (self.L_x) * t)
        slide_z = fix(N_h[[1, -2], [1, 1]], [2], lambda t: -0.3 * t)
        dof_constraints = fixed_nodes_xyz + slide_z
        gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
        gu_constant_length = GuConstantLength()
        fix_psi_constr = [([(i, 1.0)], 0.0) for i in exclude_lines]
        gu_psi_constraints = \
            GuPsiConstraints(forming_task=self.fold_kinem_cntl,
                             psi_constraints=fix_psi_constr)
        sim_config = SimulationConfig(
            goal_function_type='total potential energy',
            gu={'cl': gu_constant_length,
                'dofs': gu_dof_constraints,
                'psi': gu_psi_constraints
                },
            acc=1e-7, MAX_ITER=1000,
            debug_level=0
        )
        loaded_n = N_h[:, 1].flatten()

        def FN(F): return lambda t: t * F
        P = 0.0 * 30.5
        F_ext_list = []  # (loaded_n, 2, FN(-P))]
        thickness = 0.01
        E_mod = 21.0e+4
        I = 1.0 * thickness**3 / 12.0
        kappa = E_mod * I
        print('kappa', kappa)
        sig_tu = 3000.0
        m_u = sig_tu * 1.0 * thickness**2 / 6.0
        print('m_u', m_u)
        fu_tot_poteng = FuPotEngTotal(
            exclude_lines=exclude_lines,
            kappa=np.array([kappa]),
            thickness=thickness,
            rho=23.6,
            m_u=m_u,
            F_ext_list=F_ext_list
        )
        sim_config._fu = fu_tot_poteng
        st = SimulationTask(
            previous_task=self.fold_kinem_cntl,
            config=sim_config,
            n_steps=self.n_load_steps
        )
        fu_tot_poteng.forming_task = st
        cp = st.formed_object
        cp.x_0 = self.fold_kinem_cntl.x_1
        cp.u[:, :] = 0.0
        return st

    d_up = Float(0.01)
    d_down = Float(0.01)

    max_slope = Property(Array)

    @cached_property
    def _get_max_slope(self):
        u_t = self.fold_angle_cntl.sim_history.u_t
        phi_argmax = []
        phi_arr = []
        # formed object of the task whose history is scanned
        cp = self.fold_angle_cntl.formed_object
        for u in u_t:
            cp.u = u
            n = cp.norm_F_normals
            n1, n2, n3 = n.T
            n12 = np.sqrt(n1**2 + n2**2)
            phi = np.pi / 2.0 - np.arctan(n3 / n12)
            i = np.argmax(phi)
            phi_argmax.append(i)
            phi_arr.append(180.0 / np.pi * phi[i])
        return phi_argmax, phi_arr

    curvature_t = Property(Array)
    '''Configure the simulation task.
''' @cached_property def _get_curvature_t(self): u_1 = self.fold_angle_cntl.u_1 f = self.factory_task x_t = self.fold_angle_cntl.sim_history.x_t u = x_t[:, f.N_h[1, 0], :] - x_t[:, f.N_h[0, 0], :] v = x_t[:, f.N_i[0, 0], :] - x_t[:, f.N_i[1, 0], :] u[:, 0] = 0 v[:, 0] = 0 uxv = np.einsum('...i,...j,...ijk', u, v, EPS) s_uxv = -np.sign(uxv[:, 0]) n_uxv = np.sqrt(np.einsum('...i,...i', uxv, uxv)) n_uu = np.sqrt(np.einsum('...i,...i', u, u)) n_vv = np.sqrt(np.einsum('...i,...i', v, v)) phi = np.arcsin(s_uxv * n_uxv / (n_uu * n_vv)) print(n_uu) return n_uu, n_vv, phi def generate_fe_mesh(self, t): ft = self.fold_angle_cntl cp = ft.formed_object u_t = ft.sim_history.u_t arg_t = np.argwhere(ft.t_arr > t)[0][0] cp.u = u_t[arg_t] me = InfoCadMeshExporter(forming_task=ft, n_l_e=4) me.write() X, F = me._get_geometry() x, y, z = X.T import mayavi.mlab as m me.plot_mlab(m) m.show() def generate_fe_mesh_kinem(self): ft = self.fold_kinem_cntl me = InfoCadMeshExporter(forming_task=ft, n_l_e=4) me.write() X, F = me._get_geometry() x, y, z = X.T import mayavi.mlab as m me.plot_mlab(m) m.show() class WBShellFormingProcessFTV(FTV): model = Instance(WBShellFormingProcess) if __name__ == '__main__': kw1 = dict(a=0.25, c=0.35 / 2.0, h=0.55, d_r=0.0001, d_up=0.005, d_down=0.005, t_max=0.25, n_cell_x=1, n_cell_y=2, n_fold_steps=40, n_load_steps=50) kw2 = dict(a=0.25, c=0.35 / 2.0, h=0.55, d_r=0.0001, d_up=0.005, d_down=0.005, t_max=0.25, n_cell_x=2, n_cell_y=2, n_fold_steps=40, n_load_steps=1) kw3 = dict(a=2, h=3, c=2, d_r=1, d_up=0.001, d_down=0.02, t_max=1.0, n_cell_x=1, n_cell_y=1, n_fold_steps=40, n_load_steps=1) bsf_process = WBShellFormingProcess(**kw1) mw = MainWindow(forming_process=bsf_process) ftv = mw.forming_task_scene fa = bsf_process.factory_task print(fa.formed_object.iL_psi) if False: import pylab as p ax = p.axes() fa.formed_object.plot_mpl(ax) p.show() it = bsf_process.init_displ_task animate = False show_init_task = False show_fold_angle_cntl = False show_fold_kinem_cntl = True show_fold_self_weight_cntl = False show_load_self_weight = False fta = FTA(ftv=ftv) fta.init_view(a=33.4389721223, e=61.453898329, d=5.0, f=(1.58015494765, 1.12671403563, -0.111520325399), r=-105.783218753) if show_init_task: ftv.add(it.target_faces[0].viz3d['default']) it.formed_object.viz3d['cp'].set(tube_radius=0.002) ftv.add(it.formed_object.viz3d['cp']) #ftv.add(it.formed_object.viz3d['node_numbers'], order=5) it.u_1 if show_fold_angle_cntl: ft = bsf_process.fold_angle_cntl from oricreate.view import FormingProcessView as FPV ftt = FPV(forming_process=bsf_process) ftt.configure_traits() print(ft.sim_step) ft.sim_history.set(anim_t_start=0, anim_t_end=10) ft.config.gu['dofs'].set(anim_t_start=0, anim_t_end=5) ft.sim_history.viz3d['cp'].set(tube_radius=0.002) ftv.add(ft.sim_history.viz3d['cp']) # ftv.add(ft.sim_history.viz3d['node_numbers']) ft.config.gu['dofs'].viz3d['default'].scale_factor = 0.1 ftv.add(ft.config.gu['dofs'].viz3d['default']) ft.u_1 # bsf_process.generate_fe_mesh(0.5) fta.add_cam_move(duration=10, n=20) arg_phi, phi = bsf_process.max_slope if False: fig, (ax1, ax3) = p.subplots(2, 1, sharex=True) ax2 = ax1.twinx() n_u, n_v, c = bsf_process.curvature_t ax1.plot(ft.t_arr, c, 'b-', label='curvature') ax2.plot(ft.t_arr, n_u, 'g-', label='height u') ax2.plot(ft.t_arr, n_v, 'y-', label='height v') ax3.plot(ft.t_arr, phi, 'r-', label='slope') p.show() if show_fold_kinem_cntl: ft = bsf_process.fold_kinem_cntl it.formed_object.viz3d['cp'].set(tube_radius=0.002) # ftv.add(it.formed_object.viz3d['cp']) 
        ft.sim_history.set(anim_t_start=0, anim_t_end=10)
        ft.sim_history.viz3d['cp'].set(tube_radius=0.005)
        ftv.add(ft.sim_history.viz3d['cp'])
        ft.config.gu['dofs'].set(anim_t_start=0, anim_t_end=5)
        ft.config.gu['dofs'].viz3d['default'].scale_factor = 0.1
        # ftv.add(ft.config.gu['dofs'].viz3d['default'])
        ft.u_1
        cp = ft.formed_object
        node_heights = [12, 18, 1, 4, 0, 3]
        for node in node_heights:
            print('node %d, %5.3f [%5.3f, %5.3f, %5.3f]' %
                  (node, cp.x[node, 2],
                   cp.u[node, 0], cp.u[node, 1], cp.u[node, 2]))
        iL_psi = cp.iL_psi / np.pi * 180.0
        recorded_creases = [4, 17, 36, 29, 24, 19, 38]
        for crease in recorded_creases:
            print('crease %d, %5.2f' % (crease, iL_psi[cp.L_iL[crease]]))
        # bsf_process.generate_fe_mesh_kinem()
        # mw.forming_task_scene.plot()
        # mw.configure_traits()
        center_1 = ft.center_1
        center_1[1] += 0.3
        fta.init_view(a=48.9, e=66.8, d=2.4, f=center_1, r=-114.0)
        fta.add_cam_move(duration=10, n=50)

    if show_fold_self_weight_cntl:
        ft = bsf_process.fold_self_weight_cntl
        print(ft.sim_step)
        ft.sim_history.set(anim_t_start=0, anim_t_end=10)
        ft.config.gu['dofs'].set(anim_t_start=0, anim_t_end=5)
        ft.sim_history.viz3d['cp'].set(tube_radius=0.002)
        ftv.add(ft.sim_history.viz3d['cp'])
        ft.config.gu['dofs'].viz3d['default'].scale_factor = 0.1
        ftv.add(ft.config.gu['dofs'].viz3d['default'])
        ft.u_1
        fta.add_cam_move(duration=10, n=20)

    if show_load_self_weight:
        lot = bsf_process.load_self_weight
        lot.sim_history.set(anim_t_start=0, anim_t_end=50)
        lot.sim_history.viz3d['displ'].set(tube_radius=0.002,
                                           warp_scale_factor=5.0)
        lot.sim_history.viz3d['displ'].set(tube_radius=0.002)
        ftv.add(lot.sim_history.viz3d['displ'])
        gu_dofs_viz3d = lot.config.gu['dofs'].viz3d['default']
        gu_dofs_viz3d.scale_factor = 0.1
        ftv.add(gu_dofs_viz3d)
        ftv.add(lot.config.fu.viz3d['default'])
        lot.config.fu.viz3d['default'].set(anim_t_start=0, anim_t_end=50)
        ftv.add(lot.config.fu.viz3d['node_load'])
        lot.config.fu.viz3d['node_load'].set(anim_t_start=0, anim_t_end=50)
        u_max = np.max(np.fabs(lot.u_1))
        print(u_max)
        fta.add_cam_move(duration=10, n=20)

    fta.render()
    fta.plot()
    fta.configure_traits()
simvisage/oricreate
apps/waterbomb/ex01_pwaterbomb_ahc.py
Python
gpl-3.0
27,583
[ "Mayavi" ]
f841edf76e6911b2565b5c991c73c7fc43bfdc935e0dc6adfdf7bf27d6797839
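The FormingTask script in the row above repeatedly defines a one-line factory, def FN(psi): return lambda t: psi * t, before assembling its time-dependent constraint lists. A minimal sketch of why the factory is needed instead of a bare lambda in a loop (Python closures bind the loop variable late); plain Python only, nothing below comes from oricreate:

import math

def FN(psi):
    # freeze the current value of psi in its own closure
    return lambda t: psi * t

target_angles = [0.25 * math.pi, 0.502 * math.pi]

# correct: one independent ramp per target angle
ramps = [FN(psi) for psi in target_angles]
assert [r(1.0) for r in ramps] == target_angles

# without the factory, every lambda shares the loop variable and
# all ramps end at the last angle in the list
bad = [lambda t: psi * t for psi in target_angles]
assert all(b(1.0) == target_angles[-1] for b in bad)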
# pylint: disable-msg=I0011,C0301,W0611 """I found some of my scripts trigger off an AttributeError in pylint 0.8.1 (with common 0.12.0 and astroid 0.13.1). Traceback (most recent call last): File "/usr/bin/pylint", line 4, in ? lint.Run(sys.argv[1:]) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 729, in __init__ linter.check(args) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 412, in check self.check_file(filepath, modname, checkers) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 426, in check_file astroid = self._check_file(filepath, modname, checkers) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 450, in _check_file self.check_astroid_module(astroid, checkers) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 494, in check_astroid_module self.astroid_events(astroid, [checker for checker in checkers File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 511, in astroid_events self.astroid_events(child, checkers, _reversed_checkers) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 511, in astroid_events self.astroid_events(child, checkers, _reversed_checkers) File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 508, in astroid_events checker.visit(astroid) File "/usr/lib/python2.4/site-packages/logilab/astroid/utils.py", line 84, in visit method(node) File "/usr/lib/python2.4/site-packages/pylint/checkers/variables.py", line 295, in visit_import self._check_module_attrs(node, module, name_parts[1:]) File "/usr/lib/python2.4/site-packages/pylint/checkers/variables.py", line 357, in _check_module_attrs self.add_message('E0611', args=(name, module.name), AttributeError: Import instance has no attribute 'name' You can reproduce it by: (1) create package structure like the following: package/ __init__.py subpackage/ __init__.py module.py (2) in package/__init__.py write: import subpackage (3) run pylint with a script importing package.subpackage.module. """ __revision__ = '$Id: import_package_subpackage_module.py,v 1.1 2005-11-10 15:59:32 syt Exp $' import package.subpackage.module
ruchee/vimrc
vimfiles/bundle/vim-python/submodules/astroid/tests/testdata/python3/data/package/import_package_subpackage_module.py
Python
mit
2,242
[ "VisIt" ]
53a06c31bff28056fc46a226b13ad6106262854ee726010b3c7e80bd633ecda5
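The docstring above spells out the on-disk layout needed to reproduce the crash. A hedged sketch that materialises that layout with the standard library, for anyone rebuilding the test case locally (paths are illustrative):

import os

# recreate the package structure described in the docstring
layout = {
    "package/__init__.py": "import subpackage\n",
    "package/subpackage/__init__.py": "",
    "package/subpackage/module.py": "",
}
for path, body in layout.items():
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "w") as fh:
        fh.write(body)
# a script doing 'import package.subpackage.module' can now be linted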
from paraview.simple import * import os mainDirName = os.getcwd() + '\\vtk\\' fileRootFmt = '5MW_Land_ModeShapes.Mode{:d}.LinTime1.' # keep the format specifier {:d} for the mode number nModes = 15 # number of modes to visualize fps = 30 # frames per second (rate to save in the .avi file) StructureModule = 'ED' BladeMesh = "AD_Blade" print('') for iMode in range(nModes): # iMode starts at 0, so add 1 #fileRootName = fileRoot + str(iMode+1) + '.LinTime1.' #.LinTime1 depends on visualization options fileRootName = fileRootFmt.format(iMode+1) print('***' + fileRootName + '***') # determine number of leading zeros in this mode shape nLeadingZeros = 0 exists = False while (not exists) and nLeadingZeros < 6: nLeadingZeros = nLeadingZeros + 1 txt = '{:0' + str(nLeadingZeros) + 'd}' fileLeadingZeros = txt.format(1) Blade1File = mainDirName + fileRootName + BladeMesh + '1.' + fileLeadingZeros + '.vtp' exists = os.path.isfile(Blade1File) if not exists: print(' Could not find files to load.') else: LoadState('ED_Surfaces.pvsm', LoadStateDataFileOptions='Choose File Names', DataDirectory=mainDirName, a5MW_Land_DLL_WTurbMode1LinTime1AD_Blade10FileName=[Blade1File], a5MW_Land_DLL_WTurbMode1LinTime1AD_Blade20FileName=[mainDirName + fileRootName + BladeMesh + '2.' + fileLeadingZeros + '.vtp'], a5MW_Land_DLL_WTurbMode1LinTime1AD_Blade30FileName=[mainDirName + fileRootName + BladeMesh + '3.' + fileLeadingZeros + '.vtp'], a5MW_Land_DLL_WTurbMode1LinTime1Blade1Surface0FileName=[mainDirName + fileRootName + 'Blade1Surface.' + fileLeadingZeros + '.vtp'], a5MW_Land_DLL_WTurbMode1LinTime1Blade2Surface0FileName=[mainDirName + fileRootName + 'Blade2Surface.' + fileLeadingZeros + '.vtp'], a5MW_Land_DLL_WTurbMode1LinTime1Blade3Surface0FileName=[mainDirName + fileRootName + 'Blade3Surface.' + fileLeadingZeros + '.vtp'], a5MW_Land_DLL_WTurbMode1LinTime1ED_Hub0FileName=[mainDirName + fileRootName + StructureModule + '_Hub.' + fileLeadingZeros + '.vtp'], a5MW_Land_DLL_WTurbMode1LinTime1ED_Nacelle0FileName=[mainDirName + fileRootName + StructureModule + '_Nacelle.' + fileLeadingZeros + '.vtp'], a5MW_Land_DLL_WTurbMode1LinTime1ED_TowerLn2Mesh_motion0FileName=[mainDirName + fileRootName + StructureModule + '_TowerLn2Mesh_motion.' + fileLeadingZeros + '.vtp'], a5MW_Land_DLL_WTurbMode1LinTime1HubSurface0FileName=[mainDirName + fileRootName + 'HubSurface.' + fileLeadingZeros + '.vtp'], a5MW_Land_DLL_WTurbMode1LinTime1NacelleSurface0FileName=[mainDirName + fileRootName + 'NacelleSurface.' + fileLeadingZeros + '.vtp'], a5MW_Land_DLL_WTurbMode1LinTime1TowerSurface0FileName=[mainDirName + fileRootName + 'TowerSurface.' 
+ fileLeadingZeros + '.vtp'] ) ## find new sources # blade 1 for iBlade in range(3): Blade = FindSource(fileRootName + BladeMesh + str(iBlade+1) + '...vtp') SetActiveSource(Blade) ExtendFileSeries(Blade) Blade = FindSource(fileRootName + 'Blade' + str(iBlade+1) + 'Surface...vtp') SetActiveSource(Blade) ExtendFileSeries(Blade) # hub Hub = FindSource(fileRootName + StructureModule + '_Hub...vtp') SetActiveSource(Hub) ExtendFileSeries(Hub) Hub = FindSource(fileRootName + 'HubSurface...vtp') SetActiveSource(Hub) ExtendFileSeries(Hub) # nacelle Nacelle = FindSource(fileRootName + StructureModule + '_Nacelle...vtp') SetActiveSource(Nacelle) ExtendFileSeries(Nacelle) Nacelle = FindSource(fileRootName + 'NacelleSurface...vtp') SetActiveSource(Nacelle) ExtendFileSeries(Nacelle) # tower Tower = FindSource(fileRootName + StructureModule + '_TowerLn2Mesh_motion...vtp') SetActiveSource(Tower) ExtendFileSeries(Tower) Tower = FindSource(fileRootName + 'TowerSurface...vtp') SetActiveSource(Tower) ExtendFileSeries(Tower) ##### SetActiveView(GetRenderView()) #view = GetActiveView() layout = GetLayout() SaveAnimation(fileRootName + 'avi', viewOrLayout=layout, FrameRate=fps ) # SaveAnimation(fileRootName + 'avi', viewOrLayout=layout, FrameRate=fps, ImageResolution=(1544,784) ) # this .pvsm file defaults to (2734,1178) without ImageResolution arguments, resulting in a bunch of warnings # For some reason, ParaView is ignoring the FrameRate argument and always uses a value of 1. print(' Saved animation file.')
OpenFAST/r-test
glue-codes/openfast/5MW_Land_ModeShapes/plotModeShapes.py
Python
apache-2.0
4,700
[ "ParaView", "VTK" ]
b7d9c2ab078aa40e9f62123807b6a1345ce4987da4d94046215300e2fdaa5c7c
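The while-loop that probes for the number of leading zeros in the .vtp frame names can be isolated into a helper; a sketch under the same file-naming assumptions as the script above (the name find_frame_format is invented here, it is not part of the script):

import os

def find_frame_format(dir_name, file_root, mesh_name, max_digits=6):
    """Return a zero-padded format such as '{:03d}' that matches an existing
    file of the form <root><mesh>1.<padded index>.vtp, or None if none match."""
    for n_digits in range(1, max_digits + 1):
        fmt = '{:0' + str(n_digits) + 'd}'
        candidate = os.path.join(
            dir_name, file_root + mesh_name + '1.' + fmt.format(1) + '.vtp')
        if os.path.isfile(candidate):
            return fmt
    return None

# e.g. find_frame_format('vtk', '5MW_Land_ModeShapes.Mode1.LinTime1.', 'AD_Blade')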
#runas import numpy as np; x = np.arange(1., 26.).reshape(5,5); factorMatrix0(x), factorMatrix1(x) import numpy as np #pythran export factorMatrix0(float[:,:]) def factorMatrix0(M): # Gaussian elimination, partial pivoting. # M must be an (n,n+1) numpy array. Not tested! n = M.shape[0] m= M.shape[1] for line in range(0, n-1): # find pivot cmax = np.argmax(abs(M[line:n,line])) + line # exchange rows if necessary if cmax != line: M[[line,cmax]]=M[[cmax,line]] # eliminate pivot = M[line,line] for j in range(line+1,n): v= M[j,line]/pivot for k in range(line,m): M[j,k]-= v*M[line,k] #pythran export factorMatrix1(float[:,:]) def factorMatrix1(M): # Gaussian elimination, partial pivoting. # M must be an (n,n+1) numpy array. Not tested! n = M.shape[0] m= M.shape[1] for line in range(0, n-1): # find pivot cmax=line vmax= abs(M[line,line]) for i in range(line+1,n): if abs(M[i,line])> vmax: vmax= abs(M[i,line]) cmax= i # exchange rows if necessary if cmax != line: for j in range(line,m): t= M[line,j] M[line,j]= M[cmax,j] M[cmax,j]= t # eliminate pivot = M[line,line] for j in range(line+1,n): v= M[j,line]/pivot for k in range(line,m): M[j,k]-= v*M[line,k]
serge-sans-paille/pythran
pythran/tests/cases/lu.py
Python
bsd-3-clause
1,516
[ "Gaussian" ]
e68b062f35a65a530157031aff303fafb6b15352a394f509c1f8fe5ea931c330
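factorMatrix0 above reduces an (n, n+1) augmented matrix to upper-triangular form in place, so adding back substitution turns it into a complete solver that can be checked against numpy. A sketch, assuming factorMatrix0 from the file above is in scope; back_substitute is an added helper, not part of the original:

import numpy as np

def back_substitute(M):
    # M is (n, n+1), upper triangular in its first n columns
    n = M.shape[0]
    x = np.zeros(n)
    for i in range(n - 1, -1, -1):
        x[i] = (M[i, n] - M[i, i + 1:n] @ x[i + 1:]) / M[i, i]
    return x

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
b = rng.standard_normal(5)
M = np.hstack([A, b[:, None]])     # augmented system [A | b]
factorMatrix0(M)                   # in-place elimination with partial pivoting
assert np.allclose(back_substitute(M), np.linalg.solve(A, b))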
# -*- coding: utf-8 -*-
"""
This is a Numeric/numpy free port of the method:
Scientific.Geometry.Transformation.Rotation.axisAndAngle(self)
From Konrad Hinsen ScientificPython
http://dirac.cnrs-orleans.fr/plone/software/scientificpython
"""

__author__ = "Pierre Legrand (pierre legrand \at synchrotron-soleil fr)"
__date__ = "23-11-2009"
__copyright__ = "Copyright (c) 2009 Pierre Legrand"
__version__ = "0.1.0"

from pycgtypes import vec3
from pycgtypes import mat3
import math

R2D = 180/math.pi

def asymmetrical_part(mat_3):
    "Return the asymmetrical part."
    if len(mat_3.mlist) == 9 and len(mat_3) == 3:
        return 0.5*(mat_3 - mat_3.transpose())
    else:
        raise ValueError('Not yet implemented')

def dyadic_product(vector1, vector2):
    "Dyadic product of two vectors."
    matr1, matr2 = mat3(), mat3()
    matr1.setColumn(0, vector1)
    matr2.setRow(0, vector2)
    return matr1*matr2

def trace(ma3):
    "Return the trace of the matrix."
    return ma3[0, 0]+ma3[1, 1]+ma3[2, 2]

def angle_from_sine_and_cosine(ylen, xlen):
    "Indirection to atan2 with check."
    try:
        return math.atan2(ylen, xlen)
    except:
        raise TypeError, 'AxisAndAngle:FAILURE in atan2'

def axis_and_angle(mat_3):
    """From a rotation matrix return a corresponding rotation as an
    axis (a normalized vector) and angle (in radians).
    The angle is in the interval (-pi, pi]
    """
    asym = -asymmetrical_part(mat_3)
    axis = vec3(asym[1, 2], asym[2, 0], asym[0, 1])
    sine = axis.length()
    if abs(sine) > 1.e-10:
        axis = axis/sine
        projector = dyadic_product(axis, axis)
        cosine = trace((mat_3-projector))/(3.-axis*axis)
        angle = angle_from_sine_and_cosine(sine, cosine)
    else:
        tsr = 0.5*(mat_3+mat3(1))
        # diagonal of the 3x3 matrix; valid indices run from 0 to 2
        diag = tsr[0, 0], tsr[1, 1], tsr[2, 2]
        # row of the largest diagonal entry
        i = diag.index(max(diag))
        axis = vec3(tsr.getRow(i)/(tsr[i, i])**0.5)
        angle = 0.
        if trace(tsr) < 2.:
            angle = math.pi
    return axis, angle

# Test code
if __name__ == '__main__':
    from Scientific.Geometry import Vector ##.Transformation import *
    from Scientific.Geometry.Transformation import Rotation
    from random import random
    #
    Q = mat3(0.36, 0.48, -0.8, -0.8, 0.6, 0, 0.48, 0.64, 0.60)
    axis_q, angle_q = axis_and_angle(Q)
    print "Axis_q: %9.6f%9.6f%9.6f" % tuple(axis_q),
    print "Angle_q: %10.5f" % (angle_q*R2D)
    #
    for iii in range(1e6):
        axis_i = list(vec3([random(), random(), random()]).normalize())
        angle_i = 3*random()
        rme = mat3().rotation(angle_i, vec3(axis_i))
        axis_1, angle_1 = axis_and_angle(rme)

        v = Vector(axis_i)
        r = Rotation(v, angle_i)
        axis_2, angle_2 = r.axisAndAngle()

        axis_d = (axis_1 - vec3(tuple(axis_2))).length()
        angle_d = abs(angle_1 - angle_2)
        if (angle_d > 1e-13) or (axis_d > 1e-13):
            print "Angle_d: %.3e" % (angle_d*R2D),
            print " Axis_length_diff: %.3e" % axis_d
            print "Axis_i: %9.6f%9.6f%9.6f" % tuple(axis_i),
            print "Angle_i: %10.5f" % (angle_i*R2D)
            print "Axis_1: %9.6f%9.6f%9.6f" % tuple(axis_1),
            print "Angle_1: %10.5f" % (angle_1*R2D)
            print "Axis_2: %9.6f%9.6f%9.6f" % tuple(axis_2),
            print "Angle_2: %10.5f" % (angle_2*R2D)
RAPD/RAPD
src/plugins/subcontractors/xdsme/new/xdsme-0.4.9/XOconv/AxisAndAngle.py
Python
agpl-3.0
3,373
[ "DIRAC" ]
c67f652c7a0d9e649139654c66d3044aad1e6a326e2296d593c2ff7c6bbe5d54
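The generic branch of axis_and_angle can be cross-checked with a plain-numpy version of the same identities: the antisymmetric part of R encodes sin(theta) times the axis, and trace(R) = 1 + 2 cos(theta). A sketch that is independent of pycgtypes (the function name axis_angle_np is illustrative):

import numpy as np

def axis_angle_np(R):
    """Axis/angle from a 3x3 rotation matrix, generic case only.
    The sin(theta) ~ 0 case needs the eigenvector treatment used above."""
    # the antisymmetric part of R encodes sin(theta) * axis
    w = 0.5 * np.array([R[2, 1] - R[1, 2],
                        R[0, 2] - R[2, 0],
                        R[1, 0] - R[0, 1]])
    sine = np.linalg.norm(w)
    cosine = 0.5 * (np.trace(R) - 1.0)   # trace(R) = 1 + 2*cos(theta)
    return w / sine, np.arctan2(sine, cosine)

# 90 degree rotation about z
Rz = np.array([[0., -1., 0.],
               [1., 0., 0.],
               [0., 0., 1.]])
axis, angle = axis_angle_np(Rz)
assert np.allclose(axis, [0., 0., 1.]) and np.isclose(angle, np.pi / 2)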
import os import lan debug = False class CGenerator(object): """ Uses the same visitor pattern as the NodeVisitor, but modified to return a value from each visit method, using string accumulation in generic_visit. """ def __init__(self): self.output = '' self.quotes = '\"' self.newline = '\n' self.semi = ';' self.start = '' # Statements start with indentation of self.indent_level spaces, using # the _make_indent method # self.indent_level = 0 self.inside_ArgList = False self.inside_ArgList2 = list() self.arg_list_level = 0 self.inside_Assignment = False self._is_first_group_compount = True def write_ast_to_file(self, ast, filename='temp.cpp'): code = self.visit(ast) currentdir = os.getcwd() full_file_name = currentdir + '/' + filename try: os.remove(full_file_name) except OSError: pass try: fileobj = open(full_file_name, 'w') fileobj.write(code) fileobj.close() except IOError: print "Unable to write file" def simple_node(self, n): """ Returns True for nodes that are "simple" """ return not isinstance(n, (lan.Constant, lan.Id, lan.ArrayRef, lan.FuncDecl, lan.FuncCall)) def parenthesize_if(self, n, condition): """ Visits 'n' and returns its string representation, parenthesized if the condition function applied to the node returns True. :param n: :param condition: """ s = self.visit(n) if condition(n): return '(' + s + ')' else: return s def _make_indent(self): return ' ' * self.indent_level def visit(self, node): method = 'visit_' + node.__class__.__name__ return getattr(self, method, self.generic_visit)(node) def generic_visit(self, node): if node is None: return '' else: return ''.join(self.visit(c[1]) if len(c) == 2 \ else self.visit(c) for c in node.children()) def visit_FileAST(self, n): newline = self.newline start = self.start if debug: newline = n.__class__.__name__ + newline start = n.__class__.__name__ + start s = '' for ext in n.ext: if isinstance(ext, lan.Compound): s += self.visit_GlobalCompound(ext) else: s += start + self.visit(ext) + newline return s def visit_GlobalCompound(self, n): s = '' for stat in n.statements: s += self.visit(stat) s += n.__class__.__name__ + self.newline return s def visit_GroupCompound(self, n): newline = self.newline start = self.start if debug: newline = n.__class__.__name__ + newline start = n.__class__.__name__ + start s = '' if self._is_first_group_compount: self._is_first_group_compount = False indent = '' else: indent = self._make_indent() for i, stat in enumerate(n.statements): if not isinstance(stat, lan.GroupCompound): s += self._make_indent() + start + self.visit(stat) + newline else: s += indent + start + self.visit(stat) + newline s += start return s def visit_Comment(self, n): return n.value def visit_Increment(self, n): s = self.visit(n.name) return s + n.op def visit_UnaryBefore(self, n): s = self.visit(n.expr) return n.op + s def visit_TypeId(self, n): s = self.visit(n.name) if n.type: s1 = ' '.join(n.type) s1 += ' ' + s else: s1 = s if not self.inside_ArgList: s1 += self.semi return s1 def visit_ArrayTypeId(self, n): s = self.visit(n.name) s1 = ' '.join(n.type) s1 += ' ' + s for arg in n.subscript: s1 += '[' + self.visit(arg) + ']' if not self.inside_ArgList: s1 += self.semi return s1 def visit_Assignment(self, n): self.inside_ArgList = True lval = self.visit(n.lval) self.inside_ArgList = False self.inside_Assignment = True rval = self.visit(n.rval) self.inside_Assignment = False return lval + ' ' + n.op + ' ' + rval + self.semi def visit_ArrayInit(self, n): s = '{' for stat in n.values: s += self.visit(stat) + ', ' s = s[:-2] s += 
'}' return s def visit_Compound(self, n): start = self.start newline = self.newline if debug: newline = n.__class__.__name__ + newline start = n.__class__.__name__ + start s = start + self._make_indent() + '{' + newline self.indent_level += 2 for stat in n.statements: if isinstance(stat, lan.GroupCompound): indent = start else: indent = start + self._make_indent() s += indent + self.visit(stat) + newline self.indent_level -= 2 s += start + self._make_indent() + '}' return s def visit_ArgList(self, n): newline = self.newline start = self.start if debug: newline = n.__class__.__name__ + newline start = n.__class__.__name__ + start s = '(' count = 1 if len(n.arglist) == 1: return '(' + self.visit(n.arglist[0]) + ')' for arg in n.arglist: if count == 1: s += start s += self.visit(arg) if count != (len(n.arglist)): s += ', ' if count % 3 == 0: s += newline + '\t' + start count += 1 return s + ')' def visit_ArrayRef(self, n): s = self.visit(n.name) for arg in n.subscript: s += '[' + self.visit(arg) + ']' return s def visit_BinOp(self, n): lval = self.parenthesize_if(n.lval, self.simple_node) rval = self.parenthesize_if(n.rval, self.simple_node) return lval + ' ' + n.op + ' ' + rval def visit_FuncDecl(self, n): newline = self.newline if debug: newline = n.__class__.__name__ + newline my_inside_arg_list = self.inside_ArgList self.inside_ArgList = True typeid = self.visit(n.typeid) arglist = self.visit(n.arglist) self.inside_ArgList = False or my_inside_arg_list if self.inside_Assignment or self.inside_ArgList: compound = '' elif n.compound.statements: typeid = self.start + typeid arglist += newline compound = self.visit(n.compound) + newline else: compound = self.semi return typeid + arglist + compound def visit_FuncCall(self, n): my_inside_arg_list = self.inside_ArgList self.inside_ArgList = True id = self.visit(n.id) arglist = self.visit(n.arglist) self.inside_ArgList = False or my_inside_arg_list if self.inside_Assignment or self.inside_ArgList: end = '' else: end = self.semi return id + arglist + end def visit_ClassMemberFuncCall(self, n): my_inside_arg_list = self.inside_ArgList self.inside_ArgList = True name = self.visit(n.name) classname = self.visit(n.classname) arglist = self.visit(n.arglist) self.inside_ArgList = False or my_inside_arg_list if self.inside_Assignment or self.inside_ArgList: end = '' else: end = self.semi return classname + '->' + name + arglist + end def visit_ForLoop(self, n): newline = self.newline if debug: newline = n.__class__.__name__ + newline init = self.visit(n.init) # already has a semi at the end cond = self.visit(n.cond) inc = self.visit(n.inc) self.indent_level += 2 compound = self.visit(n.compound) self.indent_level -= 2 return 'for (' + init + ' ' + cond + self.semi + ' ' + inc + ')' \ + newline + compound def visit_IfThen(self, n): newline = self.newline start = self.start if debug: newline = n.__class__.__name__ + newline start = n.__class__.__name__ + start cond = self.visit(n.cond) self.indent_level += 2 compound = self.visit(n.compound) self.indent_level -= 2 if_cond = self.__create_if_cond(cond) first_compound = self.__create_if_compound(compound) return if_cond + first_compound def visit_IfThenElse(self, n): newline = self.newline start = self.start if debug: newline = n.__class__.__name__ + newline start = n.__class__.__name__ + start cond = self.visit(n.cond) self.indent_level += 2 compound1 = self.visit(n.compound1) compound2 = self.visit(n.compound2) self.indent_level -= 2 if_cond = self.__create_if_cond(cond) first_compound = 
self.__create_if_compound(compound1) else_clause = self._make_indent() + newline + self._make_indent() + 'else' + newline second_compound = self.__create_if_compound(compound2) return if_cond + first_compound + else_clause + second_compound def __create_if_compound(self, compound): return self._make_indent() + '{' + self.newline + compound + self._make_indent() + '}' def __create_if_cond(self, cond): return 'if (' + cond + ')' + self.newline def visit_Id(self, n): return n.name def visit_Include(self, n): return "#include " + n.name def visit_Constant(self, n): try: s = float(n.value) except ValueError: if len(n.value) == 0: return '\"\"' if n.value[0] == '"': ## if self.extraquotes and False: ## return self.quotes + n.value[1:-1] + self.quotes return n.value else: return self.quotes + n.value + self.quotes else: return str(n.value) def visit_Return(self, n): expr = self.visit(n.expr) return 'return ' + expr def visit_RawCpp(self, n): return n.code def visit_Type(self, n): return n.type def visit_Ref(self, n): expr = self.visit(n.expr) return '&' + expr def visit_Cout(self, n): s = '' for arg in n.print_args: s += ' << ' + self.visit(arg) return 'cout' + s + ' << endl;' def visit_RunOCLArg(self, n): s = self.visit(n.ocl_arg) return s def visit_CppClass(self, n): s = '' name = self.visit(n.name) self.indent_level += 2 s += 'class ' + name + self.newline + '{' + self.newline self._is_first_group_compount = True s += self.visit(n.var_list) s += self.newline s += 'public:' + self.newline s += self.visit(n.public_list) s += self.newline protected_str = self.visit(n.protected_list) if len(protected_str) > 3: s += 'protected:' + self.newline s += protected_str s += self.newline s += 'private:' + self.newline s += self.visit(n.private_list) s += self.newline s += '};' self.indent_level -= 2 return s def visit_ClassConstructor(self, n): newline = self.newline if debug: newline = n.__class__.__name__ + newline name = self.visit(n.name) arglist = self.visit(n.arglist) name = self.start + name arglist += newline compound = self.visit(n.compound) + newline return name + arglist + compound
dikujepsen/OpenTran
src/framework/codegen/cgen.py
Python
mit
12,254
[ "VisIt" ]
8e49bc0b01ca3437add16262a3fe7ede8e06fb6451a30685229d2fb80f7c4860
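CGenerator.visit above dispatches on the node's class name via getattr and falls back to generic_visit; stripped to its essentials, the return-value visitor looks like this (toy node classes, invented for illustration):

class Num:
    def __init__(self, value): self.value = value

class Add:
    def __init__(self, left, right): self.left, self.right = left, right

class Printer:
    def visit(self, node):
        # dispatch on the concrete class name, as CGenerator.visit does
        method = 'visit_' + node.__class__.__name__
        return getattr(self, method, self.generic_visit)(node)

    def generic_visit(self, node):
        raise NotImplementedError(node.__class__.__name__)

    def visit_Num(self, n):
        return str(n.value)

    def visit_Add(self, n):
        return '(' + self.visit(n.left) + ' + ' + self.visit(n.right) + ')'

assert Printer().visit(Add(Num(1), Add(Num(2), Num(3)))) == '(1 + (2 + 3))'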
# -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 ## ## Copyright (C) 2014 Async Open Source <http://www.async.com.br> ## All rights reserved ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., or visit: http://www.gnu.org/. ## ## Author(s): Stoq Team <stoq-devel@async.com.br> ## ## import gtk from stoqlib.api import api from stoqlib.domain.product import Product, GridGroup from stoqlib.gui.base.dialogs import run_dialog from stoqlib.gui.base.wizards import BaseWizard, BaseWizardStep from stoqlib.gui.editors.producteditor import ProductEditor from stoqlib.gui.slaves.productslave import ProductAttributeSlave from stoqlib.lib.message import yesno, warning from stoqlib.lib.translation import stoqlib_gettext as _ class ProductTypeStep(BaseWizardStep): gladefile = 'ProductTypeStep' # # WizardEditorStep # def next_step(self): if self.wizard.product_type == Product.TYPE_GRID: return ProductAttributeEditorStep(self.wizard.store, self.wizard, previous=self) else: return ProductEditorStep(store=self.wizard.store, wizard=self.wizard, previous=self) def validate_step(self): if (self.wizard.product_type == Product.TYPE_GRID and not GridGroup.has_group(self.wizard.store)): warning(_("You need to register an attribute group first")) return False return True # # Callbacks # def on_common__toggled(self, radio): if radio.get_active(): self.wizard.product_type = Product.TYPE_COMMON def on_batch__toggled(self, radio): if radio.get_active(): self.wizard.product_type = Product.TYPE_BATCH def on_without_stock__toggled(self, radio): if radio.get_active(): self.wizard.product_type = Product.TYPE_WITHOUT_STOCK def on_consigned__toggled(self, radio): if radio.get_active(): self.wizard.product_type = Product.TYPE_CONSIGNED def on_grid__toggled(self, radio): if radio.get_active(): self.wizard.product_type = Product.TYPE_GRID class ProductAttributeEditorStep(BaseWizardStep): gladefile = 'HolderTemplate' def __init__(self, store, wizard, previous): BaseWizardStep.__init__(self, store, wizard, previous) self.slave = ProductAttributeSlave(self.wizard.store, object()) self.attach_slave('product_attribute_holder', self.slave, self.place_holder) def validate_step(self): if len(self.slave.get_selected_attributes()) == 0: warning(_("You should select an attribute first")) return False return True def next_step(self): self.wizard.attr_list = self.slave.get_selected_attributes() return ProductEditorStep(self.wizard.store, self.wizard, previous=self) class ProductEditorStep(BaseWizardStep): gladefile = 'HolderTemplate' # # BaseWizardStep # def post_init(self): # self.wizard.model will return something if it is coming back from self.slave = ProductEditor(self.store, wizard=self.wizard, product_type=self.wizard.product_type) self.slave.get_toplevel().reparent(self.place_holder) self.wizard.model = self.slave.model self.slave.register_validate_function(self.wizard.refresh_next) self.slave.force_validation() def 
previous_step(self):
        # Avoid creating duplicated products when going back
        self.store.rollback(close=False)
        return super(ProductEditorStep, self).previous_step()

    def has_next_step(self):
        return False


class ProductCreateWizard(BaseWizard):
    size = (800, 450)
    title = _('Product creation wizard')
    help_section = 'product-new'
    need_cancel_confirmation = True

    # args and kwargs are here to get extra parameters sent by SearchEditor's
    # run_dialog. We will just ignore them since they are not useful here
    def __init__(self, store, *args, **kwargs):
        self.product_type = Product.TYPE_COMMON
        first_step = ProductTypeStep(store, self)
        BaseWizard.__init__(self, store, first_step)

    #
    #  BaseWizard
    #

    def finish(self):
        self.retval = self.model
        self.close()
        self.model.update_children_info()

    #
    #  Classmethods
    #

    @classmethod
    def run_wizard(cls, parent):
        """Run the wizard to create a product

        This will run the wizard and after finishing, ask if the user wants
        to create another product alike. The product will be cloned and
        `stoqlib.gui.editors.producteditor.ProductEditor` will run as long as
        the user chooses to create one alike
        """
        with api.new_store() as store:
            rv = run_dialog(cls, parent, store)

        if rv:
            inner_rv = rv

            while yesno(_("Would you like to register another product alike?"),
                        gtk.RESPONSE_NO, _("Yes"), _("No")):
                with api.new_store() as store:
                    template = store.fetch(rv)
                    inner_rv = run_dialog(ProductEditor, parent, store,
                                          product_type=template.product_type,
                                          template=template)

                if not inner_rv:
                    break

        # We are interested in the first rv; it means that at least one
        # obj was created.
        return rv
andrebellafronte/stoq
stoqlib/gui/wizards/productwizard.py
Python
gpl-2.0
6,117
[ "VisIt" ]
5efd3ba012c42136c758146077e81e95f31518ed7c39bbf8e4ddfe1c5071217d
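The wizard above advances by letting each step construct its successor in next_step() and terminate by returning None. A toy sketch of that step-chaining protocol, not the stoqlib API:

class Step(object):
    def validate_step(self):
        return True

    def next_step(self):
        return None          # None ends the wizard

class TypeStep(Step):
    def next_step(self):
        return EditorStep()

class EditorStep(Step):
    pass

def run_wizard(first_step):
    step = first_step
    while step is not None:
        if step.validate_step():   # the real UI re-prompts on failure
            step = step.next_step()

run_wizard(TypeStep())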
try:
    from mmtf import fetch, parse
except ImportError:
    from Bio import MissingPythonDependencyError
    raise MissingPythonDependencyError("Install mmtf to use Bio.PDB.mmtf "
                                       "(e.g. pip install mmtf-python)")

from Bio.PDB.mmtf.DefaultParser import StructureDecoder


def get_from_decoded(decoder):
    structure_decoder = StructureDecoder()
    decoder.pass_data_on(structure_decoder)
    return structure_decoder.structure_bulder.get_structure()


class MMTFParser(object):
    """Class to get a BioPython structure from a URL or a filename."""

    @staticmethod
    def get_structure_from_url(pdb_id):
        """Get a structure from a URL - given a PDB id.

        :param pdb_id: the input PDB id
        :return: the structure
        """
        decoder = fetch(pdb_id)
        return get_from_decoded(decoder)

    @staticmethod
    def get_structure(file_path):
        """Get a structure from a file - given a file path.

        :param file_path: the input file path
        :return: the structure
        """
        decoder = parse(file_path)
        return get_from_decoded(decoder)
zjuchenyuan/BioWeb
Lib/Bio/PDB/mmtf/__init__.py
Python
mit
1,141
[ "Biopython" ]
ad01fe2538d8ec77b376be89e5dd99e3f9da77f39d75adcf559ab3aabda38305
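Both entry points defined above are one call each; a short usage sketch (the PDB id 4CUP is illustrative, and the first call needs network access):

from Bio.PDB.mmtf import MMTFParser

# fetch and decode straight from the PDB
structure = MMTFParser.get_structure_from_url("4CUP")

# or decode a local .mmtf file instead:
# structure = MMTFParser.get_structure("/path/to/4CUP.mmtf")

# standard Bio.PDB traversal works on the result
for chain in structure.get_chains():
    print(chain.get_id())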
import os import uuid import unittest import bson import pymongo from pymatgen.db.query_engine import QueryEngine, QueryResults from pymatgen.db.tests import common has_mongo = common.has_mongo() test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_files") class QueryResultsTest(unittest.TestCase): def setUp(self): if has_mongo: self.conn = pymongo.MongoClient() self.db_name = "test" self.db = self.conn[self.db_name] self.coll_name = f"tasks_{uuid.uuid4()}" self.coll = self.db[self.coll_name] with open(os.path.join(test_dir, "db_test", "GaLa.task.json")) as f: doc = bson.json_util.loads(f.read()) self.coll.insert_one(doc) def tearDown(self): if has_mongo: self.db.drop_collection(self.coll_name) @unittest.skipUnless(has_mongo, "requires MongoDB server") def test_queryresult(self): qe = QueryEngine( connection=self.conn, database=self.db_name, collection=self.coll_name, ) result = qe.query( criteria={"task_id": "mp-1002133"}, properties=[ "calcs_reversed.output.ionic_steps.e_0_energy", "calcs_reversed.output.ionic_steps.electronic_steps.e_0_energy", ], ) self.assertTrue(isinstance(result, QueryResults)) print(list(qe.query(criteria={"task_id": "mp-1002133"}))) self.assertEqual(len(result), 1) doc = list(result)[0] self.assertIn("calcs_reversed.output.ionic_steps.e_0_energy", doc) v = doc["calcs_reversed.output.ionic_steps.e_0_energy"] self.assertIsInstance(v, list) for elt in v: self.assertIsInstance(elt, list) for n in elt: self.assertIsInstance(n, float) self.assertIn("calcs_reversed.output.ionic_steps.electronic_steps.e_0_energy", doc) v = doc["calcs_reversed.output.ionic_steps.electronic_steps.e_0_energy"] for elt in v: self.assertIsInstance(elt, list) for _elt in elt: self.assertIsInstance(_elt, list) for n in _elt: self.assertIsInstance(n, float)
materialsproject/pymatgen-db
pymatgen/db/tests/test_queryresults.py
Python
mit
2,299
[ "pymatgen" ]
700dcc56a81807533662d90a32ceac79c4de90c5f53357e4338bd975bd8fb6fd
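The dotted property names in this test are MongoDB dot notation, so the same projection can be expressed with bare pymongo, which QueryEngine appears to wrap; database and collection names below are illustrative:

import pymongo

client = pymongo.MongoClient()
coll = client["test"]["tasks"]

# project only the nested energy fields, drop the _id
cursor = coll.find(
    {"task_id": "mp-1002133"},
    {"calcs_reversed.output.ionic_steps.e_0_energy": 1, "_id": 0},
)
for doc in cursor:
    print(doc)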
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2017 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # from __future__ import absolute_import from __future__ import print_function from .vecutil import * from .physconst import * from .cov_radii import * BOND_FACTOR = 1.2 # fudge factor for bond length threshold _expected_bonds = { 'H': 1, 'C': 4, 'N': 3, 'O': 2, 'F': 1, 'P': 3, 'S': 2, } def xyz2mol(self): """Returns a string of Molecule formatted for mol2. Written by Trent M. Parker 9 Jun 2014 """ factor = 1.0 if self.PYunits == 'Angstrom' else psi_bohr2angstroms bonds = self.bond_profile() N = 0 for i in range(self.natom()): if self.Z(i): N += 1 # header text = '%s\n' % (self.tagline) text += ' Generated by xyz2mol\n\n' text += '%3i%3i 0 0 0 0 0 0 0 0999 V2000\n' % (N, len(bonds)) # coordinates for i in range(self.natom()): [x, y, z] = self.atoms[i].compute() if self.Z(i): text += ' %9.4f %9.4f %9.4f %-2s 0 0 0 0 0\n' % \ (x * factor, y * factor, z * factor, self.symbol(i)) # bonds for p in range(len(bonds)): text += '%3i%3i%3i' % (bonds[p][0] + 1, bonds[p][1] + 1, bonds[p][2]) text += ' 0 0 0\n' text += 'M END\n' return text def missing_bonds(bonds, bond_tree, at_types): """Determine number of bonds missing for each atom""" n_missing = [] for i in range(len(at_types)): n_bonds_i = 0 for p in range(len(bonds)): at1 = bonds[p][0] at2 = bonds[p][1] if (at1 == i or at2 == i): bond_order = bonds[p][2] n_bonds_i += bond_order n_expect_i = _expected_bonds[at_types[i]] n_missing.append(n_expect_i - n_bonds_i) return n_missing def missing_neighbors(bond_tree, n_missing): """Determine number of neighboring atoms missing bonds for each atom""" missing_neighbors = [] for i in range(len(bond_tree)): N_neighbors = len(bond_tree[i]) missing = 0 for a in range(N_neighbors): j = bond_tree[i][a] if n_missing[j] > 0: missing += 1 missing_neighbors.append(missing) return missing_neighbors def bond_profile(self): """Obtain bonding topology of molecule""" # determine bond topology from covalent radii bonds = [] for i in range(self.natom()): for j in range(i + 1, self.natom()): dist = norm(sub(self.xyz(j), self.xyz(i))) * psi_bohr2angstroms # TOOD check bohr/ang progress bonded_dist = BOND_FACTOR * (psi_cov_radii[self.symbol(i)] + psi_cov_radii[self.symbol(j)]) if bonded_dist > dist: bonds.append([i, j, 1]) # determine bond order from number of bonds N_atoms = self.natom() N_bonds = len(bonds) at_types = [self.symbol(i) for i in range(self.natom())] bond_tree = [[] for i in range(N_atoms)] for i in range(N_bonds): at1 = bonds[i][0] at2 = bonds[i][1] bond_tree[at1].append(at2) bond_tree[at2].append(at1) # determine bond order for all bonds from bond tree and element types n_missing = 
missing_bonds(bonds, bond_tree, at_types) n_neighbors_missing = missing_neighbors(bond_tree, n_missing) # add double / triple bonds if only one neighbor missing bonds N_left = math.floor(sum(n_missing) / 2) N_left_previous = N_left + 1 N_iter = 0 while N_left > 0: N_iter += 1 if N_left == N_left_previous: neighbor_min += 1 else: neighbor_min = 1 N_left_previous = N_left # add a multiple bond to a deficient atom with the fewest number of deficient neighbors BREAK_LOOP = False for i in range(N_atoms): if n_missing[i] > 0 and n_neighbors_missing[i] == neighbor_min: N_neighbors = len(bond_tree[i]) for a in range(N_neighbors): j = bond_tree[i][a] if n_missing[j] > 0: for p in range(N_bonds): at1 = bonds[p][0] at2 = bonds[p][1] if (at1 == i and at2 == j) or (at1 == j and at2 == i): bonds[p][2] += 1 n_missing[i] += -1 n_missing[j] += -1 n_neighbors_missing[i] += -1 n_neighbors_missing[j] += -1 N_left = math.floor(sum(n_missing) / 2) BREAK_LOOP = True if BREAK_LOOP: break if BREAK_LOOP: break # recalculate incomplete bond topology n_missing = missing_bonds(bonds, bond_tree, at_types) n_neighbors_missing = missing_neighbors(bond_tree, n_missing) # break cycle if takes more than given number of iterations max_iter = 100 if N_iter > max_iter: print("""Error: multiple bond determination not complete""") print(""" %i bonds unaccounted for""" % (N_left)) break # bond order is number of bonds between each bonded atom pair bond_order = [] for p in range(N_bonds): bond_order.append(bonds[p][2]) for p in range(len(bond_order)): bonds[p][2] = bond_order[p] return bonds
kratman/psi4public
psi4/driver/qcdb/parker.py
Python
gpl-2.0
6,432
[ "Psi4" ]
e885eba50b4a882ec5f5bbc539ff256688a8138cac5adcbbf27a3e6e673d70a8
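bond_profile above declares two atoms bonded when their separation is below BOND_FACTOR times the sum of their covalent radii. That test is easy to state standalone; a numpy sketch with a tiny, approximate radii table (values rounded, for illustration only):

import numpy as np

BOND_FACTOR = 1.2
cov_radii = {'H': 0.31, 'C': 0.76, 'O': 0.66}   # Angstrom, approximate

def detect_bonds(symbols, xyz):
    """Return [i, j, 1] entries for pairs under the bonding threshold."""
    xyz = np.asarray(xyz)
    bonds = []
    for i in range(len(symbols)):
        for j in range(i + 1, len(symbols)):
            dist = np.linalg.norm(xyz[j] - xyz[i])
            cutoff = BOND_FACTOR * (cov_radii[symbols[i]] + cov_radii[symbols[j]])
            if dist < cutoff:
                bonds.append([i, j, 1])
    return bonds

# water: both O-H pairs bond, the H-H pair does not
print(detect_bonds(['O', 'H', 'H'],
                   [[0.0, 0.0, 0.0], [0.96, 0.0, 0.0], [-0.24, 0.93, 0.0]]))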
#!/usr/bin/env python __author__ = 'thomasvangurp' # Date created: 22/11/2014 (europe date) # Function: Pipeline for mapping reads to reference #Python version: 2.7.3 #External dependencies: samtools,pysam,methylation_calling.py #Known bugs: None #Modifications: None import pysam import argparse import subprocess import tempfile import os import shutil import sys from Bio import SeqIO from Bio import Restriction def getScriptPath(): return os.path.dirname(__file__) def parse_args(): "Pass command line arguments" if not sys.argv[1:]: sys.argv.append('-h') parser = argparse.ArgumentParser(description='use bwameth for mapping reads') #input files parser.add_argument('-s','--sequences', help='number of sequences to take for testing') parser.add_argument('--subsample_treshold', help='Subsample treshold',default='100000') parser.add_argument('--tmpdir', help='tmp directory',default="/tmp/") parser.add_argument('--input_dir', help='optional: Choose input directory') parser.add_argument('--reads_R1', help='Forward unmerged reads') parser.add_argument('--reads_R2', help='Reverse unmerged reads') parser.add_argument('--merged', help='merged watson and crick fastq') parser.add_argument('--reference', help='reference clusters') parser.add_argument('--refgenome', help='reference genome instead of clusters') parser.add_argument('-b','--barcodes', help='Barcodes used in output') parser.add_argument('--species', help='Species: if selected only that species will be putin BAM RG header') parser.add_argument('--bamout', help='output for bam file with RGs') parser.add_argument('--threads', help='Number of threads to used where multithreading is possible') parser.add_argument('--log', help='log of output operation') parser.add_argument('--output_dir', help='optional: Choose output directory') parser.add_argument('--watson_vcf', help='watson vcf output') parser.add_argument('--crick_vcf', help='crick vcf output') parser.add_argument('--snp_vcf', help='vcf output snp') parser.add_argument('--methylation_vcf', help='Methylation vcf output') parser.add_argument('--heatmap', help='heatmap output methylation') args = parser.parse_args() if args.input_dir: if not args.reads_R1: args.reads_R1 = os.path.join(args.input_dir,'Unassembled.R1.watson.fq.gz') if not args.reads_R2: args.reads_R2 = os.path.join(args.input_dir,'Unassembled.R2.crick.fq.gz') if not args.merged: args.merged = os.path.join(args.input_dir,'Assembled.fq.gz') if args.reference == None and args.refgenome == None: args.reference = os.path.join(args.input_dir,'consensus_cluster.renamed.fa') if args.barcodes == None: args.barcodes = os.path.join(args.input_dir,'barcodes.csv') if args.output_dir: if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) if not args.log: args.log = os.path.join(args.output_dir,'mapping_variantcalling.log') args.watson_vcf = os.path.join(args.output_dir,'watson.vcf.gz') args.crick_vcf = os.path.join(args.output_dir,'crick.vcf.gz') args.snp_vcf = os.path.join(args.output_dir,'snp.vcf.gz') args.methylation_vcf = os.path.join(args.output_dir,'methylation.vcf.gz') args.heatmap = os.path.join(args.output_dir,'heatmap.igv') args.mastermeth = os.path.join(args.output_dir,'methylation.bed') return args def run_subprocess(cmd,args,log_message): "Run subprocess under standardized settings" #force the cmds to be a string. 
if len(cmd) != 1: cmd = [" ".join(cmd)] with open(args.log,'a') as log: log.write("now starting:\t%s\n"%log_message) log.write('running:\t%s\n'%(' '.join(cmd))) log.flush() p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True,executable='/bin/bash') stdout, stderr = p.communicate() stdout = stdout.decode().replace('\r','\n') stderr = stderr.decode().replace('\r','\n') if stdout: log.write('stdout:\n%s\n'%stdout) if stderr: log.write('stderr:\n%s\n'%stderr) return_code = p.poll() if return_code: raise RuntimeError(stderr) log.write('finished:\t%s\n\n'%log_message) return 0 def make_header(in_files,args): """Make sam header given input file species and""" #parse input barcode file and make list of individuals in_files['header'] = 'location of header' return in_files def run_bwameth(in_files,args): "run bwa_meth for mapping" in_files['bam_out'] = {} in_files['bam_out']['watson'] = os.path.join(args.output_dir,'watson.bam') in_files['bam_out']['crick'] = os.path.join(args.output_dir,'crick.bam') in_files['header'] = os.path.join(args.output_dir,'header.sam') log = "index renamed reference using bwameth" ref = args.reference if not os.path.exists('%s.bwameth.c2t'%ref): cmd = ['bwameth.py index %s'%ref] run_subprocess(cmd,args,log) log = "run bwameth for merged reads" if args.sequences: add = '|head -n %s'%(4*int(args.sequences)) else: add = '' if args.merged: cmd = ['bwameth.py -t %s -p %s --reference %s <(pigz -cd %s %s) NA'% (args.threads, os.path.join(args.output_dir,'merged'), ref, args.merged,add )] run_subprocess(cmd,args,log) log = "run bwameth for non-merged reads" cmd = ['bwameth.py -t %s -p %s --reference %s <(pigz -cd %s %s) <(pigz -cd %s %s)'% (args.threads, os.path.join(args.output_dir,'pe'), ref, args.reads_R1,add, args.reads_R2,add )] run_subprocess(cmd,args,log) log = "get start of header from pe.bam, add RG information using addRG function" cmd = ["samtools view -H %s > %s"% ((os.path.join(args.output_dir,'pe.bam')), (os.path.join(args.output_dir,'header.sam')))] run_subprocess(cmd,args,log) log = "Append RG to header" in_files = addRG(in_files,args) log = "merge bam files" cmd = ["samtools merge -h %s -fcp -@ %s %s <(samtools reheader %s %s) <(samtools reheader %s %s)"% (in_files['header'], args.threads, os.path.join(args.output_dir,'combined.bam'), in_files['header'], os.path.join(args.output_dir, 'pe.bam'), in_files['header'],os.path.join(args.output_dir, 'merged.bam'))] run_subprocess(cmd,args,log) log = "index combined bam file" cmd = ["samtools index %s"%(os.path.join(args.output_dir,'combined.bam'))] run_subprocess(cmd, args, log) log = "split in watson and crick bam file" bam_input = pysam.AlignmentFile(os.path.join(args.output_dir,'combined.bam'),'rb') watson_output = pysam.AlignmentFile(os.path.join(args.output_dir,'watson.bam'),'wb', template=bam_input) crick_output = pysam.AlignmentFile(os.path.join(args.output_dir,'crick.bam'),'wb', template=bam_input) for record in bam_input: tag_dict = dict(record.tags) #remove reads with alternate mapping positions # if 'XA' in tag_dict: # continue try: if (record.is_reverse and record.is_paired == False) or \ (record.is_paired and record.is_read1 and record.is_reverse == True) or \ (record.is_paired and record.is_read2 and record.is_reverse == False): if tag_dict['ST'].lower() == 'crick': watson_output.write(record) elif tag_dict['ST'].lower() == 'watson': crick_output.write(record) else: if tag_dict['ST'].lower() == 'watson': watson_output.write(record) elif tag_dict['ST'].lower() == 'crick': 
crick_output.write(record) except KeyError: continue watson_output.close() crick_output.close() in_files['bam_out'] = {} in_files['bam_out']['watson'] = os.path.join(args.output_dir,'watson.bam') in_files['bam_out']['crick'] = os.path.join(args.output_dir,'crick.bam') log = "index watson bam file" cmd = ["samtools index %s"%in_files['bam_out']['watson']] run_subprocess(cmd,args,log) log = "index crick bam file" cmd = ["samtools index %s"%in_files['bam_out']['crick']] run_subprocess(cmd,args,log) return in_files def run_STAR(in_files, args): "run bwa_meth for mapping" in_files['bam_out'] = {} in_files['bam_out']['watson'] = os.path.join(args.output_dir, 'watson.dedup.bam') in_files['bam_out']['crick'] = os.path.join(args.output_dir, 'crick.dedup.bam') in_files['header'] = os.path.join(args.output_dir, 'header.sam') cmd = ["map_STAR.py", '--reads_R1 %s' % args.reads_R1, '--reads_R2 %s' % args.reads_R2, '--merged %s' % args.merged, "--barcodes %s" % args.barcodes, "--tmpdir %s" % args.tmpdir, "--threads %s" % args.threads, "--output_dir %s" % args.output_dir] if not args.reference: cmd += ['--refgenome %s' % args.refgenome] else: cmd += ['--reference %s' % args.reference] if args.sequences != None: cmd += ['--sequences %s' % args.sequences] log = "Map reads using STAR" run_subprocess(cmd, args, log) return in_files def addRG(in_files,args): "make header for output bamfile and split in watson and crick" #define readgroup header lines by combining the following species_name = '' """ - read group ID* Unique read group identifier. The value of the ID field is used in the RG tags of alignment records. SM* Sample (use pool name where a pool is being sequenced) LB Library DS Description PU Platform unit (e.g. lane for Illumina or slide for SOLiD); should be a full, unambiguous identifier PI Predicted median insert size (maybe different from the actual median insert size) CN Name of sequencing center producing the read. DT Date the run was produced (ISO 8601 date or date/time). 
PL Platform/technology used to produce the read.""" with open(args.barcodes,'r') as barcodes: sam_out= open(in_files['header'],'a') header = barcodes.readline().split('\t') for line in barcodes: RG = ['@RG'] split_line = line.split('\t') if args.species: if split_line[(header.index('Species'))] != args.species: continue fc = split_line[(header.index('Flowcell'))] lane = split_line[(header.index('Lane'))] sample = split_line[(header.index('Sample'))] RG.append('ID:%s_%s_%s'%(fc,lane,sample)) RG.append('SM:%s'%(sample)) RG.append('LB:%s_%s'%(fc,sample)) RG.append('PL:ILLUMINA\n') sam_out.write('\t'.join(RG)) sam_out.close() return in_files def get_enz(enz): """Get enzyme from biopython restriction library""" for enzyme in Restriction.AllEnzymes: if "%s"%(enzyme) == enz: return enzyme def get_regions(contig,enzymes): """return loci with start and end locations""" out_sites = [] enz_1 = get_enz(enzymes[0]) enz_2 = get_enz(enzymes[1]) enz_1_sites = enz_1.search(contig.seq) enz_2_sites = enz_2.search(contig.seq) combined_sites = sorted(enz_1_sites + enz_2_sites) for i in range(len(combined_sites)): site_A = combined_sites[i] try: site_B = combined_sites[i+1] except IndexError: break if site_B - site_A < 30: continue if site_A in enz_1_sites and site_B in enz_2_sites: out_sites.append((site_A + 1, site_B - len(enz_2.site))) elif site_A in enz_2_sites and site_B in enz_1_sites: out_sites.append((site_A + 1, site_B - len(enz_1.site))) return out_sites def remove_PCR_duplicates(in_files,args): """Remove PCR duplicates and non-paired PE-reads per cluster""" #check if random tag is present in fastq file, otherwise do not perform function # fastq_tags = open(in_files['']) #TODO: implement sample specific PCR duplicate detection for strand,bamfile in in_files['bam_out'].items(): clusters = SeqIO.parse(open(args.reference),'fasta') handle = pysam.AlignmentFile(bamfile,'rb') out_bam = tempfile.NamedTemporaryFile(suffix='uniq.bam',dir=args.output_dir,delete=False) out_handle = pysam.AlignmentFile(out_bam.name,'wb', template=handle) read_count = {} for cluster in clusters: enzymes = ["Csp6I","NsiI"] if len(cluster.seq) > 350: #this must be a reference genome / chromosome: look for regions with mapping reads regions = get_regions(cluster,enzymes) else: regions = [None] for region in regions: if region: reads = handle.fetch(cluster.id,region[0],region[1]) else: reads = handle.fetch(cluster.id) if 'NNNNNNNN' in cluster._seq.upper() and not region: cluster_is_paired = True elif region: if region[1] - region[0] > 240: cluster_is_paired = True else: cluster_is_paired = False else: cluster_is_paired = False read_out = {} for read in reads: tag_dict = dict(read.tags) try: tag = tag_dict['RN'] sample = tag_dict['RG'] AS = tag_dict['AS'] except KeyError: break if not read.is_proper_pair and cluster_is_paired: continue if sample not in read_out: read_out[sample] = {} if tag not in read_out[sample]: read_out[sample][tag] = {read.qname:AS} else: try: read_out[sample][tag][read.qname]+= AS except KeyError: read_out[sample][tag][read.qname] = AS #process read_out if read_out == {} and 'RN' not in tag_dict: #random tag not yet implemented. 
return in_files and do not process further return in_files if region: reads = handle.fetch(cluster.id, region[0], region[1]) else: reads = handle.fetch(cluster.id) for read in reads: if not read.is_proper_pair and cluster_is_paired: continue # if not read_count%100000: # print '%s reads processed for %s strand'%(read_count,strand) tag_dict = dict(read.tags) tag = tag_dict['RN'] sample = tag_dict['RG'] try: read_count[sample]['count'] += 1 except KeyError: if sample not in read_count: read_count[sample] = {'count':1} else: read_count[sample]['count'] = 1 max_AS = max(read_out[sample][tag].values()) qname = [name for name,AS in read_out[sample][tag].items() if AS == max_AS][0] if read.qname == qname: out_handle.write(read) else: try: read_count[sample]['dup_count'] += 1 except KeyError: read_count[sample]['dup_count'] = 1 for key , subdict in sorted(read_count.items()): count = subdict['count'] if 'dup_count' in subdict: dup_count = subdict['dup_count'] dup_pct = dup_count / float(count) print('%s has %s reads and %s duplicates. Duplicate rate: %.2f%%'%(key,count,dup_count,100*dup_pct)) else: print('%s has %s reads and 0 duplicates. Duplicate rate: 0%%' % (key, count)) # out_handle.flush() out_handle.close() old_bam = in_files['bam_out'][strand] log = "move old bam file %s to %s"%(old_bam,old_bam.replace('.bam','.old.bam')) cmd = ["mv %s %s"%(old_bam,old_bam.replace('.bam','.old.bam'))] run_subprocess(cmd,args,log) log = "move uniq bam file %s to %s"%(out_bam.name,old_bam) cmd = ["mv %s %s"%(out_bam.name,in_files['bam_out'][strand])] run_subprocess(cmd,args,log) log = "index bam file %s"%(old_bam) cmd = ["samtools index %s"%(in_files['bam_out'][strand])] run_subprocess(cmd,args,log) return in_files def run_Freebayes(in_files,args): "run freebayes on watson and crick bam file with threadpool" in_files['variants'] = {} log = open(args.log,'a') for strand in ['watson','crick']: processes = set() max_processes = int(args.threads) outdir = tempfile.mkdtemp(prefix='vcf', dir=args.tmpdir) outlist = [] in_files['variants'][strand] = outdir n = 0 skipped = 0 with open(in_files['header']) as header: for line in header: if line.startswith('@SQ'): contig = line.split('\t')[1].split(':')[1] length = line[:-1].split('\t')[2].split(':')[1] else: continue #determine coverage on contig in bam file #set depth at 100.000.000 n+=1 if not n%1000: print('Done processing %s contigs on %s,skipped %s'%(n,strand,skipped)) bamfile = in_files['bam_out'][strand] cmd = ['samtools mpileup -d 10000000 %s -r %s:10-10'%(bamfile,contig)] # if int(contig) > 1000: # break if int(length) <500: p = subprocess.Popen(cmd,stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True,executable='/bin/bash') stdout, stderr = p.communicate() return_code = p.poll() stderr = stderr.replace('\r','\n') if return_code: raise RuntimeError(stderr) try: out = stdout.split('\t') depth = int(out[3]) n_samples = int(stderr.split(' ')[1]) except IndexError: #no reads for this contig, skip continue else: # it does not make sense to calculate depth here! 
depth = 0 if depth < n_samples * 10: skipped += 1 continue outlist.append('%s.vcf'%contig) #freebayes --bam <(samtools view -hs 0.89552238806 /tmp/watson.bam 1|samtools view -Shb -) # --fasta-reference /Volumes/data/epiGBS/Baseclear/unfiltered_sequences/seqNNAtlE/Carrot/consensus.clustered.renamed.fa # -F 0 -E 1 -C 0 -G 0 --haplotype-length 1 -k -K -X -u -i -q 21 -w -a # --report-all-haplotype-alleles --report-monomorphic --report-genotype-likelihood-max cmd = """freebayes -f %s -F 0 -E 1 \ -C 0 -G 0 --haplotype-length 1 \ --report-all-haplotype-alleles --report-monomorphic\ --report-genotype-likelihood-max \ --haplotype-length 1 -KkXuiwaq 21 """%\ (args.reference) if depth > int(args.subsample_treshold): factor = int(args.subsample_treshold) / float(depth) log = "Subsample bam file with high coverage" bamfile = "--bam <(samtools view -hs %s %s %s|samtools view -Shb -)"%\ (factor,bamfile,contig) cmd += " %s > %s"%\ (bamfile, os.path.join(outdir,'%s.vcf'%contig)) # elif depth == 0: # cmd += " --bam %s > %s"%\ # (bamfile, os.path.join(outdir,'%s.vcf'%contig)) else: cmd += " -r %s:0-%s --bam %s > %s"%\ (contig, int(length)-1,bamfile, os.path.join(outdir,'%s.vcf'%contig)) processes.add(subprocess.Popen(cmd,stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True,executable='/bin/bash')) while len(processes) >= max_processes: os.wait() processes.difference_update([ p for p in processes if p.poll() is not None]) #Make sure that all Freebayes processes are done before continuing to next step. while len(processes): os.wait() processes.difference_update([ p for p in processes if p.poll() is not None]) if strand == 'watson': target = args.watson_vcf else: target = args.crick_vcf print(outdir,outlist[0],target) shutil.move(os.path.join(outdir,outlist[0]),target) for vcf_file in outlist[1:]: file_in = os.path.join(outdir,vcf_file) cmd = ['grep -v "^#" %s >> %s'%(file_in,target)] p = subprocess.Popen(cmd,stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True,executable='/bin/bash') stdout, stderr = p.communicate() return in_files def variant_calling_samtools(in_files,args): """Do variant calling with samtools mpileup""" #run mpileup on watson bam file in_files['vcf_out'] = {} in_files['vcf_out']['watson'] = os.path.join(args.output_dir,'watson.vcf.gz') in_files['vcf_out']['crick'] = os.path.join(args.output_dir,'crick.vcf.gz') cmd = ["samtools mpileup --reference %s -gt DP,AD,INFO/AD" % (args.reference) + " --max-depth 999999999 " + # call at a max depth of 999,999,999 (effectively unlimited) "-q 0 " + # Do not skip alignments with low mapQ "-Q 15 " + # Skip bases with baseQ/BAQ smaller than 15 "--skip-indels " + # skip indels "-vu %s" % ( in_files['bam_out']['watson']) + # v = generate genotype likelihoods in VCF format u = uncompressed "|grep -v '^##contig='|bgzip -c > %s" % (in_files['vcf_out']['watson'])] log = "use samtools mpileup to get variant observation counts for watson" run_subprocess(cmd, args, log) cmd = ["samtools mpileup --reference %s -gt DP,AD,INFO/AD" % (args.reference) + " --max-depth 999999999 " + #call at a max depth of 999,999,999 (effectively unlimited) "-q 0 " + #Do not skip alignments with low mapQ #TODO: investigate option "-Q 15 " + #Skip bases with baseQ/BAQ smaller than 15 "--skip-indels " + #skip indels "-vu %s" % (in_files['bam_out']['crick']) + #v = generate genotype likelihoods in VCF format u = uncompressed "|grep -v '^##contig='|bgzip -c > %s" % (in_files['vcf_out']['crick'])] log = "use samtools mpileup to get variant observation counts for crick" run_subprocess(cmd, args, log) return in_files def
merge_watson_crick(in_files, args): """create merged.tsv.gz with watson and crick calls merged""" if 'vcf_out' not in in_files: in_files['vcf_out'] = {} in_files['vcf_out']['watson'] = os.path.join(args.output_dir, 'watson.vcf.gz') in_files['vcf_out']['crick'] = os.path.join(args.output_dir, 'crick.vcf.gz') in_files['vcf_out']['merged'] = os.path.join(args.output_dir,'merged.tsv') cmd = ["merge_watson_crick.py", "-w %s" % in_files['vcf_out']['watson'], "-c %s" % in_files['vcf_out']['crick'], "-o %s" % in_files['vcf_out']['merged']] log = "Create custom tsv file for combining watson and crick observation counts per individual" run_subprocess(cmd, args, log) in_files['vcf_out']['merged'] = os.path.join(args.output_dir, 'merged.tsv.gz') return in_files def SNP_calling(in_files, args): """run SNP calling""" if 'vcf_out' not in in_files: in_files['vcf_out'] = {} in_files['vcf_out']['SNP'] = os.path.join(args.output_dir, 'snp.vcf') in_files['vcf_out']['merged'] = os.path.join(args.output_dir, 'merged.tsv.gz') cmd = ["SNP_calling.py", "-m %s" % in_files['vcf_out']['merged'], "-s %s" % in_files['vcf_out']['SNP'], "-w %s" % os.path.join(args.output_dir, 'watson.vcf.gz')] log = "perform SNP calling" run_subprocess(cmd, args, log) return in_files def methylation_calling(in_files,args): "run methylation calling script." log = ["Run methylation calling script"] in_files['vcf_out']['SNP'] = os.path.join(args.output_dir, 'snp.vcf.gz') in_files['vcf_out']['merged'] = os.path.join(args.output_dir, 'merged.tsv.gz') cmd = ["methylation_calling.py", " -r %s"%(args.reference), " -m %s"%(in_files['vcf_out']['merged']), " -s %s"%(in_files['vcf_out']['SNP']), " -o %s"%(os.path.join(args.output_dir,'methylation.bed')), " -heat %s"%(os.path.join(args.output_dir,'heatmap.igv')) ] run_subprocess(cmd,args,log) return in_files def main(): "Main function loop" args = parse_args() #Make sure log is empty at start if os.path.isfile(args.log): os.remove(args.log) #Step 1: discover files in input #todo files = {} #Step 2: map reads using STAR #TODO: replace for running map_STAR files = run_STAR(files,args) if args.refgenome: args.reference = args.refgenome files = variant_calling_samtools(files, args) files = merge_watson_crick(files,args) files = SNP_calling(files, args) files = methylation_calling(files,args) print('done') if __name__ == '__main__': main()
thomasvangurp/epiGBS
mapping_varcall/mapping_variant_calling.py
Python
mit
27,215
[ "Biopython", "pysam" ]
a3af5973aec438a66618e0a481a0b14ffce2aa68226d0e10074ebe78a5b6a0ac
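# Illustrative sketch (not from the epiGBS repo above): the core PCR-duplicate
# selection rule in remove_PCR_duplicates keeps, per (sample RG, random tag RN)
# group, only the read name with the highest summed alignment score (AS). The
# tuple records and the helper name `pick_uniq_reads` are hypothetical
# stand-ins for pysam reads.
def pick_uniq_reads(records):
    """records: iterable of (sample, tag, qname, AS) -> set of kept qnames."""
    scores = {}
    for sample, tag, qname, AS in records:
        group = scores.setdefault((sample, tag), {})
        group[qname] = group.get(qname, 0) + AS
    kept = set()
    for group in scores.values():
        max_AS = max(group.values())
        # mirror the original tie-break: first name reaching the maximum wins
        kept.add([name for name, AS in group.items() if AS == max_AS][0])
    return kept

if __name__ == '__main__':
    reads = [('s1', 'ACGT', 'r1', 60), ('s1', 'ACGT', 'r2', 55),
             ('s1', 'TTTT', 'r3', 42)]
    assert pick_uniq_reads(reads) == {'r1', 'r3'}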
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2017 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # """ | Database of molecules that are challenging to optimize. | Geometries from Baker J. Comput. Chem. 14 1085 (1993), as reported in Bakken and Helgaker, J. Chem. Phys. 117, 9160 (2002), with a few further corrections. | No reference energies defined. - **cp** ``'off'`` - **rlxd** ``'off'`` - **subset** - ``'small'`` - ``'large'`` """ import re import qcdb # <<< BAKERJCC93 Database Module >>> dbse = 'BAKERJCC93' isOS = 'true' # <<< Database Members >>> HRXN = ['1_3_5_trifluorobenzene', '1_3_5_trisilacyclohexane', '1_3_difluorobenzene', '1_5_difluoronaphthalene', '2_hydroxybicyclopentane', 'ACANIL01', 'acetone', 'acetylene', 'ACHTAR10', 'allene', 'ammonia', 'benzaldehyde', 'benzene', 'benzidine', 'caffeine', 'difuropyrazine', 'dimethylpentane', 'disilyl_ether', 'ethane', 'ethanol', 'furan', 'histidine', 'hydroxysulphane', 'menthone', 'mesityl_oxide', 'methylamine', 'naphthalene', 'neopentane', 'pterin', 'water', ] HRXN_SM = ['1_3_5_trisilacyclohexane', '2_hydroxybicyclopentane', 'acetone', 'acetylene', 'allene', 'ammonia', 'benzene', 'disilyl_ether', 'ethane', 'ethanol', 'furan', 'hydroxysulphane', 'methylamine', 'neopentane', 'water'] HRXN_LG = ['1_3_difluorobenzene', '1_3_5_trifluorobenzene', '1_5_difluoronaphthalene', 'ACANIL01', 'ACHTAR10', 'benzaldehyde', 'benzidine', 'caffeine', 'difuropyrazine', 'dimethylpentane', 'histidine', 'menthone', 'mesityl_oxide', 'naphthalene', 'pterin'] # <<< Chemical Systems Involved >>> RXNM = {} # reaction matrix of reagent contributions per reaction ACTV = {} # order of active reagents per reaction ACTV['%s-%s' % (dbse, '1_3_5_trifluorobenzene' )] = ['%s-%s-reagent' % (dbse, '1_3_5_trifluorobenzene')] RXNM['%s-%s' % (dbse, '1_3_5_trifluorobenzene' )] = dict(zip(ACTV['%s-%s' % (dbse, '1_3_5_trifluorobenzene')], [+1])) ACTV['%s-%s' % (dbse, '1_3_5_trisilacyclohexane' )] = ['%s-%s-reagent' % (dbse, '1_3_5_trisilacyclohexane')] RXNM['%s-%s' % (dbse, '1_3_5_trisilacyclohexane' )] = dict(zip(ACTV['%s-%s' % (dbse, '1_3_5_trisilacyclohexane')], [+1])) ACTV['%s-%s' % (dbse, '1_3_difluorobenzene' )] = ['%s-%s-reagent' % (dbse, '1_3_difluorobenzene')] RXNM['%s-%s' % (dbse, '1_3_difluorobenzene' )] = dict(zip(ACTV['%s-%s' % (dbse, '1_3_difluorobenzene')], [+1])) ACTV['%s-%s' % (dbse, '1_5_difluoronaphthalene' )] = ['%s-%s-reagent' % (dbse, '1_5_difluoronaphthalene')] RXNM['%s-%s' % (dbse, '1_5_difluoronaphthalene' )] = dict(zip(ACTV['%s-%s' % (dbse, '1_5_difluoronaphthalene')], [+1])) ACTV['%s-%s' % (dbse, '2_hydroxybicyclopentane' )] = ['%s-%s-reagent' % (dbse, '2_hydroxybicyclopentane')] RXNM['%s-%s' % (dbse, 
'2_hydroxybicyclopentane' )] = dict(zip(ACTV['%s-%s' % (dbse, '2_hydroxybicyclopentane')], [+1])) ACTV['%s-%s' % (dbse, 'ACANIL01' )] = ['%s-%s-reagent' % (dbse, 'ACANIL01')] RXNM['%s-%s' % (dbse, 'ACANIL01' )] = dict(zip(ACTV['%s-%s' % (dbse, 'ACANIL01')], [+1])) ACTV['%s-%s' % (dbse, 'acetone' )] = ['%s-%s-reagent' % (dbse, 'acetone')] RXNM['%s-%s' % (dbse, 'acetone' )] = dict(zip(ACTV['%s-%s' % (dbse, 'acetone')], [+1])) ACTV['%s-%s' % (dbse, 'acetylene' )] = ['%s-%s-reagent' % (dbse, 'acetylene')] RXNM['%s-%s' % (dbse, 'acetylene' )] = dict(zip(ACTV['%s-%s' % (dbse, 'acetylene')], [+1])) ACTV['%s-%s' % (dbse, 'ACHTAR10' )] = ['%s-%s-reagent' % (dbse, 'ACHTAR10')] RXNM['%s-%s' % (dbse, 'ACHTAR10' )] = dict(zip(ACTV['%s-%s' % (dbse, 'ACHTAR10')], [+1])) ACTV['%s-%s' % (dbse, 'allene' )] = ['%s-%s-reagent' % (dbse, 'allene')] RXNM['%s-%s' % (dbse, 'allene' )] = dict(zip(ACTV['%s-%s' % (dbse, 'allene')], [+1])) ACTV['%s-%s' % (dbse, 'ammonia' )] = ['%s-%s-reagent' % (dbse, 'ammonia')] RXNM['%s-%s' % (dbse, 'ammonia' )] = dict(zip(ACTV['%s-%s' % (dbse, 'ammonia')], [+1])) ACTV['%s-%s' % (dbse, 'benzaldehyde' )] = ['%s-%s-reagent' % (dbse, 'benzaldehyde')] RXNM['%s-%s' % (dbse, 'benzaldehyde' )] = dict(zip(ACTV['%s-%s' % (dbse, 'benzaldehyde')], [+1])) ACTV['%s-%s' % (dbse, 'benzene' )] = ['%s-%s-reagent' % (dbse, 'benzene')] RXNM['%s-%s' % (dbse, 'benzene' )] = dict(zip(ACTV['%s-%s' % (dbse, 'benzene')], [+1])) ACTV['%s-%s' % (dbse, 'benzidine' )] = ['%s-%s-reagent' % (dbse, 'benzidine')] RXNM['%s-%s' % (dbse, 'benzidine' )] = dict(zip(ACTV['%s-%s' % (dbse, 'benzidine')], [+1])) ACTV['%s-%s' % (dbse, 'caffeine' )] = ['%s-%s-reagent' % (dbse, 'caffeine')] RXNM['%s-%s' % (dbse, 'caffeine' )] = dict(zip(ACTV['%s-%s' % (dbse, 'caffeine')], [+1])) ACTV['%s-%s' % (dbse, 'difuropyrazine' )] = ['%s-%s-reagent' % (dbse, 'difuropyrazine')] RXNM['%s-%s' % (dbse, 'difuropyrazine' )] = dict(zip(ACTV['%s-%s' % (dbse, 'difuropyrazine')], [+1])) ACTV['%s-%s' % (dbse, 'dimethylpentane' )] = ['%s-%s-reagent' % (dbse, 'dimethylpentane')] RXNM['%s-%s' % (dbse, 'dimethylpentane' )] = dict(zip(ACTV['%s-%s' % (dbse, 'dimethylpentane')], [+1])) ACTV['%s-%s' % (dbse, 'disilyl_ether' )] = ['%s-%s-reagent' % (dbse, 'disilyl_ether')] RXNM['%s-%s' % (dbse, 'disilyl_ether' )] = dict(zip(ACTV['%s-%s' % (dbse, 'disilyl_ether')], [+1])) ACTV['%s-%s' % (dbse, 'ethane' )] = ['%s-%s-reagent' % (dbse, 'ethane')] RXNM['%s-%s' % (dbse, 'ethane' )] = dict(zip(ACTV['%s-%s' % (dbse, 'ethane')], [+1])) ACTV['%s-%s' % (dbse, 'ethanol' )] = ['%s-%s-reagent' % (dbse, 'ethanol')] RXNM['%s-%s' % (dbse, 'ethanol' )] = dict(zip(ACTV['%s-%s' % (dbse, 'ethanol')], [+1])) ACTV['%s-%s' % (dbse, 'furan' )] = ['%s-%s-reagent' % (dbse, 'furan')] RXNM['%s-%s' % (dbse, 'furan' )] = dict(zip(ACTV['%s-%s' % (dbse, 'furan')], [+1])) ACTV['%s-%s' % (dbse, 'histidine' )] = ['%s-%s-reagent' % (dbse, 'histidine')] RXNM['%s-%s' % (dbse, 'histidine' )] = dict(zip(ACTV['%s-%s' % (dbse, 'histidine')], [+1])) ACTV['%s-%s' % (dbse, 'hydroxysulphane' )] = ['%s-%s-reagent' % (dbse, 'hydroxysulphane')] RXNM['%s-%s' % (dbse, 'hydroxysulphane' )] = dict(zip(ACTV['%s-%s' % (dbse, 'hydroxysulphane')], [+1])) ACTV['%s-%s' % (dbse, 'menthone' )] = ['%s-%s-reagent' % (dbse, 'menthone')] RXNM['%s-%s' % (dbse, 'menthone' )] = dict(zip(ACTV['%s-%s' % (dbse, 'menthone')], [+1])) ACTV['%s-%s' % (dbse, 'mesityl_oxide' )] = ['%s-%s-reagent' % (dbse, 'mesityl_oxide')] RXNM['%s-%s' % (dbse, 'mesityl_oxide' )] = dict(zip(ACTV['%s-%s' % (dbse, 'mesityl_oxide')], [+1])) ACTV['%s-%s' 
% (dbse, 'methylamine' )] = ['%s-%s-reagent' % (dbse, 'methylamine')] RXNM['%s-%s' % (dbse, 'methylamine' )] = dict(zip(ACTV['%s-%s' % (dbse, 'methylamine')], [+1])) ACTV['%s-%s' % (dbse, 'naphthalene' )] = ['%s-%s-reagent' % (dbse, 'naphthalene')] RXNM['%s-%s' % (dbse, 'naphthalene' )] = dict(zip(ACTV['%s-%s' % (dbse, 'naphthalene')], [+1])) ACTV['%s-%s' % (dbse, 'neopentane' )] = ['%s-%s-reagent' % (dbse, 'neopentane')] RXNM['%s-%s' % (dbse, 'neopentane' )] = dict(zip(ACTV['%s-%s' % (dbse, 'neopentane')], [+1])) ACTV['%s-%s' % (dbse, 'pterin' )] = ['%s-%s-reagent' % (dbse, 'pterin')] RXNM['%s-%s' % (dbse, 'pterin' )] = dict(zip(ACTV['%s-%s' % (dbse, 'pterin')], [+1])) ACTV['%s-%s' % (dbse, 'water' )] = ['%s-%s-reagent' % (dbse, 'water')] RXNM['%s-%s' % (dbse, 'water' )] = dict(zip(ACTV['%s-%s' % (dbse, 'water')], [+1])) # <<< Reference Values [kcal/mol] >>> BIND = {} BIND['%s-%s' % (dbse, '1_3_5_trifluorobenzene' )] = 0.000 BIND['%s-%s' % (dbse, '1_3_5_trisilacyclohexane' )] = 0.000 BIND['%s-%s' % (dbse, '1_3_difluorobenzene' )] = 0.000 BIND['%s-%s' % (dbse, '1_5_difluoronaphthalene' )] = 0.000 BIND['%s-%s' % (dbse, '2_hydroxybicyclopentane' )] = 0.000 BIND['%s-%s' % (dbse, 'ACANIL01' )] = 0.000 BIND['%s-%s' % (dbse, 'acetone' )] = 0.000 BIND['%s-%s' % (dbse, 'acetylene' )] = 0.000 BIND['%s-%s' % (dbse, 'ACHTAR10' )] = 0.000 BIND['%s-%s' % (dbse, 'allene' )] = 0.000 BIND['%s-%s' % (dbse, 'ammonia' )] = 0.000 BIND['%s-%s' % (dbse, 'benzaldehyde' )] = 0.000 BIND['%s-%s' % (dbse, 'benzene' )] = 0.000 BIND['%s-%s' % (dbse, 'benzidine' )] = 0.000 BIND['%s-%s' % (dbse, 'caffeine' )] = 0.000 BIND['%s-%s' % (dbse, 'difuropyrazine' )] = 0.000 BIND['%s-%s' % (dbse, 'dimethylpentane' )] = 0.000 BIND['%s-%s' % (dbse, 'disilyl_ether' )] = 0.000 BIND['%s-%s' % (dbse, 'ethane' )] = 0.000 BIND['%s-%s' % (dbse, 'ethanol' )] = 0.000 BIND['%s-%s' % (dbse, 'furan' )] = 0.000 BIND['%s-%s' % (dbse, 'histidine' )] = 0.000 BIND['%s-%s' % (dbse, 'hydroxysulphane' )] = 0.000 BIND['%s-%s' % (dbse, 'menthone' )] = 0.000 BIND['%s-%s' % (dbse, 'mesityl_oxide' )] = 0.000 BIND['%s-%s' % (dbse, 'methylamine' )] = 0.000 BIND['%s-%s' % (dbse, 'naphthalene' )] = 0.000 BIND['%s-%s' % (dbse, 'neopentane' )] = 0.000 BIND['%s-%s' % (dbse, 'pterin' )] = 0.000 BIND['%s-%s' % (dbse, 'water' )] = 0.000 # <<< Comment Lines >>> TAGL = {} TAGL['%s-%s' % (dbse, '1_3_5_trifluorobenzene' )] = '' TAGL['%s-%s-reagent' % (dbse, '1_3_5_trifluorobenzene' )] = '' TAGL['%s-%s' % (dbse, '1_3_5_trisilacyclohexane' )] = '' TAGL['%s-%s-reagent' % (dbse, '1_3_5_trisilacyclohexane' )] = '' TAGL['%s-%s' % (dbse, '1_3_difluorobenzene' )] = '' TAGL['%s-%s-reagent' % (dbse, '1_3_difluorobenzene' )] = '' TAGL['%s-%s' % (dbse, '1_5_difluoronaphthalene' )] = '' TAGL['%s-%s-reagent' % (dbse, '1_5_difluoronaphthalene' )] = '' TAGL['%s-%s' % (dbse, '2_hydroxybicyclopentane' )] = '' TAGL['%s-%s-reagent' % (dbse, '2_hydroxybicyclopentane' )] = '' TAGL['%s-%s' % (dbse, 'ACANIL01' )] = '' TAGL['%s-%s-reagent' % (dbse, 'ACANIL01' )] = '' TAGL['%s-%s' % (dbse, 'acetone' )] = '' TAGL['%s-%s-reagent' % (dbse, 'acetone' )] = '' TAGL['%s-%s' % (dbse, 'acetylene' )] = '' TAGL['%s-%s-reagent' % (dbse, 'acetylene' )] = '' TAGL['%s-%s' % (dbse, 'ACHTAR10' )] = '' TAGL['%s-%s-reagent' % (dbse, 'ACHTAR10' )] = '' TAGL['%s-%s' % (dbse, 'allene' )] = '' TAGL['%s-%s-reagent' % (dbse, 'allene' )] = '' TAGL['%s-%s' % (dbse, 'ammonia' )] = '' TAGL['%s-%s-reagent' % (dbse, 'ammonia' )] = '' TAGL['%s-%s' % (dbse, 'benzaldehyde' )] = '' TAGL['%s-%s-reagent' % (dbse, 'benzaldehyde' )] 
= '' TAGL['%s-%s' % (dbse, 'benzene' )] = '' TAGL['%s-%s-reagent' % (dbse, 'benzene' )] = '' TAGL['%s-%s' % (dbse, 'benzidine' )] = '' TAGL['%s-%s-reagent' % (dbse, 'benzidine' )] = '' TAGL['%s-%s' % (dbse, 'caffeine' )] = '' TAGL['%s-%s-reagent' % (dbse, 'caffeine' )] = '' TAGL['%s-%s' % (dbse, 'difuropyrazine' )] = '' TAGL['%s-%s-reagent' % (dbse, 'difuropyrazine' )] = '' TAGL['%s-%s' % (dbse, 'dimethylpentane' )] = '' TAGL['%s-%s-reagent' % (dbse, 'dimethylpentane' )] = '' TAGL['%s-%s' % (dbse, 'disilyl_ether' )] = '' TAGL['%s-%s-reagent' % (dbse, 'disilyl_ether' )] = '' TAGL['%s-%s' % (dbse, 'ethane' )] = '' TAGL['%s-%s-reagent' % (dbse, 'ethane' )] = '' TAGL['%s-%s' % (dbse, 'ethanol' )] = '' TAGL['%s-%s-reagent' % (dbse, 'ethanol' )] = '' TAGL['%s-%s' % (dbse, 'furan' )] = '' TAGL['%s-%s-reagent' % (dbse, 'furan' )] = '' TAGL['%s-%s' % (dbse, 'histidine' )] = '' TAGL['%s-%s-reagent' % (dbse, 'histidine' )] = '' TAGL['%s-%s' % (dbse, 'hydroxysulphane' )] = '' TAGL['%s-%s-reagent' % (dbse, 'hydroxysulphane' )] = '' TAGL['%s-%s' % (dbse, 'menthone' )] = '' TAGL['%s-%s-reagent' % (dbse, 'menthone' )] = '' TAGL['%s-%s' % (dbse, 'mesityl_oxide' )] = '' TAGL['%s-%s-reagent' % (dbse, 'mesityl_oxide' )] = '' TAGL['%s-%s' % (dbse, 'methylamine' )] = '' TAGL['%s-%s-reagent' % (dbse, 'methylamine' )] = '' TAGL['%s-%s' % (dbse, 'naphthalene' )] = '' TAGL['%s-%s-reagent' % (dbse, 'naphthalene' )] = '' TAGL['%s-%s' % (dbse, 'neopentane' )] = '' TAGL['%s-%s-reagent' % (dbse, 'neopentane' )] = '' TAGL['%s-%s' % (dbse, 'pterin' )] = '' TAGL['%s-%s-reagent' % (dbse, 'pterin' )] = '' TAGL['%s-%s' % (dbse, 'water' )] = '' TAGL['%s-%s-reagent' % (dbse, 'water' )] = '' # <<< Geometry Specification Strings >>> GEOS = {} # These atoms were replaced below to avoid a D3h related symmetry bug # the point group wasn't getting detected and optimization couldn't proceed #C 2.27501122 1.31347834 0.00000000 #C -2.27501122 1.31347834 0.00000000 GEOS['%s-%s-reagent' % (dbse, '1_3_5_trifluorobenzene')] = qcdb.Molecule(""" 0 1 F 4.45124771 2.56992907 0.00000000 F -4.45124771 2.56992907 0.00000000 F 0.00000000 -5.13985813 0.00000000 C 2.27501122 1.31348 0.00000000 C -2.27501122 1.31348 0.00000000 C 0.00000000 -2.62695668 0.00000000 C 2.27446593 -1.31316352 0.00000000 C -2.27446593 -1.31316352 0.00000000 C 0.00000000 2.62632703 0.00000000 H 4.04176646 -2.33351496 0.00000000 H -4.04176646 -2.33351496 0.00000000 H 0.00000000 4.66702991 0.00000000 units bohr """) GEOS['%s-%s-reagent' % (dbse, '1_3_5_trisilacyclohexane')] = qcdb.Molecule(""" 0 1 Si 2.87562701 1.66024403 0.50009833 Si -2.87562701 1.66024403 0.50009833 Si 0.00000000 -3.32048805 0.50009833 C 0.00000000 3.31617083 -0.65645952 C 2.87188818 -1.65808542 -0.65645952 C -2.87188818 -1.65808542 -0.65645952 H 0.00000000 5.25402682 0.04550787 H 4.55012070 -2.62701341 0.04550787 H -4.55012070 -2.62701341 0.04550787 H 0.00000000 3.33620321 -2.71676085 H 2.88923673 -1.66810160 -2.71676085 H -2.88923673 -1.66810160 -2.71676085 H 5.14953250 2.97308398 -0.46837999 H -5.14953250 2.97308398 -0.46837999 H 2.91112385 1.68073814 3.29599415 H -2.91112385 1.68073814 3.29599415 H 0.00000000 -3.36147627 3.29599415 H 0.00000000 -5.94616795 -0.46837999 units bohr """) GEOS['%s-%s-reagent' % (dbse, '1_3_difluorobenzene')] = qcdb.Molecule(""" 0 1 F 4.45098629 2.53075455 0.00000000 F -4.45098629 2.53075455 0.00000000 C 2.27459315 -1.35284979 0.00000000 C -2.27459315 -1.35284979 0.00000000 C 2.27465109 1.27385640 0.00000000 C -2.27465109 1.27385640 0.00000000 C 0.00000000 2.58727941 
0.00000000 C 0.00000000 -2.66641919 0.00000000 H 4.04232694 -2.37256182 0.00000000 H -4.04232694 -2.37256182 0.00000000 H 0.00000000 4.62804882 0.00000000 H 0.00000000 -4.70730774 0.00000000 units bohr """) GEOS['%s-%s-reagent' % (dbse, '1_5_difluoronaphthalene')] = qcdb.Molecule(""" 0 1 F 5.77442810 0.00000000 0.00000000 F -5.77442810 0.00000000 0.00000000 C 0.72785457 -4.70254512 0.00000000 C -0.72785457 4.70254512 0.00000000 C 3.11062174 -3.60249243 0.00000000 C -3.11062174 3.60249243 0.00000000 C 3.38479931 -0.98799287 0.00000000 C -3.38479931 0.98799287 0.00000000 C 1.23776851 0.57124055 0.00000000 C -1.23776851 -0.57124055 0.00000000 C 1.43014268 3.20907701 0.00000000 C -1.43014268 -3.20907701 0.00000000 H 0.55204008 -6.73646406 0.00000000 H -0.55204008 6.73646406 0.00000000 H 4.76445952 -4.80069021 0.00000000 H -4.76445952 4.80069021 0.00000000 H 3.24999844 4.13948522 0.00000000 H -3.24999844 -4.13948522 0.00000000 units bohr """) GEOS['%s-%s-reagent' % (dbse, '2_hydroxybicyclopentane')] = qcdb.Molecule(""" 0 1 O 0.00000000 0.00000000 3.97630549 C 0.61275612 1.71787828 -0.25674160 C -1.25240609 0.75430367 1.72991074 C -1.89991796 -1.49181590 0.06218311 C 2.64764592 0.00000000 -1.36190849 C -0.10732099 -0.53140328 -1.99092676 H -2.85601026 2.05561017 2.08353099 H 0.13348920 3.49094037 -1.26103342 H 3.57368102 -1.34558615 -0.05933199 H 3.80698111 0.79833379 -2.90613711 H -1.33579202 -3.34159783 0.85888891 H -3.90122993 -1.54049270 -0.53915310 H -0.93405780 0.26002983 -3.74579160 H 1.51218168 -0.82620025 3.41020482 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'ACANIL01')] = qcdb.Molecule(""" 0 1 O 6.74334167 0.00000000 0.00000000 N 2.75125398 -0.91996681 0.00000000 C -3.75958919 -3.62046813 0.00000000 C -1.13660145 -3.38720984 0.00000000 C 0.00427371 -1.00318363 0.00000000 C -1.53985353 1.15387105 0.00000000 C -4.16293704 0.91831969 0.00000000 C -5.26811078 -1.46724271 0.00000000 C 4.57389611 0.80511522 0.00000000 C 4.13207020 3.64054919 0.00000000 H -4.62306754 -5.47176436 0.00000000 H -0.00377805 -5.08765397 0.00000000 H -0.76505606 3.03326534 0.00000000 H -5.34041651 2.58758240 0.00000000 H -7.30266577 -1.64802574 0.00000000 H 3.58082506 -2.66479209 0.00000000 H 5.95212032 4.66178191 0.00000000 H 3.08214744 4.23491124 1.70076220 H 3.08214744 4.23491124 -1.70076220 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'acetone')] = qcdb.Molecule(""" 0 1 O 0.00000000 3.46695757 0.00000000 C 0.00000000 1.14032594 0.00000000 C 0.00000000 -0.29542841 2.50138172 C 0.00000000 -0.29542841 -2.50138172 H 0.00000000 1.00440652 4.13754069 H 0.00000000 1.00440652 -4.13754069 H 1.69360304 -1.50630994 2.66984804 H -1.69360304 -1.50630994 2.66984804 H 1.69360304 -1.50630994 -2.66984804 H -1.69360304 -1.50630994 -2.66984804 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'acetylene')] = qcdb.Molecule(""" 0 1 C 0.00000000 0.00000000 1.13383600 C 0.00000000 0.00000000 -1.13383600 H 0.00000000 0.00000000 3.02356266 H 0.00000000 0.00000000 -3.02356266 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'ACHTAR10')] = qcdb.Molecule(""" 0 1 O 0.00000000 0.00000000 3.93735249 O 1.79875939 0.00000000 -0.09531034 N -4.40589519 1.32037243 -3.31810156 C -2.43021636 -0.18962157 -2.05696026 C -0.22185404 1.49597798 -1.20775357 C 1.69726730 -0.59259412 2.46067577 C 3.97685548 -2.11479138 3.27934906 H -3.68043380 2.27933244 -4.84082518 H -5.10144333 2.68085421 -2.12147722 H -3.24985392 -1.18842676 -0.41051393 H -1.74547418 -1.68142667 -3.35347133 H 0.55351430 2.51912058 -2.85842920 H -0.88071695 2.99188292 0.10524925 H 
5.73529679 -1.04410557 2.94759034 H 4.08562680 -3.90736002 2.21955987 H 3.86856770 -2.56921447 5.31306580 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'allene')] = qcdb.Molecule(""" 0 1 C 0.00000000 0.00000000 0.00000000 C 0.00000000 2.49419295 0.00000000 C 0.00000000 -2.49419295 0.00000000 H 1.76772016 -3.51503166 0.00000000 H -1.76772016 -3.51503166 0.00000000 H 0.00000000 3.51503166 1.76772016 H 0.00000000 3.51503166 -1.76772016 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'ammonia')] = qcdb.Molecule(""" 0 1 N 0.00000000 0.00000000 0.47690250 H 1.55848945 0.89979432 -0.15896750 H -1.55848945 0.89979432 -0.15896750 H 0.00000000 -1.79958864 -0.15896750 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'benzaldehyde')] = qcdb.Molecule(""" 0 1 O 6.11695944 0.00000000 0.00000000 C -0.42811838 -2.25953622 0.00000000 C -2.92869352 -1.43478712 0.00000000 C -3.46561640 1.14118082 0.00000000 C -1.50611491 2.89722764 0.00000000 C 0.99614123 2.07851844 0.00000000 C 1.55290207 -0.51034434 0.00000000 C 4.31002394 -1.46969818 0.00000000 H 4.69277313 -3.52434043 0.00000000 H -0.04838912 -4.26733408 0.00000000 H -4.45167820 -2.79426839 0.00000000 H -5.40516702 1.77808408 0.00000000 H -1.92653663 4.89495992 0.00000000 H 2.49151439 3.47033786 0.00000000 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'benzene')] = qcdb.Molecule(""" 0 1 C 0.00000000 2.63452745 0.00000000 C 0.00000000 -2.63452745 0.00000000 C 2.28156770 1.31726373 0.00000000 C -2.28156770 1.31726373 0.00000000 C 2.28156770 -1.31726373 0.00000000 C -2.28156770 -1.31726373 0.00000000 H 0.00000000 4.67589156 0.00000000 H 0.00000000 -4.67589156 0.00000000 H 4.04944088 2.33794578 0.00000000 H -4.04944088 2.33794578 0.00000000 H 4.04944088 -2.33794578 0.00000000 H -4.04944088 -2.33794578 0.00000000 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'benzidine')] = qcdb.Molecule(""" 0 1 N 0.00000000 0.00000000 9.17973038 N 0.00000000 0.00000000 -9.17973038 C -2.20388942 0.56488223 5.36955702 C 2.20388942 -0.56488223 5.36955702 C -2.20388942 -0.56488223 -5.36955702 C 2.20388942 0.56488223 -5.36955702 C -2.20706622 0.56349235 2.73912945 C 2.20706622 -0.56349235 2.73912945 C -2.20706622 -0.56349235 -2.73912945 C 2.20706622 0.56349235 -2.73912945 C 0.00000000 0.00000000 1.32948630 C 0.00000000 0.00000000 -1.32948630 C 0.00000000 0.00000000 6.67931977 C 0.00000000 0.00000000 -6.67931977 H -3.93022673 1.02227253 6.36283467 H 3.93022673 -1.02227253 6.36283467 H -3.93022673 -1.02227253 -6.36283467 H 3.93022673 1.02227253 -6.36283467 H -3.95573979 1.07384957 1.81596643 H 3.95573979 -1.07384957 1.81596643 H -3.95573979 -1.07384957 -1.81596643 H 3.95573979 1.07384957 -1.81596643 H 1.67837252 -0.43031314 10.04483176 H -1.67837252 0.43031314 10.04483176 H 1.67837252 0.43031314 -10.04483176 H -1.67837252 -0.43031314 -10.04483176 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'caffeine')] = qcdb.Molecule(""" 0 1 O -1.35796495 -4.55968346 0.00000000 O 6.00359465 0.00000000 0.00000000 N -4.34699530 0.40790868 0.00000000 N -2.02147868 4.34704366 0.00000000 N 2.40166495 2.29891253 0.00000000 N 2.38963107 -2.32861610 0.00000000 C -1.73514100 0.00806819 0.00000000 C -0.39656652 2.28913440 0.00000000 C -4.28628286 3.03178701 0.00000000 C -0.23380597 -2.51268919 0.00000000 C 3.66630626 -0.01011647 0.00000000 C 3.91233427 -4.71649369 0.00000000 C 3.86899427 4.70507045 0.00000000 C -6.50871497 -1.38799138 0.00000000 H -6.04146873 4.08346573 0.00000000 H 5.15026261 -4.82916673 1.68145730 H 5.15026261 -4.82916673 -1.68145730 H 2.75182289 -6.45823288 0.00000000 H 5.10374160 4.83609896 
1.68332379 H 5.10374160 4.83609896 -1.68332379 H 2.65878836 6.40983420 0.00000000 H -8.34003564 -0.38023773 0.00000000 H -6.44634525 -2.62051420 1.68782779 H -6.44634525 -2.62051420 -1.68782779 units bohr """) #set { guess gwh """) GEOS['%s-%s-reagent' % (dbse, 'difuropyrazine')] = qcdb.Molecule(""" 0 1 O 5.24048162 0.00000000 0.00000000 O -5.24048162 0.00000000 0.00000000 N 1.15705376 -2.55150608 0.00000000 N -1.15705376 2.55150608 0.00000000 C 1.53596834 2.15160317 0.00000000 C -1.53596834 -2.15160317 0.00000000 C 2.65703648 -0.27471770 0.00000000 C -2.65703648 0.27471770 0.00000000 C 5.62186670 2.62201493 0.00000000 C -5.62186670 -2.62201493 0.00000000 C 3.54881353 4.07756019 0.00000000 C -3.54881353 -4.07756019 0.00000000 H 7.52697788 3.41239127 0.00000000 H -7.52697788 -3.41239127 0.00000000 H 3.34537092 6.12657010 0.00000000 H -3.34537092 -6.12657010 0.00000000 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'dimethylpentane')] = qcdb.Molecule(""" 0 1 C -1.90302142 1.79989214 -3.12819161 C 0.68098191 1.17008149 -1.92744962 C 0.57347759 -0.44273007 0.55332222 C -0.57536860 1.07092655 2.79511667 C 0.00000000 0.00000000 5.44078830 C 2.40130119 0.00000000 -3.96848713 C -0.75740445 -3.03396450 0.24401702 H -3.17069973 2.76026122 -1.77509550 H -2.89812692 0.08656618 -3.79174516 H -1.70835535 3.07391957 -4.77327789 H 1.57127154 2.99847536 -1.42186024 H 2.56484657 -0.84063638 1.06972922 H -2.64484782 1.25561061 2.54633021 H 0.13997457 3.03676459 2.74695911 H -0.82887852 -1.89737304 5.71865173 H -0.77969219 1.22647944 6.94230752 H 2.05566062 -0.15968979 5.78408339 H 1.63584569 -1.79524917 -4.71467976 H 2.65159319 1.28124788 -5.60071709 H 4.31235662 -0.40611038 -3.22785050 H -0.60282164 -4.20392129 1.96624261 H -2.79329149 -2.82201336 -0.17320237 H 0.07519864 -4.15853702 -1.30499111 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'disilyl_ether')] = qcdb.Molecule(""" 0 1 Si 0.00000000 -0.06571048 3.03636189 Si 0.00000000 -0.06571048 -3.03636189 O 0.00000000 -0.88817346 0.00000000 H 0.00000000 -2.19565412 4.54756839 H 2.12290049 1.35272566 3.58475023 H -2.12290049 1.35272566 3.58475023 H 0.00000000 -2.19565412 -4.54756839 H 2.12290049 1.35272566 -3.58475023 H -2.12290049 1.35272566 -3.58475023 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'ethane')] = qcdb.Molecule(""" 0 1 C 0.00000000 0.00000000 1.45478763 C 0.00000000 0.00000000 -1.45478763 H 1.68084455 0.97043609 2.14455455 H 1.68084455 -0.97043609 -2.14455455 H -1.68084455 0.97043609 2.14455455 H -1.68084455 -0.97043609 -2.14455455 H 0.00000000 -1.94087219 2.14455455 H 0.00000000 1.94087219 -2.14455455 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'ethanol')] = qcdb.Molecule(""" 0 1 O 2.94951269 0.00000000 0.00000000 C 0.42864361 0.89070972 0.00000000 C -1.47274991 -1.22612707 0.00000000 H 4.05795769 1.50458064 0.00000000 H 0.07017562 2.06834349 1.69306899 H 0.07017562 2.06834349 -1.69306899 H -1.36184741 -2.46035674 1.67199009 H -1.36184741 -2.46035674 -1.67199009 H -3.38002050 -0.38513679 0.00000000 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'furan')] = qcdb.Molecule(""" 0 1 O 0.00000000 -2.71155703 0.00000000 C 1.30409645 1.35600277 0.00000000 C -1.30409645 1.35600277 0.00000000 C 2.07680908 -1.14870311 0.00000000 C -2.07680908 -1.14870311 0.00000000 H 2.51639050 2.99782755 0.00000000 H -2.51639050 2.99782755 0.00000000 H 3.99399875 -1.84934869 0.00000000 H -3.99399875 -1.84934869 0.00000000 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'histidine')] = qcdb.Molecule(""" 0 1 O 3.93683911 0.00000000 5.02858545 O 0.00000000 0.00000000 
6.75548572 N -1.62714005 -0.17063169 -6.38981145 N -1.55525882 2.75691585 -2.98858739 N -0.06519044 -3.43699076 1.78152280 C 0.00313112 -0.96382673 -4.49205004 C 0.04394056 0.76526971 -2.47860156 C -2.44306081 2.04255765 -5.31626413 C 1.61712938 0.55012280 -0.07100455 C 0.25915307 -0.68070217 2.22272340 C 1.63605981 -0.20456172 4.77295177 H 1.09061008 -2.68966721 -4.55294959 H -1.91653466 4.35407321 -1.95395961 H -3.77917584 3.21725531 -6.32817718 H 2.24417172 2.47631265 0.44479551 H 3.39934986 -0.46804317 -0.47537143 H -1.63798632 0.18516799 2.40194434 H -1.09297021 -4.24259361 3.21643716 H -1.09919371 -3.74001070 0.16810497 H 0.98612615 0.24935257 8.25422581 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'hydroxysulphane')] = qcdb.Molecule(""" 0 1 S 0.00000000 0.00000000 1.64344454 O 1.55643788 0.00000000 -0.78417924 H 0.70878977 -0.98889634 -2.04698233 H -2.26522765 0.98889634 1.18771703 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'menthone')] = qcdb.Molecule(""" 0 1 O 0.00000000 0.00000000 4.83502957 C -5.06597212 -1.27592091 0.49885049 C -3.60348796 -1.49111229 -2.01995066 C -1.13779972 0.12182250 -2.02402508 C 0.69335828 -0.53324847 0.24699141 C -0.81879368 -0.76420189 2.79442766 C -3.41755812 -2.06413311 2.77868746 C 0.08327139 -0.18247958 -4.66184769 C 3.16977849 1.11788425 0.33780916 C 5.23967937 0.00000000 2.05851212 C 2.74820737 3.91648659 1.03692914 H -5.73534045 0.69223903 0.75660810 H -6.80139535 -2.44289264 0.43045930 H -3.15419510 -3.50140339 -2.39715388 H -4.86109777 -0.90264082 -3.58603040 H -1.71463208 2.12647811 -1.82542348 H 1.33530286 -2.48925976 -0.10949068 H -4.41049264 -1.64891601 4.56938165 H -3.10227312 -4.12767303 2.77142958 H -1.27515064 0.19340176 -6.20625544 H 0.83979297 -2.10810531 -4.96157719 H 1.65711962 1.15531285 -4.96195049 H 4.01314574 1.10167735 -1.57473542 H 4.69908810 0.02990650 4.07747056 H 7.03475689 1.05859686 1.90111311 H 5.66887645 -1.98486988 1.56898286 H 4.52277834 5.01677786 0.95132487 H 1.98900684 4.13531008 2.97264568 H 1.40402606 4.85096335 -0.25821233 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'mesityl_oxide')] = qcdb.Molecule(""" 0 1 O 4.30492455 0.00000000 0.00000000 C 0.05024721 -3.82629843 0.00000000 C -1.35087834 -1.32752917 0.00000000 C -4.20838872 -1.49335398 0.00000000 C -0.19920658 0.94023239 0.00000000 C 2.60618767 1.58735088 0.00000000 C 3.31537901 4.37315331 0.00000000 H 1.28461121 -4.00174347 1.67716810 H 1.28461121 -4.00174347 -1.67716810 H -1.21281465 -5.48980179 0.00000000 H -5.04695592 -0.57944060 1.68053694 H -5.04695592 -0.57944060 -1.68053694 H -4.87911033 -3.47264458 0.00000000 H -1.42668593 2.59614349 0.00000000 H 2.56758840 5.33389639 1.69384111 H 2.56758840 5.33389639 -1.69384111 H 5.38985873 4.60732325 0.00000000 units bohr """) #set { guess gwh """) GEOS['%s-%s-reagent' % (dbse, 'methylamine')] = qcdb.Molecule(""" 0 1 N 1.59169309 0.00000000 0.00000000 C -1.10781247 -0.03073718 0.00000000 H 2.61432616 -1.63020032 0.00000000 H -1.81666320 1.93163906 0.00000000 H 2.57804913 1.64911594 0.00000000 H -1.92979635 -0.95990875 1.69695191 H -1.92979635 -0.95990875 -1.69695191 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'naphthalene')] = qcdb.Molecule(""" 0 1 C 1.31500993 4.56625993 0.00000000 C -1.31500993 4.56625993 0.00000000 C 1.31500993 -4.56625993 0.00000000 C -1.31500993 -4.56625993 0.00000000 C 2.65095410 2.30121210 0.00000000 C -2.65095410 2.30121210 0.00000000 C 2.65095410 -2.30121210 0.00000000 C -2.65095410 -2.30121210 0.00000000 C 1.35957848 0.00000000 0.00000000 C -1.35957848 0.00000000 0.00000000 H 
2.32713807 6.33915590 0.00000000 H -2.32713807 6.33915590 0.00000000 H 2.32713807 -6.33915590 0.00000000 H -2.32713807 -6.33915590 0.00000000 H 4.69449351 2.36375141 0.00000000 H -4.69449351 2.36375141 0.00000000 H 4.69449351 -2.36375141 0.00000000 H -4.69449351 -2.36375141 0.00000000 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'neopentane')] = qcdb.Molecule(""" 0 1 C 0.00000000 0.00000000 0.00000000 C 1.68781269 -1.68781269 1.68781269 C -1.68781269 1.68781269 1.68781269 C -1.68781269 -1.68781269 -1.68781269 C 1.68781269 1.68781269 -1.68781269 H 2.93275937 -0.55961452 2.93275937 H -2.93275937 0.55961452 2.93275937 H 2.93275937 0.55961452 -2.93275937 H -2.93275937 -0.55961452 -2.93275937 H 0.55961452 -2.93275937 2.93275937 H -0.55961452 2.93275937 2.93275937 H -0.55961452 -2.93275937 -2.93275937 H 0.55961452 2.93275937 -2.93275937 H 2.93275937 -2.93275937 0.55961452 H -2.93275937 2.93275937 0.55961452 H -2.93275937 -2.93275937 -0.55961452 H 2.93275937 2.93275937 -0.55961452 units bohr """) GEOS['%s-%s-reagent' % (dbse, 'pterin')] = qcdb.Molecule(""" 0 1 O 5.40068710 0.00000000 0.00000000 N 1.67450469 -4.01224809 0.00000000 N -3.29778810 -2.66298586 0.00000000 N 2.41435003 2.98093954 0.00000000 N -2.07868639 1.96577816 0.00000000 N -1.05941931 6.33383022 0.00000000 C 1.04506477 -1.58869528 0.00000000 C -1.57825490 -0.83595727 0.00000000 C -0.10078959 -5.76401042 0.00000000 C -2.66568392 -5.07794600 0.00000000 C 3.13177958 0.52435884 0.00000000 C -0.33744328 3.67065942 0.00000000 H 0.42296241 -7.73663229 0.00000000 H -4.11548210 -6.51452882 0.00000000 H 3.70204141 4.43045951 0.00000000 H -2.96601438 6.68621706 0.00000000 H 0.40817199 7.60076128 0.00000000 units bohr """) #set { guess gwh """) GEOS['%s-%s-reagent' % (dbse, 'water')] = qcdb.Molecule(""" 0 1 O 0.00000000 -0.69801390 0.00000000 H 1.48150016 0.34900695 0.00000000 H -1.48150016 0.34900695 0.00000000 units bohr """) ######################################################################### # <<< Supplementary Quantum Chemical Results >>> DATA = {} DATA['NUCLEAR REPULSION ENERGY'] = {} DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-1_3_5_trifluorobenzene-reagent' ] = 422.92396136 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-1_3_5_trisilacyclohexane-reagent'] = 458.36587183 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-1_3_difluorobenzene-reagent' ] = 342.91092587 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-1_5_difluoronaphthalene-reagent' ] = 646.43123032 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-2_hydroxybicyclopentane-reagent' ] = 242.19428832 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-ACANIL01-reagent' ] = 482.21477925 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-acetone-reagent' ] = 117.95076939 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-acetylene-reagent' ] = 25.27722466 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-ACHTAR10-reagent' ] = 308.80224696 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-allene-reagent' ] = 58.87417679 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-ammonia-reagent' ] = 11.96515487 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-benzaldehyde-reagent' ] = 318.78609908 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-benzene-reagent' ] = 203.68596051 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-benzidine-reagent' ] = 792.45947768 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-caffeine-reagent' ] = 906.96430213 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-difuropyrazine-reagent' ] = 627.88695998 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-dimethylpentane-reagent' ] = 329.98386705 DATA['NUCLEAR REPULSION 
ENERGY']['BAKERJCC93-disilyl_ether-reagent' ] = 159.72016132 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-ethane-reagent' ] = 42.23178002 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-ethanol-reagent' ] = 81.36264622 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-furan-reagent' ] = 160.13552808 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-histidine-reagent' ] = 593.28835805 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-hydroxysulphane-reagent' ] = 61.30095938 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-menthone-reagent' ] = 661.81731171 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-mesityl_oxide-reagent' ] = 286.76670258 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-methylamine-reagent' ] = 42.02150992 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-naphthalene-reagent' ] = 460.06217417 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-neopentane-reagent' ] = 196.29453370 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-pterin-reagent' ] = 650.63929481 DATA['NUCLEAR REPULSION ENERGY']['BAKERJCC93-water-reagent' ] = 9.15711319
kratman/psi4public
psi4/share/psi4/databases/BAKERJCC93.py
Python
gpl-2.0
42,491
[ "Psi4" ]
49d37892e793409889a9cf19d420ee413a01bc71d54c05f0c2203be60f40f90f
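# Illustrative sketch: the ACTV/RXNM blocks in BAKERJCC93.py above repeat one
# single-reagent pattern per system. The loop below (not part of the Psi4
# module; HRXN abbreviated) builds identical dictionaries and shows the
# '%s-%s' key convention these database modules rely on.
dbse = 'BAKERJCC93'
HRXN = ['acetone', 'benzene', 'water']  # abbreviated for illustration
ACTV, RXNM = {}, {}
for rxn in HRXN:
    key = '%s-%s' % (dbse, rxn)
    ACTV[key] = ['%s-%s-reagent' % (dbse, rxn)]
    RXNM[key] = dict(zip(ACTV[key], [+1]))
assert RXNM['BAKERJCC93-water'] == {'BAKERJCC93-water-reagent': +1}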
#!/usr/bin/env python
########################################################################
# File :    dirac-dms-pfn-accessURL
# Author :  Stuart Paterson
########################################################################
"""
Retrieve an access URL for a PFN given a valid DIRAC SE

Usage:
  dirac-dms-pfn-accessURL [options] ... PFN SE

Arguments:
  PFN:      Physical File Name or file containing PFNs (mandatory)
  SE:       Valid DIRAC SE (mandatory)
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

__RCSID__ = "$Id$"

import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript


@DIRACScript()
def main():
    Script.parseCommandLine(ignoreErrors=True)
    args = Script.getPositionalArgs()

    if len(args) < 2:
        Script.showHelp(exitCode=1)

    if len(args) > 2:
        print('Only one PFN SE pair will be considered')

    from DIRAC.Interfaces.API.Dirac import Dirac
    dirac = Dirac()
    exitCode = 0

    pfn = args[0]
    seName = args[1]

    try:
        with open(pfn, 'r') as f:
            pfns = f.read().splitlines()
    except Exception:
        pfns = [pfn]

    for pfn in pfns:
        result = dirac.getPhysicalFileAccessURL(pfn, seName, printOutput=True)
        if not result['OK']:
            print('ERROR: ', result['Message'])
            exitCode = 2

    DIRAC.exit(exitCode)


if __name__ == "__main__":
    main()
yujikato/DIRAC
src/DIRAC/Interfaces/scripts/dirac_dms_pfn_accessURL.py
Python
gpl-3.0
1,410
[ "DIRAC" ]
a6bf0f191994655441ad7ace42a86f4657a303e49747e175a58904d382f3e147
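# Illustrative sketch: dirac-dms-pfn-accessURL treats its PFN argument as
# either a literal PFN or a path to a file listing PFNs, one per line. A
# dependency-free version of that dispatch (the helper name is hypothetical;
# the real script inlines this logic in main()):
def expand_pfn_argument(pfn):
    """Return the lines of `pfn` if it is a readable file, else [pfn]."""
    try:
        with open(pfn, 'r') as f:
            return f.read().splitlines()
    except Exception:
        return [pfn]

if __name__ == '__main__':
    assert expand_pfn_argument('/no/such/file') == ['/no/such/file']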
#---------------------------------------------------------------------- # Copyright (c) 2013-2015 Raytheon BBN Technologies # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and/or hardware specification (the "Work") to # deal in the Work without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Work, and to permit persons to whom the Work # is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Work. # # THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS # IN THE WORK. #---------------------------------------------------------------------- # XML tag constants RSPEC_TAG = 'rspec' LINK_TAG = 'link' NODE_TAG = 'node' PORT_TAG = 'port' STITCHING_TAG = 'stitching' PATH_TAG = 'path' SLIVER_TYPE_TAG = 'sliver_type' EXPIRES_ATTRIBUTE = 'expires' # Capabilities element names CAPABILITIES_TAG = 'capabilities' CAPABILITY_TAG = 'capability' CONSUMER_VALUE = 'consumer' PRODUCER_VALUE = 'producer' VLANCONSUMER_VALUE = 'vlanconsumer' VLANPRODUCER_VALUE = 'vlanproducer' # see geni.util.rspec_schema for namespaces # This should go away, its value is no longer used LAST_UPDATE_TIME_TAG = "lastUpdateTime" # Need the ExoSM URL, as ugly as that is EXOSM_URL = "https://geni.renci.org:11443/orca/xmlrpc" # Need to be able to ID Utah AMs for default sliver expirations (see below) PGU_URN = "urn:publicid:IDN+emulab.net+authority+cm" IGUDDC_URN = "urn:publicid:IDN+utahddc.geniracks.net+authority+cm" USTITCH_URN = "urn:publicid:IDN+stitch.geniracks.net+authority+cm" APT_URN = "urn:publicid:IDN+apt.emulab.net+authority+cm" CL_URN_END = ".cloudlab.us+authority+cm" # Default sliver expirations by AM type in days as of September, 2014 # Utah is Utah DDC and ProtoGENI Utah and Utah Stitch and ALL Cloudlab (including Clemson and Wisconsin). And Apt # See ticket #577 DEF_SLIVER_EXPIRATION_UTAH = 5 DEF_SLIVER_EXPIRATION_IG = 90 DEF_SLIVER_EXPIRATION_GRAM = 7 DEF_SLIVER_EXPIRATION_EG = 14 # Singleton class for getting the default sliver expirations for some AM types # Allows the config to have an omni_defaults section with values for these defaults to over-ride the values specified here # Stitchhandler should call defs.DefaultSliverExpirations.getInstance(config, logger) # Then uses of defs.DEF_... in objects.py should instead do: # defs_getter = defs.DefaultSliverExpirations.getInstance() # defaultUtah = defs_getter.getUtah() .... 
class DefaultSliverExpirations(object): instance = None def __init__(self, config, logger=None): self.config = config self.logger = logger self.utah = None self.ig = None self.gram = None self.eg = None self.otherUtahUrns = None @classmethod def getInstance(cls, config=None, logger=None): if DefaultSliverExpirations.instance: if config: DefaultSliverExpirations.instance.config = config else: DefaultSliverExpirations.instance = DefaultSliverExpirations(config, logger) return DefaultSliverExpirations.instance # Parse the new value, allowing for # to denote start of end-of-line comment def parseConfig(self, value): if not value: raise Exception("No value supplied") import re match = re.match(r'^\s*(\d+)\s*#*', value) if not match: raise Exception("Could not find integer in value") return int(match.group(1)) # Is this AM one of the AMs subject to the Utah default sliver expiration? # Start with hard-coded defaults, but then accept additional Utah URNs from omni_defaults.utah_am_urns (CSV list) def isUtah(self, agg): if agg is None or not agg.isPG: return False if not agg.urn: return False if agg.urn in [PGU_URN, IGUDDC_URN, USTITCH_URN, APT_URN]: return True if agg.urn.endswith(CL_URN_END): return True if self.otherUtahUrns is None and self.config and self.config.has_key('omni_defaults') and self.config['omni_defaults'].has_key('utah_am_urns') and self.config['omni_defaults']['utah_am_urns']: try: urns = str(self.config['omni_defaults']['utah_am_urns']).strip().split(',') self.otherUtahUrns = [] for urn in urns: if not urn: continue u = urn.strip() if u == '': continue self.otherUtahUrns.append(u) if self.logger is not None: self.logger.debug("otherUrns IDing Utah AMs: %s", self.otherUtahUrns) except Exception, e: if self.logger is not None: self.logger.debug("Failed to parse omni_defaults/utah_am_urns: %s", e) if self.otherUtahUrns is not None and agg.urn in self.otherUtahUrns: return True return False def getUtah(self): if self.utah: return self.utah self.utah = DEF_SLIVER_EXPIRATION_UTAH if self.config and self.config.has_key('omni_defaults') and self.config['omni_defaults'].has_key('def_sliver_expiration_utah') and self.config['omni_defaults']['def_sliver_expiration_utah']: try: self.utah = self.parseConfig(self.config['omni_defaults']['def_sliver_expiration_utah']) self.logger.debug("Resetting default Utah sliver expiration to %s", self.utah) except Exception, e: self.logger.info("Failed to parse def_sliver_expiration_utah from omni_defaults. Parsing '%s' gave: %s", self.config['omni_defaults']['def_sliver_expiration_utah'], e) return self.utah def getIG(self): if self.ig: return self.ig self.ig = DEF_SLIVER_EXPIRATION_IG if self.config and self.config.has_key('omni_defaults') and self.config['omni_defaults'].has_key('def_sliver_expiration_ig') and self.config['omni_defaults']['def_sliver_expiration_ig']: try: self.ig = self.parseConfig(self.config['omni_defaults']['def_sliver_expiration_ig']) self.logger.debug("Resetting default IG sliver expiration to %s", self.ig) except Exception, e: self.logger.info("Failed to parse def_sliver_expiration_ig from omni_defaults.
Parsing '%s' gave: %s", self.config['omni_defaults']['def_sliver_expiration_ig'], e) return self.ig def getGram(self): if self.gram: return self.gram self.gram = DEF_SLIVER_EXPIRATION_GRAM if self.config and self.config.has_key('omni_defaults') and self.config['omni_defaults'].has_key('def_sliver_expiration_gram') and self.config['omni_defaults']['def_sliver_expiration_gram']: try: self.gram = self.parseConfig(self.config['omni_defaults']['def_sliver_expiration_gram']) self.logger.debug("Resetting default GRAM sliver expiration to %s", self.gram) except Exception, e: self.logger.info("Failed to parse def_sliver_expiration_gram from omni_defaults. Parsing '%s' gave: %s", self.config['omni_defaults']['def_sliver_expiration_gram'], e) return self.gram def getEG(self): if self.eg: return self.eg self.eg = DEF_SLIVER_EXPIRATION_EG if self.config and self.config.has_key('omni_defaults') and self.config['omni_defaults'].has_key('def_sliver_expiration_eg') and self.config['omni_defaults']['def_sliver_expiration_eg']: try: self.eg = self.parseConfig(self.config['omni_defaults']['def_sliver_expiration_eg']) self.logger.debug("Resetting default EG sliver expiration to %s", self.eg) except Exception, e: self.logger.info("Failed to parse def_sliver_expiration_eg from omni_defaults. Parsing '%s' gave: %s", self.config['omni_defaults']['def_sliver_expiration_eg'], e) return self.eg # schema paths for switching between v1 and v2 STITCH_V1_BASE = "hpn.east.isi.edu/rspec/ext/stitch/0.1" STITCH_V2_BASE = "geni.net/resources/rspec/ext/stitch/2" STITCH_V1_SCHEMA = "http://hpn.east.isi.edu/rspec/ext/stitch/0.1/ http://hpn.east.isi.edu/rspec/ext/stitch/0.1/stitch-schema.xsd" STITCH_V1_NS = "http://hpn.east.isi.edu/rspec/ext/stitch/0.1" STITCH_V2_SCHEMA = "http://www.geni.net/resources/rspec/ext/stitch/2/ http://www.geni.net/resources/rspec/ext/stitch/2/stitch-schema.xsd" STITCH_V2_NS = "http://www.geni.net/resources/rspec/ext/stitch/2" # Minutes since last VLAN availability check before bothering to check again CHECK_AVAIL_INTERVAL_MINS=60
plantigrade/geni-tools
src/gcf/omnilib/stitch/defs.py
Python
mit
9,291
[ "ORCA" ]
44c7b759401e503897e65086cf0f375dbb293156a24c5401bc20d8ad01bd21f7
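# Illustrative sketch: DefaultSliverExpirations.parseConfig above accepts an
# integer with an optional trailing '#' end-of-line comment. A standalone
# version of the same parsing rule (the function name is illustrative):
import re

def parse_expiration(value):
    if not value:
        raise ValueError("No value supplied")
    match = re.match(r'^\s*(\d+)\s*#*', value)
    if not match:
        raise ValueError("Could not find integer in value")
    return int(match.group(1))

assert parse_expiration('90') == 90
assert parse_expiration(' 5  # days, Utah AMs') == 5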
"""Coders for individual Variable objects.""" from __future__ import absolute_import, division, print_function import warnings from functools import partial import numpy as np import pandas as pd from ..core import dtypes, duck_array_ops, indexing from ..core.pycompat import dask_array_type from ..core.variable import Variable class SerializationWarning(RuntimeWarning): """Warnings about encoding/decoding issues in serialization.""" class VariableCoder(object): """Base class for encoding and decoding transformations on variables. We use coders for transforming variables between xarray's data model and a format suitable for serialization. For example, coders apply CF conventions for how data should be represented in netCDF files. Subclasses should implement encode() and decode(), which should satisfy the identity ``coder.decode(coder.encode(variable)) == variable``. If any options are necessary, they should be implemented as arguments to the __init__ method. The optional name argument to encode() and decode() exists solely for the sake of better error messages, and should correspond to the name of variables in the underlying store. """ def encode(self, variable, name=None): # type: (Variable, Any) -> Variable """Convert an encoded variable to a decoded variable.""" raise NotImplementedError def decode(self, variable, name=None): # type: (Variable, Any) -> Variable """Convert an decoded variable to a encoded variable.""" raise NotImplementedError class _ElementwiseFunctionArray(indexing.ExplicitlyIndexedNDArrayMixin): """Lazily computed array holding values of elemwise-function. Do not construct this object directly: call lazy_elemwise_func instead. Values are computed upon indexing or coercion to a NumPy array. """ def __init__(self, array, func, dtype): assert not isinstance(array, dask_array_type) self.array = indexing.as_indexable(array) self.func = func self._dtype = dtype @property def dtype(self): return np.dtype(self._dtype) def __getitem__(self, key): return self.func(self.array[key]) def __repr__(self): return ("%s(%r, func=%r, dtype=%r)" % (type(self).__name__, self.array, self.func, self.dtype)) def lazy_elemwise_func(array, func, dtype): """Lazily apply an element-wise function to an array. Parameters ---------- array : any valid value of Variable._data func : callable Function to apply to indexed slices of an array. For use with dask, this should be a pickle-able object. dtype : coercible to np.dtype Dtype for the result of this function. Returns ------- Either a dask.array.Array or _ElementwiseFunctionArray. """ if isinstance(array, dask_array_type): return array.map_blocks(func, dtype=dtype) else: return _ElementwiseFunctionArray(array, func, dtype) def unpack_for_encoding(var): return var.dims, var.data, var.attrs.copy(), var.encoding.copy() def unpack_for_decoding(var): return var.dims, var._data, var.attrs.copy(), var.encoding.copy() def safe_setitem(dest, key, value, name=None): if key in dest: var_str = ' on variable {!r}'.format(name) if name else '' raise ValueError( 'failed to prevent overwriting existing key {} in attrs{}. ' 'This is probably an encoding field used by xarray to describe ' 'how a variable is serialized. To proceed, remove this key from ' "the variable's attributes manually.".format(key, var_str)) dest[key] = value def pop_to(source, dest, key, name=None): """ A convenience function which pops a key k from source to dest. None values are not passed on. If k already exists in dest an error is raised. 
""" value = source.pop(key, None) if value is not None: safe_setitem(dest, key, value, name=name) return value def _apply_mask(data, # type: np.ndarray encoded_fill_values, # type: list decoded_fill_value, # type: Any dtype, # type: Any ): # type: np.ndarray """Mask all matching values in a NumPy arrays.""" data = np.asarray(data, dtype=dtype) condition = False for fv in encoded_fill_values: condition |= data == fv return np.where(condition, decoded_fill_value, data) class CFMaskCoder(VariableCoder): """Mask or unmask fill values according to CF conventions.""" def encode(self, variable, name=None): dims, data, attrs, encoding = unpack_for_encoding(variable) if encoding.get('_FillValue') is not None: fill_value = pop_to(encoding, attrs, '_FillValue', name=name) if not pd.isnull(fill_value): data = duck_array_ops.fillna(data, fill_value) return Variable(dims, data, attrs, encoding) def decode(self, variable, name=None): dims, data, attrs, encoding = unpack_for_decoding(variable) raw_fill_values = [pop_to(attrs, encoding, attr, name=name) for attr in ('missing_value', '_FillValue')] if raw_fill_values: encoded_fill_values = {fv for option in raw_fill_values for fv in np.ravel(option) if not pd.isnull(fv)} if len(encoded_fill_values) > 1: warnings.warn("variable {!r} has multiple fill values {}, " "decoding all values to NaN." .format(name, encoded_fill_values), SerializationWarning, stacklevel=3) dtype, decoded_fill_value = dtypes.maybe_promote(data.dtype) if encoded_fill_values: transform = partial(_apply_mask, encoded_fill_values=encoded_fill_values, decoded_fill_value=decoded_fill_value, dtype=dtype) data = lazy_elemwise_func(data, transform, dtype) return Variable(dims, data, attrs, encoding) def _scale_offset_decoding(data, scale_factor, add_offset, dtype): data = np.array(data, dtype=dtype, copy=True) if scale_factor is not None: data *= scale_factor if add_offset is not None: data += add_offset return data def _choose_float_dtype(dtype, has_offset): """Return a float dtype that can losslessly represent `dtype` values.""" # Keep float32 as-is. Upcast half-precision to single-precision, # because float16 is "intended for storage but not computation" if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating): return np.float32 # float32 can exactly represent all integers up to 24 bits if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer): # A scale factor is entirely safe (vanishing into the mantissa), # but a large integer offset could lead to loss of precision. # Sensitivity analysis can be tricky, so we just use a float64 # if there's any offset at all - better unoptimised than wrong! if not has_offset: return np.float32 # For all other types and circumstances, we just use float64. # (safe because eg. complex numbers are not supported in NetCDF) return np.float64 class CFScaleOffsetCoder(VariableCoder): """Scale and offset variables according to CF conventions. 
Follows the formula: decode_values = encoded_values * scale_factor + add_offset """ def encode(self, variable, name=None): dims, data, attrs, encoding = unpack_for_encoding(variable) if 'scale_factor' in encoding or 'add_offset' in encoding: dtype = _choose_float_dtype(data.dtype, 'add_offset' in encoding) data = data.astype(dtype=dtype, copy=True) if 'add_offset' in encoding: data -= pop_to(encoding, attrs, 'add_offset', name=name) if 'scale_factor' in encoding: data /= pop_to(encoding, attrs, 'scale_factor', name=name) return Variable(dims, data, attrs, encoding) def decode(self, variable, name=None): dims, data, attrs, encoding = unpack_for_decoding(variable) if 'scale_factor' in attrs or 'add_offset' in attrs: scale_factor = pop_to(attrs, encoding, 'scale_factor', name=name) add_offset = pop_to(attrs, encoding, 'add_offset', name=name) dtype = _choose_float_dtype(data.dtype, 'add_offset' in attrs) transform = partial(_scale_offset_decoding, scale_factor=scale_factor, add_offset=add_offset, dtype=dtype) data = lazy_elemwise_func(data, transform, dtype) return Variable(dims, data, attrs, encoding) class UnsignedIntegerCoder(VariableCoder): def encode(self, variable, name=None): dims, data, attrs, encoding = unpack_for_encoding(variable) if encoding.get('_Unsigned', False): pop_to(encoding, attrs, '_Unsigned') signed_dtype = np.dtype('i%s' % data.dtype.itemsize) if '_FillValue' in attrs: new_fill = signed_dtype.type(attrs['_FillValue']) attrs['_FillValue'] = new_fill data = duck_array_ops.around(data).astype(signed_dtype) return Variable(dims, data, attrs, encoding) def decode(self, variable, name=None): dims, data, attrs, encoding = unpack_for_decoding(variable) if '_Unsigned' in attrs: unsigned = pop_to(attrs, encoding, '_Unsigned') if data.dtype.kind == 'i': if unsigned: unsigned_dtype = np.dtype('u%s' % data.dtype.itemsize) transform = partial(np.asarray, dtype=unsigned_dtype) data = lazy_elemwise_func(data, transform, unsigned_dtype) if '_FillValue' in attrs: new_fill = unsigned_dtype.type(attrs['_FillValue']) attrs['_FillValue'] = new_fill else: warnings.warn("variable %r has _Unsigned attribute but is not " "of integer type. Ignoring attribute." % name, SerializationWarning, stacklevel=3) return Variable(dims, data, attrs, encoding)
jcmgray/xarray
xarray/coding/variables.py
Python
apache-2.0
10,630
[ "NetCDF" ]
616da71b2c23a87fb38a5a6983d7ccf5a0de81bbfc23d4e3528a398f50c74900
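The coders in the file above implement CF decoding as two lazy element-wise steps: mask the declared fill values to NaN, then apply scale_factor and add_offset. A minimal standalone NumPy sketch of that same decode path (illustrative only, not xarray's API; the sample values are made up):

import numpy as np

def decode_cf_values(data, fill_values=(), scale_factor=None, add_offset=None):
    """Illustrative CF decode: mask fill values to NaN, then scale/offset."""
    data = np.asarray(data, dtype=np.float64)  # promote so NaN is representable
    mask = np.zeros(data.shape, dtype=bool)
    for fv in fill_values:
        mask |= data == fv
    data = np.where(mask, np.nan, data)
    if scale_factor is not None:
        data = data * scale_factor
    if add_offset is not None:
        data = data + add_offset
    return data

# packed int16 data with _FillValue=-999, scale_factor=0.01, add_offset=20.0
packed = np.array([-999, 150, 300], dtype=np.int16)
print(decode_cf_values(packed, fill_values=[-999], scale_factor=0.01, add_offset=20.0))
# -> [  nan 21.5  23. ]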
"""Defines use-cases for verifying requirements and providing examples """ import unittest import seres from seres import test objs = test.get_all_models() class FileCsv(unittest.TestCase): def test_create(self): seres.create("file:///C:/Users/Brian/Projects/seres/test/test.csv", objs) def test_read(self): objs = seres.read("file:///C:/Users/Brian/Projects/seres/test/test.csv") def test_update(self): seres.read("file:///C:/Users/Brian/Projects/seres/test/test.csv", objs) def test_delete(self): seres.delete("file:///C:/Users/Brian/Projects/seres/test/test.csv") if __name__ == "__main__": unittest.main()
Tythos/seres
test/restful_crud.py
Python
mit
635
[ "Brian" ]
410accb9a13a136f6bdad19c9d057d3f8c1383044a928e5dbd63de3d9c7b81d0
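The tests above hard-code a Windows path, and each CRUD step lives in its own test method, so test ordering matters. A portable variant of the same pattern (a sketch; it assumes the create/read/update/delete entry points with the signatures used above, which are not guaranteed by the snippet) builds the file URL from a temporary directory and keeps the round trip in one test:

import tempfile
import unittest
from pathlib import Path

import seres
from seres import test

class FileCsvPortable(unittest.TestCase):
    def setUp(self):
        # derive a file:// URL from a temp dir instead of a fixed Windows path
        self.tmp = tempfile.TemporaryDirectory()
        self.url = Path(self.tmp.name, "test.csv").as_uri()
        self.objs = test.get_all_models()

    def tearDown(self):
        self.tmp.cleanup()

    def test_round_trip(self):
        # one test, so create/read/update/delete run in a guaranteed order
        seres.create(self.url, self.objs)
        read_back = seres.read(self.url)
        seres.update(self.url, read_back)
        seres.delete(self.url)

if __name__ == "__main__":
    unittest.main()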
""" """ import os import glob import ast import logging import subprocess import ctypes as c import numpy as np import numpy.ctypeslib as ctl from .vhdl_ctree.frontend import get_ast from .vhdl_ctree.c.nodes import MultiNode from .vhdl_ctree.jit import LazySpecializedFunction from errors import TransformationError from collections import namedtuple # from sejits4fpgas.src.config import config logger = logging.getLogger(__name__) logger.disabled = config.getboolean("Logging", "disable_logging") logger.setLevel(logging.DEBUG) # create console handler and set level to debug ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) # create formatter formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') # add formatter to ch ch.setFormatter(formatter) # add ch to logger logger.addHandler(ch) class VhdlSynthModule(object): """ Manages synthetisation of all VhdlFiles in VhdlProject.""" def __init__(self): self._linked_files = [] # vivado project folder self.v_proj_fol = os.path.dirname(__file__) + config.get("Paths", "vivado_proj_path") if os.path.isdir(self.v_proj_fol): logger.info("Found Vivado Project at: %s" % self.v_proj_fol) else: log_txt = "Could not find Vivado Project at: %s" % self.v_proj_fol logger.warning(log_txt) # --------------------------------------------------------------------- # LOGGING # --------------------------------------------------------------------- logger.info("Initialized VhdlSynthModule") # --------------------------------------------------------------------- self.hw_interface = None def __call__(self, *args, **kwargs): """Redirect call to python or vhdl kernel.""" if os.uname()[-1] == "armv7l": if len(args) > 1: raise TransformationError("Multiple input data currently not supported by the hardware") mod_arg = args[0].astype(np.uint32) self.hw_interface(mod_arg, len(mod_arg)) return mod_arg else: return "Concrete Specialized Function called on x86" def _link_in(self, submodule): """Add submodule to list of linked files. 
:param submodule: path to VHDL file :type submodule: str """ self._linked_files.append(submodule) def get_callable(self, entry_point_name, entry_point_typesig): """Return a python callable that redirects to hardware.""" self._link_to_vivado_project() self._activate() return self def _link_to_vivado_project(self): """Link all files to Vivado template project.""" # vivado src folder v_src_fol = self.v_proj_fol + "template_project.srcs/sources_1/new/" # Clean up source folder for proj_file in glob.glob(v_src_fol + "*"): if os.path.basename(proj_file) != "top.vhd": os.remove(proj_file) # Copy all files to top folder and save file names file_names = [] for file_path in self._linked_files: os.system("cp " + file_path + " " + v_src_fol) file_names.append(os.path.basename(file_path)) # Add update source files in TCL script saved_tcl_file_path = self.v_proj_fol + "template_project.sav" mod_tcl_file_path = self.v_proj_fol + "template_project.tcl" if not os.path.exists(saved_tcl_file_path): os.system("cp " + mod_tcl_file_path + " " + saved_tcl_file_path) with open(saved_tcl_file_path, "r") as old_tcl, open(mod_tcl_file_path, "w") as new_tcl: line = old_tcl.readline() # TODO: find better way than with while loops # read till set origin_dir while "set origin_dir" not in line: new_tcl.write(line) line = old_tcl.readline() new_tcl.write("set origin_dir " + '"' + os.path.abspath(self.v_proj_fol) + '"') line = "" # read till create_project while "create_project" not in line: new_tcl.write(line) line = old_tcl.readline() new_tcl.write("create_project -force template_project ./template_project\n") line = "" # read till set files begins while "set files" not in line: new_tcl.write(line) line = old_tcl.readline() # read till end of set files while "]\n" != line: new_tcl.write(line) line = old_tcl.readline() # insert new files tcl_set_file = ' "[file normalize "$origin_dir/template_project.srcs/sources_1/new/{file_name}"]"\\\n' for vhdl_file_name in file_names: new_tcl.write(tcl_set_file.format(file_name=vhdl_file_name)) new_tcl.write("]\n") # copy rest of file for line in old_tcl.readlines(): new_tcl.write(line) def _activate(self): """Initialize Vivado synthesis subprocess.""" if os.uname()[-1] == "armv7l": print "Execute synthesis script" else: pass class VhdlLazySpecializedFunction(LazySpecializedFunction): def __init__(self, py_ast=None, sub_dir=None, backend_name="default", py_func=None): """Extend existing LazySpecializedFunction with error handling and default execution. Herefore the parameter py_func is added in order to also pass the Python function of the AST passed to py_ast. :param py_ast: Python AST representation of py_func :param sub_dir: sub directory :param backend_name: Unused in VHDL Back-End :type backend_name: str :param py_func: Python function which is also passed in its AST representation as py_ast :type py_func: function """ self.py_func = py_func super(VhdlLazySpecializedFunction, self).__init__(py_ast, sub_dir, backend_name) def __call__(self, *args, **kwargs): """ Added error-handling with Python fall-back around super.__call__. If calling the __call__ method of the super-class raises an TransformationError exeption, the passed Python function will be called instead of an specialized version. In case of an TransformationError, the cache is cleared. .. todo:: refine cache cleaning in error case """ ret = None try: ret = super(VhdlLazySpecializedFunction, self).__call__(*args, **kwargs) except TransformationError: print "Calling Python function ..." 
subprocess.call(["ctree", "-cc"]) ret = self.py_func(*args, **kwargs) finally: return ret @classmethod def from_function(cls, func, folder_name=''): class Replacer(ast.NodeTransformer): def visit_Module(self, node): return MultiNode(body=[self.visit(i) for i in node.body]) def visit_FunctionDef(self, node): if node.name == func.__name__: node.name = 'apply' node.body = [self.visit(item) for item in node.body] return node def visit_Name(self, node): if node.id == func.__name__: node.id = 'apply' return node func_ast = Replacer().visit(get_ast(func)) return cls(py_ast=func_ast, sub_dir=folder_name or func.__name__, py_func=func)
alphaFred/Sejits4Fpgas
sejits4fpgas/src/jit_synth.py
Python
gpl-3.0
7,688
[ "VisIt" ]
906b677f28c2515ac6faeae3afd14a7a87f3ce4e95cd04cd0cff0e03996945a6
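The core idea in VhdlLazySpecializedFunction.__call__ above, try the specialized path and fall back to the plain Python implementation when specialization fails, can be sketched independently of ctree. SpecializationError and with_python_fallback here are hypothetical stand-ins, not part of the library:

class SpecializationError(Exception):
    """Stand-in for the TransformationError used above."""

def with_python_fallback(specialized, py_func):
    """Wrap `specialized` so any specialization failure falls back to `py_func`."""
    def call(*args, **kwargs):
        try:
            return specialized(*args, **kwargs)
        except SpecializationError:
            # same shape as the __call__ above: on failure, run the
            # original pure-Python implementation instead
            return py_func(*args, **kwargs)
    return call

def fast_sum(xs):
    raise SpecializationError("no hardware available")

safe_sum = with_python_fallback(fast_sum, sum)
print(safe_sum([1, 2, 3]))  # -> 6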
# -*- coding: utf-8 -*- # # we set up default information for our locale. # Translators should use this file as the basis of their translation. # Copy this file and rename it for your locale. # # For example, Spanish uses: # defaults_es.py # # British English uses: # defaults_en_GB.py # # Please fill in the below fields: # Language: English # Translator: None (English is our default language). # Last-updated: 4/27/05 # TRANSLATOR WARNING: DO NOT TRANSLATE THE FIELD NAMES: ONLY THE VALUES!!! # only translate the items in the list [..] (and feel free to create # categories that make sense for your locale -- no need to translate # these ones). DO NOT translate 'cuisine','rating','source' or # 'category' # The below are Gourmet's standard fields and the default categories for them. # Do not translate the field names ('cuisine','rating','source','category'). # Instead, fill in the list with categories that make sense for your locale. # Feel free to change the number or content of categories to be consistent # with what users in your locale are likely to be familiar with. from .abstractLang import AbstractLanguage from typing import Any, Mapping, List class Language(AbstractLanguage): fields={'cuisine': ['American','Italian','French','Mexican', 'Southwestern','Asian/Thai','Asian/Vietnamese', 'Asian/Chinese','Asian/Japanese','Asian/Indian'], 'rating' : ['Excellent','Great','Good','Fair','Poor'], 'source' : [], 'category' : ['Dessert','Entree','Salad','Soup', 'Breakfast'], 'yield_unit':['servings','cups','pints','gallons','ounces', 'cookies','crusts', ], } # In English, there are a heck of a lot of synonyms. This is a list # for those synonyms. ["preferred word","alternate word","alternate word"] # If there are none of these that you can think of in your language, just # set this to: # SYNONYMS=[] SYNONYMS=[ # the first item of each list is the default ["arugula","rocket"], ["azuki beans", "adzuki beans", "adzuki", "azuki"], ["beet","beetroot"], ["bell pepper, green", "green pepper", "bell pepper","green bell pepper", "pepper"], ["black cod","sablefish"], ["bok choy","chinese leaves"], ["chilean sea bass","patagonian toothfish"], ["chilli pepper","capsicum"], ["chokeberry","cooking apple"], ["cilantro","coriander"], ["collard greens","spring greens"], ["corn","sweetcorn","maize","sweet corn"], ["dragonfruit" , "pitaya"], ["eggplant","aubergine"], ["flour, all purpose","flour, all-purpose","flour","white flour"], ["jackfruit","nangka"], ["juneberry","saskatoon"], ["kiwi fruit","chinese gooseberry"], ["langsat","longkong", "duku"], ["mamoncillo", "quenepa", "genip"], ["nannyberry","sheepberry"], ["red bell pepper","red pepper"], ["rose apple", "malay apple"], ["rutabaga","swede"], ["salak", "snakefruit"], ["sapodilla", "chiku", "sapadilla", "snake fruit", "sawo"], ["scallion","green onion","spring onion"], ["snap peas","mangetout"], ["soursop", "guanabana"], ["star fruit","carambola"], ["sugar, granulated","sugar"], ["sunberry","wonderberry"], ["velvet persimmon","mabolo"], ["zucchini","courgette"], ] # A DICTIONARY CONTAINING INGREDIENT KEYS AND NDBNO for the USDA # nutritional database. For these items, we will have nutritional # information by default. NUTRITIONAL_INFO: Mapping[str, Any] = {} # a dictionary for ambiguous words. 
# key=ambiguous word, value=list of possible non-ambiguous terms # # Translators: if you have a word that has more than one food meaning # in your language, you can add an entry as follow # AMBIGUOUS = { # 'word':['meaning1','meaning2','meaning3'], # } AMBIGUOUS: Mapping[str, List[str]] = {} # triplicates ITEM, KEY, SHOPPING CATEGORY # These will be defaults. # They should include whatever foods might be standard for your # locale, with whatever sensible default categories you can think of # (again, thinking of your locale, not simply translating what I've # done). # Items provided here will automatically be recognized and go onto the # given category in a user's shopping list by default. # Don't feel obligated to translate every item -- especially since not # all items will be common for all locales. However, the more items # you can put here, the more the user will get the sense that gourmet # "knows" about foods that they enter. # I generated the below list using the wikipedia entry on foods as my # starting point. You may want to do something similar for your # locale. Also, if the syntax of the below is too complicated, you # can send me a list of category headings and ingredients for your # locale and I'll do the necessary formatting <Thomas_Hinkle@alumni.brown.edu> INGREDIENT_DATA = [("alfalfa sprouts","alfalfa sprouts","produce"), ("anise","anise","produce"), ("artichoke","artichoke","produce"), ("arugula","arugula","produce"), ("asparagus","asparagus","produce"), ("eggplant","eggplant","produce"), ("avocado","avocado","produce"), ("green beans","green beans","produce"), ("azuki beans","azuki beans","produce"), ("bean sprouts","bean sprouts","produce"), ("black beans","black beans","produce"), ("black-eyed peas","black-eyed peas","produce"), ("borlotti beans","borlotti beans","produce"), ("broad beans","broad beans","produce"), ("chickpeas, garbanzos, or ceci beans","chickpeas, garbanzos, or ceci beans","produce"), ("green beans","green beans","produce"), ("kidney beans","kidney beans","produce"), ("lentils","lentils","produce"), ("lima bean","butter bean","produce"), ("mung beans","mung beans","produce"), ("navy beans","navy beans","produce"), ("runner beans","runner beans","produce"), ("soybeans","soybeans","produce"), ("peas","peas","produce"), ("snap peas","snap peas","produce"), ("bok choy","bok choy","produce"), ("breadfruit","breadfruit","produce"), ("broccoflower","broccoflower","produce"), ("broccoli","broccoli","produce"), ("brussels sprouts","brussels sprouts","produce"), ("cabbage","cabbage","produce"), ("calabrese","calabrese","produce"), ("cauliflower","cauliflower","produce"), ("celery","celery","produce"), ("chard","chard","produce"), ("cilantro","cilantro","produce"), ("collard greens","collard greens","produce"), ("corn salad","corn salad","produce"), ("endive","endive","produce"), ("fennel","fennel","produce"), ("fiddleheads","fiddleheads","produce"), ("frisee","frisee","produce"), ("kale","kale","produce"), ("kohlrabi","kohlrabi","produce"), ("lemon grass","lemon grass","produce"), ("lettuce lactuca sativa","lettuce lactuca sativa","produce"), ("corn","corn","produce"), ("mushrooms","mushrooms","produce"), ("mustard greens","mustard greens","produce"), ("nettles","nettles","produce"), ("new zealand spinach","new zealand spinach","produce"), ("okra","okra","produce"), ("onion family","onion family","produce"), ("chives","chives","produce"), ("garlic","garlic","produce"), ("leek allium porrum","leek allium porrum","produce"), ("onion","onion","produce"), 
("shallot","shallot","produce"), ("scallion","scallion","produce"), ("parsley","parsley","produce"), ("green pepper","bell pepper, green","produce"), ("red bell pepper","bell pepper, red","produce"), ("chilli pepper","chilli pepper","produce"), ("jalapeño pepper","pepper, jalapeño","produce"), ("habañero pepper","pepper, habañero","produce"), ("radicchio","radicchio","produce"), ("rapini","rapini","produce"), ("rhubarb","rhubarb","produce"), ("root vegetables","root vegetables","produce"), ("beet","beet","produce"), ("carrot","carrot","produce"), ("cassava (manioc)","cassava (manioc)","produce"), ("celeriac","celeriac","produce"), ("daikon","daikon","produce"), ("fennel","fennel","produce"), ("ginger","ginger","produce"), ("parsnip","parsnip","produce"), ("radish","radish","produce"), ("rutabaga","rutabaga","produce"), ("turnip","turnip","produce"), ("wasabi","wasabi","produce"), ("white radish","white radish","produce"), ("skirret","skirret","produce"), ("spinach","spinach","produce"), ("acorn squash","squash, acorn","produce"), ("butternut squash","squash, butternut","produce"), ("zucchini","zucchini","produce"), ("cucumber","cucumber","produce"), ("gem squash","squash, gem","produce"), ("patty pans","patty pans","produce"), ("pumpkin","pumpkin","produce"), ("spaghetti squash","squash, spaghetti","produce"), ("tat soi","tat soi","produce"), ("tomato","tomato","produce"), ("jicama","jicama","produce"), ("jerusalem artichoke","jerusalem artichoke","produce"), ("potato","potato","produce"), ("sweet potato","sweet potato","produce"), ("taro","taro","produce"), ("yam","yam","produce"), ("water chestnut","water chestnut","produce"), ("watercress","watercress","produce"), # fruits, from wikipedia list ("apple","apple","produce"), ("green apple","green apple","produce"), ("crabapple","crabapple","produce"), ("chokeberry","chokeberry","produce"), ("hawthorn","hawthorn","produce"), ("juneberry","juneberry","produce"), ("loquat","loquat","produce"), ("medlar","medlar","produce"), ("pomegranate","pomegranate","produce"), ("quince","quince","produce"), ("rowan","rowan","produce"), ("rose hip","rose hip","produce"), ("apricot","apricot","produce"), ("cherry","cherry","produce"), ("plum","plum","produce"), ("peach","peach","produce"), ("nectarine","nectarine","produce"), ("blackberry","blackberry","produce"), ("boysenberry","boysenberry","produce"), ("raspberry","raspberry","produce"), ("cloudberry","cloudberry","produce"), ("wineberry","wineberry","produce"), ("bearberry","bearberry","produce"), ("bilberry","bilberry","produce"), ("blueberry ","blueberry ","produce"), ("cranberry ","cranberry ","produce"), ("huckleberry ","huckleberry ","produce"), ("lingonberry","lingonberry","produce"), ("barberry ","barberry ","produce"), ("red currant","currant, red","produce"), ("black currant","currant, black","produce"), ("white currant","currant, white","produce"), ("elderberry ","elderberry ","produce"), ("gooseberry ","gooseberry ","produce"), ("nannyberry","nannyberry","produce"), ("sea-buckthorn","sea-buckthorn","produce"), ("wolfberry","wolfberry","produce"), ("crowberry","crowberry","produce"), ("mulberry","mulberry","produce"), ("goumi","goumi","produce"), ("kiwi fruit ","kiwi fruit ","produce"), ("persimmon ","persimmon ","produce"), ("buffaloberry","buffaloberry","produce"), ("pawpaw","pawpaw","produce"), ("american persimmon","american persimmon","produce"), ("prickly pear ","prickly pear ","produce"), ("saguaro","saguaro ","produce"), ("pitaya","pitaya","produce"), 
("cantaloupe","cantaloupe","produce"), ("honeydew","honeydew","produce"), ("sunberry","sunberry","produce"), ("watermelon ","watermelon ","produce"), ("strawberry ","strawberry ","produce"), ("angelica","angelica","produce"), ("rhubarb","rhubarb","produce"), ("fig ","fig ","produce"), ("grape","grape","produce"), ("jujube","jujube","produce"), ("black mulberry","black mulberry","produce"), ("pomegranate","pomegranate","produce"), ("date","date","produce"), ("citron","citron","produce"), ("grapefruit","grapefruit","produce"), ("pommelo","pommelo","produce"), ("key lime","key lime","produce"), ("kumquat","kumquat","produce"), ("lemon","lemon","produce"), ("lime","lime","produce"), ("mandarin","mandarin","produce"), ("clementine","clementine","produce"), ("tangelo","tangelo","produce"), ("tangerine","tangerine","produce"), ("orange","orange","produce"), ("ugli fruit","ugli fruit","produce"), ("guava ","guava ","produce"), ("longan","longan","produce"), ("lychee","lychee","produce"), ("passion fruit","passion fruit","produce"), ("feijoa","feijoa","produce"), ("akee","akee","produce"), ("banana","banana","produce"), ("plantain","plantain","produce"), ("breadfruit","breadfruit","produce"), ("camucamu","camucamu","produce"), ("star fruit","star fruit","produce"), ("cempedak","cempedak","produce"), ("cherimoya","cherimoya","produce"), ("coconut","coconut","produce"), ("custard apple","custard apple","produce"), ("dragonfruit","dragonfruit","produce"), ("durian","durian","produce"), ("guarana","guarana","produce"), ("jackfruit","jackfruit","produce"), ("keppel fruit","keppel fruit","produce"), ("langsat","langsat","produce"), ("velvet persimmon","velvet persimmon","produce"), ("mamey sapote","mamey sapote","produce"), ("mamoncillo","mamoncillo","produce"), ("mango","mango","produce"), ("mangosteen","mangosteen","produce"), ("marang","marang","produce"), ("papaya","papaya","produce"), ("peanut butter fruit","peanut butter fruit","produce"), ("pineapple","pineapple","produce"), ("poha","poha","produce"), ("rambutan","rambutan","produce"), ("rose apple","rose apple","produce"), ("salak","salak","produce"), ("sapodilla","sapodilla","produce"), ("soursop","soursop","produce"), ("sugar apple","sugar apple","produce"), ("tamarind","tamarind","produce"), ## seafood, from wikipedia list ("anchovy","anchovy","seafood"), ("bass","bass","seafood"), ("striped bass","striped bass","seafood"), ("black cod","black cod","seafood"), ("blowfish","blowfish","seafood"), ("catfish","catfish","seafood"), ("cod","cod","seafood"), ("eel","eel","seafood"), ("flounder","flounder","seafood"), ("haddock","haddock","seafood"), ("halibut","halibut","seafood"), ("lingcod","lingcod","seafood"), ("mahi mahi","mahi mahi","seafood"), ("monkfish","monkfish","seafood"), ("orange roughy","orange roughy","seafood"), ("chilean sea bass","chilean sea bass","seafood"), ("pike","pike","seafood"), ("pollock","pollock","seafood"), ("sanddab","sanddab","seafood"), ("sardine","sardine","seafood"), ("salmon","salmon","seafood"), ("sea bass","sea bass","seafood"), ("shark","shark","seafood"), ("snapper","snapper","seafood"), ("rockfish","rockfish","seafood"), ("rock cod","rock cod","seafood"), ("pacific snapper","pacific snapper","seafood"), ("red snapper","red snapper","seafood"), ("sole","sole","seafood"), ("sturgeon","sturgeon","seafood"), ("surimi","surimi","seafood"), ("swordfish","swordfish","seafood"), ("tilapia","tilapia","seafood"), ("tilefish","tilefish","seafood"), ("trout","trout","seafood"), ("tuna","tuna","seafood"), 
("whitefish","whitefish","seafood"), ("whiting","whiting","seafood"), ("roe","roe","seafood"), ("caviar","caviar","seafood"), ("salmon roe","salmon roe","seafood"), ("crab","crab","seafood"), ("dungness crab","dungness crab","seafood"), ("king crab","king crab","seafood"), ("snow crab","snow crab","seafood"), ("crayfish","crayfish","seafood"), ("lobster","lobster","seafood"), ("shrimp","shrimp","seafood"), ("prawns","prawns","seafood"), ("abalone","abalone","seafood"), ("clam","clam","seafood"), ("mussel","mussel","seafood"), ("octopus","octopus","seafood"), ("oyster","oyster","seafood"), ("snail","snail","seafood"), ("squid","squid","seafood"), ("scallop","scallop","seafood"), ## meats (garnered from wikipedia lists) ("bacon","bacon","meats"), ("chorizo","chorizo","meats"), ("fuet","fuet","meats"), ("salami","salami","meats"), ("ham","ham","meats"), ("mutton","mutton","meats"), ("lamb","lamb","meats"), ("veal","veal","meats"), ("steak","steak","meats"), ("hamburger","hamburger","meats"), ("roast beef","roast beef","meats"), ("chicken","chicken","meats"), ("turkey","turkey","meats"), ("duck","duck","meats"), ("goose","goose","meats"), ## my old list ("tamarind water","tamarind water", "international"), ("tamarind juice","tamarind juice", "international"), ('vegetable broth','broth, vegetable', 'soups&sauces'), ('fresh basil','basil, fresh', 'produce',), ('light sugar brown','sugar, light brown', 'baking',), ('balsamic vinegar','vinegar, balsamic', 'wines&oils',), ('zuchini','zuchini', 'produce',), ('avocado','avocado', 'produce',), ('walnut','walnut', 'baking',), ('celery','celery', 'produce',), ('coriander seeds','coriander, seeds', 'spices',), ('provolone cheese','cheese, provolone', 'dairy',), ('galanga','galanga', 'produce',), ('couscous','couscous', 'pastas',), ('rice','rice', 'pastas',), ('flour tortillas','tortillas, flour', 'dairy',), ('olive oil','oil, olive', 'wines&oils',), ('vanilla extract','vanilla extract', 'baking',), ('red potato-skinned','potato, red-skinned', 'produce',), ('powdered ginger','ginger, powdered', 'spices',), ('roasted chili paste','roasted chili paste', 'international',), ('curry powder','curry powder', 'spices',), ('dried shrimp','shrimp, dried', 'international',), ('dijon mustard','mustard, dijon', 'condiments',), ('whole rock cod or snapper','whole rock cod or snapper', 'seafood',), ('shells pasta','pasta, shells', 'pastas',), ('green canned chiles','green chiles, canned', 'international',), ('nutmeg','nutmeg', 'spices',), ('sourdough bread','bread, sourdough', 'bread',), ('corn oil','oil, corn', 'wines&oils',), ('lemon grass','lemon grass', 'produce',), ('feta cheese','cheese, feta', 'dairy',), ('jack cheese','cheese, jack', 'dairy',), ('grape tomato','tomato, grape', 'produce',), ('cherry tomato','tomato, cherry', 'produce',), ('spaghetti','spaghetti', 'pastas',), ('cottage cheese','cheese, cottage', 'dairy',), ('white onion','onion, white', 'produce',), ('baking soda','baking soda', 'baking',), ('garam masala','garam masala', 'spices',), ('yogurt','yogurt', 'dairy',), ('monkfish','monkfish', 'seafood',), ('croutons','croutons', 'bread',), ('ground coriander','coriander, ground', 'spices',), ('chili powder','chili powder', 'spices',), ('curly lettuce leaf','lettuce, curly leaf', 'produce',), ('dark sugar brown','sugar, dark brown', 'baking',), ('rice vinegar','vinegar, rice', 'international',), ('pasta','pasta', 'pastas',), ('sesame oil','oil, sesame', 'wines&oils',), ('water','water', ''), ('sour cream','sour cream', 'dairy',), ('orange juice','orange 
juice', 'produce',), ('spinach','spinach', 'produce',), ('stick cinnamon','cinnamon, stick', 'spices',), ('shrimp paste','shrimp paste', 'international',), ('ground cinnamon','cinnamon, ground', 'spices',), ('salad greens','salad greens', 'produce',), ('garlic','garlic', 'produce',), ('vegetable oil','oil, vegetable', 'wines&oils',), ('peanut butter','peanut butter', 'bread',), ('seeds ajowan','ajowan, seeds', 'spices',), ('apple','apple', 'produce',), ('cayenne','cayenne', 'spices',), ('arugula','arugula', 'produce',), ('linguine pasta','pasta, linguine', 'pastas',), ('scallion','scallion', 'produce',), ('egg','egg', 'dairy',), ('lime','lime', 'produce',), ('olives','olives', 'produce',), ('basil, thai fresh','basil, fresh, thai', 'produce',), ('bean sprouts','bean sprouts', 'produce',), ('ricotta cheese','cheese, ricotta', 'dairy',), ('parsley','parsley', 'produce',), ('acorn squash','squash, acorn', 'produce',), ('yellow onion','onion, yellow', 'produce',), ('chiles, dried red','chiles, red, dried', 'produce',), ('portobello mushroom','mushroom, portobello', 'produce',), ('nappa cabbage','cabbage, nappa', 'produce',), ('lime leaves','lime leaves', 'produce',), ('butter','butter', 'dairy',), ('bell red pepper','bell pepper, red', 'produce',), ('mushroom','mushroom', 'produce',), ('shallot','shallot', 'produce',), ('cheddar cheese','cheese, cheddar', 'dairy',), ('mozzarella cheese','cheese, mozzarella', 'dairy',), ('squash','squash', 'produce',), ('fish sauce','fish sauce', 'international',), ('green curry paste','green curry paste', 'international',), ('curly endive','endive, curly', 'produce',), ('granulated sugar','sugar, granulated', 'baking',), ('fresh cheese white goat','cheese, fresh white goat', 'dairy',), ('cilantro stems','cilantro stems', 'produce',), ('yellow cornmeal','cornmeal, yellow', 'baking',), ('paprika','paprika', 'spices',), ('chocolate chips','chocolate chips', 'baking',), ('star anise','star anise', 'spices',), ('brown sugar','sugar, brown', 'baking',), ('roasted peanuts','peanuts, roasted', 'produce',), ('fresh cilantro','cilantro, fresh', 'produce',), ('honey','honey', 'baking',), ('russet potato','potato, russet', 'produce',), ('lemon juice','lemon juice', 'produce',), ('carrot','carrot', 'produce',), ('penne pasta','pasta, penne', 'pastas',), ('red onion','onion, red', 'produce',), ('shredded coconut','coconut, shredded', 'baking',), ('peppered linguini','linguini, peppered', 'pastas',), ('milk','milk', 'dairy',), ('tahitian squash','squash, tahitian', 'produce',), ('baking powder','baking powder', 'baking',), ('tomato sauce','tomato sauce', 'soups&sauces',), ('seeds mustard','mustard, seeds', 'spices',), ('flat rice flour noodles','flat rice flour noodles', 'international',), ('parmesan cheese','cheese, parmesan', 'pastas',), ('mayonnaise','mayonnaise', 'bread',), ('leek','leek', 'produce',), ('zucchini','zucchini', 'produce',), ('smoked cheese Gouda','cheese, smoked Gouda', 'dairy',), ('lime juice','lime juice', 'produce',), ('coconut milk','coconut milk', 'international',), ('eggs','egg', 'dairy',), ('salmon','salmon', 'seafood',), ('lasagna pasta noodles','pasta, lasagna noodles', 'pastas',), ('all purpose flour','flour, all purpose', 'baking',), ('flour','flour, all purpose','baking',), ('all-purpose flour','flour, all purpose','baking',), ('ground cumin','cumin, ground', 'spices',), ('cucumber','cucumber', 'produce',), ('salsa','salsa', 'international',), ('broccoli','broccoli', 'produce',), ('rolled oats','rolled oats', 'pastas',), ('tomato','tomato', 
'produce',), ('potato','potato', 'produce',), ('white wine','wine, white', 'wines&oils',), ('black ground pepper','black pepper, ground', 'spices',), ('seeds cumin','cumin, seeds', 'spices',), ('soy sauce','soy sauce', 'international',), ('sesame seeds','sesame seeds', 'international',), ('radicchio','radicchio', 'produce',), ('salt','salt', 'baking',), ('fresh ginger','ginger, fresh', 'produce',), ('turmeric','turmeric', 'spices',), ('chicken breast' ,'chicken, breast' , 'meats',), ('whole chicken' ,'chicken, whole' , 'meats',), ('chicken leg' ,'chicken, leg' , 'meats',), ('beef' ,'beef' , 'meats',), ('ground beef' ,'beef, ground' , 'meats',), ('pork' ,'pork' , 'meats',), ('turkey' ,'turkey' , 'meats',), ] # THESE ARE STANDARD UNIT CONVERSIONS. You can simply translate unit names where # you know them. Eliminate entries that are untranslatable or don't exist in your # locale. And please add any additional units that you know of. # Each unit is of the following format: # ("unit1","unit2"):conversion_factor, where unit1 contains conversion_factor X unit2 # For example: 1 cup has 16 tablespoons. CONVERTER_TABLE = { ("c", "Tbs"):16, ("lb", "oz"):16, ("Tbs", "tsp"):3, ("pt", "c"):2, ("qt", "c"):4, ("gallon", "qt"):4, ("l", "qt"):1.057, ('Japanese cup','ml'):200, ('metric cup','ml'):250, ('Imperial cup','ml'):284.130625, ('Imperial pint','oz'):20, ("l", "ml"):1000, ("l", "cl"):100, ("l", "dl"):10, ("oz", "g"):28.35, ("fl oz","Tbs"):2, ("kg", "g"):1000, ("g", "mg"):1000, ("tsp", "drop"):76, ("oz", "dram"):16, ("dram", "grains"):27.34375, ("peck", "gallon"):2, ("bucket", "peck"):2, ("bushel", "bucket"):2, ("lb", "grains"):7000} # DENSITIES of common foods. This allows us to convert between mass and volume. # Translators: You may be best off translating the food names below, since lists # of food densities can be hard to come by! DENSITY_TABLE={ "water":1, "juice, grape":1.03, "vegetable broth":1, "broth, vegetable":1, "broth, chicken":1, "milk":1.029, "milk, whole":1.029, "milk, skim":1.033, "milk, 2%":1.031, "milk, 1%":1.03, "coconut milk":0.875, "buttermilk":1.03, "heavy cream":0.994, "light cream":1.012, "half and half":1.025, "honey":1.420, "sugar, granulated":0.9, "salt":2.165, "butter":0.911, "oil, vegetable":0.88, "oil, olive":0.88, "oil, corn":0.88, "oil, sesame":0.88, "flour, all purpose": 0.55, "flour, whole wheat": 0.53, "corn starch": 0.6, "sugar, powdered": 0.6, "sugar, confectioners": 0.6 } # Standard unit names and alternate unit names that might appear. For # example: "c." is our standard abbreviation for cup. "cup","c." or # "cups" might appear in a recipe we are importing. Each item of this # list looks like this: # # ["standard", ["alternate1","alternate2","alternate3",...]] # # The first item should be the preferred abbreviation # The second item should be the full name of the unit # e.g. ["c.", ["cup",...]] # UNITS = [("", ["each", "eaches", "ea", "ea."]), ("bucket", ["bucket", "buckets", "bckt", "bckt."]), ("peck", ["peck", "pecks"]), ("bushel", ["bushel", "bushels", "bshl", "bshl.", "bsh", "bsh.", "bu", "bu."]), ("grains", ["grain", "grains"]), ("dram", ["dram", "drams"]), ("drop", ["drop", "drops"]), ("fl oz", ["fl oz", "fluid ounce","fluid ounces","fl ounces", "fl. ounces","fl. oz", "fl oz.", "fl. 
oz."]), ("tsp", ["teaspoon", "teaspoons", "tea_spoon", "tea_spoons", "Teaspoon", "Teaspoons", "Tea_spoon", "Tea_spoons", "tsps","tsps.","Tsps","Tsps.","tsp","tsp.","Tsp","Tsp.","ts","ts.","Ts","Ts.","t","t."]), ("Tbs", ["tablespoon", "tablespoons","table_spoon", "table_spoons","Tablespoon","Tablespoons","Table_spoon","Table_spoons","tbsp","tbsp.","Tbsp","Tbsp.","tbs","tbs.","Tbs","Tbs.","tb","tb.","Tb","Tb.","T","T."]), ("lb", ["pound", "pounds", "lbs", "lbs.", "lb", "lb."]), ("oz", ["ounce", "ounces", "oz", "oz."]), ("c", ["cup", "cups", "c."]), ("qt", ["quart", "quarts", "qt.", "Qt", "Qt."]), ("pt", ["pint", "pints", "pt.", "Pt", "Pt."]), ("gallon", ["gallon", "gallons", "gal", "gal."]), ("ml", ["mililiter", "mililiters", "ml", "ml."]), ("cl", ["centiliter", "centiliters","cl", "cl."]), ("dl", ["deciliter", "deciliters", "dl", "dl."]), ("l", ["liter", "liters", "lit.", "l", "l."]), ("g", ["grams", "gram", "g.", "g", "gr", "gr."]), ("mg", ["miligram", "miligrams", "mg", "mg."]), ("kg", ["kilogram", "kilograms", "kg", "kg."]), # These names aren"t really convertible, but we want them to # be recognized as units. ("small", ["small", "Small", "sm", "sm."]), ("medium", ["medium", "Medium", "med", "med.", "Med", "Med."]), ("large", ["large", "Large", "lg", "lg.", "Lg", "Lg."]), ("box", ["box", "Box", "bx"]), ("whole", ["whole", "whl", "wh."]), ("clove", ["clove", "cloves", "clv", "clv."]), ("can", ["can", "Can", "cn", "cn."]), ("head", ["head", "heads", "Head", "Heads", "hd", "hd."]), ("package",["pkg.", "package", "Package", "packages", "Packages", "pkg", "Pkg.", "pack"]), ("slice", ["slice", "slices"]), ("bunch", ["bunch", "bunches"]), ] METRIC_RANGE = (1,999) # The following sets up unit groups. Users will be able to turn # these on or off (American users, for example, would likely turn # off metric units, since we don't use them). # (User choice not implemented yet) UNIT_GROUPS = { 'metric mass':[('mg',METRIC_RANGE), ('g',METRIC_RANGE), ('kg',(1,None))], 'metric volume':[('ml',METRIC_RANGE), ('cl',(1,99)), ('dl',(1,9)), ('l',(1,None)),], 'imperial weight':[('grains',(0,27)), ('dram',(0.5,15)), ('oz',(0.25,32)), ('lb',(0.25,None)), ], 'imperial volume':[ #('drop',(0,3)), ('tsp',(0.125,5.9)), ('Tbs',(1,4)), ('c',(0.25,8)), #('pt',(1,1)), #('qt',(1,3)), ('gallon',(1,None)), #('peck',(1,2)), #('bucket',(1,2)), #('bushel',(1,None)), ('fl oz',(1,None)), ] } # The units here need to correspond to the standard unit names defined # above in UNITS CROSS_UNIT_TABLE = { ## This if for units that require an additional ## bit of information -- i.e. to convert between ## volume and mass you need the density of an ## item. In these cases, the additional factor ## will be provided as an 'item' that is then looked ## up in the dictionary referenced here (i.e. the density_table) ## currently, 'density' is the only keyword used ("pt", "lb") :('density',1), ("Tbs", "oz") :('density',0.5), ("c", "oz") :('density',8), ("pt", "oz") :('density',16), ("ml", "g") :('density',1), ('oz','fl oz'):('density',1), } # The units here need to correspond to the standard unit names defined # in UNITS. These are some core conversions from mass-to-volume, # assuming a density of 1 (i.e. the density of water). VOL_TO_MASS_TABLE = { ("pt", "lb") : 1, ("Tbs", "oz") : 0.5, ("c", "oz") : 8, ("pt", "oz") : 16, ("ml", "g") : 1, ("ml", "mg") : 1000, ("ml", "kg"): 0.001, ("cl", "kg"): 0.01, ("cl", "g") : 10, ("dl", "kg") : 0.1, ("dl", "g") : 100, ("l", "kg") : 1} # TIME ABBREVIATIONS (this is new!) 
TIME_ABBREVIATIONS = { 'sec':'seconds', 'min':'minutes', 'hr':'hours' } # These functions are rather important! Our goal is simply to # facilitate look ups -- if the user types in "tomatoes", we want to # find "tomato." Note that the strings these functions produce will # _never_ be shown to the user, so it's fine to generate nonsense # words as well as correct answers -- our goal is to generate a list # of possible hits rather than to get the plural/singular form "right". irregular_plurals={ "geese":"goose", } import re two_digit_plural_matcher = re.compile('[szxo]es$') one_digit_plural_matcher = re.compile("[^s]s$") v_plural_matcher = re.compile('ves') @staticmethod def guess_singulars (s): if len(s)<3: return [] rets = [] if s in Language.irregular_plurals: rets.append(Language.irregular_plurals[s]) if Language.two_digit_plural_matcher.search(s): wrd=s[0:-2] if wrd not in rets: rets.append(wrd) if Language.v_plural_matcher.search(s): rets.append(s[0:-3]+'f') if Language.one_digit_plural_matcher.search(s): rets.append(s[0:-1]) return rets @staticmethod def guess_plurals (s): if not s: return [] ret = [s+'s',s+'es'] if s[-1]=='f': ret.append(s[0:-1]+'ves') return ret IGNORE = ["and","with","of","for","cold","warm","finely","thinly","roughly","coarsely"] NUMBERS = { (1.0/8):['eighth','an eigth'], (1.0/4):['quarter','a quarter'], (3.0/4):['three quarters'], (2.0/3):['two thirds'], (1.0/3):['third','a third'], (1.0/2):['one half','a half','half'], 1:['an','a','one'], 2:['two','a couple','a couple of','a pair of'], 3:['three'], 4:['four'], 5:['five'], 6:['six'], 7:['seven'], 8:['eight'], 9:['nine'], 10:['ten'], 11:['eleven'], 12:['twelve','a dozen'], 20:['twenty'], 30:['thirty'], 40:['forty'], 50:['fifty'], 60:['sixty'], } PLURALS = [ ('loaf','loaves'), ('box','boxes'), ] PLURALS += [(s,s+'s') for s in ['cup','crust','clove','serving','pound','gram', 'ounce','tablespoon','teaspoon','gallon','can', 'slice','pie','package','quart','pint','muffin', 'cookie',] ]
thinkle/gourmet
gourmet/defaults/defaults_en.py
Python
gpl-2.0
43,183
[ "Octopus" ]
2de050179b96be74367c3e29027ea4041cf32195bbada63c2c11d6cc58f67f04
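CONVERTER_TABLE in the file above stores each conversion in one direction only, as (unit1, unit2): factor where 1 unit1 equals factor unit2. A small sketch of how such a mapping is typically consumed (the entries are copied from the table; chaining across units, e.g. cups to teaspoons via tablespoons, would need a graph search that this sketch omits):

CONVERTER_TABLE = {("c", "Tbs"): 16, ("Tbs", "tsp"): 3, ("lb", "oz"): 16}

def convert(amount, unit_from, unit_to, table=CONVERTER_TABLE):
    """Convert via a direct table entry, checking both directions."""
    if (unit_from, unit_to) in table:
        return amount * table[(unit_from, unit_to)]
    if (unit_to, unit_from) in table:
        return amount / table[(unit_to, unit_from)]
    raise KeyError("no direct conversion from %s to %s" % (unit_from, unit_to))

print(convert(2, "c", "Tbs"))     # -> 32 (tablespoons in 2 cups)
print(convert(48, "tsp", "Tbs"))  # -> 16.0 (inverse direction)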
''' DowntimeCommand module
'''

import urllib2

from datetime import datetime, timedelta

from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.LCG.GOCDBClient import GOCDBClient
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getGOCSiteName, getGOCFTSName
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getStorageElementOptions, getFTS3Servers
from operator import itemgetter

__RCSID__ = '$Id: $'


class DowntimeCommand( Command ):
  '''
    Downtime "master" Command.
  '''

  def __init__( self, args = None, clients = None ):
    super( DowntimeCommand, self ).__init__( args, clients )

    if 'GOCDBClient' in self.apis:
      self.gClient = self.apis[ 'GOCDBClient' ]
    else:
      self.gClient = GOCDBClient()

    if 'ResourceManagementClient' in self.apis:
      self.rmClient = self.apis[ 'ResourceManagementClient' ]
    else:
      self.rmClient = ResourceManagementClient()

  def _storeCommand( self, result ):
    '''
      Stores the results of the doNew method in the database.
    '''
    for dt in result:
      resQuery = self.rmClient.addOrModifyDowntimeCache( downtimeID = dt[ 'DowntimeID' ],
                                                         element = dt[ 'Element' ],
                                                         name = dt[ 'Name' ],
                                                         startDate = dt[ 'StartDate' ],
                                                         endDate = dt[ 'EndDate' ],
                                                         severity = dt[ 'Severity' ],
                                                         description = dt[ 'Description' ],
                                                         link = dt[ 'Link' ],
                                                         gocdbServiceType = dt[ 'GOCDBServiceType' ] )
    return resQuery

  def _cleanCommand( self, element, elementNames ):
    '''
      Clear the cache from expired DTs.
    '''
    resQuery = []

    for elementName in elementNames:
      #reading all the cache entries
      result = self.rmClient.selectDowntimeCache( element = element, name = elementName )
      if not result[ 'OK' ]:
        return result

      uniformResult = [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ]
      currentDate = datetime.utcnow()

      if len( uniformResult ) == 0:
        return S_OK( None )

      for dt in uniformResult:
        if dt[ 'EndDate' ] < currentDate:
          result = self.rmClient.deleteDowntimeCache( downtimeID = dt[ 'DowntimeID' ] )
          resQuery.append( result )

    return S_OK( resQuery )

  def _prepareCommand( self ):
    '''
      DowntimeCommand requires the following arguments:
      - name : <str>
      - element : Site / Resource
      - elementType : <str>
      - hours : <int> (optional)

      If the elements are Site(s), we need to get their GOCDB names. They may
      not have one, in which case we ignore them.
    '''
    if 'name' not in self.args:
      return S_ERROR( '"name" not found in self.args' )
    elementName = self.args[ 'name' ]

    if 'element' not in self.args:
      return S_ERROR( '"element" not found in self.args' )
    element = self.args[ 'element' ]

    if 'elementType' not in self.args:
      return S_ERROR( '"elementType" not found in self.args' )
    elementType = self.args[ 'elementType' ]

    if element not in [ 'Site', 'Resource' ]:
      return S_ERROR( 'element is neither Site nor Resource' )

    hours = None
    if 'hours' in self.args:
      hours = self.args[ 'hours' ]

    gocdbServiceType = None

    # Transform DIRAC site names into GOCDB topics
    if element == 'Site':
      gocSite = getGOCSiteName( elementName )
      if not gocSite[ 'OK' ]:
        return gocSite
      elementName = gocSite[ 'Value' ]

    # The DIRAC SE names mean nothing on the grid, but their hosts do.
    elif elementType == 'StorageElement':
      # We need to distinguish if it's tape or disk
      if getStorageElementOptions( elementName )['Value']['TapeSE']:
        gocdbServiceType = "srm.nearline"
      elif getStorageElementOptions( elementName )['Value']['DiskSE']:
        gocdbServiceType = "srm"

      seHost = CSHelpers.getSEHost( elementName )
      if not seHost:
        return S_ERROR( 'No seHost for %s' % elementName )
      elementName = seHost

    elif elementType == 'FTS' or elementType == 'FTS3':
      gocdbServiceType = 'FTS'
      try:
        #WARNING: this method presupposes that the server is an FTS3 type
        elementName = getGOCFTSName( elementName )
      except:
        return S_ERROR( 'No FTS3 server specified in dirac.cfg (see Resources/FTSEndpoints)' )

    return S_OK( ( element, elementName, hours, gocdbServiceType ) )

  def doNew( self, masterParams = None ):
    '''
      Gets the parameters to run, either from the master method or from its
      own arguments.

      It queries the GOCDB client for every elementName (or for the given
      list of element names). The server is not very stable, so in case of
      failure it tries a second time.

      If there are downtimes, they are recorded and then returned.
    '''

    if masterParams is not None:
      element, elementNames = masterParams
      hours = 120
      elementName = None
      gocdbServiceType = None
    else:
      params = self._prepareCommand()
      if not params[ 'OK' ]:
        return params
      element, elementName, hours, gocdbServiceType = params[ 'Value' ]
      elementNames = [ elementName ]

    #WARNING: checking all the DT that are ongoing or starting in given <hours> from now
    try:
      results = self.gClient.getStatus( element, name = elementNames, startingInHours = hours )
    except urllib2.URLError:
      try:
        #Let's give it a second chance..
        results = self.gClient.getStatus( element, name = elementNames, startingInHours = hours )
      except urllib2.URLError, e:
        return S_ERROR( e )

    if not results[ 'OK' ]:
      return results
    results = results[ 'Value' ]

    if results is None:
      return S_OK( None )

    #cleaning the Cache
    cleanRes = self._cleanCommand( element, elementNames )
    if not cleanRes[ 'OK' ]:
      return cleanRes

    uniformResult = []

    # Humanize the results into a dictionary, not the most optimal, but readable
    for downtime, downDic in results.items():

      dt = {}

      if 'HOSTNAME' in downDic.keys():
        dt[ 'Name' ] = downDic[ 'HOSTNAME' ]
      elif 'SITENAME' in downDic.keys():
        dt[ 'Name' ] = downDic[ 'SITENAME' ]
      else:
        return S_ERROR( "SITENAME or HOSTNAME are missing" )

      if 'SERVICE_TYPE' in downDic.keys():
        dt[ 'GOCDBServiceType' ] = downDic[ 'SERVICE_TYPE' ]
        if gocdbServiceType:
          gocdbST = gocdbServiceType.lower()
          csST = downDic[ 'SERVICE_TYPE' ].lower()
          if gocdbST != csST:
            return S_ERROR( "SERVICE_TYPE mismatch between GOCDB (%s) and CS (%s) for %s" % ( gocdbST, csST, dt[ 'Name' ] ) )
      else:
        #WARNING: do we want None as default value?
        dt[ 'GOCDBServiceType' ] = None

      dt[ 'DowntimeID' ] = downtime
      dt[ 'Element' ] = element
      dt[ 'StartDate' ] = downDic[ 'FORMATED_START_DATE' ]
      dt[ 'EndDate' ] = downDic[ 'FORMATED_END_DATE' ]
      dt[ 'Severity' ] = downDic[ 'SEVERITY' ]
      dt[ 'Description' ] = downDic[ 'DESCRIPTION' ].replace( '\'', '' )
      dt[ 'Link' ] = downDic[ 'GOCDB_PORTAL_URL' ]

      uniformResult.append( dt )

    storeRes = self._storeCommand( uniformResult )
    if not storeRes[ 'OK' ]:
      return storeRes

    return S_OK()

  def doCache( self ):
    '''
      Method that reads the cache table. It will return a list with one
      dictionary describing the DT if there are results.
    '''

    params = self._prepareCommand()
    if not params[ 'OK' ]:
      return params
    element, elementName, hours, gocdbServiceType = params[ 'Value' ]

    result = self.rmClient.selectDowntimeCache( element = element, name = elementName,
                                                gocdbServiceType = gocdbServiceType )
    if not result[ 'OK' ]:
      return result

    uniformResult = [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ]

    #'targetDate' can be either now or some 'hours' later in the future
    targetDate = datetime.utcnow()

    #dtOverlapping is a buffer to assure only one dt is returned
    #when there are overlapping outage/warning dt for same element
    #on top of the buffer we put the most recent outages
    #while at the bottom the most recent warnings,
    #assumption: uniformResult list is already ordered by resource/site name, severity, startdate
    dtOverlapping = []

    if hours is not None:
      #IN THE FUTURE
      targetDate = targetDate + timedelta( hours = hours )
      #sorting by 'StartDate' b/c if we look for DTs in the future
      #then we are interested in the earliest DTs
      uniformResult.sort( key = itemgetter( 'Name', 'Severity', 'StartDate' ) )

      for dt in uniformResult:
        if ( dt[ 'StartDate' ] < targetDate ) and ( dt[ 'EndDate' ] > targetDate ):
          #the list is already ordered in a way that outages come first over warnings
          #and the earliest outages are on top of other outages and warnings
          #while the earliest warnings are on top of the other warnings
          #so whatever comes first in the list is also what we are looking for
          dtOverlapping = [ dt ]
          break
    else:
      #IN THE PRESENT
      #sorting by 'EndDate' b/c if we look for DTs in the present
      #then we are interested in those DTs that last longer
      uniformResult.sort( key = itemgetter( 'Name', 'Severity', 'EndDate' ) )

      for dt in uniformResult:
        if ( dt[ 'StartDate' ] < targetDate ) and ( dt[ 'EndDate' ] > targetDate ):
          #if outage, we put it on top of the overlapping buffer
          #i.e. the latest ending outage is on top
          if dt[ 'Severity' ].upper() == 'OUTAGE':
            dtOverlapping = [ dt ] + dtOverlapping
          #if warning, we put it at the bottom of the overlapping buffer
          #i.e. the latest ending warning is at the bottom
          elif dt[ 'Severity' ].upper() == 'WARNING':
            dtOverlapping.append( dt )

    result = None
    if len( dtOverlapping ) > 0:
      dtTop = dtOverlapping[ 0 ]
      dtBottom = dtOverlapping[ -1 ]
      if dtTop[ 'Severity' ].upper() == 'OUTAGE':
        result = dtTop
      else:
        result = dtBottom

    return S_OK( result )

  def doMaster( self ):
    '''
      Master method, which looks a little bit like spaghetti code, sorry!
      - It gets all sites and transforms them into gocSites.
      - It gets all the storage elements and transforms them into their hosts.
      - It gets the CEs (FTS and file catalogs will come).
    '''

    gocSites = CSHelpers.getGOCSites()
    if not gocSites[ 'OK' ]:
      return gocSites
    gocSites = gocSites[ 'Value' ]

    sesHosts = CSHelpers.getStorageElementsHosts()
    if not sesHosts[ 'OK' ]:
      return sesHosts
    sesHosts = sesHosts[ 'Value' ]

    resources = sesHosts

    ftsServer = getFTS3Servers()
    if ftsServer[ 'OK' ]:
      resources.extend( ftsServer[ 'Value' ] )

    #TODO: file catalogs also need to use their hosts
    #fc = CSHelpers.getFileCatalogs()
    #if fc[ 'OK' ]:
    #  resources = resources + fc[ 'Value' ]

    ce = CSHelpers.getComputingElements()
    if ce[ 'OK' ]:
      resources.extend( ce[ 'Value' ] )

    gLogger.verbose( 'Processing Sites: %s' % ', '.join( gocSites ) )

    siteRes = self.doNew( ( 'Site', gocSites ) )
    if not siteRes[ 'OK' ]:
      self.metrics[ 'failed' ].append( siteRes[ 'Message' ] )

    gLogger.verbose( 'Processing Resources: %s' % ', '.join( resources ) )

    resourceRes = self.doNew( ( 'Resource', resources ) )
    if not resourceRes[ 'OK' ]:
      self.metrics[ 'failed' ].append( resourceRes[ 'Message' ] )

    return S_OK( self.metrics )

################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
marcelovilaca/DIRAC
ResourceStatusSystem/Command/DowntimeCommand.py
Python
gpl-3.0
12,680
[ "DIRAC" ]
3265386f13d2622fd315a87ffc704b1475399048a5b8c6378abc0dbef80d733e
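The dtOverlapping buffer in doCache above reduces to a simple rule: among downtimes overlapping the target date, any OUTAGE beats any WARNING, and within a severity the latest-ending one wins. A compact standalone sketch of that selection with plain dicts (illustrative, not DIRAC code):

from datetime import datetime, timedelta

def pick_downtime(downtimes, target):
    """Pick the single downtime to report for `target`, mirroring doCache."""
    active = [dt for dt in downtimes
              if dt['StartDate'] < target < dt['EndDate']]
    outages = [dt for dt in active if dt['Severity'].upper() == 'OUTAGE']
    warns = [dt for dt in active if dt['Severity'].upper() == 'WARNING']
    if outages:
        return max(outages, key=lambda dt: dt['EndDate'])
    if warns:
        return max(warns, key=lambda dt: dt['EndDate'])
    return None

now = datetime.utcnow()
dts = [{'Severity': 'WARNING', 'StartDate': now - timedelta(hours=2), 'EndDate': now + timedelta(hours=2)},
       {'Severity': 'OUTAGE', 'StartDate': now - timedelta(hours=1), 'EndDate': now + timedelta(hours=1)}]
print(pick_downtime(dts, now)['Severity'])  # -> OUTAGE, despite the longer WARNING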
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- author: Jeroen Hoekx module: lvol short_description: Configure LVM logical volumes description: - This module creates, removes or resizes logical volumes. version_added: "1.1" options: vg: description: - The volume group this logical volume is part of. required: true lv: description: - The name of the logical volume. required: true size: description: - The size of the logical volume, according to lvcreate(8) --size, by default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE]; resizing is not supported with percentages. state: choices: [ "present", "absent" ] default: present description: - Control if the logical volume exists. required: false force: version_added: "1.5" choices: [ "yes", "no" ] default: "no" description: - Shrink or remove operations of volumes require this switch. Ensures that filesystems never get corrupted/destroyed by mistake. required: false notes: - Filesystems on top of the volume are not resized. ''' EXAMPLES = ''' # Create a logical volume of 512m. - lvol: vg=firefly lv=test size=512 # Create a logical volume of 512g. - lvol: vg=firefly lv=test size=512g # Create a logical volume the size of all remaining space in the volume group - lvol: vg=firefly lv=test size=100%FREE # Extend the logical volume to 1024m. - lvol: vg=firefly lv=test size=1024 # Reduce the logical volume to 512m - lvol: vg=firefly lv=test size=512 force=yes # Remove the logical volume. 
- lvol: vg=firefly lv=test state=absent force=yes ''' import re decimal_point = re.compile(r"(\.|,)") def parse_lvs(data): lvs = [] for line in data.splitlines(): parts = line.strip().split(';') lvs.append({ 'name': parts[0], 'size': int(decimal_point.split(parts[1])[0]), }) return lvs def main(): module = AnsibleModule( argument_spec=dict( vg=dict(required=True), lv=dict(required=True), size=dict(), state=dict(choices=["absent", "present"], default='present'), force=dict(type='bool', default='no'), ), supports_check_mode=True, ) vg = module.params['vg'] lv = module.params['lv'] size = module.params['size'] state = module.params['state'] force = module.boolean(module.params['force']) size_opt = 'L' size_unit = 'm' if size: # LVCREATE(8) -l --extents option with percentage if '%' in size: size_parts = size.split('%', 1) size_percent = int(size_parts[0]) if size_percent > 100: module.fail_json(msg="Size percentage cannot be larger than 100%") size_whole = size_parts[1] if size_whole == 'ORIGIN': module.fail_json(msg="Snapshot Volumes are not supported") elif size_whole not in ['VG', 'PVS', 'FREE']: module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE") size_opt = 'l' size_unit = '' # LVCREATE(8) -L --size option unit elif size[-1].isalpha(): if size[-1].lower() in 'bskmgtpe': size_unit = size[-1].lower() if size[0:-1].isdigit(): size = int(size[0:-1]) else: module.fail_json(msg="Bad size specification for unit %s" % size_unit) size_opt = 'L' else: module.fail_json(msg="Size unit should be one of [bBsSkKmMgGtTpPeE]") # when no unit, megabytes by default elif size.isdigit(): size = int(size) else: module.fail_json(msg="Bad size specification") if size_opt == 'l': unit = 'm' else: unit = size_unit lvs_cmd = module.get_bin_path("lvs", required=True) rc, current_lvs, err = module.run_command( "%s --noheadings -o lv_name,size --units %s --separator ';' %s" % (lvs_cmd, unit, vg)) if rc != 0: if state == 'absent': module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False) else: module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) changed = False lvs = parse_lvs(current_lvs) for test_lv in lvs: if test_lv['name'] == lv: this_lv = test_lv break else: this_lv = None if state == 'present' and not size: if this_lv is None: module.fail_json(msg="No size given.") else: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) msg = '' if this_lv is None: if state == 'present': ### create LV if module.check_mode: changed = True else: lvcreate_cmd = module.get_bin_path("lvcreate", required=True) rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) if rc == 0: changed = True else: module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) else: if state == 'absent': ### remove LV if module.check_mode: module.exit_json(changed=True) if not force: module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." 
% (this_lv['name'])) lvremove_cmd = module.get_bin_path("lvremove", required=True) rc, _, err = module.run_command("%s --force %s/%s" % (lvremove_cmd, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=True) else: module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err) elif size_opt == 'l': module.exit_json(changed=False, msg="Resizing extents with percentage not supported.") else: ### resize LV tool = None if size > this_lv['size']: tool = module.get_bin_path("lvextend", required=True) elif size < this_lv['size']: if not force: module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name'])) tool = module.get_bin_path("lvreduce", required=True) tool = '%s %s' % (tool, '--force') if tool: if module.check_mode: changed = True else: rc, _, err = module.run_command("%s -%s %s%s %s/%s" % (tool, size_opt, size, size_unit, vg, this_lv['name'])) if rc == 0: changed = True elif "matches existing size" in err: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) else: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) module.exit_json(changed=changed, msg=msg) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
lberruti/ansible-modules-extras
system/lvol.py
Python
gpl-3.0
8,229
[ "Firefly" ]
89e7e40f539453a5b89b41e36875c8368fd9c97be88b10c66de9f20d3d4a9df7
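The size handling in the lvol module above follows one decision tree: an extents form like 100%FREE maps to lvcreate -l, a trailing letter from bBsSkKmMgGtTpPeE selects a unit for lvcreate -L, and a bare number defaults to megabytes. A distilled sketch of that tree (not the module's code; the ORIGIN snapshot special case is omitted):

def parse_size(size):
    """Return (opt, value, unit): opt is 'l' for extents-%, 'L' for sizes."""
    if '%' in size:                        # lvcreate -l: percentage of extents
        percent, _, whole = size.partition('%')
        if int(percent) > 100:
            raise ValueError("Size percentage cannot be larger than 100%")
        if whole not in ('VG', 'PVS', 'FREE'):
            raise ValueError("Specify extents as a percentage of VG|PVS|FREE")
        return 'l', int(percent), whole
    if size[-1].isalpha():                 # lvcreate -L with an explicit unit
        if size[-1].lower() not in 'bskmgtpe':
            raise ValueError("Size unit should be one of [bBsSkKmMgGtTpPeE]")
        return 'L', int(size[:-1]), size[-1].lower()  # ValueError if not digits
    return 'L', int(size), 'm'             # bare number: megabytes by default

print(parse_size("512g"))      # -> ('L', 512, 'g')
print(parse_size("100%FREE"))  # -> ('l', 100, 'FREE')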
from __future__ import absolute_import import unittest from pymatgen.core.lattice import Lattice from pymatgen.core.structure import Structure from pymatgen.analysis.elasticity.tensors import Tensor from pymatgen.analysis.elasticity.strain import Strain, Deformation,\ convert_strain_to_deformation, DeformedStructureSet from pymatgen.util.testing import PymatgenTest import numpy as np import warnings class DeformationTest(PymatgenTest): def setUp(self): self.norm_defo = Deformation.from_index_amount((0, 0), 0.02) self.ind_defo = Deformation.from_index_amount((0, 1), 0.02) self.non_ind_defo = Deformation([[1.0, 0.02, 0.02], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) lattice = Lattice([[3.8401979337, 0.00, 0.00], [1.9200989668, 3.3257101909, 0.00], [0.00, -2.2171384943, 3.1355090603]]) self.structure = Structure(lattice, ["Si", "Si"], [[0, 0, 0], [0.75, 0.5, 0.75]]) def test_properties(self): # green_lagrange_strain self.assertArrayAlmostEqual(self.ind_defo.green_lagrange_strain, [[0., 0.01, 0.], [0.01, 0.0002, 0.], [0., 0., 0.]]) self.assertArrayAlmostEqual(self.non_ind_defo.green_lagrange_strain, [[0., 0.01, 0.01], [0.01, 0.0002, 0.0002], [0.01, 0.0002, 0.0002]]) def test_independence(self): self.assertFalse(self.non_ind_defo.is_independent()) self.assertEqual(self.ind_defo.get_perturbed_indices()[0], (0, 1)) def test_apply_to_structure(self): strained_norm = self.norm_defo.apply_to_structure(self.structure) strained_ind = self.ind_defo.apply_to_structure(self.structure) strained_non = self.non_ind_defo.apply_to_structure(self.structure) # Check lattices self.assertArrayAlmostEqual(strained_norm.lattice.matrix, [[3.9170018886, 0, 0], [1.958500946136, 3.32571019, 0], [0, -2.21713849, 3.13550906]]) self.assertArrayAlmostEqual(strained_ind.lattice.matrix, [[3.84019793, 0, 0], [1.9866132, 3.32571019, 0], [-0.04434277, -2.21713849, 3.13550906]]) self.assertArrayAlmostEqual(strained_non.lattice.matrix, [[3.84019793, 0, 0], [1.9866132, 3.3257102, 0], [0.0183674, -2.21713849, 3.13550906]]) # Check coordinates self.assertArrayAlmostEqual(strained_norm.sites[1].coords, [3.91700189, 1.224e-06, 2.3516318]) self.assertArrayAlmostEqual(strained_ind.sites[1].coords, [3.84019793, 1.224e-6, 2.3516318]) self.assertArrayAlmostEqual(strained_non.sites[1].coords, [3.8872306, 1.224e-6, 2.3516318]) # Check convention for applying transformation for vec, defo_vec in zip(self.structure.lattice.matrix, strained_non.lattice.matrix): new_vec = np.dot(self.non_ind_defo, np.transpose(vec)) self.assertArrayAlmostEqual(new_vec, defo_vec) for coord, defo_coord in zip(self.structure.cart_coords, strained_non.cart_coords): new_coord = np.dot(self.non_ind_defo, np.transpose(coord)) self.assertArrayAlmostEqual(new_coord, defo_coord) class StrainTest(PymatgenTest): def setUp(self): self.norm_str = Strain.from_deformation([[1.02, 0, 0], [0, 1, 0], [0, 0, 1]]) self.ind_str = Strain.from_deformation([[1, 0.02, 0], [0, 1, 0], [0, 0, 1]]) self.non_ind_str = Strain.from_deformation([[1, 0.02, 0.02], [0, 1, 0], [0, 0, 1]]) with warnings.catch_warnings(record=True): warnings.simplefilter("always") self.no_dfm = Strain([[0., 0.01, 0.], [0.01, 0.0002, 0.], [0., 0., 0.]]) def test_new(self): test_strain = Strain([[0., 0.01, 0.], [0.01, 0.0002, 0.], [0., 0., 0.]]) self.assertArrayAlmostEqual(test_strain.deformation_matrix.green_lagrange_strain, test_strain) self.assertRaises(ValueError, Strain, [[0.1, 0.1, 0], [0, 0, 0], [0, 0, 0]]) def test_from_deformation(self): self.assertArrayAlmostEqual(self.norm_str, [[0.0202, 0, 0], [0, 0, 0], [0, 0, 0]]) 
self.assertArrayAlmostEqual(self.ind_str, [[0., 0.01, 0.], [0.01, 0.0002, 0.], [0., 0., 0.]]) self.assertArrayAlmostEqual(self.non_ind_str, [[0., 0.01, 0.01], [0.01, 0.0002, 0.0002], [0.01, 0.0002, 0.0002]]) def test_from_index_amount(self): # From voigt index test = Strain.from_index_amount(2, 0.01) should_be = np.zeros((3, 3)) should_be[2, 2] = 0.01 self.assertArrayAlmostEqual(test, should_be) # from full-tensor index test = Strain.from_index_amount((1, 2), 0.01) should_be = np.zeros((3, 3)) should_be[1, 2] = should_be[2, 1] = 0.01 self.assertArrayAlmostEqual(test, should_be) def test_properties(self): # deformation matrix self.assertArrayAlmostEqual(self.ind_str.deformation_matrix, [[1, 0.02, 0], [0, 1, 0], [0, 0, 1]]) symm_dfm = Strain(self.no_dfm, dfm_shape="symmetric") self.assertArrayAlmostEqual(symm_dfm.deformation_matrix, [[0.99995,0.0099995, 0], [0.0099995,1.00015, 0], [0, 0, 1]]) self.assertArrayAlmostEqual(self.no_dfm.deformation_matrix, [[1, 0.02, 0], [0, 1, 0], [0, 0, 1]]) # voigt self.assertArrayAlmostEqual(self.non_ind_str.voigt, [0, 0.0002, 0.0002, 0.0004, 0.02, 0.02]) def test_convert_strain_to_deformation(self): strain = Tensor(np.random.random((3, 3))).symmetrized while not (np.linalg.eigvals(strain) > 0).all(): strain = Tensor(np.random.random((3, 3))).symmetrized upper = convert_strain_to_deformation(strain, shape="upper") symm = convert_strain_to_deformation(strain, shape="symmetric") self.assertArrayAlmostEqual(np.triu(upper), upper) self.assertTrue(Tensor(symm).is_symmetric()) for defo in upper, symm: self.assertArrayAlmostEqual(defo.green_lagrange_strain, strain) class DeformedStructureSetTest(PymatgenTest): def setUp(self): self.structure = self.get_structure("Sn") self.default_dss = DeformedStructureSet(self.structure) def test_init(self): self.assertEqual(self.structure, self.default_dss.undeformed_structure) # Test symmetry dss_symm = DeformedStructureSet(self.structure, symmetry=True) # Should be 4 strains for normal, 2 for shear (since +/- shear # are symmetrically equivalent) self.assertEqual(len(dss_symm), 6) if __name__ == '__main__': unittest.main()
matk86/pymatgen
pymatgen/analysis/elasticity/tests/test_strain.py
Python
mit
8,507
[ "pymatgen" ]
1247a47950da4676085d855a114d13b546c899d42bfc9c8fbeb9dd5c8387d2b8
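A quick NumPy check of the Green-Lagrange convention exercised in DeformationTest.test_properties above. This is a minimal sketch that stands alone without pymatgen; the matrix mirrors Deformation.from_index_amount((0, 1), 0.02) from the tests.

import numpy as np

# Deformation gradient with a single (0, 1) off-diagonal perturbation,
# as built by Deformation.from_index_amount((0, 1), 0.02) in the tests.
F = np.array([[1.0, 0.02, 0.0],
              [0.0, 1.00, 0.0],
              [0.0, 0.00, 1.0]])

# Green-Lagrange strain: E = (F^T F - I) / 2
E = 0.5 * (F.T.dot(F) - np.eye(3))
print(np.round(E, 6))
# [[0.     0.01   0.    ]
#  [0.01   0.0002 0.    ]
#  [0.     0.     0.    ]]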
#!/usr/bin/env python # Copyright 2014-2019 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: Qiming Sun <osirpt.sun@gmail.com> # ''' Restricted Open-shell Hartree-Fock ''' from functools import reduce import numpy import pyscf.gto from pyscf import lib from pyscf.lib import logger from pyscf.scf import hf from pyscf.scf import uhf from pyscf import __config__ WITH_META_LOWDIN = getattr(__config__, 'scf_analyze_with_meta_lowdin', True) MO_BASE = getattr(__config__, 'MO_BASE', 1) def init_guess_by_minao(mol): dm = hf.init_guess_by_minao(mol) return numpy.array((dm*.5, dm*.5)) def init_guess_by_atom(mol): dm = hf.init_guess_by_atom(mol) return numpy.array((dm*.5, dm*.5)) init_guess_by_huckel = uhf.init_guess_by_huckel init_guess_by_chkfile = uhf.init_guess_by_chkfile def get_fock(mf, h1e=None, s1e=None, vhf=None, dm=None, cycle=-1, diis=None, diis_start_cycle=None, level_shift_factor=None, damp_factor=None): '''Build fock matrix based on Roothaan's effective fock. See also :func:`get_roothaan_fock` ''' if h1e is None: h1e = mf.get_hcore() if s1e is None: s1e = mf.get_ovlp() if vhf is None: vhf = mf.get_veff(mf.mol, dm) if dm is None: dm = mf.make_rdm1() if isinstance(dm, numpy.ndarray) and dm.ndim == 2: dm = numpy.array((dm*.5, dm*.5)) # To Get orbital energy in get_occ, we saved alpha and beta fock, because # Roothaan effective Fock cannot provide correct orbital energy with `eig` # TODO, check other treatment J. Chem. Phys. 133, 141102 focka = h1e + vhf[0] fockb = h1e + vhf[1] f = get_roothaan_fock((focka,fockb), dm, s1e) if cycle < 0 and diis is None: # Not inside the SCF iteration return f if diis_start_cycle is None: diis_start_cycle = mf.diis_start_cycle if level_shift_factor is None: level_shift_factor = mf.level_shift if damp_factor is None: damp_factor = mf.damp dm_tot = dm[0] + dm[1] if 0 <= cycle < diis_start_cycle-1 and abs(damp_factor) > 1e-4: raise NotImplementedError('ROHF Fock-damping') if diis and cycle >= diis_start_cycle: f = diis.update(s1e, dm_tot, f, mf, h1e, vhf) if abs(level_shift_factor) > 1e-4: f = hf.level_shift(s1e, dm_tot*.5, f, level_shift_factor) f = lib.tag_array(f, focka=focka, fockb=fockb) return f def get_roothaan_fock(focka_fockb, dma_dmb, s): '''Roothaan's effective fock. Ref. 
http://www-theor.ch.cam.ac.uk/people/ross/thesis/node15.html ======== ======== ====== ========= space closed open virtual ======== ======== ====== ========= closed Fc Fb Fc open Fb Fc Fa virtual Fc Fa Fc ======== ======== ====== ========= where Fc = (Fa + Fb) / 2 Returns: Roothaan effective Fock matrix ''' nao = s.shape[0] focka, fockb = focka_fockb dma, dmb = dma_dmb fc = (focka + fockb) * .5 # Projector for core, open-shell, and virtual pc = numpy.dot(dmb, s) po = numpy.dot(dma-dmb, s) pv = numpy.eye(nao) - numpy.dot(dma, s) fock = reduce(numpy.dot, (pc.conj().T, fc, pc)) * .5 fock += reduce(numpy.dot, (po.conj().T, fc, po)) * .5 fock += reduce(numpy.dot, (pv.conj().T, fc, pv)) * .5 fock += reduce(numpy.dot, (po.conj().T, fockb, pc)) fock += reduce(numpy.dot, (po.conj().T, focka, pv)) fock += reduce(numpy.dot, (pv.conj().T, fc, pc)) fock = fock + fock.conj().T fock = lib.tag_array(fock, focka=focka, fockb=fockb) return fock def get_occ(mf, mo_energy=None, mo_coeff=None): '''Label the occupancies for each orbital. NOTE the occupancies are not assigned based on the orbital energy ordering. The first N orbitals are assigned to be occupied orbitals. Examples: >>> mol = gto.M(atom='H 0 0 0; O 0 0 1.1', spin=1) >>> mf = scf.hf.SCF(mol) >>> energy = numpy.array([-10., -1., 1, -2., 0, -3]) >>> mf.get_occ(energy) array([2, 2, 2, 2, 1, 0]) ''' if mo_energy is None: mo_energy = mf.mo_energy if getattr(mo_energy, 'mo_ea', None) is not None: mo_ea = mo_energy.mo_ea mo_eb = mo_energy.mo_eb else: mo_ea = mo_eb = mo_energy nmo = mo_ea.size mo_occ = numpy.zeros(nmo) if getattr(mf, 'nelec', None) is None: nelec = mf.mol.nelec else: nelec = mf.nelec if nelec[0] > nelec[1]: nocc, ncore = nelec else: ncore, nocc = nelec nopen = nocc - ncore mo_occ = _fill_rohf_occ(mo_energy, mo_ea, mo_eb, ncore, nopen) if mf.verbose >= logger.INFO and nocc < nmo and ncore > 0: ehomo = max(mo_energy[mo_occ> 0]) elumo = min(mo_energy[mo_occ==0]) if ehomo+1e-3 > elumo: logger.warn(mf, 'HOMO %.15g >= LUMO %.15g', ehomo, elumo) else: logger.info(mf, ' HOMO = %.15g LUMO = %.15g', ehomo, elumo) if nopen > 0 and mf.verbose >= logger.DEBUG: core_idx = mo_occ == 2 open_idx = mo_occ == 1 vir_idx = mo_occ == 0 logger.debug(mf, ' Roothaan | alpha | beta') logger.debug(mf, ' Highest 2-occ = %18.15g | %18.15g | %18.15g', max(mo_energy[core_idx]), max(mo_ea[core_idx]), max(mo_eb[core_idx])) logger.debug(mf, ' Lowest 0-occ = %18.15g | %18.15g | %18.15g', min(mo_energy[vir_idx]), min(mo_ea[vir_idx]), min(mo_eb[vir_idx])) for i in numpy.where(open_idx)[0]: logger.debug(mf, ' 1-occ = %18.15g | %18.15g | %18.15g', mo_energy[i], mo_ea[i], mo_eb[i]) if mf.verbose >= logger.DEBUG: numpy.set_printoptions(threshold=nmo) logger.debug(mf, ' Roothaan mo_energy =\n%s', mo_energy) logger.debug1(mf, ' alpha mo_energy =\n%s', mo_ea) logger.debug1(mf, ' beta mo_energy =\n%s', mo_eb) numpy.set_printoptions(threshold=1000) return mo_occ def _fill_rohf_occ(mo_energy, mo_energy_a, mo_energy_b, ncore, nopen): mo_occ = numpy.zeros_like(mo_energy) open_idx = [] core_sort = numpy.argsort(mo_energy) core_idx = core_sort[:ncore] if nopen > 0: open_idx = core_sort[ncore:] open_sort = numpy.argsort(mo_energy_a[open_idx]) open_idx = open_idx[open_sort[:nopen]] mo_occ[core_idx] = 2 mo_occ[open_idx] = 1 return mo_occ def get_grad(mo_coeff, mo_occ, fock): '''ROHF gradients is the off-diagonal block [co + cv + ov], where [ cc co cv ] [ oc oo ov ] [ vc vo vv ] ''' occidxa = mo_occ > 0 occidxb = mo_occ == 2 viridxa = ~occidxa viridxb = ~occidxb uniq_var_a = 
viridxa.reshape(-1,1) & occidxa uniq_var_b = viridxb.reshape(-1,1) & occidxb if getattr(fock, 'focka', None) is not None: focka = fock.focka fockb = fock.fockb elif isinstance(fock, (tuple, list)) or getattr(fock, 'ndim', None) == 3: focka, fockb = fock else: focka = fockb = fock focka = reduce(numpy.dot, (mo_coeff.conj().T, focka, mo_coeff)) fockb = reduce(numpy.dot, (mo_coeff.conj().T, fockb, mo_coeff)) g = numpy.zeros_like(focka) g[uniq_var_a] = focka[uniq_var_a] g[uniq_var_b] += fockb[uniq_var_b] return g[uniq_var_a | uniq_var_b] def make_rdm1(mo_coeff, mo_occ, **kwargs): '''One-particle density matrix. mo_occ is a 1D array, with occupancy 1 or 2. ''' if isinstance(mo_occ, numpy.ndarray) and mo_occ.ndim == 1: mo_occa = mo_occ > 0 mo_occb = mo_occ == 2 else: mo_occa, mo_occb = mo_occ dm_a = numpy.dot(mo_coeff*mo_occa, mo_coeff.conj().T) dm_b = numpy.dot(mo_coeff*mo_occb, mo_coeff.conj().T) return numpy.array((dm_a, dm_b)) def energy_elec(mf, dm=None, h1e=None, vhf=None): if dm is None: dm = mf.make_rdm1() elif isinstance(dm, numpy.ndarray) and dm.ndim == 2: dm = numpy.array((dm*.5, dm*.5)) return uhf.energy_elec(mf, dm, h1e, vhf) get_veff = uhf.get_veff def analyze(mf, verbose=logger.DEBUG, with_meta_lowdin=WITH_META_LOWDIN, **kwargs): '''Analyze the given SCF object: print orbital energies, occupancies; print orbital coefficients; Mulliken population analysis ''' from pyscf.lo import orth from pyscf.tools import dump_mat mo_energy = mf.mo_energy mo_occ = mf.mo_occ mo_coeff = mf.mo_coeff log = logger.new_logger(mf, verbose) if log.verbose >= logger.NOTE: mf.dump_scf_summary(log) log.note('**** MO energy ****') if getattr(mo_energy, 'mo_ea', None) is not None: mo_ea = mo_energy.mo_ea mo_eb = mo_energy.mo_eb log.note(' Roothaan | alpha | beta') for i,c in enumerate(mo_occ): log.note('MO #%-3d energy= %-18.15g | %-18.15g | %-18.15g occ= %g', i+MO_BASE, mo_energy[i], mo_ea[i], mo_eb[i], c) else: for i,c in enumerate(mo_occ): log.note('MO #%-3d energy= %-18.15g occ= %g', i+MO_BASE, mo_energy[i], c) ovlp_ao = mf.get_ovlp() if log.verbose >= logger.DEBUG: label = mf.mol.ao_labels() if with_meta_lowdin: log.debug(' ** MO coefficients (expansion on meta-Lowdin AOs) **') orth_coeff = orth.orth_ao(mf.mol, 'meta_lowdin', s=ovlp_ao) c = reduce(numpy.dot, (orth_coeff.conj().T, ovlp_ao, mo_coeff)) else: log.debug(' ** MO coefficients (expansion on AOs) **') c = mo_coeff dump_mat.dump_rec(mf.stdout, c, label, start=MO_BASE, **kwargs) dm = mf.make_rdm1(mo_coeff, mo_occ) if with_meta_lowdin: pop_and_charge = mf.mulliken_meta(mf.mol, dm, s=ovlp_ao, verbose=log) else: pop_and_charge = mf.mulliken_pop(mf.mol, dm, s=ovlp_ao, verbose=log) dip = mf.dip_moment(mf.mol, dm, verbose=log) return pop_and_charge, dip mulliken_pop = hf.mulliken_pop mulliken_meta = hf.mulliken_meta def canonicalize(mf, mo_coeff, mo_occ, fock=None): '''Canonicalization diagonalizes the Fock matrix within occupied, open, virtual subspaces separately (without changing occupancy). 
''' if getattr(fock, 'focka', None) is None: dm = mf.make_rdm1(mo_coeff, mo_occ) fock = mf.get_fock(dm=dm) mo_e, mo_coeff = hf.canonicalize(mf, mo_coeff, mo_occ, fock) fa, fb = fock.focka, fock.fockb mo_ea = numpy.einsum('pi,pi->i', mo_coeff.conj(), fa.dot(mo_coeff)).real mo_eb = numpy.einsum('pi,pi->i', mo_coeff.conj(), fb.dot(mo_coeff)).real mo_e = lib.tag_array(mo_e, mo_ea=mo_ea, mo_eb=mo_eb) return mo_e, mo_coeff dip_moment = hf.dip_moment # use UHF init_guess, get_veff, diis, and intermediates such as fock, vhf, dm # keep mo_energy, mo_coeff, mo_occ as RHF structure class ROHF(hf.RHF): __doc__ = hf.SCF.__doc__ def __init__(self, mol): hf.SCF.__init__(self, mol) self.nelec = None @property def nelec(self): if getattr(self, '_nelec', None) is not None: return self._nelec else: return self.mol.nelec @nelec.setter def nelec(self, x): self._nelec = x @property def nelectron_alpha(self): return self.nelec[0] @nelectron_alpha.setter def nelectron_alpha(self, x): logger.warn(self, 'WARN: Attribute .nelectron_alpha is deprecated. ' 'Set .nelec instead') #raise RuntimeError('API updates') self.nelec = (x, self.mol.nelectron-x) check_sanity = hf.SCF.check_sanity def dump_flags(self, verbose=None): hf.SCF.dump_flags(self, verbose) nelec = self.nelec logger.info(self, 'num. doubly occ = %d num. singly occ = %d', nelec[1], nelec[0]-nelec[1]) def init_guess_by_minao(self, mol=None): if mol is None: mol = self.mol return init_guess_by_minao(mol) def init_guess_by_atom(self, mol=None): if mol is None: mol = self.mol logger.info(self, 'Initial guess from the superposition of atomic densities.') return init_guess_by_atom(mol) def init_guess_by_huckel(self, mol=None): if mol is None: mol = self.mol logger.info(self, 'Initial guess from on-the-fly Huckel, doi:10.1021/acs.jctc.8b01089.') return init_guess_by_huckel(mol) def init_guess_by_1e(self, mol=None): if mol is None: mol = self.mol logger.info(self, 'Initial guess from hcore.') h1e = self.get_hcore(mol) s1e = self.get_ovlp(mol) mo_energy, mo_coeff = self.eig(h1e, s1e) mo_occ = self.get_occ(mo_energy, mo_coeff) return self.make_rdm1(mo_coeff, mo_occ) def init_guess_by_chkfile(self, chkfile=None, project=None): if chkfile is None: chkfile = self.chkfile return init_guess_by_chkfile(self.mol, chkfile, project=project) get_fock = get_fock get_occ = get_occ @lib.with_doc(hf.eig.__doc__) def eig(self, fock, s): e, c = self._eigh(fock, s) if getattr(fock, 'focka', None) is not None: mo_ea = numpy.einsum('pi,pi->i', c.conj(), fock.focka.dot(c)).real mo_eb = numpy.einsum('pi,pi->i', c.conj(), fock.fockb.dot(c)).real e = lib.tag_array(e, mo_ea=mo_ea, mo_eb=mo_eb) return e, c @lib.with_doc(get_grad.__doc__) def get_grad(self, mo_coeff, mo_occ, fock=None): if fock is None: dm1 = self.make_rdm1(mo_coeff, mo_occ) fock = self.get_hcore(self.mol) + self.get_veff(self.mol, dm1) return get_grad(mo_coeff, mo_occ, fock) @lib.with_doc(make_rdm1.__doc__) def make_rdm1(self, mo_coeff=None, mo_occ=None, **kwargs): if mo_coeff is None: mo_coeff = self.mo_coeff if mo_occ is None: mo_occ = self.mo_occ if self.mol.spin < 0: # Flip occupancies of alpha and beta orbitals mo_occ = (mo_occ == 2), (mo_occ > 0) return make_rdm1(mo_coeff, mo_occ, **kwargs) energy_elec = energy_elec @lib.with_doc(uhf.get_veff.__doc__) def get_veff(self, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1): if mol is None: mol = self.mol if dm is None: dm = self.make_rdm1() if isinstance(dm, numpy.ndarray) and dm.ndim == 2: dm = numpy.array((dm*.5, dm*.5)) if self._eri is not None or not self.direct_scf: if 
getattr(dm, 'mo_coeff', None) is not None: mo_coeff = dm.mo_coeff mo_occ_a = (dm.mo_occ > 0).astype(numpy.double) mo_occ_b = (dm.mo_occ ==2).astype(numpy.double) dm = lib.tag_array(dm, mo_coeff=(mo_coeff,mo_coeff), mo_occ=(mo_occ_a,mo_occ_b)) vj, vk = self.get_jk(mol, dm, hermi) vhf = vj[0] + vj[1] - vk else: ddm = dm - numpy.asarray(dm_last) vj, vk = self.get_jk(mol, ddm, hermi) vhf = vj[0] + vj[1] - vk vhf += numpy.asarray(vhf_last) return vhf @lib.with_doc(analyze.__doc__) def analyze(self, verbose=None, with_meta_lowdin=WITH_META_LOWDIN, **kwargs): if verbose is None: verbose = self.verbose return analyze(self, verbose, with_meta_lowdin, **kwargs) canonicalize = canonicalize def spin_square(self, mo_coeff=None, s=None): '''Spin square and multiplicity of ROHF determinant''' neleca, nelecb = self.nelec ms = (neleca - nelecb) * .5 ss = ms * (ms + 1) return ss, ms*2+1 def stability(self, internal=getattr(__config__, 'scf_stability_internal', True), external=getattr(__config__, 'scf_stability_external', False), verbose=None, return_status=False): ''' ROHF/ROKS stability analysis. See also pyscf.scf.stability.rohf_stability function. Kwargs: internal : bool Internal stability, within the RHF optimization space. external : bool External stability. It is not available in the current version. return_status: bool Whether to return `stable_i` and `stable_e` Returns: If return_status is False (default), the return value includes two sets of orbitals, which are closer to the stable condition. The first corresponds to the internal stability and the second corresponds to the external stability. Otherwise, two additional boolean variables (indicating the current status: stable or unstable) are also returned. The first corresponds to the internal stability and the second corresponds to the external stability. ''' from pyscf.scf.stability import rohf_stability return rohf_stability(self, internal, external, verbose, return_status) def nuc_grad_method(self): from pyscf.grad import rohf return rohf.Gradients(self) class HF1e(ROHF): def scf(self, *args): logger.info(self, '\n') logger.info(self, '******** 1 electron system ********') self.converged = True h1e = self.get_hcore(self.mol) s1e = self.get_ovlp(self.mol) self.mo_energy, self.mo_coeff = self.eig(h1e, s1e) self.mo_occ = self.get_occ(self.mo_energy, self.mo_coeff) self.e_tot = self.mo_energy[self.mo_occ>0][0] + self.mol.energy_nuc() self._finalize() return self.e_tot del(WITH_META_LOWDIN)
sunqm/pyscf
pyscf/scf/rohf.py
Python
apache-2.0
18,334
[ "PySCF" ]
bbacde3258c4308aa5dff1435f7df4064eb46adafcde5d56da6393ef3aa063f2
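A minimal usage sketch for the ROHF class defined in this file; the OH bond length is arbitrary, chosen only to give an open-shell (spin = 1) system.

from pyscf import gto, scf

mol = gto.M(atom='O 0 0 0; H 0 0 0.97', basis='sto-3g', spin=1)
mf = scf.ROHF(mol)       # driven by the get_fock/get_occ defined above
e_tot = mf.kernel()      # converged total energy

# spin_square() needs no integrals for ROHF: ms = (neleca - nelecb) / 2
print(mf.spin_square())  # -> (0.75, 2.0) for a doublet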
import sys from ase.test.tasks.dcdft import DeltaCodesDFTTask from gpaw.atom.configurations import parameters from gpaw.test.big.scf.analyse import rundefs if len(sys.argv) == 1: xc = 'PBE' run = None elif len(sys.argv) == 2: xc = 'PBE' run = sys.argv[1] elif len(sys.argv) == 3: xc = sys.argv[1] run = sys.argv[2] tag = 'scf_dcdft_%s_pw' % (xc.lower()) if run is not None: tag += '_%s' % run class Task(DeltaCodesDFTTask): def __init__(self, inititer=0, **kwargs): DeltaCodesDFTTask.__init__(self, **kwargs) self.inititer = inititer def calculate(self, name, atoms): data = DeltaCodesDFTTask.calculate(self, name, atoms) try: steps = atoms.get_calculator().get_number_of_iterations() + self.inititer except (AttributeError, NotImplementedError): steps = None data['calculator steps'] = steps return data from gpaw import ConvergenceError from gpaw import PW from gpaw.factory import GPAWFactory from gpaw.mixer import Mixer, MixerSum, MixerSum2, MixerDif from gpaw.mixer import FFTMixer, FFTMixerSum, FFTMixerDif from gpaw.mixer import BroydenMixer, BroydenMixerSum from gpaw.poisson import PoissonSolver from gpaw.occupations import FermiDirac, MethfesselPaxton class Factory(GPAWFactory): def __init__(self, show_text_output=False, write_gpw_file=None, inititer=0, **kwargs): GPAWFactory.__init__(self, show_text_output=show_text_output, write_gpw_file=write_gpw_file, **kwargs) self.inititer = inititer self.maxiter = kwargs['maxiter'] self.eigensolver = kwargs['eigensolver'] def __call__(self, name, atoms): calculator = GPAWFactory.__call__(self, name, atoms) if name.split('-')[0] in ['Li', 'Na']: # https://listserv.fysik.dtu.dk/pipermail/gpaw-developers/2012-May/002870.html calculator.set(h=0.11) if self.inititer > 0: try: calculator.set(eigensolver='cg') calculator.set(maxiter=self.inititer) atoms.set_calculator(calculator) atoms.get_potential_energy() except ConvergenceError: pass calculator.set(maxiter=self.maxiter) calculator.set(eigensolver=self.eigensolver) return calculator calcopts = { 'mode': PW(), 'xc': xc, # allow other mixers 'spinpol': True, # allow for long SCFs 'maxiter': 500, 'nbands': -5, } if run.startswith('inititer'): inititer = int(run[len('inititer'):len('inititer') + 2]) calcopts.update({'inititer': inititer}) else: inititer = 0 if run.startswith('bands'): nbands = run[len('bands'):len('bands') + 2] if nbands == '00': calcopts.update({'nbands': None}) else: calcopts.update({'nbands': - int(nbands)}) if run.startswith('cgbands'): nbands = run[len('cgbands'):len('cgbands') + 2] if nbands == '00': calcopts.update({'nbands': None}) else: calcopts.update({'nbands': - int(nbands)}) if run.startswith('dzpbands'): nbands = run[len('dzpbands'):len('dzpbands') + 2] if nbands == '00': calcopts.update({'nbands': None}) else: calcopts.update({'nbands': - int(nbands)}) if run.startswith('szpdzp'): calcopts.update({'basis': 'szp(dzp)'}) if run.startswith('szdzp'): calcopts.update({'basis': 'sz(dzp)'}) if run.startswith('dzp'): calcopts.update({'basis': 'dzp'}) if 'cg' in run: calcopts.update({'eigensolver': 'cg'}) elif 'dav' in run: calcopts.update({'eigensolver': 'dav'}) else: calcopts.update({'eigensolver': 'rmm-diis'}) if run.startswith('cgdzp'): calcopts.update({'basis': 'dzp'}) calcopts.update({'mixer': eval(rundefs[run])}) if 'mp' in run: calcopts.update({'occupations': MethfesselPaxton(0.1)}) if 'poisson' in run: calcopts.update({'poissonsolver': PoissonSolver(eps=1e-12)}) calcfactory = Factory(**calcopts) task = Task( inititer=inititer, calcfactory=calcfactory, tag=tag, use_lock_files=True, ) if
__name__ == '__main__': # run systems from collection for which we have setups keys = list(set(parameters.keys()).intersection(task.collection.names)) keys.sort() task.run(keys)
robwarm/gpaw-symm
gpaw/test/big/scf/dcdft_pbe_pw.py
Python
gpl-3.0
4,362
[ "ASE", "GPAW" ]
8cce479340b694d74b3ad53218175d7a182f1484fb34eafac7cb41c04f9d1067
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division) __metaclass__ = type import os import pwd import sys from string import ascii_letters, digits from six import string_types from six.moves import configparser from ansible.parsing.splitter import unquote from ansible.errors import AnsibleOptionsError # copied from utils, avoid circular reference fun :) def mk_boolean(value): if value is None: return False val = str(value) if val.lower() in [ "true", "t", "y", "1", "yes" ]: return True else: return False def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False): ''' return a configuration variable with casting ''' value = _get_config(p, section, key, env_var, default) if boolean: value = mk_boolean(value) if value: if integer: value = int(value) elif floating: value = float(value) elif islist: if isinstance(value, string_types): value = [x.strip() for x in value.split(',')] elif isinstance(value, string_types): value = unquote(value) return value def _get_config(p, section, key, env_var, default): ''' helper function for get_config ''' if env_var is not None: value = os.environ.get(env_var, None) if value is not None: return value if p is not None: try: return p.get(section, key, raw=True) except: return default return default def load_config_file(): ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' p = configparser.ConfigParser() path0 = os.getenv("ANSIBLE_CONFIG", None) if path0 is not None: path0 = os.path.expanduser(path0) path1 = os.getcwd() + "/ansible.cfg" path2 = os.path.expanduser("~/.ansible.cfg") path3 = "/etc/ansible/ansible.cfg" for path in [path0, path1, path2, path3]: if path is not None and os.path.exists(path): try: p.read(path) except configparser.Error as e: raise AnsibleOptionsError("Error reading config file: \n{0}".format(e)) return p, path return None, '' def shell_expand_path(path): ''' shell_expand_path is needed as os.path.expanduser does not work when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE ''' if path: path = os.path.expanduser(os.path.expandvars(path)) return path p, CONFIG_FILE = load_config_file() active_user = pwd.getpwuid(os.geteuid())[0] # check all of these extensions when looking for yaml files for things like # group variables -- really anything we can load YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] # sections in config file DEFAULTS='defaults' # generally configurable things DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts'))) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', 
None) DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command') DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*') DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True) DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '') DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8') DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True) DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True) DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user) DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True) DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None)) DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True) DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True) DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None)) DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart') DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True) DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True) DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}') DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True) DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None) DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh') DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) # selinux DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True) ### PRIVILEGE ESCALATION ### # Backwards Compat DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True) DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su') DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '') DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) 
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) # Become BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} #FIXME: deal with i18n BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas'] DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root') DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None) DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None) DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) # Plugin paths DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins') DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins') DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins') DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins') DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default') CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts') CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True) # Display ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True) ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True) ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True) DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 
'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True) DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True) HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True) SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True) DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True) DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True) RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) # obsolete -- will be formally removed ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True) ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True) ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True) ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys') ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700') ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600') ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True) PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) # galaxy related DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com') # this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True) # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" # 
non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] MODULE_NO_JSON = ['command', 'shell', 'raw'] DEFAULT_BECOME_PASS = None DEFAULT_SUDO_PASS = None DEFAULT_REMOTE_PASS = None DEFAULT_SUBSET = None DEFAULT_SU_PASS = None VAULT_VERSION_MIN = 1.0 VAULT_VERSION_MAX = 1.0
hnakamur/ansible
lib/ansible/constants.py
Python
gpl-3.0
16,032
[ "Galaxy" ]
a43d803cf63770762c0624bf73b2487c2243755cbfa344a008dd22ea58a87347
# -*- coding: utf-8 -*- # HiPart is a program to analyze the electronic structure of molecules with # fuzzy-atom partitioning methods. # Copyright (C) 2007 - 2012 Toon Verstraelen <Toon.Verstraelen@UGent.be> # # This file is part of HiPart. # # HiPart is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 3 # of the License, or (at your option) any later version. # # HiPart is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/> # #-- from hipart.schemes import scheme_classes from hipart.context import Context, Options as FakeOptions import os, tempfile, shutil, glob __all__ = [ "setup_gaussian", "iter_hf_sto3g_gaussian_schemes", "iter_hf_sto3g_gaussian_schemes_opts", "iter_oh1_sto3g_gaussian_schemes", "iter_oh2_sto3g_gaussian_schemes", "iter_h_sto3g_gaussian_schemes", ] def clean_txt(directory): if directory is not None: for fn_txt in glob.glob(os.path.join(directory, "*.txt")): os.remove(fn_txt) def setup_gaussian(fchk_name, densities_name): tmpdir = tempfile.mkdtemp("hipart") if not os.path.isdir("input"): raise IOError("Input directory with test files is not present") fn_fchk = os.path.join(tmpdir, "gaussian.fchk") shutil.copy("input/%s.fchk" % fchk_name, fn_fchk) fn_densities = os.path.join(tmpdir, "densities.txt") shutil.copy("input/densities_%s.txt" % densities_name, fn_densities) return tmpdir, fn_fchk, fn_densities def iter_hf_sto3g_gaussian_schemes(): options = FakeOptions() tmpdir, fn_fchk, fn_densities = setup_gaussian("hf_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) shutil.rmtree(tmpdir) tmpdir, fn_fchk, fn_densities = setup_gaussian("hf_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['hirshi'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirshi'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) shutil.rmtree(tmpdir) tmpdir, fn_fchk, fn_densities = setup_gaussian("hf_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['isa'].new_from_args(context, ["2e-5", "20.0", "100"]) clean_txt(context.output.directory) yield scheme_classes['isa'].new_from_args(context, []) shutil.rmtree(tmpdir) tmpdir, fn_fchk, fn_densities = setup_gaussian("hf_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['becke'].new_from_args(context, ["3", "2e-5", "20.0", "100"]) clean_txt(context.output.directory) yield scheme_classes['becke'].new_from_args(context, ["3"]) clean_txt(context.output.directory) yield scheme_classes['hirshi'].new_from_args(context, [fn_densities]) shutil.rmtree(tmpdir) def iter_hf_sto3g_gaussian_schemes_opts(): options = FakeOptions(do_clean=True) tmpdir, fn_fchk, fn_densities = setup_gaussian("hf_sto3g", "sto3g") for do_random in True, False: options.do_random = do_random for do_work in True, False: options.do_work = do_work for do_output in True, False: 
options.do_output = do_output for save_mem in True, False: options.save_mem = save_mem context = Context(fn_fchk, options) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) shutil.rmtree(tmpdir) def iter_oh1_sto3g_gaussian_schemes(): options = FakeOptions() tmpdir, fn_fchk, fn_densities = setup_gaussian("oh_rad1_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) shutil.rmtree(tmpdir) tmpdir, fn_fchk, fn_densities = setup_gaussian("oh_rad1_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['hirshi'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirshi'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) shutil.rmtree(tmpdir) tmpdir, fn_fchk, fn_densities = setup_gaussian("oh_rad1_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['isa'].new_from_args(context, ["2e-5", "20.0", "100"]) clean_txt(context.output.directory) yield scheme_classes['isa'].new_from_args(context, []) shutil.rmtree(tmpdir) def iter_oh2_sto3g_gaussian_schemes(): options = FakeOptions() tmpdir, fn_fchk, fn_densities = setup_gaussian("oh_rad2_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) shutil.rmtree(tmpdir) tmpdir, fn_fchk, fn_densities = setup_gaussian("oh_rad2_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['hirshi'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirshi'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) shutil.rmtree(tmpdir) tmpdir, fn_fchk, fn_densities = setup_gaussian("oh_rad2_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['isa'].new_from_args(context, ["2e-5", "20.0", "100"]) clean_txt(context.output.directory) yield scheme_classes['isa'].new_from_args(context, []) shutil.rmtree(tmpdir) def iter_h_sto3g_gaussian_schemes(): options = FakeOptions() tmpdir, fn_fchk, fn_densities = setup_gaussian("h_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) shutil.rmtree(tmpdir) tmpdir, fn_fchk, fn_densities = setup_gaussian("h_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['hirshi'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirshi'].new_from_args(context, [fn_densities]) clean_txt(context.output.directory) yield scheme_classes['hirsh'].new_from_args(context, [fn_densities]) shutil.rmtree(tmpdir) tmpdir, fn_fchk, fn_densities = setup_gaussian("h_sto3g", "sto3g") context = Context(fn_fchk, options) yield scheme_classes['isa'].new_from_args(context, ["2e-5", "20.0", "100"]) clean_txt(context.output.directory) yield scheme_classes['isa'].new_from_args(context, []) shutil.rmtree(tmpdir) tmpdir, fn_fchk, fn_densities = setup_gaussian("h_sto3g", "sto3g") context = 
Context(fn_fchk, options) yield scheme_classes['becke'].new_from_args(context, ["2e-5", "20.0", "100"]) clean_txt(context.output.directory) yield scheme_classes['becke'].new_from_args(context, []) shutil.rmtree(tmpdir)
molmod/hipart
hipart/tests/utils.py
Python
gpl-3.0
7,966
[ "Gaussian" ]
4e32dc948c91bc7e2b24c95bcae6294221beeb7d27c18258c388065b16f4994f
""" Ensure that we can use pathlib.Path objects in all relevant IO functions. """ import sys try: from pathlib import Path except ImportError: # Not available. No fallback import, since we'll skip the entire # test suite for Python < 3.6. pass import numpy as np from numpy.testing import assert_ import pytest import scipy.io import scipy.io.wavfile from scipy._lib._tmpdirs import tempdir import scipy.sparse @pytest.mark.skipif(sys.version_info < (3, 6), reason='Passing path-like objects to IO functions requires Python >= 3.6') class TestPaths(object): data = np.arange(5).astype(np.int64) def test_savemat(self): with tempdir() as temp_dir: path = Path(temp_dir) / 'data.mat' scipy.io.savemat(path, {'data': self.data}) assert_(path.is_file()) def test_loadmat(self): # Save data with string path, load with pathlib.Path with tempdir() as temp_dir: path = Path(temp_dir) / 'data.mat' scipy.io.savemat(str(path), {'data': self.data}) mat_contents = scipy.io.loadmat(path) assert_((mat_contents['data'] == self.data).all()) def test_whosmat(self): # Save data with string path, load with pathlib.Path with tempdir() as temp_dir: path = Path(temp_dir) / 'data.mat' scipy.io.savemat(str(path), {'data': self.data}) contents = scipy.io.whosmat(path) assert_(contents[0] == ('data', (1, 5), 'int64')) def test_readsav(self): path = Path(__file__).parent / 'data/scalar_string.sav' scipy.io.readsav(path) def test_hb_read(self): # Save data with string path, load with pathlib.Path with tempdir() as temp_dir: data = scipy.sparse.csr_matrix(scipy.sparse.eye(3)) path = Path(temp_dir) / 'data.hb' scipy.io.harwell_boeing.hb_write(str(path), data) data_new = scipy.io.harwell_boeing.hb_read(path) assert_((data_new != data).nnz == 0) def test_hb_write(self): with tempdir() as temp_dir: data = scipy.sparse.csr_matrix(scipy.sparse.eye(3)) path = Path(temp_dir) / 'data.hb' scipy.io.harwell_boeing.hb_write(path, data) assert_(path.is_file()) def test_netcdf_file(self): path = Path(__file__).parent / 'data/example_1.nc' scipy.io.netcdf.netcdf_file(path) def test_wavfile_read(self): path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav' scipy.io.wavfile.read(path) def test_wavfile_write(self): # Read from str path, write to Path input_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav' rate, data = scipy.io.wavfile.read(str(input_path)) with tempdir() as temp_dir: output_path = Path(temp_dir) / input_path.name scipy.io.wavfile.write(output_path, rate, data)
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/scipy/io/tests/test_paths.py
Python
mit
2,994
[ "NetCDF" ]
5439a650dc99de48801c507ef5e35d144b37b2ce19cd9e19e0f9b5554224445a
# -*- coding: cp1252 -*- # Copyright (C) 2011 - 2015 The Board of Regents of the University of Wisconsin System # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # """ This dialog implements the Transana Media Conversion Dialog class. It requires the Transana-specific FFMpeg build for the platform being used. """ __author__ = 'David Woods <dwoods@wcer.wisc.edu>' DEBUG = False if DEBUG: print "MediaConvert DEBUG is ON!!!" # import wxPython import wx # if running stand-alone (for testing) if __name__ == '__main__': # This module expects i18n. Enable it here. __builtins__._ = wx.GetTranslation # if NOT running stand-alone else: # Import Transana's Database Interface import DBInterface # Import Transana's Dialogs import Dialogs # Import Transana's Miscellaneous Functions import Misc # Import Transana's Constants import TransanaConstants # import Transana's Global variables import TransanaGlobal # import Transana's Waveform Progress Dialog (no longer just for Waveform Audio Extration!) import WaveformProgress # import the Python exceptions module import exceptions # import Python's multiprocessing module import multiprocessing # Import Python's os and sys modules import os, sys # import Python's shutil for fast file copies import shutil # We MUST disable MPEG-1, MPEG-2, and MP3 formats for legal reasons. ENABLE_MPG = False # DO NOT CHANGE THIS VALUE # We never got a response regarding the legalities of using the MOV format. # We've disabled it to be certain we are within the law. ENABLE_MOV = False if ENABLE_MPG or ENABLE_MOV: print "MediaConvert: MPG or MOV format enabled!!" # This simple derived class let's the user drop files onto an edit box class EditBoxFileDropTarget(wx.FileDropTarget): def __init__(self, editbox): wx.FileDropTarget.__init__(self) self.editbox = editbox def OnDropFiles(self, x, y, files): """Called when a file is dragged onto the edit box.""" self.editbox.SetValue(files[0]) class MediaConvert(wx.Dialog): """ Transana's Media Conversion Tool Dialog Box. """ def __init__(self, parent, fileName='', clipStart=0, clipDuration=0, clipName='', snapshot=False): """ Initialize the MediaConvert Dialog Box. """ # There's a bug. I think it's an interaction between OS X 10.7.5 and earlier (but not 10.8.4), wxPython (version # unknown, but I've seen it in 2.8.12.1, 2.9.4.0, and a pre-release build of 2.9.5.0), and the build process. # Basically, if you open a wxFileDialog object, something bad happens with wxLocale such that subsequent calls # to wxProcess don't encode unicode file names correctly, so FFMpeg fails. Only OS X 10.7.5, only when Transana # has been built (not running through the interpreter), only following the opening of a wxFileDialog, and only # with filenames with accented characters or characters from non-English languages. # The resolution to this bug is to reset the Locale here, based on Transana's current wxLocale setting in the # menuWindow object. 
self.locale = wx.Locale(TransanaGlobal.menuWindow.locale.Language) # Remember the parent self.parent = parent # Remember the File Name passed in self.fileName = fileName # Start exception handling try: # Note the number of computer cores available self.cpu_count = multiprocessing.cpu_count() # If this raises an exception ... except: # ... then indicate the number of cpus is unknown self.cpu_count = 0 # If we're on Windows ... if 'wxMSW' in wx.PlatformInfo: # ... specify a Temporary Path for handling non-cp1252 files self.tmpPath = 'C:\\Temp_Transana' # Set the Temporary Filename to a blank. (This is used for non-cp1252 filenames, which ffmpeg can't handle on Windows.) self.tmpFileName = '' # Remember Clip Start Point self.clipStart = clipStart # Remember Clip Duration self.clipDuration = clipDuration # Remember the Clip Name self.clipName = clipName # Remember snapshot setting (Are we capturing a still from a video file?) self.snapshot = snapshot # We need to know if we have successfully taken a snapshot self.snapshotSuccess = False # Initialize the process variable self.process = None # Create a dictionary for remembering the number of conversion processes currently running self.runningConversions = {} # Initialize all media file variables self.Reset() # Create the Dialog wx.Dialog.__init__(self, parent, -1, _('Media File Conversion'), size=wx.Size(600, 700), style=wx.CAPTION | wx.THICK_FRAME) # To look right, the Mac needs the Small Window Variant. if "__WXMAC__" in wx.PlatformInfo: self.SetWindowVariant(wx.WINDOW_VARIANT_SMALL) # Create the main Sizer, which will hold the box1, box2, etc. and boxButton sizers box = wx.BoxSizer(wx.VERTICAL) # Add the Source File label lblSource = wx.StaticText(self, -1, _("Source Media File:")) box.Add(lblSource, 0, wx.TOP | wx.LEFT | wx.RIGHT, 10) # Create the box1 sizer, which will hold the source file and its browse button box1 = wx.BoxSizer(wx.HORIZONTAL) # If we are in the DEMO version and are not taking a snapshot ... if TransanaConstants.demoVersion and not snapshot: # ... use the file name to indicate that the Media Conversion tool is disabled fileName = _('Media Conversion is disabled in the Demo version.') # Create the Source File text box self.txtSrcFileName = wx.TextCtrl(self, -1, fileName) # If we are exporting a Clip ... if (self.clipDuration > 0) or (TransanaConstants.demoVersion and not snapshot): # ... then we need to disable the Source Media File text box self.txtSrcFileName.Enable(False) # If we're NOT exporting a Clip ... else: # Make the Source File a File Drop Target self.txtSrcFileName.SetDropTarget(EditBoxFileDropTarget(self.txtSrcFileName)) # Handle ALL changes to the source filename self.txtSrcFileName.Bind(wx.EVT_TEXT, self.OnSrcFileNameChange) box1.Add(self.txtSrcFileName, 1, wx.EXPAND) # Spacer box1.Add((4, 0)) # Create the Source File Browse button self.srcBrowse = wx.Button(self, -1, _("Browse")) # If we are exporting a Clip ... if (self.clipDuration > 0) or (TransanaConstants.demoVersion and not snapshot): # ... 
then we need to disable the Source Media File Browse button self.srcBrowse.Enable(False) self.srcBrowse.Bind(wx.EVT_BUTTON, self.OnBrowse) box1.Add(self.srcBrowse, 0) # Add the Source Sizer to the Main Sizer box.Add(box1, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 10) # Add the Format label lblFormat = wx.StaticText(self, -1, _("Format:")) box.Add(lblFormat, 0, wx.LEFT | wx.RIGHT, 10) # Add the Format Choice Box with no options initially self.format = wx.Choice(self, -1, choices=[]) # Fix problems with Right-To-Left languages self.format.SetLayoutDirection(wx.Layout_LeftToRight) # Disable the Format box initially self.format.Enable(False) self.format.Bind(wx.EVT_CHOICE, self.OnFormat) box.Add(self.format, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 10) # Add the Destination File label lblDest = wx.StaticText(self, -1, _("Destination Media File:")) box.Add(lblDest, 0, wx.LEFT | wx.RIGHT, 10) # Create the box2 horizontal sizer for the Destination File and its Browse button box2 = wx.BoxSizer(wx.HORIZONTAL) # Create the Destination File text box self.txtDestFileName = wx.TextCtrl(self, -1) # Set the layout Direction to LtR so file names don't get mangled self.txtDestFileName.SetLayoutDirection(wx.Layout_LeftToRight) # Disable the Destination File box initially self.txtDestFileName.Enable(False) box2.Add(self.txtDestFileName, 1, wx.EXPAND) # Spacer box2.Add((4, 0)) # Create the Destination File Browse button self.destBrowse = wx.Button(self, -1, _("Browse")) # Disable the Destination File Browse button initially self.destBrowse.Enable(False) self.destBrowse.Bind(wx.EVT_BUTTON, self.OnBrowse) box2.Add(self.destBrowse, 0) # Add the Destination Sizer to the Main Sizer box.Add(box2, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 10) # Add the Video Parameters label lblVideo = wx.StaticText(self, -1, _("Video Parameters:")) box.Add(lblVideo, 0, wx.LEFT | wx.RIGHT, 10) # Create the box3 horizontal sizer for the Video Parameters box3 = wx.BoxSizer(wx.HORIZONTAL) # Add the Video Size label lblVideoSize = wx.StaticText(self, -1, _("Size:")) box3.Add(lblVideoSize, 0, wx.RIGHT, 10) # This does not work!!!! 
# if TransanaGlobal.configData.LayoutDirection == wx.Layout_RightToLeft: # style = wx.ALIGN_RIGHT # else: # style = 0 # Add the Video Size choice box, initially empty self.videoSize = wx.Choice(self, -1, choices=[]) # , style=style # Fix problems with Right-To-Left languages self.videoSize.SetLayoutDirection(wx.Layout_LeftToRight) # Disable Video Size initially self.videoSize.Enable(False) box3.Add(self.videoSize, 1, wx.RIGHT, 10) # Add the Video Bit Rate label lblVideoBitrate = wx.StaticText(self, -1, _("Bit Rate: (kb/s)")) box3.Add(lblVideoBitrate, 0, wx.RIGHT, 10) # Add the Video Bitrate choice box self.videoBitrate = wx.Choice(self, -1, choices=[]) # Disable the Video Bit Rate box initially self.videoBitrate.Enable(False) box3.Add(self.videoBitrate, 1, wx.RIGHT, 10) # Add the Video Parameters sizer to the Main Sizer box.Add(box3, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 10) # Add the Audio Parameters label lblAudio = wx.StaticText(self, -1, _("Audio Parameters:")) box.Add(lblAudio, 0, wx.LEFT | wx.RIGHT, 10) # Create the box4 horizontal sizer for Audio Parameters box4 = wx.BoxSizer(wx.HORIZONTAL) # Add the Audio Bit Rate label lblAudioBitrate = wx.StaticText(self, -1, _("Bit Rate: (kb/s)")) box4.Add(lblAudioBitrate, 0, wx.RIGHT, 10) # Add the Audio Bit Rate choice box self.audioBitrate = wx.Choice(self, -1, choices=[]) # Disable the Audio Choice Box initially self.audioBitrate.Enable(False) box4.Add(self.audioBitrate, 4, wx.RIGHT, 10) # Add the Audio Sample Rate label lblAudioSampleRate = wx.StaticText(self, -1, _("Sample Rate: (Hz)")) box4.Add(lblAudioSampleRate, 0, wx.RIGHT, 10) # Add the Audio Sample choice box self.audioSampleRate = wx.Choice(self, -1, choices=[]) # Disable the Audio Sample box initially self.audioSampleRate.Enable(False) box4.Add(self.audioSampleRate, 4, wx.RIGHT, 10) # Add the Audio Parameters sizer to the Main Sizer box.Add(box4, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 10) # Add the Still Images label lblStill = wx.StaticText(self, -1, _("Still Image Parameters:")) box.Add(lblStill, 0, wx.LEFT | wx.RIGHT, 10) # Create the box6 horizontal sizer for Still Image Parameters box6 = wx.BoxSizer(wx.HORIZONTAL) # Add the Still Frame Rate label lblStillFrameRate = wx.StaticText(self, -1, _("Seconds between images:")) box6.Add(lblStillFrameRate, 0, wx.RIGHT, 10) # Add the Still Frame Rate choice box stillFrameRateChoices = [_("20 seconds"), _("15 seconds"), _("10 seconds"), _("5 seconds"), _("1 second")] self.stillFrameRate = wx.Choice(self, -1, choices=stillFrameRateChoices) # Select the first entry self.stillFrameRate.SetSelection(0) # Disable the Still Framee box initially self.stillFrameRate.Enable(False) box6.Add(self.stillFrameRate, 4, wx.RIGHT, 10) # Add the Still Images Parameters sizer to the Main Sizer box.Add(box6, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 10) # If we are exporting Clip Video ... 
if self.clipDuration > 0: # Add the Clip Parameters label lblClip = wx.StaticText(self, -1, _("Clip Parameters:")) box.Add(lblClip, 0, wx.LEFT | wx.RIGHT, 10) # Create the box5 horizontal sizer for the Clip Parameters box5 = wx.BoxSizer(wx.HORIZONTAL) # Add the Start Time label lblClipStart = wx.StaticText(self, -1, _("Clip Start Time:")) box5.Add(lblClipStart, 0, wx.RIGHT, 10) # Add the Clip Start Time box self.txtClipStartTime = wx.TextCtrl(self, -1, Misc.time_in_ms_to_str(self.clipStart, True)) # Disable Clip Start Time self.txtClipStartTime.Enable(False) box5.Add(self.txtClipStartTime, 1, wx.RIGHT, 10) # Add the Duration label lblClipDuration = wx.StaticText(self, -1, _("Clip Duration:")) box5.Add(lblClipDuration, 0, wx.RIGHT, 10) # Add the Clip Duration box self.txtClipDuration = wx.TextCtrl(self, -1, Misc.time_in_ms_to_str(self.clipDuration, True)) # Disable Clip Duration self.txtClipDuration.Enable(False) box5.Add(self.txtClipDuration, 1, wx.RIGHT, 10) # Add the Clip Parameters sizer to the Main Sizer box.Add(box5, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 10) # Add the Information label lblMemo = wx.StaticText(self, -1, _("Information:")) box.Add(lblMemo, 0, wx.LEFT | wx.RIGHT, 10) # Add the Information text control self.memo = wx.TextCtrl(self, -1, style = wx.TE_MULTILINE) box.Add(self.memo, 1, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 10) # Add the FFmpeg label lblFFmpeg = wx.StaticText(self, -1, _("Transana's Media File Conversion tool is powered by FFmpeg.")) box.Add(lblFFmpeg, 0, wx.LEFT | wx.RIGHT | wx.BOTTOM, 10) # Create the boxButtons sizer, which will hold the dialog box's buttons boxButtons = wx.BoxSizer(wx.HORIZONTAL) # Create a Convert button self.btnConvert = wx.Button(self, -1, _("Convert")) # Set this as the default button self.btnConvert.SetDefault() self.btnConvert.Bind(wx.EVT_BUTTON, self.OnConvert) boxButtons.Add(self.btnConvert, 0, wx.ALIGN_RIGHT | wx.ALIGN_BOTTOM | wx.RIGHT, 10) # If we are in the DEMO version and are not taking a snapshot ... if TransanaConstants.demoVersion and not snapshot: # ... then disable the Convert button self.btnConvert.Enable(False) # Create a Close button self.btnClose = wx.Button(self, wx.ID_CANCEL, _("Close")) boxButtons.Add(self.btnClose, 1, wx.EXPAND | wx.ALIGN_RIGHT | wx.ALIGN_BOTTOM | wx.RIGHT, 10) self.Bind(wx.EVT_CLOSE, self.OnClose) # Create a Help button btnHelp = wx.Button(self, -1, _("Help")) btnHelp.Bind(wx.EVT_BUTTON, self.OnHelp) boxButtons.Add(btnHelp, 0, wx.ALIGN_RIGHT | wx.ALIGN_BOTTOM, 10) # Add the boxButtons sizer to the main box sizer box.Add(boxButtons, 0, wx.ALIGN_RIGHT | wx.ALIGN_BOTTOM | wx.LEFT | wx.RIGHT | wx.BOTTOM, 10) # Define box as the form's main sizer self.SetSizer(box) # Set this as the minimum size for the form. self.SetSizeHints(minW = self.GetSize()[0], minH = int(self.GetSize()[1] * 0.75)) # Tell the form to maintain the layout and have it set the initial Layout self.SetAutoLayout(True) self.Layout() # Position the form in the center of the screen self.CentreOnScreen() # If NOT running stand-alone (for testing) ... if __name__ != '__main__': # ... start with Transana's Video Root as the initial path self.lastPath = TransanaGlobal.configData.videoPath # If running stand-alone (for testing) ... else: # ... use the current directory as the initial path self.lastPath = os.path.dirname(sys.argv[0]) # If a file name was passed in as a parameter ... if (self.fileName != '') and (not (TransanaConstants.demoVersion) or snapshot): # ...
process that file to prepare for conversion self.ProcessMediaFile(self.fileName) def Reset(self): """ Initialize or Reset all variables associated with the Media File to be converted """ # initialize the process variable self.process = None # Media File Duration self.duration = 0 # Overall Bit Rate self.bitrate = 0 # Number of Streams self.streams = 0 # Contains a Video Stream? self.vidStream = False # Video Codec Name self.vidCodec = '' # Video Picture Format (see FFmpeg) self.vidPixFmt = '' # Video Bit Rate self.vidBitrate = 0 # Video Picture Width self.vidSizeW = 0 # Video Picture Height self.vidSizeH = 0 # Video Frame Rate self.vidFrameRate = 0 # Contains an Audio Stream? self.audStream = False # Audio Codec Name self.audCodec = '' # Audio Bit Rate self.audBitrate = 0 # Audio Sample Rate self.audSampleRate = 0 # Number of Audio Channels self.audChannels = 0 # Temporary File Name self.tmpFileName = '' # File Extension for converted file self.ext = '' def ProcessMediaFile(self, inputFile): """ Process a Media File to see what it's made of, a pre-requisite to converting it. This process also populates the form's options. """ # If we're messing with wxProcess, we need to define a function to clean up if we shut down! def __del__(self): # If a process has been defined ... if self.process is not None: # ... detach it self.process.Detach() # ... Close its output self.process.CloseOutput() # ... and de-reference it self.process = None # Reset (re-initialize) all media file variables self.Reset() # Clear the Information box self.memo.Clear() # If we're on Windows ... if 'wxMSW' in wx.PlatformInfo: # Start exception handling try: # Initialize the file name for the error message testFileName = self.fileName # Find out if the file name can be converted to cp1252 encoding. # FFmpeg cannot handle files that are not cp1252 encodable on Windows! testFileName = self.fileName.encode('cp1252') # If the above doesn't trigger an exception ... # Separate path and file name (self.lastPath, filename) = os.path.split(self.fileName) # Find out what the output file name would be self.SetOutputFilename(filename) # Get the Destination File Name from the text control testFileName = self.txtDestFileName.GetValue() # Re-disable the Destination File Name and Browse controls that were accidentally enabled by SetOutputFilename() self.txtDestFileName.Enable(False) self.destBrowse.Enable(False) # In Chinese etc., the output file name may include non-cp1252 characters even if the input file doesn't! testFileName = testFileName.encode('cp1252') # Handle Unicode Error exceptions except exceptions.UnicodeEncodeError: # If we end up here, we have a file or path that is NOT cp1252 encodable. # Inform the user prompt = unicode(_('The file name "%s" is not compatible with FFmpeg. Your file is being temporarily copied for processing.'), 'utf8') self.memo.AppendText(prompt % testFileName + '\n\n') # Remember the ORIGINAL file name. We'll need it later. self.tmpFileName = self.fileName # Break the file name into file name and extension (filename, ext) = os.path.splitext(self.fileName) # Create a temporary file name, made up of the cp1252 compatible tmpPath, "temp", and the file's correct extension self.fileName = os.path.join(self.tmpPath, 'Input' + ext) # We need to use this temporary file name for the inputFile too! inputFile = self.fileName # If the cp1252 compatible path does not exist ... if not os.path.exists(self.tmpPath): # ... create it! 
os.mkdir(self.tmpPath) # Copy the non-cp1252 file to the cp1252-compatible path with a cp1252-compatible file name. shutil.copyfile(self.tmpFileName, self.fileName) # Reset all Conversion parameter items on the form # Clear and Disable the Destination File Name self.txtDestFileName.SetValue('') self.txtDestFileName.Enable(False) # Disable the Destination File Name Browse Button self.destBrowse.Enable(False) # Clear and Disable the Format choice box self.format.Clear() self.format.Enable(False) # Clear and Disable the Video Size choice box self.videoSize.Clear() self.videoSize.Enable(False) # Clear and Disable the Video Bit Rate choice box self.videoBitrate.Clear() self.videoBitrate.Enable(False) # Clear and Disable the Audio Bit Rate choice box self.audioBitrate.Clear() self.audioBitrate.Enable(False) # Clear and Disable the Audio Sampling Rate choice box self.audioSampleRate.Clear() self.audioSampleRate.Enable(False) # Be prepared to capture the wxProcess' EVT_END_PROCESS self.Bind(wx.EVT_END_PROCESS, self.OnEndProcess) # Windows requires that we change the default encoding for Python for the audio extraction code to work # properly with Unicode files (!!!) This isn't needed on OS X, as its default file system encoding is utf-8. # See python's documentation for sys.getfilesystemencoding() if 'wxMSW' in wx.PlatformInfo: # Set the Python Encoding to match the File System Encoding wx.SetDefaultPyEncoding(sys.getfilesystemencoding()) # Just use the File Name, no encoding needed tempMediaFilename = inputFile if DEBUG: self.memo.AppendText("MediaConvert.ProcessMediaFile():\n") self.memo.AppendText("%s\n (%d) exists: %s\n" % (tempMediaFilename, len(tempMediaFilename), os.path.exists(tempMediaFilename))) statinfo = os.stat(tempMediaFilename) self.memo.AppendText(" size: %s\n\n" % statinfo.st_size) self.memo.AppendText(" type: %s, defaultPyEncoding: %s, filesystemencoding: %s, Transana's Encoding: %s\n\n" % (type(tempMediaFilename), wx.GetDefaultPyEncoding(), sys.getfilesystemencoding(), TransanaGlobal.encoding)) for tmpX in range(len(tempMediaFilename)): self.memo.AppendText(" - %d %s %d\n" % ( tmpX, tempMediaFilename[tmpX], ord(tempMediaFilename[tmpX]) )) self.memo.AppendText("\n") # We need to build the Conversion command line. Start with the executable path and name, # and add that we are using it embedded and want the second level of feedback (file information), # and specify the Input File name placeholder. process = '"' + TransanaGlobal.programDir + os.sep + 'ffmpeg_Transana" "-embedded" "2"' process += ' "-i" "%s"' # Create a wxProcess object self.process = wx.Process(self) # Call the wxProcess Object's Redirect method. This allows us to capture the process's output! self.process.Redirect() # Encode the filenames to UTF8 so that unicode files are handled properly process = process.encode('utf8') if DEBUG: self.memo.AppendText("\n\nMedia Filename:\n") self.memo.AppendText("%s\n\n" % tempMediaFilename) self.memo.AppendText("\n\nProcess call:\n") self.memo.AppendText("%s\n\n" % process % tempMediaFilename) # Call the Audio Extraction program using wxExecute, capturing the output via wxProcess. This call MUST be asynchronous. self.pid = wx.Execute(process % tempMediaFilename.encode(sys.getfilesystemencoding()), wx.EXEC_ASYNC, self.process) # On Windows, we need to reset the encoding to UTF-8 if 'wxMSW' in wx.PlatformInfo: wx.SetDefaultPyEncoding('utf_8') def OnEndProcess(self, event): """ End of wxProcess Event Handler """ # If a process is defined ... 
if self.process is not None: if DEBUG: self.memo.AppendText("\n\nProcess pid %s calling OnEndProcess()\n\n" % self.pid) # Get the Process' Input Stream stream = self.process.GetInputStream() # If that stream can be read ... if stream.CanRead(): if DEBUG: self.memo.AppendText("stream.CanRead() call successful\n") tmpParamCount = 0 # ... read it! text = stream.read() # Divide the text up into separate lines text = text.replace('\r\n', '\n') text = text.split('\n') # Process the input stream text one line at a time for line in text: # Divide the line up into its separate parameters param = line.split(' ') if DEBUG: tmpParamCount += 1 self.memo.AppendText("%8d '%s' " % (tmpParamCount, param[0])) if len(param) > 1: self.memo.AppendText("%s " % param[1]) if tmpParamCount < 16: for par in param[2:]: self.memo.AppendText("%s " % par) self.memo.AppendText("(%s)" % len(line)) self.memo.AppendText("\n") # If the line isn't blank and starts with an "x", indicating embedded feedback information ... if (len(line) > 0) and (line[0] == 'x'): # If the first parameter is just plain "x", we have a General Parameter if param[0] == 'x': # If Duration: if param[1] == 'Duration:': # Get the Media File Duration self.duration = float(param[2]) # If Bitrate: elif param[1] == 'Bitrate:': # Get the General Bitrate self.bitrate = int(param[2]) # If Streams: elif param[1] == 'Streams:': # Get the Number of Streams self.streams = int(param[2]) # If the first parameter is "xv", we have a Video Parameter elif param[0] == 'xv': # If Stream if param[1] == 'Stream': # We have a Video Stream self.vidStream = True # If Codec: elif param[1] == 'Codec:': # Get the Video Codec self.vidCodec = param[2] # If Pix_Fmt: elif param[1] == 'Pix_Fmt:': # Get the Picture Format self.vidPixFmt = param[2] # If Bitrate: elif param[1] == 'Bitrate:': # Get the Video Bit Rate self.vidBitrate = int(param[2]) # If FrameRate: elif param[1] == 'FrameRate:': # Get Video Frame Rate self.vidFrameRate = float(param[2]) # If the Frame Rate is negative ... if self.vidFrameRate < 0.0: # ... reset it to 0 self.vidFrameRate = 0.0 # If Size: elif param[1] == 'Size:': # Get video Width ... self.vidSizeW = int(param[2]) # ... and Height self.vidSizeH = int(param[4]) # If the first parameter is "xa", we have an Audio Parameter elif param[0] == 'xa': # If Stream if param[1] == 'Stream': self.audStream = True # We have an Audio Stream # If Codec: elif param[1] == 'Codec:': # Get the Audio Codec self.audCodec = param[2] # If Bitrate: elif param[1] == 'Bitrate:': # Get the Audio Bit Rate self.audBitrate = int(param[2]) # If SampleRate: elif param[1] == 'SampleRate:': # Get Audio Sample Rate self.audSampleRate = int(param[2]) # If Channels: elif param[1] == 'Channels:': # Get the Number of Audio Channels self.audChannels = int(param[2]) # Otherwise ... else: # ... we have an unknown parameter. (This shouldn't occur.) print "Unknown Parameter", param # Since the process has ended, destroy it.
self.process.Destroy() # De-reference the process self.process = None # Report File Information to the user # Start by freezing the Information box to speed and smooth the process self.memo.Freeze() # Split the file name from the path, either for tmpFileName or self.fileName as appropriate if self.tmpFileName == '': (self.lastPath, filename) = os.path.split(self.fileName) else: (self.lastPath, filename) = os.path.split(self.tmpFileName) # Report Path and File Name self.memo.AppendText(unicode(_("File Path: %s\n"), 'utf8') % self.lastPath) self.memo.AppendText(unicode(_("File Name: %s\n\n"), 'utf8') % filename) # Report General Media File Parameters self.memo.AppendText(unicode(_('Duration: %s\n'), 'utf8') % Misc.time_in_ms_to_str(self.duration * 1000, True)) self.memo.AppendText(unicode(_('Bitrate: %d kb/s\n'), 'utf8') % self.bitrate) self.memo.AppendText(unicode(_('Streams: %d\n'), 'utf8') % self.streams) # If there was a Video stream ... if self.vidStream: # ... report the Video settings self.memo.AppendText('\n') self.memo.AppendText(unicode(_('Video Stream:\n'), 'utf8')) self.memo.AppendText(unicode(_(' Codec: %s\n'), 'utf8') % self.vidCodec) self.memo.AppendText(unicode(_(' Picture Format: %s\n'), 'utf8') % self.vidPixFmt) self.memo.AppendText(unicode(_(' Size: %d x %d\n'), 'utf8') % (self.vidSizeW, self.vidSizeH)) if self.vidBitrate > 0.0: self.memo.AppendText(unicode(_(' Bitrate: %d kb/s\n'), 'utf8') % self.vidBitrate) else: self.memo.AppendText(unicode(_(' Bitrate: Unknown\n'), 'utf8')) if self.vidFrameRate > 0.0: self.memo.AppendText(unicode(_(' Frame Rate: %0.2f fps\n'), 'utf8') % self.vidFrameRate) else: self.memo.AppendText(unicode(_(' Frame Rate: Unknown\n'), 'utf8')) # If there was an Audio stream ... if self.audStream: # ... report the Audio settings self.memo.AppendText('\n') self.memo.AppendText(unicode(_('Audio Stream:\n'), 'utf8')) self.memo.AppendText(unicode(_(' Codec: %s\n'), 'utf8') % self.audCodec) self.memo.AppendText(unicode(_(' Bitrate: %d kb/s\n'), 'utf8') % self.audBitrate) self.memo.AppendText(unicode(_(' Sample Rate: %d Hz\n'), 'utf8') % self.audSampleRate) self.memo.AppendText(unicode(_(' Channels: %d\n'), 'utf8') % self.audChannels) self.memo.AppendText('\n') # Once this is finished, we can Thaw the control self.memo.Thaw() # If our file had at least one media stream ... if self.audStream or self.vidStream: # Clear the Format choice box self.format.Clear() # If we have a Video stream ... if self.vidStream: # If we're not doing a video snapshot ... if not self.snapshot: if ENABLE_MPG: # ... add the video options to the Format choice box # The MPEG-1 option is NOT ALLOWED. They want royalty fees of $15,000 a year minimum, # which we cannot afford. self.format.Append(_("MPEG-1 - An excellent choice for video files")) # MPEG-4 is viable as long as we ship 50,000 units or fewer a year. self.format.Append(_("MPEG-4 - Efficient video compression, with moderate responsiveness")) if ENABLE_MOV: # Add the MOV format option. self.format.Append(_("MOV - An excellent choice for multiple simultaneous video files")) # Clear the Video Size choice box self.videoSize.Clear() # Add sizes as long as they are SMALLER than source video size. 
(Calculate Heights for # "standard" Width options) for width in [320, 400, 480, 560, 640, 720, 800, 1024, 1280, 1366, 1440, 1680, 1920]: if self.vidSizeW >= width: st = _('%d x %d') % (width, int(self.vidSizeH * float(width) / self.vidSizeW)) self.videoSize.Append(st) # If the actual video size hasn't already been inserted, add it, UNLESS # it is larger than 1920 pixels wide, which is our maximum if (not self.vidSizeW in [1920, 1680, 1440, 1366, 1280, 1024, 800, 720, 640, 560, 480, 400, 320]) and (self.vidSizeW <= 1920): st = str(self.vidSizeW) + ' x ' + str(self.vidSizeH) self.videoSize.Append(st) # If more than one Video Size option exists ... if self.videoSize.GetCount() > 1: # ... enable the Video Size choice box self.videoSize.Enable(True) # Set the last Size in the list, which should be the same as the original, or the # largest allowable size if the original was too big. self.videoSize.SetSelection(self.videoSize.GetCount() - 1) # If the Video Bitrate is 0 (as seems to be true for AVI files) ... if self.vidBitrate == 0: # ... use the Overall Bit Rate as the Video Bit Rate. # (I don't know if this is legit, but it's a best-I-can-do approximation.) self.vidBitrate = self.bitrate # If we are doing a video snapshot ... if self.snapshot: # ... let the user know this could take a while self.memo.AppendText("\n" + _("Please note that the further into the media file your snapshot is, the longer this process will take.") + "\n") # If we're NOT doing a snapshot, we should display Video Bitrate warnings if appropriate else: # If the Video Bit Rate exceeds 1500 kb/s ... if self.vidBitrate > 1500: # ... inform the user they may want to lower it self.memo.AppendText(_("You should be able to reduce the Video Bit Rate setting to 1500 kb/s or less without noticeable loss of quality.") + "\n") # If the Video Bit Rate exceeds 500 kb/s ... if self.vidBitrate > 500: # ...
inform the user they may want to lower it if planning on using Multiple Simultaneous Media Files self.memo.AppendText(_("If you intend to use this video as one of multiple simultaneously displayed videos, you may want to reduce the Video Bit Rate setting to 1000 kb/s or less.") + "\n") self.memo.AppendText("\n") # Clear the Video Bit Rate choice box self.videoBitrate.Clear() # Start with a list of "default" video bit rates bitrates = [100, 150, 200, 250, 300, 350, 500, 750, 1000, 1500, 2000, 2500, 3000, 5000] # For each bit rate in the list ... for bitrate in bitrates: # ... if the File's Video Bit Rate is greater than the proposed bit rate setting ... if self.vidBitrate >= bitrate: # ... then add the proposed bit rate option to the choice box self.videoBitrate.Append(str(bitrate)) # If not ... else: # ... we can stop looking at bit rates break # If the file's ACTUAL video bitrate wasn't in the default list ... if not str(self.vidBitrate) in self.videoBitrate.GetStrings(): # ... then add it to the choice box too self.videoBitrate.Append(str(self.vidBitrate)) # if the Video Bitrate exceeds 1500 ... if self.vidBitrate >= 1500: # ... then select 1500 as a reasonable bitrate self.videoBitrate.SetSelection(9) # if the video bitrate is less than 1500 else: # ... select the highest video bit rate in the list, which should match the source file's self.videoBitrate.SetSelection(self.videoBitrate.GetCount() - 1) # if there is more than one option in the list and we're not doing a video snapshot ... if (self.videoBitrate.GetCount() > 1) and not self.snapshot: # ... then enable the Video Bit Rate choice box self.videoBitrate.Enable(True) # If we have an Audio stream ... if self.audStream: # If we're not doing a video snapshot ... if not self.snapshot: if ENABLE_MPG: # ... add the audio options to the Format choice box # The MP3 option is NOT ALLOWED. They want royalty fees of $15,000 a year minimum, # which we cannot afford. self.format.Append(_("MP3 - Compressed audio files")) # Add the WAV file option self.format.Append(_("WAV - Uncompressed audio files")) # Clear the Audio Bit Rate choice box self.audioBitrate.Clear() # Start with a list of "default" audio bit rates bitrates = [32, 48, 56, 64, 80, 96, 128, 144, 192, 224, 256, 320, 384] # For each bit rate in the list ... for bitrate in bitrates: # ... if the File's Audio Bit Rate is greater than the proposed bit rate setting ... if self.audBitrate >= bitrate: # ... then add the proposed bit rate option to the choice box self.audioBitrate.Append(str(bitrate)) # If not ... else: # ... we can stop looking at bit rates break # If the file's ACTUAL audio bitrate wasn't in the default list ... if not str(self.audBitrate) in self.audioBitrate.GetStrings(): # ... then add it to the choice box too self.audioBitrate.Append(str(self.audBitrate)) # If 192 kb/s is NOT among the audio bit rate options ... if self.audioBitrate.FindString('192') == wx.NOT_FOUND: # ... then select the highest audio bit rate in the list, which should match the source file's self.audioBitrate.SetSelection(self.audioBitrate.GetCount() - 1) # If 192 kb/s IS among the options ... else: # ... pick that. It's "good enough" for analysis. self.audioBitrate.SetSelection(self.audioBitrate.FindString('192')) # If there are multiple audio bit rate options and we're not doing a video snapshot ...
if (self.audioBitrate.GetCount() > 1) and not self.snapshot: # Enable the audio bit rate choice box self.audioBitrate.Enable(True) # Clear the Audio Sampling Rate choice box self.audioSampleRate.Clear() # Start with a list of "default" audio sampling rates samplerates = [11025, 22050, 24000, 32000, 44100, 48000] # For each sample rate in the list ... for samplerate in samplerates: # ... if the File's Audio Sample Rate is greater than the proposed sample rate setting ... if self.audSampleRate >= samplerate: # ... then add the proposed sample rate option to the choice box self.audioSampleRate.Append(str(samplerate)) # If not ... else: # ... we can stop looking at sample rates break # If the file's ACTUAL audio sampling rate wasn't in the default list ... if not str(self.audSampleRate) in self.audioSampleRate.GetStrings(): # ... then add it to the choice box too self.audioSampleRate.Append(str(self.audSampleRate)) # Select the highest audio sampling rate in the list, which should match the source file's self.audioSampleRate.SetSelection(self.audioSampleRate.GetCount() - 1) # If there are multiple audio sampling rate options and we're not doing a video snapshot ... if (self.audioSampleRate.GetCount() > 1) and not self.snapshot: # Enable the audio sampling rate choice box self.audioSampleRate.Enable(True) # If we have a video stream ... if self.vidStream: # ... add the still images option to the Format choice box self.format.Append(_("JPEG - Create still images from video files")) # If we have video and are on a Mac ... if ENABLE_MOV and self.vidStream and ('wxMac' in wx.PlatformInfo): # Select the second item in the Format list, MOV video self.format.SetSelection(1) # If we have audio only OR we're not on a Mac ... else: # Select the first item in the Format list self.format.SetSelection(0) # Enable the Format choice box self.format.Enable(True) # now that we have an input file and a format, we can generate an output file name self.SetOutputFilename(filename) # If we have no audio or video streams, we don't have a valid media file. else: # Report this to the user. self.memo.AppendText(_('The selected media file cannot be processed or converted.')) def OnConvert(self, event): """ Convert Button Press Event """ # Check the file name, ensuring it has the required "%06d" parameter # First, let's break the file name up into path, filename, and ext (filename, ext) = os.path.splitext(self.txtDestFileName.GetValue()) (path, filename) = os.path.split(filename) originalFilename = filename # If we're converting to a still image ... if self.ext in ['.jpg']: # if "%06d" isn't part of the filename ... if not ("%06d" in filename): # ... add it to the end of the file name ... filename += "_%06d" # ... and rebuild the file name from its components self.txtDestFileName.SetValue(os.path.join(path, filename) + ext) # If a numeric insertion is called for ... if '%06d' in self.txtDestFileName.GetValue(): # Figure out the output file name tmpFileName = self.txtDestFileName.GetValue() % 1 else: tmpFileName = self.txtDestFileName.GetValue() # See if the output file already exists if os.path.exists(tmpFileName): # If so, inform the user errmsg = unicode(_('File "%s" already exists. Do you want to replace this file?'), 'utf8') errDlg = Dialogs.QuestionDialog(self, errmsg % tmpFileName, noDefault=True) result = errDlg.LocalShowModal() errDlg.Destroy() # If the user does not want to replace the file, ... if result == wx.ID_NO: # ... then exit this method immediately!
return # Error Checking -- Initialize Error Message to blank errmsg = "" # See if the Source File exists (is available) if not os.path.exists(self.txtSrcFileName.GetValue()): errmsg += unicode(_('File "%s" not found.\n'), 'utf8') % self.txtSrcFileName.GetValue() # If we have a non-cp1252-compatible File and it didn't COPY correctly ... if (self.tmpFileName != '') and (not os.path.exists(self.fileName)): errmsg += unicode(_('File "%s" not found. File "%s" did not copy correctly.\n'), 'utf8') % (self.fileName, self.txtSrcFileName.GetValue()) # See if we have a valid Media File elif not self.audStream and not self.vidStream: errmsg += unicode(_('File "%s" is not a valid media file.\n'), 'utf8') % self.txtSrcFileName.GetValue() # Check the file extension if self.txtDestFileName.GetValue()[-4:].lower() != self.ext.lower(): errmsg += unicode(_('The Destination File Name does not have the correct file extension. It must end with "%s".'), 'utf8') % self.ext # Check for a DIFFERENT file name, so we're not over-writing media files! if self.txtSrcFileName.GetValue() == self.txtDestFileName.GetValue(): errmsg += unicode(_("The Destination File Name cannot be the same as the Source File Name."), 'utf8') # If an error has been detected ... if errmsg != '': # ... create an Error Dialog and display the message to the user errDlg = Dialogs.ErrorDialog(self, errmsg) errDlg.ShowModal() errDlg.Destroy() # If no error has been detected ... else: # Windows requires that we change the default encoding for Python for the audio extraction code to work # properly with Unicode files (!!!) This isn't needed on OS X, as its default file system encoding is utf-8. # See python's documentation for sys.getfilesystemencoding() if 'wxMSW' in wx.PlatformInfo: # Set the Python Encoding to match the File System Encoding wx.SetDefaultPyEncoding(sys.getfilesystemencoding()) # We need to build the Extraction command line in stages. Start with the executable path and name, # and add that we are using it embedded and want the first level of feedback (progress information), # and specify the Input File name placeholder. ## THEORY: Moving -ss parameter before -i parameter will speed up Clip Export and prevent Harrie's "Buffering ## several frames" problem. "-async 1" will prevent audio-video synch problems. ## See http://ffmpeg.org/pipermail/ffmpeg-user/2011-April/000234.html ## Implemented for Transana 2.61. It appears to work exactly that way. ## ## Except, for Transana 3.0, I notice that I can't take Snapshots from MPEG-1 video!! ## FFmpegCommand = '"' + TransanaGlobal.programDir + os.sep + 'ffmpeg_Transana" "-embedded" "1" "-i" "%s"' FFmpegCommand = '"' + TransanaGlobal.programDir + os.sep + 'ffmpeg_Transana" "-embedded" "1"' # For CLIPS, add "-ss StartTime" and "-t Duration (seconds)"!! if (not self.ext in ['.jpg']) and (self.clipDuration > 0): FFmpegCommand += ' "-ss" "%0.5f" "-t" "%0.5f"' % (float(self.clipStart) / 1000.0, float(self.clipDuration) / 1000.0) # If we're producing still images ... if self.ext in ['.jpg']: # Extract the extension of the source file name (srcName, srcExt) = os.path.splitext(self.txtSrcFileName.GetValue()) # Some video formats have proven to be less reliable than others. They seem to # work well enough if we request 4 frames. # Specifically, MPEG formats only seem to work with every third frame. Weird. # The value 4 was determined through trial-and-error. numFramesForStill = 4 # AVI and WMV formats appear to have a frame rate of float(-1.#IND00), which also shows up as # string('nan'). 
To check for this, we have to typecast the Frame Rate as a string. # If the Video Frame Rate is "not a number" ... if str(self.vidFrameRate) == 'nan': # ... then a frame rate of 30 fps can be used. tmpVidFrameRate = 30.0 # Otherwise ... else: # ... just use the frame rate extracted from the video tmpVidFrameRate = self.vidFrameRate # This syntax is SLOWER, but the other syntax doesn't work for MPEG video if srcExt in ['.mpg', '.mpeg']: FFmpegCommand += ' "-i" "%s"' # If we're doing a video snapshot ... if self.snapshot: # Set the Clip Duration to the frame rate times the number of frames divided by 1000. # Hopefully, this will stop the DIVx Snapshot not stopping problem. (It didn't.) self.clipDuration = round(tmpVidFrameRate * numFramesForStill) / 1000.0 # ... then we use the video's own frame rate and specify the position of the desired frame. # We need to adjust the start time one FRAME earlier! We need to adjust the end time 4 FRAMES later. Otherwise, # MPEG-1 video doesn't work every time! I'm not sure why. (This was determined experimentally.) FFmpegCommand += ' "-r" "%0.2f" "-ss" "%0.5f" "-t" "%0.5f"' % (tmpVidFrameRate, (float(self.clipStart) - (1.5 * tmpVidFrameRate))/ 1000.0, self.clipDuration) # If we're NOT doing a snapshot, we need to get the proper frame rate to produce the correct pictures. elif self.stillFrameRate.GetStringSelection() == _("20 seconds"): FFmpegCommand += ' "-r" "0.05"' elif self.stillFrameRate.GetStringSelection() == _("15 seconds"): FFmpegCommand += ' "-r" "0.0666667"' elif self.stillFrameRate.GetStringSelection() == _("10 seconds"): FFmpegCommand += ' "-r" "0.1"' elif self.stillFrameRate.GetStringSelection() == _("5 seconds"): FFmpegCommand += ' "-r" "0.2"' elif self.stillFrameRate.GetStringSelection() == _("1 second"): FFmpegCommand += ' "-r" "1"' # This syntax is FASTER, but it doesn't work for MPEG video if not srcExt in ['.mpg', '.mpeg']: FFmpegCommand += ' "-i" "%s"' else: FFmpegCommand += ' "-i" "%s"' # Specify image size. If we are creating a Video file ... if self.vidStream and (self.ext in ['.mpg', '.mp4', '.mov', '.jpg']): # ... Determine the current Video Size selection, and divide it up into its component parts size = self.videoSize.GetStringSelection().split(' ') # Supply the FFmpeg "-s" parameter and open the data quotes FFmpegCommand += ' "-s" "' # Build the size value. (This essentially removes the internal spaces from the string) for x in size: FFmpegCommand += x # Close the data quotes. FFmpegCommand += '"' # Specify video bitrate and some additional parameters. If we are creating a Video file ... if self.vidStream and (self.ext in ['.mpg', '.mp4', '.mov']): # If we are creating an MPEG-1 file ... if self.ext == '.mpg': # ... specify the video codec as mpeg1video FFmpegCommand += ' "-vcodec" "mpeg1video"' # If we are creating an MPEG-4 file ... elif self.ext == '.mp4': # ... specify Four Motion Vector (mpeg4) and h.263 advanced intra coding / mpeg2 ac prediction FFmpegCommand += ' "-flags" "+mv4+aic"' # Add the Video Bit Rate specification FFmpegCommand += ' "-vb" "%dk"' % int(self.videoBitrate.GetStringSelection()) # if the Frame Rate is not UNKNOWN ... if self.vidFrameRate > 0.0: # HD video with high frame rates (e.g. 59.96 fps) doesn't play smoothly. # Frame Rate reduction causes problems if set to "29.97" or "30", but is okay at "29" if self.vidFrameRate > 30: # Let's max the Frame Rate out at 29 fps. FFmpegCommand += ' "-r" "29"' # Let's inform the user we changed their frame rate!
self.memo.AppendText("\n" + _("Frame Rate reduced from %0.2f fps to 29 fps.") % self.vidFrameRate) # Otherwise ... else: # ... use the existing frame rate FFmpegCommand += ' "-r" "%0.2f"' % self.vidFrameRate # If we have an Audio Stream to process ... if self.audStream and not self.ext in ['.jpg']: # If we are creating an MPEG-1 file ... if self.ext == '.mpg': # ... specify the audio codec as mp2 FFmpegCommand += ' "-acodec" "mp2"' # Get the desired Audio Bitrate from the form tmpAudioBitrate = int(self.audioBitrate.GetStringSelection()) # If we are creating an MPEG-1 file ... if self.ext == '.mpg': # Check to see if the desired Audio Bit Rate is in the options allowed by the MP2 specification. If not ... if not int(self.audioBitrate.GetStringSelection()) in [32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384]: # ... display a message to the user ... self.memo.AppendText("\n" + _("MPEG-1 supports only limited audio bit rate options. Over-riding Audio Bit Rate setting.")) # ... if the user has multiple options ... if self.audioBitrate.GetSelection() > 0: # ... pick the option just smaller than the user selected. All the values in the list except the source file's # original setting are legal values, so this MUST be legal! tmpAudioBitrate = int(self.audioBitrate.GetString(self.audioBitrate.GetSelection() - 1)) # If there's only one value in the options list ... else: # ... just use 64. (This was somewhat arbitrary, but is unlikely to be used.) tmpAudioBitrate = 64 # Add the Audio Bit Rate to the Conversion Command FFmpegCommand += ' "-ab" "%dk"' % tmpAudioBitrate # If we are creating an MPEG-1 file and we are supposed to use a Sample Rate less than 32,000 Hz ... if (self.ext == '.mpg') and (int(self.audioSampleRate.GetStringSelection()) < 32000): # ... inform the user of the smallest legal Sample Rate value for MP2 audio self.memo.AppendText("\n" + _("MPEG-1 requires an Audio Sample Rate of at least 32,000. Over-riding Audio Sample Rate.") + "\n") # ... and set the value to 32000 FFmpegCommand += ' "-ar" "%d"' % 32000 # Otherwise ... else: # ... use the Sample Rate from the form FFmpegCommand += ' "-ar" "%d"' % int(self.audioSampleRate.GetStringSelection()) # If there are more than 2 audio channels ... if self.audChannels > 2: # ... then let's reduce the number down to just 2. FFmpegCommand += ' "-ac" "2"' if not self.ext in ['.jpg']: # For best quality media files, the FFmpeg site suggests the following: # -mbd rd Macroblock Decision Algorithm "use best rate distortion" # -trellis 2 rate-distortion optimal quantization (whatever that means) # -cmp 2 full pel me compare function (whatever that means) # -subcmp 2 sub pel me compare function (whatever that means) FFmpegCommand += ' "-mbd" "rd" "-trellis" "2" "-cmp" "2" "-subcmp" "2"' # If we are creating an MPEG-1 file ... if self.ext == '.mpg': # ... the FFmpeg web site recommends a "group picture size" of 100 and a pass value of 1/2 FFmpegCommand += ' "-g" "100" "-pass" "1/2"' # if we are creating an MPEG-4 file ... elif self.ext == '.mp4': # ... the FFmpeg web site recommends a "group picture size" of 300 and a pass value of 1/2 FFmpegCommand += ' "-g" "300"' # When bundled on OS X, this argument causes problems. # Or perhaps it's due to network and permissions, as it returns a "permission denied" error. # Let's leave it off everywhere!
if False or (not 'wxMac' in wx.PlatformInfo): FFmpegCommand += ' "-pass" "1/2"' # The Transana Demo restricts the length of file conversion to 10 minutes if TransanaConstants.demoVersion and (self.duration > 600): FFmpegCommand += ' "-t" "600"' if (not self.ext in ['.jpg']) and (self.clipDuration > 0): FFmpegCommand += ' "-async" "1"' # Add the "-y" parameter to over-write files, and append the destination file name placeholder FFmpegCommand += ' "-y" "%s"' # Create the prompt for the progress dialog prompt = unicode(_("Converting %s\n to %s"), 'utf8') % (self.txtSrcFileName.GetValue(), self.txtDestFileName.GetValue()) # Create the Progress Dialog, allowing MULTIPLE THREADS progressDlg = WaveformProgress.WaveformProgress(self, prompt, self.clipStart, self.clipDuration, showModally=False) # If there are NO currently-running conversions ... if self.runningConversions == {}: # ... then set the index to 1 indexNum = 1 # If there are currently-running conversions ... else: # ... then set the index to 1 more than the largest current number indexNum = max(self.runningConversions) + 1 # Have the Progress Dialog remember its index number progressDlg.indexNum = indexNum # Have the Progress Dialog remember the name of the file being converted progressDlg.originalFilename = originalFilename # Add the Progress Dialog to the dictionary that holds the running conversions self.runningConversions[indexNum] = progressDlg # If there is exactly ONE running conversion ... if len(self.runningConversions) == 1: msg = unicode(_('%d conversion running'), 'utf8') % len(self.runningConversions) # ... update the Close button's Text ... self.btnClose.SetLabel(msg) # ... and update the layout to enlarge the button self.Layout() # If there are MORE THAN ONE running conversions ... else: msg = unicode(_('%d conversions running'), 'utf8') % len(self.runningConversions) # ... update the Close button's Text self.btnClose.SetLabel(msg) # Disable the Close Button self.btnClose.Enable(False) # If the number of CPU Cores is known ... if self.cpu_count > 0: # ... Add the number of cores to the message text if len(self.runningConversions) == 1: msg = unicode(_('%d Conversion Running on %d computer cores'), 'utf8') % \ (len(self.runningConversions), self.cpu_count) else: msg = unicode(_('%d Conversions Running on %d computer cores'), 'utf8') % \ (len(self.runningConversions), self.cpu_count) # If we have as many conversions running as we have computer cores ... if len(self.runningConversions) >= self.cpu_count: # ... disable the Convert button self.btnConvert.Enable(False) # Add the current message about number of conversions (and cores, if known) to the Memo self.memo.AppendText(msg + '\n\n') if DEBUG: self.memo.AppendText("MediaConvert.OnConvert(): FFmpeg Command:") self.memo.AppendText(FFmpegCommand % (self.txtSrcFileName.GetValue(), self.txtDestFileName.GetValue())) self.memo.AppendText('\n\n') msg = FFmpegCommand dlg = Dialogs.InfoDialog(self, msg) dlg.ShowModal() dlg.Destroy() # Pass the Conversion Command we have created to the Progress Dialog progressDlg.SetProcessCommand(FFmpegCommand) # If we have a temporary file name because of the non-cp1252 FFmpeg issue... if self.tmpFileName != '': # ... use the modified file name as the input ... inputFile = self.fileName # ... and create the appropriate modified output file name outputFile = os.path.join(self.tmpPath, 'Output' + self.ext) # If we do NOT have a temporary file name ... else: # ... then we can use the input and output files currently showing on the form.
inputFile = self.txtSrcFileName.GetValue() outputFile = self.txtDestFileName.GetValue() # Initiate the Conversion with the appropriate file names progressDlg.Extract(inputFile, outputFile, mode='CustomConvert') # Get the Error Log that may have been created # errorLog = progressDlg.GetErrorMessages() # Destroy the Progress Dialog # progressDlg.Destroy() def OnConvertComplete(self, progressDlg): if DEBUG: print "MediaConvert.OnConvertComplete() called for", progressDlg.indexNum, progressDlg.originalFilename if True: # False # Get the Error Log that may have been created errorLog = progressDlg.GetErrorMessages() # If the conversion was CANCELLED by the user ... if (len(errorLog) == 1) and (errorLog[0] == 'Cancelled'): msg = unicode(_('Conversion of "%s" cancelled by user.'), 'utf8') % progressDlg.originalFilename + "\n\n" # If the conversion was NOT cancelled ... else: # Inform the user that the conversion is complete msg = unicode(self.ext[1:].upper(), 'utf8') + unicode(_(' conversion of "%s" completed.'), 'utf8') % progressDlg.originalFilename + "\n\n" # If there are messages in the Error Log ... if len(errorLog) > 0: # Create the message to the user msg += unicode(_("Conversion Report:"), 'utf8') + "\n" + unicode(_("(These messages can be ignored unless problems arise.)"), 'utf8') + "\n\n" # Add the Error Log contents to the user message for line in errorLog: msg += unicode(line, sys.getfilesystemencoding()) + "\n" # If we're taking a snapshot ... if self.snapshot: # ... indicate that the snapshot was successful self.snapshotSuccess = True # Display the user message self.memo.AppendText(msg + '\n') # When still images are created, FFmpeg seems to like to create extra images. We need to clean that up here. # Start exception handling try: # If we're creating still images, have an Image #2, and DON'T have an Image #6, we can safely conclude that # a single still image was desired but more than one was created. if (self.ext == '.jpg') and \ os.path.exists(self.txtDestFileName.GetValue() % 2) and \ not os.path.exists(self.txtDestFileName.GetValue() % 6): # Delete images 2 through 5, which are extraneous for img in range(2, 6): if os.path.exists(self.txtDestFileName.GetValue() % img): # delete the image os.remove(self.txtDestFileName.GetValue() % img) # If we have a temporary file name because of the non-cp1252 file name issue on Windows ... if self.tmpFileName != '': # Determine the destination file name destFile = self.txtDestFileName.GetValue() # If we are doing a SnapShot (i.e. if the file has a NUMBER part) ... if '%06d' in destFile: # ... then substitute 1 in the number part of the destination file name ... destFile = destFile % 1 # Rebuild the temporary output file name; outputFile is local to OnConvert(), so it must be recreated here outputFile = os.path.join(self.tmpPath, 'Output' + self.ext) # ... move the CONVERTED file, renaming it along the way shutil.move(outputFile, destFile) # Handle exceptions ... except: if DEBUG: print sys.exc_info()[0], sys.exc_info()[1] # ... by ignoring them! pass # On Windows, we need to reset the encoding to UTF-8 if 'wxMSW' in wx.PlatformInfo: wx.SetDefaultPyEncoding('utf_8') # If we are embedded in Transana, not running stand-alone ... if __name__ != '__main__': # If we converted a raw media file (as opposed to exporting Clip Video) if (self.ext != '.jpg') and ((len(errorLog) != 1) or (errorLog[0] != 'Cancelled')) and (self.clipDuration == 0): # ... prompt about updating media file references updateDlg = Dialogs.QuestionDialog(self, _("Do you want to update all media file references in the database?"), noDefault=True) # If the user wants to update all references ... if updateDlg.LocalShowModal() == wx.ID_YES: # ...
separate paths from file names for both source and destination (sourcePath, sourceFile) = os.path.split(self.txtSrcFileName.GetValue()) (destPath, destFile) = os.path.split(self.txtDestFileName.GetValue()) # If our file name is cp1252 compatible ... if (self.tmpFileName == ''): # ... then we need to process it like Database Data gets processed so the Queries will work! sourceFile = DBInterface.ProcessDBDataForUTF8Encoding(sourceFile) # Update the source file in the Database with the new File Path AND the new File Name if not DBInterface.UpdateDBFilenames(self, destPath, [sourceFile], newName=destFile): # Display an error message if the update failed infodlg = Dialogs.InfoDialog(self, _('Update Failed. Some records that would be affected may be locked by another user.')) infodlg.ShowModal() infodlg.Destroy() # Remove this conversion from the dictionary of running conversions del(self.runningConversions[progressDlg.indexNum]) # If we have NO MORE running conversions ... if len(self.runningConversions) == 0: # ... reset the label of the button to Close ... self.btnClose.SetLabel(_('Close')) # ... enable the Close button ... self.btnClose.Enable(True) # ... and redo the layout to resize the button self.Layout() # If we have exactly ONE conversion remaining ... elif len(self.runningConversions) == 1: # ... update the Close button text self.btnClose.SetLabel(unicode(_('%d Conversion Running'), 'utf8') % len(self.runningConversions)) # If we have MORE THAN ONE conversion remaining ... else: # ... update the Close button text self.btnClose.SetLabel(unicode(_('%d Conversions Running'), 'utf8') % len(self.runningConversions)) # If we now have fewer running conversions than CPU Cores (which should ALWAYS be true) ... if len(self.runningConversions) < self.cpu_count: # ... enable the Convert button self.btnConvert.Enable(True) # If we are doing a snapshot ... if self.snapshot: # ... let's close the dialog automatically! self.Close() def OnClose(self, event): """ Close Button Press """ # If we have NO running conversions ... if len(self.runningConversions) == 0: # If we're on Windows ... if 'wxMSW' in wx.PlatformInfo: # If the temporary path exists ... if os.path.exists(self.tmpPath): # Start exception handling try: # Get a list of files in the temporary directory files = os.listdir(self.tmpPath) # iterate through the files for fil in files: # If the file is called Input or Output ... if ((len(fil) > 5) and (fil[:5] == 'Input')) or ((len(fil) > 6) and (fil[:6] == 'Output')): # ... try to delete it os.remove(os.path.join(self.tmpPath, fil)) # Remove the DIRECTORY os.removedirs(self.tmpPath) # If an exception is raised ... except: if DEBUG: print sys.exc_info()[0] print sys.exc_info()[1] # ... ignore it. Transana might clean up after itself later pass # Allow the form's Cancel event to fire to close the form event.Skip() # I couldn't get the VETO to work, so disabled the Close Button instead. else: print "Event.Veto() called!" event.Veto() errmsg = unicode(_('You still have %d conversions in progress. Please let them finish or Cancel them before proceeding.'), 'utf8') % len(self.runningConversions) errDlg = Dialogs.ErrorDialog(self, errmsg) errDlg.ShowModal() errDlg.Destroy() def OnBrowse(self, event): """ Browse Button event handler (for both source and destination file names) """ # If triggered by the Source File ...
if event.GetId() == self.srcBrowse.GetId(): if DEBUG: cwdbefore = os.getcwd() # Get Transana's File Filter definitions fileTypesString = _("All files (*.*)|*.*") # Create a File Open dialog. fs = wx.FileDialog(self, _('Select a media file to process:'), self.lastPath, "", fileTypesString, wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) # Select "All Media Files" as the initial Filter fs.SetFilterIndex(0) # Show the dialog and get user response. If OK ... if fs.ShowModal() == wx.ID_OK: # ... get the selected file name self.fileName = fs.GetPath() ## self.fileName = '/Users/davidwoods/Movies/Workshop Video/Leader/Demo/Demo.mpg' else: self.fileName = '' # Destroy the File Dialog fs.Destroy() self.txtSrcFileName.SetValue(self.fileName) if DEBUG: self.memo.AppendText('cwd BEFORE: %s AFTER: %s\n' % (cwdbefore, os.getcwd())) # If triggered by the Destination File ... else: # Get the path and file name from the Destination File's current setting (path, filename) = os.path.split(self.txtDestFileName.GetValue()) # Check the Conversion Type and limit the File Types to that Type if self.ext == '.mpg': fileTypesString = _("MPEG-1 files (*.mpg)|*.mpg") elif self.ext == '.mov': fileTypesString = _("MOV files (*.mov)|*.mov") elif self.ext == '.mp4': fileTypesString = _("MPEG-4 files (*.mp4)|*.mp4") elif self.ext == '.mp3': fileTypesString = _("MP3 audio files (*.mp3)|*.mp3") elif self.ext == '.wav': fileTypesString = _("WAV audio files (*.wav)|*.wav") else: fileTypesString = _("All files (*.*)|*.*") # Create a File Save dialog, using the path, file name, and file type determined above. fs = wx.FileDialog(self, _('Select a name for the output:'), path, filename, fileTypesString, wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) # Show the dialog and get user response. If OK ... if fs.ShowModal() == wx.ID_OK: # ... place the selected file and path in the Destination File Name text control self.txtDestFileName.SetValue(fs.GetPath()) # Destroy the File Dialog fs.Destroy() def OnSrcFileNameChange(self, event): """ Process any change in the Source File Name text control """ # This is needed for when the user TYPES in the Source File Name control rather than using the Browse button # Clear the Memo self.memo.Clear() self.memo.Update() # If the current contents point to a file that exists ... if os.path.exists(self.txtSrcFileName.GetValue()): # ... set the selected file name self.fileName = self.txtSrcFileName.GetValue() # Process the selected file name to prepare for conversion self.ProcessMediaFile(self.txtSrcFileName.GetValue()) # if the file does NOT exist ... else: prompt = unicode('File "%s" not found. 
Try the Browse button.', 'utf8') self.memo.AppendText(prompt % self.txtSrcFileName.GetValue()) # Call the default processor event.Skip() def OnFormat(self, event): """ Select Output Format OnChoice event handler """ # Split the path from the file name (path, filename) = os.path.split(self.fileName) # Update the Output File Name to reflect the new Format self.SetOutputFilename(filename) # Enable or disable fields based on format type # Video formats first if self.ext in ['.mpg', '.mov', '.mp4']: # Enable video fields if self.videoSize.GetCount() > 1: self.videoSize.Enable(True) if self.videoBitrate.GetCount() > 1: self.videoBitrate.Enable(True) # Enable audio fields if self.audioBitrate.GetCount() > 1: self.audioBitrate.Enable(True) if self.audioSampleRate.GetCount() > 1: self.audioSampleRate.Enable(True) # Disable still image fields self.stillFrameRate.Enable(False) elif self.ext in ['.mp3', '.wav']: # Disable video fields self.videoSize.Enable(False) self.videoBitrate.Enable(False) # Enable audio fields if self.audioBitrate.GetCount() > 1: self.audioBitrate.Enable(True) if self.audioSampleRate.GetCount() > 1: self.audioSampleRate.Enable(True) # Disable still image fields self.stillFrameRate.Enable(False) elif self.ext in ['.jpg']: # Enable video size field if self.videoSize.GetCount() > 1: self.videoSize.Enable(True) # Disable video Bitrate field self.videoBitrate.Enable(False) # Disable audio fields self.audioBitrate.Enable(False) self.audioSampleRate.Enable(False) # Enable still image fields self.stillFrameRate.Enable(True) def SetOutputFilename(self, filename): """ Update the Output File Name """ # If we have a temporary file name because of the FFmpeg non-cp1252 compatibility issue ... if self.tmpFileName != '': # ... then we want to use the ORIGINAL file name here, not the altered one! (path, filename) = os.path.split(self.tmpFileName) # Separate the File Name and the Extension (fn, ext) = os.path.splitext(filename) # If we have a Clip Name ... if self.clipName != '': # ... use that rather than the media file name fn = unicode("Clip", 'utf8') + "_" + self.clipName elif self.format.GetStringSelection()[:4] == 'JPEG': if self.snapshot: fn += "_" + unicode("Snapshot", 'utf8') fn += _('_%06d') # Otherwise ... else: # If we have a Left-To-Right language ... if TransanaGlobal.configData.LayoutDirection == wx.Layout_LeftToRight: # ... add the TRANSLATED word "Analysis" on to indicate that this is the low-res Analysis version of the media file fn += unicode(_('-Analysis'), 'utf8') # With Right-To-Left languages, we can't use the Translated version of the word "Analysis"!! else: fn = fn + unicode('-Analysis', 'utf8') # Based on the Format Selection (start of the text), determine the appropriate file extension. # Remember it as a proxy for destination file type for later processing if 'MPEG-1' in self.format.GetStringSelection(): self.ext = '.mpg' elif 'MOV' in self.format.GetStringSelection(): self.ext = '.mov' elif 'MPEG-4' in self.format.GetStringSelection(): self.ext = '.mp4' elif 'MP3' in self.format.GetStringSelection(): self.ext = '.mp3' elif 'WAV' in self.format.GetStringSelection(): self.ext = '.wav' elif 'JPEG' in self.format.GetStringSelection(): self.ext = '.jpg' # Build a new file name, starting with the last path used newFilename = self.lastPath # If we have a Left-To-Right language ... if TransanaGlobal.configData.LayoutDirection == wx.Layout_LeftToRight: # If that path does NOT end with a file separator ... if newFilename[-1] != os.sep: # ... 
add one newFilename += os.sep # Add the File Name, the "Analysis" tag which flags files that have been converted for Analysis, and the proper extension newFilename += fn + self.ext # With Right-To-Left languages, we have to build file names BACKWARDS! else: # If that path does NOT end with a file separator ... if newFilename[-1] != os.sep: # ... add one newFilename = newFilename + os.sep # Add the File Name, the "Analysis" tag which flags files that have been converted for Analysis, and the proper extension newFilename = newFilename + fn + self.ext # Update the Destination File Name text control with the new file name self.txtDestFileName.SetValue(newFilename) # Enable the Destination File Name and Browse controls self.txtDestFileName.Enable(True) self.destBrowse.Enable(True) def OnHelp(self, event): """ Help Button event handler """ # If a MenuWindow is defined (which it should always be!) if TransanaGlobal.menuWindow != None: # ... then use the MenuWindow's ControlObject to call the Help infrastructure TransanaGlobal.menuWindow.ControlObject.Help('Media File Conversion') # For testing purposes, this module can run stand-alone. if __name__ == '__main__': # Create a simple app for testing. app = wx.PySimpleApp() # Create the form, no parent needed frame = MediaConvert(None) # , u'E:\\Vidëo\\Demo\\Demo.mpg') # Show the Dialog Box and process the result. frame.ShowModal() # Destroy the dialog box. frame.Destroy() # Call the app's MainLoop() app.MainLoop()
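# --- Editor's sketch (not part of Transana): the cp1252 fallback in ProcessMediaFile()
# above is the file's central portability trick, so here it is reduced to a standalone
# helper. The helper name and the 'tmp_dir' argument (standing in for self.tmpPath) are
# illustrative assumptions, and 'path' is assumed to be a unicode string, as in the dialog.
import os
import shutil

def cp1252_safe_copy(path, tmp_dir):
    """Return 'path' unchanged if its name is cp1252-encodable (safe for the bundled
    FFmpeg on Windows); otherwise copy it into tmp_dir under a safe 'Input<ext>'
    name and return that copy's path."""
    try:
        path.encode('cp1252')  # raises UnicodeEncodeError for unsafe names
        return path
    except UnicodeEncodeError:
        ext = os.path.splitext(path)[1]
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        safe = os.path.join(tmp_dir, 'Input' + ext)
        shutil.copyfile(path, safe)  # FFmpeg then works on the renamed copy
        return safe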
sanyaade-mediadev/Transana
MediaConvert.py
Python
gpl-2.0
92,531
[ "MOE" ]
a778320ab8273dc4e24c81d1e219f978d72641e410db2096388ede1f3af277d6
import logging

from igraph import Graph
from more_itertools import unique_everseen
from parvusdb import GraphDatabase

from .base_writer import BaseWriter


class RelationTripletsWriter(BaseWriter):
    _logger = logging.getLogger(__name__)

    def visit(self, g):
        triplets = self.__get_relations_and_entities_from_graph(g)
        return triplets

    def __get_relations_and_entities_from_graph(self, g):
        if not isinstance(g, Graph):
            raise TypeError("The writer needs an igraph.Graph as an argument")
        db = GraphDatabase(g)
        lst = db.query("MATCH {}(a), {'type': 'relation', 'name': 'r'}(a,b), {}(b) RETURN a, b, r",
                       repeat_n_times=5)
        triplets = [(self.__get_correct_name(item['a'], g),
                     item['r']['text'],
                     self.__get_correct_name(item['b'], g))
                    for item in lst]
        return list(unique_everseen(triplets, key=tuple))

    def __get_correct_name(self, node, g):
        coreferent_name = node['refers_to']
        if not coreferent_name:
            return node['compound']
        return '|'.join(coreferent_name)
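A minimal usage sketch (hedged: `g` stands for a parsed igraph.Graph produced
upstream by pynsett's parsing pipeline, which is assumed here rather than shown):

    from pynsett.writer.relation_triplets_writer import RelationTripletsWriter

    writer = RelationTripletsWriter()
    # visit() returns a de-duplicated list of (subject, relation_text, object) tuples
    for subject, relation, obj in writer.visit(g):
        print(subject, relation, obj)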
fractalego/pynsett
pynsett/writer/relation_triplets_writer.py
Python
mit
1,132
[ "VisIt" ]
431bbde156e9680c90c98bf9b66356ba119daa31b8896b5af676576e0e38f8b5
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import numpy as np from bigdl.util.common import JTensor from bigdl.util.common import JavaValue from bigdl.util.common import callBigDlFunc from bigdl.util.common import callJavaFunc from bigdl.util.common import get_spark_context from bigdl.util.common import to_list from bigdl.util.common import INTMAX, INTMIN, DOUBLEMAX from bigdl.optim.optimizer import L1Regularizer, L2Regularizer, L1L2Regularizer if sys.version >= '3': long = int unicode = str class Node(JavaValue): """ Represent a node in a graph. The connections between nodes are directed. """ def __init__(self, jvalue, bigdl_type, *args): self.value = jvalue if jvalue else callBigDlFunc( bigdl_type, JavaValue.jvm_class_constructor(self), *args) self.bigdl_type = bigdl_type @classmethod def of(cls, jvalue, bigdl_type="float"): return Node(jvalue, bigdl_type) def element(self): return Layer.of(self.value.element()) class Layer(JavaValue): """ Layer is the basic component of a neural network and it's also the base class of layers. Layer can connect to others to construct a complex neural network. """ def __init__(self, jvalue, bigdl_type, *args): self.value = jvalue if jvalue else callBigDlFunc( bigdl_type, JavaValue.jvm_class_constructor(self), *args) self.bigdl_type = bigdl_type def __str__(self): """ >>> conv2 = SpatialConvolution(6, 12, 5, 5).set_name("conv2") creating: createSpatialConvolution >>> print(conv2) SpatialConvolution[conv2](6 -> 12, 5 x 5, 1, 1, 0, 0) """ return self.value.toString() def __call__(self, x=None): """ Some other modules point to current module :param x: upstream module nodes. x is either a Node or list of Node. :return: node containing current module """ x = x if x else [] return Node.of(callBigDlFunc(self.bigdl_type, "createNode", self, to_list(x))) @classmethod def of(cls, jvalue, bigdl_type="float"): """ Create a Python Layer base on the given java value :param jvalue: Java object create by Py4j :return: A Python Layer """ model = Layer(jvalue, bigdl_type) return model def set_name(self, name): """ Give this model a name. There would be a generated name consist of class name and UUID if user doesn't set it. """ callJavaFunc(get_spark_context(), self.value.setName, name) return self def name(self): """ Name of this layer """ return callJavaFunc(get_spark_context(), self.value.getName) def set_seed(self, seed=123): """ You can control the random seed which used to init weights for this model. :param seed: random seed :return: Model itself. 
""" callBigDlFunc(self.bigdl_type, "setModelSeed", seed) return self def get_dtype(self): if "float" == self.bigdl_type: return "float32" else: return "float64" @staticmethod def check_input(input): """ :param input: ndarray or list of ndarray :return: (list of JTensor, isTable) """ if type(input) is list: if len(input) == 0: raise Exception('Error when checking: empty input') if not hasattr(input[0], 'shape'): raise Exception( 'Error when checking: expecting list of ndarray') return [JTensor.from_ndarray(i) for i in input], True else: if not hasattr(input, 'shape'): raise Exception( 'Error when checking: expecting list of ndarray') return [JTensor.from_ndarray(input)], False @staticmethod def convert_output(output): if type(output) is JTensor: return output.to_ndarray() elif(len(output) == 1): return output[0].to_ndarray() else: return [x.to_ndarray() for x in output] def forward(self, input): """ NB: It's for debug only, please use optimizer.optimize() in production. Takes an input object, and computes the corresponding output of the module :param input: ndarray or list of ndarray :return: ndarray or list of ndarray """ jinput, input_is_table = self.check_input(input) output = callBigDlFunc(self.bigdl_type, "modelForward", self.value, jinput, input_is_table) return self.convert_output(output) def backward(self, input, grad_output): """ NB: It's for debug only, please use optimizer.optimize() in production. Performs a back-propagation step through the module, with respect to the given input. In general this method makes the assumption forward(input) has been called before, with the same input. This is necessary for optimization reasons. If you do not respect this rule, backward() will compute incorrect gradients. :param input: ndarray or list of ndarray :param grad_output: ndarray or list of ndarray :return: ndarray or list of ndarray """ jinput, input_is_table = self.check_input(input) jgrad_output, grad_output_is_table = self.check_input(grad_output) output = callBigDlFunc(self.bigdl_type, "modelBackward", self.value, jinput, input_is_table, jgrad_output, grad_output_is_table) return self.convert_output(output) def zero_grad_parameters(self): """ NB: It's for debug only, please use optimizer.optimize() in production. If the module has parameters, this will zero the accumulation of the gradients with respect to these parameters. Otherwise, it does nothing. """ callJavaFunc(get_spark_context(), self.value.zeroGradParameters) def update_parameters(self, learning_rate): """ NB: It's for debug only, please use optimizer.optimize() in production. """ callBigDlFunc(self.bigdl_type, "updateParameters", self.value, learning_rate) def reset(self): """ Initialize the model weights. """ callJavaFunc(get_spark_context(), self.value.reset) return self def parameters(self): """ Get the model parameters which containing: weight, bias, gradBias, gradWeight :return: dict(layername -> dict(parametername -> ndarray)) """ name_to_params = callBigDlFunc(self.bigdl_type, "modelGetParameters", self.value) def to_ndarray(params): return dict((param_name, np.array(values[0], dtype=self.get_dtype()).reshape( values[1])) for param_name, values in params.items()) return dict((layer_name, to_ndarray(params)) for layer_name, params in name_to_params.items()) def predict(self, data_rdd): """ Model inference base on the given data. You need to invoke collect() to trigger those action \ as the returning result is an RDD. :param data_rdd: the data to be predict. :return: An RDD represent the predict result. 
""" result = callBigDlFunc(self.bigdl_type, "modelPredictRDD", self.value, data_rdd) return result.map(lambda data: data.to_ndarray()) def test(self, val_rdd, batch_size, val_methods): """ A method to benchmark the model quality. :param val_rdd: the input data :param batch_size: batch size :param val_methods: a list of validation methods. i.e: Top1Accuracy,Top5Accuracy and Loss. :return: """ return callBigDlFunc(self.bigdl_type, "modelTest", self.value, val_rdd, batch_size, val_methods) def set_weights(self, weights): """ Set weights for this layer :param weights: a list of numpy arrays which represent weight and bias :return: >>> linear = Linear(3,2) creating: createLinear >>> linear.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])]) >>> weights = linear.get_weights() >>> weights[0].shape == (2,3) True >>> weights[0][0] array([ 1., 2., 3.], dtype=float32) >>> weights[1] array([ 7., 8.], dtype=float32) >>> relu = ReLU() creating: createReLU >>> from py4j.protocol import Py4JJavaError >>> try: ... relu.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])]) ... except Py4JJavaError as err: ... print(err.java_exception) ... java.lang.IllegalArgumentException: requirement failed: this layer does not have weight/bias >>> relu.get_weights() The layer does not have weight/bias >>> add = Add(2) creating: createAdd >>> try: ... add.set_weights([np.array([7,8]), np.array([1,2])]) ... except Py4JJavaError as err: ... print(err.java_exception) ... java.lang.IllegalArgumentException: requirement failed: the number of input weight/bias is not consistant with number of weight/bias of this layer >>> cAdd = CAdd([4, 1]) creating: createCAdd >>> cAdd.set_weights(np.ones([4, 1])) >>> (cAdd.get_weights()[0] == np.ones([4, 1])).all() True """ tensors = [JTensor.from_ndarray(param, self.bigdl_type) for param in to_list(weights)] callBigDlFunc(self.bigdl_type, "setWeights", self.value, tensors) def get_weights(self): """ Get weights for this layer :return: list of numpy arrays which represent weight and bias """ tensorWeights = callBigDlFunc(self.bigdl_type, "getWeights", self.value) if tensorWeights is not None: return [tensor.to_ndarray() for tensor in tensorWeights] else: print("The layer does not have weight/bias") return None def save(self, path, over_write = False): callBigDlFunc(self.bigdl_type, "modelSave", self.value, path, over_write) def setWRegularizer(self, wRegularizer): ''' set weight regularizer :param wRegularizer: weight regularizer :return: ''' self.value.wRegularizer = wRegularizer.value def setBRegularizer(self, bRegularizer): ''' set bias regularizer :param wRegularizer: bias regularizer :return: ''' self.value.bRegularizer = bRegularizer.value class Container(Layer): ''' [[Container]] is a sub-class of Model that declares methods defined in all containers. A container usually contain some other modules which can be added through the "add" method ''' def __init__(self, jvalue, bigdl_type, *args): super(Container, self).__init__(jvalue, bigdl_type, *args) def add(self, model): self.value.add(model.value) return self class Model(Container): """ A graph container. Each node can have multiple inputs. The output of the node should be a tensor. The output tensor can be connected to multiple nodes. So the module in each node can have a tensor or table input, and should have a tensor output. The graph container can have multiple inputs and multiple outputs. If there's one input, the input data fed to the graph module should be a tensor. 
    If there are multiple inputs, the input data fed to the graph module should
    be a table, i.e. a sequence of tensors. The order of the input tensors
    should be the same as the order of the input nodes. The same applies to the
    gradient passed back to the module during back propagation.
    If there is one output, the module output is a tensor. If there are
    multiple outputs, the module output is a table, i.e. a sequence of tensors.
    The order of the output tensors is the same as the order of the output
    modules. The same applies to the gradient passed to the module during back
    propagation.

    All inputs should be able to connect to the outputs through some path in
    the graph. Some successors of the input nodes may not connect to the
    outputs; if so, those nodes will be excluded from the computation.
    """

    def __init__(self, inputs, outputs, bigdl_type="float"):
        super(Model, self).__init__(None, bigdl_type, to_list(inputs), to_list(outputs))

    @staticmethod
    def load(path, bigdl_type="float"):
        """
        Load a pre-trained BigDL model.

        :param path: The path containing the pre-trained model.
        :return: A pre-trained model.
        """
        jmodel = callBigDlFunc(bigdl_type, "loadBigDL", path)
        return Layer.of(jmodel)

    @staticmethod
    def load_torch(path, bigdl_type="float"):
        """
        Load a pre-trained Torch model.

        :param path: The path containing the pre-trained model.
        :return: A pre-trained model.
        """
        jmodel = callBigDlFunc(bigdl_type, "loadTorch", path)
        return Layer.of(jmodel)

    @staticmethod
    def load_caffe(model, defPath, modelPath, match_all=True, bigdl_type="float"):
        """
        Load a pre-trained Caffe model.

        :param model: A BigDL model definition equivalent to the pre-trained Caffe model.
        :param defPath: The path containing the Caffe model definition.
        :param modelPath: The path containing the pre-trained Caffe model.
        :return: A pre-trained model.
        """
        jmodel = callBigDlFunc(bigdl_type, "loadCaffe", model, defPath, modelPath, match_all)
        return Layer.of(jmodel)

    @staticmethod
    def load_tensorflow(path, inputs, outputs, byte_order="little_endian", bigdl_type="float"):
        """
        Load a pre-trained TensorFlow model.

        :param path: The path containing the pre-trained model.
        :return: A pre-trained model.
        """
        jmodel = callBigDlFunc(bigdl_type, "loadTF", path, inputs, outputs, byte_order)
        return Model.of(jmodel)


class Linear(Layer):

    '''
    The [[Linear]] module applies a linear transformation to the input data,
    i.e. `y = Wx + b`. The input given in `forward(input)` must be either a
    vector (1D tensor) or a matrix (2D tensor). If the input is a vector, it
    must have the size of `inputSize`. If it is a matrix, then each row is
    assumed to be an input sample of the given batch (the number of rows is the
    batch size and the number of columns should be equal to the `inputSize`).

    :param input_size: the size of each input sample
    :param output_size: the size of the module output of each sample
    :param init_method: two initialization methods are supported, [[Default]]
     and [[Xavier]], where [[Xavier]] sets the bias to zero. For more detailed
     information about `initMethod`, please refer to [[InitializationMethod]]
    :param wRegularizer: instance of [[Regularizer]] (e.g. L1 or L2
     regularization), applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]] applied to the bias.
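    For illustration (a sketch of the shape semantics described above): with
    input_size=3 and output_size=2, a single sample of size 3 maps to an output
    of size 2, and an N x 3 mini-batch maps to an N x 2 output.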
>>> linear = Linear(100, 10, "Xavier", True, L1Regularizer(0.5), L1Regularizer(0.5)) creating: createL1Regularizer creating: createL1Regularizer creating: createLinear ''' def __init__(self, input_size, output_size, init_method="default", with_bias=True, wRegularizer=None, bRegularizer=None, bigdl_type="float"): super(Linear, self).__init__(None, bigdl_type, input_size, output_size, init_method, with_bias, wRegularizer, bRegularizer) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class ReLU(Layer): ''' Applies the rectified linear unit (ReLU) function element-wise to the input Tensor, thus outputting a Tensor of the same dimension. ReLU is defined as: f(x) = max(0, x) Can optionally do its operation in-place without using extra state memory >>> relu = ReLU() creating: createReLU ''' def __init__(self, ip=False, bigdl_type="float"): super(ReLU, self).__init__(None, bigdl_type, ip) class Tanh(Layer): ''' Applies the Tanh function element-wise to the input Tensor, thus outputting a Tensor of the same dimension. Tanh is defined as f(x) = (exp(x)-exp(-x))/(exp(x)+exp(-x)). >>> tanh = Tanh() creating: createTanh ''' def __init__(self, bigdl_type="float"): super(Tanh, self).__init__(None, bigdl_type) class Echo(Layer): ''' This module is for debug purpose, which can print activation and gradient in your model topology >>> echo = Echo() creating: createEcho ''' def __init__(self, bigdl_type="float"): super(Echo, self).__init__(None, bigdl_type) class LogSoftMax(Layer): ''' Applies the LogSoftMax function to an n-dimensional input Tensor. LogSoftmax is defined as: f_i(x) = log(1 / a exp(x_i)) where a = sum_j[exp(x_j)]. >>> logSoftMax = LogSoftMax() creating: createLogSoftMax ''' def __init__(self, bigdl_type="float"): super(LogSoftMax, self).__init__(None, bigdl_type) class Sequential(Container): ''' Sequential provides a means to plug layers together in a feed-forward fully connected manner. >>> echo = Echo() creating: createEcho >>> s = Sequential() creating: createSequential >>> s = s.add(echo) >>> s = s.add(s) >>> s = s.add(echo) ''' def __init__(self, bigdl_type="float"): super(Sequential, self).__init__(None, bigdl_type) class SpatialConvolution(Layer): ''' Applies a 2D convolution over an input image composed of several input planes. The input tensor in forward(input) is expected to be a 3D tensor (nInputPlane x height x width). :param n_input_plane The number of expected input planes in the image given into forward() :param n_output_plane The number of output planes the convolution layer will produce. :param kernel_w The kernel width of the convolution :param kernel_h The kernel height of the convolution :param stride_w The step of the convolution in the width dimension. :param stride_h The step of the convolution in the height dimension :param pad_w The additional zeros added per width to the input planes. :param pad_h The additional zeros added per height to the input planes. :param n_group Kernel group number :param propagate_back Propagate gradient back :param init_method Initialization method to initialize bias and weight :param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices. :param bRegularizer: instance of [[Regularizer]]applied to the bias. 
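    As a shape sketch (following the standard convolution arithmetic, i.e.
    owidth = floor((width + 2*pad_w - kernel_w) / stride_w) + 1): a 3 x 32 x 32
    input with n_output_plane=16, a 5 x 5 kernel, stride 1 and no padding
    yields a 16 x 28 x 28 output.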
>>> spatialConvolution = SpatialConvolution(6, 12, 5, 5) creating: createSpatialConvolution >>> spatialConvolution.setWRegularizer(L1Regularizer(0.5)) creating: createL1Regularizer >>> spatialConvolution.setBRegularizer(L1Regularizer(0.5)) creating: createL1Regularizer ''' def __init__(self, n_input_plane, n_output_plane, kernel_w, kernel_h, stride_w=1, stride_h=1, pad_w=0, pad_h=0, n_group=1, propagate_back=True, init_method="default", wRegularizer=None, bRegularizer=None, bigdl_type="float"): super(SpatialConvolution, self).__init__(None, bigdl_type, n_input_plane, n_output_plane, kernel_w, kernel_h, stride_w, stride_h, pad_w, pad_h, n_group, propagate_back, init_method, wRegularizer, bRegularizer) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class SpatialMaxPooling(Layer): ''' Applies 2D max-pooling operation in kWxkH regions by step size dWxdH steps. The number of output features is equal to the number of input planes. If the input image is a 3D tensor nInputPlane x height x width, the output image size will be nOutputPlane x oheight x owidth where owidth = op((width + 2*padW - kW) / dW + 1) oheight = op((height + 2*padH - kH) / dH + 1) op is a rounding operator. By default, it is floor. It can be changed by calling :ceil() or :floor() methods. :param kW: kernel width :param kH: kernel height :param dW: step size in width :param dH: step size in height :param padW: padding in width :param padH: padding in height >>> spatialMaxPooling = SpatialMaxPooling(2, 2, 2, 2) creating: createSpatialMaxPooling ''' # to_ceil: call floor() when False; call ceil() when True def __init__(self, kw, kh, dw, dh, pad_w=0, pad_h=0, to_ceil=False, bigdl_type="float"): super(SpatialMaxPooling, self).__init__(None, bigdl_type, kw, kh, dw, dh, pad_w, pad_h, to_ceil) class Select(Layer): ''' A Simple layer selecting an index of the input tensor in the given dimension :param dimension: the dimension to select :param index: the index of the dimension to be selected >>> select = Select(1, 1) creating: createSelect ''' def __init__(self, dim, index, bigdl_type="float"): super(Select, self).__init__(None, bigdl_type, dim, index) class Recurrent(Container): ''' Recurrent module is a container of rnn cells Different types of rnn cells can be added using add() function >>> recurrent = Recurrent() creating: createRecurrent ''' def __init__(self, bigdl_type="float"): super(Recurrent, self).__init__(None, bigdl_type) class LSTM(Layer): ''' | Long Short Term Memory architecture. | Ref. | A.: http://arxiv.org/pdf/1303.5778v1 (blueprint for this module) | B. http://web.eecs.utk.edu/~itamar/courses/ECE-692/Bobby_paper1.pdf | C. http://arxiv.org/pdf/1503.04069v1.pdf | D. https://github.com/wojzaremba/lstm | E. https://github.com/Element-Research/rnn/blob/master/FastLSTM.lua :param inputSize: the size of each input vector :param hiddenSize: Hidden unit size in the LSTM :param p: is used for [[Dropout]] probability. For more details aboutRNN dropouts, please refer to[RnnDrop: A Novel Dropout for RNNs in ASR](http://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf)[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/pdf/1512.05287.pdf) :param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices. :param uRegularizer: instance [[Regularizer]](eg. 
L1 or L2 regularization), applied to the recurrent weights matrices. :param bRegularizer: instance of [[Regularizer]]applied to the bias. >>> lstm = LSTM(4, 3, 0.5, L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5)) creating: createL1Regularizer creating: createL1Regularizer creating: createL1Regularizer creating: createLSTM ''' def __init__(self, input_size, hidden_size, p=0.0, wRegularizer=None, uRegularizer=None, bRegularizer=None, bigdl_type="float"): super(LSTM, self).__init__(None, bigdl_type, input_size, hidden_size, p, wRegularizer, uRegularizer, bRegularizer) class LSTMPeephole(Layer): ''' | Long Short Term Memory architecture with peephole. | Ref. A.: http://arxiv.org/pdf/1303.5778v1 (blueprint for this module) | B. http://web.eecs.utk.edu/~itamar/courses/ECE-692/Bobby_paper1.pdf | C. http://arxiv.org/pdf/1503.04069v1.pdf | D. https://github.com/wojzaremba/lstm | E. https://github.com/Element-Research/rnn/blob/master/LSTM.lua :param input_size: the size of each input vector :param hidden_size: Hidden unit size in the LSTM :param p: is used for [[Dropout]] probability. For more details aboutRNN dropouts, please refer to[RnnDrop: A Novel Dropout for RNNs in ASR](http://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf)[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/pdf/1512.05287.pdf) :param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices. :param uRegularizer: instance [[Regularizer]](eg. L1 or L2 regularization), applied to the recurrent weights matrices. :param bRegularizer: instance of [[Regularizer]]applied to the bias. >>> lstm = LSTMPeephole(4, 3, 0.5, L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5)) creating: createL1Regularizer creating: createL1Regularizer creating: createL1Regularizer creating: createLSTMPeephole ''' def __init__(self, input_size, hidden_size, p=0.0, wRegularizer=None, uRegularizer=None, bRegularizer=None, bigdl_type="float"): super(LSTMPeephole, self).__init__(None, bigdl_type, input_size, hidden_size, p, wRegularizer, uRegularizer, bRegularizer) class GRU(Layer): ''' Gated Recurrent Units architecture. The first input in sequence uses zero value for cell and hidden state | Ref. | http://www.wildml.com/2015/10/recurrent-neural-network-tutorial-part-4-implementing-a-grulstm-rnn-with-python-and-theano/ | https://github.com/Element-Research/rnn/blob/master/GRU.lua :param input_size: the size of each input vector :param hidden_size: Hidden unit size in GRU :param p: is used for [[Dropout]] probability. For more details aboutRNN dropouts, please refer to[RnnDrop: A Novel Dropout for RNNs in ASR](http://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf)[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/pdf/1512.05287.pdf) :param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices. :param uRegularizer: instance [[Regularizer]](eg. L1 or L2 regularization), applied to the recurrent weights matrices. :param bRegularizer: instance of [[Regularizer]]applied to the bias. 
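    As a shape sketch (an assumption about the Recurrent container's batch x
    time x feature layout, not verified against a specific build): wrapped in
    Recurrent(), a GRU(4, 3) maps a batch x time x 4 input sequence to a
    batch x time x 3 output sequence.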
>>> gru = GRU(4, 3, 0.5, L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5)) creating: createL1Regularizer creating: createL1Regularizer creating: createL1Regularizer creating: createGRU ''' def __init__(self, input_size, hidden_size, p=0.0, wRegularizer=None, uRegularizer=None, bRegularizer=None, bigdl_type="float"): super(GRU, self).__init__(None, bigdl_type, input_size, hidden_size, p, wRegularizer, uRegularizer, bRegularizer) class RnnCell(Layer): ''' It is a simple RNN. User can pass an activation function to the RNN. :param input_size: the size of each input vector :param hidden_size: Hidden unit size in simple RNN :param activation: activation function :param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices. :param uRegularizer: instance [[Regularizer]](eg. L1 or L2 regularization), applied to the recurrent weights matrices. :param bRegularizer: instance of [[Regularizer]](../regularizers.md),applied to the bias. >>> reshape = RnnCell(4, 3, Tanh(), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5)) creating: createTanh creating: createL1Regularizer creating: createL1Regularizer creating: createL1Regularizer creating: createRnnCell ''' def __init__(self, input_size, hidden_size, activation, wRegularizer=None, uRegularizer=None, bRegularizer=None, bigdl_type="float"): super(RnnCell, self).__init__(None, bigdl_type, input_size, hidden_size, activation, wRegularizer, uRegularizer, bRegularizer) class TimeDistributed(Layer): ''' This layer is intended to apply contained layer to each temporal time slice of input tensor. For instance, The TimeDistributed Layer can feed each time slice of input tensor to the Linear layer. >>> td = TimeDistributed(Linear(2, 3)) creating: createLinear creating: createTimeDistributed ''' def __init__(self, model, bigdl_type="float"): super(TimeDistributed, self).__init__(None, bigdl_type, model) class Concat(Container): ''' Concat concatenates the output of one layer of "parallel" modules along the provided {@code dimension}: they take the same inputs, and their output is concatenated. ``` +-----------+ +----> module1 -----+ | | | | input -----+----> module2 -----+----> output | | | | +----> module3 -----+ +-----------+ ``` :param dimension: dimension >>> concat = Concat(2) creating: createConcat ''' def __init__(self, dimension, bigdl_type="float"): super(Concat, self).__init__(None, bigdl_type, dimension) class SpatialAveragePooling(Layer): ''' Applies 2D average-pooling operation in kWxkH regions by step size dWxdH steps. The number of output features is equal to the number of input planes. 
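    The output size follows the same arithmetic as SpatialMaxPooling above; for
    example (a sketch): a 7 x 7 kernel with the default 1 x 1 step over a
    single 12 x 12 input plane yields a 6 x 6 output, since (12 - 7) / 1 + 1 = 6.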
:param kW: kernel width :param kH: kernel height :param dW: step width :param dH: step height :param padW: padding width :param padH: padding height :param ceilMode: whether the output size is to be ceiled or floored :param countIncludePad: whether to include padding when dividing thenumber of elements in pooling region :param divide: whether to do the averaging >>> spatialAveragePooling = SpatialAveragePooling(7,7) creating: createSpatialAveragePooling ''' def __init__(self, kw, kh, dw=1, dh=1, pad_w=0, pad_h=0, ceil_mode=False, count_include_pad=True, divide=True, bigdl_type="float"): super(SpatialAveragePooling, self).__init__(None, bigdl_type, kw, kh, dw, dh, pad_w, pad_h, ceil_mode, count_include_pad, divide) class SpatialBatchNormalization(Layer): ''' This file implements Batch Normalization as described in the paper: "Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift" by Sergey Ioffe, Christian Szegedy This implementation is useful for inputs coming from convolution layers. For non-convolutional layers, see [[BatchNormalization]] The operation implemented is: ``` ( x - mean(x) ) y = -------------------- * gamma + beta standard-deviation(x) ``` where gamma and beta are learnable parameters. The learning of gamma and beta is optional. >>> spatialBatchNormalization = SpatialBatchNormalization(1) creating: createSpatialBatchNormalization ''' def __init__(self, n_output, eps=1e-5, momentum=0.1, affine=True, bigdl_type="float"): super(SpatialBatchNormalization, self).__init__(None, bigdl_type, n_output, eps, momentum, affine) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class SpatialCrossMapLRN(Layer): ''' Applies Spatial Local Response Normalization between different feature maps. The operation implemented is: ``` x_f y_f = ------------------------------------------------- (k+(alpha/size)* sum_{l=l1 to l2} (x_l^2^))^beta^ ``` where x_f is the input at spatial locations h,w (not shown for simplicity) and feature map f, l1 corresponds to max(0,f-ceil(size/2)) and l2 to min(F, f-ceil(size/2) + size). Here, F is the number of feature maps. :param size: the number of channels to sum over (for cross channel LRN) or the side length ofthe square region to sum over (for within channel LRN) :param alpha: the scaling parameter :param beta: the exponent :param k: a constant >>> spatialCrossMapLRN = SpatialCrossMapLRN() creating: createSpatialCrossMapLRN ''' def __init__(self, size=5, alpha=1.0, beta=0.75, k=1.0, bigdl_type="float"): super(SpatialCrossMapLRN, self).__init__(None, bigdl_type, size, alpha, beta, k) class Dropout(Layer): ''' Dropout masks(set to zero) parts of input using a bernoulli distribution. Each input element has a probability initP of being dropped. If scale is set, the outputs are scaled by a factor of 1/(1-initP) during training. During evaluating, output is the same as input. :param initP: probability to be dropped :param inplace: inplace model :param scale: if scale by a factor of 1/(1-initP) >>> dropout = Dropout(0.4) creating: createDropout ''' def __init__(self, init_p=0.5, inplace=False, scale=True, bigdl_type="float"): super(Dropout, self).__init__(None, bigdl_type, init_p, inplace, scale) class View(Layer): ''' This module creates a new view of the input tensor using the sizes passed to the constructor. 
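    For example (matching the doctest below): View([1024, 2]) reinterprets any
    input holding 2048 elements as a 1024 x 2 tensor.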
The method setNumInputDims() allows to specify the expected number of dimensions of the inputs of the modules. This makes it possible to use minibatch inputs when using a size -1 for one of the dimensions. :param size: sizes use for creates a new view >>> view = View([1024,2]) creating: createView ''' def __init__(self, sizes, num_input_dims=0, bigdl_type="float"): super(View, self).__init__(None, bigdl_type, sizes, num_input_dims) class Abs(Layer): ''' an element-wise abs operation >>> abs = Abs() creating: createAbs ''' def __init__(self, bigdl_type="float"): super(Abs, self).__init__(None, bigdl_type) class Add(Layer): ''' adds a bias term to input data ; :param input_size: size of input data >>> add = Add(1) creating: createAdd ''' def __init__(self, input_size, bigdl_type="float"): super(Add, self).__init__(None, bigdl_type, input_size) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class AddConstant(Layer): ''' adding a constant :param constant_scalar: constant value :param inplace: Can optionally do its operation in-place without using extra state memory >>> addConstant = AddConstant(1e-5, True) creating: createAddConstant ''' def __init__(self, constant_scalar, inplace=False, bigdl_type="float"): super(AddConstant, self).__init__(None, bigdl_type, constant_scalar, inplace) class BatchNormalization(Layer): ''' This layer implements Batch Normalization as described in the paper: "Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift" by Sergey Ioffe, Christian Szegedy https://arxiv.org/abs/1502.03167 This implementation is useful for inputs NOT coming from convolution layers. For convolution layers, use nn.SpatialBatchNormalization. The operation implemented is: ``` ( x - mean(x) ) y = -------------------- * gamma + beta standard-deviation(x) ``` where gamma and beta are learnable parameters.The learning of gamma and beta is optional. :param n_output: output feature map number :param eps: avoid divide zero :param momentum: momentum for weight update :param affine: affine operation on output or not >>> batchNormalization = BatchNormalization(1, 1e-5, 1e-5, True) creating: createBatchNormalization ''' def __init__(self, n_output, eps=1e-5, momentum=0.1, affine=True, bigdl_type="float"): super(BatchNormalization, self).__init__(None, bigdl_type, n_output, eps, momentum, affine) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class Bilinear(Layer): ''' a bilinear transformation with sparse inputs, The input tensor given in forward(input) is a table containing both inputs x_1 and x_2, which are tensors of size N x inputDimension1 and N x inputDimension2, respectively. :param input_size1 input dimension of x_1 :param input_size2 input dimension of x_2 :param output_size output dimension :param bias_res whether use bias :param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices. :param bRegularizer: instance of [[Regularizer]]applied to the bias. 
>>> bilinear = Bilinear(1, 1, 1, True, L1Regularizer(0.5)) creating: createL1Regularizer creating: createBilinear ''' def __init__(self, input_size1, input_size2, output_size, bias_res=True, wRegularizer=None, bRegularizer=None, bigdl_type="float"): super(Bilinear, self).__init__(None, bigdl_type, input_size1, input_size2, output_size, bias_res, wRegularizer, bRegularizer) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class Bottle(Container): ''' Bottle allows varying dimensionality input to be forwarded through any module that accepts input of nInputDim dimensions, and generates output of nOutputDim dimensions. :param module: transform module :param n_input_dim: nInputDim dimensions of module :param n_output_dim1: output of nOutputDim dimensions >>> bottle = Bottle(Linear(100,10), 1, 1) creating: createLinear creating: createBottle ''' def __init__(self, module, n_input_dim=2, n_output_dim1=INTMAX, bigdl_type="float"): super(Bottle, self).__init__(None, bigdl_type, module, n_input_dim, n_output_dim1) class CAdd(Layer): ''' This layer has a bias tensor with given size. The bias will be added element wise to the input tensor. If the element number of the bias tensor match the input tensor, a simply element wise will be done. Or the bias will be expanded to the same size of the input. The expand means repeat on unmatched singleton dimension(if some unmatched dimension isn't singleton dimension, it will report an error). If the input is a batch, a singleton dimension will be add to the first dimension before the expand. :param size: the size of the bias :param bRegularizer: instance of [[Regularizer]]applied to the bias. >>> cAdd = CAdd([1,2]) creating: createCAdd ''' def __init__(self, size, bRegularizer=None, bigdl_type="float"): super(CAdd, self).__init__(None, bigdl_type, size, bRegularizer) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class CAddTable(Layer): ''' Merge the input tensors in the input table by element wise adding them together. The input table is actually an array of tensor with same size. :param inplace: reuse the input memory >>> cAddTable = CAddTable(True) creating: createCAddTable ''' def __init__(self, inplace=False, bigdl_type="float"): super(CAddTable, self).__init__(None, bigdl_type, inplace) class CDivTable(Layer): ''' Takes a table with two Tensor and returns the component-wise division between them. >>> cDivTable = CDivTable() creating: createCDivTable ''' def __init__(self, bigdl_type="float"): super(CDivTable, self).__init__(None, bigdl_type) class CMaxTable(Layer): ''' Takes a table of Tensors and outputs the max of all of them. >>> cMaxTable = CMaxTable() creating: createCMaxTable ''' def __init__(self, bigdl_type="float"): super(CMaxTable, self).__init__(None, bigdl_type) class CMinTable(Layer): ''' Takes a table of Tensors and outputs the min of all of them. 
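    For instance (a sketch of the element-wise semantics): given a table of two
    1-D tensors [1.0, 4.0] and [3.0, 2.0], the element-wise minimum is [1.0, 2.0].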
>>> cMinTable = CMinTable() creating: createCMinTable ''' def __init__(self, bigdl_type="float"): super(CMinTable, self).__init__(None, bigdl_type) class CMul(Layer): ''' Applies a component-wise multiplication to the incoming data :param size: size of the data >>> cMul = CMul([1,2]) creating: createCMul ''' def __init__(self, size, wRegularizer=None, bigdl_type="float"): super(CMul, self).__init__(None, bigdl_type, size, wRegularizer) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class CMulTable(Layer): ''' Takes a table of Tensors and outputs the multiplication of all of them. >>> cMulTable = CMulTable() creating: createCMulTable ''' def __init__(self, bigdl_type="float"): super(CMulTable, self).__init__(None, bigdl_type) class CSubTable(Layer): ''' Takes a table with two Tensor and returns the component-wise subtraction between them. >>> cSubTable = CSubTable() creating: createCSubTable ''' def __init__(self, bigdl_type="float"): super(CSubTable, self).__init__(None, bigdl_type) class Clamp(Layer): ''' Clamps all elements into the range [min_value, max_value]. Output is identical to input in the range, otherwise elements less than min_value (or greater than max_value) are saturated to min_value (or max_value). :param min: :param max: >>> clamp = Clamp(1, 3) creating: createClamp ''' def __init__(self, min, max, bigdl_type="float"): super(Clamp, self).__init__(None, bigdl_type, min, max) class Contiguous(Layer): ''' used to make input, grad_output both contiguous >>> contiguous = Contiguous() creating: createContiguous ''' def __init__(self, bigdl_type="float"): super(Contiguous, self).__init__(None, bigdl_type) class Cosine(Layer): ''' Cosine calculates the cosine similarity of the input to k mean centers. The input given in forward(input) must be either a vector (1D tensor) or matrix (2D tensor). If the input is a vector, it must have the size of inputSize. If it is a matrix, then each row is assumed to be an input sample of given batch (the number of rows means the batch size and the number of columns should be equal to the inputSize). 
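    Each output unit k computes (a sketch of the usual formulation) the cosine
    similarity y_k = <x, w_k> / (||x|| * ||w_k||) between the input x and the
    k-th weight vector w_k.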
:param input_size: the size of each input sample :param output_size: the size of the module output of each sample >>> cosine = Cosine(2,3) creating: createCosine ''' def __init__(self, input_size, output_size, bigdl_type="float"): super(Cosine, self).__init__(None, bigdl_type, input_size, output_size) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class CosineDistance(Layer): ''' Outputs the cosine distance between inputs >>> cosineDistance = CosineDistance() creating: createCosineDistance ''' def __init__(self, bigdl_type="float"): super(CosineDistance, self).__init__(None, bigdl_type) class DotProduct(Layer): ''' This is a simple table layer which takes a table of two tensors as input and calculate the dot product between them as outputs >>> dotProduct = DotProduct() creating: createDotProduct ''' def __init__(self, bigdl_type="float"): super(DotProduct, self).__init__(None, bigdl_type) class ELU(Layer): ''' D-A Clevert, Thomas Unterthiner, Sepp Hochreiter Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) [http://arxiv.org/pdf/1511.07289.pdf] >>> eLU = ELU(1e-5, True) creating: createELU ''' def __init__(self, alpha=1.0, inplace=False, bigdl_type="float"): super(ELU, self).__init__(None, bigdl_type, alpha, inplace) class Euclidean(Layer): ''' Outputs the Euclidean distance of the input to outputSize centers :param inputSize: inputSize :param outputSize: outputSize :param T: Numeric type. Only support float/double now >>> euclidean = Euclidean(1, 1, True) creating: createEuclidean ''' def __init__(self, input_size, output_size, fast_backward=True, bigdl_type="float"): super(Euclidean, self).__init__(None, bigdl_type, input_size, output_size, fast_backward) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class Exp(Layer): ''' Applies element-wise exp to input tensor. >>> exp = Exp() creating: createExp ''' def __init__(self, bigdl_type="float"): super(Exp, self).__init__(None, bigdl_type) class FlattenTable(Layer): ''' This is a table layer which takes an arbitrarily deep table of Tensors (potentially nested) as input and a table of Tensors without any nested table will be produced >>> flattenTable = FlattenTable() creating: createFlattenTable ''' def __init__(self, bigdl_type="float"): super(FlattenTable, self).__init__(None, bigdl_type) class GradientReversal(Layer): ''' It is a simple module preserves the input, but takes the gradient from the subsequent layer, multiplies it by -lambda and passes it to the preceding layer. This can be used to maximise an objective function whilst using gradient descent, as described in ["Domain-Adversarial Training of Neural Networks" (http://arxiv.org/abs/1505.07818)] :param lambda: hyper-parameter lambda can be set dynamically during training >>> gradientReversal = GradientReversal(1e-5) creating: createGradientReversal >>> gradientReversal = GradientReversal() creating: createGradientReversal ''' def __init__(self, the_lambda=1.0, bigdl_type="float"): super(GradientReversal, self).__init__(None, bigdl_type, the_lambda) class HardShrink(Layer): ''' This is a transfer layer which applies the hard shrinkage function element-wise to the input Tensor. 
    The parameter lambda is set to 0.5 by default.

    ```
            x, if x >  lambda
    f(x) =  x, if x < -lambda
            0, otherwise
    ```

    :param the_lambda: a threshold value whose default value is 0.5


    >>> hardShrink = HardShrink(1e-5)
    creating: createHardShrink
    '''

    def __init__(self, the_lambda=0.5, bigdl_type="float"):
        super(HardShrink, self).__init__(None, bigdl_type, the_lambda)


class HardTanh(Layer):

    '''
    Applies HardTanh to each element of input; HardTanh is defined:

    ```
           | maxValue, if x > maxValue
    f(x) = | minValue, if x < minValue
           | x, otherwise
    ```

    :param min_value: minValue in f(x), default is -1.
    :param max_value: maxValue in f(x), default is 1.
    :param inplace: whether to enable the in-place mode.


    >>> hardTanh = HardTanh(1e-5, 1e5, True)
    creating: createHardTanh
    >>> hardTanh = HardTanh()
    creating: createHardTanh
    '''

    def __init__(self, min_value=-1.0, max_value=1.0, inplace=False, bigdl_type="float"):
        super(HardTanh, self).__init__(None, bigdl_type, min_value, max_value, inplace)


class Index(Layer):

    '''
    Applies the Tensor index operation along the given dimension.

    :param dimension: the dimension to be indexed


    >>> index = Index(1)
    creating: createIndex
    '''

    def __init__(self, dimension, bigdl_type="float"):
        super(Index, self).__init__(None, bigdl_type, dimension)


class InferReshape(Layer):

    '''
    Reshape with support for inferred sizes. Positive numbers are used
    directly, setting the corresponding dimension of the output tensor. In
    addition, two special values are accepted:
    0 means "copy the respective dimension of the input", i.e. if the input has
    2 as its 1st dimension, the output will have 2 as its 1st dimension as well.
    -1 stands for "infer this from the other dimensions": this dimension is
    calculated to keep the overall element count the same as in the input.
    At most one -1 can be used in a reshape operation.
    For example, (4, 5, 6, 7) -> InferReshape([4, 0, 3, -1]) -> (4, 5, 3, 14),
    with the 1st and 3rd dims as given, the 2nd dim copied from the input, and
    the inferred dim equal to 14.

    :param size: the target tensor size
    :param batch_mode: whether in batch mode


    >>> inferReshape = InferReshape([4, 0, 3, -1], False)
    creating: createInferReshape
    '''

    def __init__(self, size, batch_mode=False, bigdl_type="float"):
        super(InferReshape, self).__init__(None, bigdl_type, size, batch_mode)


class JoinTable(Layer):

    '''
    It is a table module which takes a table of Tensors as input and outputs a
    Tensor by joining them together along the dimension `dimension`.

    The input to this layer is expected to be a tensor, or a batch of tensors;
    when using mini-batch, a batch of sample tensors will be passed to the
    layer and the user needs to specify the number of dimensions of each sample
    tensor in the batch using `nInputDims`.

    :param dimension: the dimension along which to join
    :param nInputDims: specify the number of dimensions that this module will
     receive. If it is more than the dimension of the input tensors, the first
     dimension is considered the batch size.


    >>> joinTable = JoinTable(1, 1)
    creating: createJoinTable
    '''

    def __init__(self, dimension, n_input_dims, bigdl_type="float"):
        super(JoinTable, self).__init__(None, bigdl_type, dimension, n_input_dims)


class L1Penalty(Layer):

    '''
    Adds an L1 penalty to an input (for sparsity).
    L1Penalty is an inline module that in its forward propagation copies the
    input Tensor directly to the output, computes an L1 loss of the latent
    state (input), and stores it in the module's loss field. During backward
    propagation: gradInput = gradOutput + gradLoss.
:param l1weight: :param sizeAverage: :param provideOutput: >>> l1Penalty = L1Penalty(1, True, True) creating: createL1Penalty ''' def __init__(self, l1weight, size_average=False, provide_output=True, bigdl_type="float"): super(L1Penalty, self).__init__(None, bigdl_type, l1weight, size_average, provide_output) class LeakyReLU(Layer): ''' It is a transfer module that applies LeakyReLU, which parameter negval sets the slope of the negative part: LeakyReLU is defined as: f(x) = max(0, x) + negval * min(0, x) :param negval: sets the slope of the negative partl :param inplace: if it is true, doing the operation in-place without using extra state memory >>> leakyReLU = LeakyReLU(1e-5, True) creating: createLeakyReLU ''' def __init__(self, negval=0.01, inplace=False, bigdl_type="float"): super(LeakyReLU, self).__init__(None, bigdl_type, negval, inplace) class Log(Layer): ''' Applies the log function element-wise to the input Tensor, thus outputting a Tensor of the same dimension. >>> log = Log() creating: createLog ''' def __init__(self, bigdl_type="float"): super(Log, self).__init__(None, bigdl_type) class LogSigmoid(Layer): ''' This class is a transform layer corresponding to the sigmoid function: f(x) = Log(1 / (1 + e ^^ (-x))) >>> logSigmoid = LogSigmoid() creating: createLogSigmoid ''' def __init__(self, bigdl_type="float"): super(LogSigmoid, self).__init__(None, bigdl_type) class LookupTable(Layer): ''' a convolution of width 1, commonly used for word embeddings :param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices. >>> lookupTable = LookupTable(1, 1, 1e-5, 1e-5, 1e-5, True, L1Regularizer(0.5)) creating: createL1Regularizer creating: createLookupTable ''' def __init__(self, n_index, n_output, padding_value=0.0, max_norm=DOUBLEMAX, norm_type=2.0, should_scale_grad_by_freq=False, wRegularizer=None, bigdl_type="float"): super(LookupTable, self).__init__(None, bigdl_type, n_index, n_output, padding_value, max_norm, norm_type, should_scale_grad_by_freq, wRegularizer) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class MM(Layer): ''' Module to perform matrix multiplication on two mini-batch inputs, producing a mini-batch. :param trans_a: specifying whether or not transpose the first input matrix :param trans_b: specifying whether or not transpose the second input matrix >>> mM = MM(True, True) creating: createMM ''' def __init__(self, trans_a=False, trans_b=False, bigdl_type="float"): super(MM, self).__init__(None, bigdl_type, trans_a, trans_b) class MV(Layer): ''' It is a module to perform matrix vector multiplication on two mini-batch inputs, producing a mini-batch. :param trans: whether make matrix transpose before multiplication >>> mV = MV(True) creating: createMV ''' def __init__(self, trans=False, bigdl_type="float"): super(MV, self).__init__(None, bigdl_type, trans) class MapTable(Container): ''' This class is a container for a single module which will be applied to all input elements. The member module is cloned as necessary to process all input elements. >>> mapTable = MapTable(Linear(100,10)) creating: createLinear creating: createMapTable ''' def __init__(self, module, bigdl_type="float"): super(MapTable, self).__init__(None, bigdl_type, module) class MaskedSelect(Layer): ''' Performs a torch.MaskedSelect on a Tensor. 
The mask is supplied as a tabular argument with the input on the forward and backward passes. >>> maskedSelect = MaskedSelect() creating: createMaskedSelect ''' def __init__(self, bigdl_type="float"): super(MaskedSelect, self).__init__(None, bigdl_type) class Max(Layer): ''' Applies a max operation over dimension `dim` :param dim: max along this dimension :param num_input_dims: Optional. If in a batch model, set to the inputDims. >>> max = Max(1) creating: createMax ''' def __init__(self, dim, num_input_dims=INTMIN, bigdl_type="float"): super(Max, self).__init__(None, bigdl_type, dim, num_input_dims) class Mean(Layer): ''' It is a simple layer which applies a mean operation over the given dimension. When nInputDims is provided, the input will be considered as batches. Then the mean operation will be applied in (dimension + 1). The input to this layer is expected to be a tensor, or a batch of tensors; when using mini-batch, a batch of sample tensors will be passed to the layer and the user need to specify the number of dimensions of each sample tensor in the batch using nInputDims. :param dimension: the dimension to be applied mean operation :param n_input_dims: specify the number of dimensions that this module will receiveIf it is more than the dimension of input tensors, the first dimension would be consideredas batch size >>> mean = Mean(1, 1) creating: createMean ''' def __init__(self, dimension=1, n_input_dims=-1, bigdl_type="float"): super(Mean, self).__init__(None, bigdl_type, dimension, n_input_dims) class Min(Layer): ''' Applies a min operation over dimension `dim`. :param dim: min along this dimension :param num_input_dims: Optional. If in a batch model, set to the input_dim. >>> min = Min(1) creating: createMin ''' def __init__(self, dim, num_input_dims=INTMIN, bigdl_type="float"): super(Min, self).__init__(None, bigdl_type, dim, num_input_dims) class MixtureTable(Layer): ''' Creates a module that takes a table {gater, experts} as input and outputs the mixture of experts (a Tensor or table of Tensors) using a gater Tensor. When dim is provided, it specifies the dimension of the experts Tensor that will be interpolated (or mixed). Otherwise, the experts should take the form of a table of Tensors. This Module works for experts of dimension 1D or more, and for a 1D or 2D gater, i.e. for single examples or mini-batches. >>> mixtureTable = MixtureTable() creating: createMixtureTable >>> mixtureTable = MixtureTable(10) creating: createMixtureTable ''' def __init__(self, dim=INTMAX, bigdl_type="float"): super(MixtureTable, self).__init__(None, bigdl_type, dim) class Mul(Layer): ''' Multiply a single scalar factor to the incoming data >>> mul = Mul() creating: createMul ''' def __init__(self, bigdl_type="float"): super(Mul, self).__init__(None, bigdl_type) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class MulConstant(Layer): ''' Multiplies input Tensor by a (non-learnable) scalar constant. This module is sometimes useful for debugging purposes. :param scalar: scalar constant :param inplace: Can optionally do its operation in-place without using extra state memory >>> mulConstant = MulConstant(2.5) creating: createMulConstant ''' def __init__(self, scalar, inplace=False, bigdl_type="float"): super(MulConstant, self).__init__(None, bigdl_type, scalar, inplace) class Narrow(Layer): ''' Narrow is application of narrow operation in a module. 
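    For example (a sketch; dimension and offset follow the 1-based Torch
    convention assumed throughout this file): Narrow(1, 2, 3) applied to a
    5 x 4 tensor returns the 3 x 4 slice made of rows 2 through 4.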
The module further supports a negative length in order to handle inputs with an unknown size. >>> narrow = Narrow(1, 1, 1) creating: createNarrow ''' def __init__(self, dimension, offset, length=1, bigdl_type="float"): super(Narrow, self).__init__(None, bigdl_type, dimension, offset, length) class NarrowTable(Layer): ''' Creates a module that takes a table as input and outputs the subtable starting at index offset having length elements (defaults to 1 element). The elements can be either a table or a Tensor. If `length` is negative, it means selecting the elements from the offset to element which located at the abs(`length`) to the last element of the input. :param offset: the start index of table :param length: the length want to select >>> narrowTable = NarrowTable(1, 1) creating: createNarrowTable ''' def __init__(self, offset, length=1, bigdl_type="float"): super(NarrowTable, self).__init__(None, bigdl_type, offset, length) class Normalize(Layer): ''' Normalizes the input Tensor to have unit L_p norm. The smoothing parameter eps prevents division by zero when the input contains all zero elements (default = 1e-10). p can be the max value of double >>> normalize = Normalize(1e-5, 1e-5) creating: createNormalize ''' def __init__(self, p, eps=1e-10, bigdl_type="float"): super(Normalize, self).__init__(None, bigdl_type, p, eps) class PReLU(Layer): ''' Applies parametric ReLU, which parameter varies the slope of the negative part. PReLU: f(x) = max(0, x) + a * min(0, x) nOutputPlane's default value is 0, that means using PReLU in shared version and has only one parameters. Notice: Please don't use weight decay on this. :param n_output_plane: input map number. Default is 0. >>> pReLU = PReLU(1) creating: createPReLU ''' def __init__(self, n_output_plane=0, bigdl_type="float"): super(PReLU, self).__init__(None, bigdl_type, n_output_plane) def set_init_method(self, weight_init_method = None, bias_init_method = None): callBigDlFunc(self.bigdl_type, "setInitMethod", self.value, weight_init_method, bias_init_method) class Padding(Layer): ''' This module adds pad units of padding to dimension dim of the input. If pad is negative, padding is added to the left, otherwise, it is added to the right of the dimension. The input to this layer is expected to be a tensor, or a batch of tensors; when using mini-batch, a batch of sample tensors will be passed to the layer and the user need to specify the number of dimensions of each sample tensor in the batch using n_input_dim. :param dim: the dimension to be applied padding operation :param pad: num of the pad units :param n_input_dim: specify the number of dimensions that this module will receiveIf it is more than the dimension of input tensors, the first dimensionwould be considered as batch size :param value: padding value >>> padding = Padding(1, 1, 1, 1e-5, 1) creating: createPadding ''' def __init__(self, dim, pad, n_input_dim, value=0.0, n_index=1, bigdl_type="float"): super(Padding, self).__init__(None, bigdl_type, dim, pad, n_input_dim, value, n_index) class PairwiseDistance(Layer): ''' It is a module that takes a table of two vectors as input and outputs the distance between them using the p-norm. The input given in `forward(input)` is a [[Table]] that contains two tensors which must be either a vector (1D tensor) or matrix (2D tensor). If the input is a vector, it must have the size of `inputSize`. 
If it is a matrix, then each row is assumed to be an input sample of the given batch (the number of rows means the batch size and the number of columns should be equal to the `inputSize`). :param norm: the norm of distance >>> pairwiseDistance = PairwiseDistance(2) creating: createPairwiseDistance ''' def __init__(self, norm=2, bigdl_type="float"): super(PairwiseDistance, self).__init__(None, bigdl_type, norm) class ParallelTable(Container): ''' It is a container module that applies the i-th member module to the i-th input, and outputs an output in the form of Table >>> parallelTable = ParallelTable() creating: createParallelTable ''' def __init__(self, bigdl_type="float"): super(ParallelTable, self).__init__(None, bigdl_type) class Power(Layer): ''' Apply an element-wise power operation with scale and shift. f(x) = (shift + scale * x)^power^ :param power: the exponent. :param scale: Default is 1. :param shift: Default is 0. >>> power = Power(1e-5) creating: createPower ''' def __init__(self, power, scale=1.0, shift=0.0, bigdl_type="float"): super(Power, self).__init__(None, bigdl_type, power, scale, shift) class RReLU(Layer): ''' Applies the randomized leaky rectified linear unit (RReLU) element-wise to the input Tensor, thus outputting a Tensor of the same dimension. Informally the RReLU is also known as 'insanity' layer. RReLU is defined as: ``` f(x) = max(0,x) + a * min(0, x) where a ~ U(l, u). ``` In training mode negative inputs are multiplied by a factor a drawn from a uniform random distribution U(l, u). In evaluation mode a RReLU behaves like a LeakyReLU with a constant mean factor a = (l + u) / 2. By default, l = 1/8 and u = 1/3. If l == u a RReLU effectively becomes a LeakyReLU. Regardless of operating in in-place mode a RReLU will internally allocate an input-sized noise tensor to store random factors for negative inputs. The backward() operation assumes that forward() has been called before. For reference see [Empirical Evaluation of Rectified Activations in Convolutional Network]( http://arxiv.org/abs/1505.00853). :param lower: lower boundary of uniform random distribution :param upper: upper boundary of uniform random distribution :param inplace: optionally do its operation in-place without using extra state memory >>> rReLU = RReLU(1e-5, 1e5, True) creating: createRReLU ''' def __init__(self, lower=1.0/8, upper=1.0/3, inplace=False, bigdl_type="float"): super(RReLU, self).__init__(None, bigdl_type, lower, upper, inplace) class ReLU6(Layer): ''' Same as ReLU except that the rectifying function f(x) saturates at x = 6 :param inplace: either True = in-place or False = keeping separate state >>> reLU6 = ReLU6(True) creating: createReLU6 ''' def __init__(self, inplace=False, bigdl_type="float"): super(ReLU6, self).__init__(None, bigdl_type, inplace) class Replicate(Layer): ''' Replicate repeats input `nFeatures` times along its `dim` dimension. Notice: No memory copy, it set the stride along the `dim`-th dimension to zero. :param n_features: replicate times. :param dim: dimension to be replicated. :param n_dim: specify the number of non-batch dimensions. 
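    For example (a sketch, assuming the new dimension is inserted at `dim`):
    Replicate(3, 1) turns a tensor of size 4 into a 3 x 4 output whose three
    rows all view the same underlying data (stride 0 along the replicated
    dimension, as noted above).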
>>> replicate = Replicate(2)
    creating: createReplicate
    '''

    def __init__(self, n_features, dim=1, n_dim=INTMAX, bigdl_type="float"):
        super(Replicate, self).__init__(None, bigdl_type, n_features, dim, n_dim)


class RoiPooling(Layer):

    '''
    Region of interest pooling

    The RoIPooling uses max pooling to convert the features inside any valid
    region of interest into a small feature map with a fixed spatial extent of
    pooledH * pooledW (e.g., 7 * 7).

    An RoI is a rectangular window into a conv feature map. Each RoI is defined by
    a four-tuple (x1, y1, x2, y2) that specifies its top-left corner (x1, y1) and
    its bottom-right corner (x2, y2).

    RoI max pooling works by dividing the h * w RoI window into a
    pooledH * pooledW grid of sub-windows of approximate size
    h/pooledH * w/pooledW and then max-pooling the values in each sub-window into
    the corresponding output grid cell.

    Pooling is applied independently to each feature map channel.


    :param pooled_w: spatial extent in width
    :param pooled_h: spatial extent in height
    :param spatial_scale: spatial scale


    >>> import numpy as np
    >>> input_data = np.random.rand(2,2,6,8)
    >>> input_rois = np.array([0, 0, 0, 7, 5, 1, 6, 2, 7, 5, 1, 3, 1, 6, 4, 0, 3, 3, 3, 3],dtype='float64').reshape(4,5)
    >>> m = RoiPooling(3,2,1.0)
    creating: createRoiPooling
    >>> out = m.forward([input_data,input_rois])
    '''

    def __init__(self, pooled_w, pooled_h, spatial_scale, bigdl_type="float"):
        super(RoiPooling, self).__init__(None, bigdl_type, pooled_w, pooled_h,
                                         spatial_scale)


class Scale(Layer):

    '''
    Scale is the combination of CMul and CAdd.

    It computes the element-wise product of input and weight, with the shape of
    the weight "expanded" to match the shape of the input. Similarly, the bias is
    expanded and added element-wise.


    :param size: size of weight and bias


    >>> scale = Scale([1,2])
    creating: createScale
    '''

    def __init__(self, size, bigdl_type="float"):
        super(Scale, self).__init__(None, bigdl_type, size)


class SelectTable(Layer):

    '''
    Creates a module that takes a table as input and outputs the element at index
    `index` (positive or negative). This can be either a table or a Tensor.

    The gradients of the non-index elements are zeroed Tensors of the same size.
    This is true regardless of the depth of the encapsulated Tensor as the
    function used internally to do so is recursive.


    :param dimension: the index of the element to be selected


    >>> selectTable = SelectTable(1)
    creating: createSelectTable
    '''

    def __init__(self, dimension, bigdl_type="float"):
        super(SelectTable, self).__init__(None, bigdl_type, dimension)


class Sigmoid(Layer):

    '''
    Applies the Sigmoid function element-wise to the input Tensor,
    thus outputting a Tensor of the same dimension.


    >>> sigmoid = Sigmoid()
    creating: createSigmoid
    '''

    def __init__(self, bigdl_type="float"):
        super(Sigmoid, self).__init__(None, bigdl_type)


class SoftMax(Layer):

    '''
    Applies the SoftMax function to an n-dimensional input Tensor, rescaling it so
    that the elements of the n-dimensional output Tensor lie in the range (0, 1)
    and sum to 1. Softmax is defined as:

    f_i(x) = exp(x_i - shift) / sum_j exp(x_j - shift)

    where shift = max_i(x_i).


    >>> softMax = SoftMax()
    creating: createSoftMax
    '''

    def __init__(self, bigdl_type="float"):
        super(SoftMax, self).__init__(None, bigdl_type)


class SoftMin(Layer):

    '''
    Applies the SoftMin function to an n-dimensional input Tensor, rescaling it so
    that the elements of the n-dimensional output Tensor lie in the range (0, 1)
    and sum to 1. Softmin is defined as:

    f_i(x) = exp(-x_i - shift) / sum_j exp(-x_j - shift)

    where shift = max_i(-x_i).
>>> softMin = SoftMin()
    creating: createSoftMin
    '''

    def __init__(self, bigdl_type="float"):
        super(SoftMin, self).__init__(None, bigdl_type)


class SoftPlus(Layer):

    '''
    Apply the SoftPlus function to an n-dimensional input tensor.

    SoftPlus function: f_i(x) = 1/beta * log(1 + exp(beta * x_i))


    :param beta: Controls sharpness of transfer function


    >>> softPlus = SoftPlus(1e-5)
    creating: createSoftPlus
    '''

    def __init__(self, beta=1.0, bigdl_type="float"):
        super(SoftPlus, self).__init__(None, bigdl_type, beta)


class SoftShrink(Layer):

    '''
    Apply the soft shrinkage function element-wise to the input Tensor.

    SoftShrinkage operator:
```
           | x - lambda, if x >  lambda
    f(x) = | x + lambda, if x < -lambda
           | 0, otherwise
```

    :param the_lambda: lambda, default is 0.5


    >>> softShrink = SoftShrink(1e-5)
    creating: createSoftShrink
    '''

    def __init__(self, the_lambda=0.5, bigdl_type="float"):
        super(SoftShrink, self).__init__(None, bigdl_type, the_lambda)


class SoftSign(Layer):

    '''
    Apply the SoftSign function to an n-dimensional input Tensor.

    SoftSign function: f_i(x) = x_i / (1 + |x_i|)


    >>> softSign = SoftSign()
    creating: createSoftSign
    '''

    def __init__(self, bigdl_type="float"):
        super(SoftSign, self).__init__(None, bigdl_type)


class SpatialDilatedConvolution(Layer):

    '''
    Apply a 2D dilated convolution over an input image.

    The input tensor is expected to be a 3D or 4D (with batch) tensor.

    If input is a 3D tensor nInputPlane x height x width,
    owidth  = floor((width  + 2 * padW - dilationW * (kW-1) - 1) / dW) + 1
    oheight = floor((height + 2 * padH - dilationH * (kH-1) - 1) / dH) + 1

    Reference Paper:
    Yu F, Koltun V. Multi-scale context aggregation by dilated convolutions[J].
    arXiv preprint arXiv:1511.07122, 2015.


    :param n_input_plane: The number of expected input planes in the image given into forward().
    :param n_output_plane: The number of output planes the convolution layer will produce.
    :param kw: The kernel width of the convolution.
    :param kh: The kernel height of the convolution.
    :param dw: The step of the convolution in the width dimension. Default is 1.
    :param dh: The step of the convolution in the height dimension. Default is 1.
    :param pad_w: The additional zeros added per width to the input planes. Default is 0.
    :param pad_h: The additional zeros added per height to the input planes. Default is 0.
    :param dilation_w: The number of pixels to skip. Default is 1.
    :param dilation_h: The number of pixels to skip. Default is 1.
    :param init_method: Init method: Default, Xavier.
    :param wRegularizer: instance of [[Regularizer]] (e.g. L1 or L2 regularization),
                         applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]] applied to the bias.


    >>> spatialDilatedConvolution = SpatialDilatedConvolution(1, 1, 1, 1)
    creating: createSpatialDilatedConvolution
    '''

    def __init__(self, n_input_plane, n_output_plane, kw, kh, dw=1, dh=1,
                 pad_w=0, pad_h=0, dilation_w=1, dilation_h=1,
                 init_method='default', wRegularizer=None, bRegularizer=None,
                 bigdl_type="float"):
        super(SpatialDilatedConvolution, self).__init__(None, bigdl_type,
                                                        n_input_plane,
                                                        n_output_plane,
                                                        kw, kh, dw, dh,
                                                        pad_w, pad_h,
                                                        dilation_w, dilation_h,
                                                        init_method,
                                                        wRegularizer,
                                                        bRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)


class SpatialFullConvolution(Layer):
    '''
    Apply a 2D full convolution over an input image.
    The input tensor is expected to be a 3D or 4D (with batch) tensor.
Note that instead of setting adjW and adjH, SpatialFullConvolution[Table, T] also
    accepts a table input with two tensors: T(convInput, sizeTensor), where
    convInput is the standard input tensor, and the size of sizeTensor is used to
    set the size of the output (ignoring the adjW and adjH values used to
    construct the module). This module can be used without a bias by setting the
    parameter no_bias = True while constructing the module.

    If input is a 3D tensor nInputPlane x height x width,
    owidth  = (width  - 1) * dW - 2*padW + kW + adjW
    oheight = (height - 1) * dH - 2*padH + kH + adjH

    Other frameworks call this operation "In-network Upsampling",
    "Fractionally-strided convolution", "Backwards Convolution",
    "Deconvolution", or "Upconvolution".

    Reference Paper: Long J, Shelhamer E, Darrell T. Fully convolutional networks
    for semantic segmentation[C]//Proceedings of the IEEE Conference on Computer
    Vision and Pattern Recognition. 2015: 3431-3440.

    :param n_input_plane: The number of expected input planes in the image given into forward()
    :param n_output_plane: The number of output planes the convolution layer will produce.
    :param kw: The kernel width of the convolution.
    :param kh: The kernel height of the convolution.
    :param dw: The step of the convolution in the width dimension. Default is 1.
    :param dh: The step of the convolution in the height dimension. Default is 1.
    :param pad_w: The additional zeros added per width to the input planes. Default is 0.
    :param pad_h: The additional zeros added per height to the input planes. Default is 0.
    :param adj_w: Extra width to add to the output image. Default is 0.
    :param adj_h: Extra height to add to the output image. Default is 0.
    :param n_group: Kernel group number.
    :param no_bias: set to True to omit the bias.
    :param init_method: Init method: Default, Xavier, Bilinear.
    :param wRegularizer: instance of [[Regularizer]] (e.g. L1 or L2 regularization),
                         applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]] applied to the bias.

    >>> spatialFullConvolution = SpatialFullConvolution(1, 1, 1, 1)
    creating: createSpatialFullConvolution
    '''

    def __init__(self, n_input_plane, n_output_plane, kw, kh, dw=1, dh=1,
                 pad_w=0, pad_h=0, adj_w=0, adj_h=0, n_group=1, no_bias=False,
                 init_method='default', wRegularizer=None, bRegularizer=None,
                 bigdl_type="float"):
        super(SpatialFullConvolution, self).__init__(None, bigdl_type,
                                                     n_input_plane,
                                                     n_output_plane,
                                                     kw, kh, dw, dh,
                                                     pad_w, pad_h, adj_w, adj_h,
                                                     n_group, no_bias,
                                                     init_method,
                                                     wRegularizer, bRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)


class SpatialShareConvolution(Layer):

    '''
    >>> spatialShareConvolution = SpatialShareConvolution(1, 1, 1, 1)
    creating: createSpatialShareConvolution
    '''

    def __init__(self, n_input_plane, n_output_plane, kernel_w, kernel_h,
                 stride_w=1, stride_h=1, pad_w=0, pad_h=0, n_group=1,
                 propagate_back=True, init_method='default',
                 bigdl_type="float"):
        super(SpatialShareConvolution, self).__init__(None, bigdl_type,
                                                      n_input_plane,
                                                      n_output_plane,
                                                      kernel_w, kernel_h,
                                                      stride_w, stride_h,
                                                      pad_w, pad_h,
                                                      n_group, propagate_back,
                                                      init_method)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)


class VolumetricConvolution(Layer):

    '''
    Applies a 3D convolution over an input image composed of several input planes.
The input tensor in forward(input) is expected to be a 4D tensor
    (nInputPlane x time x height x width).

    :param n_input_plane: The number of expected input planes in the image given into forward()
    :param n_output_plane: The number of output planes the convolution layer will produce.
    :param k_t: The kernel size of the convolution in time.
    :param k_w: The kernel width of the convolution.
    :param k_h: The kernel height of the convolution.
    :param d_t: The step of the convolution in the time dimension. Default is 1.
    :param d_w: The step of the convolution in the width dimension. Default is 1.
    :param d_h: The step of the convolution in the height dimension. Default is 1.
    :param pad_t: Additional zeros added to the input plane data on both sides of
                  the time axis. Default is 0. (kT-1)/2 is often used here.
    :param pad_w: The additional zeros added per width to the input planes.
    :param pad_h: The additional zeros added per height to the input planes.
    :param with_bias: whether with bias.
    :param init_method: Init method: Default, Xavier, Bilinear.

    >>> volumetricConvolution = VolumetricConvolution(6, 12, 5, 5, 5, 1, 1, 1)
    creating: createVolumetricConvolution
    '''

    def __init__(self, n_input_plane, n_output_plane, k_t, k_w, k_h,
                 d_t=1, d_w=1, d_h=1, pad_t=0, pad_w=0, pad_h=0,
                 with_bias=True, init_method="default", bigdl_type="float"):
        super(VolumetricConvolution, self).__init__(None, bigdl_type,
                                                    n_input_plane,
                                                    n_output_plane,
                                                    k_t, k_w, k_h,
                                                    d_t, d_w, d_h,
                                                    pad_t, pad_w, pad_h,
                                                    with_bias, init_method)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
                      weight_init_method, bias_init_method)


class VolumetricMaxPooling(Layer):

    '''
    Applies 3D max-pooling operation in kT x kW x kH regions by step size
    dT x dW x dH. The number of output features is equal to the number of
    input planes / dT. The input can optionally be padded with zeros. Padding
    should be smaller than half of the kernel size. That is,
    padT < kT/2, padW < kW/2 and padH < kH/2.

    :param k_t: The kernel size in time
    :param k_w: The kernel width
    :param k_h: The kernel height
    :param d_t: The step in the time dimension
    :param d_w: The step in the width dimension
    :param d_h: The step in the height dimension
    :param pad_t: The padding in the time dimension
    :param pad_w: The padding in the width dimension
    :param pad_h: The padding in the height dimension

    >>> volumetricMaxPooling = VolumetricMaxPooling(5, 5, 5, 1, 1, 1)
    creating: createVolumetricMaxPooling
    '''

    def __init__(self, k_t, k_w, k_h, d_t, d_w, d_h,
                 pad_t=0, pad_w=0, pad_h=0, bigdl_type="float"):
        super(VolumetricMaxPooling, self).__init__(None, bigdl_type,
                                                   k_t, k_w, k_h,
                                                   d_t, d_w, d_h,
                                                   pad_t, pad_w, pad_h)


class SpatialZeroPadding(Layer):

    '''
    Each feature map of a given input is padded with the specified number of
    zeros. If padding values are negative, then the input is cropped.

    :param pad_left: pad left position
    :param pad_right: pad right position
    :param pad_top: pad top position
    :param pad_bottom: pad bottom position

    >>> spatialZeroPadding = SpatialZeroPadding(1, 1, 1, 1)
    creating: createSpatialZeroPadding
    '''

    def __init__(self, pad_left, pad_right, pad_top, pad_bottom,
                 bigdl_type="float"):
        super(SpatialZeroPadding, self).__init__(None, bigdl_type,
                                                 pad_left, pad_right,
                                                 pad_top, pad_bottom)


class SplitTable(Layer):

    '''
    Creates a module that takes a Tensor as input and outputs several tables,
    splitting the Tensor along the specified dimension `dimension`.
The input to this layer is expected to be a tensor, or a batch of tensors;
    when using mini-batch, a batch of sample tensors will be passed to the layer
    and the user needs to specify the number of dimensions of each sample tensor
    in a batch using `nInputDims`.

    :param dimension: to be split along this dimension
    :param n_input_dims: specify the number of dimensions that this module will
                         receive. If it is more than the dimension of input
                         tensors, the first dimension would be considered as
                         batch size

    >>> splitTable = SplitTable(1, 1)
    creating: createSplitTable
    '''

    def __init__(self, dimension, n_input_dims=-1, bigdl_type="float"):
        super(SplitTable, self).__init__(None, bigdl_type, dimension,
                                         n_input_dims)


class Sqrt(Layer):

    '''
    Apply an element-wise sqrt operation.


    >>> sqrt = Sqrt()
    creating: createSqrt
    '''

    def __init__(self, bigdl_type="float"):
        super(Sqrt, self).__init__(None, bigdl_type)


class Square(Layer):

    '''
    Apply an element-wise square operation.

    >>> square = Square()
    creating: createSquare
    '''

    def __init__(self, bigdl_type="float"):
        super(Square, self).__init__(None, bigdl_type)


class Squeeze(Layer):

    '''
    Delete all singleton dimensions, or a specific singleton dimension.

    :param dim: Optional. The dimension to be deleted. Default: delete all
                singleton dimensions.
    :param num_input_dims: Optional. If in a batch model, set to the inputDims.


    >>> squeeze = Squeeze(1)
    creating: createSqueeze
    '''

    def __init__(self, dim, num_input_dims=INTMIN, bigdl_type="float"):
        super(Squeeze, self).__init__(None, bigdl_type, dim, num_input_dims)


class Sum(Layer):

    '''
    It is a simple layer which applies a sum operation over the given dimension.
    When nInputDims is provided, the input will be considered as batches, and the
    sum operation will be applied to dimension (dimension + 1).

    The input to this layer is expected to be a tensor, or a batch of tensors;
    when using mini-batch, a batch of sample tensors will be passed to the layer
    and the user needs to specify the number of dimensions of each sample tensor
    in the batch using `nInputDims`.


    :param dimension: the dimension to be applied sum operation
    :param n_input_dims: specify the number of dimensions that this module will
                         receive. If it is more than the dimension of input
                         tensors, the first dimension would be considered as
                         batch size
    :param size_average: default is False; if True, the mean is returned instead
                         of the sum


    >>> sum = Sum(1, 1, True)
    creating: createSum
    '''

    def __init__(self, dimension=1, n_input_dims=-1, size_average=False,
                 bigdl_type="float"):
        super(Sum, self).__init__(None, bigdl_type, dimension, n_input_dims,
                                  size_average)


class TanhShrink(Layer):

    '''
    A simple layer that, for each element of the input tensor, performs the
    following operation during the forward pass:
    [f(x) = x - tanh(x)]


    >>> tanhShrink = TanhShrink()
    creating: createTanhShrink
    '''

    def __init__(self, bigdl_type="float"):
        super(TanhShrink, self).__init__(None, bigdl_type)


class Threshold(Layer):

    '''
    Thresholds the input Tensor. Values in the Tensor smaller than th are
    replaced with v.


    :param th: the threshold to compare with
    :param v: the value to replace with
    :param ip: inplace mode


    >>> threshold = Threshold(1e-5, 1e-5, True)
    creating: createThreshold
    '''

    def __init__(self, th=1e-6, v=0.0, ip=False, bigdl_type="float"):
        super(Threshold, self).__init__(None, bigdl_type, th, v, ip)


class Unsqueeze(Layer):

    '''
    Create an Unsqueeze layer. Insert a singleton dim (i.e., dimension 1) at
    position pos. For an input with dim = input.dim(), there are dim + 1 possible
    positions to insert the singleton dimension.
:param pos: the position at which to insert the singleton dimension.
    :param num_input_dims: Optional. If in a batch model, set to the inputDim.


    >>> unsqueeze = Unsqueeze(1, 1)
    creating: createUnsqueeze
    '''

    def __init__(self, pos, num_input_dims=INTMIN, bigdl_type="float"):
        super(Unsqueeze, self).__init__(None, bigdl_type, pos, num_input_dims)


class Reshape(Layer):

    '''
    The forward(input) reshapes the input tensor into a
    size(0) * size(1) * ... tensor, taking the elements row-wise.


    :param size: the reshape size


    >>> reshape = Reshape([1, 28, 28])
    creating: createReshape
    >>> reshape = Reshape([1, 28, 28], False)
    creating: createReshape
    '''

    def __init__(self, size, batch_mode=None, bigdl_type="float"):
        super(Reshape, self).__init__(None, bigdl_type, size, batch_mode)


class BiRecurrent(Container):

    '''
    Create a Bidirectional recurrent layer


    :param merge: merge layer


    >>> biRecurrent = BiRecurrent(CAddTable())
    creating: createCAddTable
    creating: createBiRecurrent
    >>> biRecurrent = BiRecurrent()
    creating: createBiRecurrent
    '''

    def __init__(self, merge=None, bigdl_type="float"):
        super(BiRecurrent, self).__init__(None, bigdl_type, merge)


class ConcatTable(Container):

    '''
    ConcatTable is a container module like Concat. It applies the input to each
    member module; the input can be a tensor or a table.

    ConcatTable usually works with CAddTable and CMulTable to implement
    element-wise add/multiply on the outputs of two modules.


    >>> concatTable = ConcatTable()
    creating: createConcatTable
    '''

    def __init__(self, bigdl_type="float"):
        super(ConcatTable, self).__init__(None, bigdl_type)


class Identity(Layer):

    '''
    Identity simply passes the input through to the output. It's useful in some
    parallel containers to retain the original input.


    >>> identity = Identity()
    creating: createIdentity
    '''

    def __init__(self, bigdl_type="float"):
        super(Identity, self).__init__(None, bigdl_type)


class Reverse(Layer):

    '''
    Reverse the input w.r.t. the given dimension. The input can be a Tensor or
    Table.


    :param dimension: the dimension to reverse. Default is 1.


    >>> reverse = Reverse()
    creating: createReverse
    '''

    def __init__(self, dimension=1, bigdl_type="float"):
        super(Reverse, self).__init__(None, bigdl_type, dimension)


class Transpose(Layer):

    '''
    Transpose input along specified dimensions.


    :param permutations: dimension pairs that need to be swapped


    >>> transpose = Transpose([(1,2)])
    creating: createTranspose
    '''

    def __init__(self, permutations, bigdl_type="float"):
        super(Transpose, self).__init__(None, bigdl_type, permutations)


class SpatialContrastiveNormalization(Layer):

    '''
    Subtractive + divisive contrast normalization.


    :param n_input_plane: number of input planes, default is 1.
    :param kernel: kernel tensor, default is a 9 x 9 tensor.
    :param threshold: threshold
    :param thresval: threshold value to replace with if data is smaller than
                     threshold


    >>> kernel = np.ones([9,9]).astype("float32")
    >>> spatialContrastiveNormalization = SpatialContrastiveNormalization(1, kernel)
    creating: createSpatialContrastiveNormalization
    >>> spatialContrastiveNormalization = SpatialContrastiveNormalization()
    creating: createSpatialContrastiveNormalization
    '''

    def __init__(self, n_input_plane=1, kernel=None, threshold=1e-4,
                 thresval=1e-4, bigdl_type="float"):
        super(SpatialContrastiveNormalization, self).__init__(None, bigdl_type,
                                                              n_input_plane,
                                                              JTensor.from_ndarray(kernel),
                                                              threshold,
                                                              thresval)


class SpatialConvolutionMap(Layer):

    '''
    This class is a generalization of SpatialConvolution.
    It uses a generic connection table between input and output features.
    The SpatialConvolution is equivalent to using a full connection table.

    :param wRegularizer: instance of [[Regularizer]] (e.g. L1 or L2 regularization),
                         applied to the input weights matrices.
    :param bRegularizer: instance of [[Regularizer]] applied to the bias.
>>> ct = np.ones([9,9]).astype("float32")
    >>> spatialConvolutionMap = SpatialConvolutionMap(ct, 9, 9)
    creating: createSpatialConvolutionMap
    '''

    def __init__(self, conn_table, kw, kh, dw=1, dh=1, pad_w=0, pad_h=0,
                 wRegularizer=None, bRegularizer=None, bigdl_type="float"):
        super(SpatialConvolutionMap, self).__init__(None, bigdl_type,
                                                    JTensor.from_ndarray(conn_table),
                                                    kw, kh, dw, dh, pad_w, pad_h,
                                                    wRegularizer, bRegularizer)


class SpatialDivisiveNormalization(Layer):

    '''
    Applies a spatial division operation on a series of 2D inputs using kernel for
    computing the weighted average in a neighborhood. The neighborhood is defined
    for a local spatial region that is the same size as the kernel, across all
    features. For an input image, since there is only one feature, the region is
    only spatial. For an RGB image, the weighted average is taken over RGB
    channels and a spatial region.

    If the kernel is 1D, then it will be used to construct a separable 2D kernel.
    The operations will be much more efficient in this case.

    The kernel is generally chosen as a Gaussian when it is believed that the
    correlation of two pixel locations decreases with increasing distance. On the
    feature dimension, a uniform average is used since the weighting across
    features is not known.


    :param n_input_plane: number of input planes, default is 1.
    :param kernel: kernel tensor, default is a 9 x 9 tensor.
    :param threshold: threshold
    :param thresval: threshold value to replace with if data is smaller than
                     threshold


    >>> kernel = np.ones([9,9]).astype("float32")
    >>> spatialDivisiveNormalization = SpatialDivisiveNormalization(2,kernel)
    creating: createSpatialDivisiveNormalization
    >>> spatialDivisiveNormalization = SpatialDivisiveNormalization()
    creating: createSpatialDivisiveNormalization
    '''

    def __init__(self, n_input_plane=1, kernel=None, threshold=1e-4,
                 thresval=1e-4, bigdl_type="float"):
        super(SpatialDivisiveNormalization, self).__init__(None, bigdl_type,
                                                           n_input_plane,
                                                           JTensor.from_ndarray(kernel),
                                                           threshold,
                                                           thresval)


class SpatialSubtractiveNormalization(Layer):

    '''
    Applies a spatial subtraction operation on a series of 2D inputs using kernel
    for computing the weighted average in a neighborhood. The neighborhood is
    defined for a local spatial region that is the same size as the kernel, across
    all features. For an input image, since there is only one feature, the region
    is only spatial. For an RGB image, the weighted average is taken over RGB
    channels and a spatial region.

    If the kernel is 1D, then it will be used to construct a separable 2D kernel.
    The operations will be much more efficient in this case.

    The kernel is generally chosen as a Gaussian when it is believed that the
    correlation of two pixel locations decreases with increasing distance. On the
    feature dimension, a uniform average is used since the weighting across
    features is not known.


    :param n_input_plane: number of input planes, default is 1.
    :param kernel: kernel tensor, default is a 9 x 9 tensor.


    >>> kernel = np.ones([9,9]).astype("float32")
    >>> spatialSubtractiveNormalization = SpatialSubtractiveNormalization(2,kernel)
    creating: createSpatialSubtractiveNormalization
    >>> spatialSubtractiveNormalization = SpatialSubtractiveNormalization()
    creating: createSpatialSubtractiveNormalization
    '''

    def __init__(self, n_input_plane=1, kernel=None, bigdl_type="float"):
        super(SpatialSubtractiveNormalization, self).__init__(None, bigdl_type,
                                                              n_input_plane,
                                                              JTensor.from_ndarray(kernel))


class Pack(Layer):

    '''
    Stacks a list of n-dimensional tensors into one (n+1)-dimensional tensor.
>>> layer = Pack(1) creating: createPack ''' def __init__(self, dimension, bigdl_type="float"): super(Pack, self).__init__(None, bigdl_type, dimension) def _test(): import doctest from pyspark import SparkContext from bigdl.nn import layer from bigdl.util.common import init_engine from bigdl.util.common import create_spark_conf globs = layer.__dict__.copy() sc = SparkContext(master="local[4]", appName="test layer", conf=create_spark_conf()) globs['sc'] = sc init_engine() (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS) if failure_count: exit(-1) if __name__ == "__main__": _test()
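# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a minimal, hedged usage
# example for two of the element-wise layers defined above. It assumes the
# SparkContext and engine initialization shown in `_test()` (create_spark_conf
# plus init_engine) have already been performed, since BigDL layers need a
# running engine before forward() can be called.
import numpy as np
from bigdl.nn.layer import Power, Sqrt

x = np.array([[1.0, 4.0], [9.0, 16.0]])

# Power(2.0) computes f(x) = (0 + 1*x)^2, i.e. the element-wise square.
print(Power(2.0).forward(x))   # [[  1.  16.] [ 81. 256.]]

# Sqrt applies the element-wise square root.
print(Sqrt().forward(x))       # [[1. 2.] [3. 4.]]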
sethah/BigDL
pyspark/bigdl/nn/layer.py
Python
apache-2.0
111,477
[ "Gaussian" ]
7209594d6046f88106b3be6195516583e06adf4d3225d01ac1306b7f8a687087
# -*- coding: utf-8 -*-
"""A plugin to generate a list of domains visited."""

from urllib import parse as urlparse

from plaso.analysis import interface
from plaso.analysis import manager


class UniqueDomainsVisitedPlugin(interface.AnalysisPlugin):
  """A plugin to generate a list of all domains visited.

  This plugin will extract domains from browser history events extracted by
  Plaso. The list produced can be used to quickly determine if there has been
  a visit to a site of interest, for example, a known phishing site.
  """

  NAME = 'unique_domains_visited'

  _SUPPORTED_EVENT_DATA_TYPES = frozenset([
      'chrome:history:file_downloaded',
      'chrome:history:page_visited',
      'firefox:downloads:download',
      'firefox:places:page_visited',
      'macosx:lsquarantine',
      'msiecf:redirected',
      'msiecf:url',
      'msie:webcache:container',
      'opera:history',
      'safari:history:visit'])

  def __init__(self):
    """Initializes the domains visited plugin."""
    super(UniqueDomainsVisitedPlugin, self).__init__()
    self._domains = []

  # pylint: disable=unused-argument
  def ExamineEvent(self, mediator, event, event_data, event_data_stream):
    """Analyzes an event and extracts domains from it.

    We only evaluate straightforward web history events, not visits which
    can be inferred by TypedURLs, cookies or other means.

    Args:
      mediator (AnalysisMediator): mediates interactions between analysis
          plugins and other components, such as storage and dfvfs.
      event (EventObject): event to examine.
      event_data (EventData): event data.
      event_data_stream (EventDataStream): event data stream.
    """
    if event_data.data_type not in self._SUPPORTED_EVENT_DATA_TYPES:
      return

    url = getattr(event_data, 'url', None)
    if url is None:
      return

    parsed_url = urlparse.urlparse(url)
    domain = getattr(parsed_url, 'netloc', None)
    if domain in self._domains:
      # We've already found an event containing this domain.
      return

    self._domains.append(domain)

  def CompileReport(self, mediator):
    """Compiles an analysis report.

    Args:
      mediator (AnalysisMediator): mediates interactions between analysis
          plugins and other components, such as storage and dfvfs.

    Returns:
      AnalysisReport: the analysis report.
    """
    lines_of_text = ['Listing domains visited by all users']
    for domain in sorted(self._domains):
      lines_of_text.append(domain)

    lines_of_text.append('')
    report_text = '\n'.join(lines_of_text)

    analysis_report = super(UniqueDomainsVisitedPlugin, self).CompileReport(
        mediator)
    analysis_report.text = report_text
    return analysis_report


manager.AnalysisPluginManager.RegisterPlugin(UniqueDomainsVisitedPlugin)
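# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the domain-extraction step
# the plugin relies on, shown standalone. urlparse().netloc yields the network
# location (host[:port]) portion of the URL, which is what ExamineEvent
# accumulates in self._domains.
from urllib import parse as urlparse

url = 'https://www.example.com/login?user=alice'
domain = urlparse.urlparse(url).netloc
print(domain)  # prints: www.example.com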
Onager/plaso
plaso/analysis/unique_domains_visited.py
Python
apache-2.0
2,814
[ "VisIt" ]
29fde9c8212a90aa39e730ee7b5844d570d74e6fd9d23277fa09e189c1a08414
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import division, print_function

__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2017-, Dilawar Singh"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"

import sys
import os
import socket
import time
import tarfile
import tempfile


def gen_prefix(msg, maxlength=10):
    msg = '>%s' % msg
    if len(msg) < maxlength:
        msg += ' ' * (maxlength - len(msg))
    return msg[:maxlength].encode('utf-8')


def write_data_to_socket(conn, msg):
    # Frame the payload: a 10-digit ASCII length header followed by the data.
    msg = b'%010d%s' % (len(msg), msg)
    conn.sendall(msg)


def relativePath(path, base):
    return os.path.relpath(path, base)


def gen_payload(args):
    path = args.path
    archive = os.path.join(tempfile.mkdtemp(), 'data.tar.bz2')
    # This mode (w|bz2) is suitable for streaming. The blocksize defaults to
    # 20*512 bytes. We change this to 2048.
    with tarfile.open(archive, 'w|bz2', bufsize=2048) as h:
        if len(args.main) > 0:
            for i, f in enumerate(args.main):
                h.add(f, arcname='__main__%d.py' % i)
        if os.path.isfile(path):
            h.add(path, os.path.basename(path))
        elif os.path.isdir(path):
            for d, ds, fs in os.walk(path):
                for f in fs:
                    p = os.path.join(d, f)
                    h.add(os.path.realpath(p), arcname=relativePath(p, path))
        else:
            print("[ERROR] Neither file nor directory %s" % path)
    with open(archive, 'rb') as f:
        data = f.read()
    return data


def offload(args):
    # NOTE: unused helper. It previously called the undefined functions
    # create_zipfile() and send_zip(); it now delegates to gen_payload() so
    # the module at least runs cleanly.
    return gen_payload(args)


def loop(sock):
    # Unused debugging helper: poll the socket and echo whatever arrives.
    sock.settimeout(1e-2)
    while True:
        try:
            d = sock.recv(10).strip()
            if len(d) > 0:
                print(d)
        except socket.timeout as e:
            print('.', end='')
            sys.stdout.flush()


def read_msg(conn):
    d = conn.recv(1024)
    try:
        d = d.decode('utf8').strip()
    except Exception as e:
        pass
    return d


def save_bz2(conn, outfile):
    # The first 10 bytes always tell how much to read next. Make sure the
    # submit-job script uses the same framing (see write_data_to_socket).
    d = b''
    while len(d) < 10:
        try:
            d += conn.recv(10 - len(d))
        except Exception as e:
            print("[ERROR] Error in format. First 10 bytes are the size of the msg.")
            continue
    size = int(d)
    print("Needs to get %d bytes" % size)
    # recv() may return fewer bytes than requested, so accumulate until the
    # full payload has arrived.
    data = b''
    while len(data) < size:
        data += conn.recv(min(4096, size - len(data)))
    with open(outfile, 'wb') as f:
        f.write(data)
    return data


def main(args):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        host, port = args.server.split(':')
        sock.connect((host, int(port)))
        sock.settimeout(1)
    except Exception as e:
        print("[ERROR] Failed to connect to %s... " % args.server)
        print(e)
        quit()
    data = gen_payload(args)
    print("[INFO ] Total bytes to send: %d" % len(data), end='')
    write_data_to_socket(sock, data)
    print(' ... [SENT]')
    while True:
        try:
            d = read_msg(sock)
            print(d)
            if '>DONE SIMULATION' in d:
                break
        except socket.timeout as e:
            time.sleep(0.5)
    data = save_bz2(sock, 'results.tar.bz2')
    print("[INFO ] All done")


if __name__ == '__main__':
    import argparse
    # Argument parser.
    description = '''Submit a job to moose server.'''
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('path', metavar='path',
        help='File or directory to execute on server.')
    parser.add_argument('--main', '-m', action='append',
        required=False, default=[],
        help='In case of multiple files, scripts to execute'
             ' on the server, e.g. -m file1.py -m file2.py.'
             ' If not given, server will try to guess the best option.')
    parser.add_argument('--server', '-s',
        required=False, type=str, default='localhost:31417',
        help='IP address and PORT number of moose server e.g.'
             ' 172.16.1.2:31416')

    class Args:
        pass
    args = Args()
    parser.parse_args(namespace=args)
    main(args)
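# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the wire protocol used by
# write_data_to_socket() frames every payload as 10 ASCII digits (the length)
# followed by that many bytes. A matching receive loop, assuming the same
# framing on the server side, could look like this:
def read_framed(conn):
    """Read one length-prefixed message from a connected socket."""
    header = b''
    while len(header) < 10:
        chunk = conn.recv(10 - len(header))
        if not chunk:
            raise RuntimeError('socket closed while reading header')
        header += chunk
    size = int(header)
    payload = b''
    while len(payload) < size:
        chunk = conn.recv(min(4096, size - len(payload)))
        if not chunk:
            raise RuntimeError('socket closed while reading payload')
        payload += chunk
    return payload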
upibhalla/moose-core
scripts/submit.py
Python
gpl-3.0
4,374
[ "MOOSE" ]
ccc9f4c55c3460ae5ee18e1946ad32ac796cfcd38207dda30bec5bce96dfba2c
#!/usr/bin/env python # Copyright 2014-2020 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: Qiming Sun <osirpt.sun@gmail.com> # ''' domain decomposition COSMO See also the code on github https://github.com/filippolipparini/ddPCM and the papers [1] Domain decomposition for implicit solvation models. E. Cances, Y. Maday, B. Stamm J. Chem. Phys., 139, 054111 (2013) http://dx.doi.org/10.1063/1.4816767 [2] Fast Domain Decomposition Algorithm for Continuum Solvation Models: Energy and First Derivatives. F. Lipparini, B. Stamm, E. Cances, Y. Maday, B. Mennucci J. Chem. Theory Comput., 9, 3637-3648 (2013) http://dx.doi.org/10.1021/ct400280b [3] Quantum, classical, and hybrid QM/MM calculations in solution: General implementation of the ddCOSMO linear scaling strategy. F. Lipparini, G. Scalmani, L. Lagardere, B. Stamm, E. Cances, Y. Maday, J.-P.Piquemal, M. J. Frisch, B. Mennucci J. Chem. Phys., 141, 184108 (2014) http://dx.doi.org/10.1063/1.4901304 -- Dielectric constants (from https://gaussian.com/scrf/) -- More dataset can be found in Minnesota Solvent Descriptor Database (https://comp.chem.umn.edu/solvation) Water 78.3553 Acetonitrile 35.688 Methanol 32.613 Ethanol 24.852 IsoQuinoline 11.00 Quinoline 9.16 Chloroform 4.7113 DiethylEther 4.2400 Dichloromethane 8.93 DiChloroEthane 10.125 CarbonTetraChloride 2.2280 Benzene 2.2706 Toluene 2.3741 ChloroBenzene 5.6968 NitroMethane 36.562 Heptane 1.9113 CycloHexane 2.0165 Aniline 6.8882 Acetone 20.493 TetraHydroFuran 7.4257 DiMethylSulfoxide 46.826 Argon 1.430 Krypton 1.519 Xenon 1.706 n-Octanol 9.8629 1,1,1-TriChloroEthane 7.0826 1,1,2-TriChloroEthane 7.1937 1,2,4-TriMethylBenzene 2.3653 1,2-DiBromoEthane 4.9313 1,2-EthaneDiol 40.245 1,4-Dioxane 2.2099 1-Bromo-2-MethylPropane 7.7792 1-BromoOctane 5.0244 1-BromoPentane 6.269 1-BromoPropane 8.0496 1-Butanol 17.332 1-ChloroHexane 5.9491 1-ChloroPentane 6.5022 1-ChloroPropane 8.3548 1-Decanol 7.5305 1-FluoroOctane 3.89 1-Heptanol 11.321 1-Hexanol 12.51 1-Hexene 2.0717 1-Hexyne 2.615 1-IodoButane 6.173 1-IodoHexaDecane 3.5338 1-IodoPentane 5.6973 1-IodoPropane 6.9626 1-NitroPropane 23.73 1-Nonanol 8.5991 1-Pentanol 15.13 1-Pentene 1.9905 1-Propanol 20.524 2,2,2-TriFluoroEthanol 26.726 2,2,4-TriMethylPentane 1.9358 2,4-DiMethylPentane 1.8939 2,4-DiMethylPyridine 9.4176 2,6-DiMethylPyridine 7.1735 2-BromoPropane 9.3610 2-Butanol 15.944 2-ChloroButane 8.3930 2-Heptanone 11.658 2-Hexanone 14.136 2-MethoxyEthanol 17.2 2-Methyl-1-Propanol 16.777 2-Methyl-2-Propanol 12.47 2-MethylPentane 1.89 2-MethylPyridine 9.9533 2-NitroPropane 25.654 2-Octanone 9.4678 2-Pentanone 15.200 2-Propanol 19.264 2-Propen-1-ol 19.011 3-MethylPyridine 11.645 3-Pentanone 16.78 4-Heptanone 12.257 4-Methyl-2-Pentanone 12.887 4-MethylPyridine 11.957 5-Nonanone 10.6 AceticAcid 6.2528 AcetoPhenone 17.44 a-ChloroToluene 6.7175 Anisole 4.2247 Benzaldehyde 18.220 BenzoNitrile 25.592 BenzylAlcohol 12.457 BromoBenzene 5.3954 BromoEthane 9.01 Bromoform 4.2488 Butanal 13.45 ButanoicAcid 2.9931 Butanone 18.246 
ButanoNitrile 24.291 ButylAmine 4.6178 ButylEthanoate 4.9941 CarbonDiSulfide 2.6105 Cis-1,2-DiMethylCycloHexane 2.06 Cis-Decalin 2.2139 CycloHexanone 15.619 CycloPentane 1.9608 CycloPentanol 16.989 CycloPentanone 13.58 Decalin-mixture 2.196 DiBromomEthane 7.2273 DiButylEther 3.0473 DiEthylAmine 3.5766 DiEthylSulfide 5.723 DiIodoMethane 5.32 DiIsoPropylEther 3.38 DiMethylDiSulfide 9.6 DiPhenylEther 3.73 DiPropylAmine 2.9112 e-1,2-DiChloroEthene 2.14 e-2-Pentene 2.051 EthaneThiol 6.667 EthylBenzene 2.4339 EthylEthanoate 5.9867 EthylMethanoate 8.3310 EthylPhenylEther 4.1797 FluoroBenzene 5.42 Formamide 108.94 FormicAcid 51.1 HexanoicAcid 2.6 IodoBenzene 4.5470 IodoEthane 7.6177 IodoMethane 6.8650 IsoPropylBenzene 2.3712 m-Cresol 12.44 Mesitylene 2.2650 MethylBenzoate 6.7367 MethylButanoate 5.5607 MethylCycloHexane 2.024 MethylEthanoate 6.8615 MethylMethanoate 8.8377 MethylPropanoate 6.0777 m-Xylene 2.3478 n-ButylBenzene 2.36 n-Decane 1.9846 n-Dodecane 2.0060 n-Hexadecane 2.0402 n-Hexane 1.8819 NitroBenzene 34.809 NitroEthane 28.29 n-MethylAniline 5.9600 n-MethylFormamide-mixture 181.56 n,n-DiMethylAcetamide 37.781 n,n-DiMethylFormamide 37.219 n-Nonane 1.9605 n-Octane 1.9406 n-Pentadecane 2.0333 n-Pentane 1.8371 n-Undecane 1.9910 o-ChloroToluene 4.6331 o-Cresol 6.76 o-DiChloroBenzene 9.9949 o-NitroToluene 25.669 o-Xylene 2.5454 Pentanal 10.0 PentanoicAcid 2.6924 PentylAmine 4.2010 PentylEthanoate 4.7297 PerFluoroBenzene 2.029 p-IsoPropylToluene 2.2322 Propanal 18.5 PropanoicAcid 3.44 PropanoNitrile 29.324 PropylAmine 4.9912 PropylEthanoate 5.5205 p-Xylene 2.2705 Pyridine 12.978 sec-ButylBenzene 2.3446 tert-ButylBenzene 2.3447 TetraChloroEthene 2.268 TetraHydroThiophene-s,s-dioxide 43.962 Tetralin 2.771 Thiophene 2.7270 Thiophenol 4.2728 trans-Decalin 2.1781 TriButylPhosphate 8.1781 TriChloroEthene 3.422 TriEthylAmine 2.3832 Xylene-mixture 2.3879 z-1,2-DiChloroEthene 9.2 ''' import ctypes import copy import numpy from pyscf import lib from pyscf.lib import logger from pyscf import gto from pyscf import df from pyscf.dft import gen_grid, numint from pyscf.data import radii from pyscf.symm import sph from pyscf.solvent import _attach_solvent @lib.with_doc(_attach_solvent._for_scf.__doc__) def ddcosmo_for_scf(mf, solvent_obj=None, dm=None): if solvent_obj is None: solvent_obj = DDCOSMO(mf.mol) return _attach_solvent._for_scf(mf, solvent_obj, dm) @lib.with_doc(_attach_solvent._for_casscf.__doc__) def ddcosmo_for_casscf(mc, solvent_obj=None, dm=None): if solvent_obj is None: if isinstance(getattr(mc._scf, 'with_solvent', None), DDCOSMO): solvent_obj = mc._scf.with_solvent else: solvent_obj = DDCOSMO(mc.mol) return _attach_solvent._for_casscf(mc, solvent_obj, dm) @lib.with_doc(_attach_solvent._for_casci.__doc__) def ddcosmo_for_casci(mc, solvent_obj=None, dm=None): if solvent_obj is None: if isinstance(getattr(mc._scf, 'with_solvent', None), DDCOSMO): solvent_obj = mc._scf.with_solvent else: solvent_obj = DDCOSMO(mc.mol) return _attach_solvent._for_casci(mc, solvent_obj, dm) @lib.with_doc(_attach_solvent._for_post_scf.__doc__) def ddcosmo_for_post_scf(method, solvent_obj=None, dm=None): if solvent_obj is None: if isinstance(getattr(method._scf, 'with_solvent', None), DDCOSMO): solvent_obj = method._scf.with_solvent else: solvent_obj = DDCOSMO(method.mol) return _attach_solvent._for_post_scf(method, solvent_obj, dm) @lib.with_doc(_attach_solvent._for_tdscf.__doc__) def ddcosmo_for_tdscf(method, solvent_obj=None, dm=None): scf_solvent = getattr(method._scf, 'with_solvent', None) assert scf_solvent is 
None or isinstance(scf_solvent, DDCOSMO) if solvent_obj is None: solvent_obj = DDCOSMO(method.mol) return _attach_solvent._for_tdscf(method, solvent_obj, dm) # Inject ddCOSMO into other methods from pyscf import scf from pyscf import mcscf from pyscf import mp, ci, cc from pyscf import tdscf scf.hf.SCF.ddCOSMO = scf.hf.SCF.DDCOSMO = ddcosmo_for_scf mcscf.casci.ddCOSMO = mcscf.casci.DDCOSMO = ddcosmo_for_casci mcscf.mc1step.ddCOSMO = mcscf.mc1step.DDCOSMO = ddcosmo_for_casscf mp.mp2.MP2.ddCOSMO = mp.mp2.MP2.DDCOSMO = ddcosmo_for_post_scf ci.cisd.CISD.ddCOSMO = ci.cisd.CISD.DDCOSMO = ddcosmo_for_post_scf cc.ccsd.CCSD.ddCOSMO = cc.ccsd.CCSD.DDCOSMO = ddcosmo_for_post_scf tdscf.rhf.TDA.ddCOSMO = tdscf.rhf.TDA.DDCOSMO = ddcosmo_for_tdscf # Keep gen_ddcosmo_solver for backward compatibility def gen_ddcosmo_solver(pcmobj, verbose=None): '''Generate ddcosmo function to compute energy and potential matrix ''' return pcmobj._get_vind def energy(pcmobj, dm): ''' ddCOSMO energy Es = 1/2 f(eps) \int rho(r) W(r) dr ''' epcm = pcmobj._get_vind(dm)[0] return epcm def get_atomic_radii(pcmobj): mol = pcmobj.mol vdw_radii = pcmobj.radii_table atom_radii = pcmobj.atom_radii atom_symb = [mol.atom_symbol(i) for i in range(mol.natm)] r_vdw = [vdw_radii[gto.charge(x)] for x in atom_symb] if atom_radii is not None: for i in range(mol.natm): if atom_symb[i] in atom_radii: r_vdw[i] = atom_radii[atom_symb[i]] return numpy.asarray(r_vdw) def regularize_xt(t, eta): xt = numpy.zeros_like(t) inner = t <= 1-eta on_shell = (1-eta < t) & (t < 1) xt[inner] = 1 ti = t[on_shell] # JCTC, 9, 3637 xt[on_shell] = 1./eta**5 * (1-ti)**3 * (6*ti**2 + (15*eta-12)*ti + 10*eta**2 - 15*eta + 6) # JCP, 139, 054111 # xt[on_shell] = 1./eta**4 * (1-ti)**2 * (ti-1+2*eta)**2 return xt def make_grids_one_sphere(lebedev_order): ngrid_1sph = gen_grid.LEBEDEV_ORDER[lebedev_order] leb_grid = numpy.empty((ngrid_1sph,4)) gen_grid.libdft.MakeAngularGrid(leb_grid.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(ngrid_1sph)) coords_1sph = leb_grid[:,:3] # Note the Lebedev angular grids are normalized to 1 in pyscf weights_1sph = 4*numpy.pi * leb_grid[:,3] return coords_1sph, weights_1sph def make_L(pcmobj, r_vdw, ylm_1sph, fi): # See JCTC, 9, 3637, Eq (18) mol = pcmobj.mol natm = mol.natm lmax = pcmobj.lmax eta = pcmobj.eta nlm = (lmax+1)**2 coords_1sph, weights_1sph = make_grids_one_sphere(pcmobj.lebedev_order) ngrid_1sph = weights_1sph.size atom_coords = mol.atom_coords() ylm_1sph = ylm_1sph.reshape(nlm,ngrid_1sph) # JCP, 141, 184108 Eq (9), (12) is incorrect # L_diag = <lm|(1/|s-s'|)|l'm'> # Using Laplace expansion for electrostatic potential 1/r # L_diag = 4pi/(2l+1)/|s| <lm|l'm'> L_diag = numpy.zeros((natm,nlm)) p1 = 0 for l in range(lmax+1): p0, p1 = p1, p1 + (l*2+1) L_diag[:,p0:p1] = 4*numpy.pi/(l*2+1) L_diag *= 1./r_vdw.reshape(-1,1) Lmat = numpy.diag(L_diag.ravel()).reshape(natm,nlm,natm,nlm) for ja in range(natm): # scale the weight, precontract d_nj and w_n # see JCTC 9, 3637, Eq (16) - (18) # Note all values are scaled by 1/r_vdw to make the formulas # consistent to Psi in JCP, 141, 184108 part_weights = weights_1sph.copy() part_weights[fi[ja]>1] /= fi[ja,fi[ja]>1] for ka in atoms_with_vdw_overlap(ja, atom_coords, r_vdw): vjk = r_vdw[ja] * coords_1sph + atom_coords[ja] - atom_coords[ka] tjk = lib.norm(vjk, axis=1) / r_vdw[ka] wjk = pcmobj.regularize_xt(tjk, eta, r_vdw[ka]) wjk *= part_weights pol = sph.multipoles(vjk, lmax) p1 = 0 for l in range(lmax+1): fac = 4*numpy.pi/(l*2+1) / r_vdw[ka]**(l+1) p0, p1 = p1, p1 + (l*2+1) a = 
numpy.einsum('xn,n,mn->xm', ylm_1sph, wjk, pol[l]) Lmat[ja,:,ka,p0:p1] += -fac * a return Lmat def make_fi(pcmobj, r_vdw): coords_1sph, weights_1sph = make_grids_one_sphere(pcmobj.lebedev_order) mol = pcmobj.mol eta = pcmobj.eta natm = mol.natm atom_coords = mol.atom_coords() ngrid_1sph = coords_1sph.shape[0] fi = numpy.zeros((natm,ngrid_1sph)) for ia in range(natm): for ja in atoms_with_vdw_overlap(ia, atom_coords, r_vdw): v = r_vdw[ia]*coords_1sph + atom_coords[ia] - atom_coords[ja] rv = lib.norm(v, axis=1) t = rv / r_vdw[ja] xt = pcmobj.regularize_xt(t, eta, r_vdw[ja]) fi[ia] += xt fi[fi < 1e-20] = 0 return fi def make_phi(pcmobj, dm, r_vdw, ui, ylm_1sph, with_nuc=True): ''' Induced potential of ddCOSMO model Kwargs: with_nuc (bool): Mute the contribution of nuclear charges when computing the second order derivatives of energy ''' mol = pcmobj.mol natm = mol.natm coords_1sph, weights_1sph = make_grids_one_sphere(pcmobj.lebedev_order) ngrid_1sph = coords_1sph.shape[0] dms = numpy.asarray(dm) is_single_dm = dms.ndim == 2 nao = dms.shape[-1] dms = dms.reshape(-1,nao,nao) n_dm = dms.shape[0] diagidx = numpy.arange(nao) diagidx = diagidx*(diagidx+1)//2 + diagidx tril_dm = lib.pack_tril(dms+dms.transpose(0,2,1)) tril_dm[:,diagidx] *= .5 atom_coords = mol.atom_coords() atom_charges = mol.atom_charges() extern_point_idx = ui > 0 cav_coords = (atom_coords.reshape(natm,1,3) + numpy.einsum('r,gx->rgx', r_vdw, coords_1sph)) v_phi = numpy.zeros((n_dm, natm, ngrid_1sph)) if with_nuc: for ia in range(natm): # Note (-) sign is not applied to atom_charges, because (-) is explicitly # included in rhs and L matrix d_rs = atom_coords.reshape(-1,1,3) - cav_coords[ia] v_phi[:,ia] = numpy.einsum('z,zp->p', atom_charges, 1./lib.norm(d_rs,axis=2)) max_memory = pcmobj.max_memory - lib.current_memory()[0] blksize = int(max(max_memory*.9e6/8/nao**2, 400)) cav_coords = cav_coords[extern_point_idx] v_phi_e = numpy.empty((n_dm, cav_coords.shape[0])) int3c2e = mol._add_suffix('int3c2e') cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e) for i0, i1 in lib.prange(0, cav_coords.shape[0], blksize): fakemol = gto.fakemol_for_charges(cav_coords[i0:i1]) v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s2ij', cintopt=cintopt) v_phi_e[:,i0:i1] = numpy.einsum('nx,xk->nk', tril_dm, v_nj) v_phi[:,extern_point_idx] -= v_phi_e phi = -numpy.einsum('n,xn,jn,ijn->ijx', weights_1sph, ylm_1sph, ui, v_phi) if is_single_dm: phi = phi[0] return phi def make_psi_vmat(pcmobj, dm, r_vdw, ui, ylm_1sph, cached_pol, Xvec, L, with_nuc=True): ''' The first order derivative of E_ddCOSMO wrt density matrix Kwargs: with_nuc (bool): Mute the contribution of nuclear charges when computing the second order derivatives of energy. 
''' mol = pcmobj.mol natm = mol.natm lmax = pcmobj.lmax nlm = (lmax+1)**2 dms = numpy.asarray(dm) is_single_dm = dms.ndim == 2 grids = pcmobj.grids ni = numint.NumInt() max_memory = pcmobj.max_memory - lib.current_memory()[0] make_rho, n_dm, nao = ni._gen_rho_evaluator(mol, dms) dms = dms.reshape(n_dm,nao,nao) Xvec = Xvec.reshape(n_dm, natm, nlm) i1 = 0 scaled_weights = numpy.empty((n_dm, grids.weights.size)) for ia in range(natm): fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)] fac_pol = _vstack_factor_fak_pol(fak_pol, lmax) i0, i1 = i1, i1 + fac_pol.shape[1] scaled_weights[:,i0:i1] = numpy.einsum('mn,im->in', fac_pol, Xvec[:,ia]) scaled_weights *= grids.weights shls_slice = (0, mol.nbas) ao_loc = mol.ao_loc_nr() den = numpy.empty((n_dm, grids.weights.size)) vmat = numpy.zeros((n_dm, nao, nao)) p1 = 0 aow = None for ao, mask, weight, coords \ in ni.block_loop(mol, grids, nao, 0, max_memory): p0, p1 = p1, p1 + weight.size for i in range(n_dm): den[i,p0:p1] = make_rho(i, ao, mask, 'LDA') aow = numint._scale_ao(ao, scaled_weights[i,p0:p1], out=aow) vmat[i] -= numint._dot_ao_ao(mol, ao, aow, mask, shls_slice, ao_loc) den *= grids.weights ao = aow = scaled_weights = None nelec_leak = 0 psi = numpy.zeros((n_dm, natm, nlm)) i1 = 0 for ia in range(natm): fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)] fac_pol = _vstack_factor_fak_pol(fak_pol, lmax) i0, i1 = i1, i1 + fac_pol.shape[1] nelec_leak += den[:,i0:i1][:,leak_idx].sum(axis=1) psi[:,ia] = -numpy.einsum('in,mn->im', den[:,i0:i1], fac_pol) logger.debug(pcmobj, 'electron leaks %s', nelec_leak) # Contribution of nuclear charges to the total density # The factor numpy.sqrt(4*numpy.pi) is due to the product of 4*pi * Y_0^0 if with_nuc: for ia in range(natm): psi[:,ia,0] += numpy.sqrt(4*numpy.pi)/r_vdw[ia] * mol.atom_charge(ia) # <Psi, L^{-1}g> -> Psi = SL the adjoint equation to LX = g L_S = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi.reshape(n_dm,-1).T) L_S = L_S.reshape(natm,nlm,n_dm).transpose(2,0,1) coords_1sph, weights_1sph = make_grids_one_sphere(pcmobj.lebedev_order) # JCP, 141, 184108, Eq (39) xi_jn = numpy.einsum('n,jn,xn,ijx->ijn', weights_1sph, ui, ylm_1sph, L_S) extern_point_idx = ui > 0 cav_coords = (mol.atom_coords().reshape(natm,1,3) + numpy.einsum('r,gx->rgx', r_vdw, coords_1sph)) cav_coords = cav_coords[extern_point_idx] xi_jn = xi_jn[:,extern_point_idx] max_memory = pcmobj.max_memory - lib.current_memory()[0] blksize = int(max(max_memory*.9e6/8/nao**2, 400)) cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, 'int3c2e') vmat_tril = 0 for i0, i1 in lib.prange(0, cav_coords.shape[0], blksize): fakemol = gto.fakemol_for_charges(cav_coords[i0:i1]) v_nj = df.incore.aux_e2(mol, fakemol, intor='int3c2e', aosym='s2ij', cintopt=cintopt) vmat_tril += numpy.einsum('xn,in->ix', v_nj, xi_jn[:,i0:i1]) vmat += lib.unpack_tril(vmat_tril) if is_single_dm: psi = psi[0] L_S = L_S[0] vmat = vmat[0] return psi, vmat, L_S def cache_fake_multipoles(grids, r_vdw, lmax): # For each type of atoms, cache the product of last two terms in # JCP, 141, 184108, Eq (31): # x_{<}^{l} / x_{>}^{l+1} Y_l^m mol = grids.mol atom_grids_tab = grids.gen_atomic_grids(mol) r_vdw_type = {} for ia in range(mol.natm): symb = mol.atom_symbol(ia) if symb not in r_vdw_type: r_vdw_type[symb] = r_vdw[ia] cached_pol = {} for symb in atom_grids_tab: x_nj, w = atom_grids_tab[symb] r = lib.norm(x_nj, axis=1) # Different equations are used in JCTC, 9, 3637. r*Ys (the fake_pole) # is computed as r^l/r_vdw. "leak_idx" is not needed. 
# Here, the implementation is based on JCP, 141, 184108 leak_idx = r > r_vdw_type[symb] pol = sph.multipoles(x_nj, lmax) fak_pol = [] for l in range(lmax+1): # x_{<}^{l} / x_{>}^{l+1} Y_l^m in JCP, 141, 184108, Eq (31) #:Ys = sph.real_sph_vec(x_nj/r.reshape(-1,1), lmax, True) #:rr = numpy.zeros_like(r) #:rr[r<=r_vdw[ia]] = r[r<=r_vdw[ia]]**l / r_vdw[ia]**(l+1) #:rr[r> r_vdw[ia]] = r_vdw[ia]**l / r[r>r_vdw[ia]]**(l+1) #:xx_ylm = numpy.einsum('n,mn->mn', rr, Ys[l]) xx_ylm = pol[l] * (1./r_vdw_type[symb]**(l+1)) # The line below is not needed for JCTC, 9, 3637 xx_ylm[:,leak_idx] *= (r_vdw_type[symb]/r[leak_idx])**(2*l+1) fak_pol.append(xx_ylm) cached_pol[symb] = (fak_pol, leak_idx) return cached_pol def _vstack_factor_fak_pol(fak_pol, lmax): fac_pol = [] for l in range(lmax+1): fac = 4*numpy.pi/(l*2+1) fac_pol.append(fac * fak_pol[l]) return numpy.vstack(fac_pol) def atoms_with_vdw_overlap(atm_id, atom_coords, r_vdw): atm_dist = atom_coords - atom_coords[atm_id] atm_dist = numpy.einsum('pi,pi->p', atm_dist, atm_dist) atm_dist[atm_id] = 1e200 vdw_sum = r_vdw + r_vdw[atm_id] atoms_nearby = numpy.where(atm_dist < vdw_sum**2)[0] return atoms_nearby class DDCOSMO(lib.StreamObject): def __init__(self, mol): self.mol = mol self.stdout = mol.stdout self.verbose = mol.verbose self.max_memory = mol.max_memory #self.radii_table = radii.VDW self.radii_table = radii.UFF*1.1 #self.radii_table = radii.MM3 self.atom_radii = None self.lebedev_order = 17 self.lmax = 6 # max angular momentum of spherical harmonics basis self.eta = .1 # regularization parameter self.eps = 78.3553 self.grids = gen_grid.Grids(mol) # The maximum iterations and convergence tolerance to update solvent # effects in CASCI, CC, MP, CI, ... methods self.max_cycle = 20 self.conv_tol = 1e-7 self.state_id = 0 # Set frozen to enable/disable the frozen ddCOSMO solvent potential. # If frozen is set, _dm (density matrix) needs to be specified to # generate the potential. self.frozen = False # In the rapid process (such as vertical excitation), solvent does not # follow the fast change of electronic structure of solutes. A # calculation of non-equilibrium solvation should be used. For slow # process (like geometry optimization), solvent has enough time to # respond to the changes in electronic structure or geometry of # solutes. Equilibrium solvation should be enabled in the calculation. # See for example JPCA, 104, 5631 (2000) # # Note this attribute has no effects if .frozen is enabled. # self.equilibrium_solvation = False ################################################## # don't modify the following attributes, they are not input options # e (the dielectric correction) and v (the additional potential) are # updated during the SCF iterations self.e = None self.v = None self._dm = None self._intermediates = None self._keys = set(self.__dict__.keys()) @property def dm(self): '''Density matrix to generate the frozen ddCOSMO solvent potential.''' return self._dm @dm.setter def dm(self, dm): '''Set dm to enable/disable the frozen ddCOSMO solvent potential. Setting dm to None will disable the frozen potental, i.e. the potential will respond to the change of the density during SCF iterations. 
        '''
        if isinstance(dm, numpy.ndarray):
            self._dm = dm
            self.e, self.v = self.kernel(dm)
        else:
            self.e = self.v = self._dm = None

    # define epcm and vpcm for backward compatibility
    @property
    def epcm(self):
        return self.e_solvent
    @epcm.setter
    def epcm(self, val):
        self.e_solvent = val

    @property
    def vpcm(self):
        return self.v_solvent
    @vpcm.setter
    def vpcm(self, val):
        self.v_solvent = val

    def __setattr__(self, key, val):
        if key in ('radii_table', 'atom_radii', 'lebedev_order', 'lmax',
                   'eta', 'eps', 'grids'):
            self.reset()
        super(DDCOSMO, self).__setattr__(key, val)

    def dump_flags(self, verbose=None):
        logger.info(self, '******** %s ********', self.__class__)
        logger.info(self, 'lebedev_order = %s (%d grids per sphere)',
                    self.lebedev_order, gen_grid.LEBEDEV_ORDER[self.lebedev_order])
        logger.info(self, 'lmax = %s', self.lmax)
        logger.info(self, 'eta = %s', self.eta)
        logger.info(self, 'eps = %s', self.eps)
        logger.info(self, 'frozen = %s', self.frozen)
        logger.info(self, 'equilibrium_solvation = %s', self.equilibrium_solvation)
        logger.debug2(self, 'radii_table %s', self.radii_table)
        if self.atom_radii:
            logger.info(self, 'User specified atomic radii %s', str(self.atom_radii))
        self.grids.dump_flags(verbose)
        return self

    # TODO: Test the value of psi (make_psi_vmat). All intermediates except
    # psi are tested against the ddPCM implementation on github. Psi needs to
    # be computed by the host program; it requires the numerical integration
    # code.
    def build(self):
        if self.grids.coords is None:
            self.grids.build(with_non0tab=True)

        mol = self.mol
        natm = mol.natm
        lmax = self.lmax

        r_vdw = self.get_atomic_radii()
        coords_1sph, weights_1sph = make_grids_one_sphere(self.lebedev_order)
        ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, lmax, True))

        fi = make_fi(self, r_vdw)
        ui = 1 - fi
        ui[ui < 0] = 0
        nexposed = numpy.count_nonzero(ui == 1)
        nbury = numpy.count_nonzero(ui == 0)
        on_shell = numpy.count_nonzero(ui > 0) - nexposed
        logger.debug(self, 'Num points exposed %d', nexposed)
        logger.debug(self, 'Num points buried %d', nbury)
        logger.debug(self, 'Num points on shell %d', on_shell)

        nlm = (lmax + 1)**2
        Lmat = make_L(self, r_vdw, ylm_1sph, fi)
        Lmat = Lmat.reshape(natm*nlm, -1)

        cached_pol = cache_fake_multipoles(self.grids, r_vdw, lmax)

        self._intermediates = {
            'r_vdw': r_vdw,
            'ylm_1sph': ylm_1sph,
            'ui': ui,
            'Lmat': Lmat,
            'cached_pol': cached_pol,
        }

    def kernel(self, dm):
        '''Single-shot solvent effects for the given density matrix.
        '''
        self._dm = dm
        self.e, self.v = self._get_vind(dm)
        logger.info(self, '%s E_diel = %.15g', self.__class__, self.e)
        return self.e, self.v

    def reset(self, mol=None):
        '''Reset mol and clean up relevant attributes for scanner mode'''
        if mol is not None:
            self.mol = mol
            self.grids.reset(mol)
        self._intermediates = None
        return self

    def _get_vind(self, dm):
        '''Single-shot solvent effects for the given density matrix.
        '''
        if not self._intermediates or self.grids.coords is None:
            self.build()

        mol = self.mol
        r_vdw      = self._intermediates['r_vdw']
        ylm_1sph   = self._intermediates['ylm_1sph']
        ui         = self._intermediates['ui']
        Lmat       = self._intermediates['Lmat']
        cached_pol = self._intermediates['cached_pol']

        if not (isinstance(dm, numpy.ndarray) and dm.ndim == 2):
            # spin-traced DM for UHF or ROHF
            dm = dm[0] + dm[1]

        phi = make_phi(self, dm, r_vdw, ui, ylm_1sph)
        Xvec = numpy.linalg.solve(Lmat, phi.ravel()).reshape(mol.natm, -1)
        psi, vmat = make_psi_vmat(self, dm, r_vdw, ui, ylm_1sph,
                                  cached_pol, Xvec, Lmat)[:2]
        dielectric = self.eps
        if dielectric > 0:
            f_epsilon = (dielectric - 1.) / dielectric
        else:
            f_epsilon = 1
        epcm = .5 * f_epsilon * numpy.einsum('jx,jx', psi, Xvec)
        vpcm = .5 * f_epsilon * vmat
        return epcm, vpcm

    def _B_dot_x(self, dm):
        '''Compute the matrix-vector product B * x.  The B matrix, as defined
        in the paper R. Cammi, JPCA, 104, 5631 (2000), is the second-order
        derivative of E_solvation with respect to the density matrices.

        Note: strictly speaking, B is not symmetric in ddCOSMO.  To make it
        compatible with the CIS framework, it is symmetrized in the current
        implementation.
        '''
        if not self._intermediates or self.grids.coords is None:
            self.build()

        mol = self.mol
        r_vdw      = self._intermediates['r_vdw']
        ylm_1sph   = self._intermediates['ylm_1sph']
        ui         = self._intermediates['ui']
        Lmat       = self._intermediates['Lmat']
        cached_pol = self._intermediates['cached_pol']

        natm = mol.natm
        nlm = (self.lmax + 1)**2
        dms = numpy.asarray(dm)
        dm_shape = dms.shape
        nao = dm_shape[-1]
        dms = dms.reshape(-1, nao, nao)

        phi = make_phi(self, dms, r_vdw, ui, ylm_1sph, with_nuc=False)
        Xvec = numpy.linalg.solve(Lmat, phi.reshape(-1, natm*nlm).T)
        Xvec = Xvec.reshape(natm, nlm, -1).transpose(2, 0, 1)
        vmat = make_psi_vmat(self, dms, r_vdw, ui, ylm_1sph,
                             cached_pol, Xvec, Lmat, with_nuc=False)[1]
        dielectric = self.eps
        if dielectric > 0:
            f_epsilon = (dielectric - 1.) / dielectric
        else:
            f_epsilon = 1
        return .5 * f_epsilon * vmat.reshape(dm_shape)

    energy = energy
    gen_solver = as_solver = gen_ddcosmo_solver
    get_atomic_radii = get_atomic_radii

    def regularize_xt(self, t, eta, scale=1):
        # scale = eta*scale, is it correct?
        return regularize_xt(t, eta*scale)

    def nuc_grad_method(self, grad_method):
        '''For a grad_method in vacuum, add the nuclear gradients of the solvent.'''
        from pyscf import tdscf
        from pyscf.solvent import ddcosmo_grad, _ddcosmo_tdscf_grad
        if self.frozen:
            raise RuntimeError('Frozen solvent model is not supported for '
                               'energy gradients')
        if isinstance(grad_method.base, (tdscf.rhf.TDA, tdscf.rhf.TDHF)):
            return _ddcosmo_tdscf_grad.make_grad_object(grad_method)
        else:
            return ddcosmo_grad.make_grad_object(grad_method)


if __name__ == '__main__':
    from pyscf import scf
    from pyscf import mcscf
    from pyscf import cc

    mol = gto.M(atom='H 0 0 0; H 0 1 1.2; H 1. .1 0; H .5 .5 1')
    natm = mol.natm
    r_vdw = [radii.VDW[gto.charge(mol.atom_symbol(i))] for i in range(natm)]
    r_vdw = numpy.asarray(r_vdw)
    pcmobj = DDCOSMO(mol)
    pcmobj.regularize_xt = lambda t, eta, scale: regularize_xt(t, eta)
    pcmobj.lebedev_order = 7
    pcmobj.lmax = 6
    pcmobj.eta = 0.1
    nlm = (pcmobj.lmax + 1)**2
    coords_1sph, weights_1sph = make_grids_one_sphere(pcmobj.lebedev_order)
    fi = make_fi(pcmobj, r_vdw)
    ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, pcmobj.lmax, True))
    L = make_L(pcmobj, r_vdw, ylm_1sph, fi)
    print(lib.fp(L) - 6.2823493771037473)

    mol = gto.Mole()
    mol.atom = '''
           O  0.00000000  0.00000000 -0.11081188
           H -0.00000000 -0.84695236  0.59109389
           H -0.00000000  0.89830571  0.52404783
           '''
    mol.basis = '3-21g'  # cc-pvdz
    mol.build()
    cm = DDCOSMO(mol)
    cm.verbose = 4
    mf = ddcosmo_for_scf(scf.RHF(mol), cm)  # .newton()
    mf.verbose = 4
    print(mf.kernel() - -75.570364368059)
    cm.verbose = 3
    e = ddcosmo_for_casci(mcscf.CASCI(mf, 4, 4)).kernel()[0]
    print(e - -75.5743583693215)

    cc_cosmo = ddcosmo_for_post_scf(cc.CCSD(mf)).run()
    print(cc_cosmo.e_tot - -75.70961637250134)

    mol = gto.Mole()
    mol.atom = '''
           Fe 0.00000000  0.00000000 -0.11081188
           H -0.00000000 -0.84695236  0.59109389
           H -0.00000000  0.89830571  0.52404783
           '''
    mol.basis = '3-21g'  # cc-pvdz
    mol.build()
    cm = DDCOSMO(mol)
    cm.eps = -1
    cm.verbose = 4
    mf = ddcosmo_for_scf(scf.ROHF(mol), cm).newton()
    mf.verbose = 4
    mf.kernel()
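    # ----------------------------------------------------------------------
    # Illustrative sketch (not part of pyscf): the dielectric energy formed
    # in _get_vind() reduces to solving L X = phi and contracting
    #   E_diel = 1/2 * f(eps) * <psi, X>,   f(eps) = (eps - 1) / eps.
    # Random arrays stand in for the real make_phi/make_psi_vmat
    # intermediates; the eps value and shapes below are arbitrary examples.
    import numpy  # already imported by this module; repeated so the sketch stands alone
    natm_toy, lmax_toy, eps_toy = 3, 6, 78.3553
    nlm_toy = (lmax_toy + 1)**2
    rng = numpy.random.default_rng(0)
    Lmat_toy = (numpy.eye(natm_toy*nlm_toy) +
                0.01 * rng.standard_normal((natm_toy*nlm_toy, natm_toy*nlm_toy)))
    phi_toy = rng.standard_normal((natm_toy, nlm_toy))   # stand-in for make_phi output
    psi_toy = rng.standard_normal((natm_toy, nlm_toy))   # stand-in for make_psi_vmat output
    X_toy = numpy.linalg.solve(Lmat_toy, phi_toy.ravel()).reshape(natm_toy, nlm_toy)
    f_eps_toy = (eps_toy - 1.) / eps_toy if eps_toy > 0 else 1
    print('toy E_diel =', .5 * f_eps_toy * numpy.einsum('jx,jx', psi_toy, X_toy))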
gkc1000/pyscf
pyscf/solvent/ddcosmo.py
Python
apache-2.0
35,982
[ "Gaussian", "PySCF" ]
378688be6108e6ece86ec61cb4f9b5a851b6910db8aaeb50fc1749ad666c2da0
# inputs: directories
# read in each bam, check that it's
# output: combined, indexed bam
# for each input directory

import argparse
import glob
import logging
import os
import peewee
from peewee import fn
from playhouse import shortcuts
import pysam
import traceback

from utils.haplotype_caller import compute_output_bam_path
from utils.database import Sample, _SharedVariantPositionFields
from utils.constants import BAM_OUTPUT_DIR, PICARD_JAR_PATH, MAX_SAMPLES_TO_SHOW_PER_VARIANT


def run(cmd):
    logging.info(cmd)
    os.system(cmd)


def bam_path_to_fields(bam_path):
    # for example: /read_viz/22/5822/chr22-46615822-A-G_het0.bam
    return os.path.basename(bam_path).replace(".bam", "").replace('_', '-').split('-')


def bam_path_to_dict(bam_path):
    # for example: /read_viz/22/5822/chr22-46615822-A-G_het0.bam
    return dict(zip(['chrom', 'pos', 'ref', 'alt', 'het_or_hom_or_hemi'],
                    bam_path_to_fields(bam_path)))


def bam_path_to_read_group_id(bam_path):
    return os.path.basename(str(bam_path).replace("chr", "").replace(".bam", ""))


def get_all_samples_to_combine(variants_to_process):
    all_successful_samples = []
    for v in variants_to_process:
        # choose the 1st min(n_expected_samples, MAX_SAMPLES_TO_SHOW_PER_VARIANT) samples
        v.n_expected_samples = min(v.n_expected_samples, MAX_SAMPLES_TO_SHOW_PER_VARIANT)

        successful_samples = list(Sample.select().where(
            (Sample.chrom == v.chrom) &
            (Sample.pos == v.pos) &
            (Sample.ref == v.ref) &
            (Sample.alt == v.alt) &
            (Sample.het_or_hom_or_hemi == v.het_or_hom_or_hemi) &
            (Sample.hc_succeeded == 1) &
            (Sample.started == 1) &
            (Sample.finished == 1)
        ).order_by(Sample.id.asc()).limit(v.n_expected_samples))

        v.n_available_samples = len(successful_samples)
        for i, s in enumerate(successful_samples):
            s.output_bam_path2 = compute_output_bam_path(
                s.chrom, s.pos, s.ref, s.alt, s.het_or_hom_or_hemi, i)
            if s.output_bam_path != s.output_bam_path2:
                print(s.output_bam_path + " => NEW OUTPUT PATH: " + s.output_bam_path2)

        if v.n_available_samples < v.n_expected_samples:
            logging.error("%s-%s-%s-%s %s - ERROR: expected %s samples. "
                          "Found only %s successful samples in database" % (
                v.chrom, v.pos, v.ref, v.alt, v.het_or_hom_or_hemi,
                v.n_expected_samples, v.n_available_samples))

        if len(successful_samples) > len(set((s.output_bam_path for s in successful_samples))):
            raise ValueError("%s-%s-%s-%s %s - ERROR: duplicate readviz bam paths found" % (
                v.chrom, v.pos, v.ref, v.alt, v.het_or_hom_or_hemi))

        all_successful_samples.extend(successful_samples)

    return all_successful_samples


def generate_combined_bam(base_dir, samples_to_combine, temp_combined_bam_path, combined_bam_path):
    logging.info("combining %s bams into %s" % (len(samples_to_combine), combined_bam_path))

    # sort bams by position so that the reads in the combined file are roughly in sorted order
    sorted_samples_to_combine = sorted(
        samples_to_combine, key=lambda s: int(bam_path_to_dict(s.output_bam_path)['pos']))

    read_group_ids = [bam_path_to_read_group_id(s.output_bam_path2)
                      for s in sorted_samples_to_combine]
    read_groups = [{'ID': read_group_id, "SM": 0} for read_group_id in read_group_ids]

    obam = None
    for s in samples_to_combine:
        # try reading in the reassembled bam and adding it to the combined bam
        try:
            ibam = pysam.AlignmentFile(os.path.join(base_dir, s.output_bam_path), "rb")
            if obam is None:
                header = {
                    'HD': {'VN': '1.4'},  # , 'SO': 'coordinate'
                    'SQ': ibam.header['SQ'],
                    'RG': read_groups,
                }
                obam = pysam.AlignmentFile(temp_combined_bam_path, "wb", header=header)

            # iterate over the reads
            rg_tag = (('RG', bam_path_to_read_group_id(s.output_bam_path2)), )
            for r in ibam:
                r.tags = rg_tag
                obam.write(r)
            ibam.close()
        except (IOError, ValueError) as e:
            logging.error("ERROR on file %s: %s", s.output_bam_path2, e)
            logging.error(traceback.format_exc())

    if obam is not None:
        obam.close()

    # sort the file
    logging.info("Running picard SortSam:")
    picard_jar = PICARD_JAR_PATH
    run(("java -jar %(picard_jar)s SortSam VALIDATION_STRINGENCY=LENIENT "
         "I=%(temp_combined_bam_path)s O=%(combined_bam_path)s SO=coordinate CREATE_INDEX=true") % locals())
    run("rm %(temp_combined_bam_path)s" % locals())

    bai_path = combined_bam_path.replace(".bam", ".bai")
    # copy the .bai file to .bam.bai since this is what IGV.js looks for
    run("cp %(bai_path)s %(combined_bam_path)s.bai" % locals())


def generate_sqlite_db(variants_to_process, temp_sqlite_db_path, sqlite_db_path):
    logging.info("populating sqlite database: " + temp_sqlite_db_path)
    if os.path.isfile(temp_sqlite_db_path):
        run("rm -f " + temp_sqlite_db_path)

    sqlite_db = peewee.SqliteDatabase(temp_sqlite_db_path, autocommit=False)

    class t(_SharedVariantPositionFields):
        n_expected_samples = peewee.IntegerField(index=True, null=True)
        n_available_samples = peewee.IntegerField(index=True, null=True)

        class Meta:
            database = sqlite_db
            indexes = (
                (('chrom', 'pos', 'ref', 'alt', 'het_or_hom_or_hemi'), True),  # True means unique index
            )

    t.create_table(fail_silently=True)

    # copy the records from the Variant table used by generate_HC_bams.py
    sqlite_db.connect()
    with sqlite_db.atomic():
        for v in variants_to_process:  # Variant.select().where(Variant.finished==1).dicts():
            # shortcuts.model_to_dict(v)
            d = {
                'chrom': v.chrom,
                'pos': v.pos,
                'ref': v.ref,
                'alt': v.alt,
                'het_or_hom_or_hemi': v.het_or_hom_or_hemi,
                'n_expected_samples': v.n_expected_samples,
                'n_available_samples': v.n_available_samples,
            }
            # delete readviz_bam_paths as they're no longer relevant because the
            # data from these is being combined into one bam file
            # print("INSERTING " + str(d))
            t.insert(**d).execute()
    sqlite_db.close()

    run("mv %s %s" % (temp_sqlite_db_path, sqlite_db_path))


def combine_bams(output_dir, temp_dir, chrom, position_hash, force=False):
    """Generates the combined bam.

    Args:
        output_dir: the top level directory where files are stored.
        temp_dir: a local non-NFS directory that can be used for creating /
            modifying a SQLite database. This avoids SQLite incompatibility
            with network drives.
        chrom: chromosome for which to combine bams
        position_hash: bams are divided between directories with names 000
            through 999. This should be a number between 0 and 999 which
            specifies which of these directories to process.
        force: proceed even if the .bam and .db are already there on disk.
    """
    # create combined_bams output sub-directory
    hash_dir = "%03d" % (position_hash % 1000)
    temp_combined_bam_path = os.path.join(output_dir, "combined_bams", chrom,
                                          "_tmp.combined_chr%s_%s.bam" % (chrom, hash_dir))
    combined_bam_path = temp_combined_bam_path.replace("_tmp.", "")
    if not os.path.isdir(os.path.dirname(temp_combined_bam_path)):
        run("mkdir -m 777 -p " + os.path.dirname(temp_combined_bam_path))

    temp_sqlite_db_path = os.path.join(
        temp_dir, os.path.basename(temp_combined_bam_path.replace(".bam", ".db")))
    sqlite_db_path = combined_bam_path.replace(".bam", ".db")

    # create iterator over variants in this bin
    # variants_to_process = [v for v in Sample._meta.database.execute_sql((
    #     "select chrom, pos, ref, alt, het_or_hom_or_hemi, count(*) as total_samples from %s "
    #     "where chrom='%s' and pos_mod_1000=%s group by chrom, pos, ref, alt, het_or_hom_or_hemi") % (
    #         Sample._meta.db_table, chrom, position_hash)).dicts()]
    variants_to_process = [v for v in Sample.select(
        Sample.chrom, Sample.pos, Sample.ref, Sample.alt, Sample.het_or_hom_or_hemi,
        peewee.fn.COUNT(Sample.id).alias('n_expected_samples')
    ).where(
        Sample.chrom == chrom, Sample.pos_mod_1000 == position_hash
    ).group_by(
        Sample.chrom, Sample.pos, Sample.ref, Sample.alt, Sample.het_or_hom_or_hemi
    ).execute()]

    # choose the samples to combine, and get their reassembled bam paths
    all_samples = get_all_samples_to_combine(variants_to_process)

    # check if combine_bam output files already exist on disk, and skip if yes
    if not force and os.path.isfile(combined_bam_path) and os.path.isfile(sqlite_db_path):
        try:
            ibam = pysam.AlignmentFile(combined_bam_path, "rb")
            num_read_groups = len(ibam.header['RG'])
            ibam.close()
            if num_read_groups == len(all_samples):
                logging.info("%s found on disk. size=%s read groups. Skipping..." % (
                    combined_bam_path, num_read_groups))
                return
            else:
                logging.info("ERROR: %s found on disk but num_read_groups (%s) != len(all_samples) (%s)" % (
                    combined_bam_path, num_read_groups, len(all_samples)))
        except (IOError, ValueError) as e:
            logging.warning("WARNING: couldn't read combined file %s: %s", combined_bam_path, e)
            logging.warning(traceback.format_exc())
            logging.warning("Will regenerate it..")

    # check that all_samples exist on disk
    all_available_bam_paths_on_disk = set(glob.glob(os.path.join(output_dir, chrom, hash_dir, 'chr*.bam')))
    all_available_bam_paths_on_disk = set(map(
        lambda p: p.replace(output_dir, '').strip('/'), list(all_available_bam_paths_on_disk)))
    all_samples_with_bam_paths_on_disk = [
        s for s in all_samples if s.output_bam_path in all_available_bam_paths_on_disk]
    if len(all_samples_with_bam_paths_on_disk) < len(all_samples):
        logging.info("ERROR: found only %s out of %s reassembled bams on disk in %s/%s/%s. "
                     "Missing bams: %s" % (
            len(all_samples_with_bam_paths_on_disk), len(all_samples), output_dir, chrom, hash_dir,
            ", ".join([s.output_bam_path for s in all_samples
                       if s.output_bam_path not in all_available_bam_paths_on_disk])))
    else:
        logging.info("all %s out of %s reassembled bams found on disk in %s/%s/%s" % (
            len(all_samples_with_bam_paths_on_disk), len(all_samples), output_dir, chrom, hash_dir))

    if len(all_samples_with_bam_paths_on_disk) > 0:
        generate_combined_bam(
            base_dir=output_dir,
            samples_to_combine=all_samples_with_bam_paths_on_disk,
            temp_combined_bam_path=temp_combined_bam_path,
            combined_bam_path=combined_bam_path)

    # create a combined_chr<chrom>_<hash>.db sqlite db where the website code can
    # look up the number of expected and available readviz tracks for each variant
    # in this bin
    generate_sqlite_db(variants_to_process, temp_sqlite_db_path, sqlite_db_path)

    logging.info("-- interval finished --")  # detected by parallelize.py to mark this as done


if __name__ == "__main__":
    p = argparse.ArgumentParser("Generates combined bams")
    p.add_argument("--chrom", help="chromosome", required=True)
    p.add_argument("-d", "--output-dir", help="the top-level output directory",
                   default=BAM_OUTPUT_DIR)
    p.add_argument("-t", "--non-nfs-temp-dir",
                   help="local non-NFS-mounted temp directory to use for sqlite operations",
                   default="/tmp")
    p.add_argument("-f", "--force",
                   help="regenerate combined .bam and sqlite .db even if they already exist",
                   action="store_true")
    g = p.add_argument_group()
    g.add_argument("-k", "--position-hash",
                   help="bams are divided between directories with names 000 through 999. "
                        "This should be a number between 0 and 999 which specifies which of "
                        "these directories to process.", type=int)
    g.add_argument("-k1", "--start-pos",
                   help="bams are divided between directories with names 000 through 999. "
                        "This should be a number between 0 and 999 which specifies the start of "
                        "a range of these directories to process.", type=int)
    g.add_argument("-k2", "--end-pos", type=int,
                   help="bams are divided between directories with names 000 through 999. "
                        "This should be a number between 0 and 999 which specifies the end of "
                        "a range of these directories to process.")
    args = p.parse_args()

    if args.position_hash is not None:
        combine_bams(args.output_dir, args.non_nfs_temp_dir, args.chrom,
                     args.position_hash, force=args.force)
    elif args.start_pos is not None and args.end_pos is not None:
        for position_hash in range(args.start_pos, args.end_pos + 1):
            logging.info("-------")
            combine_bams(args.output_dir, args.non_nfs_temp_dir, args.chrom,
                         position_hash, force=args.force)
    else:
        p.error("Must specify -k or both -k1 and -k2")

# CREATE TABLE t(
#     chrom text,
#     minrep_pos integer,
#     minrep_ref text,
#     minrep_alt text,
#     n_het integer,
#     n_hom_alt integer,
#     reassembled_bams_het text,
#     reassembled_bams_hom text,
#     finished bool);
# CREATE UNIQUE INDEX variant_idx ON t(
#     chrom,
#     minrep_pos,
#     minrep_ref,
#     minrep_alt);
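# ---------------------------------------------------------------------------
# Worked example (illustrative only, not executed by the pipeline) of the
# path-parsing helpers defined at the top of this script, using the sample
# path from their own comments:
#
#   >>> bam_path_to_fields("/read_viz/22/5822/chr22-46615822-A-G_het0.bam")
#   ['chr22', '46615822', 'A', 'G', 'het0']
#   >>> bam_path_to_dict("/read_viz/22/5822/chr22-46615822-A-G_het0.bam")
#   {'chrom': 'chr22', 'pos': '46615822', 'ref': 'A', 'alt': 'G', 'het_or_hom_or_hemi': 'het0'}
#   >>> bam_path_to_read_group_id("/read_viz/22/5822/chr22-46615822-A-G_het0.bam")
#   '22-46615822-A-G_het0'
#
# Note that bam_path_to_dict() keeps the 'chr' prefix in 'chrom', while the
# read-group id strips every occurrence of "chr" from the path.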
macarthur-lab/exac_readviz_scripts
pipeline/combine_bams.py
Python
mit
13,595
[ "pysam" ]
74c27e7a2146e8d2a5025beee9eeb7b5c25629bcc87c8e1dca17f4b5e4884eec
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generic presubmit checks that can be reused by other presubmit checks.""" from __future__ import print_function import os as _os from warnings import warn _HERE = _os.path.dirname(_os.path.abspath(__file__)) # These filters will be disabled if callers do not explicitly supply a # (possibly-empty) list. Ideally over time this list could be driven to zero. # TODO(pkasting): If some of these look like "should never enable", move them # to OFF_UNLESS_MANUALLY_ENABLED_LINT_FILTERS. # # Justifications for each filter: # # - build/include : Too many; fix in the future # TODO(pkasting): Try enabling subcategories # - build/include_order : Not happening; #ifdefed includes # - build/namespaces : TODO(pkasting): Try re-enabling # - readability/casting : Mistakes a whole bunch of function pointers # - runtime/int : Can be fixed long term; volume of errors too high # - whitespace/braces : We have a lot of explicit scoping in chrome code OFF_BY_DEFAULT_LINT_FILTERS = [ '-build/include', '-build/include_order', '-build/namespaces', '-readability/casting', '-runtime/int', '-whitespace/braces', ] # These filters will be disabled unless callers explicitly enable them, because # they are undesirable in some way. # # Justifications for each filter: # - build/c++11 : Include file and feature blocklists are # google3-specific # - runtime/references : No longer banned by Google style guide # - whitespace/... : Most whitespace issues handled by clang-format OFF_UNLESS_MANUALLY_ENABLED_LINT_FILTERS = [ '-build/c++11', '-runtime/references', '-whitespace/braces', '-whitespace/comma', '-whitespace/end_of_line', '-whitespace/forcolon', '-whitespace/indent', '-whitespace/line_length', '-whitespace/newline', '-whitespace/operators', '-whitespace/parens', '-whitespace/semicolon', '-whitespace/tab', ] ### Description checks def CheckChangeHasBugField(input_api, output_api): """Requires that the changelist have a Bug: field.""" bugs = input_api.change.BugsFromDescription() if bugs: if any(b.startswith('b/') for b in bugs): return [ output_api.PresubmitNotifyResult( 'Buganizer bugs should be prefixed with b:, not b/.') ] return [] return [output_api.PresubmitNotifyResult( 'If this change has an associated bug, add Bug: [bug number].')] def CheckChangeHasNoUnwantedTags(input_api, output_api): UNWANTED_TAGS = { 'FIXED': { 'why': 'is not supported', 'instead': 'Use "Fixed:" instead.' }, # TODO: BUG, ISSUE } errors = [] for tag, desc in UNWANTED_TAGS.items(): if tag in input_api.change.tags: subs = tag, desc['why'], desc.get('instead', '') errors.append(('%s= %s. %s' % subs).rstrip()) return [output_api.PresubmitError('\n'.join(errors))] if errors else [] def CheckDoNotSubmitInDescription(input_api, output_api): """Checks that the user didn't add 'DO NOT ''SUBMIT' to the CL description. """ # Keyword is concatenated to avoid presubmit check rejecting the CL. 
keyword = 'DO NOT ' + 'SUBMIT' if keyword in input_api.change.DescriptionText(): return [output_api.PresubmitError( keyword + ' is present in the changelist description.')] return [] def CheckChangeHasDescription(input_api, output_api): """Checks the CL description is not empty.""" text = input_api.change.DescriptionText() if text.strip() == '': if input_api.is_committing: return [output_api.PresubmitError('Add a description to the CL.')] return [output_api.PresubmitNotifyResult('Add a description to the CL.')] return [] def CheckChangeWasUploaded(input_api, output_api): """Checks that the issue was uploaded before committing.""" if input_api.is_committing and not input_api.change.issue: return [output_api.PresubmitError( 'Issue wasn\'t uploaded. Please upload first.')] return [] def CheckDescriptionUsesColonInsteadOfEquals(input_api, output_api): """Checks that the CL description uses a colon after 'Bug' and 'Fixed' tags instead of equals. crbug.com only interprets the lines "Bug: xyz" and "Fixed: xyz" but not "Bug=xyz" or "Fixed=xyz". """ text = input_api.change.DescriptionText() if input_api.re.search(r'^(Bug|Fixed)=', text, flags=input_api.re.IGNORECASE | input_api.re.MULTILINE): return [output_api.PresubmitError('Use Bug:/Fixed: instead of Bug=/Fixed=')] return [] ### Content checks def CheckAuthorizedAuthor(input_api, output_api, bot_allowlist=None): """For non-googler/chromites committers, verify the author's email address is in AUTHORS. """ if input_api.is_committing: error_type = output_api.PresubmitError else: error_type = output_api.PresubmitPromptWarning author = input_api.change.author_email if not author: input_api.logging.info('No author, skipping AUTHOR check') return [] # This is used for CLs created by trusted robot accounts. if bot_allowlist and author in bot_allowlist: return [] authors_path = input_api.os_path.join( input_api.PresubmitLocalPath(), 'AUTHORS') author_re = input_api.re.compile(r'[^#]+\s+\<(.+?)\>\s*$') valid_authors = [] with open(authors_path, 'rb') as fp: for line in fp: m = author_re.match(line.decode('utf8')) if m: valid_authors.append(m.group(1).lower()) if not any(input_api.fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors): input_api.logging.info('Valid authors are %s', ', '.join(valid_authors)) return [ error_type(( # pylint: disable=line-too-long '%s is not in AUTHORS file. If you are a new contributor, please visit\n' 'https://chromium.googlesource.com/chromium/src/+/refs/heads/main/docs/contributing.md#Legal-stuff\n' # pylint: enable=line-too-long 'and read the "Legal stuff" section\n' 'If you are a chromite, verify that the contributor signed the CLA.') % author) ] return [] def CheckDoNotSubmitInFiles(input_api, output_api): """Checks that the user didn't add 'DO NOT ''SUBMIT' to any files.""" # We want to check every text file, not just source files. file_filter = lambda x : x # Keyword is concatenated to avoid presubmit check rejecting the CL. 
keyword = 'DO NOT ' + 'SUBMIT' def DoNotSubmitRule(extension, line): try: return keyword not in line # Fallback to True for non-text content except UnicodeDecodeError: return True errors = _FindNewViolationsOfRule(DoNotSubmitRule, input_api, file_filter) text = '\n'.join('Found %s in %s' % (keyword, loc) for loc in errors) if text: return [output_api.PresubmitError(text)] return [] def GetCppLintFilters(lint_filters=None): filters = OFF_UNLESS_MANUALLY_ENABLED_LINT_FILTERS[:] if lint_filters is None: lint_filters = OFF_BY_DEFAULT_LINT_FILTERS filters.extend(lint_filters) return filters def CheckChangeLintsClean(input_api, output_api, source_file_filter=None, lint_filters=None, verbose_level=None): """Checks that all '.cc' and '.h' files pass cpplint.py.""" _RE_IS_TEST = input_api.re.compile(r'.*tests?.(cc|h)$') result = [] cpplint = input_api.cpplint # Access to a protected member _XX of a client class # pylint: disable=protected-access cpplint._cpplint_state.ResetErrorCounts() cpplint._SetFilters(','.join(GetCppLintFilters(lint_filters))) # We currently are more strict with normal code than unit tests; 4 and 5 are # the verbosity level that would normally be passed to cpplint.py through # --verbose=#. Hopefully, in the future, we can be more verbose. files = [f.AbsoluteLocalPath() for f in input_api.AffectedSourceFiles(source_file_filter)] for file_name in files: if _RE_IS_TEST.match(file_name): level = 5 else: level = 4 verbose_level = verbose_level or level cpplint.ProcessFile(file_name, verbose_level) if cpplint._cpplint_state.error_count > 0: if input_api.is_committing: res_type = output_api.PresubmitError else: res_type = output_api.PresubmitPromptWarning result = [res_type('Changelist failed cpplint.py check.')] return result def CheckChangeHasNoCR(input_api, output_api, source_file_filter=None): """Checks no '\r' (CR) character is in any source files.""" cr_files = [] for f in input_api.AffectedSourceFiles(source_file_filter): if '\r' in input_api.ReadFile(f, 'rb'): cr_files.append(f.LocalPath()) if cr_files: return [output_api.PresubmitPromptWarning( 'Found a CR character in these files:', items=cr_files)] return [] def CheckChangeHasOnlyOneEol(input_api, output_api, source_file_filter=None): """Checks the files ends with one and only one \n (LF).""" eof_files = [] for f in input_api.AffectedSourceFiles(source_file_filter): contents = input_api.ReadFile(f, 'rb') # Check that the file ends in one and only one newline character. if len(contents) > 1 and (contents[-1:] != '\n' or contents[-2:-1] == '\n'): eof_files.append(f.LocalPath()) if eof_files: return [output_api.PresubmitPromptWarning( 'These files should end in one (and only one) newline character:', items=eof_files)] return [] def CheckChangeHasNoCrAndHasOnlyOneEol(input_api, output_api, source_file_filter=None): """Runs both CheckChangeHasNoCR and CheckChangeHasOnlyOneEOL in one pass. It is faster because it is reading the file only once. """ cr_files = [] eof_files = [] for f in input_api.AffectedSourceFiles(source_file_filter): contents = input_api.ReadFile(f, 'rb') if '\r' in contents: cr_files.append(f.LocalPath()) # Check that the file ends in one and only one newline character. 
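# (The first test, contents[-1:] != '\n', catches a file whose last line has
# no trailing newline; the second, contents[-2:-1] == '\n', catches extra
# blank lines at the end of the file.)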
if len(contents) > 1 and (contents[-1:] != '\n' or contents[-2:-1] == '\n'): eof_files.append(f.LocalPath()) outputs = [] if cr_files: outputs.append(output_api.PresubmitPromptWarning( 'Found a CR character in these files:', items=cr_files)) if eof_files: outputs.append(output_api.PresubmitPromptWarning( 'These files should end in one (and only one) newline character:', items=eof_files)) return outputs def CheckGenderNeutral(input_api, output_api, source_file_filter=None): """Checks that there are no gendered pronouns in any of the text files to be submitted. """ gendered_re = input_api.re.compile( r'(^|\s|\(|\[)([Hh]e|[Hh]is|[Hh]ers?|[Hh]im|[Ss]he|[Gg]uys?)\\b') errors = [] for f in input_api.AffectedFiles(include_deletes=False, file_filter=source_file_filter): for line_num, line in f.ChangedContents(): if gendered_re.search(line): errors.append('%s (%d): %s' % (f.LocalPath(), line_num, line)) if errors: return [output_api.PresubmitPromptWarning('Found a gendered pronoun in:', long_text='\n'.join(errors))] return [] def _ReportErrorFileAndLine(filename, line_num, dummy_line): """Default error formatter for _FindNewViolationsOfRule.""" return '%s:%s' % (filename, line_num) def _GenerateAffectedFileExtList(input_api, source_file_filter): """Generate a list of (file, extension) tuples from affected files. The result can be fed to _FindNewViolationsOfRule() directly, or could be filtered before doing that. Args: input_api: object to enumerate the affected files. source_file_filter: a filter to be passed to the input api. Yields: A list of (file, extension) tuples, where |file| is an affected file, and |extension| its file path extension. """ for f in input_api.AffectedFiles( include_deletes=False, file_filter=source_file_filter): extension = str(f.LocalPath()).rsplit('.', 1)[-1] yield (f, extension) def _FindNewViolationsOfRuleForList(callable_rule, file_ext_list, error_formatter=_ReportErrorFileAndLine): """Find all newly introduced violations of a per-line rule (a callable). Prefer calling _FindNewViolationsOfRule() instead of this function, unless the list of affected files need to be filtered in a special way. Arguments: callable_rule: a callable taking a file extension and line of input and returning True if the rule is satisfied and False if there was a problem. file_ext_list: a list of input (file, extension) tuples, as returned by _GenerateAffectedFileExtList(). error_formatter: a callable taking (filename, line_number, line) and returning a formatted error string. Returns: A list of the newly-introduced violations reported by the rule. """ errors = [] for f, extension in file_ext_list: # For speed, we do two passes, checking first the full file. Shelling out # to the SCM to determine the changed region can be quite expensive on # Win32. Assuming that most files will be kept problem-free, we can # skip the SCM operations most of the time. if all(callable_rule(extension, line) for line in f.NewContents()): continue # No violation found in full text: can skip considering diff. for line_num, line in f.ChangedContents(): if not callable_rule(extension, line): errors.append(error_formatter(f.LocalPath(), line_num, line)) return errors def _FindNewViolationsOfRule(callable_rule, input_api, source_file_filter=None, error_formatter=_ReportErrorFileAndLine): """Find all newly introduced violations of a per-line rule (a callable). Arguments: callable_rule: a callable taking a file extension and line of input and returning True if the rule is satisfied and False if there was a problem. 
input_api: object to enumerate the affected files. source_file_filter: a filter to be passed to the input api. error_formatter: a callable taking (filename, line_number, line) and returning a formatted error string. Returns: A list of the newly-introduced violations reported by the rule. """ return _FindNewViolationsOfRuleForList( callable_rule, _GenerateAffectedFileExtList( input_api, source_file_filter), error_formatter) def CheckChangeHasNoTabs(input_api, output_api, source_file_filter=None): """Checks that there are no tab characters in any of the text files to be submitted. """ # In addition to the filter, make sure that makefiles are skipped. if not source_file_filter: # It's the default filter. source_file_filter = input_api.FilterSourceFile def filter_more(affected_file): basename = input_api.os_path.basename(affected_file.LocalPath()) return (not (basename in ('Makefile', 'makefile') or basename.endswith('.mk')) and source_file_filter(affected_file)) tabs = _FindNewViolationsOfRule(lambda _, line : '\t' not in line, input_api, filter_more) if tabs: return [output_api.PresubmitPromptWarning('Found a tab character in:', long_text='\n'.join(tabs))] return [] def CheckChangeTodoHasOwner(input_api, output_api, source_file_filter=None): """Checks that the user didn't add TODO(name) without an owner.""" unowned_todo = input_api.re.compile('TO''DO[^(]') errors = _FindNewViolationsOfRule(lambda _, x : not unowned_todo.search(x), input_api, source_file_filter) errors = ['Found TO''DO with no owner in ' + x for x in errors] if errors: return [output_api.PresubmitPromptWarning('\n'.join(errors))] return [] def CheckChangeHasNoStrayWhitespace(input_api, output_api, source_file_filter=None): """Checks that there is no stray whitespace at source lines end.""" errors = _FindNewViolationsOfRule(lambda _, line : line.rstrip() == line, input_api, source_file_filter) if errors: return [output_api.PresubmitPromptWarning( 'Found line ending with white spaces in:', long_text='\n'.join(errors))] return [] def CheckLongLines(input_api, output_api, maxlen, source_file_filter=None): """Checks that there aren't any lines longer than maxlen characters in any of the text files to be submitted. """ maxlens = { 'java': 100, # This is specifically for Android's handwritten makefiles (Android.mk). 'mk': 200, '': maxlen, } # Language specific exceptions to max line length. # '.h' is considered an obj-c file extension, since OBJC_EXCEPTIONS are a # superset of CPP_EXCEPTIONS. CPP_FILE_EXTS = ('c', 'cc') CPP_EXCEPTIONS = ('#define', '#endif', '#if', '#include', '#pragma') HTML_FILE_EXTS = ('html',) HTML_EXCEPTIONS = ('<g ', '<link ', '<path ',) JAVA_FILE_EXTS = ('java',) JAVA_EXCEPTIONS = ('import ', 'package ') JS_FILE_EXTS = ('js',) JS_EXCEPTIONS = ("GEN('#include", 'import ') TS_FILE_EXTS = ('ts',) TS_EXCEPTIONS = ('import ') OBJC_FILE_EXTS = ('h', 'm', 'mm') OBJC_EXCEPTIONS = ('#define', '#endif', '#if', '#import', '#include', '#pragma') PY_FILE_EXTS = ('py',) PY_EXCEPTIONS = ('import', 'from') LANGUAGE_EXCEPTIONS = [ (CPP_FILE_EXTS, CPP_EXCEPTIONS), (HTML_FILE_EXTS, HTML_EXCEPTIONS), (JAVA_FILE_EXTS, JAVA_EXCEPTIONS), (JS_FILE_EXTS, JS_EXCEPTIONS), (TS_FILE_EXTS, TS_EXCEPTIONS), (OBJC_FILE_EXTS, OBJC_EXCEPTIONS), (PY_FILE_EXTS, PY_EXCEPTIONS), ] def no_long_lines(file_extension, line): # Check for language specific exceptions. 
if any(file_extension in exts and line.lstrip().startswith(exceptions) for exts, exceptions in LANGUAGE_EXCEPTIONS): return True file_maxlen = maxlens.get(file_extension, maxlens['']) # Stupidly long symbols that need to be worked around if they take 66% of line. long_symbol = file_maxlen * 2 / 3 # Hard line length limit at 50% more. extra_maxlen = file_maxlen * 3 / 2 line_len = len(line) if line_len <= file_maxlen: return True # Allow long URLs of any length. if any((url in line) for url in ('file://', 'http://', 'https://')): return True if line_len > extra_maxlen: return False if 'url(' in line and file_extension == 'css': return True if '<include' in line and file_extension in ('css', 'html', 'js'): return True return input_api.re.match( r'.*[A-Za-z][A-Za-z_0-9]{%d,}.*' % long_symbol, line) def is_global_pylint_directive(line, pos): """True iff the pylint directive starting at line[pos] is global.""" # Any character before |pos| that is not whitespace or '#' indicates # this is a local directive. return not any(c not in " \t#" for c in line[:pos]) def check_python_long_lines(affected_files, error_formatter): errors = [] global_check_enabled = True for f in affected_files: file_path = f.LocalPath() for idx, line in enumerate(f.NewContents()): line_num = idx + 1 line_is_short = no_long_lines(PY_FILE_EXTS[0], line) pos = line.find('pylint: disable=line-too-long') if pos >= 0: if is_global_pylint_directive(line, pos): global_check_enabled = False # Global disable else: continue # Local disable. do_check = global_check_enabled pos = line.find('pylint: enable=line-too-long') if pos >= 0: if is_global_pylint_directive(line, pos): global_check_enabled = True # Global enable do_check = True # Ensure it applies to current line as well. else: do_check = True # Local enable if do_check and not line_is_short: errors.append(error_formatter(file_path, line_num, line)) return errors def format_error(filename, line_num, line): return '%s, line %s, %s chars' % (filename, line_num, len(line)) file_ext_list = list( _GenerateAffectedFileExtList(input_api, source_file_filter)) errors = [] # For non-Python files, a simple line-based rule check is enough. non_py_file_ext_list = [x for x in file_ext_list if x[1] not in PY_FILE_EXTS] if non_py_file_ext_list: errors += _FindNewViolationsOfRuleForList( no_long_lines, non_py_file_ext_list, error_formatter=format_error) # However, Python files need more sophisticated checks that need parsing # the whole source file. py_file_list = [x[0] for x in file_ext_list if x[1] in PY_FILE_EXTS] if py_file_list: errors += check_python_long_lines( py_file_list, error_formatter=format_error) if errors: msg = 'Found lines longer than %s characters (first 5 shown).' % maxlen return [output_api.PresubmitPromptWarning(msg, items=errors[:5])] return [] def CheckLicense(input_api, output_api, license_re=None, project_name=None, source_file_filter=None, accept_empty_files=True): """Verifies the license header. """ project_name = project_name or 'Chromium' # Accept any year number from 2006 to the current year, or the special # 2006-20xx string used on the oldest files. 2006-20xx is deprecated, but # tolerated on old files. current_year = int(input_api.time.strftime('%Y')) allowed_years = (str(s) for s in reversed(range(2006, current_year + 1))) years_re = '(' + '|'.join(allowed_years) + '|2006-2008|2006-2009|2006-2010)' # The (c) is deprecated, but tolerate it until it's removed from all files. license_re = license_re or ( r'.*? Copyright (\(c\) )?%(year)s The %(project)s Authors\. 
' r'All rights reserved\.\r?\n' r'.*? Use of this source code is governed by a BSD-style license that ' r'can be\r?\n' r'.*? found in the LICENSE file\.(?: \*/)?\r?\n' ) % { 'year': years_re, 'project': project_name, } license_re = input_api.re.compile(license_re, input_api.re.MULTILINE) bad_files = [] for f in input_api.AffectedSourceFiles(source_file_filter): contents = input_api.ReadFile(f, 'r') if accept_empty_files and not contents: continue if not license_re.search(contents): bad_files.append(f.LocalPath()) if bad_files: return [output_api.PresubmitPromptWarning( 'License must match:\n%s\n' % license_re.pattern + 'Found a bad license header in these files:', items=bad_files)] return [] ### Other checks def CheckDoNotSubmit(input_api, output_api): return ( CheckDoNotSubmitInDescription(input_api, output_api) + CheckDoNotSubmitInFiles(input_api, output_api) ) def CheckTreeIsOpen(input_api, output_api, url=None, closed=None, json_url=None): """Check whether to allow commit without prompt. Supports two styles: 1. Checks that an url's content doesn't match a regexp that would mean that the tree is closed. (old) 2. Check the json_url to decide whether to allow commit without prompt. Args: input_api: input related apis. output_api: output related apis. url: url to use for regex based tree status. closed: regex to match for closed status. json_url: url to download json style status. """ if not input_api.is_committing: return [] try: if json_url: connection = input_api.urllib_request.urlopen(json_url) status = input_api.json.loads(connection.read()) connection.close() if not status['can_commit_freely']: short_text = 'Tree state is: ' + status['general_state'] long_text = status['message'] + '\n' + json_url return [output_api.PresubmitError(short_text, long_text=long_text)] else: # TODO(bradnelson): drop this once all users are gone. connection = input_api.urllib_request.urlopen(url) status = connection.read() connection.close() if input_api.re.match(closed, status): long_text = status + '\n' + url return [output_api.PresubmitError('The tree is closed.', long_text=long_text)] except IOError as e: return [output_api.PresubmitError('Error fetching tree status.', long_text=str(e))] return [] def GetUnitTestsInDirectory(input_api, output_api, directory, files_to_check=None, files_to_skip=None, env=None, run_on_python2=True, run_on_python3=True, skip_shebang_check=False, allowlist=None, blocklist=None): """Lists all files in a directory and runs them. Doesn't recurse. It's mainly a wrapper for RunUnitTests. Use allowlist and blocklist to filter tests accordingly. 
""" unit_tests = [] test_path = input_api.os_path.abspath( input_api.os_path.join(input_api.PresubmitLocalPath(), directory)) def check(filename, filters): return any(True for i in filters if input_api.re.match(i, filename)) to_run = found = 0 for filename in input_api.os_listdir(test_path): found += 1 fullpath = input_api.os_path.join(test_path, filename) if not input_api.os_path.isfile(fullpath): continue if files_to_check and not check(filename, files_to_check): continue if files_to_skip and check(filename, files_to_skip): continue unit_tests.append(input_api.os_path.join(directory, filename)) to_run += 1 input_api.logging.debug('Found %d files, running %d unit tests' % (found, to_run)) if not to_run: return [ output_api.PresubmitPromptWarning( 'Out of %d files, found none that matched c=%r, s=%r in directory %s' % (found, files_to_check, files_to_skip, directory)) ] return GetUnitTests(input_api, output_api, unit_tests, env, run_on_python2, run_on_python3, skip_shebang_check) def GetUnitTests(input_api, output_api, unit_tests, env=None, run_on_python2=True, run_on_python3=True, skip_shebang_check=False): """Runs all unit tests in a directory. On Windows, sys.executable is used for unit tests ending with ".py". """ assert run_on_python3 or run_on_python2, ( 'At least one of "run_on_python2" or "run_on_python3" must be set.') def has_py3_shebang(test): with open(test) as f: maybe_shebang = f.readline() return maybe_shebang.startswith('#!') and 'python3' in maybe_shebang # We don't want to hinder users from uploading incomplete patches. if input_api.is_committing: message_type = output_api.PresubmitError else: message_type = output_api.PresubmitPromptWarning results = [] for unit_test in unit_tests: cmd = [unit_test] if input_api.verbose: cmd.append('--verbose') kwargs = {'cwd': input_api.PresubmitLocalPath()} if env: kwargs['env'] = env if not unit_test.endswith('.py'): results.append(input_api.Command( name=unit_test, cmd=cmd, kwargs=kwargs, message=message_type)) else: test_run = False # TODO(crbug.com/1223478): The intent for this line was to run the test # on python3 if the file has a shebang OR if it was explicitly requested # to run on python3. Since tests have been broken since this landed, we # introduced the |skip_shebang_check| argument to work around the issue # until every caller in Chromium has been fixed. if (skip_shebang_check or has_py3_shebang(unit_test)) and run_on_python3: results.append(input_api.Command( name=unit_test, cmd=cmd, kwargs=kwargs, message=message_type, python3=True)) test_run = True if run_on_python2: results.append(input_api.Command( name=unit_test, cmd=cmd, kwargs=kwargs, message=message_type)) test_run = True if not test_run: output_api.PresubmitPromptWarning( "Some python tests were not run. You may need to add\n" "skip_shebang_check=True for python3 tests.", items=unit_test) return results def GetUnitTestsRecursively(input_api, output_api, directory, files_to_check, files_to_skip, run_on_python2=True, run_on_python3=True, skip_shebang_check=False): """Gets all files in the directory tree (git repo) that match files_to_check. Restricts itself to only find files within the Change's source repo, not dependencies. 
""" def check(filename): return (any(input_api.re.match(f, filename) for f in files_to_check) and not any(input_api.re.match(f, filename) for f in files_to_skip)) tests = [] to_run = found = 0 for filepath in input_api.change.AllFiles(directory): found += 1 if check(filepath): to_run += 1 tests.append(filepath) input_api.logging.debug('Found %d files, running %d' % (found, to_run)) if not to_run: return [ output_api.PresubmitPromptWarning( 'Out of %d files, found none that matched c=%r, s=%r in directory %s' % (found, files_to_check, files_to_skip, directory)) ] return GetUnitTests(input_api, output_api, tests, run_on_python2=run_on_python2, run_on_python3=run_on_python3, skip_shebang_check=skip_shebang_check) def GetPythonUnitTests(input_api, output_api, unit_tests): """Run the unit tests out of process, capture the output and use the result code to determine success. DEPRECATED. """ # We don't want to hinder users from uploading incomplete patches. if input_api.is_committing: message_type = output_api.PresubmitError else: message_type = output_api.PresubmitNotifyResult results = [] for unit_test in unit_tests: # Run the unit tests out of process. This is because some unit tests # stub out base libraries and don't clean up their mess. It's too easy to # get subtle bugs. cwd = None env = None unit_test_name = unit_test # 'python -m test.unit_test' doesn't work. We need to change to the right # directory instead. if '.' in unit_test: # Tests imported in submodules (subdirectories) assume that the current # directory is in the PYTHONPATH. Manually fix that. unit_test = unit_test.replace('.', '/') cwd = input_api.os_path.dirname(unit_test) unit_test = input_api.os_path.basename(unit_test) env = input_api.environ.copy() # At least on Windows, it seems '.' must explicitly be in PYTHONPATH backpath = [ '.', input_api.os_path.pathsep.join(['..'] * (cwd.count('/') + 1)) ] # We convert to str, since on Windows on Python 2 only strings are allowed # as environment variables, but literals are unicode since we're importing # unicode_literals from __future__. if env.get('PYTHONPATH'): backpath.append(env.get('PYTHONPATH')) env['PYTHONPATH'] = input_api.os_path.pathsep.join((backpath)) env.pop('VPYTHON_CLEAR_PYTHONPATH', None) cmd = [input_api.python_executable, '-m', '%s' % unit_test] results.append(input_api.Command( name=unit_test_name, cmd=cmd, kwargs={'env': env, 'cwd': cwd}, message=message_type)) return results def RunUnitTestsInDirectory(input_api, *args, **kwargs): """Run tests in a directory serially. For better performance, use GetUnitTestsInDirectory and then pass to input_api.RunTests. """ return input_api.RunTests( GetUnitTestsInDirectory(input_api, *args, **kwargs), False) def RunUnitTests(input_api, *args, **kwargs): """Run tests serially. For better performance, use GetUnitTests and then pass to input_api.RunTests. """ return input_api.RunTests(GetUnitTests(input_api, *args, **kwargs), False) def RunPythonUnitTests(input_api, *args, **kwargs): """Run python tests in a directory serially. DEPRECATED """ return input_api.RunTests( GetPythonUnitTests(input_api, *args, **kwargs), False) def _FetchAllFiles(input_api, files_to_check, files_to_skip): """Hack to fetch all files.""" # We cannot use AffectedFiles here because we want to test every python # file on each single python change. It's because a change in a python file # can break another unmodified file. 
# Use code similar to InputApi.FilterSourceFile() def Find(filepath, filters): if input_api.platform == 'win32': filepath = filepath.replace('\\', '/') for item in filters: if input_api.re.match(item, filepath): return True return False files = [] path_len = len(input_api.PresubmitLocalPath()) for dirpath, dirnames, filenames in input_api.os_walk( input_api.PresubmitLocalPath()): # Passes dirnames in block list to speed up search. for item in dirnames[:]: filepath = input_api.os_path.join(dirpath, item)[path_len + 1:] if Find(filepath, files_to_skip): dirnames.remove(item) for item in filenames: filepath = input_api.os_path.join(dirpath, item)[path_len + 1:] if Find(filepath, files_to_check) and not Find(filepath, files_to_skip): files.append(filepath) return files def GetPylint(input_api, output_api, files_to_check=None, files_to_skip=None, disabled_warnings=None, extra_paths_list=None, pylintrc=None, version='1.5'): """Run pylint on python files. The default files_to_check enforces looking only at *.py files. Currently only pylint version '1.5', '2.6' and '2.7' are supported. """ files_to_check = tuple(files_to_check or (r'.*\.py$', )) files_to_skip = tuple(files_to_skip or input_api.DEFAULT_FILES_TO_SKIP) extra_paths_list = extra_paths_list or [] assert version in ('1.5', '2.6', '2.7'), \ 'Unsupported pylint version: %s' % version python2 = (version == '1.5') if input_api.is_committing: error_type = output_api.PresubmitError else: error_type = output_api.PresubmitPromptWarning # Only trigger if there is at least one python file affected. def rel_path(regex): """Modifies a regex for a subject to accept paths relative to root.""" def samefile(a, b): # Default implementation for platforms lacking os.path.samefile # (like Windows). return input_api.os_path.abspath(a) == input_api.os_path.abspath(b) samefile = getattr(input_api.os_path, 'samefile', samefile) if samefile(input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot()): return regex prefix = input_api.os_path.join(input_api.os_path.relpath( input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot()), '') return input_api.re.escape(prefix) + regex src_filter = lambda x: input_api.FilterSourceFile( x, map(rel_path, files_to_check), map(rel_path, files_to_skip)) if not input_api.AffectedSourceFiles(src_filter): input_api.logging.info('Skipping pylint: no matching changes.') return [] if pylintrc is not None: pylintrc = input_api.os_path.join(input_api.PresubmitLocalPath(), pylintrc) else: pylintrc = input_api.os_path.join(_HERE, 'pylintrc') extra_args = ['--rcfile=%s' % pylintrc] if disabled_warnings: extra_args.extend(['-d', ','.join(disabled_warnings)]) files = _FetchAllFiles(input_api, files_to_check, files_to_skip) if not files: return [] files.sort() input_api.logging.info('Running pylint %s on %d files', version, len(files)) input_api.logging.debug('Running pylint on: %s', files) env = input_api.environ.copy() env['PYTHONPATH'] = input_api.os_path.pathsep.join(extra_paths_list) env.pop('VPYTHON_CLEAR_PYTHONPATH', None) input_api.logging.debug(' with extra PYTHONPATH: %r', extra_paths_list) def GetPylintCmd(flist, extra, parallel): # Windows needs help running python files so we explicitly specify # the interpreter to use. It also has limitations on the size of # the command-line, so we pass arguments via a pipe. tool = input_api.os_path.join(_HERE, 'pylint-' + version) kwargs = {'env': env} if input_api.platform == 'win32': # On Windows, scripts on the current directory take precedence over PATH. 
# When `pylint.bat` calls `vpython`, it will execute the `vpython` of the # depot_tools under test instead of the one in the bot. # As a workaround, we run the tests from the parent directory instead. cwd = input_api.change.RepositoryRoot() if input_api.os_path.basename(cwd) == 'depot_tools': kwargs['cwd'] = input_api.os_path.dirname(cwd) flist = [input_api.os_path.join('depot_tools', f) for f in flist] tool += '.bat' cmd = [tool, '--args-on-stdin'] if len(flist) == 1: description = flist[0] else: description = '%s files' % len(flist) args = extra_args[:] if extra: args.extend(extra) description += ' using %s' % (extra,) if parallel: args.append('--jobs=%s' % input_api.cpu_count) description += ' on %d cores' % input_api.cpu_count kwargs['stdin'] = '\n'.join(args + flist) if input_api.sys.version_info.major != 2: kwargs['stdin'] = kwargs['stdin'].encode('utf-8') return input_api.Command( name='Pylint (%s)' % description, cmd=cmd, kwargs=kwargs, message=error_type, python3=not python2) # Always run pylint and pass it all the py files at once. # Passing py files one at time is slower and can produce # different results. input_api.verbose used to be used # to enable this behaviour but differing behaviour in # verbose mode is not desirable. # Leave this unreachable code in here so users can make # a quick local edit to diagnose pylint issues more # easily. if True: # pylint's cycle detection doesn't work in parallel, so spawn a second, # single-threaded job for just that check. # Some PRESUBMITs explicitly mention cycle detection. if not any('R0401' in a or 'cyclic-import' in a for a in extra_args): return [ GetPylintCmd(files, ["--disable=cyclic-import"], True), GetPylintCmd(files, ["--disable=all", "--enable=cyclic-import"], False) ] return [ GetPylintCmd(files, [], True) ] return map(lambda x: GetPylintCmd([x], [], 1), files) def RunPylint(input_api, *args, **kwargs): """Legacy presubmit function. For better performance, get all tests and then pass to input_api.RunTests. """ return input_api.RunTests(GetPylint(input_api, *args, **kwargs), False) def CheckDirMetadataFormat(input_api, output_api, dirmd_bin=None): # TODO(crbug.com/1102997): Remove OWNERS once DIR_METADATA migration is # complete. 
file_filter = lambda f: ( input_api.basename(f.LocalPath()) in ('DIR_METADATA', 'OWNERS')) affected_files = { f.AbsoluteLocalPath() for f in input_api.change.AffectedFiles( include_deletes=False, file_filter=file_filter) } if not affected_files: return [] name = 'Validate metadata in OWNERS and DIR_METADATA files' kwargs = {} if dirmd_bin is None: dirmd_bin = 'dirmd.bat' if input_api.is_windows else 'dirmd' cmd = [dirmd_bin, 'validate'] + sorted(affected_files) return [input_api.Command( name, cmd, kwargs, output_api.PresubmitError)] def CheckNoNewMetadataInOwners(input_api, output_api): """Check that no metadata is added to OWNERS files.""" _METADATA_LINE_RE = input_api.re.compile( r'^#\s*(TEAM|COMPONENT|OS|WPT-NOTIFY)+\s*:\s*\S+$', input_api.re.MULTILINE | input_api.re.IGNORECASE) affected_files = input_api.change.AffectedFiles( include_deletes=False, file_filter=lambda f: input_api.basename(f.LocalPath()) == 'OWNERS') errors = [] for f in affected_files: for _, line in f.ChangedContents(): if _METADATA_LINE_RE.search(line): errors.append(f.AbsoluteLocalPath()) break if not errors: return [] return [output_api.PresubmitError( 'New metadata was added to the following OWNERS files, but should ' 'have been added to DIR_METADATA files instead:\n' + '\n'.join(errors) + '\n' + 'See https://source.chromium.org/chromium/infra/infra/+/HEAD:' 'go/src/infra/tools/dirmd/proto/dir_metadata.proto for details.')] def CheckOwnersDirMetadataExclusive(input_api, output_api): """Check that metadata in OWNERS files and DIR_METADATA files are mutually exclusive. """ _METADATA_LINE_RE = input_api.re.compile( r'^#\s*(TEAM|COMPONENT|OS|WPT-NOTIFY)+\s*:\s*\S+$', input_api.re.MULTILINE) file_filter = ( lambda f: input_api.basename(f.LocalPath()) in ('OWNERS', 'DIR_METADATA')) affected_dirs = { input_api.os_path.dirname(f.AbsoluteLocalPath()) for f in input_api.change.AffectedFiles( include_deletes=False, file_filter=file_filter) } errors = [] for path in affected_dirs: owners_path = input_api.os_path.join(path, 'OWNERS') dir_metadata_path = input_api.os_path.join(path, 'DIR_METADATA') if (not input_api.os_path.isfile(dir_metadata_path) or not input_api.os_path.isfile(owners_path)): continue if _METADATA_LINE_RE.search(input_api.ReadFile(owners_path)): errors.append(owners_path) if not errors: return [] return [output_api.PresubmitError( 'The following OWNERS files should contain no metadata, as there is a ' 'DIR_METADATA file present in the same directory:\n' + '\n'.join(errors))] def CheckOwnersFormat(input_api, output_api): if input_api.gerrit and input_api.gerrit.IsCodeOwnersEnabledOnRepo(): return [] affected_files = { f.LocalPath() for f in input_api.change.AffectedFiles() if 'OWNERS' in f.LocalPath() and f.Action() != 'D' } if not affected_files: return [] try: owners_db = input_api.owners_db owners_db.override_files = {} owners_db.load_data_needed_for(affected_files) return [] except Exception as e: return [output_api.PresubmitError( 'Error parsing OWNERS files:\n%s' % e)] def CheckOwners( input_api, output_api, source_file_filter=None, allow_tbr=True): # Skip OWNERS check when Owners-Override label is approved. This is intended # for global owners, trusted bots, and on-call sheriffs. Review is still # required for these changes. 
if (input_api.change.issue and input_api.gerrit.IsOwnersOverrideApproved(input_api.change.issue)): return [] if input_api.gerrit and input_api.gerrit.IsCodeOwnersEnabledOnRepo(): return [] affected_files = {f.LocalPath() for f in input_api.change.AffectedFiles( file_filter=source_file_filter)} owner_email, reviewers = GetCodereviewOwnerAndReviewers( input_api, approval_needed=input_api.is_committing) owner_email = owner_email or input_api.change.author_email approval_status = input_api.owners_client.GetFilesApprovalStatus( affected_files, reviewers.union([owner_email]), []) missing_files = [ f for f in affected_files if approval_status[f] != input_api.owners_client.APPROVED] affects_owners = any('OWNERS' in name for name in missing_files) if input_api.is_committing: if input_api.tbr and not affects_owners: return [output_api.PresubmitNotifyResult( '--tbr was specified, skipping OWNERS check')] needed = 'LGTM from an OWNER' output_fn = output_api.PresubmitError if input_api.change.issue: if input_api.dry_run: output_fn = lambda text: output_api.PresubmitNotifyResult( 'This is a dry run, but these failures would be reported on ' + 'commit:\n' + text) else: return [output_api.PresubmitError( 'OWNERS check failed: this CL has no Gerrit change number, ' 'so we can\'t check it for approvals.')] else: needed = 'OWNER reviewers' output_fn = output_api.PresubmitNotifyResult if missing_files: output_list = [ output_fn('Missing %s for these files:\n %s' % (needed, '\n '.join(sorted(missing_files))))] if input_api.tbr and affects_owners: output_list.append(output_fn('TBR for OWNERS files are ignored.')) if not input_api.is_committing: suggested_owners = input_api.owners_client.SuggestOwners( missing_files, exclude=[owner_email]) output_list.append(output_fn('Suggested OWNERS: ' + '(Use "git-cl owners" to interactively select owners.)\n %s' % ('\n '.join(suggested_owners)))) return output_list if (input_api.is_committing and not reviewers and not input_api.gerrit.IsBotCommitApproved(input_api.change.issue)): return [output_fn('Missing LGTM from someone other than %s' % owner_email)] return [] def GetCodereviewOwnerAndReviewers( input_api, _email_regexp=None, approval_needed=True): """Return the owner and reviewers of a change, if any. If approval_needed is True, only reviewers who have approved the change will be returned. """ # Recognizes 'X@Y' email addresses. Very simplistic. EMAIL_REGEXP = input_api.re.compile(r'^[\w\-\+\%\.]+\@[\w\-\+\%\.]+$') issue = input_api.change.issue if not issue: return None, (set() if approval_needed else _ReviewersFromChange(input_api.change)) owner_email = input_api.gerrit.GetChangeOwner(issue) reviewers = set( r for r in input_api.gerrit.GetChangeReviewers(issue, approval_needed) if _match_reviewer_email(r, owner_email, EMAIL_REGEXP)) input_api.logging.debug('owner: %s; approvals given by: %s', owner_email, ', '.join(sorted(reviewers))) return owner_email, reviewers def _ReviewersFromChange(change): """Return the reviewers specified in the |change|, if any.""" reviewers = set() reviewers.update(change.ReviewersFromDescription()) reviewers.update(change.TBRsFromDescription()) # Drop reviewers that aren't specified in email address format. 
return set(reviewer for reviewer in reviewers if '@' in reviewer) def _match_reviewer_email(r, owner_email, email_regexp): return email_regexp.match(r) and r != owner_email def CheckSingletonInHeaders(input_api, output_api, source_file_filter=None): """Deprecated, must be removed.""" return [ output_api.PresubmitNotifyResult( 'CheckSingletonInHeaders is deprecated, please remove it.') ] def PanProjectChecks(input_api, output_api, excluded_paths=None, text_files=None, license_header=None, project_name=None, owners_check=True, maxlen=80): """Checks that ALL chromium orbit projects should use. These are checks to be run on all Chromium orbit project, including: Chromium Native Client V8 When you update this function, please take this broad scope into account. Args: input_api: Bag of input related interfaces. output_api: Bag of output related interfaces. excluded_paths: Don't include these paths in common checks. text_files: Which file are to be treated as documentation text files. license_header: What license header should be on files. project_name: What is the name of the project as it appears in the license. Returns: A list of warning or error objects. """ excluded_paths = tuple(excluded_paths or []) text_files = tuple(text_files or ( r'.+\.txt$', r'.+\.json$', )) results = [] # This code loads the default skip list (e.g. third_party, experimental, etc) # and add our skip list (breakpad, skia and v8 are still not following # google style and are not really living this repository). # See presubmit_support.py InputApi.FilterSourceFile for the (simple) usage. files_to_skip = input_api.DEFAULT_FILES_TO_SKIP + excluded_paths files_to_check = input_api.DEFAULT_FILES_TO_CHECK + text_files sources = lambda x: input_api.FilterSourceFile(x, files_to_skip=files_to_skip) text_files = lambda x: input_api.FilterSourceFile( x, files_to_skip=files_to_skip, files_to_check=files_to_check) snapshot_memory = [] def snapshot(msg): """Measures & prints performance warning if a rule is running slow.""" if input_api.sys.version_info.major == 2: dt2 = input_api.time.clock() else: dt2 = input_api.time.process_time() if snapshot_memory: delta_ms = int(1000*(dt2 - snapshot_memory[0])) if delta_ms > 500: print(" %s took a long time: %dms" % (snapshot_memory[1], delta_ms)) snapshot_memory[:] = (dt2, msg) snapshot("checking owners files format") results.extend(input_api.canned_checks.CheckOwnersFormat( input_api, output_api)) if owners_check: snapshot("checking owners") results.extend(input_api.canned_checks.CheckOwners( input_api, output_api, source_file_filter=None)) snapshot("checking long lines") results.extend(input_api.canned_checks.CheckLongLines( input_api, output_api, maxlen, source_file_filter=sources)) snapshot( "checking tabs") results.extend(input_api.canned_checks.CheckChangeHasNoTabs( input_api, output_api, source_file_filter=sources)) snapshot( "checking stray whitespace") results.extend(input_api.canned_checks.CheckChangeHasNoStrayWhitespace( input_api, output_api, source_file_filter=sources)) snapshot("checking license") results.extend(input_api.canned_checks.CheckLicense( input_api, output_api, license_header, project_name, source_file_filter=sources)) if input_api.is_committing: snapshot("checking was uploaded") results.extend(input_api.canned_checks.CheckChangeWasUploaded( input_api, output_api)) snapshot("checking description") results.extend(input_api.canned_checks.CheckChangeHasDescription( input_api, output_api)) results.extend(input_api.canned_checks.CheckDoNotSubmitInDescription( input_api, 
        output_api))
    snapshot("checking do not submit in files")
    results.extend(input_api.canned_checks.CheckDoNotSubmitInFiles(
        input_api, output_api))
  if input_api.change.scm == 'git':
    snapshot("checking for commit objects in tree")
    results.extend(input_api.canned_checks.CheckForCommitObjects(
        input_api, output_api))
  snapshot("done")
  return results


def CheckPatchFormatted(input_api,
                        output_api,
                        bypass_warnings=True,
                        check_clang_format=True,
                        check_js=False,
                        check_python=None,
                        result_factory=None):
  result_factory = result_factory or output_api.PresubmitPromptWarning
  import git_cl

  display_args = []
  if not check_clang_format:
    display_args.append('--no-clang-format')

  if check_js:
    display_args.append('--js')

  # Explicitly setting check_python to True or False will enable/disable
  # python formatting on all files. Leaving it as None will enable checking
  # patch formatting on files that have a .style.yapf file in a parent
  # directory.
  if check_python is not None:
    if check_python:
      display_args.append('--python')
    else:
      display_args.append('--no-python')

  cmd = ['-C', input_api.change.RepositoryRoot(),
         'cl', 'format', '--dry-run', '--presubmit'] + display_args
  presubmit_subdir = input_api.os_path.relpath(
      input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot())
  if presubmit_subdir.startswith('..') or presubmit_subdir == '.':
    presubmit_subdir = ''
  # If the PRESUBMIT.py is in a parent repository, then format the entire
  # subrepository. Otherwise, format only the code in the directory that
  # contains the PRESUBMIT.py.
  if presubmit_subdir:
    cmd.append(input_api.PresubmitLocalPath())
  code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=bypass_warnings)
  # bypass_warnings? Only fail with code 2.
  # As this is just a warning, ignore all other errors if the user
  # happens to have a broken clang-format, doesn't use git, etc etc.
  if code == 2 or (code and not bypass_warnings):
    if presubmit_subdir:
      short_path = presubmit_subdir
    else:
      short_path = input_api.basename(input_api.change.RepositoryRoot())
    display_args.append(presubmit_subdir)
    return [result_factory(
        'The %s directory requires source formatting. '
        'Please run: git cl format %s' %
        (short_path, ' '.join(display_args)))]
  return []


def CheckGNFormatted(input_api, output_api):
  import gn
  affected_files = input_api.AffectedFiles(
      include_deletes=False,
      file_filter=lambda x: x.LocalPath().endswith('.gn') or
                            x.LocalPath().endswith('.gni') or
                            x.LocalPath().endswith('.typemap'))
  warnings = []
  for f in affected_files:
    cmd = ['gn', 'format', '--dry-run', f.AbsoluteLocalPath()]
    rc = gn.main(cmd)
    if rc == 2:
      warnings.append(output_api.PresubmitPromptWarning(
          '%s requires formatting. Please run:\n gn format %s' % (
              f.AbsoluteLocalPath(), f.LocalPath())))
  # It's just a warning, so ignore other types of failures assuming they'll be
  # caught elsewhere.
  return warnings


def CheckCIPDManifest(input_api, output_api, path=None, content=None):
  """Verifies that a CIPD ensure file manifest is valid against all platforms.

  Exactly one of "path" or "content" must be provided. An assertion will occur
  if neither or both are provided.

  Args:
    path (str): If provided, the filesystem path to the manifest to verify.
    content (str): If provided, the raw content of the manifest to verify.
  """
  cipd_bin = 'cipd' if not input_api.is_windows else 'cipd.bat'
  cmd = [cipd_bin, 'ensure-file-verify']
  kwargs = {}

  if input_api.is_windows:
    # Needs to be able to resolve "cipd.bat".
kwargs['shell'] = True if input_api.verbose: cmd += ['-log-level', 'debug'] if path: assert content is None, 'Cannot provide both "path" and "content".' cmd += ['-ensure-file', path] name = 'Check CIPD manifest %r' % path elif content: assert path is None, 'Cannot provide both "path" and "content".' cmd += ['-ensure-file=-'] kwargs['stdin'] = content # quick and dirty parser to extract checked packages. packages = [ l.split()[0] for l in (ll.strip() for ll in content.splitlines()) if ' ' in l and not l.startswith('$') ] name = 'Check CIPD packages from string: %r' % (packages,) else: raise Exception('Exactly one of "path" or "content" must be provided.') return input_api.Command( name, cmd, kwargs, output_api.PresubmitError) def CheckCIPDPackages(input_api, output_api, platforms, packages): """Verifies that all named CIPD packages can be resolved against all supplied platforms. Args: platforms (list): List of CIPD platforms to verify. packages (dict): Mapping of package name to version. """ manifest = [] for p in platforms: manifest.append('$VerifiedPlatform %s' % (p,)) for k, v in packages.items(): manifest.append('%s %s' % (k, v)) return CheckCIPDManifest(input_api, output_api, content='\n'.join(manifest)) def CheckCIPDClientDigests(input_api, output_api, client_version_file): """Verifies that *.digests file was correctly regenerated. <client_version_file>.digests file contains pinned hashes of the CIPD client. It is consulted during CIPD client bootstrap and self-update. It should be regenerated each time CIPD client version file changes. Args: client_version_file (str): Path to a text file with CIPD client version. """ cmd = [ 'cipd' if not input_api.is_windows else 'cipd.bat', 'selfupdate-roll', '-check', '-version-file', client_version_file, ] if input_api.verbose: cmd += ['-log-level', 'debug'] return input_api.Command( 'Check CIPD client_version_file.digests file', cmd, {'shell': True} if input_api.is_windows else {}, # to resolve cipd.bat output_api.PresubmitError) def CheckForCommitObjects(input_api, output_api): """Validates that there are no commit objects in the repository. Commit objects are put into the git tree typically by submodule tooling. Because we use gclient to handle external repository references instead, we want to avoid this. Having commit objects in the tree can confuse git tooling in some scenarios into thinking that the tree is dirty (e.g. the presence of a DEPS subrepo at a location where a commit object is stored in the tree). Args: input_api: Bag of input related interfaces. output_api: Bag of output related interfaces. Returns: A presubmit error if a commit object is present in the tree. 
""" def parse_tree_entry(ent): """Splits a tree entry into components Args: ent: a tree entry in the form "filemode type hash\tname" Returns: The tree entry split into component parts """ tabparts = ent.split('\t', 1) spaceparts = tabparts[0].split(' ', 2) return (spaceparts[0], spaceparts[1], spaceparts[2], tabparts[1]) full_tree = input_api.subprocess.check_output( ['git', 'ls-tree', '-r', '--full-tree', 'HEAD'], cwd=input_api.PresubmitLocalPath() ).decode('utf8') tree_entries = full_tree.split('\n') tree_entries = [x for x in tree_entries if len(x) > 0] tree_entries = map(parse_tree_entry, tree_entries) bad_tree_entries = [x for x in tree_entries if x[1] == 'commit'] bad_tree_entries = [x[3] for x in bad_tree_entries] if len(bad_tree_entries) > 0: return [output_api.PresubmitError( 'Commit objects present within tree.\n' 'This may be due to submodule-related interactions; the presence of a\n' 'commit object in the tree may lead to odd situations where files are\n' 'inconsistently checked-out. Remove these commit entries and validate\n' 'your changeset again:\n', bad_tree_entries)] return [] def CheckVPythonSpec(input_api, output_api, file_filter=None): """Validates any changed .vpython files with vpython verification tool. Args: input_api: Bag of input related interfaces. output_api: Bag of output related interfaces. file_filter: Custom function that takes a path (relative to client root) and returns boolean, which is used to filter files for which to apply the verification to. Defaults to any path ending with .vpython, which captures both global .vpython and <script>.vpython files. Returns: A list of input_api.Command objects containing verification commands. """ file_filter = file_filter or (lambda f: f.LocalPath().endswith('.vpython')) affected_files = input_api.AffectedTestableFiles(file_filter=file_filter) affected_files = map(lambda f: f.AbsoluteLocalPath(), affected_files) commands = [] for f in affected_files: commands.append(input_api.Command( 'Verify %s' % f, ['vpython', '-vpython-spec', f, '-vpython-tool', 'verify'], {'stderr': input_api.subprocess.STDOUT}, output_api.PresubmitError)) return commands def CheckChangedLUCIConfigs(input_api, output_api): import collections import base64 import json import logging import auth import git_cl LUCI_CONFIG_HOST_NAME = 'luci-config.appspot.com' cl = git_cl.Changelist() if input_api.change.issue and input_api.gerrit: remote_branch = input_api.gerrit.GetDestRef(input_api.change.issue) else: remote, remote_branch = cl.GetRemoteBranch() if remote_branch.startswith('refs/remotes/%s/' % remote): remote_branch = remote_branch.replace( 'refs/remotes/%s/' % remote, 'refs/heads/', 1) if remote_branch.startswith('refs/remotes/branch-heads/'): remote_branch = remote_branch.replace( 'refs/remotes/branch-heads/', 'refs/branch-heads/', 1) remote_host_url = cl.GetRemoteUrl() if not remote_host_url: return [output_api.PresubmitError( 'Remote host url for git has not been defined')] remote_host_url = remote_host_url.rstrip('/') if remote_host_url.endswith('.git'): remote_host_url = remote_host_url[:-len('.git')] # authentication try: acc_tkn = auth.Authenticator().get_access_token() except auth.LoginRequiredError as e: return [output_api.PresubmitError( 'Error in authenticating user.', long_text=str(e))] def request(endpoint, body=None): api_url = ('https://%s/_ah/api/config/v1/%s' % (LUCI_CONFIG_HOST_NAME, endpoint)) req = input_api.urllib_request.Request(api_url) req.add_header('Authorization', 'Bearer %s' % acc_tkn.token) if body is not None: 
req.add_header('Content-Type', 'application/json') req.data = json.dumps(body).encode('utf-8') return json.load(input_api.urllib_request.urlopen(req)) try: config_sets = request('config-sets').get('config_sets') except input_api.urllib_error.HTTPError as e: return [output_api.PresubmitError( 'Config set request to luci-config failed', long_text=str(e))] if not config_sets: return [output_api.PresubmitPromptWarning('No config_sets were returned')] loc_pref = '%s/+/%s/' % (remote_host_url, remote_branch) logging.debug('Derived location prefix: %s', loc_pref) dir_to_config_set = { '%s/' % cs['location'][len(loc_pref):].rstrip('/'): cs['config_set'] for cs in config_sets if cs['location'].startswith(loc_pref) or ('%s/' % cs['location']) == loc_pref } if not dir_to_config_set: warning_long_text_lines = [ 'No config_set found for %s.' % loc_pref, 'Found the following:', ] for loc in sorted(cs['location'] for cs in config_sets): warning_long_text_lines.append(' %s' % loc) warning_long_text_lines.append('') warning_long_text_lines.append( 'If the requested location is internal,' ' the requester may not have access.') return [output_api.PresubmitPromptWarning( warning_long_text_lines[0], long_text='\n'.join(warning_long_text_lines))] cs_to_files = collections.defaultdict(list) for f in input_api.AffectedFiles(include_deletes=False): # windows file_path = f.LocalPath().replace(_os.sep, '/') logging.debug('Affected file path: %s', file_path) for dr, cs in dir_to_config_set.items(): if dr == '/' or file_path.startswith(dr): cs_to_files[cs].append({ 'path': file_path[len(dr):] if dr != '/' else file_path, 'content': base64.b64encode( '\n'.join(f.NewContents()).encode('utf-8')).decode('utf-8') }) outputs = [] for cs, f in cs_to_files.items(): try: # TODO(myjang): parallelize res = request( 'validate-config', body={'config_set': cs, 'files': f}) except input_api.urllib_error.HTTPError as e: return [output_api.PresubmitError( 'Validation request to luci-config failed', long_text=str(e))] for msg in res.get('messages', []): sev = msg['severity'] if sev == 'WARNING': out_f = output_api.PresubmitPromptWarning elif sev in ('ERROR', 'CRITICAL'): out_f = output_api.PresubmitError else: out_f = output_api.PresubmitNotifyResult outputs.append( out_f('Config validation for %s: %s' % ([str(obj['path']) for obj in f], msg['text']))) return outputs def CheckLucicfgGenOutput(input_api, output_api, entry_script): """Verifies configs produced by `lucicfg` are up-to-date and pass validation. Runs the check unconditionally, regardless of what files are modified. Examine input_api.AffectedFiles() yourself before using CheckLucicfgGenOutput if this is a concern. Assumes `lucicfg` binary is in PATH and the user is logged in. Args: entry_script: path to the entry-point *.star script responsible for generating a single config set. Either absolute or relative to the currently running PRESUBMIT.py script. Returns: A list of input_api.Command objects containing verification commands. """ return [ input_api.Command( 'lucicfg validate "%s"' % entry_script, [ 'lucicfg' if not input_api.is_windows else 'lucicfg.bat', 'validate', entry_script, '-log-level', 'debug' if input_api.verbose else 'warning', ], { 'stderr': input_api.subprocess.STDOUT, 'shell': input_api.is_windows, # to resolve *.bat 'cwd': input_api.PresubmitLocalPath(), }, output_api.PresubmitError) ] def CheckJsonParses(input_api, output_api, file_filter=None): """Verifies that all JSON files at least parse as valid JSON. 
  By default, file_filter will look for all files that end with .json"""
  import json
  if file_filter is None:
    file_filter = lambda x: x.LocalPath().endswith('.json')
  affected_files = input_api.AffectedFiles(
      include_deletes=False,
      file_filter=file_filter)
  warnings = []
  for f in affected_files:
    with open(f.AbsoluteLocalPath()) as j:
      try:
        json.load(j)
      except ValueError:
        # Just a warning for now, in case people are using JSON5 somewhere.
        warnings.append(output_api.PresubmitPromptWarning(
            '%s does not appear to be valid JSON.' % f.LocalPath()))
  return warnings


# string pattern, sequence of strings to show when pattern matches,
# error flag. True if match is a presubmit error, otherwise it's a warning.
_NON_INCLUSIVE_TERMS = (
    (
        # Note that \b pattern in python re is pretty particular. In this
        # regexp, 'class WhiteList ...' will match, but 'class FooWhiteList
        # ...' will not. This may require some tweaking to catch these cases
        # without triggering a lot of false positives. Leaving it naive and
        # less matchy for now.
        r'/\b(?i)((black|white)list|slave)\b',  # nocheck
        (
            'Please don\'t use blacklist, whitelist, '  # nocheck
            'or slave in your',  # nocheck
            'code and make every effort to use other terms. Using "// nocheck"',
            '"# nocheck" or "<!-- nocheck -->"',
            'at the end of the offending line will bypass this PRESUBMIT error',
            'but avoid using this whenever possible. Reach out to',
            'community@chromium.org if you have questions'),
        True),)


def _GetMessageForMatchingTerm(input_api, affected_file, line_number, line,
                               term, message):
  """Helper method for CheckInclusiveLanguage.

  Returns a string composed of the name of the file, the line number where
  the match has been found and the additional text passed as |message| in
  case the target type name matches the text inside the line passed as a
  parameter.
  """
  result = []

  # A // nocheck comment will bypass this error.
  if line.endswith(" nocheck") or line.endswith("<!-- nocheck -->"):
    return result

  # Ignore C-style single-line comments about banned terms.
  if input_api.re.search(r"//.*$", line):
    line = input_api.re.sub(r"//.*$", "", line)

  # Ignore lines from C-style multi-line comments.
  if input_api.re.search(r"^\s*\*", line):
    return result

  # Ignore Python-style comments about banned terms.
  # This actually removes comment text from the first # on.
  if input_api.re.search(r"#.*$", line):
    line = input_api.re.sub(r"#.*$", "", line)

  matched = False
  if term[0:1] == '/':
    regex = term[1:]
    if input_api.re.search(regex, line):
      matched = True
  elif term in line:
    matched = True

  if matched:
    result.append(' %s:%d:' % (affected_file.LocalPath(), line_number))
    for message_line in message:
      result.append(' %s' % message_line)

  return result


def CheckInclusiveLanguage(input_api, output_api,
                           excluded_directories_relative_path=None,
                           non_inclusive_terms=_NON_INCLUSIVE_TERMS):
  """Make sure that banned non-inclusive terms are not used."""

  # Presubmit checks may run on a bot where the changes are actually
  # in a repo that isn't chromium/src (e.g., when testing src + tip-of-tree
  # ANGLE), but this particular check only makes sense for changes to
  # chromium/src.
  if input_api.change.RepositoryRoot() != input_api.PresubmitLocalPath():
    return []

  warnings = []
  errors = []

  if excluded_directories_relative_path is None:
    excluded_directories_relative_path = [
        'infra',
        'inclusive_language_presubmit_exempt_dirs.txt'
    ]

  # Note that this matches exact path prefixes, and does not match
  # subdirectories. Only files directly in an excluded path will
  # match.
  def IsExcludedFile(affected_file, excluded_paths):
    local_dir = input_api.os_path.dirname(affected_file.LocalPath())
    return local_dir in excluded_paths

  def CheckForMatch(affected_file, line_num, line, term, message, error):
    problems = _GetMessageForMatchingTerm(input_api, affected_file, line_num,
                                          line, term, message)
    if problems:
      if error:
        errors.extend(problems)
      else:
        warnings.extend(problems)

  excluded_paths = []
  dirs_file_path = input_api.os_path.join(input_api.change.RepositoryRoot(),
                                          *excluded_directories_relative_path)
  f = input_api.ReadFile(dirs_file_path)
  for line in f.splitlines():
    # Guard against blank lines in the exempt-dirs file; ''.split() is empty,
    # so indexing [0] unconditionally would raise IndexError.
    parts = line.split()
    if len(parts) > 0:
      excluded_paths.append(parts[0])

  excluded_paths = set(excluded_paths)
  for f in input_api.AffectedFiles():
    for line_num, line in f.ChangedContents():
      for term, message, error in non_inclusive_terms:
        if IsExcludedFile(f, excluded_paths):
          continue
        CheckForMatch(f, line_num, line, term, message, error)

  result = []
  if (warnings):
    result.append(
        output_api.PresubmitPromptWarning(
            'Banned non-inclusive language was used.\n' +
            '\n'.join(warnings)))
  if (errors):
    result.append(
        output_api.PresubmitError(
            'Banned non-inclusive language was used.\n' +
            '\n'.join(errors)))
  return result
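As a side note on the CIPD checks above: a small self-contained sketch (an editor's illustration, not part of presubmit_canned_checks.py) that mirrors the ensure-file manifest CheckCIPDPackages assembles before handing it to CheckCIPDManifest. The platform and package names below are made-up placeholders.

# Mirrors the manifest-building loop in CheckCIPDPackages above.
# 'linux-amd64' / 'example/tools/foo/${platform}' are hypothetical values.
platforms = ['linux-amd64', 'windows-amd64']
packages = {'example/tools/foo/${platform}': 'latest'}

manifest = []
for p in platforms:
    manifest.append('$VerifiedPlatform %s' % (p,))
for k, v in packages.items():
    manifest.append('%s %s' % (k, v))

print('\n'.join(manifest))
# Expected output:
#   $VerifiedPlatform linux-amd64
#   $VerifiedPlatform windows-amd64
#   example/tools/foo/${platform} latest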
CoherentLabs/depot_tools
presubmit_canned_checks.py
Python
bsd-3-clause
71,292
[ "VisIt" ]
9e7781eeab288231cab05c4e047efc1d7154265a11fb3bf27b996e27854a1088
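For orientation, a hedged sketch of how a project consumes the canned checks defined in the depot_tools file above: presubmit_support discovers hook functions such as CheckChangeOnUpload in a project-level PRESUBMIT.py and calls them with input_api/output_api objects. The particular checks wired up here are an illustrative choice, not something the file above prescribes.

# PRESUBMIT.py (sketch) -- input_api/output_api are supplied by depot_tools.
def CheckChangeOnUpload(input_api, output_api):
  results = []
  # Canned checks are exposed on input_api.canned_checks, as PanProjectChecks
  # above demonstrates internally.
  results.extend(input_api.canned_checks.CheckJsonParses(input_api, output_api))
  results.extend(input_api.canned_checks.CheckPatchFormatted(input_api, output_api))
  return results


def CheckChangeOnCommit(input_api, output_api):
  return input_api.canned_checks.PanProjectChecks(input_api, output_api)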
# encoding: utf-8
"""
Creates geometry objects from facets.
"""

from yade.wrapper import *
import utils,math,numpy

try:
    from minieigen import *
except ImportError:
    from miniEigen import *

#facetBox===============================================================
def facetBox(center,extents,orientation=Quaternion.Identity,wallMask=63,**kw):
    """
    Create arbitrarily-aligned box composed of facets, with given center, extents and orientation.
    If any of the box dimensions is zero, corresponding facets will not be created. The facets are oriented outwards from the box.

    :param Vector3 center: center of the box
    :param Vector3 extents: lengths of the box sides
    :param Quaternion orientation: orientation of the box
    :param bitmask wallMask: determines which walls will be created, in the order -x (1), +x (2), -y (4), +y (8), -z (16), +z (32). The numbers are ANDed; the default 63 means to create all walls
    :param \*\*kw: (unused keyword arguments) passed to :yref:`yade.utils.facet`
    :returns: list of facets forming the box
    """
    return facetParallelepiped(center=center, extents=extents, height=extents[2], orientation=orientation, wallMask=wallMask, **kw)

#facetParallelepiped===============================================================
def facetParallelepiped(center,extents,height,orientation=Quaternion.Identity,wallMask=63,**kw):
    """
    Create arbitrarily-aligned Parallelepiped composed of facets, with given center, extents, height and orientation.
    If any of the parallelepiped dimensions is zero, corresponding facets will not be created. The facets are oriented outwards from the parallelepiped.

    :param Vector3 center: center of the parallelepiped
    :param Vector3 extents: lengths of the parallelepiped sides
    :param Real height: height of the parallelepiped (along axis z)
    :param Quaternion orientation: orientation of the parallelepiped
    :param bitmask wallMask: determines which walls will be created, in the order -x (1), +x (2), -y (4), +y (8), -z (16), +z (32). The numbers are ANDed; the default 63 means to create all walls
    :param \*\*kw: (unused keyword arguments) passed to :yref:`yade.utils.facet`
    :returns: list of facets forming the parallelepiped
    """
    if (height<0): raise RuntimeError("The height should have a non-negative value");
    if (height>extents[2]): raise RuntimeError("The height should be smaller than or equal to extents[2]");

    # Defend against zero dimensions
    if (wallMask>63):
        print "wallMask must be 63 or less"
        wallMask=63
    if (extents[0]==0): wallMask=1
    elif (extents[1]==0): wallMask=4
    elif (extents[2]==0 or height==0): wallMask=16
    if (((extents[0]==0) and (extents[1]==0)) or ((extents[0]==0) and (extents[2]==0)) or ((extents[1]==0) and (extents[2]==0))):
        raise RuntimeError("Please, specify at least 2 non-zero dimensions in extents!");
    # ___________________________

    # inclination angle
    beta = 0; dx = 0
    if (height>0):
        beta = math.asin(height/extents[2])
        dx = math.cos(beta)*extents[2]

    mn,mx=[-extents[i] for i in 0,1,2],[extents[i] for i in 0,1,2]
    def doWall(a,b,c,d):
        return [utils.facet((a,b,c),**kw),utils.facet((a,c,d),**kw)]
    ret=[]

    mn[2] = -height
    mx[2] = +height

    A=orientation*Vector3(mn[0],mn[1],mn[2])+center
    B=orientation*Vector3(mx[0],mn[1],mn[2])+center
    C=orientation*Vector3(mx[0],mx[1],mn[2])+center
    D=orientation*Vector3(mn[0],mx[1],mn[2])+center
    E=orientation*Vector3(mn[0]+dx,mn[1],mx[2])+center
    F=orientation*Vector3(mx[0]+dx,mn[1],mx[2])+center
    G=orientation*Vector3(mx[0]+dx,mx[1],mx[2])+center
    H=orientation*Vector3(mn[0]+dx,mx[1],mx[2])+center

    if wallMask&1: ret+=doWall(A,D,H,E)
    if wallMask&2: ret+=doWall(B,F,G,C)
    if wallMask&4: ret+=doWall(A,E,F,B)
    if wallMask&8: ret+=doWall(D,C,G,H)
    if wallMask&16: ret+=doWall(A,B,C,D)
    if wallMask&32: ret+=doWall(E,H,G,F)
    return ret

#facetCylinder==========================================================
def facetCylinder(center,radius,height,orientation=Quaternion.Identity,segmentsNumber=10,wallMask=7,angleRange=None,closeGap=False,**kw):
    """
    Create arbitrarily-aligned cylinder composed of facets, with given center, radius, height and orientation.
    Return List of facets forming the cylinder;

    :param Vector3 center: center of the created cylinder
    :param float radius: cylinder radius
    :param float height: cylinder height
    :param Quaternion orientation: orientation of the cylinder; the reference orientation has axis along the $+x$ axis.
    :param int segmentsNumber: number of edges on the cylinder surface (>=5)
    :param bitmask wallMask: determines which walls will be created, in the order up (1), down (2), side (4). The numbers are ANDed; the default 7 means to create all walls
    :param (θmin,Θmax) angleRange: allows one to create only part of cylinder by specifying range of angles; if ``None``, (0,2*pi) is assumed.
    :param bool closeGap: close range skipped in angleRange with triangular facets at cylinder bases.
    :param \*\*kw: (unused keyword arguments) passed to utils.facet;
    """
    # check for zero dimensions
    if (radius<=0): raise RuntimeError("The radius should have a positive value");
    if (height<=0): wallMask = 1;
    return facetCylinderConeGenerator(center=center,radiusTop=radius,height=height,orientation=orientation,segmentsNumber=segmentsNumber,wallMask=wallMask,angleRange=angleRange,closeGap=closeGap,**kw)

#facetSphere==========================================================
def facetSphere(center,radius,thetaResolution=8,phiResolution=8,returnElementMap=False,**kw):
    """
    Create arbitrarily-aligned sphere composed of facets, with given center, radius and orientation.
    Return List of facets forming the sphere. Parameters inspired by ParaView sphere glyph

    :param Vector3 center: center of the created sphere
    :param float radius: sphere radius
    :param int thetaResolution: number of facets around "equator"
    :param int phiResolution: number of facets between "poles" + 1
    :param bool returnElementMap: returns also tuple of nodes ((x1,y1,z1),(x2,y2,z2),...) and elements ((id01,id02,id03),(id11,id12,id13),...) if true, only facets otherwise
    :param \*\*kw: (unused keyword arguments) passed to utils.facet;
    """
    # check for zero dimensions
    if (radius<=0): raise RuntimeError("The radius should have a positive value");
    if (thetaResolution<3): raise RuntimeError("thetaResolution must be >= 3");
    if (phiResolution<3): raise RuntimeError("phiResolution must be >= 3");

    r,c0,c1,c2 = radius,center[0],center[1],center[2]
    nodes = [Vector3(c0,c1,c2+radius)]
    phis = numpy.linspace(math.pi/(phiResolution-1),math.pi,phiResolution-2,endpoint=False)
    thetas = numpy.linspace(0,2*math.pi,thetaResolution,endpoint=False)
    nodes.extend((Vector3(c0+r*math.cos(theta)*math.sin(phi),c1+r*math.sin(theta)*math.sin(phi),c2+r*math.cos(phi)) for phi in phis for theta in thetas))
    nodes.append(Vector3(c0,c1,c2-radius))

    n = len(nodes)-1
    elements = [(0,i+1,i+2) for i in xrange(thetaResolution-1)]
    elements.append((0,1,thetaResolution))
    for j in xrange(0,phiResolution-3):
        k = j*thetaResolution + 1
        elements.extend((k+i,k+i+1,k+i+thetaResolution) for i in xrange(thetaResolution-1))
        elements.append((k,k+thetaResolution-1,k+2*thetaResolution-1))
        elements.extend((k+i+thetaResolution,k+i+1+thetaResolution,k+i+1) for i in xrange(thetaResolution-1))
        elements.append((k+2*thetaResolution-1,k+thetaResolution,k))
    elements.extend((n,n-i-1,n-i-2) for i in xrange(thetaResolution-1))
    elements.append((n,n-1,n-thetaResolution))

    facets = [utils.facet(tuple(nodes[node] for node in elem),**kw) for elem in elements]
    if returnElementMap:
        return facets,nodes,elements
    return facets

#facetCone==============================================================
def facetCone(center,radiusTop,radiusBottom,height,orientation=Quaternion.Identity,segmentsNumber=10,wallMask=7,angleRange=None,closeGap=False,**kw):
    """
    Create arbitrarily-aligned cone composed of facets, with given center, radii, height and orientation.
    Return List of facets forming the cone;

    :param Vector3 center: center of the created cone
    :param float radiusTop: cone top radius
    :param float radiusBottom: cone bottom radius
    :param float height: cone height
    :param Quaternion orientation: orientation of the cone; the reference orientation has axis along the $+x$ axis.
    :param int segmentsNumber: number of edges on the cone surface (>=5)
    :param bitmask wallMask: determines which walls will be created, in the order up (1), down (2), side (4). The numbers are ANDed; the default 7 means to create all walls
    :param (θmin,Θmax) angleRange: allows one to create only part of cone by specifying range of angles; if ``None``, (0,2*pi) is assumed.
    :param bool closeGap: close range skipped in angleRange with triangular facets at cone bases.
    :param \*\*kw: (unused keyword arguments) passed to utils.facet;
    """
    # check for zero dimensions
    if ((radiusBottom<=0) and (radiusTop<=0)): raise RuntimeError("The radiusBottom or radiusTop should have a positive value");
    return facetCylinderConeGenerator(center=center,radiusTop=radiusTop,radiusBottom=radiusBottom,height=height,orientation=orientation,segmentsNumber=segmentsNumber,wallMask=wallMask,angleRange=angleRange,closeGap=closeGap,**kw)

#facetPolygon===========================================================
def facetPolygon(center,radiusOuter,orientation=Quaternion.Identity,segmentsNumber=10,angleRange=None,radiusInner=0,**kw):
    """
    Create arbitrarily-aligned polygon composed of facets, with given center, radius (outer and inner) and orientation.
    Return List of facets forming the polygon;

    :param Vector3 center: center of the created polygon
    :param float radiusOuter: outer radius
    :param float radiusInner: inner radius (can be 0)
    :param Quaternion orientation: orientation of the polygon; the reference orientation has axis along the $+x$ axis.
    :param int segmentsNumber: number of edges on the polygon surface (>=3)
    :param (θmin,Θmax) angleRange: allows one to create only part of polygon by specifying range of angles; if ``None``, (0,2*pi) is assumed.
    :param \*\*kw: (unused keyword arguments) passed to utils.facet;
    """
    # check the angle range (guard against None before indexing it)
    if (angleRange is not None) and (abs(angleRange[1]-angleRange[0])>2.0*math.pi):
        raise RuntimeError("The |angleRange| cannot be larger than 2.0*math.pi");
    return facetPolygonHelixGenerator(center=center,radiusOuter=radiusOuter,orientation=orientation,segmentsNumber=segmentsNumber,angleRange=angleRange,radiusInner=radiusInner,**kw)

#facetHelix===========================================================
def facetHelix(center,radiusOuter,pitch,orientation=Quaternion.Identity,segmentsNumber=10,angleRange=None,radiusInner=0,**kw):
    """
    Create arbitrarily-aligned helix composed of facets, with given center, radius (outer and inner), pitch and orientation.
    Return List of facets forming the helix;

    :param Vector3 center: center of the created helix
    :param float radiusOuter: outer radius
    :param float radiusInner: inner radius (can be 0)
    :param Quaternion orientation: orientation of the helix; the reference orientation has axis along the $+x$ axis.
    :param int segmentsNumber: number of edges on the helix surface (>=3)
    :param (θmin,Θmax) angleRange: range of angles; if ``None``, (0,2*pi) is assumed.
    :param \*\*kw: (unused keyword arguments) passed to utils.facet;
    """
    # check for zero dimensions
    if (pitch<=0): raise RuntimeError("The pitch should have a positive value");
    return facetPolygonHelixGenerator(center=center,radiusOuter=radiusOuter,orientation=orientation,segmentsNumber=segmentsNumber,angleRange=angleRange,radiusInner=radiusInner,pitch=pitch,**kw)

#facetBunker============================================================
def facetBunker(center,dBunker,dOutput,hBunker,hOutput,hPipe=0.0,orientation=Quaternion.Identity,segmentsNumber=10,wallMask=4,angleRange=None,closeGap=False,**kw):
    """
    Create arbitrarily-aligned bunker, composed of facets, with given center, radii, heights and orientation.
    Return List of facets forming the bunker;

    .. code-block:: none

           dBunker
        ______________
        |            |
        |            |
        |            |   hBunker
        |            |
        |            |
        |____________|
        \            /
         \          /
          \        /   hOutput
           \      /
            \____/
            |    |
            |____|     hPipe
            dOutput

    :param Vector3 center: center of the created bunker
    :param float dBunker: bunker diameter, top
    :param float dOutput: bunker output diameter
    :param float hBunker: bunker height
    :param float hOutput: bunker output height
    :param float hPipe: bunker pipe height
    :param Quaternion orientation: orientation of the bunker; the reference orientation has axis along the $+x$ axis.
    :param int segmentsNumber: number of edges on the bunker surface (>=5)
    :param bitmask wallMask: determines which walls will be created, in the order up (1), down (2), side (4). The numbers are ANDed; the default 4 means to create only the side walls
    :param (θmin,Θmax) angleRange: allows one to create only part of bunker by specifying range of angles; if ``None``, (0,2*pi) is assumed.
    :param bool closeGap: close range skipped in angleRange with triangular facets at cylinder bases.
    :param \*\*kw: (unused keyword arguments) passed to utils.facet;
    """
    # check for zero dimensions
    if (dBunker<=0): raise RuntimeError("The diameter dBunker should have a positive value");
    if (dOutput<=0): raise RuntimeError("The diameter dOutput should have a positive value");
    if (hBunker<0): raise RuntimeError("The height hBunker should have a positive value or zero");
    if (hOutput<=0): raise RuntimeError("The height hOutput should have a positive value");
    if (hPipe<0): raise RuntimeError("The height hPipe should have a positive value or zero");

    ret=[]
    if ((hPipe>0) or (wallMask&2)):
        centerPipe = Vector3(0,0,hPipe/2.0)
        ret+=facetCylinder(center=centerPipe,radius=dOutput/2.0,height=hPipe,segmentsNumber=segmentsNumber,wallMask=wallMask&6,angleRange=angleRange,closeGap=closeGap,**kw)

    centerOutput = Vector3(0.0,0.0,hPipe+hOutput/2.0)
    ret+=facetCone(center=centerOutput,radiusTop=dBunker/2.0,radiusBottom=dOutput/2.0,height=hOutput,segmentsNumber=segmentsNumber,wallMask=wallMask&4,angleRange=angleRange,closeGap=closeGap,**kw)

    if (hBunker>0):
        centerBunker = Vector3(0.0,0.0,hPipe+hOutput+hBunker/2.0)
        ret+=facetCylinder(center=centerBunker,radius=dBunker/2.0,height=hBunker,segmentsNumber=segmentsNumber,wallMask=wallMask&5,angleRange=angleRange,closeGap=closeGap,**kw)

    for i in ret:
        i.state.pos=orientation*(i.state.pos)+Vector3(center)
        i.state.ori=orientation

    return ret

#facetPolygonHelixGenerator==================================================
def facetPolygonHelixGenerator(center,radiusOuter,pitch=0,orientation=Quaternion.Identity,segmentsNumber=10,angleRange=None,radiusInner=0,**kw):
    """
    Please, do not use this function directly! Use geom.facetPolygon and geom.facetHelix instead.
    This is the base function for generating polygons and helixes from facets.
    """
    # check for zero dimensions
    if (segmentsNumber<3): raise RuntimeError("The segmentsNumber should be at least 3");
    if (radiusOuter<=0): raise RuntimeError("The radiusOuter should have a positive value");
    if (radiusInner<0): raise RuntimeError("The radiusInner should have a positive value or 0");
    if angleRange is None: angleRange=(0,2*math.pi)

    anglesInRad = numpy.linspace(angleRange[0], angleRange[1], segmentsNumber+1, endpoint=True)
    heightsInRad = numpy.linspace(0, pitch*(abs(angleRange[1]-angleRange[0])/(2.0*math.pi)), segmentsNumber+1, endpoint=True)

    POuter=[]; PInner=[]; PCenter=[]; z=0;
    for i in anglesInRad:
        XOuter=radiusOuter*math.cos(i); YOuter=radiusOuter*math.sin(i);
        POuter.append(Vector3(XOuter,YOuter,heightsInRad[z]))
        PCenter.append(Vector3(0,0,heightsInRad[z]))
        if (radiusInner!=0):
            XInner=radiusInner*math.cos(i); YInner=radiusInner*math.sin(i);
            PInner.append(Vector3(XInner,YInner,heightsInRad[z]))
        z+=1

    for i in range(0,len(POuter)):
        POuter[i]=orientation*POuter[i]+center
        PCenter[i]=orientation*PCenter[i]+center
        if (radiusInner!=0):
            PInner[i]=orientation*PInner[i]+center

    ret=[]
    for i in range(1,len(POuter)):
        if (radiusInner==0):
            ret.append(utils.facet((PCenter[i],POuter[i],POuter[i-1]),**kw))
        else:
            ret.append(utils.facet((PInner[i-1],POuter[i-1],POuter[i]),**kw))
            ret.append(utils.facet((PInner[i],PInner[i-1],POuter[i]),**kw))

    return ret

#facetCylinderConeGenerator=============================================
def facetCylinderConeGenerator(center,radiusTop,height,orientation=Quaternion.Identity,segmentsNumber=10,wallMask=7,angleRange=None,closeGap=False,radiusBottom=-1,**kw):
    """
    Please, do not use this function directly! Use geom.facetCylinder and geom.facetCone instead.
    This is the base function for generating cylinders and cones from facets.

    :param float radiusTop: top radius
    :param float radiusBottom: bottom radius
    :param \*\*kw: (unused keyword arguments) passed to utils.facet;
    """
    # For cylinders top and bottom radii are equal
    if (radiusBottom == -1): radiusBottom = radiusTop

    # check for zero dimensions
    if (segmentsNumber<3): raise RuntimeError("The segmentsNumber should be at least 3");
    if (height<0): raise RuntimeError("The height should have a non-negative value");
    if angleRange is None: angleRange=(0,2*math.pi)
    # accept a bare Θmax for backwards compatibility, but warn about it;
    # this must happen before angleRange is indexed below
    if isinstance(angleRange,float):
        print u'WARNING: geom.facetCylinder, angleRange should be (Θmin,Θmax), not just Θmax (one number), update your code.'
        angleRange=(0,angleRange)
    if (abs(angleRange[1]-angleRange[0])>2.0*math.pi): raise RuntimeError("The |angleRange| cannot be larger than 2.0*math.pi");
    if (angleRange[1]<angleRange[0]): raise RuntimeError("angleRange[1] should be larger than or equal to angleRange[0]");

    anglesInRad = numpy.linspace(angleRange[0], angleRange[1], segmentsNumber+1, endpoint=True)

    PTop=[]; PTop.append(Vector3(0,0,+height/2))
    PBottom=[]; PBottom.append(Vector3(0,0,-height/2))

    for i in anglesInRad:
        XTop=radiusTop*math.cos(i); YTop=radiusTop*math.sin(i);
        PTop.append(Vector3(XTop,YTop,+height/2))
        XBottom=radiusBottom*math.cos(i); YBottom=radiusBottom*math.sin(i);
        PBottom.append(Vector3(XBottom,YBottom,-height/2))

    for i in range(0,len(PTop)):
        PTop[i]=orientation*PTop[i]+center
        PBottom[i]=orientation*PBottom[i]+center

    ret=[]
    for i in range(2,len(PTop)):
        if (wallMask&1)and(radiusTop!=0):
            ret.append(utils.facet((PTop[0],PTop[i],PTop[i-1]),**kw))
        if (wallMask&2)and(radiusBottom!=0):
            ret.append(utils.facet((PBottom[0],PBottom[i-1],PBottom[i]),**kw))
        if wallMask&4:
            if (radiusBottom!=0):
                ret.append(utils.facet((PTop[i],PBottom[i],PBottom[i-1]),**kw))
            if (radiusTop!=0):
                ret.append(utils.facet((PBottom[i-1],PTop[i-1],PTop[i]),**kw))

    if (closeGap):
        if (wallMask&1)and(radiusTop!=0)and(abs(angleRange[1]-angleRange[0]) > math.pi):
            pts=[(radiusTop*math.cos(angleRange[i]),radiusTop*math.sin(angleRange[i])) for i in (0,1)]
            pp=[(pts[0][0],pts[0][1],+height/2.0), (pts[1][0],pts[1][1],+height/2.0), (0,0,+height/2.0)]
            pp=[orientation*p+center for p in pp]
            ret.append(utils.facet(pp,**kw))
        if (wallMask&2)and(radiusBottom!=0)and(abs(angleRange[1]-angleRange[0]) > math.pi):
            pts=[(radiusBottom*math.cos(angleRange[i]),radiusBottom*math.sin(angleRange[i])) for i in (0,1)]
            pp=[(0,0,-height/2.0), (pts[1][0],pts[1][1],-height/2.0), (pts[0][0],pts[0][1],-height/2.0)]
            pp=[orientation*p+center for p in pp]
            ret.append(utils.facet(pp,**kw))
        if (wallMask&4):
            ptsBottom=[(radiusBottom*math.cos(angleRange[i]),radiusBottom*math.sin(angleRange[i])) for i in (0,1)]
            ptsTop=[(radiusTop*math.cos(angleRange[i]),radiusTop*math.sin(angleRange[i])) for i in (0,1)]
            if (abs(angleRange[1]-angleRange[0]) >= math.pi):
                if (radiusBottom!=0)and(radiusTop!=0): #Cylinder
                    pp=[(ptsBottom[0][0],ptsBottom[0][1],-height/2.0),(ptsBottom[1][0],ptsBottom[1][1],-height/2.0),(ptsTop[0][0],ptsTop[0][1],height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
                    pp=[(ptsBottom[1][0],ptsBottom[1][1],-height/2.0), (ptsTop[1][0],ptsTop[1][1],height/2.0), (ptsTop[0][0],ptsTop[0][1],height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
                elif (radiusBottom==0)and(radiusTop!=0): #ConeTop
                    pp=[(ptsTop[1][0],ptsTop[1][1],height/2.0), (ptsTop[0][0],ptsTop[0][1],height/2.0), (0,0,-height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
                elif (radiusTop==0)and(radiusBottom!=0): #ConeBottom
                    pp=[(0,0,height/2.0),(ptsBottom[0][0],ptsBottom[0][1],-height/2.0),(ptsBottom[1][0],ptsBottom[1][1],-height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
            else:
                if (radiusBottom!=0)and(radiusTop!=0): #Cylinder
                    pp=[(ptsBottom[0][0],ptsBottom[0][1],-height/2.0),(0,0,-height/2.0),(ptsTop[0][0],ptsTop[0][1],height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
                    pp=[(0,0,-height/2.0), (0,0,height/2.0), (ptsTop[0][0],ptsTop[0][1],height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
                    pp=[(0,0,-height/2.0),(ptsBottom[1][0],ptsBottom[1][1],-height/2.0),(0,0,height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
                    pp=[(ptsBottom[1][0],ptsBottom[1][1],-height/2.0), (ptsTop[1][0],ptsTop[1][1],height/2.0), (0,0,height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
                elif (radiusBottom==0)and(radiusTop!=0): #ConeTop
                    pp=[(0,0,height/2.0), (ptsTop[0][0],ptsTop[0][1],height/2.0), (0,0,-height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
                    pp=[(ptsTop[1][0],ptsTop[1][1],height/2.0), (0,0,height/2.0), (0,0,-height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
                elif (radiusTop==0)and(radiusBottom!=0): #ConeBottom
                    pp=[(0,0,height/2.0),(ptsBottom[0][0],ptsBottom[0][1],-height/2.0),(0,0,-height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
                    pp=[(0,0,height/2.0),(0,0,-height/2.0),(ptsBottom[1][0],ptsBottom[1][1],-height/2.0)]
                    pp=[orientation*p+center for p in pp]
                    ret.append(utils.facet(pp,**kw))
    return ret
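The wallMask plumbing in facetBunker above (passing wallMask&6, wallMask&4 and wallMask&5 to the sub-shapes) is ordinary bit masking over the up/down/side bits; a tiny self-contained demonstration of that arithmetic, added purely as an editor's illustration:

# Wall bits used by the cylinder/cone generators above.
UP, DOWN, SIDE = 1, 2, 4

wallMask = UP | DOWN | SIDE             # 7: all walls requested
assert (wallMask & 6) == (DOWN | SIDE)  # pipe section: its top is stripped
assert (wallMask & 5) == (UP | SIDE)    # upper cylinder: its bottom is stripped
assert (wallMask & 4) == SIDE           # cone section: side wall only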
bchareyre/trial
py/geom.py
Python
gpl-2.0
22,443
[ "ParaView" ]
702ac41a8f8151920ad4dd0a589c0bd35f1ae1d6ffc457d5c3dd619d8eef0cda
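A minimal usage sketch for the generators in the file above, as they would appear inside a yade session; the global simulation object O exists only under yade, so this is illustrative rather than plain-Python runnable.

# Inside a yade script (O is yade's global Omega instance):
from yade import geom

# Closed unit cube centred at the origin; wallMask=63 requests all six walls.
O.bodies.append(geom.facetBox((0, 0, 0), (.5, .5, .5), wallMask=63))

# Open-topped cylinder: with up=1, down=2, side=4, a mask of 6 keeps
# only the bottom and the side.
O.bodies.append(geom.facetCylinder((0, 0, 1), radius=.4, height=.8,
                                   segmentsNumber=24, wallMask=6))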
import string
import math

from pymol.cgo import *
from pymol import cmd

axes = [
    LINEWIDTH, 3.0,
    BEGIN, LINES,
    COLOR, 0.2, 1.0, 0.2,
    VERTEX, 0.0, 0.0, 0.0,
    VERTEX, 3.0, 0.0, 0.0,
    COLOR, 1.0, 0.2, 0.2,
    VERTEX, 0.0, 0.0, 0.0,
    VERTEX, 0.0, 3.0, 0.0,
    COLOR, 0.2, 0.2, 1.0,
    VERTEX, 0.0, 0.0, 0.0,
    VERTEX, 0.0, 0.0, 3.0,
    END,
]

c = 0
for a in xrange(0, 63):
    balls = [
        COLOR, 0.2, 1.0, 0.2,
        SPHERE, 1.0 + math.cos(a / 10.0), 1.0 + math.sin(a / 20.0),
        1.0 + math.cos(a / 10.0), 0.2 + math.cos(a / 5.0) / 10.0,
        COLOR, 1.0, 0.2, 0.2,
        SPHERE, 2.0 - math.cos(a / 10.0), 1.0 + math.sin(0.5 + a / 10.0),
        1.0 + math.cos(a / 10.0), 0.2 + math.cos(a / 5.0) / 10.0,
    ]
    obj = axes + balls
    cmd.load_cgo(obj, 'cgo01', c)
    c = c + 1

    # counter label
    pdb_list = [
        "HETATM%5d  C   UNK     1    %8.3f%8.3f%8.3f  1.00 10.00\n" % (c, 2.0, 0, 2.0),
    ]
    cmd.read_pdbstr(string.join(pdb_list, ''), 'lab1', c, discrete=1)
    cmd.label("(lab1 and id %d)" % c, "'frame %d %6.3f'" % (c, math.sin(a / 10.0)))
    cmd.hide("nonbonded", "lab1")

# axes labels
pdb_list = [
    "HETATM    1  X   UNK     1    %8.3f%8.3f%8.3f  1.00 10.00\n" % (3.2, 0, 0),
    "HETATM    2  Y   UNK     2    %8.3f%8.3f%8.3f  1.00 10.00\n" % (0, 3.2, 0),
    "HETATM    3  Z   UNK     3    %8.3f%8.3f%8.3f  1.00 10.00\n" % (0, 0, 3.2),
]
cmd.read_pdbstr(string.join(pdb_list, ''), 'lab2')
cmd.hide('(lab2)')
cmd.label('lab2', 'name')
cmd.color('white', 'lab2')

cmd.zoom('cgo01')
cmd.clip('far', -5)
cmd.mplay()
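A pared-down variant of the CGO pattern the script above relies on, kept as a hedged sketch: build one flat list of CGO primitives and hand it to cmd.load_cgo. It runs only inside PyMOL.

# Minimal single-state CGO object: a green sphere plus one red line.
from pymol.cgo import BEGIN, COLOR, END, LINES, LINEWIDTH, SPHERE, VERTEX
from pymol import cmd

obj = [
    COLOR, 0.2, 1.0, 0.2,
    SPHERE, 0.0, 0.0, 0.0, 0.5,   # x, y, z, radius
    LINEWIDTH, 2.0,
    BEGIN, LINES,
    COLOR, 1.0, 0.2, 0.2,
    VERTEX, 0.0, 0.0, 0.0,
    VERTEX, 2.0, 0.0, 0.0,
    END,
]
cmd.load_cgo(obj, 'cgo_minimal')  # one state; no movie frames needed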
gratefulfrog/lib
python/pymol/pymol_path/examples/devel/cgo_label_hack.py
Python
gpl-2.0
1,498
[ "PyMOL" ]
d41646be90be071c9fd5cec67dcf8044807700d866c1dff442cd83b9583a3c72
""" Student Views """ import datetime import logging import uuid import json import warnings from collections import defaultdict from urlparse import urljoin, urlsplit, parse_qs, urlunsplit from django.views.generic import TemplateView from pytz import UTC from requests import HTTPError from ipware.ip import get_ip import edx_oauth2_provider from django.conf import settings from django.contrib.auth import logout, authenticate, login from django.contrib.auth.models import User, AnonymousUser from django.contrib.auth.decorators import login_required from django.contrib.auth.views import password_reset_confirm from django.contrib import messages from django.core.context_processors import csrf from django.core import mail from django.core.urlresolvers import reverse, NoReverseMatch, reverse_lazy from django.core.validators import validate_email, ValidationError from django.db import IntegrityError, transaction from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, Http404 from django.shortcuts import redirect from django.utils.encoding import force_bytes, force_text from django.utils.translation import ungettext from django.utils.http import base36_to_int, urlsafe_base64_encode, urlencode from django.utils.translation import ugettext as _, get_language from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie from django.views.decorators.http import require_POST, require_GET from django.db.models.signals import post_save from django.dispatch import receiver, Signal from django.template.response import TemplateResponse from provider.oauth2.models import Client from ratelimitbackend.exceptions import RateLimitException from social.apps.django_app import utils as social_utils from social.backends import oauth as social_oauth from social.exceptions import AuthException, AuthAlreadyAssociated from edxmako.shortcuts import render_to_response, render_to_string from util.enterprise_helpers import data_sharing_consent_requirement_at_login from course_modes.models import CourseMode from shoppingcart.api import order_history from student.models import ( Registration, UserProfile, PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user, CourseEnrollmentAllowed, UserStanding, LoginFailures, create_comments_service_user, PasswordHistory, UserSignupSource, DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED, LogoutViewConfiguration, RegistrationCookieConfiguration) from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form from student.tasks import send_activation_email from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error from bulk_email.models import Optout, BulkEmailFlag # pylint: disable=import-error from certificates.models import CertificateStatuses, certificate_status_for_student from certificates.api import ( # pylint: disable=import-error get_certificate_url, has_html_certificates_enabled, ) from lms.djangoapps.grades.new.course_grade import CourseGradeFactory from xmodule.modulestore.django import modulestore from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locations import SlashSeparatedCourseKey from opaque_keys.edx.locator import CourseLocator from xmodule.modulestore.exceptions import 
ItemNotFoundError from collections import namedtuple from courseware.courses import get_course_about_section from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error from courseware.access import has_access from courseware.models import CoursePreference from django_comment_common.models import Role from openedx.core.djangoapps.external_auth.models import ExternalAuthMap import openedx.core.djangoapps.external_auth.views from openedx.core.djangoapps.external_auth.login_and_register import ( login as external_auth_login, register as external_auth_register ) from bulk_email.models import Optout import track.views import dogstats_wrapper as dog_stats_api from util.date_utils import get_default_time_display from util.db import outer_atomic from util.json_request import JsonResponse from util.bad_request_rate_limiter import BadRequestRateLimiter from util.keyword_substitution import substitute_keywords_with_data from util.milestones_helpers import ( get_pre_requisite_courses_not_completed, ) from util.password_policy_validators import validate_password_strength import third_party_auth from third_party_auth import pipeline, provider from student.helpers import ( check_verify_status_by_course, auth_pipeline_urls, get_next_url_for_login_page, DISABLE_UNENROLL_CERT_STATES, destroy_oauth_tokens ) from student.cookies import set_logged_in_cookies, delete_logged_in_cookies, set_user_info_cookie from student.models import anonymous_id_for_user, UserAttribute, EnrollStatusChange from shoppingcart.models import DonationConfiguration, CourseRegistrationCode from openedx.core.djangoapps.embargo import api as embargo_api import analytics from eventtracking import tracker # Note that this lives in LMS, so this dependency should be refactored. 
from notification_prefs.views import enable_notifications from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY from openedx.core.djangoapps.programs import utils as programs_utils from openedx.core.djangoapps.programs.models import ProgramsApiConfig from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers from openedx.core.djangoapps.theming import helpers as theming_helpers from openedx.core.djangoapps.user_api.preferences import api as preferences_api from openedx.core.djangoapps.catalog.utils import get_programs_data from openedx.stanford.common.djangoapps.student.views import notify_enrollment_by_email log = logging.getLogger("edx.student") AUDIT_LOG = logging.getLogger("audit") ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated' # Used as the name of the user attribute for tracking affiliate registrations REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id' REGISTRATION_UTM_PARAMETERS = { 'utm_source': 'registration_utm_source', 'utm_medium': 'registration_utm_medium', 'utm_campaign': 'registration_utm_campaign', 'utm_term': 'registration_utm_term', 'utm_content': 'registration_utm_content', } REGISTRATION_UTM_CREATED_AT = 'registration_utm_created_at' # used to announce a registration REGISTER_USER = Signal(providing_args=["user", "profile"]) LOGIN_LOCKOUT_PERIOD_PLUS_FIVE_MINUTES = int((5 * 60 + settings.MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS) / 60) # Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint # pylint: disable=logging-format-interpolation def csrf_token(context): """A csrf token that can be included in a form.""" token = context.get('csrf_token', '') if token == 'NOTPROVIDED': return '' return (u'<div style="display:none"><input type="hidden"' ' name="csrfmiddlewaretoken" value="%s" /></div>' % (token)) # NOTE: This view is not linked to directly--it is called from # branding/views.py:index(), which is cached for anonymous users. # This means that it should always return the same thing for anon # users. (in particular, no switching based on query params allowed) def index(request, extra_context=None, user=AnonymousUser()): """ Render the edX main page. extra_context is used to allow immediate display of certain modal windows, eg signup, as used by external_auth. """ if extra_context is None: extra_context = {} programs_list = [] courses = get_courses(user) if configuration_helpers.get_value( "ENABLE_COURSE_SORTING_BY_START_DATE", settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"], ): courses = sort_by_start_date(courses) else: courses = sort_by_announcement(courses) context = {'courses': courses} context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html') # This appears to be an unused context parameter, at least for the master templates... 
context['show_partners'] = configuration_helpers.get_value('show_partners', True) # TO DISPLAY A YOUTUBE WELCOME VIDEO # 1) Change False to True context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False) # 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration # Note: This value should be moved into a configuration setting and plumbed-through to the # context via the site configuration workflow, versus living here youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id") context['homepage_promo_video_youtube_id'] = youtube_video_id # allow for theme override of the courses list context['courses_list'] = theming_helpers.get_template_path('courses_list.html') # Insert additional context for use in the template context.update(extra_context) # Getting all the programs from course-catalog service. The programs_list is being added to the context but it's # not being used currently in lms/templates/index.html. To use this list, you need to create a custom theme that # overrides index.html. The modifications to index.html to display the programs will be done after the support # for edx-pattern-library is added. if configuration_helpers.get_value("DISPLAY_PROGRAMS_ON_MARKETING_PAGES", settings.FEATURES.get("DISPLAY_PROGRAMS_ON_MARKETING_PAGES")): programs_list = get_programs_data(user) context["programs_list"] = programs_list return render_to_response('index.html', context) def process_survey_link(survey_link, user): """ If {UNIQUE_ID} appears in the link, replace it with a unique id for the user. Currently, this is sha1(user.username). Otherwise, return survey_link. """ return survey_link.format(UNIQUE_ID=unique_id_for_user(user)) def cert_info(user, course_overview, course_mode): """ Get the certificate info needed to render the dashboard section for the given student and course. Arguments: user (User): A user. course_overview (CourseOverview): A course. course_mode (str): The enrollment mode (honor, verified, audit, etc.) Returns: dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys: 'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted' 'show_download_url': bool 'download_url': url, only present if show_download_url is True 'show_disabled_download_button': bool -- true if state is 'generating' 'show_survey_button': bool 'survey_url': url, only if show_survey_button is True 'grade': if status is not 'processing' 'can_unenroll': if status allows for unenrollment """ if not course_overview.may_certify(): return {} return _cert_info( user, course_overview, certificate_status_for_student(user, course_overview.id), course_mode ) def reverification_info(statuses): """ Returns reverification-related information for *all* of user's enrollments whose reverification status is in statuses. Args: statuses (list): a list of reverification statuses we want information for example: ["must_reverify", "denied"] Returns: dictionary of lists: dictionary with one key per status, e.g. dict["must_reverify"] = [] dict["must_reverify"] = [some information] """ reverifications = defaultdict(list) # Sort the data by the reverification_end_date for status in statuses: if reverifications[status]: reverifications[status].sort(key=lambda x: x.date) return reverifications def get_course_enrollments(user, org_to_include, orgs_to_exclude): """ Given a user, return a filtered set of his or her course enrollments. 
Arguments: user (User): the user in question. org_to_include (str): If not None, ONLY courses of this org will be returned. orgs_to_exclude (list[str]): If org_to_include is not None, this argument is ignored. Else, courses of this org will be excluded. Returns: generator[CourseEnrollment]: a sequence of enrollments to be displayed on the user's dashboard. """ for enrollment in CourseEnrollment.enrollments_for_user(user): # If the course is missing or broken, log an error and skip it. course_overview = enrollment.course_overview if not course_overview: log.error( "User %s enrolled in broken or non-existent course %s", user.username, enrollment.course_id ) continue # Filter out anything that is not attributed to the current ORG. if org_to_include and course_overview.location.org != org_to_include: continue # Conversely, filter out any enrollments with courses attributed to current ORG. elif course_overview.location.org in orgs_to_exclude: continue # Else, include the enrollment. else: yield enrollment def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument """ Implements the logic for cert_info -- split out for testing. Arguments: user (User): A user. course_overview (CourseOverview): A course. course_mode (str): The enrollment mode (honor, verified, audit, etc.) """ # simplify the status for the template using this lookup table template_state = { CertificateStatuses.generating: 'generating', CertificateStatuses.downloadable: 'ready', CertificateStatuses.notpassing: 'notpassing', CertificateStatuses.restricted: 'restricted', CertificateStatuses.auditing: 'auditing', CertificateStatuses.audit_passing: 'auditing', CertificateStatuses.audit_notpassing: 'auditing', CertificateStatuses.unverified: 'unverified', } default_status = 'processing' default_info = { 'status': default_status, 'show_disabled_download_button': False, 'show_download_url': False, 'show_survey_button': False, 'can_unenroll': True, } if cert_status is None: return default_info is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing') if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status: return {} status = template_state.get(cert_status['status'], default_status) status_dict = { 'status': status, 'show_download_url': status == 'ready', 'show_disabled_download_button': status == 'generating', 'mode': cert_status.get('mode', None), 'linked_in_url': None, 'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES, } if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified') and course_overview.end_of_course_survey_url is not None): status_dict.update({ 'show_survey_button': True, 'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)}) else: status_dict['show_survey_button'] = False if status == 'ready': # showing the certificate web view button if certificate is ready state and feature flags are enabled. 
if has_html_certificates_enabled(course_overview.id, course_overview): if course_overview.has_any_active_web_certificate: status_dict.update({ 'show_cert_web_view': True, 'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid']) }) else: # don't show download certificate button if we don't have an active certificate for course status_dict['show_download_url'] = False elif 'download_url' not in cert_status: log.warning( u"User %s has a downloadable cert for %s, but no download url", user.username, course_overview.id ) return default_info else: status_dict['download_url'] = cert_status['download_url'] # If enabled, show the LinkedIn "add to profile" button # Clicking this button sends the user to LinkedIn where they # can add the certificate information to their profile. linkedin_config = LinkedInAddToProfileConfiguration.current() # posting certificates to LinkedIn is not currently # supported in White Labels if linkedin_config.enabled and not theming_helpers.is_request_in_themed_site(): status_dict['linked_in_url'] = linkedin_config.add_to_profile_url( course_overview.id, course_overview.display_name, cert_status.get('mode'), cert_status['download_url'] ) if status in {'generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified'}: persisted_grade = CourseGradeFactory().get_persisted(user, course_overview) if persisted_grade is not None: status_dict['grade'] = unicode(persisted_grade.percent) elif 'grade' in cert_status: status_dict['grade'] = cert_status['grade'] else: # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x, # who need to be regraded (we weren't tracking 'notpassing' at first). # We can add a log.warning here once we think it shouldn't happen. return default_info return status_dict @ensure_csrf_cookie def signin_user(request): """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.""" external_auth_response = external_auth_login(request) if external_auth_response is not None: return external_auth_response if UserProfile.has_registered(request.user): return redirect(reverse('dashboard')) # Determine the URL to redirect to following login: redirect_to = get_next_url_for_login_page(request) if request.user.is_authenticated(): return redirect(redirect_to) third_party_auth_error = None for msg in messages.get_messages(request): if msg.extra_tags.split()[0] == "social-auth": # msg may or may not be translated. Try translating [again] in case we are able to: third_party_auth_error = _(unicode(msg)) # pylint: disable=translation-of-non-string break context = { 'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header # Bool injected into JS to submit form if we're inside a running third- # party auth pipeline; distinct from the actual instance of the running # pipeline, if any. 'pipeline_running': 'true' if pipeline.running(request) else 'false', 'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to), 'platform_name': configuration_helpers.get_value( 'platform_name', settings.PLATFORM_NAME ), 'third_party_auth_error': third_party_auth_error } return render_to_response('login.html', context) @ensure_csrf_cookie def register_user(request, extra_context=None): """Deprecated. 
To be replaced by :class:`student_account.views.login_and_registration_form`."""
    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if UserProfile.has_registered(request.user):
        return redirect(redirect_to)

    external_auth_response = external_auth_register(request)
    if external_auth_response is not None:
        return external_auth_response

    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        'email': '',
        'name': '',
        'running_pipeline': None,
        'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
        'platform_name': configuration_helpers.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'selected_provider': '',
        'username': '',
    }

    if extra_context is not None:
        context.update(extra_context)

    if context.get("extauth_domain", '').startswith(
            openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
    ):
        return render_to_response('register-shib.html', context)

    # If third-party auth is enabled, prepopulate the form with data from the
    # selected provider.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        current_provider = provider.Registry.get_from_pipeline(running_pipeline)
        if current_provider is not None:
            overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
            overrides['running_pipeline'] = running_pipeline
            overrides['selected_provider'] = current_provider.name
            context.update(overrides)

    return render_to_response('register.html', context)


def complete_course_mode_info(course_id, enrollment, modes=None):
    """
    Compute additional information from the given course modes and the user's
    current enrollment.

    Returns the following information:
        - whether to show the course upsell information
        - the number of days until the user can no longer upsell
    """
    if modes is None:
        modes = CourseMode.modes_for_course_dict(course_id)

    mode_info = {'show_upsell': False, 'days_for_upsell': None}
    # we want to know if the user is already enrolled as verified or credit and
    # if verified is an option.
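    # For reference, `modes` maps mode slugs to CourseMode objects, e.g.
    # (illustrative): {'audit': CourseMode(...), 'verified': CourseMode(...)};
    # the membership checks below key off those slugs.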
    if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES:
        mode_info['show_upsell'] = True
        mode_info['verified_sku'] = modes['verified'].sku
        mode_info['verified_bulk_sku'] = modes['verified'].bulk_sku
        # if there is an expiration date, find out how long from now it is
        if modes['verified'].expiration_datetime:
            today = datetime.datetime.now(UTC).date()
            mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days

    return mode_info


def is_course_blocked(request, redeemed_registration_codes, course_key):
    """Check whether the user's access to this course is blocked because a
    redeemed registration code is tied to an invalid invoice."""
    blocked = False
    for redeemed_registration in redeemed_registration_codes:
        # Registration codes may be generated via the bulk purchase scenario.
        # We only need to check invoice-backed registration codes, and whether
        # their invoice is still valid.
        if redeemed_registration.invoice_item:
            if not redeemed_registration.invoice_item.invoice.is_valid:
                blocked = True
                # disabling email notifications for unpaid registration courses
                Optout.objects.get_or_create(user=request.user, course_id=course_key)
                log.info(
                    u"User %s (%s) opted out of receiving emails from course %s",
                    request.user.username,
                    request.user.email,
                    course_key,
                )
                track.views.server_track(
                    request,
                    "change-email1-settings",
                    {"receive_emails": "no", "course": course_key.to_deprecated_string()},
                    page='dashboard',
                )
                break

    return blocked


@login_required
@ensure_csrf_cookie
def dashboard(request):
    """
    Provides the LMS dashboard view

    TODO: This is lms specific and does not belong in common code.

    Arguments:
        request: The request object.

    Returns:
        The dashboard response.
    """
    user = request.user
    if not UserProfile.has_registered(user):
        logout(request)
        return redirect(reverse('dashboard'))

    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)

    enable_verified_certificates = configuration_helpers.get_value(
        'ENABLE_VERIFIED_CERTIFICATES',
        settings.FEATURES.get('ENABLE_VERIFIED_CERTIFICATES')
    )
    display_course_modes_on_dashboard = configuration_helpers.get_value(
        'DISPLAY_COURSE_MODES_ON_DASHBOARD',
        settings.FEATURES.get('DISPLAY_COURSE_MODES_ON_DASHBOARD', True)
    )

    # we want to filter and only show enrollments for courses within
    # the 'ORG' defined in configuration.
    course_org_filter = configuration_helpers.get_value('course_org_filter')

    # Filter out any courses belonging to an org that has been claimed
    # by a site configuration.
    org_filter_out_set = configuration_helpers.get_all_orgs()

    # remove our current org from the "filter out" list, if applicable
    if course_org_filter:
        org_filter_out_set.remove(course_org_filter)

    # Build our (course, enrollment) list for the user, but ignore any courses that no
    # longer exist (because the course IDs have changed). Still, we don't delete those
    # enrollments, because it could have been a data push snafu.
    course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))

    # sort the enrollment pairs by the enrollment date
    course_enrollments.sort(key=lambda x: x.created, reverse=True)

    # Retrieve the course modes for each course
    enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
    __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
    course_modes_by_course = {
        course_id: {
            mode.slug: mode
            for mode in modes
        }
        for course_id, modes in unexpired_course_modes.iteritems()
    }

    # Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment. enrollment_message = _create_recent_enrollment_message( course_enrollments, course_modes_by_course ) course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True) message = "" if not user.is_active: message = render_to_string( 'registration/activate_account_notice.html', {'email': user.email, 'platform_name': platform_name} ) # Global staff can see what courses errored on their dashboard staff_access = False errored_courses = {} if has_access(user, 'staff', 'global'): # Show any courses that errored on load staff_access = True errored_courses = modulestore().get_errored_courses() show_courseware_links_for = frozenset( enrollment.course_id for enrollment in course_enrollments if has_access(request.user, 'load', enrollment.course_overview) and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview) ) # Find programs associated with courses being displayed. This information # is passed in the template context to allow rendering of program-related # information on the dashboard. meter = programs_utils.ProgramProgressMeter(user, enrollments=course_enrollments) programs_by_run = meter.engaged_programs(by_run=True) # Construct a dictionary of course mode information # used to render the course list. We re-use the course modes dict # we loaded earlier to avoid hitting the database. course_mode_info = { enrollment.course_id: complete_course_mode_info( enrollment.course_id, enrollment, modes=course_modes_by_course[enrollment.course_id] ) for enrollment in course_enrollments } # Determine the per-course verification status # This is a dictionary in which the keys are course locators # and the values are one of: # # VERIFY_STATUS_NEED_TO_VERIFY # VERIFY_STATUS_SUBMITTED # VERIFY_STATUS_APPROVED # VERIFY_STATUS_MISSED_DEADLINE # # Each of which correspond to a particular message to display # next to the course on the dashboard. # # If a course is not included in this dictionary, # there is no verification messaging to display. 
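    # An illustrative result, per the description above:
    #   {CourseKey.from_string('edX/DemoX/Demo_Course'): VERIFY_STATUS_NEED_TO_VERIFY}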
verify_status_by_course = check_verify_status_by_course(user, course_enrollments) cert_statuses = { enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode) for enrollment in course_enrollments } # only show email settings for Mongo course and when bulk email is turned on show_email_settings_for = frozenset( enrollment.course_id for enrollment in course_enrollments if ( BulkEmailFlag.feature_enabled(enrollment.course_id) ) ) # Verification Attempts # Used to generate the "you must reverify for course x" banner verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user) # Gets data for midcourse reverifications, if any are necessary or have failed statuses = ["approved", "denied", "pending", "must_reverify"] reverifications = reverification_info(statuses) show_refund_option_for = frozenset( enrollment.course_id for enrollment in course_enrollments if enrollment.refundable() ) block_courses = frozenset( enrollment.course_id for enrollment in course_enrollments if is_course_blocked( request, CourseRegistrationCode.objects.filter( course_id=enrollment.course_id, registrationcoderedemption__redeemed_by=request.user ), enrollment.course_id ) ) enrolled_courses_either_paid = frozenset( enrollment.course_id for enrollment in course_enrollments if enrollment.is_paid_course() ) # If there are *any* denied reverifications that have not been toggled off, # we'll display the banner denied_banner = any(item.display for item in reverifications["denied"]) # Populate the Order History for the side-bar. order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set) # get list of courses having pre-requisites yet to be completed courses_having_prerequisites = frozenset( enrollment.course_id for enrollment in course_enrollments if enrollment.course_overview.pre_requisite_courses ) courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites) if 'notlive' in request.GET: redirect_message = _("The course you are looking for does not start until {date}.").format( date=request.GET['notlive'] ) elif 'course_closed' in request.GET: redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format( date=request.GET['course_closed'] ) else: redirect_message = '' context = { 'enrollment_message': enrollment_message, 'redirect_message': redirect_message, 'course_enrollments': course_enrollments, 'course_optouts': course_optouts, 'message': message, 'staff_access': staff_access, 'errored_courses': errored_courses, 'show_courseware_links_for': show_courseware_links_for, 'all_course_modes': course_mode_info, 'cert_statuses': cert_statuses, 'credit_statuses': _credit_statuses(user, course_enrollments), 'show_email_settings_for': show_email_settings_for, 'reverifications': reverifications, 'verification_status': verification_status, 'verification_status_by_course': verify_status_by_course, 'verification_msg': verification_msg, 'show_refund_option_for': show_refund_option_for, 'block_courses': block_courses, 'denied_banner': denied_banner, 'billing_email': settings.PAYMENT_SUPPORT_EMAIL, 'user': user, 'logout_url': reverse('logout'), 'platform_name': platform_name, 'enrolled_courses_either_paid': enrolled_courses_either_paid, 'provider_states': [], 'order_history_list': order_history_list, 'courses_requirements_not_met': courses_requirements_not_met, 'nav_hidden': True, 'programs_by_run': programs_by_run, 'show_program_listing': 
ProgramsApiConfig.current().show_program_listing, 'disable_courseware_js': True, 'display_course_modes_on_dashboard': enable_verified_certificates and display_course_modes_on_dashboard, } ecommerce_service = EcommerceService() if ecommerce_service.is_enabled(request.user): context.update({ 'use_ecommerce_payment_flow': True, 'ecommerce_payment_page': ecommerce_service.payment_page_url(), }) response = render_to_response('dashboard.html', context) set_user_info_cookie(response, request) return response def _create_and_login_nonregistered_user(request): new_student = UserProfile.create_nonregistered_user() new_student.backend = settings.AUTHENTICATION_BACKENDS[0] login(request, new_student) request.session.set_expiry(604800) # set session to very long to reduce number of nonreg users created @require_POST def setup_sneakpeek(request, course_id): course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) if not CoursePreference.course_allows_nonregistered_access(course_key): return HttpResponseForbidden("Cannot access the course") if not request.user.is_authenticated(): # if there's no user, create a nonregistered user _create_and_login_nonregistered_user(request) elif UserProfile.has_registered(request.user): # registered users can't sneakpeek, so log them out and create a new nonregistered user logout(request) _create_and_login_nonregistered_user(request) # fall-through case is a sneakpeek user that's already logged in can_enroll, error_msg = _check_can_enroll_in_course(request.user, course_key, access_type='within_enrollment_period') if not can_enroll: log.error(error_msg) return HttpResponseBadRequest(error_msg) CourseEnrollment.enroll(request.user, course_key) return HttpResponse("OK. Allowed sneakpeek") def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name """ Builds a recent course enrollment message. Constructs a new message template based on any recent course enrollments for the student. Args: course_enrollments (list[CourseEnrollment]): a list of course enrollments. course_modes (dict): Mapping of course ID's to course mode dictionaries. Returns: A string representing the HTML message output from the message template. None if there are no recently enrolled courses. """ recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments) if recently_enrolled_courses: enroll_messages = [ { "course_id": enrollment.course_overview.id, "course_name": enrollment.course_overview.display_name, "allow_donation": _allow_donation(course_modes, enrollment.course_overview.id, enrollment) } for enrollment in recently_enrolled_courses ] platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME) return render_to_string( 'enrollment/course_enrollment_message.html', {'course_enrollment_messages': enroll_messages, 'platform_name': platform_name} ) def _get_recently_enrolled_courses(course_enrollments): """ Given a list of enrollments, filter out all but recent enrollments. Args: course_enrollments (list[CourseEnrollment]): A list of course enrollments. Returns: list[CourseEnrollment]: A list of recent course enrollments. """ seconds = DashboardConfiguration.current().recent_enrollment_time_delta time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds)) return [ enrollment for enrollment in course_enrollments # If the enrollment has no created date, we are explicitly excluding the course # from the list of recent enrollments. 
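        # e.g. with recent_enrollment_time_delta = 600 (an illustrative site
        # setting), only enrollments created within the last ten minutes
        # count as "recent".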
        if enrollment.is_active and enrollment.created > time_delta
    ]


def _allow_donation(course_modes, course_id, enrollment):
    """Determines if the dashboard will request donations for the given course.

    Check if donations are configured for the platform, and if the current course is accepting donations.

    Args:
        course_modes (dict): Mapping of course IDs to course mode dictionaries.
        course_id (str): The unique identifier for the course.
        enrollment (CourseEnrollment): The enrollment object for the user in the course.

    Returns:
        True if the course is allowing donations.

    """
    if course_id not in course_modes:
        flat_unexpired_modes = {
            unicode(course_id): [mode for mode in modes]
            for course_id, modes in course_modes.iteritems()
        }
        flat_all_modes = {
            unicode(course_id): [mode.slug for mode in modes]
            for course_id, modes in CourseMode.all_modes_for_courses([course_id]).iteritems()
        }
        log.error(
            u'Cannot find `%s` in course modes `%s`. All modes: `%s`',
            course_id,
            flat_unexpired_modes,
            flat_all_modes
        )
    donations_enabled = DonationConfiguration.current().enabled
    return (
        donations_enabled and
        enrollment.mode in course_modes[course_id] and
        course_modes[course_id][enrollment.mode].min_price == 0
    )


def _update_email_opt_in(request, org):
    """Helper function used to hit the profile API if email opt-in is enabled."""
    email_opt_in = request.POST.get('email_opt_in')
    if email_opt_in is not None:
        email_opt_in_boolean = email_opt_in == 'true'
        preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)


def _credit_statuses(user, course_enrollments):
    """
    Retrieve the status for credit courses.

    A credit course is a course for which a user can purchase college credit.
    The current flow is:

    1. User becomes eligible for credit (submits verifications, passes the course, etc.)
    2. User purchases credit from a particular credit provider.
    3. User requests credit from the provider, usually creating an account
       on the provider's site.
    4. The credit provider notifies us whether the user's request for credit
       has been accepted or rejected.

    The dashboard is responsible for communicating the user's state in this flow.

    Arguments:
        user (User): The currently logged-in user.
        course_enrollments (list[CourseEnrollment]): List of enrollments for the user.

    Returns: dict

    The returned dictionary has keys that are `CourseKey`s and values that
    are dictionaries with:

        * eligible (bool): True if the user is eligible for credit in this course.
        * deadline (datetime): The deadline for purchasing and requesting credit for this course.
        * purchased (bool): Whether the user has purchased credit for this course.
        * provider_name (string): The display name of the credit provider.
        * provider_status_url (string): A URL the user can visit to check on their credit request status.
        * request_status (string): Either "pending", "approved", or "rejected"
        * error (bool): If true, an unexpected error occurred when retrieving the credit status,
            so the user should contact the support team.
    Example:
    >>> _credit_statuses(user, course_enrollments)
    {
        CourseKey.from_string("edX/DemoX/Demo_Course"): {
            "course_key": "edX/DemoX/Demo_Course",
            "eligible": True,
            "deadline": 2015-11-23 00:00:00 UTC,
            "purchased": True,
            "provider_name": "Hogwarts",
            "provider_status_url": "http://example.com/status",
            "request_status": "pending",
            "error": False
        }
    }

    """
    from openedx.core.djangoapps.credit import api as credit_api

    # Feature flag off
    if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
        return {}

    request_status_by_course = {
        request["course_key"]: request["status"]
        for request in credit_api.get_credit_requests_for_user(user.username)
    }

    credit_enrollments = {
        enrollment.course_id: enrollment
        for enrollment in course_enrollments
        if enrollment.mode == "credit"
    }

    # When a user purchases credit in a course, the user's enrollment
    # mode is set to "credit" and an enrollment attribute is set
    # with the ID of the credit provider. We retrieve *all* such attributes
    # here to minimize the number of database queries.
    purchased_credit_providers = {
        attribute.enrollment.course_id: attribute.value
        for attribute in CourseEnrollmentAttribute.objects.filter(
            namespace="credit",
            name="provider_id",
            enrollment__in=credit_enrollments.values()
        ).select_related("enrollment")
    }

    provider_info_by_id = {
        provider["id"]: provider
        for provider in credit_api.get_credit_providers()
    }

    statuses = {}
    for eligibility in credit_api.get_eligibilities_for_user(user.username):
        course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
        providers_names = get_credit_provider_display_names(course_key)
        status = {
            "course_key": unicode(course_key),
            "eligible": True,
            "deadline": eligibility["deadline"],
            "purchased": course_key in credit_enrollments,
            "provider_name": make_providers_strings(providers_names),
            "provider_status_url": None,
            "provider_id": None,
            "request_status": request_status_by_course.get(course_key),
            "error": False,
        }

        # If the user has purchased credit, then include information about the credit
        # provider from which the user purchased credit.
        # We retrieve the provider's ID from an "enrollment attribute" set on the user's
        # enrollment when the user's order for credit is fulfilled by the E-Commerce service.
        if status["purchased"]:
            provider_id = purchased_credit_providers.get(course_key)
            if provider_id is None:
                status["error"] = True
                log.error(
                    u"Could not find credit provider associated with credit enrollment "
                    u"for user %s in course %s. The user will not be able to see his or her "
                    u"credit request status on the student dashboard. This attribute should "
                    u"have been set when the user purchased credit in the course.",
                    user.id, course_key
                )
            else:
                provider_info = provider_info_by_id.get(provider_id, {})
                status["provider_name"] = provider_info.get("display_name")
                status["provider_status_url"] = provider_info.get("status_url")
                status["provider_id"] = provider_id

        statuses[course_key] = status

    return statuses


@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
    """
    Modify the enrollment status for the logged-in user.

    TODO: This is lms specific and does not belong in common code.

    The request parameter must be a POST request (other methods return 405)
    that specifies course_id and enrollment_action parameters.
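    For example (illustrative), a POST body of
    course_id=edX/DemoX/Demo_Course&enrollment_action=enroll
    enrolls the current user in that course, subject to the checks below.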
    If course_id or enrollment_action is not specified, if course_id is not valid,
    if enrollment_action is something other than "enroll" or "unenroll", if
    enrollment_action is "enroll" and enrollment is closed for the course, or if
    enrollment_action is "unenroll" and the user is not enrolled in the course,
    a 400 error will be returned.

    If the user is not logged in, 403 will be returned; it is important that
    only this case return 403 so the front end can redirect the user to a
    registration or login page when this happens. This function should only
    be called from an AJAX request, so the error messages in the responses
    should never actually be user-visible.

    Args:
        request (`Request`): The Django request object

    Keyword Args:
        check_access (boolean): If True, we check that an accessible course actually
            exists for the given course_key before we enroll the student.
            The default is True; only legacy code or code with non-standard flows
            (ex. beta tester invitations) should pass False, and for any standard
            enrollment flow you want this to be True.

    Returns:
        Response

    """
    # Get the user
    user = request.user

    # Ensure the user has a registered account
    if not UserProfile.has_registered(user):
        return HttpResponseForbidden()

    # Ensure we received a course_id
    action = request.POST.get("enrollment_action")
    if 'course_id' not in request.POST:
        return HttpResponseBadRequest(_("Course id not specified"))

    try:
        course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
    except InvalidKeyError:
        log.warning(
            u"User %s tried to %s with invalid course id: %s",
            user.username,
            action,
            request.POST.get("course_id"),
        )
        return HttpResponseBadRequest(_("Invalid course id"))

    if action == "enroll":
        # Make sure the course exists
        # We don't do this check on unenroll, or a bad course id can't be unenrolled from
        if not modulestore().has_course(course_id):
            log.warning(
                u"User %s tried to enroll in non-existent course %s",
                user.username,
                course_id
            )
            return HttpResponseBadRequest(_("Course id is invalid"))

        can_enroll, error_msg = _check_can_enroll_in_course(user, course_id)
        if not can_enroll:
            return HttpResponseBadRequest(error_msg)

        # Record the user's email opt-in preference
        if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
            _update_email_opt_in(request, course_id.org)

        available_modes = CourseMode.modes_for_course_dict(course_id)

        # Check whether the user is blocked from enrolling in this course
        # This can occur if the user's IP is on a global blacklist
        # or if the user is enrolling in a country in which the course
        # is not available.
        redirect_url = embargo_api.redirect_if_blocked(
            course_id, user=user,
            ip_address=get_ip(request),
            url=request.path
        )
        if redirect_url:
            return HttpResponse(redirect_url)

        # Check that auto enrollment is allowed for this course
        # (= the course is NOT behind a paywall)
        if CourseMode.can_auto_enroll(course_id):
            # Enroll the user using the default mode (audit)
            # We're assuming that users of the course enrollment table
            # will NOT try to look up the course enrollment model
            # by its slug. If they do, it's possible (based on the state of the database)
            # for no such model to exist, even though we've set the enrollment type
            # to "audit".
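            # Illustrative: auto_enroll_mode generally resolves to a free mode
            # (e.g. 'audit' or 'honor') from available_modes; a None result
            # means no mode qualifies for automatic enrollment.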
try: enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes) if enroll_mode: CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode) except Exception: # pylint: disable=broad-except return HttpResponseBadRequest(_("Could not enroll")) try: course = modulestore().get_course(course_id) except ItemNotFoundError: log.warning("User {0} tried to enroll in non-existent course {1}" .format(user.username, course_id)) return HttpResponseBadRequest(_("Course id is invalid")) enrollment_email_result = json.loads(notify_enrollment_by_email(course, user, request).content) if ('is_success' in enrollment_email_result and not enrollment_email_result['is_success']): return HttpResponseBadRequest(_(enrollment_email_result['error'])) # If we have more than one course mode or professional ed is enabled, # then send the user to the choose your track page. # (In the case of no-id-professional/professional ed, this will redirect to a page that # funnels users directly into the verification / payment flow) if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes): return HttpResponse( reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)}) ) # Otherwise, there is only one mode available (the default) return HttpResponse() elif action == "unenroll": enrollment = CourseEnrollment.get_enrollment(user, course_id) if not enrollment: return HttpResponseBadRequest(_("You are not enrolled in this course")) certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode) if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES: return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course")) CourseEnrollment.unenroll(user, course_id) return HttpResponse() else: return HttpResponseBadRequest(_("Enrollment action is invalid")) def _check_can_enroll_in_course(user, course_key, access_type="enroll"): """ Refactored check for user being able to enroll in course Returns (bool, error_message), where error message is only applicable if bool == False """ try: course = modulestore().get_course(course_key) except ItemNotFoundError: log.warning("User {0} tried to enroll in non-existent course {1}" .format(user.username, course_key)) return False, _("Course id is invalid") if not has_access(user, access_type, course): return False, _("Enrollment is closed") return True, "" # Need different levels of logging @ensure_csrf_cookie def login_user(request, error=""): # pylint: disable=too-many-statements,unused-argument """AJAX request to log in the user.""" backend_name = None email = None password = None redirect_url = None response = None running_pipeline = None third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request) third_party_auth_successful = False trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password')) user = None platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME) if third_party_auth_requested and not trumped_by_first_party_auth: # The user has already authenticated via third-party auth and has not # asked to do first party auth by supplying a username or password. We # now want to put them through the same logging and cookie calculation # logic as with first-party auth. 
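        # For reference, running_pipeline is a dict shaped roughly like
        # (illustrative): {'backend': 'google-oauth2',
        #                  'kwargs': {'username': ..., 'uid': ..., ...}}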
running_pipeline = pipeline.get(request) username = running_pipeline['kwargs'].get('username') backend_name = running_pipeline['backend'] third_party_uid = running_pipeline['kwargs']['uid'] requested_provider = provider.Registry.get_from_pipeline(running_pipeline) try: user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid) third_party_auth_successful = True except User.DoesNotExist: AUDIT_LOG.warning( u"Login failed - user with username {username} has no social auth " "with backend_name {backend_name}".format( username=username, backend_name=backend_name) ) message = _( "You've successfully logged into your {provider_name} account, " "but this account isn't linked with an {platform_name} account yet." ).format( platform_name=platform_name, provider_name=requested_provider.name, ) message += "<br/><br/>" message += _( "Use your {platform_name} username and password to log into {platform_name} below, " "and then link your {platform_name} account with {provider_name} from your dashboard." ).format( platform_name=platform_name, provider_name=requested_provider.name, ) message += "<br/><br/>" message += _( "If you don't have an {platform_name} account yet, " "click <strong>Register</strong> at the top of the page." ).format( platform_name=platform_name ) return HttpResponse(message, content_type="text/plain", status=403) else: if 'email' not in request.POST or 'password' not in request.POST: return JsonResponse({ "success": False, # TODO: User error message "value": _('There was an error receiving your login information. Please email us.'), }) # TODO: this should be status code 400 email = request.POST['email'] password = request.POST['password'] try: user = User.objects.get(email=email) except User.DoesNotExist: if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning(u"Login failed - Unknown user email") else: AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email)) # check if the user has a linked shibboleth account, if so, redirect the user to shib-login # This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu # address into the Gmail login. if settings.FEATURES.get('AUTH_USE_SHIB') and user: try: eamap = ExternalAuthMap.objects.get(user=user) if eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX): return JsonResponse({ "success": False, "redirect": reverse('shib-login'), }) # TODO: this should be status code 301 # pylint: disable=fixme except ExternalAuthMap.DoesNotExist: # This is actually the common case, logging in user without external linked login AUDIT_LOG.info(u"User %s w/o external auth attempting login", user) # see if account has been locked out due to excessive login failures user_found_by_email_lookup = user if user_found_by_email_lookup and LoginFailures.is_feature_enabled(): if LoginFailures.is_user_locked_out(user_found_by_email_lookup): lockout_message = ungettext( "This account has been temporarily locked due to excessive login failures. " "Try again in {minutes} minute. For security reasons, " "resetting the password will NOT lift the lockout. Please wait for {minutes} minute.", "This account has been temporarily locked due to excessive login failures. " "Try again in {minutes} minutes. For security reasons, " "resetting the password will NOT lift the lockout. 
Please wait for {minutes} minutes.", LOGIN_LOCKOUT_PERIOD_PLUS_FIVE_MINUTES ).format( minutes=LOGIN_LOCKOUT_PERIOD_PLUS_FIVE_MINUTES, ) return JsonResponse({ "success": False, "value": lockout_message, }) # TODO: this should be status code 429 # pylint: disable=fixme # see if the user must reset his/her password due to any policy settings if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup): return JsonResponse({ "success": False, "value": _('Your password has expired due to password policy on this account. You must ' 'reset your password before you can log in again. Please click the ' '"Forgot Password" link on this page to reset your password before logging in again.'), }) # TODO: this should be status code 403 # pylint: disable=fixme # if the user doesn't exist, we want to set the username to an invalid # username so that authentication is guaranteed to fail and we can take # advantage of the ratelimited backend username = user.username if user else "" if not third_party_auth_successful: try: user = authenticate(username=username, password=password, request=request) # this occurs when there are too many attempts from the same IP address except RateLimitException: return JsonResponse({ "success": False, "value": _('Too many failed login attempts. Try again later.'), }) # TODO: this should be status code 429 # pylint: disable=fixme if user is None: # tick the failed login counters if the user exists in the database if user_found_by_email_lookup and LoginFailures.is_feature_enabled(): LoginFailures.increment_lockout_counter(user_found_by_email_lookup) # if we didn't find this username earlier, the account for this email # doesn't exist, and doesn't have a corresponding password if username != "": if settings.FEATURES['SQUELCH_PII_IN_LOGS']: loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>" AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id)) else: AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email)) return JsonResponse({ "success": False, "value": _('Email or password is incorrect.'), }) # TODO: this should be status code 400 # pylint: disable=fixme # successful login, clear failed login attempts counters, if applicable if LoginFailures.is_feature_enabled(): LoginFailures.clear_lockout_counter(user) # Track the user's sign in if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY: tracking_context = tracker.get_tracker().resolve_context() analytics.identify( user.id, { 'email': email, 'username': username }, { # Disable MailChimp because we don't want to update the user's email # and username in MailChimp on every page load. We only need to capture # this data on registration/activation. 'MailChimp': False } ) analytics.track( user.id, "edx.bi.user.account.authenticated", { 'category': "conversion", 'label': request.POST.get('course_id'), 'provider': None }, context={ 'ip': tracking_context.get('ip'), 'Google Analytics': { 'clientId': tracking_context.get('client_id') } } ) if user is not None and user.is_active: try: # We do not log here, because we have a handler registered # to perform logging on successful logins. login(request, user) if request.POST.get('remember') == 'true': request.session.set_expiry(604800) log.debug("Setting user session to never expire") else: request.session.set_expiry(0) except Exception as exc: # pylint: disable=broad-except AUDIT_LOG.critical("Login failed - Could not create session. 
Is memcached running?") log.critical("Login failed - Could not create session. Is memcached running?") log.exception(exc) raise redirect_url = None # The AJAX method calling should know the default destination upon success if third_party_auth_successful: redirect_url = pipeline.get_complete_url(backend_name) response = JsonResponse({ "success": True, "redirect_url": redirect_url, }) # Ensure that the external marketing site can # detect that the user is logged in. return set_logged_in_cookies(request, response, user) if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id)) else: AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username)) reactivation_email_for_user(user) not_activated_msg = _("Before you sign in, you need to activate your account. We have sent you an " "email message with instructions for activating your account.") return JsonResponse({ "success": False, "value": not_activated_msg, }) # TODO: this should be status code 400 # pylint: disable=fixme @csrf_exempt @require_POST @social_utils.strategy("social:complete") def login_oauth_token(request, backend): """ Authenticate the client using an OAuth access token by using the token to retrieve information from a third party and matching that information to an existing user. """ warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning) backend = request.backend if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2): if "access_token" in request.POST: # Tell third party auth pipeline that this is an API call request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API user = None try: user = backend.do_auth(request.POST["access_token"]) except (HTTPError, AuthException): pass # do_auth can return a non-User object if it fails if user and isinstance(user, User): login(request, user) return JsonResponse(status=204) else: # Ensure user does not re-enter the pipeline request.social_strategy.clean_partial_pipeline() return JsonResponse({"error": "invalid_token"}, status=401) else: return JsonResponse({"error": "invalid_request"}, status=400) raise Http404 @require_GET @login_required @ensure_csrf_cookie def manage_user_standing(request): """ Renders the view used to manage user standing. Also displays a table of user accounts that have been disabled and who disabled them. """ if not request.user.is_staff: raise Http404 all_disabled_accounts = UserStanding.objects.filter( account_status=UserStanding.ACCOUNT_DISABLED ) all_disabled_users = [standing.user for standing in all_disabled_accounts] headers = ['username', 'account_changed_by'] rows = [] for user in all_disabled_users: row = [user.username, user.standing.changed_by] rows.append(row) context = {'headers': headers, 'rows': rows} return render_to_response("manage_user_standing.html", context) @require_POST @login_required @ensure_csrf_cookie def disable_account_ajax(request): """ Ajax call to change user standing. 
Endpoint of the form in manage_user_standing.html """ if not request.user.is_staff: raise Http404 username = request.POST.get('username') context = {} if username is None or username.strip() == '': context['message'] = _('Please enter a username') return JsonResponse(context, status=400) account_action = request.POST.get('account_action') if account_action is None: context['message'] = _('Please choose an option') return JsonResponse(context, status=400) username = username.strip() try: user = User.objects.get(username=username) except User.DoesNotExist: context['message'] = _("User with username {} does not exist").format(username) return JsonResponse(context, status=400) else: user_account, _success = UserStanding.objects.get_or_create( user=user, defaults={'changed_by': request.user}, ) if account_action == 'disable': user_account.account_status = UserStanding.ACCOUNT_DISABLED context['message'] = _("Successfully disabled {}'s account").format(username) log.info(u"%s disabled %s's account", request.user, username) elif account_action == 'reenable': user_account.account_status = UserStanding.ACCOUNT_ENABLED context['message'] = _("Successfully reenabled {}'s account").format(username) log.info(u"%s reenabled %s's account", request.user, username) else: context['message'] = _("Unexpected account status") return JsonResponse(context, status=400) user_account.changed_by = request.user user_account.standing_last_changed_at = datetime.datetime.now(UTC) user_account.save() return JsonResponse(context) @login_required @ensure_csrf_cookie def change_setting(request): """JSON call to change a profile setting: Right now, location""" # TODO (vshnayder): location is no longer used u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache if 'location' in request.POST: u_prof.location = request.POST['location'] u_prof.save() return JsonResponse({ "success": True, "location": u_prof.location, }) class AccountValidationError(Exception): def __init__(self, message, field): super(AccountValidationError, self).__init__(message) self.field = field @receiver(post_save, sender=User) def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument """ handler that saves the user Signup Source when the user is created """ if 'created' in kwargs and kwargs['created']: site = configuration_helpers.get_value('SITE_NAME') if site: user_signup_source = UserSignupSource(user=kwargs['instance'], site=site) user_signup_source.save() log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id)) def _do_create_account(form, custom_form=None): """ Given cleaned post variables, create the User and UserProfile objects, as well as the registration for this user. Returns a tuple (User, UserProfile, Registration). Note: this function is also used for creating test users. """ errors = {} errors.update(form.errors) if custom_form: errors.update(custom_form.errors) if errors: raise ValidationError(errors) user = User( username=form.cleaned_data["username"], email=form.cleaned_data["email"], is_active=False ) user.set_password(form.cleaned_data["password"]) registration = Registration() # TODO: Rearrange so that if part of the process fails, the whole process fails. # Right now, we can have e.g. 
no registration e-mail sent out and a zombie account
    try:
        with transaction.atomic():
            user.save()
            if custom_form:
                custom_model = custom_form.save(commit=False)
                custom_model.user = user
                custom_model.save()
    except IntegrityError:
        # Figure out the cause of the integrity error
        if len(User.objects.filter(username=user.username)) > 0:
            raise AccountValidationError(
                _("An account with the Public Username '{username}' already exists.").format(username=user.username),
                field="username"
            )
        elif len(User.objects.filter(email=user.email)) > 0:
            raise AccountValidationError(
                _("An account with the Email '{email}' already exists.").format(email=user.email),
                field="email"
            )
        else:
            raise

    # add this account creation to password history
    # NOTE: this will be a NOP unless the feature has been turned on in configuration
    password_history_entry = PasswordHistory()
    password_history_entry.create(user)

    registration.register(user)

    profile_fields = [
        "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
        "year_of_birth"
    ]
    profile = UserProfile(
        user=user,
        **{key: form.cleaned_data.get(key) for key in profile_fields}
    )
    extended_profile = form.cleaned_extended_profile
    if extended_profile:
        profile.meta = json.dumps(extended_profile)
    try:
        profile.save()
    except Exception:  # pylint: disable=broad-except
        log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
        raise
    return (user, profile, registration)


def create_account_with_params(request, params):
    """
    Given a request and a dict of parameters (which may or may not have
    come from the request), create an account for the requesting user,
    including creating a comments service user object and sending
    an activation email. This also takes external/third-party auth
    into account, updates that as necessary, and authenticates the user
    for the request's session.

    Does not return anything.

    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.

    Issues with this code:
    * It is not transactional. If there is a failure part-way, an incomplete
      account will be created and left in the database.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * It is over 300 lines long (!) and includes disparate functionality, from
      registration e-mails to all sorts of other things. It should be broken
      up into semantically meaningful functions.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())

    # allow the configuration to define a custom set of required/optional/hidden fields
    extra_fields = configuration_helpers.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    )

    # Boolean of whether a 3rd party auth provider and credentials were provided in
    # the API so the newly created account can link with the 3rd party account.
    #
    # Note: this is orthogonal to the 3rd party authentication pipeline that occurs
    # when the account is created via the browser and redirect URLs.
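    # Illustrative API payload for the social-auth linking path below:
    #   {'provider': 'google-oauth2', 'access_token': '<token>',
    #    'email': ..., 'username': ..., ...}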
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)): params["password"] = pipeline.make_random_password() # Add a form requirement for data sharing consent if the EnterpriseCustomer # for the request requires it at login extra_fields['data_sharing_consent'] = data_sharing_consent_requirement_at_login(request) # if doing signup for an external authorization, then get email, password, name from the eamap # don't use the ones from the form, since the user could have hacked those # unless originally we didn't get a valid email or name from the external auth # TODO: We do not check whether these values meet all necessary criteria, such as email length do_external_auth = 'ExternalAuthMap' in request.session if do_external_auth: eamap = request.session['ExternalAuthMap'] try: validate_email(eamap.external_email) params["email"] = eamap.external_email except ValidationError: pass if eamap.external_name.strip() != '': params["name"] = eamap.external_name params["password"] = eamap.internal_password log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"]) extended_profile_fields = configuration_helpers.get_value('extended_profile_fields', []) enforce_password_policy = ( settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and not do_external_auth ) # Can't have terms of service for certain SHIB users, like at Stanford registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {}) tos_required = ( registration_fields.get('terms_of_service') != 'hidden' or registration_fields.get('honor_code') != 'hidden' ) and ( not settings.FEATURES.get("AUTH_USE_SHIB") or not settings.FEATURES.get("SHIB_DISABLE_TOS") or not do_external_auth or not eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX) ) if not tos_required: extra_fields.pop('terms_of_service', None) form = AccountCreationForm( data=params, extra_fields=extra_fields, extended_profile_fields=extended_profile_fields, enforce_username_neq_password=True, enforce_password_policy=enforce_password_policy, tos_required=tos_required, ) custom_form = get_registration_extension_form(data=params) # Perform operations within a transaction that are critical to account creation with transaction.atomic(): # first, create the account (user, profile, registration) = _do_create_account(form, custom_form) # next, link the account with social auth, if provided via the API. 
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code) if should_link_with_social_auth: backend_name = params['provider'] request.social_strategy = social_utils.load_strategy(request) redirect_uri = reverse('social:complete', args=(backend_name, )) request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri) social_access_token = params.get('access_token') if not social_access_token: raise ValidationError({ 'access_token': [ _("An access_token is required when passing value ({}) for provider.").format( params['provider'] ) ] }) request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API pipeline_user = None error_message = "" try: pipeline_user = request.backend.do_auth(social_access_token, user=user) except AuthAlreadyAssociated: error_message = _("The provided access_token is already associated with another user.") except (HTTPError, AuthException): error_message = _("The provided access_token is not valid.") if not pipeline_user or not isinstance(pipeline_user, User): # Ensure user does not re-enter the pipeline request.social_strategy.clean_partial_pipeline() raise ValidationError({'access_token': [error_message]}) # Perform operations that are non-critical parts of account creation preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language()) if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'): try: enable_notifications(user) except Exception: # pylint: disable=broad-except log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id)) dog_stats_api.increment("common.student.account_created") # If the user is registering via 3rd party auth, track which provider they use third_party_provider = None running_pipeline = None if third_party_auth.is_enabled() and pipeline.running(request): running_pipeline = pipeline.get(request) third_party_provider = provider.Registry.get_from_pipeline(running_pipeline) # Store received data sharing consent field values in the pipeline for use # by any downstream pipeline elements which require them. running_pipeline['kwargs']['data_sharing_consent'] = form.cleaned_data.get('data_sharing_consent', None) # Track the user's registration if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY: tracking_context = tracker.get_tracker().resolve_context() identity_args = [ user.id, # pylint: disable=no-member { 'email': user.email, 'username': user.username, 'name': profile.name, # Mailchimp requires the age & yearOfBirth to be integers, we send a sane integer default if falsey. 'age': profile.age or -1, 'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year, 'education': profile.level_of_education_display, 'address': profile.mailing_address, 'gender': profile.gender_display, 'country': unicode(profile.country), } ] if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'): identity_args.append({ "MailChimp": { "listId": settings.MAILCHIMP_NEW_USER_LIST_ID } }) analytics.identify(*identity_args) analytics.track( user.id, "edx.bi.user.account.registered", { 'category': 'conversion', 'label': params.get('course_id'), 'provider': third_party_provider.name if third_party_provider else None }, context={ 'ip': tracking_context.get('ip'), 'Google Analytics': { 'clientId': tracking_context.get('client_id') } } ) # Announce registration REGISTER_USER.send(sender=None, user=user, profile=profile) create_comments_service_user(user) # Don't send email if we are: # # 1. Doing load testing. # 2. 
Random user generation for other forms of testing. # 3. External auth bypassing activation. # 4. Have the platform configured to not require e-mail activation. # 5. Registering a new user using a trusted third party provider (with skip_email_verification=True) # # Note that this feature is only tested as a flag set one way or # the other for *new* systems. we need to be careful about # changing settings on a running system to make sure no users are # left in an inconsistent state (or doing a migration if they are). send_email = ( not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and not ( third_party_provider and third_party_provider.skip_email_verification and user.email == running_pipeline['kwargs'].get('details', {}).get('email') ) ) if send_email: dest_addr = user.email context = { 'name': profile.name, 'key': registration.activation_key, } # composes activation email subject = render_to_string('emails/activation_email_subject.txt', context) # Email subject *must not* contain newlines subject = ''.join(subject.splitlines()) message = render_to_string('emails/activation_email.txt', context) from_address = configuration_helpers.get_value( 'email_from_address', settings.DEFAULT_FROM_EMAIL ) if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'): dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL'] message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) + '-' * 80 + '\n\n' + message) send_activation_email.delay(subject, message, from_address, dest_addr) else: registration.activate() _enroll_user_in_pending_courses(user) # Enroll student in any pending courses # Immediately after a user creates an account, we log them in. They are only # logged in until they close the browser. They can't log in again until they click # the activation link from the email. new_user = authenticate(username=user.username, password=params['password']) login(request, new_user) request.session.set_expiry(0) try: record_registration_attributions(request, new_user) # Don't prevent a user from registering due to attribution errors. except Exception: # pylint: disable=broad-except log.exception('Error while attributing cookies to user registration.') # TODO: there is no error checking here to see that the user actually logged in successfully, # and is not yet an active user. if new_user is not None: AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username)) if do_external_auth: eamap.user = new_user eamap.dtsignup = datetime.datetime.now(UTC) eamap.save() AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username) AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap) if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'): log.info('bypassing activation email') new_user.is_active = True new_user.save() AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email)) return new_user def _enroll_user_in_pending_courses(student): """ Enroll student in any pending courses he/she may have. 
""" ceas = CourseEnrollmentAllowed.objects.filter(email=student.email) for cea in ceas: if cea.auto_enroll: enrollment = CourseEnrollment.enroll(student, cea.course_id) manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email) if manual_enrollment_audit is not None: # get the enrolled by user and reason from the ManualEnrollmentAudit table. # then create a new ManualEnrollmentAudit table entry for the same email # different transition state. ManualEnrollmentAudit.create_manual_enrollment_audit( manual_enrollment_audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED, manual_enrollment_audit.reason, enrollment ) def record_affiliate_registration_attribution(request, user): """ Attribute this user's registration to the referring affiliate, if applicable. """ affiliate_id = request.COOKIES.get(settings.AFFILIATE_COOKIE_NAME) if user and affiliate_id: UserAttribute.set_user_attribute(user, REGISTRATION_AFFILIATE_ID, affiliate_id) def record_utm_registration_attribution(request, user): """ Attribute this user's registration to the latest UTM referrer, if applicable. """ utm_cookie_name = RegistrationCookieConfiguration.current().utm_cookie_name utm_cookie = request.COOKIES.get(utm_cookie_name) if user and utm_cookie: utm = json.loads(utm_cookie) for utm_parameter_name in REGISTRATION_UTM_PARAMETERS: utm_parameter = utm.get(utm_parameter_name) if utm_parameter: UserAttribute.set_user_attribute( user, REGISTRATION_UTM_PARAMETERS.get(utm_parameter_name), utm_parameter ) created_at_unixtime = utm.get('created_at') if created_at_unixtime: # We divide by 1000 here because the javascript timestamp generated is in milliseconds not seconds. # PYTHON: time.time() => 1475590280.823698 # JS: new Date().getTime() => 1475590280823 created_at_datetime = datetime.datetime.fromtimestamp(int(created_at_unixtime) / float(1000), tz=UTC) UserAttribute.set_user_attribute( user, REGISTRATION_UTM_CREATED_AT, created_at_datetime ) def record_registration_attributions(request, user): """ Attribute this user's registration based on referrer cookies. """ record_affiliate_registration_attribution(request, user) record_utm_registration_attribution(request, user) @csrf_exempt def create_account(request, post_override=None): """ JSON call to create new edX account. Used by form in signup_modal.html, which is included into navigation.html """ warnings.warn("Please use RegistrationView instead.", DeprecationWarning) try: user = create_account_with_params(request, post_override or request.POST) except AccountValidationError as exc: return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400) except ValidationError as exc: field, error_list = next(exc.message_dict.iteritems()) return JsonResponse( { "success": False, "field": field, "value": error_list[0], }, status=400 ) redirect_url = None # The AJAX method calling should know the default destination upon success # Resume the third-party-auth pipeline if necessary. if third_party_auth.is_enabled() and pipeline.running(request): running_pipeline = pipeline.get(request) redirect_url = pipeline.get_complete_url(running_pipeline['backend']) response = JsonResponse({ 'success': True, 'redirect_url': redirect_url, }) set_logged_in_cookies(request, response, user) return response def auto_auth(request): """ Create or configure a user account, then log in as that user. Enabled only when settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true. 
Accepts the following querystring parameters:
    * `username`, `email`, and `password` for the user account
    * `full_name` for the user profile (the user's full name; defaults to the username)
    * `staff`: Set to "true" to make the user global staff.
    * `course_id`: Enroll the student in the course with `course_id`
    * `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
    * `no_login`: Define this to create the user but not log in
    * `redirect`: Set to "true" to redirect to the `redirect_to` value if set, or to the
      course home page if course_id is defined; otherwise redirect to the dashboard
    * `redirect_to`: redirect to this URL
    * `is_active`: create/update the account with the status provided as 'is_active'

    If username, email, or password are not provided, use
    randomly generated credentials.
    """

    # Generate a unique name to use if none provided
    unique_name = uuid.uuid4().hex[0:30]

    # Use the params from the request, otherwise use these defaults
    username = request.GET.get('username', unique_name)
    password = request.GET.get('password', unique_name)
    email = request.GET.get('email', unique_name + "@example.com")
    full_name = request.GET.get('full_name', username)
    is_staff = request.GET.get('staff', None)
    is_superuser = request.GET.get('superuser', None)
    course_id = request.GET.get('course_id', None)
    redirect_to = request.GET.get('redirect_to', None)
    active_status = request.GET.get('is_active')

    # mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
    enrollment_mode = request.GET.get('enrollment_mode', 'honor')

    active_status = (not active_status or active_status == 'true')

    course_key = None
    if course_id:
        course_key = CourseLocator.from_string(course_id)
    role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
    redirect_when_done = request.GET.get('redirect', '').lower() == 'true' or redirect_to
    login_when_done = 'no_login' not in request.GET

    form = AccountCreationForm(
        data={
            'username': username,
            'email': email,
            'password': password,
            'name': full_name,
        },
        tos_required=False
    )

    # Attempt to create the account.
    # If successful, this will return a tuple containing
    # the new user, profile, and registration objects.
    try:
        user, profile, reg = _do_create_account(form)
    except (AccountValidationError, ValidationError):
        # Attempt to retrieve the existing user.
user = User.objects.get(username=username) user.email = email user.set_password(password) user.is_active = active_status user.save() profile = UserProfile.objects.get(user=user) reg = Registration.objects.get(user=user) # Set the user's global staff bit if is_staff is not None: user.is_staff = (is_staff == "true") user.save() if is_superuser is not None: user.is_superuser = (is_superuser == "true") user.save() if active_status: reg.activate() reg.save() # ensure parental consent threshold is met year = datetime.date.today().year age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT profile.year_of_birth = (year - age_limit) - 1 profile.save() # Enroll the user in a course if course_key is not None: CourseEnrollment.enroll(user, course_key, mode=enrollment_mode) # Apply the roles for role_name in role_names: role = Role.objects.get(name=role_name, course_id=course_key) user.roles.add(role) # Log in as the user if login_when_done: user = authenticate(username=username, password=password) login(request, user) create_comments_service_user(user) # Provide the user with a valid CSRF token # then return a 200 response unless redirect is true if redirect_when_done: # Redirect to specific page if specified if redirect_to: redirect_url = redirect_to # Redirect to course info page if course_id is known elif course_id: try: # redirect to course info page in LMS redirect_url = reverse( 'info', kwargs={'course_id': course_id} ) except NoReverseMatch: # redirect to course outline page in Studio redirect_url = reverse( 'course_handler', kwargs={'course_key_string': course_id} ) else: try: # redirect to dashboard for LMS redirect_url = reverse('dashboard') except NoReverseMatch: # redirect to home for Studio redirect_url = reverse('home') return redirect(redirect_url) elif request.META.get('HTTP_ACCEPT') == 'application/json': response = JsonResponse({ 'created_status': u"Logged in" if login_when_done else "Created", 'username': username, 'email': email, 'password': password, 'user_id': user.id, # pylint: disable=no-member 'anonymous_id': anonymous_id_for_user(user, None), }) else: success_msg = u"{} user {} ({}) with password {} and user_id {}".format( u"Logged in" if login_when_done else "Created", username, email, password, user.id # pylint: disable=no-member ) response = HttpResponse(success_msg) response.set_cookie('csrftoken', csrf(request)['csrf_token']) return response @ensure_csrf_cookie def activate_account(request, key): """When link in activation e-mail is clicked""" regs = Registration.objects.filter(activation_key=key) if len(regs) == 1: user_logged_in = request.user.is_authenticated() already_active = True if not regs[0].user.is_active: regs[0].activate() already_active = False # Enroll student in any pending courses he/she may have if auto_enroll flag is set _enroll_user_in_pending_courses(regs[0].user) resp = render_to_response( "registration/activation_complete.html", { 'user_logged_in': user_logged_in, 'already_active': already_active } ) return resp if len(regs) == 0: return render_to_response( "registration/activation_invalid.html", {'csrf': csrf(request)['csrf_token']} ) return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened.")) @csrf_exempt @require_POST def password_reset(request): """ Attempts to send a password reset e-mail. 
""" # Add some rate limiting here by re-using the RateLimitMixin as a helper class limiter = BadRequestRateLimiter() if limiter.is_rate_limit_exceeded(request): AUDIT_LOG.warning("Rate limit exceeded in password_reset") return HttpResponseForbidden() form = PasswordResetFormNoActive(request.POST) if form.is_valid(): form.save(use_https=request.is_secure(), from_email=configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL), request=request) # When password change is complete, a "edx.user.settings.changed" event will be emitted. # But because changing the password is multi-step, we also emit an event here so that we can # track where the request was initiated. tracker.emit( SETTING_CHANGE_INITIATED, { "setting": "password", "old": None, "new": None, "user_id": request.user.id, } ) destroy_oauth_tokens(request.user) else: # bad user? tick the rate limiter counter AUDIT_LOG.info("Bad password_reset user passed in.") limiter.tick_bad_request_counter(request) return JsonResponse({ 'success': True, 'value': render_to_string('registration/password_reset_done.html', {}), }) def uidb36_to_uidb64(uidb36): """ Needed to support old password reset URLs that use base36-encoded user IDs https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231 Args: uidb36: base36-encoded user ID Returns: base64-encoded user ID. Otherwise returns a dummy, invalid ID """ try: uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36)))) except ValueError: uidb64 = '1' # dummy invalid ID (incorrect padding for base64) return uidb64 def validate_password(user, password): """ Tie in password policy enforcement as an optional level of security protection Args: user: the user object whose password we're checking. password: the user's proposed new password. Returns: is_valid_password: a boolean indicating if the new password passes the validation. err_msg: an error message if there's a violation of one of the password checks. Otherwise, `None`. """ err_msg = None if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False): try: validate_password_strength(password) except ValidationError as err: err_msg = _('Password: ') + '; '.join(err.messages) # also, check the password reuse policy if not PasswordHistory.is_allowable_password_reuse(user, password): if user.is_staff: num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE'] else: num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE'] # Because of how ngettext is, splitting the following into shorter lines would be ugly. # pylint: disable=line-too-long err_msg = ungettext( "You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.", "You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.", num_distinct ).format(num=num_distinct) # also, check to see if passwords are getting reset too frequent if PasswordHistory.is_password_reset_too_soon(user): num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS'] # Because of how ngettext is, splitting the following into shorter lines would be ugly. # pylint: disable=line-too-long err_msg = ungettext( "You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.", "You are resetting passwords too frequently. 
Due to security policies, {num} days must elapse between password resets.", num_days ).format(num=num_days) is_password_valid = err_msg is None return is_password_valid, err_msg def password_reset_confirm_wrapper(request, uidb36=None, token=None): """ A wrapper around django.contrib.auth.views.password_reset_confirm. Needed because we want to set the user as active at this step. We also optionally do some additional password policy checks. """ # convert old-style base36-encoded user id to base64 uidb64 = uidb36_to_uidb64(uidb36) platform_name = { "platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME) } try: uid_int = base36_to_int(uidb36) user = User.objects.get(id=uid_int) except (ValueError, User.DoesNotExist): # if there's any error getting a user, just let django's # password_reset_confirm function handle it. return password_reset_confirm( request, uidb64=uidb64, token=token, extra_context=platform_name ) if request.method == 'POST': password = request.POST['new_password1'] is_password_valid, password_err_msg = validate_password(user, password) if not is_password_valid: # We have a password reset attempt which violates some security # policy. Use the existing Django template to communicate that # back to the user. context = { 'validlink': True, 'form': None, 'title': _('Password reset unsuccessful'), 'err_msg': password_err_msg, } context.update(platform_name) return TemplateResponse( request, 'registration/password_reset_confirm.html', context ) # remember what the old password hash is before we call down old_password_hash = user.password response = password_reset_confirm( request, uidb64=uidb64, token=token, extra_context=platform_name ) # If password reset was unsuccessful a template response is returned (status_code 200). # Check if form is invalid then show an error to the user. # Note if password reset was successful we get response redirect (status_code 302). if response.status_code == 200 and not response.context_data['form'].is_valid(): response.context_data['err_msg'] = _('Error in resetting your password. 
Please try again.') return response # get the updated user updated_user = User.objects.get(id=uid_int) # did the password hash change, if so record it in the PasswordHistory if updated_user.password != old_password_hash: entry = PasswordHistory() entry.create(updated_user) updated_user.backend = 'django.contrib.auth.backends.ModelBackend' login(request, updated_user) else: response = password_reset_confirm( request, uidb64=uidb64, token=token, extra_context=platform_name ) response_was_successful = response.context_data.get('validlink') if response_was_successful and not user.is_active: user.is_active = True user.save() return response def reactivation_email_for_user(user): try: reg = Registration.objects.get(user=user) except Registration.DoesNotExist: return JsonResponse({ "success": False, "error": _('No inactive user with this e-mail exists'), }) # TODO: this should be status code 400 # pylint: disable=fixme context = { 'name': user.profile.name, 'key': reg.activation_key, } subject = render_to_string('emails/activation_email_subject.txt', context) subject = ''.join(subject.splitlines()) message = render_to_string('emails/activation_email.txt', context) from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL) try: user.email_user(subject, message, from_address) except Exception: # pylint: disable=broad-except log.error( u'Unable to send reactivation email from "%s" to "%s"', from_address, user.email, exc_info=True ) return JsonResponse({ "success": False, "error": _('Unable to send reactivation email') }) # TODO: this should be status code 500 # pylint: disable=fixme return JsonResponse({"success": True}) def validate_new_email(user, new_email): """ Given a new email for a user, does some basic verification of the new address If any issues are encountered with verification a ValueError will be thrown. """ try: validate_email(new_email) except ValidationError: raise ValueError(_('Valid e-mail address required.')) if new_email == user.email: raise ValueError(_('Old email is the same as the new email.')) if User.objects.filter(email=new_email).count() != 0: raise ValueError(_('An account with this e-mail already exists.')) def do_email_change_request(user, new_email, activation_key=None): """ Given a new email for a user, does some basic verification of the new address and sends an activation message to the new address. If any issues are encountered with verification or sending the message, a ValueError will be thrown. """ pec_list = PendingEmailChange.objects.filter(user=user) if len(pec_list) == 0: pec = PendingEmailChange() pec.user = user else: pec = pec_list[0] # if activation_key is not passing as an argument, generate a random key if not activation_key: activation_key = uuid.uuid4().hex pec.new_email = new_email pec.activation_key = activation_key pec.save() context = { 'key': pec.activation_key, 'old_email': user.email, 'new_email': pec.new_email } subject = render_to_string('emails/email_change_subject.txt', context) subject = ''.join(subject.splitlines()) message = render_to_string('emails/email_change.txt', context) from_address = configuration_helpers.get_value( 'email_from_address', settings.DEFAULT_FROM_EMAIL ) try: mail.send_mail(subject, message, from_address, [pec.new_email]) except Exception: # pylint: disable=broad-except log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True) raise ValueError(_('Unable to send email activation link. 
Please try again later.')) # When the email address change is complete, a "edx.user.settings.changed" event will be emitted. # But because changing the email address is multi-step, we also emit an event here so that we can # track where the request was initiated. tracker.emit( SETTING_CHANGE_INITIATED, { "setting": "email", "old": context['old_email'], "new": context['new_email'], "user_id": user.id, } ) @ensure_csrf_cookie def confirm_email_change(request, key): # pylint: disable=unused-argument """ User requested a new e-mail. This is called when the activation link is clicked. We confirm with the old e-mail, and update """ with transaction.atomic(): try: pec = PendingEmailChange.objects.get(activation_key=key) except PendingEmailChange.DoesNotExist: response = render_to_response("invalid_email_key.html", {}) transaction.set_rollback(True) return response user = pec.user address_context = { 'old_email': user.email, 'new_email': pec.new_email } if len(User.objects.filter(email=pec.new_email)) != 0: response = render_to_response("email_exists.html", {}) transaction.set_rollback(True) return response subject = render_to_string('emails/email_change_subject.txt', address_context) subject = ''.join(subject.splitlines()) message = render_to_string('emails/confirm_email_change.txt', address_context) u_prof = UserProfile.objects.get(user=user) meta = u_prof.get_meta() if 'old_emails' not in meta: meta['old_emails'] = [] meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()]) u_prof.set_meta(meta) u_prof.save() # Send it to the old email... try: user.email_user( subject, message, configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL) ) except Exception: # pylint: disable=broad-except log.warning('Unable to send confirmation email to old address', exc_info=True) response = render_to_response("email_change_failed.html", {'email': user.email}) transaction.set_rollback(True) return response user.email = pec.new_email user.save() pec.delete() # And send it to the new email... 
try: user.email_user( subject, message, configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL) ) except Exception: # pylint: disable=broad-except log.warning('Unable to send confirmation email to new address', exc_info=True) response = render_to_response("email_change_failed.html", {'email': pec.new_email}) transaction.set_rollback(True) return response response = render_to_response("email_change_successful.html", address_context) return response @require_POST @login_required @ensure_csrf_cookie def change_email_settings(request): """Modify logged-in user's setting for receiving emails from a course.""" user = request.user course_id = request.POST.get("course_id") course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) receive_emails = request.POST.get("receive_emails") if receive_emails: optout_object = Optout.objects.filter(user=user, course_id=course_key) if optout_object: optout_object.delete() log.info( u"User %s (%s) opted in to receive emails from course %s", user.username, user.email, course_id, ) track.views.server_track( request, "change-email-settings", {"receive_emails": "yes", "course": course_id}, page='dashboard', ) else: Optout.objects.get_or_create(user=user, course_id=course_key) log.info( u"User %s (%s) opted out of receiving emails from course %s", user.username, user.email, course_id, ) track.views.server_track( request, "change-email-settings", {"receive_emails": "no", "course": course_id}, page='dashboard', ) return JsonResponse({"success": True}) class LogoutView(TemplateView): """ Logs out user and redirects. The template should load iframes to log the user out of OpenID Connect services. See http://openid.net/specs/openid-connect-logout-1_0.html. """ oauth_client_ids = [] template_name = 'logout.html' # Keep track of the page to which the user should ultimately be redirected. target = reverse_lazy('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/' def dispatch(self, request, *args, **kwargs): # pylint: disable=missing-docstring # We do not log here, because we have a handler registered to perform logging on successful logouts. request.is_from_logout = True # Get the list of authorized clients before we clear the session. self.oauth_client_ids = request.session.get(edx_oauth2_provider.constants.AUTHORIZED_CLIENTS_SESSION_KEY, []) logout(request) # If we don't need to deal with OIDC logouts, just redirect the user. if LogoutViewConfiguration.current().enabled and self.oauth_client_ids: response = super(LogoutView, self).dispatch(request, *args, **kwargs) else: response = redirect(self.target) # Clear the cookie used by the edx.org marketing site delete_logged_in_cookies(response) return response def _build_logout_url(self, url): """ Builds a logout URL with the `no_redirect` query string parameter. Args: url (str): IDA logout URL Returns: str """ scheme, netloc, path, query_string, fragment = urlsplit(url) query_params = parse_qs(query_string) query_params['no_redirect'] = 1 new_query_string = urlencode(query_params, doseq=True) return urlunsplit((scheme, netloc, path, new_query_string, fragment)) def get_context_data(self, **kwargs): context = super(LogoutView, self).get_context_data(**kwargs) # Create a list of URIs that must be called to log the user out of all of the IDAs. 
uris = Client.objects.filter(client_id__in=self.oauth_client_ids, logout_uri__isnull=False).values_list('logout_uri', flat=True) referrer = self.request.META.get('HTTP_REFERER', '').strip('/') logout_uris = [] for uri in uris: if not referrer or (referrer and not uri.startswith(referrer)): logout_uris.append(self._build_logout_url(uri)) context.update({ 'target': self.target, 'logout_uris': logout_uris, }) return context
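# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a standalone, stdlib-only
# illustration of the base36 -> base64 user-id conversion performed by
# uidb36_to_uidb64() above. The helper name is the editor's, and the
# assumption that Django's urlsafe_base64_encode() base64-encodes the decimal
# string form of the user id with '=' padding stripped is stated here as an
# assumption, not as the Django API itself.
# ---------------------------------------------------------------------------
import base64


def _uidb36_to_uidb64_sketch(uidb36):
    # mirrors django.utils.http.base36_to_int
    user_id = int(uidb36, 36)
    # mirrors urlsafe_base64_encode(force_bytes(user_id)): base64 of the
    # decimal string form of the id, padding stripped (editor's assumption)
    encoded = base64.urlsafe_b64encode(str(user_id).encode('ascii'))
    return encoded.decode('ascii').rstrip('=')

# _uidb36_to_uidb64_sketch('1') -> 'MQ'   (user id 1 -> b'1' -> 'MQ==' minus padding)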
caesar2164/edx-platform
common/djangoapps/student/views.py
Python
agpl-3.0
114,210
[ "VisIt" ]
cacc4b309de5b74e6965f07f8a8ae60b7c16e59a0cc8ead68cecefb71df88f51
# The following code has been adapted from Till A. Hoffmann. # See https://nbviewer.jupyter.org/gist/tillahoffmann/f844bce2ec264c1c8cb5 # and https://stackoverflow.com/questions/27623919/weighted-gaussian-kernel-density-estimation-in-python import numpy as np from scipy.spatial.distance import cdist import thalesians.tsa.checks as checks import thalesians.tsa.distrs as distrs import thalesians.tsa.numpyutils as npu class GaussianKDEDistr(distrs.Distr): """ Representation of a kernel-density estimate using Gaussian kernels. Kernel density estimation is a way to estimate the probability density function (PDF) of a random variable in a non-parametric way. `GaussianKDEDistr` works for both univariate and multivariate data. It includes automatic bandwidth determination. The estimation works best for a unimodal distribution; bimodal or multimodal distributions tend to be oversmoothed. Parameters ---------- dataset : array_like Datapoints to estimate from. In case of univariate data this is a 1-D array, otherwise a 2-D array with shape (# of data points / particles, # of dimensions in each particle). weights : array_like, shape (n, ), optional, default: None An array of weights, of the same shape as `x`. Each value in `x` only contributes its associated weight towards the bin count (instead of 1). bw_method : str, scalar or callable, optional The method used to calculate the estimator bandwidth. This can be 'scott', 'silverman', a scalar constant or a callable. If a scalar, this will be used directly as `kde.factor`. If a callable, it should take a `GaussianKDEDistr` instance as the only parameter and return a scalar. If None (default), 'scott' is used. See Notes for more details. Attributes ---------- dataset : ndarray The dataset with which `GaussianKDEDistr` was initialized. d : int Number of dimensions. n : int Number of datapoints. factor : float The bandwidth factor, obtained from `kde.covariance_factor`, with which the covariance matrix is multiplied. covariance : ndarray The covariance matrix of `dataset`, scaled by the calculated bandwidth (`kde.factor`). inv_cov : ndarray The inverse of `covariance`. Methods ------- kde.evaluate(points) : ndarray Evaluate the estimated pdf on a provided set of points. kde(points) : ndarray Same as kde.evaluate(points) kde.pdf(points) : ndarray Alias for ``kde.evaluate(points)``. kde.set_bandwidth(bw_method='scott') : None Computes the bandwidth, i.e. the coefficient that multiplies the data covariance matrix to obtain the kernel covariance matrix. kde.covariance_factor : float Computes the coefficient (`kde.factor`) that multiplies the data covariance matrix to obtain the kernel covariance matrix. The default is `scotts_factor`. A subclass can overwrite this method to provide a different method, or set it through a call to `kde.set_bandwidth`. Notes ----- Bandwidth selection strongly influences the estimate obtained from the KDE (much more so than the actual shape of the kernel). Bandwidth selection can be done by a "rule of thumb", by cross-validation, by "plug-in methods" or by other means; see [3]_, [4]_ for reviews. `GaussianKDEDistr` uses a rule of thumb, the default is Scott's Rule. Scott's Rule [1]_, implemented as `scotts_factor`, is:: n**(-1./(d+4)), with ``n`` the number of data points and ``d`` the number of dimensions. Silverman's Rule [2]_, implemented as `silverman_factor`, is:: (n * (d + 2) / 4.)**(-1. / (d + 4)). 
Good general descriptions of kernel density estimation can be found in [1]_
    and [2]_, the mathematics for this multi-dimensional implementation can be
    found in [1]_.

    References
    ----------
    .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
           Visualization", John Wiley & Sons, New York, Chichester, 1992.
    .. [2] B.W. Silverman, "Density Estimation for Statistics and Data
           Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
           Chapman and Hall, London, 1986.
    .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
           Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
    .. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
           conditional density estimation", Computational Statistics & Data
           Analysis, Vol. 36, pp. 279-298, 2001.

    Examples
    --------
    Generate some random two-dimensional data:

    >>> from scipy import stats
    >>> def measure(n):
    ...     "Measurement model, return two coupled measurements."
    ...     m1 = np.random.normal(size=n)
    ...     m2 = np.random.normal(scale=0.5, size=n)
    ...     return m1+m2, m1-m2

    >>> m1, m2 = measure(2000)
    >>> xmin = m1.min()
    >>> xmax = m1.max()
    >>> ymin = m2.min()
    >>> ymax = m2.max()

    Perform a kernel density estimate on the data:

    >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    >>> positions = np.vstack([X.ravel(), Y.ravel()])
    >>> values = np.vstack([m1, m2])
    >>> kernel = stats.gaussian_kde(values)
    >>> Z = np.reshape(kernel(positions).T, X.shape)

    Plot the results:

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
    >>> ax.plot(m1, m2, 'k.', markersize=2)
    >>> ax.set_xlim([xmin, xmax])
    >>> ax.set_ylim([ymin, ymax])
    >>> plt.show()
    """

    def __init__(self, empirical_distr, bw_method=None):
        """
        Construct the estimator and compute its bandwidth with the given
        method.

        The new bandwidth calculated after a call to `set_bandwidth` is used
        for subsequent evaluations of the estimated density.

        Parameters
        ----------
        bw_method : str, scalar or callable, optional
            The method used to calculate the estimator bandwidth. This can be
            'scott', 'silverman', a scalar constant or a callable. If a
            scalar, this will be used directly as `kde.factor`. If a callable,
            it should take a `GaussianKDEDistr` instance as the only parameter
            and return a scalar. If None (default), nothing happens; the
            current `kde.covariance_factor` method is kept.

        Examples
        --------
        >>> x1 = np.array([-7, -5, 1, 4, 5.])
        >>> kde = stats.gaussian_kde(x1)
        >>> xs = np.linspace(-10, 10, num=50)
        >>> y1 = kde(xs)

        >>> kde.set_bandwidth(bw_method='silverman')
        >>> y2 = kde(xs)

        >>> kde.set_bandwidth(bw_method=kde.factor / 3.)
        >>> y3 = kde(xs)

        >>> fig = plt.figure()
        >>> ax = fig.add_subplot(111)
        >>> ax.plot(x1, np.ones(x1.shape) / (4.
* x1.size), 'bo', label='Data points (rescaled)') >>> ax.plot(xs, y1, label='Scott (default)') >>> ax.plot(xs, y2, label='Silverman') >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)') >>> ax.legend() >>> plt.show() """ self._empirical_distr = empirical_distr if bw_method is None: pass elif bw_method == 'scott': self.covariance_factor = self.scotts_factor elif bw_method == 'silverman': self.covariance_factor = self.silverman_factor elif np.isscalar(bw_method) and not checks.is_string(bw_method): self._bw_method = 'use constant' self.covariance_factor = lambda: bw_method elif callable(bw_method): self._bw_method = bw_method self.covariance_factor = lambda: self._bw_method(self) else: raise ValueError("`bw_method` should be 'scott', 'silverman', a scalar or a callable.") self._cov = None self._inv_cov = None self._pdf_norm_factor = None self._to_string_helper_GaussianKDEDistr = None self._str_GaussianKDEDistr = None super().__init__() def _compute_covariance(self): """ Computes the covariance matrix for each Gaussian kernel using covariance_factor(). """ self._cov = self.empirical_distr.cov * self.covariance_factor()**2 self._inv_cov = np.linalg.inv(self.empirical_distr.cov) / self.covariance_factor()**2 self._pdf_norm_factor = np.sqrt(np.linalg.det(2 * np.pi * self._cov)) #* self.n def scotts_factor(self): return np.power(self.empirical_distr.effective_particle_count, -1. / (self.dim + 4)) def silverman_factor(self): return np.power(self.empirical_distr.effective_particle_count * (self.dim + 2.0) / 4.0, -1. / (self.dim + 4)) # Default method to calculate bandwidth, can be overwritten by subclass: covariance_factor = scotts_factor @property def empirical_distr(self): return self._empirical_distr @property def dim(self): return self._empirical_distr.dim @property def particle_count(self): return self._empirical_distr.particle_count @property def mean(self): return self._empirical_distr.mean @property def cov(self): if self._cov is None: self._compute_covariance() return self._cov @property def inv_cov(self): if self._inv_cov is None: self._compute_covariance() return self._inv_cov @property def pdf_norm_factor(self): if self._pdf_norm_factor is None: self._compute_covariance() return self._pdf_norm_factor def sample(self, size=1, random_state=None): raise NotImplementedError() def pdf(self, points): """ Evaluate the estimated pdf on a set of points. Parameters ---------- points : (# of dimensions, # of points)-array Alternatively, a (# of dimensions,) vector can be passed in and treated as a single point. Returns ------- values : (# of points,)-array The values at each point. Raises ------ ValueError : if the dimensionality of the input points is different than the dimensionality of the KDE. 
""" points = npu.to_ndim_2(points, ndim_1_to_col=True) m, d = np.shape(points) if d != self.dim: if d == 1 and m == self.dim: # points was passed in as a column vector points = np.reshape(points, (1, self.dim)) m = 1 else: msg = "points have dimension %s, particles has dimension %s" % (d, self.dim) raise ValueError(msg) # compute the normalized residuals chi2 = cdist(points, self.empirical_distr.particles, 'mahalanobis', VI=self.inv_cov) ** 2 # compute the pdf result = np.sum(np.exp(-.5 * chi2) * self.empirical_distr.normalized_weights.T, axis=1) / self.pdf_norm_factor return result def to_string_helper(self): if self._to_string_helper_GaussianKDEDistr is None: self._to_string_helper_GaussianKDEDistr = super().to_string_helper() \ .set_type(self) \ .add('particle_count', self.particle_count) \ .add('dim', self.dim) return self._to_string_helper_GaussianKDEDistr def __str__(self): if self._str_GaussianKDEDistr is None: self._str_GaussianKDEDistr = self.to_string_helper().to_string() return self._str_GaussianKDEDistr
thalesians/tsa
src/main/python/thalesians/tsa/kde.py
Python
apache-2.0
11,693
[ "Gaussian" ]
9eb26d2d0e4be38ea3ac4cc96f2237ebaaa86bc650ccd5300fa9973a4f995abb
# -*- coding:utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2010, 2011, 2012 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """bibindex_engine_tokenizer_tests - unit tests for bibindex_engine_tokenizer There should always be at least one test class for each class in b_e_t. """ import unittest from invenio.testutils import make_test_suite, run_test_suite import bibindex_engine_tokenizer as tokenizer_lib class TestFuzzyNameTokenizerScanning(unittest.TestCase): """Test BibIndex name tokenization""" def setUp(self): self.tokenizer = tokenizer_lib.BibIndexFuzzyNameTokenizer() self.scan = self.tokenizer.scan def test_bifnt_scan_single(self): """BibIndexFuzzyNameTokenizer - scanning single names like 'Dido'""" teststr = "Dido" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Dido'], 'nonlastnames': [], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_simple_western_forward(self): """BibIndexFuzzyNameTokenizer - scanning simple Western-style: first last""" teststr = "Ringo Starr" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_simple_western_reverse(self): """BibIndexFuzzyNameTokenizer - scanning simple Western-style: last, first""" teststr = "Starr, Ringo" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_multiname_forward(self): """BibIndexFuzzyNameTokenizer - scanning multiword: first middle last""" teststr = "Michael Edward Peskin" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Peskin'], 'nonlastnames': ['Michael', 'Edward'], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_multiname_dotcrammed(self): """BibIndexFuzzyNameTokenizer - scanning multiword: f.m. last""" teststr = "M.E. Peskin" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Peskin'], 'nonlastnames': ['M', 'E'], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_multiname_dotcrammed_reversed(self): """BibIndexFuzzyNameTokenizer - scanning multiword: last, f.m.""" teststr = "Peskin, M.E." 
output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Peskin'], 'nonlastnames': ['M', 'E'], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_multiname_dashcrammed(self): """BibIndexFuzzyNameTokenizer - scanning multiword: first-middle last""" teststr = "Jean-Luc Picard" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Picard'], 'nonlastnames': ['Jean', 'Luc'], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_multiname_dashcrammed_reversed(self): """BibIndexFuzzyNameTokenizer - scanning multiword: last, first-middle""" teststr = "Picard, Jean-Luc" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Picard'], 'nonlastnames': ['Jean', 'Luc'], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_compound_lastname_dashes(self): """BibIndexFuzzyNameTokenizer - scanning multiword: first middle last-last""" teststr = "Cantina Octavia Jones-Smith" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Jones', 'Smith'], 'nonlastnames': ['Cantina', 'Octavia'], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_compound_lastname_dashes_reverse(self): """BibIndexFuzzyNameTokenizer - scanning multiword: last-last, first middle""" teststr = "Jones-Smith, Cantina Octavia" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Jones', 'Smith'], 'nonlastnames': ['Cantina', 'Octavia'], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_compound_lastname_reverse(self): """BibIndexFuzzyNameTokenizer - scanning compound last: last last, first""" teststr = "Alvarez Gaume, Joachim" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Alvarez', 'Gaume'], 'nonlastnames': ['Joachim'], 'titles': [], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_titled(self): """BibIndexFuzzyNameTokenizer - scanning title-bearing: last, first, title""" teststr = "Epstein, Brian, The Fifth Beatle" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Epstein'], 'nonlastnames': ['Brian'], 'titles': ['The Fifth Beatle'], 'raw' : teststr} self.assertEqual(output, anticipated) def test_bifnt_scan_wildly_interesting(self): """BibIndexFuzzyNameTokenizer - scanning last last last, first first, title, title""" teststr = "Ibanez y Gracia, Maria Luisa, II., ed." 
output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Ibanez', 'y', 'Gracia'], 'nonlastnames': ['Maria', 'Luisa'], 'titles': ['II.', 'ed.'], 'raw' : teststr} self.assertEqual(output, anticipated) class TestFuzzyNameTokenizerTokens(unittest.TestCase): """Test BibIndex name variant token generation from scanned and tagged sets""" def setUp(self): self.tokenizer = tokenizer_lib.BibIndexFuzzyNameTokenizer() self.get_index_tokens = self.tokenizer.parse_scanned def test_bifnt_tokenize_single(self): """BibIndexFuzzyNameTokenizer - tokens for single-word name Ronaldo """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Ronaldo'], 'nonlastnames': [], 'titles': [], 'raw' : 'Ronaldo'} output = self.get_index_tokens(tagged_data) anticipated = ['Ronaldo'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_simple_forward(self): """BibIndexFuzzyNameTokenizer - tokens for first last Ringo Starr """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : 'Ringo Starr'} output = self.get_index_tokens(tagged_data) anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_simple_reverse(self): """BibIndexFuzzyNameTokenizer - tokens for last, first Starr, Ringo """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : 'Starr, Ringo'} output = self.get_index_tokens(tagged_data) anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_twoname_forward(self): """BibIndexFuzzyNameTokenizer - tokens for first middle last Michael Edward Peskin """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Peskin'], 'nonlastnames': ['Michael', 'Edward'], 'titles': [], 'raw' : 'Michael Edward Peskin'} output = self.get_index_tokens(tagged_data) anticipated = ['E Peskin', 'Edward Peskin', 'M E Peskin', 'M Edward Peskin', 'M Peskin', 'Michael E Peskin', 'Michael Edward Peskin', 'Michael Peskin', 'Peskin, E', 'Peskin, Edward', 'Peskin, M', 'Peskin, M E', 'Peskin, M Edward', 'Peskin, Michael', 'Peskin, Michael E', 'Peskin, Michael Edward'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_compound_last(self): """BibIndexFuzzyNameTokenizer - tokens for last last, first Alvarez Gaume, Joachim """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Alvarez', 'Gaume'], 'nonlastnames': ['Joachim'], 'titles': [], 'raw' : 'Alvarez Gaume, Joachim'} output = self.get_index_tokens(tagged_data) anticipated = ['Alvarez Gaume, J', 'Alvarez Gaume, Joachim', 'Alvarez, J', 'Alvarez, Joachim', 'Gaume, J', 'Gaume, Joachim', 'J Alvarez', 'J Alvarez Gaume', 'J Gaume', 'Joachim Alvarez', 'Joachim Alvarez Gaume', 'Joachim Gaume'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_titled(self): """BibIndexFuzzyNameTokenizer - tokens for last, first, title Epstein, Brian, The Fifth Beatle """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Epstein'], 'nonlastnames': ['Brian'], 'titles': ['The Fifth Beatle'], 'raw' : 'Epstein, Brian, The Fifth Beatle'} output = self.get_index_tokens(tagged_data) anticipated = ['B Epstein', 'B Epstein, 
The Fifth Beatle', 'Brian Epstein', 'Brian Epstein, The Fifth Beatle', 'Epstein, B', 'Epstein, B, The Fifth Beatle', 'Epstein, Brian', 'Epstein, Brian, The Fifth Beatle'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_wildly_interesting(self): """BibIndexFuzzyNameTokenizer - tokens for last last last, first first, title, title Ibanez y Gracia, Maria Luisa, II, (ed.) """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Ibanez', 'y', 'Gracia'], 'nonlastnames': ['Maria', 'Luisa'], 'titles': ['II', '(ed.)'], 'raw' : 'Ibanez y Gracia, Maria Luisa, II, (ed.)'} output = self.get_index_tokens(tagged_data) anticipated = ['Gracia, L', 'Gracia, Luisa', 'Gracia, M', 'Gracia, M L', 'Gracia, M Luisa', 'Gracia, Maria', 'Gracia, Maria L', 'Gracia, Maria Luisa', 'Ibanez y Gracia, L', 'Ibanez y Gracia, L, II', 'Ibanez y Gracia, Luisa', 'Ibanez y Gracia, Luisa, II', 'Ibanez y Gracia, M', 'Ibanez y Gracia, M L', 'Ibanez y Gracia, M L, II', 'Ibanez y Gracia, M Luisa', 'Ibanez y Gracia, M Luisa, II', 'Ibanez y Gracia, M, II', 'Ibanez y Gracia, Maria', 'Ibanez y Gracia, Maria L', 'Ibanez y Gracia, Maria L, II', 'Ibanez y Gracia, Maria Luisa', 'Ibanez y Gracia, Maria Luisa, II', 'Ibanez y Gracia, Maria, II', 'Ibanez, L', 'Ibanez, Luisa', 'Ibanez, M', 'Ibanez, M L', 'Ibanez, M Luisa', 'Ibanez, Maria', 'Ibanez, Maria L', 'Ibanez, Maria Luisa', 'L Gracia', 'L Ibanez', 'L Ibanez y Gracia', 'L Ibanez y Gracia, II', 'Luisa Gracia', 'Luisa Ibanez', 'Luisa Ibanez y Gracia', 'Luisa Ibanez y Gracia, II', 'M Gracia', 'M Ibanez', 'M Ibanez y Gracia', 'M Ibanez y Gracia, II', 'M L Gracia', 'M L Ibanez', 'M L Ibanez y Gracia', 'M L Ibanez y Gracia, II', 'M Luisa Gracia', 'M Luisa Ibanez', 'M Luisa Ibanez y Gracia', 'M Luisa Ibanez y Gracia, II', 'Maria Gracia', 'Maria Ibanez', 'Maria Ibanez y Gracia', 'Maria Ibanez y Gracia, II', 'Maria L Gracia', 'Maria L Ibanez', 'Maria L Ibanez y Gracia', 'Maria L Ibanez y Gracia, II', 'Maria Luisa Gracia', 'Maria Luisa Ibanez', 'Maria Luisa Ibanez y Gracia', 'Maria Luisa Ibanez y Gracia, II'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_multimiddle_forward(self): """BibIndexFuzzyNameTokenizer - tokens for first middle middle last W K H Panofsky """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Panofsky'], 'nonlastnames': ['W', 'K', 'H'], 'titles': [], 'raw' : 'W K H Panofsky'} output = self.get_index_tokens(tagged_data) anticipated = ['H Panofsky', 'K H Panofsky', 'K Panofsky', 'Panofsky, H', 'Panofsky, K', 'Panofsky, K H', 'Panofsky, W', 'Panofsky, W H', 'Panofsky, W K', 'Panofsky, W K H', 'W H Panofsky', 'W K H Panofsky', 'W K Panofsky', 'W Panofsky'] self.assertEqual(output, anticipated) def test_tokenize(self): """BibIndexFuzzyNameTokenizer - check tokenize() Ringo Starr """ teststr = "Ringo Starr" output = self.tokenizer.tokenize(teststr) anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo'] self.assertEqual(output, anticipated) class TestExactNameTokenizer(unittest.TestCase): """Test exact author name tokenizer.""" def setUp(self): """setup""" self.tokenizer = tokenizer_lib.BibIndexExactNameTokenizer() def test_exact_author_name_tokenizer_bare(self): """BibIndexExactNameTokenizer - bare name""" self.assertEqual(self.tokenizer.tokenize('John Doe'), ['John Doe']) def test_exact_author_name_tokenizer_dots(self): """BibIndexExactNameTokenizer - name with dots""" self.assertEqual(self.tokenizer.tokenize('J. 
Doe'), ['J Doe']) self.assertEqual(self.tokenizer.tokenize('J.R. Doe'), ['J R Doe']) self.assertEqual(self.tokenizer.tokenize('J. R. Doe'), ['J R Doe']) def test_exact_author_name_tokenizer_trailing_dots(self): """BibIndexExactNameTokenizer - name with trailing dots""" self.assertEqual(self.tokenizer.tokenize('Doe, J'), ['Doe, J']) self.assertEqual(self.tokenizer.tokenize('Doe, J.'), ['Doe, J']) def test_exact_author_name_tokenizer_hyphens(self): """BibIndexExactNameTokenizer - name with hyphens""" self.assertEqual(self.tokenizer.tokenize('Doe, Jean-Pierre'), ['Doe, Jean Pierre']) TEST_SUITE = make_test_suite(TestFuzzyNameTokenizerScanning, TestFuzzyNameTokenizerTokens, TestExactNameTokenizer,) if __name__ == '__main__': #unittest.main() run_test_suite(TEST_SUITE)
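
# ---------------------------------------------------------------------------
# Editor's sketch (not part of Invenio): the smallest variant expansion the
# fuzzy-name tests above anticipate for a plain first/last pair -- forward and
# reversed forms, each with the full first name and with its initial. The
# function name is the editor's; the real tokenizer handles far more cases.
# ---------------------------------------------------------------------------
def _simple_name_variants_sketch(first, last):
    variants = set()
    for f in (first, first[0]):
        variants.add('%s %s' % (f, last))   # forward: "Ringo Starr", "R Starr"
        variants.add('%s, %s' % (last, f))  # reversed: "Starr, Ringo", "Starr, R"
    return sorted(variants)

# _simple_name_variants_sketch('Ringo', 'Starr') ->
#     ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']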
fjorba/invenio
modules/bibindex/lib/bibindex_engine_tokenizer_unit_tests.py
Python
gpl-2.0
16,481
[ "Brian" ]
892d3e97c91ddbe6860b9339c333954d24f8656de7c22e916de4b4302eb898a0
#!/usr/bin/env python # Copyright 2015 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import argparse import datetime import difflib import glob import os import re import sys parser = argparse.ArgumentParser() parser.add_argument( "filenames", help="list of files to check, all files if unspecified", nargs='*') rootdir = os.path.dirname(__file__) + "/../../" rootdir = os.path.abspath(rootdir) parser.add_argument( "--rootdir", default=rootdir, help="root directory to examine") default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate") parser.add_argument( "--boilerplate-dir", default=default_boilerplate_dir) parser.add_argument( "-v", "--verbose", help="give verbose output regarding why a file does not pass", action="store_true") args = parser.parse_args() verbose_out = sys.stderr if args.verbose else open("/dev/null", "w") def get_refs(): refs = {} for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")): extension = os.path.basename(path).split(".")[1] ref_file = open(path, 'r') ref = ref_file.read().splitlines() ref_file.close() refs[extension] = ref return refs def is_generated_file(filename, data, regexs): for d in skipped_ungenerated_files: if d in filename: return False p = regexs["generated"] return p.search(data) def file_passes(filename, refs, regexs): try: f = open(filename, 'r') except Exception as exc: print("Unable to open %s: %s" % (filename, exc), file=verbose_out) return False data = f.read() f.close() # determine if the file is automatically generated generated = is_generated_file(filename, data, regexs) basename = os.path.basename(filename) if generated: extension = "generatego" else: extension = file_extension(filename) if extension != "": ref = refs[extension] else: ref = refs[basename] # remove build tags from the top of Go files if extension == "go" or extension =="generatego": p = regexs["go_build_constraints"] (data, found) = p.subn("", data, 1) # remove shebang from the top of shell files if extension == "sh": p = regexs["shebang"] (data, found) = p.subn("", data, 1) data = data.splitlines() # if our test file is smaller than the reference it surely fails! 
if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)), file=verbose_out)
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    p = regexs["year"]
    for d in data:
        if p.search(d):
            if generated:
                print('File %s has the YEAR field, but it should not be in generated file' % filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but is missing the year' % filename, file=verbose_out)
            return False

    if not generated:
        # Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
        p = regexs["date"]
        for i, d in enumerate(data):
            (data[i], found) = p.subn('YEAR', d)
            if found != 0:
                break

    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False

    return True


def file_extension(filename):
    return os.path.splitext(filename)[1].split(".")[-1].lower()


skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
                "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
                "pkg/generated/bindata.go"]

# list all the files that contain 'DO NOT EDIT' but are not generated
skipped_ungenerated_files = ['hack/build-ui.sh', 'hack/lib/swagger.sh',
                             'hack/boilerplate/boilerplate.py']


def normalize_files(files):
    newfiles = []
    for pathname in files:
        if any(x in pathname for x in skipped_dirs):
            continue
        newfiles.append(pathname)
    for i, pathname in enumerate(newfiles):
        if not os.path.isabs(pathname):
            newfiles[i] = os.path.join(args.rootdir, pathname)
    return newfiles


def get_files(extensions):
    files = []
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # don't visit certain dirs. This is just a performance improvement
            # as we would prune these later in normalize_files(). But doing it
            # cuts down the amount of filesystem walking we do and cuts down
            # the size of the file list
            for d in skipped_dirs:
                if d in dirs:
                    dirs.remove(d)

            for name in walkfiles:
                pathname = os.path.join(root, name)
                files.append(pathname)

    files = normalize_files(files)
    outfiles = []
    for pathname in files:
        basename = os.path.basename(pathname)
        extension = file_extension(pathname)
        if extension in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles


def get_dates():
    years = datetime.datetime.now().year
    return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))


def get_regexs():
    regexs = {}
    # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
    regexs["year"] = re.compile('YEAR')
    # get_dates returns 2014, 2015, 2016, 2017, or 2018 until the current year as a regex like: "(2014|2015|2016|2017|2018)";
    # company holder names can be anything
    regexs["date"] = re.compile(get_dates())
    # strip // +build \n\n build constraints
    regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
    # strip #!.* from shell scripts
    regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
    # Search for generated files
    regexs["generated"] = re.compile('DO NOT EDIT')
    return regexs


def main():
    regexs = get_regexs()
    refs = get_refs()
    filenames = get_files(refs.keys())

    for filename in filenames:
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)

    return 0


if __name__ == "__main__":
    sys.exit(main())
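
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the checker): a tiny demonstration of the YEAR
# normalization step in file_passes() -- concrete copyright years matched by
# the "date" regex are rewritten to the literal 'YEAR' so a header can be
# compared against the boilerplate reference. The function name and the
# sample line are the editor's.
# ---------------------------------------------------------------------------
def _year_normalization_sketch():
    date_re = re.compile('(%s)' % '|'.join(str(y) for y in range(2014, 2019)))
    line = "Copyright 2016 The Kubernetes Authors."
    normalized, found = date_re.subn('YEAR', line)
    return normalized, found  # -> ("Copyright YEAR The Kubernetes Authors.", 1)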
imcsk8/origin
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.py
Python
apache-2.0
7,352
[ "VisIt" ]
1da3b19a8e77bb6004396ea881e68d0814ef2822f8634b1e3a84b05c7046857e
#!/usr/bin/python

#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2020 Terry Chen, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#

from __future__ import print_function

import datetime
import platform


def get_python_version():
    return "{0} ({1})".format(platform.python_version(), platform.platform())


def print_runtime_details(script, version, script_path, current_time=None, print_function=print):
    # Default to the time of the call; a datetime.datetime.now() default
    # argument would be evaluated once at import time and then reused.
    if current_time is None:
        current_time = datetime.datetime.now()
    print_function("{0} version {1}".format(script, version))
    print_function("  Script Path : {0}".format(script_path))
    print_function("  Current Time : {0}".format(current_time))
    print_function("  Python Version : {0}".format(get_python_version()))
    print_function()
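
# ---------------------------------------------------------------------------
# Editor's sketch: a hypothetical demonstration call, not part of the original
# appleseed module. Running this file directly would print a version banner
# for the module itself using the helpers above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print_runtime_details("utils.py self-test", "0.0", __file__)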
luisbarrancos/appleseed
scripts/utils.py
Python
mit
1,898
[ "VisIt" ]
73d0533f88b13735721ac17feea3030b94dc00d210d16655621fce557e3972c7
""" Demonstrate SVG filtering effects which might be used with mpl. Note that the filtering effects are only effective if your svg rederer support it. """ from __future__ import print_function import matplotlib matplotlib.use("Svg") import matplotlib.pyplot as plt import matplotlib.transforms as mtransforms fig1 = plt.figure() ax = fig1.add_axes([0.1, 0.1, 0.8, 0.8]) # draw lines l1, = ax.plot([0.1, 0.5, 0.9], [0.1, 0.9, 0.5], "bo-", mec="b", lw=5, ms=10, label="Line 1") l2, = ax.plot([0.1, 0.5, 0.9], [0.5, 0.2, 0.7], "rs-", mec="r", lw=5, ms=10, color="r", label="Line 2") for l in [l1, l2]: # draw shadows with same lines with slight offset and gray colors. xx = l.get_xdata() yy = l.get_ydata() shadow, = ax.plot(xx, yy) shadow.update_from(l) # adjust color shadow.set_color("0.2") # adjust zorder of the shadow lines so that it is drawn below the # original lines shadow.set_zorder(l.get_zorder()-0.5) # offset transform ot = mtransforms.offset_copy(l.get_transform(), fig1, x=4.0, y=-6.0, units='points') shadow.set_transform(ot) # set the id for a later use shadow.set_gid(l.get_label()+"_shadow") ax.set_xlim(0., 1.) ax.set_ylim(0., 1.) # save the figure as a string in the svg format. from StringIO import StringIO f = StringIO() plt.savefig(f, format="svg") import xml.etree.cElementTree as ET # filter definition for a gaussian blur filter_def = """ <defs xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'> <filter id='dropshadow' height='1.2' width='1.2'> <feGaussianBlur result='blur' stdDeviation='3'/> </filter> </defs> """ # read in the saved svg tree, xmlid = ET.XMLID(f.getvalue()) # insert the filter definition in the svg dom tree. tree.insert(0, ET.XML(filter_def)) for l in [l1, l2]: # pick up the svg element with given id shadow = xmlid[l.get_label()+"_shadow"] # apply shdow filter shadow.set("filter",'url(#dropshadow)') fn = "svg_filter_line.svg" print("Saving '%s'" % fn) ET.ElementTree(tree).write(fn)
lthurlow/Network-Grapher
proj/external/matplotlib-1.2.1/lib/mpl_examples/misc/svg_filter_line.py
Python
mit
2,145
[ "Gaussian" ]
99ec65ca9d2324789de4b11a46c3d5e25b8b82b492738255f3654ffc17123e96
import sys import bisect sys.path.append('../../../vmdgadgets') import vmdutil def get_index(frame_no, keys): index = bisect.bisect_left(keys, frame_no) if index <= len(keys) -1 and keys[index] == frame_no: return index, True else: return index - 1, False def get_vmdtransformation(frame_no, key_frames, frames): vmd_index, b = get_index(frame_no, key_frames) if vmd_index < 0: return frames[0].rotation, frames[0].position else: begin = frames[vmd_index] if vmd_index < len(key_frames) - 1: end = frames[vmd_index + 1] position = vmdutil.interpolate_position( frame_no, begin, end, 'bones') rotation = vmdutil.interpolate_rotation( frame_no, begin, end, 'bones') else: position = begin.position rotation = begin.rotation return rotation, position if __name__ == '__main__': vmd = vmdutil.Vmdio() if len(sys.argv) > 1: vmd.load(sys.argv[1]) else: vmd.load_fd(sys.stdin.buffer) bones = vmd.get_frames('bones') frame_dict = vmdutil.frames_to_dict(bones) name_dict = vmdutil.make_name_dict(frame_dict, True) target_bones = {'センター', '右足IK', '左足IK'} root_frames = name_dict['全ての親'] root_key_frames = [frame.frame for frame in root_frames] new_frames = [] for move_bone in target_bones: for frame in name_dict[move_bone]: p_rot, p_pos = get_vmdtransformation( frame.frame, root_key_frames, root_frames) frame = frame._replace(position=tuple(vmdutil.add_v( frame.position, p_pos))) new_frames.append(frame) for bone_name in name_dict: if bone_name not in target_bones and bone_name != '全ての親': new_frames.extend(name_dict[bone_name]) vmd.set_frames('bones', new_frames) if len(sys.argv) > 2: vmd.store(sys.argv[2]) else: vmd.store_fd(sys.stdout.buffer)
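
# ---------------------------------------------------------------------------
# Editor's sketch (defined but never called by the tool's stdin/stdout flow
# above): a sanity check of the bisect-based keyframe lookup in get_index().
# An exact frame hit returns (index, True); a frame between keys returns the
# index of the preceding key and False.
# ---------------------------------------------------------------------------
def _demo_get_index():
    keys = [0, 10, 20]
    assert get_index(10, keys) == (1, True)   # exact hit on a keyframe
    assert get_index(15, keys) == (1, False)  # between keys -> preceding index
    assert get_index(5, keys) == (0, False)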
Hashi4/vmdgadgets
sample/lookat/sm31942771/modify_root.py
Python
apache-2.0
2,047
[ "VMD" ]
a3dc18e3703fd21349e496917091c4e26a0653cb784eafa8ac98abc411c26bdf
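modify_root.py above folds the motion of the 全ての親 (root) bone into each target bone by locating the surrounding key frames with bisect and interpolating between them. A standalone sketch of that lookup-and-interpolate pattern; vmdutil's Bezier interpolation is replaced by plain linear interpolation for illustration.

import bisect

def lookup(frame_no, key_frames, values):
    """Value at frame_no, interpolated between sorted key frames."""
    i = bisect.bisect_left(key_frames, frame_no)
    if i < len(key_frames) and key_frames[i] == frame_no:
        return values[i]                  # exact hit on a key frame
    if i == 0:
        return values[0]                  # before the first key frame
    if i == len(key_frames):
        return values[-1]                 # after the last key frame
    t = (frame_no - key_frames[i - 1]) / (key_frames[i] - key_frames[i - 1])
    return values[i - 1] + t * (values[i] - values[i - 1])

print(lookup(15, [0, 10, 20], [0.0, 1.0, 3.0]))  # -> 2.0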
#! /usr/bin/python """ Creates a pileup file from a bam file and a reference. usage: %prog [options] -p, --input1=p: bam file -o, --output1=o: Output pileup -R, --ref=R: Reference file type -n, --ownFile=n: User-supplied fasta reference file -d, --dbkey=d: dbkey of user-supplied file -x, --indexDir=x: Index directory -b, --bamIndex=b: BAM index file -s, --lastCol=s: Print the mapping quality as the last column -i, --indels=i: Only output lines containing indels -M, --mapCap=M: Cap mapping quality -c, --consensus=c: Call the consensus sequence using MAQ consensus model -T, --theta=T: Theta parameter (error dependency coefficient) -N, --hapNum=N: Number of haplotypes in sample -r, --fraction=r: Expected fraction of differences between a pair of haplotypes -I, --phredProb=I: Phred probability of an indel in sequencing/prep """ import os, sys, tempfile from galaxy import eggs import pkg_resources; pkg_resources.require( "bx-python" ) from bx.cookbook import doc_optparse def stop_err( msg ): sys.stderr.write( msg ) sys.exit() def check_seq_file( dbkey, GALAXY_DATA_INDEX_DIR ): seq_file = "%s/sam_fa_indices.loc" % GALAXY_DATA_INDEX_DIR seq_path = '' for line in open( seq_file ): line = line.rstrip( '\r\n' ) if line and not line.startswith( "#" ) and line.startswith( 'index' ): fields = line.split( '\t' ) if len( fields ) < 3: continue if fields[1] == dbkey: seq_path = fields[2].strip() break return seq_path def __main__(): #Parse Command Line options, args = doc_optparse.parse( __doc__ ) seq_path = check_seq_file( options.dbkey, options.indexDir ) tmp_dir = tempfile.gettempdir() os.chdir(tmp_dir) tmpf0 = tempfile.NamedTemporaryFile(dir=tmp_dir) tmpf0bam = '%s.bam' % tmpf0.name tmpf0bambai = '%s.bam.bai' % tmpf0.name tmpf1 = tempfile.NamedTemporaryFile(dir=tmp_dir) tmpf1fai = '%s.fai' % tmpf1.name opts = '%s %s -M %s' % (('','-s')[options.lastCol=='yes'], ('','-i')[options.indels=='yes'], options.mapCap) if options.consensus == 'yes': opts += ' -c -T %s -N %s -r %s -I %s' % (options.theta, options.hapNum, options.fraction, options.phredProb) cmd1 = None cmd2 = 'cp %s %s; cp %s %s' % (options.input1, tmpf0bam, options.bamIndex, tmpf0bambai) cmd3 = 'samtools pileup %s -f %s %s > %s 2> /dev/null' if options.ref =='indexed': full_path = "%s.fai" % seq_path if not os.path.exists( full_path ): stop_err( "No sequences are available for '%s', request them by reporting this error." % options.dbkey ) cmd3 = cmd3 % (opts, seq_path, tmpf0bam, options.output1) elif options.ref == 'history': cmd1 = 'cp %s %s; samtools faidx %s' % (options.ownFile, tmpf1.name, tmpf1.name) cmd3 = cmd3 % (opts, tmpf1.name, tmpf0bam, options.output1) # index reference if necessary if cmd1: try: os.system(cmd1) if options.ref == 'history' and not os.path.exists( tmpf1fai ): stop_err( "Problem creating index file from history item." ) except Exception, eq: stop_err('Error handling reference sequence\n' + str(eq)) # copy bam index to working directory try: os.system(cmd2) except Exception, eq: stop_err('Error moving files to temp directory\n' + str(eq)) # perform pileup command try: os.system(cmd3) except Exception, eq: stop_err('Error running SAMtools pileup tool\n' + str(eq)) # clean up temp files tmpf1.close() tmpf0.close() if os.path.exists(tmpf0bam): os.remove(tmpf0bam) if os.path.exists(tmpf0bambai): os.remove(tmpf0bambai) if os.path.exists(tmpf1fai): os.remove(tmpf1fai) if __name__ == "__main__" : __main__()
volpino/Yeps-EURAC
tools/samtools/sam_pileup.py
Python
mit
3,931
[ "Galaxy" ]
76f303a6bd3ae23f19302855dd94644b24c0cfdd88e02e00a59849c27c9d1253
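sam_pileup.py chains its samtools calls through os.system() with format strings. A hedged modernization sketch using subprocess argument lists instead, which avoids shell quoting problems and surfaces non-zero exit codes; note that "samtools pileup" only exists in samtools 0.1.x (later releases renamed it "mpileup"), and the paths below are illustrative.

import subprocess

def run_pileup(bam_path, ref_fasta, out_path):
    # index the reference, failing loudly instead of silently continuing
    subprocess.run(["samtools", "faidx", ref_fasta], check=True)
    # write the pileup to out_path; no shell is involved, so no injection risk
    with open(out_path, "w") as out:
        subprocess.run(["samtools", "pileup", "-f", ref_fasta, bam_path],
                       stdout=out, check=True)

# run_pileup("sample.bam", "ref.fa", "sample.pileup")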
# # script.py - non-interactive, script based anaconda interface # # Copyright (C) 2011 # Red Hat, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Author(s): Brian C. Lane <bcl@redhat.com> # from installinterfacebase import InstallInterfaceBase import cmdline from cmdline import setupProgressDisplay import logging log = logging.getLogger("anaconda") stepToClasses = { "install" : "setupProgressDisplay", "complete": "Finished" } class InstallInterface(cmdline.InstallInterface): def enableNetwork(self): # Assume we want networking return True def display_step(self, step): if stepToClasses.has_key(step): s = "nextWin = %s" % (stepToClasses[step],) exec s nextWin(self.anaconda) else: errtxt = _("Can't continue at interactive step (%s)." % (step,)) print(errtxt) raise RuntimeError(errtxt) def Finished(anaconda): """ Install is finished. Let's just exit. """ return 0
kalev/anaconda
pyanaconda/script.py
Python
gpl-2.0
1,640
[ "Brian" ]
3b48c71760848879b49fe308bebe2d919bdb1f7c0cbda995f20211577841128b
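display_step above resolves its handler by building a statement string and exec-ing it. A sketch of the usual dict-of-callables alternative; the handler functions here are illustrative stand-ins for the ones named in stepToClasses, not anaconda's real implementations.

def setup_progress_display(anaconda):   # illustrative stand-in
    return 0

def finished(anaconda):                 # illustrative stand-in
    return 0

# map step names straight to callables; no exec, no string building
STEP_HANDLERS = {"install": setup_progress_display, "complete": finished}

def display_step(anaconda, step):
    try:
        handler = STEP_HANDLERS[step]
    except KeyError:
        raise RuntimeError("Can't continue at interactive step (%s)." % step)
    return handler(anaconda)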
# Copyright (C) 2014 # Pierre de Buyl # Copyright (C) 2012,2013 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" ************************************************* **espressopp.interaction.HarmonicTrap** ************************************************* .. math:: U = \frac{1}{2} k d^2 where :math:`k` is the trap stiffness and :math:`d` is the distance of the particle from the trap center. .. function:: espressopp.interaction.HarmonicTrap() .. function:: espressopp.interaction.SingleParticleHarmonicTrap(system, potential) :param system: the system the interaction belongs to :param potential: the HarmonicTrap potential to apply :type system: espressopp.System :type potential: espressopp.interaction.HarmonicTrap .. function:: espressopp.interaction.SingleParticleHarmonicTrap.setPotential(potential) :param potential: the new HarmonicTrap potential :type potential: espressopp.interaction.HarmonicTrap """ from espressopp import pmi from espressopp.esutil import * from espressopp.interaction.SingleParticlePotential import * from espressopp.interaction.Interaction import * from _espressopp import interaction_HarmonicTrap, interaction_SingleParticleHarmonicTrap class HarmonicTrapLocal(SingleParticlePotentialLocal, interaction_HarmonicTrap): def __init__(self): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_HarmonicTrap) class SingleParticleHarmonicTrapLocal(InteractionLocal, interaction_SingleParticleHarmonicTrap): def __init__(self, system, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_SingleParticleHarmonicTrap, system, potential) def setPotential(self, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotential(self, potential) if pmi.isController: class HarmonicTrap(SingleParticlePotential): 'The HarmonicTrap potential.' pmiproxydefs = dict( cls = 'espressopp.interaction.HarmonicTrapLocal', pmiproperty = ['k', 'center'] ) class SingleParticleHarmonicTrap(Interaction): __metaclass__ = pmi.Proxy pmiproxydefs = dict( cls = 'espressopp.interaction.SingleParticleHarmonicTrapLocal', pmicall = ['setPotential'] )
capoe/espressopp.soap
src/interaction/HarmonicTrap.py
Python
gpl-3.0
3,020
[ "ESPResSo" ]
c42238b80ab63a69a255200fd4759073252c07111eabbb5be7389ededab7c4de
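A hedged usage sketch assembled from the docstring above: build the trap potential, set its two exposed properties (k and center), and attach it through the single-particle interaction wrapper. The system construction itself (box, storage, integrator) is assumed to exist already, and make_system is a hypothetical helper.

import espressopp

system = make_system()  # hypothetical helper returning a configured espressopp.System

trap = espressopp.interaction.HarmonicTrap()
trap.k = 10.0                    # stiffness in U = 1/2 k d^2
trap.center = (5.0, 5.0, 5.0)    # trap center inside the box

interaction = espressopp.interaction.SingleParticleHarmonicTrap(system, trap)
system.addInteraction(interaction)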
#! /usr/bin/env python3 metadata = { "name" : "DIMS report", "output" : "html", "description" : "Generate a report for Moderate and Severe Intellectual Disability (DIMS)", "localisation" : ["fr-FR"] } def report_data(analysis_id, data, cache_path, output_path, annso_core=None): import collections import csv import glob import gzip import itertools import json import logging import os import pprint import pysam import requests import shutil import subprocess import tempfile import wand.color import wand.image # report generation based on jinja2 template import jinja2 # Need some customisation to be able to request the website import http.client http.client._MAXHEADERS = 1000 from bs4 import BeautifulSoup # Need virtual display to take website snapshot with cutycapt import pyvirtualdisplay from pyvirtualdisplay.smartdisplay import SmartDisplay from core.framework import log, war, err, chr_from_db from core.model import db_engine def notify(msg, data): if annso_core is not None: annso_core.notify_all({'msg':msg, 'data' : data}) __version__ = '0.1.0' notify('report_dims', {'analysis_id' : analysis_id, 'progress_label' : 'Initialising report data', 'progress_percent' : 0}) genemap_api = 'http://api.omim.org/api/search/geneMap' omim_api = 'http://api.omim.org/api/entry' entrez_api = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/{}.fcgi' omim_api_key_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'omim_api_key') strasbourg_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'db/strasbourg_di_panels.csv') sfari_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'db/sfari_20160914.csv') rvis_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'db/rvis_v3_20160312.csv') morbid_map_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'db/Morbid-COe-Eichler_20160914.csv') # Check that omim api key is defined if os.path.exists(omim_api_key_filename): with open(omim_api_key_filename, 'rt') as omim_api_key_file: omim_api_key = omim_api_key_file.read() else: omim_api_key = None annotation_ids = { 'SnpEff': ['Annotation', 'Annotation_Impact', 'Feature_Type', 'Rank', 'HGVS.c', 'HGVS.p'], 'dbNSFP': ['dbNSFP_1000Gp1_AF', 'dbNSFP_ExAC_AF', 'dbNSFP_ESP6500_AA_AF', 'dbNSFP_ESP6500_EA_AF', 'dbNSFP_SIFT_pred', 'dbNSFP_Polyphen2_HDIV_pred', 'dbNSFP_Polyphen2_HVAR_pred', 'dbNSFP_MutationTaster_pred', 'dbNSFP_CADD_phred', 'dbNSFP_LRT_pred', 'dbNSFP_MetaSVM_pred', 'dbNSFP_MutationAssessor_pred', 'dbNSFP_PROVEAN_pred', 'dbNSFP_GERP___RS', 'dbNSFP_FATHMM_pred', 'dbNSFP_PhastCons100way_vertebrate'], 'dbNSFP_1000Gp1': ['dbNSFP_1000Gp1_AFR_AF', 'dbNSFP_1000Gp1_AMR_AF', 'dbNSFP_1000Gp1_EUR_AF', 'dbNSFP_1000Gp1_ASN_AF'], 'dbNSFP_ExAC': ['dbNSFP_ExAC_NFE_AF', 'dbNSFP_ExAC_SAS_AF', 'dbNSFP_ExAC_Adj_AF', 'dbNSFP_ExAC_AFR_AF', 'dbNSFP_ExAC_FIN_AF', 'dbNSFP_ExAC_AMR_AF', 'dbNSFP_ExAC_EAS_AF'], } blacklisted_feature_annotations = set(['upstream_gene_variant', 'downstream_gene_variant', 'intron_variant']) publication_themes = ['autism', 'epilepsy', 'intellectual', 'mental', 'schizophrenia', 'seizures'] # Get ASDP gene list for "dims" id_genes_list = 'https://raw.githubusercontent.com/REGOVAR/GenesPanel/master/intellectual_disability.lst' r = requests.get(id_genes_list) if r.status_code == requests.codes.ok: id_genes = set(r.text.splitlines()) else: war('Unable to access the list of ID genes') id_genes = set() strasbourg_panels = {} with open(strasbourg_filename, 'rt') as strasbourg_file: strasbourg_reader = csv.reader(strasbourg_file, delimiter=',',
quotechar='"') next(strasbourg_reader, None) # skip the headers for row in strasbourg_reader: strasbourg_panels[row[0]] = row[1] sfari_genes = set() with open(sfari_filename, 'rt') as sfari_file: sfari_reader = csv.reader(sfari_file, delimiter=',', quotechar='"') next(sfari_reader, None) # skip the headers for row in sfari_reader: sfari_genes.add(row[0]) rvis_score = {} with open(rvis_filename, 'rt', newline='') as rvis_file: rvis_reader = csv.reader(rvis_file, delimiter=',', quotechar='"') rvis_column_names = next(rvis_reader, None) # headers for row in rvis_reader: rvis_score[row[0]] = { rvis_column_names[column_id]: column_value for column_id, column_value in enumerate(row) } morbid_map_score = {} # with open(morbid_map_filename, 'rt') as morbid_map_file: # morbid_map_reader = csv.reader(morbid_map_file, delimiter=',', quotechar='"') # next(morbid_map_reader, None) # skip the headers # for row in morbid_map_reader: # values = [int(row[column_id] or 0) if row[column_id] and row[column_id] != '#N/A' else 0 for column_id in [4, 5, 22, 23, 24]] # values.append(sum(values[2:5])) # morbid_map_score[row[3]] = values class ModeData: def __init__(self, vcf_filename, min_variant_count): self.vcf_filename = vcf_filename self.min_variant_count = min_variant_count def fill_omim_info(gene_data, gene_name): info_filename = os.path.join(cache_path, 'omim_info_{}'.format(gene_name)) if os.path.exists(info_filename): with open(info_filename, 'rt') as info_file: info = json.load(info_file) gene_data.mim_number = info['mim_number'] gene_data.name = info['name'] gene_data.symbols = info['symbols'] gene_data.text = info['text'] return def get_gene_map_list(name): r = requests.get(genemap_api, params = { 'search': name, 'format': 'json', 'apiKey': omim_api_key, }) if r.status_code == requests.codes.ok: data = r.json() response = data['omim']['searchResponse'] if response['totalResults'] == 0: if response['searchSpelling']: return get_gene_map_list(response['searchSpelling']) return None return response['geneMapList'] gene_map_list = get_gene_map_list(gene_name) if gene_map_list: for gene_map_list_entry in gene_map_list: gene_map = gene_map_list_entry['geneMap'] gene_map['geneSymbols'] = [symbol.strip() for symbol in gene_map['geneSymbols'].split(',')] symbols = set([symbol.lower() for symbol in gene_map['geneSymbols']]) if gene_name.lower() in symbols: gene_data.mim_number = gene_map['mimNumber'] gene_data.name = gene_map['geneName'] gene_data.symbols = gene_map['geneSymbols'] gene_data.text = [] r = requests.get(omim_api, params = { 'mimNumber': gene_data.mim_number, 'include': 'text', 'format': 'json', 'apiKey': omim_api_key, }) if r.status_code == requests.codes.ok: data = r.json() gene_entry = data['omim']['entryList'][0]['entry'] for textSection in gene_entry['textSectionList']: gene_data.text.append(textSection['textSection']['textSectionContent']) else: war('Unable to get the OMIM entry for gene {}'.format(gene_name)) break else: war('Unable to find the OMIM gene map for gene {}'.format(gene_name)) else: war('Unable to find the OMIM gene map for gene {}'.format(gene_name)) info = { 'mim_number': gene_data.mim_number, 'name': gene_data.name, 'symbols': gene_data.symbols, 'text': gene_data.text, } with open(info_filename, 'wt') as info_file: json.dump(info, info_file) class GeneData: def __init__(self, name): self.mim_number = None self.name = None self.symbols = [] self.text = [] self.article_count = 0 self.articles = [] fill_omim_info(self, name) self.strasbourg_panel = strasbourg_panels.get(name, 
None) self.strasbourg_panel_as = None if not self.strasbourg_panel: for symbol in self.symbols: self.strasbourg_panel = strasbourg_panels.get(symbol, None) if self.strasbourg_panel: self.strasbourg_panel_as = symbol break self.id_gene = (name in id_genes) self.id_gene_as = None if not self.id_gene: for symbol in self.symbols: self.id_gene = (symbol in id_genes) if self.id_gene: self.id_gene_as = symbol break self.sfari_gene = (name in sfari_genes) self.sfari_gene_as = None if not self.sfari_gene: for symbol in self.symbols: self.sfari_gene = (symbol in sfari_genes) if self.sfari_gene: self.sfari_gene_as = symbol break self.rvis_score = rvis_score.get(name, None) self.rvis_score_as = None if not self.rvis_score: for symbol in self.symbols: self.rvis_score = rvis_score.get(symbol, None) if self.rvis_score: self.rvis_score_as = symbol break self.morbid_map_score = morbid_map_score.get(name, None) self.morbid_map_score_as = None if not self.morbid_map_score: for symbol in self.symbols: self.morbid_map_score = morbid_map_score.get(symbol, None) if self.morbid_map_score: self.morbid_map_score_as = symbol break self.hbt_image = get_hbt_image(name) self.hbt_image_as = None if not self.hbt_image: for symbol in self.symbols: self.hbt_image = get_hbt_image(symbol) if self.hbt_image: self.hbt_image_as = symbol break self.ta_graph = get_ta_image(name) self.ta_image_as = None if not self.ta_graph: for symbol in self.symbols: self.ta_graph = get_ta_image(symbol) if self.ta_graph: self.ta_image_as = symbol break if os.path.exists(self.ta_graph): with open(self.ta_graph, 'r') as myfile: self.ta_graph=myfile.read() else: self.ta_graph = "no graph" # TODO FIXME protein / tissue atlas snapshot self.sp_image = get_sp_image(name) self.sp_image_as = None if not self.sp_image: for symbol in self.symbols: self.sp_image = get_sp_image(symbol) if self.sp_image: self.sp_image_as = symbol break self.decipher_image = get_decipher_image(name) self.decipher_image_as = None if not self.decipher_image: for symbol in self.symbols: self.decipher_image = get_decipher_image(symbol) if self.decipher_image: self.decipher_image_as = symbol break fill_pubmed_articles(self, name) self.articles_as = None if not self.article_count: for symbol in self.symbols: fill_pubmed_articles(self, symbol) if self.article_count: self.article_as = symbol break self.articles = collections.OrderedDict() for theme in publication_themes: pubmed_data = PubMedData() fill_pubmed_articles(pubmed_data, name, theme) if not pubmed_data.article_count: for symbol in self.symbols: fill_pubmed_articles(pubmed_data, symbol, theme) if pubmed_data.article_count: pubmed_data.gene_name = symbol break if pubmed_data.article_count: self.articles[theme] = pubmed_data class Gene: __cache = {} def __init__(self, name, variants): self.name = name self.variants = variants self.data = Gene.__cache.setdefault(name, GeneData(name)) self.aliases = list(itertools.chain([self.name], sorted(set(self.data.symbols) - set([self.name])))) # TODO aliases from genecards and NCBI as well def get_formated_aliases(self, template): return [template.format(alias, alias) for alias in self.aliases] def get_variant_annotations(self, *annotation_ids): if len(annotation_ids) > 1: return [', '.join(value) for value in zip(*tuple([self.get_variant_annotations(annotation_id) for annotation_id in annotation_ids]))] else: annotation_id = annotation_ids[0] def stringify(value): if type(value) is float: if value < .0010: return '{:.2e}'.format(value) else: return '{:.4f}'.format(value) return str(value) 
variant_annotations = [] for variant in self.variants: annotations = variant.annotations.get(annotation_id, '') if type(annotations) is tuple: variant_annotations.append(', '.join([stringify(annotation) for annotation in annotations])) else: variant_annotations.append(stringify(annotations)) return variant_annotations def get_formated_variant_ids(self, template): return [variant.get_formated_ids(template) for variant in self.variants] def get_clinical_significances(self): return [variant.get_clinical_significances() for variant in self.variants] class VariantData: def __init__(self): self.genes = set() class Variant: __cache = {} def __init__(self, chromosome, position, reference, alternatives, ids, clinical_significances, gene_name): self.chromosome = chromosome self.position = position self.reference = reference self.alternatives = alternatives self.ids = ids self.clinical_significances = clinical_significances self.gene_name = gene_name self.annotations = {} self.features = {} self.data = Variant.__cache.setdefault((chromosome, position, reference, alternatives), VariantData()) self.data.genes.add(gene_name) def has_overlaps(self): return len(self.data.genes) > 1 def overlaps(self): return sorted([gene for gene in self.data.genes if gene != self.gene_name]) def get_formated_ids(self, template): return ', '.join([template.format(variant_id, variant_id) for variant_id in self.ids]) def get_clinical_significances(self): return ', '.join([clinical_significance and '{}: {}'.format(*clinical_significance) or '' for clinical_significance in self.clinical_significances]) class Feature: def __init__(self, name): self.name = name self.annotations = {} def upper_first_letter(string): return string[:1].upper() + string[1:] def get_vcf_filenames(directory, pattern): return glob.glob(os.path.join(root, directory, pattern), recursive = True) def extract_index(filename): return filename.split(os.sep)[-1].split('.')[1][:-2] def get_indexes_filenames(): result = {} for mode, (directory, pattern, min_variant_count) in modes.items(): filenames = get_vcf_filenames(directory, pattern) for filename in filenames: index = extract_index(filename) result.setdefault(index, collections.OrderedDict())[mode] = ModeData(filename, min_variant_count) return collections.OrderedDict(sorted(result.items(), key = lambda index: index[3:5] + index[0:2] + index[6:9])) def get_snpeff_annotation_id(info): if 'ANN' in info: snpeff_annotation_id = 'ANN' if 'EFF' in info: war('Found both ANN and EFF in header, using ANN') elif 'EFF' in info: snpeff_annotation_id = 'EFF' else: snpeff_annotation_id = None war('Neither EFF nor ANN found in header') return snpeff_annotation_id def get_snpeff_annotation_columns(snpeff_metadata): # metadata: the INFO header line that carries the given annotation ID annotations = [annotation.strip() for annotation in snpeff_metadata.description.split("'")[1].split('|')] snpeff_annotation_columns = {annotation: position for position, annotation in enumerate(annotations)} return snpeff_annotation_columns def get_vep_annotation_columns(vep_metadata): annotations = [annotation.strip() for annotation in vep_metadata.description.split(':')[1].split('|')] vep_annotation_columns = {annotation: position for position, annotation in enumerate(annotations)} return vep_annotation_columns def extract_genes(vcf_filename, min_variant_count): try: vcf_context = pysam.VariantFile(vcf_filename) except ValueError: war('Error while loading {}, probably bug #259 of pysam'.format(vcf_filename)) return [] with vcf_context as
vcf_file: snpeff_annotation_id = get_snpeff_annotation_id(vcf_file.header.info) if snpeff_annotation_id is None: war('SnpEff annotation ID (ANN or EFF) not found in header for {}'.format(vcf_filename)) return [] snpeff_metadata = vcf_file.header.info[snpeff_annotation_id] snpeff_annotation_columns = get_snpeff_annotation_columns(snpeff_metadata) if 'Gene_Name' not in snpeff_annotation_columns: war('Gene_Name not found in SnpEff annotation description in header for {}'.format(vcf_filename)) return [] gene_name_column_number = snpeff_annotation_columns['Gene_Name'] feature_id_column_number = snpeff_annotation_columns['Feature_ID'] if 'CSQ' in vcf_file.header.info: vep_metadata = vcf_file.header.info['CSQ'] vep_annotation_columns = get_vep_annotation_columns(vep_metadata) feature_annotation_column_number = snpeff_annotation_columns['Annotation'] clinical_significance_levels = {level.strip(): label.strip().lower() for level, label in [level_info.split('-') for level_info in 'CLNSIG' in vcf_file.header.info and vcf_file.header.info['CLNSIG'].description.split(',')[1:] or []]} genes = {} for row in vcf_file: # TODO FIXME VEP (CSQ) annotations snpeff_annotations = row.info[snpeff_annotation_id] for snpeff_annotation in snpeff_annotations: feature_annotations = snpeff_annotation.split('|') gene_name = feature_annotations[gene_name_column_number] if gene_name: variant_ids = row.id and row.id.split(';') or [] variant_clinical_significances = 'CLNSIG' in row.info and [(clinical_significance, clinical_significance_levels[clinical_significance]) for clinical_significance in itertools.chain.from_iterable([clinical_significances.split('|') for clinical_significances in row.info['CLNSIG']])] or [] variant = genes.setdefault(gene_name, {}).setdefault((row.chrom, row.pos, row.ref, row.alts), Variant(row.chrom, row.pos, row.ref, row.alts, variant_ids, variant_clinical_significances, gene_name)) for annotation_id in annotation_ids['dbNSFP']: if annotation_id in row.info: variant.annotations[annotation_id] = row.info[annotation_id] dbNSFP_1000Gp1_values = [row.info[annotation_id] for annotation_id in annotation_ids['dbNSFP_1000Gp1'] if annotation_id in row.info] variant.annotations['dbNSFP_1000Gp1_max'] = tuple(max(values) for values in zip(*tuple(dbNSFP_1000Gp1_values))) dbNSFP_ExAC_values = [row.info[annotation_id] for annotation_id in annotation_ids['dbNSFP_ExAC'] if annotation_id in row.info] variant.annotations['dbNSFP_ExAC_max'] = tuple(max(values) for values in zip(*tuple(dbNSFP_ExAC_values))) #print(row, dbNSFP_ExAC_values, variant.annotations['dbNSFP_ExAC_max']) feature_name = feature_annotations[feature_id_column_number] feature_annotation = feature_annotations[feature_annotation_column_number] if feature_name not in variant.features and \ feature_annotation not in blacklisted_feature_annotations: feature = Feature(feature_name) for annotation_id in annotation_ids['SnpEff']: feature.annotations[annotation_id] = feature_annotations[snpeff_annotation_columns[annotation_id]] variant.features[feature_name] = feature def filter_genes(): for gene_name, variants in genes.items(): variants = [variant for variant in variants.values() if variant.features] for variant in variants: variant.features = sorted(variant.features.values(), key = lambda feature: feature.name) if len(variants) >= min_variant_count: yield Gene(gene_name, sorted(variants, key=lambda variant: (variant.chromosome, variant.position, variant.reference))) return sorted(filter_genes(), key = lambda gene: gene.name) def render_report(data, 
template_name): template = templates.get_template('{}.tpl'.format(template_name)) return template.render( data=data, len=len, upper_first_letter=upper_first_letter, ) def convert_html(source, destination, delay=0): print (source, destination, delay) with SmartDisplay(visible=0, bgcolor='black') as disp: subprocess.run(['cutycapt', '--url="{}"'.format(source), '--out={}'.format(destination), '--delay={}'.format(delay)]) def convert_doc(source, destination): subprocess.run(['pandoc', '-s', '-o', destination, source]) def get_hbt_image(gene_name): image_url = 'https://hbatlas.org/hbtd/images/wholeBrain/{}.pdf'.format(gene_name) image_filename = os.path.join(cache_path, 'hbt_image_{}.png'.format(gene_name)) missing_filename = os.path.join(cache_path, 'hbt_image_{}.missing'.format(gene_name)) if os.path.exists(image_filename) : return image_filename elif os.path.exists(missing_filename): return None try: r = requests.get(image_url, stream=True) if r.status_code == requests.codes.ok: with tempfile.TemporaryFile() as image_file: r.raw.decode_content = True shutil.copyfileobj(r.raw, image_file) image_file.seek(0) with wand.image.Image(file=image_file, resolution=200) as image: with wand.image.Image(width=image.width, height=image.height, background=wand.color.Color('white')) as bg: bg.composite(image, 0, 0) bg.save(filename=image_filename) return image_filename else: war('Unable to retrieve PDF from HBT for {}'.format(gene_name)) except: war('Unable to convert HBT PDF to PNG for the gene {}'.format(gene_name)) with open(missing_filename, 'wb') as image: pass return None def get_sp_image(gene_name): image_url = 'https://string-db.org/api/image/network?identifier={}_HUMAN'.format(gene_name) image_filename = os.path.join(cache_path, 'sp_image_{}.png'.format(gene_name)) missing_filename = os.path.join(cache_path, 'sp_image_{}.missing'.format(gene_name)) if os.path.exists(image_filename): return image_filename elif os.path.exists(missing_filename): return None try: r = requests.get(image_url, stream=True) if r.status_code == requests.codes.ok: with open(image_filename, 'wb') as image_file: r.raw.decode_content = True shutil.copyfileobj(r.raw, image_file) return image_filename else: war('Unable to retrieve image from String Pathway for {}'.format(gene_name)) except: war('Unable to retrieve image from String Pathway for {}'.format(gene_name)) with open(missing_filename, 'wb') as image: pass return None def get_decipher_image(gene_name): dec_filename = os.path.join(cache_path, 'decipher_image_{}.png'.format(gene_name)) dec_url = 'https://decipher.sanger.ac.uk/search?q=%s#consented-patients/results' % gene_name if os.path.isfile(dec_filename) : return dec_filename # 1- retrieve image convert_html(dec_url, '{}_source.png'.format(dec_filename), 3000) # 2- crop image if os.path.isfile(dec_filename + '_source.png'): with open(dec_filename + '_source.png', 'rb') as f: with wand.image.Image(file=f) as image: w = image.width - 10 h = image.height - 315 - 360 image.crop(5, 315, width=w, height=h) image.save(filename=dec_filename) os.remove(dec_filename + '_source.png') return dec_filename else: war('Unable to retrieve image from Decipher for the gene {}'.format(gene_name)) return None def get_ta_image(gene_name): ta_filename = os.path.join(cache_path, 'ta_image_{}.html'.format(gene_name)) ta_url = 'http://www.proteinatlas.org/search/%s' % gene_name if os.path.isfile(ta_filename) : return ta_filename # 1- Retrieve "true url" from TA "user website url" r = requests.get(ta_url) if r.status_code == requests.codes.ok: 
soup = BeautifulSoup(r.text, 'html.parser') for link in soup.find_all('a'): if (link.text.upper()==gene_name.upper()): ta_url = 'http://www.proteinatlas.org' + link.get('href') # 2- Retrieve html page with graphs r = requests.get(ta_url) if r.status_code == requests.codes.ok: soup = BeautifulSoup(r.text, 'html.parser') html = "" # """<script language="javascript" src="https://www.proteinatlas.org/utils/jquery.min.js?version=15.0.0" type="text/javascript"></script> # <script language="javascript" src="https://www.proteinatlas.org/common.js?version=15.0.0" type="text/javascript"></script> # <script language="javascript" src="https://www.proteinatlas.org/utils/d3.min.js?version=15.0.0" type="text/javascript"></script>""" p = soup.find(text='RNA EXPRESSION OVERVIEW') while p.name != 'p': p = p.parent html += p.findParent('table').prettify() # 3- clean html by removing all image and toggle control html = BeautifulSoup(html, 'html.parser') for x in html.findAll('img'): x.extract() for x in html.findAll("div", { "class" : "slideToggle" }): x.extract() # 4- save into file with open(ta_filename, 'wt') as ta_file: ta_file.write(html.prettify()) # 5- convert html into png # TODO/FIXME : cutycapt seems not able to convert local html file into image in python virtual display (but work fine in shell with xvfb-run) # convert_html('file:{}.html'.format(ta_filename), ta_filename, 500) # os.remove(ta_filename+'.html') return ta_filename class PubMedData: def __init__(self): self.gene_name = None self.article_count = 0 self.articles = [] def fill_pubmed_articles(pubmed_data, gene_name, theme=''): info_filename = os.path.join(cache_path, 'pubmed_info_{}_{}'.format(gene_name, theme)) if os.path.exists(info_filename): with open(info_filename, 'rt') as info_file: info = json.load(info_file) pubmed_data.article_count = info['article_count'] pubmed_data.articles = info['articles'] return # TODO FIXME pubmed from all aliases as well (aggregate with OR) pubmed_ids = [] r = requests.get(entrez_api.format('esearch'), params = { 'term': '{} AND {}'.format(gene_name, theme), 'db': 'pubmed', 'retmode': 'json', 'retmax': 10, }) if r.status_code == requests.codes.ok: data = r.json() pubmed_ids = data['esearchresult']['idlist'] pubmed_data.article_count = int(data['esearchresult']['count']) else: war('Unable to get the PubMed publications for gene {} and theme {}'.format(gene_name, theme)) return r = requests.get(entrez_api.format('esummary'), params = { 'id': ','.join(pubmed_ids), 'db': 'pubmed', 'retmode': 'json', }) if r.status_code == requests.codes.ok: data = r.json() for pubmed_id in pubmed_ids: pubmed_data.articles.append(data['result'][pubmed_id]) else: war('Unable to get the PubMed publication details for gene {} and theme {}'.format(gene_name, theme)) return info = { 'article_count': pubmed_data.article_count, 'articles': pubmed_data.articles } with open(info_filename, 'wt') as info_file: json.dump(info, info_file) # For each variant, find gene log('Start DIMS report generation for variant ids: {}'.format(','.join(data['variants']))) samples = data['samples'] sql = "SELECT DISTINCT v.chr, v.pos, v.ref, v.alt, rg.name2 FROM variant_hg19 v " sql += "INNER JOIN refgene_hg19 rg ON v.chr = rg.chr AND rg.txrange @> int8(v.pos) " sql += "WHERE v.id IN (" + ','.join(data['variants']) + ") and rg.name2<>'' ORDER BY rg.name2" genes = [] variants = [] gene_name = "" gs = [] for r in db_engine.execute(sql): if gene_name != r.name2: if gene_name != "": gs.append(gene_name) genes.append(Gene(gene_name, variants)) gene_name =
r.name2 variants = [] variants.append( Variant(chr_from_db(r.chr), r.pos, r.ref, r.alt, [], [], gene_name)) genes.append(Gene(gene_name, variants)) log('Following gene names have been found: {}'.format(','.join(gs))) # Generate report def render_jinja_html(template_loc, file_name,**context): return jinja2.Environment(loader=jinja2.FileSystemLoader(template_loc+'/')).get_template(file_name).render(context) html = render_jinja_html(os.path.dirname(os.path.abspath(__file__)), 'report.html', genes=genes) with open(output_path, "w") as f: f.write(html) # def get_data(): # logger.info('Listing VCF files and indexes...') # data = get_indexes_filenames() # logger.info('Extracting gene information...') # with multiprocessing.Pool() as pool: # for index, index_data in data.items(): # for mode, mode_data in index_data.items(): # mode_data.async_job = pool.apply_async(extract_genes, (mode_data.vcf_filename, mode_data.min_variant_count)) # for index, index_data in data.items(): # for mode, mode_data in index_data.items(): # mode_data.genes = mode_data.async_job.get() # del mode_data.async_job # return data # if __name__ == '__main__': # data = get_data() # if data: # os.makedirs('reports', exist_ok=True) # logger.info('Rendering reports as reStructuredText...') # for index, index_data in data.items(): # with open('reports/report_{}.rst'.format(index), 'wt') as report: # report.write(render_report({index: index_data}, 'restructuredtext')) # logger.info('Converting reports from reStructuredText to Microsoft Word...') # pool = multiprocessing.Pool() # for index in data: # pool.apply_async(convert_doc, ('reports/report_{}.rst'.format(index), 'reports/report_{}.docx'.format(index))) # pool.close() # pool.join() # logger.info('{} reports generated.'.format(len(data)))
REGOVAR/regovar-server
annso/reports/dims/report.py
Python
agpl-3.0
34,592
[ "pysam" ]
62ea910aa9ad59b9f07d8a931158226d1be79bab95f4918f9ab00c4be40754b8
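Nearly every external lookup in report.py above (OMIM, PubMed, HBT/String/Decipher images) is cached as a file under cache_path and re-read on later runs. A standalone sketch of that JSON-file cache pattern; cache_path and the fetch callable here are illustrative stand-ins.

import json
import os

def cached_json(cache_path, key, fetch):
    """Return fetch()'s result, persisting it as JSON keyed by 'key'."""
    path = os.path.join(cache_path, "cache_{}.json".format(key))
    if os.path.exists(path):
        # cache hit: reuse the stored result instead of calling the API again
        with open(path, "rt") as f:
            return json.load(f)
    result = fetch()
    with open(path, "wt") as f:
        json.dump(result, f)
    return result

# info = cached_json("/tmp", "omim_MECP2", lambda: {"mim_number": 300005})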
import copy import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.animation as animation import matplotlib.patches as mpatches from scipy.stats import beta from .simulation import ReMapSim from .formatting import cmap_colors, mpl_defaults import time def HistAnimation(core_dict): """ Makes an animation of a Histogram. TODO * fix bins * custom user labels """ # defaults fig, ax = mpl_defaults.ani() plt.rcParams['figure.titlesize'] = 14 plt.rcParams['font.size'] = 12 plt.rcParams['axes.labelsize'] = 13 # plot variables cmap1 = plt.cm.get_cmap('BuGn') cmap2 = plt.cm.get_cmap('Blues') bins = np.linspace(0,core_dict['bins'][0],100) # formatting plt.tick_params( axis='y', which='both', left='off', right='off') plt.tick_params( axis='x', which='both', top='off') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.spines['left'].set_color('none') # load data data = np.loadtxt(core_dict['DataFolder'] + "/data0.txt", float) # update function used by the animation def _update(num, data): nonlocal cmap1, bins, ax # clear axes, load data to refresh plt.cla() data = np.loadtxt(core_dict['DataFolder'] + "/data0.txt", float) # plots plt.axvline(x = np.average(data), color = cmap1(0.5), ls="--" , linewidth=1.7) plt.hist(data, bins, alpha=0.6, normed=1, facecolor=cmap1(0.8), label="X ~ Beta(2,5)") # labels legend = plt.legend(loc='upper right', framealpha = 1.0) legend.get_frame().set_linewidth(1) plt.title(core_dict['PlotTitle'], style='italic') plt.xlabel('Regret') plt.ylabel('Frequency') ax.set_ylim([0,0.2]) # build the animation and run it my_ani = animation.FuncAnimation(fig, _update, fargs=(data, ), interval=1000/core_dict['FPS']) plt.show() def ConfAnimation(core_dict): """ Animates the confidence intervals. Since only one cycle is run at a given time, the bandit algorithm is run at the same time as the animation rather than using an external data source. 
TODO * y limits might not be correct in some cases; specify """ # defaults fig, ax = mpl_defaults.ani() # map the core_dict to actual objects ReMapSim(core_dict['sim'][0]) # plot variables sim = core_dict['sim'][0]['Simulation'] arms = sim.bandit.n_arms title = "Upper Confidence Animation\n" not_picked_color = plt.cm.get_cmap('BuGn')(0.6) picked_color = plt.cm.get_cmap('OrRd')(0.6) bar_width = 0.45 # simulation variables horizon = core_dict['horizon'] confidence = sim.bandit.U_conf index = np.arange(arms) # initialize the bandit for i in range(arms): sim.bandit.pullArm(i) pause = False # the animation update function def _update(num, confidence): nonlocal ax, pause # step the bandit one timestep if not pause: sim.runStep(1, horizon) confidence = sim.bandit.U_conf max_confidence = np.amax(sim.bandit.U_conf) # clear plot to redraw plt.cla() x_labels = ["Pulls: "+str(item) for item in sim.bandit.T] # make a bar plot bar_list = plt.bar(index, confidence, bar_width, align='center', color=not_picked_color, edgecolor='#e6e6e6') # add a maximum line plt.axhline(y=max_confidence, ls='dashdot', xmin=0.0, xmax=1.0, linewidth=2.0, color='#434343') # change the colour of the best arm (the picked arm) bar_list[np.argmax(confidence)].set_facecolor(picked_color) # add dashed lines for the actual mean of each arm for i in range(sim.bandit.n_arms): x = [i - bar_width/2, i + bar_width/2] y = [sim.bandit.mean_list[i], sim.bandit.mean_list[i]] plt.plot(x,y,'--', linewidth = 2, color='#000000', ls='dashed', mfc='white') # add dotted lines for the empirical mean of each arm for i in range(sim.bandit.n_arms): x = [i - bar_width/2, i + bar_width/2] y = [sim.bandit.U[i], sim.bandit.U[i]] plt.plot(x,y,'--', linewidth = 1, color='#000000', ls='-', mfc='white') # add x labels plt.xticks(index, x_labels, weight='bold') # add two legends: # * a real legend # * one to display interesting information picked_color_path = mpatches.Patch( color=picked_color, label='Arm chosen') not_picked_color_path = mpatches.Patch( color=not_picked_color, label='Arm(s) not chosen') dash_label, = plt.plot([], [], ls="dashed", label="Actual Mean", color='black') dashdot_label, = plt.plot([], [], ls="-", linewidth=1.0, label="Empirical Mean", color='black') legend_1 = plt.legend( handles=[picked_color_path, not_picked_color_path, dash_label, dashdot_label], loc='upper right') plt.gca().add_artist(legend_1) # prevent legend_2 from overwriting legend_1.get_frame().set_linewidth(1) blank_path_1 = mpatches.Patch( label='Regret: {:04.2f}'.format(sim.bandit.giveRegret())) blank_path_2 = mpatches.Patch( label='Timestep: {:2d}'.format(sim.bandit.timestep[0])) legend_2 = plt.legend( handles=[blank_path_1, blank_path_2], loc='upper left', handlelength=0, handletextpad=0) legend_2.get_frame().set_linewidth(1) # formatting ax.grid(False) # resets grid default ax.set_ylim([0,1.2]) ax.yaxis.grid(True) # turns horizontal grid on plt.title(title) def onClick(event): nonlocal pause pause ^= True fig.canvas.mpl_connect('button_press_event', onClick) # the animation declaration my_ani = animation.FuncAnimation(fig, _update, fargs=(sim.bandit.U_conf, ), interval=1000/core_dict['FPS']) # fargs is used as the data required in update_hist plt.show() def EllipseAnimation(core_dict): """ Animates the confidence intervals. Since only one cycle is run at a given time, the bandit algorithm is run at the same time as the animation rather than using an external data source.
TODO * y limits might not be correct in some cases; specify """ # defaults fig, ax = mpl_defaults.ani() ax.set_aspect('equal') # map the core_dict sim to actual objects ReMapSim(core_dict['sim'][0]) # simulation variables sim = core_dict['sim'][0]['Simulation'] sim_dict = core_dict['sim'][0] arms = sim.bandit.arm_vecs mean = np.array(sim.bandit.mean) horizon = core_dict['horizon'] title = "2D Confidence Ellipse Animation\n" # the locations of the perpendicular projection of the arm vector onto the # mean vector projs = np.array( [mean / np.linalg.norm(mean)**2 * np.inner(arm, mean) for arm in arms]) cmap1 = plt.cm.get_cmap('BuGn') cmap2 = plt.cm.get_cmap('Reds') # establish plot boundaries all_points = np.vstack((arms, mean)) x_max = np.amax(all_points[:,0]) x_min = np.amin(all_points[:,0]) y_max = np.amax(all_points[:,1]) y_min = np.amin(all_points[:,1]) x_margin = (x_max-x_min) y_margin = (y_max-y_min) x_max += x_margin/3 x_min -= x_margin/5 # less margin on left y_max += y_margin/3 y_min -= y_margin/5 # less margin at bottom # set up the figure and axes # initialize the bandit for i in range(sim.bandit.n_arms): sim.bandit.pullArm(i) def plot_ellipse(rho, cov, pos, ax=None, **kwargs): """ Plots an ellipse based on the specified covariance matrix (`cov`), scaled by the radius parameter `rho`. Additional keyword arguments are passed on to the ellipse patch artist. Parameters ---------- rho : the radius of the ellipse cov : The 2x2 covariance matrix to base the ellipse on pos : The location of the center of the ellipse. Expects a 2-element sequence of [x0, y0]. ax : The axis that the ellipse will be plotted on. Defaults to the current axis. Additional keyword arguments are passed on to the ellipse patch. Returns ------- A matplotlib ellipse artist """ def eigsorted(cov): vals, vecs = np.linalg.eigh(cov) order = vals.argsort()[::-1] return vals[order], vecs[:,order] if ax is None: ax = plt.gca() vals, vecs = eigsorted(cov) theta = np.degrees(np.arctan2(*vecs[:,0][::-1])) # width and height are full width, not radius width, height = 2 * np.sqrt(rho * vals) ellip = mpatches.Ellipse( xy=pos, width=width, height=height, angle=theta, **kwargs) ax.add_artist(ellip) return ellip # the animation update function pause = False def _update(num, confidence): nonlocal ax, pause # step the bandit if not pause: sim.runStep(1, horizon) rho = sim.bandit.dim * np.log(sim.bandit.timestep[0]) G_inv = np.linalg.inv(sim.bandit.G) bandit_mean = sim.bandit.U chosen_arm = sim.bandit.arm_vecs[np.argmax(sim.bandit.U_conf)] # clear plot to redraw plt.cla() # general formatting ax.grid(True) ax.set_ylim([y_min,y_max]) ax.set_xlim([x_min,x_max]) ax.set_aspect('equal') for label in ax.get_xticklabels() + ax.get_yticklabels(): label.set_bbox(dict( facecolor='white', edgecolor='None', alpha=0.65 )) # format the axes ax.spines['left'].set_visible(True) ax.xaxis.set_ticks_position('bottom') ax.spines['bottom'].set_position(('data',0)) ax.yaxis.set_ticks_position('left') ax.spines['left'].set_position(('data',0)) # remove label option if core_dict['NoAxesTick']: ax.set_xticks([]) ax.set_yticks([]) # adds circle for normalized bandit if sim_dict['Normalized']: ax.add_artist(mpatches.Circle( xy=(0.,0.), radius=1, edgecolor='black', alpha=0.2, facecolor='none')) # position of mean vector labelstr = 'True Mean' if sim_dict['Normalized']: labelstr = 'Normalized Mean' plt.plot([mean[0]],[mean[1]], "o", color='black', label=labelstr) # plot the ellipse and the mean; checks for LevelCurves option if (sim.alg.var_dict['algtype'].__name__ == "TS_Lin" and
core_dict['LevelCurves'] == True): for i in [33, 29, 25, 21, 17, 13, 9, 5, 1]: plot_ellipse(i, G_inv, bandit_mean, alpha=0.1, ls='dashed', facecolor=cmap1(0.2), edgecolor='black') else: plot_ellipse(rho, G_inv, bandit_mean, alpha=0.2, facecolor=cmap1(0.5), edgecolor=cmap1(1.0)) plt.plot([sim.bandit.U[0]], [sim.bandit.U[1]], "o", markerfacecolor=cmap2(0.5), markeredgecolor=cmap2(1.0), label='Approximated Mean') # HelpLines: dashed projection lines to show reward value if core_dict['HelpLines']: for proj, arm in zip(projs, arms): plt.plot([proj[0], arm[0]], [proj[1], arm[1]], color='black', linestyle='dashdot', linewidth=0.8) plt.plot([-mean[0]*50,mean[0]*50],[-mean[1]*50,mean[1]*50], "--", linewidth=0.5, markersize=3, color='black') # arm vector points plt.plot([arm[0] for arm in sim.bandit.arm_vecs], [arm[1] for arm in sim.bandit.arm_vecs], "o", markersize=5, markerfacecolor=cmap1(0.5), markeredgecolor=cmap1(1.0), label='Arm Vectors') # chosen arm; draws a red circle around it plt.plot(chosen_arm[0],chosen_arm[1], "or", markersize=10, markerfacecolor='none', markeredgecolor='red', markeredgewidth=2, label='Chosen Arm') # legend and title legend = plt.legend(loc='upper right') plt.gca().add_artist(legend) legend.get_frame().set_linewidth(1) # additional information legend blank_path_1 = mpatches.Patch( label='Regret: {:04.2f}'.format(sim.bandit.giveRegret())) blank_path_2 = mpatches.Patch( label='Timestep: {:2d}'.format(sim.bandit.timestep[0])) legend_2 = plt.legend( handles=[blank_path_1, blank_path_2], loc='upper left', handlelength=0, handletextpad=0) legend_2.get_frame().set_linewidth(1) plt.title(title) def onClick(event): nonlocal pause pause ^= True fig.canvas.mpl_connect('button_press_event', onClick) # the animation declaration my_ani = animation.FuncAnimation(fig, _update, fargs=(sim.bandit.U_conf, ), interval=1000/core_dict['FPS']) # fargs is used as the data required in update_hist plt.show() def DistAnimation(core_dict): """ Creates an animation of the distributions used for beta and gaussian random sampling and confidence algorithms. 
""" # defaults fig, ax = mpl_defaults.ani() ReMapSim(core_dict['sim'][0]) # simulation variables sim = core_dict['sim'][0]['Simulation'] sim_dict = core_dict['sim'][0] horizon = core_dict['horizon'] n_arms = sim.bandit.n_arms title = "Distribution Animation\n" if sim.alg.var_dict['algtype'].__name__[-5:] == 'Gauss': xlims = [np.amin(sim.bandit.mean_list) - 1.5,np.amax(sim.bandit.mean_list) + 1.5] elif sim.alg.var_dict['algtype'].__name__[-4:] == 'Beta': xlims = [0,1] for i in range(sim.bandit.n_arms): sim.bandit.pullArm(i) # distribution function def npdf(x, mu, sigma): return 0.39894228 / sigma * 0.60653066**(((x-mu)/(sigma))**2) pause = False # the animation update function def _update(num, confidence): nonlocal ax, xlims, pause plt.cla() # step the bandit if not pause: sim.runStep(1, horizon) samples = 500 xdata = np.tile(np.linspace(xlims[0], xlims[1], samples), (n_arms,1)) if sim.alg.var_dict['algtype'].__name__[-5:] == 'Gauss': mu = np.tile( sim.bandit.U.reshape(n_arms,1), (1,samples)) sigma = np.tile( np.sqrt(1.0/sim.bandit.T).reshape(n_arms,1), (1,samples)) ydata = npdf(xdata, mu, sigma) elif sim.alg.var_dict['algtype'].__name__[-4:] == 'Beta': beta_a = np.tile( (sim.bandit.arm_reward+1).reshape(n_arms,1), (1,samples)) beta_b = np.tile( (sim.bandit.T - sim.bandit.arm_reward + 1).reshape(n_arms,1), (1,samples)) ydata = beta.pdf(xdata, beta_a, beta_b) ydata_max = np.amax(ydata, axis=1) ymax = np.maximum(3, np.amax(ydata_max)*1.1) v_line_max = np.minimum(ydata_max/ymax, 1) for i in range(n_arms): cmap = cmap_colors.sequential1[i] plt.plot(xdata[i], ydata[i], "-", color=cmap(0.5)) ax.fill_between(xdata[i], 0, ydata[i], color=cmap(0.5), alpha=0.2, linewidth=0.5, label="Arm {}: Pulls: {}".format(i+1, sim.bandit.T[i])) plt.axvline(sim.bandit.U[i], ymax=v_line_max[i], ls='dashed', color=cmap(0.8)) plt.axvline(sim.bandit.mean_list[i], ymax=1, ls='dashdot', alpha=0.6, color=cmap(0.8)) ylims = [0, ymax] # general formatting ax.grid(True) ax.set_ylim(ylims) ax.set_xlim(xlims) legend = plt.legend(loc='upper right') plt.gca().add_artist(legend) legend.get_frame().set_linewidth(1) # additional information legend blank_path_1 = mpatches.Patch( label='Regret: {:04.2f}'.format(sim.bandit.giveRegret())) blank_path_2 = mpatches.Patch( label='Timestep: {:2d}'.format(sim.bandit.timestep[0])) legend_2 = plt.legend( handles=[blank_path_1, blank_path_2], loc='upper left', handlelength=0, handletextpad=0) legend_2.get_frame().set_linewidth(1) plt.title(title) def onClick(event): nonlocal pause pause ^= True fig.canvas.mpl_connect('button_press_event', onClick) # the animation declaration my_ani = animation.FuncAnimation(fig, _update, fargs=(sim.bandit.U_conf, ), interval=2000/(core_dict['FPS'])) # fargs is used as the data required in update_hist plt.show()
alexrutar/banditvis
banditvis/animation.py
Python
mit
18,173
[ "Gaussian" ]
f7e06ef986ad110b5ac6546ecc6be9d8c4c502d567544d5219588dc5d9215a67
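The npdf() helper in DistAnimation above encodes the normal density with two precomputed constants: 0.39894228 is 1/sqrt(2*pi) and 0.60653066 is exp(-1/2), so 0.60653066 ** (z**2) equals exp(-z**2 / 2). A short self-check of that identity against the explicit formula:

import math

def npdf(x, mu, sigma):
    # 1/sqrt(2*pi) = 0.39894228...; exp(-1/2) = 0.60653066...
    return 0.39894228 / sigma * 0.60653066 ** (((x - mu) / sigma) ** 2)

def npdf_explicit(x, mu, sigma):
    z = (x - mu) / sigma
    return math.exp(-0.5 * z * z) / (sigma * math.sqrt(2.0 * math.pi))

print(abs(npdf(0.3, 0.0, 1.0) - npdf_explicit(0.3, 0.0, 1.0)) < 1e-8)  # True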