Dataset columns (each record below lists text, then repo_name, path, language, license, size, keyword, text_hash):
  text       string  (lengths 12 to 1.05M)
  repo_name  string  (lengths 5 to 86)
  path       string  (lengths 4 to 191)
  language   string  (1 class)
  license    string  (15 classes)
  size       int32   (12 to 1.05M)
  keyword    list    (lengths 1 to 23)
  text_hash  string  (lengths 64 to 64)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# This file contains code for reading metadata from the build system into
# data structures.

r"""Read build frontend files into data structures.

In terms of code architecture, the main interface is BuildReader. BuildReader
starts with a root mozbuild file. It creates a new execution environment for
this file, which is represented by the Sandbox class. The Sandbox class is
used to fill a Context, representing the output of an individual mozbuild
file.

The BuildReader contains basic logic for traversing a tree of mozbuild files.
It does this by examining specific variables populated during execution.
"""

from __future__ import absolute_import, print_function, unicode_literals

import ast
import inspect
import logging
import os
import sys
import textwrap
import time
import traceback
import types

from collections import (
    defaultdict,
    OrderedDict,
)
from io import StringIO

from mozbuild.util import (
    EmptyValue,
    HierarchicalStringList,
    memoize,
    ReadOnlyDefaultDict,
)

from mozbuild.testing import (
    TEST_MANIFESTS,
    REFTEST_FLAVORS,
    WEB_PLATFORM_TESTS_FLAVORS,
)

from mozbuild.backend.configenvironment import ConfigEnvironment

from mozpack.files import FileFinder
import mozpack.path as mozpath

from .data import (
    AndroidEclipseProjectData,
    JavaJarData,
)

from .sandbox import (
    default_finder,
    SandboxError,
    SandboxExecutionError,
    SandboxLoadError,
    Sandbox,
)

from .context import (
    Context,
    ContextDerivedValue,
    Files,
    FUNCTIONS,
    VARIABLES,
    DEPRECATION_HINTS,
    SourcePath,
    SPECIAL_VARIABLES,
    SUBCONTEXTS,
    SubContext,
    TemplateContext,
)

from mozbuild.base import ExecutionSummary


if sys.version_info.major == 2:
    text_type = unicode
    type_type = types.TypeType
else:
    text_type = str
    type_type = type


def log(logger, level, action, params, formatter):
    logger.log(level, formatter, extra={'action': action, 'params': params})


class EmptyConfig(object):
    """A config object that is empty.

    This config object is suitable for using with a BuildReader on a vanilla
    checkout, without any existing configuration. The config is simply
    bootstrapped from a top source directory path.
    """
    class PopulateOnGetDict(ReadOnlyDefaultDict):
        """A variation on ReadOnlyDefaultDict that populates during .get().

        This variation is needed because CONFIG uses .get() to access members.
        Without it, None (instead of our EmptyValue types) would be returned.
        """
        def get(self, key, default=None):
            return self[key]

    def __init__(self, topsrcdir):
        self.topsrcdir = topsrcdir
        self.topobjdir = ''

        self.substs = self.PopulateOnGetDict(EmptyValue, {
            # These 2 variables are used semi-frequently and it isn't worth
            # changing all the instances.
            b'MOZ_APP_NAME': b'empty',
            b'MOZ_CHILD_PROCESS_NAME': b'empty',
            # Set manipulations are performed within the moz.build files. But
            # set() is not an exposed symbol, so we can't create an empty set.
            b'NECKO_PROTOCOLS': set(),
            # Needed to prevent js/src's config.status from loading.
            b'JS_STANDALONE': b'1',
        })
        udict = {}
        for k, v in self.substs.items():
            if isinstance(v, str):
                udict[k.decode('utf-8')] = v.decode('utf-8')
            else:
                udict[k] = v
        self.substs_unicode = self.PopulateOnGetDict(EmptyValue, udict)

        self.defines = self.substs
        self.external_source_dir = None
        self.error_is_fatal = False


def is_read_allowed(path, config):
    """Whether we are allowed to load a mozbuild file at the specified path.

This is used as cheap security to ensure the build is isolated to known source directories. We are allowed to read from the main source directory and any defined external source directories. The latter is to allow 3rd party applications to hook into our build system. """ assert os.path.isabs(path) assert os.path.isabs(config.topsrcdir) path = mozpath.normpath(path) topsrcdir = mozpath.normpath(config.topsrcdir) if mozpath.basedir(path, [topsrcdir]): return True if config.external_source_dir: external_dir = os.path.normcase(config.external_source_dir) norm_path = os.path.normcase(path) if mozpath.basedir(norm_path, [external_dir]): return True return False class SandboxCalledError(SandboxError): """Represents an error resulting from calling the error() function.""" def __init__(self, file_stack, message): SandboxError.__init__(self, file_stack) self.message = message class MozbuildSandbox(Sandbox): """Implementation of a Sandbox tailored for mozbuild files. We expose a few useful functions and expose the set of variables defining Mozilla's build system. context is a Context instance. metadata is a dict of metadata that can be used during the sandbox evaluation. """ def __init__(self, context, metadata={}, finder=default_finder): assert isinstance(context, Context) Sandbox.__init__(self, context, finder=finder) self._log = logging.getLogger(__name__) self.metadata = dict(metadata) exports = self.metadata.get('exports', {}) self.exports = set(exports.keys()) context.update(exports) self.templates = self.metadata.setdefault('templates', {}) self.special_variables = self.metadata.setdefault('special_variables', SPECIAL_VARIABLES) self.functions = self.metadata.setdefault('functions', FUNCTIONS) self.subcontext_types = self.metadata.setdefault('subcontexts', SUBCONTEXTS) def __getitem__(self, key): if key in self.special_variables: return self.special_variables[key][0](self._context) if key in self.functions: return self._create_function(self.functions[key]) if key in self.subcontext_types: return self._create_subcontext(self.subcontext_types[key]) if key in self.templates: return self._create_template_wrapper(self.templates[key]) return Sandbox.__getitem__(self, key) def __contains__(self, key): if any(key in d for d in (self.special_variables, self.functions, self.subcontext_types, self.templates)): return True return Sandbox.__contains__(self, key) def __setitem__(self, key, value): if key in self.special_variables and value is self[key]: return if key in self.special_variables or key in self.functions or key in self.subcontext_types: raise KeyError('Cannot set "%s" because it is a reserved keyword' % key) if key in self.exports: self._context[key] = value self.exports.remove(key) return Sandbox.__setitem__(self, key, value) def exec_file(self, path): """Override exec_file to normalize paths and restrict file loading. Paths will be rejected if they do not fall under topsrcdir or one of the external roots. """ # realpath() is needed for true security. But, this isn't for security # protection, so it is omitted. 
if not is_read_allowed(path, self._context.config): raise SandboxLoadError(self._context.source_stack, sys.exc_info()[2], illegal_path=path) Sandbox.exec_file(self, path) def _add_java_jar(self, name): """Add a Java JAR build target.""" if not name: raise Exception('Java JAR cannot be registered without a name') if '/' in name or '\\' in name or '.jar' in name: raise Exception('Java JAR names must not include slashes or' ' .jar: %s' % name) if name in self['JAVA_JAR_TARGETS']: raise Exception('Java JAR has already been registered: %s' % name) jar = JavaJarData(name) self['JAVA_JAR_TARGETS'][name] = jar return jar # Not exposed to the sandbox. def add_android_eclipse_project_helper(self, name): """Add an Android Eclipse project target.""" if not name: raise Exception('Android Eclipse project cannot be registered without a name') if name in self['ANDROID_ECLIPSE_PROJECT_TARGETS']: raise Exception('Android Eclipse project has already been registered: %s' % name) data = AndroidEclipseProjectData(name) self['ANDROID_ECLIPSE_PROJECT_TARGETS'][name] = data return data def _add_android_eclipse_project(self, name, manifest): if not manifest: raise Exception('Android Eclipse project must specify a manifest') data = self.add_android_eclipse_project_helper(name) data.manifest = manifest data.is_library = False return data def _add_android_eclipse_library_project(self, name): data = self.add_android_eclipse_project_helper(name) data.manifest = None data.is_library = True return data def _export(self, varname): """Export the variable to all subdirectories of the current path.""" exports = self.metadata.setdefault('exports', dict()) if varname in exports: raise Exception('Variable has already been exported: %s' % varname) try: # Doing a regular self._context[varname] causes a set as a side # effect. By calling the dict method instead, we don't have any # side effects. exports[varname] = dict.__getitem__(self._context, varname) except KeyError: self.last_name_error = KeyError('global_ns', 'get_unknown', varname) raise self.last_name_error def recompute_exports(self): """Recompute the variables to export to subdirectories with the current values in the subdirectory.""" if 'exports' in self.metadata: for key in self.metadata['exports']: self.metadata['exports'][key] = self[key] def _include(self, path): """Include and exec another file within the context of this one.""" # path is a SourcePath self.exec_file(path.full_path) def _warning(self, message): # FUTURE consider capturing warnings in a variable instead of printing. print('WARNING: %s' % message, file=sys.stderr) def _error(self, message): if self._context.error_is_fatal: raise SandboxCalledError(self._context.source_stack, message) else: self._warning(message) def _template_decorator(self, func): """Registers a template function.""" if not inspect.isfunction(func): raise Exception('`template` is a function decorator. You must ' 'use it as `@template` preceding a function declaration.') name = func.func_name if name in self.templates: raise KeyError( 'A template named "%s" was already declared in %s.' 
% (name, self.templates[name].path)) if name.islower() or name.isupper() or name[0].islower(): raise NameError('Template function names must be CamelCase.') self.templates[name] = TemplateFunction(func, self) @memoize def _create_subcontext(self, cls): """Return a function object that creates SubContext instances.""" def fn(*args, **kwargs): return cls(self._context, *args, **kwargs) return fn @memoize def _create_function(self, function_def): """Returns a function object for use within the sandbox for the given function definition. The wrapper function does type coercion on the function arguments """ func, args_def, doc = function_def def function(*args): def coerce(arg, type): if not isinstance(arg, type): if issubclass(type, ContextDerivedValue): arg = type(self._context, arg) else: arg = type(arg) return arg args = [coerce(arg, type) for arg, type in zip(args, args_def)] return func(self)(*args) return function @memoize def _create_template_wrapper(self, template): """Returns a function object for use within the sandbox for the given TemplateFunction instance.. When a moz.build file contains a reference to a template call, the sandbox needs a function to execute. This is what this method returns. That function creates a new sandbox for execution of the template. After the template is executed, the data from its execution is merged with the context of the calling sandbox. """ def template_wrapper(*args, **kwargs): context = TemplateContext( template=template.name, allowed_variables=self._context._allowed_variables, config=self._context.config) context.add_source(self._context.current_path) for p in self._context.all_paths: context.add_source(p) sandbox = MozbuildSandbox(context, metadata={ # We should arguably set these defaults to something else. # Templates, for example, should arguably come from the state # of the sandbox from when the template was declared, not when # it was instantiated. Bug 1137319. 'functions': self.metadata.get('functions', {}), 'special_variables': self.metadata.get('special_variables', {}), 'subcontexts': self.metadata.get('subcontexts', {}), 'templates': self.metadata.get('templates', {}) }, finder=self._finder) template.exec_in_sandbox(sandbox, *args, **kwargs) # This is gross, but allows the merge to happen. Eventually, the # merging will go away and template contexts emitted independently. klass = self._context.__class__ self._context.__class__ = TemplateContext # The sandbox will do all the necessary checks for these merges. for key, value in context.items(): if isinstance(value, dict): self[key].update(value) elif isinstance(value, (list, HierarchicalStringList)): self[key] += value else: self[key] = value self._context.__class__ = klass for p in context.all_paths: self._context.add_source(p) return template_wrapper class TemplateFunction(object): def __init__(self, func, sandbox): self.path = func.func_code.co_filename self.name = func.func_name code = func.func_code firstlineno = code.co_firstlineno lines = sandbox._current_source.splitlines(True) lines = inspect.getblock(lines[firstlineno - 1:]) # The code lines we get out of inspect.getsourcelines look like # @template # def Template(*args, **kwargs): # VAR = 'value' # ... 
func_ast = ast.parse(''.join(lines), self.path) # Remove decorators func_ast.body[0].decorator_list = [] # Adjust line numbers accordingly ast.increment_lineno(func_ast, firstlineno - 1) # When using a custom dictionary for function globals/locals, Cpython # actually never calls __getitem__ and __setitem__, so we need to # modify the AST so that accesses to globals are properly directed # to a dict. self._global_name = b'_data' # AST wants str for this, not unicode # In case '_data' is a name used for a variable in the function code, # prepend more underscores until we find an unused name. while (self._global_name in code.co_names or self._global_name in code.co_varnames): self._global_name += '_' func_ast = self.RewriteName(sandbox, self._global_name).visit(func_ast) # Execute the rewritten code. That code now looks like: # def Template(*args, **kwargs): # _data['VAR'] = 'value' # ... # The result of executing this code is the creation of a 'Template' # function object in the global namespace. glob = {'__builtins__': sandbox._builtins} func = types.FunctionType( compile(func_ast, self.path, 'exec'), glob, self.name, func.func_defaults, func.func_closure, ) func() self._func = glob[self.name] def exec_in_sandbox(self, sandbox, *args, **kwargs): """Executes the template function in the given sandbox.""" # Create a new function object associated with the execution sandbox glob = { self._global_name: sandbox, '__builtins__': sandbox._builtins } func = types.FunctionType( self._func.func_code, glob, self.name, self._func.func_defaults, self._func.func_closure ) sandbox.exec_function(func, args, kwargs, self.path, becomes_current_path=False) class RewriteName(ast.NodeTransformer): """AST Node Transformer to rewrite variable accesses to go through a dict. """ def __init__(self, sandbox, global_name): self._sandbox = sandbox self._global_name = global_name def visit_Str(self, node): # String nodes we got from the AST parser are str, but we want # unicode literals everywhere, so transform them. node.s = unicode(node.s) return node def visit_Name(self, node): # Modify uppercase variable references and names known to the # sandbox as if they were retrieved from a dict instead. if not node.id.isupper() and node.id not in self._sandbox: return node def c(new_node): return ast.copy_location(new_node, node) return c(ast.Subscript( value=c(ast.Name(id=self._global_name, ctx=ast.Load())), slice=c(ast.Index(value=c(ast.Str(s=node.id)))), ctx=node.ctx )) class SandboxValidationError(Exception): """Represents an error encountered when validating sandbox results.""" def __init__(self, message, context): Exception.__init__(self, message) self.context = context def __str__(self): s = StringIO() delim = '=' * 30 s.write('\n%s\nERROR PROCESSING MOZBUILD FILE\n%s\n\n' % (delim, delim)) s.write('The error occurred while processing the following file or ') s.write('one of the files it includes:\n') s.write('\n') s.write(' %s/moz.build\n' % self.context.srcdir) s.write('\n') s.write('The error occurred when validating the result of ') s.write('the execution. The reported error is:\n') s.write('\n') s.write(''.join(' %s\n' % l for l in self.message.splitlines())) s.write('\n') return s.getvalue() class BuildReaderError(Exception): """Represents errors encountered during BuildReader execution. The main purpose of this class is to facilitate user-actionable error messages. 
    Execution errors should say:

      - Why they failed
      - Where they failed
      - What can be done to prevent the error

    A lot of the code in this class should arguably be inside sandbox.py.
    However, extraction is somewhat difficult given the additions
    MozbuildSandbox has over Sandbox (e.g. the concept of included files -
    which affect error messages, of course).
    """
    def __init__(self, file_stack, trace, sandbox_exec_error=None,
        sandbox_load_error=None, validation_error=None, other_error=None,
        sandbox_called_error=None):

        self.file_stack = file_stack
        self.trace = trace
        self.sandbox_called_error = sandbox_called_error
        self.sandbox_exec = sandbox_exec_error
        self.sandbox_load = sandbox_load_error
        self.validation_error = validation_error
        self.other = other_error

    @property
    def main_file(self):
        return self.file_stack[-1]

    @property
    def actual_file(self):
        # We report the file that called out to the file that couldn't load.
        if self.sandbox_load is not None:
            if len(self.sandbox_load.file_stack) > 1:
                return self.sandbox_load.file_stack[-2]

            if len(self.file_stack) > 1:
                return self.file_stack[-2]

        if self.sandbox_error is not None and \
                len(self.sandbox_error.file_stack):
            return self.sandbox_error.file_stack[-1]

        return self.file_stack[-1]

    @property
    def sandbox_error(self):
        return self.sandbox_exec or self.sandbox_load or \
            self.sandbox_called_error

    def __str__(self):
        s = StringIO()

        delim = '=' * 30
        s.write('\n%s\nERROR PROCESSING MOZBUILD FILE\n%s\n\n' % (delim, delim))

        s.write('The error occurred while processing the following file:\n')
        s.write('\n')
        s.write('    %s\n' % self.actual_file)
        s.write('\n')

        if self.actual_file != self.main_file and not self.sandbox_load:
            s.write('This file was included as part of processing:\n')
            s.write('\n')
            s.write('    %s\n' % self.main_file)
            s.write('\n')

        if self.sandbox_error is not None:
            self._print_sandbox_error(s)
        elif self.validation_error is not None:
            s.write('The error occurred when validating the result of ')
            s.write('the execution. The reported error is:\n')
            s.write('\n')
            s.write(''.join('    %s\n' % l
                    for l in self.validation_error.message.splitlines()))
            s.write('\n')
        else:
            s.write('The error appears to be part of the %s ' % __name__)
            s.write('Python module itself! It is possible you have stumbled ')
            s.write('across a legitimate bug.\n')
            s.write('\n')

            for l in traceback.format_exception(type(self.other), self.other,
                self.trace):
                s.write(unicode(l))

        return s.getvalue()

    def _print_sandbox_error(self, s):
        # Try to find the frame of the executed code.
        script_frame = None

        # We don't currently capture the trace for SandboxCalledError.
        # Therefore, we don't get line numbers from the moz.build file.
        # FUTURE capture this.
        trace = getattr(self.sandbox_error, 'trace', None)
        frames = []
        if trace:
            frames = traceback.extract_tb(trace)
        for frame in frames:
            if frame[0] == self.actual_file:
                script_frame = frame

            # Reset if we enter a new execution context. This prevents errors
            # in this module from being attributed to a script.
elif frame[0] == __file__ and frame[2] == 'exec_function': script_frame = None if script_frame is not None: s.write('The error was triggered on line %d ' % script_frame[1]) s.write('of this file:\n') s.write('\n') s.write(' %s\n' % script_frame[3]) s.write('\n') if self.sandbox_called_error is not None: self._print_sandbox_called_error(s) return if self.sandbox_load is not None: self._print_sandbox_load_error(s) return self._print_sandbox_exec_error(s) def _print_sandbox_called_error(self, s): assert self.sandbox_called_error is not None s.write('A moz.build file called the error() function.\n') s.write('\n') s.write('The error it encountered is:\n') s.write('\n') s.write(' %s\n' % self.sandbox_called_error.message) s.write('\n') s.write('Correct the error condition and try again.\n') def _print_sandbox_load_error(self, s): assert self.sandbox_load is not None if self.sandbox_load.illegal_path is not None: s.write('The underlying problem is an illegal file access. ') s.write('This is likely due to trying to access a file ') s.write('outside of the top source directory.\n') s.write('\n') s.write('The path whose access was denied is:\n') s.write('\n') s.write(' %s\n' % self.sandbox_load.illegal_path) s.write('\n') s.write('Modify the script to not access this file and ') s.write('try again.\n') return if self.sandbox_load.read_error is not None: if not os.path.exists(self.sandbox_load.read_error): s.write('The underlying problem is we referenced a path ') s.write('that does not exist. That path is:\n') s.write('\n') s.write(' %s\n' % self.sandbox_load.read_error) s.write('\n') s.write('Either create the file if it needs to exist or ') s.write('do not reference it.\n') else: s.write('The underlying problem is a referenced path could ') s.write('not be read. The trouble path is:\n') s.write('\n') s.write(' %s\n' % self.sandbox_load.read_error) s.write('\n') s.write('It is possible the path is not correct. Is it ') s.write('pointing to a directory? It could also be a file ') s.write('permissions issue. Ensure that the file is ') s.write('readable.\n') return # This module is buggy if you see this. 
raise AssertionError('SandboxLoadError with unhandled properties!') def _print_sandbox_exec_error(self, s): assert self.sandbox_exec is not None inner = self.sandbox_exec.exc_value if isinstance(inner, SyntaxError): s.write('The underlying problem is a Python syntax error ') s.write('on line %d:\n' % inner.lineno) s.write('\n') s.write(' %s\n' % inner.text) if inner.offset: s.write((' ' * (inner.offset + 4)) + '^\n') s.write('\n') s.write('Fix the syntax error and try again.\n') return if isinstance(inner, KeyError): self._print_keyerror(inner, s) elif isinstance(inner, ValueError): self._print_valueerror(inner, s) else: self._print_exception(inner, s) def _print_keyerror(self, inner, s): if not inner.args or inner.args[0] not in ('global_ns', 'local_ns'): self._print_exception(inner, s) return if inner.args[0] == 'global_ns': import difflib verb = None if inner.args[1] == 'get_unknown': verb = 'read' elif inner.args[1] == 'set_unknown': verb = 'write' elif inner.args[1] == 'reassign': s.write('The underlying problem is an attempt to reassign ') s.write('a reserved UPPERCASE variable.\n') s.write('\n') s.write('The reassigned variable causing the error is:\n') s.write('\n') s.write(' %s\n' % inner.args[2]) s.write('\n') s.write('Maybe you meant "+=" instead of "="?\n') return else: raise AssertionError('Unhandled global_ns: %s' % inner.args[1]) s.write('The underlying problem is an attempt to %s ' % verb) s.write('a reserved UPPERCASE variable that does not exist.\n') s.write('\n') s.write('The variable %s causing the error is:\n' % verb) s.write('\n') s.write(' %s\n' % inner.args[2]) s.write('\n') close_matches = difflib.get_close_matches(inner.args[2], VARIABLES.keys(), 2) if close_matches: s.write('Maybe you meant %s?\n' % ' or '.join(close_matches)) s.write('\n') if inner.args[2] in DEPRECATION_HINTS: s.write('%s\n' % textwrap.dedent(DEPRECATION_HINTS[inner.args[2]]).strip()) return s.write('Please change the file to not use this variable.\n') s.write('\n') s.write('For reference, the set of valid variables is:\n') s.write('\n') s.write(', '.join(sorted(VARIABLES.keys())) + '\n') return s.write('The underlying problem is a reference to an undefined ') s.write('local variable:\n') s.write('\n') s.write(' %s\n' % inner.args[2]) s.write('\n') s.write('Please change the file to not reference undefined ') s.write('variables and try again.\n') def _print_valueerror(self, inner, s): if not inner.args or inner.args[0] not in ('global_ns', 'local_ns'): self._print_exception(inner, s) return assert inner.args[1] == 'set_type' s.write('The underlying problem is an attempt to write an illegal ') s.write('value to a special variable.\n') s.write('\n') s.write('The variable whose value was rejected is:\n') s.write('\n') s.write(' %s' % inner.args[2]) s.write('\n') s.write('The value being written to it was of the following type:\n') s.write('\n') s.write(' %s\n' % type(inner.args[3]).__name__) s.write('\n') s.write('This variable expects the following type(s):\n') s.write('\n') if type(inner.args[4]) == type_type: s.write(' %s\n' % inner.args[4].__name__) else: for t in inner.args[4]: s.write( ' %s\n' % t.__name__) s.write('\n') s.write('Change the file to write a value of the appropriate type ') s.write('and try again.\n') def _print_exception(self, e, s): s.write('An error was encountered as part of executing the file ') s.write('itself. 
The error appears to be the fault of the script.\n') s.write('\n') s.write('The error as reported by Python is:\n') s.write('\n') s.write(' %s\n' % traceback.format_exception_only(type(e), e)) class BuildReader(object): """Read a tree of mozbuild files into data structures. This is where the build system starts. You give it a tree configuration (the output of configuration) and it executes the moz.build files and collects the data they define. The reader can optionally call a callable after each sandbox is evaluated but before its evaluated content is processed. This gives callers the opportunity to modify contexts before side-effects occur from their content. This callback receives the ``Context`` containing the result of each sandbox evaluation. Its return value is ignored. """ def __init__(self, config, finder=default_finder): self.config = config self._log = logging.getLogger(__name__) self._read_files = set() self._execution_stack = [] self._finder = finder self._execution_time = 0.0 self._file_count = 0 def summary(self): return ExecutionSummary( 'Finished reading {file_count:d} moz.build files in ' '{execution_time:.2f}s', file_count=self._file_count, execution_time=self._execution_time) def read_topsrcdir(self): """Read the tree of linked moz.build files. This starts with the tree's top-most moz.build file and descends into all linked moz.build files until all relevant files have been evaluated. This is a generator of Context instances. As each moz.build file is read, a new Context is created and emitted. """ path = mozpath.join(self.config.topsrcdir, 'moz.build') return self.read_mozbuild(path, self.config) def all_mozbuild_paths(self): """Iterator over all available moz.build files. This method has little to do with the reader. It should arguably belong elsewhere. """ # In the future, we may traverse moz.build files by looking # for DIRS references in the AST, even if a directory is added behind # a conditional. For now, just walk the filesystem. ignore = { # Ignore fake moz.build files used for testing moz.build. 'python/mozbuild/mozbuild/test', # Ignore object directories. 'obj*', } finder = FileFinder(self.config.topsrcdir, find_executables=False, ignore=ignore) # The root doesn't get picked up by FileFinder. yield 'moz.build' for path, f in finder.find('**/moz.build'): yield path def find_sphinx_variables(self): """This function finds all assignments of Sphinx documentation variables. This is a generator of tuples of (moz.build path, var, key, value). For variables that assign to keys in objects, key will be defined. With a little work, this function could be made more generic. But if we end up writing a lot of ast code, it might be best to import a high-level AST manipulation library into the tree. """ # This function looks for assignments to SPHINX_TREES and # SPHINX_PYTHON_PACKAGE_DIRS variables. # # SPHINX_TREES is a dict. Keys and values should both be strings. The # target of the assignment should be a Subscript node. The value # assigned should be a Str node. e.g. # # SPHINX_TREES['foo'] = 'bar' # # This is an Assign node with a Subscript target. The Subscript's value # is a Name node with id "SPHINX_TREES." The slice of this target # is an Index node and its value is a Str with value "foo." # # SPHINX_PYTHON_PACKAGE_DIRS is a simple list. The target of the # assignment should be a Name node. Values should be a List node, whose # elements are Str nodes. e.g. 
# # SPHINX_PYTHON_PACKAGE_DIRS += ['foo'] # # This is an AugAssign node with a Name target with id # "SPHINX_PYTHON_PACKAGE_DIRS." The value is a List node containing 1 # Str elt whose value is "foo." relevant = [ 'SPHINX_TREES', 'SPHINX_PYTHON_PACKAGE_DIRS', ] def assigned_variable(node): # This is not correct, but we don't care yet. if hasattr(node, 'targets'): # Nothing in moz.build does multi-assignment (yet). So error if # we see it. assert len(node.targets) == 1 target = node.targets[0] else: target = node.target if isinstance(target, ast.Subscript): if not isinstance(target.value, ast.Name): return None, None name = target.value.id elif isinstance(target, ast.Name): name = target.id else: return None, None if name not in relevant: return None, None key = None if isinstance(target, ast.Subscript): assert isinstance(target.slice, ast.Index) assert isinstance(target.slice.value, ast.Str) key = target.slice.value.s return name, key def assigned_values(node): value = node.value if isinstance(value, ast.List): for v in value.elts: assert isinstance(v, ast.Str) yield v.s else: assert isinstance(value, ast.Str) yield value.s assignments = [] class Visitor(ast.NodeVisitor): def helper(self, node): name, key = assigned_variable(node) if not name: return for v in assigned_values(node): assignments.append((name, key, v)) def visit_Assign(self, node): self.helper(node) def visit_AugAssign(self, node): self.helper(node) for p in self.all_mozbuild_paths(): assignments[:] = [] full = os.path.join(self.config.topsrcdir, p) with open(full, 'rb') as fh: source = fh.read() tree = ast.parse(source, full) Visitor().visit(tree) for name, key, value in assignments: yield p, name, key, value def read_mozbuild(self, path, config, descend=True, metadata={}): """Read and process a mozbuild file, descending into children. This starts with a single mozbuild file, executes it, and descends into other referenced files per our traversal logic. The traversal logic is to iterate over the *DIRS variables, treating each element as a relative directory path. For each encountered directory, we will open the moz.build file located in that directory in a new Sandbox and process it. If descend is True (the default), we will descend into child directories and files per variable values. Arbitrary metadata in the form of a dict can be passed into this function. This feature is intended to facilitate the build reader injecting state and annotations into moz.build files that is independent of the sandbox's execution context. Traversal is performed depth first (for no particular reason). 
""" self._execution_stack.append(path) try: for s in self._read_mozbuild(path, config, descend=descend, metadata=metadata): yield s except BuildReaderError as bre: raise bre except SandboxCalledError as sce: raise BuildReaderError(list(self._execution_stack), sys.exc_info()[2], sandbox_called_error=sce) except SandboxExecutionError as se: raise BuildReaderError(list(self._execution_stack), sys.exc_info()[2], sandbox_exec_error=se) except SandboxLoadError as sle: raise BuildReaderError(list(self._execution_stack), sys.exc_info()[2], sandbox_load_error=sle) except SandboxValidationError as ve: raise BuildReaderError(list(self._execution_stack), sys.exc_info()[2], validation_error=ve) except Exception as e: raise BuildReaderError(list(self._execution_stack), sys.exc_info()[2], other_error=e) def _read_mozbuild(self, path, config, descend, metadata): path = mozpath.normpath(path) log(self._log, logging.DEBUG, 'read_mozbuild', {'path': path}, 'Reading file: {path}') if path in self._read_files: log(self._log, logging.WARNING, 'read_already', {'path': path}, 'File already read. Skipping: {path}') return self._read_files.add(path) time_start = time.time() topobjdir = config.topobjdir if not mozpath.basedir(path, [config.topsrcdir]): external = config.external_source_dir if external and mozpath.basedir(path, [external]): config = ConfigEnvironment.from_config_status( mozpath.join(topobjdir, 'config.status')) config.topsrcdir = external config.external_source_dir = None relpath = mozpath.relpath(path, config.topsrcdir) reldir = mozpath.dirname(relpath) if mozpath.dirname(relpath) == 'js/src' and \ not config.substs.get('JS_STANDALONE'): config = ConfigEnvironment.from_config_status( mozpath.join(topobjdir, reldir, 'config.status')) config.topobjdir = topobjdir config.external_source_dir = None context = Context(VARIABLES, config, self._finder) sandbox = MozbuildSandbox(context, metadata=metadata, finder=self._finder) sandbox.exec_file(path) self._execution_time += time.time() - time_start self._file_count += len(context.all_paths) # Yield main context before doing any processing. This gives immediate # consumers an opportunity to change state before our remaining # processing is performed. yield context # We need the list of directories pre-gyp processing for later. dirs = list(context.get('DIRS', [])) curdir = mozpath.dirname(path) gyp_contexts = [] for target_dir in context.get('GYP_DIRS', []): gyp_dir = context['GYP_DIRS'][target_dir] for v in ('input', 'variables'): if not getattr(gyp_dir, v): raise SandboxValidationError('Missing value for ' 'GYP_DIRS["%s"].%s' % (target_dir, v), context) # The make backend assumes contexts for sub-directories are # emitted after their parent, so accumulate the gyp contexts. # We could emit the parent context before processing gyp # configuration, but we need to add the gyp objdirs to that context # first. from .gyp_reader import read_from_gyp non_unified_sources = set() for s in gyp_dir.non_unified_sources: source = SourcePath(context, s) if not self._finder.get(source.full_path): raise SandboxValidationError('Cannot find %s.' 
% source, context) non_unified_sources.add(source) time_start = time.time() for gyp_context in read_from_gyp(context.config, mozpath.join(curdir, gyp_dir.input), mozpath.join(context.objdir, target_dir), gyp_dir.variables, non_unified_sources = non_unified_sources): gyp_context.update(gyp_dir.sandbox_vars) gyp_contexts.append(gyp_context) self._file_count += len(gyp_context.all_paths) self._execution_time += time.time() - time_start for gyp_context in gyp_contexts: context['DIRS'].append(mozpath.relpath(gyp_context.objdir, context.objdir)) sandbox.subcontexts.append(gyp_context) for subcontext in sandbox.subcontexts: yield subcontext # Traverse into referenced files. # It's very tempting to use a set here. Unfortunately, the recursive # make backend needs order preserved. Once we autogenerate all backend # files, we should be able to convert this to a set. recurse_info = OrderedDict() for d in dirs: if d in recurse_info: raise SandboxValidationError( 'Directory (%s) registered multiple times' % ( mozpath.relpath(d.full_path, context.srcdir)), context) recurse_info[d] = {} for key in sandbox.metadata: if key == 'exports': sandbox.recompute_exports() recurse_info[d][key] = dict(sandbox.metadata[key]) for path, child_metadata in recurse_info.items(): child_path = path.join('moz.build').full_path # Ensure we don't break out of the topsrcdir. We don't do realpath # because it isn't necessary. If there are symlinks in the srcdir, # that's not our problem. We're not a hosted application: we don't # need to worry about security too much. if not is_read_allowed(child_path, context.config): raise SandboxValidationError( 'Attempting to process file outside of allowed paths: %s' % child_path, context) if not descend: continue for res in self.read_mozbuild(child_path, context.config, metadata=child_metadata): yield res self._execution_stack.pop() def _find_relevant_mozbuilds(self, paths): """Given a set of filesystem paths, find all relevant moz.build files. We assume that a moz.build file in the directory ancestry of a given path is relevant to that path. Let's say we have the following files on disk:: moz.build foo/moz.build foo/baz/moz.build foo/baz/file1 other/moz.build other/file2 If ``foo/baz/file1`` is passed in, the relevant moz.build files are ``moz.build``, ``foo/moz.build``, and ``foo/baz/moz.build``. For ``other/file2``, the relevant moz.build files are ``moz.build`` and ``other/moz.build``. Returns a dict of input paths to a list of relevant moz.build files. The root moz.build file is first and the leaf-most moz.build is last. """ root = self.config.topsrcdir result = {} @memoize def exists(path): return self._finder.get(path) is not None def itermozbuild(path): subpath = '' yield 'moz.build' for part in mozpath.split(path): subpath = mozpath.join(subpath, part) yield mozpath.join(subpath, 'moz.build') for path in sorted(paths): path = mozpath.normpath(path) if os.path.isabs(path): if not mozpath.basedir(path, [root]): raise Exception('Path outside topsrcdir: %s' % path) path = mozpath.relpath(path, root) result[path] = [p for p in itermozbuild(path) if exists(mozpath.join(root, p))] return result def read_relevant_mozbuilds(self, paths): """Read and process moz.build files relevant for a set of paths. For an iterable of relative-to-root filesystem paths ``paths``, find all moz.build files that may apply to them based on filesystem hierarchy and read those moz.build files. The return value is a 2-tuple. 
The first item is a dict mapping each input filesystem path to a list of Context instances that are relevant to that path. The second item is a list of all Context instances. Each Context instance is in both data structures. """ relevants = self._find_relevant_mozbuilds(paths) topsrcdir = self.config.topsrcdir # Source moz.build file to directories to traverse. dirs = defaultdict(set) # Relevant path to absolute paths of relevant contexts. path_mozbuilds = {} # There is room to improve this code (and the code in # _find_relevant_mozbuilds) to better handle multiple files in the same # directory. Bug 1136966 tracks. for path, mbpaths in relevants.items(): path_mozbuilds[path] = [mozpath.join(topsrcdir, p) for p in mbpaths] for i, mbpath in enumerate(mbpaths[0:-1]): source_dir = mozpath.dirname(mbpath) target_dir = mozpath.dirname(mbpaths[i + 1]) d = mozpath.normpath(mozpath.join(topsrcdir, mbpath)) dirs[d].add(mozpath.relpath(target_dir, source_dir)) # Exporting doesn't work reliably in tree traversal mode. Override # the function to no-op. functions = dict(FUNCTIONS) def export(sandbox): return lambda varname: None functions['export'] = tuple([export] + list(FUNCTIONS['export'][1:])) metadata = { 'functions': functions, } contexts = defaultdict(list) all_contexts = [] for context in self.read_mozbuild(mozpath.join(topsrcdir, 'moz.build'), self.config, metadata=metadata): # Explicitly set directory traversal variables to override default # traversal rules. if not isinstance(context, SubContext): for v in ('DIRS', 'GYP_DIRS'): context[v][:] = [] context['DIRS'] = sorted(dirs[context.main_path]) contexts[context.main_path].append(context) all_contexts.append(context) result = {} for path, paths in path_mozbuilds.items(): result[path] = reduce(lambda x, y: x + y, (contexts[p] for p in paths), []) return result, all_contexts def files_info(self, paths): """Obtain aggregate data from Files for a set of files. Given a set of input paths, determine which moz.build files may define metadata for them, evaluate those moz.build files, and apply file metadata rules defined within to determine metadata values for each file requested. Essentially, for each input path: 1. Determine the set of moz.build files relevant to that file by looking for moz.build files in ancestor directories. 2. Evaluate moz.build files starting with the most distant. 3. Iterate over Files sub-contexts. 4. If the file pattern matches the file we're seeking info on, apply attribute updates. 5. Return the most recent value of attributes. """ paths, _ = self.read_relevant_mozbuilds(paths) r = {} for path, ctxs in paths.items(): flags = Files(Context()) for ctx in ctxs: if not isinstance(ctx, Files): continue relpath = mozpath.relpath(path, ctx.relsrcdir) pattern = ctx.pattern # Only do wildcard matching if the '*' character is present. # Otherwise, mozpath.match will match directories, which we've # arbitrarily chosen to not allow. if pattern == relpath or \ ('*' in pattern and mozpath.match(relpath, pattern)): flags += ctx if not any([flags.test_tags, flags.test_files, flags.test_flavors]): flags += self.test_defaults_for_path(ctxs) r[path] = flags return r def test_defaults_for_path(self, ctxs): # This names the context keys that will end up emitting a test # manifest. 
test_manifest_contexts = set( ['%s_MANIFESTS' % key for key in TEST_MANIFESTS] + ['%s_MANIFESTS' % flavor.upper() for flavor in REFTEST_FLAVORS] + ['%s_MANIFESTS' % flavor.upper().replace('-', '_') for flavor in WEB_PLATFORM_TESTS_FLAVORS] ) result_context = Files(Context()) for ctx in ctxs: for key in ctx: if key not in test_manifest_contexts: continue for paths, obj in ctx[key]: if isinstance(paths, tuple): path, tests_root = paths tests_root = mozpath.join(ctx.relsrcdir, tests_root) for t in (mozpath.join(tests_root, path) for path, _ in obj): result_context.test_files.add(mozpath.dirname(t) + '/**') else: for t in obj.tests: if isinstance(t, tuple): path, _ = t relpath = mozpath.relpath(path, self.config.topsrcdir) else: relpath = t['relpath'] result_context.test_files.add(mozpath.dirname(relpath) + '/**') return result_context
repo_name: cstipkovic/spidermonkey-research
path: python/mozbuild/mozbuild/frontend/reader.py
language: Python
license: mpl-2.0
size: 53,577
keyword: ["VisIt"]
text_hash: 6ff21f755fae8ada32348f28a8799f112632492e8152cc7929ca70bd91f072bc
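A minimal usage sketch for the reader above (not part of the source file; the checkout path and the printed field are illustrative assumptions based on the docstrings):

# Hypothetical driver: read every moz.build context from a vanilla checkout.
from mozbuild.frontend.reader import BuildReader, EmptyConfig

config = EmptyConfig('/path/to/mozilla-central')  # placeholder path
reader = BuildReader(config)

# read_topsrcdir() is a generator of Context instances, one per moz.build.
for context in reader.read_topsrcdir():
    print(context.main_path)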
# -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 ## ## Copyright (C) 2010 Async Open Source <http://www.async.com.br> ## All rights reserved ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., or visit: http://www.gnu.org/. ## ## Author(s): Stoq Team <stoq-devel@async.com.br> ## """ Search dialog for books """ from decimal import Decimal from kiwi.currency import currency from stoqlib.enums import SearchFilterPosition from stoqlib.gui.search.productsearch import ProductSearch from stoqlib.gui.search.searchcolumns import SearchColumn from stoqlib.lib.translation import stoqlib_gettext as _ from stoqlib.lib.formatters import format_quantity, get_formatted_cost from books.booksdomain import ProductBookFullStockView class ProductBookSearch(ProductSearch): title = _('Book Search') search_spec = ProductBookFullStockView text_field_columns = ['description', 'barcode', 'category_description', 'author', 'publisher', 'isbn'] def _setup_widgets(self): if not self.hide_cost_column: column = 'cost' elif not self.hide_price_column: column = 'price' self.search.set_summary_label( column, label=_(u'<b>Total:</b>'), format='<b>%s</b>') # # SearchDialog Hooks # def create_filters(self): self._setup_widgets() self.search.set_query(self.executer_query) # Branch branch_filter = self.create_branch_filter(_('In branch:')) branch_filter.select(None) self.add_filter(branch_filter, columns=[]) self.branch_filter = branch_filter # Status status_filter = self.create_sellable_filter() self.add_filter(status_filter, columns=['status'], position=SearchFilterPosition.TOP) # # SearchEditor Hooks # def get_columns(self): cols = [SearchColumn('barcode', title=_('Barcode'), data_type=str, sorted=True, width=130), SearchColumn('code', title=_('Code'), data_type=str, visible=False), SearchColumn('category_description', title=_(u'Category'), data_type=str, width=100), SearchColumn('subject', title=_(u'Subject'), data_type=str), SearchColumn('description', title=_(u'Title'), expand=True, data_type=str), SearchColumn('author', title=_(u'Author'), expand=True, data_type=str), SearchColumn('publisher', title=_(u'Publisher'), expand=True, data_type=str), SearchColumn('isbn', title=_(u'ISBN'), data_type=str), SearchColumn('volume', title=_(u'Volume'), data_type=str, visible=False), SearchColumn('series', title=_(u'Series'), data_type=str, visible=False), SearchColumn('language', title=_(u'Language'), data_type=str, visible=False), ] # The price/cost columns must be controlled by hide_cost_column and # hide_price_column. Since the product search will be available across # the applications, it's important to restrict such columns depending # of the context. 
if not self.hide_cost_column: cols.append(SearchColumn('cost', _('Cost'), data_type=currency, format_func=get_formatted_cost, width=90)) if not self.hide_price_column: cols.append(SearchColumn('price', title=_('Price'), data_type=currency, width=90)) cols.append(SearchColumn('stock', title=_('Stock Total'), format_func=format_quantity, data_type=Decimal, width=100)) return cols
repo_name: andrebellafronte/stoq
path: plugins/books/bookssearch.py
language: Python
license: gpl-2.0
size: 4,597
keyword: ["VisIt"]
text_hash: ee0d42f2799c06696176b4aa1f9f7772c457ab18063544245bbcdea5a9dad907
type_bodies = { 'Human': 'Human', 'Muffalo': 'QuadrupedAnimalWithHooves', 'Gazelle': 'QuadrupedAnimalWithHooves', 'Iguana': 'QuadrupedAnimalWithClawsTailAndJowl', 'Rhinoceros': 'QuadrupedAnimalWithHoovesAndHorn', 'Dromedary': 'QuadrupedAnimalWithHoovesAndHump', 'Chicken': 'Bird', 'Pig': 'QuadrupedAnimalWithHooves', 'Cow': 'QuadrupedAnimalWithHooves', 'Alpaca': 'QuadrupedAnimalWithHooves', 'Megascarab': 'BeetleLike', 'Spelopede': 'BeetleLikeWithClaw', 'Megaspider': 'BeetleLikeWithClaw', 'Warg': 'QuadrupedAnimalWithPawsAndTail', 'WolfTimber': 'QuadrupedAnimalWithPawsAndTail', 'WolfArctic': 'QuadrupedAnimalWithPawsAndTail', 'FoxFennec': 'QuadrupedAnimalWithPawsAndTail', 'FoxRed': 'QuadrupedAnimalWithPawsAndTail', 'FoxArctic': 'QuadrupedAnimalWithPawsAndTail', 'Cobra': 'Snake', 'Monkey': 'Monkey', 'Boomalope': 'QuadrupedAnimalWithHoovesAndHump', 'Elephant': 'QuadrupedAnimalWithHoovesTusksAndTrunk', 'Megasloth': 'QuadrupedAnimalWithPawsAndTail', 'Thrumbo': 'QuadrupedAnimalWithHoovesAndHorn', 'Squirrel': 'QuadrupedAnimalWithPawsAndTail', 'Alphabeaver': 'QuadrupedAnimalWithPawsAndTail', 'Capybara': 'QuadrupedAnimalWithPaws', 'Chinchilla': 'QuadrupedAnimalWithPawsAndTail', 'Boomrat': 'QuadrupedAnimalWithPaws', 'Raccoon': 'QuadrupedAnimalWithPaws', 'Rat': 'QuadrupedAnimalWithPaws', 'YorkshireTerrier': 'QuadrupedAnimalWithPawsAndTail', 'Husky': 'QuadrupedAnimalWithPawsAndTail', 'LabradorRetriever': 'QuadrupedAnimalWithPawsAndTail', 'Cat': 'QuadrupedAnimalWithPawsAndTail', 'Hare': 'QuadrupedAnimalWithPawsAndTail', 'Snowhare': 'QuadrupedAnimalWithPawsAndTail', 'Cassowary': 'Bird', 'Emu': 'Bird', 'Ostrich': 'Bird', 'Turkey': 'Bird', 'Mechanoid_Centipede': 'MechanicalCentipede', 'Mechanoid_Scyther': 'Scyther', 'Cougar': 'QuadrupedAnimalWithPawsAndTail', 'Panther': 'QuadrupedAnimalWithPawsAndTail', 'Lynx': 'QuadrupedAnimalWithPawsAndTail', 'Deer': 'QuadrupedAnimalWithHooves', 'Ibex': 'QuadrupedAnimalWithHooves', 'Elk': 'QuadrupedAnimalWithHooves', 'Caribou': 'QuadrupedAnimalWithHooves', 'WildBoar': 'QuadrupedAnimalWithHoovesAndTusks', 'Tortoise': 'TurtleLike', 'GrizzlyBear': 'QuadrupedAnimalWithPaws', 'PolarBear': 'QuadrupedAnimalWithPaws', } body_parts = { 'Bird': [ 'Body', 'Tail', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'Beak', 'LeftLeg', 'LeftFoot', 'RightLeg', 'RightFoot', ], 'MechanicalCentipede': [ 'MechanicalCentipedeBodyFirstRing', 'MechanicalHead', 'ArtificialBrain', 'LeftSightSensor', 'RightSightSensor', 'LeftHearingSensor', 'RightHearingSensor', 'SmellSensor', 'MechanicalCentipedeBodySecondRing', 'MechanicalCentipedeBodyThirdRing', 'MechanicalCentipedeBodyFourthRing', 'MechanicalCentipedeBodyFifthRing', 'MechanicalCentipedeBodySixthRing', ], 'Scyther': [ 'MechanicalThorax', 'MechanicalNeck', 'MechanicalHead', 'ArtificialBrain', 'LeftSightSensor', 'RightSightSensor', 'LeftHearingSensor', 'RightHearingSensor', 'SmellSensor', 'LeftMechanicalShoulder', 'LeftMechanicalArm', 'LeftBlade', 'LeftMechanicalHand', 'LeftHandMechanicalPinky', 'LeftHandMechanicalMiddleFinger', 'LeftHandMechanicalIndexFinger', 'LeftHandMechanicalThumb', 'RightMechanicalShoulder', 'RightMechanicalArm', 'RightBlade', 'RightMechanicalHand', 'RightHandMechanicalPinky', 'RightHandMechanicalMiddleFinger', 'RightHandMechanicalIndexFinger', 'RightHandMechanicalThumb', 'LeftMechanicalLeg', 'LeftMechanicalFoot', 'RightMechanicalLeg', 'RightMechanicalFoot', ], 'Human': [ 'Torso', 'Sternum', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 
'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Pelvis', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'LeftEar', 'RightEar', 'Nose', 'Jaw', 'LeftShoulder', 'LeftClavicle', 'LeftArm', 'LeftHumerus', 'LeftRadius', 'LeftHand', 'LeftHandPinky', 'LeftHandRingFinger', 'LeftHandMiddleFinger', 'LeftHandIndexFinger', 'LeftHandThumb', 'RightShoulder', 'RightClavicle', 'RightArm', 'RightHumerus', 'RightRadius', 'RightHand', 'RightHandPinky', 'RightHandRingFinger', 'RightHandMiddleFinger', 'RightHandIndexFinger', 'RightHandThumb', 'Waist', 'LeftLeg', 'LeftFemur', 'LeftTibia', 'LeftFoot', 'LeftFootLittleToe', 'LeftFootFourthToe', 'LeftFootMiddleToe', 'LeftFootSecondToe', 'LeftFootBigToe', 'RightLeg', 'RightFemur', 'RightTibia', 'RightFoot', 'RightFootLittleToe', 'RightFootFourthToe', 'RightFootMiddleToe', 'RightFootSecondToe', 'RightFootBigToe', ], 'BeetleLike': [ 'Shell', 'LeftElytra', 'RightElytra', 'Stomach', 'InsectHeart', 'Pronotum', 'InsectHead', 'Brain', 'LeftEye', 'RightEye', 'LeftAntenna', 'RightAntenna', 'InsectNostril', 'InsectMouth', 'FrontLeftInsectLeg', 'FrontRightInsectLeg', 'MiddleLeftInsectLeg', 'MiddleRightInsectLeg', 'RearLeftInsectLeg', 'RearRightInsectLeg', ], 'BeetleLikeWithClaw': [ 'Shell', 'LeftElytra', 'RightElytra', 'Stomach', 'InsectHeart', 'Pronotum', 'InsectHead', 'Brain', 'LeftEye', 'RightEye', 'LeftAntenna', 'RightAntenna', 'InsectNostril', 'InsectMouth', 'HeadClaw', 'FrontLeftInsectLeg', 'FrontRightInsectLeg', 'MiddleLeftInsectLeg', 'MiddleRightInsectLeg', 'RearLeftInsectLeg', 'RearRightInsectLeg', ], 'Snake': [ 'SnakeBody', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'SnakeHead', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'Nose', 'SnakeMouth', ], 'QuadrupedAnimalWithPaws': [ 'Body', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'LeftEar', 'RightEar', 'Nose', 'AnimalJaw', 'FrontLeftLeg', 'FrontLeftPaw', 'FrontRightLeg', 'FrontRightPaw', 'RearLeftLeg', 'RearLeftPaw', 'RearRightLeg', 'RearRightPaw', ], 'QuadrupedAnimalWithPawsAndTail': [ 'Body', 'Tail', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'LeftEar', 'RightEar', 'Nose', 'AnimalJaw', 'FrontLeftLeg', 'FrontLeftPaw', 'FrontRightLeg', 'FrontRightPaw', 'RearLeftLeg', 'RearLeftPaw', 'RearRightLeg', 'RearRightPaw', ], 'QuadrupedAnimalWithHooves': [ 'Body', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'LeftEar', 'RightEar', 'Nose', 'AnimalJaw', 'FrontLeftLeg', 'FrontLeftHoof', 'FrontRightLeg', 'FrontRightHoof', 'RearLeftLeg', 'RearLeftHoof', 'RearRightLeg', 'RearRightHoof', ], 'QuadrupedAnimalWithHoovesAndHump': [ 'Body', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Hump', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'LeftEar', 'RightEar', 'Nose', 'AnimalJaw', 'FrontLeftLeg', 'FrontLeftHoof', 'FrontRightLeg', 'FrontRightHoof', 'RearLeftLeg', 'RearLeftHoof', 'RearRightLeg', 'RearRightHoof', ], 'QuadrupedAnimalWithHoovesAndTusks': [ 'Body', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'LeftEar', 'RightEar', 'Nose', 
'AnimalJaw', 'LeftTusk', 'RightTusk', 'FrontLeftLeg', 'FrontLeftHoof', 'FrontRightLeg', 'FrontRightHoof', 'RearLeftLeg', 'RearLeftHoof', 'RearRightLeg', 'RearRightHoof', ], 'QuadrupedAnimalWithHoovesTusksAndTrunk': [ 'Body', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'LeftEar', 'RightEar', 'Trunk', 'AnimalJaw', 'LeftTusk', 'RightTusk', 'FrontLeftLeg', 'FrontLeftHoof', 'FrontRightLeg', 'FrontRightHoof', 'RearLeftLeg', 'RearLeftHoof', 'RearRightLeg', 'RearRightHoof', ], 'QuadrupedAnimalWithHoovesAndHorn': [ 'Body', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'LeftEar', 'RightEar', 'Nose', 'Horn', 'AnimalJaw', 'FrontLeftLeg', 'FrontLeftHoof', 'FrontRightLeg', 'FrontRightHoof', 'RearLeftLeg', 'RearLeftHoof', 'RearRightLeg', 'RearRightHoof', ], 'QuadrupedAnimalWithClawsTailAndJowl': [ 'Body', 'Tail', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'LeftEar', 'RightEar', 'Nose', 'AnimalJaw', 'Jowl', 'FrontLeftLeg', 'FrontLeftLegFirstClaw', 'FrontLeftLegSecondClaw', 'FrontLeftLegThirdClaw', 'FrontLeftLegFourthClaw', 'FrontLeftLegFifthClaw', 'FrontRightLeg', 'FrontRightLegFirstClaw', 'FrontRightLegSecondClaw', 'FrontRightLegThirdClaw', 'FrontRightLegFourthClaw', 'FrontRightLegFifthClaw', 'RearLeftLeg', 'RearLeftLegFirstClaw', 'RearLeftLegSecondClaw', 'RearLeftLegThirdClaw', 'RearLeftLegFourthClaw', 'RearLeftLegFifthClaw', 'RearRightLeg', 'RearRightLegFirstClaw', 'RearRightLegSecondClaw', 'RearRightLegThirdClaw', 'RearRightLegFourthClaw', 'RearRightLegFifthClaw', ], 'TurtleLike': [ 'TurtleShell', 'Plastron', 'Tail', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Head', 'Brain', 'LeftEye', 'RightEye', 'Nose', 'TurtleBeak', 'FrontLeftLeg', 'FrontRightLeg', 'RearLeftLeg', 'RearRightLeg', ], 'Monkey': [ 'Torso', 'Tail', 'LeftClavicle', 'RightClavicle', 'Sternum', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Rib', 'Pelvis', 'Spine', 'Stomach', 'Heart', 'LeftLung', 'RightLung', 'LeftKidney', 'RightKidney', 'Liver', 'Neck', 'Head', 'Skull', 'Brain', 'LeftEye', 'RightEye', 'LeftEar', 'RightEar', 'Nose', 'AnimalJaw', 'LeftShoulder', 'LeftArm', 'LeftHumerus', 'LeftRadius', 'LeftHand', 'LeftHandPinky', 'LeftHandRingFinger', 'LeftHandMiddleFinger', 'LeftHandIndexFinger', 'LeftHandThumb', 'RightShoulder', 'RightArm', 'RightHumerus', 'RightRadius', 'RightHand', 'RightHandPinky', 'RightHandRingFinger', 'RightHandMiddleFinger', 'RightHandIndexFinger', 'RightHandThumb', 'LeftLeg', 'LeftFemur', 'LeftTibia', 'LeftFoot', 'LeftFootLittleToe', 'LeftFootFourthToe', 'LeftFootMiddleToe', 'LeftFootSecondToe', 'LeftFootBigToe', 'RightLeg', 'RightFemur', 'RightTibia', 'RightFoot', 'RightFootLittleToe', 'RightFootFourthToe', 'RightFootMiddleToe', 'RightFootSecondToe', 'RightFootBigToe', ], }
repo_name: afit/rimworld-save-migrator
path: versions/b18tables.py
language: Python
license: mit
size: 14,784
keyword: ["Elk"]
text_hash: 337bda573c143d8ea314a4c05e35f01c36dda3c5a3531c1811d4555006687a39
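The two tables above are keyed off each other: a creature name maps to a body template, and the template maps to its part list. A small helper (hypothetical, not in the original file) makes the lookup explicit:

def parts_for(creature):
    """Return the body-part list for a creature named in type_bodies."""
    return body_parts[type_bodies[creature]]

# parts_for('Gazelle') resolves to the 'QuadrupedAnimalWithHooves' list,
# which begins with 'Body', 'Spine', 'Stomach', ...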
import king_phisher.plugins as plugin_opts import king_phisher.server.database.manager as db_manager import king_phisher.server.database.models as db_models import king_phisher.server.plugins as plugins import king_phisher.server.signals as signals import king_phisher.utilities as utilities try: import pushbullet except ImportError: has_pushbullet = False else: has_pushbullet = True EXAMPLE_CONFIG = """\ api_keys: <api-key> identifier: King Phisher mask: false """ class Plugin(plugins.ServerPlugin): authors = ['Brandan Geise'] classifiers = ['Plugin :: Server :: Notifications'] title = 'Pushbullet Notifications' description = """ A plugin that uses Pushbullet's API to send push notifications on new website visits and submitted credentials. """ homepage = 'https://github.com/securestate/king-phisher-plugins' options = [ plugin_opts.OptionString( name='api_keys', description='Pushbullet API key, if multiple, separate with comma' ), plugin_opts.OptionString( name='identifier', description='King Phisher server identifier to send in push notification header', default='King Phisher' ), plugin_opts.OptionBoolean( name='mask', description='Partially mask email and campaign values', default=False ) ] req_min_version = '1.4.0' req_packages = { 'pushbullet.py': has_pushbullet } version = '1.2' def initialize(self): signals.server_initialized.connect(self.on_server_initialized) return True def on_server_initialized(self, server): signals.db_session_inserted.connect(self.on_kp_db_event, sender='visits') signals.db_session_inserted.connect(self.on_kp_db_event, sender='credentials') self.send_notification('Pushbullet notifications are now active') def on_kp_db_event(self, sender, targets, session): for event in targets: message = db_manager.get_row_by_id(session, db_models.Message, event.message_id) target_email, campaign_name = self.check_mask(message) if sender == 'visits': message = "New visit from {0} for campaign '{1}'".format(target_email, campaign_name) elif sender == 'credentials': message = "New credentials received from {0} for campaign '{1}'".format(target_email, campaign_name) else: return self.send_notification(message) def check_mask(self, message): if self.config['mask']: target_email = self.mask_string(message.target_email) campaign_name = self.mask_string(message.campaign.name) else: target_email = message.target_email campaign_name = message.campaign.name return target_email, campaign_name def mask_string(self, word): if utilities.is_valid_email_address(word): email_user, email_domain = word.split('@') safe_string = "{0}@{1}{2}{3}".format(email_user, email_domain[:1], ('*' * (len(email_domain) - 2)), email_domain[-1:]) else: safe_string = "{0}{1}{2}".format(word[:1], ('*' * (len(word) - 2)), word[-1:]) return safe_string def send_notification(self, message): api_keys = tuple(k.strip() for k in self.config['api_keys'].split(', ')) for key in api_keys: device = None if ':' in key: device, key = key.split(':') pb = pushbullet.Pushbullet(key) if device: try: device = pb.get_device(device) except pushbullet.errors.InvalidKeyError: self.logger.error("failed to get pushbullet device: {0}".format(device)) try: pb.push_note(self.config['identifier'], message, device=device) except pushbullet.errors.PushError as error: self.logger.error('failed to send the pushbullet note')
securestate/king-phisher-plugins
server/pushbullet_notifications.py
Python
bsd-3-clause
3,533
[ "VisIt" ]
18feb5ea07b219d3b604dbc0b9ea7b555ccd240935e1f48f1fe05cc3519e5f04
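The plugin's mask option illustrates a simple redaction scheme: keep the first and last character and star everything in between, masking only the domain for email addresses. A standalone sketch of that logic, free of King Phisher imports so it can be tried in isolation, is below; mask_email and mask_string are local names for this sketch, not the plugin's API surface.

# Standalone sketch of the masking scheme used by the plugin above.
def mask_string(word):
    """Keep the first and last character, star the middle."""
    return "{0}{1}{2}".format(word[:1], '*' * (len(word) - 2), word[-1:])

def mask_email(address):
    """Leave the user part readable and mask only the domain."""
    email_user, email_domain = address.split('@', 1)
    return "{0}@{1}".format(email_user, mask_string(email_domain))

print(mask_email('alice@example.com'))  # alice@e*********m
print(mask_string('campaign-42'))       # c*********2

Note that inputs shorter than three characters come back unmasked (and a single character is duplicated), an edge case the plugin's own slicing shares.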
# -*- coding: utf-8 -*- import numpy as np from crystals import Crystal from skued import powdersim def test_powdersim_return_shape(): """Test that the return shape of powdersim() is as expected""" q = np.linspace(2, 10, 200) pattern = powdersim(Crystal.from_database("C"), q) assert pattern.shape == q.shape def test_powdersim_peak_alignment(): """Test that the diffraction peaks align with what is expected.""" crystal = Crystal.from_database("C") for reflection in [(0, 1, 1), (1, 2, 0), (-1, 2, 0)]: qknown = np.linalg.norm(crystal.scattering_vector(reflection)) # Range of scattering vectors is tightly centered around the current reflection # so that the maximum of the diffraction pattern MUST be at that reflection q = np.linspace(qknown - 0.1, qknown + 0.1, 256) pattern = powdersim(crystal, q) assert abs(q[np.argmax(pattern)] - qknown) < q[1] - q[0]
LaurentRDC/scikit-ued
skued/simulation/tests/test_powdersim.py
Python
gpl-3.0
992
[ "CRYSTAL" ]
3dbe720ca9be957f8d1d0768dca75c52a8254b5e639c8511f11b64f8799f5d93
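The alignment test above encodes a reusable pattern: sample the diffraction pattern on a narrow window centred on the predicted peak, then require the argmax to land within one sampling bin of the prediction. A generic sketch of that check, with a synthetic Gaussian standing in for powdersim so it needs only numpy, might look like this (assert_peak_at is a name invented for the sketch).

import numpy as np

def assert_peak_at(pattern_func, q_expected, half_width=0.1, npoints=256):
    """Assert that pattern_func peaks within one bin of q_expected."""
    q = np.linspace(q_expected - half_width, q_expected + half_width, npoints)
    intensity = pattern_func(q)
    bin_width = q[1] - q[0]
    assert abs(q[np.argmax(intensity)] - q_expected) < bin_width

# Synthetic stand-in for a simulated pattern: a sharp peak at q = 4.2.
assert_peak_at(lambda q: np.exp(-((q - 4.2) ** 2) / 1e-4), q_expected=4.2)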
""" Gaussian Mixture Models. This implementation corresponds to frequentist (non-Bayesian) formulation of Gaussian Mixture Models. """ # Author: Ron Weiss <ronweiss@gmail.com> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Bertrand Thirion <bertrand.thirion@inria.fr> import warnings import numpy as np from scipy import linalg from time import time from ..base import BaseEstimator from ..utils import check_random_state, check_array from ..utils.extmath import logsumexp from ..utils.validation import check_is_fitted from .. import cluster from sklearn.externals.six.moves import zip EPS = np.finfo(float).eps def log_multivariate_normal_density(X, means, covars, covariance_type='diag'): """Compute the log probability under a multivariate Gaussian distribution. Parameters ---------- X : array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. means : array_like, shape (n_components, n_features) List of n_features-dimensional mean vectors for n_components Gaussians. Each row corresponds to a single mean vector. covars : array_like List of n_components covariance parameters for each Gaussian. The shape depends on `covariance_type`: (n_components, n_features) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' covariance_type : string Type of the covariance parameters. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. Returns ------- lpr : array_like, shape (n_samples, n_components) Array containing the log probabilities of each data point in X under each of the n_components multivariate Gaussian distributions. """ log_multivariate_normal_density_dict = { 'spherical': _log_multivariate_normal_density_spherical, 'tied': _log_multivariate_normal_density_tied, 'diag': _log_multivariate_normal_density_diag, 'full': _log_multivariate_normal_density_full} return log_multivariate_normal_density_dict[covariance_type]( X, means, covars) def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1, random_state=None): """Generate random samples from a Gaussian distribution. Parameters ---------- mean : array_like, shape (n_features,) Mean of the distribution. covar : array_like, optional Covariance of the distribution. The shape depends on `covariance_type`: scalar if 'spherical', (n_features) if 'diag', (n_features, n_features) if 'tied', or 'full' covariance_type : string, optional Type of the covariance parameters. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. n_samples : int, optional Number of samples to generate. Defaults to 1. Returns ------- X : array, shape (n_features, n_samples) Randomly generated sample """ rng = check_random_state(random_state) n_dim = len(mean) rand = rng.randn(n_dim, n_samples) if n_samples == 1: rand.shape = (n_dim,) if covariance_type == 'spherical': rand *= np.sqrt(covar) elif covariance_type == 'diag': rand = np.dot(np.diag(np.sqrt(covar)), rand) else: s, U = linalg.eigh(covar) s.clip(0, out=s) # get rid of tiny negatives np.sqrt(s, out=s) U *= s rand = np.dot(U, rand) return (rand.T + mean).T class GMM(BaseEstimator): """Gaussian Mixture Model Representation of a Gaussian mixture model probability distribution. This class allows for easy evaluation of, sampling from, and maximum-likelihood estimation of the parameters of a GMM distribution. Initializes parameters such that every mixture component has zero mean and identity covariance. 
Parameters ---------- n_components : int, optional Number of mixture components. Defaults to 1. covariance_type : string, optional String describing the type of covariance parameters to use. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. random_state : RandomState or an int seed (None by default) A random number generator instance. min_covar : float, optional Floor on the diagonal of the covariance matrix to prevent overfitting. Defaults to 1e-3. tol : float, optional Convergence threshold. EM iterations will stop when average gain in log-likelihood is below this threshold. Defaults to 1e-3. n_iter : int, optional Number of EM iterations to perform. n_init : int, optional Number of initializations to perform. The best result is kept. params : string, optional Controls which parameters are updated in the training process. Can contain any combination of 'w' for weights, 'm' for means, and 'c' for covars. Defaults to 'wmc'. init_params : string, optional Controls which parameters are updated in the initialization process. Can contain any combination of 'w' for weights, 'm' for means, and 'c' for covars. Defaults to 'wmc'. verbose : int, default: 0 Enable verbose output. If 1 then it always prints the current initialization and iteration step. If greater than 1 then it prints additionally the change and time needed for each step. Attributes ---------- weights_ : array, shape (`n_components`,) This attribute stores the mixing weights for each mixture component. means_ : array, shape (`n_components`, `n_features`) Mean parameters for each mixture component. covars_ : array Covariance parameters for each mixture component. The shape depends on `covariance_type`:: (n_components, n_features) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' converged_ : bool True when convergence was reached in fit(), False otherwise. See Also -------- DPGMM : Infinite gaussian mixture model, using the dirichlet process, fit with a variational algorithm VBGMM : Finite gaussian mixture model fit with a variational algorithm, better for situations where there might be too little data to get a good estimate of the covariance matrix. Examples -------- >>> import numpy as np >>> from sklearn import mixture >>> np.random.seed(1) >>> g = mixture.GMM(n_components=2) >>> # Generate random observations with two modes centered on 0 >>> # and 10 to use for training. >>> obs = np.concatenate((np.random.randn(100, 1), ... 10 + np.random.randn(300, 1))) >>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE GMM(covariance_type='diag', init_params='wmc', min_covar=0.001, n_components=2, n_init=1, n_iter=100, params='wmc', random_state=None, thresh=None, tol=0.001, verbose=0) >>> np.round(g.weights_, 2) array([ 0.75, 0.25]) >>> np.round(g.means_, 2) array([[ 10.05], [ 0.06]]) >>> np.round(g.covars_, 2) #doctest: +SKIP array([[[ 1.02]], [[ 0.96]]]) >>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS array([1, 1, 0, 0]...) >>> np.round(g.score([[0], [2], [9], [10]]), 2) array([-2.19, -4.58, -1.75, -1.21]) >>> # Refit the model on new data (initial parameters remain the >>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE GMM(covariance_type='diag', init_params='wmc', min_covar=0.001, n_components=2, n_init=1, n_iter=100, params='wmc', random_state=None, thresh=None, tol=0.001, verbose=0) >>> np.round(g.weights_, 2) array([ 0.5, 0.5]) """ def __init__(self, n_components=1, covariance_type='diag', random_state=None, thresh=None, tol=1e-3, min_covar=1e-3, n_iter=100, n_init=1, params='wmc', init_params='wmc', verbose=0): if thresh is not None: warnings.warn("'thresh' has been replaced by 'tol' in 0.16 " " and will be removed in 0.18.", DeprecationWarning) self.n_components = n_components self.covariance_type = covariance_type self.thresh = thresh self.tol = tol self.min_covar = min_covar self.random_state = random_state self.n_iter = n_iter self.n_init = n_init self.params = params self.init_params = init_params self.verbose = verbose if covariance_type not in ['spherical', 'tied', 'diag', 'full']: raise ValueError('Invalid value for covariance_type: %s' % covariance_type) if n_init < 1: raise ValueError('GMM estimation requires at least one run') self.weights_ = np.ones(self.n_components) / self.n_components # flag to indicate exit status of fit() method: converged (True) or # n_iter reached (False) self.converged_ = False def _get_covars(self): """Covariance parameters for each mixture component. The shape depends on ``cvtype``:: (n_states, n_features) if 'spherical', (n_features, n_features) if 'tied', (n_states, n_features) if 'diag', (n_states, n_features, n_features) if 'full' """ if self.covariance_type == 'full': return self.covars_ elif self.covariance_type == 'diag': return [np.diag(cov) for cov in self.covars_] elif self.covariance_type == 'tied': return [self.covars_] * self.n_components elif self.covariance_type == 'spherical': return [np.diag(cov) for cov in self.covars_] def _set_covars(self, covars): """Provide values for covariance""" covars = np.asarray(covars) _validate_covars(covars, self.covariance_type, self.n_components) self.covars_ = covars def score_samples(self, X): """Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation """ check_is_fitted(self, 'means_') X = check_array(X) if X.ndim == 1: X = X[:, np.newaxis] if X.size == 0: return np.array([]), np.empty((0, self.n_components)) if X.shape[1] != self.means_.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = (log_multivariate_normal_density(X, self.means_, self.covars_, self.covariance_type) + np.log(self.weights_)) logprob = logsumexp(lpr, axis=1) responsibilities = np.exp(lpr - logprob[:, np.newaxis]) return logprob, responsibilities def score(self, X, y=None): """Compute the log probability under the model. Parameters ---------- X : array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. 
Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X """ logprob, _ = self.score_samples(X) return logprob def predict(self, X): """Predict label for data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = (n_samples,) component memberships """ logprob, responsibilities = self.score_samples(X) return responsibilities.argmax(axis=1) def predict_proba(self, X): """Predict posterior probability of data under each Gaussian in the model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- responsibilities : array-like, shape = (n_samples, n_components) Returns the probability of the sample for each Gaussian (state) in the model. """ logprob, responsibilities = self.score_samples(X) return responsibilities def sample(self, n_samples=1, random_state=None): """Generate random samples from the model. Parameters ---------- n_samples : int, optional Number of samples to generate. Defaults to 1. Returns ------- X : array_like, shape (n_samples, n_features) List of samples """ check_is_fitted(self, 'means_') if random_state is None: random_state = self.random_state random_state = check_random_state(random_state) weight_cdf = np.cumsum(self.weights_) X = np.empty((n_samples, self.means_.shape[1])) rand = random_state.rand(n_samples) # decide which component to use for each sample comps = weight_cdf.searchsorted(rand) # for each component, generate all needed samples for comp in range(self.n_components): # occurrences of current component in X comp_in_X = (comp == comps) # number of those occurrences num_comp_in_X = comp_in_X.sum() if num_comp_in_X > 0: if self.covariance_type == 'tied': cv = self.covars_ elif self.covariance_type == 'spherical': cv = self.covars_[comp][0] else: cv = self.covars_[comp] X[comp_in_X] = sample_gaussian( self.means_[comp], cv, self.covariance_type, num_comp_in_X, random_state=random_state).T return X def fit_predict(self, X, y=None): """Fit and then predict labels for data. Warning: due to the final maximization step in the EM algorithm, with few iterations the prediction may not be 100% accurate. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = (n_samples,) component memberships """ return self._fit(X, y).argmax(axis=1) def _fit(self, X, y=None, do_prediction=False): """Estimate model parameters with the EM algorithm. An initialization step is performed before entering the expectation-maximization (EM) algorithm. If you want to avoid this step, set the keyword argument init_params to the empty string '' when creating the GMM object. Likewise, if you would like just to do an initialization, set n_iter=0. Parameters ---------- X : array_like, shape (n, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- responsibilities : array, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation.
""" # initialization step X = check_array(X, dtype=np.float64) if X.shape[0] < self.n_components: raise ValueError( 'GMM estimation with %s components, but got only %s samples' % (self.n_components, X.shape[0])) max_log_prob = -np.infty if self.verbose > 0: print('Expectation-maximization algorithm started.') for init in range(self.n_init): if self.verbose > 0: print('Initialization '+str(init+1)) start_init_time = time() if 'm' in self.init_params or not hasattr(self, 'means_'): self.means_ = cluster.KMeans( n_clusters=self.n_components, random_state=self.random_state).fit(X).cluster_centers_ if self.verbose > 1: print('\tMeans have been initialized.') if 'w' in self.init_params or not hasattr(self, 'weights_'): self.weights_ = np.tile(1.0 / self.n_components, self.n_components) if self.verbose > 1: print('\tWeights have been initialized.') if 'c' in self.init_params or not hasattr(self, 'covars_'): cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1]) if not cv.shape: cv.shape = (1, 1) self.covars_ = \ distribute_covar_matrix_to_match_covariance_type( cv, self.covariance_type, self.n_components) if self.verbose > 1: print('\tCovariance matrices have been initialized.') # EM algorithms current_log_likelihood = None # reset self.converged_ to False self.converged_ = False # this line should be removed when 'thresh' is removed in v0.18 tol = (self.tol if self.thresh is None else self.thresh / float(X.shape[0])) for i in range(self.n_iter): if self.verbose > 0: print('\tEM iteration '+str(i+1)) start_iter_time = time() prev_log_likelihood = current_log_likelihood # Expectation step log_likelihoods, responsibilities = self.score_samples(X) current_log_likelihood = log_likelihoods.mean() # Check for convergence. # (should compare to self.tol when deprecated 'thresh' is # removed in v0.18) if prev_log_likelihood is not None: change = abs(current_log_likelihood - prev_log_likelihood) if self.verbose > 1: print('\t\tChange: '+str(change)) if change < tol: self.converged_ = True if self.verbose > 0: print('\t\tEM algorithm converged.') break # Maximization step self._do_mstep(X, responsibilities, self.params, self.min_covar) if self.verbose > 1: print('\t\tEM iteration '+str(i+1)+' took {0:.5f}s'.format( time()-start_iter_time)) # if the results are better, keep it if self.n_iter: if current_log_likelihood > max_log_prob: max_log_prob = current_log_likelihood best_params = {'weights': self.weights_, 'means': self.means_, 'covars': self.covars_} if self.verbose > 1: print('\tBetter parameters were found.') if self.verbose > 1: print('\tInitialization '+str(init+1)+' took {0:.5f}s'.format( time()-start_init_time)) # check the existence of an init param that was not subject to # likelihood computation issue. if np.isneginf(max_log_prob) and self.n_iter: raise RuntimeError( "EM algorithm was never able to compute a valid likelihood " + "given initial parameters. Try different init parameters " + "(or increasing n_init) or check for degenerate data.") if self.n_iter: self.covars_ = best_params['covars'] self.means_ = best_params['means'] self.weights_ = best_params['weights'] else: # self.n_iter == 0 occurs when using GMM within HMM # Need to make sure that there are responsibilities to output # Output zeros because it was just a quick initialization responsibilities = np.zeros((X.shape[0], self.n_components)) return responsibilities def fit(self, X, y=None): """Estimate model parameters with the EM algorithm. 
An initialization step is performed before entering the expectation-maximization (EM) algorithm. If you want to avoid this step, set the keyword argument init_params to the empty string '' when creating the GMM object. Likewise, if you would like just to do an initialization, set n_iter=0. Parameters ---------- X : array_like, shape (n, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- self """ self._fit(X, y) return self def _do_mstep(self, X, responsibilities, params, min_covar=0): """ Perform the M-step of the EM algorithm and return the class weights """ weights = responsibilities.sum(axis=0) weighted_X_sum = np.dot(responsibilities.T, X) inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS) if 'w' in params: self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS) if 'm' in params: self.means_ = weighted_X_sum * inverse_weights if 'c' in params: covar_mstep_func = _covar_mstep_funcs[self.covariance_type] self.covars_ = covar_mstep_func( self, X, responsibilities, weighted_X_sum, inverse_weights, min_covar) return weights def _n_parameters(self): """Return the number of free parameters in the model.""" ndim = self.means_.shape[1] if self.covariance_type == 'full': cov_params = self.n_components * ndim * (ndim + 1) / 2. elif self.covariance_type == 'diag': cov_params = self.n_components * ndim elif self.covariance_type == 'tied': cov_params = ndim * (ndim + 1) / 2. elif self.covariance_type == 'spherical': cov_params = self.n_components mean_params = ndim * self.n_components return int(cov_params + mean_params + self.n_components - 1) def bic(self, X): """Bayesian information criterion for the current model fit and the proposed data Parameters ---------- X : array of shape(n_samples, n_dimensions) Returns ------- bic: float (the lower the better) """ return (-2 * self.score(X).sum() + self._n_parameters() * np.log(X.shape[0])) def aic(self, X): """Akaike information criterion for the current model fit and the proposed data Parameters ---------- X : array of shape(n_samples, n_dimensions) Returns ------- aic: float (the lower the better) """ return -2 * self.score(X).sum() + 2 * self._n_parameters() ######################################################################### # some helper routines ######################################################################### def _log_multivariate_normal_density_diag(X, means, covars): """Compute Gaussian log-density at X for a diagonal model""" n_samples, n_dim = X.shape lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1) + np.sum((means ** 2) / covars, 1) - 2 * np.dot(X, (means / covars).T) + np.dot(X ** 2, (1.0 / covars).T)) return lpr def _log_multivariate_normal_density_spherical(X, means, covars): """Compute Gaussian log-density at X for a spherical model""" cv = covars.copy() if covars.ndim == 1: cv = cv[:, np.newaxis] if cv.shape[1] == 1: cv = np.tile(cv, (1, X.shape[-1])) return _log_multivariate_normal_density_diag(X, means, cv) def _log_multivariate_normal_density_tied(X, means, covars): """Compute Gaussian log-density at X for a tied model""" cv = np.tile(covars, (means.shape[0], 1, 1)) return _log_multivariate_normal_density_full(X, means, cv) def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7): """Log probability for full covariance matrices.""" n_samples, n_dim = X.shape nmix = len(means) log_prob = np.empty((n_samples, nmix)) for c, (mu, cv) in enumerate(zip(means, covars)): try: cv_chol = linalg.cholesky(cv,
lower=True) except linalg.LinAlgError: # The model is most probably stuck in a component with too # few observations, we need to reinitialize this component try: cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim), lower=True) except linalg.LinAlgError: raise ValueError("'covars' must be symmetric, " "positive-definite") cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol))) cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) + n_dim * np.log(2 * np.pi) + cv_log_det) return log_prob def _validate_covars(covars, covariance_type, n_components): """Do basic checks on matrix covariance sizes and values""" from scipy import linalg if covariance_type == 'spherical': if len(covars) != n_components: raise ValueError("'spherical' covars must have length n_components") elif np.any(covars <= 0): raise ValueError("'spherical' covars must be positive") elif covariance_type == 'tied': if covars.shape[0] != covars.shape[1]: raise ValueError("'tied' covars must have shape (n_dim, n_dim)") elif (not np.allclose(covars, covars.T) or np.any(linalg.eigvalsh(covars) <= 0)): raise ValueError("'tied' covars must be symmetric, " "positive-definite") elif covariance_type == 'diag': if len(covars.shape) != 2: raise ValueError("'diag' covars must have shape " "(n_components, n_dim)") elif np.any(covars <= 0): raise ValueError("'diag' covars must be positive") elif covariance_type == 'full': if len(covars.shape) != 3: raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)") elif covars.shape[1] != covars.shape[2]: raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)") for n, cv in enumerate(covars): if (not np.allclose(cv, cv.T) or np.any(linalg.eigvalsh(cv) <= 0)): raise ValueError("component %d of 'full' covars must be " "symmetric, positive-definite" % n) else: raise ValueError("covariance_type must be one of " + "'spherical', 'tied', 'diag', 'full'") def distribute_covar_matrix_to_match_covariance_type( tied_cv, covariance_type, n_components): """Create all the covariance matrices from a given template""" if covariance_type == 'spherical': cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]), (n_components, 1)) elif covariance_type == 'tied': cv = tied_cv elif covariance_type == 'diag': cv = np.tile(np.diag(tied_cv), (n_components, 1)) elif covariance_type == 'full': cv = np.tile(tied_cv, (n_components, 1, 1)) else: raise ValueError("covariance_type must be one of " + "'spherical', 'tied', 'diag', 'full'") return cv def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): """Performing the covariance M step for diagonal cases""" avg_X2 = np.dot(responsibilities.T, X * X) * norm avg_means2 = gmm.means_ ** 2 avg_X_means = gmm.means_ * weighted_X_sum * norm return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar def _covar_mstep_spherical(*args): """Performing the covariance M step for spherical cases""" cv = _covar_mstep_diag(*args) return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1])) def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): """Performing the covariance M step for full cases""" # Eq. 12 from K.
Murphy, "Fitting a Conditional Linear Gaussian # Distribution" n_features = X.shape[1] cv = np.empty((gmm.n_components, n_features, n_features)) for c in range(gmm.n_components): post = responsibilities[:, c] mu = gmm.means_[c] diff = X - mu with np.errstate(under='ignore'): # Underflow Errors in doing post * X.T are not important avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS) cv[c] = avg_cv + min_covar * np.eye(n_features) return cv def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): # Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian # Distribution" avg_X2 = np.dot(X.T, X) avg_means2 = np.dot(gmm.means_.T, weighted_X_sum) out = avg_X2 - avg_means2 out *= 1. / X.shape[0] out.flat[::len(out) + 1] += min_covar return out _covar_mstep_funcs = {'spherical': _covar_mstep_spherical, 'diag': _covar_mstep_diag, 'tied': _covar_mstep_tied, 'full': _covar_mstep_full, }
costypetrisor/scikit-learn
sklearn/mixture/gmm.py
Python
bsd-3-clause
30,996
[ "Gaussian" ]
0bfb302854614ae2dd6465fa9aef51c8883de0ecd93106e5b15425046c269d19
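The score_samples method in the row above is a textbook instance of computing mixture responsibilities in log space: add log-weights to the per-component log-densities, log-sum-exp across components to get log p(x), and exponentiate the difference so small densities never underflow. A minimal self-contained sketch of that pattern, using numpy's logaddexp.reduce in place of the module's internal logsumexp helper, follows.

import numpy as np

def responsibilities(log_densities, log_weights):
    """Posterior component probabilities from per-component log-densities.

    log_densities : (n_samples, n_components) log N(x | mu_k, Sigma_k)
    log_weights   : (n_components,) log mixing weights
    """
    lpr = log_densities + log_weights              # weighted log joint
    logprob = np.logaddexp.reduce(lpr, axis=1)     # log p(x), a stable logsumexp
    return np.exp(lpr - logprob[:, np.newaxis]), logprob

# Densities this small would underflow to 0.0 if exponentiated directly.
lpr = np.array([[-1000.0, -1001.0]])
resp, logprob = responsibilities(lpr, np.log([0.5, 0.5]))
print(np.round(resp, 3))  # [[0.731 0.269]]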
# ---------------------------------------------------------------------------- # Copyright 2015-2016 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ This test compares the NEON recurrent layer against a numpy reference recurrent implementation and compares the NEON recurrent bprop deltas to the gradients estimated by finite differences. The numpy reference recurrent layer contains static methods for forward pass and backward pass. The test runs a SINGLE recurrent layer and compares numerical values. The reference model only handles batch_size = 1. The following are made sure to be the same in both recurrent layers - initial h values (all zeros) - initial W, b (ones or random values) - input data (random data matrix) - input error (random data matrix) - the data shape inside recurrent_ref is seq_len, input_size, 1 - the data shape inside recurrent (neon) is feature, seq_len * batch_size """ import itertools as itt import numpy as np from neon import NervanaObject, logger as neon_logger from neon.initializers.initializer import Constant, Gaussian from neon.layers import Recurrent from neon.layers.container import DeltasTree from neon.transforms import Tanh from recurrent_ref import Recurrent as RefRecurrent from utils import allclose_with_out def pytest_generate_tests(metafunc): bsz_rng = [1] if 'refgruargs' in metafunc.fixturenames: fargs = [] if metafunc.config.option.all: seq_rng = [2, 3, 4] inp_rng = [3, 5, 10] out_rng = [3, 5, 10] else: seq_rng = [3] inp_rng = [5] out_rng = [10] fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng) metafunc.parametrize('refgruargs', fargs) if 'gradgruargs' in metafunc.fixturenames: fargs = [] if metafunc.config.option.all: seq_rng = [2, 3] inp_rng = [5, 10] out_rng = [3, 5, 10] else: seq_rng = [3] inp_rng = [5] out_rng = [10] fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng) metafunc.parametrize('gradgruargs', fargs) def test_ref_compare_ones(backend_default, refgruargs): # run comparison with reference code # for all ones init seq_len, input_size, hidden_size, batch_size = refgruargs NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size check_rnn(seq_len, input_size, hidden_size, batch_size, Constant(val=1.0), [1.0, 0.0]) def test_ref_compare_rand(backend_default, refgruargs): # run comparison with reference code # for Gaussian random init seq_len, input_size, hidden_size, batch_size = refgruargs NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size check_rnn(seq_len, input_size, hidden_size, batch_size, Gaussian()) # compare neon RNN to reference RNN implementation def check_rnn(seq_len, input_size, hidden_size, batch_size, init_func, inp_moms=[0.0, 1.0]): # init_func is the initializer for the model params # inp_moms is the [mean, std dev] of the random input input_shape = (input_size, seq_len * batch_size) output_shape = (hidden_size, seq_len * batch_size) NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size # ========
create models ======== # neon RNN rnn = Recurrent(hidden_size, init_func, activation=Tanh()) # reference numpy RNN rnn_ref = RefRecurrent(input_size, hidden_size) Wxh = rnn_ref.Wxh Whh = rnn_ref.Whh bh = rnn_ref.bh # ========= generate data ================= # generate random input tensor inp = np.random.rand(*input_shape) * inp_moms[1] + inp_moms[0] inpa = rnn.be.array(inp) # generate random deltas tensor deltas = np.random.randn(*output_shape) # the reference code expects these shapes: # input_shape: (seq_len, input_size, batch_size) # output_shape: (seq_len, hidden_size, batch_size) inp_ref = inp.copy().T.reshape( seq_len, batch_size, input_size).swapaxes(1, 2) deltas_ref = deltas.copy().T.reshape( seq_len, batch_size, hidden_size).swapaxes(1, 2) # ========= running models ========== # run neon fprop rnn.configure((input_size, seq_len)) rnn.prev_layer = True rnn.allocate() dtree = DeltasTree() rnn.allocate_deltas(dtree) dtree.allocate_buffers() rnn.set_deltas(dtree) rnn.fprop(inpa) # weights are only initialized after doing fprop, so now # make ref weights and biases the same with neon model Wxh[:] = rnn.W_input.get() Whh[:] = rnn.W_recur.get() bh[:] = rnn.b.get() (dWxh_ref, dWhh_ref, db_ref, h_ref_list, dh_ref_list, d_out_ref) = rnn_ref.lossFun(inp_ref, deltas_ref) # now test the bprop rnn.bprop(rnn.be.array(deltas)) # grab the delta W from gradient buffer dWxh_neon = rnn.dW_input.get() dWhh_neon = rnn.dW_recur.get() db_neon = rnn.db.get() # comparing outputs neon_logger.display('====Verifying hidden states====') assert allclose_with_out(rnn.outputs.get(), h_ref_list, rtol=0.0, atol=1.0e-5) neon_logger.display('fprop is verified') neon_logger.display('====Verifying update on W and b ====') neon_logger.display('dWxh') assert allclose_with_out(dWxh_neon, dWxh_ref, rtol=0.0, atol=1.0e-5) neon_logger.display('dWhh') assert allclose_with_out(dWhh_neon, dWhh_ref, rtol=0.0, atol=1.0e-5) neon_logger.display('====Verifying update on bias====') neon_logger.display('db') assert allclose_with_out(db_neon, db_ref, rtol=0.0, atol=1.0e-5) neon_logger.display('bprop is verified') return def reset_rnn(rnn): # in order to run fprop multiple times # for the gradient check tests the # rnn internal variables need to be # cleared rnn.x = None rnn.xs = None # just in case rnn.outputs = None return def test_gradient_neon_gru(backend_default, gradgruargs): seq_len, input_size, hidden_size, batch_size = gradgruargs NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size gradient_check(seq_len, input_size, hidden_size, batch_size) def gradient_check(seq_len, input_size, hidden_size, batch_size, threshold=1.0e-3): # 'threshold' is the max fractional difference # between gradient estimate and # bprop deltas (def is 5%) # for a given set of layer parameters calculate # the gradients and compare to the derivatives # obtained with the bprop function. repeat this # for a range of perturbations and use the # perturbation size with the best results. 
# This is necessary for 32 bit computations min_max_err = -1.0 # minimum max error neon_logger.display('Perturb mag, max grad diff') for pert_exp in range(-5, 0): # need to generate the scaling and input outside # having an issue with the random number generator # when these are generated inside the gradient_calc # function input_shape = (input_size, seq_len * batch_size) output_shape = (hidden_size, seq_len * batch_size) rand_scale = np.random.random(output_shape) * 2.0 - 1.0 inp = np.random.randn(*input_shape) pert_mag = 10.0**pert_exp (grad_est, deltas) = gradient_calc(seq_len, input_size, hidden_size, batch_size, epsilon=pert_mag, rand_scale=rand_scale, inp_bl=inp) dd = np.max(np.abs(grad_est - deltas)) neon_logger.display('%e, %e' % (pert_mag, dd)) if min_max_err < 0.0 or dd < min_max_err: min_max_err = dd # reset the seed so models are same in each run # allclose_with_out(grad_est,deltas, rtol=0.0, atol=0.0) NervanaObject.be.rng_reset() # check that best value of worst case error is less than threshold neon_logger.display('Worst case error %e with perturbation %e' % (min_max_err, pert_mag)) neon_logger.display('Threshold %e' % (threshold)) assert min_max_err < threshold def gradient_calc(seq_len, input_size, hidden_size, batch_size, epsilon=None, rand_scale=None, inp_bl=None): NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size input_shape = (input_size, seq_len * batch_size) # generate input if one is not given if inp_bl is None: inp_bl = np.random.randn(*input_shape) # neon rnn instance rnn = Recurrent(hidden_size, Gaussian(), activation=Tanh()) inpa = rnn.be.array(np.copy(inp_bl)) # run fprop on the baseline input rnn.configure((input_size, seq_len)) rnn.prev_layer = True rnn.allocate() dtree = DeltasTree() rnn.allocate_deltas(dtree) dtree.allocate_buffers() rnn.set_deltas(dtree) out_bl = rnn.fprop(inpa).get() # random scaling/hash to generate fake loss if rand_scale is None: rand_scale = np.random.random(out_bl.shape) * 2.0 - 1.0 # loss function would be: # loss_bl = np.sum(rand_scale * out_bl) # run back prop with rand_scale as the errors # use copy to avoid any interactions deltas_neon = rnn.bprop(rnn.be.array(np.copy(rand_scale))).get() # add a perturbation to each input element grads_est = np.zeros(inpa.shape) inp_pert = inp_bl.copy() for pert_ind in range(inpa.size): save_val = inp_pert.flat[pert_ind] inp_pert.flat[pert_ind] = save_val + epsilon reset_rnn(rnn) rnn.allocate() out_pos = rnn.fprop(rnn.be.array(inp_pert)).get() inp_pert.flat[pert_ind] = save_val - epsilon reset_rnn(rnn) rnn.allocate() out_neg = rnn.fprop(rnn.be.array(inp_pert)).get() # calculate the loss with perturbations loss_pos = np.sum(rand_scale * out_pos) loss_neg = np.sum(rand_scale * out_neg) # compute the gradient estimate grad = 0.5 * (loss_pos - loss_neg) / epsilon grads_est.flat[pert_ind] = grad # reset the perturbed input element inp_pert.flat[pert_ind] = save_val del rnn return (grads_est, deltas_neon) if __name__ == '__main__': from neon.backends import gen_backend bsz = 1 be = gen_backend(backend='gpu', batch_size=bsz) fargs = (30, 5, 10, bsz) # test_ref_compare_ones(be, fargs) test_ref_compare_rand(be, fargs)
Jokeren/neon
tests/test_recurrent.py
Python
apache-2.0
11,602
[ "Gaussian" ]
5334f7e4d8cdae33c34c7c6846c745785eae730382dfedbe963562d06247ef7c
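The gradient check in the row above is the standard central-difference recipe: perturb one input element by plus and minus epsilon, rerun the forward pass, and estimate d(loss)/d(input) as (loss_pos - loss_neg) / (2 * epsilon), scanning several perturbation magnitudes because 32-bit arithmetic bounds how small epsilon can usefully get. A framework-free sketch of the same recipe, validated against the analytic gradient of a toy quadratic loss, is below (numeric_grad is a name local to this sketch).

import numpy as np

def numeric_grad(loss_fn, x, epsilon=1e-5):
    """Central-difference estimate of d loss_fn(x) / d x, element by element."""
    grad = np.zeros_like(x)
    flat_x, flat_g = x.reshape(-1), grad.reshape(-1)  # views into x and grad
    for i in range(flat_x.size):
        saved = flat_x[i]
        flat_x[i] = saved + epsilon
        loss_pos = loss_fn(x)
        flat_x[i] = saved - epsilon
        loss_neg = loss_fn(x)
        flat_x[i] = saved  # restore the perturbed element
        flat_g[i] = 0.5 * (loss_pos - loss_neg) / epsilon
    return grad

x = np.random.randn(3, 4)
loss = lambda a: float(np.sum(a ** 2))  # analytic gradient is 2 * a
assert np.max(np.abs(numeric_grad(loss, x) - 2 * x)) < 1e-6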
data = """<?xml version="1.0" encoding="utf-8"?> <!-- Created with PySCeS CBM (0.7.0) on Thu, 20 Feb 2014 14:43:49 by timo --> <sbml xmlns="http://www.sbml.org/sbml/level3/version1/core" xmlns:fbc="http://www.sbml.org/sbml/level3/version1/fbc/version1" xmlns:html="http://www.w3.org/1999/xhtml" level="3" version="1" fbc:required="false"> <model metaid="meta_carlson_model" id="carlson_model"> <notes> <html:p> <html:br/> <html:span size="small">Model &quot;<html:strong>carlson_model</html:strong>&quot; (CBMPY_CB_MODEL) generated with <html:a href="http://pysces.sourceforge.net">PySCeS CBM</html:a> (0.7.0) on Thu, 20 Feb 2014 14:43:49.</html:span> </html:p> </notes> <listOfUnitDefinitions> <unitDefinition id="substance" name="substance"> <listOfUnits> <unit kind="mole" exponent="1" scale="0" multiplier="1"/> </listOfUnits> </unitDefinition> <unitDefinition id="area" name="area"> <listOfUnits> <unit kind="metre" exponent="2" scale="0" multiplier="1"/> </listOfUnits> </unitDefinition> <unitDefinition id="volume" name="volume"> <listOfUnits> <unit kind="litre" exponent="1" scale="0" multiplier="1"/> </listOfUnits> </unitDefinition> <unitDefinition id="length" name="length"> <listOfUnits> <unit kind="metre" exponent="1" scale="0" multiplier="1"/> </listOfUnits> </unitDefinition> <unitDefinition id="time" name="time"> <listOfUnits> <unit kind="second" exponent="1" scale="0" multiplier="1"/> </listOfUnits> </unitDefinition> <unitDefinition id="mmol_per_gDW_per_hr" name="mmol_per_gDW_per_hr"> <listOfUnits> <unit kind="mole" exponent="1" scale="-3" multiplier="1"/> <unit kind="gram" exponent="-1" scale="0" multiplier="1"/> <unit kind="second" exponent="-1" scale="0" multiplier="0.00027777"/> </listOfUnits> </unitDefinition> </listOfUnitDefinitions> <listOfCompartments> <compartment id="Cell" name="Cell" size="1" constant="false"/> </listOfCompartments> <listOfSpecies> <species metaid="meta_ACETATE" id="ACETATE" name="acetate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_ACETATE_ext" id="ACETATE_ext" name="extracellular acetate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_ACETYL_CoA" id="ACETYL_CoA" name="acetyl-coenzyme A" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_ADP" id="ADP" name="adenosine diphosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" 
value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_AKG" id="AKG" name="alpha-ketoglutarate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_ATP" id="ATP" name="adenosine triphosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_ATP_main" id="ATP_main" name="maintenance energy" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_BIOMASS" id="BIOMASS" name="biomass" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_CITRATE" id="CITRATE" name="citrate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_CO2" id="CO2" name="carbon dioxyde" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_CO2_ext" id="CO2_ext" name="extracellular carbon dioxyde" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_CoASH" id="CoASH" name="coenzyme A" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_DHAP" id="DHAP" name="dihydroxyacetone phosphate" 
compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_ERYTH_4_P" id="ERYTH_4_P" name="erythrose-4-phosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_ETOH" id="ETOH" name="ethanol" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_ETOH_ext" id="ETOH_ext" name="extracellular ethanol" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_FAD" id="FAD" name="flavin adenine dinucleotide" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_FADH" id="FADH" name="flavin adenine dinucleotide" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_FORMATE" id="FORMATE" name="formate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_FORMATE_ext" id="FORMATE_ext" name="extracellular formate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_FRU_6_P" id="FRU_6_P" name="fructose-6-phosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" 
constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_FRU_BIS_P" id="FRU_BIS_P" name="fructose bi-phosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_FUMARATE" id="FUMARATE" name="fumarate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_GA_3P" id="GA_3P" name="glyceraldehyde-3-phosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_GLU_6_P" id="GLU_6_P" name="glucose-6-phosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_GLU_ext" id="GLU_ext" name="extracellular glucose" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_ISOCIT" id="ISOCIT" name="isocitrate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_LACTATE" id="LACTATE" name="lactate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_LACTATE_ext" id="LACTATE_ext" name="extracellular lactate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> 
<listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_MALATE" id="MALATE" name="malate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_NAD" id="NAD" name="nicotinamide adenine dinucleotide" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_NADH" id="NADH" name="nicotinamide adenine dinucleotide" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_NH3" id="NH3" name="ammonium" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_NH3_ext" id="NH3_ext" name="extracellular ammonium" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_OXALO" id="OXALO" name="oxaloacetate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_OXY_ext" id="OXY_ext" name="extracellular monooxygen" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_PEP" id="PEP" name="phosphoenolpyruvate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" 
value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_PG" id="PG" name="phosphoglycerate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_PYR" id="PYR" name="pyruvate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_RIBOSE_5_P" id="RIBOSE_5_P" name="ribose-5-phosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_RIBULOSE_5_P" id="RIBULOSE_5_P" name="ribulose-5-phosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_SED_7_P" id="SED_7_P" name="sedoheptulose-7-phosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_SUCC" id="SUCC" name="succinate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_SUCC_CoA" id="SUCC_CoA" name="succinyl-coenzyme A" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_SUCC_ext" id="SUCC_ext" name="extracellular succinate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> <species metaid="meta_XYL_5_P" 
id="XYL_5_P" name="xylulose-5-phosphate" compartment="Cell" initialConcentration="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"> <notes> <html:p>SUBSYSTEM: metabolism</html:p> </notes> <annotation> <listOfKeyValueData xmlns="http://pysces.sourceforge.net/KeyValueData"> <data id="SUBSYSTEM" type="string" value="metabolism"/> </listOfKeyValueData> </annotation> </species> </listOfSpecies> <listOfReactions> <reaction metaid="meta_R54r" id="R54r" name="R54r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="ACETYL_CoA" stoichiometry="1" constant="false"/> <speciesReference species="NADH" stoichiometry="2" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="ETOH" stoichiometry="1" constant="false"/> <speciesReference species="NAD" stoichiometry="2" constant="false"/> <speciesReference species="CoASH" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R13r" id="R13r" name="R13r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="RIBOSE_5_P" stoichiometry="1" constant="false"/> <speciesReference species="XYL_5_P" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="SED_7_P" stoichiometry="1" constant="false"/> <speciesReference species="GA_3P" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R26r" id="R26r" name="R26r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="SUCC_CoA" stoichiometry="1" constant="false"/> <speciesReference species="ADP" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="SUCC" stoichiometry="1" constant="false"/> <speciesReference species="ATP" stoichiometry="1" constant="false"/> <speciesReference species="CoASH" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R15r" id="R15r" name="R15r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="ERYTH_4_P" stoichiometry="1" constant="false"/> <speciesReference species="XYL_5_P" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="GA_3P" stoichiometry="1" constant="false"/> <speciesReference species="FRU_6_P" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R96" id="R96" name="R96" reversible="false" fast="false"> <listOfReactants> <speciesReference species="FORMATE" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="FORMATE_ext" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R94" id="R94" name="R94" reversible="false" fast="false"> <listOfReactants> <speciesReference species="LACTATE" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="LACTATE_ext" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R95" id="R95" name="R95" reversible="false" fast="false"> <listOfReactants> <speciesReference species="SUCC" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="SUCC_ext" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R93" id="R93" name="R93" reversible="false" fast="false"> <listOfReactants> <speciesReference species="NH3_ext" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> 
<speciesReference species="NH3" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R90" id="R90" name="R90" reversible="false" fast="false"> <listOfReactants> <speciesReference species="ETOH" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="ETOH_ext" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R91" id="R91" name="R91" reversible="false" fast="false"> <listOfReactants> <speciesReference species="ACETATE" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="ACETATE_ext" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R8r" id="R8r" name="R8r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="PG" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="PEP" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R10" id="R10" name="R10" reversible="false" fast="false"> <listOfReactants> <speciesReference species="GLU_6_P" stoichiometry="1" constant="false"/> <speciesReference species="NAD" stoichiometry="2" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="RIBULOSE_5_P" stoichiometry="1" constant="false"/> <speciesReference species="NADH" stoichiometry="2" constant="false"/> <speciesReference species="CO2" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R28r" id="R28r" name="R28r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="FUMARATE" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="MALATE" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R3" id="R3" name="R3" reversible="false" fast="false"> <listOfReactants> <speciesReference species="FRU_6_P" stoichiometry="1" constant="false"/> <speciesReference species="ATP" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="FRU_BIS_P" stoichiometry="1" constant="false"/> <speciesReference species="ADP" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R70" id="R70" name="R70" reversible="false" fast="false"> <listOfReactants> <speciesReference species="GLU_6_P" stoichiometry="4" constant="false"/> <speciesReference species="RIBOSE_5_P" stoichiometry="13" constant="false"/> <speciesReference species="ERYTH_4_P" stoichiometry="5" constant="false"/> <speciesReference species="PEP" stoichiometry="32" constant="false"/> <speciesReference species="PYR" stoichiometry="38" constant="false"/> <speciesReference species="ACETYL_CoA" stoichiometry="41" constant="false"/> <speciesReference species="AKG" stoichiometry="14" constant="false"/> <speciesReference species="OXALO" stoichiometry="24" constant="false"/> <speciesReference species="ATP" stoichiometry="547" constant="false"/> <speciesReference species="NADH" stoichiometry="178" constant="false"/> <speciesReference species="NH3" stoichiometry="139" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="BIOMASS" stoichiometry="1" constant="false"/> <speciesReference species="CoASH" stoichiometry="41" constant="false"/> <speciesReference species="ADP" stoichiometry="547" constant="false"/> <speciesReference species="NAD" stoichiometry="178" constant="false"/> <speciesReference 
species="CO2" stoichiometry="2" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R6r" id="R6r" name="R6r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="GA_3P" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="DHAP" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R97r" id="R97r" name="R97r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="CO2" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="CO2_ext" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R2r" id="R2r" name="R2r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="GLU_6_P" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="FRU_6_P" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R55" id="R55" name="R55" reversible="false" fast="false"> <listOfReactants> <speciesReference species="ACETYL_CoA" stoichiometry="1" constant="false"/> <speciesReference species="ADP" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="ACETATE" stoichiometry="1" constant="false"/> <speciesReference species="CoASH" stoichiometry="1" constant="false"/> <speciesReference species="ATP" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R4" id="R4" name="R4" reversible="false" fast="false"> <listOfReactants> <speciesReference species="FRU_BIS_P" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="FRU_6_P" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R12r" id="R12r" name="R12r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="RIBULOSE_5_P" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="RIBOSE_5_P" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R1" id="R1" name="R1" reversible="false" fast="false"> <listOfReactants> <speciesReference species="GLU_ext" stoichiometry="1" constant="false"/> <speciesReference species="PEP" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="GLU_6_P" stoichiometry="1" constant="false"/> <speciesReference species="PYR" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R27r" id="R27r" name="R27r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="SUCC" stoichiometry="1" constant="false"/> <speciesReference species="FAD" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="FUMARATE" stoichiometry="1" constant="false"/> <speciesReference species="FADH" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R81" id="R81" name="R81" reversible="false" fast="false"> <listOfReactants> <speciesReference species="FADH" stoichiometry="1" constant="false"/> <speciesReference species="ADP" stoichiometry="1" constant="false"/> <speciesReference species="OXY_ext" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="FAD" stoichiometry="1" constant="false"/> <speciesReference species="ATP" stoichiometry="1" constant="false"/> 
</listOfProducts> </reaction> <reaction metaid="meta_R23r" id="R23r" name="R23r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="CITRATE" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="ISOCIT" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R9" id="R9" name="R9" reversible="false" fast="false"> <listOfReactants> <speciesReference species="PEP" stoichiometry="1" constant="false"/> <speciesReference species="ADP" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="PYR" stoichiometry="1" constant="false"/> <speciesReference species="ATP" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R14r" id="R14r" name="R14r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="GA_3P" stoichiometry="1" constant="false"/> <speciesReference species="SED_7_P" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="ERYTH_4_P" stoichiometry="1" constant="false"/> <speciesReference species="FRU_6_P" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_RR9" id="RR9" name="RR9" reversible="false" fast="false"> <listOfReactants> <speciesReference species="PYR" stoichiometry="1" constant="false"/> <speciesReference species="ATP" stoichiometry="2" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="PEP" stoichiometry="1" constant="false"/> <speciesReference species="ADP" stoichiometry="2" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R53r" id="R53r" name="R53r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="PYR" stoichiometry="1" constant="false"/> <speciesReference species="NADH" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="LACTATE" stoichiometry="1" constant="false"/> <speciesReference species="NAD" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R80" id="R80" name="R80" reversible="false" fast="false"> <listOfReactants> <speciesReference species="NADH" stoichiometry="1" constant="false"/> <speciesReference species="ADP" stoichiometry="2" constant="false"/> <speciesReference species="OXY_ext" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="NAD" stoichiometry="1" constant="false"/> <speciesReference species="ATP" stoichiometry="2" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R83" id="R83" name="R83" reversible="false" fast="false"> <listOfReactants> <speciesReference species="NADH" stoichiometry="1" constant="false"/> <speciesReference species="FAD" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="NAD" stoichiometry="1" constant="false"/> <speciesReference species="FADH" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R82" id="R82" name="R82" reversible="false" fast="false"> <listOfReactants> <speciesReference species="ATP" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="ADP" stoichiometry="1" constant="false"/> <speciesReference species="ATP_main" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R29r" id="R29r" name="R29r" reversible="true" fast="false"> 
<listOfReactants> <speciesReference species="MALATE" stoichiometry="1" constant="false"/> <speciesReference species="NAD" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="OXALO" stoichiometry="1" constant="false"/> <speciesReference species="NADH" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R7r" id="R7r" name="R7r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="GA_3P" stoichiometry="1" constant="false"/> <speciesReference species="ADP" stoichiometry="1" constant="false"/> <speciesReference species="NAD" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="PG" stoichiometry="1" constant="false"/> <speciesReference species="ATP" stoichiometry="1" constant="false"/> <speciesReference species="NADH" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R41" id="R41" name="R41" reversible="false" fast="false"> <listOfReactants> <speciesReference species="MALATE" stoichiometry="1" constant="false"/> <speciesReference species="NAD" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="PYR" stoichiometry="1" constant="false"/> <speciesReference species="NADH" stoichiometry="1" constant="false"/> <speciesReference species="CO2" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R40" id="R40" name="R40" reversible="false" fast="false"> <listOfReactants> <speciesReference species="PEP" stoichiometry="1" constant="false"/> <speciesReference species="CO2" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="OXALO" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R11r" id="R11r" name="R11r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="RIBULOSE_5_P" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="XYL_5_P" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R42" id="R42" name="R42" reversible="false" fast="false"> <listOfReactants> <speciesReference species="OXALO" stoichiometry="1" constant="false"/> <speciesReference species="ATP" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="PEP" stoichiometry="1" constant="false"/> <speciesReference species="ADP" stoichiometry="1" constant="false"/> <speciesReference species="CO2" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R5r" id="R5r" name="R5r" reversible="true" fast="false"> <listOfReactants> <speciesReference species="FRU_BIS_P" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="DHAP" stoichiometry="1" constant="false"/> <speciesReference species="GA_3P" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R25" id="R25" name="R25" reversible="false" fast="false"> <listOfReactants> <speciesReference species="AKG" stoichiometry="1" constant="false"/> <speciesReference species="NAD" stoichiometry="1" constant="false"/> <speciesReference species="CoASH" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="NADH" stoichiometry="1" constant="false"/> <speciesReference species="SUCC_CoA" stoichiometry="1" constant="false"/> <speciesReference 
species="CO2" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R24" id="R24" name="R24" reversible="false" fast="false"> <listOfReactants> <speciesReference species="ISOCIT" stoichiometry="1" constant="false"/> <speciesReference species="NAD" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="AKG" stoichiometry="1" constant="false"/> <speciesReference species="NADH" stoichiometry="1" constant="false"/> <speciesReference species="CO2" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R22" id="R22" name="R22" reversible="false" fast="false"> <listOfReactants> <speciesReference species="OXALO" stoichiometry="1" constant="false"/> <speciesReference species="ACETYL_CoA" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="CITRATE" stoichiometry="1" constant="false"/> <speciesReference species="CoASH" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R21" id="R21" name="R21" reversible="false" fast="false"> <listOfReactants> <speciesReference species="PYR" stoichiometry="1" constant="false"/> <speciesReference species="NAD" stoichiometry="1" constant="false"/> <speciesReference species="CoASH" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="ACETYL_CoA" stoichiometry="1" constant="false"/> <speciesReference species="CO2" stoichiometry="1" constant="false"/> <speciesReference species="NADH" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> <reaction metaid="meta_R20" id="R20" name="R20" reversible="false" fast="false"> <listOfReactants> <speciesReference species="PYR" stoichiometry="1" constant="false"/> <speciesReference species="CoASH" stoichiometry="1" constant="false"/> </listOfReactants> <listOfProducts> <speciesReference species="ACETYL_CoA" stoichiometry="1" constant="false"/> <speciesReference species="FORMATE" stoichiometry="1" constant="false"/> </listOfProducts> </reaction> </listOfReactions> <fbc:listOfFluxBounds> <fbc:fluxBound fbc:id="R54r_lb" fbc:reaction="R54r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R54r_ub" fbc:reaction="R54r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R13r_lb" fbc:reaction="R13r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R13r_ub" fbc:reaction="R13r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R81_lb" fbc:reaction="R81" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R81_ub" fbc:reaction="R81" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R15r_lb" fbc:reaction="R15r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R15r_ub" fbc:reaction="R15r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R26r_lb" fbc:reaction="R26r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R26r_ub" fbc:reaction="R26r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R94_lb" fbc:reaction="R94" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R94_ub" fbc:reaction="R94" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R95_lb" fbc:reaction="R95" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R95_ub" fbc:reaction="R95" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R11r_lb" 
fbc:reaction="R11r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R11r_ub" fbc:reaction="R11r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R90_lb" fbc:reaction="R90" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R90_ub" fbc:reaction="R90" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R91_lb" fbc:reaction="R91" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R91_ub" fbc:reaction="R91" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R8r_lb" fbc:reaction="R8r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R8r_ub" fbc:reaction="R8r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R10_lb" fbc:reaction="R10" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R10_ub" fbc:reaction="R10" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R28r_lb" fbc:reaction="R28r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R28r_ub" fbc:reaction="R28r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R27r_lb" fbc:reaction="R27r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R27r_ub" fbc:reaction="R27r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R93_lb" fbc:reaction="R93" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R93_ub" fbc:reaction="R93" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R70_lb" fbc:reaction="R70" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R70_ub" fbc:reaction="R70" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R6r_lb" fbc:reaction="R6r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R6r_ub" fbc:reaction="R6r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R97r_lb" fbc:reaction="R97r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R97r_ub" fbc:reaction="R97r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R2r_lb" fbc:reaction="R2r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R2r_ub" fbc:reaction="R2r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R55_lb" fbc:reaction="R55" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R55_ub" fbc:reaction="R55" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R4_lb" fbc:reaction="R4" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R4_ub" fbc:reaction="R4" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R12r_lb" fbc:reaction="R12r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R12r_ub" fbc:reaction="R12r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R1_lb" fbc:reaction="R1" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R1_ub" fbc:reaction="R1" fbc:operation="lessEqual" fbc:value="10"/> <fbc:fluxBound fbc:id="R3_lb" fbc:reaction="R3" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R3_ub" fbc:reaction="R3" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R23r_lb" fbc:reaction="R23r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R23r_ub" fbc:reaction="R23r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R9_lb" fbc:reaction="R9" 
fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R9_ub" fbc:reaction="R9" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R14r_lb" fbc:reaction="R14r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R14r_ub" fbc:reaction="R14r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="RR9_lb" fbc:reaction="RR9" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="RR9_ub" fbc:reaction="RR9" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R53r_lb" fbc:reaction="R53r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R53r_ub" fbc:reaction="R53r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R80_lb" fbc:reaction="R80" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R80_ub" fbc:reaction="R80" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R83_lb" fbc:reaction="R83" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R83_ub" fbc:reaction="R83" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R82_lb" fbc:reaction="R82" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R82_ub" fbc:reaction="R82" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R96_lb" fbc:reaction="R96" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R96_ub" fbc:reaction="R96" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R29r_lb" fbc:reaction="R29r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R29r_ub" fbc:reaction="R29r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R7r_lb" fbc:reaction="R7r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R7r_ub" fbc:reaction="R7r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R41_lb" fbc:reaction="R41" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R41_ub" fbc:reaction="R41" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R40_lb" fbc:reaction="R40" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R40_ub" fbc:reaction="R40" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R42_lb" fbc:reaction="R42" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R42_ub" fbc:reaction="R42" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R5r_lb" fbc:reaction="R5r" fbc:operation="greaterEqual" fbc:value="-999999"/> <fbc:fluxBound fbc:id="R5r_ub" fbc:reaction="R5r" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R25_lb" fbc:reaction="R25" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R25_ub" fbc:reaction="R25" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R24_lb" fbc:reaction="R24" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R24_ub" fbc:reaction="R24" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R22_lb" fbc:reaction="R22" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R22_ub" fbc:reaction="R22" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R21_lb" fbc:reaction="R21" fbc:operation="greaterEqual" fbc:value="0"/> <fbc:fluxBound fbc:id="R21_ub" fbc:reaction="R21" fbc:operation="lessEqual" fbc:value="999999"/> <fbc:fluxBound fbc:id="R20_lb" fbc:reaction="R20" fbc:operation="greaterEqual" fbc:value="0"/> 
<fbc:fluxBound fbc:id="R20_ub" fbc:reaction="R20" fbc:operation="lessEqual" fbc:value="999999"/> </fbc:listOfFluxBounds> <fbc:listOfObjectives fbc:activeObjective="objMaxJ70"> <fbc:objective fbc:id="objMaxJ70" fbc:type="maximize"> <fbc:listOfFluxObjectives> <fbc:fluxObjective fbc:reaction="R70" fbc:coefficient="1"/> </fbc:listOfFluxObjectives> </fbc:objective> </fbc:listOfObjectives> </model> </sbml>"""
SystemsBioinformatics/vonda
vonda/models/Ecoli_Carlson2003_sbml3.py
Python
gpl-3.0
56,465
[ "PySCeS" ]
443e0c047fbdacbe221c50f67d90ea28e9edb34cea69186f1ab9e8615135d9be
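The module above embeds a complete SBML Level 3 model (species, reactions, and FBC flux bounds plus a maximization objective on R70) as a Python string. A minimal sketch of how such a string could be inspected, assuming python-libsbml is available and using `sbml_str` as a hypothetical name for the embedded string:

import libsbml

# Parse the embedded model string; sbml_str is an assumed variable name.
doc = libsbml.readSBMLFromString(sbml_str)
if doc.getNumErrors() > 0:
    doc.printErrors()
model = doc.getModel()
print(model.getNumSpecies(), "species,", model.getNumReactions(), "reactions")
# List each reaction with its reactant and product species ids.
for reaction in model.getListOfReactions():
    reactants = [ref.getSpecies() for ref in reaction.getListOfReactants()]
    products = [ref.getSpecies() for ref in reaction.getListOfProducts()]
    print(reaction.getId(), reactants, "->", products)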
""" Test the API for Gyms """ from app.tests.api.personalised.gym_gym_visit_resource.gym_visit_common \ import GymVisitAPICommonCase class TestGymVisitResourceHttpVerbs(GymVisitAPICommonCase): """ Test the API methods for the Gym """ def test_not_logged_in(self): """ Test that when user isn't authenticated that the API returns a 404 """ self.api.logout() resp = self.api.get(self.url) self.assertEqual(resp.status_code, 401) def test_get_allowed(self): """ Test that get requests are allowed """ resp = self.api.get(self.url) self.assertEqual(resp.status_code, 200) def test_post_blocked(self): """ Test that post requests are not allowed """ resp = self.api.post( self.url, { 'gym': 1, 'gym_visit_date': '1990-04-13T06:00' }, format='json') self.assertEqual(resp.status_code, 405) def test_delete_allowed(self): """ Test that can delete a gym visit """ resp = self.api.delete(self.url) self.assertEqual(resp.status_code, 301) resp = self.api.get(self.url) self.assertEqual(resp.status_code, 404) def test_put_blocked(self): """ Test that the PUT verb is not allowed """ resp = self.api.put( self.url, { 'gym': 1, 'gym_visit_date': '1990-04-13T06:00' }, format='json' ) self.assertEqual(resp.status_code, 405)
Gimpneek/exclusive-raid-gym-tracker
app/tests/api/personalised/gym_gym_visit_resource/test_http_verbs.py
Python
gpl-3.0
1,640
[ "VisIt" ]
60061520a067cf823ab83bcde9082a2063c42caa19f2035d422a2429b96b9280
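The tests above depend on a base class, GymVisitAPICommonCase, that is not shown in this record. A hypothetical sketch of the kind of fixture such a base class could provide, assuming Django REST framework (which the 401/405 responses suggest); the model, route, and credentials here are illustrative assumptions, not taken from the repository:

from django.contrib.auth.models import User
from rest_framework.test import APITestCase, APIClient


class GymVisitAPICommonCaseSketch(APITestCase):
    """ Hypothetical shared fixture for the gym visit resource tests """

    def setUp(self):
        # Assumed user and route; the real base class may differ.
        self.user = User.objects.create_user('trainer', password='secret')
        self.api = APIClient()
        self.api.force_authenticate(user=self.user)
        self.url = '/api/gym-visits/1/'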
# -*- coding: utf-8 -*-

from __future__ import absolute_import

from spark.node import Neuron


class Accumulator(Neuron):
    '''
    Linear output neuron: its transfer function is the identity, so the
    summed input potential is passed through unchanged.
    '''

    def transfer(self, potential):
        '''
        Identity transfer function: return the accumulated potential as-is.
        '''
        return potential

    def __repr__(self):
        return "Accumulator()"
CtrlC-Root/cse5526
spark/rbf/accumulator.py
Python
mit
306
[ "NEURON" ]
1037c4eb394514f56617e6fbe65bcb4ffc3baa81d119560307eec7cf8e0a9892
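Because transfer is the identity, an Accumulator simply passes its summed input potential through. A short usage sketch, under the assumption that the Neuron base class from spark.node can be instantiated without arguments:

# Hypothetical usage; constructor arguments (if any) come from spark.node.Neuron.
acc = Accumulator()
print(acc.transfer(0.75))  # identity transfer: prints 0.75
print(repr(acc))           # prints: Accumulator()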
import math

import numpy as np
from numpy import linalg

from pymol.cgo import BEGIN, COLOR, TRIANGLES, VERTEX, NORMAL, END
from pymol import cmd


def vertex(a1, a2, a3, u, v, M, r0):
    vrtx = M.dot(np.array([
        a1 * math.cos(u) * math.cos(v),
        a2 * math.cos(u) * math.sin(v),
        a3 * math.sin(u)
    ]))
    nrml = M.dot(np.array([
        math.cos(u) * math.cos(v) / a1,
        math.cos(u) * math.sin(v) / a2,
        math.sin(u) / a3
    ]))
    return vrtx + r0, nrml


def ie_build(sele, name='iellipsoid', col='[0.5, 0.5, 0.5]', scale='1'):
    # data = cmd.get_coords(sele)  # only for 1.7.4 and higher
    data = np.array(cmd.get_model(sele, 1).get_coord_list())
    col = eval(col, {'__builtins__': None}, {})
    scale = float(scale) * 0.0001
    r0 = data.mean(axis=0)
    x, y, z = (data - r0).transpose()
    Jxx = sum(y ** 2 + z ** 2)
    Jyy = sum(x ** 2 + z ** 2)
    Jzz = sum(x ** 2 + y ** 2)
    Jxy, Jxz, Jyz = sum(x * y), sum(x * z), sum(y * z)
    ws, vs = linalg.eig(np.array([
        [Jxx, -Jxy, -Jxz],
        [-Jxy, Jyy, -Jyz],
        [-Jxz, -Jyz, Jzz]
    ]))
    M = linalg.inv(vs)
    a1, a2, a3 = ws * scale
    u_segs = 12
    v_segs = 12
    mesh = [BEGIN, TRIANGLES, COLOR]
    mesh.extend(col)
    dU = math.pi / u_segs
    dV = 2 * math.pi / v_segs
    U = -math.pi / 2
    for Y in range(0, u_segs):
        V = math.pi
        for X in range(0, v_segs):
            (x1, y1, z1), (n1x, n1y, n1z) = vertex(a1, a2, a3, U, V, M, r0)
            (x2, y2, z2), (n2x, n2y, n2z) = vertex(a1, a2, a3, U + dU, V, M, r0)
            (x3, y3, z3), (n3x, n3y, n3z) = vertex(a1, a2, a3, U + dU, V + dV, M, r0)
            (x4, y4, z4), (n4x, n4y, n4z) = vertex(a1, a2, a3, U, V + dV, M, r0)
            mesh.extend([NORMAL, n1x, n1y, n1z, VERTEX, x1, y1, z1])
            mesh.extend([NORMAL, n2x, n2y, n2z, VERTEX, x2, y2, z2])
            mesh.extend([NORMAL, n4x, n4y, n4z, VERTEX, x4, y4, z4])
            mesh.extend([NORMAL, n2x, n2y, n2z, VERTEX, x2, y2, z2])
            mesh.extend([NORMAL, n3x, n3y, n3z, VERTEX, x3, y3, z3])
            mesh.extend([NORMAL, n4x, n4y, n4z, VERTEX, x4, y4, z4])
            V += dV
        U += dU
    mesh.append(END)
    cmd.load_cgo(mesh, name)


def ie_build_all(col='[0.5, 0.5, 0.5]', scale='1'):
    target = cmd.get_names()[0]
    command = 'cmd.select("atom_group_%d" % ID, "id %d" % ID);'\
        'ie_build("atom_group_%d" % ID, "ellipsoid_%d" % ID, col, scale)'
    cmd.iterate(target, command,
                space={'ie_build': ie_build, 'cmd': cmd,
                       'col': col, 'scale': scale})


def to_bool(value):
    if value == 'true':
        return True
    return False


def ie_build_file(fname, align='true', ortho='true', hide='true',
                  zoom='true', col='[0.5, 0.5, 0.5]', scale='1'):
    if to_bool(ortho):
        cmd.set('orthoscopic', 'true')
    cmd.load(fname)
    if to_bool(align):
        object_list = cmd.get_names()
        target = object_list.pop()
        for obj in object_list:
            cmd.align(obj, target)
    if to_bool(hide):
        cmd.hide('everything')
    ie_build_all(col, scale)
    if to_bool(zoom):
        cmd.zoom()


cmd.extend('ie_build', ie_build)
cmd.extend('ie_build_all', ie_build_all)
cmd.extend('ie_build_file', ie_build_file)
vlasenkov/pymol-iellipsoid
iellipsoid.py
Python
mit
3,747
[ "PyMOL" ]
0dbdde549139fdf0735617f93c8863893990d78c6792cc753f65a5d844069f55
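Because the script registers its functions with cmd.extend, they become callable from the PyMOL prompt or a PyMOL Python session once the file has been run. A small usage sketch; the fetched structure and selection are arbitrary examples, and the arguments are passed as strings because that is how PyMOL hands them to extended commands:

from pymol import cmd

cmd.fetch('1ubq')  # any loaded structure will do
# Build an inertia ellipsoid for chain A, colored blue-ish and scaled 2x.
ie_build('chain A', 'ellipsA', '[0.2, 0.5, 0.9]', '2')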
#!/usr/bin/env python
# -*- coding=utf-8 -*-

import sys
import os

import numpy as np
from numpy import array as npa
import matplotlib as mpl
import matplotlib.pyplot as plt
import pymatgen as mg
from pymatgen.io.vasp.outputs import Vasprun, Procar, BSVasprun
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.electronic_structure.plotter import BSPlotter
from pymatgen.electronic_structure.plotter import BSPlotterProjected

mpl.rc('text', usetex=True)
mpl.rc('font', weight='bold')
mpl.rcParams['text.latex.unicode'] = True
mpl.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]

if __name__ == "__main__":
    # bands object prepared using pymatgen library. contains eigenvalue information
    v = BSVasprun("./vasprun.xml", parse_projected_eigen=True)
    bs = v.get_band_structure(line_mode=True)

    #print (bs.is_metal())
    #print (bs.get_band_gap())
    #print (bs.get_direct_band_gap())
    #print (bs.get_projections_on_elements_and_orbitals({'As':['s','p','d']}))
    #promenade = HighSymmKpath.get_kpoints
    #print promenade
    #get_kpoints(bs,line_density=20, coords_are_cartesian=True)

    BSPlotter(bs).show()
    #BSPlotter(bs).plot_brillouin()
    #BSPlotter(bs).save_plot(filename="normal-bandstructure.pdf",img_format="pdf",zero_to_efermi=True)

    bsproj = BSPlotterProjected(bs).get_projected_plots_dots_patom_pmorb(
        dictio={'As': ['px', 'py', 'pz']},
        dictpa={'As': [5, 6, 7, 8]},
        sum_atoms={'As': [5, 6, 7, 8]},
        sum_morbs={'As': ['px', 'py', 'pz']})
    bsproj.show()

    # trying new things here
    #bandstruct = BSDOSPlotter(bs_projection="As", dos_projection=None, vb_energy_range=2, cb_energy_range=2, egrid_interval=0.5, rgb_legend=True)
    #BSDOSPlotterProjected(bandstruct)
    #
    #plt.show()
    # plt.savefig(sys.argv[1] + ".pdf", format="pdf")
neelravi/vasp
bandplotting-orbital-resolved-pymatgen.py
Python
gpl-3.0
1,885
[ "VASP", "pymatgen" ]
b9f40727bea1579833da80fb6197eec026b35eeec5dbfa8af63be302b4ee73eb
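The commented-out calls above hint at the band-structure queries pymatgen offers. A short follow-on sketch using the same objects to report the gap instead of plotting; the vasprun.xml path matches the script above:

from pymatgen.io.vasp.outputs import BSVasprun

v = BSVasprun("./vasprun.xml", parse_projected_eigen=True)
bs = v.get_band_structure(line_mode=True)
if bs.is_metal():
    print("metallic band structure, no gap")
else:
    gap = bs.get_band_gap()  # dict with 'energy', 'direct', 'transition'
    print("gap: {energy} eV, direct: {direct}, transition: {transition}".format(**gap))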
#from distutils.core import setup
#from distutils.extension import Extension
#from distutils.command.sdist import sdist as _sdist
from setuptools import setup
from setuptools import Extension
from setuptools.command.sdist import sdist as _sdist

from fmrc import util

# borrowed from online code:
# http://stackoverflow.com/questions/4505747/how-should-i-structure-a-python-package-that-contains-cython-code
try:
    from Cython.Distutils import build_ext
except ImportError:
    useCython = False
else:
    useCython = True

import numpy as np

cmdClass = {}
extModules = []

if useCython:
    extModules += [Extension('fmrc.correct', ['fmrc/correct.pyx'])]
    cmdClass.update({'build_ext': build_ext})

    # this is also from the stackoverflow link above, used to auto-compile
    # when you do the sdist command
    class sdist(_sdist):
        def run(self):
            # Make sure the compiled Cython files in the distribution are up-to-date
            from Cython.Build import cythonize
            cythonize('fmrc/correct.pyx', include_path=[np.get_include()])
            _sdist.run(self)

    cmdClass['sdist'] = sdist
else:
    extModules += [Extension('fmrc.correct', ['fmrc/correct.c'])]

setup(name='fmrc',
      version=util.VERSION,
      description='Corrects errors in short reads from high-throughput sequencing',
      url='http://github.com/sgreenstein/fmrc',
      author='Seth Greenstein',
      author_email='sgreens@cs.unc.edu',
      license='MIT',
      install_requires=['pysam', 'numpy'],
      scripts=['bin/fmrc'],
      packages=['fmrc'],
      zip_safe=False,
      include_dirs=[np.get_include()],
      ext_modules=extModules,
      cmdclass=cmdClass)
sgreenstein/fmrc
setup.py
Python
mit
1,681
[ "pysam" ]
b63f988379ea40f0236ae398b11eb78d92ffb0d73ac64954e477e888aa727dad
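The custom sdist command exists so that the shipped tarball always contains C sources regenerated from the Cython module, letting end users build without Cython. A minimal sketch of the step it automates, assuming Cython and numpy are installed, using the same module path as the setup script:

from Cython.Build import cythonize
import numpy as np

# Regenerate fmrc/correct.c from fmrc/correct.pyx before packaging.
cythonize('fmrc/correct.pyx', include_path=[np.get_include()])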
# -*- coding: utf-8 -*- """ Integration test cases for ACMEv2 as implemented by boulder-wfe2. """ import subprocess import requests import datetime import time import os import json import re import OpenSSL from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import serialization import chisel2 from helpers import * from acme import errors as acme_errors from acme.messages import Status, CertificateRequest, Directory from acme import crypto_util as acme_crypto_util from acme import client as acme_client from acme import messages from acme import challenges from acme import errors import josepy import tempfile import shutil import atexit import random import string import threading from http.server import HTTPServer, BaseHTTPRequestHandler import socketserver import socket import challtestsrv challSrv = challtestsrv.ChallTestServer() def test_multidomain(): chisel2.auth_and_issue([random_domain(), random_domain()]) def test_wildcardmultidomain(): """ Test issuance for a random domain and a random wildcard domain using DNS-01. """ chisel2.auth_and_issue([random_domain(), "*."+random_domain()], chall_type="dns-01") def test_http_challenge(): chisel2.auth_and_issue([random_domain(), random_domain()], chall_type="http-01") def rand_http_chall(client): d = random_domain() csr_pem = chisel2.make_csr([d]) order = client.new_order(csr_pem) authzs = order.authorizations for a in authzs: for c in a.body.challenges: if isinstance(c.chall, challenges.HTTP01): return d, c.chall raise(Exception("No HTTP-01 challenge found for random domain authz")) def check_challenge_dns_err(chalType): """ check_challenge_dns_err tests that performing an ACME challenge of the specified type to a hostname that is configured to return SERVFAIL for all queries produces the correct problem type and detail message. """ client = chisel2.make_client() # Create a random domains. d = random_domain() # Configure the chall srv to SERVFAIL all queries for that domain. challSrv.add_servfail_response(d) # Expect a DNS problem with a detail that matches a regex expectedProbType = "dns" expectedProbRegex = re.compile(r"DNS problem: SERVFAIL looking up (A|AAAA|TXT|CAA) for {0}".format(d)) # Try and issue for the domain with the given challenge type. failed = False try: chisel2.auth_and_issue([d], client=client, chall_type=chalType) except acme_errors.ValidationError as e: # Mark that the auth_and_issue failed failed = True # Extract the failed challenge from each failed authorization for authzr in e.failed_authzrs: c = None if chalType == "http-01": c = chisel2.get_chall(authzr, challenges.HTTP01) elif chalType == "dns-01": c = chisel2.get_chall(authzr, challenges.DNS01) elif chalType == "tls-alpn-01": c = chisel2.get_chall(authzr, challenges.TLSALPN01) else: raise(Exception("Invalid challenge type requested: {0}".format(challType))) # The failed challenge's error should match expected error = c.error if error is None or error.typ != "urn:ietf:params:acme:error:{0}".format(expectedProbType): raise(Exception("Expected {0} prob, got {1}".format(expectedProbType, error.typ))) if not expectedProbRegex.match(error.detail): raise(Exception("Prob detail did not match expectedProbRegex, got \"{0}\"".format(error.detail))) finally: challSrv.remove_servfail_response(d) # If there was no exception that means something went wrong. The test should fail. 
if failed is False: raise(Exception("No problem generated issuing for broken DNS identifier")) def test_http_challenge_dns_err(): """ test_http_challenge_dns_err tests that a HTTP-01 challenge for a domain with broken DNS produces the correct problem response. """ check_challenge_dns_err("http-01") def test_dns_challenge_dns_err(): """ test_dns_challenge_dns_err tests that a DNS-01 challenge for a domain with broken DNS produces the correct problem response. """ check_challenge_dns_err("dns-01") def test_tls_alpn_challenge_dns_err(): """ test_tls_alpn_challenge_dns_err tests that a TLS-ALPN-01 challenge for a domain with broken DNS produces the correct problem response. """ check_challenge_dns_err("tls-alpn-01") def test_http_challenge_broken_redirect(): """ test_http_challenge_broken_redirect tests that a common webserver mis-configuration receives the correct specialized error message when attempting an HTTP-01 challenge. """ client = chisel2.make_client() # Create an authz for a random domain and get its HTTP-01 challenge token d, chall = rand_http_chall(client) token = chall.encode("token") # Create a broken HTTP redirect similar to a sort we see frequently "in the wild" challengePath = "/.well-known/acme-challenge/{0}".format(token) redirect = "http://{0}.well-known/acme-challenge/bad-bad-bad".format(d) challSrv.add_http_redirect( challengePath, redirect) # Expect the specialized error message expectedError = "Fetching {0}: Invalid host in redirect target \"{1}.well-known\". Check webserver config for missing '/' in redirect target.".format(redirect, d) # NOTE(@cpu): Can't use chisel2.expect_problem here because it doesn't let # us interrogate the detail message easily. try: chisel2.auth_and_issue([d], client=client, chall_type="http-01") except acme_errors.ValidationError as e: for authzr in e.failed_authzrs: c = chisel2.get_chall(authzr, challenges.HTTP01) error = c.error if error is None or error.typ != "urn:ietf:params:acme:error:connection": raise(Exception("Expected connection prob, got %s" % (error.__str__()))) if error.detail != expectedError: raise(Exception("Expected prob detail %s, got %s" % (expectedError, error.detail))) challSrv.remove_http_redirect(challengePath) def test_failed_validation_limit(): """ Fail a challenge repeatedly for the same domain, with the same account. Once we reach the rate limit we should get a rateLimitedError. Note that this depends on the specific threshold configured in rate-limit-policies.yml. This also incidentally tests a fix for https://github.com/letsencrypt/boulder/issues/4329. We expect to get ValidationErrors, eventually followed by a rate limit error. """ domain = "fail." 
+ random_domain() csr_pem = chisel2.make_csr([domain]) client = chisel2.make_client() threshold = 3 for _ in range(threshold): order = client.new_order(csr_pem) chall = order.authorizations[0].body.challenges[0] client.answer_challenge(chall, chall.response(client.net.key)) try: client.poll_and_finalize(order) except errors.ValidationError as e: pass chisel2.expect_problem("urn:ietf:params:acme:error:rateLimited", lambda: chisel2.auth_and_issue([domain], client=client)) def test_http_challenge_loop_redirect(): client = chisel2.make_client() # Create an authz for a random domain and get its HTTP-01 challenge token d, chall = rand_http_chall(client) token = chall.encode("token") # Create a HTTP redirect from the challenge's validation path to itself challengePath = "/.well-known/acme-challenge/{0}".format(token) challSrv.add_http_redirect( challengePath, "http://{0}{1}".format(d, challengePath)) # Issuing for the name should fail because of the challenge domain's # redirect loop. chisel2.expect_problem("urn:ietf:params:acme:error:connection", lambda: chisel2.auth_and_issue([d], client=client, chall_type="http-01")) challSrv.remove_http_redirect(challengePath) def test_http_challenge_badport_redirect(): client = chisel2.make_client() # Create an authz for a random domain and get its HTTP-01 challenge token d, chall = rand_http_chall(client) token = chall.encode("token") # Create a HTTP redirect from the challenge's validation path to a host with # an invalid port. challengePath = "/.well-known/acme-challenge/{0}".format(token) challSrv.add_http_redirect( challengePath, "http://{0}:1337{1}".format(d, challengePath)) # Issuing for the name should fail because of the challenge domain's # invalid port redirect. chisel2.expect_problem("urn:ietf:params:acme:error:connection", lambda: chisel2.auth_and_issue([d], client=client, chall_type="http-01")) challSrv.remove_http_redirect(challengePath) def test_http_challenge_badhost_redirect(): client = chisel2.make_client() # Create an authz for a random domain and get its HTTP-01 challenge token d, chall = rand_http_chall(client) token = chall.encode("token") # Create a HTTP redirect from the challenge's validation path to a bare IP # hostname. challengePath = "/.well-known/acme-challenge/{0}".format(token) challSrv.add_http_redirect( challengePath, "https://127.0.0.1{0}".format(challengePath)) # Issuing for the name should cause a connection error because the redirect # domain name is an IP address. chisel2.expect_problem("urn:ietf:params:acme:error:connection", lambda: chisel2.auth_and_issue([d], client=client, chall_type="http-01")) challSrv.remove_http_redirect(challengePath) def test_http_challenge_badproto_redirect(): client = chisel2.make_client() # Create an authz for a random domain and get its HTTP-01 challenge token d, chall = rand_http_chall(client) token = chall.encode("token") # Create a HTTP redirect from the challenge's validation path to whacky # non-http/https protocol URL. challengePath = "/.well-known/acme-challenge/{0}".format(token) challSrv.add_http_redirect( challengePath, "gopher://{0}{1}".format(d, challengePath)) # Issuing for the name should cause a connection error because the redirect # URL uses a non-HTTP(S) protocol. 
chisel2.expect_problem("urn:ietf:params:acme:error:connection", lambda: chisel2.auth_and_issue([d], client=client, chall_type="http-01")) challSrv.remove_http_redirect(challengePath) def test_http_challenge_http_redirect(): client = chisel2.make_client() # Create an authz for a random domain and get its HTTP-01 challenge token d, chall = rand_http_chall(client) token = chall.encode("token") # Calculate its keyauth so we can add it in a special non-standard location # for the redirect result resp = chall.response(client.net.key) keyauth = resp.key_authorization challSrv.add_http01_response("http-redirect", keyauth) # Create a HTTP redirect from the challenge's validation path to some other # token path where we have registered the key authorization. challengePath = "/.well-known/acme-challenge/{0}".format(token) redirectPath = "/.well-known/acme-challenge/http-redirect?params=are&important=to&not=lose" challSrv.add_http_redirect( challengePath, "http://{0}{1}".format(d, redirectPath)) chisel2.auth_and_issue([d], client=client, chall_type="http-01") challSrv.remove_http_redirect(challengePath) challSrv.remove_http01_response("http-redirect") history = challSrv.http_request_history(d) challSrv.clear_http_request_history(d) # There should have been at least two GET requests made to the # challtestsrv. There may have been more if remote VAs were configured. if len(history) < 2: raise(Exception("Expected at least 2 HTTP request events on challtestsrv, found {1}".format(len(history)))) initialRequests = [] redirectedRequests = [] for request in history: # All requests should have been over HTTP if request['HTTPS'] is True: raise(Exception("Expected all requests to be HTTP")) # Initial requests should have the expected initial HTTP-01 URL for the challenge if request['URL'] == challengePath: initialRequests.append(request) # Redirected requests should have the expected redirect path URL with all # its parameters elif request['URL'] == redirectPath: redirectedRequests.append(request) else: raise(Exception("Unexpected request URL {0} in challtestsrv history: {1}".format(request['URL'], request))) # There should have been at least 1 initial HTTP-01 validation request. if len(initialRequests) < 1: raise(Exception("Expected {0} initial HTTP-01 request events on challtestsrv, found {1}".format(validation_attempts, len(initialRequests)))) # There should have been at least 1 redirected HTTP request for each VA if len(redirectedRequests) < 1: raise(Exception("Expected {0} redirected HTTP-01 request events on challtestsrv, found {1}".format(validation_attempts, len(redirectedRequests)))) def test_http_challenge_https_redirect(): client = chisel2.make_client() # Create an authz for a random domain and get its HTTP-01 challenge token d, chall = rand_http_chall(client) token = chall.encode("token") # Calculate its keyauth so we can add it in a special non-standard location # for the redirect result resp = chall.response(client.net.key) keyauth = resp.key_authorization challSrv.add_http01_response("https-redirect", keyauth) # Create a HTTP redirect from the challenge's validation path to an HTTPS # path with some parameters challengePath = "/.well-known/acme-challenge/{0}".format(token) redirectPath = "/.well-known/acme-challenge/https-redirect?params=are&important=to&not=lose" challSrv.add_http_redirect( challengePath, "https://{0}{1}".format(d, redirectPath)) # Also add an A record for the domain pointing to the interface that the # HTTPS HTTP-01 challtestsrv is bound. 
challSrv.add_a_record(d, ["10.77.77.77"]) try: chisel2.auth_and_issue([d], client=client, chall_type="http-01") except errors.ValidationError as e: problems = [] for authzr in e.failed_authzrs: for chall in authzr.body.challenges: error = chall.error if error: problems.append(error.__str__()) raise(Exception("validation problem: %s" % "; ".join(problems))) challSrv.remove_http_redirect(challengePath) challSrv.remove_a_record(d) history = challSrv.http_request_history(d) challSrv.clear_http_request_history(d) # There should have been at least two GET requests made to the challtestsrv by the VA if len(history) < 2: raise(Exception("Expected 2 HTTP request events on challtestsrv, found {0}".format(len(history)))) initialRequests = [] redirectedRequests = [] for request in history: # Initial requests should have the expected initial HTTP-01 URL for the challenge if request['URL'] == challengePath: initialRequests.append(request) # Redirected requests should have the expected redirect path URL with all # its parameters elif request['URL'] == redirectPath: redirectedRequests.append(request) else: raise(Exception("Unexpected request URL {0} in challtestsrv history: {1}".format(request['URL'], request))) # There should have been at least 1 initial HTTP-01 validation request. if len(initialRequests) < 1: raise(Exception("Expected at least 1 initial HTTP-01 request event on challtestsrv, found {0}".format(len(initialRequests)))) # All initial requests should have been over HTTP for r in initialRequests: if r['HTTPS'] is True: raise(Exception("Expected all initial requests to be HTTP, got %s" % r)) # There should have been at least 1 redirected HTTP request for each VA if len(redirectedRequests) < 1: raise(Exception("Expected at least 1 redirected HTTP-01 request event on challtestsrv, found {0}".format(len(redirectedRequests)))) # All the redirected requests should have been over HTTPS with the correct # SNI value for r in redirectedRequests: if r['HTTPS'] is False: raise(Exception("Expected all redirected requests to be HTTPS")) # TODO(@cpu): The following ServerName test will fail with config-next # until https://github.com/letsencrypt/boulder/issues/3969 is fixed. if CONFIG_NEXT: return elif r['ServerName'] != d: raise(Exception("Expected all redirected requests to have ServerName {0} got \"{1}\"".format(d, r['ServerName']))) class SlowHTTPRequestHandler(BaseHTTPRequestHandler): def do_GET(self): try: # Sleeptime needs to be larger than the RA->VA timeout (20s at the # time of writing) sleeptime = 22 print("SlowHTTPRequestHandler: sleeping for {0}s\n".format(sleeptime)) time.sleep(sleeptime) self.send_response(200) self.end_headers() self.wfile.write(b"this is not an ACME key authorization") except: pass class SlowHTTPServer(HTTPServer): # Override handle_error so we don't print a misleading stack trace when the # VA terminates the connection due to timeout. def handle_error(self, request, client_address): pass def test_http_challenge_timeout(): """ test_http_challenge_timeout tests that the VA times out challenge requests to a slow HTTP server appropriately. """ # Start a simple python HTTP server on port 5002 in its own thread. # NOTE(@cpu): The pebble-challtestsrv binds 10.77.77.77:5002 for HTTP-01 # challenges so we must use the 10.88.88.88 address for the throw away # server for this test and add a mock DNS entry that directs the VA to it. 
httpd = SlowHTTPServer(("10.88.88.88", 5002), SlowHTTPRequestHandler) thread = threading.Thread(target = httpd.serve_forever) thread.daemon = False thread.start() # Pick a random domain hostname = random_domain() # Add A record for the domains to ensure the VA's requests are directed # to the interface that we bound the HTTPServer to. challSrv.add_a_record(hostname, ["10.88.88.88"]) start = datetime.datetime.utcnow() end = 0 try: # We expect a connection timeout error to occur chisel2.expect_problem("urn:ietf:params:acme:error:connection", lambda: chisel2.auth_and_issue([hostname], chall_type="http-01")) end = datetime.datetime.utcnow() finally: # Shut down the HTTP server gracefully and join on its thread. httpd.shutdown() httpd.server_close() thread.join() delta = end - start # Expected duration should be the RA->VA timeout plus some padding (At # present the timeout is 20s so adding 2s of padding = 22s) expectedDuration = 22 if delta.total_seconds() == 0 or delta.total_seconds() > expectedDuration: raise(Exception("expected timeout to occur in under {0} seconds. Took {1}".format(expectedDuration, delta.total_seconds()))) def test_tls_alpn_challenge(): # Pick two random domains domains = [random_domain(),random_domain()] # Add A records for these domains to ensure the VA's requests are directed # to the interface that the challtestsrv has bound for TLS-ALPN-01 challenge # responses for host in domains: challSrv.add_a_record(host, ["10.88.88.88"]) chisel2.auth_and_issue(domains, chall_type="tls-alpn-01") for host in domains: challSrv.remove_a_record(host) def test_overlapping_wildcard(): """ Test issuance for a random domain and a wildcard version of the same domain using DNS-01. This should result in *two* distinct authorizations. """ domain = random_domain() domains = [ domain, "*."+domain ] client = chisel2.make_client(None) csr_pem = chisel2.make_csr(domains) order = client.new_order(csr_pem) authzs = order.authorizations if len(authzs) != 2: raise(Exception("order for %s had %d authorizations, expected 2" % (domains, len(authzs)))) cleanup = chisel2.do_dns_challenges(client, authzs) try: order = client.poll_and_finalize(order) finally: cleanup() def test_highrisk_blocklist(): """ Test issuance for a subdomain of a HighRiskBlockedNames entry. It should fail with a policy error. """ # We include "example.org" in `test/hostname-policy.yaml` in the # HighRiskBlockedNames list so issuing for "foo.example.org" should be # blocked. domain = "foo.example.org" # We expect this to produce a policy problem chisel2.expect_problem("urn:ietf:params:acme:error:rejectedIdentifier", lambda: chisel2.auth_and_issue([domain], chall_type="dns-01")) def test_wildcard_exactblacklist(): """ Test issuance for a wildcard that would cover an exact blacklist entry. It should fail with a policy error. """ # We include "highrisk.le-test.hoffman-andrews.com" in `test/hostname-policy.yaml` # Issuing for "*.le-test.hoffman-andrews.com" should be blocked domain = "*.le-test.hoffman-andrews.com" # We expect this to produce a policy problem chisel2.expect_problem("urn:ietf:params:acme:error:rejectedIdentifier", lambda: chisel2.auth_and_issue([domain], chall_type="dns-01")) def test_wildcard_authz_reuse(): """ Test that an authorization for a base domain obtained via HTTP-01 isn't reused when issuing a wildcard for that base domain later on. 
""" # Create one client to reuse across multiple issuances client = chisel2.make_client(None) # Pick a random domain to issue for domains = [ random_domain() ] csr_pem = chisel2.make_csr(domains) # Submit an order for the name order = client.new_order(csr_pem) # Complete the order via an HTTP-01 challenge cleanup = chisel2.do_http_challenges(client, order.authorizations) try: order = client.poll_and_finalize(order) finally: cleanup() # Now try to issue a wildcard for the random domain domains[0] = "*." + domains[0] csr_pem = chisel2.make_csr(domains) order = client.new_order(csr_pem) # We expect all of the returned authorizations to be pending status for authz in order.authorizations: if authz.body.status != Status("pending"): raise(Exception("order for %s included a non-pending authorization (status: %s) from a previous HTTP-01 order" % ((domains), str(authz.body.status)))) def test_bad_overlap_wildcard(): chisel2.expect_problem("urn:ietf:params:acme:error:malformed", lambda: chisel2.auth_and_issue(["*.example.com", "www.example.com"])) def test_duplicate_orders(): """ Test that the same client issuing for the same domain names twice in a row works without error. """ client = chisel2.make_client(None) domains = [ random_domain() ] chisel2.auth_and_issue(domains, client=client) chisel2.auth_and_issue(domains, client=client) def test_order_reuse_failed_authz(): """ Test that creating an order for a domain name, failing an authorization in that order, and submitting another new order request for the same name doesn't reuse a failed authorizaton in the new order. """ client = chisel2.make_client(None) domains = [ random_domain() ] csr_pem = chisel2.make_csr(domains) order = client.new_order(csr_pem) firstOrderURI = order.uri # Pick the first authz's first challenge, doesn't matter what type it is chall_body = order.authorizations[0].body.challenges[0] # Answer it, but with nothing set up to solve the challenge request client.answer_challenge(chall_body, chall_body.response(client.net.key)) deadline = datetime.datetime.now() + datetime.timedelta(seconds=60) authzFailed = False try: # Poll the order's authorizations until they are non-pending, a timeout # occurs, or there is an invalid authorization status. client.poll_authorizations(order, deadline) except acme_errors.ValidationError as e: # We expect there to be a ValidationError from one of the authorizations # being invalid. authzFailed = True # If the poll ended and an authz's status isn't invalid then we reached the # deadline, fail the test if not authzFailed: raise(Exception("timed out waiting for order %s to become invalid" % firstOrderURI)) # Make another order with the same domains order = client.new_order(csr_pem) # It should not be the same order as before if order.uri == firstOrderURI: raise(Exception("new-order for %s returned a , now-invalid, order" % domains)) # We expect all of the returned authorizations to be pending status for authz in order.authorizations: if authz.body.status != Status("pending"): raise(Exception("order for %s included a non-pending authorization (status: %s) from a previous order" % ((domains), str(authz.body.status)))) # We expect the new order can be fulfilled cleanup = chisel2.do_http_challenges(client, order.authorizations) try: order = client.poll_and_finalize(order) finally: cleanup() def test_order_finalize_early(): """ Test that finalizing an order before its fully authorized results in the order having an error set and the status being invalid. 
""" # Create a client client = chisel2.make_client(None) # Create a random domain and a csr domains = [ random_domain() ] csr_pem = chisel2.make_csr(domains) # Create an order for the domain order = client.new_order(csr_pem) deadline = datetime.datetime.now() + datetime.timedelta(seconds=5) # Finalizing an order early should generate an orderNotReady error. chisel2.expect_problem("urn:ietf:params:acme:error:orderNotReady", lambda: client.finalize_order(order, deadline)) def test_revoke_by_account(): client = chisel2.make_client() cert_file = temppath('test_revoke_by_account.pem') order = chisel2.auth_and_issue([random_domain()], client=client, cert_output=cert_file.name) cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem) reset_akamai_purges() client.revoke(josepy.ComparableX509(cert), 0) verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked") verify_akamai_purge() def test_revoke_by_issuer(): client = chisel2.make_client(None) cert_file = temppath('test_revoke_by_issuer.pem') order = chisel2.auth_and_issue([random_domain()], client=client, cert_output=cert_file.name) cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem) reset_akamai_purges() client.revoke(josepy.ComparableX509(cert), 0) verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked") verify_akamai_purge() def test_revoke_by_authz(): domains = [random_domain()] cert_file = temppath('test_revoke_by_authz.pem') order = chisel2.auth_and_issue(domains, cert_output=cert_file.name) cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem) # create a new client and re-authz client = chisel2.make_client(None) chisel2.auth_and_issue(domains, client=client) reset_akamai_purges() client.revoke(josepy.ComparableX509(cert), 0) verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked") verify_akamai_purge() def test_revoke_by_privkey(): client = chisel2.make_client(None) domains = [random_domain()] key = OpenSSL.crypto.PKey() key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048) key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key) csr_pem = chisel2.make_csr(domains) order = client.new_order(csr_pem) cleanup = chisel2.do_http_challenges(client, order.authorizations) try: order = client.poll_and_finalize(order) finally: cleanup() # Create a new client with the JWK as the cert private key jwk = josepy.JWKRSA(key=key) net = acme_client.ClientNetwork(key, user_agent="Boulder integration tester") directory = Directory.from_json(net.get(chisel2.DIRECTORY_V2).json()) new_client = acme_client.ClientV2(directory, net) cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem) reset_akamai_purges() client.revoke(josepy.ComparableX509(cert), 0) cert_file = tempfile.NamedTemporaryFile( dir=tempdir, suffix='.test_revoke_by_privkey.pem', mode='w+', delete=False) cert_file.write(OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_PEM, cert).decode()) cert_file.close() verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked") verify_akamai_purge() def test_sct_embedding(): order = chisel2.auth_and_issue([random_domain()]) print(order.fullchain_pem.encode()) cert = parse_cert(order) # make sure there is no poison extension try: cert.extensions.get_extension_for_oid(x509.ObjectIdentifier("1.3.6.1.4.1.11129.2.4.3")) raise(Exception("certificate 
contains CT poison extension")) except x509.ExtensionNotFound: # do nothing pass # make sure there is a SCT list extension try: sctList = cert.extensions.get_extension_for_oid(x509.ObjectIdentifier("1.3.6.1.4.1.11129.2.4.2")) except x509.ExtensionNotFound: raise(Exception("certificate doesn't contain SCT list extension")) if len(sctList.value) != 2: raise(Exception("SCT list contains wrong number of SCTs")) for sct in sctList.value: if sct.version != x509.certificate_transparency.Version.v1: raise(Exception("SCT contains wrong version")) if sct.entry_type != x509.certificate_transparency.LogEntryType.PRE_CERTIFICATE: raise(Exception("SCT contains wrong entry type")) def test_only_return_existing_reg(): client = chisel2.uninitialized_client() email = "test@not-example.com" client.new_account(messages.NewRegistration.from_data(email=email, terms_of_service_agreed=True)) client = chisel2.uninitialized_client(key=client.net.key) class extendedAcct(dict): def json_dumps(self, indent=None): return json.dumps(self) acct = extendedAcct({ "termsOfServiceAgreed": True, "contact": [email], "onlyReturnExisting": True }) resp = client.net.post(client.directory['newAccount'], acct, acme_version=2) if resp.status_code != 200: raise(Exception("incorrect response returned for onlyReturnExisting")) other_client = chisel2.uninitialized_client() newAcct = extendedAcct({ "termsOfServiceAgreed": True, "contact": [email], "onlyReturnExisting": True }) chisel2.expect_problem("urn:ietf:params:acme:error:accountDoesNotExist", lambda: other_client.net.post(other_client.directory['newAccount'], newAcct, acme_version=2)) def BouncerHTTPRequestHandler(redirect, guestlist): """ BouncerHTTPRequestHandler returns a BouncerHandler class that acts like a club bouncer in front of another server. The bouncer will respond to GET requests by looking up the allowed number of requests in the guestlist for the User-Agent making the request. If there is at least one guestlist spot for that UA it will be redirected to the real server and the guestlist will be decremented. Once the guestlist spots for a UA are expended requests will get a bogus result and have to stand outside in the cold """ class BouncerHandler(BaseHTTPRequestHandler): def __init__(self, *args, **kwargs): BaseHTTPRequestHandler.__init__(self, *args, **kwargs) def do_HEAD(self): # This is used by wait_for_server self.send_response(200) self.end_headers() def do_GET(self): ua = self.headers['User-Agent'] guestlistAllows = BouncerHandler.guestlist.get(ua, 0) # If there is still space on the guestlist for this UA then redirect # the request and decrement the guestlist. if guestlistAllows > 0: BouncerHandler.guestlist[ua] -= 1 self.log_message("BouncerHandler UA {0} is on the Guestlist. {1} requests remaining.".format(ua, BouncerHandler.guestlist[ua])) self.send_response(302) self.send_header("Location", BouncerHandler.redirect) self.end_headers() # Otherwise return a bogus result else: self.log_message("BouncerHandler UA {0} has no requests on the Guestlist. Sending request to the curb".format(ua)) self.send_response(200) self.end_headers() self.wfile.write(u"(• ◡ •) <( VIPs only! )".encode()) BouncerHandler.guestlist = guestlist BouncerHandler.redirect = redirect return BouncerHandler def wait_for_server(addr): while True: try: # NOTE(@cpu): Using HEAD here instead of GET because the # BouncerHandler modifies its state for GET requests. 
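            # Poll with HEAD every 0.5s until the handler responds with a 200;
            # a ConnectionError just means the server thread hasn't bound its
            # socket yet, so keep retrying.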
            status = requests.head(addr).status_code
            if status == 200:
                return
        except requests.exceptions.ConnectionError:
            pass
        time.sleep(0.5)

def multiva_setup(client, guestlist):
    """
    Set up a testing domain and a backing multiva bouncer server. This will
    block until the server is ready. The returned cleanup function should be
    used to stop the server. Requests whose User-Agent still has guestlist
    spots remaining will be redirected to the real challtestsrv for a good
    answer; the rest will get a bad answer. The domain name is randomly chosen
    with random_domain().
    """
    hostname = random_domain()

    csr_pem = chisel2.make_csr([hostname])
    order = client.new_order(csr_pem)
    authz = order.authorizations[0]
    chall = None
    for c in authz.body.challenges:
        if isinstance(c.chall, challenges.HTTP01):
            chall = c.chall
    if chall is None:
        raise(Exception("No HTTP-01 challenge found for random domain authz"))

    token = chall.encode("token")

    # Calculate the challenge's keyauth so we can add a good keyauth response on
    # the real challtestsrv that we redirect VIP requests to.
    resp = chall.response(client.net.key)
    keyauth = resp.key_authorization
    challSrv.add_http01_response(token, keyauth)

    # Add an A record for the domains to ensure the VA's requests are directed
    # to the interface that we bound the HTTPServer to.
    challSrv.add_a_record(hostname, ["10.88.88.88"])

    # Add an A record for the redirect target that sends it to the real chall
    # test srv for a valid HTTP-01 response.
    redirHostname = "pebble-challtestsrv.example.com"
    challSrv.add_a_record(redirHostname, ["10.77.77.77"])

    # Start a simple python HTTP server on port 5002 in its own thread.
    # NOTE(@cpu): The pebble-challtestsrv binds 10.77.77.77:5002 for HTTP-01
    # challenges so we must use the 10.88.88.88 address for the throw away
    # server for this test and add a mock DNS entry that directs the VA to it.
    redirect = "http://{0}/.well-known/acme-challenge/{1}".format(
            redirHostname, token)
    httpd = HTTPServer(("10.88.88.88", 5002), BouncerHTTPRequestHandler(redirect, guestlist))
    thread = threading.Thread(target = httpd.serve_forever)
    thread.daemon = False
    thread.start()

    def cleanup():
        # Remove the challtestsrv mocks
        challSrv.remove_a_record(hostname)
        challSrv.remove_a_record(redirHostname)
        challSrv.remove_http01_response(token)
        # Shut down the HTTP server gracefully and join on its thread.
        httpd.shutdown()
        httpd.server_close()
        thread.join()

    return hostname, cleanup

def test_http_multiva_threshold_pass():
    client = chisel2.make_client()

    # Configure a guestlist that will pass the multiVA threshold test by
    # allowing the primary VA and one remote.
    guestlist = {"boulder": 1, "boulder-remote-b": 1}

    hostname, cleanup = multiva_setup(client, guestlist)

    try:
        # With the maximum number of allowed remote VA failures the overall
        # challenge should still succeed.
        chisel2.auth_and_issue([hostname], client=client, chall_type="http-01")
    finally:
        cleanup()

def test_http_multiva_primary_fail_remote_pass():
    client = chisel2.make_client()

    # Configure a guestlist that will fail the primary VA check but allow the
    # remote VAs
    guestlist = {"boulder": 0, "boulder-remote-a": 1, "boulder-remote-b": 1}

    hostname, cleanup = multiva_setup(client, guestlist)

    foundException = False
    try:
        # The overall validation should fail even if the remotes are allowed
        # because the primary VA result cannot be overridden.
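        # With "boulder" given zero guestlist spots the primary VA is never
        # redirected to the real challtestsrv and sees the bogus response,
        # even though both remote VAs are allowed through.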
        chisel2.auth_and_issue([hostname], client=client, chall_type="http-01")
    except acme_errors.ValidationError as e:
        # NOTE(@cpu): Chisel2's expect_problem doesn't work in this case so this
        # test needs to unpack an `acme_errors.ValidationError` on its own. It
        # might be possible to clean this up in the future.
        if len(e.failed_authzrs) != 1:
            raise(Exception("expected one failed authz, found {0}".format(len(e.failed_authzrs))))
        challs = e.failed_authzrs[0].body.challenges
        httpChall = None
        for chall_body in challs:
            if isinstance(chall_body.chall, challenges.HTTP01):
                httpChall = chall_body
        if httpChall is None:
            raise(Exception("no HTTP-01 challenge in failed authz"))
        if httpChall.error.typ != "urn:ietf:params:acme:error:unauthorized":
            raise(Exception("expected unauthorized prob, found {0}".format(httpChall.error.typ)))
        foundException = True
    finally:
        cleanup()
    if foundException is False:
        raise(Exception("Overall validation did not fail"))

def test_http_multiva_threshold_fail():
    client = chisel2.make_client()

    # Configure a guestlist that will fail the multiVA threshold test by
    # only allowing the primary VA.
    guestlist = {"boulder": 1}

    hostname, cleanup = multiva_setup(client, guestlist)

    failed_authzrs = []
    try:
        chisel2.auth_and_issue([hostname], client=client, chall_type="http-01")
    except acme_errors.ValidationError as e:
        # NOTE(@cpu): Chisel2's expect_problem doesn't work in this case so this
        # test needs to unpack an `acme_errors.ValidationError` on its own. It
        # might be possible to clean this up in the future.
        failed_authzrs = e.failed_authzrs
    finally:
        cleanup()
    if len(failed_authzrs) != 1:
        raise(Exception("expected one failed authz, found {0}".format(len(failed_authzrs))))
    challs = failed_authzrs[0].body.challenges
    httpChall = None
    for chall_body in challs:
        if isinstance(chall_body.chall, challenges.HTTP01):
            httpChall = chall_body
    if httpChall is None:
        raise(Exception("no HTTP-01 challenge in failed authz"))
    if httpChall.error.typ != "urn:ietf:params:acme:error:unauthorized":
        raise(Exception("expected unauthorized prob, found {0}".format(httpChall.error.typ)))
    if not httpChall.error.detail.startswith("During secondary validation: "):
        raise(Exception("expected 'During secondary validation' problem detail, found {0}".format(httpChall.error.detail)))

class FakeH2ServerHandler(socketserver.BaseRequestHandler):
    """
    FakeH2ServerHandler is a TCP socket handler that writes data representing an
    initial HTTP/2 SETTINGS frame as a response to all received data.
    """
    def handle(self):
        # Read whatever the HTTP request was so that the response isn't seen as
        # unsolicited.
        self.data = self.request.recv(1024).strip()
        # Blast some HTTP/2 bytes onto the socket
        # Truncated example data taken from the community forum:
        # https://community.letsencrypt.org/t/le-validation-error-if-server-is-in-google-infrastructure/51841
        self.request.sendall(b"\x00\x00\x12\x04\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x80\x00")

def wait_for_tcp_server(addr, port):
    """
    wait_for_tcp_server attempts to make a TCP connection to the given
    address/port every 0.5s until it succeeds.
    """
    while True:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((addr, port))
            sock.sendall(b"\n")
            return
        except socket.error:
            time.sleep(0.5)

def test_http2_http01_challenge():
    """
    test_http2_http01_challenge tests that an HTTP-01 challenge made to an
    HTTP/2 server fails with a specific error message for this case.
""" client = chisel2.make_client() hostname = "fake.h2.example.com" # Add an A record for the test server to ensure the VA's requests are directed # to the interface that we bind the FakeH2ServerHandler to. challSrv.add_a_record(hostname, ["10.88.88.88"]) # Allow socket address reuse on the base TCPServer class. Failing to do this # causes subsequent integration tests to fail with "Address in use" errors even # though this test _does_ call shutdown() and server_close(). Even though the # server was shut-down Python's socket will be in TIME_WAIT because of prev. client # connections. Having the TCPServer set SO_REUSEADDR on the socket solves # the problem. socketserver.TCPServer.allow_reuse_address = True # Create, start, and wait for a fake HTTP/2 server. server = socketserver.TCPServer(("10.88.88.88", 5002), FakeH2ServerHandler) thread = threading.Thread(target = server.serve_forever) thread.daemon = False thread.start() wait_for_tcp_server("10.88.88.88", 5002) # Issuing an HTTP-01 challenge for this hostname should produce a connection # problem with an error specific to the HTTP/2 misconfiguration. expectedError = "Server is speaking HTTP/2 over HTTP" try: chisel2.auth_and_issue([hostname], client=client, chall_type="http-01") except acme_errors.ValidationError as e: for authzr in e.failed_authzrs: c = chisel2.get_chall(authzr, challenges.HTTP01) error = c.error if error is None or error.typ != "urn:ietf:params:acme:error:connection": raise(Exception("Expected connection prob, got %s" % (error.__str__()))) if not error.detail.endswith(expectedError): raise(Exception("Expected prob detail ending in %s, got %s" % (expectedError, error.detail))) finally: server.shutdown() server.server_close() thread.join() def test_new_order_policy_errs(): """ Test that creating an order with policy blocked identifiers returns a problem with subproblems. """ client = chisel2.make_client(None) # 'in-addr.arpa' is present in `test/hostname-policy.yaml`'s # HighRiskBlockedNames list. csr_pem = chisel2.make_csr(["out-addr.in-addr.arpa", "between-addr.in-addr.arpa"]) # With two policy blocked names in the order we expect to get back a top # level rejectedIdentifier with a detail message that references # subproblems. # # TODO(@cpu): After https://github.com/certbot/certbot/issues/7046 is # implemented in the upstream `acme` module this test should also ensure the # subproblems are properly represented. ok = False try: order = client.new_order(csr_pem) except messages.Error as e: ok = True if e.typ != "urn:ietf:params:acme:error:rejectedIdentifier": raise(Exception("Expected rejectedIdentifier type problem, got {0}".format(e.typ))) if e.detail != 'Error creating new order :: Cannot issue for "between-addr.in-addr.arpa": The ACME server refuses to issue a certificate for this domain name, because it is forbidden by policy (and 1 more problems. 
Refer to sub-problems for more information.)':
            raise(Exception("Order problem detail did not match expected"))
    if not ok:
        raise(Exception("Expected problem, got no error"))

def test_long_san_no_cn():
    try:
        chisel2.auth_and_issue(["".join(random.choice(string.ascii_uppercase) for x in range(61)) + ".com"])
        # if we get here the auth_and_issue call didn't fail, so fail the test
        raise(Exception("Issuance didn't fail when the only SAN in a certificate was longer than the max CN length"))
    except messages.Error as e:
        if e.typ != "urn:ietf:params:acme:error:badCSR":
            raise(Exception("Expected badCSR type problem, got {0}".format(e.typ)))
        if e.detail != "Error finalizing order :: CSR doesn't contain a SAN short enough to fit in CN":
            raise(Exception("Problem detail did not match expected"))

def test_delete_unused_challenges():
    order = chisel2.auth_and_issue([random_domain()], chall_type="dns-01")
    a = order.authorizations[0]
    if len(a.body.challenges) != 1:
        raise(Exception("too many challenges (%d) left after validation" % len(a.body.challenges)))
    if not isinstance(a.body.challenges[0].chall, challenges.DNS01):
        raise(Exception("wrong challenge type left after validation"))

    # intentionally fail a challenge
    client = chisel2.make_client()
    csr_pem = chisel2.make_csr([random_domain()])
    order = client.new_order(csr_pem)
    c = chisel2.get_chall(order.authorizations[0], challenges.DNS01)
    client.answer_challenge(c, c.response(client.net.key))

    for _ in range(5):
        a, _ = client.poll(order.authorizations[0])
        if a.body.status == Status("invalid"):
            break
        time.sleep(1)

    if len(a.body.challenges) != 1:
        raise(Exception("too many challenges (%d) left after failed validation" % len(a.body.challenges)))
    if not isinstance(a.body.challenges[0].chall, challenges.DNS01):
        raise(Exception("wrong challenge type left after validation"))

def test_auth_deactivation_v2():
    client = chisel2.make_client(None)
    csr_pem = chisel2.make_csr([random_domain()])
    order = client.new_order(csr_pem)
    resp = client.deactivate_authorization(order.authorizations[0])
    if resp.body.status is not messages.STATUS_DEACTIVATED:
        raise(Exception("unexpected authorization status"))

    order = chisel2.auth_and_issue([random_domain()], client=client)
    resp = client.deactivate_authorization(order.authorizations[0])
    if resp.body.status is not messages.STATUS_DEACTIVATED:
        raise(Exception("unexpected authorization status"))

def test_ocsp():
    cert_file = temppath('test_ocsp.pem')
    chisel2.auth_and_issue([random_domain()], cert_output=cert_file.name)

    # As OCSP-Updater is generating responses independently of the CA we sit in a loop
    # checking OCSP until we either see a good response or we timeout (5s).
    verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "good")

def test_ct_submission():
    hostname = random_domain()

    # These should correspond to the configured logs in ra.json.
    log_groups = [
        ["http://boulder:4500/submissions", "http://boulder:4501/submissions"],
        ["http://boulder:4510/submissions", "http://boulder:4511/submissions"],
    ]

    def submissions(group):
        count = 0
        for log in group:
            count += int(requests.get(log + "?hostnames=%s" % hostname).text)
        return count

    chisel2.auth_and_issue([hostname])

    got = [ submissions(log_groups[0]), submissions(log_groups[1]) ]
    expected = [ 1, 2 ]

    for i in range(len(log_groups)):
        if got[i] < expected[i]:
            raise(Exception("For log group %d, got %d submissions, expected %d."
% (i, got[i], expected[i]))) def check_ocsp_basic_oid(cert_file, issuer_file, url): """ This function checks if an OCSP response was successful, but doesn't verify the signature or timestamp. This is useful when simulating the past, so we don't incorrectly reject a response for being in the past. """ ocsp_request = make_ocsp_req(cert_file, issuer_file) responses = fetch_ocsp(ocsp_request, url) # An unauthorized response (for instance, if the OCSP responder doesn't know # about this cert) will just be 30 03 0A 01 06. A "good" or "revoked" # response will contain, among other things, the id-pkix-ocsp-basic OID # identifying the response type. We look for that OID to confirm we got a # successful response. expected = bytearray.fromhex("06 09 2B 06 01 05 05 07 30 01 01") for resp in responses: if not expected in bytearray(resp): raise(Exception("Did not receive successful OCSP response: %s doesn't contain %s" % (base64.b64encode(resp), base64.b64encode(expected)))) ocsp_exp_unauth_setup_data = {} @register_six_months_ago def ocsp_exp_unauth_setup(): client = chisel2.make_client(None) cert_file = temppath('ocsp_exp_unauth_setup.pem') order = chisel2.auth_and_issue([random_domain()], client=client, cert_output=cert_file.name) cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem) # Since our servers are pretending to be in the past, but the openssl cli # isn't, we'll get an expired OCSP response. Just check that it exists; # don't do the full verification (which would fail). check_ocsp_basic_oid(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002") global ocsp_exp_unauth_setup_data ocsp_exp_unauth_setup_data['cert_file'] = cert_file.name def test_ocsp_exp_unauth(): tries = 0 if 'cert_file' not in ocsp_exp_unauth_setup_data: raise Exception("ocsp_exp_unauth_setup didn't run") cert_file = ocsp_exp_unauth_setup_data['cert_file'] while tries < 5: try: verify_ocsp(cert_file, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "XXX") raise(Exception("Unexpected return from verify_ocsp")) except subprocess.CalledProcessError as cpe: if cpe.output == b"Responder Error: unauthorized (6)\n": break except: pass tries += 1 time.sleep(0.25) else: raise(Exception("timed out waiting for unauthorized OCSP response for expired certificate")) def test_blocked_key_account(): # Only config-next has a blocked keys file configured. if not CONFIG_NEXT: return with open("test/test-ca.key", "rb") as key_file: key = serialization.load_pem_private_key(key_file.read(), password=None, backend=default_backend()) # Create a client with the JWK set to a blocked private key jwk = josepy.JWKRSA(key=key) client = chisel2.uninitialized_client(jwk) email = "test@not-example.com" # Try to create an account testPass = False try: client.new_account(messages.NewRegistration.from_data(email=email, terms_of_service_agreed=True)) except acme_errors.Error as e: if e.typ != "urn:ietf:params:acme:error:badPublicKey": raise(Exception("problem did not have correct error type, had {0}".format(e.typ))) if e.detail != "public key is forbidden": raise(Exception("problem did not have correct error detail, had {0}".format(e.detail))) testPass = True if testPass is False: raise(Exception("expected account creation to fail with Error when using blocked key")) def test_blocked_key_cert(): # Only config-next has a blocked keys file configured. 
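    # This mirrors test_blocked_key_account above: the same test/test-ca.key is
    # assumed to appear in the configured blocked keys list, so a CSR signed
    # with it should be rejected when the order is finalized.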
    if not CONFIG_NEXT:
        return

    with open("test/test-ca.key", "r") as f:
        pemBytes = f.read()

    domains = [random_domain(), random_domain()]
    csr = acme_crypto_util.make_csr(pemBytes, domains, False)

    client = chisel2.make_client(None)
    order = client.new_order(csr)
    authzs = order.authorizations

    testPass = False
    cleanup = chisel2.do_http_challenges(client, authzs)
    try:
        order = client.poll_and_finalize(order)
    except acme_errors.Error as e:
        if e.typ != "urn:ietf:params:acme:error:badCSR":
            raise(Exception("problem did not have correct error type, had {0}".format(e.typ)))
        if e.detail != "Error finalizing order :: invalid public key in CSR: public key is forbidden":
            raise(Exception("problem did not have correct error detail, had {0}".format(e.detail)))
        testPass = True

    if testPass is False:
        raise(Exception("expected cert creation to fail with Error when using blocked key"))

def test_expiration_mailer():
    email_addr = "integration.%x@letsencrypt.org" % random.randrange(2**16)
    order = chisel2.auth_and_issue([random_domain()], email=email_addr)
    cert = parse_cert(order)
    # Check that the expiration mailer sends a reminder
    expiry = cert.not_valid_after
    no_reminder = expiry + datetime.timedelta(days=-31)
    first_reminder = expiry + datetime.timedelta(days=-13)
    last_reminder = expiry + datetime.timedelta(days=-2)

    requests.post("http://localhost:9381/clear", data='')
    for when in (no_reminder, first_reminder, last_reminder):
        print(get_future_output(
            ["./bin/expiration-mailer", "--config", "%s/expiration-mailer.json" % config_dir],
            when))
    resp = requests.get("http://localhost:9381/count?to=%s" % email_addr)
    mailcount = int(resp.text)
    if mailcount != 2:
        raise(Exception("\nExpiry mailer failed: expected 2 emails, got %d" % mailcount))

caa_recheck_setup_data = {}
@register_twenty_days_ago
def caa_recheck_setup():
    client = chisel2.make_client()
    # Issue a certificate with the clock set back, and save the authzs to check
    # later that they are valid (200). They should however require rechecking for
    # CAA purposes.
    numNames = 10
    # Generate numNames subdomains of a random domain
    base_domain = random_domain()
    domains = [ "{0}.{1}".format(str(n), base_domain) for n in range(numNames) ]
    order = chisel2.auth_and_issue(domains, client=client)

    global caa_recheck_setup_data
    caa_recheck_setup_data = {
        'client': client,
        'authzs': order.authorizations,
    }

def test_recheck_caa():
    """Request issuance for a domain where we have an old cached authz from
    when CAA was good. We'll set a new CAA record forbidding issuance; the
    new issuance attempt should recheck CAA and reject the request.
    """
    if 'authzs' not in caa_recheck_setup_data:
        raise(Exception("CAA authzs not prepared for test_recheck_caa"))
    domains = []
    for a in caa_recheck_setup_data['authzs']:
        response = caa_recheck_setup_data['client']._post(a.uri, None)
        if response.status_code != 200:
            raise(Exception("Unexpected response for CAA authz: ", response.status_code))
        domain = a.body.identifier.value
        domains.append(domain)

    # Set a forbidding CAA record on just one domain
    challSrv.add_caa_issue(domains[3], ";")

    # Request issuance for the previously-issued domain name, which should
    # now be denied due to CAA.
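    # Only domains[3] carries the forbidding CAA record, but the order below
    # includes all of the names, so a failed CAA recheck on any one of them
    # should reject the whole issuance.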
    chisel2.expect_problem("urn:ietf:params:acme:error:caa",
        lambda: chisel2.auth_and_issue(domains, client=caa_recheck_setup_data['client']))

def test_caa_good():
    domain = random_domain()
    challSrv.add_caa_issue(domain, "happy-hacker-ca.invalid")
    chisel2.auth_and_issue([domain])

def test_caa_reject():
    domain = random_domain()
    challSrv.add_caa_issue(domain, "sad-hacker-ca.invalid")
    chisel2.expect_problem("urn:ietf:params:acme:error:caa",
        lambda: chisel2.auth_and_issue([domain]))

def test_caa_extensions():
    goodCAA = "happy-hacker-ca.invalid"

    client = chisel2.make_client()
    caa_account_uri = client.net.account.uri
    caa_records = [
        {"domain": "accounturi.good-caa-reserved.com",
         "value": "{0}; accounturi={1}".format(goodCAA, caa_account_uri)},
        {"domain": "dns-01-only.good-caa-reserved.com",
         "value": "{0}; validationmethods=dns-01".format(goodCAA)},
        {"domain": "http-01-only.good-caa-reserved.com",
         "value": "{0}; validationmethods=http-01".format(goodCAA)},
        {"domain": "dns-01-or-http-01.good-caa-reserved.com",
         "value": "{0}; validationmethods=dns-01,http-01".format(goodCAA)},
    ]
    for policy in caa_records:
        challSrv.add_caa_issue(policy["domain"], policy["value"])

    # TODO(@4a6f656c): Once the `CAAValidationMethods` feature flag is enabled by
    # default, remove this early return.
    if not CONFIG_NEXT:
        return

    chisel2.expect_problem("urn:ietf:params:acme:error:caa",
        lambda: chisel2.auth_and_issue(["dns-01-only.good-caa-reserved.com"], chall_type="http-01"))

    chisel2.expect_problem("urn:ietf:params:acme:error:caa",
        lambda: chisel2.auth_and_issue(["http-01-only.good-caa-reserved.com"], chall_type="dns-01"))

    ## Note: the additional names are to avoid rate limiting...
    chisel2.auth_and_issue(["dns-01-only.good-caa-reserved.com", "www.dns-01-only.good-caa-reserved.com"], chall_type="dns-01")
    chisel2.auth_and_issue(["http-01-only.good-caa-reserved.com", "www.http-01-only.good-caa-reserved.com"], chall_type="http-01")
    chisel2.auth_and_issue(["dns-01-or-http-01.good-caa-reserved.com", "dns-01-only.good-caa-reserved.com"], chall_type="dns-01")
    chisel2.auth_and_issue(["dns-01-or-http-01.good-caa-reserved.com", "http-01-only.good-caa-reserved.com"], chall_type="http-01")

    ## CAA should fail with an arbitrary account, but succeed with the CAA client.
    chisel2.expect_problem("urn:ietf:params:acme:error:caa",
        lambda: chisel2.auth_and_issue(["accounturi.good-caa-reserved.com"]))
    chisel2.auth_and_issue(["accounturi.good-caa-reserved.com"], client=client)

def test_account_update():
    """
    Create a new ACME client/account with one contact email. Then update the
    account to a different contact email, twice.
    """
    emails = ("initial-email@not-example.com", "updated-email@not-example.com", "another-update@not-example.com")
    client = chisel2.make_client(email=emails[0])

    for email in emails[1:]:
        result = chisel2.update_email(client, email=email)
        # We expect one contact in the result
        if len(result.body.contact) != 1:
            raise(Exception("\nUpdate account failed: expected one contact in result, got %d" % len(result.body.contact)))
        # We expect it to be the email we just updated to
        actual = result.body.contact[0]
        if actual != "mailto:" + email:
            raise(Exception("\nUpdate account failed: expected contact %s, got %s" % (email, actual)))

def test_renewal_exemption():
    """
    Under a single domain, issue one certificate, then two renewals of that
    certificate, then one more different certificate (with a different
    subdomain). Since the certificatesPerName rate limit in testing is 2 per 90
    days, and the renewals should be discounted under the renewal exemption,
    each of these issuances should succeed.
Then do one last issuance that we expect to be rate limited, just to check that the rate limit is actually 2, and we are testing what we think we are testing. See https://letsencrypt.org/docs/rate-limits/ for more details. """ base_domain = random_domain() # First issuance chisel2.auth_and_issue(["www." + base_domain]) # First Renewal chisel2.auth_and_issue(["www." + base_domain]) # Second Renewal chisel2.auth_and_issue(["www." + base_domain]) # Issuance of a different cert chisel2.auth_and_issue(["blog." + base_domain]) # Final, failed issuance, for another different cert chisel2.expect_problem("urn:ietf:params:acme:error:rateLimited", lambda: chisel2.auth_and_issue(["mail." + base_domain])) def test_certificates_per_name(): chisel2.expect_problem("urn:ietf:params:acme:error:rateLimited", lambda: chisel2.auth_and_issue([random_domain() + ".lim.it"])) def test_oversized_csr(): # Number of names is chosen to be one greater than the configured RA/CA maxNames numNames = 101 # Generate numNames subdomains of a random domain base_domain = random_domain() domains = [ "{0}.{1}".format(str(n),base_domain) for n in range(numNames) ] # We expect issuing for these domains to produce a malformed error because # there are too many names in the request. chisel2.expect_problem("urn:ietf:params:acme:error:malformed", lambda: chisel2.auth_and_issue(domains)) def parse_cert(order): return x509.load_pem_x509_certificate(order.fullchain_pem.encode(), default_backend()) def test_admin_revoker_cert(): cert_file = temppath('test_admin_revoker_cert.pem') order = chisel2.auth_and_issue([random_domain()], cert_output=cert_file.name) parsed_cert = parse_cert(order) # Revoke certificate by serial reset_akamai_purges() run(["./bin/admin-revoker", "serial-revoke", "--config", "%s/admin-revoker.json" % config_dir, '%x' % parsed_cert.serial_number, '1']) # Wait for OCSP response to indicate revocation took place verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked") verify_akamai_purge() def test_admin_revoker_batched(): serialFile = tempfile.NamedTemporaryFile( dir=tempdir, suffix='.test_admin_revoker_batched.serials.hex', mode='w+', delete=False) cert_files = [ temppath('test_admin_revoker_batched.%d.pem' % x) for x in range(3) ] for cert_file in cert_files: order = chisel2.auth_and_issue([random_domain()], cert_output=cert_file.name) serialFile.write("%x\n" % parse_cert(order).serial_number) serialFile.close() run(["./bin/admin-revoker", "batched-serial-revoke", "--config", "%s/admin-revoker.json" % config_dir, serialFile.name, '0', '2']) for cert_file in cert_files: verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked") def test_sct_embedding(): order = chisel2.auth_and_issue([random_domain()]) cert = parse_cert(order) # make sure there is no poison extension try: cert.extensions.get_extension_for_oid(x509.ObjectIdentifier("1.3.6.1.4.1.11129.2.4.3")) raise(Exception("certificate contains CT poison extension")) except x509.ExtensionNotFound: # do nothing pass # make sure there is a SCT list extension try: sctList = cert.extensions.get_extension_for_oid(x509.ObjectIdentifier("1.3.6.1.4.1.11129.2.4.2")) except x509.ExtensionNotFound: raise(Exception("certificate doesn't contain SCT list extension")) if len(sctList.value) != 2: raise(Exception("SCT list contains wrong number of SCTs")) for sct in sctList.value: if sct.version != x509.certificate_transparency.Version.v1: raise(Exception("SCT contains wrong version")) if 
sct.entry_type != x509.certificate_transparency.LogEntryType.PRE_CERTIFICATE:
            raise(Exception("SCT contains wrong entry type"))
        delta = sct.timestamp - datetime.datetime.now()
        if abs(delta) > datetime.timedelta(hours=1):
            raise(Exception("Delta between SCT timestamp and now was too great "
                "%s vs %s (%s)" % (sct.timestamp, datetime.datetime.now(), delta)))

def test_auth_deactivation():
    client = chisel2.make_client(None)
    d = random_domain()
    csr_pem = chisel2.make_csr([d])
    order = client.new_order(csr_pem)

    resp = client.deactivate_authorization(order.authorizations[0])
    if resp.body.status is not messages.STATUS_DEACTIVATED:
        raise Exception("unexpected authorization status")

    order = chisel2.auth_and_issue([random_domain()], client=client)
    resp = client.deactivate_authorization(order.authorizations[0])
    if resp.body.status is not messages.STATUS_DEACTIVATED:
        raise Exception("unexpected authorization status")

def get_ocsp_response_and_reason(cert_file, issuer_file, url):
    """Returns the OCSP response output and revocation reason."""
    output = verify_ocsp(cert_file, issuer_file, url, None)
    m = re.search(r'Reason: (\w+)', output)
    reason = m.group(1) if m is not None else ""
    return output, reason

ocsp_resigning_setup_data = {}
@register_twenty_days_ago
def ocsp_resigning_setup():
    """Issue and then revoke a cert in the past.

    Useful setup for test_ocsp_resigning, which needs to check that the
    revocation reason is still correctly set after re-signing an old OCSP
    response.
    """
    client = chisel2.make_client(None)
    cert_file = temppath('ocsp_resigning_setup.pem')
    order = chisel2.auth_and_issue([random_domain()], client=client, cert_output=cert_file.name)
    cert = OpenSSL.crypto.load_certificate(
        OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem)
    # Revoke for reason 1: keyCompromise
    client.revoke(josepy.ComparableX509(cert), 1)

    ocsp_response, reason = get_ocsp_response_and_reason(
        cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002")
    global ocsp_resigning_setup_data
    ocsp_resigning_setup_data = {
        'cert_file': cert_file.name,
        'response': ocsp_response,
        'reason': reason
    }

def test_ocsp_resigning():
    """Check that, after re-signing an OCSP response, the reason is still set."""
    if 'response' not in ocsp_resigning_setup_data:
        raise Exception("ocsp_resigning_setup didn't run")

    tries = 0
    while tries < 5:
        resp, reason = get_ocsp_response_and_reason(
            ocsp_resigning_setup_data['cert_file'], "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002")
        if resp != ocsp_resigning_setup_data['response']:
            break
        tries += 1
        time.sleep(0.25)
    else:
        raise(Exception("timed out waiting for re-signed OCSP response for certificate"))

    if reason != ocsp_resigning_setup_data['reason']:
        raise(Exception("re-signed ocsp response has different reason %s expected %s" % (
            reason, ocsp_resigning_setup_data['reason'])))
    if reason != "keyCompromise":
        raise(Exception("re-signed ocsp response has wrong reason %s" % reason))
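# NOTE: Several of the OCSP tests above (test_ocsp_exp_unauth,
# test_ocsp_resigning) share a poll-until-ready pattern: retry a check a
# handful of times with a short sleep, and fail via the while/else clause if
# no attempt succeeds. A minimal sketch of that pattern as a reusable helper
# follows; the helper name and signature are illustrative assumptions, not an
# existing part of this suite.
def poll_until(check, tries=5, delay=0.25):
    """Call check() up to `tries` times, sleeping `delay` seconds between
    attempts. Returns True as soon as check() returns a truthy value, or
    False if every attempt fails."""
    for _ in range(tries):
        if check():
            return True
        time.sleep(delay)
    return False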
bmw/boulder
test/v2_integration.py
Python
mpl-2.0
67,383
[ "BLAST" ]
95e8d57974bb19b51f32cd427a9e6c753a2942a19d69fb0f91a8481375cd6c52
#!/usr/bin/env python # -*- coding: utf-8 -*- '''Views tests for the OSF.''' from __future__ import absolute_import import unittest import json import datetime as dt import mock import httplib as http import math import time from nose.tools import * # noqa PEP8 asserts from tests.test_features import requires_search from modularodm import Q, fields from modularodm.exceptions import ValidationError from dateutil.parser import parse as parse_date from framework import auth from framework.exceptions import HTTPError from framework.auth import User, Auth from framework.auth.utils import impute_names_model from framework.auth.exceptions import InvalidTokenError from framework.tasks import handlers from website import mailchimp_utils from website.views import _rescale_ratio from website.util import permissions from website.models import Node, Pointer, NodeLog from website.project.model import ensure_schemas, has_anonymous_link from website.project.views.contributor import ( send_claim_email, deserialize_contributors, send_claim_registered_email, notify_added_contributor ) from website.profile.utils import add_contributor_json, serialize_unregistered from website.profile.views import fmt_date_or_none from website.util import api_url_for, web_url_for from website import mails, settings from website.util import rubeus from website.project.views.node import _view_project, abbrev_authors, _should_show_wiki_widget from website.project.views.comment import serialize_comment from website.project.decorators import check_can_access from website.project.signals import contributor_added from website.addons.github.model import AddonGitHubOauthSettings from tests.base import ( OsfTestCase, fake, capture_signals, assert_is_redirect, assert_datetime_equal, ) from tests.factories import ( UserFactory, ApiOAuth2ApplicationFactory, ProjectFactory, WatchConfigFactory, NodeFactory, NodeLogFactory, AuthUserFactory, UnregUserFactory, RegistrationFactory, CommentFactory, PrivateLinkFactory, UnconfirmedUserFactory, DashboardFactory, FolderFactory, ProjectWithAddonFactory, MockAddonNodeSettings, ) from website.settings import ALL_MY_REGISTRATIONS_ID, ALL_MY_PROJECTS_ID class Addon(MockAddonNodeSettings): @property def complete(self): return True def archive_errors(self): return 'Error' class Addon2(MockAddonNodeSettings): @property def complete(self): return True def archive_errors(self): return 'Error' class TestViewingProjectWithPrivateLink(OsfTestCase): def setUp(self): super(TestViewingProjectWithPrivateLink, self).setUp() self.user = AuthUserFactory() # Is NOT a contributor self.project = ProjectFactory(is_public=False) self.link = PrivateLinkFactory() self.link.nodes.append(self.project) self.link.save() self.project_url = self.project.web_url_for('view_project') def test_not_anonymous_for_public_project(self): anonymous_link = PrivateLinkFactory(anonymous=True) anonymous_link.nodes.append(self.project) anonymous_link.save() self.project.set_privacy('public') self.project.save() self.project.reload() auth = Auth(user=self.user, private_key=anonymous_link.key) assert_false(has_anonymous_link(self.project, auth)) def test_has_private_link_key(self): res = self.app.get(self.project_url, {'view_only': self.link.key}) assert_equal(res.status_code, 200) def test_not_logged_in_no_key(self): res = self.app.get(self.project_url, {'view_only': None}) assert_is_redirect(res) res = res.follow(expect_errors=True) assert_equal(res.status_code, 301) assert_equal( res.request.path, '/login' ) def 
test_logged_in_no_private_key(self): res = self.app.get(self.project_url, {'view_only': None}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, http.FORBIDDEN) def test_logged_in_has_key(self): res = self.app.get( self.project_url, {'view_only': self.link.key}, auth=self.user.auth) assert_equal(res.status_code, 200) @unittest.skip('Skipping for now until we find a way to mock/set the referrer') def test_prepare_private_key(self): res = self.app.get(self.project_url, {'key': self.link.key}) res = res.click('Registrations') assert_is_redirect(res) res = res.follow() assert_equal(res.status_code, 200) assert_equal(res.request.GET['key'], self.link.key) def test_check_can_access_valid(self): contributor = AuthUserFactory() self.project.add_contributor(contributor, auth=Auth(self.project.creator)) self.project.save() assert_true(check_can_access(self.project, contributor)) def test_check_user_access_invalid(self): noncontrib = AuthUserFactory() with assert_raises(HTTPError): check_can_access(self.project, noncontrib) def test_check_user_access_if_user_is_None(self): assert_false(check_can_access(self.project, None)) class TestProjectViews(OsfTestCase): ADDONS_UNDER_TEST = { 'addon1': { 'node_settings': Addon, }, 'addon2': { 'node_settings': Addon2, }, } def setUp(self): super(TestProjectViews, self).setUp() ensure_schemas() self.user1 = AuthUserFactory() self.user1.save() self.consolidate_auth1 = Auth(user=self.user1) self.auth = self.user1.auth self.user2 = UserFactory() # A project has 2 contributors self.project = ProjectFactory( title="Ham", description='Honey-baked', creator=self.user1 ) self.project.add_contributor(self.user2, auth=Auth(self.user1)) self.project.save() def test_cannot_remove_only_visible_contributor_before_remove_contributor(self): self.project.visible_contributor_ids.remove(self.user1._id) self.project.save() url = self.project.api_url_for('project_before_remove_contributor') res = self.app.post_json( url, {'id': self.user2._id}, auth=self.auth, expect_errors=True ) assert_equal(res.status_code, http.FORBIDDEN) assert_equal(res.json['message_long'], 'Must have at least one bibliographic contributor') def test_cannot_remove_only_visible_contributor_remove_contributor(self): self.project.visible_contributor_ids.remove(self.user1._id) self.project.save() url = self.project.api_url_for('project_removecontributor') res = self.app.post_json( url, {'id': self.user2._id}, auth=self.auth, expect_errors=True ) assert_equal(res.status_code, http.FORBIDDEN) assert_equal(res.json['message_long'], 'Must have at least one bibliographic contributor') assert_true(self.project.is_contributor(self.user2)) def test_remove_only_visible_contributor_return_false(self): self.project.visible_contributor_ids.remove(self.user1._id) self.project.save() ret = self.project.remove_contributor(contributor=self.user2, auth=self.consolidate_auth1) assert_false(ret) self.project.reload() assert_true(self.project.is_contributor(self.user2)) def test_can_view_nested_project_as_admin(self): self.parent_project = NodeFactory( title='parent project', category='project', parent=self.project, is_public=False ) self.parent_project.save() self.child_project = NodeFactory( title='child project', category='project', parent=self.parent_project, is_public=False ) self.child_project.save() url = self.child_project.web_url_for('view_project') res = self.app.get(url, auth=self.auth) assert_not_in('Private Project', res.body) assert_in('parent project', res.body) def test_edit_description(self): url 
= "/api/v1/project/{0}/edit/".format(self.project._id) self.app.post_json(url, {"name": "description", "value": "Deep-fried"}, auth=self.auth) self.project.reload() assert_equal(self.project.description, "Deep-fried") def test_project_api_url(self): url = self.project.api_url res = self.app.get(url, auth=self.auth) data = res.json assert_equal(data['node']['category'], 'Project') assert_equal(data['node']['node_type'], 'project') assert_equal(data['node']['title'], self.project.title) assert_equal(data['node']['is_public'], self.project.is_public) assert_equal(data['node']['is_registration'], False) assert_equal(data['node']['id'], self.project._primary_key) assert_equal(data['node']['watched_count'], 0) assert_true(data['user']['is_contributor']) assert_equal(data['node']['description'], self.project.description) assert_equal(data['node']['url'], self.project.url) assert_equal(data['node']['tags'], [t._primary_key for t in self.project.tags]) assert_in('forked_date', data['node']) assert_in('watched_count', data['node']) assert_in('registered_from_url', data['node']) # TODO: Test "parent" and "user" output def test_api_get_folder_pointers(self): dashboard = DashboardFactory(creator=self.user1) project_one = ProjectFactory(creator=self.user1) project_two = ProjectFactory(creator=self.user1) url = dashboard.api_url_for("get_folder_pointers") dashboard.add_pointer(project_one, auth=self.consolidate_auth1) dashboard.add_pointer(project_two, auth=self.consolidate_auth1) res = self.app.get(url, auth=self.auth) pointers = res.json assert_in(project_one._id, pointers) assert_in(project_two._id, pointers) assert_equal(len(pointers), 2) def test_api_get_folder_pointers_from_non_folder(self): project_one = ProjectFactory(creator=self.user1) project_two = ProjectFactory(creator=self.user1) url = project_one.api_url_for("get_folder_pointers") project_one.add_pointer(project_two, auth=self.consolidate_auth1) res = self.app.get(url, auth=self.auth) pointers = res.json assert_equal(len(pointers), 0) def test_new_user_gets_dashboard_on_dashboard_path(self): my_user = AuthUserFactory() dashboard = my_user.node__contributed.find(Q('is_dashboard', 'eq', True)) assert_equal(dashboard.count(), 0) url = api_url_for('get_dashboard') self.app.get(url, auth=my_user.auth) my_user.reload() dashboard = my_user.node__contributed.find(Q('is_dashboard', 'eq', True)) assert_equal(dashboard.count(), 1) def test_add_contributor_post(self): # Two users are added as a contributor via a POST request project = ProjectFactory(creator=self.user1, is_public=True) user2 = UserFactory() user3 = UserFactory() url = "/api/v1/project/{0}/contributors/".format(project._id) dict2 = add_contributor_json(user2) dict3 = add_contributor_json(user3) dict2.update({ 'permission': 'admin', 'visible': True, }) dict3.update({ 'permission': 'write', 'visible': False, }) self.app.post_json( url, { 'users': [dict2, dict3], 'node_ids': [project._id], }, content_type="application/json", auth=self.auth, ).maybe_follow() project.reload() assert_in(user2._id, project.contributors) # A log event was added assert_equal(project.logs[-1].action, "contributor_added") assert_equal(len(project.contributors), 3) assert_in(user2._id, project.permissions) assert_in(user3._id, project.permissions) assert_equal(project.permissions[user2._id], ['read', 'write', 'admin']) assert_equal(project.permissions[user3._id], ['read', 'write']) def test_manage_permissions(self): url = self.project.api_url + 'contributors/manage/' self.app.post_json( url, { 'contributors': [ 
{'id': self.project.creator._id, 'permission': 'admin', 'registered': True, 'visible': True}, {'id': self.user1._id, 'permission': 'read', 'registered': True, 'visible': True}, {'id': self.user2._id, 'permission': 'admin', 'registered': True, 'visible': True}, ] }, auth=self.auth, ) self.project.reload() assert_equal(self.project.get_permissions(self.user1), ['read']) assert_equal(self.project.get_permissions(self.user2), ['read', 'write', 'admin']) def test_manage_permissions_again(self): url = self.project.api_url + 'contributors/manage/' self.app.post_json( url, { 'contributors': [ {'id': self.user1._id, 'permission': 'admin', 'registered': True, 'visible': True}, {'id': self.user2._id, 'permission': 'admin', 'registered': True, 'visible': True}, ] }, auth=self.auth, ) self.project.reload() self.app.post_json( url, { 'contributors': [ {'id': self.user1._id, 'permission': 'admin', 'registered': True, 'visible': True}, {'id': self.user2._id, 'permission': 'read', 'registered': True, 'visible': True}, ] }, auth=self.auth, ) self.project.reload() assert_equal(self.project.get_permissions(self.user2), ['read']) assert_equal(self.project.get_permissions(self.user1), ['read', 'write', 'admin']) def test_contributor_manage_reorder(self): # Two users are added as a contributor via a POST request project = ProjectFactory(creator=self.user1, is_public=True) reg_user1, reg_user2 = UserFactory(), UserFactory() project.add_contributors( [ {'user': reg_user1, 'permissions': [ 'read', 'write', 'admin'], 'visible': True}, {'user': reg_user2, 'permissions': [ 'read', 'write', 'admin'], 'visible': False}, ] ) # Add a non-registered user unregistered_user = project.add_unregistered_contributor( fullname=fake.name(), email=fake.email(), auth=self.consolidate_auth1, save=True, ) url = project.api_url + 'contributors/manage/' self.app.post_json( url, { 'contributors': [ {'id': reg_user2._id, 'permission': 'admin', 'registered': True, 'visible': False}, {'id': project.creator._id, 'permission': 'admin', 'registered': True, 'visible': True}, {'id': unregistered_user._id, 'permission': 'admin', 'registered': False, 'visible': True}, {'id': reg_user1._id, 'permission': 'admin', 'registered': True, 'visible': True}, ] }, auth=self.auth, ) project.reload() assert_equal( # Note: Cast ForeignList to list for comparison list(project.contributors), [reg_user2, project.creator, unregistered_user, reg_user1] ) assert_equal( project.visible_contributors, [project.creator, unregistered_user, reg_user1] ) def test_project_remove_contributor(self): url = "/api/v1/project/{0}/removecontributors/".format(self.project._id) # User 1 removes user2 self.app.post(url, json.dumps({"id": self.user2._id}), content_type="application/json", auth=self.auth).maybe_follow() self.project.reload() assert_not_in(self.user2._id, self.project.contributors) # A log event was added assert_equal(self.project.logs[-1].action, "contributor_removed") def test_get_contributors_abbrev(self): # create a project with 3 registered contributors project = ProjectFactory(creator=self.user1, is_public=True) reg_user1, reg_user2 = UserFactory(), UserFactory() project.add_contributors( [ {'user': reg_user1, 'permissions': [ 'read', 'write', 'admin'], 'visible': True}, {'user': reg_user2, 'permissions': [ 'read', 'write', 'admin'], 'visible': True}, ] ) # add an unregistered contributor project.add_unregistered_contributor( fullname=fake.name(), email=fake.email(), auth=self.consolidate_auth1, save=True, ) url = 
project.api_url_for('get_node_contributors_abbrev')
        res = self.app.get(url, auth=self.auth)
        assert_equal(len(project.contributors), 4)
        assert_equal(len(res.json['contributors']), 3)
        assert_equal(len(res.json['others_count']), 1)
        assert_equal(res.json['contributors'][0]['separator'], ',')
        assert_equal(res.json['contributors'][1]['separator'], ',')
        assert_equal(res.json['contributors'][2]['separator'], ' &')

    def test_edit_node_title(self):
        url = "/api/v1/project/{0}/edit/".format(self.project._id)
        # The title is changed through posting form data
        self.app.post_json(url, {"name": "title", "value": "Bacon"},
                           auth=self.auth).maybe_follow()
        self.project.reload()
        # The title was changed
        assert_equal(self.project.title, "Bacon")
        # A log event was saved
        assert_equal(self.project.logs[-1].action, "edit_title")

    def test_make_public(self):
        self.project.is_public = False
        self.project.save()
        url = "/api/v1/project/{0}/permissions/public/".format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        self.project.reload()
        assert_true(self.project.is_public)
        assert_equal(res.json['status'], 'success')

    def test_make_private(self):
        self.project.is_public = True
        self.project.save()
        url = "/api/v1/project/{0}/permissions/private/".format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        self.project.reload()
        assert_false(self.project.is_public)
        assert_equal(res.json['status'], 'success')

    def test_cant_make_public_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.project.add_contributor(non_admin, permissions=['read', 'write'])
        self.project.is_public = False
        self.project.save()
        url = "/api/v1/project/{0}/permissions/public/".format(self.project._id)
        res = self.app.post_json(
            url, {}, auth=non_admin.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_false(self.project.is_public)

    def test_cant_make_private_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.project.add_contributor(non_admin, permissions=['read', 'write'])
        self.project.is_public = True
        self.project.save()
        url = "/api/v1/project/{0}/permissions/private/".format(self.project._id)
        res = self.app.post_json(
            url, {}, auth=non_admin.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_true(self.project.is_public)

    def test_add_tag(self):
        url = self.project.api_url_for('project_add_tag')
        self.app.post_json(url, {'tag': "foo'ta#@%#%^&g?"}, auth=self.auth)
        self.project.reload()
        assert_in("foo'ta#@%#%^&g?", self.project.tags)
        assert_equal("foo'ta#@%#%^&g?", self.project.logs[-1].params['tag'])

    def test_remove_tag(self):
        self.project.add_tag("foo'ta#@%#%^&g?", auth=self.consolidate_auth1, save=True)
        assert_in("foo'ta#@%#%^&g?", self.project.tags)
        url = self.project.api_url_for("project_remove_tag")
        self.app.delete_json(url, {"tag": "foo'ta#@%#%^&g?"}, auth=self.auth)
        self.project.reload()
        assert_not_in("foo'ta#@%#%^&g?", self.project.tags)
        assert_equal("tag_removed", self.project.logs[-1].action)
        assert_equal("foo'ta#@%#%^&g?", self.project.logs[-1].params['tag'])

    @mock.patch('website.archiver.tasks.archive')
    def test_register_template_page(self, mock_archive):
        url = "/api/v1/project/{0}/register/Replication_Recipe_(Brandt_et_al.,_2013):_Post-Completion/".format(
            self.project._primary_key)
        self.app.post_json(url, {'registrationChoice': 'Make registration public immediately'}, auth=self.auth)
        self.project.reload()
        # A registration was added to the project's registration list
        assert_equal(len(self.project.node__registrations), 1)
        # A log event was saved
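        # The most recent log entry should record the registration, and the
        # project's node__registrations backref should point at the new
        # registration node (both checked below).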
assert_equal(self.project.logs[-1].action, "registration_initiated") # Most recent node is a registration reg = Node.load(self.project.node__registrations[-1]) assert_true(reg.is_registration) @mock.patch('website.archiver.tasks.archive') def test_register_template_with_embargo_creates_embargo(self, mock_archive): url = "/api/v1/project/{0}/register/Replication_Recipe_(Brandt_et_al.,_2013):_Post-Completion/".format( self.project._primary_key) self.app.post_json( url, { 'registrationChoice': 'embargo', 'embargoEndDate': "Fri, 01 Jan {year} 05:00:00 GMT".format(year=str(dt.date.today().year + 1)) }, auth=self.auth) self.project.reload() # Most recent node is a registration reg = Node.load(self.project.node__registrations[-1]) assert_true(reg.is_registration) # The registration created is not public assert_false(reg.is_public) # The registration is pending an embargo that has not been approved assert_true(reg.is_pending_embargo) def test_register_template_page_with_invalid_template_name(self): url = self.project.web_url_for('node_register_template_page', template='invalid') res = self.app.get(url, expect_errors=True, auth=self.auth) assert_equal(res.status_code, 404) assert_in('Template not found', res) def test_register_project_with_multiple_errors(self): self.project.add_addon('addon1', auth=Auth(self.user1)) component = NodeFactory(parent=self.project, creator=self.user1) component.add_addon('addon1', auth=Auth(self.user1)) component.add_addon('addon2', auth=Auth(self.user1)) self.project.save() component.save() url = self.project.api_url_for('project_before_register') res = self.app.get(url, auth=self.auth) data = res.json assert_equal(res.status_code, 200) assert_equal(len(data['errors']), 2) # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/1478 @mock.patch('website.archiver.tasks.archive') def test_registered_projects_contributions(self, mock_archive): # register a project self.project.register_node(None, Auth(user=self.project.creator), '', None) # get the first registered project of a project url = self.project.api_url_for('get_registrations') res = self.app.get(url, auth=self.auth) data = res.json pid = data['nodes'][0]['id'] url2 = api_url_for('get_summary', pid=pid) # count contributions res2 = self.app.get(url2, {'rescale_ratio': data['rescale_ratio']}, auth=self.auth) data = res2.json assert_is_not_none(data['summary']['nlogs']) def test_forks_contributions(self): # fork a project self.project.fork_node(Auth(user=self.project.creator)) # get the first forked project of a project url = self.project.api_url_for('get_forks') res = self.app.get(url, auth=self.auth) data = res.json pid = data['nodes'][0]['id'] url2 = api_url_for('get_summary', pid=pid) # count contributions res2 = self.app.get(url2, {'rescale_ratio': data['rescale_ratio']}, auth=self.auth) data = res2.json assert_is_not_none(data['summary']['nlogs']) @mock.patch('framework.transactions.commands.begin') @mock.patch('framework.transactions.commands.rollback') @mock.patch('framework.transactions.commands.commit') def test_get_logs(self, *mock_commands): # Add some logs for _ in range(5): self.project.logs.append( NodeLogFactory( user=self.user1, action='file_added', params={'node': self.project._id} ) ) self.project.save() url = self.project.api_url_for('get_logs') res = self.app.get(url, auth=self.auth) for mock_command in mock_commands: assert_false(mock_command.called) self.project.reload() data = res.json assert_equal(len(data['logs']), len(self.project.logs)) assert_equal(data['total'], 
len(self.project.logs)) assert_equal(data['page'], 0) assert_equal(data['pages'], 1) most_recent = data['logs'][0] assert_equal(most_recent['action'], 'file_added') def test_get_logs_invalid_page_input(self): url = self.project.api_url_for('get_logs') invalid_input = 'invalid page' res = self.app.get( url, {'page': invalid_input}, auth=self.auth, expect_errors=True ) assert_equal(res.status_code, 400) assert_equal( res.json['message_long'], 'Invalid value for "page".' ) def test_get_logs_negative_page_num(self): url = self.project.api_url_for('get_logs') invalid_input = -1 res = self.app.get( url, {'page': invalid_input}, auth=self.auth, expect_errors=True ) assert_equal(res.status_code, 400) assert_equal( res.json['message_long'], 'Invalid value for "page".' ) def test_get_logs_page_num_beyond_limit(self): url = self.project.api_url_for('get_logs') size = 10 page_num = math.ceil(len(self.project.logs) / float(size)) res = self.app.get( url, {'page': page_num}, auth=self.auth, expect_errors=True ) assert_equal(res.status_code, 400) assert_equal( res.json['message_long'], 'Invalid value for "page".' ) def test_get_logs_with_count_param(self): # Add some logs for _ in range(5): self.project.logs.append( NodeLogFactory( user=self.user1, action='file_added', params={'node': self.project._id} ) ) self.project.save() url = self.project.api_url_for('get_logs') res = self.app.get(url, {'count': 3}, auth=self.auth) assert_equal(len(res.json['logs']), 3) # 1 project create log, 1 add contributor log, then 5 generated logs assert_equal(res.json['total'], 5 + 2) assert_equal(res.json['page'], 0) assert_equal(res.json['pages'], 3) def test_get_logs_defaults_to_ten(self): # Add some logs for _ in range(12): self.project.logs.append( NodeLogFactory( user=self.user1, action='file_added', params={'node': self.project._id} ) ) self.project.save() url = self.project.api_url_for('get_logs') res = self.app.get(url, auth=self.auth) assert_equal(len(res.json['logs']), 10) # 1 project create log, 1 add contributor log, then 12 generated logs assert_equal(res.json['total'], 12 + 2) assert_equal(res.json['page'], 0) assert_equal(res.json['pages'], 2) def test_get_more_logs(self): # Add some logs for _ in range(12): self.project.logs.append( NodeLogFactory( user=self.user1, action="file_added", params={"node": self.project._id} ) ) self.project.save() url = self.project.api_url_for('get_logs') res = self.app.get(url, {"page": 1}, auth=self.auth) assert_equal(len(res.json['logs']), 4) # 1 project create log, 1 add contributor log, then 12 generated logs assert_equal(res.json['total'], 12 + 2) assert_equal(res.json['page'], 1) assert_equal(res.json['pages'], 2) def test_logs_private(self): """Add logs to a public project, then to its private component. Get the ten most recent logs; assert that ten logs are returned and that all belong to the project and not its component. 
""" # Add some logs for _ in range(15): self.project.add_log( auth=self.consolidate_auth1, action='file_added', params={'node': self.project._id} ) self.project.is_public = True self.project.save() child = NodeFactory(parent=self.project) for _ in range(5): child.add_log( auth=self.consolidate_auth1, action='file_added', params={'node': child._id} ) url = self.project.api_url_for('get_logs') res = self.app.get(url).maybe_follow() assert_equal(len(res.json['logs']), 10) # 1 project create log, 1 add contributor log, then 15 generated logs assert_equal(res.json['total'], 15 + 2) assert_equal(res.json['page'], 0) assert_equal(res.json['pages'], 2) assert_equal( [self.project._id] * 10, [ log['params']['node'] for log in res.json['logs'] ] ) def test_can_view_public_log_from_private_project(self): project = ProjectFactory(is_public=True) fork = project.fork_node(auth=self.consolidate_auth1) url = fork.api_url_for('get_logs') res = self.app.get(url, auth=self.auth) assert_equal( [each['action'] for each in res.json['logs']], ['node_forked', 'project_created'], ) project.is_public = False project.save() res = self.app.get(url, auth=self.auth) assert_equal( [each['action'] for each in res.json['logs']], ['node_forked', 'project_created'], ) def test_for_private_component_log(self): for _ in range(5): self.project.add_log( auth=self.consolidate_auth1, action='file_added', params={'node': self.project._id} ) self.project.is_public = True self.project.save() child = NodeFactory(parent=self.project) child.is_public = False child.set_title("foo", auth=self.consolidate_auth1) child.set_title("bar", auth=self.consolidate_auth1) child.save() url = self.project.api_url_for('get_logs') res = self.app.get(url).maybe_follow() assert_equal(len(res.json['logs']), 7) assert_not_in( child._id, [ log['params']['node'] for log in res.json['logs'] ] ) def test_remove_project(self): url = self.project.api_url res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow() self.project.reload() assert_equal(self.project.is_deleted, True) assert_in('url', res.json) assert_equal(res.json['url'], '/dashboard/') def test_private_link_edit_name(self): link = PrivateLinkFactory() link.nodes.append(self.project) link.save() assert_equal(link.name, "link") url = self.project.api_url + 'private_link/edit/' self.app.put_json( url, {'pk': link._id, "value": "new name"}, auth=self.auth, ).maybe_follow() self.project.reload() link.reload() assert_equal(link.name, "new name") def test_remove_private_link(self): link = PrivateLinkFactory() link.nodes.append(self.project) link.save() url = self.project.api_url_for('remove_private_link') self.app.delete_json( url, {'private_link_id': link._id}, auth=self.auth, ).maybe_follow() self.project.reload() link.reload() assert_true(link.is_deleted) def test_remove_component(self): node = NodeFactory(parent=self.project, creator=self.user1) url = node.api_url res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow() node.reload() assert_equal(node.is_deleted, True) assert_in('url', res.json) assert_equal(res.json['url'], self.project.url) def test_cant_remove_component_if_not_admin(self): node = NodeFactory(parent=self.project, creator=self.user1) non_admin = AuthUserFactory() node.add_contributor( non_admin, permissions=['read', 'write'], save=True, ) url = node.api_url res = self.app.delete_json( url, {}, auth=non_admin.auth, expect_errors=True, ).maybe_follow() assert_equal(res.status_code, http.FORBIDDEN) assert_false(node.is_deleted) def test_watch_and_unwatch(self): url = 
self.project.api_url_for('togglewatch_post') self.app.post_json(url, {}, auth=self.auth) res = self.app.get(self.project.api_url, auth=self.auth) assert_equal(res.json['node']['watched_count'], 1) self.app.post_json(url, {}, auth=self.auth) res = self.app.get(self.project.api_url, auth=self.auth) assert_equal(res.json['node']['watched_count'], 0) def test_view_project_returns_whether_to_show_wiki_widget(self): user = AuthUserFactory() project = ProjectFactory.build(creator=user, is_public=True) project.add_contributor(user) project.save() url = project.api_url_for('view_project') res = self.app.get(url, auth=user.auth) assert_equal(res.status_code, http.OK) assert_in('show_wiki_widget', res.json['user']) def test_fork_count_does_not_include_deleted_forks(self): user = AuthUserFactory() project = ProjectFactory(creator=user) auth = Auth(project.creator) fork = project.fork_node(auth) project.save() fork.remove_node(auth) fork.save() url = project.api_url_for('view_project') res = self.app.get(url, auth=user.auth) assert_in('fork_count', res.json['node']) assert_equal(0, res.json['node']['fork_count']) def test_statistic_page_redirect(self): url = self.project.web_url_for('project_statistics_redirect') res = self.app.get(url, auth=self.auth) assert_equal(res.status_code, 302) assert_in(self.project.web_url_for('project_statistics', _guid=True), res.location) class TestEditableChildrenViews(OsfTestCase): def setUp(self): OsfTestCase.setUp(self) self.user = AuthUserFactory() self.project = ProjectFactory(creator=self.user, is_public=False) self.child = ProjectFactory(parent=self.project, creator=self.user, is_public=True) self.grandchild = ProjectFactory(parent=self.child, creator=self.user, is_public=False) self.great_grandchild = ProjectFactory(parent=self.grandchild, creator=self.user, is_public=True) self.great_great_grandchild = ProjectFactory(parent=self.great_grandchild, creator=self.user, is_public=False) url = self.project.api_url_for('get_editable_children') self.project_results = self.app.get(url, auth=self.user.auth).json def test_get_editable_children(self): assert_equal(len(self.project_results['children']), 4) assert_equal(self.project_results['node']['id'], self.project._id) def test_editable_children_order(self): assert_equal(self.project_results['children'][0]['id'], self.child._id) assert_equal(self.project_results['children'][1]['id'], self.grandchild._id) assert_equal(self.project_results['children'][2]['id'], self.great_grandchild._id) assert_equal(self.project_results['children'][3]['id'], self.great_great_grandchild._id) def test_editable_children_indents(self): assert_equal(self.project_results['children'][0]['indent'], 0) assert_equal(self.project_results['children'][1]['indent'], 1) assert_equal(self.project_results['children'][2]['indent'], 2) assert_equal(self.project_results['children'][3]['indent'], 3) def test_editable_children_parents(self): assert_equal(self.project_results['children'][0]['parent_id'], self.project._id) assert_equal(self.project_results['children'][1]['parent_id'], self.child._id) assert_equal(self.project_results['children'][2]['parent_id'], self.grandchild._id) assert_equal(self.project_results['children'][3]['parent_id'], self.great_grandchild._id) def test_editable_children_privacy(self): assert_false(self.project_results['node']['is_public']) assert_true(self.project_results['children'][0]['is_public']) assert_false(self.project_results['children'][1]['is_public']) assert_true(self.project_results['children'][2]['is_public']) 
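        # children[3] is the great-great-grandchild, which setUp created with is_public=False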
assert_false(self.project_results['children'][3]['is_public']) def test_editable_children_titles(self): assert_equal(self.project_results['node']['title'], self.project.title) assert_equal(self.project_results['children'][0]['title'], self.child.title) assert_equal(self.project_results['children'][1]['title'], self.grandchild.title) assert_equal(self.project_results['children'][2]['title'], self.great_grandchild.title) assert_equal(self.project_results['children'][3]['title'], self.great_great_grandchild.title) class TestChildrenViews(OsfTestCase): def setUp(self): OsfTestCase.setUp(self) self.user = AuthUserFactory() def test_get_children(self): project = ProjectFactory(creator=self.user) child = NodeFactory(parent=project, creator=self.user) url = project.api_url_for('get_children') res = self.app.get(url, auth=self.user.auth) nodes = res.json['nodes'] assert_equal(len(nodes), 1) assert_equal(nodes[0]['id'], child._primary_key) def test_get_children_includes_pointers(self): project = ProjectFactory(creator=self.user) pointed = ProjectFactory() project.add_pointer(pointed, Auth(self.user)) project.save() url = project.api_url_for('get_children') res = self.app.get(url, auth=self.user.auth) nodes = res.json['nodes'] assert_equal(len(nodes), 1) assert_equal(nodes[0]['title'], pointed.title) pointer = Pointer.find_one(Q('node', 'eq', pointed)) assert_equal(nodes[0]['id'], pointer._primary_key) def test_get_children_filter_for_permissions(self): # self.user has admin access to this project project = ProjectFactory(creator=self.user) # self.user only has read access to this project, which project points # to read_only_pointed = ProjectFactory() read_only_creator = read_only_pointed.creator read_only_pointed.add_contributor(self.user, auth=Auth(read_only_creator), permissions=['read']) read_only_pointed.save() # self.user only has read access to this project, which is a subproject # of project read_only = ProjectFactory() read_only.add_contributor(self.user, auth=Auth(read_only.creator), permissions=['read']) project.nodes.append(read_only) # self.user adds a pointer to read_only_pointed project.add_pointer(read_only_pointed, Auth(self.user)) project.save() url = project.api_url_for('get_children') res = self.app.get(url, auth=self.user.auth) assert_equal(len(res.json['nodes']), 2) url = project.api_url_for('get_children', permissions='write') res = self.app.get(url, auth=self.user.auth) assert_equal(len(res.json['nodes']), 0) def test_get_children_rescale_ratio(self): project = ProjectFactory(creator=self.user) child = NodeFactory(parent=project, creator=self.user) url = project.api_url_for('get_children') res = self.app.get(url, auth=self.user.auth) rescale_ratio = res.json['rescale_ratio'] assert_is_instance(rescale_ratio, float) assert_equal(rescale_ratio, _rescale_ratio(Auth(self.user), [child])) def test_get_children_render_nodes_receives_auth(self): project = ProjectFactory(creator=self.user) NodeFactory(parent=project, creator=self.user) url = project.api_url_for('get_children') res = self.app.get(url, auth=self.user.auth) perm = res.json['nodes'][0]['permissions'] assert_equal(perm, 'admin') class TestUserProfile(OsfTestCase): def setUp(self): super(TestUserProfile, self).setUp() self.user = AuthUserFactory() def test_sanitization_of_edit_profile(self): url = api_url_for('edit_profile', uid=self.user._id) post_data = {'name': 'fullname', 'value': 'new<b> name</b> '} res = self.app.post(url, post_data, auth=self.user.auth) assert_equal('new name', res.json['name']) def 
test_fmt_date_or_none(self): with assert_raises(HTTPError) as cm: #enter a date before 1900 fmt_date_or_none(dt.datetime(1890, 10, 31, 18, 23, 29, 227)) # error should be raised because date is before 1900 assert_equal(cm.exception.code, http.BAD_REQUEST) def test_unserialize_social(self): url = api_url_for('unserialize_social') payload = { 'personal': 'http://frozen.pizza.com/reviews', 'twitter': 'howtopizza', 'github': 'frozenpizzacode', } self.app.put_json( url, payload, auth=self.user.auth, ) self.user.reload() for key, value in payload.iteritems(): assert_equal(self.user.social[key], value) assert_true(self.user.social['researcherId'] is None) def test_unserialize_social_validation_failure(self): url = api_url_for('unserialize_social') # personal URL is invalid payload = { 'personal': 'http://invalidurl', 'twitter': 'howtopizza', 'github': 'frozenpizzacode', } res = self.app.put_json( url, payload, auth=self.user.auth, expect_errors=True ) assert_equal(res.status_code, 400) assert_equal(res.json['message_long'], 'Invalid personal URL.') def test_serialize_social_editable(self): self.user.social['twitter'] = 'howtopizza' self.user.save() url = api_url_for('serialize_social') res = self.app.get( url, auth=self.user.auth, ) assert_equal(res.json.get('twitter'), 'howtopizza') assert_true(res.json.get('github') is None) assert_true(res.json['editable']) def test_serialize_social_not_editable(self): user2 = AuthUserFactory() self.user.social['twitter'] = 'howtopizza' self.user.save() url = api_url_for('serialize_social', uid=self.user._id) res = self.app.get( url, auth=user2.auth, ) assert_equal(res.json.get('twitter'), 'howtopizza') assert_true(res.json.get('github') is None) assert_false(res.json['editable']) def test_serialize_social_addons_editable(self): self.user.add_addon('github') user_github = self.user.get_addon('github') oauth_settings = AddonGitHubOauthSettings() oauth_settings.github_user_id = 'testuser' oauth_settings.save() user_github.oauth_settings = oauth_settings user_github.save() user_github.github_user_name = 'howtogithub' oauth_settings.save() url = api_url_for('serialize_social') res = self.app.get( url, auth=self.user.auth, ) assert_equal( res.json['addons']['github'], 'howtogithub' ) def test_serialize_social_addons_not_editable(self): user2 = AuthUserFactory() self.user.add_addon('github') user_github = self.user.get_addon('github') oauth_settings = AddonGitHubOauthSettings() oauth_settings.github_user_id = 'testuser' oauth_settings.save() user_github.oauth_settings = oauth_settings user_github.save() user_github.github_user_name = 'howtogithub' oauth_settings.save() url = api_url_for('serialize_social', uid=self.user._id) res = self.app.get( url, auth=user2.auth, ) assert_not_in('addons', res.json) def test_unserialize_and_serialize_jobs(self): jobs = [{ 'institution': 'an institution', 'department': 'a department', 'title': 'a title', 'startMonth': 'January', 'startYear': '2001', 'endMonth': 'March', 'endYear': '2001', 'ongoing': False, }, { 'institution': 'another institution', 'department': None, 'title': None, 'startMonth': 'May', 'startYear': '2001', 'endMonth': None, 'endYear': None, 'ongoing': True, }] payload = {'contents': jobs} url = api_url_for('unserialize_jobs') self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(len(self.user.jobs), 2) url = api_url_for('serialize_jobs') res = self.app.get( url, auth=self.user.auth, ) for i, job in enumerate(jobs): assert_equal(job, res.json['contents'][i]) def 
test_unserialize_and_serialize_schools(self): schools = [{ 'institution': 'an institution', 'department': 'a department', 'degree': 'a degree', 'startMonth': 1, 'startYear': '2001', 'endMonth': 5, 'endYear': '2001', 'ongoing': False, }, { 'institution': 'another institution', 'department': None, 'degree': None, 'startMonth': 5, 'startYear': '2001', 'endMonth': None, 'endYear': None, 'ongoing': True, }] payload = {'contents': schools} url = api_url_for('unserialize_schools') self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(len(self.user.schools), 2) url = api_url_for('serialize_schools') res = self.app.get( url, auth=self.user.auth, ) for i, job in enumerate(schools): assert_equal(job, res.json['contents'][i]) def test_unserialize_jobs(self): jobs = [ { 'institution': fake.company(), 'department': fake.catch_phrase(), 'title': fake.bs(), 'startMonth': 5, 'startYear': '2013', 'endMonth': 3, 'endYear': '2014', 'ongoing': False, } ] payload = {'contents': jobs} url = api_url_for('unserialize_jobs') res = self.app.put_json(url, payload, auth=self.user.auth) assert_equal(res.status_code, 200) self.user.reload() # jobs field is updated assert_equal(self.user.jobs, jobs) def test_unserialize_names(self): fake_fullname_w_spaces = ' {} '.format(fake.name()) names = { 'full': fake_fullname_w_spaces, 'given': 'Tea', 'middle': 'Gray', 'family': 'Pot', 'suffix': 'Ms.', } url = api_url_for('unserialize_names') res = self.app.put_json(url, names, auth=self.user.auth) assert_equal(res.status_code, 200) self.user.reload() # user is updated assert_equal(self.user.fullname, fake_fullname_w_spaces.strip()) assert_equal(self.user.given_name, names['given']) assert_equal(self.user.middle_names, names['middle']) assert_equal(self.user.family_name, names['family']) assert_equal(self.user.suffix, names['suffix']) def test_unserialize_schools(self): schools = [ { 'institution': fake.company(), 'department': fake.catch_phrase(), 'degree': fake.bs(), 'startMonth': 5, 'startYear': '2013', 'endMonth': 3, 'endYear': '2014', 'ongoing': False, } ] payload = {'contents': schools} url = api_url_for('unserialize_schools') res = self.app.put_json(url, payload, auth=self.user.auth) assert_equal(res.status_code, 200) self.user.reload() # schools field is updated assert_equal(self.user.schools, schools) def test_unserialize_jobs_valid(self): jobs_cached = self.user.jobs jobs = [ { 'institution': fake.company(), 'department': fake.catch_phrase(), 'title': fake.bs(), 'startMonth': 5, 'startYear': '2013', 'endMonth': 3, 'endYear': '2014', 'ongoing': False, } ] payload = {'contents': jobs} url = api_url_for('unserialize_jobs') res = self.app.put_json(url, payload, auth=self.user.auth) assert_equal(res.status_code, 200) def test_get_current_user_gravatar_default_size(self): url = api_url_for('current_user_gravatar') res = self.app.get(url, auth=self.user.auth) current_user_gravatar = res.json['gravatar_url'] assert_true(current_user_gravatar is not None) url = api_url_for('get_gravatar', uid=self.user._id) res = self.app.get(url, auth=self.user.auth) my_user_gravatar = res.json['gravatar_url'] assert_equal(current_user_gravatar, my_user_gravatar) def test_get_other_user_gravatar_default_size(self): user2 = AuthUserFactory() url = api_url_for('current_user_gravatar') res = self.app.get(url, auth=self.user.auth) current_user_gravatar = res.json['gravatar_url'] url = api_url_for('get_gravatar', uid=user2._id) res = self.app.get(url, auth=self.user.auth) user2_gravatar = res.json['gravatar_url'] 
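        # Gravatar URLs hash the user's email address, so two distinct users should get distinct URLs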
assert_true(user2_gravatar is not None) assert_not_equal(current_user_gravatar, user2_gravatar) def test_get_current_user_gravatar_specific_size(self): url = api_url_for('current_user_gravatar') res = self.app.get(url, auth=self.user.auth) current_user_default_gravatar = res.json['gravatar_url'] url = api_url_for('current_user_gravatar', size=11) res = self.app.get(url, auth=self.user.auth) current_user_small_gravatar = res.json['gravatar_url'] assert_true(current_user_small_gravatar is not None) assert_not_equal(current_user_default_gravatar, current_user_small_gravatar) def test_get_other_user_gravatar_specific_size(self): user2 = AuthUserFactory() url = api_url_for('get_gravatar', uid=user2._id) res = self.app.get(url, auth=self.user.auth) gravatar_default_size = res.json['gravatar_url'] url = api_url_for('get_gravatar', uid=user2._id, size=11) res = self.app.get(url, auth=self.user.auth) gravatar_small = res.json['gravatar_url'] assert_true(gravatar_small is not None) assert_not_equal(gravatar_default_size, gravatar_small) def test_update_user_timezone(self): assert_equal(self.user.timezone, 'Etc/UTC') payload = {'timezone': 'America/New_York', 'id': self.user._id} url = api_url_for('update_user', uid=self.user._id) self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(self.user.timezone, 'America/New_York') def test_update_user_locale(self): assert_equal(self.user.locale, 'en_US') payload = {'locale': 'de_DE', 'id': self.user._id} url = api_url_for('update_user', uid=self.user._id) self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(self.user.locale, 'de_DE') def test_update_user_locale_none(self): assert_equal(self.user.locale, 'en_US') payload = {'locale': None, 'id': self.user._id} url = api_url_for('update_user', uid=self.user._id) self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(self.user.locale, 'en_US') def test_update_user_locale_empty_string(self): assert_equal(self.user.locale, 'en_US') payload = {'locale': '', 'id': self.user._id} url = api_url_for('update_user', uid=self.user._id) self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(self.user.locale, 'en_US') def test_cannot_update_user_without_user_id(self): user1 = AuthUserFactory() url = api_url_for('update_user') header = {'emails': [{'address': user1.username}]} res = self.app.put_json(url, header, auth=user1.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['message_long'], '"id" is required') @mock.patch('framework.auth.views.mails.send_mail') @mock.patch('website.mailchimp_utils.get_mailchimp_api') def test_update_user_mailing_lists(self, mock_get_mailchimp_api, send_mail): email = fake.email() self.user.emails.append(email) list_name = 'foo' self.user.mailing_lists[list_name] = True self.user.save() mock_client = mock.MagicMock() mock_get_mailchimp_api.return_value = mock_client mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]} list_id = mailchimp_utils.get_list_id_from_name(list_name) url = api_url_for('update_user', uid=self.user._id) emails = [ {'address': self.user.username, 'primary': False, 'confirmed': True}, {'address': email, 'primary': True, 'confirmed': True}] payload = {'locale': '', 'id': self.user._id, 'emails': emails} self.app.put_json(url, payload, auth=self.user.auth) mock_client.lists.unsubscribe.assert_called_with( id=list_id, email={'email': self.user.username} ) 
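        # After the old primary address is unsubscribed, the new primary address should be subscribed with the user's names passed as merge vars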
mock_client.lists.subscribe.assert_called_with( id=list_id, email={'email': email}, merge_vars={ 'fname': self.user.given_name, 'lname': self.user.family_name, }, double_optin=False, update_existing=True ) handlers.celery_teardown_request() @mock.patch('framework.auth.views.mails.send_mail') @mock.patch('website.mailchimp_utils.get_mailchimp_api') def test_unsubscribe_mailchimp_not_called_if_user_not_subscribed(self, mock_get_mailchimp_api, send_mail): email = fake.email() self.user.emails.append(email) list_name = 'foo' self.user.mailing_lists[list_name] = False self.user.save() mock_client = mock.MagicMock() mock_get_mailchimp_api.return_value = mock_client mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]} url = api_url_for('update_user', uid=self.user._id) emails = [ {'address': self.user.username, 'primary': False, 'confirmed': True}, {'address': email, 'primary': True, 'confirmed': True}] payload = {'locale': '', 'id': self.user._id, 'emails': emails} self.app.put_json(url, payload, auth=self.user.auth) assert_equal(mock_client.lists.unsubscribe.call_count, 0) assert_equal(mock_client.lists.subscribe.call_count, 0) handlers.celery_teardown_request() # TODO: Uncomment once outstanding issues with this feature are addressed # def test_twitter_redirect_success(self): # self.user.social['twitter'] = fake.last_name() # self.user.save() # res = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter'])) # assert_equals(res.status_code, http.FOUND) # assert_in(self.user.url, res.location) # def test_twitter_redirect_is_case_insensitive(self): # self.user.social['twitter'] = fake.last_name() # self.user.save() # res1 = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter'])) # res2 = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter'].lower())) # assert_equal(res1.location, res2.location) # def test_twitter_redirect_unassociated_twitter_handle_returns_404(self): # unassociated_handle = fake.last_name() # expected_error = 'There is no active user associated with the Twitter handle: {0}.'.format(unassociated_handle) # res = self.app.get( # web_url_for('redirect_to_twitter', twitter_handle=unassociated_handle), # expect_errors=True # ) # assert_equal(res.status_code, http.NOT_FOUND) # assert_true(expected_error in res.body) # def test_twitter_redirect_handle_with_multiple_associated_accounts_redirects_to_selection_page(self): # self.user.social['twitter'] = fake.last_name() # self.user.save() # user2 = AuthUserFactory() # user2.social['twitter'] = self.user.social['twitter'] # user2.save() # expected_error = 'There are multiple OSF accounts associated with the Twitter handle: <strong>{0}</strong>.'.format(self.user.social['twitter']) # res = self.app.get( # web_url_for( # 'redirect_to_twitter', # twitter_handle=self.user.social['twitter'], # expect_error=True # ) # ) # assert_equal(res.status_code, http.MULTIPLE_CHOICES) # assert_true(expected_error in res.body) # assert_true(self.user.url in res.body) # assert_true(user2.url in res.body) class TestUserProfileApplicationsPage(OsfTestCase): def setUp(self): super(TestUserProfileApplicationsPage, self).setUp() self.user = AuthUserFactory() self.user2 = AuthUserFactory() self.platform_app = ApiOAuth2ApplicationFactory(owner=self.user) self.detail_url = web_url_for('oauth_application_detail', client_id=self.platform_app.client_id) def test_non_owner_cant_access_detail_page(self): res = 
self.app.get(self.detail_url, auth=self.user2.auth, expect_errors=True) assert_equal(res.status_code, http.FORBIDDEN) def test_owner_cant_access_deleted_application(self): self.platform_app.is_active = False self.platform_app.save() res = self.app.get(self.detail_url, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, http.GONE) def test_owner_cant_access_nonexistent_application(self): url = web_url_for('oauth_application_detail', client_id='nonexistent') res = self.app.get(url, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, http.NOT_FOUND) class TestUserAccount(OsfTestCase): def setUp(self): super(TestUserAccount, self).setUp() self.user = AuthUserFactory() self.user.set_password('password') self.user.save() @mock.patch('website.profile.views.push_status_message') def test_password_change_valid(self, mock_push_status_message): old_password = 'password' new_password = 'Pa$$w0rd' confirm_password = new_password url = web_url_for('user_account_password') post_data = { 'old_password': old_password, 'new_password': new_password, 'confirm_password': confirm_password, } res = self.app.post(url, post_data, auth=(self.user.username, old_password)) assert_equal(res.status_code, 302) res = res.follow(auth=(self.user.username, new_password)) assert_equal(res.status_code, 200) self.user.reload() assert_true(self.user.check_password(new_password)) assert_true(mock_push_status_message.called) assert_in('Password updated successfully', mock_push_status_message.mock_calls[0][1][0]) @mock.patch('website.profile.views.push_status_message') def test_password_change_invalid(self, mock_push_status_message, old_password='', new_password='', confirm_password='', error_message='Old password is invalid'): url = web_url_for('user_account_password') post_data = { 'old_password': old_password, 'new_password': new_password, 'confirm_password': confirm_password, } res = self.app.post(url, post_data, auth=self.user.auth) assert_equal(res.status_code, 302) res = res.follow(auth=self.user.auth) assert_equal(res.status_code, 200) self.user.reload() assert_false(self.user.check_password(new_password)) assert_true(mock_push_status_message.called) assert_in(error_message, mock_push_status_message.mock_calls[0][1][0]) def test_password_change_invalid_old_password(self): self.test_password_change_invalid( old_password='invalid old password', new_password='new password', confirm_password='new password', error_message='Old password is invalid', ) def test_password_change_invalid_confirm_password(self): self.test_password_change_invalid( old_password='password', new_password='new password', confirm_password='invalid confirm password', error_message='Password does not match the confirmation', ) def test_password_change_invalid_new_password_length(self): self.test_password_change_invalid( old_password='password', new_password='12345', confirm_password='12345', error_message='Password should be at least six characters', ) def test_password_change_invalid_blank_password(self, old_password='', new_password='', confirm_password=''): self.test_password_change_invalid( old_password=old_password, new_password=new_password, confirm_password=confirm_password, error_message='Passwords cannot be blank', ) def test_password_change_invalid_blank_new_password(self): for password in ('', ' '): self.test_password_change_invalid_blank_password('password', password, 'new password') def test_password_change_invalid_blank_confirm_password(self): for password in ('', ' '): 
self.test_password_change_invalid_blank_password('password', 'new password', password) class TestAddingContributorViews(OsfTestCase): def setUp(self): super(TestAddingContributorViews, self).setUp() ensure_schemas() self.creator = AuthUserFactory() self.project = ProjectFactory(creator=self.creator) # Authenticate all requests self.app.authenticate(*self.creator.auth) contributor_added.connect(notify_added_contributor) def test_serialize_unregistered_without_record(self): name, email = fake.name(), fake.email() res = serialize_unregistered(fullname=name, email=email) assert_equal(res['fullname'], name) assert_equal(res['email'], email) assert_equal(res['id'], None) assert_false(res['registered']) assert_true(res['gravatar']) assert_false(res['active']) def test_deserialize_contributors(self): contrib = UserFactory() unreg = UnregUserFactory() name, email = fake.name(), fake.email() unreg_no_record = serialize_unregistered(name, email) contrib_data = [ add_contributor_json(contrib), serialize_unregistered(fake.name(), unreg.username), unreg_no_record ] contrib_data[0]['permission'] = 'admin' contrib_data[1]['permission'] = 'write' contrib_data[2]['permission'] = 'read' contrib_data[0]['visible'] = True contrib_data[1]['visible'] = True contrib_data[2]['visible'] = True res = deserialize_contributors( self.project, contrib_data, auth=Auth(self.creator)) assert_equal(len(res), len(contrib_data)) assert_true(res[0]['user'].is_registered) assert_false(res[1]['user'].is_registered) assert_true(res[1]['user']._id) assert_false(res[2]['user'].is_registered) assert_true(res[2]['user']._id) def test_deserialize_contributors_validates_fullname(self): name = "<img src=1 onerror=console.log(1)>" email = fake.email() unreg_no_record = serialize_unregistered(name, email) contrib_data = [unreg_no_record] contrib_data[0]['permission'] = 'admin' contrib_data[0]['visible'] = True with assert_raises(ValidationError): deserialize_contributors( self.project, contrib_data, auth=Auth(self.creator), validate=True) def test_deserialize_contributors_validates_email(self): name = fake.name() email = "!@#$%%^&*" unreg_no_record = serialize_unregistered(name, email) contrib_data = [unreg_no_record] contrib_data[0]['permission'] = 'admin' contrib_data[0]['visible'] = True with assert_raises(ValidationError): deserialize_contributors( self.project, contrib_data, auth=Auth(self.creator), validate=True) @mock.patch('website.project.views.contributor.mails.send_mail') def test_deserialize_contributors_sends_unreg_contributor_added_signal(self, _): unreg = UnregUserFactory() from website.project.signals import unreg_contributor_added serialized = [serialize_unregistered(fake.name(), unreg.username)] serialized[0]['visible'] = True with capture_signals() as mock_signals: deserialize_contributors(self.project, serialized, auth=Auth(self.creator)) assert_equal(mock_signals.signals_sent(), set([unreg_contributor_added])) def test_serialize_unregistered_with_record(self): name, email = fake.name(), fake.email() user = self.project.add_unregistered_contributor(fullname=name, email=email, auth=Auth(self.project.creator)) self.project.save() res = serialize_unregistered( fullname=name, email=email ) assert_false(res['active']) assert_false(res['registered']) assert_equal(res['id'], user._primary_key) assert_true(res['gravatar_url']) assert_equal(res['fullname'], name) assert_equal(res['email'], email) def test_add_contributor_with_unreg_contribs_and_reg_contribs(self): n_contributors_pre = len(self.project.contributors) reg_user = 
UserFactory() name, email = fake.name(), fake.email() pseudouser = { 'id': None, 'registered': False, 'fullname': name, 'email': email, 'permission': 'admin', 'visible': True, } reg_dict = add_contributor_json(reg_user) reg_dict['permission'] = 'admin' reg_dict['visible'] = True payload = { 'users': [reg_dict, pseudouser], 'node_ids': [] } url = self.project.api_url_for('project_contributors_post') self.app.post_json(url, payload).maybe_follow() self.project.reload() assert_equal(len(self.project.contributors), n_contributors_pre + len(payload['users'])) new_unreg = auth.get_user(email=email) assert_false(new_unreg.is_registered) # unclaimed record was added new_unreg.reload() assert_in(self.project._primary_key, new_unreg.unclaimed_records) rec = new_unreg.get_unclaimed_record(self.project._primary_key) assert_equal(rec['name'], name) assert_equal(rec['email'], email) @mock.patch('website.project.views.contributor.send_claim_email') def test_add_contributors_post_only_sends_one_email_to_unreg_user( self, mock_send_claim_email): # Project has two components comp1, comp2 = NodeFactory( creator=self.creator), NodeFactory(creator=self.creator) self.project.nodes.append(comp1) self.project.nodes.append(comp2) self.project.save() # An unreg user is added to the project AND its components unreg_user = { # dict because user has no previous unreg record 'id': None, 'registered': False, 'fullname': fake.name(), 'email': fake.email(), 'permission': 'admin', 'visible': True, } payload = { 'users': [unreg_user], 'node_ids': [comp1._primary_key, comp2._primary_key] } # send request url = self.project.api_url_for('project_contributors_post') assert_true(self.project.can_edit(user=self.creator)) self.app.post_json(url, payload, auth=self.creator.auth) # send_claim_email should only have been called once assert_equal(mock_send_claim_email.call_count, 1) @mock.patch('website.mails.send_mail') def test_add_contributors_post_only_sends_one_email_to_registered_user(self, mock_send_mail): # Project has components comp1 = NodeFactory(creator=self.creator, parent=self.project) comp2 = NodeFactory(creator=self.creator, parent=self.project) # A registered user is added to the project AND its components user = UserFactory() user_dict = { 'id': user._id, 'fullname': user.fullname, 'email': user.username, 'permission': 'write', 'visible': True} payload = { 'users': [user_dict], 'node_ids': [comp1._primary_key, comp2._primary_key] } # send request url = self.project.api_url_for('project_contributors_post') assert self.project.can_edit(user=self.creator) self.app.post_json(url, payload, auth=self.creator.auth) # send_mail should only have been called once assert_equal(mock_send_mail.call_count, 1) @mock.patch('website.mails.send_mail') def test_add_contributors_post_sends_email_if_user_not_contributor_on_parent_node(self, mock_send_mail): # Project has a component with a sub-component component = NodeFactory(creator=self.creator, parent=self.project) sub_component = NodeFactory(creator=self.creator, parent=component) # A registered user is added to the project and the sub-component, but NOT the component user = UserFactory() user_dict = { 'id': user._id, 'fullname': user.fullname, 'email': user.username, 'permission': 'write', 'visible': True} payload = { 'users': [user_dict], 'node_ids': [sub_component._primary_key] } # send request url = self.project.api_url_for('project_contributors_post') assert self.project.can_edit(user=self.creator) self.app.post_json(url, payload, auth=self.creator.auth) # send_mail is called for both 
the project and the sub-component assert_equal(mock_send_mail.call_count, 2) @mock.patch('website.project.views.contributor.send_claim_email') def test_email_sent_when_unreg_user_is_added(self, send_mail): name, email = fake.name(), fake.email() pseudouser = { 'id': None, 'registered': False, 'fullname': name, 'email': email, 'permission': 'admin', 'visible': True, } payload = { 'users': [pseudouser], 'node_ids': [] } url = self.project.api_url_for('project_contributors_post') self.app.post_json(url, payload).maybe_follow() assert_true(send_mail.called) assert_true(send_mail.called_with(email=email)) @mock.patch('website.mails.send_mail') def test_email_sent_when_reg_user_is_added(self, send_mail): contributor = UserFactory() contributors = [{ 'user': contributor, 'visible': True, 'permissions': ['read', 'write'] }] project = ProjectFactory() project.add_contributors(contributors, auth=Auth(self.project.creator)) project.save() assert_true(send_mail.called) send_mail.assert_called_with( contributor.username, mails.CONTRIBUTOR_ADDED, user=contributor, node=project) assert_almost_equal(contributor.contributor_added_email_records[project._id]['last_sent'], int(time.time()), delta=1) @mock.patch('website.mails.send_mail') def test_contributor_added_email_not_sent_to_unreg_user(self, send_mail): unreg_user = UnregUserFactory() contributors = [{ 'user': unreg_user, 'visible': True, 'permissions': ['read', 'write'] }] project = ProjectFactory() project.add_contributors(contributors, auth=Auth(self.project.creator)) project.save() assert_false(send_mail.called) @mock.patch('website.mails.send_mail') def test_forking_project_does_not_send_contributor_added_email(self, send_mail): project = ProjectFactory() project.fork_node(auth=Auth(project.creator)) assert_false(send_mail.called) @mock.patch('website.mails.send_mail') def test_templating_project_does_not_send_contributor_added_email(self, send_mail): project = ProjectFactory() project.use_as_template(auth=Auth(project.creator)) assert_false(send_mail.called) @mock.patch('website.archiver.tasks.archive') @mock.patch('website.mails.send_mail') def test_registering_project_does_not_send_contributor_added_email(self, send_mail, mock_archive): project = ProjectFactory() project.register_node(None, Auth(user=project.creator), '', None) assert_false(send_mail.called) @mock.patch('website.mails.send_mail') def test_notify_contributor_email_does_not_send_before_throttle_expires(self, send_mail): contributor = UserFactory() project = ProjectFactory() notify_added_contributor(project, contributor) assert_true(send_mail.called) # 2nd call does not send email because throttle period has not expired notify_added_contributor(project, contributor) assert_equal(send_mail.call_count, 1) @mock.patch('website.mails.send_mail') def test_notify_contributor_email_sends_after_throttle_expires(self, send_mail): throttle = 0.5 contributor = UserFactory() project = ProjectFactory() notify_added_contributor(project, contributor, throttle=throttle) assert_true(send_mail.called) time.sleep(1) # throttle period expires notify_added_contributor(project, contributor, throttle=throttle) assert_equal(send_mail.call_count, 2) def test_add_multiple_contributors_only_adds_one_log(self): n_logs_pre = len(self.project.logs) reg_user = UserFactory() name = fake.name() pseudouser = { 'id': None, 'registered': False, 'fullname': name, 'email': fake.email(), 'permission': 'write', 'visible': True, } reg_dict = add_contributor_json(reg_user) reg_dict['permission'] = 'admin' 
reg_dict['visible'] = True payload = { 'users': [reg_dict, pseudouser], 'node_ids': [] } url = self.project.api_url_for('project_contributors_post') self.app.post_json(url, payload).maybe_follow() self.project.reload() assert_equal(len(self.project.logs), n_logs_pre + 1) def test_add_contribs_to_multiple_nodes(self): child = NodeFactory(parent=self.project, creator=self.creator) n_contributors_pre = len(child.contributors) reg_user = UserFactory() name, email = fake.name(), fake.email() pseudouser = { 'id': None, 'registered': False, 'fullname': name, 'email': email, 'permission': 'admin', 'visible': True, } reg_dict = add_contributor_json(reg_user) reg_dict['permission'] = 'admin' reg_dict['visible'] = True payload = { 'users': [reg_dict, pseudouser], 'node_ids': [self.project._primary_key, child._primary_key] } url = "/api/v1/project/{0}/contributors/".format(self.project._id) self.app.post_json(url, payload).maybe_follow() child.reload() assert_equal(len(child.contributors), n_contributors_pre + len(payload['users'])) def tearDown(self): super(TestAddingContributorViews, self).tearDown() contributor_added.disconnect(notify_added_contributor) class TestUserInviteViews(OsfTestCase): def setUp(self): super(TestUserInviteViews, self).setUp() ensure_schemas() self.user = AuthUserFactory() self.project = ProjectFactory(creator=self.user) self.invite_url = '/api/v1/project/{0}/invite_contributor/'.format( self.project._primary_key) def test_invite_contributor_post_if_not_in_db(self): name, email = fake.name(), fake.email() res = self.app.post_json( self.invite_url, {'fullname': name, 'email': email}, auth=self.user.auth, ) contrib = res.json['contributor'] assert_true(contrib['id'] is None) assert_equal(contrib['fullname'], name) assert_equal(contrib['email'], email) def test_invite_contributor_post_if_unreg_already_in_db(self): # An unreg user is added to a different project name, email = fake.name(), fake.email() project2 = ProjectFactory() unreg_user = project2.add_unregistered_contributor(fullname=name, email=email, auth=Auth(project2.creator)) project2.save() res = self.app.post_json(self.invite_url, {'fullname': name, 'email': email}, auth=self.user.auth) expected = add_contributor_json(unreg_user) expected['fullname'] = name expected['email'] = email assert_equal(res.json['contributor'], expected) def test_invite_contributor_post_if_email_already_registered(self): reg_user = UserFactory() # Tries to invite user that is already registered res = self.app.post_json(self.invite_url, {'fullname': fake.name(), 'email': reg_user.username}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, http.BAD_REQUEST) def test_invite_contributor_post_if_user_is_already_contributor(self): unreg_user = self.project.add_unregistered_contributor( fullname=fake.name(), email=fake.email(), auth=Auth(self.project.creator) ) self.project.save() # Tries to invite unreg user that is already a contributor res = self.app.post_json(self.invite_url, {'fullname': fake.name(), 'email': unreg_user.username}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, http.BAD_REQUEST) def test_invite_contributor_with_no_email(self): name = fake.name() res = self.app.post_json(self.invite_url, {'fullname': name, 'email': None}, auth=self.user.auth) assert_equal(res.status_code, http.OK) data = res.json assert_equal(data['status'], 'success') assert_equal(data['contributor']['fullname'], name) assert_true(data['contributor']['email'] is None) assert_false(data['contributor']['registered']) 
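    # The invite tests above all POST the same two-field payload to
    # self.invite_url. The helper below is a hypothetical sketch (not used by
    # the existing tests) showing how that call could be factored out; it
    # assumes only the webtest post_json API already used throughout this class.
    def _post_invite(self, fullname, email, **kwargs):
        """Minimal sketch: POST an invite payload to this project's invite URL."""
        return self.app.post_json(
            self.invite_url,
            {'fullname': fullname, 'email': email},
            auth=self.user.auth,
            **kwargs
        )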
def test_invite_contributor_requires_fullname(self): res = self.app.post_json(self.invite_url, {'email': 'brian@queen.com', 'fullname': ''}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, http.BAD_REQUEST) @mock.patch('website.project.views.contributor.mails.send_mail') def test_send_claim_email_to_given_email(self, send_mail): project = ProjectFactory() given_email = fake.email() unreg_user = project.add_unregistered_contributor( fullname=fake.name(), email=given_email, auth=Auth(project.creator), ) project.save() send_claim_email(email=given_email, user=unreg_user, node=project) assert_true(send_mail.called) assert_true(send_mail.called_with( to_addr=given_email, mail=mails.INVITE )) @mock.patch('website.project.views.contributor.mails.send_mail') def test_send_claim_email_to_referrer(self, send_mail): project = ProjectFactory() referrer = project.creator given_email, real_email = fake.email(), fake.email() unreg_user = project.add_unregistered_contributor(fullname=fake.name(), email=given_email, auth=Auth( referrer) ) project.save() send_claim_email(email=real_email, user=unreg_user, node=project) assert_true(send_mail.called) # email was sent to referrer assert_true(send_mail.called_with( to_addr=referrer.username, mail=mails.FORWARD_INVITE )) @mock.patch('website.project.views.contributor.mails.send_mail') def test_send_claim_email_before_throttle_expires(self, send_mail): project = ProjectFactory() given_email = fake.email() unreg_user = project.add_unregistered_contributor( fullname=fake.name(), email=given_email, auth=Auth(project.creator), ) project.save() send_claim_email(email=fake.email(), user=unreg_user, node=project) # 2nd call raises error because throttle hasn't expired with assert_raises(HTTPError): send_claim_email(email=fake.email(), user=unreg_user, node=project) send_mail.assert_not_called() class TestClaimViews(OsfTestCase): def setUp(self): super(TestClaimViews, self).setUp() self.referrer = AuthUserFactory() self.project = ProjectFactory(creator=self.referrer, is_public=True) self.given_name = fake.name() self.given_email = fake.email() self.user = self.project.add_unregistered_contributor( fullname=self.given_name, email=self.given_email, auth=Auth(user=self.referrer) ) self.project.save() @mock.patch('website.project.views.contributor.mails.send_mail') def test_claim_user_post_with_registered_user_id(self, send_mail): # registered user who is attempting to claim the unclaimed contributor reg_user = UserFactory() payload = { # pk of unreg user record 'pk': self.user._primary_key, 'claimerId': reg_user._primary_key } url = '/api/v1/user/{uid}/{pid}/claim/email/'.format( uid=self.user._primary_key, pid=self.project._primary_key, ) res = self.app.post_json(url, payload) # mail was sent assert_true(send_mail.called) # ... 
to the correct address assert_true(send_mail.called_with(to_addr=self.given_email)) # view returns the correct JSON assert_equal(res.json, { 'status': 'success', 'email': reg_user.username, 'fullname': self.given_name, }) @mock.patch('website.project.views.contributor.mails.send_mail') def test_send_claim_registered_email(self, mock_send_mail): reg_user = UserFactory() send_claim_registered_email( claimer=reg_user, unreg_user=self.user, node=self.project ) mock_send_mail.assert_called() assert_equal(mock_send_mail.call_count, 2) first_call_args = mock_send_mail.call_args_list[0][0] assert_equal(first_call_args[0], self.referrer.username) second_call_args = mock_send_mail.call_args_list[1][0] assert_equal(second_call_args[0], reg_user.username) @mock.patch('website.project.views.contributor.mails.send_mail') def test_send_claim_registered_email_before_throttle_expires(self, mock_send_mail): reg_user = UserFactory() send_claim_registered_email( claimer=reg_user, unreg_user=self.user, node=self.project, ) # second call raises error because it was called before throttle period with assert_raises(HTTPError): send_claim_registered_email( claimer=reg_user, unreg_user=self.user, node=self.project, ) mock_send_mail.assert_not_called() @mock.patch('website.project.views.contributor.send_claim_registered_email') def test_claim_user_post_with_email_already_registered_sends_correct_email( self, send_claim_registered_email): reg_user = UserFactory() payload = { 'value': reg_user.username, 'pk': self.user._primary_key } url = self.project.api_url_for('claim_user_post', uid=self.user._id) self.app.post_json(url, payload) assert_true(send_claim_registered_email.called) def test_user_with_removed_unclaimed_url_claiming(self): """ Tests that when an unclaimed user is removed from a project, the unregistered user object does not retain the token. """ self.project.remove_contributor(self.user, Auth(user=self.referrer)) assert_not_in( self.project._primary_key, self.user.unclaimed_records.keys() ) def test_user_with_claim_url_cannot_claim_twice(self): """ Tests that when an unclaimed user is replaced on a project with a claimed user, the unregistered user object does not retain the token. 
""" reg_user = AuthUserFactory() self.project.replace_contributor(self.user, reg_user) assert_not_in( self.project._primary_key, self.user.unclaimed_records.keys() ) def test_claim_user_form_redirects_to_password_confirm_page_if_user_is_logged_in(self): reg_user = AuthUserFactory() url = self.user.get_claim_url(self.project._primary_key) res = self.app.get(url, auth=reg_user.auth) assert_equal(res.status_code, 302) res = res.follow(auth=reg_user.auth) token = self.user.get_unclaimed_record(self.project._primary_key)['token'] expected = self.project.web_url_for( 'claim_user_registered', uid=self.user._id, token=token, ) assert_equal(res.request.path, expected) def test_get_valid_form(self): url = self.user.get_claim_url(self.project._primary_key) res = self.app.get(url).maybe_follow() assert_equal(res.status_code, 200) def test_invalid_claim_form_redirects_to_register_page(self): uid = self.user._primary_key pid = self.project._primary_key url = '/user/{uid}/{pid}/claim/?token=badtoken'.format(**locals()) res = self.app.get(url, expect_errors=True).maybe_follow() assert_equal(res.status_code, 200) assert_equal(res.request.path, web_url_for('auth_login')) def test_posting_to_claim_form_with_valid_data(self): url = self.user.get_claim_url(self.project._primary_key) res = self.app.post(url, { 'username': self.user.username, 'password': 'killerqueen', 'password2': 'killerqueen' }).maybe_follow() assert_equal(res.status_code, 200) self.user.reload() assert_true(self.user.is_registered) assert_true(self.user.is_active) assert_not_in(self.project._primary_key, self.user.unclaimed_records) def test_posting_to_claim_form_removes_all_unclaimed_data(self): # user has multiple unclaimed records p2 = ProjectFactory(creator=self.referrer) self.user.add_unclaimed_record(node=p2, referrer=self.referrer, given_name=fake.name()) self.user.save() assert_true(len(self.user.unclaimed_records.keys()) > 1) # sanity check url = self.user.get_claim_url(self.project._primary_key) self.app.post(url, { 'username': self.given_email, 'password': 'bohemianrhap', 'password2': 'bohemianrhap' }) self.user.reload() assert_equal(self.user.unclaimed_records, {}) def test_posting_to_claim_form_sets_fullname_to_given_name(self): # User is created with a full name original_name = fake.name() unreg = UnregUserFactory(fullname=original_name) # User invited with a different name different_name = fake.name() new_user = self.project.add_unregistered_contributor( email=unreg.username, fullname=different_name, auth=Auth(self.project.creator), ) self.project.save() # Goes to claim url claim_url = new_user.get_claim_url(self.project._id) self.app.post(claim_url, { 'username': unreg.username, 'password': 'killerqueen', 'password2': 'killerqueen' }) unreg.reload() # Full name was set correctly assert_equal(unreg.fullname, different_name) # CSL names were set correctly parsed_name = impute_names_model(different_name) assert_equal(unreg.given_name, parsed_name['given_name']) assert_equal(unreg.family_name, parsed_name['family_name']) @mock.patch('website.project.views.contributor.mails.send_mail') def test_claim_user_post_returns_fullname(self, send_mail): url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key, self.project._primary_key) res = self.app.post_json(url, {'value': self.given_email, 'pk': self.user._primary_key}, auth=self.referrer.auth) assert_equal(res.json['fullname'], self.given_name) assert_true(send_mail.called) assert_true(send_mail.called_with(to_addr=self.given_email)) 
    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_claim_user_post_if_email_is_different_from_given_email(self, send_mail):
        email = fake.email()  # email that is different from the one the referrer gave
        url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
                                                         self.project._primary_key)
        self.app.post_json(url, {'value': email, 'pk': self.user._primary_key})
        assert_true(send_mail.called)
        assert_equal(send_mail.call_count, 2)
        # inspect the recorded calls directly; `called_with` on a mock is a
        # no-op (attribute access just creates a child mock)
        _, invited_args, invited_kwargs = send_mail.mock_calls[0]
        assert_in(email, list(invited_args) + list(invited_kwargs.values()))
        _, referrer_args, referrer_kwargs = send_mail.mock_calls[1]
        assert_in(self.given_email,
                  list(referrer_args) + list(referrer_kwargs.values()))

    def test_claim_url_with_bad_token_returns_400(self):
        url = self.project.web_url_for(
            'claim_user_registered',
            uid=self.user._id,
            token='badtoken',
        )
        res = self.app.get(url, auth=self.referrer.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_cannot_claim_user_with_user_who_is_already_contributor(self):
        # user who is already a contributor to the project
        contrib = AuthUserFactory()
        self.project.add_contributor(contrib, auth=Auth(self.project.creator))
        self.project.save()
        # Claiming user goes to claim url, but contrib is already logged in
        url = self.user.get_claim_url(self.project._primary_key)
        res = self.app.get(
            url,
            auth=contrib.auth,
        ).follow(
            auth=contrib.auth,
            expect_errors=True,
        )
        # Response is a 400
        assert_equal(res.status_code, 400)


class TestWatchViews(OsfTestCase):

    def setUp(self):
        super(TestWatchViews, self).setUp()
        self.user = AuthUserFactory()
        self.consolidate_auth = Auth(user=self.user)
        self.auth = self.user.auth  # used for requests auth
        # A public project
        self.project = ProjectFactory(is_public=True)
        self.project.save()
        # Manually reset log date to 100 days ago so it won't show up in feed
        self.project.logs[0].date = dt.datetime.utcnow() - dt.timedelta(days=100)
        self.project.logs[0].save()
        # A log added now
        self.last_log = self.project.add_log(
            NodeLog.TAG_ADDED,
            params={'node': self.project._primary_key},
            auth=self.consolidate_auth,
            log_date=dt.datetime.utcnow(),
            save=True,
        )
        # Clear watched list
        self.user.watched = []
        self.user.save()

    def test_watching_a_project_appends_to_users_watched_list(self):
        n_watched_then = len(self.user.watched)
        url = '/api/v1/project/{0}/watch/'.format(self.project._id)
        res = self.app.post_json(url, params={'digest': True}, auth=self.auth)
        assert_equal(res.json['watchCount'], 1)
        self.user.reload()
        n_watched_now = len(self.user.watched)
        assert_equal(res.status_code, 200)
        assert_equal(n_watched_now, n_watched_then + 1)
        assert_true(self.user.watched[-1].digest)

    def test_watching_project_twice_returns_400(self):
        url = '/api/v1/project/{0}/watch/'.format(self.project._id)
        res = self.app.post_json(url, params={}, auth=self.auth)
        assert_equal(res.status_code, 200)
        # User tries to watch a node she's already watching
        res2 = self.app.post_json(url, params={}, auth=self.auth,
                                  expect_errors=True)
        assert_equal(res2.status_code, http.BAD_REQUEST)

    def test_unwatching_a_project_removes_from_watched_list(self):
        # The user has already watched a project
        watch_config = WatchConfigFactory(node=self.project)
        self.user.watch(watch_config)
        self.user.save()
        n_watched_then = len(self.user.watched)
        url = '/api/v1/project/{0}/unwatch/'.format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        self.user.reload()
        n_watched_now = len(self.user.watched)
        assert_equal(res.status_code, 200)
        assert_equal(n_watched_now, n_watched_then - 1)
        assert_false(self.user.is_watching(self.project))

    def test_toggle_watch(self):
        # The user is not watching project
        assert_false(self.user.is_watching(self.project))
        url = '/api/v1/project/{0}/togglewatch/'.format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        # The response json has a watchcount and watched property
        assert_equal(res.json['watchCount'], 1)
        assert_true(res.json['watched'])
        assert_equal(res.status_code, 200)
        self.user.reload()
        # The user is now watching the project
        assert_true(res.json['watched'])
        assert_true(self.user.is_watching(self.project))

    def test_toggle_watch_node(self):
        # The project has a public sub-node
        node = NodeFactory(creator=self.user, parent=self.project, is_public=True)
        url = '/api/v1/project/{}/node/{}/togglewatch/'.format(self.project._id,
                                                               node._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        assert_equal(res.status_code, 200)
        self.user.reload()
        # The user is now watching the sub-node
        assert_true(res.json['watched'])
        assert_true(self.user.is_watching(node))

    def test_get_watched_logs(self):
        project = ProjectFactory()
        # Add some logs
        for _ in range(12):
            project.logs.append(NodeLogFactory(user=self.user, action='file_added'))
        project.save()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for('watched_logs_get')
        res = self.app.get(url, auth=self.auth)
        assert_equal(len(res.json['logs']), 10)
        # 1 project create log then 12 generated logs
        assert_equal(res.json['total'], 12 + 1)
        assert_equal(res.json['page'], 0)
        assert_equal(res.json['pages'], 2)
        assert_equal(res.json['logs'][0]['action'], 'file_added')

    def test_get_more_watched_logs(self):
        project = ProjectFactory()
        # Add some logs
        for _ in range(12):
            project.logs.append(NodeLogFactory(user=self.user, action='file_added'))
        project.save()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for('watched_logs_get')
        page = 1
        res = self.app.get(url, {'page': page}, auth=self.auth)
        assert_equal(len(res.json['logs']), 3)
        # 1 project create log then 12 generated logs
        assert_equal(res.json['total'], 12 + 1)
        assert_equal(res.json['page'], page)
        assert_equal(res.json['pages'], 2)
        assert_equal(res.json['logs'][0]['action'], 'file_added')

    def test_get_more_watched_logs_invalid_page(self):
        project = ProjectFactory()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for('watched_logs_get')
        invalid_page = 'invalid page'
        res = self.app.get(
            url, {'page': invalid_page}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "page".'
        )

    def test_get_more_watched_logs_invalid_size(self):
        project = ProjectFactory()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for('watched_logs_get')
        invalid_size = 'invalid size'
        res = self.app.get(
            url, {'size': invalid_size}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "size".'
        )


class TestPointerViews(OsfTestCase):

    def setUp(self):
        super(TestPointerViews, self).setUp()
        self.user = AuthUserFactory()
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)

    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1109
    def test_get_pointed_excludes_folders(self):
        pointer_project = ProjectFactory(is_public=True)  # project that points to another project
        pointed_project = ProjectFactory(creator=self.user)  # project that other project points to
        pointer_project.add_pointer(pointed_project,
                                    Auth(pointer_project.creator), save=True)
        # Project is in a dashboard folder
        folder = FolderFactory(creator=pointed_project.creator)
        folder.add_pointer(pointed_project, Auth(pointed_project.creator), save=True)

        url = pointed_project.api_url_for('get_pointed')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        # pointer_project's id is included in response, but folder's id is not
        pointer_ids = [each['id'] for each in res.json['pointed']]
        assert_in(pointer_project._id, pointer_ids)
        assert_not_in(folder._id, pointer_ids)

    def test_add_pointers(self):
        url = self.project.api_url + 'pointer/'
        node_ids = [
            NodeFactory()._id
            for _ in range(5)
        ]
        self.app.post_json(
            url,
            {'nodeIds': node_ids},
            auth=self.user.auth,
        ).maybe_follow()
        self.project.reload()
        assert_equal(
            len(self.project.nodes),
            5
        )

    def test_add_the_same_pointer_more_than_once(self):
        url = self.project.api_url + 'pointer/'
        double_node = NodeFactory()
        self.app.post_json(
            url,
            {'nodeIds': [double_node._id]},
            auth=self.user.auth,
        )
        res = self.app.post_json(
            url,
            {'nodeIds': [double_node._id]},
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)

    def test_add_pointers_no_user_logged_in(self):
        url = self.project.api_url_for('add_pointers')
        node_ids = [
            NodeFactory()._id
            for _ in range(5)
        ]
        res = self.app.post_json(
            url,
            {'nodeIds': node_ids},
            auth=None,
            expect_errors=True
        )
        assert_equal(res.status_code, 401)

    def test_add_pointers_public_non_contributor(self):
        project2 = ProjectFactory()
        project2.set_privacy('public')
        project2.save()
        url = self.project.api_url_for('add_pointers')
        self.app.post_json(
            url,
            {'nodeIds': [project2._id]},
            auth=self.user.auth,
        ).maybe_follow()
        self.project.reload()
        assert_equal(
            len(self.project.nodes),
            1
        )

    def test_add_pointers_contributor(self):
        user2 = AuthUserFactory()
        self.project.add_contributor(user2)
        self.project.save()
        url = self.project.api_url_for('add_pointers')
        node_ids = [
            NodeFactory()._id
            for _ in range(5)
        ]
        self.app.post_json(
            url,
            {'nodeIds': node_ids},
            auth=user2.auth,
        ).maybe_follow()
        self.project.reload()
        assert_equal(
            len(self.project.nodes),
            5
        )

    def test_add_pointers_not_provided(self):
        url = self.project.api_url + 'pointer/'
        res = self.app.post_json(url, {}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_move_pointers(self):
        project_two = ProjectFactory(creator=self.user)
        url = api_url_for('move_pointers')
        node = NodeFactory()
        pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
        assert_equal(len(self.project.nodes), 1)
        assert_equal(len(project_two.nodes), 0)

        user_auth = self.user.auth
        move_request = {
            'fromNodeId': self.project._id,
            'toNodeId': project_two._id,
            'pointerIds': [pointer.node._id],
        }
        self.app.post_json(
            url,
            move_request,
            auth=user_auth,
        ).maybe_follow()
        self.project.reload()
        project_two.reload()
        assert_equal(len(self.project.nodes), 0)
        assert_equal(len(project_two.nodes), 1)

    def test_remove_pointer(self):
        url = self.project.api_url + 'pointer/'
        node = NodeFactory()
        pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
        self.app.delete_json(
            url,
            {'pointerId': pointer._id},
            auth=self.user.auth,
        )
        self.project.reload()
        assert_equal(
            len(self.project.nodes),
            0
        )

    def test_remove_pointer_not_provided(self):
        url = self.project.api_url + 'pointer/'
        res = self.app.delete_json(url, {}, auth=self.user.auth,
                                   expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_remove_pointer_not_found(self):
        url = self.project.api_url + 'pointer/'
        res = self.app.delete_json(
            url,
            {'pointerId': None},
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)

    def test_remove_pointer_not_in_nodes(self):
        url = self.project.api_url + 'pointer/'
        node = NodeFactory()
        pointer = Pointer(node=node)
        res = self.app.delete_json(
            url,
            {'pointerId': pointer._id},
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)

    def test_fork_pointer(self):
        url = self.project.api_url + 'pointer/fork/'
        node = NodeFactory(creator=self.user)
        pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
        self.app.post_json(
            url,
            {'pointerId': pointer._id},
            auth=self.user.auth
        )

    def test_fork_pointer_not_provided(self):
        url = self.project.api_url + 'pointer/fork/'
        res = self.app.post_json(url, {}, auth=self.user.auth,
                                 expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_fork_pointer_not_found(self):
        url = self.project.api_url + 'pointer/fork/'
        res = self.app.post_json(
            url,
            {'pointerId': None},
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)

    def test_fork_pointer_not_in_nodes(self):
        url = self.project.api_url + 'pointer/fork/'
        node = NodeFactory()
        pointer = Pointer(node=node)
        res = self.app.post_json(
            url,
            {'pointerId': pointer._id},
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)

    def test_before_fork_with_pointer(self):
        "Assert that link warning appears in before fork callback."
        node = NodeFactory()
        self.project.add_pointer(node, auth=self.consolidate_auth)
        url = self.project.api_url + 'fork/before/'
        res = self.app.get(url, auth=self.user.auth).maybe_follow()
        prompts = [
            prompt
            for prompt in res.json['prompts']
            if 'Links will be copied into your fork' in prompt
        ]
        assert_equal(len(prompts), 1)

    def test_before_register_with_pointer(self):
        "Assert that link warning appears in before register callback."
        node = NodeFactory()
        self.project.add_pointer(node, auth=self.consolidate_auth)
        url = self.project.api_url + 'beforeregister/'
        res = self.app.get(url, auth=self.user.auth).maybe_follow()
        prompts = [
            prompt
            for prompt in res.json['prompts']
            if 'Links will be copied into your registration' in prompt
        ]
        assert_equal(len(prompts), 1)

    def test_before_fork_no_pointer(self):
        "Assert that link warning does not appear in before fork callback."
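        # 'fork/before/' serves the warning prompts shown before forking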
        url = self.project.api_url + 'fork/before/'
        res = self.app.get(url, auth=self.user.auth).maybe_follow()
        prompts = [
            prompt
            for prompt in res.json['prompts']
            if 'Links will be copied into your fork' in prompt
        ]
        assert_equal(len(prompts), 0)

    def test_before_register_no_pointer(self):
        """Assert that link warning does not appear in before register callback.
        """
        url = self.project.api_url + 'beforeregister/'
        res = self.app.get(url, auth=self.user.auth).maybe_follow()
        prompts = [
            prompt
            for prompt in res.json['prompts']
            if 'Links will be copied into your registration' in prompt
        ]
        assert_equal(len(prompts), 0)

    def test_get_pointed(self):
        pointing_node = ProjectFactory(creator=self.user)
        pointing_node.add_pointer(self.project, auth=Auth(self.user))
        url = self.project.api_url_for('get_pointed')
        res = self.app.get(url, auth=self.user.auth)
        pointed = res.json['pointed']
        assert_equal(len(pointed), 1)
        assert_equal(pointed[0]['url'], pointing_node.url)
        assert_equal(pointed[0]['title'], pointing_node.title)
        assert_equal(pointed[0]['authorShort'], abbrev_authors(pointing_node))

    def test_get_pointed_private(self):
        secret_user = UserFactory()
        pointing_node = ProjectFactory(creator=secret_user)
        pointing_node.add_pointer(self.project, auth=Auth(secret_user))
        url = self.project.api_url_for('get_pointed')
        res = self.app.get(url, auth=self.user.auth)
        pointed = res.json['pointed']
        assert_equal(len(pointed), 1)
        assert_equal(pointed[0]['url'], None)
        assert_equal(pointed[0]['title'], 'Private Component')
        assert_equal(pointed[0]['authorShort'], 'Private Author(s)')


class TestPublicViews(OsfTestCase):

    def test_explore(self):
        res = self.app.get('/explore/').maybe_follow()
        assert_equal(res.status_code, 200)

    def test_forgot_password_get(self):
        res = self.app.get(web_url_for('forgot_password_get'))
        assert_equal(res.status_code, 200)
        assert_in('Forgot Password', res.body)


class TestAuthViews(OsfTestCase):

    def setUp(self):
        super(TestAuthViews, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth

    def test_merge_user(self):
        dupe = UserFactory(
            username='copy@cat.com',
            emails=['copy@cat.com']
        )
        dupe.set_password('copycat')
        dupe.save()
        url = '/api/v1/user/merge/'
        self.app.post_json(
            url,
            {
                'merged_username': 'copy@cat.com',
                'merged_password': 'copycat'
            },
            auth=self.auth,
        )
        self.user.reload()
        dupe.reload()
        assert_true(dupe.is_merged)

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_register_sends_confirm_email(self, send_mail):
        url = '/register/'
        self.app.post(url, {
            'register-fullname': 'Freddie Mercury',
            'register-username': 'fred@queen.com',
            'register-password': 'killerqueen',
            'register-username2': 'fred@queen.com',
            'register-password2': 'killerqueen',
        })
        assert_true(send_mail.called)
        # check the recorded call directly; `called_with` on a mock is a no-op
        call_args, call_kwargs = send_mail.call_args
        assert_in('fred@queen.com', list(call_args) + list(call_kwargs.values()))

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_register_ok(self, _):
        url = api_url_for('register_user')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        self.app.post_json(
            url,
            {
                'fullName': name,
                'email1': email,
                'email2': email,
                'password': password,
            }
        )
        user = User.find_one(Q('username', 'eq', email))
        assert_equal(user.fullname, name)

    # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2902
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_register_email_case_insensitive(self, _):
        url = api_url_for('register_user')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        self.app.post_json(
            url,
            {
                'fullName': name,
                'email1': email,
                'email2': str(email).upper(),
                'password': password,
} ) user = User.find_one(Q('username', 'eq', email)) assert_equal(user.fullname, name) @mock.patch('framework.auth.views.send_confirm_email') def test_register_scrubs_username(self, _): url = api_url_for('register_user') name = "<i>Eunice</i> O' \"Cornwallis\"<script type='text/javascript' src='http://www.cornify.com/js/cornify.js'></script><script type='text/javascript'>cornify_add()</script>" email, password = fake.email(), 'underpressure' res = self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': email, 'password': password, } ) expected_scrub_username = "Eunice O' \"Cornwallis\"cornify_add()" user = User.find_one(Q('username', 'eq', email)) assert_equal(res.status_code, http.OK) assert_equal(user.fullname, expected_scrub_username) def test_register_email_mismatch(self): url = api_url_for('register_user') name, email, password = fake.name(), fake.email(), 'underpressure' res = self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': email + 'lol', 'password': password, }, expect_errors=True, ) assert_equal(res.status_code, http.BAD_REQUEST) users = User.find(Q('username', 'eq', email)) assert_equal(users.count(), 0) def test_register_after_being_invited_as_unreg_contributor(self): # Regression test for: # https://github.com/CenterForOpenScience/openscienceframework.org/issues/861 # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1021 # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1026 # A user is invited as an unregistered contributor project = ProjectFactory() name, email = fake.name(), fake.email() project.add_unregistered_contributor(fullname=name, email=email, auth=Auth(project.creator)) project.save() # The new, unregistered user new_user = User.find_one(Q('username', 'eq', email)) # Instead of following the invitation link, they register at the regular # registration page # They use a different name when they register, but same email real_name = fake.name() password = 'myprecious' url = api_url_for('register_user') payload = { 'fullName': real_name, 'email1': email, 'email2': email, 'password': password, } # Send registration request self.app.post_json(url, payload) new_user.reload() # New user confirms by following confirmation link confirm_url = new_user.get_confirmation_url(email, external=False) self.app.get(confirm_url) new_user.reload() # Password and fullname should be updated assert_true(new_user.is_confirmed) assert_true(new_user.check_password(password)) assert_equal(new_user.fullname, real_name) @mock.patch('framework.auth.views.send_confirm_email') def test_register_sends_user_registered_signal(self, mock_send_confirm_email): url = api_url_for('register_user') name, email, password = fake.name(), fake.email(), 'underpressure' with capture_signals() as mock_signals: self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': email, 'password': password, } ) assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered])) mock_send_confirm_email.assert_called() @mock.patch('framework.auth.views.send_confirm_email') def test_register_post_sends_user_registered_signal(self, mock_send_confirm_email): url = web_url_for('auth_register_post') name, email, password = fake.name(), fake.email(), 'underpressure' with capture_signals() as mock_signals: self.app.post(url, { 'register-fullname': name, 'register-username': email, 'register-password': password, 'register-username2': email, 'register-password2': password }) assert_equal(mock_signals.signals_sent(), 
                     set([auth.signals.user_registered]))
        mock_send_confirm_email.assert_called()

    def test_resend_confirmation_get(self):
        res = self.app.get('/resend/')
        assert_equal(res.status_code, 200)

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_resend_confirmation(self, send_mail):
        email = 'test@example.com'
        token = self.user.add_unconfirmed_email(email)
        self.user.save()
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': False, 'confirmed': False}
        self.app.put_json(url, {'id': self.user._id, 'email': header},
                          auth=self.user.auth)
        assert_true(send_mail.called)
        # check the recorded call directly; `called_with` on a mock is a no-op
        call_args, call_kwargs = send_mail.call_args
        assert_in(email, list(call_args) + list(call_kwargs.values()))
        self.user.reload()
        assert_not_equal(token, self.user.get_confirmation_token(email))
        with assert_raises(InvalidTokenError):
            self.user._get_unconfirmed_email_for_token(token)

    def test_resend_confirmation_without_user_id(self):
        email = 'test@example.com'
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': False, 'confirmed': False}
        res = self.app.put_json(url, {'email': header}, auth=self.user.auth,
                                expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['message_long'], '"id" is required')

    def test_resend_confirmation_without_email(self):
        url = api_url_for('resend_confirmation')
        res = self.app.put_json(url, {'id': self.user._id}, auth=self.user.auth,
                                expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_resend_confirmation_not_work_for_primary_email(self):
        email = 'test@example.com'
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': True, 'confirmed': False}
        res = self.app.put_json(url, {'id': self.user._id, 'email': header},
                                auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['message_long'],
                     'Cannnot resend confirmation for confirmed emails')

    def test_resend_confirmation_not_work_for_confirmed_email(self):
        email = 'test@example.com'
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': False, 'confirmed': True}
        res = self.app.put_json(url, {'id': self.user._id, 'email': header},
                                auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['message_long'],
                     'Cannnot resend confirmation for confirmed emails')

    def test_confirm_email_clears_unclaimed_records_and_revokes_token(self):
        unclaimed_user = UnconfirmedUserFactory()
        # unclaimed user has been invited to a project.
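        # (the record created below stores the claim token that the
        # confirmation flow is expected to revoke)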
referrer = UserFactory() project = ProjectFactory(creator=referrer) unclaimed_user.add_unclaimed_record(project, referrer, 'foo') unclaimed_user.save() # sanity check assert_equal(len(unclaimed_user.email_verifications.keys()), 1) # user goes to email confirmation link token = unclaimed_user.get_confirmation_token(unclaimed_user.username) url = web_url_for('confirm_email_get', uid=unclaimed_user._id, token=token) res = self.app.get(url) assert_equal(res.status_code, 302) # unclaimed records and token are cleared unclaimed_user.reload() assert_equal(unclaimed_user.unclaimed_records, {}) assert_equal(len(unclaimed_user.email_verifications.keys()), 0) def test_confirmation_link_registers_user(self): user = User.create_unconfirmed('brian@queen.com', 'bicycle123', 'Brian May') assert_false(user.is_registered) # sanity check user.save() confirmation_url = user.get_confirmation_url('brian@queen.com', external=False) res = self.app.get(confirmation_url) assert_equal(res.status_code, 302, 'redirects to settings page') res = res.follow() user.reload() assert_true(user.is_registered) # TODO: Use mock add-on class TestAddonUserViews(OsfTestCase): def setUp(self): super(TestAddonUserViews, self).setUp() self.user = AuthUserFactory() def test_choose_addons_add(self): """Add add-ons; assert that add-ons are attached to project. """ url = '/api/v1/settings/addons/' self.app.post_json( url, {'github': True}, auth=self.user.auth, ).maybe_follow() self.user.reload() assert_true(self.user.get_addon('github')) def test_choose_addons_remove(self): # Add, then delete, add-ons; assert that add-ons are not attached to # project. url = '/api/v1/settings/addons/' self.app.post_json( url, {'github': True}, auth=self.user.auth, ).maybe_follow() self.app.post_json( url, {'github': False}, auth=self.user.auth ).maybe_follow() self.user.reload() assert_false(self.user.get_addon('github')) class TestConfigureMailingListViews(OsfTestCase): @classmethod def setUpClass(cls): super(TestConfigureMailingListViews, cls).setUpClass() cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS settings.ENABLE_EMAIL_SUBSCRIPTIONS = True @unittest.skipIf(settings.USE_CELERY, 'Subscription must happen synchronously for this test') @mock.patch('website.mailchimp_utils.get_mailchimp_api') def test_user_choose_mailing_lists_updates_user_dict(self, mock_get_mailchimp_api): user = AuthUserFactory() list_name = 'OSF General' mock_client = mock.MagicMock() mock_get_mailchimp_api.return_value = mock_client mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]} list_id = mailchimp_utils.get_list_id_from_name(list_name) payload = {settings.MAILCHIMP_GENERAL_LIST: True} url = api_url_for('user_choose_mailing_lists') res = self.app.post_json(url, payload, auth=user.auth) user.reload() # check user.mailing_lists is updated assert_true(user.mailing_lists[settings.MAILCHIMP_GENERAL_LIST]) assert_equal( user.mailing_lists[settings.MAILCHIMP_GENERAL_LIST], payload[settings.MAILCHIMP_GENERAL_LIST] ) # check that user is subscribed mock_client.lists.subscribe.assert_called_with(id=list_id, email={'email': user.username}, merge_vars= {'fname': user.given_name, 'lname': user.family_name, }, double_optin=False, update_existing=True) def test_get_mailchimp_get_endpoint_returns_200(self): url = api_url_for('mailchimp_get_endpoint') res = self.app.get(url) assert_equal(res.status_code, 200) @mock.patch('website.mailchimp_utils.get_mailchimp_api') def 
test_mailchimp_webhook_subscribe_action_updates_user(self, mock_get_mailchimp_api):
        """ Test that 'subscribe' actions sent to the OSF via mailchimp
            webhooks update the OSF database.
        """
        list_id = '12345'
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': list_id,
                                                         'name': list_name}]}

        # user is not subscribed to a list
        user = AuthUserFactory()
        user.mailing_lists = {'OSF General': False}
        user.save()

        # user subscribes and webhook sends request to OSF
        data = {
            'type': 'subscribe',
            'data[list_id]': list_id,
            'data[email]': user.username
        }
        url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
        self.app.post(url,
                      data,
                      content_type='application/x-www-form-urlencoded',
                      auth=user.auth)

        # user field is updated on the OSF
        user.reload()
        assert_true(user.mailing_lists[list_name])

    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_mailchimp_webhook_profile_action_does_not_change_user(self, mock_get_mailchimp_api):
        """ Test that 'profile' actions sent to the OSF via mailchimp
            webhooks do not cause any database changes.
        """
        list_id = '12345'
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': list_id,
                                                         'name': list_name}]}

        # user is subscribed to a list
        user = AuthUserFactory()
        user.mailing_lists = {'OSF General': True}
        user.save()

        # user hits subscribe again, which will update the user's existing
        # info on mailchimp; webhook sends request (when configured to update
        # on changes made through the API)
        data = {
            'type': 'profile',
            'data[list_id]': list_id,
            'data[email]': user.username
        }
        url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
        self.app.post(url,
                      data,
                      content_type='application/x-www-form-urlencoded',
                      auth=user.auth)

        # user field does not change
        user.reload()
        assert_true(user.mailing_lists[list_name])

    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_sync_data_from_mailchimp_unsubscribes_user(self, mock_get_mailchimp_api):
        list_id = '12345'
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': list_id,
                                                         'name': list_name}]}

        # user is subscribed to a list
        user = AuthUserFactory()
        user.mailing_lists = {'OSF General': True}
        user.save()

        # user unsubscribes through mailchimp and webhook sends request
        data = {
            'type': 'unsubscribe',
            'data[list_id]': list_id,
            'data[email]': user.username
        }
        url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
        self.app.post(url,
                      data,
                      content_type='application/x-www-form-urlencoded',
                      auth=user.auth)

        # user field is updated on the OSF
        user.reload()
        assert_false(user.mailing_lists[list_name])

    def test_sync_data_from_mailchimp_fails_without_secret_key(self):
        user = AuthUserFactory()
        payload = {'values': {'type': 'unsubscribe',
                              'data': {'list_id': '12345',
                                       'email': 'freddie@cos.io'}}}
        url = api_url_for('sync_data_from_mailchimp')
        res = self.app.post_json(url, payload, auth=user.auth, expect_errors=True)
        assert_equal(res.status_code, http.UNAUTHORIZED)

    @classmethod
    def tearDownClass(cls):
        super(TestConfigureMailingListViews, cls).tearDownClass()
        settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions


# TODO: Move to OSF Storage
class TestFileViews(OsfTestCase):
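    # Exercises the 'collect_file_trees' and 'grid_data' endpoints that
    # back the project files page.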
def setUp(self): super(TestFileViews, self).setUp() self.user = AuthUserFactory() self.project = ProjectFactory.build(creator=self.user, is_public=True) self.project.add_contributor(self.user) self.project.save() def test_files_get(self): url = self.project.api_url_for('collect_file_trees') res = self.app.get(url, auth=self.user.auth) expected = _view_project(self.project, auth=Auth(user=self.user)) assert_equal(res.status_code, http.OK) assert_equal(res.json['node'], expected['node']) assert_in('tree_js', res.json) assert_in('tree_css', res.json) def test_grid_data(self): url = self.project.api_url_for('grid_data') res = self.app.get(url, auth=self.user.auth).maybe_follow() assert_equal(res.status_code, http.OK) expected = rubeus.to_hgrid(self.project, auth=Auth(self.user)) data = res.json['data'] assert_equal(len(data), len(expected)) class TestComments(OsfTestCase): def setUp(self): super(TestComments, self).setUp() self.project = ProjectFactory(is_public=True) self.consolidated_auth = Auth(user=self.project.creator) self.non_contributor = AuthUserFactory() self.user = AuthUserFactory() self.project.add_contributor(self.user) self.project.save() self.user.save() def _configure_project(self, project, comment_level): project.comment_level = comment_level project.save() def _add_comment(self, project, content=None, **kwargs): content = content if content is not None else 'hammer to fall' url = project.api_url + 'comment/' return self.app.post_json( url, { 'content': content, 'isPublic': 'public', }, **kwargs ) def test_add_comment_public_contributor(self): self._configure_project(self.project, 'public') res = self._add_comment( self.project, auth=self.project.creator.auth, ) self.project.reload() res_comment = res.json['comment'] date_created = parse_date(str(res_comment.pop('dateCreated'))) date_modified = parse_date(str(res_comment.pop('dateModified'))) serialized_comment = serialize_comment(self.project.commented[0], self.consolidated_auth) date_created2 = parse_date(serialized_comment.pop('dateCreated')) date_modified2 = parse_date(serialized_comment.pop('dateModified')) assert_datetime_equal(date_created, date_created2) assert_datetime_equal(date_modified, date_modified2) assert_equal(len(self.project.commented), 1) assert_equal(res_comment, serialized_comment) def test_add_comment_public_non_contributor(self): self._configure_project(self.project, 'public') res = self._add_comment( self.project, auth=self.non_contributor.auth, ) self.project.reload() res_comment = res.json['comment'] date_created = parse_date(res_comment.pop('dateCreated')) date_modified = parse_date(res_comment.pop('dateModified')) serialized_comment = serialize_comment(self.project.commented[0], Auth(user=self.non_contributor)) date_created2 = parse_date(serialized_comment.pop('dateCreated')) date_modified2 = parse_date(serialized_comment.pop('dateModified')) assert_datetime_equal(date_created, date_created2) assert_datetime_equal(date_modified, date_modified2) assert_equal(len(self.project.commented), 1) assert_equal(res_comment, serialized_comment) def test_add_comment_private_contributor(self): self._configure_project(self.project, 'private') res = self._add_comment( self.project, auth=self.project.creator.auth, ) self.project.reload() res_comment = res.json['comment'] date_created = parse_date(str(res_comment.pop('dateCreated'))) date_modified = parse_date(str(res_comment.pop('dateModified'))) serialized_comment = serialize_comment(self.project.commented[0], self.consolidated_auth) date_created2 = 
parse_date(serialized_comment.pop('dateCreated')) date_modified2 = parse_date(serialized_comment.pop('dateModified')) assert_datetime_equal(date_created, date_created2) assert_datetime_equal(date_modified, date_modified2) assert_equal(len(self.project.commented), 1) assert_equal(res_comment, serialized_comment) def test_add_comment_private_non_contributor(self): self._configure_project(self.project, 'private') res = self._add_comment( self.project, auth=self.non_contributor.auth, expect_errors=True, ) assert_equal(res.status_code, http.FORBIDDEN) def test_add_comment_logged_out(self): self._configure_project(self.project, 'public') res = self._add_comment(self.project) assert_equal(res.status_code, 302) assert_in('login', res.headers.get('location')) def test_add_comment_off(self): self._configure_project(self.project, None) res = self._add_comment( self.project, auth=self.project.creator.auth, expect_errors=True, ) assert_equal(res.status_code, http.BAD_REQUEST) def test_add_comment_empty(self): self._configure_project(self.project, 'public') res = self._add_comment( self.project, content='', auth=self.project.creator.auth, expect_errors=True, ) assert_equal(res.status_code, http.BAD_REQUEST) assert_false(getattr(self.project, 'commented', [])) def test_add_comment_toolong(self): self._configure_project(self.project, 'public') res = self._add_comment( self.project, content='toolong' * 500, auth=self.project.creator.auth, expect_errors=True, ) assert_equal(res.status_code, http.BAD_REQUEST) assert_false(getattr(self.project, 'commented', [])) def test_add_comment_whitespace(self): self._configure_project(self.project, 'public') res = self._add_comment( self.project, content=' ', auth=self.project.creator.auth, expect_errors=True ) assert_equal(res.status_code, http.BAD_REQUEST) assert_false(getattr(self.project, 'commented', [])) def test_edit_comment(self): self._configure_project(self.project, 'public') comment = CommentFactory(node=self.project) url = self.project.api_url + 'comment/{0}/'.format(comment._id) res = self.app.put_json( url, { 'content': 'edited', 'isPublic': 'private', }, auth=self.project.creator.auth, ) comment.reload() assert_equal(res.json['content'], 'edited') assert_equal(comment.content, 'edited') def test_edit_comment_short(self): self._configure_project(self.project, 'public') comment = CommentFactory(node=self.project, content='short') url = self.project.api_url + 'comment/{0}/'.format(comment._id) res = self.app.put_json( url, { 'content': '', 'isPublic': 'private', }, auth=self.project.creator.auth, expect_errors=True, ) comment.reload() assert_equal(res.status_code, http.BAD_REQUEST) assert_equal(comment.content, 'short') def test_edit_comment_toolong(self): self._configure_project(self.project, 'public') comment = CommentFactory(node=self.project, content='short') url = self.project.api_url + 'comment/{0}/'.format(comment._id) res = self.app.put_json( url, { 'content': 'toolong' * 500, 'isPublic': 'private', }, auth=self.project.creator.auth, expect_errors=True, ) comment.reload() assert_equal(res.status_code, http.BAD_REQUEST) assert_equal(comment.content, 'short') def test_edit_comment_non_author(self): "Contributors who are not the comment author cannot edit." 
self._configure_project(self.project, 'public') comment = CommentFactory(node=self.project) non_author = AuthUserFactory() self.project.add_contributor(non_author, auth=self.consolidated_auth) url = self.project.api_url + 'comment/{0}/'.format(comment._id) res = self.app.put_json( url, { 'content': 'edited', 'isPublic': 'private', }, auth=non_author.auth, expect_errors=True, ) assert_equal(res.status_code, http.FORBIDDEN) def test_edit_comment_non_contributor(self): "Non-contributors who are not the comment author cannot edit." self._configure_project(self.project, 'public') comment = CommentFactory(node=self.project) url = self.project.api_url + 'comment/{0}/'.format(comment._id) res = self.app.put_json( url, { 'content': 'edited', 'isPublic': 'private', }, auth=self.non_contributor.auth, expect_errors=True, ) assert_equal(res.status_code, http.FORBIDDEN) def test_delete_comment_author(self): self._configure_project(self.project, 'public') comment = CommentFactory(node=self.project) url = self.project.api_url + 'comment/{0}/'.format(comment._id) self.app.delete_json( url, auth=self.project.creator.auth, ) comment.reload() assert_true(comment.is_deleted) def test_delete_comment_non_author(self): self._configure_project(self.project, 'public') comment = CommentFactory(node=self.project) url = self.project.api_url + 'comment/{0}/'.format(comment._id) res = self.app.delete_json( url, auth=self.non_contributor.auth, expect_errors=True, ) assert_equal(res.status_code, http.FORBIDDEN) comment.reload() assert_false(comment.is_deleted) def test_report_abuse(self): self._configure_project(self.project, 'public') comment = CommentFactory(node=self.project) reporter = AuthUserFactory() url = self.project.api_url + 'comment/{0}/report/'.format(comment._id) self.app.post_json( url, { 'category': 'spam', 'text': 'ads', }, auth=reporter.auth, ) comment.reload() assert_in(reporter._id, comment.reports) assert_equal( comment.reports[reporter._id], {'category': 'spam', 'text': 'ads'} ) def test_can_view_private_comments_if_contributor(self): self._configure_project(self.project, 'public') CommentFactory(node=self.project, user=self.project.creator, is_public=False) url = self.project.api_url + 'comments/' res = self.app.get(url, auth=self.project.creator.auth) assert_equal(len(res.json['comments']), 1) def test_view_comments_with_anonymous_link(self): self.project.save() self.project.set_privacy('private') self.project.reload() user = AuthUserFactory() link = PrivateLinkFactory(anonymous=True) link.nodes.append(self.project) link.save() CommentFactory(node=self.project, user=self.project.creator, is_public=False) url = self.project.api_url + 'comments/' res = self.app.get(url, {"view_only": link.key}, auth=user.auth) comment = res.json['comments'][0] author = comment['author'] assert_in('A user', author['name']) assert_false(author['gravatarUrl']) assert_false(author['url']) assert_false(author['id']) def test_discussion_recursive(self): self._configure_project(self.project, 'public') comment_l0 = CommentFactory(node=self.project) user_l1 = UserFactory() user_l2 = UserFactory() comment_l1 = CommentFactory(node=self.project, target=comment_l0, user=user_l1) CommentFactory(node=self.project, target=comment_l1, user=user_l2) url = self.project.api_url + 'comments/discussion/' res = self.app.get(url) assert_equal(len(res.json['discussion']), 3) def test_discussion_no_repeats(self): self._configure_project(self.project, 'public') comment_l0 = CommentFactory(node=self.project) comment_l1 = 
CommentFactory(node=self.project, target=comment_l0) CommentFactory(node=self.project, target=comment_l1) url = self.project.api_url + 'comments/discussion/' res = self.app.get(url) assert_equal(len(res.json['discussion']), 1) def test_discussion_sort(self): self._configure_project(self.project, 'public') user1 = UserFactory() user2 = UserFactory() CommentFactory(node=self.project) for _ in range(3): CommentFactory(node=self.project, user=user1) for _ in range(2): CommentFactory(node=self.project, user=user2) url = self.project.api_url + 'comments/discussion/' res = self.app.get(url) assert_equal(len(res.json['discussion']), 3) observed = [user['id'] for user in res.json['discussion']] expected = [user1._id, user2._id, self.project.creator._id] assert_equal(observed, expected) def test_view_comments_updates_user_comments_view_timestamp(self): CommentFactory(node=self.project) url = self.project.api_url_for('update_comments_timestamp') res = self.app.put_json(url, auth=self.user.auth) self.user.reload() user_timestamp = self.user.comments_viewed_timestamp[self.project._id] view_timestamp = dt.datetime.utcnow() assert_datetime_equal(user_timestamp, view_timestamp) def test_confirm_non_contrib_viewers_dont_have_pid_in_comments_view_timestamp(self): url = self.project.api_url_for('update_comments_timestamp') res = self.app.put_json(url, auth=self.user.auth) self.non_contributor.reload() assert_not_in(self.project._id, self.non_contributor.comments_viewed_timestamp) def test_n_unread_comments_updates_when_comment_is_added(self): self._add_comment(self.project, auth=self.project.creator.auth) self.project.reload() url = self.project.api_url_for('list_comments') res = self.app.get(url, auth=self.user.auth) assert_equal(res.json.get('nUnread'), 1) url = self.project.api_url_for('update_comments_timestamp') res = self.app.put_json(url, auth=self.user.auth) self.user.reload() url = self.project.api_url_for('list_comments') res = self.app.get(url, auth=self.user.auth) assert_equal(res.json.get('nUnread'), 0) def test_n_unread_comments_updates_when_comment_reply(self): comment = CommentFactory(node=self.project, user=self.project.creator) reply = CommentFactory(node=self.project, user=self.user, target=comment) self.project.reload() url = self.project.api_url_for('list_comments') res = self.app.get(url, auth=self.project.creator.auth) assert_equal(res.json.get('nUnread'), 1) def test_n_unread_comments_updates_when_comment_is_edited(self): self.test_edit_comment() self.project.reload() url = self.project.api_url_for('list_comments') res = self.app.get(url, auth=self.user.auth) assert_equal(res.json.get('nUnread'), 1) def test_n_unread_comments_is_zero_when_no_comments(self): url = self.project.api_url_for('list_comments') res = self.app.get(url, auth=self.project.creator.auth) assert_equal(res.json.get('nUnread'), 0) class TestTagViews(OsfTestCase): def setUp(self): super(TestTagViews, self).setUp() self.user = AuthUserFactory() self.project = ProjectFactory(creator=self.user) @unittest.skip('Tags endpoint disabled for now.') def test_tag_get_returns_200(self): url = web_url_for('project_tag', tag='foo') res = self.app.get(url) assert_equal(res.status_code, 200) @requires_search class TestSearchViews(OsfTestCase): def setUp(self): super(TestSearchViews, self).setUp() import website.search.search as search search.delete_all() self.project = ProjectFactory(creator=UserFactory(fullname='Robbie Williams')) self.contrib = UserFactory(fullname='Brian May') for i in range(0, 12): UserFactory(fullname='Freddie 
Mercury{}'.format(i)) def tearDown(self): super(TestSearchViews, self).tearDown() import website.search.search as search search.delete_all() def test_search_contributor(self): url = api_url_for('search_contributor') res = self.app.get(url, {'query': self.contrib.fullname}) assert_equal(res.status_code, 200) result = res.json['users'] assert_equal(len(result), 1) brian = result[0] assert_equal(brian['fullname'], self.contrib.fullname) assert_in('gravatar_url', brian) assert_equal(brian['registered'], self.contrib.is_registered) assert_equal(brian['active'], self.contrib.is_active) def test_search_pagination_default(self): url = api_url_for('search_contributor') res = self.app.get(url, {'query': 'fr'}) assert_equal(res.status_code, 200) result = res.json['users'] pages = res.json['pages'] page = res.json['page'] assert_equal(len(result), 5) assert_equal(pages, 3) assert_equal(page, 0) def test_search_pagination_default_page_1(self): url = api_url_for('search_contributor') res = self.app.get(url, {'query': 'fr', 'page': 1}) assert_equal(res.status_code, 200) result = res.json['users'] page = res.json['page'] assert_equal(len(result), 5) assert_equal(page, 1) def test_search_pagination_default_page_2(self): url = api_url_for('search_contributor') res = self.app.get(url, {'query': 'fr', 'page': 2}) assert_equal(res.status_code, 200) result = res.json['users'] page = res.json['page'] assert_equal(len(result), 2) assert_equal(page, 2) def test_search_pagination_smaller_pages(self): url = api_url_for('search_contributor') res = self.app.get(url, {'query': 'fr', 'size': 5}) assert_equal(res.status_code, 200) result = res.json['users'] pages = res.json['pages'] page = res.json['page'] assert_equal(len(result), 5) assert_equal(page, 0) assert_equal(pages, 3) def test_search_pagination_smaller_pages_page_2(self): url = api_url_for('search_contributor') res = self.app.get(url, {'query': 'fr', 'page': 2, 'size': 5, }) assert_equal(res.status_code, 200) result = res.json['users'] pages = res.json['pages'] page = res.json['page'] assert_equal(len(result), 2) assert_equal(page, 2) assert_equal(pages, 3) def test_search_projects(self): url = '/search/' res = self.app.get(url, {'q': self.project.title}) assert_equal(res.status_code, 200) class TestODMTitleSearch(OsfTestCase): """ Docs from original method: :arg term: The substring of the title. :arg category: Category of the node. :arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search. :arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search. :arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search. :arg includePublic: yes or no. Whether the projects listed should include public projects. :arg includeContributed: yes or no. Whether the search should include projects the current user has contributed to. :arg ignoreNode: a list of nodes that should not be included in the search. 
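    Example query string (hypothetical values; the endpoint itself is
    resolved via api_url_for('search_projects_by_title')):
        ?term=foo&includePublic=yes&includeContributed=no&isRegistration=either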
:return: a list of dictionaries of projects """ def setUp(self): super(TestODMTitleSearch, self).setUp() self.user = AuthUserFactory() self.user_two = AuthUserFactory() self.project = ProjectFactory(creator=self.user, title="foo") self.project_two = ProjectFactory(creator=self.user_two, title="bar") self.public_project = ProjectFactory(creator=self.user_two, is_public=True, title="baz") self.registration_project = RegistrationFactory(creator=self.user, title="qux") self.folder = FolderFactory(creator=self.user, title="quux") self.dashboard = DashboardFactory(creator=self.user, title="Dashboard") self.url = api_url_for('search_projects_by_title') def test_search_projects_by_title(self): res = self.app.get(self.url, {'term': self.project.title}, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 1) res = self.app.get(self.url, { 'term': self.public_project.title, 'includePublic': 'yes', 'includeContributed': 'no' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 1) res = self.app.get(self.url, { 'term': self.project.title, 'includePublic': 'no', 'includeContributed': 'yes' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 1) res = self.app.get(self.url, { 'term': self.project.title, 'includePublic': 'no', 'includeContributed': 'yes', 'isRegistration': 'no' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 1) res = self.app.get(self.url, { 'term': self.project.title, 'includePublic': 'yes', 'includeContributed': 'yes', 'isRegistration': 'either' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 1) res = self.app.get(self.url, { 'term': self.public_project.title, 'includePublic': 'yes', 'includeContributed': 'yes', 'isRegistration': 'either' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 1) res = self.app.get(self.url, { 'term': self.registration_project.title, 'includePublic': 'yes', 'includeContributed': 'yes', 'isRegistration': 'either' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 2) res = self.app.get(self.url, { 'term': self.registration_project.title, 'includePublic': 'yes', 'includeContributed': 'yes', 'isRegistration': 'no' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 1) res = self.app.get(self.url, { 'term': self.folder.title, 'includePublic': 'yes', 'includeContributed': 'yes', 'isFolder': 'yes' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 1) res = self.app.get(self.url, { 'term': self.folder.title, 'includePublic': 'yes', 'includeContributed': 'yes', 'isFolder': 'no' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 0) res = self.app.get(self.url, { 'term': self.dashboard.title, 'includePublic': 'yes', 'includeContributed': 'yes', 'isFolder': 'no' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 0) res = self.app.get(self.url, { 'term': self.dashboard.title, 'includePublic': 'yes', 'includeContributed': 'yes', 'isFolder': 'yes' }, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json), 1) class TestReorderComponents(OsfTestCase): def setUp(self): super(TestReorderComponents, self).setUp() self.creator = AuthUserFactory() self.contrib = AuthUserFactory() # Project is public self.project = ProjectFactory.build(creator=self.creator, 
is_public=True) self.project.add_contributor(self.contrib, auth=Auth(self.creator)) # subcomponent that only creator can see self.public_component = NodeFactory(creator=self.creator, is_public=True) self.private_component = NodeFactory(creator=self.creator, is_public=False) self.project.nodes.append(self.public_component) self.project.nodes.append(self.private_component) self.project.save() # https://github.com/CenterForOpenScience/openscienceframework.org/issues/489 def test_reorder_components_with_private_component(self): # contrib tries to reorder components payload = { 'new_list': [ '{0}:node'.format(self.private_component._primary_key), '{0}:node'.format(self.public_component._primary_key), ] } url = self.project.api_url_for('project_reorder_components') res = self.app.post_json(url, payload, auth=self.contrib.auth) assert_equal(res.status_code, 200) class TestDashboardViews(OsfTestCase): def setUp(self): super(TestDashboardViews, self).setUp() self.creator = AuthUserFactory() self.contrib = AuthUserFactory() self.dashboard = DashboardFactory(creator=self.creator) # https://github.com/CenterForOpenScience/openscienceframework.org/issues/571 def test_components_with_are_accessible_from_dashboard(self): project = ProjectFactory(creator=self.creator, is_public=False) component = NodeFactory(creator=self.creator, parent=project) component.add_contributor(self.contrib, auth=Auth(self.creator)) component.save() # Get the All My Projects smart folder from the dashboard url = api_url_for('get_dashboard', nid=ALL_MY_PROJECTS_ID) res = self.app.get(url, auth=self.contrib.auth) assert_equal(len(res.json['data']), 1) def test_get_dashboard_nodes(self): project = ProjectFactory(creator=self.creator) component = NodeFactory(creator=self.creator, parent=project) url = api_url_for('get_dashboard_nodes') res = self.app.get(url, auth=self.creator.auth) assert_equal(res.status_code, 200) nodes = res.json['nodes'] assert_equal(len(nodes), 2) project_serialized = nodes[0] assert_equal(project_serialized['id'], project._primary_key) def test_get_dashboard_nodes_shows_components_if_user_is_not_contrib_on_project(self): # User creates a project with a component project = ProjectFactory(creator=self.creator) component = NodeFactory(creator=self.creator, parent=project) # User adds friend as a contributor to the component but not the # project friend = AuthUserFactory() component.add_contributor(friend, auth=Auth(self.creator)) component.save() # friend requests their dashboard nodes url = api_url_for('get_dashboard_nodes') res = self.app.get(url, auth=friend.auth) nodes = res.json['nodes'] # Response includes component assert_equal(len(nodes), 1) assert_equal(nodes[0]['id'], component._primary_key) # friend requests dashboard nodes, filtering against components url = api_url_for('get_dashboard_nodes', no_components=True) res = self.app.get(url, auth=friend.auth) nodes = res.json['nodes'] assert_equal(len(nodes), 0) def test_get_dashboard_nodes_admin_only(self): friend = AuthUserFactory() project = ProjectFactory(creator=self.creator) # Friend is added as a contributor with read+write (not admin) # permissions perms = permissions.expand_permissions(permissions.WRITE) project.add_contributor(friend, auth=Auth(self.creator), permissions=perms) project.save() url = api_url_for('get_dashboard_nodes') res = self.app.get(url, auth=friend.auth) assert_equal(res.json['nodes'][0]['id'], project._primary_key) # Can filter project according to permission url = api_url_for('get_dashboard_nodes', permissions='admin') res = 
self.app.get(url, auth=friend.auth)
        assert_equal(len(res.json['nodes']), 0)

    def test_get_dashboard_nodes_invalid_permission(self):
        url = api_url_for('get_dashboard_nodes', permissions='not-valid')
        res = self.app.get(url, auth=self.creator.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_registered_components_with_are_accessible_from_dashboard(self):
        project = ProjectFactory(creator=self.creator, is_public=False)
        component = NodeFactory(creator=self.creator, parent=project)
        component.add_contributor(self.contrib, auth=Auth(self.creator))
        component.save()
        project.register_node(
            None, Auth(self.creator), '', '',
        )
        # Get the All My Registrations smart folder from the dashboard
        url = api_url_for('get_dashboard', nid=ALL_MY_REGISTRATIONS_ID)
        res = self.app.get(url, auth=self.contrib.auth)
        assert_equal(len(res.json['data']), 1)

    def test_archiving_nodes_appear_in_all_my_registrations(self):
        project = ProjectFactory(creator=self.creator, is_public=False)
        reg = RegistrationFactory(project=project, user=self.creator)
        # Get the All My Registrations smart folder from the dashboard
        url = api_url_for('get_dashboard', nid=ALL_MY_REGISTRATIONS_ID)
        res = self.app.get(url, auth=self.creator.auth)
        assert_equal(res.json['data'][0]['node_id'], reg._id)

    def test_untouched_node_is_collapsed(self):
        found_item = False
        folder = FolderFactory(creator=self.creator, public=True)
        self.dashboard.add_pointer(folder, auth=Auth(self.creator))
        url = api_url_for('get_dashboard', nid=self.dashboard._id)
        dashboard_data = self.app.get(url, auth=self.creator.auth)
        dashboard_json = dashboard_data.json[u'data']
        for dashboard_item in dashboard_json:
            if dashboard_item[u'node_id'] == folder._id:
                found_item = True
                assert_false(dashboard_item[u'expand'],
                             "Expand state was not set properly.")
        assert_true(found_item, "Did not find the folder in the dashboard.")

    def test_expand_node_sets_expand_to_true(self):
        found_item = False
        folder = FolderFactory(creator=self.creator, public=True)
        self.dashboard.add_pointer(folder, auth=Auth(self.creator))
        url = api_url_for('expand', pid=folder._id)
        self.app.post(url, auth=self.creator.auth)
        url = api_url_for('get_dashboard', nid=self.dashboard._id)
        dashboard_data = self.app.get(url, auth=self.creator.auth)
        dashboard_json = dashboard_data.json[u'data']
        for dashboard_item in dashboard_json:
            if dashboard_item[u'node_id'] == folder._id:
                found_item = True
                assert_true(dashboard_item[u'expand'],
                            "Expand state was not set properly.")
        assert_true(found_item, "Did not find the folder in the dashboard.")

    def test_collapse_node_sets_expand_to_false(self):
        found_item = False
        folder = FolderFactory(creator=self.creator, public=True)
        self.dashboard.add_pointer(folder, auth=Auth(self.creator))
        # Expand the folder
        url = api_url_for('expand', pid=folder._id)
        self.app.post(url, auth=self.creator.auth)
        # Serialize the dashboard and test
        url = api_url_for('get_dashboard', nid=self.dashboard._id)
        dashboard_data = self.app.get(url, auth=self.creator.auth)
        dashboard_json = dashboard_data.json[u'data']
        for dashboard_item in dashboard_json:
            if dashboard_item[u'node_id'] == folder._id:
                found_item = True
                assert_true(dashboard_item[u'expand'],
                            "Expand state was not set properly.")
        assert_true(found_item, "Did not find the folder in the dashboard.")
        # Collapse the folder
        found_item = False
        url = api_url_for('collapse', pid=folder._id)
        self.app.post(url, auth=self.creator.auth)
        # Serialize the dashboard and test
        url = api_url_for('get_dashboard', nid=self.dashboard._id)
        dashboard_data = self.app.get(url,
auth=self.creator.auth) dashboard_json = dashboard_data.json[u'data'] for dashboard_item in dashboard_json: if dashboard_item[u'node_id'] == folder._id: found_item = True assert_false(dashboard_item[u'expand'], "Expand state was not set properly.") assert_true(found_item, "Did not find the folder in the dashboard.") def test_folder_new_post(self): url = api_url_for('folder_new_post', nid=self.dashboard._id) found_item = False # Make the folder title = 'New test folder' payload = {'title': title, } self.app.post_json(url, payload, auth=self.creator.auth) # Serialize the dashboard and test url = api_url_for('get_dashboard', nid=self.dashboard._id) dashboard_data = self.app.get(url, auth=self.creator.auth) dashboard_json = dashboard_data.json[u'data'] for dashboard_item in dashboard_json: if dashboard_item[u'name'] == title: found_item = True assert_true(found_item, "Did not find the folder in the dashboard.") class TestWikiWidgetViews(OsfTestCase): def setUp(self): super(TestWikiWidgetViews, self).setUp() # project with no home wiki page self.project = ProjectFactory() self.read_only_contrib = AuthUserFactory() self.project.add_contributor(self.read_only_contrib, permissions='read') self.noncontributor = AuthUserFactory() # project with no home wiki content self.project2 = ProjectFactory(creator=self.project.creator) self.project2.add_contributor(self.read_only_contrib, permissions='read') self.project2.update_node_wiki(name='home', content='', auth=Auth(self.project.creator)) def test_show_wiki_for_contributors_when_no_wiki_or_content(self): assert_true(_should_show_wiki_widget(self.project, self.project.creator)) assert_true(_should_show_wiki_widget(self.project2, self.project.creator)) def test_show_wiki_is_false_for_read_contributors_when_no_wiki_or_content(self): assert_false(_should_show_wiki_widget(self.project, self.read_only_contrib)) assert_false(_should_show_wiki_widget(self.project2, self.read_only_contrib)) def test_show_wiki_is_false_for_noncontributors_when_no_wiki_or_content(self): assert_false(_should_show_wiki_widget(self.project, self.noncontributor)) assert_false(_should_show_wiki_widget(self.project2, self.read_only_contrib)) class TestForkViews(OsfTestCase): def setUp(self): super(TestForkViews, self).setUp() self.user = AuthUserFactory() self.project = ProjectFactory.build(creator=self.user, is_public=True) self.consolidated_auth = Auth(user=self.project.creator) self.user.save() self.project.save() def test_fork_private_project_non_contributor(self): self.project.set_privacy("private") self.project.save() url = self.project.api_url_for('node_fork_page') non_contributor = AuthUserFactory() res = self.app.post_json(url, auth=non_contributor.auth, expect_errors=True) assert_equal(res.status_code, http.FORBIDDEN) def test_fork_public_project_non_contributor(self): url = self.project.api_url_for('node_fork_page') non_contributor = AuthUserFactory() res = self.app.post_json(url, auth=non_contributor.auth) assert_equal(res.status_code, 200) def test_fork_project_contributor(self): contributor = AuthUserFactory() self.project.set_privacy("private") self.project.add_contributor(contributor) self.project.save() url = self.project.api_url_for('node_fork_page') res = self.app.post_json(url, auth=contributor.auth) assert_equal(res.status_code, 200) def test_registered_forks_dont_show_in_fork_list(self): fork = self.project.fork_node(self.consolidated_auth) RegistrationFactory(project=fork) url = self.project.api_url_for('get_forks') res = self.app.get(url, auth=self.user.auth) 
assert_equal(len(res.json['nodes']), 1) assert_equal(res.json['nodes'][0]['id'], fork._id) class TestProjectCreation(OsfTestCase): def setUp(self): super(TestProjectCreation, self).setUp() self.creator = AuthUserFactory() self.url = api_url_for('project_new_post') def test_needs_title(self): res = self.app.post_json(self.url, {}, auth=self.creator.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_create_component_strips_html(self): user = AuthUserFactory() project = ProjectFactory(creator=user) url = web_url_for('project_new_node', pid=project._id) post_data = {'title': '<b>New <blink>Component</blink> Title</b>', 'category': ''} request = self.app.post(url, post_data, auth=user.auth).follow() project.reload() child = project.nodes[0] # HTML has been stripped assert_equal(child.title, 'New Component Title') def test_strip_html_from_title(self): payload = { 'title': 'no html <b>here</b>' } res = self.app.post_json(self.url, payload, auth=self.creator.auth) node = Node.load(res.json['projectUrl'].replace('/', '')) assert_true(node) assert_equal('no html here', node.title) def test_only_needs_title(self): payload = { 'title': 'Im a real title' } res = self.app.post_json(self.url, payload, auth=self.creator.auth) assert_equal(res.status_code, 201) def test_title_must_be_one_long(self): payload = { 'title': '' } res = self.app.post_json( self.url, payload, auth=self.creator.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_title_must_be_less_than_200(self): payload = { 'title': ''.join([str(x) for x in xrange(0, 250)]) } res = self.app.post_json( self.url, payload, auth=self.creator.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_fails_to_create_project_with_whitespace_title(self): payload = { 'title': ' ' } res = self.app.post_json( self.url, payload, auth=self.creator.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_creates_a_project(self): payload = { 'title': 'Im a real title' } res = self.app.post_json(self.url, payload, auth=self.creator.auth) assert_equal(res.status_code, 201) node = Node.load(res.json['projectUrl'].replace('/', '')) assert_true(node) assert_true(node.title, 'Im a real title') def test_new_project_returns_serialized_node_data(self): payload = { 'title': 'Im a real title' } res = self.app.post_json(self.url, payload, auth=self.creator.auth) assert_equal(res.status_code, 201) node = res.json['newNode'] assert_true(node) assert_equal(node['title'], 'Im a real title') def test_description_works(self): payload = { 'title': 'Im a real title', 'description': 'I describe things!' 
} res = self.app.post_json(self.url, payload, auth=self.creator.auth) assert_equal(res.status_code, 201) node = Node.load(res.json['projectUrl'].replace('/', '')) assert_true(node) assert_true(node.description, 'I describe things!') def test_can_template(self): other_node = ProjectFactory(creator=self.creator) payload = { 'title': 'Im a real title', 'template': other_node._id } res = self.app.post_json(self.url, payload, auth=self.creator.auth) assert_equal(res.status_code, 201) node = Node.load(res.json['projectUrl'].replace('/', '')) assert_true(node) assert_true(node.template_node, other_node) def test_project_before_template_no_addons(self): project = ProjectFactory() res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth) assert_equal(res.json['prompts'], []) def test_project_before_template_with_addons(self): project = ProjectWithAddonFactory(addon='github') res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth) assert_in('GitHub', res.json['prompts']) def test_project_new_from_template_non_user(self): project = ProjectFactory() url = api_url_for('project_new_from_template', nid=project._id) res = self.app.post(url, auth=None) assert_equal(res.status_code, 302) res2 = res.follow(expect_errors=True) assert_equal(res2.status_code, 301) assert_equal(res2.request.path, '/login') def test_project_new_from_template_public_non_contributor(self): non_contributor = AuthUserFactory() project = ProjectFactory(is_public=True) url = api_url_for('project_new_from_template', nid=project._id) res = self.app.post(url, auth=non_contributor.auth) assert_equal(res.status_code, 201) def test_project_new_from_template_contributor(self): contributor = AuthUserFactory() project = ProjectFactory(is_public=False) project.add_contributor(contributor) project.save() url = api_url_for('project_new_from_template', nid=project._id) res = self.app.post(url, auth=contributor.auth) assert_equal(res.status_code, 201) class TestUnconfirmedUserViews(OsfTestCase): def test_can_view_profile(self): user = UnconfirmedUserFactory() url = web_url_for('profile_view_id', uid=user._id) res = self.app.get(url) assert_equal(res.status_code, 200) class TestProfileNodeList(OsfTestCase): def setUp(self): OsfTestCase.setUp(self) self.user = AuthUserFactory() self.public = ProjectFactory(is_public=True) self.public_component = NodeFactory(parent=self.public, is_public=True) self.private = ProjectFactory(is_public=False) self.deleted = ProjectFactory(is_public=True, is_deleted=True) for node in (self.public, self.public_component, self.private, self.deleted): node.add_contributor(self.user, auth=Auth(node.creator)) node.save() def test_get_public_projects(self): url = api_url_for('get_public_projects', uid=self.user._id) res = self.app.get(url) node_ids = [each['id'] for each in res.json['nodes']] assert_in(self.public._id, node_ids) assert_not_in(self.private._id, node_ids) assert_not_in(self.deleted._id, node_ids) assert_not_in(self.public_component._id, node_ids) def test_get_public_components(self): url = api_url_for('get_public_components', uid=self.user._id) res = self.app.get(url) node_ids = [each['id'] for each in res.json['nodes']] assert_in(self.public_component._id, node_ids) assert_not_in(self.public._id, node_ids) assert_not_in(self.private._id, node_ids) assert_not_in(self.deleted._id, node_ids) class TestStaticFileViews(OsfTestCase): def test_robots_dot_txt(self): res = self.app.get('/robots.txt') assert_equal(res.status_code, 200) 
        assert_in('User-agent', res)
        assert_in('text/plain', res.headers['Content-Type'])

    def test_favicon(self):
        res = self.app.get('/favicon.ico')
        assert_equal(res.status_code, 200)
        assert_in('image/vnd.microsoft.icon', res.headers['Content-Type'])

    def test_getting_started_page(self):
        res = self.app.get('/getting-started/')
        assert_equal(res.status_code, 200)


class TestUserConfirmSignal(OsfTestCase):

    def test_confirm_user_signal_called_when_user_claims_account(self):
        unclaimed_user = UnconfirmedUserFactory()
        # unclaimed user has been invited to a project.
        referrer = UserFactory()
        project = ProjectFactory(creator=referrer)
        unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
        unclaimed_user.save()

        token = unclaimed_user.get_unclaimed_record(project._primary_key)['token']
        with capture_signals() as mock_signals:
            url = web_url_for('claim_user_form', pid=project._id,
                              uid=unclaimed_user._id, token=token)
            payload = {'username': unclaimed_user.username,
                       'password': 'password',
                       'password2': 'password'}
            res = self.app.post(url, payload)
            assert_equal(res.status_code, 302)
            assert_equal(mock_signals.signals_sent(),
                         set([auth.signals.user_confirmed]))

    def test_confirm_user_signal_called_when_user_confirms_email(self):
        unconfirmed_user = UnconfirmedUserFactory()
        unconfirmed_user.save()

        # user goes to email confirmation link
        token = unconfirmed_user.get_confirmation_token(unconfirmed_user.username)
        with capture_signals() as mock_signals:
            url = web_url_for('confirm_email_get', uid=unconfirmed_user._id, token=token)
            res = self.app.get(url)
            assert_equal(res.status_code, 302)
            assert_equal(mock_signals.signals_sent(),
                         set([auth.signals.user_confirmed]))


if __name__ == '__main__':
    unittest.main()
petermalcolm/osf.io
tests/test_views.py
Python
apache-2.0
181,333
[ "Brian" ]
9773c1bb947162ee8610426319a27b2cfdee80bca35652be87e9ea7e63eaa3aa
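The reorder test above shows the wire format expected by project_reorder_components: each child node is encoded as '<primary_key>:node'. A tiny sketch of that payload shape, using made-up placeholder IDs:

# Hypothetical component IDs -- placeholders, not real OSF primary keys.
private_id = 'abc12'
public_id = 'def34'

payload = {
    'new_list': [
        '{0}:node'.format(private_id),
        '{0}:node'.format(public_id),
    ]
}
# -> {'new_list': ['abc12:node', 'def34:node']}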
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

"""
This module provides utility classes for string operations.
"""

from __future__ import unicode_literals

import re
from fractions import Fraction

__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "$Sep 23, 2011M$"


def str_delimited(results, header=None, delimiter="\t"):
    """
    Given a tuple of tuples, generate a delimited string form.

    >>> results = [["a","b","c"],["d","e","f"],[1,2,3]]
    >>> print(str_delimited(results,delimiter=","))
    a,b,c
    d,e,f
    1,2,3

    Args:
        results: 2d sequence of arbitrary types.
        header: optional header

    Returns:
        Aligned string output in a table-like format.
    """
    returnstr = ""
    if header is not None:
        returnstr += delimiter.join(header) + "\n"
    return returnstr + "\n".join([delimiter.join([str(m) for m in result])
                                  for result in results])


def formula_double_format(afloat, ignore_ones=True, tol=1e-8):
    """
    This function is used to make pretty formulas by formatting the amounts.
    Instead of Li1.0 Fe1.0 P1.0 O4.0, you get LiFePO4.

    Args:
        afloat (float): a float
        ignore_ones (bool): if true, floats of 1 are ignored.
        tol (float): Tolerance to round to nearest int. i.e. 2.0000000001 -> 2

    Returns:
        A string representation of the float for formulas.
    """
    if ignore_ones and afloat == 1:
        return ""
    elif abs(afloat - int(afloat)) < tol:
        return str(int(afloat))
    else:
        return str(round(afloat, 8))


def latexify(formula):
    """
    Generates a latex formatted formula. E.g., Fe2O3 is transformed to
    Fe$_{2}$O$_{3}$.

    Args:
        formula (str): Input formula.

    Returns:
        Formula suitable for display as in LaTeX with proper subscripts.
    """
    return re.sub(r"([A-Za-z\(\)])([\d\.]+)", r"\1$_{\2}$", formula)


def latexify_spacegroup(spacegroup_symbol):
    """
    Generates a latex formatted spacegroup. E.g., P2_1/c is converted to
    P2$_{1}$/c and P-1 is converted to P$\\overline{1}$.

    Args:
        spacegroup_symbol (str): A spacegroup symbol

    Returns:
        A latex formatted spacegroup with proper subscripts and overlines.
    """
    sym = re.sub(r"_(\d+)", r"$_{\1}$", spacegroup_symbol)
    return re.sub(r"-(\d)", r"$\\overline{\1}$", sym)


def stream_has_colours(stream):
    """
    True if stream supports colours. Python cookbook, #475186
    """
    if not hasattr(stream, "isatty"):
        return False
    if not stream.isatty():
        return False  # auto color only on TTYs
    try:
        import curses
        curses.setupterm()
        return curses.tigetnum("colors") > 2
    except:
        return False  # guess false in case of error


def transformation_to_string(matrix, translation_vec=(0, 0, 0), components=('x', 'y', 'z'), c='', delim=','):
    """
    Convenience method. Given matrix returns string, e.g. x+2y+1/4

    :param matrix
    :param translation_vec
    :param components: either ('x', 'y', 'z') or ('a', 'b', 'c')
    :param c: optional additional character to print (used for magmoms)
    :param delim: delimiter
    :return: xyz string
    """
    parts = []
    for i in range(3):
        s = ''
        m = matrix[i]
        t = translation_vec[i]
        for j, dim in enumerate(components):
            if m[j] != 0:
                f = Fraction(m[j]).limit_denominator()
                if s != '' and f >= 0:
                    s += '+'
                if abs(f.numerator) != 1:
                    s += str(f.numerator)
                elif f < 0:
                    s += '-'
                s += c + dim
                if f.denominator != 1:
                    s += '/' + str(f.denominator)
        if t != 0:
            s += ('+' if (t > 0 and s != '') else '') + str(Fraction(t).limit_denominator())
        if s == '':
            s += '0'
        parts.append(s)
    return delim.join(parts)


class StringColorizer(object):
    colours = {"default": "",
               "blue": "\x1b[01;34m",
               "cyan": "\x1b[01;36m",
               "green": "\x1b[01;32m",
               "red": "\x1b[01;31m",
               # lighting colours.
               # "lred": "\x1b[01;05;37;41m"
               }

    def __init__(self, stream):
        self.has_colours = stream_has_colours(stream)

    def __call__(self, string, colour):
        if self.has_colours:
            code = self.colours.get(colour.lower(), "")
            if code:
                return code + string + "\x1b[00m"
            else:
                return string
        else:
            return string


if __name__ == "__main__":
    import doctest
    doctest.testmod()
matk86/pymatgen
pymatgen/util/string.py
Python
mit
4,957
[ "pymatgen" ]
741ee09085c309df94a42d45ce99ba193ed9d9c1bc7f2a4a1034c0f104947aed
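A quick sanity check of the helpers above; a minimal sketch assuming the module is importable as pymatgen.util.string (the path in this record). Expected values follow from the docstrings.

from pymatgen.util.string import (formula_double_format, latexify,
                                  latexify_spacegroup, str_delimited)

# Amounts of 1 are dropped; near-integers are rounded within tol.
assert formula_double_format(1.0) == ''
assert formula_double_format(2.0000000001) == '2'

# Subscripts for formulas and spacegroup symbols.
assert latexify('Fe2O3') == 'Fe$_{2}$O$_{3}$'
assert latexify_spacegroup('P2_1/c') == 'P2$_{1}$/c'

# Delimited, table-like output with an optional header row.
print(str_delimited([['a', 'b'], [1, 2]], header=['x', 'y'], delimiter=','))
# x,y
# a,b
# 1,2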
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-

"""
multiple clustering algorithms
"""

import numpy as np
import matplotlib.pyplot as plt
from .HelperFunctions import get_colors
from mpl_toolkits.mplot3d import Axes3D
from random import shuffle


class Cluster:

    def __init__(self, points):
        # Attributes
        self.points = points
        self.labels = []
        self.result = []
        self.noise = []

    def __str__(self):
        """ String representation """
        return str(self.points)

    @staticmethod
    def area(p):
        return 0.5 * abs(sum(x0 * y1 - x1 * y0
                             for ((x0, y0), (x1, y1)) in Cluster.segments(p)))

    @staticmethod
    def segments(p):
        return zip(p, p[1:] + [p[0]])

    def open_csv(self, filename="la.csv"):
        self.points = np.genfromtxt(filename, delimiter=',')

    @staticmethod
    def save_csv(output, filename="test.csv"):
        np.savetxt(filename, output, fmt="%.2f,%.2f,%d")

    def calculate(self):
        """ make something exciting """
        pass

    def plot_me(self):
        plt.plot(self.points[:, 0], self.points[:, 1], 'o')
        plt.show()

    @staticmethod
    def plot_marker(x, y):
        plt.plot(x, y, "or", color="red", ms=10.0)

    def show_res(self, comp_list=None, filename=None, shuffle_colors=False):
        """ plot the results in 3d
        format: [[point, point, point], [point, point, point, point]...]
        :param comp_list - [1,3,4] - plot only 1 3 and 4 as result
        :param filename - name of saving file, otherwise show
        """
        # print result
        print("clusters: {}".format(len(self.result)))
        if self.noise:
            print("noisepts: {}".format(len(self.noise)))

        # Plot
        fig = plt.figure()
        #colors = 'rgbcmyk'
        colors = get_colors()
        if shuffle_colors:
            shuffle(colors)
        markers = ('o', '+', 'x', '*', 's', 'p', 'h', 'H', 'D', 'd', '<', '>')
        plt.axis('equal')

        dim = len(self.result[0][0]) if self.result else 0
        if dim == 2:
            # 2D
            for i, point_list in enumerate(self.result):
                if comp_list and i not in comp_list:
                    continue
                x, y = zip(*point_list)
                plt.scatter(x, y, c=colors[i % len(colors)],
                            marker=markers[i % len(markers)])
            # print noise
            if self.noise:
                x, y = zip(*self.noise)
                plt.scatter(x, y, c='b', marker='o')
        elif dim == 3:
            # 3D
            ax = fig.add_subplot(111, projection='3d')
            for i, vals in enumerate(self.result):
                if comp_list and i not in comp_list:
                    continue
                x, y, z = zip(*vals)
                ax.scatter(x, y, z, c=colors[i % len(colors)])
            # print noise
            if self.noise:
                x, y, z = zip(*self.noise)
                ax.scatter(x, y, z, c='b', marker='o')

        if dim in [2, 3]:
            if filename:
                plt.savefig(filename)
            else:
                plt.show()
hotator/python-clustering
cluster/base/Cluster.py
Python
gpl-2.0
3,165
[ "exciting" ]
74d334e6509799c8bbfc1481c4946e25dec0e0ef498d78c395ed22dc4a4b0877
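Cluster above is a base class: calculate() is a stub, and show_res() plots whatever a subclass has stored in self.result. A minimal usage sketch, assuming the package is importable as cluster.base.Cluster (per the record path) together with its HelperFunctions module; the two blobs are synthetic:

import numpy as np
from cluster.base.Cluster import Cluster

# Two well-separated 2-D blobs; a real subclass would fill `result`
# inside calculate(), here we assign it by hand.
rng = np.random.RandomState(0)
blob_a = rng.normal(loc=(0.0, 0.0), scale=0.5, size=(50, 2))
blob_b = rng.normal(loc=(5.0, 5.0), scale=0.5, size=(50, 2))

c = Cluster(np.vstack([blob_a, blob_b]))
c.result = [blob_a.tolist(), blob_b.tolist()]
c.show_res()  # scatter plot, one colour/marker per cluster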
#!/usr/bin/env python

import vtk


def main():
    value = 2.0
    colors = vtk.vtkNamedColors()

    implicitFunction = vtk.vtkSuperquadric()
    implicitFunction.SetPhiRoundness(2.5)
    implicitFunction.SetThetaRoundness(.5)

    # Sample the function.
    sample = vtk.vtkSampleFunction()
    sample.SetSampleDimensions(50, 50, 50)
    sample.SetImplicitFunction(implicitFunction)

    xmin, xmax, ymin, ymax, zmin, zmax = -value, value, -value, value, -value, value
    sample.SetModelBounds(xmin, xmax, ymin, ymax, zmin, zmax)

    # Create the 0 isosurface.
    contours = vtk.vtkContourFilter()
    contours.SetInputConnection(sample.GetOutputPort())
    contours.GenerateValues(1, 2.0, 2.0)

    # Map the contours to graphical primitives.
    contourMapper = vtk.vtkPolyDataMapper()
    contourMapper.SetInputConnection(contours.GetOutputPort())
    contourMapper.SetScalarRange(0.0, 1.2)

    # Create an actor for the contours.
    contourActor = vtk.vtkActor()
    contourActor.SetMapper(contourMapper)

    # Create a box around the function to indicate the sampling volume.
    # Create outline.
    outline = vtk.vtkOutlineFilter()
    outline.SetInputConnection(sample.GetOutputPort())

    # Map it to graphics primitives.
    outlineMapper = vtk.vtkPolyDataMapper()
    outlineMapper.SetInputConnection(outline.GetOutputPort())

    # Create an actor.
    outlineActor = vtk.vtkActor()
    outlineActor.SetMapper(outlineMapper)
    outlineActor.GetProperty().SetColor(0, 0, 0)

    # Visualize.
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renderWindow)

    renderer.AddActor(contourActor)
    renderer.AddActor(outlineActor)
    renderer.SetBackground(colors.GetColor3d("Tan"))

    # Enable user interface interactor.
    renderWindow.Render()
    interactor.Start()


if __name__ == '__main__':
    main()
lorensen/VTKExamples
src/Python/ImplicitFunctions/SampleFunction.py
Python
apache-2.0
2,027
[ "VTK" ]
b6e31cadf92e06166b9442c370871dbf93c7a6d99cd9ae25b0628ba062573954
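A small variation on the example above, sketched under the assumption that the same VTK build is available: the identical superquadric isosurface, rendered off-screen and written to a PNG (the filename is arbitrary) instead of opening an interactive window.

import vtk

implicitFunction = vtk.vtkSuperquadric()
implicitFunction.SetPhiRoundness(2.5)
implicitFunction.SetThetaRoundness(.5)

sample = vtk.vtkSampleFunction()
sample.SetSampleDimensions(50, 50, 50)
sample.SetImplicitFunction(implicitFunction)
sample.SetModelBounds(-2.0, 2.0, -2.0, 2.0, -2.0, 2.0)

contours = vtk.vtkContourFilter()
contours.SetInputConnection(sample.GetOutputPort())
contours.GenerateValues(1, 2.0, 2.0)

mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(contours.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)

renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
window = vtk.vtkRenderWindow()
window.SetOffScreenRendering(1)  # no interactor needed
window.AddRenderer(renderer)
window.Render()

# Grab the frame buffer and save it.
grab = vtk.vtkWindowToImageFilter()
grab.SetInput(window)
writer = vtk.vtkPNGWriter()
writer.SetFileName('superquadric.png')
writer.SetInputConnection(grab.GetOutputPort())
writer.Write()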
"""Useful fitting functions.""" from scipy.optimize import curve_fit from scipy.signal import medfilt import numpy as np import traceback import warnings from collections.abc import Iterable from .utils import mad, HAS_MPL __all__ = [ "contiguous_regions", "ref_std", "ref_mad", "linear_fun", "linear_fit", "offset", "offset_fit", "baseline_rough", "purge_outliers", "baseline_als", "fit_baseline_plus_bell", "total_variance", "align", ] def contiguous_regions(condition): """Find contiguous True regions of the boolean array "condition". Return a 2D array where the first column is the start index of the region and the second column is the end index. Parameters ---------- condition : boolean array Returns ------- idx : [[i0_0, i0_1], [i1_0, i1_1], ...] A list of integer couples, with the start and end of each True blocks in the original array Notes ----- From http://stackoverflow.com/questions/4494404/ find-large-number-of-consecutive-values-fulfilling- condition-in-a-numpy-array """ # NOQA # Find the indicies of changes in "condition" diff = np.logical_xor(condition[1:], condition[:-1]) (idx,) = diff.nonzero() # We need to start things after the change in "condition". Therefore, # we'll shift the index by 1 to the right. idx += 1 if condition[0]: # If the start of condition is True prepend a 0 idx = np.r_[0, idx] if condition[-1]: # If the end of condition is True, append the length of the array idx = np.r_[idx, condition.size] # Reshape the result into two columns idx.shape = (-1, 2) return idx def _rolling_window(a, window): """A smart rolling window. Found at http://www.rigtorp.se/2011/01/01/rolling-statistics-numpy.html """ try: shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) except Exception: warnings.warn(traceback.format_exc()) raise def ref_std(array, window=1): """Minimum standard deviation along an array. If a data series is noisy, it is difficult to determine the underlying standard deviation of the original series. Here, the standard deviation is calculated in a rolling window, and the minimum is saved, because it will likely be the interval with less noise. Parameters ---------- array : ``numpy.array`` object or list Input data window : int or float Number of bins of the window Returns ------- ref_std : float The reference Standard Deviation """ return np.std(np.diff(array)) / np.sqrt(2) def ref_mad(array, window=1): """Ref. Median Absolute Deviation of an array, rolling median-subtracted. If a data series is noisy, it is difficult to determine the underlying statistics of the original series. Here, the MAD is calculated in a rolling window, and the minimum is saved, because it will likely be the interval with less noise. Parameters ---------- array : ``numpy.array`` object or list Input data window : int or float Number of bins of the window Returns ------- ref_std : float The reference MAD """ return mad(np.diff(array)) / np.sqrt(2) def linear_fun(x, q, m): """A linear function. Parameters ---------- x : float or array The independent variable m : float The slope q : float The intercept Returns ------- y : float or array The dependent variable """ return m * np.asarray(x, dtype=float) + q def linear_fit(x, y, start_pars, return_err=False): """A linear fit with any set of data. 
Parameters ---------- x : array-like y : array-like start_pars : [q0, m0], floats Intercept and slope of linear function Returns ------- par : [q, m], floats Fitted intercept and slope of the linear function """ par, _ = curve_fit(linear_fun, x, y, start_pars, maxfev=6000) if return_err: warnings.warn("return_err not implemented yet in linear_fit") return par, None else: return par def offset(x, off): """An offset.""" return off def offset_fit(x, y, offset_start=0, return_err=False): """Fit a constant offset to the data. Parameters ---------- x : array-like y : array-like offset_start : float Constant offset, initial value Returns ------- offset : float Fitted offset """ par, _ = curve_fit(offset, x, y, [offset_start], maxfev=6000) if return_err: warnings.warn("return_err not implemented yet in offset_fit") return par[0], None else: return par[0] def baseline_rough(x, y, start_pars=None, return_baseline=False, mask=None): """Rough function to subtract the baseline. Parameters ---------- x : array-like the sample time/number/position y : array-like the data series corresponding to x start_pars : [q0, m0], floats Intercept and slope of linear function Other Parameters ---------------- return_baseline : bool return the baseline? mask : array of bools Mask indicating the good x and y data. True for good, False for bad Returns ------- y_subtracted : array-like, same size as y The initial time series, subtracted from the trend baseline : array-like, same size as y Fitted baseline """ N = len(y) if start_pars is None: if N > 40: m0 = (np.median(y[-20:]) - np.median(y[:20])) / ( np.mean(x[-20:]) - np.mean(x[:20]) ) else: m0 = (y[-1] - y[0]) / (x[-1] - x[0]) q0 = min(y) start_pars = [q0, m0] lc = y.copy() time = x.copy() if mask is None: mask = np.ones(len(time), dtype=bool) total_trend = 0 if N < 20: par = linear_fit(time, lc, start_pars) lc = lc - linear_fun(time, *par) total_trend = total_trend + linear_fun(time, *par) else: local_std = ref_std(lc, np.max([N // 20, 20])) for percentage in [0.8, 0.15]: time_to_fit = time[mask][1:-1] lc_to_fit = lc[mask][1:-1] if len(time_to_fit) < len(start_pars): break sorted_els = np.argsort(lc_to_fit) # Select the lowest half elements good = sorted_els[: int(N * percentage)] if np.std(lc_to_fit[good]) < 2 * local_std: good = np.ones(len(lc_to_fit), dtype=bool) time_filt = time_to_fit[good] lc_filt = lc_to_fit[good] if len(time_filt) < len(start_pars): break back_in_order = np.argsort(time_filt) lc_filt = lc_filt[back_in_order] time_filt = time_filt[back_in_order] par = linear_fit(time_filt, lc_filt, start_pars) lc = lc - linear_fun(time, *par) total_trend = total_trend + linear_fun(time, *par) if return_baseline: return lc, total_trend else: return lc def outlier_from_median_filt(y, window_size, down=True, up=True): y_medfilt = medfilt(y, window_size) diffs = y - y_medfilt min_diff = mad(diffs) outliers = np.zeros(len(y), dtype=bool) if down: outliers = np.logical_or(outliers, -diffs > 10 * min_diff) if up: outliers = np.logical_or(outliers, diffs > 10 * min_diff) return outliers def purge_outliers( y, window_size=5, up=True, down=True, mask=None, plot=False ): """Remove obvious outliers. 
Attention: This is known to throw false positives on bona fide, very strong Gaussian peaks """ # Needs to be odd window_size = window_size // 2 * 2 + 1 if mask is None: mask = np.ones(len(y), dtype=bool) bad_mask = np.logical_not(mask) if not (up or down): return y ysave = y y = y.copy() win1 = outlier_from_median_filt(y, window_size) win2 = outlier_from_median_filt(y, window_size * 2 + 1) local_outliers = win1 & win2 Noutliers = len(local_outliers[local_outliers]) if Noutliers > 0: warnings.warn("Found {} outliers".format(Noutliers), UserWarning) outliers = np.logical_or(local_outliers, bad_mask) if not np.any(outliers): return y bad = contiguous_regions(outliers) for b in bad: if b[0] == 0: y[b[0]] = y[b[1]] elif b[1] >= len(y): y[b[0] :] = y[b[0] - 1] else: previous = y[b[0] - 1] next_bin = y[b[1]] dx = b[1] - b[0] y[b[0] : b[1]] = (next_bin - previous) / (dx + 1) * np.arange( 1, b[1] - b[0] + 1 ) + previous if plot and HAS_MPL: import matplotlib.pyplot as plt fig = plt.figure() plt.plot(ysave, label="Input data") plt.plot(y, zorder=3, label="Filtered data") plt.plot(medfilt(ysave, window_size), zorder=6, lw=1, label="Medfilt") plt.savefig("Bubu_" + str(np.random.randint(0, 10000000)) + ".png") plt.legend() plt.close(fig) return y def _als(y, lam, p, niter=30): """Baseline Correction with Asymmetric Least Squares Smoothing. Modifications to the routine from Eilers & Boelens 2005 https://www.researchgate.net/publication/ 228961729_Technical_Report_Baseline_Correction_with_ Asymmetric_Least_Squares_Smoothing The Python translation is partly from http://stackoverflow.com/questions/29156532/ python-baseline-correction-library Parameters ---------- y : array-like the data series corresponding to x lam : float the lambda parameter of the ALS method. This control how much the baseline can adapt to local changes. A higher value corresponds to a stiffer baseline p : float the asymmetry parameter of the ALS method. This controls the overall slope tollerated for the baseline. A higher value correspond to a higher possible slope Other parameters ---------------- niter : int The number of iterations to perform Returns ------- z : array-like, same size as y Fitted baseline. """ from scipy import sparse L = len(y) D = sparse.csc_matrix(np.diff(np.eye(L), 2)) w = np.ones(L) for _ in range(niter): W = sparse.spdiags(w, 0, L, L) Z = W + lam * D.dot(D.transpose()) z = sparse.linalg.spsolve(Z, w * y) w = p * (y > z) + (1 - p) * (y < z) return z def baseline_als(x, y, **kwargs): """Baseline Correction with Asymmetric Least Squares Smoothing. If the input arrays are larger than 300 elements, ignores outlier_purging and executes the baseline calculation on a small subset of the (median-filtered) y-array Parameters ---------- x : array-like the sample time/number/position y : array-like the data series corresponding to x lam : float the lambda parameter of the ALS method. This control how much the baseline can adapt to local changes. A higher value corresponds to a stiffer baseline p : float the asymmetry parameter of the ALS method. This controls the overall slope tollerated for the baseline. A higher value correspond to a higher possible slope Other Parameters ---------------- niter : int The number of iterations to perform return_baseline : bool return the baseline? offset_correction : bool also correct for an offset to align with the running mean of the scan outlier_purging : bool Purge outliers before the fit? mask : array of bools Mask indicating the good x and y data. 
True for good, False for bad Returns ------- y_subtracted : array-like, same size as y The initial time series, subtracted from the trend baseline : array-like, same size as y Fitted baseline. Only returned if return_baseline is True """ from scipy.interpolate import interp1d if y.size < 300: return _baseline_als(x, y, **kwargs) _ = kwargs.pop("outlier_purging", False) return_baseline = kwargs.pop("return_baseline", False) y_medf = medfilt(y, 31) els = np.array(np.rint(np.linspace(0, y.size - 1, 31)), dtype=int) y_sub, base = _baseline_als( x[els], y_medf[els], outlier_purging=False, return_baseline=True, **kwargs ) func = interp1d(x[els], base) baseline = func(x) if return_baseline: return y - baseline, baseline else: return y - baseline def _baseline_als( x, y, lam=None, p=None, niter=40, return_baseline=False, offset_correction=True, mask=None, outlier_purging=True, ): if not isinstance(outlier_purging, Iterable): outlier_purging = (outlier_purging, outlier_purging) if lam is None: lam = 1e11 if p is None: p = 0.001 N = len(y) if N > 40: med_start = np.median(y[:20]) med_stop = np.median(y[-20:]) approx_m = (med_stop - med_start) / (N - 20) else: approx_m = (y[-1] - y[0]) / (N - 1) approx_q = y[0] approx_baseline = approx_m * np.arange(N) + approx_q y = y - approx_baseline y_mod = purge_outliers( y, up=outlier_purging[0], down=outlier_purging[1], mask=mask ) z = _als(y_mod, lam, p, niter=niter) offset = 0 ysub = y_mod - z if offset_correction: std = ref_std(ysub, np.max([len(y) // 20, 20])) good = np.abs(ysub) < 10 * std if len(ysub[good]) < 20: good = np.ones(len(ysub), dtype=bool) offset = np.median(ysub[good]) if np.isnan(offset): offset = 0 if return_baseline: return y - z - offset, z + offset + approx_baseline else: return y - z - offset def detrend_spectroscopic_data( x, spectrum, kind="als", mask=None, outlier_purging=True ): """Take the baseline off the spectroscopic data. Examples -------- >>> spectrum = np.vstack([np.arange(0 + i, 2 + i, 1/3) ... for i in np.arange(0., 4, 1/16)]) >>> x = np.arange(spectrum.shape[0]) >>> detr, _ = detrend_spectroscopic_data(x, spectrum, kind='rough') >>> np.allclose(detr, 0, atol=1e-3) True """ y = np.sum(spectrum, axis=1) if kind == "als": y_sub, baseline = baseline_als( x, y, return_baseline=True, outlier_purging=outlier_purging, mask=mask, ) elif kind == "rough": y_sub, baseline = baseline_rough(x, y, return_baseline=True, mask=mask) else: warnings.warn("Baseline kind unknown") return spectrum, np.ones_like(spectrum) if len(spectrum.shape) == 1: return y_sub, baseline shape = spectrum.shape tiled_baseline = np.tile(baseline, (shape[1], 1)).transpose() tiled_norm = np.tile(y, (shape[1], 1)).transpose() tiled_baseline = tiled_baseline / tiled_norm * spectrum return spectrum - tiled_baseline, tiled_baseline def fit_baseline_plus_bell(x, y, ye=None, kind="gauss"): """Fit a function composed of a linear baseline plus a bell function. Parameters ---------- x : array-like the sample time/number/position y : array-like the data series corresponding to x Other parameters ---------------- ye : array-like the errors on the data series kind: str Can be 'gauss' or 'lorentz' Returns ------- mod_out : ``Astropy.modeling.model`` object The fitted model fit_info : dict Fit info from the Astropy fitting routine. 
""" if kind not in ["gauss", "lorentz"]: raise ValueError("kind has to be one of: gauss, lorentz") from astropy.modeling import models, fitting approx_m = (np.median(y[-20:]) - np.median(y[:20])) / ( np.mean(x[-20:]) - np.mean(x[:20]) ) base = models.Linear1D( slope=approx_m, intercept=np.median(y[:20]), name="Baseline" ) xrange = np.max(x) - np.min(x) yrange = np.max(y) - np.min(y) if kind == "gauss": bell = models.Gaussian1D( mean=np.mean(x), stddev=xrange / 20, amplitude=yrange, name="Bell" ) bell.amplitude.bounds = (0, None) bell.mean.bounds = (None, None) bell.stddev.bounds = (0, None) # max_name = 'mean' elif kind == "lorentz": bell = models.Lorentz1D( x_0=np.mean(x), fwhm=xrange / 20, amplitude=yrange, name="Bell" ) bell.amplitude.bounds = (0, None) bell.x_0.bounds = (None, None) bell.fwhm.bounds = (0, None) # max_name = 'x_0' mod_init = base + bell fit = fitting.LevMarLSQFitter() mod_out = fit(mod_init, x, y) return mod_out, fit.fit_info def total_variance(xs, ys, params): """Calculate the total variance of a series of scans. This functions subtracts a linear function from each of the scans (excluding the first one) and calculates the total variance. Parameters ---------- xs : list of array-like [array1, array2, ...] list of arrays containing the x values of each scan ys : list of array-like [array1, array2, ...] list of arrays containing the y values of each scan params : list of array-like [[q0, m0], [q1, m1], ...] list of arrays containing the parameters [m, q] for each scan. Returns ------- total_variance : float The total variance of the baseline-subtracted scans. """ params = np.array(params).flatten() qs = params[: len(xs) - 1] ms = params[len(xs) - 1 :] x = xs[0].copy() y = ys[0].copy() for i in range(1, len(xs)): x = np.append(x, xs[i]) scaled_y = ys[i] - (xs[i] * ms[i - 1] + qs[i - 1]) y = np.append(y, scaled_y) order = np.argsort(x) x = x[order] y = y[order] x_range = [np.min(x), np.max(x)] xints = np.linspace(x_range[0], x_range[1], int(len(x) / 20)) values = np.array( [ np.var(y[(x >= xints[k]) & (x < xints[k + 1])]) for k in range(len(xints[:-1])) ] ) good = values == values value = np.mean(values[good]) return value def _objective_function(params, args): """Put the parameters in the right order to use with scipy's minimize.""" return total_variance(args[0], args[1], params) def align(xs, ys): """Given the first scan, it aligns all the others to that. Parameters ---------- xs : list of array-like [array1, array2, ...] list of arrays containing the x values of each scan ys : list of array-like [array1, array2, ...] list of arrays containing the y values of each scan Returns ------- qs : array-like The list of intercepts maximising the alignment, one for each scan ms : array-like The list of slopes maximising the alignment, one for each scan """ from scipy.optimize import minimize qs = np.zeros(len(xs) - 1) ms = np.zeros(len(xs) - 1) result = minimize( _objective_function, [qs, ms], args=[xs, ys], options={"disp": True} ) qs = result.x[: len(xs) - 1] ms = np.zeros(len(xs) - 1) result = minimize( _objective_function, [qs, ms], args=[xs, ys], options={"disp": True} ) qs = result.x[: len(xs) - 1] ms = result.x[len(xs) - 1 :] return qs, ms
matteobachetti/srt-single-dish-tools
srttools/fit.py
Python
bsd-3-clause
19,985
[ "Gaussian" ]
4daa215e5dae3a583cf9f014ac442ac1ca35c933556ef1477b64ec7cfd6b009a
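As a small illustration of fit_baseline_plus_bell defined above, the sketch below builds a synthetic scan and recovers the drift slope and peak centre. It assumes the module is importable as srttools.fit (the record path) and that astropy suffixes compound-model parameter names with the component index (_0 for the baseline, _1 for the bell):

import numpy as np
from srttools.fit import fit_baseline_plus_bell

# Synthetic scan: linear drift + Gaussian bump + mild noise.
rng = np.random.RandomState(42)
x = np.linspace(0.0, 100.0, 500)
y = 0.05 * x + 3.0 + 2.0 * np.exp(-(x - 50.0) ** 2 / (2 * 4.0 ** 2))
y = y + rng.normal(scale=0.05, size=x.size)

model, fit_info = fit_baseline_plus_bell(x, y, kind='gauss')
print(model.slope_0.value)  # ~0.05 (baseline drift)
print(model.mean_1.value)   # ~50   (bell centre)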
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in control_flow_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops # pylint: disable=wildcard-import,undefined-variable from tensorflow.python.ops.control_flow_ops import * from tensorflow.python.ops.gen_control_flow_ops import * # pylint: enable=wildcard-import def _SwitchGrad(op, *grad): """Gradients for a Switch op is calculated using a Merge op. If the switch is a loop switch, it will be visited twice. We create the merge on the first visit, and update the other input of the merge on the second visit. A next_iteration is also added on second visit. """ graph = ops.get_default_graph() # pylint: disable=protected-access op_ctxt = op._get_control_flow_context() grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if isinstance(op_ctxt, WhileContext): merge_op = grad_ctxt.grad_state.switch_map.get(op) if merge_op: # This is the second time this Switch is visited. It comes from # the non-exit branch of the Switch, so update the second input # to the Merge. # TODO: Perform shape inference with this new input. # pylint: disable=protected-access merge_op._update_input(1, control_flow_ops._NextIteration(grad[1])) # pylint: enable=protected-access return None, None else: # This is the first time this Switch is visited. It always comes # from the Exit branch, which is grad[0]. grad[1] is empty at this point. # Use grad[0] for both inputs to merge for now, but update the second # input of merge when we see this Switch the second time. merge_fn = control_flow_ops._Merge # pylint: disable=protected-access merge_op = merge_fn([grad[0], grad[0]], name="b_switch")[0] grad_ctxt.grad_state.switch_map[op] = merge_op.op return merge_op, None elif isinstance(op_ctxt, CondContext): good_grad = grad[op_ctxt.branch] zero_grad = grad[1 - op_ctxt.branch] # If we are in a grad context, this switch is part of a cond within a # loop. In this case, we have called ControlFlowState.ZeroLike() so grad # is ready for merge. Otherwise, we need a switch to control zero_grad. 
if not (grad_ctxt and grad_ctxt.grad_state): dtype = good_grad.dtype branch = op_ctxt.branch zero_grad = switch(zero_grad, op_ctxt.pred, dtype=dtype)[1 - branch] return merge([good_grad, zero_grad], name="cond_grad")[0], None else: false_grad = switch(grad[0], op.inputs[1])[0] true_grad = switch(grad[1], op.inputs[1])[1] return merge([false_grad, true_grad])[0], None ops.RegisterGradient("Switch")(_SwitchGrad) ops.RegisterGradient("RefSwitch")(_SwitchGrad) @ops.RegisterGradient("Merge") def _MergeGrad(op, grad, _): """Gradients for a Merge op are calculated using a Switch op.""" input_op = op.inputs[0].op graph = ops.get_default_graph() # pylint: disable=protected-access op_ctxt = input_op._get_control_flow_context() grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if isinstance(op_ctxt, WhileContext): # pylint: disable=protected-access return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot) # pylint: enable=protected-access elif isinstance(op_ctxt, CondContext): pred = op_ctxt.pred if grad_ctxt and grad_ctxt.grad_state: # This Merge node is part of a cond within a loop. # The backprop needs to have the value of this predicate for every # iteration. So we must have its values accumulated in the forward, and # use the accumulated values as the predicate for this backprop switch. grad_state = grad_ctxt.grad_state real_pred = grad_state.history_map.get(pred.name) if real_pred is None: # Remember the value of pred for every iteration. grad_ctxt = grad_state.grad_context grad_ctxt.Exit() history_pred = grad_state.AddForwardAccumulator(pred) grad_ctxt.Enter() # Add the stack pop op. If pred.op is in a (outer) CondContext, # the stack pop will be guarded with a switch. real_pred = grad_state.AddBackPropAccumulatedValue(history_pred, pred) grad_state.history_map[pred.name] = real_pred pred = real_pred # pylint: disable=protected-access return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad") # pylint: enable=protected-access else: num_inputs = len(op.inputs) cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)] # pylint: disable=protected-access return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1] for i in xrange(num_inputs)] # pylint: enable=protected-access @ops.RegisterGradient("RefMerge") def _RefMergeGrad(op, grad, _): return _MergeGrad(op, grad, _) @ops.RegisterGradient("Exit") def _ExitGrad(_, grad): """Gradients for an exit op are calculated using an Enter op.""" graph = ops.get_default_graph() # pylint: disable=protected-access grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if not grad_ctxt.back_prop: # The flag `back_prop` is set by users to suppress gradient # computation for this loop. If the attribute `back_prop` is false, # no gradient computation. return None grad_ctxt.AddName(grad.name) enter_fn = control_flow_ops._Enter # pylint: disable=protected-access grad_ctxt.Enter() result = enter_fn(grad, grad_ctxt.name, is_constant=False, parallel_iterations=grad_ctxt.parallel_iterations, name="b_exit") grad_ctxt.Exit() return result ops.RegisterGradient("RefExit")(_ExitGrad) @ops.RegisterGradient("NextIteration") def _NextIterationGrad(_, grad): """A forward next_iteration is translated into a backprop identity. Note that the backprop next_iteration is added in switch grad. 
""" return grad @ops.RegisterGradient("RefNextIteration") def _RefNextIterationGrad(_, grad): return _NextIterationGrad(_, grad) @ops.RegisterGradient("Enter") def _EnterGrad(op, grad): """Gradients for an Enter are calculated using an Exit op. For loop variables, grad is the gradient so just add an exit. For loop invariants, we need to add an accumulator loop. """ graph = ops.get_default_graph() # pylint: disable=protected-access grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if not grad_ctxt.back_prop: # If the attribute `back_prop` is true, no gradient computation. return grad if op.get_attr("is_constant"): # Add a gradient accumulator for each loop invariant. result = grad_ctxt.AddBackPropAccumulator(grad) else: result = exit(grad) grad_ctxt.ExitResult([result]) return result @ops.RegisterGradient("RefEnter") def _RefEnterGrad(op, grad): return _EnterGrad(op, grad) @ops.RegisterGradient("LoopCond") def _LoopCondGrad(_): """Stop backprop for the predicate of a while loop.""" return None
panmari/tensorflow
tensorflow/python/ops/control_flow_grad.py
Python
apache-2.0
7,949
[ "VisIt" ]
213f3a1793a4c85c72e86ab01d250b3201155fa493ea883e52971a85b67f63de
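These registered gradients are what tf.gradients exercises when it is taken through a while loop: Exit becomes Enter, Switch becomes Merge, and so on. A minimal sketch, assuming a graph-mode (1.x-era) TensorFlow where tf.while_loop, tf.gradients and tf.Session are available:

import tensorflow as tf

x = tf.constant(3.0)

def cond(i, acc):
    return i < 5

def body(i, acc):
    return i + 1, acc * x  # acc == x**5 at loop exit

_, result = tf.while_loop(cond, body, [tf.constant(0), tf.constant(1.0)])
grad = tf.gradients(result, x)[0]  # d(x**5)/dx = 5 * x**4

with tf.Session() as sess:
    print(sess.run([result, grad]))  # [243.0, 405.0]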
import sys, logging, os, time, re,json,hashlib import xml.dom.minidom SPLUNK_HOME = os.environ.get("SPLUNK_HOME") RESPONSE_HANDLER_INSTANCE = None SPLUNK_PORT = 8089 STANZA = None SESSION_TOKEN = None REGEX_PATTERN = None #dynamically load in any eggs in /etc/apps/tesla_ta/bin EGG_DIR = SPLUNK_HOME + "/etc/apps/tesla_ta/bin/" for filename in os.listdir(EGG_DIR): if filename.endswith(".egg"): sys.path.append(EGG_DIR + filename) import requests, json from splunklib.client import connect from splunklib.client import Service #set up logging logging.root logging.root.setLevel(logging.ERROR) formatter = logging.Formatter('%(levelname)s %(message)s') #with zero args , should go to STD ERR handler = logging.StreamHandler() handler.setFormatter(formatter) logging.root.addHandler(handler) SCHEME = """<scheme> <title>Tesla</title> <description>Tesla input for polling data from My Tesla</description> <use_external_validation>true</use_external_validation> <streaming_mode>xml</streaming_mode> <use_single_instance>false</use_single_instance> <endpoint> <args> <arg name="name"> <title>Tesla input name</title> <description>Name of this Tesla input</description> </arg> <arg name="activation_key"> <title>Activation Key</title> <description>Visit http://www.baboonbones.com/#activation to obtain a non-expiring key</description> <required_on_edit>true</required_on_edit> <required_on_create>true</required_on_create> </arg> <arg name="vehicle_id"> <title>Tesla Vehicle ID</title> <description>Tesla Vehicle ID</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> <arg name="api_base"> <title>Base URL</title> <description>Base URL to send the HTTP GET request to</description> <required_on_edit>false</required_on_edit> <required_on_create>true</required_on_create> </arg> <arg name="oauth_url"> <title>OAuth URL</title> <description>OAuth URL</description> <required_on_edit>false</required_on_edit> <required_on_create>true</required_on_create> </arg> <arg name="endpoint"> <title>Endpoint Path</title> <description>Endpoint Path to send the HTTP GET request to</description> <required_on_edit>false</required_on_edit> <required_on_create>true</required_on_create> </arg> <arg name="user"> <title>My Tesla User</title> <description>My Tesla User</description> <required_on_edit>false</required_on_edit> <required_on_create>true</required_on_create> </arg> <arg name="password"> <title>My Tesla Password</title> <description>My Tesla Password</description> <required_on_edit>false</required_on_edit> <required_on_create>true</required_on_create> </arg> <arg name="client_id"> <title>Client ID</title> <description>Client ID</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> <arg name="client_secret"> <title>Client Secret</title> <description>Client Secret</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> <arg name="http_proxy"> <title>HTTP Proxy Address</title> <description>HTTP Proxy Address</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> <arg name="https_proxy"> <title>HTTPs Proxy Address</title> <description>HTTPs Proxy Address</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> <arg name="request_timeout"> <title>Request Timeout</title> <description>Request Timeout in seconds</description> <required_on_edit>false</required_on_edit> 
<required_on_create>false</required_on_create> </arg> <arg name="backoff_time"> <title>Backoff Time</title> <description>Time in seconds to wait for retry after error or timeout</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> <arg name="polling_interval"> <title>Polling Interval</title> <description>Interval time in seconds to poll the endpoint</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> <arg name="index_error_response_codes"> <title>Index Error Responses</title> <description>Whether or not to index error response codes : true | false</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> <arg name="response_handler"> <title>Response Handler</title> <description>Python classname of custom response handler</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> <arg name="response_handler_args"> <title>Response Handler Arguments</title> <description>Response Handler arguments string , key=value,key2=value2</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> <arg name="response_filter_pattern"> <title>Response Filter Pattern</title> <description>Python Regex pattern, if present , responses must match this pattern to be indexed</description> <required_on_edit>false</required_on_edit> <required_on_create>false</required_on_create> </arg> </args> </endpoint> </scheme> """ def do_validate(): config = get_validation_config() #TODO #if error , print_validation_error & sys.exit(2) def do_run(): config = get_input_config() activation_key = config.get("activation_key").strip() app_name = "Tesla Vehicle Modular Input" if len(activation_key) > 32: activation_hash = activation_key[:32] activation_ts = activation_key[32:][::-1] current_ts = time.time() m = hashlib.md5() m.update((app_name + activation_ts)) if not m.hexdigest().upper() == activation_hash.upper(): logging.error("FATAL Trial Activation key for App '%s' failed. Please ensure that you copy/pasted the key correctly." % app_name) sys.exit(2) if ((current_ts - long(activation_ts)) > 604800): logging.error("FATAL Trial Activation key for App '%s' has now expired. Please visit http://www.baboonbones.com/#activation to purchase a non expiring key." % app_name) sys.exit(2) else: m = hashlib.md5() m.update((app_name)) if not m.hexdigest().upper() == activation_key.upper(): logging.error("FATAL Activation key for App '%s' failed. Please ensure that you copy/pasted the key correctly." 
                      % app_name)
        sys.exit(2)

    # setup some globals
    server_uri = config.get("server_uri")
    global SPLUNK_PORT
    global STANZA
    global SESSION_TOKEN
    SPLUNK_PORT = server_uri[18:]
    STANZA = config.get("name")
    SESSION_TOKEN = config.get("session_key")

    # params
    vehicle_id = config.get("vehicle_id")
    api_base = config.get("api_base")
    api_path = config.get("endpoint")

    if vehicle_id:
        api_path_resolved = api_path.replace('{vehicle_id}', vehicle_id)
        endpoint = api_base + api_path_resolved
    else:
        endpoint = api_base + api_path

    http_method = config.get("http_method", "GET")

    user = config.get("user")
    password = config.get("password")
    client_id = config.get("client_id")
    client_secret = config.get("client_secret")
    oauth_url = config.get("oauth_url")

    response_type = config.get("response_type", "json")

    http_proxy = config.get("http_proxy")
    https_proxy = config.get("https_proxy")

    proxies = {}
    if http_proxy is not None:
        proxies["http"] = http_proxy
    if https_proxy is not None:
        proxies["https"] = https_proxy

    request_timeout = int(config.get("request_timeout", 30))
    backoff_time = int(config.get("backoff_time", 120))
    polling_interval = int(config.get("polling_interval", 300))
    index_error_response_codes = int(config.get("index_error_response_codes", 0))

    response_filter_pattern = config.get("response_filter_pattern")
    if response_filter_pattern:
        global REGEX_PATTERN
        REGEX_PATTERN = re.compile(response_filter_pattern)

    response_handler_args = {}
    response_handler_args_str = config.get("response_handler_args")
    if response_handler_args_str is not None:
        response_handler_args = dict((k.strip(), v.strip()) for k, v in
                                     (item.split('=') for item in response_handler_args_str.split(delimiter)))

    response_handler = config.get("response_handler", "DefaultResponseHandler")
    module = __import__("responsehandlers")
    class_ = getattr(module, response_handler)
    global RESPONSE_HANDLER_INSTANCE
    RESPONSE_HANDLER_INSTANCE = class_(**response_handler_args)

    try:
        req_args = {"verify": False, "timeout": float(request_timeout)}
        if proxies:
            req_args["proxies"] = proxies

        token = ''
        while True:
            try:
                if not token:
                    req_args['data'] = {'grant_type': 'password', 'email': user, 'password': password,
                                        'client_id': client_id, 'client_secret': client_secret}
                    # perform auth request
                    r = requests.post(oauth_url, **req_args)
                    json_response = json.loads(r.text)
                    token = json_response['access_token']
                    del req_args['data']
                    req_args['headers'] = {'Authorization': 'Bearer ' + token}
                # perform API request
                r = requests.get(endpoint, **req_args)
            except requests.exceptions.Timeout, e:
                token = ''
                logging.error("HTTP Request Timeout error: %s" % str(e))
                time.sleep(float(backoff_time))
                continue
            except Exception as e:
                token = ''
                logging.error("Exception performing request: %s" % str(e))
                time.sleep(float(backoff_time))
                continue

            try:
                r.raise_for_status()
                handle_output(r, r.text, response_type, req_args, endpoint)
            except requests.exceptions.HTTPError, e:
                # reset for reauth
                token = ''
                error_output = r.text
                error_http_code = r.status_code
                if index_error_response_codes:
                    error_event = 'http_error_code = %s error_message = %s' % (error_http_code, error_output)
                    print_xml_single_instance_mode(error_event)
                    sys.stdout.flush()
                logging.error("HTTP Request error: %s" % str(e))
                time.sleep(float(backoff_time))
                continue

            time.sleep(float(polling_interval))

    except RuntimeError, e:
        logging.error("Looks like an error: %s" % str(e))
        sys.exit(2)


def dictParameterToStringFormat(parameter):
    if parameter:
        return ''.join('{}={},'.format(key, val) for key, val in parameter.items())[:-1]
    else:
        return None


def handle_output(response, output, type, req_args, endpoint):
    try:
        if REGEX_PATTERN:
            search_result = REGEX_PATTERN.search(output)
            if search_result is None:
                return
        RESPONSE_HANDLER_INSTANCE(response, output, type, req_args, endpoint)
        sys.stdout.flush()
    except RuntimeError, e:
        logging.error("Looks like an error handling the response output: %s" % str(e))


# prints validation error data to be consumed by Splunk
def print_validation_error(s):
    print "<error><message>%s</message></error>" % encodeXMLText(s)


# prints XML stream
def print_xml_single_instance_mode(s):
    print "<stream><event><data>%s</data></event></stream>" % encodeXMLText(s)


# prints simple stream
def print_simple(s):
    print "%s\n" % s


def encodeXMLText(text):
    text = text.replace("&", "&amp;")
    text = text.replace("\"", "&quot;")
    text = text.replace("'", "&apos;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    return text


def usage():
    print "usage: %s [--scheme|--validate-arguments]" % sys.argv[0]
    logging.error("Incorrect Program Usage")
    sys.exit(2)


def do_scheme():
    print SCHEME


# read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_input_config():
    config = {}

    try:
        # read everything from stdin
        config_str = sys.stdin.read()

        # parse the config XML
        doc = xml.dom.minidom.parseString(config_str)
        root = doc.documentElement
        session_key_node = root.getElementsByTagName("session_key")[0]
        if session_key_node and session_key_node.firstChild and session_key_node.firstChild.nodeType == session_key_node.firstChild.TEXT_NODE:
            data = session_key_node.firstChild.data
            config["session_key"] = data
        server_uri_node = root.getElementsByTagName("server_uri")[0]
        if server_uri_node and server_uri_node.firstChild and server_uri_node.firstChild.nodeType == server_uri_node.firstChild.TEXT_NODE:
            data = server_uri_node.firstChild.data
            config["server_uri"] = data
        conf_node = root.getElementsByTagName("configuration")[0]
        if conf_node:
            logging.debug("XML: found configuration")
            stanza = conf_node.getElementsByTagName("stanza")[0]
            if stanza:
                stanza_name = stanza.getAttribute("name")
                if stanza_name:
                    logging.debug("XML: found stanza " + stanza_name)
                    config["name"] = stanza_name
                    params = stanza.getElementsByTagName("param")
                    for param in params:
                        param_name = param.getAttribute("name")
                        logging.debug("XML: found param '%s'" % param_name)
                        if param_name and param.firstChild and \
                           param.firstChild.nodeType == param.firstChild.TEXT_NODE:
                            data = param.firstChild.data
                            config[param_name] = data
                            logging.debug("XML: '%s' -> '%s'" % (param_name, data))

        checkpnt_node = root.getElementsByTagName("checkpoint_dir")[0]
        if checkpnt_node and checkpnt_node.firstChild and \
           checkpnt_node.firstChild.nodeType == checkpnt_node.firstChild.TEXT_NODE:
            config["checkpoint_dir"] = checkpnt_node.firstChild.data

        if not config:
            raise Exception, "Invalid configuration received from Splunk."

    except Exception, e:
        raise Exception, "Error getting Splunk configuration via STDIN: %s" % str(e)

    return config


# read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_validation_config():
    val_data = {}

    # read everything from stdin
    val_str = sys.stdin.read()

    # parse the validation XML
    doc = xml.dom.minidom.parseString(val_str)
    root = doc.documentElement

    logging.debug("XML: found items")
    item_node = root.getElementsByTagName("item")[0]
    if item_node:
        logging.debug("XML: found item")

        name = item_node.getAttribute("name")
        val_data["stanza"] = name

        params_node = item_node.getElementsByTagName("param")
        for param in params_node:
            name = param.getAttribute("name")
            logging.debug("Found param %s" % name)
            if name and param.firstChild and \
               param.firstChild.nodeType == param.firstChild.TEXT_NODE:
                val_data[name] = param.firstChild.data

    return val_data


if __name__ == '__main__':
    if len(sys.argv) > 1:
        if sys.argv[1] == "--scheme":
            do_scheme()
        elif sys.argv[1] == "--validate-arguments":
            do_validate()
        else:
            usage()
    else:
        do_run()

    sys.exit(0)
damiendallimore/SplunkModularInputsPythonFramework
implementations/tesla/bin/tesla.py
Python
apache-2.0
18,139
[ "VisIt" ]
a5b184244d0ff4a1a6b8e2958c82a712d151f884de191ee4089d54215bab5d84
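The polling loop in tesla.py above re-authenticates with an OAuth password grant whenever a request fails, sleeps for a backoff period, and otherwise waits out the polling interval. Below is a minimal Python 3 sketch of that retry/re-auth pattern with the Splunk plumbing stripped out; the function and parameter names here are illustrative, not part of the module's real configuration.

```python
import time
import requests

def poll_with_reauth(oauth_url, endpoint, creds, backoff=120, interval=300):
    """Yield response bodies forever, re-authenticating after any failure."""
    token = None
    while True:
        try:
            if token is None:
                # OAuth "password" grant, as in the loop above
                r = requests.post(oauth_url, data=dict(grant_type="password", **creds), timeout=30)
                r.raise_for_status()
                token = r.json()["access_token"]
            r = requests.get(endpoint, headers={"Authorization": "Bearer " + token}, timeout=30)
            r.raise_for_status()
            yield r.text
        except requests.exceptions.RequestException:
            token = None          # force a fresh token on the next pass
            time.sleep(backoff)
            continue
        time.sleep(interval)
```

Catching the broad `RequestException` covers timeouts, connection errors, and HTTP errors alike, which mirrors the original's behavior of discarding the token and backing off on any failure.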
''' ResourceStatusDB: This module provides definition of the DB tables, and methods to access them. Written using sqlalchemy declarative_base For extending the ResourceStatusDB tables: 1) In the extended module, call: from DIRAC.ResourceStatusSystem.DB.ResourceStatusDB import rmsBase, TABLESLIST, TABLESLISTWITHID TABLESLIST = TABLESLIST + [list of new table names] TABLESLISTWITHID = TABLESLISTWITHID + [list of new table names] 2) provide a declarative_base definition of the tables (new or extended) in the extension module ''' __RCSID__ = "$Id$" import datetime from sqlalchemy import desc from sqlalchemy.orm import sessionmaker, class_mapper from sqlalchemy.orm.query import Query from sqlalchemy.engine.reflection import Inspector from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine, Column, String, DateTime, exc, BigInteger from DIRAC import S_OK, S_ERROR, gLogger, gConfig from DIRAC.ConfigurationSystem.Client.Utilities import getDBParameters from DIRAC.ResourceStatusSystem.Utilities import Utils TABLESLIST = ['SiteStatus', 'ResourceStatus', 'NodeStatus'] TABLESLISTWITHID = ['SiteLog', 'SiteHistory', 'ResourceLog', 'ResourceHistory', 'NodeLog', 'NodeHistory'] # Defining the tables rssBase = declarative_base() class ElementStatusBase(object): """ Prototype for tables """ __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} name = Column( 'Name', String( 64 ), nullable = False, primary_key = True ) statustype = Column( 'StatusType', String( 128 ), nullable = False, server_default = 'all', primary_key = True ) status = Column( 'Status', String( 8 ), nullable = False, server_default = '' ) reason = Column( 'Reason', String( 512 ), nullable = False, server_default = 'Unspecified' ) dateeffective = Column( 'DateEffective', DateTime, nullable = False ) tokenexpiration = Column( 'TokenExpiration', DateTime, nullable = False , server_default = '9999-12-31 23:59:59' ) elementtype = Column( 'ElementType', String( 32 ), nullable = False, server_default = '' ) lastchecktime = Column( 'LastCheckTime', DateTime, nullable = False , server_default = '1000-01-01 00:00:00' ) tokenowner = Column( 'TokenOwner', String( 16 ), nullable = False , server_default = 'rs_svc') def fromDict( self, dictionary ): """ Fill the fields of the AccountingCache object from a dictionary :param dictionary: Dictionary to fill a single line :type arguments: dict """ utcnow = self.lastchecktime.replace(microsecond = 0) if self.lastchecktime\ else datetime.datetime.utcnow().replace(microsecond = 0) self.name = dictionary.get( 'Name', self.name ) self.statustype = dictionary.get( 'StatusType', self.statustype ) self.status = dictionary.get( 'Status', self.status ) self.reason = dictionary.get( 'Reason', self.reason ) self.dateeffective = dictionary.get( 'DateEffective', self.dateeffective ) self.tokenexpiration = dictionary.get( 'TokenExpiration', self.tokenexpiration ) self.elementtype = dictionary.get( 'ElementType', self.elementtype ) self.lastchecktime = dictionary.get( 'LastCheckTime', utcnow ) self.tokenowner = dictionary.get( 'TokenOwner', self.tokenowner ) if self.dateeffective: self.dateeffective = self.dateeffective.replace(microsecond = 0) if self.tokenexpiration: self.tokenexpiration = self.tokenexpiration.replace(microsecond = 0) def toList(self): """ Simply returns a list of column values """ return [self.name, self.statustype, self.status, self.reason, self.dateeffective, self.tokenexpiration, self.elementtype, self.lastchecktime, self.tokenowner] class 
ElementStatusBaseWithID(ElementStatusBase): """ Prototype for tables This is almost the same as ElementStatusBase, with the following differences: - there's an autoincrement ID column which is also the primary key - the name and statusType components are not part of the primary key """ id = Column( 'ID', BigInteger, nullable = False, autoincrement= True, primary_key = True ) name = Column( 'Name', String( 64 ), nullable = False ) statustype = Column( 'StatusType', String( 128 ), nullable = False, server_default = 'all' ) status = Column( 'Status', String( 8 ), nullable = False, server_default = '' ) reason = Column( 'Reason', String( 512 ), nullable = False, server_default = 'Unspecified' ) dateeffective = Column( 'DateEffective', DateTime, nullable = False ) tokenexpiration = Column( 'TokenExpiration', DateTime, nullable = False , server_default = '9999-12-31 23:59:59' ) elementtype = Column( 'ElementType', String( 32 ), nullable = False, server_default = '' ) lastchecktime = Column( 'LastCheckTime', DateTime, nullable = False , server_default = '1000-01-01 00:00:00' ) tokenowner = Column( 'TokenOwner', String( 16 ), nullable = False , server_default = 'rs_svc') def fromDict( self, dictionary ): """ Fill the fields of the AccountingCache object from a dictionary :param dictionary: Dictionary to fill a single line :type arguments: dict """ self.id = dictionary.get( 'ID', self.id ) super(ElementStatusBaseWithID, self).fromDict(dictionary) def toList(self): """ Simply returns a list of column values """ return [self.id, self.name, self.statustype, self.status, self.reason, self.dateeffective, self.tokenexpiration, self.elementtype, self.lastchecktime, self.tokenowner] ### tables with schema defined in ElementStatusBase class SiteStatus(ElementStatusBase, rssBase): """ SiteStatus table """ __tablename__ = 'SiteStatus' class ResourceStatus(ElementStatusBase, rssBase): """ ResourceStatusDB table """ __tablename__ = 'ResourceStatus' class NodeStatus(ElementStatusBase, rssBase): """ NodeStatus table """ __tablename__ = 'NodeStatus' ### tables with schema defined in ElementStatusBaseWithID class SiteLog(ElementStatusBaseWithID, rssBase): """ SiteLog table """ __tablename__ = 'SiteLog' class SiteHistory(ElementStatusBaseWithID, rssBase): """ SiteHistory table """ __tablename__ = 'SiteHistory' class ResourceLog(ElementStatusBaseWithID, rssBase): """ ResourceLog table """ __tablename__ = 'ResourceLog' class ResourceHistory(ElementStatusBaseWithID, rssBase): """ ResourceHistory table """ __tablename__ = 'ResourceHistory' class NodeLog(ElementStatusBaseWithID, rssBase): """ NodeLog table """ __tablename__ = 'NodeLog' class NodeHistory(ElementStatusBaseWithID, rssBase): """ NodeHistory table """ __tablename__ = 'NodeHistory' ### Interaction with the DB class ResourceStatusDB( object ): ''' Class that defines the interactions with the tables of the ResourceStatusDB. ''' def __init__( self ): """c'tor :param self: self reference """ self.log = gLogger.getSubLogger( 'ResourceStatusDB' ) #These are the list of tables that will be created. 
#They can be extended in an extension module self.tablesList = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.DB.ResourceStatusDB' ), 'TABLESLIST') self.tablesListWithID = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.DB.ResourceStatusDB' ), 'TABLESLISTWITHID') self.extensions = gConfig.getValue( 'DIRAC/Extensions', [] ) self.__initializeConnection( 'ResourceStatus/ResourceStatusDB' ) self.__initializeDB() def __initializeConnection( self, dbPath ): """ Collect from the CS all the info needed to connect to the DB. This should be in a base class eventually """ result = getDBParameters( dbPath ) if not result[ 'OK' ]: raise Exception( 'Cannot get database parameters: %s' % result['Message'] ) dbParameters = result[ 'Value' ] self.log.debug("db parameters: %s" % dbParameters) self.host = dbParameters[ 'Host' ] self.port = dbParameters[ 'Port' ] self.user = dbParameters[ 'User' ] self.password = dbParameters[ 'Password' ] self.dbName = dbParameters[ 'DBName' ] self.engine = create_engine( 'mysql://%s:%s@%s:%s/%s' % ( self.user, self.password, self.host, self.port, self.dbName ), pool_recycle = 3600, echo_pool = True, echo = self.log.getLevel() == 'DEBUG') self.sessionMaker_o = sessionmaker( bind = self.engine ) self.inspector = Inspector.from_engine( self.engine ) def __initializeDB( self ): """ Create the tables, if they are not there yet """ tablesInDB = self.inspector.get_table_names() for table in self.tablesList: if table not in tablesInDB: found = False #is it in the extension? (fully or extended) for ext in gConfig.getValue( 'DIRAC/Extensions', [] ): try: getattr(__import__(ext + __name__, globals(), locals(), [table]), table).__table__.create( self.engine ) #pylint: disable=no-member found = True break except (ImportError, AttributeError): continue # If not found in extensions, import it from DIRAC base. if not found: getattr(__import__(__name__, globals(), locals(), [table]), table).__table__.create( self.engine ) #pylint: disable=no-member else: gLogger.debug( "Table %s already exists" %table ) for table in self.tablesListWithID: if table not in tablesInDB: found = False #is it in the extension? (fully or extended) for ext in gConfig.getValue( 'DIRAC/Extensions', [] ): try: getattr(__import__(ext + __name__, globals(), locals(), [table]), table).__table__.create( self.engine ) #pylint: disable=no-member found = True break except (ImportError, AttributeError): continue # If not found in extensions, import it from DIRAC base. if not found: getattr(__import__(__name__, globals(), locals(), [table]), table).__table__.create( self.engine ) #pylint: disable=no-member else: gLogger.debug( "Table %s already exists" %table ) # SQL Methods ############################################################### def insert( self, table, params ): ''' Inserts params in the DB. :param table: table where to insert :type table: str :param params: Dictionary to fill a single line :type params: dict :return: S_OK() || S_ERROR() ''' # expire_on_commit is set to False so that we can still use the object after we close the session session = self.sessionMaker_o( expire_on_commit = False ) #FIXME: should we use this flag elsewhere? found = False for ext in self.extensions: try: tableRow_o = getattr(__import__(ext + __name__, globals(), locals(), [table]), table)() found = True break except (ImportError, AttributeError): continue # If not found in extensions, import it from DIRAC base (this same module). 
if not found: tableRow_o = getattr(__import__(__name__, globals(), locals(), [table]), table)() if not params.get('DateEffective'): params['DateEffective'] = datetime.datetime.utcnow().replace(microsecond = 0) tableRow_o.fromDict(params) try: session.add(tableRow_o) session.commit() return S_OK() except exc.IntegrityError as err: self.log.warn("insert: trying to insert a duplicate key? %s" %err) session.rollback() except exc.SQLAlchemyError as e: session.rollback() self.log.exception( "insert: unexpected exception", lException = e ) return S_ERROR( "insert: unexpected exception %s" % e ) finally: session.close() def select( self, table, params ): ''' Uses params to build conditional SQL statement ( WHERE ... ). :Parameters: **params** - `dict` arguments for the mysql query ( must match table columns ! ). :return: S_OK() || S_ERROR() ''' session = self.sessionMaker_o() # finding the table found = False for ext in self.extensions: try: table_c = getattr(__import__(ext + __name__, globals(), locals(), [table]), table) found = True break except (ImportError, AttributeError): continue # If not found in extensions, import it from DIRAC base (this same module). if not found: table_c = getattr(__import__(__name__, globals(), locals(), [table]), table) # handling query conditions found in 'Meta' columnNames = [column.lower() for column in params.get('Meta', {}).get('columns', [])] older = params.get('Meta', {}).get('older', None) newer = params.get('Meta', {}).get('newer', None) order = params.get('Meta', {}).get('order', None) limit = params.get('Meta', {}).get('limit', None) params.pop('Meta', None) try: # setting up the select query if not columnNames: # query on the whole table wholeTable = True columns = table_c.__table__.columns # retrieve the column names columnNames = [str(column).split('.')[1] for column in columns] select = Query(table_c, session = session) else: # query only the selected columns wholeTable = False columns = [getattr(table_c, column) for column in columnNames] select = Query(columns, session = session) # query conditions for columnName, columnValue in params.iteritems(): if not columnValue: continue column_a = getattr(table_c, columnName.lower()) if isinstance(columnValue, (list, tuple)): select = select.filter(column_a.in_(list(columnValue))) elif isinstance(columnValue, (basestring, datetime.datetime, bool)): select = select.filter(column_a == columnValue) else: self.log.error("type(columnValue) == %s" %type(columnValue)) if older: column_a = getattr(table_c, older[0].lower()) select = select.filter(column_a < older[1]) if newer: column_a = getattr(table_c, newer[0].lower()) select = select.filter(column_a > newer[1]) if order: order = [order] if isinstance(order, basestring) else list(order) column_a = getattr(table_c, order[0].lower()) if len(order) == 2 and order[1].lower() == 'desc': select = select.order_by(desc(column_a)) else: select = select.order_by(column_a) if limit: select = select.limit(int(limit)) # querying selectionRes = select.all() # handling the results if wholeTable: selectionResToList = [res.toList() for res in selectionRes] else: selectionResToList = [[getattr(res, col) for col in columnNames] for res in selectionRes] finalResult = S_OK(selectionResToList) finalResult['Columns'] = columnNames return finalResult except exc.SQLAlchemyError as e: session.rollback() self.log.exception( "select: unexpected exception", lException = e ) return S_ERROR( "select: unexpected exception %s" % e ) finally: session.close() def delete( self, table, params ): """ 
:param table: table from where to delete :type table: str :param params: dictionary of which line(s) to delete :type params: dict :return: S_OK() || S_ERROR() """ session = self.sessionMaker_o() found = False for ext in self.extensions: try: table_c = getattr(__import__(ext + __name__, globals(), locals(), [table]), table) found = True break except (ImportError, AttributeError): continue # If not found in extensions, import it from DIRAC base (this same module). if not found: table_c = getattr(__import__(__name__, globals(), locals(), [table]), table) # handling query conditions found in 'Meta' older = params.get('Meta', {}).get('older', None) newer = params.get('Meta', {}).get('newer', None) order = params.get('Meta', {}).get('order', None) limit = params.get('Meta', {}).get('limit', None) params.pop('Meta', None) try: deleteQuery = Query(table_c, session = session) for columnName, columnValue in params.iteritems(): if not columnValue: continue column_a = getattr(table_c, columnName.lower()) if isinstance(columnValue, (list, tuple)): deleteQuery = deleteQuery.filter(column_a.in_(list(columnValue))) elif isinstance(columnValue, (basestring, datetime.datetime, bool) ): deleteQuery = deleteQuery.filter(column_a == columnValue) else: self.log.error("type(columnValue) == %s" %type(columnValue)) if older: column_a = getattr(table_c, older[0].lower()) deleteQuery = deleteQuery.filter(column_a < older[1]) if newer: column_a = getattr(table_c, newer[0].lower()) deleteQuery = deleteQuery.filter(column_a > newer[1]) if order: order = [order] if isinstance(order, basestring) else list(order) column_a = getattr(table_c, order[0].lower()) if len(order) == 2 and order[1].lower() == 'desc': deleteQuery = deleteQuery.order_by(desc(column_a)) else: deleteQuery = deleteQuery.order_by(column_a) if limit: deleteQuery = deleteQuery.limit(int(limit)) res = deleteQuery.delete(synchronize_session=False) #FIXME: unsure about it session.commit() return S_OK(res) except exc.SQLAlchemyError as e: session.rollback() self.log.exception( "delete: unexpected exception", lException = e ) return S_ERROR( "delete: unexpected exception %s" % e ) finally: session.close() ## Extended SQL methods ###################################################### def addOrModify( self, table, params ): ''' Using the PrimaryKeys of the table, it looks for the record in the database. If it is there, it is updated, if not, it is inserted as a new entry. :param table: table where to add or modify :type table: str :param params: dictionary of what to add or modify :type params: dict :return: S_OK() || S_ERROR() ''' session = self.sessionMaker_o() found = False for ext in self.extensions: try: table_c = getattr(__import__(ext + __name__, globals(), locals(), [table]), table) found = True break except (ImportError, AttributeError): continue # If not found in extensions, import it from DIRAC base (this same module). 
if not found: table_c = getattr(__import__(__name__, globals(), locals(), [table]), table) primaryKeys = [key.name for key in class_mapper(table_c).primary_key] try: select = Query(table_c, session = session) for columnName, columnValue in params.iteritems(): if not columnValue or columnName not in primaryKeys: continue column_a = getattr(table_c, columnName.lower()) if isinstance(columnValue, (list, tuple)): select = select.filter(column_a.in_(list(columnValue))) elif isinstance(columnValue, basestring): select = select.filter(column_a == columnValue) else: self.log.error("type(columnValue) == %s" %type(columnValue)) res = select.first() # the selection is done via primaryKeys only if not res: # if not there, let's insert it (and exit) return self.insert(table, params) # From now on, we assume we need to modify # Treating case of time value updates if not params.get('LastCheckTime'): params['LastCheckTime'] = None if not params.get('DateEffective'): params['DateEffective'] = None # Should we change DateEffective? changeDE = False if params.get('Status'): if params.get('Status') != res.status: # we update dateEffective iff we change the status changeDE = True for columnName, columnValue in params.iteritems(): if columnName == 'LastCheckTime' and not columnValue: # we always update lastCheckTime columnValue = datetime.datetime.utcnow().replace(microsecond = 0) if changeDE and columnName == 'DateEffective' and not columnValue: columnValue = datetime.datetime.utcnow().replace(microsecond = 0) if columnValue: if isinstance(columnValue, datetime.datetime): columnValue = columnValue.replace(microsecond = 0) setattr(res, columnName.lower(), columnValue) session.commit() # and since we modified, we now insert a new line in the log table return self.insert(table.replace('Status', '') + 'Log', params) # The line inserted will maybe become a History line thanks to the SummarizeLogsAgent except exc.SQLAlchemyError as e: session.rollback() self.log.exception( "addOrModify: unexpected exception", lException = e ) return S_ERROR( "addOrModify: unexpected exception %s" % e ) finally: session.close() def addIfNotThere( self, table, params ): ''' Using the PrimaryKeys of the table, it looks for the record in the database. If it is not there, it is inserted as a new entry. 
:param table: table where to add or modify :type table: str :param params: dictionary of what to add or modify :type params: dict :return: S_OK() || S_ERROR() ''' session = self.sessionMaker_o() table_c = getattr(__import__(__name__, globals(), locals(), [table]), table) primaryKeys = [key.name for key in class_mapper(table_c).primary_key] try: select = Query(table_c, session = session) for columnName, columnValue in params.iteritems(): if not columnValue or columnName not in primaryKeys: continue column_a = getattr(table_c, columnName.lower()) if isinstance(columnValue, (list, tuple)): select = select.filter(column_a.in_(list(columnValue))) elif isinstance(columnValue, basestring): select = select.filter(column_a == columnValue) else: self.log.error("type(columnValue) == %s" %type(columnValue)) res = select.first() # the selection is done via primaryKeys only if not res: # if not there, let's insert it return self.insert(table, params) session.commit() return S_OK() except exc.SQLAlchemyError as e: session.rollback() self.log.exception( "addIfNotThere: unexpected exception", lException = e ) return S_ERROR( "addIfNotThere: unexpected exception %s" % e ) finally: session.close() ################################################################################ #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
Andrew-McNab-UK/DIRAC
ResourceStatusSystem/DB/ResourceStatusDB.py
Python
gpl-3.0
23,417
[ "DIRAC" ]
e12fc84da539fb5ea452d86df8f587bcb0cc649964f0cd0572fd4052f150c005
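The central trick in the DIRAC module above is defining the shared columns once on a mixin class (ElementStatusBase) and deriving every concrete table from it, so declarative_base emits identical schemas under different table names. A minimal, self-contained sketch of that mixin pattern against an in-memory SQLite database; the demo table and values are invented for illustration.

```python
from sqlalchemy import Column, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class StatusMixin(object):
    """Shared schema, analogous to ElementStatusBase."""
    name = Column('Name', String(64), primary_key=True)
    status = Column('Status', String(8), nullable=False, server_default='')

class SiteStatusDemo(StatusMixin, Base):
    __tablename__ = 'SiteStatusDemo'

class ResourceStatusDemo(StatusMixin, Base):
    __tablename__ = 'ResourceStatusDemo'

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)          # both tables get the same columns
session = sessionmaker(bind=engine)()
session.add(SiteStatusDemo(name='CERN', status='Active'))
session.commit()
print(session.query(SiteStatusDemo.status).filter(SiteStatusDemo.name == 'CERN').all())
```

This is also why the extension mechanism works: an extension only needs to subclass the same mixin under a new `__tablename__` and append the name to TABLESLIST.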
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Variational Autoencoder example on binarized MNIST dataset.""" from typing import Iterator, Mapping, Tuple, NamedTuple, Sequence from absl import app from absl import flags from absl import logging import haiku as hk import jax import jax.numpy as jnp import numpy as np import optax import tensorflow_datasets as tfds flags.DEFINE_integer("batch_size", 128, "Size of the batch to train on.") flags.DEFINE_float("learning_rate", 0.001, "Learning rate for the optimizer.") flags.DEFINE_integer("training_steps", 5000, "Number of training steps to run.") flags.DEFINE_integer("eval_frequency", 100, "How often to evaluate the model.") flags.DEFINE_integer("random_seed", 42, "Random seed.") FLAGS = flags.FLAGS PRNGKey = jnp.ndarray Batch = Mapping[str, np.ndarray] MNIST_IMAGE_SHAPE: Sequence[int] = (28, 28, 1) def load_dataset(split: str, batch_size: int) -> Iterator[Batch]: ds = tfds.load("binarized_mnist", split=split, shuffle_files=True, read_config=tfds.ReadConfig(shuffle_seed=FLAGS.random_seed)) ds = ds.shuffle(buffer_size=10 * batch_size, seed=FLAGS.random_seed) ds = ds.batch(batch_size) ds = ds.prefetch(buffer_size=5) ds = ds.repeat() return iter(tfds.as_numpy(ds)) class Encoder(hk.Module): """Encoder model.""" def __init__(self, hidden_size: int = 512, latent_size: int = 10): super().__init__() self._hidden_size = hidden_size self._latent_size = latent_size def __call__(self, x: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]: x = hk.Flatten()(x) x = hk.Linear(self._hidden_size)(x) x = jax.nn.relu(x) mean = hk.Linear(self._latent_size)(x) log_stddev = hk.Linear(self._latent_size)(x) stddev = jnp.exp(log_stddev) return mean, stddev class Decoder(hk.Module): """Decoder model.""" def __init__( self, hidden_size: int = 512, output_shape: Sequence[int] = MNIST_IMAGE_SHAPE, ): super().__init__() self._hidden_size = hidden_size self._output_shape = output_shape def __call__(self, z: jnp.ndarray) -> jnp.ndarray: z = hk.Linear(self._hidden_size)(z) z = jax.nn.relu(z) logits = hk.Linear(np.prod(self._output_shape))(z) logits = jnp.reshape(logits, (-1, *self._output_shape)) return logits class VAEOutput(NamedTuple): image: jnp.ndarray mean: jnp.ndarray stddev: jnp.ndarray logits: jnp.ndarray class VariationalAutoEncoder(hk.Module): """Main VAE model class, uses Encoder & Decoder under the hood.""" def __init__( self, hidden_size: int = 512, latent_size: int = 10, output_shape: Sequence[int] = MNIST_IMAGE_SHAPE, ): super().__init__() self._hidden_size = hidden_size self._latent_size = latent_size self._output_shape = output_shape def __call__(self, x: jnp.ndarray) -> VAEOutput: x = x.astype(jnp.float32) mean, stddev = Encoder(self._hidden_size, self._latent_size)(x) z = mean + stddev * jax.random.normal(hk.next_rng_key(), mean.shape) logits = Decoder(self._hidden_size, self._output_shape)(z) p = jax.nn.sigmoid(logits) 
image = jax.random.bernoulli(hk.next_rng_key(), p) return VAEOutput(image, mean, stddev, logits) def binary_cross_entropy(x: jnp.ndarray, logits: jnp.ndarray) -> jnp.ndarray: """Calculate binary (logistic) cross-entropy from distribution logits. Args: x: input variable tensor, must be of same shape as logits logits: log odds of a Bernoulli distribution, i.e. log(p/(1-p)) Returns: A scalar representing binary CE for the given Bernoulli distribution. """ if x.shape != logits.shape: raise ValueError("inputs x and logits must be of the same shape") x = jnp.reshape(x, (x.shape[0], -1)) logits = jnp.reshape(logits, (logits.shape[0], -1)) return -jnp.sum(x * logits - jnp.logaddexp(0.0, logits), axis=-1) def kl_gaussian(mean: jnp.ndarray, var: jnp.ndarray) -> jnp.ndarray: r"""Calculate KL divergence between given and standard gaussian distributions. KL(p, q) = H(p, q) - H(p) = -\int p(x)log(q(x))dx - -\int p(x)log(p(x))dx = 0.5 * [log(|s2|/|s1|) - 1 + tr(s1/s2) + (m1-m2)^2/s2] = 0.5 * [-log(|s1|) - 1 + tr(s1) + m1^2] (if m2 = 0, s2 = 1) Args: mean: mean vector of the first distribution var: diagonal vector of covariance matrix of the first distribution Returns: A scalar representing KL divergence of the two Gaussian distributions. """ return 0.5 * jnp.sum(-jnp.log(var) - 1.0 + var + jnp.square(mean), axis=-1) def main(_): FLAGS.alsologtostderr = True model = hk.transform(lambda x: VariationalAutoEncoder()(x)) # pylint: disable=unnecessary-lambda optimizer = optax.adam(FLAGS.learning_rate) @jax.jit def loss_fn(params: hk.Params, rng_key: PRNGKey, batch: Batch) -> jnp.ndarray: """ELBO loss: E_p[log(x)] - KL(d||q), where p ~ Be(0.5) and q ~ N(0,1).""" outputs: VAEOutput = model.apply(params, rng_key, batch["image"]) log_likelihood = -binary_cross_entropy(batch["image"], outputs.logits) kl = kl_gaussian(outputs.mean, jnp.square(outputs.stddev)) elbo = log_likelihood - kl return -jnp.mean(elbo) @jax.jit def update( params: hk.Params, rng_key: PRNGKey, opt_state: optax.OptState, batch: Batch, ) -> Tuple[hk.Params, optax.OptState]: """Single SGD update step.""" grads = jax.grad(loss_fn)(params, rng_key, batch) updates, new_opt_state = optimizer.update(grads, opt_state) new_params = optax.apply_updates(params, updates) return new_params, new_opt_state rng_seq = hk.PRNGSequence(FLAGS.random_seed) params = model.init(next(rng_seq), np.zeros((1, *MNIST_IMAGE_SHAPE))) opt_state = optimizer.init(params) train_ds = load_dataset(tfds.Split.TRAIN, FLAGS.batch_size) valid_ds = load_dataset(tfds.Split.TEST, FLAGS.batch_size) for step in range(FLAGS.training_steps): params, opt_state = update(params, next(rng_seq), opt_state, next(train_ds)) if step % FLAGS.eval_frequency == 0: val_loss = loss_fn(params, next(rng_seq), next(valid_ds)) logging.info("STEP: %5d; Validation ELBO: %.3f", step, -val_loss) if __name__ == "__main__": app.run(main)
deepmind/dm-haiku
examples/vae.py
Python
apache-2.0
6,886
[ "Gaussian" ]
04d385b381d8393c3872400c991a697c483593da5d5b454f0029bdc31ccb48cb
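kl_gaussian in the VAE example above implements the closed-form KL divergence between a diagonal Gaussian and the standard normal, KL = 0.5 * sum(-log var - 1 + var + mean^2). A quick NumPy sanity check of that formula against a Monte Carlo estimate, with arbitrarily chosen parameters:

```python
import numpy as np

def kl_gaussian(mean, var):
    # closed-form KL(N(mean, var) || N(0, 1)), matching the formula above
    return 0.5 * np.sum(-np.log(var) - 1.0 + var + np.square(mean), axis=-1)

rng = np.random.default_rng(0)
mean, var = np.array([0.5, -1.0]), np.array([0.8, 1.5])
z = mean + np.sqrt(var) * rng.standard_normal((100000, 2))
log_q = -0.5 * np.sum(np.log(2 * np.pi * var) + (z - mean) ** 2 / var, axis=-1)
log_p = -0.5 * np.sum(np.log(2 * np.pi) + z ** 2, axis=-1)
print(kl_gaussian(mean, var))     # analytic: ~0.684
print(np.mean(log_q - log_p))     # Monte Carlo estimate, should agree closely
```

The same sampling step (mean plus stddev times standard normal noise) is the reparameterization trick used in VariationalAutoEncoder.__call__, which is what makes the ELBO differentiable with respect to the encoder parameters.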
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2018 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This file is part of Psi4. # # Psi4 is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, version 3. # # Psi4 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License along # with Psi4; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # r"""File for accessory procedures in the chem module. Credit for the libmints vector3 class to Justin M. Turney and incremental improvements by other psi4 developers. Vectors that use these functions are overwhelmingly of length 3, so pure python instead of NumPy is the right choice efficiency-wise. """ from __future__ import absolute_import from __future__ import print_function from __future__ import division import copy import math from .exceptions import * ZERO = 1.0E-14 def norm(v): """Compute the magnitude of vector *v*.""" return math.sqrt(sum(v[i] * v[i] for i in range(len(v)))) def add(v, u): """Compute sum of vectors *v* and *u*.""" return [u[i] + v[i] for i in range(len(v))] def sub(v, u): """Compute difference of vectors *v* - *u*.""" return [v[i] - u[i] for i in range(len(v))] def dot(v, u): """Compute dot product of vectors *v* and *u*.""" return sum(u[i] * v[i] for i in range(len(v))) def scale(v, d): """Compute by-element scale by *d* of vector *v*.""" return [d * v[i] for i in range(len(v))] def naivemult(v, u): """Compute by-element multiplication of vectors *v* and *u*.""" if len(u) != len(v): raise ValidationError('naivemult() only defined for vectors of same length \n') return [u[i] * v[i] for i in range(len(v))] def normalize(v): """Compute normalized vector *v*.""" vmag = norm(v) return [v[i] / vmag for i in range(len(v))] def distance(v, u): """Compute the distance between points defined by vectors *v* and *u*.""" return math.sqrt(sum(((v[i] - u[i]) * (v[i] - u[i]) for i in range(len(v))))) def cross(v, u): """Compute cross product of length 3 vectors *v* and *u*.""" if len(u) != 3 or len(v) != 3: raise ValidationError('cross() only defined for vectors of length 3\n') return [v[1] * u[2] - v[2] * u[1], v[2] * u[0] - v[0] * u[2], v[0] * u[1] - v[1] * u[0]] # yapf: disable def rotate(v, theta, axis): """Rotate length 3 vector *v* about *axis* by *theta* radians.""" if len(v) != 3 or len(axis) != 3: raise ValidationError('rotate() only defined for vectors of length 3\n') unitaxis = normalize(copy.deepcopy(axis)) # split into parallel and perpendicular components along axis parallel = scale(axis, dot(v, axis) / dot(axis, axis)) perpendicular = sub(v, parallel) # form unit vector perpendicular to parallel and perpendicular third_axis = perp_unit(axis, perpendicular) third_axis = scale(third_axis, norm(perpendicular)) result = add(parallel, add(scale(perpendicular, math.cos(theta)), scale(third_axis, math.sin(theta)))) for item in range(len(result)): if math.fabs(result[item]) < ZERO: result[item] = 0.0 return result def perp_unit(u, v): """Compute unit vector perpendicular to 
length 3 vectors *u* and *v*.""" if len(u) != 3 or len(v) != 3: raise ValidationError('perp_unit() only defined for vectors of length 3\n') # try cross product result = cross(u, v) resultdotresult = dot(result, result) if resultdotresult < 1.E-16: # cross product is too small to normalize # find the largest of this and v dotprodt = dot(u, u) dotprodv = dot(v, v) if dotprodt < dotprodv: d = copy.deepcopy(v) dotprodd = dotprodv else: d = copy.deepcopy(u) dotprodd = dotprodt # see if d is big enough if dotprodd < 1.e-16: # choose an arbitrary vector, since the biggest vector is small result = [1.0, 0.0, 0.0] return result else: # choose a vector perpendicular to d # choose it in one of the planes xy, xz, yz # choose the plane to be that which contains the two largest components of d absd = [math.fabs(d[0]), math.fabs(d[1]), math.fabs(d[2])] if (absd[1] - absd[0]) > 1.0e-12: #if absd[0] < absd[1]: axis0 = 1 if (absd[2] - absd[0]) > 1.0e-12: #if absd[0] < absd[2]: axis1 = 2 else: axis1 = 0 else: axis0 = 0 if (absd[2] - absd[1]) > 1.0e-12: #if absd[1] < absd[2]: axis1 = 2 else: axis1 = 1 result = [0.0, 0.0, 0.0] # do the pi/2 rotation in the plane result[axis0] = d[axis1] result[axis1] = -1.0 * d[axis0] result = normalize(result) return result else: # normalize the cross product and return the result result = scale(result, 1.0 / math.sqrt(resultdotresult)) return result def determinant(mat): """Given 3x3 matrix *mat*, compute the determinat """ if len(mat) != 3 or len(mat[0]) != 3 or len(mat[1]) != 3 or len(mat[2]) != 3: raise ValidationError('determinant() only defined for arrays of dimension 3x3\n') det = mat[0][0] * mat[1][1] * mat[2][2] - mat[0][2] * mat[1][1] * mat[2][0] + \ mat[0][1] * mat[1][2] * mat[2][0] - mat[0][1] * mat[1][0] * mat[2][2] + \ mat[0][2] * mat[1][0] * mat[2][1] - mat[0][0] * mat[1][2] * mat[2][1] return det def diagonalize3x3symmat(M): """Given an real symmetric 3x3 matrix *M*, compute the eigenvalues """ if len(M) != 3 or len(M[0]) != 3 or len(M[1]) != 3 or len(M[2]) != 3: raise ValidationError('diagonalize3x3symmat() only defined for arrays of dimension 3x3\n') A = copy.deepcopy(M) # Symmetric input matrix Q = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] # Storage buffer for eigenvectors w = [A[0][0], A[1][1], A[2][2]] # Storage buffer for eigenvalues # sd, so # Sums of diagonal resp. 
off-diagonal elements # s, c, t # sin(phi), cos(phi), tan(phi) and temporary storage # g, h, z, theta # More temporary storage # Calculate SQR(tr(A)) sd = 0.0 for i in range(3): sd += math.fabs(w[i]) sd = sd * sd # Main iteration loop for nIter in range(50): # Test for convergence so = 0.0 for p in range(3): for q in range(p + 1, 3): so += math.fabs(A[p][q]) if so == 0.0: return w, Q # return eval, evec if nIter < 4: thresh = 0.2 * so / (3 * 3) else: thresh = 0.0 # Do sweep for p in range(3): for q in range(p + 1, 3): g = 100.0 * math.fabs(A[p][q]) if nIter > 4 and (math.fabs(w[p]) + g == math.fabs(w[p])) and \ (math.fabs(w[q]) + g == math.fabs(w[q])): A[p][q] = 0.0 elif math.fabs(A[p][q]) > thresh: # Calculate Jacobi transformation h = w[q] - w[p] if math.fabs(h) + g == math.fabs(h): t = A[p][q] / h else: theta = 0.5 * h / A[p][q] if theta < 0.0: t = -1.0 / (math.sqrt(1.0 + theta * theta) - theta) else: t = 1.0 / (math.sqrt(1.0 + theta * theta) + theta) c = 1.0 / math.sqrt(1.0 + t * t) s = t * c z = t * A[p][q] # Apply Jacobi transformation A[p][q] = 0.0 w[p] -= z w[q] += z for r in range(p): t = A[r][p] A[r][p] = c * t - s * A[r][q] A[r][q] = s * t + c * A[r][q] for r in range(p + 1, q): t = A[p][r] A[p][r] = c * t - s * A[r][q] A[r][q] = s * t + c * A[r][q] for r in range(q + 1, 3): t = A[p][r] A[p][r] = c * t - s * A[q][r] A[q][r] = s * t + c * A[q][r] # Update eigenvectors for r in range(3): t = Q[r][p] Q[r][p] = c * t - s * Q[r][q] Q[r][q] = s * t + c * Q[r][q] return None def zero(m, n): """ Create zero matrix""" new_matrix = [[0 for row in range(n)] for col in range(m)] return new_matrix def identity(m): """Create identity matrix""" new_matrix = zero(m, m) for i in range(m): new_matrix[i][i] = 1.0 return new_matrix def show(matrix): """ Print out matrix""" for col in matrix: print(col) def mscale(matrix, d): """Return *matrix* scaled by scalar *d*""" for i in range(len(matrix)): for j in range(len(matrix[0])): matrix[i][j] *= d return matrix def mult(matrix1, matrix2): """ Matrix multiplication""" if len(matrix1[0]) != len(matrix2): # Check matrix dimensions raise ValidationError('Matrices must be m*n and n*p to multiply!') else: # Multiply if correct dimensions try: new_matrix = zero(len(matrix1), len(matrix2[0])) for i in range(len(matrix1)): for j in range(len(matrix2[0])): for k in range(len(matrix2)): new_matrix[i][j] += matrix1[i][k] * matrix2[k][j] except TypeError: new_matrix = zero(len(matrix1), 1) for i in range(len(matrix1)): for k in range(len(matrix2)): new_matrix[i][0] += matrix1[i][k] * matrix2[k] return new_matrix def transpose(matrix): """Return matrix transpose""" if len(matrix[0]) != len(matrix): # Check matrix dimensions raise ValidationError('Matrices must be square.') tmat = [list(i) for i in zip(*matrix)] return tmat def matadd(matrix1, matrix2, fac1=1.0, fac2=1.0): """Matrix addition""" if (len(matrix1[0]) != len(matrix2[0])) or (len(matrix1) != len(matrix2)): raise ValidationError('Matrices must be same dimension to add.') new_matrix = zero(len(matrix1), len(matrix1[0])) for i in range(len(matrix1)): for j in range(len(matrix1[0])): new_matrix[i][j] = fac1 * matrix1[i][j] + fac2 * matrix2[i][j] return new_matrix
amjames/psi4
psi4/driver/qcdb/vecutil.py
Python
lgpl-3.0
11,468
[ "Psi4" ]
e1b4fc95e4236dc3c6202d63ff8242167c7b323965c99a46fbb34dac61b91d6a
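rotate() in the Psi4 vector utilities above works by splitting the input vector into components parallel and perpendicular to the axis and rotating the perpendicular part within its plane. Below is a condensed sketch of the same decomposition that uses the cross product directly in place of the perp_unit helper, with a spot check that rotating the x unit vector about z by pi/2 gives the y unit vector:

```python
import math

def dot(u, v): return sum(a * b for a, b in zip(u, v))
def scale(v, d): return [d * x for x in v]
def add(u, v): return [a + b for a, b in zip(u, v)]
def sub(u, v): return [a - b for a, b in zip(u, v)]
def cross(u, v):
    return [u[1] * v[2] - u[2] * v[1],
            u[2] * v[0] - u[0] * v[2],
            u[0] * v[1] - u[1] * v[0]]

def rotate(v, theta, axis):
    # the parallel component is untouched; the perpendicular part rotates in
    # the plane spanned by itself and (axis x perpendicular) / |axis|
    par = scale(axis, dot(v, axis) / dot(axis, axis))
    perp = sub(v, par)
    third = scale(cross(axis, perp), 1.0 / math.sqrt(dot(axis, axis)))  # |third| == |perp|
    return add(par, add(scale(perp, math.cos(theta)), scale(third, math.sin(theta))))

print(rotate([1.0, 0.0, 0.0], math.pi / 2, [0.0, 0.0, 1.0]))  # ~[0.0, 1.0, 0.0]
```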
# # ColorHandPose3DNetwork - Network for estimating 3D Hand Pose from a single RGB Image # Copyright (C) 2017 Christian Zimmermann # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import print_function, unicode_literals import os import tensorflow as tf from general import crop_image_from_xy from canonical_trafo import canonical_trafo, flip_right_hand from relative_trafo import bone_rel_trafo class BinaryDbReader(object): """ Reads data from a binary dataset created by create_binary_db.py """ def __init__(self, mode=None, batch_size=1, shuffle=True, use_wrist_coord=True, sigma=25.0, hand_crop=False, random_crop_to_size=False, scale_to_size=False, hue_aug=False, coord_uv_noise=False, crop_center_noise=False, crop_scale_noise=False, crop_offset_noise=False, scoremap_dropout=False): """ Inputs: mode: string, Indicates which binary file to read. Can be 'training' or 'evaluation' batch_size: int, Number of samples forming a batch shuffle: boolean, If true samples of binary file are shuffled while reading use_wrist_coord: boolean, When true keypoint #0 is the wrist, palm center otherwise hand_crop: boolean, When true calculates a tight hand crop using the gt keypoint annotations Updates/Sets the following output items: image_crop, crop_scale, scoremap, cam_mat, keypoint_uv21 sigma: float, Size of the ground truth scoremaps random_crop_to_size: boolean, Takes randomly sampled crops from the image & the mask scale_to_size: boolean, Scales down image and keypoints hue_aug: boolean, Random hue augmentation coord_uv_noise: boolean, Adds some gaussian noise on the gt uv coordinate crop_center_noise: boolean, Adds gaussian noise on the hand crop center location (all keypoints still lie within the crop) crop_scale_noise: boolean, Adds gaussian noise on the hand crop size crop_offset_noise: boolean, Offsets the hand crop center randomly (keypoints can lie outside the crop) scoremap_dropout: boolean, Randomly drop scoremap channels """ self.path_to_db = './data/bin/' self.num_samples = 0 if mode == 'training': self.path_to_db += 'rhd_training.bin' self.num_samples = 41258 elif mode == 'evaluation': self.path_to_db += 'rhd_evaluation.bin' self.num_samples = 2728 else: assert 0, "Unknown dataset mode." assert os.path.exists(self.path_to_db), "Could not find the binary data file!" 
# general parameters self.batch_size = batch_size self.sigma = sigma self.shuffle = shuffle self.use_wrist_coord = use_wrist_coord self.random_crop_to_size = random_crop_to_size self.random_crop_size = 256 self.scale_to_size = scale_to_size self.scale_target_size = (240, 320) # size its scaled down to if scale_to_size=True # data augmentation parameters self.hue_aug = hue_aug self.hue_aug_max = 0.1 self.hand_crop = hand_crop self.coord_uv_noise = coord_uv_noise self.coord_uv_noise_sigma = 2.5 # std dev in px of noise on the uv coordinates self.crop_center_noise = crop_center_noise self.crop_center_noise_sigma = 20.0 # std dev in px: this moves what is in the "center", but the crop always contains all keypoints self.crop_scale_noise = crop_scale_noise self.crop_offset_noise = crop_offset_noise self.crop_offset_noise_sigma = 10.0 # translates the crop after size calculation (this can move keypoints outside) self.scoremap_dropout = scoremap_dropout self.scoremap_dropout_prob = 0.8 # these are constants of the dataset and therefore must not be changed self.image_size = (320, 320) self.crop_size = 256 self.num_kp = 42 def get(self): """ Provides input data to the graph. """ # calculate size of each record (this lists what is contained in the db and how many bytes are occupied) record_bytes = 2 encoding_bytes = 4 kp_xyz_entries = 3 * self.num_kp record_bytes += encoding_bytes*kp_xyz_entries encoding_bytes = 4 kp_uv_entries = 2 * self.num_kp record_bytes += encoding_bytes*kp_uv_entries cam_matrix_entries = 9 record_bytes += encoding_bytes*cam_matrix_entries image_bytes = self.image_size[0] * self.image_size[1] * 3 record_bytes += image_bytes hand_parts_bytes = self.image_size[0] * self.image_size[1] record_bytes += hand_parts_bytes kp_vis_bytes = self.num_kp record_bytes += kp_vis_bytes """ READ DATA ITEMS""" # Start reader reader = tf.FixedLengthRecordReader(header_bytes=0, record_bytes=record_bytes) _, value = reader.read(tf.train.string_input_producer([self.path_to_db])) # decode to floats bytes_read = 0 data_dict = dict() record_bytes_float32 = tf.decode_raw(value, tf.float32) # 1. Read keypoint xyz keypoint_xyz = tf.reshape(tf.slice(record_bytes_float32, [bytes_read//4], [kp_xyz_entries]), [self.num_kp, 3]) bytes_read += encoding_bytes*kp_xyz_entries # calculate palm coord if not self.use_wrist_coord: palm_coord_l = tf.expand_dims(0.5*(keypoint_xyz[0, :] + keypoint_xyz[12, :]), 0) palm_coord_r = tf.expand_dims(0.5*(keypoint_xyz[21, :] + keypoint_xyz[33, :]), 0) keypoint_xyz = tf.concat([palm_coord_l, keypoint_xyz[1:21, :], palm_coord_r, keypoint_xyz[-20:, :]], 0) data_dict['keypoint_xyz'] = keypoint_xyz # 2. Read keypoint uv keypoint_uv = tf.cast(tf.reshape(tf.slice(record_bytes_float32, [bytes_read//4], [kp_uv_entries]), [self.num_kp, 2]), tf.int32) bytes_read += encoding_bytes*kp_uv_entries keypoint_uv = tf.cast(keypoint_uv, tf.float32) # calculate palm coord if not self.use_wrist_coord: palm_coord_uv_l = tf.expand_dims(0.5*(keypoint_uv[0, :] + keypoint_uv[12, :]), 0) palm_coord_uv_r = tf.expand_dims(0.5*(keypoint_uv[21, :] + keypoint_uv[33, :]), 0) keypoint_uv = tf.concat([palm_coord_uv_l, keypoint_uv[1:21, :], palm_coord_uv_r, keypoint_uv[-20:, :]], 0) if self.coord_uv_noise: noise = tf.truncated_normal([42, 2], mean=0.0, stddev=self.coord_uv_noise_sigma) keypoint_uv += noise data_dict['keypoint_uv'] = keypoint_uv # 3. 
Camera intrinsics cam_mat = tf.reshape(tf.slice(record_bytes_float32, [bytes_read//4], [cam_matrix_entries]), [3, 3]) bytes_read += encoding_bytes*cam_matrix_entries data_dict['cam_mat'] = cam_mat # decode to uint8 bytes_read += 2 record_bytes_uint8 = tf.decode_raw(value, tf.uint8) # 4. Read image image = tf.reshape(tf.slice(record_bytes_uint8, [bytes_read], [image_bytes]), [self.image_size[0], self.image_size[1], 3]) image = tf.cast(image, tf.float32) bytes_read += image_bytes # subtract mean image = image / 255.0 - 0.5 if self.hue_aug: image = tf.image.random_hue(image, self.hue_aug_max) data_dict['image'] = image # 5. Read mask hand_parts_mask = tf.reshape(tf.slice(record_bytes_uint8, [bytes_read], [hand_parts_bytes]), [self.image_size[0], self.image_size[1]]) hand_parts_mask = tf.cast(hand_parts_mask, tf.int32) bytes_read += hand_parts_bytes data_dict['hand_parts'] = hand_parts_mask hand_mask = tf.greater(hand_parts_mask, 1) bg_mask = tf.logical_not(hand_mask) data_dict['hand_mask'] = tf.cast(tf.stack([bg_mask, hand_mask], 2), tf.int32) # 6. Read visibilty keypoint_vis = tf.reshape(tf.slice(record_bytes_uint8, [bytes_read], [kp_vis_bytes]), [self.num_kp]) keypoint_vis = tf.cast(keypoint_vis, tf.bool) bytes_read += kp_vis_bytes # calculate palm visibility if not self.use_wrist_coord: palm_vis_l = tf.expand_dims(tf.logical_or(keypoint_vis[0], keypoint_vis[12]), 0) palm_vis_r = tf.expand_dims(tf.logical_or(keypoint_vis[21], keypoint_vis[33]), 0) keypoint_vis = tf.concat([palm_vis_l, keypoint_vis[1:21], palm_vis_r, keypoint_vis[-20:]], 0) data_dict['keypoint_vis'] = keypoint_vis assert bytes_read == record_bytes, "Doesnt add up." """ DEPENDENT DATA ITEMS: SUBSET of 21 keypoints""" # figure out dominant hand by analysis of the segmentation mask one_map, zero_map = tf.ones_like(hand_parts_mask), tf.zeros_like(hand_parts_mask) cond_l = tf.logical_and(tf.greater(hand_parts_mask, one_map), tf.less(hand_parts_mask, one_map*18)) cond_r = tf.greater(hand_parts_mask, one_map*17) hand_map_l = tf.where(cond_l, one_map, zero_map) hand_map_r = tf.where(cond_r, one_map, zero_map) num_px_left_hand = tf.reduce_sum(hand_map_l) num_px_right_hand = tf.reduce_sum(hand_map_r) # PRODUCE the 21 subset using the segmentation masks # We only deal with the more prominent hand for each frame and discard the second set of keypoints kp_coord_xyz_left = keypoint_xyz[:21, :] kp_coord_xyz_right = keypoint_xyz[-21:, :] cond_left = tf.logical_and(tf.cast(tf.ones_like(kp_coord_xyz_left), tf.bool), tf.greater(num_px_left_hand, num_px_right_hand)) kp_coord_xyz21 = tf.where(cond_left, kp_coord_xyz_left, kp_coord_xyz_right) hand_side = tf.where(tf.greater(num_px_left_hand, num_px_right_hand), tf.constant(0, dtype=tf.int32), tf.constant(1, dtype=tf.int32)) # left hand = 0; right hand = 1 data_dict['hand_side'] = tf.one_hot(hand_side, depth=2, on_value=1.0, off_value=0.0, dtype=tf.float32) data_dict['keypoint_xyz21'] = kp_coord_xyz21 # make coords relative to root joint kp_coord_xyz_root = kp_coord_xyz21[0, :] # this is the palm coord kp_coord_xyz21_rel = kp_coord_xyz21 - kp_coord_xyz_root # relative coords in metric coords index_root_bone_length = tf.sqrt(tf.reduce_sum(tf.square(kp_coord_xyz21_rel[12, :] - kp_coord_xyz21_rel[11, :]))) data_dict['keypoint_scale'] = index_root_bone_length data_dict['keypoint_xyz21_normed'] = kp_coord_xyz21_rel / index_root_bone_length # normalized by length of 12->11 # calculate local coordinates kp_coord_xyz21_local = bone_rel_trafo(data_dict['keypoint_xyz21_normed']) kp_coord_xyz21_local = 
tf.squeeze(kp_coord_xyz21_local) data_dict['keypoint_xyz21_local'] = kp_coord_xyz21_local # calculate viewpoint and coords in canonical coordinates kp_coord_xyz21_rel_can, rot_mat = canonical_trafo(data_dict['keypoint_xyz21_normed']) kp_coord_xyz21_rel_can, rot_mat = tf.squeeze(kp_coord_xyz21_rel_can), tf.squeeze(rot_mat) kp_coord_xyz21_rel_can = flip_right_hand(kp_coord_xyz21_rel_can, tf.logical_not(cond_left)) data_dict['keypoint_xyz21_can'] = kp_coord_xyz21_rel_can data_dict['rot_mat'] = tf.matrix_inverse(rot_mat) # Set of 21 for visibility keypoint_vis_left = keypoint_vis[:21] keypoint_vis_right = keypoint_vis[-21:] keypoint_vis21 = tf.where(cond_left[:, 0], keypoint_vis_left, keypoint_vis_right) data_dict['keypoint_vis21'] = keypoint_vis21 # Set of 21 for UV coordinates keypoint_uv_left = keypoint_uv[:21, :] keypoint_uv_right = keypoint_uv[-21:, :] keypoint_uv21 = tf.where(cond_left[:, :2], keypoint_uv_left, keypoint_uv_right) data_dict['keypoint_uv21'] = keypoint_uv21 """ DEPENDENT DATA ITEMS: HAND CROP """ if self.hand_crop: crop_center = keypoint_uv21[12, ::-1] # catch problem, when no valid kp available (happens almost never) crop_center = tf.cond(tf.reduce_all(tf.is_finite(crop_center)), lambda: crop_center, lambda: tf.constant([0.0, 0.0])) crop_center.set_shape([2, ]) if self.crop_center_noise: noise = tf.truncated_normal([2], mean=0.0, stddev=self.crop_center_noise_sigma) crop_center += noise crop_scale_noise = tf.constant(1.0) if self.crop_scale_noise: crop_scale_noise = tf.squeeze(tf.random_uniform([1], minval=1.0, maxval=1.2)) # select visible coords only kp_coord_h = tf.boolean_mask(keypoint_uv21[:, 1], keypoint_vis21) kp_coord_w = tf.boolean_mask(keypoint_uv21[:, 0], keypoint_vis21) kp_coord_hw = tf.stack([kp_coord_h, kp_coord_w], 1) # determine size of crop (measure spatial extend of hw coords first) min_coord = tf.maximum(tf.reduce_min(kp_coord_hw, 0), 0.0) max_coord = tf.minimum(tf.reduce_max(kp_coord_hw, 0), self.image_size) # find out larger distance wrt the center of crop crop_size_best = 2*tf.maximum(max_coord - crop_center, crop_center - min_coord) crop_size_best = tf.reduce_max(crop_size_best) crop_size_best = tf.minimum(tf.maximum(crop_size_best, 50.0), 500.0) # catch problem, when no valid kp available crop_size_best = tf.cond(tf.reduce_all(tf.is_finite(crop_size_best)), lambda: crop_size_best, lambda: tf.constant(200.0)) crop_size_best.set_shape([]) # calculate necessary scaling scale = tf.cast(self.crop_size, tf.float32) / crop_size_best scale = tf.minimum(tf.maximum(scale, 1.0), 10.0) scale *= crop_scale_noise data_dict['crop_scale'] = scale if self.crop_offset_noise: noise = tf.truncated_normal([2], mean=0.0, stddev=self.crop_offset_noise_sigma) crop_center += noise # Crop image img_crop = crop_image_from_xy(tf.expand_dims(image, 0), crop_center, self.crop_size, scale) data_dict['image_crop'] = tf.squeeze(img_crop) # Modify uv21 coordinates crop_center_float = tf.cast(crop_center, tf.float32) keypoint_uv21_u = (keypoint_uv21[:, 0] - crop_center_float[1]) * scale + self.crop_size // 2 keypoint_uv21_v = (keypoint_uv21[:, 1] - crop_center_float[0]) * scale + self.crop_size // 2 keypoint_uv21 = tf.stack([keypoint_uv21_u, keypoint_uv21_v], 1) data_dict['keypoint_uv21'] = keypoint_uv21 # Modify camera intrinsics scale = tf.reshape(scale, [1, ]) scale_matrix = tf.dynamic_stitch([[0], [1], [2], [3], [4], [5], [6], [7], [8]], [scale, [0.0], [0.0], [0.0], scale, [0.0], [0.0], [0.0], [1.0]]) scale_matrix = tf.reshape(scale_matrix, [3, 3]) crop_center_float = 
tf.cast(crop_center, tf.float32) trans1 = crop_center_float[0] * scale - self.crop_size // 2 trans2 = crop_center_float[1] * scale - self.crop_size // 2 trans1 = tf.reshape(trans1, [1, ]) trans2 = tf.reshape(trans2, [1, ]) trans_matrix = tf.dynamic_stitch([[0], [1], [2], [3], [4], [5], [6], [7], [8]], [[1.0], [0.0], -trans2, [0.0], [1.0], -trans1, [0.0], [0.0], [1.0]]) trans_matrix = tf.reshape(trans_matrix, [3, 3]) data_dict['cam_mat'] = tf.matmul(trans_matrix, tf.matmul(scale_matrix, cam_mat)) """ DEPENDENT DATA ITEMS: Scoremap from the SUBSET of 21 keypoints""" # create scoremaps from the subset of 2D annoataion keypoint_hw21 = tf.stack([keypoint_uv21[:, 1], keypoint_uv21[:, 0]], -1) scoremap_size = self.image_size if self.hand_crop: scoremap_size = (self.crop_size, self.crop_size) scoremap = self.create_multiple_gaussian_map(keypoint_hw21, scoremap_size, self.sigma, valid_vec=keypoint_vis21) if self.scoremap_dropout: scoremap = tf.nn.dropout(scoremap, self.scoremap_dropout_prob, noise_shape=[1, 1, 21]) scoremap *= self.scoremap_dropout_prob data_dict['scoremap'] = scoremap if self.scale_to_size: image, keypoint_uv21, keypoint_vis21 = data_dict['image'], data_dict['keypoint_uv21'], data_dict['keypoint_vis21'] s = image.get_shape().as_list() image = tf.image.resize_images(image, self.scale_target_size) scale = (self.scale_target_size[0]/float(s[0]), self.scale_target_size[1]/float(s[1])) keypoint_uv21 = tf.stack([keypoint_uv21[:, 0] * scale[1], keypoint_uv21[:, 1] * scale[0]], 1) data_dict = dict() # delete everything else because the scaling makes the data invalid anyway data_dict['image'] = image data_dict['keypoint_uv21'] = keypoint_uv21 data_dict['keypoint_vis21'] = keypoint_vis21 elif self.random_crop_to_size: tensor_stack = tf.concat([data_dict['image'], tf.expand_dims(tf.cast(data_dict['hand_parts'], tf.float32), -1), tf.cast(data_dict['hand_mask'], tf.float32)], 2) s = tensor_stack.get_shape().as_list() tensor_stack_cropped = tf.random_crop(tensor_stack, [self.random_crop_size, self.random_crop_size, s[2]]) data_dict = dict() # delete everything else because the random cropping makes the data invalid anyway data_dict['image'], data_dict['hand_parts'], data_dict['hand_mask'] = tensor_stack_cropped[:, :, :3],\ tf.cast(tensor_stack_cropped[:, :, 3], tf.int32),\ tf.cast(tensor_stack_cropped[:, :, 4:], tf.int32) names, tensors = zip(*data_dict.items()) if self.shuffle: tensors = tf.train.shuffle_batch_join([tensors], batch_size=self.batch_size, capacity=100, min_after_dequeue=50, enqueue_many=False) else: tensors = tf.train.batch_join([tensors], batch_size=self.batch_size, capacity=100, enqueue_many=False) return dict(zip(names, tensors)) @staticmethod def create_multiple_gaussian_map(coords_uv, output_size, sigma, valid_vec=None): """ Creates a map of size (output_shape[0], output_shape[1]) at (center[0], center[1]) with variance sigma for multiple coordinates.""" with tf.name_scope('create_multiple_gaussian_map'): sigma = tf.cast(sigma, tf.float32) assert len(output_size) == 2 s = coords_uv.get_shape().as_list() coords_uv = tf.cast(coords_uv, tf.int32) if valid_vec is not None: valid_vec = tf.cast(valid_vec, tf.float32) valid_vec = tf.squeeze(valid_vec) cond_val = tf.greater(valid_vec, 0.5) else: cond_val = tf.ones_like(coords_uv[:, 0], dtype=tf.float32) cond_val = tf.greater(cond_val, 0.5) cond_1_in = tf.logical_and(tf.less(coords_uv[:, 0], output_size[0]-1), tf.greater(coords_uv[:, 0], 0)) cond_2_in = tf.logical_and(tf.less(coords_uv[:, 1], output_size[1]-1), 
tf.greater(coords_uv[:, 1], 0)) cond_in = tf.logical_and(cond_1_in, cond_2_in) cond = tf.logical_and(cond_val, cond_in) coords_uv = tf.cast(coords_uv, tf.float32) # create meshgrid x_range = tf.expand_dims(tf.range(output_size[0]), 1) y_range = tf.expand_dims(tf.range(output_size[1]), 0) X = tf.cast(tf.tile(x_range, [1, output_size[1]]), tf.float32) Y = tf.cast(tf.tile(y_range, [output_size[0], 1]), tf.float32) X.set_shape((output_size[0], output_size[1])) Y.set_shape((output_size[0], output_size[1])) X = tf.expand_dims(X, -1) Y = tf.expand_dims(Y, -1) X_b = tf.tile(X, [1, 1, s[0]]) Y_b = tf.tile(Y, [1, 1, s[0]]) X_b -= coords_uv[:, 0] Y_b -= coords_uv[:, 1] dist = tf.square(X_b) + tf.square(Y_b) scoremap = tf.exp(-dist / tf.square(sigma)) * tf.cast(cond, tf.float32) return scoremap
dedoogong/asrada
HandPose_Detector/BinaryDbReader.py
Python
apache-2.0
22,576
[ "Gaussian" ]
58082ed0a31619217dac6eba097fcec6a8b6f8abfa38710b0e1bd4f4ca9c29d2
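create_multiple_gaussian_map above rasterizes one Gaussian blob per keypoint onto an (H, W, num_kp) score map; note it uses exp(-d^2 / sigma^2) rather than the usual exp(-d^2 / (2 sigma^2)). A NumPy sketch of the same construction, with arbitrary keypoint coordinates in (row, col) order as in the original:

```python
import numpy as np

def gaussian_scoremaps(coords_hw, output_size, sigma):
    # One Gaussian blob per keypoint, mirroring create_multiple_gaussian_map
    # (same exp(-d^2 / sigma^2) convention, no factor of 2 in the exponent).
    rows, cols = np.mgrid[0:output_size[0], 0:output_size[1]].astype(np.float32)
    d2 = (rows[..., None] - coords_hw[:, 0]) ** 2 + (cols[..., None] - coords_hw[:, 1]) ** 2
    return np.exp(-d2 / sigma ** 2)

maps = gaussian_scoremaps(np.array([[10.0, 20.0], [40.0, 5.0]]), (64, 64), sigma=5.0)
print(maps.shape, maps[10, 20, 0])  # (64, 64, 2); peak value 1.0 at the first keypoint
```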
# Copyright Contributors to the Pyro project. # SPDX-License-Identifier: Apache-2.0 """ Example: Neural Transport ========================= This example illustrates how to use a trained AutoBNAFNormal autoguide to transform a posterior to a Gaussian-like one. The transform will be used to get better mixing rate for NUTS sampler. **References:** 1. Hoffman, M. et al. (2019), "NeuTra-lizing Bad Geometry in Hamiltonian Monte Carlo Using Neural Transport", (https://arxiv.org/abs/1903.03704) .. image:: ../_static/img/examples/neutra.png :align: center """ import argparse import os from matplotlib.gridspec import GridSpec import matplotlib.pyplot as plt import seaborn as sns from jax import random import jax.numpy as jnp from jax.scipy.special import logsumexp import numpyro from numpyro import optim from numpyro.diagnostics import print_summary import numpyro.distributions as dist from numpyro.distributions import constraints from numpyro.infer import MCMC, NUTS, SVI, Trace_ELBO from numpyro.infer.autoguide import AutoBNAFNormal from numpyro.infer.reparam import NeuTraReparam class DualMoonDistribution(dist.Distribution): support = constraints.real_vector def __init__(self): super(DualMoonDistribution, self).__init__(event_shape=(2,)) def sample(self, key, sample_shape=()): # it is enough to return an arbitrary sample with correct shape return jnp.zeros(sample_shape + self.event_shape) def log_prob(self, x): term1 = 0.5 * ((jnp.linalg.norm(x, axis=-1) - 2) / 0.4) ** 2 term2 = -0.5 * ((x[..., :1] + jnp.array([-2.0, 2.0])) / 0.6) ** 2 pe = term1 - logsumexp(term2, axis=-1) return -pe def dual_moon_model(): numpyro.sample("x", DualMoonDistribution()) def main(args): print("Start vanilla HMC...") nuts_kernel = NUTS(dual_moon_model) mcmc = MCMC( nuts_kernel, num_warmup=args.num_warmup, num_samples=args.num_samples, num_chains=args.num_chains, progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True, ) mcmc.run(random.PRNGKey(0)) mcmc.print_summary() vanilla_samples = mcmc.get_samples()["x"].copy() guide = AutoBNAFNormal( dual_moon_model, hidden_factors=[args.hidden_factor, args.hidden_factor] ) svi = SVI(dual_moon_model, guide, optim.Adam(0.003), Trace_ELBO()) print("Start training guide...") svi_result = svi.run(random.PRNGKey(1), args.num_iters) print("Finish training guide. 
Extract samples...") guide_samples = guide.sample_posterior( random.PRNGKey(2), svi_result.params, sample_shape=(args.num_samples,) )["x"].copy() print("\nStart NeuTra HMC...") neutra = NeuTraReparam(guide, svi_result.params) neutra_model = neutra.reparam(dual_moon_model) nuts_kernel = NUTS(neutra_model) mcmc = MCMC( nuts_kernel, num_warmup=args.num_warmup, num_samples=args.num_samples, num_chains=args.num_chains, progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True, ) mcmc.run(random.PRNGKey(3)) mcmc.print_summary() zs = mcmc.get_samples(group_by_chain=True)["auto_shared_latent"] print("Transform samples into unwarped space...") samples = neutra.transform_sample(zs) print_summary(samples) zs = zs.reshape(-1, 2) samples = samples["x"].reshape(-1, 2).copy() # make plots # guide samples (for plotting) guide_base_samples = dist.Normal(jnp.zeros(2), 1.0).sample( random.PRNGKey(4), (1000,) ) guide_trans_samples = neutra.transform_sample(guide_base_samples)["x"] x1 = jnp.linspace(-3, 3, 100) x2 = jnp.linspace(-3, 3, 100) X1, X2 = jnp.meshgrid(x1, x2) P = jnp.exp(DualMoonDistribution().log_prob(jnp.stack([X1, X2], axis=-1))) fig = plt.figure(figsize=(12, 8), constrained_layout=True) gs = GridSpec(2, 3, figure=fig) ax1 = fig.add_subplot(gs[0, 0]) ax2 = fig.add_subplot(gs[1, 0]) ax3 = fig.add_subplot(gs[0, 1]) ax4 = fig.add_subplot(gs[1, 1]) ax5 = fig.add_subplot(gs[0, 2]) ax6 = fig.add_subplot(gs[1, 2]) ax1.plot(svi_result.losses[1000:]) ax1.set_title("Autoguide training loss\n(after 1000 steps)") ax2.contourf(X1, X2, P, cmap="OrRd") sns.kdeplot(x=guide_samples[:, 0], y=guide_samples[:, 1], n_levels=30, ax=ax2) ax2.set( xlim=[-3, 3], ylim=[-3, 3], xlabel="x0", ylabel="x1", title="Posterior using\nAutoBNAFNormal guide", ) sns.scatterplot( x=guide_base_samples[:, 0], y=guide_base_samples[:, 1], ax=ax3, hue=guide_trans_samples[:, 0] < 0.0, ) ax3.set( xlim=[-3, 3], ylim=[-3, 3], xlabel="x0", ylabel="x1", title="AutoBNAFNormal base samples\n(True=left moon; False=right moon)", ) ax4.contourf(X1, X2, P, cmap="OrRd") sns.kdeplot(x=vanilla_samples[:, 0], y=vanilla_samples[:, 1], n_levels=30, ax=ax4) ax4.plot(vanilla_samples[-50:, 0], vanilla_samples[-50:, 1], "bo-", alpha=0.5) ax4.set( xlim=[-3, 3], ylim=[-3, 3], xlabel="x0", ylabel="x1", title="Posterior using\nvanilla HMC sampler", ) sns.scatterplot( x=zs[:, 0], y=zs[:, 1], ax=ax5, hue=samples[:, 0] < 0.0, s=30, alpha=0.5, edgecolor="none", ) ax5.set( xlim=[-5, 5], ylim=[-5, 5], xlabel="x0", ylabel="x1", title="Samples from the\nwarped posterior - p(z)", ) ax6.contourf(X1, X2, P, cmap="OrRd") sns.kdeplot(x=samples[:, 0], y=samples[:, 1], n_levels=30, ax=ax6) ax6.plot(samples[-50:, 0], samples[-50:, 1], "bo-", alpha=0.2) ax6.set( xlim=[-3, 3], ylim=[-3, 3], xlabel="x0", ylabel="x1", title="Posterior using\nNeuTra HMC sampler", ) plt.savefig("neutra.pdf") if __name__ == "__main__": assert numpyro.__version__.startswith("0.9.0") parser = argparse.ArgumentParser(description="NeuTra HMC") parser.add_argument("-n", "--num-samples", nargs="?", default=4000, type=int) parser.add_argument("--num-warmup", nargs="?", default=1000, type=int) parser.add_argument("--num-chains", nargs="?", default=1, type=int) parser.add_argument("--hidden-factor", nargs="?", default=8, type=int) parser.add_argument("--num-iters", nargs="?", default=10000, type=int) parser.add_argument("--device", default="cpu", type=str, help='use "cpu" or "gpu".') args = parser.parse_args() numpyro.set_platform(args.device) numpyro.set_host_device_count(args.num_chains) main(args)
pyro-ppl/numpyro
examples/neutra.py
Python
apache-2.0
6,722
[ "Gaussian" ]
9c99d80de19b086330a038f5bc28aebdf28436764dc44a11f0115c67e1346f13
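The DualMoonDistribution energy in the record above is a radial "ring" term minus a log-sum-exp over two Gaussian moons in the first coordinate. A NumPy/SciPy sketch of the same unnormalized log-density, handy for sanity-checking the contour plots without importing numpyro (this assumes SciPy is available; it is not a dependency of the example itself):

    import numpy as np
    from scipy.special import logsumexp

    def dual_moon_log_prob(x):
        # x: (..., 2) points; returns (...) unnormalized log-density.
        x = np.asarray(x, dtype=float)
        term1 = 0.5 * ((np.linalg.norm(x, axis=-1) - 2.0) / 0.4) ** 2
        term2 = -0.5 * ((x[..., :1] + np.array([-2.0, 2.0])) / 0.6) ** 2
        return -(term1 - logsumexp(term2, axis=-1))

    # the density is highest near the two moons at roughly (+/-2, 0)
    print(dual_moon_log_prob([[2.0, 0.0], [-2.0, 0.0], [0.0, 0.0]]))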
""" Handler for CAs + CRLs bundles """ import tarfile import os import io from DIRAC.Core.DISET.RequestHandler import RequestHandler from DIRAC import gLogger, S_OK, S_ERROR, gConfig from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler from DIRAC.Core.Utilities import File, List from DIRAC.Core.Security import Locations, Utilities class BundleManager: def __init__(self, baseCSPath): self.__csPath = baseCSPath self.__bundles = {} self.updateBundles() def __getDirsToBundle(self): dirsToBundle = {} result = gConfig.getOptionsDict("%s/DirsToBundle" % self.__csPath) if result["OK"]: dB = result["Value"] for bId in dB: dirsToBundle[bId] = List.fromChar(dB[bId]) if gConfig.getValue("%s/BundleCAs" % self.__csPath, True): dirsToBundle["CAs"] = [ "%s/*.0" % Locations.getCAsLocation(), "%s/*.signing_policy" % Locations.getCAsLocation(), "%s/*.pem" % Locations.getCAsLocation(), ] if gConfig.getValue("%s/BundleCRLs" % self.__csPath, True): dirsToBundle["CRLs"] = ["%s/*.r0" % Locations.getCAsLocation()] return dirsToBundle def getBundles(self): return dict([(bId, self.__bundles[bId]) for bId in self.__bundles]) def bundleExists(self, bId): return bId in self.__bundles def getBundleVersion(self, bId): try: return self.__bundles[bId][0] except Exception: return "" def getBundleData(self, bId): try: return self.__bundles[bId][1] except Exception: return "" def updateBundles(self): dirsToBundle = self.__getDirsToBundle() # Delete bundles that don't have to be updated for bId in self.__bundles: if bId not in dirsToBundle: gLogger.info("Deleting old bundle %s" % bId) del self.__bundles[bId] for bId in dirsToBundle: bundlePaths = dirsToBundle[bId] gLogger.info("Updating %s bundle %s" % (bId, bundlePaths)) buffer_ = io.BytesIO() filesToBundle = sorted(File.getGlobbedFiles(bundlePaths)) if filesToBundle: commonPath = os.path.commonprefix(filesToBundle) commonEnd = len(commonPath) gLogger.info("Bundle will have %s files with common path %s" % (len(filesToBundle), commonPath)) with tarfile.open("dummy", "w:gz", buffer_) as tarBuffer: for filePath in filesToBundle: tarBuffer.add(filePath, filePath[commonEnd:]) zippedData = buffer_.getvalue() buffer_.close() hash_ = File.getMD5ForFiles(filesToBundle) gLogger.info("Bundled %s : %s bytes (%s)" % (bId, len(zippedData), hash_)) self.__bundles[bId] = (hash_, zippedData) else: self.__bundles[bId] = (None, None) class BundleDeliveryHandlerMixin: @classmethod def initializeHandler(cls, serviceInfoDict): csPath = serviceInfoDict["serviceSectionPath"] cls.bundleManager = BundleManager(csPath) updateBundleTime = gConfig.getValue("%s/BundlesLifeTime" % csPath, 3600 * 6) gLogger.info("Bundles will be updated each %s secs" % updateBundleTime) gThreadScheduler.addPeriodicTask(updateBundleTime, cls.bundleManager.updateBundles) return S_OK() types_getListOfBundles = [] @classmethod def export_getListOfBundles(cls): return S_OK(cls.bundleManager.getBundles()) def transfer_toClient(self, fileId, token, fileHelper): version = "" if isinstance(fileId, str): if fileId in ["CAs", "CRLs"]: return self.__transferFile(fileId, fileHelper) else: bId = fileId elif isinstance(fileId, (list, tuple)): if len(fileId) == 0: fileHelper.markAsTransferred() return S_ERROR("No bundle specified!") elif len(fileId) == 1: bId = fileId[0] else: bId = fileId[0] version = fileId[1] if not self.bundleManager.bundleExists(bId): fileHelper.markAsTransferred() return S_ERROR("Unknown bundle %s" % bId) bundleVersion = self.bundleManager.getBundleVersion(bId) if bundleVersion is None: 
fileHelper.markAsTransferred() return S_ERROR("Empty bundle %s" % bId) if version == bundleVersion: fileHelper.markAsTransferred() return S_OK(bundleVersion) buffer_ = io.BytesIO(self.bundleManager.getBundleData(bId)) result = fileHelper.DataSourceToNetwork(buffer_) buffer_.close() if not result["OK"]: return result return S_OK(bundleVersion) def __transferFile(self, filetype, fileHelper): """ Creates and transfers the CAs or CRLs file to the client. :param str filetype: defines which file will be transferred to the client :param object fileHelper: :return: S_OK or S_ERROR """ if filetype == "CAs": retVal = Utilities.generateCAFile() elif filetype == "CRLs": retVal = Utilities.generateRevokedCertsFile() else: return S_ERROR("Not supported file type %s" % filetype) if not retVal["OK"]: return retVal else: result = fileHelper.getFileDescriptor(retVal["Value"], "r") if not result["OK"]: result = fileHelper.sendEOF() # better to check again the existence of the file if not os.path.exists(retVal["Value"]): return S_ERROR("File %s does not exist" % os.path.basename(retVal["Value"])) else: return S_ERROR("Failed to get file descriptor") fileDescriptor = result["Value"] result = fileHelper.FDToNetwork(fileDescriptor) fileHelper.oFile.close() # close the file and return return result class BundleDeliveryHandler(BundleDeliveryHandlerMixin, RequestHandler): pass
DIRACGrid/DIRAC
src/DIRAC/FrameworkSystem/Service/BundleDeliveryHandler.py
Python
gpl-3.0
6,375
[ "DIRAC" ]
cf12afc6f3476faad0e612d1cca7609e7ab0852af9ed055b7be9b15e7c4e877f
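BundleManager.updateBundles in the record above builds each bundle as a gzipped tar streamed into an in-memory buffer and versions it with a hash of the member files. A standard-library-only sketch of that pattern; the helper name and the MD5 choice mirror the handler but are illustrative, not the DIRAC API:

    import hashlib
    import io
    import os
    import tarfile

    def build_bundle(file_paths):
        # Returns (version_hash, gzipped_tar_bytes) for a list of files.
        file_paths = sorted(file_paths)
        common = os.path.commonprefix(file_paths)
        buf = io.BytesIO()
        with tarfile.open(fileobj=buf, mode="w:gz") as tar:
            for path in file_paths:
                tar.add(path, arcname=path[len(common):])  # store relative names
        digest = hashlib.md5()
        for path in file_paths:
            with open(path, "rb") as f:
                digest.update(f.read())
        return digest.hexdigest(), buf.getvalue()

A client can then compare the returned hash against the version it already holds and skip the transfer when they match, which is exactly what transfer_toClient does above.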
# (C) 2016, Markus Wildi, wildi.markus@bluewin.ch # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # Or visit http://www.gnu.org/licenses/gpl.html. # __author__ = 'wildi.markus@bluewin.ch' # Transform with libnova # Python bindings for libnova # import numpy as np from astropy.coordinates import Longitude,Latitude,Angle from astropy import units as u from astropy.coordinates import SkyCoord from ctypes import * class LN_equ_posn(Structure): _fields_ = [("ra", c_double),("dec", c_double)] class LN_hrz_posn(Structure): _fields_ = [("az", c_double),("alt", c_double)] class LN_lnlat_posn(Structure): _fields_ = [("lng", c_double),("lat", c_double)] class LN_nut(Structure): _fields_ = [("longitude", c_double),("obliquity", c_double),("ecliptic", c_double)] # add full path if it is not on LD_PATH ln=cdll.LoadLibrary("libnova.so") ln.ln_get_equ_aber.restype = None ln.ln_get_equ_prec.restype = None ln.ln_get_equ_from_ecl.restype = None ln.ln_get_nutation.restype = None ln.ln_get_refraction_adj.restype = c_double ln.ln_get_angular_separation.restype = c_double ln_pos_eq=LN_equ_posn() ln_pos_eq_ab=LN_equ_posn() ln_pos_eq_pm=LN_equ_posn() ln_pos_eq_app=LN_equ_posn() ln_pos_eq_pr=LN_equ_posn() ln_pos_aa_ab=LN_hrz_posn() ln_hrz_posn=LN_hrz_posn() ln_lnlat_posn=LN_lnlat_posn() ln_nut= LN_nut() ln_pos_eq_nut=LN_equ_posn() class Transformation(object): def __init__(self, lg=None,obs=None,refraction_method=None): # self.lg=lg self.name='LN Libnova' self.refraction_method=refraction_method self.obs=obs self.ln_obs=LN_lnlat_posn() self.ln_obs.lng=obs.longitude.degree # deg self.ln_obs.lat=obs.latitude.degree # deg self.ln_hght=obs.height # hm, no .meter?? 
m, not a libnova quantity def transform_to_hadec(self,tf=None,sky=None,mount_set_icrs=None): tem=sky.temperature pre=sky.pressure hum=sky.humidity pre_qfe=pre # to make it clear what is used aa=self.LN_EQ_to_AltAz(ra=Longitude(tf.ra.radian,u.radian).degree,dec=Latitude(tf.dec.radian,u.radian).degree,ln_pressure_qfe=pre_qfe,ln_temperature=tem,ln_humidity=hum,obstime=tf.obstime) ha=self.LN_AltAz_to_HA(az=aa.az.degree,alt=aa.alt.degree,obstime=tf.obstime) return ha def transform_to_altaz(self,tf=None,sky=None,mount_set_icrs=None): tem=sky.temperature pre=sky.pressure hum=sky.humidity pre_qfe=pre # to make it clear what is used aa=self.LN_EQ_to_AltAz(ra=Longitude(tf.ra.radian,u.radian).degree,dec=Latitude(tf.dec.radian,u.radian).degree,ln_pressure_qfe=pre_qfe,ln_temperature=tem,ln_humidity=hum,obstime=tf.obstime,mount_set_icrs=mount_set_icrs) return aa def LN_nutation_meeus(self,eq_pr,JD=None): # first order correction ln.ln_get_nutation(c_double(JD),byref(ln_nut)) d_psi=ln_nut.longitude /180.*np.pi epsilon_0=(ln_nut.ecliptic+ln_nut.obliquity)/180.*np.pi # true obliquity d_epsilon=ln_nut.obliquity/180.*np.pi ra=Longitude(eq_pr.ra,u.degree).radian dec=Latitude(eq_pr.dec,u.degree).radian d_ra=(np.cos(epsilon_0)+np.sin(epsilon_0)*np.sin(ra)*np.tan(dec))*d_psi-np.cos(ra)*np.tan(dec)*d_epsilon d_dec=(np.sin(epsilon_0)*np.cos(ra))*d_psi+np.sin(ra)*d_epsilon ln_pos_eq_nut.ra=eq_pr.ra + d_ra * 180./np.pi ln_pos_eq_nut.dec=eq_pr.dec + d_dec* 180./np.pi return ln_pos_eq_nut def LN_EQ_to_AltAz(self,ra=None,dec=None,ln_pressure_qfe=None,ln_temperature=None,ln_humidity=None,obstime=None,mount_set_icrs=False): ln_pos_eq.ra=ra ln_pos_eq.dec=dec if mount_set_icrs: # ToDo missing see Jean Meeus, Astronomical Algorithms, chapter 23 # proper motion # annual parallax (0".8) # gravitational deflection of light (0".003) ln.ln_get_equ_prec(byref(ln_pos_eq), c_double(obstime.jd), byref(ln_pos_eq_pr)) ln_pos_eq_nut=self.LN_nutation_meeus(eq_pr=ln_pos_eq_pr,JD=obstime.jd) ln.ln_get_equ_aber(byref(ln_pos_eq_nut), c_double(obstime.jd), byref(ln_pos_eq_ab)) ln.ln_get_hrz_from_equ(byref(ln_pos_eq_ab), byref(self.ln_obs), c_double(obstime.jd), byref(ln_pos_aa_ab)) # here we use QFE not pressure at sea level! # E.g. at Dome-C this formula: # ln_pressure=ln_see_pres * pow(1. - (0.0065 * ln_alt) / 288.15, (9.80665 * 0.0289644) / (8.31447 * 0.0065)); # is not precise. if self.refraction_method is None: d_alt_deg=ln.ln_get_refraction_adj(c_double(ln_pos_aa_ab.alt),c_double(ln_pressure_qfe),c_double(ln_temperature)) else: d_alt_deg=180./np.pi* self.refraction_method(alt=ln_pos_aa_ab.alt,tem=ln_temperature,pre=ln_pressure_qfe,hum=ln_humidity) else: # ... but not for the star position as measured in mount frame ln.ln_get_hrz_from_equ(byref(ln_pos_eq), byref(self.ln_obs), c_double(obstime.jd), byref(ln_pos_aa_ab)); d_alt_deg=0. 
a_az=Longitude(ln_pos_aa_ab.az,u.deg) a_alt=Latitude(ln_pos_aa_ab.alt + d_alt_deg,u.deg) pos_aa=SkyCoord(az=a_az.radian,alt=a_alt.radian,unit=(u.radian,u.radian),frame='altaz',location=self.obs,obstime=obstime,obswl=0.5*u.micron, pressure=ln_pressure_qfe*u.hPa,temperature=ln_temperature*u.deg_C,relative_humidity=ln_humidity) return pos_aa def LN_AltAz_to_HA(self,az=None,alt=None,obstime=None): ln_hrz_posn.alt=alt ln_hrz_posn.az=az ln.ln_get_equ_from_hrz(byref(ln_hrz_posn),byref(self.ln_obs), c_double(obstime.jd),byref(ln_pos_eq)) # calculate HA ra=Longitude(ln_pos_eq.ra,u.deg) HA= obstime.sidereal_time('apparent') - ra # hm, ra=ha a bit ugly ha=SkyCoord(ra=HA, dec=Latitude(ln_pos_eq.dec,u.deg).radian,unit=(u.radian,u.radian),frame='cirs') return ha def LN_ICRS_to_GCRS(self,ra=None,dec=None,ln_pressure_qfe=None,ln_temperature=None,ln_humidity=None,obstime=None): ln_pos_eq.ra=ra ln_pos_eq.dec=dec ln.ln_get_equ_prec(byref(ln_pos_eq), c_double(obstime.jd), byref(ln_pos_eq_pr)) ln_pos_eq_nut=self.LN_nutation_meeus(eq_pr=ln_pos_eq_pr,JD=obstime.jd) ln.ln_get_equ_aber(byref(ln_pos_eq_nut), c_double(obstime.jd), byref(ln_pos_eq_ab)) ra=Longitude(ln_pos_eq_ab.ra,u.deg) dec=Latitude(ln_pos_eq_ab.dec,u.deg) gcrs=SkyCoord(ra=ra.radian,dec=dec.radian,unit=(u.radian,u.radian),frame='gcrs',location=self.obs,obstime=obstime,obswl=0.5*u.micron, pressure=ln_pressure_qfe*u.hPa,temperature=ln_temperature*u.deg_C,relative_humidity=ln_humidity) return gcrs def LN_ICRS_to_AltAz(self,ra=None,dec=None,ln_pressure_qfe=None,ln_temperature=None,ln_humidity=None,obstime=None,mount_set_icrs=True): ln_pos_eq.ra=ra ln_pos_eq.dec=dec if mount_set_icrs: # libnova corrections for catalog data ... # ToDo missing see Jean Meeus, Astronomical Algorithms, chapter 23 # proper motion # annual parallax (0".8) # gravitational deflection of light (0".003) ln.ln_get_equ_prec(byref(ln_pos_eq), c_double(obstime.jd), byref(ln_pos_eq_pr)) ln_pos_eq_nut=self.LN_nutation_meeus(eq_pr=ln_pos_eq_pr,JD=obstime.jd) ln.ln_get_equ_aber(byref(ln_pos_eq_nut), c_double(obstime.jd), byref(ln_pos_eq_ab)) ln.ln_get_hrz_from_equ(byref(ln_pos_eq_ab), byref(self.ln_obs), c_double(obstime.jd), byref(ln_pos_aa_ab)) # here we use QFE not pressure at sea level! # E.g. at Dome-C this formula: # ln_pressure=ln_see_pres * pow(1. - (0.0065 * ln_alt) / 288.15, (9.80665 * 0.0289644) / (8.31447 * 0.0065)); # is not precise. if self.refraction_method is None: d_alt_deg=ln.ln_get_refraction_adj(c_double(ln_pos_aa_ab.alt),c_double(ln_pressure_qfe),c_double(ln_temperature)) else: d_alt_deg=180./np.pi* self.refraction_method(alt=ln_pos_aa_ab.alt,tem=ln_temperature,pre=ln_pressure_qfe,hum=ln_humidity) else: # ... but not for the star position as measured in mount frame ln.ln_get_hrz_from_equ(byref(ln_pos_eq), byref(self.ln_obs), c_double(obstime.jd), byref(ln_pos_aa_ab)); d_alt_deg=0. a_az=Longitude(ln_pos_aa_ab.az,u.deg) a_az.wrap_at(0.*u.degree) a_alt=Latitude(ln_pos_aa_ab.alt + d_alt_deg,u.deg) pos_aa=SkyCoord(az=a_az.radian,alt=a_alt.radian,unit=(u.radian,u.radian),frame='altaz',location=self.obs,obstime=obstime,obswl=0.5*u.micron, pressure=ln_pressure_qfe*u.hPa,temperature=ln_temperature*u.deg_C,relative_humidity=ln_humidity) return pos_aa
jerryjiahaha/rts2
scripts/u_point/transform/u_libnova.py
Python
lgpl-3.0
8,916
[ "VisIt" ]
40c700d3a07515f4dd8599098ef9ca1fdfc6268f1aa64399283a8862ee4e778c
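LN_nutation_meeus in the record above implements the first-order nutation correction from Meeus, Astronomical Algorithms, ch. 23: d_ra = (cos(eps0) + sin(eps0) sin(ra) tan(dec)) d_psi - cos(ra) tan(dec) d_eps, and d_dec = sin(eps0) cos(ra) d_psi + sin(ra) d_eps. A self-contained sketch working purely in radians; in the code above the d_psi and d_eps inputs come from libnova's ln_get_nutation, here they are passed in directly:

    import numpy as np

    def nutation_first_order(ra, dec, d_psi, d_eps, eps0):
        # All angles in radians; returns the nutation-corrected (ra, dec).
        d_ra = (np.cos(eps0) + np.sin(eps0) * np.sin(ra) * np.tan(dec)) * d_psi \
               - np.cos(ra) * np.tan(dec) * d_eps
        d_dec = np.sin(eps0) * np.cos(ra) * d_psi + np.sin(ra) * d_eps
        return ra + d_ra, dec + d_dec

    arcsec = np.pi / (180.0 * 3600.0)  # nutation terms are a few arcseconds
    print(nutation_first_order(1.0, 0.5, 17.0 * arcsec, 9.0 * arcsec, np.radians(23.44)))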
#!/usr/bin/python """ This script asks cdash to give it a summary of all of the failing tests on the nightly expected section. It presents the tests ranked by the number of failing machines. From this view you can more easily see what is in the greatest need of fixing. """ import sys import time import datetime import urllib # Process args csvOutput = True wikiOutput = False dashDate = str(datetime.date.today()) argc = len(sys.argv) while argc > 1: argc = argc - 1 if sys.argv[argc] == "--csv": csvOutput = True wikiOutput = False elif sys.argv[argc] == "--wiki": wikiOutput = True csvOutput = False if wikiOutput: print "==Dashboard for " + dashDate + "==" url = 'https://open.cdash.org/api/?method=build&task=sitetestfailures&project=VTK&group=Nightly%20Expected' page = urllib.urlopen(url) data = page.readlines() if len(data[0]) == 2: #"[]" print "Cdash returned nothing useful, please try again later." raise SystemExit submissions = eval(data[0]) tfails = dict() if csvOutput: print "-"*20, "ANALYZING", "-"*20 elif wikiOutput: print "===Builds for " + dashDate + "===" print r'{| class="wikitable sortable" border="1" cellpadding="5" cellspacing="0"' print r'|-' print r'| Build Name' print r'| Failing' for skey in submissions.keys(): submission = submissions[skey] bname = submission['buildname'] bfails = submission['tests'] if len(bfails) > 100: continue if csvOutput: print bname print len(bfails) elif wikiOutput: print r'|-' print r'| ', print r'[https://open.cdash.org/index.php?project=VTK' + '&date=' + dashDate + r'&filtercount=1' + r'&field1=buildname/string&compare1=61&value1=' + bname + " " + bname + "]" print r'|' print len(bfails) for tnum in range(0, len(bfails)): test = bfails[tnum] tname = test['name'] if not tname in tfails: tfails[tname] = list() tfails[tname].append(bname) if wikiOutput: print r'|}' if csvOutput: print "-"*20, "REPORT", "-"*20 print len(tfails)," FAILURES" elif wikiOutput: print "===Tests for " + dashDate + "===" print r'{| class="wikitable sortable" border="1" cellpadding="5" cellspacing="0"' print r'|-' print r'| Test' print r'| Failing' print r'| Platforms' failcounts = map(lambda x: (x,len(tfails[x])), tfails.keys()) sortedfails = sorted(failcounts, key=lambda fail: fail[1]) for test in sortedfails: tname = test[0] if csvOutput: print tname, ",", len(tfails[tname]), ",", tfails[tname] elif wikiOutput: print r'|-' print r'| ' print r'[https://open.cdash.org/testSummary.php?' + r'project=11' + r'&date=' + dashDate + r'&name=' + tname + ' ' + tname + ']' print r'|', print len(tfails[tname]) print r'|', print tfails[tname]
ashray/VTK-EVM
Utilities/Maintenance/vtk_fail_summary.py
Python
bsd-3-clause
2,767
[ "VTK" ]
72241e64e00fefd13ceaba86fa07af8bffe6dea0045fceedad8572fd63b54688
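The reporting logic in the script above boils down to inverting a per-build list of failing tests into a per-test list of failing builds, then ranking by count. The same tally in a few lines of Python 3 (the script itself is Python 2; the submissions dict below is illustrative data in cdash's shape, not a real response):

    from collections import defaultdict

    submissions = {
        "site1": {"buildname": "linux-gcc", "tests": [{"name": "TestA"}, {"name": "TestB"}]},
        "site2": {"buildname": "mac-clang", "tests": [{"name": "TestA"}]},
    }

    tfails = defaultdict(list)
    for submission in submissions.values():
        for test in submission["tests"]:
            tfails[test["name"]].append(submission["buildname"])

    # rank tests by number of failing builds, most-failing last (as the script does)
    for name, builds in sorted(tfails.items(), key=lambda item: len(item[1])):
        print(name, len(builds), builds)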
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\# '''Application handler for Bender applications in LHCb.''' import os import tempfile import pprint import shutil from os.path import split, join from GangaCore.GPIDev.Schema.Schema import FileItem, SimpleItem import GangaCore.Utility.logging from GangaCore.GPIDev.Lib.File import File from GangaCore.Utility.util import unique from GangaCore.Core.exceptions import ApplicationConfigurationError from GangaCore.GPIDev.Lib.File import ShareDir from GangaCore.GPIDev.Lib.File.FileBuffer import FileBuffer from GangaGaudi.Lib.Applications.GaudiBase import GaudiBase from GangaGaudi.Lib.Applications.GaudiUtils import fillPackedSandbox, gzipFile from GangaCore.Utility.files import expandfilename, fullpath from GangaCore.Utility.Config import getConfig from GangaCore.Utility.Shell import Shell from .AppsBaseUtils import guess_version from GangaCore.GPIDev.Base.Proxy import isType from GangaCore.GPIDev.Adapters.StandardJobConfig import StandardJobConfig # used by (master_)configure below # Added for XML PostProcessing from GangaLHCb.Lib.RTHandlers.RTHUtils import getXMLSummaryScript from GangaLHCb.Lib.Applications import XMLPostProcessor logger = GangaCore.Utility.logging.getLogger() #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\# class Bender(GaudiBase): """The Bender application handler The user specifies a module file (via Bender.module) which contains a Bender python module and the number of events they want to run on (via Bender.events). The user's module is then run on the data by calling: USERMODULE.configure(EventSelectorInput,FileCatalogCatalogs) USERMODULE.run(NUMEVENTS) """ _name = 'Bender' _category = 'applications' _exportmethods = GaudiBase._exportmethods[:] _exportmethods += ['prepare', 'unprepare'] _schema = GaudiBase._schema.inherit_copy() docstr = 'The package the application belongs to (e.g. "Sim", "Phys")' _schema.datadict['package'] = SimpleItem(defvalue=None, typelist=['str', 'type(None)'], doc=docstr) docstr = 'The package where your top level requirements file is read ' \ 'from. Can be written either as a path ' \ '\"Tutorial/Analysis/v6r0\" or in traditional notation ' \ '\"Analysis v6r0 Tutorial\"' _schema.datadict['masterpackage'] = SimpleItem(defvalue=None, typelist=[ 'str', 'type(None)'], doc=docstr) docstr = 'Extra options to be passed onto the SetupProject command '\ 'used for configuring the environment. As an example '\ 'setting it to \'--dev\' will give access to the DEV area. '\ 'For full documentation of the available options see '\ 'https://twiki.cern.ch/twiki/bin/view/LHCb/SetupProject' _schema.datadict['setupProjectOptions'] = SimpleItem(defvalue='', typelist=[ 'str', 'type(None)'], doc=docstr) docstr = 'The name of the module to import. 
A copy will be made ' \ 'at submission time' _schema.datadict['module'] = FileItem(preparable=1, defvalue=File(), doc=docstr) docstr = 'The name of the Gaudi application (Bender)' _schema.datadict['project'] = SimpleItem(preparable=1, defvalue='Bender', hidden=1, protected=1, typelist=['str'], doc=docstr) docstr = 'The number of events ' _schema.datadict['events'] = SimpleItem( defvalue=-1, typelist=['int'], doc=docstr) docstr = 'Parameters for module ' _schema.datadict['params'] = SimpleItem( defvalue={}, typelist=['dict', 'str', 'int', 'bool', 'float'], doc=docstr) _schema.version.major += 2 _schema.version.minor += 0 #def __init__(self): # super(Bender, self).__init__() def _get_default_version(self, gaudi_app): return guess_version(self, gaudi_app) def _auto__init__(self): if (not self.appname) and (not self.project): self.project = 'Bender' # default if (not self.appname): self.appname = self.project self._init() def _getshell(self): from . import EnvironFunctions return EnvironFunctions._getshell(self) def prepare(self, force=False): super(Bender, self).prepare(force) self._check_inputs() share_dir = os.path.join(expandfilename(getConfig('Configuration')['gangadir']), 'shared', getConfig('Configuration')['user'], self.is_prepared.name) fillPackedSandbox([self.module], os.path.join(share_dir, 'inputsandbox', '_input_sandbox_%s.tar' % self.is_prepared.name)) gzipFile(os.path.join(share_dir, 'inputsandbox', '_input_sandbox_%s.tar' % self.is_prepared.name), os.path.join( share_dir, 'inputsandbox', '_input_sandbox_%s.tgz' % self.is_prepared.name), True) # add the newly created shared directory into the metadata system if # the app is associated with a persisted object self.checkPreparedHasParent(self) self.post_prepare() logger.debug("Finished Preparing Application in %s" % share_dir) def master_configure(self): return (None, StandardJobConfig()) def configure(self, master_appconfig): # self._configure() modulename = split(self.module.name)[-1].split('.')[0] script = """ from copy import deepcopy from Gaudi.Configuration import * importOptions('data.py') import %s as USERMODULE EventSelectorInput = deepcopy(EventSelector().Input) FileCatalogCatalogs = deepcopy(FileCatalog().Catalogs) EventSelector().Input=[] FileCatalog().Catalogs=[]\n""" % modulename script_configure = "USERMODULE.configure(EventSelectorInput,FileCatalogCatalogs%s)\n" if self.params: param_string = ",params=%s" % self.params else: param_string = "" script_configure = script_configure % param_string script += script_configure script += "USERMODULE.run(%d)\n" % self.events script += getXMLSummaryScript() # add summary.xml outputsandbox_temp = XMLPostProcessor._XMLJobFiles() outputsandbox_temp += unique(self.getJobObject().outputsandbox) outputsandbox = unique(outputsandbox_temp) input_files = [] input_files += [FileBuffer('gaudipython-wrapper.py', script)] logger.debug("Returning StandardJobConfig") return (None, StandardJobConfig(inputbox=input_files, outputbox=outputsandbox)) def _check_inputs(self): """Checks the validity of user's entries for GaudiPython schema""" # Always check for None OR empty #logger.info("self.module: %s" % str(self.module)) if isType(self.module, str): self.module = File(self.module) if self.module.name is None: raise ApplicationConfigurationError("Application Module not requested") elif self.module.name == "": raise ApplicationConfigurationError("Application Module not requested") else: # Always check we've been given a FILE! 
self.module.name = fullpath(self.module.name) if not os.path.isfile(self.module.name): msg = 'Module file %s not found.' % self.module.name raise ApplicationConfigurationError(msg) def postprocess(self): XMLPostProcessor.postprocess(self, logger) #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\# # Associate the correct run-time handlers to GaudiPython for various backends. from GangaCore.GPIDev.Adapters.ApplicationRuntimeHandlers import allHandlers from GangaLHCb.Lib.RTHandlers.LHCbGaudiRunTimeHandler import LHCbGaudiRunTimeHandler from GangaLHCb.Lib.RTHandlers.LHCbGaudiDiracRunTimeHandler import LHCbGaudiDiracRunTimeHandler for backend in ['LSF', 'Interactive', 'PBS', 'SGE', 'Local', 'Condor', 'Remote']: allHandlers.add('Bender', backend, LHCbGaudiRunTimeHandler) allHandlers.add('Bender', 'Dirac', LHCbGaudiDiracRunTimeHandler) #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
ganga-devs/ganga
ganga/GangaLHCb/Lib/Applications/Bender.py
Python
gpl-3.0
8,804
[ "DIRAC" ]
ff84bb75c41e534288f40972612d0197a636d3e0ebdd1f056e3e7b68d0de679e
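Bender.configure in the record above generates a gaudipython-wrapper.py by splicing the user's module name, optional params, and event count into a fixed template. A minimal sketch of just that templating step; the function name and example path are illustrative, not the Ganga API:

    import os

    def make_wrapper(module_path, events, params=None):
        # Derive the importable module name from the file name, as configure() does.
        modulename = os.path.splitext(os.path.basename(module_path))[0]
        script = "import %s as USERMODULE\n" % modulename
        param_string = ",params=%r" % (params,) if params else ""
        script += "USERMODULE.configure(EventSelectorInput,FileCatalogCatalogs%s)\n" % param_string
        script += "USERMODULE.run(%d)\n" % events
        return script

    print(make_wrapper("/path/to/my_analysis.py", events=1000, params={"cut": 5.0}))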
# Version: 0.15 """ The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere on your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes). The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. 
This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation First, decide on values for the following configuration variables: * `VCS`: the version control system you use. Currently accepts "git". * `style`: the style of version string to be produced. See "Styles" below for details. Defaults to "pep440", which looks like `TAG[+DISTANCE.gSHORTHASH[.dirty]]`. * `versionfile_source`: A project-relative pathname into which the generated version strings should be written. This is usually a `_version.py` next to your project's main `__init__.py` file, so it can be imported at runtime. If your project uses `src/myproject/__init__.py`, this should be `src/myproject/_version.py`. This file should be checked in to your VCS as usual: the copy created below by `setup.py setup_versioneer` will include code that parses expanded VCS keywords in generated tarballs. The 'build' and 'sdist' commands will replace it with a copy that has just the calculated version string. This must be set even if your project does not have any modules (and will therefore never import `_version.py`), since "setup.py sdist" -based trees still need somewhere to record the pre-calculated version strings. Anywhere in the source tree should do. If there is a `__init__.py` next to your `_version.py`, the `setup.py setup_versioneer` command (described below) will append some `__version__`-setting assignments, if they aren't already present. * `versionfile_build`: Like `versionfile_source`, but relative to the build directory instead of the source directory. These will differ when your setup.py uses 'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`, then you will probably have `versionfile_build='myproject/_version.py'` and `versionfile_source='src/myproject/_version.py'`. If this is set to None, then `setup.py build` will not attempt to rewrite any `_version.py` in the built tree. If your project does not have any libraries (e.g. if it only builds a script), then you should use `versionfile_build = None` and override `distutils.command.build_scripts` to explicitly insert a copy of `versioneer.get_version()` into your generated script. * `tag_prefix`: a string, like 'PROJECTNAME-', which appears at the start of all VCS tags. If your tags look like 'myproject-1.2.0', then you should use tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this should be an empty string. * `parentdir_prefix`: an optional string, frequently the same as tag_prefix, which appears at the start of all unpacked tarball filenames. If your tarball unpacks into 'myproject-1.2.0', this should be 'myproject-'. To disable this feature, just omit the field from your `setup.cfg`. This tool provides one script, named `versioneer`. That script has one mode, "install", which writes a copy of `versioneer.py` into the current directory and runs `versioneer.py setup` to finish the installation. To versioneer-enable your project: * 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and populating it with the configuration values you decided earlier (note that the option names are not case-sensitive): ```` [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = "" parentdir_prefix = myproject- ```` * 2: Run `versioneer install`. 
This will do the following: * copy `versioneer.py` into the top of your source tree * create `_version.py` in the right place (`versionfile_source`) * modify your `__init__.py` (if one exists next to `_version.py`) to define `__version__` (by calling a function from `_version.py`) * modify your `MANIFEST.in` to include both `versioneer.py` and the generated `_version.py` in sdist tarballs `versioneer install` will complain about any problems it finds with your `setup.py` or `setup.cfg`. Run it multiple times until you have fixed all the problems. * 3: add a `import versioneer` to your setup.py, and add the following arguments to the setup() call: version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), * 4: commit these changes to your VCS. To make sure you won't forget, `versioneer install` will mark everything it touched for addition using `git add`. Don't forget to add `setup.py` and `setup.cfg` too. ## Post-Installation Usage Once established, all uses of your tree from a VCS checkout should get the current version string. All generated tarballs should include an embedded version string (so users who unpack them will not need a VCS tool installed). If you distribute your project through PyPI, then the release process should boil down to two steps: * 1: git tag 1.0 * 2: python setup.py register sdist upload If you distribute it through github (i.e. users use github to generate tarballs with `git archive`), the process is: * 1: git tag 1.0 * 2: git push; git push --tags Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at least one tag in its history. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. 
The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See details.md in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ### Upgrading to 0.15 Starting with this version, Versioneer is configured with a `[versioneer]` section in your `setup.cfg` file. Earlier versions required the `setup.py` to set attributes on the `versioneer` module immediately after import. The new version will refuse to run (raising an exception during import) until you have provided the necessary `setup.cfg` section. In addition, the Versioneer package provides an executable named `versioneer`, and the installation process is driven by running `versioneer install`. In 0.14 and earlier, the executable was named `versioneer-installer` and was run without an argument. ### Upgrading to 0.14 0.14 changes the format of the version string. 0.13 and earlier used hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a plus-separated "local version" section strings, with dot-separated components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old format, but should be ok with the new one. ### Upgrading from 0.11 to 0.12 Nothing special. ### Upgrading from 0.10 to 0.11 You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running `setup.py setup_versioneer`. This will enable the use of additional version-control systems (SVN, etc) in the future. ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. 
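For illustration, a dirty checkout three commits past a "0.11" tag might report (the values here are hypothetical, reusing the example hash from above):

    >>> import versioneer
    >>> versioneer.get_versions()
    {'version': '0.11+3.g1076c97.dirty',
     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
     'dirty': True,
     'error': None}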
It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is hereby released into the public domain. The `_version.py` that it creates is also in the public domain. """ from __future__ import print_function try: import configparser except ImportError: import ConfigParser as configparser import errno import json import os import re import subprocess import sys class VersioneerConfig: pass def get_root(): # we require that all commands are run from the project root, i.e. the # directory that contains setup.py, setup.cfg, and versioneer.py . root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to find the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. me = os.path.realpath(os.path.abspath(__file__)) if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root def get_config_from_root(root): # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . 
setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() with open(setup_cfg, "r") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") return cfg class NotThisMethod(Exception): pass # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator def decorate(f): if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) return None return stdout LONG_VERSION_PY['git'] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.15 (https://github.com/warner/python-versioneer) import errno import os import re import subprocess import sys def get_keywords(): # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). 
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full} return keywords class VersioneerConfig: pass def get_config(): # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): pass LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator def decorate(f): if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) return None return stdout def versions_from_parentdir(parentdir_prefix, root, verbose): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%%s', but '%%s' doesn't start with " "prefix '%%s'" %% (root, dirname, parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None} @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): if not keywords: raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. 
We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs-tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags"} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # this runs 'git' from the root of the source tree. This only gets called # if the git-archive 'subst' keywords were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, # meaning we're inside a checked out source tree. if 0 and not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %%s" %% root) raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # if there is a tag, this yields TAG-NUM-gHEX[-dirty] # if there are no tags, this yields HEX[-dirty] (no NUM) describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces def plus_or_dot(pieces): if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): # now build up version string, with post-release "local version # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty # exceptions: # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): # TAG[.post.devDISTANCE] . No -dirty # exceptions: # 1: no tags. 0.post.devDISTANCE if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that # .dev0 sorts backwards (a dirty tree will appear "older" than the # corresponding clean one), but you shouldn't be releasing software with # -dirty anyways. # exceptions: # 1: no tags. 0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_old(pieces): # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. # exceptions: # 1: no tags. 0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty # --always' # exceptions: # 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty # --always -long'. The distance/hash is unconditional. # exceptions: # 1: no tags. HEX[-dirty] (note: no 'g' prefix) if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"]} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None} def get_versions(): # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree"} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): if not keywords: raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs-tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags"} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # this runs 'git' from the root of the source tree. This only gets called # if the git-archive 'subst' keywords were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, # meaning we're inside a checked out source tree. if 0 and not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # if there is a tag, this yields TAG-NUM-gHEX[-dirty] # if there are no tags, this yields HEX[-dirty] (no NUM) describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. 
git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%s', but '%s' doesn't start with " "prefix '%s'" % (root, dirname, parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None} SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.15) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
import json import sys version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): try: with open(filename) as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): # now build up version string, with post-release "local version # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty # exceptions: # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): # TAG[.post.devDISTANCE] . No -dirty # exceptions: # 1: no tags. 0.post.devDISTANCE if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that # .dev0 sorts backwards (a dirty tree will appear "older" than the # corresponding clean one), but you shouldn't be releasing software with # -dirty anyways. # exceptions: # 1: no tags. 0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. # exceptions: # 1: no tags. 0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty # --always' # exceptions: # 1: no tags. 
HEX[-dirty] (note: no 'g' prefix)

    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render_git_describe_long(pieces):
    # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
    # --always --long'. The distance/hash is unconditional.
    # exceptions:
    # 1: no tags. HEX[-dirty] (note: no 'g' prefix)

    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render(pieces, style):
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}


class VersioneerBadRootError(Exception):
    pass


def get_versions(verbose=False):
    # returns dict with two keys: 'version' and 'full'

    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]

    root = get_root()
    cfg = get_config_from_root(root)

    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"

    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass

    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass

    if verbose:
        print("unable to compute version")

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version"}


def get_version():
    return get_versions()["version"]


def get_cmdclass():
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to its pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52

    cmds = {}

    # we add "version" to both distutils and setuptools
    from distutils.core import Command

    class cmd_version(Command):
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?

    from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py

    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] # we override different "sdist" commands for both environments if "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = "" parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. 
[versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(INIT_PY_SNIPPET) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with open(manifest_in, "r") as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with open(manifest_in, "a") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-time keyword # substitution. do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). 
Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1)
chen0031/rekall
rekall-core/versioneer.py
Python
gpl-2.0
62,487
[ "Brian" ]
827f92a2c1571b1bb2d14ba5cbfc2c05497636f9ea0fc1102416cac35da913f0
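To make the render styles in versioneer.py above concrete, here is a small illustrative driver. The `pieces` dict is fabricated (values that git_pieces_from_vcs() would produce for a repository three commits past a "v1.2" tag with uncommitted changes), and it assumes the render_* functions above are in scope:

# Illustrative only: a hand-built `pieces` dict, as git_pieces_from_vcs()
# would produce it for a repo 3 commits past tag "v1.2" with a dirty tree.
pieces = {
    "closest-tag": "1.2",          # tag_prefix already stripped
    "distance": 3,
    "short": "abc1234",
    "long": "abc1234" + "0" * 33,  # fake 40-char revision id
    "dirty": True,
    "error": None,
}

for style in ("pep440", "pep440-pre", "pep440-post",
              "pep440-old", "git-describe", "git-describe-long"):
    print(style, "->", render(pieces, style)["version"])

# Expected output, per the rules encoded above:
# pep440 -> 1.2+3.gabc1234.dirty
# pep440-pre -> 1.2.post.dev3
# pep440-post -> 1.2.post3.dev0+gabc1234
# pep440-old -> 1.2.post3.dev0
# git-describe -> 1.2-3-gabc1234-dirty
# git-describe-long -> 1.2-3-gabc1234-dirty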
from django.db import models
from .general_models import ScenarioSpecificBase
from django.forms.models import model_to_dict


class Feed(ScenarioSpecificBase):
    me = models.FloatField()
    fme = models.FloatField()
    erdp = models.FloatField()
    dup = models.FloatField()
    adf = models.FloatField()
    price = models.FloatField(null=True)
    avaliable = models.FloatField(null=True)  # field name kept as-is; renaming it would require a migration
    feed_type = models.ForeignKey('FeedType')
    maxInclusion = models.FloatField(null=True)

    @property
    def qm_ruminant(self):
        """
        Yan & Agnew 2004
        :return: ratio of metabolisable energy to gross energy
        """
        return (-0.000796 * self.adf) + 0.827

    # fields which can be meaningfully aggregated
    aggregable = ["me", "fme", "erdp", "dup", "adf", "price", "qm_ruminant"]

    def to_dict(self):
        # `qm_ruminant` is a property, so it must not be called;
        # `self.qm_ruminant()` would raise TypeError on the returned float.
        data = model_to_dict(self)
        data["feed_type"] = self.feed_type.id
        data["qm_ruminant"] = self.qm_ruminant
        return data


class FeedType(ScenarioSpecificBase):
    minInclusion = models.FloatField(null=True)
    maxInclusion = models.FloatField(null=True)

    def to_dict(self):
        return {
            "id": self.id,
            "minInclusion": self.minInclusion,
            "maxInclusion": self.maxInclusion,
        }

# class BreedDetails(models.Model):
#     breed = models.CharField(max_length=20, unique=True)
#
#     # Fox et. al. 1998
#     MM = models.FloatField(null=True)  # maintenance multiplier non-lactating
#     MML = models.FloatField(null=True)  # maintenance multiplier lactating
#     BW = models.FloatField(null=True)  # Calf Birth Weight kg
#     peak_yield = models.FloatField(null=True)  # Average peak milk yield kg
#     BW_adjustment_Q1 = models.FloatField(null=True)  # Q1 Birth weight adjustment/age of dam yr
#     BW_adjustment_Q2 = models.FloatField(null=True)  # Q2 Birth weight adjustment/age of dam yr
#     BW_adjustment_Q3 = models.FloatField(null=True)  # Q3 Birth weight adjustment/age of dam yr
#     BW_adjustment_Q4 = models.FloatField(null=True)  # Q4 Birth weight adjustment/age of dam yr
BenLatham/FLOSS-Agricultural-Simulation
simulation/models/diet_models.py
Python
mit
2,117
[ "ADF" ]
fe74b5218afbc69c7916a2488ef69d4373cf9242ad3c7e6c42bf8158014bcb4a
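The qm_ruminant property in diet_models.py above is a straight linear regression attributed to Yan & Agnew (2004). A worked example of the arithmetic, using a made-up ADF value since the model does not document the units it expects:

# Hypothetical ADF value chosen only to exercise the formula; the Feed
# model above does not state the units it expects for `adf`.
adf = 250.0
qm = (-0.000796 * adf) + 0.827
print(round(qm, 4))  # 0.628 -> ratio of metabolisable to gross energy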
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2000-2007 Donald N. Allingham # Copyright (C) 2008 Brian G. Matherly # Copyright (C) 2010 Gary Burton # Copyright (C) 2010 Jakim Friant # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # $Id$ """Tools/Utilities/Relationship Calculator""" #------------------------------------------------------------------------- # # Standard python modules # #------------------------------------------------------------------------- from gramps.gen.const import GRAMPS_LOCALE as glocale _ = glocale.get_translation().gettext #------------------------------------------------------------------------- # # GNOME libraries # #------------------------------------------------------------------------- from gi.repository import Gdk from gi.repository import Gtk #------------------------------------------------------------------------- # # GRAMPS modules # #------------------------------------------------------------------------- from gramps.gen.display.name import displayer as name_displayer from gramps.gui.managedwindow import ManagedWindow from gramps.gui.views.treemodels import PeopleBaseModel, PersonTreeModel from gramps.plugins.lib.libpersonview import BasePersonView from gramps.gen.relationship import get_relationship_calculator from gramps.gui.dialog import ErrorDialog from gramps.gui.plug import tool from gramps.gui.glade import Glade #------------------------------------------------------------------------- # # Constants # #------------------------------------------------------------------------- column_names = [column[0] for column in BasePersonView.COLUMNS] #------------------------------------------------------------------------- # # # #------------------------------------------------------------------------- class RelCalc(tool.Tool, ManagedWindow): def __init__(self, dbstate, uistate, options_class, name, callback=None): """ Relationship calculator class. 
""" tool.Tool.__init__(self, dbstate, options_class, name) ManagedWindow.__init__(self,uistate,[],self.__class__) #set the columns to see for data in BasePersonView.CONFIGSETTINGS: if data[0] == 'columns.rank': colord = data[1] elif data[0] == 'columns.visible': colvis = data[1] elif data[0] == 'columns.size': colsize = data[1] self.colord = [] for col, size in zip(colord, colsize): if col in colvis: self.colord.append((1, col, size)) else: self.colord.append((0, col, size)) self.dbstate = dbstate self.relationship = get_relationship_calculator() self.relationship.connect_db_signals(dbstate) self.glade = Glade() self.person = self.db.get_person_from_handle( uistate.get_active('Person')) name = '' if self.person: name = name_displayer.display(self.person) self.title = _('Relationship calculator: %(person_name)s' ) % {'person_name' : name} window = self.glade.toplevel self.titlelabel = self.glade.get_object('title') self.set_window(window, self.titlelabel, _('Relationship to %(person_name)s' ) % {'person_name' : name}, self.title) self.tree = self.glade.get_object("peopleList") self.text = self.glade.get_object("text1") self.textbuffer = Gtk.TextBuffer() self.text.set_buffer(self.textbuffer) self.model = PersonTreeModel(self.db) self.tree.set_model(self.model) self.tree.connect('key-press-event', self._key_press) self.selection = self.tree.get_selection() self.selection.set_mode(Gtk.SelectionMode.SINGLE) #keep reference of column so garbage collection works self.columns = [] for pair in self.colord: if not pair[0]: continue name = column_names[pair[1]] column = Gtk.TreeViewColumn(name, Gtk.CellRendererText(), markup=pair[1]) column.set_resizable(True) column.set_min_width(60) column.set_sizing(Gtk.TreeViewColumnSizing.GROW_ONLY) self.tree.append_column(column) #keep reference of column so garbage collection works self.columns.append(column) self.sel = self.tree.get_selection() self.changedkey = self.sel.connect('changed',self.on_apply_clicked) self.closebtn = self.glade.get_object("button5") self.closebtn.connect('clicked', self.close) if not self.person: self.window.hide() ErrorDialog(_('Active person has not been set'), _('You must select an active person for this ' 'tool to work properly.')) self.close() return self.show() def close(self, *obj): """ Close relcalc tool. Remove non-gtk connections so garbage collection can do its magic. 
""" self.relationship.disconnect_db_signals(self.dbstate) self.sel.disconnect(self.changedkey) ManagedWindow.close(self, *obj) def build_menu_names(self, obj): return (_("Relationship Calculator tool"),None) def on_apply_clicked(self, obj): model, iter_ = self.tree.get_selection().get_selected() if not iter_: return handle = model.get_handle_from_iter(iter_) other_person = self.db.get_person_from_handle(handle) if other_person is None : self.textbuffer.set_text("") return #now determine the relation, and print it out rel_strings, common_an = self.relationship.get_all_relationships( self.db, self.person, other_person) p1 = name_displayer.display(self.person) p2 = name_displayer.display(other_person) text = [] if other_person is None: pass elif self.person.handle == other_person.handle: rstr = _("%(person)s and %(active_person)s are the same person.") % { 'person': p1, 'active_person': p2 } text.append((rstr, "")) elif len(rel_strings) == 0: rstr = _("%(person)s and %(active_person)s are not related.") % { 'person': p2, 'active_person': p1 } text.append((rstr, "")) for rel_string, common in zip(rel_strings, common_an): rstr = _("%(person)s is the %(relationship)s of %(active_person)s." ) % {'person': p2, 'relationship': rel_string, 'active_person': p1 } length = len(common) if length == 1: person = self.db.get_person_from_handle(common[0]) if common[0] in [other_person.handle, self.person.handle]: commontext = '' else : name = name_displayer.display(person) commontext = " " + _("Their common ancestor is %s.") % name elif length == 2: p1c = self.db.get_person_from_handle(common[0]) p2c = self.db.get_person_from_handle(common[1]) p1str = name_displayer.display(p1c) p2str = name_displayer.display(p2c) commontext = " " + _("Their common ancestors are %(ancestor1)s and %(ancestor2)s.") % { 'ancestor1': p1str, 'ancestor2': p2str } elif length > 2: index = 0 commontext = " " + _("Their common ancestors are: ") for person_handle in common: person = self.db.get_person_from_handle(person_handle) if index: commontext += ", " commontext += name_displayer.display(person) index += 1 commontext += "." else: commontext = "" text.append((rstr, commontext)) textval = "" for val in text: textval += "%s %s\n" % (val[0], val[1]) self.textbuffer.set_text(textval) def _key_press(self, obj, event): if event.keyval in (Gdk.KEY_Return, Gdk.KEY_KP_Enter): store, paths = self.selection.get_selected_rows() if paths and len(paths[0]) == 1 : if self.tree.row_expanded(paths[0]): self.tree.collapse_row(paths[0]) else: self.tree.expand_row(paths[0], 0) return True return False #------------------------------------------------------------------------ # # # #------------------------------------------------------------------------ class RelCalcOptions(tool.ToolOptions): """ Defines options and provides handling interface. """ def __init__(self, name,person_id=None): tool.ToolOptions.__init__(self, name,person_id)
Forage/Gramps
gramps/plugins/tool/relcalc.py
Python
gpl-2.0
10,241
[ "Brian" ]
9e3d49ea7536dddc95c9a4de0479b03733cb204653a2c9fd4fb5a51f46862f66
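The length > 2 branch of on_apply_clicked in relcalc.py above assembles its comma-separated ancestor list with a manual index counter. The same sentence can be built with str.join; a minimal sketch with placeholder names (translation via _() and the Gramps name displayer are omitted):

# Placeholder names standing in for name_displayer.display(person) results.
names = ["Alice Smith", "Bob Jones", "Carol White"]
commontext = " " + "Their common ancestors are: " + ", ".join(names) + "."
print(commontext)
# ' Their common ancestors are: Alice Smith, Bob Jones, Carol White.'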
# ============================================================================ # # Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved. # www.conceptive.be / project-camelot@conceptive.be # # This file is part of the Camelot Library. # # This file may be used under the terms of the GNU General Public # License version 2.0 as published by the Free Software Foundation # and appearing in the file license.txt included in the packaging of # this file. Please review this information to ensure GNU # General Public Licensing requirements will be met. # # If you are unsure which license is appropriate for your use, please # visit www.python-camelot.com or contact project-camelot@conceptive.be # # This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE # WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. # # For use of this library in commercial applications, please contact # project-camelot@conceptive.be # # ============================================================================ from PyQt4 import QtDesigner from camelot.view.plugins import CamelotEditorPlugin class DateEditorPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin, CamelotEditorPlugin): def __init__(self, parent = None): QtDesigner.QPyDesignerCustomWidgetPlugin.__init__(self) from camelot.view.controls.editors import DateEditor CamelotEditorPlugin.__init__(self) self._widget = DateEditor
kurtraschke/camelot
camelot/view/plugins/dateeditorplugin.py
Python
gpl-2.0
1,490
[ "VisIt" ]
9bd6d219f639060155aa743d14e18d5bf7574be6ddf406ddf331664999310cc1
""" TESTS is a dict with all you tests. Keys for this will be categories' names. Each test is dict with "input" -- input data for user function "answer" -- your right answer "explanation" -- not necessary key, it's using for additional info in animation. """ WORDS = {'act', 'age', 'air', 'arm', 'art', 'ask', 'bad', 'bag', 'bar', 'bat', 'bed', 'bet', 'bid', 'big', 'bit', 'box', 'boy', 'bug', 'bus', 'buy', 'can', 'cap', 'car', 'cat', 'cow', 'cry', 'cup', 'cut', 'dad', 'day', 'dig', 'dog', 'dot', 'due', 'ear', 'eat', 'egg', 'end', 'eye', 'fan', 'fat', 'fee', 'few', 'fix', 'fly', 'fun', 'gap', 'gas', 'god', 'guy', 'hat', 'hit', 'ice', 'job', 'key', 'kid', 'lab', 'law', 'lay', 'leg', 'let', 'lie', 'lip', 'log', 'low', 'man', 'map', 'mix', 'mom', 'mud', 'net', 'oil', 'one', 'pay', 'pen', 'pie', 'pin', 'pop', 'pot', 'put', 'raw', 'red', 'rip', 'row', 'rub', 'run', 'sad', 'sea', 'set', 'sex', 'she', 'sir', 'sky', 'son', 'sun', 'tap', 'tax', 'tea', 'tie', 'tip', 'toe', 'top', 'try', 'two', 'use', 'war', 'way', 'web', 'win', 'you', 'area', 'army', 'baby', 'back', 'bake', 'ball', 'band', 'bank', 'base', 'bath', 'bear', 'beat', 'beer', 'bell', 'belt', 'bend', 'bike', 'bill', 'bird', 'bite', 'blow', 'blue', 'boat', 'body', 'bone', 'book', 'boot', 'boss', 'bowl', 'burn', 'cake', 'call', 'calm', 'camp', 'card', 'care', 'case', 'cash', 'cell', 'chip', 'city', 'club', 'clue', 'coat', 'code', 'cold', 'cook', 'copy', 'cost', 'crew', 'dare', 'dark', 'data', 'date', 'dead', 'deal', 'dear', 'debt', 'deep', 'desk', 'diet', 'dirt', 'dish', 'disk', 'door', 'drag', 'draw', 'drop', 'dump', 'dust', 'duty', 'ease', 'east', 'edge', 'exam', 'exit', 'face', 'fact', 'fall', 'farm', 'fear', 'feed', 'feel', 'file', 'fill', 'film', 'fire', 'fish', 'flow', 'fold', 'food', 'foot', 'form', 'fuel', 'gain', 'game', 'gate', 'gear', 'gene', 'gift', 'girl', 'give', 'glad', 'goal', 'gold', 'golf', 'good', 'grab', 'hair', 'half', 'hall', 'hand', 'hang', 'harm', 'hate', 'head', 'heat', 'hell', 'help', 'hide', 'high', 'hire', 'hold', 'hole', 'home', 'hook', 'hope', 'host', 'hour', 'hunt', 'hurt', 'idea', 'iron', 'item', 'join', 'joke', 'jump', 'jury', 'keep', 'kick', 'kill', 'kind', 'king', 'kiss', 'knee', 'lack', 'lady', 'lake', 'land', 'lead', 'life', 'lift', 'line', 'link', 'list', 'load', 'loan', 'lock', 'long', 'look', 'loss', 'love', 'luck', 'mail', 'main', 'make', 'male', 'mall', 'many', 'mark', 'mate', 'math', 'meal', 'meat', 'meet', 'menu', 'mess', 'milk', 'mind', 'mine', 'miss', 'mode', 'mood', 'most', 'move', 'nail', 'name', 'neat', 'neck', 'news', 'nose', 'note', 'oven', 'pace', 'pack', 'page', 'pain', 'pair', 'park', 'part', 'pass', 'past', 'path', 'peak', 'pick', 'pipe', 'plan', 'play', 'poem', 'poet', 'pool', 'post', 'pull', 'push', 'quit', 'race', 'rain', 'rate', 'read', 'rent', 'rest', 'rice', 'rich', 'ride', 'ring', 'rise', 'risk', 'road', 'rock', 'role', 'roll', 'roof', 'room', 'rope', 'ruin', 'rule', 'rush', 'safe', 'sail', 'sale', 'salt', 'sand', 'save', 'seat', 'self', 'sell', 'ship', 'shoe', 'shop', 'shot', 'show', 'sick', 'side', 'sign', 'sing', 'sink', 'site', 'size', 'skin', 'slip', 'snow', 'sock', 'soft', 'soil', 'song', 'sort', 'soup', 'spot', 'star', 'stay', 'step', 'stop', 'suck', 'suit', 'swim', 'tale', 'talk', 'tank', 'task', 'team', 'tear', 'tell', 'term', 'test', 'text', 'till', 'time', 'tone', 'tool', 'tour', 'town', 'tree', 'trip', 'tune', 'turn', 'type', 'unit', 'user', 'vast', 'verb', 'verb', 'verb', 'view', 'wait', 'wake', 'walk', 'wall', 'wash', 'wave', 'wear', 'week', 'west', 'wife', 'will', 
'wind', 'wine', 'wing', 'wish', 'wood', 'word', 'work', 'wrap', 'yard', 'year', 'zone', 'abuse', 'actor', 'adult', 'agent', 'alarm', 'anger', 'angle', 'apple', 'aside', 'award', 'basis', 'beach', 'being', 'bench', 'birth', 'black', 'blame', 'blank', 'blind', 'block', 'blood', 'board', 'bonus', 'brain', 'brave', 'bread', 'break', 'brick', 'brief', 'broad', 'brown', 'brush', 'buddy', 'bunch', 'buyer', 'cable', 'candy', 'carry', 'catch', 'cause', 'chain', 'chair', 'chart', 'check', 'cheek', 'chest', 'child', 'claim', 'class', 'clerk', 'click', 'clock', 'cloud', 'coach', 'coast', 'count', 'court', 'cover', 'crack', 'craft', 'crash', 'crazy', 'cream', 'cross', 'curve', 'cycle', 'dance', 'death', 'delay', 'depth', 'devil', 'doubt', 'draft', 'drama', 'dream', 'dress', 'drink', 'drive', 'drunk', 'earth', 'entry', 'equal', 'error', 'essay', 'event', 'fault', 'field', 'fight', 'final', 'floor', 'focus', 'force', 'frame', 'front', 'fruit', 'funny', 'glass', 'glove', 'grade', 'grand', 'grass', 'great', 'green', 'group', 'guard', 'guess', 'guest', 'guide', 'habit', 'heart', 'heavy', 'hello', 'honey', 'horse', 'hotel', 'house', 'human', 'hurry', 'ideal', 'image', 'issue', 'joint', 'judge', 'juice', 'knife', 'laugh', 'layer', 'leave', 'level', 'light', 'limit', 'local', 'lunch', 'major', 'march', 'match', 'maybe', 'media', 'metal', 'might', 'minor', 'model', 'money', 'month', 'motor', 'mouse', 'mouth', 'movie', 'music', 'nasty', 'nerve', 'night', 'noise', 'north', 'novel', 'nurse', 'offer', 'order', 'other', 'owner', 'paint', 'panic', 'paper', 'party', 'pause', 'peace', 'phase', 'phone', 'photo', 'piano', 'piece', 'pitch', 'pizza', 'place', 'plane', 'plant', 'plate', 'point', 'pound', 'power', 'press', 'price', 'pride', 'print', 'prior', 'prize', 'proof', 'punch', 'queen', 'quiet', 'quote', 'radio', 'raise', 'range', 'ratio', 'reach', 'reply', 'river', 'rough', 'round', 'royal', 'salad', 'scale', 'scene', 'score', 'screw', 'sense', 'serve', 'shake', 'shame', 'shape', 'share', 'shift', 'shine', 'shirt', 'shock', 'shoot', 'silly', 'skill', 'skirt', 'sleep', 'slice', 'slide', 'smile', 'smoke', 'solid', 'sound', 'south', 'space', 'spare', 'speed', 'spell', 'spend', 'spite', 'split', 'sport', 'spray', 'staff', 'stage', 'stand', 'start', 'state', 'steak', 'steal', 'stick', 'still', 'stock', 'store', 'storm', 'story', 'strip', 'study', 'stuff', 'style', 'sugar', 'sweet', 'swing', 'table', 'taste', 'teach', 'theme', 'thing', 'title', 'today', 'tooth', 'topic', 'total', 'touch', 'tough', 'towel', 'tower', 'track', 'trade', 'train', 'trash', 'treat', 'trick', 'truck', 'trust', 'truth', 'twist', 'uncle', 'union', 'upper', 'usual', 'value', 'video', 'virus', 'visit', 'voice', 'watch', 'water', 'weird', 'wheel', 'while', 'white', 'whole', 'woman', 'world', 'worry', 'worth', 'young', 'youth', 'abroad', 'access', 'action', 'active', 'advice', 'affair', 'affect', 'agency', 'amount', 'animal', 'annual', 'answer', 'appeal', 'aspect', 'assist', 'attack', 'author', 'basket', 'battle', 'beyond', 'bitter', 'border', 'boring', 'bother', 'bottle', 'bottom', 'branch', 'breast', 'breath', 'bridge', 'budget', 'button', 'camera', 'cancel', 'cancer', 'candle', 'career', 'carpet', 'chance', 'change', 'charge', 'choice', 'church', 'client', 'closet', 'coffee', 'collar', 'common', 'cookie', 'corner', 'county', 'couple', 'course', 'cousin', 'credit', 'damage', 'dealer', 'debate', 'degree', 'demand', 'design', 'desire', 'detail', 'device', 'dinner', 'divide', 'doctor', 'double', 'drawer', 'driver', 'editor', 'effect', 'effort', 'employ', 
'energy', 'engine', 'escape', 'estate', 'excuse', 'expert', 'extent', 'factor', 'family', 'farmer', 'father', 'female', 'figure', 'finger', 'finish', 'flight', 'flower', 'formal', 'friend', 'future', 'garage', 'garden', 'gather', 'ground', 'growth', 'guitar', 'handle', 'health', 'height', 'horror', 'impact', 'income', 'injury', 'insect', 'inside', 'invite', 'island', 'jacket', 'junior', 'ladder', 'lawyer', 'leader', 'league', 'length', 'lesson', 'letter', 'listen', 'living', 'manner', 'market', 'master', 'matter', 'medium', 'member', 'memory', 'method', 'middle', 'minute', 'mirror', 'mobile', 'moment', 'mother', 'muscle', 'nation', 'native', 'nature', 'nobody', 'normal', 'notice', 'number', 'object', 'office', 'option', 'orange', 'parent', 'people', 'period', 'permit', 'person', 'phrase', 'player', 'plenty', 'poetry', 'police', 'policy', 'potato', 'priest', 'profit', 'prompt', 'public', 'purple', 'reason', 'recipe', 'record', 'refuse', 'region', 'regret', 'relief', 'remote', 'remove', 'repair', 'repeat', 'report', 'resist', 'resort', 'result', 'return', 'reveal', 'review', 'reward', 'safety', 'salary', 'sample', 'scheme', 'school', 'screen', 'script', 'search', 'season', 'second', 'secret', 'sector', 'senior', 'series', 'shower', 'signal', 'silver', 'simple', 'singer', 'single', 'sister', 'source', 'speech', 'spirit', 'spread', 'spring', 'square', 'stable', 'status', 'strain', 'street', 'stress', 'strike', 'string', 'stroke', 'studio', 'stupid', 'summer', 'survey', 'switch', 'system', 'tackle', 'target', 'tennis', 'thanks', 'theory', 'throat', 'ticket', 'tongue', 'travel', 'unique', 'visual', 'volume', 'wealth', 'weight', 'window', 'winner', 'winter', 'wonder', 'worker', 'writer', 'yellow', 'ability', 'account', 'address', 'advance', 'airline', 'airport', 'alcohol', 'analyst', 'anxiety', 'anybody', 'arrival', 'article', 'article', 'attempt', 'average', 'balance', 'bedroom', 'benefit', 'bicycle', 'brother', 'cabinet', 'capital', 'channel', 'chapter', 'charity', 'chicken', 'classic', 'climate', 'clothes', 'college', 'combine', 'comfort', 'command', 'comment', 'company', 'complex', 'concept', 'concern', 'concert', 'consist', 'contact', 'contest', 'context', 'control', 'convert', 'counter', 'country', 'courage', 'culture', 'current', 'deposit', 'diamond', 'disease', 'display', 'drawing', 'economy', 'emotion', 'evening', 'example', 'extreme', 'failure', 'feature', 'feeling', 'finance', 'finding', 'fishing', 'forever', 'fortune', 'freedom', 'funeral', 'garbage', 'general', 'grocery', 'hearing', 'highway', 'history', 'holiday', 'housing', 'husband', 'illegal', 'impress', 'initial', 'kitchen', 'leading', 'leather', 'lecture', 'library', 'machine', 'manager', 'maximum', 'meaning', 'meeting', 'mention', 'message', 'minimum', 'mission', 'mistake', 'mixture', 'monitor', 'morning', 'natural', 'network', 'nothing', 'officer', 'opening', 'opinion', 'outcome', 'outside', 'package', 'parking', 'partner', 'passage', 'passion', 'patient', 'pattern', 'payment', 'penalty', 'pension', 'physics', 'picture', 'plastic', 'present', 'primary', 'private', 'problem', 'process', 'produce', 'product', 'profile', 'program', 'project', 'promise', 'purpose', 'quality', 'quarter', 'reading', 'reality', 'recover', 'regular', 'release', 'request', 'reserve', 'resolve', 'respect', 'respond', 'revenue', 'routine', 'savings', 'science', 'scratch', 'section', 'service', 'session', 'setting', 'shelter', 'society', 'speaker', 'special', 'station', 'stomach', 'storage', 'stretch', 'student', 'subject', 'success', 'support', 
'surgery', 'suspect', 'teacher', 'tension', 'thought', 'tonight', 'tourist', 'traffic', 'trainer', 'trouble', 'variety', 'vehicle', 'version', 'village', 'warning', 'weather', 'wedding', 'weekend', 'welcome', 'western', 'whereas', 'witness', 'working', 'writing', 'accident', 'activity', 'addition', 'ambition', 'analysis', 'anything', 'anywhere', 'argument', 'attitude', 'audience', 'baseball', 'bathroom', 'birthday', 'building', 'business', 'calendar', 'campaign', 'category', 'champion', 'chemical', 'computer', 'conflict', 'constant', 'contract', 'creative', 'currency', 'customer', 'database', 'daughter', 'decision', 'delivery', 'designer', 'director', 'disaster', 'discount', 'distance', 'district', 'document', 'election', 'elevator', 'emphasis', 'employee', 'employer', 'engineer', 'entrance', 'estimate', 'evidence', 'exchange', 'exercise', 'external', 'familiar', 'feedback', 'football', 'function', 'guidance', 'homework', 'hospital', 'incident', 'increase', 'industry', 'instance', 'interest', 'internal', 'internet', 'judgment', 'language', 'location', 'magazine', 'marriage', 'material', 'medicine', 'midnight', 'mortgage', 'mountain', 'national', 'negative', 'occasion', 'official', 'opposite', 'ordinary', 'original', 'painting', 'patience', 'personal', 'physical', 'platform', 'pleasure', 'politics', 'position', 'positive', 'possible', 'practice', 'presence', 'pressure', 'priority', 'progress', 'property', 'proposal', 'purchase', 'quantity', 'question', 'reaction', 'register', 'relation', 'relative', 'republic', 'research', 'resident', 'resource', 'response', 'sandwich', 'schedule', 'security', 'sentence', 'shopping', 'shoulder', 'software', 'solution', 'specific', 'standard', 'stranger', 'strategy', 'strength', 'struggle', 'surprise', 'surround', 'swimming', 'sympathy', 'teaching', 'tomorrow', 'training', 'upstairs', 'vacation', 'valuable', 'weakness', 'advantage', 'afternoon', 'agreement', 'apartment', 'assistant', 'associate', 'attention', 'awareness', 'beautiful', 'beginning', 'boyfriend', 'breakfast', 'brilliant', 'candidate', 'challenge', 'character', 'chemistry', 'childhood', 'chocolate', 'cigarette', 'classroom', 'committee', 'community', 'complaint', 'condition', 'confusion', 'criticism', 'departure', 'dependent', 'dimension', 'direction', 'economics', 'education', 'effective', 'emergency', 'equipment', 'extension', 'following', 'guarantee', 'highlight', 'historian', 'implement', 'inflation', 'influence', 'inspector', 'insurance', 'intention', 'interview', 'knowledge', 'landscape', 'marketing', 'necessary', 'newspaper', 'objective', 'operation', 'passenger', 'pollution', 'potential', 'president', 'principle', 'procedure', 'professor', 'promotion', 'reception', 'recording', 'reference', 'secretary', 'selection', 'sensitive', 'signature', 'situation', 'somewhere', 'spiritual', 'statement', 'structure', 'substance', 'telephone', 'temporary', 'tradition', 'variation', 'vegetable', 'yesterday', 'appearance', 'assignment', 'assistance', 'assumption', 'atmosphere', 'background', 'collection', 'commercial', 'commission', 'comparison', 'conclusion', 'conference', 'confidence', 'connection', 'definition', 'department', 'depression', 'difference', 'difficulty', 'discipline', 'discussion', 'efficiency', 'employment', 'enthusiasm', 'equivalent', 'excitement', 'experience', 'expression', 'foundation', 'friendship', 'girlfriend', 'government', 'importance', 'impression', 'indication', 'individual', 'inevitable', 'initiative', 'inspection', 'investment', 'leadership', 'literature', 'management', 
'membership', 'obligation', 'particular', 'percentage', 'perception', 'permission', 'philosophy', 'population', 'possession', 'preference', 'profession', 'protection', 'psychology', 'reflection', 'reputation', 'resolution', 'restaurant', 'revolution', 'specialist', 'suggestion', 'technology', 'television', 'transition', 'university', 'advertising', 'alternative', 'application', 'appointment', 'association', 'celebration', 'combination', 'comfortable', 'competition', 'concentrate', 'consequence', 'description', 'development', 'engineering', 'environment', 'examination', 'explanation', 'grandfather', 'grandmother', 'imagination', 'improvement', 'independent', 'information', 'instruction', 'interaction', 'maintenance', 'measurement', 'negotiation', 'opportunity', 'performance', 'personality', 'perspective', 'possibility', 'preparation', 'recognition', 'replacement', 'requirement', 'supermarket', 'temperature', 'championship', 'construction', 'contribution', 'conversation', 'distribution', 'independence', 'introduction', 'manufacturer', 'organization', 'presentation', 'professional', 'refrigerator', 'relationship', 'satisfaction', 'significance', 'communication', 'consideration', 'entertainment', 'establishment', 'international', 'understanding', 'administration', 'recommendation', 'representative', 'responsibility', 'transportation'} TESTS = { "Basics": [ ], "Extra": [ ] } BASIC_TESTS = [ ['.XXX.', '...X.', '.X.X.', '.....'], ['X.XX', '....', 'X.XX', 'X...', 'XXX.', '....', 'XXX.'], ['...XXXXXX', '.XXX.X...', '.....X.XX', 'XXXX.X...', 'XX...X.XX', 'XX.XXX.X.', 'X......X.', 'XX.X.XXX.', 'XXXX.....'], ] EXTRA_TEST = [ ['XXXX.....X', 'XXXX.XXX.X', 'X....XXX.X', 'XX.XXX.XXX', 'X.......XX', 'XX.X.X.XXX', '..........', '.XXX.X.X.X', '...XXX.X.X', '.XXXXXXX.X'], ['XX.X.XXXX', 'X....XXXX', 'XX.X.X.XX', '.........', 'XXXX.X.X.', '.X.....X.', '.X.X.XXXX', '...X....X', 'XX.XXXXXX'], ['...XXXXX.', 'XX.X.....', '...XXXXX.', 'XX.X.XXX.', 'X......X.', 'XXXX.X.X.', 'XXX......', 'XXXXXX.XX'], ['XXX.XXX.XX', 'XXX......X', 'X.X.XXX.XX', '....XXX...', 'X.X.X.XXXX', 'X.X....XXX', 'X.XXX.XXX.', 'XXXX......', 'XXXXXXX.X.', 'XXXXXXX...', 'XXXXXXXXX.'], ['XXX.X.XXX', 'XX.......', 'XXX.X.XXX', 'X........', 'X.XXXXX.X', 'X....XX.X', 'XXX.XXXXX', 'XX......X', 'XXX.X.X.X', '....X...X', 'XXXXXXX.X'], ] for t in BASIC_TESTS: TESTS["Basics"].append({"input": [t, list(WORDS)], "answer": t}) for t in EXTRA_TEST: TESTS["Extra"].append({"input": [t, list(WORDS)], "answer": t})
Bryukh-Checkio-Tasks/checkio-mission-crossword-solver
verification/tests.py
Python
mit
18,045
[ "VisIt" ]
4872706271c7f24231de294db343e1a0526d4ee0d9eaf03fed3d1aab13f60c8d
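After the two loops at the bottom of tests.py above run, every test entry pairs a grid with the word list, and the grid doubles as the expected answer. A small sketch of what the resulting structure looks like (it assumes execution in the same module as the code above):

# Each entry's input is [grid, word_list]; the grid is also the answer.
entry = TESTS["Basics"][0]
assert entry["answer"] == ['.XXX.', '...X.', '.X.X.', '.....']
grid, words = entry["input"]
assert grid == entry["answer"] and isinstance(words, list)
print(len(TESTS["Basics"]), len(TESTS["Extra"]))  # 3 5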
import director
import random
from constants import *

OUTSIDE_CHANNEL = "left"
INSIDE_CHANNEL = "right"

switch_sound = director.load_sound("scenes/p2/audio/switch.ogg")
ghost_story_sound = director.load_sound("scenes/p2/audio/ghost_story.ogg")


def reset():
    director.set_on(WELCOME_SIGN_RELAY)
    director.set_on(CHAIR_SPOT_RELAY)
    director.set_off(GREEN_LIGHT_RELAY)
    director.set_off(RED_LIGHT_RELAY)
    director.add_trigger(PROXIMITY_TRIGGER, welcome, (), bouncetime=2)
    director.add_trigger(DOOR_TRIGGER, enter, (), bouncetime=2)
    director.add_trigger(CHAIR_TRIGGER, play, (), bouncetime=2)
    director.play_sound(ghost_story_sound, loops=-1, channel=INSIDE_CHANNEL)


def welcome():
    director.play_sound("scenes/p2/audio/welcome.ogg", channel=OUTSIDE_CHANNEL)


def enter():
    # Stop the idle sounds
    ghost_story_sound.stop()
    director.play_sound("scenes/p2/audio/take_a_seat.ogg", channel=INSIDE_CHANNEL)
    director.set_off(WELCOME_SIGN_RELAY, 2)
    director.remove_trigger(DOOR_TRIGGER)


def play():
    # Stop the idle sounds
    ghost_story_sound.stop()

    # Siren
    director.set_on(SIREN_RELAY, 0, 13)
    director.set_on(CLAXON_RELAY, 0, 0.5)
    director.set_on(CLAXON_RELAY, 1, 0.5)
    director.set_on(CLAXON_RELAY, 2, 0.5)
    director.play_sound("scenes/p2/audio/siren.ogg", channel=INSIDE_CHANNEL)

    # Green light and hum sound effect
    director.play_sound(switch_sound, 5, channel=INSIDE_CHANNEL)
    director.set_on(GREEN_LIGHT_RELAY, 5.5, 7.5)
    director.play_sound("scenes/p2/audio/switch_hum.ogg", 5, loops=-1, maxtime=4, channel=INSIDE_CHANNEL)

    # Red light
    director.play_sound(switch_sound, 6, channel=INSIDE_CHANNEL)
    director.set_on(RED_LIGHT_RELAY, 6.5, 6.5)

    # fire the chair
    director.set_on(CHAIR_VIBRATE_RELAY, 6.8, 5)
    director.play_sound("scenes/p2/audio/electric.ogg", 6, maxtime=6.5, channel=INSIDE_CHANNEL)

    # Flicker chair light
    director.set_off(CHAIR_SPOT_RELAY, 6.8)
    director.set_on(CHAIR_SPOT_RELAY, 6.9, 0.2)
    director.set_on(CHAIR_SPOT_RELAY, 7.9, 0.2)
    director.set_on(CHAIR_SPOT_RELAY, 8.3, 0.1)
    director.set_on(CHAIR_SPOT_RELAY, 8.7, 0.1)
    director.set_on(CHAIR_SPOT_RELAY, 8.9, 0.3)
    director.set_on(CHAIR_SPOT_RELAY, 9.5, 0.1)
    director.set_on(CHAIR_SPOT_RELAY, 9.7, 0.1)
    director.set_on(CHAIR_SPOT_RELAY, 10.4, 0.3)
    director.set_on(CHAIR_SPOT_RELAY, 11, 0.1)
    director.set_on(CHAIR_SPOT_RELAY, 11.3, 0.4)
    director.set_on(CHAIR_SPOT_RELAY, 12.2, 0.3)
    director.set_on(CHAIR_SPOT_RELAY, 12.8)

    # Air blast
    director.set_on(AIR_BLAST_RELAY, 6.9, 2)
    director.set_on(AIR_BLAST_RELAY, 10, 1)

    # End sound
    director.play_sound(switch_sound, 12.5, channel=INSIDE_CHANNEL)

    # Reset
    director.schedule(120, reset, ())
    director.remove_trigger(CHAIR_TRIGGER)


# init triggers etc
print('Load the chair')
director.add_trigger(RESET_TRIGGER, reset, (), bouncetime=2)
reset()
smdvdsn/hauntedpi
scenes/p2/scene_chair.py
Python
mit
2,794
[ "BLAST" ]
9e9f569fc5ca9799ffd55c7df8cee28f940822fa179bdad33e964bac20c9cd22
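The flicker sequence in scene_chair.py's play() is easiest to audit as a sorted on/off timeline. The sketch below mimics director.set_on(relay, delay, duration) by recording events instead of driving relays; the director module is project-local, so this stand-in's semantics are an assumption inferred from the calls above:

# Stand-in for director.set_on(): record (time, state) pairs instead of
# switching GPIO. Timings copied from the chair-spot flicker above.
events = []

def set_on(relay, delay=0.0, duration=None):
    events.append((delay, "on"))
    if duration is not None:
        events.append((delay + duration, "off"))

set_on("CHAIR_SPOT", 6.9, 0.2)
set_on("CHAIR_SPOT", 7.9, 0.2)
set_on("CHAIR_SPOT", 8.3, 0.1)
for t, state in sorted(events):
    print("%.1fs %s" % (t, state))
# 6.9s on / 7.1s off / 7.9s on / 8.1s off / 8.3s on / 8.4s off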
''' Tests for pyleoclim.core.ui.PSD Naming rules: 1. class: Test{filename}{Class}{method} with appropriate camel case 2. function: test_{method}_t{test_id} Notes on how to test: 0. Make sure [pytest](https://docs.pytest.org) has been installed: `pip install pytest` 1. execute `pytest {directory_path}` in terminal to perform all tests in all testing files inside the specified directory 2. execute `pytest {file_path}` in terminal to perform all tests in the specified file 3. execute `pytest {file_path}::{TestClass}::{test_method}` in terminal to perform a specific test class/method inside the specified file 4. after `pip install pytest-xdist`, one may execute "pytest -n 4" to test in parallel with number of workers specified by `-n` 5. for more details, see https://docs.pytest.org/en/stable/usage.html ''' import numpy as np import pandas as pd from numpy.testing import assert_array_equal from pandas.testing import assert_frame_equal import pytest import pyleoclim as pyleo from pyleoclim.utils.tsmodel import ( ar1_sim, colored_noise, ) # a collection of useful functions def gen_normal(loc=0, scale=1, nt=100): ''' Generate random data with a Gaussian distribution ''' t = np.arange(nt) v = np.random.normal(loc=loc, scale=scale, size=nt) return t, v def gen_colored_noise(alpha=1, nt=100, f0=None, m=None, seed=None): ''' Generate colored noise ''' t = np.arange(nt) v = colored_noise(alpha=alpha, t=t, f0=f0, m=m, seed=seed) return t, v # Tests below class TestUiPsdPlot: ''' Tests for PSD.plot() ''' def test_plot_t0(self): ''' Test PSD.plot() with default parameters ''' alpha = 1 t, v = gen_colored_noise(nt=500, alpha=alpha) ts = pyleo.Series(time=t, value=v) psd = ts.spectral(method='mtm') fig, ax = psd.plot(mute=True) class TestUiPsdSignifTest: ''' Tests for PSD.signif_test() ''' def test_signif_test_t0(self): ''' Test PSD.signif_test() with default parameters ''' alpha = 1 t, v = gen_colored_noise(nt=500, alpha=alpha) ts = pyleo.Series(time=t, value=v) psd = ts.spectral(method='mtm') psd_signif = psd.signif_test(number=10)
LinkedEarth/Pyleoclim_util
pyleoclim/tests/test_ui_PSD.py
Python
gpl-3.0
2,249
[ "Gaussian" ]
02eabcbb6dcd39cae290178659bd43558ca3f7b85e29b7b9c4a48028268f5a9a
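gen_colored_noise in test_ui_PSD.py above delegates to pyleoclim's colored_noise utility. For intuition only, here is an independent 1/f**alpha generator using frequency-domain shaping; this is a common construction, not pyleoclim's actual implementation:

import numpy as np

def simple_colored_noise(alpha=1.0, nt=500, seed=42):
    """Shape white noise so its power spectrum falls off roughly as f**-alpha."""
    rng = np.random.default_rng(seed)
    white = rng.standard_normal(nt)
    spec = np.fft.rfft(white)
    freqs = np.fft.rfftfreq(nt)
    freqs[0] = freqs[1]            # avoid dividing by zero at DC
    spec *= freqs ** (-alpha / 2)  # power ~ amplitude**2 ~ f**-alpha
    v = np.fft.irfft(spec, n=nt)
    return (v - v.mean()) / v.std()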
"""Additional dataset classes.""" from __future__ import (division, print_function, ) from collections import OrderedDict from scipy.stats import multivariate_normal import numpy as np import numpy.random as npr from fuel import config from fuel.datasets import H5PYDataset, IndexableDataset from fuel.transformers.defaults import uint8_pixels_to_floatX from fuel.utils import find_in_data_path from ali.utils import as_array class TinyILSVRC2012(H5PYDataset): """The Tiny ILSVRC2012 Dataset. Parameters ---------- which_sets : tuple of str Which split to load. Valid values are 'train' (1,281,167 examples) 'valid' (50,000 examples), and 'test' (100,000 examples). """ filename = 'ilsvrc2012_tiny.hdf5' default_transformers = uint8_pixels_to_floatX(('features',)) def __init__(self, which_sets, **kwargs): kwargs.setdefault('load_in_memory', False) super(TinyILSVRC2012, self).__init__( file_or_path=find_in_data_path(self.filename), which_sets=which_sets, **kwargs) class GaussianMixture(IndexableDataset): """ Toy dataset containing points sampled from a gaussian mixture distribution. The dataset contains 3 sources: * features * label * densities """ def __init__(self, num_examples, means=None, variances=None, priors=None, **kwargs): rng = kwargs.pop('rng', None) if rng is None: seed = kwargs.pop('seed', config.default_seed) rng = np.random.RandomState(seed) gaussian_mixture = GaussianMixtureDistribution(means=means, variances=variances, priors=priors, rng=rng) self.means = gaussian_mixture.means self.variances = gaussian_mixture.variances self.priors = gaussian_mixture.priors features, labels = gaussian_mixture.sample(nsamples=num_examples) densities = gaussian_mixture.pdf(x=features) data = OrderedDict([ ('features', features), ('label', labels), ('density', densities) ]) super(GaussianMixture, self).__init__(data, **kwargs) class GaussianMixtureDistribution(object): """ Gaussian Mixture Distribution Parameters ---------- means : tuple of ndarray. Specifies the means for the gaussian components. variances : tuple of ndarray. Specifies the variances for the gaussian components. priors : tuple of ndarray Specifies the prior distribution of the components. """ def __init__(self, means=None, variances=None, priors=None, rng=None, seed=None): if means is None: means = map(lambda x: 10.0 * as_array(x), [[0, 0], [1, 1], [-1, -1], [1, -1], [-1, 1]]) # Number of components self.ncomponents = len(means) self.dim = means[0].shape[0] self.means = means # If prior is not specified let prior be flat. 
if priors is None: priors = [1.0/self.ncomponents for _ in range(self.ncomponents)] self.priors = priors # If variances are not specified let variances be identity if variances is None: variances = [np.eye(self.dim) for _ in range(self.ncomponents)] self.variances = variances assert len(means) == len(variances), "Mean variances mismatch" assert len(variances) == len(priors), "prior mismatch" if rng is None: rng = npr.RandomState(seed=seed) self.rng = rng def _sample_prior(self, nsamples): return self.rng.choice(a=self.ncomponents, size=(nsamples, ), replace=True, p=self.priors) def sample(self, nsamples): # Sampling priors samples = [] fathers = self._sample_prior(nsamples=nsamples).tolist() for father in fathers: samples.append(self._sample_gaussian(self.means[father], self.variances[father])) return as_array(samples), as_array(fathers) def _sample_gaussian(self, mean, variance): # sampling unit gaussians epsilons = self.rng.normal(size=(self.dim, )) return mean + np.linalg.cholesky(variance).dot(epsilons) def _gaussian_pdf(self, x, mean, variance): return multivariate_normal.pdf(x, mean=mean, cov=variance) def pdf(self, x): "Evaluates the the probability density function at the given point x" pdfs = map(lambda m, v, p: p * self._gaussian_pdf(x, m, v), self.means, self.variances, self.priors) return reduce(lambda x, y: x + y, pdfs, 0.0) if __name__ == '__main__': means = map(lambda x: as_array(x), [[0, 0], [1, 1], [-1, -1], [1, -1], [-1, 1]]) std = 0.01 variances = [np.eye(2) * std for _ in means] priors = [1.0/len(means) for _ in means] gaussian_mixture = GaussianMixtureDistribution(means=means, variances=variances, priors=priors) gmdset = GaussianMixture(1000, means, variances, priors, sources=('features', ))
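
# --- Consistency-check sketch (illustrative, not part of the original
# module; assumes the Python 2 semantics used above, i.e. list-returning
# `map` and the builtin `reduce`). With a single component the mixture
# density must reduce to a plain multivariate normal, which gives a cheap
# sanity check on pdf().
def _pdf_sanity_check():
    dist = GaussianMixtureDistribution(means=[as_array([0, 0])],
                                       variances=[np.eye(2)],
                                       priors=[1.0],
                                       seed=0)
    # Draw a few points, then compare the mixture density against scipy's
    # density for the single component.
    points, _ = dist.sample(nsamples=4)
    expected = multivariate_normal.pdf(points, mean=np.zeros(2), cov=np.eye(2))
    assert np.allclose(dist.pdf(points), expected)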
IshmaelBelghazi/ALI
ali/datasets.py
Python
mit
5,781
[ "Gaussian" ]
1c2fa22ad5bbef6d190fc130092d6546faba74875dff9dedcc9ad37a2085b252
from django.db import models

from edc_constants.choices import HIV_RESULT


class Subject(models.Model):

    subject_identifier = models.CharField(max_length=25, unique=True)

    class Meta:
        app_label = 'hiv_status'


class Visit(models.Model):

    subject = models.ForeignKey(Subject)
    visit_datetime = models.DateTimeField()
    visit_code = models.CharField(max_length=10)
    encounter = models.IntegerField()

    def __str__(self):
        return self.visit_datetime.strftime('%Y-%m-%d')

    class Meta:
        app_label = 'hiv_status'
        unique_together = (('subject', 'visit_datetime'), ('visit_code', 'encounter'))
        ordering = ('-visit_datetime', 'visit_code', 'encounter')
        get_latest_by = 'visit_datetime'


class HivResult(models.Model):
    """A model completed by the user to record the result of a test run "today"."""

    visit = models.ForeignKey(Visit)

    result_value = models.CharField(
        verbose_name="Today\'s HIV test result",
        max_length=50,
        choices=HIV_RESULT,
        help_text="If participant declined HIV testing, please select a reason below.",
    )

    result_datetime = models.DateTimeField(
        verbose_name="Today\'s HIV test result date and time",
        null=True,
        blank=True,
    )

    why_not_tested = models.CharField(
        verbose_name=("What was the main reason why you did not want HIV testing"
                      " as part of today's visit?"),
        max_length=65,
        null=True,
        blank=True,
        help_text="Note: Only asked of individuals declining HIV testing during this visit.",
    )

    class Meta:
        app_label = 'hiv_status'
        get_latest_by = 'result_datetime'


class HivStatusReview(models.Model):

    visit = models.ForeignKey(Visit)
    report_datetime = models.DateTimeField(null=True)
    documented_result = models.CharField(max_length=10, null=True)
    documented_result_date = models.DateField(null=True)
    indirect_documentation = models.CharField(max_length=10, null=True)
    indirect_documentation_date = models.DateField(null=True)
    verbal_result = models.CharField(max_length=10, null=True)

    class Meta:
        app_label = 'hiv_status'
        get_latest_by = 'report_datetime'
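
# --- Usage sketch (illustrative helper, not part of the original app).
# The `get_latest_by` options on the Meta classes above let callers use
# .latest() without an explicit order_by:
def latest_result_for(subject):
    """Return the most recent HivResult recorded for a subject, or None."""
    try:
        # Traverses HivResult -> Visit -> Subject; .latest() orders by
        # result_datetime, per Meta.get_latest_by.
        return HivResult.objects.filter(visit__subject=subject).latest()
    except HivResult.DoesNotExist:
        return None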
botswana-harvard/hiv-status
hiv_status/models.py
Python
gpl-2.0
2,277
[ "VisIt" ]
27ee6de4d9ee4fcdd61489b91be878d8da2e23b8047b16ff58c5722c57260ca0
#!/usr/bin/python """Test of navigation to same page links.""" from macaroon.playback import * import utils sequence = MacroSequence() sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("<Control>Home")) sequence.append(utils.AssertPresentationAction( "1. Top of file", ["BRAILLE LINE: 'Contents h1'", " VISIBLE: 'Contents h1', cursor=1", "SPEECH OUTPUT: 'Contents heading level 1'"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Tab")) sequence.append(utils.AssertPresentationAction( "2. Tab", ["BRAILLE LINE: 'First item'", " VISIBLE: 'First item', cursor=1", "BRAILLE LINE: 'First item'", " VISIBLE: 'First item', cursor=1", "SPEECH OUTPUT: 'First item link.'"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Tab")) sequence.append(utils.AssertPresentationAction( "3. Tab", ["BRAILLE LINE: 'Second item'", " VISIBLE: 'Second item', cursor=1", "BRAILLE LINE: 'Second item'", " VISIBLE: 'Second item', cursor=1", "SPEECH OUTPUT: 'Second item link.'"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Return")) sequence.append(utils.AssertPresentationAction( "4. Return", ["BRAILLE LINE: 'Second h2'", " VISIBLE: 'Second h2', cursor=1", "SPEECH OUTPUT: 'Second heading level 2'"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Down")) sequence.append(utils.AssertPresentationAction( "5. Down", ["BRAILLE LINE: 'Orca are versatile and opportunistic predators. Some populations feed mostly on fish, and other populations'", " VISIBLE: 'Orca are versatile and opportuni', cursor=1", "SPEECH OUTPUT: 'Orca are versatile and opportunistic predators. Some populations feed mostly on fish, and other populations'"])) sequence.append(utils.AssertionSummaryAction()) sequence.start()
pvagner/orca
test/keystrokes/firefox/line_nav_follow_same_page_link.py
Python
lgpl-2.1
1,993
[ "ORCA" ]
b7aa3fea4ec2126b14496995a898e9d895be79b4904138ae02774b6aa72f0a0a
# -*- coding: utf-8 -*-
#
# inhibitory_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST.  If not, see <http://www.gnu.org/licenses/>.

"""
This is the inhibitory network used as test case 2 (see figure 9 and 10)
in

Hahne, J., Helias, M., Kunkel, S., Igarashi, J., Bolten, M., Frommer, A.
and Diesmann, M.,
A unified framework for spiking and gap-junction interactions
in distributed neuronal network simulations,
Front. Neuroinform. 9:22. (2015),
doi: 10.3389/fninf.2015.00022

The network contains 500 hh_psc_alpha_gap neurons with random initial
membrane potentials between −40 and −80 mV. Each neuron receives 50
inhibitory synaptic inputs that are randomly selected from all other
neurons, each with synaptic weight JI = −50.0 pA and synaptic delay
d = 1.0 ms. Each neuron receives an excitatory external Poissonian input
of 500.0 Hz with synaptic weight JE = 300.0 pA and the same delay d.
In addition (60*500)/2 gap junctions are added randomly to the network
resulting in an average of 60 gap-junction connections per neuron.
"""

import pylab
import nest
import random
import numpy

n_neuron = 500
gap_per_neuron = 60
inh_per_neuron = 50
delay = 1.0
j_exc = 300.
j_inh = -50.
threads = 8
stepsize = 0.05
simtime = 501.

"""
Set gap weight here
"""
gap_weight = 0.32

random.seed(1)

nest.ResetKernel()

nest.SetKernelStatus({'resolution': 0.05,
                      'total_num_virtual_procs': threads,
                      'print_time': True,
                      # Settings for waveform relaxation
                      # 'use_wfr': False uses communication in every step
                      # instead of an iterative solution
                      'use_wfr': True,
                      'wfr_comm_interval': 1.0,
                      'wfr_tol': 0.0001,
                      'wfr_max_iterations': 15,
                      'wfr_interpolation_order': 3})

neurons = nest.Create('hh_psc_alpha_gap', n_neuron)

sd = nest.Create("spike_detector", params={'to_file': False,
                                           'to_memory': True})
pg = nest.Create("poisson_generator", params={'rate': 500.0})

conn_dict = {'rule': 'fixed_indegree',
             'indegree': inh_per_neuron,
             'autapses': False,
             'multapses': True}

syn_dict = {'model': 'static_synapse',
            'weight': j_inh,
            'delay': delay}

nest.Connect(neurons, neurons, conn_dict, syn_dict)

nest.Connect(pg, neurons, 'all_to_all',
             syn_spec={'model': 'static_synapse',
                       'weight': j_exc,
                       'delay': delay})

nest.Connect(neurons, sd)

for i in range(n_neuron):
    nest.SetStatus([neurons[i]], {'V_m': (-40. - 40. * random.random())})

"""
We must not use the 'fixed_indegree' or 'fixed_outdegree' functionality of
nest.Connect() to create the connections, as gap_junction connections are
two-way connections and we need to make sure that the same neurons
are connected in both ways.
""" # create gap_junction connections n_connection = n_neuron * gap_per_neuron / 2 connections = numpy.transpose( [random.sample(neurons, 2) for _ in range(n_connection)]) # Connect sources -> targets and targets -> sources with # one call to nest.Connect using the "symmetric" flag nest.Connect(connections[0], connections[1], {'rule': 'one_to_one', 'symmetric': True}, {'model': 'gap_junction', 'weight': gap_weight}) nest.Simulate(simtime) times = nest.GetStatus(sd, 'events')[0]['times'] spikes = nest.GetStatus(sd, 'events')[0]['senders'] n_spikes = nest.GetStatus(sd, 'n_events')[0] hz_rate = (1000.0 * n_spikes / simtime) / n_neuron pylab.figure(1) pylab.plot(times, spikes, 'o') pylab.title('Average spike rate (Hz): %.2f' % hz_rate) pylab.xlabel('time (ms)') pylab.ylabel('neuron no') pylab.show()
HBPNeurorobotics/nest-simulator
examples/nest/gap_junction/inhibitory_network.py
Python
gpl-2.0
4,472
[ "NEURON" ]
1428ff1e1e83755c4def5ba1aac0d456e17a11630bbc4ea60d1eec719d9cc1b2
""" Copyright (c) 2016 Jet Propulsion Laboratory, California Institute of Technology. All rights reserved """ import logging import numpy as np from nexustiles.nexustiles import NexusTileService # from time import time from webservice.NexusHandler import nexus_handler, SparkHandler, DEFAULT_PARAMETERS_SPEC from webservice.webmodel import NexusResults, NexusProcessingException, NoDataException @nexus_handler class TimeAvgMapSparkHandlerImpl(SparkHandler): name = "Time Average Map Spark" path = "/timeAvgMapSpark" description = "Computes a Latitude/Longitude Time Average plot given an arbitrary geographical area and time range" params = DEFAULT_PARAMETERS_SPEC singleton = True def __init__(self): SparkHandler.__init__(self) self.log = logging.getLogger(__name__) # self.log.setLevel(logging.DEBUG) @staticmethod def _map(tile_in_spark): tile_bounds = tile_in_spark[0] (min_lat, max_lat, min_lon, max_lon, min_y, max_y, min_x, max_x) = tile_bounds startTime = tile_in_spark[1] endTime = tile_in_spark[2] ds = tile_in_spark[3] tile_service = NexusTileService() # print 'Started tile {0}'.format(tile_bounds) # sys.stdout.flush() tile_inbounds_shape = (max_y - min_y + 1, max_x - min_x + 1) # days_at_a_time = 90 days_at_a_time = 30 # days_at_a_time = 7 # days_at_a_time = 1 # print 'days_at_a_time = {0}'.format(days_at_a_time) t_incr = 86400 * days_at_a_time sum_tile = np.array(np.zeros(tile_inbounds_shape, dtype=np.float64)) cnt_tile = np.array(np.zeros(tile_inbounds_shape, dtype=np.uint32)) t_start = startTime while t_start <= endTime: t_end = min(t_start + t_incr, endTime) # t1 = time() # print 'nexus call start at time {0}'.format(t1) # sys.stdout.flush() # nexus_tiles = \ # TimeAvgMapSparkHandlerImpl.query_by_parts(tile_service, # min_lat, max_lat, # min_lon, max_lon, # ds, # t_start, # t_end, # part_dim=2) nexus_tiles = \ tile_service.get_tiles_bounded_by_box(min_lat, max_lat, min_lon, max_lon, ds=ds, start_time=t_start, end_time=t_end) # t2 = time() # print 'nexus call end at time %f' % t2 # print 'secs in nexus call: ', t2 - t1 # print 't %d to %d - Got %d tiles' % (t_start, t_end, # len(nexus_tiles)) # for nt in nexus_tiles: # print nt.granule # print nt.section_spec # print 'lat min/max:', np.ma.min(nt.latitudes), np.ma.max(nt.latitudes) # print 'lon min/max:', np.ma.min(nt.longitudes), np.ma.max(nt.longitudes) # sys.stdout.flush() for tile in nexus_tiles: tile.data.data[:, :] = np.nan_to_num(tile.data.data) sum_tile += tile.data.data[0, min_y:max_y + 1, min_x:max_x + 1] cnt_tile += (~tile.data.mask[0, min_y:max_y + 1, min_x:max_x + 1]).astype(np.uint8) t_start = t_end + 1 # print 'cnt_tile = ', cnt_tile # cnt_tile.mask = ~(cnt_tile.data.astype(bool)) # sum_tile.mask = cnt_tile.mask # avg_tile = sum_tile / cnt_tile # stats_tile = [[{'avg': avg_tile.data[y,x], 'cnt': cnt_tile.data[y,x]} for x in range(tile_inbounds_shape[1])] for y in range(tile_inbounds_shape[0])] # print 'Finished tile', tile_bounds # print 'Tile avg = ', avg_tile # sys.stdout.flush() return ((min_lat, max_lat, min_lon, max_lon), (sum_tile, cnt_tile)) def calc(self, computeOptions, **args): """ :param computeOptions: StatsComputeOptions :param args: dict :return: """ spark_master, spark_nexecs, spark_nparts = computeOptions.get_spark_cfg() self._setQueryParams(computeOptions.get_dataset()[0], (float(computeOptions.get_min_lat()), float(computeOptions.get_max_lat()), float(computeOptions.get_min_lon()), float(computeOptions.get_max_lon())), computeOptions.get_start_time(), computeOptions.get_end_time(), 
spark_master=spark_master, spark_nexecs=spark_nexecs, spark_nparts=spark_nparts) if 'CLIM' in self._ds: raise NexusProcessingException( reason="Cannot compute Latitude/Longitude Time Average plot on a climatology", code=400) nexus_tiles = self._find_global_tile_set() # print 'tiles:' # for tile in nexus_tiles: # print tile.granule # print tile.section_spec # print 'lat:', tile.latitudes # print 'lon:', tile.longitudes # nexus_tiles) if len(nexus_tiles) == 0: raise NoDataException(reason="No data found for selected timeframe") self.log.debug('Found {0} tiles'.format(len(nexus_tiles))) self.log.debug('Using Native resolution: lat_res={0}, lon_res={1}'.format(self._latRes, self._lonRes)) nlats = int((self._maxLat - self._minLatCent) / self._latRes) + 1 nlons = int((self._maxLon - self._minLonCent) / self._lonRes) + 1 self.log.debug('nlats={0}, nlons={1}'.format(nlats, nlons)) self.log.debug('center lat range = {0} to {1}'.format(self._minLatCent, self._maxLatCent)) self.log.debug('center lon range = {0} to {1}'.format(self._minLonCent, self._maxLonCent)) # for tile in nexus_tiles: # print 'lats: ', tile.latitudes.compressed() # print 'lons: ', tile.longitudes.compressed() # Create array of tuples to pass to Spark map function nexus_tiles_spark = [[self._find_tile_bounds(t), self._startTime, self._endTime, self._ds] for t in nexus_tiles] # print 'nexus_tiles_spark = ', nexus_tiles_spark # Remove empty tiles (should have bounds set to None) bad_tile_inds = np.where([t[0] is None for t in nexus_tiles_spark])[0] for i in np.flipud(bad_tile_inds): del nexus_tiles_spark[i] # Expand Spark map tuple array by duplicating each entry N times, # where N is the number of ways we want the time dimension carved up. num_time_parts = 72 # num_time_parts = 1 nexus_tiles_spark = np.repeat(nexus_tiles_spark, num_time_parts, axis=0) self.log.debug('repeated len(nexus_tiles_spark) = {0}'.format(len(nexus_tiles_spark))) # Set the time boundaries for each of the Spark map tuples. # Every Nth element in the array gets the same time bounds. spark_part_times = np.linspace(self._startTime, self._endTime, num_time_parts + 1, dtype=np.int64) spark_part_time_ranges = \ np.repeat([[[spark_part_times[i], spark_part_times[i + 1]] for i in range(num_time_parts)]], len(nexus_tiles_spark) / num_time_parts, axis=0).reshape((len(nexus_tiles_spark), 2)) self.log.debug('spark_part_time_ranges={0}'.format(spark_part_time_ranges)) nexus_tiles_spark[:, 1:3] = spark_part_time_ranges # print 'nexus_tiles_spark final = ' # for i in range(len(nexus_tiles_spark)): # print nexus_tiles_spark[i] # Launch Spark computations rdd = self._sc.parallelize(nexus_tiles_spark, self._spark_nparts) sum_count_part = rdd.map(self._map) sum_count = \ sum_count_part.combineByKey(lambda val: val, lambda x, val: (x[0] + val[0], x[1] + val[1]), lambda x, y: (x[0] + y[0], x[1] + y[1])) fill = self._fill avg_tiles = \ sum_count.map(lambda (bounds, (sum_tile, cnt_tile)): (bounds, [[{'avg': (sum_tile[y, x] / cnt_tile[y, x]) if (cnt_tile[y, x] > 0) else fill, 'cnt': cnt_tile[y, x]} for x in range(sum_tile.shape[1])] for y in range(sum_tile.shape[0])])).collect() # Combine subset results to produce global map. # # The tiles below are NOT Nexus objects. They are tuples # with the time avg map data and lat-lon bounding box. 
a = np.zeros((nlats, nlons), dtype=np.float64, order='C') n = np.zeros((nlats, nlons), dtype=np.uint32, order='C') for tile in avg_tiles: if tile is not None: ((tile_min_lat, tile_max_lat, tile_min_lon, tile_max_lon), tile_stats) = tile tile_data = np.ma.array( [[tile_stats[y][x]['avg'] for x in range(len(tile_stats[0]))] for y in range(len(tile_stats))]) tile_cnt = np.array( [[tile_stats[y][x]['cnt'] for x in range(len(tile_stats[0]))] for y in range(len(tile_stats))]) tile_data.mask = ~(tile_cnt.astype(bool)) y0 = self._lat2ind(tile_min_lat) y1 = y0 + tile_data.shape[0] - 1 x0 = self._lon2ind(tile_min_lon) x1 = x0 + tile_data.shape[1] - 1 if np.any(np.logical_not(tile_data.mask)): self.log.debug( 'writing tile lat {0}-{1}, lon {2}-{3}, map y {4}-{5}, map x {6}-{7}'.format(tile_min_lat, tile_max_lat, tile_min_lon, tile_max_lon, y0, y1, x0, x1)) a[y0:y1 + 1, x0:x1 + 1] = tile_data n[y0:y1 + 1, x0:x1 + 1] = tile_cnt else: self.log.debug( 'All pixels masked in tile lat {0}-{1}, lon {2}-{3}, map y {4}-{5}, map x {6}-{7}'.format( tile_min_lat, tile_max_lat, tile_min_lon, tile_max_lon, y0, y1, x0, x1)) # Store global map in a NetCDF file. self._create_nc_file(a, 'tam.nc', 'val', fill=self._fill) # Create dict for JSON response results = [[{'avg': a[y, x], 'cnt': int(n[y, x]), 'lat': self._ind2lat(y), 'lon': self._ind2lon(x)} for x in range(a.shape[1])] for y in range(a.shape[0])] return TimeAvgMapSparkResults(results=results, meta={}, computeOptions=computeOptions) class TimeAvgMapSparkResults(NexusResults): def __init__(self, results=None, meta=None, computeOptions=None): NexusResults.__init__(self, results=results, meta=meta, stats=None, computeOptions=computeOptions)
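
# --- Standalone sketch (illustrative, separate from the NEXUS types above;
# assumes an already-created SparkContext `sc`). It isolates the
# combineByKey sum/count pattern used in calc(): each value is a
# (sum, count) pair, pairs are merged element-wise, and the per-key average
# falls out at the end.
def combine_by_key_sketch(sc):
    pairs = sc.parallelize([('a', (1.0, 1)), ('a', (3.0, 1)), ('b', (5.0, 1))])
    sums = pairs.combineByKey(lambda val: val,
                              lambda x, val: (x[0] + val[0], x[1] + val[1]),
                              lambda x, y: (x[0] + y[0], x[1] + y[1]))
    # Python 2 tuple-unpacking lambdas, matching the style used above
    return sums.map(lambda (k, (s, n)): (k, s / n)).collect()
    # e.g. [('a', 2.0), ('b', 5.0)]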
dataplumber/nexus
analysis/webservice/algorithms_spark/TimeAvgMapSpark.py
Python
apache-2.0
12,355
[ "NetCDF" ]
8783f1aead194206aff4c6a38a38844bdfb96b51998d705cfb247b5763ebaf94
# Copyright 2010-2017, The University of Melbourne # Copyright 2010-2017, Brian May # # This file is part of Karaage. # # Karaage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Karaage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Karaage If not, see <http://www.gnu.org/licenses/>. from django.conf import settings from django.conf.urls import url from karaage.common.views import profile as common_profile from karaage.people.views import profile as people_profile profile_urlpatterns = [ url(r'^$', common_profile.profile, name='kg_profile'), url(r'^logout/$', common_profile.logout, name='kg_profile_logout'), ] profile_urlpatterns += [ url(r'^personal/$', people_profile.profile_personal, name='kg_profile_personal'), url(r'^edit/$', people_profile.edit_profile, name='kg_profile_edit'), url(r'^password/$', people_profile.password_change, name='kg_profile_password'), url(r'^password_request/$', people_profile.password_request, name='kg_profile_reset'), url(r'^password_request/done/$', people_profile.password_request_done, name='kg_profile_reset_done'), url(r'^login/$', people_profile.login, name='kg_profile_login'), url(r'^login/(?P<username>%s)/$' % settings.USERNAME_VALIDATION_RE, people_profile.login, name="kg_profile_login"), ] if settings.AAF_RAPID_CONNECT_ENABLED: profile_urlpatterns += [ url( r'^arc/$', people_profile.profile_aaf_rapid_connect, name='kg_profile_arc', ), url( r'^slogin/$', people_profile.aaf_rapid_connect_login, name='kg_profile_login_arc', ), ]
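
# --- Usage sketch (illustrative; names as registered above) ---
# These URL names resolve with reverse(), e.g. in a view:
#
#     from django.core.urlresolvers import reverse  # Django 1.x era, as used here
#     edit_url = reverse('kg_profile_edit')
#     login_url = reverse('kg_profile_login', kwargs={'username': 'jdoe'})
#
# where 'jdoe' stands in for any username matching
# settings.USERNAME_VALIDATION_RE.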
brianmay/karaage
karaage/people/urls/__init__.py
Python
gpl-3.0
2,144
[ "Brian" ]
486d61bc448fae492eae5b29b6e71376b6daad9b10ddca56b2b7232a0f532733
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pycuda.driver as cuda
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule
from scipy.sparse import *
import numpy
import random
import sys

INPUT_VECTOR_SIZE = 2     # inputs of one neuron
SYNAPSES_VECTOR_SIZE = 2  # destination (synapse) connections of one neuron
NEURONS_IN_GROUP = 4      # number of neurons in a group
MAX_THRESHOLD = 1         # threshold for spiking
GROUPS_AMOUNT = 2         # number of neurons groups (correspond to blocks on the GPU)

def show_configuration():
    print "###################################################"
    print "# for each neuron:"
    print "# max number of inputs: %d" % INPUT_VECTOR_SIZE
    print "# max number of synapses: %d" % SYNAPSES_VECTOR_SIZE
    print "#"
    print "# neurons in a group: %d" % NEURONS_IN_GROUP
    print "# number of groups: %d" % (GROUPS_AMOUNT)
    print "# total neurons: %d" % (NEURONS_IN_GROUP*GROUPS_AMOUNT)
    print "# max threshold: %d" % MAX_THRESHOLD
    print "###################################################"

def debug(title, var):
    print title+':'
    print var
    print "###################################################"

def divide_network_to_groups():
    # NOT IMPLEMENTED
    # divide to groups with minimal inter-group connections
    # under maximum group size restriction (block size in the GPU)
    # It is a graph-cut problem - graph partitioning optimizing edges cut to minimum
    # while satisfying additional conditions.
    #
    # ref:
    # http://romainbrette.fr/WordPress3/wp-content/uploads/2014/06/BretteGoodman2012.pdf
    #
    # instead - an example network with GROUPS_AMOUNT dense groups and minor inter-group connection is built:

    # create all groups
    GI=NEURONS_IN_GROUP*INPUT_VECTOR_SIZE    # group inputs
    GS=NEURONS_IN_GROUP*SYNAPSES_VECTOR_SIZE # group synapses
    g = numpy.zeros((GI*GROUPS_AMOUNT,GS*GROUPS_AMOUNT)) # large (sparse) matrix
    g = g.astype(numpy.float32)

    # weights: between 0.0-1.0 for each of inputs
    # indices:
    # (rows) input#, (columns) synapse#

    # inside group connections:
    for i in range(GROUPS_AMOUNT):
        g[0+i*GI,6+i*GS]=0.2 # on group #0, #0 synapse of neuron #3 connects to #0 input of neuron #0 with weight 0.2
        g[1+i*GI,2+i*GS]=0.6
        g[2+i*GI,7+i*GS]=0.5
        g[3+i*GI,4+i*GS]=0.7
        g[4+i*GI,0+i*GS]=0.4
        g[6+i*GI,5+i*GS]=0.8
        #g[5+i*GI,1+i*GS]=0.7123

    # inter-group connections
    # group 1 depends on group 0
    g[7+1*GI,3+0*GS]=0.9 # #1 synapse of neuron #2 in group #0 connects to #1 input of neuron #3 in group #1

    numpy.set_printoptions(linewidth=10000)
    print g
    return g

def get_weights_graph():
    # Assuming that the connection matrix is sparse, the data
    # structure used is a Compressed Sparse Row/Column matrix.
    # The efficient row access of CSR is used for the weights to target neurons,
    # to achieve coalesced memory access during spike distribution.
    # http://homepages.cwi.nl/~sbohte/publication/slazynski2012network.pdf
    #
    # A dense representation has NEURONS_IN_GROUP*SYNAPSES_VECTOR_SIZE
    # columns and NEURONS_IN_GROUP*INPUT_VECTOR_SIZE rows, each stating
    # the corresponding weight or a zero for no connection. Each neuron spans over
    # SYNAPSES_VECTOR_SIZE columns and INPUT_VECTOR_SIZE rows.
    # Groups of neurons (more dense connections) are located in neighbour indices, so
    # they land in the same block letting them run for longer periods while using
    # shared memory, until they need to connect to another group which runs on
    # a different block.
# # neuron synapses X # ----------------------------------------- > # |██████| | | | | # |██████| | | | | # |██████| | | | | # |██████| | | | | # |------- | | | | # |--------|--------|--------|--------|---- # | . |███| | | . | # n | |---- | | | # e | | | | | # u | | | | | # r |--------|--------|--------|--------|---- # o | | . |██████| | | # n | | |██████| | . | # | | |██████| | | # i | | |------- | | # n |--------|--------|--------|--------|---- # p | . | | |█████| | # u | | | |█████| | # t | | | |------ | # s | | | | | # |--------|--------|--------|--------|---- # Y v # # This is a Weights matrix (W): # ============================= # Each of the large squares (16) represents synapses of neurons group (on axis X) connecting # to inputs of neurons group (on axis Y). # On the diagonal there are (smaller) squares representing (dense) connections inside # a group. The dots on other squares represent inter-group connections. # The matrix is splitted to vertical slices, each containing neurons with synapses from one group. # Each group runs later on a separate GPU block. # When a spike goes to a neuron in another block there is a mechanism that updates the required block. # # The CSR representation of the above matrix is: # A - an array of all non-zero weights (right to left, top down) # B - an array where value in place i is the A-index of the first non-zero number on row i of W. # The size |A| is added to B. # C - an array of the column indices in W of each of A items. # # A block that has dependency needs to get periodic approvals until which clock step it # may run. A bidirectional dependency between blocks can be solved by running each time # during some fixed clock slices (e.g. 1000 clocks). If no spikes were done, just continue # with the next slice. If a spike was emitted, cut the slice to 1/2 and repeat calculation # on both blocks. Update the corresponding spike as needed. # groups=divide_network_to_groups() CSC_groups=[] CSC_vectors_lengths=numpy.zeros(3*GROUPS_AMOUNT, dtype=numpy.float32) CSC_vectors_start_index=numpy.zeros(3*GROUPS_AMOUNT, dtype=numpy.float32) # split large matrix to GROUPS_AMOUNT group slices for i in range(GROUPS_AMOUNT): g_slice=groups[:,i*SYNAPSES_VECTOR_SIZE*NEURONS_IN_GROUP:(i+1)*SYNAPSES_VECTOR_SIZE*NEURONS_IN_GROUP] #print "slice ...." 
#print g_slice m=csc_matrix(g_slice) A=m.data B=m.indptr C=m.indices #print A,B,C # keep vector (of CSC representation for each group) lengths CSC_vectors_lengths[0+i*3]=len(A) CSC_vectors_lengths[1+i*3]=len(B) CSC_vectors_lengths[2+i*3]=len(C) #print "CSC_vectors_lengths ", CSC_vectors_lengths if i<(GROUPS_AMOUNT-1): # check on which location each vector begins # next vector begins at the previous location + its vector length # this is needed for in-kernel vectors usage optimization CSC_vectors_start_index[0+(i+1)*3]=CSC_vectors_start_index[0+i*3]+len(A) CSC_vectors_start_index[1+(i+1)*3]=CSC_vectors_start_index[1+i*3]+len(B) CSC_vectors_start_index[2+(i+1)*3]=CSC_vectors_start_index[2+i*3]+len(C) #print "CSC_vectors_start_index ", CSC_vectors_start_index CSC_groups.append([A,B,C]) return CSC_groups,CSC_vectors_start_index,CSC_vectors_lengths def run(): show_configuration() # get network CSC_groups,CSC_vectors_start_index,CSC_vectors_lengths=get_weights_graph() # concat all CSC vectors to simplify load to GPU # calculate total lengths concat_vectors_lengths=numpy.zeros(3, dtype=numpy.float32) j=0 for i in CSC_vectors_lengths: concat_vectors_lengths[j%3]+=CSC_vectors_lengths[j] # calculate total lengths for all A,B,C j+=1 # allocate concatenated vectors ccA=numpy.zeros(concat_vectors_lengths[0], dtype=numpy.float32) ccB=numpy.zeros(concat_vectors_lengths[1], dtype=numpy.float32) ccC=numpy.zeros(concat_vectors_lengths[2], dtype=numpy.float32) # concating all A in to ccA, B to ccB and C to ccC ccA_counter=0 ccB_counter=0 ccC_counter=0 for i in range(GROUPS_AMOUNT): A,B,C = CSC_groups[i] for j in range(CSC_vectors_lengths[0+i*3]): # run over each A length ccA[j+ccA_counter]=A[j] ccA_counter=j+1 for j in range(CSC_vectors_lengths[1+i*3]): # run over each B length ccB[j+ccB_counter]=B[j] ccB_counter=j+1 #print "range: ",CSC_vectors_lengths[2+i*3] for j in range(CSC_vectors_lengths[2+i*3]): # run over each C length #print "ccC index is ", j, " writing ",C[j] ccC[j+ccC_counter]=C[j] ccC_counter=j+1 #print "==============> ",concat_vectors_lengths #print "==============> ",ccA #print "==============> ",ccC # more data structures: # ===================== # Inputs - array. Size according to block size limit from weight matrix. # Threshold - array per neuron (small). # Action Potential (AC) - array. Size according to block size limit from weight matrix. # Fired - array per neuron (small). # Cross block dependency - matrix per block (small). 
# inputs: each is 0 or the corresponding weight # use one vector for inputs of a whole neurons group X = numpy.array([0.2,0,0.5,0.7,0.4,0,0,0.9,0.2,0,0.5,0.7,0.4,0,0,0.9]) #X = numpy.array([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]) X = X.astype(numpy.float32) # threshold TH = MAX_THRESHOLD*numpy.random.rand(NEURONS_IN_GROUP*GROUPS_AMOUNT) #TH = MAX_THRESHOLD*numpy.zeros(NEURONS_IN_GROUP*GROUPS_AMOUNT) TH = TH.astype(numpy.float32) # to gpu # currently one one group is loaded # to load the full ndarray, the following can be used: # http://documen.tician.de/pycuda/array.html#pycuda.gpuarray.GPUArray.set ccA_gpu = gpuarray.to_gpu(ccA) ccB_gpu = gpuarray.to_gpu(ccB) ccC_gpu = gpuarray.to_gpu(ccC) X_gpu = gpuarray.to_gpu(X) TH_gpu = gpuarray.to_gpu(TH) # CSC_vectors_start_index and CSC_vectors_lengths of CSC vectors CSC_vectors_lengths_gpu = gpuarray.to_gpu(CSC_vectors_lengths) CSC_vectors_start_index_gpu = gpuarray.to_gpu(CSC_vectors_start_index) # prepare vectors for results: # weighted sum AC_gpu = gpuarray.zeros(NEURONS_IN_GROUP*GROUPS_AMOUNT, dtype=numpy.float32) # fired fired_gpu = gpuarray.zeros(NEURONS_IN_GROUP*GROUPS_AMOUNT, dtype=numpy.float32) ################################################################ # # declare kernel # ################################################################ kernel_code_template = """ #include <stdio.h> #define INPUT_VECTOR_SIZE 2 #define SYNAPSES_VECTOR_SIZE 2 #define NEURONS_IN_GROUP 4 #define GROUPS_AMOUNT 2 #define INPUTS_PER_GROUP (INPUT_VECTOR_SIZE*NEURONS_IN_GROUP) #define GROUP_NUMBER_MASK (INPUTS_PER_GROUP*(GROUPS_AMOUNT-1)) #define MAX_GROUP_UPDATE_QUEUE_LEN 8 // must be 2^n to work with modulo optimization (see atomicAnd below) #define PERIODIC_UPDATE_CYCLES 4 #define UPDATE_PERIODS 1 // management of inter-group updates on shared memory __device__ struct update_group_entry { int clock; // Note: add __padding for alighnment if using 64 bit float int input; float weight; } group_updates_queue[GROUPS_AMOUNT][MAX_GROUP_UPDATE_QUEUE_LEN]; __device__ int first_on_queue[GROUPS_AMOUNT]; // mod MAX_GROUP_UPDATE_QUEUE_LEN __device__ int already_on_queue[GROUPS_AMOUNT]; volatile __device__ int safe_clock[GROUPS_AMOUNT]; /* * # neural state update + spike generation: * # ======================================= * # each input has one of 2 values - 0 or the corresponding weight. * # each group/block verifies that it is safe to run for the current clock. * # safe means that if there is dependency on another group - the other block signals updates for inputs * # on current block at certain clocks, or alternatively no updates until some recent clock. * # block run on all these inputs of neurons in current block, compare to threshold, and update fired * # array. When done, zero all inputs (assumption of 1 clock decay of the spike). */ __device__ void sigma(float *X, float *AC, float *TH, float *fired, uint clock) { const uint tx = threadIdx.x; const uint bx = blockIdx.x; const uint vec_num = tx/INPUT_VECTOR_SIZE+bx*NEURONS_IN_GROUP; int first_index; // busy loop if no "safe" clock in the future if(bx==1){ // FIXME: condition should be "is dependent group?" if (clock>safe_clock[bx]) { printf("busy loop on block %d clock %d before safe %d\\n", bx, clock, safe_clock[bx]); } else { printf("skip busy as clock %d before safe %d\\n", clock, safe_clock[bx]); } while(clock>safe_clock[bx]) { // busy wait // maybe some variation on _gpu_sync() could be used here. 
// http://fulmanski.pl/zajecia/cuda/zajecia_20122013/materialy/TR_GPU_synchronization.pdf printf("%d, ",clock); } } if (already_on_queue[bx] > 0) { // must update inputs due to spikes from other groups printf("handling queue for group %d length of %d at clock %d\\n", bx, already_on_queue[bx], clock); first_index=first_on_queue[bx]; printf("on queue index %d, clock %d, input %d, weight %f\\n", first_index, group_updates_queue[bx][first_index].clock, group_updates_queue[bx][first_index].input, group_updates_queue[bx][first_index].weight); if(clock==group_updates_queue[bx][first_index].clock) { // update the input using the values from the queue X[group_updates_queue[bx][first_index].input]=group_updates_queue[bx][first_index].weight; } atomicAdd(&already_on_queue[bx],-1); // FIXME: take care with parallel changes (consider A Parallel Counter Class - http://www.drdobbs.com/parallel/atomic-operations-and-low-wait-algorithm/240160177) atomicAdd(&first_on_queue[bx],1); atomicAnd(&first_on_queue[bx],MAX_GROUP_UPDATE_QUEUE_LEN-1); // next on cyclic buffer - optimization of modulo (no problem after previous atomic add, since during the transition // between MAX_GROUP_UPDATE_QUEUE_LEN-1 to MAX_GROUP_UPDATE_QUEUE_LEN, these are orthogonal bits) } if (tx<INPUT_VECTOR_SIZE*NEURONS_IN_GROUP) { atomicAdd(&AC[vec_num], X[tx+bx*INPUT_VECTOR_SIZE]); if(AC[vec_num]>=TH[vec_num]) { fired[vec_num]=1.0; // it is written over INPUT_VECTOR_SIZE times printf("fired[%d]=%f on clock %d\\n", vec_num, fired[vec_num], clock); } else { //printf("under TH of fired[%d]=%f\\n", vec_num, fired[vec_num]); } } } __device__ void zero(float *x) { const uint tx = blockIdx.x *blockDim.x + threadIdx.x; if (tx<INPUT_VECTOR_SIZE*NEURONS_IN_GROUP) { x[tx]=0; } } /* * # spike distribution: * # =================== * # inside a block, run on the weights with a coalesced memory access, multiply by corresponding * # fired array (the indices derived from C by [floor of] division to INPUT_VECTOR_SIZE). Update the * # corresponding input (the indices are in C). When done, zero all fired array (assumption of * # 1 clock decay of the spike). * # Note: An attempt to update another group (block) is done using group_updates_queue mechanism. 
*/ __device__ void update_inputs(float *ccA, float *ccC, float *fired, float *X, float *CSC_vectors_start_index, float *CSC_vectors_lengths, uint clock) { const uint tx = threadIdx.x; const uint bx = blockIdx.x; int a_len_index=0+bx*3; int c_len_index=2+bx*3; int a_index=tx+CSC_vectors_start_index[a_len_index]; int c_index=tx+CSC_vectors_start_index[c_len_index]; int input_index = ccC[c_index]; int fired_index = input_index/SYNAPSES_VECTOR_SIZE; // neuron number int input_group=(input_index&GROUP_NUMBER_MASK)/INPUTS_PER_GROUP; // to which block goes the index //printf("BLOCK %d\\n", bx); if(tx<CSC_vectors_lengths[a_len_index]) { // running over (the relevat subarray of) A //printf("block %d, input_index %d, MASK %x, GROUP NUM %d\\n", bx, input_index, GROUP_NUMBER_MASK, (input_index&GROUP_NUMBER_MASK)/INPUTS_PER_GROUP); if(input_group==bx) { // updating current group X[input_index] = ccA[a_index]*fired[fired_index]; printf("normal update in block %d for %d with %f\\n",bx, input_index, ccA[a_index]*fired[fired_index]); } else { // must update a different group if(fired[fired_index]>0.0) { // ignore on non fired neuron printf("external update in block %d at clock %d for input %d with fired_index %d fire %f tell block %d\\n",bx, clock, input_index, fired_index, fired[fired_index], input_group); if(already_on_queue[input_group]<MAX_GROUP_UPDATE_QUEUE_LEN) { group_updates_queue[input_group][first_on_queue[input_group]].clock=clock; group_updates_queue[input_group][first_on_queue[input_group]].input=input_index; group_updates_queue[input_group][first_on_queue[input_group]].weight=ccA[a_index]*fired[fired_index]; already_on_queue[input_group]+=1; } else { printf("QUEUE TOO LONG on group %d! Spike will be ignored!!!\\n", input_group); } } } printf("tx %d fired[%d] %f ccA %f X %f\\n", tx, fired_index, fired[fired_index], ccA[a_index], X[input_index]); } } __global__ void cycle(float *X, float *ccA, float *ccB, float * ccC, float *AC, float *TH, float *fired, float *CSC_vectors_start_index, float *CSC_vectors_lengths) { uint clock; uint periods; //if(blockIdx.x==0) { // return; //} for(periods=0;periods<UPDATE_PERIODS;periods++) { for(clock=0+periods*PERIODIC_UPDATE_CYCLES;clock<PERIODIC_UPDATE_CYCLES*(periods+1);clock++) { zero(fired); zero(AC); __syncthreads(); sigma(X, AC, TH, fired, clock); __syncthreads(); zero(X); __syncthreads(); update_inputs(ccA, ccC, fired, X, CSC_vectors_start_index, CSC_vectors_lengths, clock); __syncthreads(); } //printf("PERIOD %d\\n", periods); if(blockIdx.x==0){ // FIXME: condition should be "is non-dependant group?" if (already_on_queue[1] == 0) { safe_clock[1]=clock; // FIXME: atomic? clock-1? printf("update clean SAFE to clock %d\\n", safe_clock[1]); } else { safe_clock[1]=group_updates_queue[1][first_on_queue[1]].clock; // FIXME: atomic? clock-1? 
printf("update dirty SAFE to clock %d\\n", safe_clock[1]); } } } } """ kernel_code = kernel_code_template mod = SourceModule(kernel_code) ################################################################ # # debug before running kernel # ################################################################ debug("inputs",X) debug("thresholds", TH_gpu.get()) ################################################################ # # running kernel # ################################################################ cycle = mod.get_function("cycle") cycle(X_gpu, ccA_gpu, ccB_gpu, ccC_gpu, AC_gpu, TH_gpu, fired_gpu, CSC_vectors_start_index_gpu, CSC_vectors_lengths_gpu, block=(SYNAPSES_VECTOR_SIZE*NEURONS_IN_GROUP,1,1), grid=(GROUPS_AMOUNT,1)) ################################################################ # # debug after running kernel # ################################################################ debug("last fired neurons", fired_gpu.get()) debug("inputs after running network", X_gpu.get()) if __name__ == "__main__": run() # # improvement options to examine: # =============================== # parallel sum during AC calculation (complexity drop fron o(n) to o(log n), but maybe for # such small input amounts per neuron it doesn't make sense. # # loop unrolling.
4martin/snn-simulator-example
snn-simulator.py
Python
mit
21,162
[ "NEURON" ]
14b0e72d1c89f029ffc93340c1212dc8c145f556244502ba4f7c11ed923710c7
# -*- coding: utf-8 -*- from shop.models.cartmodel import Cart from django.contrib.auth.models import AnonymousUser def get_cart_from_database(request): database_cart = Cart.objects.filter(user=request.user) if database_cart: database_cart = database_cart[0] else: database_cart = None return database_cart def get_cart_from_session(request): session_cart = None session = getattr(request, 'session', None) if session is not None: cart_id = session.get('cart_id') if cart_id: try: session_cart = Cart.objects.get(pk=cart_id) except Cart.DoesNotExist: session_cart = None return session_cart def get_or_create_cart(request, save=False): """ Return cart for current visitor. For a logged in user, try to get the cart from the database. If it's not there or it's empty, use the cart from the session. If the user is not logged in use the cart from the session. If there is no cart object in the database or session, create one. If ``save`` is True, cart object will be explicitly saved. """ cart = None if not hasattr(request, '_cart'): is_logged_in = request.user and not isinstance(request.user, AnonymousUser) if is_logged_in: # if we are authenticated session_cart = get_cart_from_session(request) if session_cart and session_cart.user == request.user: # and the session cart already belongs to us, we are done cart = session_cart elif session_cart and session_cart.total_quantity > 0 and session_cart.user != request.user: # if it does not belong to us yet database_cart = get_cart_from_database(request) if database_cart: # and there already is a cart that belongs to us in the database # delete the old database cart database_cart.delete() database_cart = None # save the user to the new one from the session session_cart.user = request.user session_cart.save() cart = session_cart else: # if there is no session_cart, or it's empty, use the database cart cart = get_cart_from_database(request) if cart: # and save it to the session request.session['cart_id'] = cart.id else: # not authenticated? cart might be in session cart = get_cart_from_session(request) if not cart: # in case it's our first visit and no cart was created yet if is_logged_in: cart = Cart(user=request.user) elif getattr(request, 'session', None) is not None: cart = Cart() if save and not cart.pk: cart.save() request.session['cart_id'] = cart.id setattr(request, '_cart', cart) cart = getattr(request, '_cart') # There we *must* have a cart return cart
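
# --- Usage sketch (hypothetical helper, not part of this module). Read-only
# callers, say a context processor rendering a cart badge, can omit `save`
# so that empty visits never write a cart row:
def cart_item_count(request):
    cart = get_or_create_cart(request)
    # An unsaved cart (no pk yet) cannot have items attached to it
    return cart.total_quantity if cart.pk else 0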
thenewguy/django-shop
shop/util/cart.py
Python
bsd-3-clause
3,146
[ "VisIt" ]
6e92981a91c9f83c0dcb28ae2c493fe62d00585d71bcb7f3b9c9429e334db4ea
#!/usr/bin/env python

import os
import sys
import subprocess as sp
import argparse

if sys.version_info.major == 3:
    PY3 = True
    from urllib.request import urlretrieve
else:
    PY3 = False
    from urllib import urlretrieve

usage = """
The easy way to test recipes is by using `circleci build`. However this does
not allow local testing recipes using mulled-build (due to the technicalities
of running docker within docker and the CircleCI client).

This script makes it easy to do mulled-build tests. It works by using the
same code used in the .circleci/setup.sh script to build an isolated
Miniconda environment and a custom `activate` script.

Set up the environment like this:

    ./bootstrap.py /tmp/miniconda

It creates an activate script at ~/.config/bioconda/activate. So you can
then use:

    source ~/.config/bioconda/activate

and then use that isolated root environment independent of any other conda
installations you might have.
"""

ap = argparse.ArgumentParser(usage)
ap.add_argument('bootstrap', help='''Location to which a new Miniconda
                installation plus bioconda-utils should be installed. This
                will be separate from any existing conda installations.''')
ap.add_argument('--no-docker', action='store_true', help='''By default we
                expect Docker to be present. Use this arg to disable that
                behavior. This will reduce functionality, but is useful if
                you're unable to install docker.''')
args = ap.parse_args()

# This is the "common" step in the CircleCI config which gets the versions of
# Miniconda and bioconda-utils that we're using.
urlretrieve(
    'https://raw.githubusercontent.com/bioconda/bioconda-common/master/common.sh',
    filename='.circleci/common.sh')

# TODO: this mimics the override in the "common" job in .circleci/config.yaml
with open('.circleci/common.sh', 'w') as fout:
    fout.write("MINICONDA_VER=4.5.4\nBIOCONDA_UTILS_TAG=master\n")

local_config_path = os.path.expanduser('~/.config/bioconda/activate')


def _write_custom_activate(install_path):
    """
    Once the isolated Miniconda version has been installed, copy its activate
    script over to a custom location, and then hard-code the paths and PS1.

    We don't need a matching `deactivate` because the activate script
    properly keeps track of the new location.
    """
    config_dir = os.path.dirname(local_config_path)
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)

    activate = os.path.join(install_path, 'miniconda/bin/activate')
    lines = [i.rstrip() for i in open(activate)]

    # The following is code from cb2; disabling but keeping it around for now:
    if 0:
        # Exact matches to lines we want to replace in the activate script,
        # leading space included.
        substitutions = [
            (
                '_CONDA_DIR=$(dirname "$_SCRIPT_LOCATION")',
                '_CONDA_DIR="{0}/miniconda/bin"'.format(install_path)
            ),
            (
                ' export PS1="(${CONDA_DEFAULT_ENV}) $PS1"',
                ' export PS1="(BIOCONDA-UTILS) $PS1"',
            )
        ]

        for orig, sub in substitutions:
            # Be very picky so that we'll know if/when the activate script
            # changes.
            try:
                pos = lines.index(orig)
            except ValueError:
                raise ValueError(
                    "Expecting '{0}' to be in {1} but couldn't find it"
                    .format(orig, activate)
                )
            lines[pos] = sub

    with open(local_config_path, 'w') as fout:
        for line in lines:
            fout.write(line + '\n')


use_docker = "true"
if args.no_docker:
    use_docker = "false"

env = {'WORKSPACE': args.bootstrap,
       'BOOTSTRAP': "true",
       'USE_DOCKER': use_docker,
       'PATH': os.environ['PATH']}

sp.check_call(['.circleci/setup.sh'], env=env)

_write_custom_activate(args.bootstrap)

print("""
An isolated version of bioconda-utils has been installed to {1}.

This is separate from any other conda installations you might have.

To use it, source this custom activate script:

    source ~/.config/bioconda/activate

When done:

    source deactivate
""".format(local_config_path, args.bootstrap))
joachimwolff/bioconda-recipes
bootstrap.py
Python
mit
4,242
[ "Bioconda" ]
cce566f72f95064aa918ea4705f1a0a6a70adaa942773dfdc9ecc0280d58523b
# Copyright (C) 2015 Alejandro Molina-Sanchez, Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yambopy
#
import xml.etree.ElementTree as ET
from qepy.auxiliary import *
from numpy import array, zeros
import numpy as np
import re

RytoeV = 13.605698066

class ProjwfcXML():
    """ Class to read data from a Quantum ESPRESSO projwfc XML file.

        This file contains the projection of the Kohn-Sham states on the
        atomic orbitals read from the pseudopotential
    """
    _proj_file = 'atomic_proj.xml'

    def __init__(self,prefix,output_filename='projwfc.log',path='.'):
        """ Initialize the structure with the path where the atomic_proj.xml is
        """
        self.prefix = prefix
        self.path = path
        self.datafile_xml = ET.parse( "%s/%s.save/%s"%(path, prefix, self._proj_file)).getroot()
        #get nkpoints
        self.nkpoints = int(self.datafile_xml.findall("HEADER/NUMBER_OF_K-POINTS")[0].text.strip())
        # Read the number of BANDS
        self.nbands = int(self.datafile_xml.find("HEADER/NUMBER_OF_BANDS").text)
        #get fermi
        self.fermi = float(self.datafile_xml.find("HEADER/FERMI_ENERGY").text)*RytoeV
        #get number of projections
        self.nproj = int(self.datafile_xml.find("HEADER/NUMBER_OF_ATOMIC_WFC").text)
        #get weights of kpoints projections
        self.weights = map(float,self.datafile_xml.find("WEIGHT_OF_K-POINTS").text.split())
        self.eigen = self.get_eigen()
        self.proj = self.get_proj()

        #here we open the output file of projwfc and get the quantum numbers of the orbitals
        try:
            f = open("%s/%s"%(path,output_filename),'r')
        except:
            print "The output file of projwfc.x: %s was not found"%output_filename
            exit(1)

        states = []
        # wfc j l m m_j
        for line in re.findall('state\s+\#\s+([0-9]+):\s+atom\s+([0-9]+)\s+\(([a-zA-Z]+)\s+\),\s+wfc\s+([0-9])\s+\((?:j=([0-9.]+))? ?(?:l=([0-9.]+))? ?(?:m=\s+([0-9.]+))? ?(?:m_j=([ \-0-9.]+))?',f.read()):
            # examples of the lines we have to read
            # 5: atom 1 (C ), wfc 3 (l=2 m= 1)              #no spin case
            # 5: atom 1 (C ), wfc 3 (j=1.5 l=1 m_j=-1.5)    #non collinear spin case
            _, iatom, atype, wfc, j, l, m, m_j = line
            state = {'iatom':int(iatom), 'atype':atype, 'wfc':int(wfc)}
            if j: j = float(j)
            if l: l = int(l)
            if m: m = int(m)
            if m_j: m_j = float(m_j)
            states.append({'iatom':int(iatom), 'atype':atype, 'wfc':int(wfc), 'j':j, 'l':l, 'm':m, 'm_j':m_j})
        self.states = states
        f.close()

    def get_indexes(self):
        """ Get indexes of the bands where the projection is maximal
        """
        # Selection of the bands
        proj = zeros([self.nkpoints,self.nproj],dtype=int)
        for ik in range(self.nkpoints):
            for ip in range(self.nproj):
                proj[ik,ip] = np.argmax(np.absolute(self.proj[ik,ip,:])**2)
        return proj

    def plot_eigen(self, ax, size=20, cmap=None, color='r', path=[],
                   selected_orbitals=[], selected_orbitals_2=[]):
        """ Plot the band structure. The size of the points is the weight of the selected orbitals.

            Options:
            (a) Relative weight between two compositions. Pass a second set of orbitals
            (b) Colormap enters as a string

            Under development to include also colormap and a dictionary for the selection of the orbitals...
""" import matplotlib.pyplot as plt if path: if isinstance(path,Path): path = path.get_indexes() ticks, labels = zip(*path) ax.set_xticks(ticks) ax.set_xticklabels(labels) ax.set_ylabel('E (eV)') #Colormap if cmap: color_map = plt.get_cmap(cmap) #plot vertical line for x, label in path: ax.axvline(x,c='k',lw=2) ax.axhline(0,c='k') #get weights w_proj = self.get_weights(selected_orbitals=selected_orbitals) #get weights of second set of orbitals if selected_orbitals_2: w_rel = self.get_relative_weight(selected_orbitals=selected_orbitals, selected_orbitals_2=selected_orbitals_2) #plot bands for fix size for ib in range(self.nbands): ax.scatter(range(self.nkpoints),self.eigen[:,ib] - self.fermi,s=size,c=w_rel[:,ib],cmap=color_map,edgecolors='none') #plot bands for a varying size if not selected_orbitals_2: for ib in range(self.nbands): #ax.scatter(range(self.nkpoints),self.eigen[:,ib] - self.fermi,c='r',edgecolors='none') ax.scatter(range(self.nkpoints),self.eigen[:,ib] - self.fermi,s=w_proj[:,ib]*size,c=color,edgecolors='none') ax.set_xlim(0, self.nkpoints-1) ax.set_ylim(auto=True) def get_weights(self,selected_orbitals=[]): # Selection of the bands w_proj = zeros([self.nkpoints,self.nbands]) for ik in range(self.nkpoints): for ib in range(self.nbands): w_proj[ik,ib] = sum(abs(self.proj[ik,selected_orbitals,ib])**2) return w_proj def get_relative_weight(self,selected_orbitals=[],selected_orbitals_2=[]): # Selection of the bands w_rel = zeros([self.nkpoints,self.nbands]) for ik in range(self.nkpoints): for ib in range(self.nbands): w_rel[ik,ib] = sum(abs(self.proj[ik,selected_orbitals,ib])**2)/(sum(abs(self.proj[ik,selected_orbitals,ib])**2)+sum(abs(self.proj[ik,selected_orbitals_2,ib])**2)) return w_rel def get_eigen(self): """ Return eigenvalues """ datafile_xml = self.datafile_xml eigen = [] for ik in xrange(self.nkpoints): eigen.append( map(float, self.datafile_xml.find("EIGENVALUES/K-POINT.%d/EIG"%(ik+1)).text.split() )) self.eigen = np.array(eigen)*RytoeV return self.eigen def write_proj(self,filename='proj'): """ Write the projection array in a numpy file """ np.savez(filename,proj=self.proj,weights=self.weights) def get_proj(self): """ Return projections """ datafile_xml = self.datafile_xml proj = zeros([self.nkpoints,self.nproj,self.nbands],dtype=complex) for ik in range(self.nkpoints): for ip in range(self.nproj): projlist = self.datafile_xml.find("PROJECTIONS/K-POINT.%d/ATMWFC.%d" % (ik+1,ip+1) ).text.splitlines()[1:-1] proj[ik,ip] = [ (lambda x,y: complex(float(x),float(y)))(*c.split(',')) for c in projlist ] self.proj = np.array(proj) return proj def __str__(self): s = "nbands: %d\n"%self.nbands s += "nkpoints: %d\n"%self.nkpoints for n,state in enumerate(self.states): s += "n: %3d -> iatom:%3d atype:%2s wfc:%d j:%s l:%s m:%s m_j:%s\n"%(n,state['iatom'],state['atype'],state['wfc'],str(state['j']),str(state['l']),str(state['m']),str(state['m_j'])) return s
henriquemiranda/yambopy
qepy/projwfcxml.py
Python
bsd-3-clause
7,487
[ "Quantum ESPRESSO" ]
6eb482d2b94cca4fe5a530fe07a96185a85782877d640bc80285f1cde4f1a40f
# Copyright 2001 by Katharine Lindner. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """ This module is deprecated; its functions are now available from Bio.InterPro. This module provides code to access resources at InterPro over the WWW. http://www.ebi.ac.uk/interpro Functions: get_interpro_entry """ import warnings warnings.warn("Bio.WWW.InterPro was deprecated. Its functionality is now available from Bio.InterPro.") from Bio import InterPro get_interpro_entry = InterPro.get_interpro_entry
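
# --- Migration sketch (illustrative; the exact signature is defined in
# Bio.InterPro, not shown here) ---
#
#     from Bio import InterPro
#     entry = InterPro.get_interpro_entry(...)
#
# i.e. call the same function from its new home instead of Bio.WWW.InterPro.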
dbmi-pitt/DIKB-Micropublication
scripts/mp-scripts/Bio/WWW/InterPro.py
Python
apache-2.0
651
[ "Biopython" ]
4c38e99b5e4ec2b8cf0510452d8c76a1b4b9e364bf5f771fbe530a80f1a312b7
#!/usr/bin/python
# -*- coding: utf-8 -*-
from argparse import Namespace
import datetime, openpyxl as xl, os
import code
import operator, collections
import unicodecsv as csv
import re

# Django Imports
from django.core.management.base import BaseCommand, CommandError
from django.db import models
from django.utils import timezone
from django.conf import settings

# Local Imports
import backend.models as back
import contacts.models as cont
import utils
from utils.xl import xl_add_header_row , xl_style_current_row , make_column , bold_font

class Command(BaseCommand):

    help = 'Make and print study reports'

    def add_arguments(self,parser):
        # code.interact(local=locals())
        subparsers = parser.add_subparsers(help='make reports')

        # The cmd argument is required for django.core.management.base.CommandParser
        print_parser = subparsers.add_parser('print',cmd=parser.cmd,help='report send time statistics')
        print_parser.add_argument('-t','--times',action='store_true',default=False,help='print send times')
        print_parser.add_argument('-f','--facilities',action='store_true',default=False,help='print registered totals per facility')
        print_parser.add_argument('-c','--validation-codes',action='store_true',default=False,help='print validation stats')
        print_parser.add_argument('-m','--messages',action='store_true',default=False,help='print message statistics')
        print_parser.add_argument('-a','--all',action='store_true',default=False,help='all report options')
        print_parser.add_argument('-o','--hours',action='store_true',default=False,help='print hist of message hours')
        print_parser.add_argument('-i','--hiv',action='store_true',default=False,help='print hiv messaging status')
        print_parser.add_argument('-l','--language',action='store_true',default=False,help='print language histogram')
        print_parser.add_argument('-s','--status',action='store_true',default=False,help='print status histogram')
        print_parser.add_argument('-e','--enrollment',action='store_true',default=False,help='print enrollment by site')
        print_parser.add_argument('-d','--delivery',action='store_true',default=False,help='print delivery statistics')
        print_parser.add_argument('-x', '--success-times', action='store_true', default=False, help='print success times report')
        print_parser.add_argument('-u', '--message-status', default=None, const='all',
            choices=('day','week','cur_week','month','year','all'),nargs='?', help='print message status')
        print_parser.add_argument('--delivery-source',action='store_true',default=False,help='print delivery source statistics')
        print_parser.add_argument('--topic',action='store_true',default=False,help='incoming message topics')
        print_parser.add_argument('--weeks',default=5,type=int,help='message history weeks (default 5)')
        print_parser.set_defaults(action='print_stats')

        xlsx_parser = subparsers.add_parser('xlsx',cmd=parser.cmd,help='create xlsx reports')
        xlsx_parser.add_argument('-t','--visit',action='store_true',default=False,help='create visit report')
        xlsx_parser.add_argument('-d','--detail',action='store_true',default=False,help='create detail report')
        xlsx_parser.add_argument('-a','--all',action='store_true',default=False,help='create all reports')
        xlsx_parser.add_argument('-i','--interaction',action='store_true',default=False,help='create participant interaction report')
        xlsx_parser.add_argument('-m','--messages',action='store_true',default=False,help='create system message dump')
        xlsx_parser.add_argument('-w','--weekly',action='store_true',default=False,help='create weekly stats report')
xlsx_parser.add_argument('-c','--custom',action='store_true',default=False,help='create custom report') xlsx_parser.add_argument('--dir',default='ignore',help='directory to save report in') xlsx_parser.set_defaults(action='make_xlsx') csv_parser = subparsers.add_parser('csv',cmd=parser.cmd,help='create csv reports') csv_parser.add_argument('--dir',default='ignore',help='directory to save csv in') csv_parser.add_argument('name',help='csv report type', choices=( 'hiv_messaging','enrollment','messages','edd','delivery', 'participant_week', 'sae','visits','msg_success','msg_dump','hiv_statuschange', ) ) csv_parser.set_defaults(action='make_csv_name') def handle(self,*args,**options): self.stdout.write( 'Reports Action: {}'.format(options['action']) ) self.printed = False self.options = options getattr(self,options['action'])() ######################################## # Commands ######################################## def print_stats(self): if self.options['facilities'] or self.options['all']: self.participants_by_facility() if self.options['times'] or self.options['all']: self.send_times() if self.options['status'] or self.options['all']: self.status_breakdown() if self.options['validation_codes'] or self.options['all']: self.validation_stats() if self.options['messages'] or self.options['all']: self.message_stats() if self.options['hiv'] or self.options['all']: self.hiv_messaging() if self.options['hours']: self.message_hours() if self.options['language']: self.print_languages() if self.options['enrollment']: self.print_enrollment() if self.options['delivery']: self.print_delivery_stats() if self.options['delivery_source'] and not self.options['delivery']: self.print_delivery_source() if self.options['topic']: self.print_message_topic() if self.options['message_status'] is not None: self.print_message_status() if self.options['success_times']: self.print_success_times() # SEC::XLSX Helper Functions def make_xlsx(self): workbook_columns = {} if self.options['visit'] or self.options['all']: workbook_columns['visit'] = visit_columns if self.options['detail'] or self.options['all']: workbook_columns['detail'] = detail_columns if self.options['custom']: workbook_columns['custom'] = detail_columns if self.options['interaction']: workbook_columns['interaction'] = interaction_columns interaction_columns.queryset = make_interaction_columns() if self.options['messages']: make_message_wb() if self.options['weekly']: make_weekly_wb() for name, columns in workbook_columns.items(): wb = xl.workbook.Workbook() today = datetime.date.today() appname = re.sub(r"[\s+]", '_', getattr(settings, 'APP_NAME', 'mWaChX')) file_name = appname + '_' + today.strftime('{}_%Y-%m-%d.xlsx').format(name) xlsx_path_out = os.path.join(self.options['dir'],file_name) self.stdout.write( "Making xlsx file {}".format(xlsx_path_out) ) if hasattr(columns,'facility_sheet'): make_facility_worksheet(columns,wb.active,'mathare') make_facility_worksheet(columns,wb.create_sheet(),'rachuonyo') else: make_worksheet(columns,wb.active,columns.queryset) wb.save(xlsx_path_out) # SEC::Start CSV Functions def make_csv_name(self): file_path = getattr(self,'make_{}_csv'.format(self.options['name']))() print "Done:" , file_path ######################################## # Start Print Functions ######################################## def send_times(self): self.print_header("Participant Send Times") c_all = cont.Contact.objects_no_link.all().order_by('send_day','send_time') time_counts = 
c_all.exclude(study_group='control').values('send_day','send_time') \ .annotate(count=models.Count('send_day')) times, day , counts = {} ,0 , [0,0,0] day_lookup = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun'] time_map = {8:0,13:1,20:2} for c in time_counts: if c['send_day'] == day: counts[time_map[c['send_time']]] = c['count'] else: times[day] = counts day = c['send_day'] counts = [0,0,0] counts[time_map[c['send_time']]] = c['count'] times[day] = counts totals = [0,0,0] for i in range(7): t = times.get(i,[0,0,0]) totals = [t1+t2 for t1,t2 in zip(totals,t)] self.stdout.write( "{} {} {}".format(day_lookup[i],t,sum(t)) ) self.stdout.write( "Tot {} {}".format(totals,sum(totals)) ) def participants_by_facility(self): self.print_header("Participants By Facility") group_counts = cont.Contact.objects.values('facility','study_group') \ .annotate(count=models.Count('study_id',distinct=True)) # Pivot Group Counts counts = collections.defaultdict(GroupRowCount) for g in group_counts: counts[g['facility']][g['study_group']] = g['count'] # Print Group Counts self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format("","Control","One-Way","Two-Way","Total") ) total_row = GroupRowCount() for facility, row in counts.items(): self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format( facility.capitalize(), row['control'], row['one-way'], row['two-way'], row.total()) ) total_row += row self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format( "Total", total_row['control'], total_row['one-way'], total_row['two-way'], total_row.total() ) ) self.stdout.write('\nMost Recent Message: %s' % cont.Message.objects.first().created ) def validation_stats(self): self.print_header('Validation Stats') c_all = cont.Contact.objects_no_link.all() stats = collections.OrderedDict( ( ('< 1h',0) , ('< 1d',0) ,('> 1d',0) , ('None',0) ) ) for c in c_all: seconds = c.validation_delta() if seconds is None: stats['None'] += 1 elif seconds <= 3600: stats['< 1h'] += 1 elif seconds <= 86400: stats['< 1d'] += 1 else: stats['> 1d'] += 1 counts = dict( c_all.values_list('is_validated').annotate(count=models.Count('is_validated')) ) total = sum(counts.values()) self.stdout.write( "Total: {} Validated: {} ({:0.3f}) Not-Validated: {} ({:0.3f})\n".format( total , counts[True] , counts[True] / float(total) , counts[False] , counts[False] / float(total) ) ) for key , count in stats.items(): self.stdout.write( "\t{}\t{} ({:0.3f})".format(key,count, count/float(total) ) ) def message_stats(self): self.print_header('Message Statistics (system-participant-nurse)') # Get messages grouped by facility, system and outgoing m_all = cont.Message.objects.all() group_counts = m_all.order_by().values( 'contact__facility','contact__study_group','is_system','is_outgoing' ).annotate(count=models.Count('contact__facility')) # Pivot Group Counts based on facility counts = collections.defaultdict(MessageRow) for g in group_counts: facility = g['contact__facility'] if facility is None: continue study_group = g['contact__study_group'] sender = 'system' if not g['is_system']: sender = 'nurse' if g['is_outgoing'] else 'participant' counts[facility][study_group][sender] = g['count'] # Print Message Totals Table self.stdout.write( "{:^10}{:^18}{:^18}{:^18}{:^18}".format("","Control","One-Way","Two-Way","Total") ) total_row = MessageRow() for facility , row in counts.items(): total_row += row row['two-way'].replies = m_all.filter(parent__isnull=False,contact__facility=facility).count() self.stdout.write( '{:<10}{} {} 
({})'.format(facility.capitalize(),row,row.total(),row.total().total() ) ) none_count = m_all.filter(contact__isnull=True).count() total_count = total_row.total() total_row['two-way'].replies = m_all.filter(parent__isnull=False).count() self.stdout.write( '{:<10}{} {} ({})'.format('Total',total_row,total_count,sum(total_count) ) ) self.stdout.write( '{:<10}{:04d} ({})'.format('None',none_count,none_count+sum(total_count)) ) # Print last 5 weeks of messaging self.stdout.write('') self.print_messages(self.options['weeks']) def print_messages(self,weeks=None): # Get all two-way messages m_all = cont.Message.objects.filter(contact__study_group='two-way') # Get start date study_start_date = timezone.make_aware(datetime.datetime(2015,11,23)) now = timezone.now() weeks_start_date = timezone.make_aware( datetime.datetime(now.year,now.month,now.day) - datetime.timedelta(days=now.weekday()) ) # Last Sunday start_date = study_start_date if weeks is not None and weeks_start_date > study_start_date: start_date = weeks_start_date - datetime.timedelta(days=weeks*7) total_row = MessageRowItem() while start_date < now: end_date = start_date + datetime.timedelta(days=7) m_range = m_all.filter(created__range=(start_date,end_date)) row = MessageRowItem() row['system'] = m_range.filter(is_system=True).count() row['participant'] = m_range.filter(is_system=False,is_outgoing=False).count() row['nurse'] = m_range.filter(is_system=False,is_outgoing=True).count() row.replies = m_range.filter(parent__isnull=False).count() total_row += row self.stdout.write( '{} {} ({})'.format(start_date.strftime('%Y-%m-%d'),row,sum(row) ) ) start_date = end_date self.stdout.write( "Total {} ({})".format(total_row,sum(total_row)) ) def message_hours(self): self.print_header('Histogram of message send hour (two-way only)') messages , hour_counts = {} , {} messages['p'] = cont.Message.objects.filter(is_outgoing=False,contact__study_group='two-way') messages['s'] = cont.Message.objects.filter(is_outgoing=True,is_system=True,contact__study_group='two-way') messages['n'] = cont.Message.objects.filter(is_outgoing=True,is_system=False,contact__study_group='two-way') for k in messages.keys(): hours = [0 for _ in range(24)] for m in messages[k]: hours[m.created.hour] += 1 hour_counts[k] = hours print " C S N" for h in range(24): print "{0:<5}{1:<5}{2:<5}{3:<5}".format((h+3)%24,hour_counts['p'][h],hour_counts['s'][h],hour_counts['n'][h]) print " {0:<5}{1:<5}{2:<5}".format(*map(sum,[hour_counts[k] for k in ('p','s','n')])) def hiv_messaging(self): self.print_header('HIV Messaging Preference (none-initiated-system)') hiv_messaging_groups = cont.Contact.objects.order_by().values('facility','study_group','hiv_messaging') \ .annotate(count=models.Count('study_id',distinct=True)) # Piviot Group Counts group_counts = collections.defaultdict(HivRowCount) for g in hiv_messaging_groups: group_counts[g['facility']][g['study_group']][g['hiv_messaging']] = g['count'] # Print Group Counts self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format("","Control","One-Way","Two-Way","Total") ) total_row = HivRowCount() for facility, row in group_counts.items(): self.stdout.write( "{0:^12}{1[control]:^12}{1[one-way]:^12}{1[two-way]:^12}{2:^12}".format( facility.capitalize(), row, row.total() ) ) total_row += row self.stdout.write( "{0:^12}{1[control]:^12}{1[one-way]:^12}{1[two-way]:^12} {2:^12}".format( "Total", total_row, total_row.total() ) ) def print_languages(self): self.print_header('Language Statistics (english,swahili,luo)') language_groups = 
cont.Contact.objects.order_by().values('facility','study_group','language') \ .annotate(count=models.Count('study_id',distinct=True)) # Piviot Group Counts language_counts = collections.defaultdict(LanguageRow) for g in language_groups: language_counts[g['facility']][g['study_group']][g['language']] = g['count'] # Print Group Counts self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format("","Control","One-Way","Two-Way","Total") ) total_row = LanguageRow() for facility, row in language_counts.items(): self.stdout.write( "{0:^12}{1[control]:^12}{1[one-way]:^12}{1[two-way]:^12}{2:^12}".format( facility.capitalize(), row, row.total() ) ) total_row += row self.stdout.write( "{0:^12}{1[control]:^12}{1[one-way]:^12}{1[two-way]:^12} {2:^12}".format( "Total", total_row, total_row.total() ) ) print '' self.print_header('Language of Messages (participant,nurse)') message_groups = cont.Message.objects.order_by().filter(contact__study_group='two-way',is_system=False)\ .prefetch_related('contact').values('languages','contact__language','is_outgoing')\ .exclude(languages='').annotate(count=models.Count('id',distinct=True)) # Piviot Group Counts language_counts = collections.defaultdict(LanguageMessageRow) for g in message_groups: language_counts[g['languages']][g['contact__language']][g['is_outgoing']] = g['count'] # Print Group Counts self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format("","English","Swahili","Luo","Total") ) total_row = LanguageMessageRow() for language, row in language_counts.items(): self.stdout.write( "{0:^12}{1[english]:^12}{1[swahili]:^12}{1[luo]:^12}{2:^12}".format( ','.join(s[0] for s in language.split(';')), row, row.total() ) ) total_row += row self.stdout.write( "{0:^12}{1[english]:^12}{1[swahili]:^12}{1[luo]:^12}{2:^12}".format( "Total", total_row, total_row.total() ) ) def status_breakdown(self): self.print_header('Participant Status (control,one-way,two-way)') status_groups = cont.Contact.objects.order_by().values('facility','status','study_group')\ .annotate(count=models.Count('study_id',distinct=True)) # Piviot Group Counts status_counts = collections.defaultdict(StatusRow) for g in status_groups: status_counts[g['facility']][g['status']][g['study_group']] = g['count'] # Print Group Counts self.stdout.write( StatusRow.header() ) total_row = StatusRow() for facility, row in status_counts.items(): self.stdout.write( row.row_str(facility) ) total_row += row self.stdout.write( total_row.row_str("Total") ) def print_delivery_stats(self): self.print_header('Participant Delivery Stats') today = datetime.date.today() c_all = cont.Contact.objects.all() edd = c_all.filter(status='pregnant').order_by('due_date') post = edd.filter(due_date__lt=today) self.stdout.write( 'Found {:d} pregnant participants with {:d} post edd'.format( edd.count(), post.count() ) ) future_edd = edd.order_by("-due_date") self.stdout.write( 'Furthest from EDD') for p in future_edd[:5]: self.stdout.write( "\t{0.study_id} {0.due_date} {0.study_group} (weeks {1:.0f})".format( p, p.delta_days() / 7 ) ) self.stdout.write( '\n') self.stdout.write( 'Furthest past EDD') for p in edd[:5]: self.stdout.write( "\t{0.study_id} {0.due_date} {0.study_group} (weeks {1:.0f})".format( p, p.delta_days() / 7 ) ) self.stdout.write( '\n') dd = c_all.filter(delivery_date__isnull=False).order_by('delivery_date') self.stdout.write( 'Found {:d} post-partum participants'.format(dd.count()) ) self.stdout.write( 'Furthest from delivery date - (id due_date delivery_date)') for p in dd[:5]: self.stdout.write( "\t{0.study_id} 
{0.due_date} {0.delivery_date} {0.study_group} (weeks {1:.0f})".format( p, p.delta_days() / 7 ) ) self.stdout.write( '\n') # Accumulate (delivery_date - due_date) offsets in seconds and bucket them by week (-10..+10) dd_min , dd_max , dd_total , dd_count = None , None , 0 , dd.count() dd_hist = [0 for _ in range(-10,11)] for p in dd: p.delivery_offset = (p.delivery_date - p.due_date).total_seconds() if dd_min is None or dd_min.delivery_offset > p.delivery_offset: dd_min = p if dd_max is None or dd_max.delivery_offset < p.delivery_offset: dd_max = p dd_total += p.delivery_offset dd_weeks = int(p.delivery_offset / 604800) + 10 if dd_weeks < 0: dd_weeks = 0 elif dd_weeks > 20: dd_weeks = 20 dd_hist[dd_weeks] += 1 self.stdout.write( 'Min {:s} (weeks {:.0f}) Max: {:s} (weeks {:.0f}) Average: {:f}'.format( dd_min.study_id , dd_min.delivery_offset/604800 , dd_max.study_id , dd_max.delivery_offset/604800, dd_total/(dd_count*604800) ) ) table = "" for c in range(-10,11): table += ' {:2.0f} '.format(c) table += '\n' + '----'*21 + '\n' for c in dd_hist: table += ' {:2.0f} '.format(c) self.stdout.write(table) self.print_delivery_source() def print_delivery_source(self): self.print_header('Participant Delivery Source (control,one-way,two-way)') source_groups = cont.Contact.objects_no_link.filter(delivery_date__isnull=False).order_by().values('facility',\ 'study_group','delivery_source').annotate(count=models.Count('delivery_source')) # Pivot Group Counts source_counts = collections.defaultdict(DeliverySourceItem) for g in source_groups: source_counts[g['facility']][g['delivery_source']][g['study_group']] = g['count'] # Print Group Counts self.stdout.write( DeliverySourceItem.header() ) total_row = DeliverySourceItem() for facility, row in source_counts.items(): self.stdout.write( row.row_str(facility) ) total_row += row self.stdout.write( total_row.row_str("Total") ) def print_enrollment(self): self.print_header('Participant Enrollment By Week') c_all = cont.Contact.objects.all() enrollment_counts = collections.OrderedDict() for c in c_all: key = c.created.strftime('%Y-%U') try: enrollment_counts[key][c.facility] += 1 except KeyError as e: enrollment_counts[key] = FacilityRow() enrollment_counts[key][c.facility] += 1 self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}{:^12}{:^12}{:^12}".format( "Week","Ahero","Bondo","Mathare","Siaya","Rachuonyo","Riruta","Total") ) total_row = FacilityRow() for week , enrollment in enrollment_counts.items(): print week, enrollment, enrollment.total() total_row += enrollment print 'Total ' , total_row , total_row.total() def print_message_topic(self): self.print_header('Incoming Message Topic') msgs = cont.Message.objects.filter(is_outgoing=False,contact__isnull=False) topics = collections.Counter( m.topic for m in msgs ) print "%s\t%s" % ('Topic','Count') for key , count in topics.items(): print "%s\t%s" % (key , count) print "%s\t%s" % ('Total', msgs.count()) def print_success_times(self): self.print_header('Success Times') participant_message_counts = cont.Contact.objects_no_link.annotate_messages().order_by('-msg_missed')[:13] def display_phone_number(num): participant = participant_message_counts[num-1] return " |\t{!r:<40} O: {:<3} D: {:<3} M: {:<3} I: {:<3}".format( participant, participant.msg_outgoing, participant.msg_delivered, participant.msg_missed, participant.msg_incoming ) self.stdout.write('\n') intervals = [ ['',0], ['<10s',10], ['<30s',30], ['<1m',60], ['<5m',300], ['<10m',600], ['<30m',1800], ['<1h',3600], ['<2h',7200], ['<4h',14400], ['<8h',28800], ['<16h',57600], ['<24h',86400], ['>24h',604800] ]
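        # Illustrative reading of the table printed below (counts invented): each interval
        # row counts messages whose success_dt falls between the previous bound and its own,
        # e.g. '<30s' means "confirmed in 10-30 seconds", not a cumulative total.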
# Add success_dt and filter messages from start of collection: Nov 30, 2016 messages = cont.Message.objects.exclude(external_status='Failed').add_success_dt() for i in range(1,len(intervals)): count = messages.filter( success_dt__range=(intervals[i-1][1],intervals[i][1]) ).count() intervals[i].append(count) self.stdout.write( ' {:>8}: {:<4}{:>15}'.format( intervals[i][0], count, display_phone_number(i) )) print "\tTotal (since Nov 30, 2016): {} Longest Wait: {} (h)".format( messages.filter(success_dt__isnull=False).count(), messages.first().success_dt/3600.0) def print_message_status(self): self.print_header('All Messages By Status') # Print message status print message_status_groups(delta=self.options['message_status']) print "Other Types" status_groups = cont.Message.objects.order_by().values('external_status'). \ exclude(external_status__in=('Success','Sent','Failed')).exclude(is_outgoing=False). \ annotate(count=models.Count('external_status')) for group in status_groups: print "\t{0[external_status]:<30}: {0[count]}".format(group) print "\t{:<30}: {}".format("Total",sum( g['count'] for g in status_groups ) ) print "\nFailed Reasons" reasons = collections.Counter() for msg in cont.Message.objects.filter(is_outgoing=True).exclude(external_status__in=('Success','Sent')): reasons[msg.external_data.get('reason','No Reason')] += 1 for reason , count in reasons.items(): print "\t{:<20}: {}".format(reason,count) print "\t{:<20}: {}".format("Total",sum( reasons.values() ) ) def print_header(self,header): if self.printed: self.stdout.write("") self.printed = True self.stdout.write( "-"*30 ) self.stdout.write( "{:^30}".format(header) ) self.stdout.write( "-"*30 ) ######################################## # SEC::Start CSV Functions ######################################## def make_hiv_messaging_csv(self): ''' Basic csv dump of hiv messaging status ''' columns = collections.OrderedDict([ ('study_id','study_id'), ('hiv_messaging','hiv_messaging'), ('hiv_disclosed', null_boolean_factory('hiv_disclosed')), ('phone_shared', null_boolean_factory('phone_shared')), ]) contacts = cont.Contact.objects.all().order_by('study_id') file_path = os.path.join(self.options['dir'],'hiv_messaging.csv') make_csv(columns,contacts,file_path) return file_path def make_hiv_statuschange_csv(self): ''' Basic csv dump of hiv messaging status changes ''' columns = collections.OrderedDict([ ('study_id','contact.study_id'), ('old','old'), ('new', 'new'), ('date','created'), ('since_enrollment', lambda obj: obj.created - obj.contact.created ), ]) status_changes = cont.StatusChange.objects.filter(type='hiv') file_path = os.path.join(self.options['dir'],'hiv_status_changes.csv') make_csv(columns,status_changes,file_path) return file_path def make_visits_csv(self): ''' Basic csv dump of visit history for all participants ''' columns = collections.OrderedDict([ ('study_id',operator.attrgetter('participant.study_id')), ('type','visit_type'), ('scheduled','scheduled'), ('status','status'), ('arrived','arrived'), ]) visits = cont.Visit.objects.all().order_by('participant__study_id').prefetch_related('participant') file_path = os.path.join(self.options['dir'],'visit_dump.csv') make_csv(columns,visits,file_path) return file_path def make_msg_success_csv(self): ''' Basic csv dump of message success rates ''' columns = collections.OrderedDict([ ('study_id','study_id'), ('group','study_group'), ('msg_out','msg_out'), ('missed','msg_missed'), ('received','msg_received'), ('failed','msg_failed'),
('rate',lambda p: round(p.msg_received / float(p.msg_out),4) if p.msg_out != 0 else 0 ), ('msg_in','msg_in') ]) p_all = cont.Contact.objects_no_link.annotate_messages().order_by('-study_group','-msg_missed','-msg_out') file_path = os.path.join(self.options['dir'],'message_success.csv') make_csv(columns,p_all,file_path) return file_path def make_enrollment_csv(self): c_all = cont.Contact.objects.all() enrollment_counts = collections.OrderedDict() for c in c_all: key = c.created.strftime('%Y-%U') try: enrollment_counts[key][c.facility] += 1 except KeyError as e: enrollment_counts[key] = FacilityRow() enrollment_counts[key][c.facility] += 1 file_path = os.path.join(self.options['dir'],'enrollment.csv') with open( file_path , 'wb') as csvfile: csv_writer = csv.writer(csvfile) # Header Row csv_writer.writerow( ["Week"] + FacilityRow.columns + ["Total"] ) total_row = FacilityRow() for week , enrollment in enrollment_counts.items(): csv_writer.writerow( [week] + list(enrollment) + [enrollment.total()] ) total_row += enrollment csv_writer.writerow( ['Total'] + list(total_row) + [total_row.total()] ) return file_path def make_messages_csv(self): """ Messages per week csv """ m_all = cont.Message.objects.all().order_by('created') msg_type_counts = collections.OrderedDict() for msg in m_all: key = msg.created.strftime('%Y-%U') try: msg_type_counts[key][msg.msg_type] += 1 except KeyError as e: msg_type_counts[key] = MessageTypeRow() msg_type_counts[key][msg.msg_type] += 1 file_path = os.path.join(self.options['dir'],'messages.csv') with open( file_path , 'wb') as csvfile: csv_writer = csv.writer(csvfile) # Write Header csv_writer.writerow( ["Week"] + MessageTypeRow.columns + ["Total"] ) total_row = MessageTypeRow() for week , msg_types in msg_type_counts.items(): csv_writer.writerow( [week] + list(msg_types) + [msg_types.total()] ) total_row += msg_types csv_writer.writerow( ['Total'] + list(total_row) + [total_row.total()] ) return file_path def make_edd_csv(self): """ Make report of delivery_date to edd time delta in weeks """ c_all = cont.Contact.objects.filter(delivery_date__isnull=False).exclude(status__in=('loss','sae')) edd_deltas = collections.Counter( (c.delivery_date - c.due_date).days / 7 for c in c_all ) weeks = sorted(edd_deltas.keys()) file_path = os.path.join(self.options['dir'],'edd_deltas.csv') with open( file_path , 'wb') as csvfile: csv_writer = csv.writer(csvfile) # Write Header csv_writer.writerow( ("Week" , "Count") ) for week in range( weeks[0] , weeks[-1] + 1): csv_writer.writerow( (week , edd_deltas[week]) ) return file_path def make_delivery_csv(self): """ Create csv of time delta in weeks between delivery and delivery notification """ c_all = cont.Contact.objects.filter(delivery_date__isnull=False).exclude(status__in=('loss','sae')) delivery_deltas = collections.defaultdict( GroupRowCount ) max_week = 0 for c in c_all: delta = c.delivery_delta() delta_weeks = delta / 7 if delta is not None else 'none' delivery_deltas[delta_weeks][c.study_group] += 1 if delta is not None and delta < 0: print c.study_id, c, c.delivery_date , c.status if delta_weeks > max_week and delta is not None: max_week = delta_weeks file_path = os.path.join(self.options['dir'],'delivery_deltas.csv') with open( file_path , 'wb') as csvfile: csv_writer = csv.writer(csvfile) # Write Header csv_writer.writerow( ("Week" , "Control" , "One-Way", "Two-Way", "Total") ) total_row = GroupRowCount() for week in range(max_week + 1): csv_writer.writerow( [week] + list(delivery_deltas[week]) + 
[delivery_deltas[week].total()] ) total_row += delivery_deltas[week] csv_writer.writerow( ["Total"] + list(total_row) + [total_row.total()] ) return file_path def make_sae_csv(self): loss = cont.Contact.objects.filter(loss_date__isnull=False) loss_deltas = collections.defaultdict( GroupRowCount ) max_week = 0 for c in loss: loss_notify = c.statuschange_set.filter( models.Q(new='loss') | models.Q(new='sae') ).first().created.date() delta_weeks = (loss_notify - c.loss_date).days / 7 loss_deltas[delta_weeks][c.study_group] += 1 if delta_weeks > max_week: max_week = delta_weeks file_path = os.path.join(self.options['dir'],'loss_deltas.csv') with open( file_path , 'wb') as csvfile: csv_writer = csv.writer(csvfile) # Write Header csv_writer.writerow( ("Week" , "Control" , "One-Way", "Two-Way", "Total") ) total_row = GroupRowCount() for week in range(max_week + 1): csv_writer.writerow( [week] + list(loss_deltas[week]) + [loss_deltas[week].total()] ) total_row += loss_deltas[week] csv_writer.writerow( ["Total"] + list(total_row) + [total_row.total()] ) return file_path def make_msg_dump_csv(self): """ Dump stats for each incomming message to csv """ mode = 'spam' if mode == 'client': columns = collections.OrderedDict([ ('timestamp','created'), ('study_id','contact.study_id'), ('facility','contact.facility'), ('since_enrollment', lambda obj: int((obj.created.date() - obj.contact.created.date()).total_seconds() / 86400) ), ('since_delivery', lambda obj: int((obj.created.date() - obj.contact.delivery_date).total_seconds() / 86400) if obj.contact.delivery_date is not None else '' ), ('topic','topic'), ('related','is_related'), ('languages','languages'), ('text','display_text'), ('chars',lambda m: len(m.text)), ('words',lambda m: len( m.text.split() )), ('text_raw','text'), ]) m_all = cont.Message.objects.filter(is_outgoing=False,contact__study_group='two-way') \ .order_by('contact__study_id','created').prefetch_related('contact') elif mode == 'nurse': columns = collections.OrderedDict([ ('timestamp','created'), ('study_id','contact.study_id'), ('facility','contact.facility'), ('since_enrollment', lambda obj: int((obj.created.date() - obj.contact.created.date()).total_seconds() / 86400) ), ('since_delivery', lambda obj: int((obj.created.date() - obj.contact.delivery_date).total_seconds() / 86400) if obj.contact.delivery_date is not None else '' ), ('languages','languages'), ('text','display_text'), ('chars',lambda m: len(m.text)), ('words',lambda m: len( m.text.split() )), ('reply',lambda m: 1 if m.parent else 0), ('text_raw','text'), ]) m_all = cont.Message.objects.filter(is_outgoing=True,is_system=False,contact__study_group='two-way') \ .exclude(translation_status='cust').order_by('contact__study_id','created').prefetch_related('contact') elif mode == 'system': columns = collections.OrderedDict([ ('send_base','send_base'), ('send_offset','send_offset'), ('group','group'), ('condition','condition'), ('hiv', lambda m: 1 if m.hiv_messaging else 0), ('text', lambda m: m.english[39:] if m.english[0] == '{' else m.english), ]) m_all = back.AutomatedMessage.objects.all().order_by('send_base','send_offset','group','condition','hiv_messaging') elif mode == 'spam': columns = collections.OrderedDict([ ('timestamp','created'), ('number','connection.identity'), ('text','display_text'), ]) m_all = cont.Message.objects.filter(is_outgoing=False,contact__isnull=True).order_by('created') elif mode == 'all': columns = collections.OrderedDict([ ('timestamp','created'), ('study_id','contact.study_id'), 
('sent_by','sent_by'), ('status','external_status'), ('topic','topic'), ('related','related'), ]) m_all = cont.Message.objects.filter(contact__study_group='two-way').order_by('contact__study_id','created').prefetch_related('contact') file_path = os.path.join(self.options['dir'],'message_dump_{}.csv'.format(mode)) make_csv(columns,m_all,file_path) return file_path def make_participant_week_csv(self): # Report with number of system, participant and nurse messages per week file_path = os.path.join(self.options['dir'],'participant_msg_per_week.csv') csv_fp = open(file_path,'w') csv_writer = csv.writer(csv_fp) # Header Row csv_writer.writerow( ('id','group','study_week','delivery_week', 'participant','nurse','system', # 'delivered','unknown','failed', # 'n_delivered','n_unknown','n_failed' ) ) def week_start(d): return d - datetime.timedelta(days=d.weekday()+1) def week_end(d): return d + datetime.timedelta(days=6-d.weekday()) def m_status(m): return {'Success':'delivered','Sent':'unknown'}.get(m.external_status,'failed') participants = cont.Contact.objects.all().prefetch_related('message_set') # participants = cont.Contact.objects.filter(study_id__in=['0003','0803']).prefetch_related('message_set') for p in participants: p.study_start , p.delivery_start = week_start(p.created.date()) , week_start(p.delivery_date if p.delivery_date else p.due_date) p.study_end = week_end( p.message_set.first().created.date() ) week_range = (p.study_end - p.study_start).days // 7 delivery_offset = (p.delivery_start - p.study_start).days // 7 counts = [ dict(system=0,participant=0,nurse=0, delivered=0,unknown=0,failed=0, n_delivered=0,n_unknown=0,n_failed=0 ) for _ in range( week_range ) ] for m in p.message_set.all(): week = ( week_start(m.created.date()) - p.study_start ).days // 7 if m.is_outgoing is True: if m.is_system is True: counts[week]['system'] += 1 counts[week][m_status(m)] += 1 else: counts[week]['nurse'] += 1 counts[week]['n_%s'%m_status(m)] += 1 else: counts[week]['participant'] += 1 for idx , row in enumerate(counts): csv_writer.writerow( (p.study_id, p.study_group, idx , idx - delivery_offset, row['participant'], row['nurse'], row['system'], # row['delivered'], row['unknown'], row['failed'], # row['n_delivered'], row['n_unknown'], row['n_failed'], ) ) ######################################## # SEC::XLSX Helper Functions ######################################## last_week = datetime.date.today() - datetime.timedelta(days=7) detail_columns = collections.OrderedDict([ ('Study ID','study_id'), ('ANC Num','anc_num'), ('Enrolled',lambda c: c.created.date()), ('Group','study_group'), ('Status','get_status_display'), ('Shared','phone_shared') , ('EDD','due_date'), ('Δ EDD',lambda c:delta_days(c.due_date)), ('Delivery','delivery_date'), ('Δ Delivery',lambda c:delta_days(c.delivery_date,past=True)), ('Delivery Notify', 'delivery_delta'), ('Client', lambda c: c.message_set.filter(is_outgoing=False).count() ), ('System', lambda c: c.message_set.filter(is_system=True).count() ), ('Nurse', lambda c: c.message_set.filter(is_system=False,is_outgoing=True).count() ), ]) # detail_columns.facility_sheet = True detail_columns.queryset = cont.Contact.objects.all() visit_columns = collections.OrderedDict([ ('Study ID','study_id'), ('Group','study_group'), ('Status','status'), ('EDD','due_date'), ('Δ EDD',lambda c:delta_days(c.due_date)), ('Delivery','delivery_date'), ('Δ Delivery',lambda c:delta_days(c.delivery_date,past=True)), ('TCA',lambda c:c.tca_date()), ('Δ TCA',lambda c:delta_days(c.tca_date())), ('TCA Type',lambda 
c:c.tca_type()), ('Pending Visits',lambda c:c.visit_set.pending().count()), ]) visit_columns.facility_sheet = True BUCKETS = 4 interaction_columns = collections.OrderedDict([ ('Study ID','study_id'), ('Group','study_group'), ('Status','status'), ('Msg Weeks','msg_weeks'), ('Outgoing','msg_outgoing'), ('System','msg_system'), ('Nurse','msg_nurse'), ('Incoming','msg_incoming'), ('Delivered','msg_delivered'), ('%D','precent_delivered'), ('Sent','msg_sent'), ('%S','precent_sent'), ('Failed','msg_failed'), ('%F','precent_failed'), ('Rejected','count_custom'), ('%R','precent_custom'), ('Start Streak','start_streak'), ('Longest Streak','longest_streak'), ('Miss Streak','miss_streak'), ('Last Miss Streak','last_miss_streak'), ('System Success','system_success'), ('Nurse Success','nurse_success'), ('Reply Delivered','reply_delivered'), ('Reply Sent','reply_sent'), ('Reply Failed','reply_failed'), ] + [ ("%s%i"%(c,i),"%s%i"%(c,i)) for c in ('d','s','f') for i in range(1,BUCKETS+1) ] ) def make_interaction_columns(): contacts = { c.id:c for c in cont.Contact.objects_no_link.annotate_messages().filter(msg_outgoing__gt=9) # cont.Contact.objects_no_link.annotate_messages().order_by('-msg_failed')[:20] } # Add all messages to contacts.messages messages = cont.Message.objects.filter(contact__in=contacts.keys()).order_by('created') for m in messages: c = contacts[m.contact_id] if hasattr(c,'messages'): c.messages.append(m) else: c.messages = [m] for id , c in contacts.items(): start_streak, start_streak_flag = 0 , False longest_streak , current_streak = 0 , 0 miss_streak , current_miss_streak = 0 , 0 system_success , nurse_success = 0 , 0 size , rem = divmod( c.msg_outgoing , BUCKETS ) bucket_sizes = [ (size+1) for i in range(rem) ] + [size for i in range(BUCKETS-rem)] delivered_buckets = [ 0 for _ in range(BUCKETS)] sent_buckets = [ 0 for _ in range(BUCKETS)] failed_buckets = [ 0 for _ in range(BUCKETS)] count , bucket = 0 , 0 last_outgoing = None reply_delivered , reply_sent , reply_failed = 0 , 0 , 0 for m in c.messages: if m.is_outgoing: if count == bucket_sizes[bucket]: bucket += 1 count = 0 count += 1 success = m.external_status == 'Success' last_outgoing = m # Start Streak if not start_streak_flag: if success: start_streak += 1 else: start_streak_flag = True if success: current_streak += 1 delivered_buckets[bucket] += 1 if m.is_system: system_success += 1 else: nurse_success += 1 if current_miss_streak > miss_streak: miss_streak = current_miss_streak current_miss_streak = 0 # reset the running miss streak on success else: current_miss_streak += 1 if current_streak > longest_streak: longest_streak = current_streak current_streak = 0 if m.external_status == 'Sent': sent_buckets[bucket] += 1 else: failed_buckets[bucket] += 1 elif last_outgoing is not None: # incoming message and no reply yet; messages are chronological, so elapsed time is incoming minus outgoing if (m.created - last_outgoing.created).total_seconds() < 21600: #6h if last_outgoing.external_status == 'Success': reply_delivered += 1 elif last_outgoing.external_status == 'Sent': reply_sent += 1 else: reply_failed += 1 last_outgoing = None if current_miss_streak > miss_streak: miss_streak = current_miss_streak if current_streak > longest_streak: longest_streak = current_streak c.start_streak = start_streak c.longest_streak = longest_streak c.miss_streak = miss_streak c.last_miss_streak = current_miss_streak c.precent_delivered = zero_or_precent( c.msg_delivered , c.msg_outgoing ) c.precent_sent = zero_or_precent( c.msg_sent , c.msg_outgoing ) c.precent_failed = zero_or_precent( c.msg_other , c.msg_outgoing ) custom_count = c.msg_outgoing - c.msg_delivered - 
c.msg_sent - c.msg_failed c.count_custom = custom_count c.precent_custom = zero_or_precent(custom_count , c.msg_outgoing ) c.system_success = zero_or_precent( system_success , c.msg_system ) c.nurse_success = zero_or_precent( nurse_success , c.msg_nurse ) c.reply_delivered = zero_or_precent( reply_delivered , c.msg_delivered ) c.reply_sent = zero_or_precent( reply_sent , c.msg_sent ) c.reply_failed = zero_or_precent( reply_failed , c.msg_other ) c.msg_weeks = (c.messages[-1].created - c.messages[0].created).total_seconds() / (3600*24*7) for char , buckets in ( ('d',delivered_buckets) , ('s',sent_buckets) , ('f',failed_buckets) ): for idx , count in enumerate(buckets): setattr(c,"%s%i"%(char,idx+1), zero_or_precent(count,bucket_sizes[idx])) return contacts.values() def make_facility_worksheet(columns,ws,facility): contacts = cont.Contact.objects.filter(facility=facility) ws.title = facility.capitalize() make_worksheet(columns,ws,contacts) def make_worksheet(columns,ws,queryset,column_widths=None): # Write Header Row ws.append(columns.keys()) ws.auto_filter.ref = 'A1:{}1'.format( xl.utils.get_column_letter(len(columns)) ) if column_widths: for col_letter, width in column_widths.items(): ws.column_dimensions[col_letter].width = width # Write Data Rows for row in queryset: ws.append( [make_column(row,attr) for attr in columns.values()] ) def make_weekly_wb(): print "Making Weekly XLSX Report" wb = xl.Workbook() ws = wb.active week_start = timezone.make_aware(datetime.datetime(2015,11,22)) today = timezone.make_aware( datetime.datetime( *datetime.date.today().timetuple()[:3] ) ) ws.append( ('Start','End','Enrolled','System','Success','Participant','Nurse','Spam') ) ws.freeze_panes = 'A2' while week_start < today: week_end = week_start + datetime.timedelta(days=7) messages = cont.Message.objects.filter(created__gte=week_start,created__lt=week_end) count = cont.Contact.objects.filter(created__lt=week_end).count() system = messages.filter(is_outgoing=True,is_system=True).count() success = messages.filter(is_outgoing=True,is_system=True,external_status='Success').count() nurse = messages.filter(is_outgoing=True,is_system=False).count() participant = messages.filter(is_outgoing=False,is_system=False,contact__isnull=False).count() spam = messages.filter(is_outgoing=False,is_system=False,contact__isnull=True).count() ws.append( (week_start.date(),week_end.date(),count,system,success,participant,nurse,spam) ) week_start = week_end wb.save('ignore/weekly_messages.xlsx') def make_message_wb(): print "Making Message XLSX Report" wb = xl.Workbook() ws = wb.active header = ('id','day','created','sent_by','auto','external', 'delta_human','delta','delta_last', 'study_wk','edd_wk','chars','words','language') widths = {'B':5,'C':25,'D':12,'E':25,'G':12,'H':12,'I':12,'L':20} xl_add_header_row(ws,header,widths) def date_day(dt): return (dt.strftime('%a') , dt) two_way = cont.Contact.objects.filter(study_group='two-way') # for p in two_way.filter(study_id__in=('0042','0035')): for p in two_way.all(): previous = Namespace(participant=None) p_cols = (p.study_id,) for m in p.message_set.prefetch_related('contact').order_by('created'): study_wk , edd_wk = m.study_wk , m.edd_wk if m.is_system: previous.system , previous.out = m , m ws.append( p_cols + date_day(m.created) + (m.sent_by(),m.auto, m.external_status,'','','',study_wk,edd_wk,'','','') ) xl_style_current_row(ws) else: if m.is_outgoing is True: # Nurse message delta = m.created - (previous.participant.created if 
previous.participant is not None else m.contact.created) previous.out = m ws.append( p_cols + date_day(m.created) + (m.sent_by(), '', m.external_status,seconds_as_str(delta,True),delta.total_seconds(),'',study_wk,edd_wk, len(m.text),len(m.text.split()),m.languages, ) ) ws["E{0}".format(ws._current_row)].font = bold_font else: # Participant message previous.participant = m if hasattr(previous,'system'): delta = m.created - previous.system.created delta_last = '' if previous.out == previous.system else (m.created - previous.out.created).total_seconds() ws.append( p_cols + date_day(m.created) + (m.sent_by(),'','',seconds_as_str(delta,True),delta.total_seconds(),delta_last, study_wk,edd_wk, len(m.text),len(m.text.split()),m.languages) ) else: print p.study_id, m wb.save('ignore/messages_export.xlsx') def null_boolean_factory(attribute): def null_boolean(obj): value = getattr(obj,attribute) if value is None: return 99 if value is True: return 1 return 0 return null_boolean def make_column(obj,column): if isinstance(column,basestring): for name in column.split('.'): obj = getattr(obj,name) return obj() if hasattr(obj,'__call__') else obj # Else assume column is a function that takes the object return column(obj) def make_csv(columns,data,file_path): ''' Write data to {file_path}.csv ''' with open( file_path , 'wb') as csvfile: csv_writer = csv.writer(csvfile) # Header Row csv_writer.writerow( columns.keys() ) for row in data: csv_writer.writerow( [ make_column(row,value) for value in columns.values()] ) ######################################## #SEC::Utility Functions ######################################## def seconds_as_str(seconds,as_min=False): if seconds is None: return None if isinstance(seconds,datetime.timedelta): seconds = seconds.total_seconds() if as_min is False: if seconds <= 3600: return '{:.2f}'.format(seconds/60) return '{:.2f} (h)'.format(seconds/3600) else: days , hours = divmod(seconds, 86400) hours , mins = divmod(hours, 3600) mins , secons = divmod(mins,60) outstr = '' if days != 0: outstr += '{:.0f}d '.format(days) if hours != 0: outstr += '{:.0f}h '.format(hours) if mins != 0: outstr += '{:.0f}m '.format(mins) return outstr def delta_days(date,past=False): if date is not None: days = (date - datetime.date.today()).days return -days if past else days def zero_or_precent(frac,total): if total == 0: return '' return round( float(frac) / total , 3 ) ######################################## # Message Row Counting Classes for print stats ######################################## class CountRowBase(dict): columns = " DEFINE COLUMN LIST IN SUBCLASS " child_class = int def __init__(self): for c in self.columns: self[c] = self.child_class() def __add__(self,other): new = self.__class__() for c in self.columns: new[c] = self[c] + other[c] return new def __iadd__(self,other): for c in self.columns: self[c] += other[c] return self def __iter__(self): """ Iterate over values in column order instead of keys """ for c in self.columns: yield self[c] def total(self): new = self.child_class() for c in self.columns: new += self[c] return new def __str__(self): return ' '.join( str(self[c]) for c in self.columns ) class MessageRowItem(CountRowBase): columns = ['system','participant','nurse'] def __init__(self): self.replies = 0 for c in self.columns: self[c] = 0 def __str__(self): reply_str = '' if not self.replies else '/{:04d}'.format(self.replies) return '{0[system]:04d}--{0[participant]:04d}{1}--{0[nurse]:04d}'.format(self,reply_str) class MessageRow(CountRowBase): columns = 
['control','one-way','two-way'] child_class = MessageRowItem class GroupRowCount(CountRowBase): columns = ['control','one-way','two-way'] @property def condensed(self): return self.condensed_str() def condensed_str(self): row = '--'.join( '{:02d}'.format(self[c]) for c in self.columns ) return "{} ({:03d})".format(row,sum(self.values())) class HivRowItem(CountRowBase): columns = ['none','initiated','system'] def __str__(self): return '--'.join( '{:02d}'.format(self[c]) for c in self.columns ) class HivRowCount(CountRowBase): columns = ['control','one-way','two-way'] child_class = HivRowItem class LanguageRowItem(CountRowBase): columns = ['english','swahili','luo'] def __str__(self): return '--'.join( '{:02d}'.format(self[c]) for c in self.columns ) class LanguageRow(CountRowBase): columns = ['control','one-way','two-way'] child_class = LanguageRowItem class FacilityRow(CountRowBase): columns = ['ahero','bondo','mathare','siaya','rachuonyo','riruta'] def __str__(self): return ' ' + ''.join( '{:^12}'.format(self[c]) for c in self.columns ) class LanguageMessageRowItem(CountRowBase): # Is Ougtgoing columns = [False,True] def __str__(self): return '--'.join( '{:02d}'.format(self[c]) for c in self.columns ) class LanguageMessageRow(CountRowBase): columns = ['english','swahili','luo'] child_class = LanguageMessageRowItem class StatusRow(CountRowBase): columns = ['pregnant','post','loss','sae','other','stopped'] child_class = GroupRowCount @classmethod def header(cls): return "{:^12}{:^18}{:^18}{:^18}{:^18}{:^18}{:^18}{:^18}".format( "","Pregnant","Post-Partum","SAE OptIn","SAE OptOut","Withdrew","Other","Total" ) def row_str(self,label): str_fmt = "{0:^12}{1[pregnant].condensed:^18}{1[post].condensed:^18}{1[loss].condensed:^18}" str_fmt += "{1[sae].condensed:^18}{1[stopped].condensed:^18}{1[other].condensed:^18}{2:^18}" return str_fmt.format( label , self, self.total().condensed_str() ) class MessageTypeRow(CountRowBase): columns = ['edd','dd','loss','visit','bounce','stop','signup', 'control','one-way','two-way','nurse','anonymous','empty_auto'] class DeliverySourceItem(CountRowBase): columns = ['phone','sms','visit','m2m','other',''] child_class = GroupRowCount @classmethod def header(cls): return "{:^12}{:^18}{:^18}{:^18}{:^18}{:^18}{:^18}{:^18}".format( "","Phone","SMS","Clinic Visit","Mothers to Mothers","Other","None","Total" ) def row_str(self,label): str_fmt = "{0:^12}{1[phone].condensed:^18}{1[sms].condensed:^18}{1[visit].condensed:^18}" str_fmt += "{1[m2m].condensed:^18}{1[other].condensed:^18}{2:^18}{3:^18}" return str_fmt.format( label, self, self[''].condensed_str(), self.total().condensed_str() ) ######################################## # Report Utilities ######################################## def message_status_groups(start=None,delta='day'): """ Create report of message status for nightly email """ if delta not in ('day','week','cur_week','month','year','all'): delta = 'day' if start is None: start = datetime.date.today() - datetime.timedelta(days=1) start = utils.make_date( start ) # Make timezone aware if delta == 'day': end = start + datetime.timedelta(days=1) elif delta == 'week': start = start - datetime.timedelta(days=start.weekday()) # Get start of week end = start + datetime.timedelta(weeks=1) elif delta == 'cur_week': end = start start = end - datetime.timedelta(weeks=1) elif delta == 'month': start = start.replace(day=1) # Get start of month # Get last day of month day_in_next_month = start.replace(day=28) + datetime.timedelta(days=4) first_day_next_month = 
day_in_next_month.replace(day=1) last_day_in_month = first_day_next_month - datetime.timedelta(days=1) end = start.replace(day=last_day_in_month.day) elif delta == 'year': start = start.replace(month=1,day=1) end = start.replace(month=12,day=31) elif delta == 'all': end = start start = utils.make_date(2010,1,1) out_string = ['Message Success Stats From: {} To: {}'.format(start.strftime('%Y-%m-%d'),end.strftime('%Y-%m-%d')), ''] messages = cont.Message.objects.filter(created__range=(start,end)) msg_groups = messages.order_by().values( 'external_status','contact__study_group' ).annotate( count=models.Count('external_status'), ) # Create OrderedDict for Groups status_counts = [('Success',0),('Sent',0),('Failed',0),('Other',0),('',0),('Total',0)] msg_dict = collections.OrderedDict( [ ('two-way',collections.OrderedDict( status_counts ) ), (None,collections.OrderedDict( status_counts ) ) ] ) for group in msg_groups: group_dict = msg_dict[group['contact__study_group']] try: group_dict[group['external_status']] += group['count'] except KeyError as e: group_dict['Other'] += group['count'] group_dict['Total'] += group['count'] out_string.append( '{:^15}{:^10}{:^10}{:^10}{:^10}{:^10}{:^10}'.format( 'Group','Delivered','Sent','Failed','Other','Blank','Total') ) total_row = collections.OrderedDict( status_counts ) for group , status_dict in msg_dict.items(): out_string.append( '{:^15}{:^10}{:^10}{:^10}{:^10}{:^10}{:^10}'.format( group, *status_dict.values() ) ) for key in ['Success','Sent','','Other','Total','Failed']: total_row[key] += status_dict[key] out_string.append( '{:^15}{:^10}{:^10}{:^10}{:^10}{:^10}{:^10}'.format( 'Total', *total_row.values() ) ) # The 'Total' key double-counts every message, so halve the sum total_messages = float(sum(total_row.values())/2) if total_messages == 0: total_precents = [0 for _ in range(len(total_row))] else: total_precents = [ c*100/total_messages for c in total_row.values() ] out_string.append( '{:^15} {:06.3f} {:06.3f} {:06.3f} {:06.3f} {:06.3f} {:06.3f}'.format( '%', *total_precents ) ) out_string.append('') return '\n'.join(out_string)
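# ----------------------------------------------------------------------
# Illustrative sketch (not part of the command): how the CountRowBase
# pivot classes defined above compose. The class names are the ones in
# this module; the counts are invented for the example.
#
#   >>> row = GroupRowCount()
#   >>> row['control'] += 3; row['two-way'] += 5
#   >>> other = GroupRowCount()
#   >>> other['control'] += 1
#   >>> combined = row + other      # __add__ merges column-wise
#   >>> list(combined)              # __iter__ yields values in column order
#   [4, 0, 5]
#   >>> combined.total()            # accumulates across the columns
#   9
#   >>> combined.condensed_str()
#   '04--00--05 (009)'
#
# Typical invocations of the command itself (the directory is an assumption):
#   python manage.py reports print --all
#   python manage.py reports csv enrollment --dir ignore/
# ----------------------------------------------------------------------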
I-TECH-UW/mwachx
utils/management/commands/reports.py
Python
apache-2.0
63,592
[ "VisIt" ]
d7d85a4d6805fe8d4e4db8af2adb48b05f33f5d084d7b00f5879c06c1f9362c4
# -*- coding: utf-8 -*- """ Acceptance tests for Video. """ import os from mock import patch from nose.plugins.attrib import attr from unittest import skipIf, skip from selenium.webdriver.common.by import By from selenium.webdriver.common.action_chains import ActionChains from ..helpers import UniqueCourseTest, is_youtube_available, YouTubeStubConfig from ...pages.lms.video.video import VideoPage from ...pages.lms.tab_nav import TabNavPage from ...pages.lms.courseware import CoursewarePage from ...pages.lms.course_nav import CourseNavPage from ...pages.lms.auto_auth import AutoAuthPage from ...pages.lms.course_info import CourseInfoPage from ...fixtures.course import CourseFixture, XBlockFixtureDesc from ..helpers import skip_if_browser from flaky import flaky VIDEO_SOURCE_PORT = 8777 HTML5_SOURCES = [ 'http://localhost:{0}/gizmo.mp4'.format(VIDEO_SOURCE_PORT), 'http://localhost:{0}/gizmo.webm'.format(VIDEO_SOURCE_PORT), 'http://localhost:{0}/gizmo.ogv'.format(VIDEO_SOURCE_PORT), ] HTML5_SOURCES_INCORRECT = [ 'http://localhost:{0}/gizmo.mp99'.format(VIDEO_SOURCE_PORT), ] @skipIf(is_youtube_available() is False, 'YouTube is not available!') class VideoBaseTest(UniqueCourseTest): """ Base class for tests of the Video Player Sets up the course and provides helper functions for the Video tests. """ def setUp(self): """ Initialization of pages and course fixture for video tests """ super(VideoBaseTest, self).setUp() self.video = VideoPage(self.browser) self.tab_nav = TabNavPage(self.browser) self.course_nav = CourseNavPage(self.browser) self.courseware = CoursewarePage(self.browser, self.course_id) self.course_info_page = CourseInfoPage(self.browser, self.course_id) self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id) self.course_fixture = CourseFixture( self.course_info['org'], self.course_info['number'], self.course_info['run'], self.course_info['display_name'] ) self.metadata = None self.assets = [] self.verticals = None self.youtube_configuration = {} self.user_info = {} # reset youtube stub server self.addCleanup(YouTubeStubConfig.reset) def navigate_to_video(self): """ Prepare the course and get to the video and render it """ self._install_course_fixture() self._navigate_to_courseware_video_and_render() def navigate_to_video_no_render(self): """ Prepare the course and get to the video unit; however, do not wait for it to render, because there has been an error. """ self._install_course_fixture() self._navigate_to_courseware_video_no_render() def _install_course_fixture(self): """ Install the course fixture that has been defined """ if self.assets: self.course_fixture.add_asset(self.assets) chapter_sequential = XBlockFixtureDesc('sequential', 'Test Section') chapter_sequential.add_children(*self._add_course_verticals()) chapter = XBlockFixtureDesc('chapter', 'Test Chapter').add_children(chapter_sequential) self.course_fixture.add_children(chapter) self.course_fixture.install() if len(self.youtube_configuration) > 0: YouTubeStubConfig.configure(self.youtube_configuration) def _add_course_verticals(self): """ Create XBlockFixtureDesc verticals :return: a list of XBlockFixtureDesc """ xblock_verticals = [] _verticals = self.verticals # Video tests require at least one vertical with a single video. 
if not _verticals: _verticals = [[{'display_name': 'Video', 'metadata': self.metadata}]] for vertical_index, vertical in enumerate(_verticals): xblock_verticals.append(self._create_single_vertical(vertical, vertical_index)) return xblock_verticals def _create_single_vertical(self, vertical, vertical_index): """ Create a single course vertical of type XBlockFixtureDesc with category `vertical`. A single course vertical can contain single or multiple video modules. :param vertical: vertical data list :param vertical_index: index for the vertical display name :return: XBlockFixtureDesc """ xblock_course_vertical = XBlockFixtureDesc('vertical', 'Test Vertical-{0}'.format(vertical_index)) for video in vertical: xblock_course_vertical.add_children( XBlockFixtureDesc('video', video['display_name'], metadata=video.get('metadata'))) return xblock_course_vertical def _navigate_to_courseware_video(self): """ Register for the course and navigate to the video unit """ self.auth_page.visit() self.user_info = self.auth_page.user_info self.course_info_page.visit() self.tab_nav.go_to_tab('Course') def _navigate_to_courseware_video_and_render(self): """ Wait for the video player to render """ self._navigate_to_courseware_video() self.video.wait_for_video_player_render() def _navigate_to_courseware_video_no_render(self): """ Wait for the video Xmodule but not for rendering """ self._navigate_to_courseware_video() self.video.wait_for_video_class() def metadata_for_mode(self, player_mode, additional_data=None): """ Create a dictionary for video player configuration according to `player_mode` :param player_mode (str): Video player mode :param additional_data (dict): Optional additional metadata. :return: dict """ metadata = {} if player_mode == 'html5': metadata.update({ 'youtube_id_1_0': '', 'youtube_id_0_75': '', 'youtube_id_1_25': '', 'youtube_id_1_5': '', 'html5_sources': HTML5_SOURCES }) if player_mode == 'youtube_html5': metadata.update({ 'html5_sources': HTML5_SOURCES, }) if player_mode == 'youtube_html5_unsupported_video': metadata.update({ 'html5_sources': HTML5_SOURCES_INCORRECT }) if player_mode == 'html5_unsupported_video': metadata.update({ 'youtube_id_1_0': '', 'youtube_id_0_75': '', 'youtube_id_1_25': '', 'youtube_id_1_5': '', 'html5_sources': HTML5_SOURCES_INCORRECT }) if additional_data: metadata.update(additional_data) return metadata def go_to_sequential_position(self, position): """ Navigate to the sequential at the given `position` """ self.courseware.go_to_sequential_position(position) self.video.wait_for_video_player_render() @attr('shard_4') class YouTubeVideoTest(VideoBaseTest): """ Test YouTube Video Player """ def setUp(self): super(YouTubeVideoTest, self).setUp() def test_youtube_video_rendering_wo_html5_sources(self): """ Scenario: Video component is rendered in the LMS in Youtube mode without HTML5 sources Given the course has a Video component in "Youtube" mode Then the video has rendered in "Youtube" mode """ self.navigate_to_video() # Verify that video has rendered in "Youtube" mode self.assertTrue(self.video.is_video_rendered('youtube')) def test_transcript_button_wo_english_transcript(self): """ Scenario: Transcript button works correctly w/o english transcript in Youtube mode Given the course has a Video component in "Youtube" mode And I have defined a non-english transcript for the video And I have uploaded a non-english transcript file to assets Then I see the correct text in the captions """ data = {'transcripts': {'zh': 'chinese_transcripts.srt'}} self.metadata = 
self.metadata_for_mode('youtube', data) self.assets.append('chinese_transcripts.srt') self.navigate_to_video() self.video.show_captions() # Verify that we see "好 各位同学" text in the transcript unicode_text = "好 各位同学".decode('utf-8') self.assertIn(unicode_text, self.video.captions_text) def test_cc_button(self): """ Scenario: CC button works correctly with transcript in YouTube mode Given the course has a video component in "Youtube" mode And I have defined a transcript for the video Then I see the closed captioning element over the video """ data = {'transcripts': {'zh': 'chinese_transcripts.srt'}} self.metadata = self.metadata_for_mode('youtube', data) self.assets.append('chinese_transcripts.srt') self.navigate_to_video() # Show captions and make sure they're visible and cookie is set self.video.show_closed_captions() self.video.wait_for_closed_captions() self.assertTrue(self.video.is_closed_captions_visible) self.video.reload_page() self.assertTrue(self.video.is_closed_captions_visible) # Hide captions and make sure they're hidden and cookie is unset self.video.hide_closed_captions() self.video.wait_for_closed_captions_to_be_hidden() self.video.reload_page() self.video.wait_for_closed_captions_to_be_hidden() def test_transcript_button_transcripts_and_sub_fields_empty(self): """ Scenario: Transcript button works correctly if transcripts and sub fields are empty, but transcript file exists in assets (Youtube mode of Video component) Given the course has a Video component in "Youtube" mode And I have uploaded a .srt.sjson file to assets Then I see the correct english text in the captions """ self._install_course_fixture() self.course_fixture.add_asset(['subs_3_yD_cEKoCk.srt.sjson']) self.course_fixture._upload_assets() self._navigate_to_courseware_video_and_render() self.video.show_captions() # Verify that we see "Welcome to edX." 
text in the captions self.assertIn('Welcome to edX.', self.video.captions_text) def test_transcript_button_hidden_no_translations(self): """ Scenario: Transcript button is hidden if no translations Given the course has a Video component in "Youtube" mode Then the "Transcript" button is hidden """ self.navigate_to_video() self.assertFalse(self.video.is_button_shown('transcript_button')) def test_fullscreen_video_alignment_with_transcript_hidden(self): """ Scenario: Video is aligned with transcript hidden in fullscreen mode Given the course has a Video component in "Youtube" mode When I view the video at fullscreen Then the video with the transcript hidden is aligned correctly """ self.navigate_to_video() # click video button "fullscreen" self.video.click_player_button('fullscreen') # check if video aligned correctly without enabled transcript self.assertTrue(self.video.is_aligned(False)) def test_download_button_wo_english_transcript(self): """ Scenario: Download button works correctly w/o english transcript in YouTube mode Given the course has a Video component in "Youtube" mode And I have defined a downloadable non-english transcript for the video And I have uploaded a non-english transcript file to assets Then I can download the transcript in "srt" format """ data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}} self.metadata = self.metadata_for_mode('youtube', additional_data=data) self.assets.append('chinese_transcripts.srt') # go to video self.navigate_to_video() # check if we can download transcript in "srt" format that has text "好 各位同学" unicode_text = "好 各位同学".decode('utf-8') self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text)) def test_download_button_two_transcript_languages(self): """ Scenario: Download button works correctly for multiple transcript languages Given the course has a Video component in "Youtube" mode And I have defined a downloadable non-english transcript for the video And I have defined english subtitles for the video Then I see the correct english text in the captions And the english transcript downloads correctly And I see the correct non-english text in the captions And the non-english transcript downloads correctly """ self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson']) data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': '3_yD_cEKoCk'} self.metadata = self.metadata_for_mode('youtube', additional_data=data) # go to video self.navigate_to_video() # check if "Welcome to edX." text in the captions self.assertIn('Welcome to edX.', self.video.captions_text) # check if we can download transcript in "srt" format that has text "Welcome to edX." 
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Welcome to edX.')) # select language with code "zh" self.assertTrue(self.video.select_language('zh')) # check if we see "好 各位同学" text in the captions unicode_text = "好 各位同学".decode('utf-8') self.assertIn(unicode_text, self.video.captions_text) # check if we can download transcript in "srt" format that has text "好 各位同学" unicode_text = "好 各位同学".decode('utf-8') self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text)) def test_fullscreen_video_alignment_on_transcript_toggle(self): """ Scenario: Video is aligned correctly on transcript toggle in fullscreen mode Given the course has a Video component in "Youtube" mode And I have uploaded a .srt.sjson file to assets And I have defined subtitles for the video When I view the video at fullscreen Then the video with the transcript enabled is aligned correctly And the video with the transcript hidden is aligned correctly """ self.assets.append('subs_3_yD_cEKoCk.srt.sjson') data = {'sub': '3_yD_cEKoCk'} self.metadata = self.metadata_for_mode('youtube', additional_data=data) # go to video self.navigate_to_video() # make sure captions are opened self.video.show_captions() # click video button "fullscreen" self.video.click_player_button('fullscreen') # check if video aligned correctly with enabled transcript self.assertTrue(self.video.is_aligned(True)) # click video button "transcript" self.video.click_player_button('transcript_button') # check if video aligned correctly without enabled transcript self.assertTrue(self.video.is_aligned(False)) def test_video_rendering_with_default_response_time(self): """ Scenario: Video is rendered in Youtube mode when the YouTube Server responds quickly Given the YouTube server response time less than 1.5 seconds And the course has a Video component in "Youtube_HTML5" mode Then the video has rendered in "Youtube" mode """ # configure youtube server self.youtube_configuration['time_to_response'] = 0.4 self.metadata = self.metadata_for_mode('youtube_html5') self.navigate_to_video() self.assertTrue(self.video.is_video_rendered('youtube')) def test_video_rendering_wo_default_response_time(self): """ Scenario: Video is rendered in HTML5 when the YouTube Server responds slowly Given the YouTube server response time is greater than 1.5 seconds And the course has a Video component in "Youtube_HTML5" mode Then the video has rendered in "HTML5" mode """ # configure youtube server self.youtube_configuration['time_to_response'] = 2.0 self.metadata = self.metadata_for_mode('youtube_html5') self.navigate_to_video() self.assertTrue(self.video.is_video_rendered('html5')) def test_video_with_youtube_blocked_with_default_response_time(self): """ Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked Given the YouTube API is blocked And the course has a Video component in "Youtube_HTML5" mode Then the video has rendered in "HTML5" mode And only one video has rendered """ # configure youtube server self.youtube_configuration.update({ 'youtube_api_blocked': True, }) self.metadata = self.metadata_for_mode('youtube_html5') self.navigate_to_video() self.assertTrue(self.video.is_video_rendered('html5')) # The video should only be loaded once self.assertEqual(len(self.video.q(css='video')), 1) def test_video_with_youtube_blocked_delayed_response_time(self): """ Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked Given the YouTube server response time is greater than 1.5 seconds And the YouTube API is blocked 
And the course has a Video component in "Youtube_HTML5" mode
        Then the video has rendered in "HTML5" mode
        And only one video has rendered
        """
        # configure youtube server
        self.youtube_configuration.update({
            'time_to_response': 2.0,
            'youtube_api_blocked': True,
        })

        self.metadata = self.metadata_for_mode('youtube_html5')

        self.navigate_to_video()

        self.assertTrue(self.video.is_video_rendered('html5'))

        # The video should only be loaded once
        self.assertEqual(len(self.video.q(css='video')), 1)

    def test_html5_video_rendered_with_youtube_captions(self):
        """
        Scenario: User should see Youtube captions if there are no transcripts available for HTML5 mode
        Given that I have uploaded a .srt.sjson file to assets for Youtube mode
        And the YouTube API is blocked
        And the course has a Video component in "Youtube_HTML5" mode
        And the Video component is rendered in HTML5 mode
        And the HTML5 mode video has no transcripts
        When I see the captions for the HTML5 mode video
        Then I should see the Youtube captions
        """
        self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
        # configure youtube server
        self.youtube_configuration.update({
            'time_to_response': 2.0,
            'youtube_api_blocked': True,
        })

        data = {'sub': '3_yD_cEKoCk'}
        self.metadata = self.metadata_for_mode('youtube_html5', additional_data=data)

        self.navigate_to_video()

        self.assertTrue(self.video.is_video_rendered('html5'))

        # check if caption button is visible
        self.assertTrue(self.video.is_button_shown('transcript_button'))

        self._verify_caption_text('Welcome to edX.')

    def test_download_transcript_button_works_correctly(self):
        """
        Scenario: Download Transcript button works correctly
        Given the course has Video components A and B in "Youtube" mode
        And Video component C in "HTML5" mode
        And I have defined downloadable transcripts for the videos
        Then I can download a transcript for Video A in "srt" format
        And I can download a transcript for Video A in "txt" format
        And I can download a transcript for Video B in "txt" format
        And the Download Transcript menu does not exist for Video C
        """
        data_a = {'sub': '3_yD_cEKoCk', 'download_track': True}
        youtube_a_metadata = self.metadata_for_mode('youtube', additional_data=data_a)
        self.assets.append('subs_3_yD_cEKoCk.srt.sjson')

        data_b = {'youtube_id_1_0': 'b7xgknqkQk8', 'sub': 'b7xgknqkQk8', 'download_track': True}
        youtube_b_metadata = self.metadata_for_mode('youtube', additional_data=data_b)
        self.assets.append('subs_b7xgknqkQk8.srt.sjson')

        data_c = {'track': 'http://example.org/', 'download_track': True}
        html5_c_metadata = self.metadata_for_mode('html5', additional_data=data_c)

        self.verticals = [
            [{'display_name': 'A', 'metadata': youtube_a_metadata}],
            [{'display_name': 'B', 'metadata': youtube_b_metadata}],
            [{'display_name': 'C', 'metadata': html5_c_metadata}]
        ]

        # open the section with videos (open video "A")
        self.navigate_to_video()

        # check if we can download transcript in "srt" format that has text "00:00:00,260"
        self.assertTrue(self.video.downloaded_transcript_contains_text('srt', '00:00:00,260'))

        # select the transcript format "txt"
        self.assertTrue(self.video.select_transcript_format('txt'))

        # check if we can download transcript in "txt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('txt', 'Welcome to edX.'))

        # open video "B"
        self.course_nav.go_to_sequential('B')

        # check if we can download transcript in "txt" format that has text "Equal transcripts"
        self.assertTrue(self.video.downloaded_transcript_contains_text('txt', 'Equal transcripts'))

        # open video "C"
        self.course_nav.go_to_sequential('C')

        # menu "download_transcript" doesn't exist
        self.assertFalse(self.video.is_menu_present('download_transcript'))

    def _verify_caption_text(self, text):
        self.video._wait_for(
            lambda: (text in self.video.captions_text),
            u'Captions contain "{}" text'.format(text),
            timeout=5
        )

    def _verify_closed_caption_text(self, text):
        """
        Wait until the expected text appears in the closed captions.
        """
        self.video.wait_for(
            lambda: (text in self.video.closed_captions_text),
            u'Closed captions contain "{}" text'.format(text),
            timeout=5
        )

    def test_video_language_menu_working(self):
        """
        Scenario: Language menu works correctly in Video component
        Given the course has a Video component in "Youtube" mode
        And I have defined multiple language transcripts for the videos
        And I make sure captions are closed
        And I see video menu "language" with correct items
        And I select language with code "zh"
        Then I see "好 各位同学" text in the captions
        And I select language with code "en"
        Then I see "Welcome to edX." text in the captions
        """
        self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
        data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': '3_yD_cEKoCk'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)

        # go to video
        self.navigate_to_video()
        self.video.hide_captions()

        correct_languages = {'en': 'English', 'zh': 'Chinese'}
        self.assertEqual(self.video.caption_languages, correct_languages)

        self.video.select_language('zh')

        unicode_text = "好 各位同学".decode('utf-8')
        self._verify_caption_text(unicode_text)

        self.video.select_language('en')
        self._verify_caption_text('Welcome to edX.')

    @flaky  # TODO: Fix TNL-4304
    def test_video_language_menu_working_closed_captions(self):
        """
        Scenario: Language menu works correctly in Video component, checks closed captions
        Given the course has a Video component in "Youtube" mode
        And I have defined multiple language transcripts for the videos
        And I make sure captions are closed
        And I see video menu "language" with correct items
        And I select language with code "en"
        Then I see "Welcome to edX."
text in the closed captions
        And I select language with code "zh"
        Then I see "我们今天要讲的题目是" text in the closed captions
        """
        self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
        data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': '3_yD_cEKoCk'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)

        # go to video
        self.navigate_to_video()
        self.video.show_closed_captions()

        correct_languages = {'en': 'English', 'zh': 'Chinese'}
        self.assertEqual(self.video.caption_languages, correct_languages)

        # we start the video, then pause it to activate the transcript
        self.video.click_player_button('play')
        self.video.wait_for_position('0:01')
        self.video.click_player_button('pause')

        self.video.select_language('en')
        self.video.click_first_line_in_transcript()
        self._verify_closed_caption_text('Welcome to edX.')

        self.video.select_language('zh')
        unicode_text = "我们今天要讲的题目是".decode('utf-8')
        self.video.click_first_line_in_transcript()
        self._verify_closed_caption_text(unicode_text)

    def test_multiple_videos_in_sequentials_load_and_work(self):
        """
        Scenario: Multiple videos in sequentials all load and work, switching between sequentials
        Given it has videos "A,B" in "Youtube" mode in position "1" of sequential
        And videos "C,D" in "Youtube" mode in position "2" of sequential
        """
        self.verticals = [
            [{'display_name': 'A'}, {'display_name': 'B'}],
            [{'display_name': 'C'}, {'display_name': 'D'}]
        ]

        tab1_video_names = ['A', 'B']
        tab2_video_names = ['C', 'D']

        def execute_video_steps(video_names):
            """
            Execute video steps
            """
            for video_name in video_names:
                self.video.use_video(video_name)
                self.video.click_player_button('play')
                self.assertIn(self.video.state, ['playing', 'buffering'])
                self.video.click_player_button('pause')

        # go to video
        self.navigate_to_video()
        execute_video_steps(tab1_video_names)

        # go to second sequential position
        self.go_to_sequential_position(2)
        execute_video_steps(tab2_video_names)

        # go back to first sequential position
        # we are again playing tab 1 videos to ensure that switching didn't break any video functionality.
        self.go_to_sequential_position(1)
        execute_video_steps(tab1_video_names)

    def test_video_component_stores_speed_correctly_for_multiple_videos(self):
        """
        Scenario: Video component stores speed correctly when each video is in separate sequential
        Given I have a video "A" in "Youtube" mode in position "1" of sequential
        And a video "B" in "Youtube" mode in position "2" of sequential
        And a video "C" in "HTML5" mode in position "3" of sequential
        """
        self.verticals = [
            [{'display_name': 'A'}],
            [{'display_name': 'B'}],
            [{'display_name': 'C', 'metadata': self.metadata_for_mode('html5')}]
        ]

        self.navigate_to_video()

        # select the "2.0" speed on video "A"
        self.course_nav.go_to_sequential('A')
        self.video.wait_for_video_player_render()
        self.video.speed = '2.0'

        # select the "0.50" speed on video "B"
        self.course_nav.go_to_sequential('B')
        self.video.wait_for_video_player_render()
        self.video.speed = '0.50'

        # open video "C"
        self.course_nav.go_to_sequential('C')
        self.video.wait_for_video_player_render()

        # Since the playback speed was set to .5 in "B", this video will also be impacted
        # because a playback speed has never explicitly been set for it. However, this video
        # does not have a .5 playback option, so the closest possible (.75) should be selected.
self.video.verify_speed_changed('0.75x')

        # open video "A"
        self.course_nav.go_to_sequential('A')

        # Video "A" should still play at speed 2.0 because it was explicitly set to that.
        self.assertEqual(self.video.speed, '2.0x')

        # reload the page
        self.video.reload_page()

        # open video "A"
        self.course_nav.go_to_sequential('A')

        # check if video "A" should start playing at speed "2.0"
        self.assertEqual(self.video.speed, '2.0x')

        # select the "1.0" speed on video "A"
        self.video.speed = '1.0'

        # open video "B"
        self.course_nav.go_to_sequential('B')

        # Video "B" should still play at speed .5 because it was explicitly set to that.
        self.assertEqual(self.video.speed, '0.50x')

        # open video "C"
        self.course_nav.go_to_sequential('C')

        # The change of speed for Video "A" should impact Video "C" because it still has
        # not been explicitly set to a speed.
        self.video.verify_speed_changed('1.0x')

    def test_video_has_correct_transcript(self):
        """
        Scenario: Youtube video has correct transcript if fields for other speeds are filled
        Given it has a video in "Youtube" mode
        And I have uploaded multiple transcripts
        And I make sure captions are opened
        Then I see "Welcome to edX." text in the captions
        And I select the "1.50" speed
        And I reload the page with video
        Then I see "Welcome to edX." text in the captions
        And I see duration "1:56"
        """
        self.assets.extend(['subs_3_yD_cEKoCk.srt.sjson', 'subs_b7xgknqkQk8.srt.sjson'])
        data = {'sub': '3_yD_cEKoCk', 'youtube_id_1_5': 'b7xgknqkQk8'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)

        # go to video
        self.navigate_to_video()
        self.video.show_captions()

        self.assertIn('Welcome to edX.', self.video.captions_text)

        self.video.speed = '1.50'

        self.video.reload_page()

        self.assertIn('Welcome to edX.', self.video.captions_text)

        # check the duration promised in the scenario above
        self.assertEqual(self.video.duration, '1:56')

    def test_video_position_stored_correctly_wo_seek(self):
        """
        Scenario: Video component stores position correctly when page is reloaded
        Given the course has a Video component in "Youtube" mode
        Then the video has rendered in "Youtube" mode
        And I click video button "play"
        Then I wait until the video reaches position "0:05"
        And I click video button "pause"
        And I reload the page with video
        And I click video button "play"
        And I click video button "pause"
        Then the video slider should be equal to or greater than "0:05"
        """
        self.navigate_to_video()

        self.video.click_player_button('play')

        self.video.wait_for_position('0:05')

        self.video.click_player_button('pause')

        self.video.reload_page()

        self.video.click_player_button('play')
        self.video.click_player_button('pause')

        self.assertGreaterEqual(self.video.seconds, 5)

    @skip("Intermittently fails 03 June 2014")
    def test_video_position_stored_correctly_with_seek(self):
        """
        Scenario: Video component stores position correctly when page is reloaded
        Given the course has a Video component in "Youtube" mode
        Then the video has rendered in "Youtube" mode
        And I click video button "play"
        And I click video button "pause"
        Then I seek video to "0:10" position
        And I click video button "play"
        And I click video button "pause"
        And I reload the page with video
        Then the video slider should be equal to or greater than "0:10"
        """
        self.navigate_to_video()

        self.video.click_player_button('play')

        self.video.seek('0:10')

        self.video.click_player_button('pause')

        self.video.reload_page()

        self.video.click_player_button('play')
        self.video.click_player_button('pause')

        self.assertGreaterEqual(self.video.seconds, 10)

    def test_simplified_and_traditional_chinese_transcripts(self):
        """
        Scenario: Simplified and Traditional
Chinese transcripts work as expected in Youtube mode Given the course has a Video component in "Youtube" mode And I have defined a Simplified Chinese transcript for the video And I have defined a Traditional Chinese transcript for the video Then I see the correct subtitle language options in cc menu Then I see the correct text in the captions for Simplified and Traditional Chinese transcripts And I can download the transcripts for Simplified and Traditional Chinese And video subtitle menu has 'zh_HANS', 'zh_HANT' translations for 'Simplified Chinese' and 'Traditional Chinese' respectively """ data = { 'download_track': True, 'transcripts': {'zh_HANS': 'simplified_chinese.srt', 'zh_HANT': 'traditional_chinese.srt'} } self.metadata = self.metadata_for_mode('youtube', data) self.assets.extend(['simplified_chinese.srt', 'traditional_chinese.srt']) self.navigate_to_video() langs = {'zh_HANS': '在线学习是革', 'zh_HANT': '在線學習是革'} for lang_code, text in langs.items(): self.assertTrue(self.video.select_language(lang_code)) unicode_text = text.decode('utf-8') self.assertIn(unicode_text, self.video.captions_text) self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text)) self.assertEqual(self.video.caption_languages, {'zh_HANS': 'Simplified Chinese', 'zh_HANT': 'Traditional Chinese'}) def test_video_bumper_render(self): """ Scenario: Multiple videos with bumper in sequentials all load and work, switching between sequentials Given it has videos "A,B" in "Youtube" and "HTML5" modes in position "1" of sequential And video "C" in "Youtube" mode in position "2" of sequential When I open sequential position "1" Then I see video "B" has a poster When I click on it Then I see video bumper is playing When I skip the bumper Then I see the main video When I click on video "A" Then the main video starts playing When I open sequential position "2" And click on the poster Then the main video starts playing Then I see that the main video starts playing once I go back to position "2" of sequential When I reload the page Then I see that the main video starts playing when I click on the poster """ additional_data = { u'video_bumper': { u'value': { "transcripts": {}, "video_id": "video_001" } } } self.verticals = [ [{'display_name': 'A'}, {'display_name': 'B', 'metadata': self.metadata_for_mode('html5')}], [{'display_name': 'C'}] ] tab1_video_names = ['A', 'B'] tab2_video_names = ['C'] def execute_video_steps(video_names): """ Execute video steps """ for video_name in video_names: self.video.use_video(video_name) self.assertTrue(self.video.is_poster_shown) self.video.click_on_poster() self.video.wait_for_video_player_render(autoplay=True) self.assertIn(self.video.state, ['playing', 'buffering', 'finished']) self.course_fixture.add_advanced_settings(additional_data) self.navigate_to_video_no_render() self.video.use_video('B') self.assertTrue(self.video.is_poster_shown) self.video.click_on_poster() self.video.wait_for_video_bumper_render() self.assertIn(self.video.state, ['playing', 'buffering', 'finished']) self.video.click_player_button('skip_bumper') # no autoplay here, maybe video is too small, so pause is not switched self.video.wait_for_video_player_render() self.assertIn(self.video.state, ['playing', 'buffering', 'finished']) self.video.use_video('A') execute_video_steps(['A']) # go to second sequential position self.courseware.go_to_sequential_position(2) execute_video_steps(tab2_video_names) # go back to first sequential position # we are again playing tab 1 videos to ensure that switching 
didn't break any video functionality.
        self.courseware.go_to_sequential_position(1)
        execute_video_steps(tab1_video_names)

        self.video.browser.refresh()
        execute_video_steps(tab1_video_names)


@attr('shard_4')
class YouTubeHtml5VideoTest(VideoBaseTest):
    """ Test YouTube HTML5 Video Player """

    def setUp(self):
        super(YouTubeHtml5VideoTest, self).setUp()

    @flaky  # TODO fix this, see TNL-1642
    def test_youtube_video_rendering_with_unsupported_sources(self):
        """
        Scenario: Video component is rendered in the LMS in Youtube mode
            with HTML5 sources that are not supported by the browser
        Given the course has a Video component in "Youtube_HTML5_Unsupported_Video" mode
        Then the video has rendered in "Youtube" mode
        """
        self.metadata = self.metadata_for_mode('youtube_html5_unsupported_video')
        self.navigate_to_video()

        # Verify that the video has rendered in "Youtube" mode
        self.assertTrue(self.video.is_video_rendered('youtube'))


@attr('shard_4')
class Html5VideoTest(VideoBaseTest):
    """ Test HTML5 Video Player """

    def setUp(self):
        super(Html5VideoTest, self).setUp()

    def test_autoplay_disabled_for_video_component(self):
        """
        Scenario: Autoplay is disabled by default for a Video component
        Given the course has a Video component in "HTML5" mode
        When I view the Video component
        Then it does not have autoplay enabled
        """
        self.metadata = self.metadata_for_mode('html5')
        self.navigate_to_video()

        # Verify that the video has autoplay mode disabled
        self.assertFalse(self.video.is_autoplay_enabled)

    def test_html5_video_rendering_with_unsupported_sources(self):
        """
        Scenario: LMS displays an error message for HTML5 sources that are not supported by browser
        Given the course has a Video component in "HTML5_Unsupported_Video" mode
        When I view the Video component
        Then an error message is shown
        And the error message has the correct text
        """
        self.metadata = self.metadata_for_mode('html5_unsupported_video')
        self.navigate_to_video_no_render()

        # Verify that error message is shown
        self.assertTrue(self.video.is_error_message_shown)

        # Verify that error message has correct text
        correct_error_message_text = 'No playable video sources found.'
self.assertIn(correct_error_message_text, self.video.error_message_text) # Verify that spinner is not shown self.assertFalse(self.video.is_spinner_shown) def test_download_button_wo_english_transcript(self): """ Scenario: Download button works correctly w/o english transcript in HTML5 mode Given the course has a Video component in "HTML5" mode And I have defined a downloadable non-english transcript for the video And I have uploaded a non-english transcript file to assets Then I see the correct non-english text in the captions And the non-english transcript downloads correctly """ data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}} self.metadata = self.metadata_for_mode('html5', additional_data=data) self.assets.append('chinese_transcripts.srt') # go to video self.navigate_to_video() # check if we see "好 各位同学" text in the captions unicode_text = "好 各位同学".decode('utf-8') self.assertIn(unicode_text, self.video.captions_text) # check if we can download transcript in "srt" format that has text "好 各位同学" unicode_text = "好 各位同学".decode('utf-8') self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text)) def test_download_button_two_transcript_languages(self): """ Scenario: Download button works correctly for multiple transcript languages in HTML5 mode Given the course has a Video component in "HTML5" mode And I have defined a downloadable non-english transcript for the video And I have defined english subtitles for the video Then I see the correct english text in the captions And the english transcript downloads correctly And I see the correct non-english text in the captions And the non-english transcript downloads correctly """ self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson']) data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': '3_yD_cEKoCk'} self.metadata = self.metadata_for_mode('html5', additional_data=data) # go to video self.navigate_to_video() # check if "Welcome to edX." text in the captions self.assertIn('Welcome to edX.', self.video.captions_text) # check if we can download transcript in "srt" format that has text "Welcome to edX." 
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Welcome to edX.')) # select language with code "zh" self.assertTrue(self.video.select_language('zh')) # check if we see "好 各位同学" text in the captions unicode_text = "好 各位同学".decode('utf-8') self.assertIn(unicode_text, self.video.captions_text) # Then I can download transcript in "srt" format that has text "好 各位同学" unicode_text = "好 各位同学".decode('utf-8') self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text)) def test_full_screen_video_alignment_with_transcript_visible(self): """ Scenario: Video is aligned correctly with transcript enabled in fullscreen mode Given the course has a Video component in "HTML5" mode And I have uploaded a .srt.sjson file to assets And I have defined subtitles for the video When I show the captions And I view the video at fullscreen Then the video with the transcript enabled is aligned correctly """ self.assets.append('subs_3_yD_cEKoCk.srt.sjson') data = {'sub': '3_yD_cEKoCk'} self.metadata = self.metadata_for_mode('html5', additional_data=data) # go to video self.navigate_to_video() # make sure captions are opened self.video.show_captions() # click video button "fullscreen" self.video.click_player_button('fullscreen') # check if video aligned correctly with enabled transcript self.assertTrue(self.video.is_aligned(True)) def test_cc_button_with_english_transcript(self): """ Scenario: CC button works correctly with only english transcript in HTML5 mode Given the course has a Video component in "HTML5" mode And I have defined english subtitles for the video And I have uploaded an english transcript file to assets Then I see the correct text in the captions """ self.assets.append('subs_3_yD_cEKoCk.srt.sjson') data = {'sub': '3_yD_cEKoCk'} self.metadata = self.metadata_for_mode('html5', additional_data=data) # go to video self.navigate_to_video() # make sure captions are opened self.video.show_captions() # check if we see "Welcome to edX." text in the captions self.assertIn("Welcome to edX.", self.video.captions_text) def test_cc_button_wo_english_transcript(self): """ Scenario: CC button works correctly w/o english transcript in HTML5 mode Given the course has a Video component in "HTML5" mode And I have defined a non-english transcript for the video And I have uploaded a non-english transcript file to assets Then I see the correct text in the captions """ self.assets.append('chinese_transcripts.srt') data = {'transcripts': {'zh': 'chinese_transcripts.srt'}} self.metadata = self.metadata_for_mode('html5', additional_data=data) # go to video self.navigate_to_video() # make sure captions are opened self.video.show_captions() # check if we see "好 各位同学" text in the captions unicode_text = "好 各位同学".decode('utf-8') self.assertIn(unicode_text, self.video.captions_text) def test_video_rendering(self): """ Scenario: Video component is fully rendered in the LMS in HTML5 mode Given the course has a Video component in "HTML5" mode Then the video has rendered in "HTML5" mode And video sources are correct """ self.metadata = self.metadata_for_mode('html5') self.navigate_to_video() self.assertTrue(self.video.is_video_rendered('html5')) self.assertTrue(all([source in HTML5_SOURCES for source in self.video.sources])) @attr('shard_4') class YouTubeQualityTest(VideoBaseTest): """ Test YouTube Video Quality Button """ def setUp(self): super(YouTubeQualityTest, self).setUp() @skip_if_browser('firefox') def test_quality_button_visibility(self): """ Scenario: Quality button appears on play. 
Given the course has a Video component in "Youtube" mode Then I see video button "quality" is hidden And I click video button "play" Then I see video button "quality" is visible """ self.navigate_to_video() self.assertFalse(self.video.is_quality_button_visible) self.video.click_player_button('play') self.video.wait_for(lambda: self.video.is_quality_button_visible, 'waiting for quality button to appear') @skip_if_browser('firefox') def test_quality_button_works_correctly(self): """ Scenario: Quality button works correctly. Given the course has a Video component in "Youtube" mode And I click video button "play" And I see video button "quality" is inactive And I click video button "quality" Then I see video button "quality" is active """ self.navigate_to_video() self.video.click_player_button('play') self.video.wait_for(lambda: self.video.is_quality_button_visible, 'waiting for quality button to appear') self.assertFalse(self.video.is_quality_button_active) self.video.click_player_button('quality') self.video.wait_for(lambda: self.video.is_quality_button_active, 'waiting for quality button activation') @attr('shard_4') class DragAndDropTest(VideoBaseTest): """ Tests draggability of closed captions within videos. """ def setUp(self): super(DragAndDropTest, self).setUp() def test_if_captions_are_draggable(self): """ Loads transcripts so that closed-captioning is available. Ensures they are draggable by checking start and dropped location. """ self.assets.append('subs_3_yD_cEKoCk.srt.sjson') data = {'sub': '3_yD_cEKoCk'} self.metadata = self.metadata_for_mode('html5', additional_data=data) self.navigate_to_video() self.assertTrue(self.video.is_video_rendered('html5')) self.video.show_closed_captions() self.video.wait_for_closed_captions() self.assertTrue(self.video.is_closed_captions_visible) action = ActionChains(self.browser) captions = self.browser.find_element(By.CLASS_NAME, 'closed-captions') captions_start = captions.location action.drag_and_drop_by_offset(captions, 0, -15).perform() captions_end = captions.location self.assertEqual( captions_end.get('y') + 15, captions_start.get('y'), 'Closed captions did not get dragged.' ) @attr('a11y') class LMSVideoModuleA11yTest(VideoBaseTest): """ LMS Video Accessibility Test Class """ def setUp(self): browser = os.environ.get('SELENIUM_BROWSER', 'firefox') # the a11y tests run in CI under phantomjs which doesn't # support html5 video or flash player, so the video tests # don't work in it. We still want to be able to run these # tests in CI, so override the browser setting if it is # phantomjs. if browser == 'phantomjs': browser = 'firefox' with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}): super(LMSVideoModuleA11yTest, self).setUp() def test_video_player_a11y(self): # load transcripts so we can test skipping to self.assets.extend(['english_single_transcript.srt', 'subs_3_yD_cEKoCk.srt.sjson']) data = {'transcripts': {"en": "english_single_transcript.srt"}, 'sub': '3_yD_cEKoCk'} self.metadata = self.metadata_for_mode('youtube', additional_data=data) # go to video self.navigate_to_video() self.video.show_captions() # limit the scope of the audit to the video player only. self.video.a11y_audit.config.set_scope( include=["div.video"], exclude=["a.ui-slider-handle"] ) self.video.a11y_audit.check_for_accessibility_errors()
devs1991/test_edx_docmode
common/test/acceptance/tests/video/test_video_module.py
Python
agpl-3.0
50,731
[ "VisIt" ]
9bc56e48470ff3300ff48e2a0b26828f755ff43fac52d9878f952728bed4da4d
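The acceptance tests in the file above all follow one pattern: component metadata selects the player mode, transcript fixtures go into self.assets, and the self.video page object drives the player. A minimal sketch of that pattern, assuming the VideoBaseTest helpers (metadata_for_mode, navigate_to_video, self.video) behave exactly as they do in the tests above; the scenario itself is invented:

# Hedged sketch only -- not part of the file above. VideoBaseTest and its
# helpers are assumed to work as used in the real tests; the scenario is made up.
class ExampleVideoScenario(VideoBaseTest):
    def test_play_then_pause(self):
        # choose the player mode via component metadata
        self.metadata = self.metadata_for_mode('youtube')
        # load the courseware unit that contains the video
        self.navigate_to_video()
        # drive the player through the page object and check its state
        self.video.click_player_button('play')
        self.assertIn(self.video.state, ['playing', 'buffering'])
        self.video.click_player_button('pause')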
## # This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild # # Copyright:: Copyright 2012-2015 Uni.Lu/LCSB, NTUA # Authors:: Josh Berryman <the.real.josh.berryman@gmail.com>, Fotis Georgatos <fotis@cern.ch>, Kenneth Hoste # License:: MIT/GPL # $Id$ # # This work implements a part of the HPCBIOS project and is a component of the policy: # http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-80.html ## """ EasyBuild support for building and installing ESPResSo, implemented as an easyblock @author: Josh Berryman <the.real.josh.berryman@gmail.com> @author: Fotis Georgatos (Uni.Lu) @author: Kenneth Hoste (Ghent University) """ import os import easybuild.tools.environment as env import easybuild.tools.toolchain as toolchain from easybuild.easyblocks.generic.configuremake import ConfigureMake from easybuild.framework.easyconfig import CUSTOM from easybuild.tools.run import run_cmd class EB_ESPResSo(ConfigureMake): """Support for building/installing ESPResSo, parallel version.""" def __init__(self, *args, **kwargs): """Specify to build in install dir.""" super(EB_ESPResSo, self).__init__(*args, **kwargs) self.build_in_installdir = True self.install_subdir = '%s-%s' % (self.name.lower(), self.version) @staticmethod def extra_options(): extra_vars = { 'runtest': [True, "Run ESPResSo tests.", CUSTOM], } return ConfigureMake.extra_options(extra_vars) def test_step(self): """Custom built-in test procedure for ESPResSo, parallel version.""" if self.cfg['runtest']: cmd = './runtest.sh -p 2 *.tcl' (out, ec) = run_cmd(cmd, simple=False, log_all=False, log_ok=False, path="testsuite") if ec: # ESPResSo fails many of its tests in version 3.1.1, and the test script itself is buggy # so, just provide output in log file, but ignore things if it fails self.log.warning("ESPResSo test failed (exit code: %s): %s" % (ec, out)) else: self.log.info("Successful ESPResSo test completed: %s" % out) def install_step(self): """Build is done in install dir, so no separate install step.""" pass def sanity_check_step(self): """Custom sanity check for ESPResSo.""" custom_paths = { 'files' : [os.path.join(self.install_subdir, 'Espresso')], 'dirs' : [os.path.join(self.install_subdir, x) for x in ['samples', 'scripts', 'tools']], } super(EB_ESPResSo, self).sanity_check_step(custom_paths=custom_paths) def make_module_req_guess(self): """Customize PATH for ESPResSo.""" guesses = super(EB_ESPResSo, self).make_module_req_guess() guesses.update({'PATH': [self.install_subdir]}) return guesses
ULHPC/modules
easybuild/easybuild-easyblocks/easybuild/easyblocks/e/espresso.py
Python
mit
2,912
[ "ESPResSo" ]
ef851ca6d331b4920b86a5822ceea216cd5509ecf984a7d6307a063ebd1d88bf
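The EB_ESPResSo easyblock above adds a custom 'runtest' parameter through extra_options(). A hypothetical easyconfig fragment that exercises it might look like this; the toolchain name/version are invented for illustration, and only the runtest line touches the easyblock's custom option:

# Hypothetical easyconfig fragment (not from the repository above)
name = 'ESPResSo'
version = '3.1.1'
homepage = 'http://espressomd.org'
description = "Parallel simulation package for soft matter research"
toolchain = {'name': 'goolf', 'version': '1.4.10'}  # invented toolchain
sources = [SOURCE_TAR_GZ]  # standard EasyBuild source template
# custom parameter declared in EB_ESPResSo.extra_options();
# set to False to skip the (buggy in 3.1.1) bundled test suite
runtest = False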
# # Copyright 2014, 2020-2021 Lars Pastewka (U. Freiburg) # 2014 James Kermode (Warwick U.) # # matscipy - Materials science with Python at the atomic-scale # https://github.com/libAtoms/matscipy # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # ====================================================================== # matscipy - Python materials science tools # https://github.com/libAtoms/matscipy # # Copyright (2014) James Kermode, King's College London # Lars Pastewka, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ====================================================================== import unittest import numpy as np from matscipy.elasticity import (full_3x3_to_Voigt_6_index, Voigt_6x6_to_full_3x3x3x3, full_3x3x3x3_to_Voigt_6x6, Voigt_6_to_full_3x3_strain, full_3x3_to_Voigt_6_strain, Voigt_6_to_full_3x3_stress, full_3x3_to_Voigt_6_stress) ### def test_full_3x3_to_Voigt_6_index(): assert full_3x3_to_Voigt_6_index(1, 1) == 1 assert full_3x3_to_Voigt_6_index(1, 2) == 3 assert full_3x3_to_Voigt_6_index(2, 1) == 3 assert full_3x3_to_Voigt_6_index(0, 2) == 4 assert full_3x3_to_Voigt_6_index(0, 1) == 5 def test_stiffness_conversion(): C6 = np.random.random((6, 6)) # Should be symmetric C6 = (C6 + C6.T) / 2 C3x3 = Voigt_6x6_to_full_3x3x3x3(C6) C6_check = full_3x3x3x3_to_Voigt_6x6(C3x3) np.testing.assert_array_almost_equal(C6, C6_check) def test_strain_conversion(): voigt0 = np.random.random(6) full1 = Voigt_6_to_full_3x3_strain(voigt0) voigt1 = full_3x3_to_Voigt_6_strain(full1) np.testing.assert_array_almost_equal(voigt0, voigt1) def test_stress_conversion(): voigt0 = np.random.random(6) full2 = Voigt_6_to_full_3x3_stress(voigt0) voigt2 = full_3x3_to_Voigt_6_stress(full2) np.testing.assert_array_almost_equal(voigt0, voigt2)
libAtoms/matscipy
tests/test_full_to_Voigt.py
Python
lgpl-2.1
3,278
[ "Matscipy" ]
7bd95c442a9e6e7c996bdbf18dd5676abbb7809270cdafc00903bda8248db427
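The round-trip tests above exercise matscipy's Voigt-notation helpers. As a small usage sketch, here is how the same two stiffness-conversion functions handle a hand-built isotropic stiffness matrix; the Lame-constant construction is illustrative and not taken from the file above:

import numpy as np
from matscipy.elasticity import (Voigt_6x6_to_full_3x3x3x3,
                                 full_3x3x3x3_to_Voigt_6x6)

lam, mu = 1.0, 0.5                      # Lame constants, arbitrary values
C6 = np.zeros((6, 6))
C6[:3, :3] = lam                        # lambda couples the normal components
C6[0, 0] = C6[1, 1] = C6[2, 2] = lam + 2 * mu
C6[3, 3] = C6[4, 4] = C6[5, 5] = mu     # shear entries
C4 = Voigt_6x6_to_full_3x3x3x3(C6)      # expand to the rank-4 tensor
np.testing.assert_array_almost_equal(full_3x3x3x3_to_Voigt_6x6(C4), C6)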
# Copyright 2001 Brad Chapman. # Revisions copyright 2009-2010 by Peter Cock. # All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """Definitions for interacting with BLAST related applications. Obsolete wrappers for the old/classic NCBI BLAST tools (written in C): - FastacmdCommandline - BlastallCommandline - BlastpgpCommandline - RpsBlastCommandline Wrappers for the new NCBI BLAST+ tools (written in C++): - NcbiblastpCommandline - Protein-Protein BLAST - NcbiblastnCommandline - Nucleotide-Nucleotide BLAST - NcbiblastxCommandline - Translated Query-Protein Subject BLAST - NcbitblastnCommandline - Protein Query-Translated Subject BLAST - NcbitblastxCommandline - Translated Query-Protein Subject BLAST - NcbipsiblastCommandline - Position-Specific Initiated BLAST - NcbirpsblastCommandline - Reverse Position Specific BLAST - NcbirpstblastnCommandline - Translated Reverse Position Specific BLAST For further details, see: Camacho et al. BLAST+: architecture and applications BMC Bioinformatics 2009, 10:421 doi:10.1186/1471-2105-10-421 """ from Bio.Application import _Option, AbstractCommandline, _Switch class FastacmdCommandline(AbstractCommandline): """Create a commandline for the fasta program from NCBI (OBSOLETE). """ def __init__(self, cmd="fastacmd", **kwargs): self.parameters = \ [ _Option(["-d", "database"], ["input"], None, 1, "The database to retrieve from."), _Option(["-s", "search_string"], ["input"], None, 1, "The id to search for.") ] AbstractCommandline.__init__(self, cmd, **kwargs) class _BlastCommandLine(AbstractCommandline): """Base Commandline object for (classic) NCBI BLAST wrappers (PRIVATE). This is provided for subclassing, it deals with shared options common to all the BLAST tools (blastall, rpsblast, blastpgp). """ def __init__(self, cmd=None, **kwargs): assert cmd is not None extra_parameters = [\ _Switch(["--help", "help"], ["input"], "Print USAGE, DESCRIPTION and ARGUMENTS description; ignore other arguments."), _Option(["-d", "database"], ["input"], None, 1, "The database to BLAST against.", False), _Option(["-i", "infile"], ["input", "file"], None, 1, "The sequence to search with.", False), _Option(["-e", "expectation"], ["input"], None, 0, "Expectation value cutoff.", False), _Option(["-m", "align_view"], ["input"], None, 0, "Alignment view. Integer 0-11. Use 7 for XML output.", False), _Option(["-o", "align_outfile", "outfile"], ["output", "file"], None, 0, "Output file for alignment.", False), _Option(["-y", "xdrop_extension"], ["input"], None, 0, "Dropoff for blast extensions.", False), _Option(["-F", "filter"], ["input"], None, 0, "Filter query sequence with SEG? T/F", False), _Option(["-X", "xdrop"], ["input"], None, 0, "Dropoff value (bits) for gapped alignments."), _Option(["-I", "show_gi"], ["input"], None, 0, "Show GI's in deflines? T/F", False), _Option(["-J", "believe_query"], ["input"], None, 0, "Believe the query defline? 
T/F", False), _Option(["-Z", "xdrop_final"], ["input"], None, 0, "X dropoff for final gapped alignment.", False), _Option(["-z", "db_length"], ["input"], None, 0, "Effective database length.", False), _Option(["-O", "seqalign_file"], ["output", "file"], None, 0, "seqalign file to output.", False), _Option(["-v", "descriptions"], ["input"], None, 0, "Number of one-line descriptions.", False), _Option(["-b", "alignments"], ["input"], None, 0, "Number of alignments.", False), _Option(["-Y", "search_length"], ["input"], None, 0, "Effective length of search space (use zero for the " + \ "real size).", False), _Option(["-T", "html"], ["input"], None, 0, "Produce HTML output? T/F", False), _Option(["-U", "case_filter"], ["input"], None, 0, "Use lower case filtering of FASTA sequence? T/F", False), _Option(["-a", "nprocessors"], ["input"], None, 0, "Number of processors to use.", False), _Option(["-g", "gapped"], ["input"], None, 0, "Whether to do a gapped alignment. T/F", False), ] try: #Insert extra parameters - at the start just in case there #are any arguments which must come last: self.parameters = extra_parameters + self.parameters except AttributeError: #Should we raise an error? The subclass should have set this up! self.parameters = extra_parameters AbstractCommandline.__init__(self, cmd, **kwargs) def _validate(self): if self.help: #Don't want to check the normally mandatory arguments like db return AbstractCommandline._validate(self) class _BlastAllOrPgpCommandLine(_BlastCommandLine): """Base Commandline object for NCBI BLAST wrappers (PRIVATE). This is provided for subclassing, it deals with shared options common to all the blastall and blastpgp tools (but not rpsblast). """ def __init__(self, cmd=None, **kwargs): assert cmd is not None extra_parameters = [\ _Option(["-G", "gap_open"], ["input"], None, 0, "Gap open penalty", False), _Option(["-E", "gap_extend"], ["input"], None, 0, "Gap extension penalty", False), _Option(["-A", "window_size"], ["input"], None, 0, "Multiple hits window size", False), _Option(["-f", "hit_extend"], ["input"], None, 0, "Threshold for extending hits.", False), _Option(["-K", "keep_hits"], ["input"], None, 0, " Number of best hits from a region to keep.", False), _Option(["-W", "wordsize"], ["input"], None, 0, "Word size", False), _Option(["-P", "passes"], ["input"], None, 0, "Hits/passes. Integer 0-2. 0 for multiple hit, " "1 for single hit (does not apply to blastn)", False), ] try: #Insert extra parameters - at the start just in case there #are any arguments which must come last: self.parameters = extra_parameters + self.parameters except AttributeError: #Should we raise an error? The subclass should have set this up! self.parameters = extra_parameters _BlastCommandLine.__init__(self, cmd, **kwargs) class BlastallCommandline(_BlastAllOrPgpCommandLine): """Create a commandline for the blastall program from NCBI (OBSOLETE). With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI are replacing blastall with separate tools blastn, blastp, blastx, tblastn and tblastx. Like blastall, this wrapper is now obsolete, and will be deprecated and removed in a future release of Biopython. >>> from Bio.Blast.Applications import BlastallCommandline >>> cline = BlastallCommandline(program="blastx", infile="m_cold.fasta", ... 
database="nr", expectation=0.001) >>> cline BlastallCommandline(cmd='blastall', database='nr', infile='m_cold.fasta', expectation=0.001, program='blastx') >>> print cline blastall -d nr -i m_cold.fasta -e 0.001 -p blastx You would typically run the command line with the Python subprocess module, as described in the Biopython tutorial. """ #TODO - This could use more checking for valid parameters to the program. def __init__(self, cmd="blastall",**kwargs): import warnings warnings.warn("Like blastall, this wrapper is now obsolete, and will be deprecated and removed in a future release of Biopython.", PendingDeprecationWarning) self.parameters = [ \ #Sorted in the same order as the output from blastall --help #which should make it easier to keep them up to date in future. #Note that some arguments are defined the the base clases (above). _Option(["-p", "program"], ["input"], None, 1, "The blast program to use (e.g. blastp, blastn).", False), _Option(["-q", "nuc_mismatch"], ["input"], None, 0, "Penalty for a nucleotide mismatch (blastn only).", False), _Option(["-r", "nuc_match"], ["input"], None, 0, "Reward for a nucleotide match (blastn only).", False), _Option(["-Q", "query_genetic_code"], ["input"], None, 0, "Query Genetic code to use.", False), _Option(["-D", "db_genetic_code"], ["input"], None, 0, "DB Genetic code (for tblast[nx] only).", False), _Option(["-M", "matrix"], ["input"], None, 0, "Matrix to use", False), _Option(["-S", "strands"], ["input"], None, 0, "Query strands to search against database (for blast[nx], " + \ "and tblastx). 3 is both, 1 is top, 2 is bottom.", False), _Option(["-l", "restrict_gi"], ["input"], None, 0, "Restrict search of database to list of GI's.", False), _Option(["-R", "checkpoint"], ["input", "file"], None, 0, "PSI-TBLASTN checkpoint input file.", False), _Option(["-n", "megablast"], ["input"], None, 0, "MegaBlast search T/F.", False), #The old name "region_length" is for consistency with our #old blastall function wrapper: _Option(["-L", "region_length", "range_restriction"], ["input"], None, 0, """Location on query sequence (string format start,end). In older versions of BLAST, -L set the length of region used to judge hits (see -K parameter).""", False), _Option(["-w", "frame_shit_penalty"], ["input"], None, 0, "Frame shift penalty (OOF algorithm for blastx).", False), _Option(["-t", "largest_intron"], ["input"], None, 0, "Length of the largest intron allowed in a translated " + \ "nucleotide sequence when linking multiple distinct " + \ "alignments. 
(0 invokes default behavior; a negative value " + \ "disables linking.)", False), _Option(["-B", "num_concatenated_queries"], ["input"], None, 0, "Number of concatenated queries, for blastn and tblastn.", False), _Option(["-V", "oldengine"], ["input"], None, 0, "Force use of the legacy BLAST engine.", False), _Option(["-C", "composition_based"], ["input"], None, 0, """Use composition-based statistics for tblastn: D or d: default (equivalent to F) 0 or F or f: no composition-based statistics 1 or T or t: Composition-based statistics as in NAR 29:2994-3005, 2001 2: Composition-based score adjustment as in Bioinformatics 21:902-911, 2005, conditioned on sequence properties 3: Composition-based score adjustment as in Bioinformatics 21:902-911, 2005, unconditionally For programs other than tblastn, must either be absent or be D, F or 0.""", False), _Option(["-s", "smith_waterman"], ["input"], None, 0, "Compute locally optimal Smith-Waterman alignments (This " + \ "option is only available for gapped tblastn.) T/F", False), ] _BlastAllOrPgpCommandLine.__init__(self, cmd, **kwargs) class BlastpgpCommandline(_BlastAllOrPgpCommandLine): """Create a commandline for the blastpgp program from NCBI (OBSOLETE). With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI are replacing blastpgp with a renamed tool psiblast. This module provides NcbipsiblastCommandline as a wrapper for the new tool psiblast. Like blastpgp (and blastall), this wrapper is now obsolete, and will be deprecated and removed in a future release of Biopython. >>> from Bio.Blast.Applications import BlastpgpCommandline >>> cline = BlastpgpCommandline(help=True) >>> cline BlastpgpCommandline(cmd='blastpgp', help=True) >>> print cline blastpgp --help You would typically run the command line with the Python subprocess module, as described in the Biopython tutorial. """ def __init__(self, cmd="blastpgp",**kwargs): import warnings warnings.warn("Like blastpgp (and blastall), this wrapper is now obsolete, and will be deprecated and removed in a future release of Biopython.", PendingDeprecationWarning) self.parameters = [ \ _Option(["-C", "checkpoint_outfile"], ["output", "file"], None, 0, "Output file for PSI-BLAST checkpointing.", False), _Option(["-R", "restart_infile"], ["input", "file"], None, 0, "Input file for PSI-BLAST restart.", False), _Option(["-k", "hit_infile"], ["input", "file"], None, 0, "Hit file for PHI-BLAST.", False), _Option(["-Q", "matrix_outfile"], ["output", "file"], None, 0, "Output file for PSI-BLAST matrix in ASCII.", False), _Option(["-B", "align_infile"], ["input", "file"], None, 0, "Input alignment file for PSI-BLAST restart.", False), _Option(["-S", "required_start"], ["input"], None, 0, "Start of required region in query.", False), _Option(["-H", "required_end"], ["input"], None, 0, "End of required region in query.", False), _Option(["-j", "npasses"], ["input"], None, 0, "Number of passes", False), _Option(["-N", "nbits_gapping"], ["input"], None, 0, "Number of bits to trigger gapping.", False), _Option(["-c", "pseudocounts"], ["input"], None, 0, "Pseudocounts constants for multiple passes.", False), _Option(["-h", "model_threshold"], ["input"], None, 0, "E-value threshold to include in multipass model.", False), #Does the old name "region_length" for -L make sense? 
_Option(["-L", "region_length"], ["input"], None, 0, "Cost to decline alignment (disabled when zero).", False), _Option(["-M", "matrix"], ["input"], None, 0, "Matrix (string, default BLOSUM62).", False), _Option(["-p", "program"], ["input"], None, 1, "The blast program to use (e.g blastpgp, patseedp or seedp).", False), ] _BlastAllOrPgpCommandLine.__init__(self, cmd, **kwargs) class RpsBlastCommandline(_BlastCommandLine): """Create a commandline for the classic rpsblast program from NCBI (OBSOLETE). With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI are replacing the old rpsblast with a new version of the same name plus a second tool rpstblastn, both taking different command line arguments. This module provides NcbirpsblastCommandline and NcbirpsblasntCommandline as wrappers for the new tools. Like the old rpsblast (and blastall), this wrapper is now obsolete, and will be deprecated and removed in a future release of Biopython. >>> from Bio.Blast.Applications import RpsBlastCommandline >>> cline = RpsBlastCommandline(help=True) >>> cline RpsBlastCommandline(cmd='rpsblast', help=True) >>> print cline rpsblast --help You would typically run the command line with the Python subprocess module, as described in the Biopython tutorial. """ def __init__(self, cmd="rpsblast",**kwargs): import warnings warnings.warn("Like the old rpsblast (and blastall), this wrapper is now obsolete, and will be deprecated and removed in a future release of Biopython.", PendingDeprecationWarning) self.parameters = [ \ #Note -N is also in blastpgp, but not blastall _Option(["-N", "nbits_gapping"], ["input"], None, 0, "Number of bits to trigger gapping.", False), #Note blastall and blastpgp wrappers have -P with name "passes". #If this is the same thing, we should be consistent! _Option(["-P", "multihit"], ["input"], None, 0, "0 for multiple hit, 1 for single hit", False), _Option(["-l", "logfile"], ["output", "file"], None, 0, "Logfile name.", False), _Option(["-p", "protein"], ["input"], None, 0, "Query sequence is protein. T/F", False), _Option(["-L", "range_restriction"], ["input"], None, 0, "Location on query sequence (string format start,end).", False), ] _BlastCommandLine.__init__(self, cmd, **kwargs) class _NcbiblastCommandline(AbstractCommandline): """Base Commandline object for (new) NCBI BLAST+ wrappers (PRIVATE). This is provided for subclassing, it deals with shared options common to all the BLAST tools (blastn, rpsblast, rpsblast, etc). """ def __init__(self, cmd=None, **kwargs): assert cmd is not None extra_parameters = [ \ #Core: _Switch(["-h", "h"], ["input"], "Print USAGE and DESCRIPTION; ignore other arguments."), _Switch(["-help", "help"], ["input"], "Print USAGE, DESCRIPTION and ARGUMENTS description; ignore other arguments."), _Switch(["-version", "version"], ["input"], "Print version number; ignore other arguments."), #Input query options: _Option(["-query", "query"], ["input", "file"], None, 0, "The sequence to search with.", False), #Should this be required? _Option(["-query_loc", "query_loc"], ["input"], None, 0, "Location on the query sequence (Format: start-stop)", False), #General search options: _Option(["-db", "db"], ["input"], None, 0, "The database to BLAST against.", False), #Should this be required? 
_Option(["-out", "out"], ["output", "file"], None, 0, "Output file for alignment.", False), _Option(["-evalue", "evalue"], ["input"], None, 0, "Expectation value cutoff.", False), _Option(["-word_size","word_size"], ["input"], None, 0, """Word size for wordfinder algorithm. Integer. Minimum 2.""", False), #BLAST-2-Sequences options: # - see subclass #Formatting options: _Option(["-outfmt", "outfmt"], ["input"], None, 0, "Alignment view. Integer 0-10. Use 5 for XML output (differs from classic BLAST which used 7 for XML).", False), #Did not include old aliases as meaning has changed! _Switch(["-show_gis","show_gis"], ["input"], "Show NCBI GIs in deflines?"), _Option(["-num_descriptions","num_descriptions"], ["input"], None, 0, """Number of database sequences to show one-line descriptions for. Integer argument (at least zero). Default is 500. See also num_alignments.""", False), _Option(["-num_alignments","num_alignments"], ["input"], None, 0, """Number of database sequences to show num_alignments for. Integer argument (at least zero). Default is 200. See also num_alignments.""", False), _Switch(["-html", "html"], ["input"], "Produce HTML output? See also the outfmt option."), #Query filtering options # TODO -soft_masking <Boolean>, is this a switch or an option? #_Switch(["-soft_masking", "soft_masking"], ["input"], # "Apply filtering locations as soft masks?"), _Switch(["-lcase_masking", "lcase_masking"], ["input"], "Use lower case filtering in query and subject sequence(s)?"), #Restrict search or results _Option(["-gilist", "gilist"], ["input", "file"], None, 0, """Restrict search of database to list of GI's. Incompatible with: negative_gilist, seqidlist, remote, subject, subject_loc""", False), _Option(["-negative_gilist", "negative_gilist"], ["input", "file"], None, 0, """Restrict search of database to everything except the listed GIs. Incompatible with: gilist, seqidlist, remote, subject, subject_loc""", False), _Option(["-seqidlist", "seqidlist"], ["input", "file"], None, 0, """Restrict search of database to list of SeqID's. Incompatible with: gilist, negative_gilist, remote, subject, subject_loc""", False), _Option(["-entrez_query", "entrez_query"], ["input"], None, 0, "Restrict search with the given Entrez query (requires remote).", False), _Option(["-max_target_seqs", "max_target_seqs"], ["input"], None, 0, """Maximum number of aligned sequences to keep. Integer argument (at least one).""", False), #Statistical options _Option(["-dbsize", "dbsize"], ["input"], None, 0, "Effective length of the database (integer)", False), _Option(["-searchsp", "searchsp"], ["input"], None, 0, "Effective length of the search space (integer)", False), #Extension options _Option(["-xdrop_ungap", "xdrop_ungap"], ["input"], None, 0, "X-dropoff value (in bits) for ungapped extensions. Float.", False), _Option(["-xdrop_gap", "xdrop_gap"], ["input"], None, 0, "X-dropoff value (in bits) for preliminary gapped extensions. Float.", False), _Option(["-xdrop_gap_final", "xdrop_gap_final"], ["input"], None, 0, "X-dropoff value (in bits) for final gapped alignment. Float.", False), _Option(["-window_size", "window_size"], ["input"], None, 0, "Multiple hits window size, use 0 to specify 1-hit algorithm. Integer.", False), # Search strategy options _Option(["-import_search_strategy", "import_search_strategy"], ["input", "file"], None, 0, """Search strategy to use. 
Incompatible with: export_search_strategy""", False), _Option(["-export_search_strategy", "export_search_strategy"], ["output", "file"], None, 0, """File name to record the search strategy used. Incompatible with: import_search_strategy""", False), #Miscellaneous options _Switch(["-parse_deflines", "parse_deflines"], ["input"], "Should the query and subject defline(s) be parsed?"), _Option(["-num_threads", "num_threads"], ["input"], None, 0, """Number of threads to use in the BLAST search. Integer of at least one. Default is one. Incompatible with: remote""", False), _Switch(["-remote", "remote"], ["input"], """Execute search remotely? Incompatible with: gilist, negative_gilist, subject_loc, num_threads, ..."""), ] try: #Insert extra parameters - at the start just in case there #are any arguments which must come last: self.parameters = extra_parameters + self.parameters except AttributeError: #Should we raise an error? The subclass should have set this up! self.parameters = extra_parameters AbstractCommandline.__init__(self, cmd, **kwargs) def _validate(self): incompatibles = {"remote":["gilist", "negative_gilist", "num_threads"], "import_search_strategy" : ["export_search_strategy"], "gilist":["negative_gilist"], "seqidlist":["gilist", "negative_gilist", "remote"]} self._validate_incompatibilities(incompatibles) if self.entrez_query and not self.remote : raise ValueError("Option entrez_query requires remote option.") AbstractCommandline._validate(self) def _validate_incompatibilities(self, incompatibles): for a in incompatibles: if self._get_parameter(a): for b in incompatibles[a]: if self._get_parameter(b): raise ValueError("Options %s and %s are incompatible." \ % (a,b)) class _Ncbiblast2SeqCommandline(_NcbiblastCommandline): """Base Commandline object for (new) NCBI BLAST+ wrappers (PRIVATE). This is provided for subclassing, it deals with shared options common to all the BLAST tools supporting two-sequence BLAST (blastn, psiblast, etc) but not rpsblast or rpstblastn. """ def __init__(self, cmd=None, **kwargs): assert cmd is not None extra_parameters = [ \ #General search options: _Option(["-gapopen", "gapopen"], ["input"], None, 0, "Cost to open a gap (integer).", False), _Option(["-gapextend", "gapextend"], ["input"], None, 0, "Cost to extend a gap (integer).", False), #BLAST-2-Sequences options: _Option(["-subject", "subject"], ["input", "file"], None, 0, """Subject sequence(s) to search. Incompatible with: db, gilist, negative_gilist. See also subject_loc.""", False), _Option(["-subject_loc", "subject_loc"], ["input"], None, 0, """Location on the subject sequence (Format: start-stop) Incompatible with: db, gilist, negative_gilist, remote. See also subject.""", False), #Restrict search or results: _Option(["-culling_limit", "culling_limit"], ["input"], None, 0, """Hit culling limit (integer). If the query range of a hit is enveloped by that of at least this many higher-scoring hits, delete the hit. Incompatible with: best_hit_overhang, best_hit_score_edge.""", False), _Option(["-best_hit_overhang", "best_hit_overhang"], ["input"], None, 0, """Best Hit algorithm overhang value (recommended value: 0.1) Float between 0.0 and 0.5 inclusive. Incompatible with: culling_limit.""", False), _Option(["-best_hit_score_edge", "best_hit_score_edge"], ["input"], None, 0, """Best Hit algorithm score edge value (recommended value: 0.1) Float between 0.0 and 0.5 inclusive. 
Incompatible with: culling_limit.""", False), ] try: #Insert extra parameters - at the start just in case there #are any arguments which must come last: self.parameters = extra_parameters + self.parameters except AttributeError: #Should we raise an error? The subclass should have set this up! self.parameters = extra_parameters _NcbiblastCommandline.__init__(self, cmd, **kwargs) def _validate(self): incompatibles = {"subject_loc":["db", "gilist", "negative_gilist", "seqidlist", "remote"], "culling_limit":["best_hit_overhang","best_hit_score_edge"], "subject":["db", "gilist", "negative_gilist", "seqidlist"]} self._validate_incompatibilities(incompatibles) _NcbiblastCommandline._validate(self) class NcbiblastpCommandline(_Ncbiblast2SeqCommandline): """Create a commandline for the NCBI BLAST+ program blastp (for proteins). With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI replaced the old blastall tool with separate tools for each of the searches. This wrapper therefore replaces BlastallCommandline with option -p blastp. >>> from Bio.Blast.Applications import NcbiblastpCommandline >>> cline = NcbiblastpCommandline(query="rosemary.pro", db="nr", ... evalue=0.001, remote=True, ungapped=True) >>> cline NcbiblastpCommandline(cmd='blastp', query='rosemary.pro', db='nr', evalue=0.001, remote=True, ungapped=True) >>> print cline blastp -query rosemary.pro -db nr -evalue 0.001 -remote -ungapped You would typically run the command line with the Python subprocess module, as described in the Biopython tutorial. """ def __init__(self, cmd="blastp", **kwargs): self.parameters = [ \ #General search options: _Option(["-task", "task"], ["input"], lambda value : value in ["blastp", "blastp-short"], 0, "Task to execute (string, blastp (default) or blastp-short).", False), _Option(["-matrix", "matrix"], ["input"], None, 0, "Scoring matrix name (default BLOSUM62).", False), _Option(["-threshold", "threshold"], ["input"], None, 0, "Minimum word score such that the word is added to the BLAST lookup table (float)", False), _Option(["-comp_based_stats", "comp_based_stats"], ["input"], lambda value : value in "0Ft2TtDd", 0, """Use composition-based statistics (string, default 2, i.e. True). 0, F or f: no composition-based statistics 2, T or t, D or d : Composition-based score adjustment as in Bioinformatics 21:902-911, 2005, conditioned on sequence properties Note that tblastn also supports values of 1 and 3.""", False), #Query filtering options: _Option(["-seg", "seg"], ["input"], None, 0, """Filter query sequence with SEG (string). Format: "yes", "window locut hicut", or "no" to disable. Default is "12 2.2 2.5""", False), #Restrict search or results: _Option(["-db_soft_mask", "db_soft_mask"], ["input"], None, 0, """Filtering algorithm for soft masking (integer). Filtering algorithm ID to apply to the BLAST database as soft masking. Incompatible with: subject, subject_loc""", False), #Extension options: _Switch(["-ungapped", "ungapped"], ["input"], "Perform ungapped alignment only?"), #Miscellaneous options: _Switch(["-use_sw_tback", "use_sw_tback"], ["input"], "Compute locally optimal Smith-Waterman alignments?"), ] _Ncbiblast2SeqCommandline.__init__(self, cmd, **kwargs) def _validate(self): incompatibles = {"db_soft_mask":["subject", "subject_loc"]} self._validate_incompatibilities(incompatibles) _Ncbiblast2SeqCommandline._validate(self) class NcbiblastnCommandline(_Ncbiblast2SeqCommandline): """Wrapper for the NCBI BLAST+ program blastn (for nucleotides). 
With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI replaced the old blastall tool with separate tools for each of the searches. This wrapper therefore replaces BlastallCommandline with option -p blastn. For example, to run a search against the "nt" nucleotide database using the FASTA nucleotide file "m_code.fasta" as the query, with an expectation value cut off of 0.001, saving the output to a file in XML format: >>> from Bio.Blast.Applications import NcbiblastnCommandline >>> cline = NcbiblastnCommandline(query="m_cold.fasta", db="nt", strand="plus", ... evalue=0.001, out="m_cold.xml", outfmt=5) >>> cline NcbiblastnCommandline(cmd='blastn', query='m_cold.fasta', db='nt', out='m_cold.xml', evalue=0.001, outfmt=5, strand='plus') >>> print cline blastn -query m_cold.fasta -db nt -out m_cold.xml -evalue 0.001 -outfmt 5 -strand plus You would typically run the command line with the Python subprocess module, as described in the Biopython tutorial. """ def __init__(self, cmd="blastn", **kwargs): self.parameters = [ \ #Input query options: _Option(["-strand", "strand"], ["input"], lambda value : value in ["both", "minus", "plus"],0, """Query strand(s) to search against database/subject. Values allowed are "both" (default), "minus", "plus".""", False), #General search options: _Option(["-task", "task"], ["input"], lambda value : value in ['blastn', 'blastn-short', 'dc-megablast', 'megablast', 'vecscreen'], 0, """Task to execute (string, default 'megablast') Allowed values 'blastn', 'blastn-short', 'dc-megablast', 'megablast' (the default), or 'vecscreen'.""", False), _Option(["-penalty", "penalty"], ["input"], None, 0, "Penalty for a nucleotide mismatch (integer, at most zero).", False), _Option(["-reward", "reward"], ["input"], None, 0, "Reward for a nucleotide match (integer, at least zero).", False), #TODO - Does this need an argument or is it a switch? #_Option(["-use_index", "use_index"], ["input"], None, 0, # "Use MegaBLAST database index (boolean).", False), _Option(["-index_name", "index_name"], ["input"], None, 0, "MegaBLAST database index name.", False), #Query filtering options: _Option(["-dust", "dust"], ["input"], None, 0, """Filter query sequence with DUST (string). Format: 'yes', 'level window linker', or 'no' to disable. Default = '20 64 1'. """, False), _Option(["-filtering_db", "filtering_db"], ["input"], None, 0, "BLAST database containing filtering elements (i.e. repeats).", False), _Option(["-window_masker_taxid", "window_masker_taxid"], ["input"], None, 0, "Enable WindowMasker filtering using a Taxonomic ID (integer).", False), _Option(["-window_masker_db", "window_masker_db"], ["input"], None, 0, "Enable WindowMasker filtering using this repeats database (string).", False), #Restrict search or results: _Option(["-db_soft_mask", "db_soft_mask"], ["input"], None, 0, """Filtering algorithm for soft masking (integer). Filtering algorithm ID to apply to the BLAST database as soft masking. Incompatible with: subject, subject_loc""", False), _Option(["-perc_identity", "perc_identity"], ["input"], None, 0, "Percent identity (real, 0 to 100 inclusive).", False), #Discontiguous MegaBLAST options _Option(["-template_type", "template_type"], ["input"], lambda value : value in ['coding', 'coding_and_optimal','optimal'], 0, """Discontiguous MegaBLAST template type (string). 
                    Allowed values: 'coding', 'coding_and_optimal' or 'optimal'
                    Requires: template_length.""", False),
            _Option(["-template_length", "template_length"], ["input"],
                    lambda value: value in [16, 18, 21, '16', '18', '21'], 0,
                    """Discontiguous MegaBLAST template length (integer).

                    Allowed values: 16, 18, 21
                    Requires: template_type.""", False),
            #Extension options:
            _Switch(["-no_greedy", "no_greedy"], ["input"],
                    "Use non-greedy dynamic programming extension"),
            _Option(["-min_raw_gapped_score", "min_raw_gapped_score"], ["input"], None, 0,
                    "Minimum raw gapped score to keep an alignment in the preliminary gapped and traceback stages (integer).", False),
            _Switch(["-ungapped", "ungapped"], ["input"],
                    "Perform ungapped alignment only?"),
            _Option(["-off_diagonal_range", "off_diagonal_range"], ["input"], None, 0,
                    """Number of off-diagonals to search for the 2nd hit (integer).

                    Expects a positive integer, or 0 (default) to turn off.

                    Added in BLAST 2.2.23+""", False),
            ]
        _Ncbiblast2SeqCommandline.__init__(self, cmd, **kwargs)

    def _validate(self):
        incompatibles = {"db_soft_mask": ["subject", "subject_loc"]}
        self._validate_incompatibilities(incompatibles)
        if (self.template_type and not self.template_length) \
        or (self.template_length and not self.template_type):
            raise ValueError("Options template_type and template_length require each other.")
        _Ncbiblast2SeqCommandline._validate(self)


class NcbiblastxCommandline(_Ncbiblast2SeqCommandline):
    """Wrapper for the NCBI BLAST+ program blastx (nucleotide query, protein database).

    With the release of BLAST+ (BLAST rewritten in C++ instead of C), the
    NCBI replaced the old blastall tool with separate tools for each of the
    searches. This wrapper therefore replaces BlastallCommandline with option
    -p blastx.

    >>> from Bio.Blast.Applications import NcbiblastxCommandline
    >>> cline = NcbiblastxCommandline(query="m_cold.fasta", db="nr", evalue=0.001)
    >>> cline
    NcbiblastxCommandline(cmd='blastx', query='m_cold.fasta', db='nr', evalue=0.001)
    >>> print cline
    blastx -query m_cold.fasta -db nr -evalue 0.001

    You would typically run the command line with the Python subprocess
    module, as described in the Biopython tutorial.
    """
    def __init__(self, cmd="blastx", **kwargs):
        self.parameters = [ \
            #Input query options:
            _Option(["-strand", "strand"], ["input"],
                    lambda value: value in ["both", "minus", "plus"], 0,
                    """Query strand(s) to search against database/subject.

                    Values allowed are "both" (default), "minus", "plus".""", False),
            _Option(["-query_gencode", "query_gencode"], ["input"], None, 0,
                    """Genetic code to use to translate query (integer).

                    Default is one.""", False),
            #General search options:
            _Option(["-frame_shift_penalty", "frame_shift_penalty"], ["input"], None, 0,
                    "Frame shift penalty (integer, at least 1, default ignored).", False),
            _Option(["-max_intron_length", "max_intron_length"], ["input"], None, 0,
                    """Maximum intron length (integer).

                    Length of the largest intron allowed in a translated
                    nucleotide sequence when linking multiple distinct
                    alignments (a negative value disables linking).
                    Default zero.""", False),
            _Option(["-matrix", "matrix"], ["input"], None, 0,
                    "Scoring matrix name (default BLOSUM62).", False),
            _Option(["-threshold", "threshold"], ["input"], None, 0,
                    "Minimum word score such that the word is added to the BLAST lookup table (float).", False),
            #Query filtering options:
            _Option(["-seg", "seg"], ["input"], None, 0,
                    """Filter query sequence with SEG (string).

                    Format: "yes", "window locut hicut", or "no" to disable.
Default is "12 2.2 2.5""", False), #Restrict search or results: _Option(["-db_soft_mask", "db_soft_mask"], ["input"], None, 0, """Filtering algorithm for soft masking (integer). Filtering algorithm ID to apply to the BLAST database as soft masking. Incompatible with: subject, subject_loc""", False), #Extension options: _Switch(["-ungapped", "ungapped"], ["input"], "Perform ungapped alignment only?"), ] _Ncbiblast2SeqCommandline.__init__(self, cmd, **kwargs) def _validate(self): incompatibles = {"db_soft_mask":["subject", "subject_loc"]} self._validate_incompatibilities(incompatibles) _Ncbiblast2SeqCommandline._validate(self) class NcbitblastnCommandline(_Ncbiblast2SeqCommandline): """Wrapper for the NCBI BLAST+ program tblastn. With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI replaced the old blastall tool with separate tools for each of the searches. This wrapper therefore replaces BlastallCommandline with option -p tblastn. >>> from Bio.Blast.Applications import NcbitblastnCommandline >>> cline = NcbitblastnCommandline(help=True) >>> cline NcbitblastnCommandline(cmd='tblastn', help=True) >>> print cline tblastn -help You would typically run the command line with the Python subprocess module, as described in the Biopython tutorial. """ def __init__(self, cmd="tblastn", **kwargs): self.parameters = [ \ #General search options: _Option(["-db_gencode", "db_gencode"], ["input"], None, 0, """Genetic code to use to translate query Integer. Default is one.""", False), _Option(["-frame_shift_penalty", "frame_shift_penalty"], ["input"], None, 0, "Frame shift penalty (integer, at least 1, default ignored).", False), _Option(["-max_intron_length", "max_intron_length"], ["input"], None, 0, """Maximum intron length (integer). Length of the largest intron allowed in a translated nucleotide sequence when linking multiple distinct alignments (a negative value disables linking). Default zero.""", False), _Option(["-matrix", "matrix"], ["input"], None, 0, "Scoring matrix name (default BLOSUM62).", False), _Option(["-threshold", "threshold"], ["input"], None, 0, "Minimum word score such that the word is added to the BLAST lookup table (float)", False), _Option(["-comp_based_stats", "comp_based_stats"], ["input"], lambda value : value in "0Ft12TtDd3", 0, """Use composition-based statistics (string, default 2, i.e. True). 0, F or f: no composition-based statistics 1: Composition-based statistics as in NAR 29:2994-3005, 2001 2, T or t, D or d : Composition-based score adjustment as in Bioinformatics 21:902-911, 2005, conditioned on sequence properties 3: Composition-based score adjustment as in Bioinformatics 21:902-911, 2005, unconditionally Note that only tblastn supports values of 1 and 3.""", False), #Query filtering options: _Option(["-seg", "seg"], ["input"], None, 0, """Filter query sequence with SEG (string). Format: "yes", "window locut hicut", or "no" to disable. Default is "12 2.2 2.5""", False), #Restrict search or results: _Option(["-db_soft_mask", "db_soft_mask"], ["input"], None, 0, """Filtering algorithm ID to apply to the BLAST database as soft masking (string). 
Incompatible with: subject, subject_loc """, False), #Extension options: _Switch(["-ungapped", "ungapped"], ["input"], "Perform ungapped alignment only?"), #Miscellaneous options: _Switch(["-use_sw_tback", "use_sw_tback"], ["input"], "Compute locally optimal Smith-Waterman alignments?"), #PSI-TBLASTN options: _Option(["-in_pssm", "in_pssm"], ["input", "file"], None, 0, """PSI-BLAST checkpoint file Incompatible with: remote, query""", False), ] _Ncbiblast2SeqCommandline.__init__(self, cmd, **kwargs) def _validate(self): incompatibles = {"in_pssm":["remote", "query"]} self._validate_incompatibilities(incompatibles) _Ncbiblast2SeqCommandline._validate(self) class NcbitblastxCommandline(_Ncbiblast2SeqCommandline): """Wrapper for the NCBI BLAST+ program tblastx. With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI replaced the old blastall tool with separate tools for each of the searches. This wrapper therefore replaces BlastallCommandline with option -p tblastx. >>> from Bio.Blast.Applications import NcbitblastxCommandline >>> cline = NcbitblastxCommandline(help=True) >>> cline NcbitblastxCommandline(cmd='tblastx', help=True) >>> print cline tblastx -help You would typically run the command line with the Python subprocess module, as described in the Biopython tutorial. """ def __init__(self, cmd="tblastx", **kwargs): self.parameters = [ \ #Input query options: _Option(["-strand", "strand"], ["input"], lambda value : value in ["both", "minus", "plus"],0, """Query strand(s) to search against database/subject. Values allowed are "both" (default), "minus", "plus".""", False), #Input query options: _Option(["-query_gencode", "query_gencode"], ["input"], None, 0, """Genetic code to use to translate query Integer. Default is one.""", False), #General search options: _Option(["-db_gencode", "db_gencode"], ["input"], None, 0, """Genetic code to use to translate query Integer. Default is one.""", False), _Option(["-max_intron_length", "max_intron_length"], ["input"], None, 0, """Maximum intron length (integer). Length of the largest intron allowed in a translated nucleotide sequence when linking multiple distinct alignments (a negative value disables linking). Default zero.""", False), _Option(["-matrix", "matrix"], ["input"], None, 0, "Scoring matrix name (default BLOSUM62).", False), _Option(["-threshold", "threshold"], ["input"], None, 0, "Minimum word score such that the word is added to the BLAST lookup table (float)", False), #Query filtering options: _Option(["-seg", "seg"], ["input"], None, 0, """Filter query sequence with SEG (string). Format: "yes", "window locut hicut", or "no" to disable. Default is "12 2.2 2.5""", False), #Restrict search or results: _Option(["-db_soft_mask", "db_soft_mask"], ["input"], None, 0, """Filtering algorithm ID to apply to the BLAST database as soft masking (string). Incompatible with: subject, subject_loc """, False), ] _Ncbiblast2SeqCommandline.__init__(self, cmd, **kwargs) class NcbipsiblastCommandline(_Ncbiblast2SeqCommandline): """Wrapper for the NCBI BLAST+ program psiblast. With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI replaced the old blastpgp tool with a similar tool psiblast. This wrapper therefore replaces BlastpgpCommandline, the wrapper for blastpgp. 
>>> from Bio.Blast.Applications import NcbipsiblastCommandline >>> cline = NcbipsiblastCommandline(help=True) >>> cline NcbipsiblastCommandline(cmd='psiblast', help=True) >>> print cline psiblast -help You would typically run the command line with the Python subprocess module, as described in the Biopython tutorial. """ def __init__(self, cmd="psiblast", **kwargs): self.parameters = [ \ #General search options: _Option(["-matrix", "matrix"], ["input"], None, 0, "Scoring matrix name (default BLOSUM62).", False), _Option(["-threshold", "threshold"], ["input"], None, 0, "Minimum word score such that the word is added to the BLAST lookup table (float)", False), _Option(["-comp_based_stats", "comp_based_stats"], ["input"], lambda value : value in "0Ft2TtDd", 0, """Use composition-based statistics (string, default 2, i.e. True). 0, F or f: no composition-based statistics 2, T or t, D or d : Composition-based score adjustment as in Bioinformatics 21:902-911, 2005, conditioned on sequence properties Note that tblastn also supports values of 1 and 3.""", False), #Query filtering options: _Option(["-seg", "seg"], ["input"], None, 0, """Filter query sequence with SEG (string). Format: "yes", "window locut hicut", or "no" to disable. Default is "12 2.2 2.5""", False), #Extension options: _Option(["-gap_trigger", "gap_trigger"], ["input"], None, 0, "Number of bits to trigger gapping (float, default 22)", False), #Miscellaneous options: _Switch(["-use_sw_tback", "use_sw_tback"], ["input"], "Compute locally optimal Smith-Waterman alignments?"), #PSI-BLAST options: _Option(["-num_iterations", "num_iterations"], ["input"], None, 0, """Number of iterations to perform, integer Integer of at least one. Default is one. Incompatible with: remote""", False), _Option(["-out_pssm", "out_pssm"], ["output", "file"], None, 0, "File name to store checkpoint file", False), _Option(["-out_ascii_pssm", "out_ascii_pssm"], ["output", "file"], None, 0, "File name to store ASCII version of PSSM", False), _Option(["-in_msa", "in_msa"], ["input", "file"], None, 0, """File name of multiple sequence alignment to restart PSI-BLAST Incompatible with: in_pssm, query""", False), _Option(["-in_pssm", "in_pssm"], ["input", "file"], None, 0, """PSI-BLAST checkpoint file Incompatible with: in_msa, query, phi_pattern""", False), #PSSM engine options: _Option(["-pseudocount", "pseudocount"], ["input"], None, 0, """Pseudo-count value used when constructing PSSM Integer. Default is zero.""", False), _Option(["-inclusion_ethresh", "inclusion_ethresh"], ["input"], None, 0, """E-value inclusion threshold for pairwise alignments Float. Default is 0.002.""", False), #PHI-BLAST options: _Option(["-phi_pattern", "phi_pattern"], ["input", "file"], None, 0, """File name containing pattern to search Incompatible with: in_pssm""", False), ] _Ncbiblast2SeqCommandline.__init__(self, cmd, **kwargs) def _validate(self): incompatibles = {"num_iterations":["remote"], "in_msa":["in_pssm", "query"], "in_pssm":["in_msa","query","phi_pattern"]} self._validate_incompatibilities(incompatibles) _Ncbiblast2SeqCommandline._validate(self) class NcbirpsblastCommandline(_NcbiblastCommandline): """Wrapper for the NCBI BLAST+ program rpsblast. With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI replaced the old rpsblast tool with a similar tool of the same name. This wrapper replaces RpsBlastCommandline, the wrapper for the old rpsblast. 
>>> from Bio.Blast.Applications import NcbirpsblastCommandline >>> cline = NcbirpsblastCommandline(help=True) >>> cline NcbirpsblastCommandline(cmd='rpsblast', help=True) >>> print cline rpsblast -help You would typically run the command line with the Python subprocess module, as described in the Biopython tutorial. """ def __init__(self, cmd="rpsblast", **kwargs): self.parameters = [ \ #Query filtering options: _Option(["-seg", "seg"], ["input"], None, 0, """Filter query sequence with SEG (string). Format: "yes", "window locut hicut", or "no" to disable. Default is "12 2.2 2.5""", False), ] _NcbiblastCommandline.__init__(self, cmd, **kwargs) class NcbirpstblastnCommandline(_NcbiblastCommandline): """Wrapper for the NCBI BLAST+ program rpstblastn. With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI replaced the old rpsblast tool with a similar tool of the same name, and a separate tool rpstblastn for Translated Reverse Position Specific BLAST. >>> from Bio.Blast.Applications import NcbirpstblastnCommandline >>> cline = NcbirpstblastnCommandline(help=True) >>> cline NcbirpstblastnCommandline(cmd='rpstblastn', help=True) >>> print cline rpstblastn -help You would typically run the command line with the Python subprocess module, as described in the Biopython tutorial. """ def __init__(self, cmd="rpstblastn", **kwargs): self.parameters = [ \ #Input query options: _Option(["-strand", "strand"], ["input"], lambda value : value in ["both", "minus", "plus"],0, """Query strand(s) to search against database/subject. Values allowed are "both" (default), "minus", "plus".""", False), #Input query options: _Option(["-query_gencode", "query_gencode"], ["input"], None, 0, """Genetic code to use to translate query Integer. Default is one.""", False), #Query filtering options: _Option(["-seg", "seg"], ["input"], None, 0, """Filter query sequence with SEG (string). Format: "yes", "window locut hicut", or "no" to disable. Default is "12 2.2 2.5""", False), #Extension options: _Switch(["-ungapped", "ungapped"], ["input"], "Perform ungapped alignment only?"), ] _NcbiblastCommandline.__init__(self, cmd, **kwargs) def _test(): """Run the Bio.Blast.Applications module's doctests.""" import doctest doctest.testmod(verbose=1) if __name__ == "__main__": #Run the doctests _test()
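
# A minimal usage sketch, kept as comments so that nothing runs on import.
# The docstrings above all note that these command lines are typically run
# with the Python subprocess module; assuming blastp is on the PATH and a
# query file "rosemary.pro" exists (both assumptions for illustration):
#
#     import subprocess
#     from Bio.Blast.Applications import NcbiblastpCommandline
#     cline = NcbiblastpCommandline(query="rosemary.pro", db="nr",
#                                   evalue=0.001, outfmt=5, out="result.xml")
#     child = subprocess.Popen(str(cline), shell=True,
#                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#     stdout, stderr = child.communicate()
#
# The _validate()/_validate_incompatibilities() machinery defined above
# rejects mutually exclusive arguments before anything is executed, e.g.
# combining remote with num_threads (incompatible per _NcbiblastCommandline)
# should raise a ValueError when the command line is validated:
#
#     bad = NcbiblastpCommandline(query="rosemary.pro", db="nr",
#                                 remote=True, num_threads=4)
#     str(bad)   # expected to raise ValueError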
BlogomaticProject/Blogomatic
opt/blog-o-matic/usr/lib/python/Bio/Blast/Applications.py
Python
gpl-2.0
56,839
[ "BLAST", "Biopython" ]
3c94e4ef9b5752660f00d10e5c48fca5652a5839bb3699fbc6af641d13fec023
# This file is part of cclib (http://cclib.github.io), a library for parsing # and interpreting the results of computational chemistry packages. # # Copyright (C) 2006-2014, the cclib development team # # The library is free software, distributed under the terms of # the GNU Lesser General Public version 2.1 or later. You should have # received a copy of the license along with cclib. You can also access # the full license online at http://www.gnu.org/copyleft/lgpl.html. """Parser for Gaussian output files""" from __future__ import print_function import re import numpy from . import logfileparser from . import utils class Gaussian(logfileparser.Logfile): """A Gaussian 98/03 log file.""" def __init__(self, *args, **kwargs): # Call the __init__ method of the superclass super(Gaussian, self).__init__(logname="Gaussian", *args, **kwargs) def __str__(self): """Return a string representation of the object.""" return "Gaussian log file %s" % (self.filename) def __repr__(self): """Return a representation of the object.""" return 'Gaussian("%s")' % (self.filename) def normalisesym(self, label): """Use standard symmetry labels instead of Gaussian labels. To normalise: (1) If label is one of [SG, PI, PHI, DLTA], replace by [sigma, pi, phi, delta] (2) replace any G or U by their lowercase equivalent >>> sym = Gaussian("dummyfile").normalisesym >>> labels = ['A1', 'AG', 'A1G', "SG", "PI", "PHI", "DLTA", 'DLTU', 'SGG'] >>> map(sym, labels) ['A1', 'Ag', 'A1g', 'sigma', 'pi', 'phi', 'delta', 'delta.u', 'sigma.g'] """ # note: DLT must come after DLTA greek = [('SG', 'sigma'), ('PI', 'pi'), ('PHI', 'phi'), ('DLTA', 'delta'), ('DLT', 'delta')] for k, v in greek: if label.startswith(k): tmp = label[len(k):] label = v if tmp: label = v + "." + tmp ans = label.replace("U", "u").replace("G", "g") return ans def before_parsing(self): # Used to index self.scftargets[]. SCFRMS, SCFMAX, SCFENERGY = list(range(3)) # Flag for identifying Coupled Cluster runs. self.coupledcluster = False # Fragment number for counterpoise or fragment guess calculations # (normally zero). self.counterpoise = 0 # Flag for identifying ONIOM calculations. self.oniom = False def after_parsing(self): # Correct the percent values in the etsecs in the case of # a restricted calculation. The following has the # effect of including each transition twice. if hasattr(self, "etsecs") and len(self.homos) == 1: new_etsecs = [[(x[0], x[1], x[2] * numpy.sqrt(2)) for x in etsec] for etsec in self.etsecs] self.etsecs = new_etsecs if hasattr(self, "scanenergies"): self.scancoords = [] self.scancoords = self.atomcoords if (hasattr(self, 'enthalpy') and hasattr(self, 'temperature') and hasattr(self, 'freeenergy')): self.set_attribute('entropy', (self.enthalpy - self.freeenergy) / self.temperature) # This bit is needed in order to trim coordinates that are printed a second time # at the end of geometry optimizations. Note that we need to do this for both atomcoords # and inputcoords. The reason is that normally a standard orientation is printed and that # is what we parse into atomcoords, but inputcoords stores the input (unmodified) coordinates # and that is copied over to atomcoords if no standard oritentation was printed, which happens # for example for jobs with no symmetry. This last step, however, is now generic for all parsers. # Perhaps then this part should also be generic code... 
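        # A worked illustration of the trim below, with made-up numbers: if
        # an optimization converged at the third geometry, optdone would end
        # in 2 while atomcoords could hold four frames, the last one being
        # the converged geometry printed a second time.  Slicing with
        # atomcoords[:last_point + 1] keeps frames 0..2 and drops the
        # duplicate:
        #
        #     self.optdone == [2], len(self.atomcoords) == 4
        #     self.atomcoords = self.atomcoords[:2 + 1]   # 3 frames remain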
# Regression that tests this: Gaussian03/cyclopropenyl.rhf.g03.cut.log if hasattr(self, 'optdone') and len(self.optdone) > 0: last_point = self.optdone[-1] if hasattr(self, 'atomcoords'): self.atomcoords = self.atomcoords[:last_point + 1] if hasattr(self, 'inputcoords'): self.inputcoords = self.inputcoords[:last_point + 1] def extract(self, inputfile, line): """Extract information from the file object inputfile.""" # This block contains some general information as well as coordinates, # which could be parsed in the future: # # Symbolic Z-matrix: # Charge = 0 Multiplicity = 1 # C 0.73465 0. 0. # C 1.93465 0. 0. # C # ... # # It also lists fragments, if there are any, which is potentially valuable: # # Symbolic Z-matrix: # Charge = 0 Multiplicity = 1 in supermolecule # Charge = 0 Multiplicity = 1 in fragment 1. # Charge = 0 Multiplicity = 1 in fragment 2. # B(Fragment=1) 0.06457 -0.0279 0.01364 # H(Fragment=1) 0.03117 -0.02317 1.21604 # ... # # Note, however, that currently we only parse information for the whole system # or supermolecule as Gaussian calls it. if line.strip() == "Symbolic Z-matrix:": self.updateprogress(inputfile, "Symbolic Z-matrix", self.fupdate) line = inputfile.next() while line.split()[0] == 'Charge': # For the supermolecule, we can parse the charge and multicplicity. regex = ".*=(.*)Mul.*=\s*-?(\d+).*" match = re.match(regex, line) assert match, "Something unusual about the line: '%s'" % line self.set_attribute('charge', int(match.groups()[0])) self.set_attribute('mult', int(match.groups()[1])) if line.split()[-2] == "fragment": self.nfragments = int(line.split()[-1].strip('.')) if line.strip()[-13:] == "model system.": self.nmodels = getattr(self, 'nmodels', 0) + 1 line = inputfile.next() # The remaining part will allow us to get the atom count. # When coordinates are given, there is a blank line at the end, but if # there is a Z-matrix here, there will also be variables and we need to # stop at those to get the right atom count. # Also, in older versions there is bo blank line (G98 regressions), # so we need to watch out for leaving the link. natom = 0 while line.split() and not "Variables" in line and not "Leave Link" in line: natom += 1 line = inputfile.next() self.set_attribute('natom', natom) # Continuing from above, there is not always a symbolic matrix, for example # if the Z-matrix was in the input file. In such cases, try to match the # line and get at the charge and multiplicity. # # Charge = 0 Multiplicity = 1 in supermolecule # Charge = 0 Multiplicity = 1 in fragment 1. # Charge = 0 Multiplicity = 1 in fragment 2. if line[1:7] == 'Charge' and line.find("Multiplicity") >= 0: self.updateprogress(inputfile, "Charge and Multiplicity", self.fupdate) if line.split()[-1] == "supermolecule" or not "fragment" in line: regex = ".*=(.*)Mul.*=\s*-?(\d+).*" match = re.match(regex, line) assert match, "Something unusual about the line: '%s'" % line self.set_attribute('charge', int(match.groups()[0])) self.set_attribute('mult', int(match.groups()[1])) if line.split()[-2] == "fragment": self.nfragments = int(line.split()[-1].strip('.')) # Number of atoms is also explicitely printed after the above. if line[1:8] == "NAtoms=": self.updateprogress(inputfile, "Attributes", self.fupdate) natom = int(line.split()[1]) self.set_attribute('natom', natom) # Catch message about completed optimization. 
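        # In the Gaussian output the message caught below typically looks
        # like this (illustrative):
        #
        #  Optimization completed.
        #     -- Stationary point found.
        #
        # and the index of the converged geometry, len(self.geovalues) - 1,
        # is what gets appended to optdone.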
if line[1:23] == "Optimization completed": if not hasattr(self, 'optdone'): self.optdone = [] self.optdone.append(len(self.geovalues) - 1) # Catch message about stopped optimization (not converged). if line[1:21] == "Optimization stopped": if not hasattr(self, "optdone"): self.optdone = [] # Extract the atomic numbers and coordinates from the input orientation, # in the event the standard orientation isn't available. if line.find("Input orientation") > -1 or line.find("Z-Matrix orientation") > -1: # If this is a counterpoise calculation, this output means that # the supermolecule is now being considered, so we can set: self.counterpoise = 0 self.updateprogress(inputfile, "Attributes", self.cupdate) if not hasattr(self, "inputcoords"): self.inputcoords = [] self.inputatoms = [] self.skip_lines(inputfile, ['d', 'cols', 'cols', 'd']) atomcoords = [] line = next(inputfile) while list(set(line.strip())) != ["-"]: broken = line.split() self.inputatoms.append(int(broken[1])) atomcoords.append(list(map(float, broken[3:6]))) line = next(inputfile) self.inputcoords.append(atomcoords) self.set_attribute('atomnos', self.inputatoms) self.set_attribute('natom', len(self.inputatoms)) # Extract the atomic masses. # Typical section: # Isotopes and Nuclear Properties: #(Nuclear quadrupole moments (NQMom) in fm**2, nuclear magnetic moments (NMagM) # in nuclear magnetons) # # Atom 1 2 3 4 5 6 7 8 9 10 # IAtWgt= 12 12 12 12 12 1 1 1 12 12 # AtmWgt= 12.0000000 12.0000000 12.0000000 12.0000000 12.0000000 1.0078250 1.0078250 1.0078250 12.0000000 12.0000000 # NucSpn= 0 0 0 0 0 1 1 1 0 0 # AtZEff= -3.6000000 -3.6000000 -3.6000000 -3.6000000 -3.6000000 -1.0000000 -1.0000000 -1.0000000 -3.6000000 -3.6000000 # NQMom= 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 # NMagM= 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 2.7928460 2.7928460 2.7928460 0.0000000 0.0000000 # ... with blank lines dividing blocks of ten, and Leave Link 101 at the end. # This is generally parsed before coordinates, so atomnos is not defined. # Note that in Gaussian03 the comments are not there yet and the labels are different. if line.strip() == "Isotopes and Nuclear Properties:": if not hasattr(self, "atommasses"): self.atommasses = [] line = next(inputfile) while line[1:16] != "Leave Link 101": if line[1:8] == "AtmWgt=": self.atommasses.extend(list(map(float,line.split()[1:]))) line = next(inputfile) # Extract the atomic numbers and coordinates of the atoms. if line.strip() == "Standard orientation:": self.updateprogress(inputfile, "Attributes", self.cupdate) # If this is a counterpoise calculation, this output means that # the supermolecule is now being considered, so we can set: self.counterpoise = 0 if not hasattr(self, "atomcoords"): self.atomcoords = [] self.skip_lines(inputfile, ['d', 'cols', 'cols', 'd']) atomnos = [] atomcoords = [] line = next(inputfile) while list(set(line.strip())) != ["-"]: broken = line.split() atomnos.append(int(broken[1])) atomcoords.append(list(map(float, broken[-3:]))) line = next(inputfile) self.atomcoords.append(atomcoords) self.set_attribute('natom', len(atomnos)) self.set_attribute('atomnos', atomnos) # This is a bit of a hack for regression Gaussian09/BH3_fragment_guess.pop_minimal.log # to skip output for all fragments, assuming the supermolecule is always printed first. # Eventually we want to make this more general, or even better parse the output for # all fragment, but that will happen in a newer version of cclib. 
if line[1:16] == "Fragment guess:" and getattr(self, 'nfragments', 0) > 1: if not "full" in line: inputfile.file.seek(0, 2) # Another hack for regression Gaussian03/ortho_prod_prod_freq.log, which is an ONIOM job. # Basically for now we stop parsing after the output for the real system, because # currently we don't support changes in system size or fragments in cclib. When we do, # we will want to parse the model systems, too, and that is what nmodels could track. if "ONIOM: generating point" in line and line.strip()[-13:] == 'model system.' and getattr(self, 'nmodels', 0) > 0: inputfile.file.seek(0,2) # With the gfinput keyword, the atomic basis set functios are: # # AO basis set in the form of general basis input (Overlap normalization): # 1 0 # S 3 1.00 0.000000000000 # 0.7161683735D+02 0.1543289673D+00 # 0.1304509632D+02 0.5353281423D+00 # 0.3530512160D+01 0.4446345422D+00 # SP 3 1.00 0.000000000000 # 0.2941249355D+01 -0.9996722919D-01 0.1559162750D+00 # 0.6834830964D+00 0.3995128261D+00 0.6076837186D+00 # 0.2222899159D+00 0.7001154689D+00 0.3919573931D+00 # **** # 2 0 # S 3 1.00 0.000000000000 # 0.7161683735D+02 0.1543289673D+00 # ... # # The same is also printed when the gfprint keyword is used, but the # interstitial lines differ and there are no stars between atoms: # # AO basis set (Overlap normalization): # Atom C1 Shell 1 S 3 bf 1 - 1 0.509245180608 -2.664678875191 0.000000000000 # 0.7161683735D+02 0.1543289673D+00 # 0.1304509632D+02 0.5353281423D+00 # 0.3530512160D+01 0.4446345422D+00 # Atom C1 Shell 2 SP 3 bf 2 - 5 0.509245180608 -2.664678875191 0.000000000000 # 0.2941249355D+01 -0.9996722919D-01 0.1559162750D+00 # ... #ONIOM calculations result basis sets reported for atoms that are not in order of atom number which breaks this code (line 390 relies on atoms coming in order) if line[1:13] == "AO basis set" and not self.oniom: self.gbasis = [] # For counterpoise fragment calcualtions, skip these lines. if self.counterpoise != 0: return atom_line = inputfile.next() self.gfprint = atom_line.split()[0] == "Atom" self.gfinput = not self.gfprint # Note how the shell information is on a separate line for gfinput, # whereas for gfprint it is on the same line as atom information. if self.gfinput: shell_line = inputfile.next() shell = [] while len(self.gbasis) < self.natom: if self.gfprint: cols = atom_line.split() subshells = cols[4] ngauss = int(cols[5]) else: cols = shell_line.split() subshells = cols[0] ngauss = int(cols[1]) parameters = [] for ig in range(ngauss): line = inputfile.next() parameters.append(list(map(self.float, line.split()))) for iss, ss in enumerate(subshells): contractions = [] for param in parameters: exponent = param[0] coefficient = param[iss+1] contractions.append((exponent, coefficient)) subshell = (ss, contractions) shell.append(subshell) if self.gfprint: line = inputfile.next() if line.split()[0] == "Atom": atomnum = int(re.sub(r"\D", "", line.split()[1])) if atomnum == len(self.gbasis) + 2: self.gbasis.append(shell) shell = [] atom_line = line else: self.gbasis.append(shell) else: line = inputfile.next() if line.strip() == "****": self.gbasis.append(shell) shell = [] atom_line = inputfile.next() shell_line = inputfile.next() else: shell_line = line # Find the targets for SCF convergence (QM calcs). 
if line[1:44] == 'Requested convergence on RMS density matrix': if not hasattr(self, "scftargets"): self.scftargets = [] # The following can happen with ONIOM which are mixed SCF # and semi-empirical if type(self.scftargets) == type(numpy.array([])): self.scftargets = [] scftargets = [] # The RMS density matrix. scftargets.append(self.float(line.split('=')[1].split()[0])) line = next(inputfile) # The MAX density matrix. scftargets.append(self.float(line.strip().split('=')[1][:-1])) line = next(inputfile) # For G03, there's also the energy (not for G98). if line[1:10] == "Requested": scftargets.append(self.float(line.strip().split('=')[1][:-1])) self.scftargets.append(scftargets) # Extract SCF convergence information (QM calcs). if line[1:10] == 'Cycle 1': if not hasattr(self, "scfvalues"): self.scfvalues = [] scfvalues = [] line = next(inputfile) while line.find("SCF Done") == -1: self.updateprogress(inputfile, "QM convergence", self.fupdate) if line.find(' E=') == 0: self.logger.debug(line) # RMSDP=3.74D-06 MaxDP=7.27D-05 DE=-1.73D-07 OVMax= 3.67D-05 # or # RMSDP=1.13D-05 MaxDP=1.08D-04 OVMax= 1.66D-04 if line.find(" RMSDP") == 0: parts = line.split() newlist = [self.float(x.split('=')[1]) for x in parts[0:2]] energy = 1.0 if len(parts) > 4: energy = parts[2].split('=')[1] if energy == "": energy = self.float(parts[3]) else: energy = self.float(energy) if len(self.scftargets[0]) == 3: # Only add the energy if it's a target criteria newlist.append(energy) scfvalues.append(newlist) try: line = next(inputfile) # May be interupted by EOF. except StopIteration: break self.scfvalues.append(scfvalues) # Extract SCF convergence information (AM1, INDO and other semi-empirical calcs). # The output (for AM1) looks like this: # Ext34=T Pulay=F Camp-King=F BShift= 0.00D+00 # It= 1 PL= 0.103D+01 DiagD=T ESCF= 31.564733 Diff= 0.272D+02 RMSDP= 0.152D+00. # It= 2 PL= 0.114D+00 DiagD=T ESCF= 7.265370 Diff=-0.243D+02 RMSDP= 0.589D-02. # ... # It= 11 PL= 0.184D-04 DiagD=F ESCF= 4.687669 Diff= 0.260D-05 RMSDP= 0.134D-05. # It= 12 PL= 0.105D-04 DiagD=F ESCF= 4.687669 Diff=-0.686D-07 RMSDP= 0.215D-05. # 4-point extrapolation. # It= 13 PL= 0.110D-05 DiagD=F ESCF= 4.687669 Diff=-0.111D-06 RMSDP= 0.653D-07. # Energy= 0.172272018655 NIter= 14. if line[1:4] == 'It=': scftargets = numpy.array([1E-7], "d") # This is the target value for the rms scfvalues = [[]] while line.find(" Energy") == -1: self.updateprogress(inputfile, "AM1 Convergence") if line[1:4] == "It=": parts = line.strip().split() scfvalues[0].append(self.float(parts[-1][:-1])) line = next(inputfile) # If an AM1 or INDO guess is used (Guess=INDO in the input, for example), # this will be printed after a single iteration, so that is the line # that should trigger a break from this loop. At least that's what we see # for regression Gaussian/Gaussian09/guessIndo_modified_ALT.out if line[:14] == " Initial guess": break # Attach the attributes to the object Only after the energy is found . if line.find(" Energy") == 0: self.scftargets = scftargets self.scfvalues = scfvalues # Note: this needs to follow the section where 'SCF Done' is used # to terminate a loop when extracting SCF convergence information. if line[1:9] == 'SCF Done': if not hasattr(self, "scfenergies"): self.scfenergies = [] self.scfenergies.append(utils.convertor(self.float(line.split()[4]), "hartree", "eV")) # gmagoon 5/27/09: added scfenergies reading for PM3 case # Example line: " Energy= -0.077520562724 NIter= 14." 
# See regression Gaussian03/QVGXLLKOCUKJST-UHFFFAOYAJmult3Fixed.out if line[1:8] == 'Energy=': if not hasattr(self, "scfenergies"): self.scfenergies = [] self.scfenergies.append(utils.convertor(self.float(line.split()[1]), "hartree", "eV")) # Total energies after Moller-Plesset corrections. # Second order correction is always first, so its first occurance # triggers creation of mpenergies (list of lists of energies). # Further MP2 corrections are appended as found. # # Example MP2 output line: # E2 = -0.9505918144D+00 EUMP2 = -0.28670924198852D+03 # Warning! this output line is subtly different for MP3/4/5 runs if "EUMP2" in line[27:34]: if not hasattr(self, "mpenergies"): self.mpenergies = [] self.mpenergies.append([]) mp2energy = self.float(line.split("=")[2]) self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV")) # Example MP3 output line: # E3= -0.10518801D-01 EUMP3= -0.75012800924D+02 if line[34:39] == "EUMP3": mp3energy = self.float(line.split("=")[2]) self.mpenergies[-1].append(utils.convertor(mp3energy, "hartree", "eV")) # Example MP4 output lines: # E4(DQ)= -0.31002157D-02 UMP4(DQ)= -0.75015901139D+02 # E4(SDQ)= -0.32127241D-02 UMP4(SDQ)= -0.75016013648D+02 # E4(SDTQ)= -0.32671209D-02 UMP4(SDTQ)= -0.75016068045D+02 # Energy for most substitutions is used only (SDTQ by default) if line[34:42] == "UMP4(DQ)": mp4energy = self.float(line.split("=")[2]) line = next(inputfile) if line[34:43] == "UMP4(SDQ)": mp4energy = self.float(line.split("=")[2]) line = next(inputfile) if line[34:44] == "UMP4(SDTQ)": mp4energy = self.float(line.split("=")[2]) self.mpenergies[-1].append(utils.convertor(mp4energy, "hartree", "eV")) # Example MP5 output line: # DEMP5 = -0.11048812312D-02 MP5 = -0.75017172926D+02 if line[29:32] == "MP5": mp5energy = self.float(line.split("=")[2]) self.mpenergies[-1].append(utils.convertor(mp5energy, "hartree", "eV")) # Total energies after Coupled Cluster corrections. # Second order MBPT energies (MP2) are also calculated for these runs, # but the output is the same as when parsing for mpenergies. # Read the consecutive correlated energies # but append only the last one to ccenergies. # Only the highest level energy is appended - ex. CCSD(T), not CCSD. if line[1:10] == "DE(Corr)=" and line[27:35] == "E(CORR)=": self.ccenergy = self.float(line.split()[3]) if line[1:10] == "T5(CCSD)=": line = next(inputfile) if line[1:9] == "CCSD(T)=": self.ccenergy = self.float(line.split()[1]) if line[12:53] == "Population analysis using the SCF density": if hasattr(self, "ccenergy"): if not hasattr(self, "ccenergies"): self.ccenergies = [] self.ccenergies.append(utils.convertor(self.ccenergy, "hartree", "eV")) del self.ccenergy #ONIOM component energy extraction if self.oniom and line[1:26] == "ONIOM: calculating energy": if not hasattr(self, "oniomenergies"): self.oniomenergies = [] line = next(inputfile) component_energies = [] while line.find('extrapolated') == -1: component_energy = float(line.split()[8]) component_energy = utils.convertor(component_energy, "hartree", "eV") component_energies.append(component_energy) line = next(inputfile) self.oniomenergies.append(component_energies) # Geometry convergence information. 
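        # The block parsed below has the following shape (values here are
        # illustrative); parts[2] is the current value and parts[3] the
        # threshold for each of the four criteria:
        #
        #          Item               Value     Threshold  Converged?
        #  Maximum Force            0.000012     0.000450     YES
        #  RMS     Force            0.000004     0.000300     YES
        #  Maximum Displacement     0.000639     0.001800     YES
        #  RMS     Displacement     0.000226     0.001200     YES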
if line[49:59] == 'Converged?': if not hasattr(self, "geotargets"): self.geovalues = [] self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0], "d") newlist = [0]*4 for i in range(4): line = next(inputfile) self.logger.debug(line) parts = line.split() try: value = self.float(parts[2]) except ValueError: self.logger.error("Problem parsing the value for geometry optimisation: %s is not a number." % parts[2]) else: newlist[i] = value self.geotargets[i] = self.float(parts[3]) self.geovalues.append(newlist) # Gradients. # Read in the cartesian energy gradients (forces) from a block like this: # ------------------------------------------------------------------- # Center Atomic Forces (Hartrees/Bohr) # Number Number X Y Z # ------------------------------------------------------------------- # 1 1 -0.012534744 -0.021754635 -0.008346094 # 2 6 0.018984731 0.032948887 -0.038003451 # 3 1 -0.002133484 -0.006226040 0.023174772 # 4 1 -0.004316502 -0.004968213 0.023174772 # -2 -0.001830728 -0.000743108 -0.000196625 # ------------------------------------------------------------------ # # The "-2" line is for a dummy atom # # Then optimization is done in internal coordinates, Gaussian also # print the forces in internal coordinates, which can be produced from # the above. This block looks like this: # Variable Old X -DE/DX Delta X Delta X Delta X New X # (Linear) (Quad) (Total) # ch 2.05980 0.01260 0.00000 0.01134 0.01134 2.07114 # hch 1.75406 0.09547 0.00000 0.24861 0.24861 2.00267 # hchh 2.09614 0.01261 0.00000 0.16875 0.16875 2.26489 # Item Value Threshold Converged? if line[37:43] == "Forces": if not hasattr(self, "grads"): self.grads = [] self.skip_lines(inputfile, ['header', 'd']) forces = [] line = next(inputfile) while list(set(line.strip())) != ['-']: tmpforces = [] for N in range(3): # Fx, Fy, Fz force = line[23+N*15:38+N*15] if force.startswith("*"): force = "NaN" tmpforces.append(float(force)) forces.append(tmpforces) line = next(inputfile) self.grads.append(forces) #Extract PES scan data #Summary of the potential surface scan: # N A SCF #---- --------- ----------- # 1 109.0000 -76.43373 # 2 119.0000 -76.43011 # 3 129.0000 -76.42311 # 4 139.0000 -76.41398 # 5 149.0000 -76.40420 # 6 159.0000 -76.39541 # 7 169.0000 -76.38916 # 8 179.0000 -76.38664 # 9 189.0000 -76.38833 # 10 199.0000 -76.39391 # 11 209.0000 -76.40231 #---- --------- ----------- if "Summary of the potential surface scan:" in line: scanenergies = [] scanparm = [] colmnames = next(inputfile) hyphens = next(inputfile) line = next(inputfile) while line != hyphens: broken = line.split() scanenergies.append(float(broken[-1])) scanparm.append(map(float, broken[1:-1])) line = next(inputfile) if not hasattr(self, "scanenergies"): self.scanenergies = [] self.scanenergies = scanenergies if not hasattr(self, "scanparm"): self.scanparm = [] self.scanparm = scanparm if not hasattr(self, "scannames"): self.scannames = colmnames.split()[1:-1] # Orbital symmetries. if line[1:20] == 'Orbital symmetries:' and not hasattr(self, "mosyms"): # For counterpoise fragments, skip these lines. 
if self.counterpoise != 0: return self.updateprogress(inputfile, "MO Symmetries", self.fupdate) self.mosyms = [[]] line = next(inputfile) unres = False if line.find("Alpha Orbitals") == 1: unres = True line = next(inputfile) i = 0 while len(line) > 18 and line[17] == '(': if line.find('Virtual') >= 0: self.homos = numpy.array([i-1], "i") # 'HOMO' indexes the HOMO in the arrays parts = line[17:].split() for x in parts: self.mosyms[0].append(self.normalisesym(x.strip('()'))) i += 1 line = next(inputfile) if unres: line = next(inputfile) # Repeat with beta orbital information i = 0 self.mosyms.append([]) while len(line) > 18 and line[17] == '(': if line.find('Virtual')>=0: # Here we consider beta # If there was also an alpha virtual orbital, # we will store two indices in the array # Otherwise there is no alpha virtual orbital, # only beta virtual orbitals, and we initialize # the array with one element. See the regression # QVGXLLKOCUKJST-UHFFFAOYAJmult3Fixed.out # donated by Gregory Magoon (gmagoon). if (hasattr(self, "homos")): # Extend the array to two elements # 'HOMO' indexes the HOMO in the arrays self.homos.resize([2]) self.homos[1] = i-1 else: # 'HOMO' indexes the HOMO in the arrays self.homos = numpy.array([i-1], "i") parts = line[17:].split() for x in parts: self.mosyms[1].append(self.normalisesym(x.strip('()'))) i += 1 line = next(inputfile) # Some calculations won't explicitely print the number of basis sets used, # and will occasionally drop some without warning. We can infer the number, # however, from the MO symmetries printed here. Specifically, this fixes # regression Gaussian/Gaussian09/dvb_sp_terse.log (#23 on github). self.set_attribute('nmo', len(self.mosyms[-1])) # Alpha/Beta electron eigenvalues. if line[1:6] == "Alpha" and line.find("eigenvalues") >= 0: # For counterpoise fragments, skip these lines. if self.counterpoise != 0: return # For ONIOM calcs, ignore this section in order to bypass assertion failure. if self.oniom: return self.updateprogress(inputfile, "Eigenvalues", self.fupdate) self.moenergies = [[]] HOMO = -2 while line.find('Alpha') == 1: if line.split()[1] == "virt." and HOMO == -2: # If there aren't any symmetries, this is a good way to find the HOMO. # Also, check for consistency if homos was already parsed. HOMO = len(self.moenergies[0])-1 if hasattr(self, "homos"): assert HOMO == self.homos[0] else: self.homos = numpy.array([HOMO], "i") # Convert to floats and append to moenergies, but sometimes Gaussian # doesn't print correctly so test for ValueError (bug 1756789). part = line[28:] i = 0 while i*10+4 < len(part): s = part[i*10:(i+1)*10] try: x = self.float(s) except ValueError: x = numpy.nan self.moenergies[0].append(utils.convertor(x, "hartree", "eV")) i += 1 line = next(inputfile) # If, at this point, self.homos is unset, then there were not # any alpha virtual orbitals if not hasattr(self, "homos"): HOMO = len(self.moenergies[0])-1 self.homos = numpy.array([HOMO], "i") if line.find('Beta') == 2: self.moenergies.append([]) HOMO = -2 while line.find('Beta') == 2: if line.split()[1] == "virt." and HOMO == -2: # If there aren't any symmetries, this is a good way to find the HOMO. # Also, check for consistency if homos was already parsed. 
HOMO = len(self.moenergies[1])-1 if len(self.homos) == 2: assert HOMO == self.homos[1] else: self.homos.resize([2]) self.homos[1] = HOMO part = line[28:] i = 0 while i*10+4 < len(part): x = part[i*10:(i+1)*10] self.moenergies[1].append(utils.convertor(self.float(x), "hartree", "eV")) i += 1 line = next(inputfile) self.moenergies = [numpy.array(x, "d") for x in self.moenergies] # Start of the IR/Raman frequency section. # Caution is advised here, as additional frequency blocks # can be printed by Gaussian (with slightly different formats), # often doubling the information printed. # See, for a non-standard exmaple, regression Gaussian98/test_H2.log if line[1:14] == "Harmonic freq": self.updateprogress(inputfile, "Frequency Information", self.fupdate) removeold = False # The whole block should not have any blank lines. while line.strip() != "": # The line with indices if line[1:15].strip() == "" and line[15:23].strip().isdigit(): freqbase = int(line[15:23]) if freqbase == 1 and hasattr(self, 'vibfreqs'): # This is a reparse of this information removeold = True # Lines with symmetries and symm. indices begin with whitespace. if line[1:15].strip() == "" and not line[15:23].strip().isdigit(): if not hasattr(self, 'vibsyms'): self.vibsyms = [] syms = line.split() self.vibsyms.extend(syms) if line[1:15] == "Frequencies --": if not hasattr(self, 'vibfreqs'): self.vibfreqs = [] if removeold: # This is a reparse, so throw away the old info if hasattr(self, "vibsyms"): # We have already parsed the vibsyms so don't throw away! self.vibsyms = self.vibsyms[-len(line[15:].split()):] if hasattr(self, "vibirs"): self.vibirs = [] if hasattr(self, 'vibfreqs'): self.vibfreqs = [] if hasattr(self, 'vibramans'): self.vibramans = [] if hasattr(self, 'vibdisps'): self.vibdisps = [] removeold = False freqs = [self.float(f) for f in line[15:].split()] self.vibfreqs.extend(freqs) if line[1:15] == "IR Inten --": if not hasattr(self, 'vibirs'): self.vibirs = [] irs = [] for ir in line[15:].split(): try: irs.append(self.float(ir)) except ValueError: irs.append(self.float('nan')) self.vibirs.extend(irs) if line[1:15] == "Raman Activ --": if not hasattr(self, 'vibramans'): self.vibramans = [] ramans = [] for raman in line[15:].split(): try: ramans.append(self.float(raman)) except ValueError: ramans.append(self.float('nan')) self.vibramans.extend(ramans) # Block with displacement should start with this. if line.strip().split()[0:3] == ["Atom", "AN", "X"]: if not hasattr(self, 'vibdisps'): self.vibdisps = [] disps = [] for n in range(self.natom): line = next(inputfile) numbers = [float(s) for s in line[10:].split()] N = len(numbers) // 3 if not disps: for n in range(N): disps.append([]) for n in range(N): disps[n].append(numbers[3*n:3*n+3]) self.vibdisps.extend(disps) line = next(inputfile) # Electronic transitions. if line[1:14] == "Excited State": if not hasattr(self, "etenergies"): self.etenergies = [] self.etoscs = [] self.etsyms = [] self.etsecs = [] # Need to deal with lines like: # (restricted calc) # Excited State 1: Singlet-BU 5.3351 eV 232.39 nm f=0.1695 # (unrestricted calc) (first excited state is 2!) 
# Excited State 2: ?Spin -A 0.1222 eV 10148.75 nm f=0.0000 # (Gaussian 09 ZINDO) # Excited State 1: Singlet-?Sym 2.5938 eV 478.01 nm f=0.0000 <S**2>=0.000 p = re.compile(":(?P<sym>.*?)(?P<energy>-?\d*\.\d*) eV") groups = p.search(line).groups() self.etenergies.append(utils.convertor(self.float(groups[1]), "eV", "cm-1")) self.etoscs.append(self.float(line.split("f=")[-1].split()[0])) self.etsyms.append(groups[0].strip()) line = next(inputfile) p = re.compile("(\d+)") CIScontrib = [] while line.find(" ->") >= 0: # This is a contribution to the transition parts = line.split("->") self.logger.debug(parts) # Has to deal with lines like: # 32 -> 38 0.04990 # 35A -> 45A 0.01921 frommoindex = 0 # For restricted or alpha unrestricted fromMO = parts[0].strip() if fromMO[-1] == "B": frommoindex = 1 # For beta unrestricted fromMO = int(p.match(fromMO).group())-1 # subtract 1 so that it is an index into moenergies t = parts[1].split() tomoindex = 0 toMO = t[0] if toMO[-1] == "B": tomoindex = 1 toMO = int(p.match(toMO).group())-1 # subtract 1 so that it is an index into moenergies percent = self.float(t[1]) # For restricted calculations, the percentage will be corrected # after parsing (see after_parsing() above). CIScontrib.append([(fromMO, frommoindex), (toMO, tomoindex), percent]) line = next(inputfile) self.etsecs.append(CIScontrib) # Circular dichroism data (different for G03 vs G09) # # G03 # # ## <0|r|b> * <b|rxdel|0> (Au), Rotatory Strengths (R) in # ## cgs (10**-40 erg-esu-cm/Gauss) # ## state X Y Z R(length) # ## 1 0.0006 0.0096 -0.0082 -0.4568 # ## 2 0.0251 -0.0025 0.0002 -5.3846 # ## 3 0.0168 0.4204 -0.3707 -15.6580 # ## 4 0.0721 0.9196 -0.9775 -3.3553 # # G09 # # ## 1/2[<0|r|b>*<b|rxdel|0> + (<0|rxdel|b>*<b|r|0>)*] # ## Rotatory Strengths (R) in cgs (10**-40 erg-esu-cm/Gauss) # ## state XX YY ZZ R(length) R(au) # ## 1 -0.3893 -6.7546 5.7736 -0.4568 -0.0010 # ## 2 -17.7437 1.7335 -0.1435 -5.3845 -0.0114 # ## 3 -11.8655 -297.2604 262.1519 -15.6580 -0.0332 if (line[1:52] == "<0|r|b> * <b|rxdel|0> (Au), Rotatory Strengths (R)" or line[1:50] == "1/2[<0|r|b>*<b|rxdel|0> + (<0|rxdel|b>*<b|r|0>)*]"): self.etrotats = [] self.skip_lines(inputfile, ['units']) headers = next(inputfile) Ncolms = len(headers.split()) line = next(inputfile) parts = line.strip().split() while len(parts) == Ncolms: try: R = self.float(parts[4]) except ValueError: # nan or -nan if there is no first excited state # (for unrestricted calculations) pass else: self.etrotats.append(R) line = next(inputfile) temp = line.strip().split() parts = line.strip().split() self.etrotats = numpy.array(self.etrotats, "d") # Number of basis sets functions. # Has to deal with lines like: # NBasis = 434 NAE= 97 NBE= 97 NFC= 34 NFV= 0 # and... # NBasis = 148 MinDer = 0 MaxDer = 0 # Although the former is in every file, it doesn't occur before # the overlap matrix is printed. if line[1:7] == "NBasis" or line[4:10] == "NBasis": # For counterpoise fragment, skip these lines. if self.counterpoise != 0: return # For ONIOM calcs, ignore this section in order to bypass assertion failure. if self.oniom: return # If nbasis was already parsed, check if it changed. If it did, issue a warning. # In the future, we will probably want to have nbasis, as well as nmo below, # as a list so that we don't need to pick one value when it changes. 
            nbasis = int(line.split('=')[1].split()[0])
            if hasattr(self, "nbasis"):
                try:
                    assert nbasis == self.nbasis
                except AssertionError:
                    self.logger.warning("Number of basis functions (nbasis) has changed from %i to %i" % (self.nbasis, nbasis))
            self.nbasis = nbasis

        # Number of linearly independent basis functions (NBsUse).
        if line[1:7] == "NBsUse":

            # For counterpoise fragments, skip these lines.
            if self.counterpoise != 0:
                return

            # For ONIOM calcs, ignore this section in order to bypass the
            # assertion failure.
            if self.oniom:
                return

            nmo = int(line.split('=')[1].split()[0])
            self.set_attribute('nmo', nmo)

        # For AM1 calculations, set nbasis by a second method,
        # as nmo may not always be explicitly stated.
        if line[7:22] == "basis functions, ":
            nbasis = int(line.split()[0])
            self.set_attribute('nbasis', nbasis)

        # Molecular orbital overlap matrix.
        # Has to deal with lines such as:
        #   *** Overlap ***
        #   ****** Overlap ******
        # Note that Gaussian sometimes drops basis functions,
        # causing the overlap matrix as parsed below to not be
        # symmetric (which is a problem for population analyses, etc.).
        if line[1:4] == "***" and (line[5:12] == "Overlap" or line[8:15] == "Overlap"):

            # Ensure that this is the main calc and not a fragment.
            if self.counterpoise != 0:
                return

            self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
            # Overlap integrals for basis fn#1 are in aooverlaps[0].
            base = 0
            colmNames = next(inputfile)
            while base < self.nbasis:
                self.updateprogress(inputfile, "Overlap", self.fupdate)
                for i in range(self.nbasis - base):  # Fewer lines this time.
                    line = next(inputfile)
                    parts = line.split()
                    for j in range(len(parts) - 1):  # Some lines are longer than others.
                        k = float(parts[j + 1].replace("D", "E"))
                        self.aooverlaps[base + j, i + base] = k
                        self.aooverlaps[i + base, base + j] = k
                base += 5
                colmNames = next(inputfile)
            self.aooverlaps = numpy.array(self.aooverlaps, "d")

        # Molecular orbital coefficients (mocoeffs).
        # Essentially only produced for SCF calculations.
        # This is also the place where aonames and atombasis are parsed.
        if line[5:35] == "Molecular Orbital Coefficients" or \
           line[5:41] == "Alpha Molecular Orbital Coefficients" or \
           line[5:40] == "Beta Molecular Orbital Coefficients":

            # If this is a counterpoise fragment, return without parsing
            # orbital info.
            if self.counterpoise != 0:
                return

            # Skip this for ONIOM calcs.
            if self.oniom:
                return

            if line[5:40] == "Beta Molecular Orbital Coefficients":
                beta = True
                if self.popregular:
                    return  # This was continue before refactoring the parsers.
                    #continue # Not going to extract mocoeffs
                # Need to add an extra array to self.mocoeffs.
                self.mocoeffs.append(numpy.zeros((self.nmo, self.nbasis), "d"))
            else:
                beta = False
                self.aonames = []
                self.atombasis = []
                mocoeffs = [numpy.zeros((self.nmo, self.nbasis), "d")]

            base = 0
            self.popregular = False
            for base in range(0, self.nmo, 5):
                self.updateprogress(inputfile, "Coefficients", self.fupdate)
                colmNames = next(inputfile)
                if not colmNames.split():
                    self.logger.warning("Molecular coefficients header found but no coefficients.")
                    break
                if base == 0 and int(colmNames.split()[0]) != 1:
                    # Implies that this is a POP=REGULAR calculation,
                    # and so only aonames (not mocoeffs) will be extracted.
                    self.popregular = True
                symmetries = next(inputfile)
                eigenvalues = next(inputfile)
                for i in range(self.nbasis):
                    line = next(inputfile)
                    if i == 0:
                        # Find the location of the start of the basis function name.
                        start_of_basis_fn_name = line.find(line.split()[3]) - 1
                    if base == 0 and not beta:  # Just do this the first time 'round.
                        parts = line[:start_of_basis_fn_name].split()
                        if len(parts) > 1:  # New atom.
                            if i > 0:
                                self.atombasis.append(atombasis)
                            atombasis = []
                            atomname = "%s%s" % (parts[2], parts[1])
                        orbital = line[start_of_basis_fn_name:20].strip()
                        self.aonames.append("%s_%s" % (atomname, orbital))
                        atombasis.append(i)
                    part = line[21:].replace("D", "E").rstrip()
                    temp = []
                    for j in range(0, len(part), 10):
                        temp.append(float(part[j:j + 10]))
                    # Integer division keeps the slice index an int under Python 3.
                    if beta:
                        self.mocoeffs[1][base:base + len(part) // 10, i] = temp
                    else:
                        mocoeffs[0][base:base + len(part) // 10, i] = temp
                if base == 0 and not beta:  # Do the last update of atombasis.
                    self.atombasis.append(atombasis)
                if self.popregular:
                    # We now have aonames, so no need to continue.
                    break
            if not self.popregular and not beta:
                self.mocoeffs = mocoeffs

        # Natural orbital coefficients (nocoeffs) and occupation numbers
        # (nooccnos), which respectively define the eigenvectors and
        # eigenvalues of the diagonalized one-electron density matrix. These
        # orbitals are formed after configuration interaction (CI)
        # calculations, but not only. Similarly to mocoeffs, we can parse and
        # check aonames and atombasis here.
        #
        #     Natural Orbital Coefficients:
        #                           1         2         3         4         5
        #     Eigenvalues --     2.01580   2.00363   2.00000   2.00000   1.00000
        #   1 1   O  1S          0.00000  -0.15731  -0.28062   0.97330   0.00000
        #   2        2S          0.00000   0.75440   0.57746   0.07245   0.00000
        # ...
        #
        if line[5:33] == "Natural Orbital Coefficients":
            self.aonames = []
            self.atombasis = []
            nocoeffs = numpy.zeros((self.nmo, self.nbasis), "d")
            nooccnos = []

            base = 0
            self.popregular = False
            for base in range(0, self.nmo, 5):
                self.updateprogress(inputfile, "Natural orbitals", self.fupdate)
                colmNames = next(inputfile)
                if base == 0 and int(colmNames.split()[0]) != 1:
                    # Implies that this is a POP=REGULAR calculation,
                    # and so only aonames (not nocoeffs) will be extracted.
                    self.popregular = True
                eigenvalues = next(inputfile)
                nooccnos.extend(map(float, eigenvalues.split()[2:]))
                for i in range(self.nbasis):
                    line = next(inputfile)
                    if base == 0:  # Just do this the first time 'round.
                        # Changed below from :12 to :11 to deal with Elmar Neumann's example.
                        parts = line[:11].split()
                        if len(parts) > 1:  # New atom.
                            if i > 0:
                                self.atombasis.append(atombasis)
                            atombasis = []
                            atomname = "%s%s" % (parts[2], parts[1])
                        orbital = line[11:20].strip()
                        self.aonames.append("%s_%s" % (atomname, orbital))
                        atombasis.append(i)
                    part = line[21:].replace("D", "E").rstrip()
                    temp = []
                    for j in range(0, len(part), 10):
                        temp.append(float(part[j:j + 10]))
                    # Integer division again, for the same Python 3 reason.
                    nocoeffs[base:base + len(part) // 10, i] = temp
                # Do the last update of atombasis.
                if base == 0:
                    self.atombasis.append(atombasis)
                # We now have aonames, so no need to continue.
                if self.popregular:
                    break
            if not self.popregular:
                self.nocoeffs = nocoeffs
                self.nooccnos = nooccnos

        # For FREQ=Anharm, extract anharmonicity constants.
        if line[1:40] == "X matrix of Anharmonic Constants (cm-1)":
            Nvibs = len(self.vibfreqs)
            self.vibanharms = numpy.zeros((Nvibs, Nvibs), "d")
            base = 0
            colmNames = next(inputfile)
            while base < Nvibs:
                for i in range(Nvibs - base):  # Fewer lines this time.
                    line = next(inputfile)
                    parts = line.split()
                    for j in range(len(parts) - 1):  # Some lines are longer than others.
                        k = float(parts[j + 1].replace("D", "E"))
                        self.vibanharms[base + j, i + base] = k
                        self.vibanharms[i + base, base + j] = k
                base += 5
                colmNames = next(inputfile)

        # Pseudopotential charges.
        if line.find("Pseudopotential Parameters") > -1:

            self.skip_lines(inputfile, ['e', 'label1', 'label2', 'e'])

            line = next(inputfile)
            if line.find("Centers:") < 0:
                return  # This was continue before the parser refactoring.
                # continue

            # Needs to handle code like the following:
            #
            #  Center     Atomic      Valence      Angular      Power         Coordinates
            #  Number     Number     Electrons     Momentum     of R   Exponent   Coefficient   X   Y   Z
            # ===================================================================================================================================
            # Centers:   1
            # Centers:  16
            # Centers:  21 24
            # Centers:  99100101102
            #    1         44           16         -4.012684  -0.696698   0.006750
            #                                      F and up
            #                                                     0      554.3796303       -0.05152700
            centers = []
            while line.find("Centers:") >= 0:
                temp = line[10:]
                for i in range(0, len(temp) - 3, 3):
                    centers.append(int(temp[i:i + 3]))
                line = next(inputfile)
            centers.sort()  # Not always in increasing order.

            self.coreelectrons = numpy.zeros(self.natom, "i")

            for center in centers:
                front = line[:10].strip()
                while not (front and int(front) == center):
                    line = next(inputfile)
                    front = line[:10].strip()
                info = line.split()
                self.coreelectrons[center - 1] = int(info[1]) - int(info[2])
                line = next(inputfile)

        # This will be printed for counterpoise calculations only.
        # To prevent crashing, we need to know which fragment is being considered.
        # Other information is also printed in lines that start like this.
        if line[1:14] == 'Counterpoise:':
            if line[42:50] == "fragment":
                self.counterpoise = int(line[51:54])

        # This will be printed only during ONIOM calcs; use it to set a flag
        # that will allow assertion failures to be bypassed in the code.
        if line[1:7] == "ONIOM:":
            self.oniom = True

        # Atomic charges are straightforward to parse, although the header
        # has changed over time somewhat.
        #
        # Mulliken charges:
        #                1
        #     1  C   -0.004513
        #     2  C   -0.077156
        # ...
        # Sum of Mulliken charges =   0.00000
        # Mulliken charges with hydrogens summed into heavy atoms:
        #                1
        #     1  C   -0.004513
        #     2  C    0.002063
        # ...
        #
        if line[1:25] == "Mulliken atomic charges:" or line[1:18] == "Mulliken charges:" or \
           line[1:23] == "Lowdin Atomic Charges:" or line[1:16] == "Lowdin charges:":
            if not hasattr(self, "atomcharges"):
                self.atomcharges = {}
            ones = next(inputfile)
            charges = []
            nline = next(inputfile)
            while "Sum of" not in nline:
                charges.append(float(nline.split()[2]))
                nline = next(inputfile)
            if "Mulliken" in line:
                self.atomcharges["mulliken"] = charges
            else:
                self.atomcharges["lowdin"] = charges

        if line.strip() == "Natural Population":
            if not hasattr(self, 'atomcharges'):
                self.atomcharges = {}
            line1 = next(inputfile)
            line2 = next(inputfile)
            if line1.split()[0] == 'Natural' and line2.split()[2] == 'Charge':
                dashes = next(inputfile)
                charges = []
                for i in range(self.natom):
                    nline = next(inputfile)
                    charges.append(float(nline.split()[2]))
                self.atomcharges["natural"] = charges

        # Extract thermochemistry, printed as in this (truncated) example:
        # Temperature   298.150 Kelvin.  Pressure   1.00000 Atm.
        # Zero-point correction=                           0.342233 (Hartree/
        # Thermal correction to Energy=                    0.
        # Thermal correction to Enthalpy=                  0.
        # Thermal correction to Gibbs Free Energy=         0.302940
        # Sum of electronic and zero-point Energies=    -563.649744
        # Sum of electronic and thermal Energies=       -563.636699
        # Sum of electronic and thermal Enthalpies=     -563.635755
        # Sum of electronic and thermal Free Energies=  -563.689037
        if "Sum of electronic and thermal Enthalpies" in line:
            self.set_attribute('enthalpy', float(line.split()[6]))
        if "Sum of electronic and thermal Free Energies=" in line:
            self.set_attribute('freenergy', float(line.split()[7]))
        if line[1:12] == "Temperature":
            self.set_attribute('temperature', float(line.split()[1]))


if __name__ == "__main__":
    import doctest, gaussianparser, sys
    if len(sys.argv) == 1:
        doctest.testmod(gaussianparser, verbose=False)
    if len(sys.argv) >= 2:
        parser = gaussianparser.Gaussian(sys.argv[1])
        data = parser.parse()
    if len(sys.argv) > 2:
        for i in range(len(sys.argv[2:])):
            if hasattr(data, sys.argv[2 + i]):
                print(getattr(data, sys.argv[2 + i]))
Clyde-fare/cclib
src/cclib/parser/gaussianparser.py
Python
lgpl-2.1
62,712
[ "Gaussian", "cclib" ]
d1151e501edad8ed2a60e117f03b11dcb4f8a2d3a2d73a98e7094c546bead158
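The overlap, coefficient, and anharmonicity sections of this parser all lean on the same two idioms: converting Fortran-style "D" exponents to "E" before calling float(), and filling a symmetric matrix from Gaussian's lower-triangular, five-column blocks. The following is a minimal standalone sketch of that pattern; the helper name and the sample block are hypothetical and not part of cclib:

import numpy as np

def parse_symmetric_block(lines, nbasis):
    """Fill a symmetric (nbasis x nbasis) matrix from lower-triangular,
    5-column blocks, converting Fortran 'D' exponents (1.0D-03) to 'E'."""
    matrix = np.zeros((nbasis, nbasis))
    it = iter(lines)
    base = 0
    next(it)  # skip the first column-header line
    while base < nbasis:
        for i in range(nbasis - base):
            parts = next(it).split()
            for j, value in enumerate(parts[1:]):  # parts[0] is the row index
                k = float(value.replace("D", "E"))
                # Mirror each element across the diagonal as cclib does.
                matrix[base + j, i + base] = k
                matrix[i + base, base + j] = k
        base += 5
        if base < nbasis:
            next(it)  # skip the next column-header line
    return matrix

block = """\
                1             2
      1   0.100000D+01
      2   0.236704D+00  0.100000D+01
""".splitlines()
print(parse_symmetric_block(block, 2))  # symmetric 2x2 overlap-style matrix
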
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2011-2015 Slack # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ taken from: http://peter-hoffmann.com/2010/extrinsic-visitor-pattern-python-inheritance.html """ class Visitor(object): def visit(self, obj, *args, **kwargs): meth = None for cls in obj.__class__.__mro__: meth_name = 'visit_'+cls.__name__ meth = getattr(self, meth_name, None) if meth: break if not meth: meth = self.generic_visit return meth(obj, *args, **kwargs) def generic_visit(self, obj, *args, **kwargs): raise NotImplementedError()
Slack06/yadg
descgen/visitor/base.py
Python
mit
1,695
[ "VisIt" ]
dd97e04896eb51a337244df177afbfdb09d97ba1d418aba3c3458948db934e0a
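A short usage sketch of the Visitor class above (the Animal/Dog names are illustrative only): visit() walks the object's MRO looking for a visit_<ClassName> method, so a subclass without its own handler falls back to the nearest ancestor's handler, and generic_visit raises when nothing matches.

# Assumes the Visitor class defined in the module above.
class Animal(object):
    pass

class Dog(Animal):
    pass

class AnimalVisitor(Visitor):
    def visit_Animal(self, obj, *args, **kwargs):
        # Fallback handler reached via the MRO for any Animal subclass
        # that has no more specific visit_* method.
        return "some animal"

    def visit_Dog(self, obj, *args, **kwargs):
        return "a dog"

print(AnimalVisitor().visit(Dog()))     # -> "a dog"
print(AnimalVisitor().visit(Animal()))  # -> "some animal"
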
#!/usr/bin/env python # # Copyright (c) 2013 Martin Abente Lahaye. - tch@sugarlabs.org # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301 USA. import sys from gi.repository import GObject sys.path.append("..") from twitter.twr_oauth import TwrOauth from twitter.twr_account import TwrAccount consumer_key = '' consumer_secret = '' access_key = '' access_secret = '' TwrAccount.set_secrets(consumer_key, consumer_secret, access_key, access_secret) def __phase2_failed_cb(oauth, info): print '[FAILED] phase2: access-downloaded-failed, with %s' % info loop.quit() def __phase1_failed_cb(oauth, info): print '[FAILED] phase1: request-downloaded-failed, with %s' % info loop.quit() def __phase2_cb(oauth, info): print '[OK] phase2: access-downloaded.' TwrAccount.set_secrets(consumer_key, consumer_secret, info['oauth_token'], info['oauth_token_secret']) message = ''' Replace these values to run the remaining tests: consumer_key = \'%s\' consumer_secret = \'%s\' access_key = \'%s\' access_secret = \'%s\' ''' print message % (consumer_key, consumer_secret, info['oauth_token'], info['oauth_token_secret']) loop.quit() def __phase1_cb(oauth, info): print '[OK] phase1: request-downloaded' url = TwrOauth.AUTHORIZATION_URL % info['oauth_token'] print 'Please visit %s' % url verifier = raw_input('verifier: ') TwrAccount.set_secrets(consumer_key, consumer_secret, info['oauth_token'], info['oauth_token_secret']) oauth.connect('access-downloaded', __phase2_cb) oauth.connect('access-downloaded-failed', __phase2_failed_cb) oauth.access_token(verifier) oauth = TwrOauth() oauth.connect('request-downloaded', __phase1_cb) oauth.connect('request-downloaded-failed', __phase1_failed_cb) oauth.request_token() loop = GObject.MainLoop() loop.run()
tchx84/twitter-gobject
tests/test_twr_oauth.py
Python
lgpl-2.1
2,627
[ "VisIt" ]
2fc04d455cd71016cada41a1c963c1038f73b5417e2f6d1e12d7ddaf8b37cdef
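The test above drives a two-phase OAuth 1.0a handshake through GObject signals: request_token() emits "request-downloaded", the user authorizes and supplies a verifier, and access_token(verifier) emits "access-downloaded" with the permanent credentials. Here is a library-free sketch of the same callback flow; FakeOauth is a hypothetical stand-in for TwrOauth, not its real implementation:

class FakeOauth(object):
    def __init__(self):
        self._handlers = {}

    def connect(self, signal, callback):
        self._handlers[signal] = callback

    def request_token(self):
        # Phase 1 would hit the request-token endpoint; faked here.
        self._handlers['request-downloaded'](
            self, {'oauth_token': 'req-abc', 'oauth_token_secret': 'req-xyz'})

    def access_token(self, verifier):
        # Phase 2 would exchange the verifier for permanent credentials.
        self._handlers['access-downloaded'](
            self, {'oauth_token': 'acc-123', 'oauth_token_secret': 'acc-456'})

def on_request(oauth, info):
    print('visit the authorization URL for token %s' % info['oauth_token'])
    oauth.connect('access-downloaded', on_access)
    oauth.access_token('verifier-entered-by-user')

def on_access(oauth, info):
    print('store access secrets: %s / %s'
          % (info['oauth_token'], info['oauth_token_secret']))

oauth = FakeOauth()
oauth.connect('request-downloaded', on_request)
oauth.request_token()
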
#!/usr/bin/env python

"""
Commands related to syncing copytext from Google Docs.
"""

import app_config
import logging

from fabric.api import task
from oauth import get_document, get_credentials, get_doc

logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)


@task(default=True)
def update():
    """
    Downloads a Google Doc as an Excel file.
    """
    if app_config.AUTHORS_GOOGLE_DOC_KEY is None:
        logger.warn('You have set AUTHORS_GOOGLE_DOC_KEY to None. If you want to use a Google Sheet, set AUTHORS_GOOGLE_DOC_KEY to the key of your sheet in app_config.py')
        return

    credentials = get_credentials()
    if not credentials:
        logger.warn('No Google OAuth credentials file found.')
        logger.warn('Run `fab app` and visit `http://localhost:8000` to generate credentials.')
        return

    get_document(app_config.AUTHORS_GOOGLE_DOC_KEY, app_config.AUTHORS_PATH)


@task
def get_transcript():
    gdoc = app_config.TRANSCRIPT_GDOC_KEY
    path = app_config.TRANSCRIPT_HTML_PATH

    if gdoc:
        get_doc(gdoc, path)
nprapps/debates
fabfile/text.py
Python
mit
1,148
[ "VisIt" ]
ba0ba4ebc8b1f81806ff1142131b985ead2401753de186a0e380a2a672b0d5ff
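Both tasks above follow the same guard-then-fetch shape: validate the configuration, bail out early with a logged warning, and only then hit the network. A library-free sketch of that pattern, assuming hypothetical stand-in names rather than the real app_config module (in the real project the task would typically be run via Fabric, e.g. something like `fab text.update`, depending on how the fabfile package is laid out):

import logging

logging.basicConfig(format='%(levelname)s: %(message)s')
logger = logging.getLogger(__name__)

AUTHORS_GOOGLE_DOC_KEY = None  # stand-in for app_config.AUTHORS_GOOGLE_DOC_KEY

def update():
    # Guard: the sheet key must be configured before anything else runs.
    if AUTHORS_GOOGLE_DOC_KEY is None:
        logger.warning('AUTHORS_GOOGLE_DOC_KEY is unset; skipping sync.')
        return
    # The credentials guard and the actual download would follow here.

update()
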
# -*- coding: utf-8 -*-

"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""

# Author: Vincent Michel <vincent.michel@inria.fr>
#         Minor fixes by Fabian Pedregosa
#         Amit Aides <amitibo@tx.technion.ac.il>
#         Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
#         Lars Buitinck <L.J.Buitinck@uva.nl>
#         (parts based on earlier work by Mathieu Blondel)
#
# License: BSD Style.

from abc import ABCMeta, abstractmethod

import numpy as np
from scipy.sparse import issparse

from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize, LabelBinarizer
from .utils import array2d, atleast2d_or_csr
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils import check_arrays

__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']


class BaseNB(BaseEstimator, ClassifierMixin):
    """Abstract base class for naive Bayes estimators"""
    __metaclass__ = ABCMeta

    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X

        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like
        of shape [n_classes, n_samples].

        Input is passed to _joint_log_likelihood as-is by predict,
        predict_proba and predict_log_proba.
        """

    def predict(self, X):
        """
        Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X
        """
        jll = self._joint_log_likelihood(X)
        return self.classes_[np.argmax(jll, axis=1)]

    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered arithmetically.
        """
        jll = self._joint_log_likelihood(X)
        # normalize by P(x) = P(f_1, ..., f_n)
        log_prob_x = logsumexp(jll, axis=1)
        return jll - np.atleast_2d(log_prob_x).T

    def predict_proba(self, X):
        """
        Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the model,
            where classes are ordered arithmetically.
        """
        return np.exp(self.predict_log_proba(X))


class GaussianNB(BaseNB):
    """
    Gaussian Naive Bayes (GaussianNB)

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array, shape = [n_samples]
        Target vector relative to X

    Attributes
    ----------
    `class_prior_` : array, shape = [n_classes]
        probability of each class.

    `theta_` : array, shape = [n_classes, n_features]
        mean of each feature per class

    `sigma_` : array, shape = [n_classes, n_features]
        variance of each feature per class

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([1, 1, 1, 2, 2, 2])
    >>> from sklearn.naive_bayes import GaussianNB
    >>> clf = GaussianNB()
    >>> clf.fit(X, Y)
    GaussianNB()
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    """

    def fit(self, X, y):
        """Fit Gaussian Naive Bayes according to X, y

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_arrays(X, y, sparse_format='dense')

        n_samples, n_features = X.shape

        if n_samples != y.shape[0]:
            raise ValueError("X and y have incompatible shapes")

        self.classes_ = unique_y = np.unique(y)
        n_classes = unique_y.shape[0]

        self.theta_ = np.zeros((n_classes, n_features))
        self.sigma_ = np.zeros((n_classes, n_features))
        self.class_prior_ = np.zeros(n_classes)
        epsilon = 1e-9
        for i, y_i in enumerate(unique_y):
            self.theta_[i, :] = np.mean(X[y == y_i, :], axis=0)
            self.sigma_[i, :] = np.var(X[y == y_i, :], axis=0) + epsilon
            self.class_prior_[i] = np.float(np.sum(y == y_i)) / n_samples
        return self

    def _joint_log_likelihood(self, X):
        X = array2d(X)
        joint_log_likelihood = []
        for i in xrange(np.size(self.classes_)):
            jointi = np.log(self.class_prior_[i])
            # The Gaussian log-likelihood needs the full 2 * pi normalization.
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
            n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
                                 (self.sigma_[i, :]), 1)
            joint_log_likelihood.append(jointi + n_ij)

        joint_log_likelihood = np.array(joint_log_likelihood).T
        return joint_log_likelihood


class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data

    Any estimator based on this class should provide:

    __init__
    _joint_log_likelihood(X) as per BaseNB
    """

    def fit(self, X, y, sample_weight=None, class_prior=None):
        """Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).

        class_prior : array, shape [n_classes]
            Custom prior probability per class.
            Overrides the fit_prior parameter.

        Returns
        -------
        self : object
            Returns self.
        """
        X = atleast2d_or_csr(X)

        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        n_classes = len(self.classes_)
        if Y.shape[1] == 1:
            Y = np.concatenate((1 - Y, Y), axis=1)

        if X.shape[0] != Y.shape[0]:
            msg = "X and y have incompatible shapes."
            if issparse(X):
                msg += "\nNote: Sparse matrices cannot be indexed w/ boolean \
masks (use `indices=True` in CV)."
raise ValueError(msg) if sample_weight is not None: Y *= array2d(sample_weight).T if class_prior: if len(class_prior) != n_classes: raise ValueError( "Number of priors must match number of classes") self.class_log_prior_ = np.log(class_prior) elif self.fit_prior: # empirical prior, with sample_weight taken into account y_freq = Y.sum(axis=0) self.class_log_prior_ = np.log(y_freq) - np.log(y_freq.sum()) else: self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes) N_c, N_c_i = self._count(X, Y) self.feature_log_prob_ = (np.log(N_c_i + self.alpha) - np.log(N_c.reshape(-1, 1) + self.alpha * X.shape[1])) return self @staticmethod def _count(X, Y): """Count feature occurrences. Returns (N_c, N_c_i), where N_c is the count of all features in all samples of class c; N_c_i is the count of feature i in all samples of class c. """ if np.any((X.data if issparse(X) else X) < 0): raise ValueError("Input X must be non-negative.") N_c_i = safe_sparse_dot(Y.T, X) N_c = np.sum(N_c_i, axis=1) return N_c, N_c_i # XXX The following is a stopgap measure; we need to set the dimensions # of class_log_prior_ and feature_log_prob_ correctly. def _get_coef(self): return self.feature_log_prob_[1] if len(self.classes_) == 2 \ else self.feature_log_prob_ def _get_intercept(self): return self.class_log_prior_[1] if len(self.classes_) == 2 \ else self.class_log_prior_ coef_ = property(_get_coef) intercept_ = property(_get_intercept) class MultinomialNB(BaseDiscreteNB): """ Naive Bayes classifier for multinomial models The multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work. Parameters ---------- alpha: float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). fit_prior: boolean Whether to learn class prior probabilities or not. If false, a uniform prior will be used. Attributes ---------- `intercept_`, `class_log_prior_` : array, shape = [n_classes] Smoothed empirical log probability for each class. `feature_log_prob_`, `coef_` : array, shape = [n_classes, n_features] Empirical log probability of features given a class, P(x_i|y). (`intercept_` and `coef_` are properties referring to `class_log_prior_` and `feature_log_prob_`, respectively.) Examples -------- >>> import numpy as np >>> X = np.random.randint(5, size=(6, 100)) >>> Y = np.array([1, 2, 3, 4, 5, 6]) >>> from sklearn.naive_bayes import MultinomialNB >>> clf = MultinomialNB() >>> clf.fit(X, Y) MultinomialNB(alpha=1.0, fit_prior=True) >>> print(clf.predict(X[2])) [3] Notes ----- For the rationale behind the names `coef_` and `intercept_`, i.e. naive Bayes as a linear classifier, see J. Rennie et al. (2003), Tackling the poor assumptions of naive Bayes text classifiers, ICML. """ def __init__(self, alpha=1.0, fit_prior=True): self.alpha = alpha self.fit_prior = fit_prior def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" X = atleast2d_or_csr(X) return (safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_) class BernoulliNB(BaseDiscreteNB): """Naive Bayes classifier for multivariate Bernoulli models. Like MultinomialNB, this classifier is suitable for discrete data. The difference is that while MultinomialNB works with occurrence counts, BernoulliNB is designed for binary/boolean features. 
Parameters ---------- alpha: float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). binarize: float or None, optional Threshold for binarizing (mapping to booleans) of sample features. If None, input is presumed to already consist of binary vectors. fit_prior: boolean Whether to learn class prior probabilities or not. If false, a uniform prior will be used. Attributes ---------- `class_log_prior_` : array, shape = [n_classes] Log probability of each class (smoothed). `feature_log_prob_` : array, shape = [n_classes, n_features] Empirical log probability of features given a class, P(x_i|y). Examples -------- >>> import numpy as np >>> X = np.random.randint(2, size=(6, 100)) >>> Y = np.array([1, 2, 3, 4, 4, 5]) >>> from sklearn.naive_bayes import BernoulliNB >>> clf = BernoulliNB() >>> clf.fit(X, Y) BernoulliNB(alpha=1.0, binarize=0.0, fit_prior=True) >>> print(clf.predict(X[2])) [3] References ---------- C.D. Manning, P. Raghavan and H. Schütze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 234–265. A. McCallum and K. Nigam (1998). A comparison of event models for naive Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for Text Categorization, pp. 41–48. V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS). """ def __init__(self, alpha=1.0, binarize=.0, fit_prior=True): self.alpha = alpha self.binarize = binarize self.fit_prior = fit_prior def _count(self, X, Y): if self.binarize is not None: X = binarize(X, threshold=self.binarize) return super(BernoulliNB, self)._count(X, Y) def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" X = atleast2d_or_csr(X) if self.binarize is not None: X = binarize(X, threshold=self.binarize) n_classes, n_features = self.feature_log_prob_.shape n_samples, n_features_X = X.shape if n_features_X != n_features: raise ValueError("Expected input with %d features, got %d instead" % (n_features, n_features_X)) neg_prob = np.log(1 - np.exp(self.feature_log_prob_)) # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob X_neg_prob = (neg_prob.sum(axis=1) - safe_sparse_dot(X, neg_prob.T)) jll = safe_sparse_dot(X, self.feature_log_prob_.T) + X_neg_prob return jll + self.class_log_prior_
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/sklearn/naive_bayes.py
Python
agpl-3.0
14,400
[ "Gaussian" ]
52767f0638096530d82819ca1b3c8a3f5dc426fa336decc3ec9742dbedbffc8f
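The core of GaussianNB._joint_log_likelihood above is just log P(c) plus the sum of per-feature Gaussian log densities. A hand-worked sketch of that arithmetic for one sample and two 1-D classes (all numbers are illustrative, not from the library's tests):

import numpy as np

# Per-class feature means, variances, and equal priors.
theta = np.array([[0.0], [4.0]])
sigma = np.array([[1.0], [1.0]])
class_prior = np.array([0.5, 0.5])
x = np.array([1.0])

jll = []
for i in range(2):
    log_prior = np.log(class_prior[i])
    # -0.5 * sum(log(2*pi*sigma^2)) is the Gaussian normalization term.
    log_likelihood = -0.5 * np.sum(np.log(2.0 * np.pi * sigma[i]))
    log_likelihood -= 0.5 * np.sum((x - theta[i]) ** 2 / sigma[i])
    jll.append(log_prior + log_likelihood)

# x = 1.0 is far more likely under the class with mean 0 than mean 4.
print(np.argmax(jll))  # -> 0
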
import gc import logging import traceback from collections import defaultdict from datetime import datetime, timedelta from multiprocessing import Process, Queue import numpy as np import pandas as pd import xarray as xr from typhon.geodesy import great_circle_distance from typhon.geographical import GeoIndex from typhon.utils import add_xarray_groups, get_xarray_groups from typhon.utils.timeutils import to_datetime, to_timedelta, Timer __all__ = [ "Collocator", "check_collocation_data" ] logger = logging.getLogger(__name__) # The names for the processes. This started as an easter egg, but it actually # helps to identify different processes during debugging. PROCESS_NAMES = [ 'Newton', 'Einstein', 'Bohr', 'Darwin', 'Pasteur', 'Freud', 'Galilei', 'Lavoisier', 'Kepler', 'Copernicus', 'Faraday', 'Maxwell', 'Bernard', 'Boas', 'Heisenberg', 'Pauling', 'Virchow', 'Schrodinger', 'Rutherford', 'Dirac', 'Vesalius', 'Brahe', 'Buffon', 'Boltzmann', 'Planck', 'Curie', 'Herschel', 'Lyell', 'Laplace', 'Hubble', 'Thomson', 'Born', 'Crick', 'Fermi', 'Euler', 'Liebig', 'Eddington', 'Harvey', 'Malpighi', 'Huygens', 'Gauss', 'Haller', 'Kekule', 'Koch', 'Gell-Mann', 'Fischer', 'Mendeleev', 'Glashow', 'Watson', 'Bardeen', 'Neumann', 'Feynman', 'Wegener', 'Hawking', 'Leeuwenhoek', 'Laue', 'Kirchhoff', 'Bethe', 'Euclid', 'Mendel', 'Onnes', 'Morgan', 'Helmholtz', 'Ehrlich', 'Mayr', 'Sherrington', 'Dobzhansky', 'Delbruck', 'Lamarck', 'Bayliss', 'Chomsky', 'Sanger', 'Lucretius', 'Dalton', 'Broglie', 'Linnaeus', 'Piaget', 'Simpson', 'Levi-Strauss', 'Margulis', 'Landsteiner', 'Lorenz', 'Wilson', 'Hopkins', 'Elion', 'Selye', 'Oppenheimer', 'Teller', 'Libby', 'Haeckel', 'Salk', 'Kraepelin', 'Lysenko', 'Galton', 'Binet', 'Kinsey', 'Fleming', 'Skinner', 'Wundt', 'Archimedes' ] class ProcessCrashed(Exception): """Helper exception for crashed processes""" pass class Collocator: def __init__( self, threads=None, name=None, #log_dir=None ): """Initialize a collocator object that can find collocations Args: threads: Finding collocations can be parallelized in threads. Give here the maximum number of threads that you want to use. Which number of threads is the best, may be machine-dependent. So this is a parameter that you can use to fine-tune the performance. Note: Not yet implemented due to GIL usage of sklearn BallTree. name: The name of this collocator, will be used in log statements. """ self.empty = None # xr.Dataset() self.index = None self.index_with_primary = False self.threads = threads # These optimization parameters will be overwritten in collocate self.bin_factor = None self.magnitude_factor = None self.tunnel_limit = None self.leaf_size = None self.name = name if name is not None else "Collocator" # If no collocations are found, this will be returned. 
We need empty arrays to concatenate the results without problems:

    @property
    def no_pairs(self):
        return np.array([[], []])

    @property
    def no_intervals(self):
        return np.array([], dtype='timedelta64[ns]')

    @property
    def no_distances(self):
        return np.array([])

    def __call__(self, *args, **kwargs):
        return self.collocate(*args, **kwargs)

    def _debug(self, msg):
        logger.debug(f"[{self.name}] {msg}")

    def _info(self, msg):
        logger.info(f"[{self.name}] {msg}")

    def _error(self, msg):
        logger.error(f"[{self.name}] {msg}")

    def collocate_filesets(
            self, filesets, start=None, end=None, processes=None, output=None,
            bundle=None, skip_file_errors=False, post_processor=None,
            post_processor_kwargs=None, **kwargs
    ):
        """Find collocations between the data of two filesets

        If you want to save the collocations directly to disk, it may be
        easier to use :meth:`~typhon.collocations.Collocations.search`
        directly.

        Args:
            filesets: A list of two :class:`FileSet` objects, the primary and
                the secondary fileset. Can be also
                :class:`~typhon.collocations.common.Collocations` objects with
                `read_mode=collapse`. The order of the filesets is irrelevant
                for the results of the collocation search but files from the
                secondary fileset might be read multiple times if using
                parallel processing (`processes` is greater than one). The
                number of output files could be different (see also the
                option `bundle`).
            start: Start date either as datetime object or as string
                ("YYYY-MM-DD hh:mm:ss"). Year, month and day are required.
                Hours, minutes and seconds are optional. If not given, it is
                datetime.min per default.
            end: End date. Same format as "start". If not given, it is
                datetime.max per default.
            processes: Collocating can be parallelized, which improves the
                performance significantly. Pass here the number of processes
                to use.
            output: Fileset object where the collocated data should be stored.
            bundle: Set this to *primary* if you want to bundle the output
                files by their collocated primaries, i.e. there will be only
                one output file per primary. *daily* is also possible, then
                all files from one day are bundled together. Per default, all
                collocations for each file match will be saved separately.
                This might lead to a high number of output files.
                Note: *daily* means that one process bundles all collocations
                from one day into one output file. If using multiple
                processes, this could still produce several daily output
                files per day.
            skip_file_errors: If this is *True* and a file could not be read,
                the file and its match will be skipped and a warning will be
                printed. Otherwise the program will stop (default).
            post_processor: A function for post-processing the collocated data
                before saving it to `output`. Must accept two parameters: a
                xarray.Dataset with the collocated data and a dictionary with
                the path attributes from the collocated files.
            post_processor_kwargs: A dictionary with keyword arguments that
                should be passed to `post_processor`.
            **kwargs: Further keyword arguments that are allowed for
                :meth:`collocate`.

        Yields:
            A xarray.Dataset with the collocated data if `output` is not set.
            If `output` is set to a FileSet-like object, only the filename of
            the stored collocations is yielded. The results are not ordered
            if you use more than one process. For more information about the
            yielded xarray.Dataset have a look at :meth:`collocate`.

        Examples:

        .. code-block:: python
        """
        timer = Timer().start()

        if len(filesets) != 2:
            raise ValueError("Only collocating two filesets at once is "
                             "allowed at the moment!")

        # Check the max_interval argument because we need it later:
        max_interval = kwargs.get("max_interval", None)
        if max_interval is None:
            raise ValueError("Collocating filesets without max_interval is"
                             " not yet implemented!")

        if start is None:
            start = datetime.min
        else:
            start = to_datetime(start)
        if end is None:
            end = datetime.max
        else:
            end = to_datetime(end)

        self._info(f"Collocate from {start} to {end}")

        # Find the files from both filesets which overlap temporally.
        matches = list(filesets[0].match(
            filesets[1], start=start, end=end, max_interval=max_interval,
        ))

        if processes is None:
            processes = 1

        # Make sure that there are never more processes than matches:
        processes = min(processes, len(matches))

        total_matches = sum(len(match[1]) for match in matches)
        self._info(f"using {processes} process(es) on {total_matches} matches")

        # MAGIC with processes
        # Each process gets a list with matches. Important: the matches should
        # be continuous to guarantee a good performance. After finishing one
        # match, the process pushes its results to the result queue. If errors
        # are raised during collocating, the raised errors are pushed to the
        # error queue.
        matches_chunks = np.array_split(matches, processes)

        # This queue collects all results:
        results = Queue(maxsize=processes)

        # This queue collects all error exceptions:
        errors = Queue()

        # Extend the keyword arguments that we are going to pass to
        # _collocate_files:
        kwargs.update({
            "start": start,
            "end": end,
            "filesets": filesets,
            "output": output,
            "bundle": bundle,
            "skip_file_errors": skip_file_errors,
            "post_processor": post_processor,
            "post_processor_kwargs": post_processor_kwargs,
        })

        # This list contains all running processes:
        process_list = [
            Process(
                target=Collocator._process_caller,
                args=(
                    self, results, errors, PROCESS_NAMES[i],
                ),
                kwargs={**kwargs, "matches": matches_chunk},
                daemon=True,
            )
            for i, matches_chunk in enumerate(matches_chunks)
        ]

        # We want to keep track of the progress of the collocation search
        # since it may take a while.
        process_progress = {
            name: 0.  # Each process is at 0 percent at the beginning
            for name in PROCESS_NAMES[:processes]
        }

        # Start all processes:
        for process in process_list:
            process.start()

        # As long as some processes are still running, wait for their
        # results:
        running = process_list.copy()

        processed_matches = 0

        # The main process has two tasks while its child processes are
        # collocating:
        # 1) Collect their results and yield them to the user
        # 2) Display the progress and estimate the remaining processing time
        while running:
            # Filter out all processes that are dead: they either crashed or
            # completed their task:
            running = [
                process for process in running if process.is_alive()
            ]

            # Get all results from the result queue:
            while not results.empty():
                process, progress, result = results.get()

                # The process might have crashed. To keep the remaining time
                # estimation useful, we exclude the crashed process from the
                # calculation.
                if result is ProcessCrashed:
                    del process_progress[process]
                else:
                    process_progress[process] = progress

                try:
                    nerrors = errors.qsize()
                except NotImplementedError:
                    nerrors = 'unknown'

                self._print_progress(
                    timer.elapsed, process_progress, len(running), nerrors)

                if result is not None:
                    yield result

            # Explicitly free up memory:
            gc.collect()

        for process in process_list:
            process.join()

        if not errors.empty():
            self._error("Some processes terminated due to errors:")

        while not errors.empty():
            error = errors.get()
            msg = '\n'.join([
                "-" * 79,
                error[2],
                "".join(traceback.format_tb(error[1])),
                "-" * 79 + "\n"
            ])
            self._error(msg)

    @staticmethod
    def _print_progress(elapsed_time, process_progress, processes, errors):
        elapsed_time -= timedelta(microseconds=elapsed_time.microseconds)

        if len(process_progress) == 0:
            msg = "-" * 79 + "\n"
            msg += f"100% | {elapsed_time} hours elapsed"
            if errors != "unknown":
                msg += f" | {errors} failed"
            msg += "\n" + "-" * 79 + "\n"
            logger.error(msg)
            return

        progress = sum(process_progress.values()) / len(process_progress)

        try:
            expected_time = elapsed_time * (100 / progress - 1)
            expected_time -= timedelta(
                microseconds=expected_time.microseconds)
        except ZeroDivisionError:
            expected_time = "unknown"

        msg = "-" * 79 + "\n"
        msg += f"{progress:.0f}% | {elapsed_time} hours elapsed, " \
               f"{expected_time} hours left | {processes} proc running"
        if errors != "unknown":
            msg += f", {errors} failed"
        msg += "\n" + "-" * 79 + "\n"
        logger.error(msg)

    @staticmethod
    def _process_caller(
            self, results, errors, name, output, bundle, post_processor,
            post_processor_kwargs, **kwargs):
        """Wrapper around _collocate_matches

        This function is called for each process. It communicates with the
        main process via the result and error queue.

        Result Queue:
            Adds for each collocated file match the process name, its
            progress and the actual results.

        Error Queue:
            If an error is raised, the name of this process and the error
            message are put to this queue.
        """
        self.name = name

        # We keep track of how many file pairs we have already processed to
        # make the error debugging easier. We need the matches in flat form:
        matches = [
            [match[0], secondary]
            for match in kwargs['matches']
            for secondary in match[1]
        ]

        # If we want to bundle the output, we need to collect some contents.
        # The current_bundle_tag stores a certain information for the current
        # bundle (e.g. filename of primary or day of the year). If it changes,
        # the bundle is stored to disk and a new bundle is created.
        cached_data = []
        cached_attributes = {}
        current_bundle_tag = None

        try:
            processed = 0
            collocated_matches = self._collocate_matches(**kwargs)
            for collocations, attributes in collocated_matches:
                match = matches[processed]
                processed += 1
                progress = 100 * processed / len(matches)

                if collocations is None:
                    results.put([name, progress, None])
                    continue

                # The user does not want to bundle anything, therefore just
                # save the current collocations:
                if bundle is None:
                    result = self._save_and_return(
                        collocations, attributes, output,
                        post_processor, post_processor_kwargs
                    )
                    results.put([name, progress, result])
                    continue

                # The user may want to bundle the collocations before writing
                # them to disk, e.g. by their primaries.
                save_cache = self._should_save_cache(
                    bundle, current_bundle_tag, match,
                    to_datetime(collocations.attrs["start_time"])
                )

                if save_cache:
                    result = self._save_and_return(
                        cached_data, cached_attributes, output,
                        post_processor, post_processor_kwargs
                    )
                    results.put([name, progress, result])
                    cached_data = []
                    cached_attributes = {}

                # So far, we have not cached any collocations or we still need
                # to wait before saving them to disk.
                cached_data.append(collocations)
                cached_attributes.update(**attributes)

                if bundle == "primary":
                    current_bundle_tag = match[0].path
                elif bundle == "daily":
                    current_bundle_tag = \
                        to_datetime(collocations.attrs["start_time"]).date()

            # After all iterations, save the last cached data to disk:
            if cached_data:
                result = self._save_and_return(
                    cached_data, cached_attributes, output,
                    post_processor, post_processor_kwargs
                )
                results.put([name, progress, result])
        except Exception as exception:
            # Tell the main process to stop considering this process for the
            # remaining processing time:
            results.put(
                [name, 100., ProcessCrashed]
            )

            self._error("ERROR: I got a problem and terminate!")

            # Build a message that contains all important information for
            # debugging (processed was already incremented, hence the - 1):
            msg = f"Process {name} ({matches[0][0].times[0]} - " \
                  f"{matches[-1][0].times[1]}) failed\n" \
                  f"Failed to collocate {matches[processed - 1][0]} with " \
                  f"{matches[processed - 1][1]}\n"

            # The main process needs to know about this exception!
            error = [
                name, exception.__traceback__,
                msg + "ERROR: " + str(exception)
            ]
            errors.put(error)

            self._error(exception)

            # Finally, raise the exception to terminate this process:
            raise exception

        self._info(f"Finished all {len(matches)} matches")

    def _save_and_return(self, collocations, attributes, output,
                         post_processor, post_processor_kwargs):
        """Save collocations to disk or return them"""

        if isinstance(collocations, list):
            collocations = concat_collocations(
                collocations
            )

        if output is None:
            return collocations, attributes
        else:
            filename = output.get_filename(
                [to_datetime(collocations.attrs["start_time"]),
                 to_datetime(collocations.attrs["end_time"])],
                fill=attributes
            )

            # Apply a post processor function from the user:
            if post_processor is not None:
                if post_processor_kwargs is None:
                    post_processor_kwargs = {}
                collocations = post_processor(
                    collocations, attributes, **post_processor_kwargs
                )

            if collocations is None:
                return None

            self._info(f"Store collocations to\n{filename}")

            # Write the data to the file.
            output.write(collocations, filename)

            return filename

    @staticmethod
    def _should_save_cache(bundle, current_bundle_tag, match, start_time):
        """Return True if the cache should be saved, otherwise False"""
        if current_bundle_tag is None:
            return False
        elif bundle == "primary":
            # Check whether the primary has changed since the last time:
            return current_bundle_tag != match[0].path
        elif bundle == "daily":
            # Has the day changed since last time?
            return current_bundle_tag != start_time.date()

        # In all other cases, the bundle should not be saved yet:
        return False

    def _collocate_matches(
            self, filesets, matches, skip_file_errors, **kwargs
    ):
        """Load file matches and collocate their content

        Yields:
            A tuple of two items: the first is always the current percentage
            of progress. If output is True, the second is only the filename
            of the saved collocations. Otherwise, it is a tuple of
            collocations and their collected
            :class:`~typhon.files.handlers.common.FileInfo` attributes as a
            dictionary.
        """
        # Load all matches in a parallelized queue:
        loaded_matches = filesets[0].align(
            filesets[1], matches=matches, return_info=True, compact=False,
            skip_errors=skip_file_errors,
        )

        for loaded_match in loaded_matches:
            # The FileInfo objects of the matched files:
            files = loaded_match[0][0], loaded_match[1][0]

            # We copy the data from the matches since they might be used for
            # other matches as well:
            primary, secondary = \
                loaded_match[0][1].copy(), loaded_match[1][1].copy()

            self._debug(f"Collocate {files[0].path}\nwith {files[1].path}")
            collocations = self.collocate(
                (filesets[0].name, primary),
                (filesets[1].name, secondary), **kwargs,
            )

            if collocations is None:
                self._debug("Found no collocations!")
                # At least, give the process caller a progress update:
                yield None, None
                continue

            # Check whether the collocation data is compatible and was built
            # correctly:
            check_collocation_data(collocations)

            found = [
                collocations[f"{filesets[0].name}/time"].size,
                collocations[f"{filesets[1].name}/time"].size
            ]

            self._debug(
                f"Found {found[0]} ({filesets[0].name}) and "
                f"{found[1]} ({filesets[1].name}) collocations"
            )

            # Add the names of the processed files:
            for f in range(2):
                if f"{filesets[f].name}/__file" in collocations.variables:
                    continue
                collocations[f"{filesets[f].name}/__file"] = files[f].path

            # Collect the attributes of the input files. The attributes get a
            # prefix, primary or secondary, to allow non-unique names.
            attributes = {
                f"primary.{p}" if f == 0 else f"secondary.{p}": v
                for f, file in enumerate(files)
                for p, v in file.attr.items()
            }

            yield collocations, attributes

    def collocate(
            self, primary, secondary, max_interval=None, max_distance=None,
            bin_factor=1, magnitude_factor=10, tunnel_limit=None, start=None,
            end=None, leaf_size=40
    ):
        """Find collocations between two xarray.Dataset objects

        Collocations are two or more data points that are located close to
        each other in space and/or time.

        Each xarray.Dataset must contain the variables *time*, *lat* and
        *lon*. They must be - if they are coordinates - unique. Otherwise,
        their coordinates must be unique, i.e. they cannot contain duplicated
        values. *time* must be a 1-dimensional array with a
        *numpy.datetime64*-like data type. *lat* and *lon* can be gridded,
        i.e. they can be multi-dimensional. However, they must always share
        the first dimension with *time*. *lat* must be latitudes between
        *-90* (south) and *90* (north) and *lon* must be longitudes between
        *-180* (west) and *180* (east) degrees. See below for examples.

        The collocation search is performed with a fast ball tree
        implementation by scikit-learn. The ball tree is cached and reused
        whenever the data points from `primary` or `secondary` have not
        changed.

        If you want to find collocations between FileSet objects, use
        :meth:`collocate_filesets` instead.

        Args:
            primary: A tuple of a string with the dataset name and a
                xarray.Dataset that fulfills the specifications from above.
                Can be also a xarray.Dataset only, the name is then
                automatically set to *primary*.
            secondary: A tuple of a string with the dataset name and a
                xarray.Dataset that fulfills the specifications from above.
                Can be also a xarray.Dataset only, the name is then
                automatically set to *secondary*.
            max_interval: Either a number as a time interval in seconds, a
                string containing a time with a unit (e.g. *100 minutes*) or
                a timedelta object. This is the maximum time interval between
                two data points. If this is None, the data will be searched
                for spatial collocations only.
            max_distance: Either a number as a length in kilometers or a
                string containing a length with a unit (e.g. *100 meters*).
                This is the maximum distance between two data points to meet
                the collocation criteria. If this is None, the data will be
                searched for temporal collocations only. Either
                `max_interval` or `max_distance` must be given.
            tunnel_limit: Maximum distance in kilometers at which to switch
                from tunnel to haversine distance metric. Per default this
                algorithm uses the tunnel metric, which simply transforms all
                latitudes and longitudes to 3D-cartesian space and calculates
                their euclidean distance. This is faster than the haversine
                metric but produces an error that grows with larger
                distances. When searching for distances exceeding this limit
                (`max_distance` is greater than this parameter), the
                haversine metric is used, which is more accurate but takes
                more time. Default is 1000 kilometers.
            magnitude_factor: Since building new trees is expensive, this
                algorithm tries to reuse the last tree when possible (e.g.
                for data with a fixed grid). However, building the tree with
                the larger dataset and querying it with the smaller dataset
                is faster than vice versa. Depending on which premise is
                followed, the performance may differ in the end. This
                parameter is the factor by which one dataset must be larger
                than the other to throw away an already-built ball tree and
                rebuild it with the larger dataset.
            leaf_size: The size of one leaf in the ball tree. The higher the
                leaf size, the faster the tree building but the slower the
                tree query. The optimal leaf size is dataset-dependent.
                Default is 40.
            bin_factor: When using a temporal criterion via `max_interval`,
                the data will be temporally binned to speed up the search.
                The bin size is `bin_factor` * `max_interval`. Which bin
                factor is best may be dataset-dependent. So this is a
                parameter that you can use to fine-tune the performance.
            start: Limit the collocated data from this start date. Can be
                either a datetime object or a string
                ("YYYY-MM-DD hh:mm:ss"). Year, month and day are required.
                Hours, minutes and seconds are optional. If not given, it is
                datetime.min per default.
            end: End date. Same format as "start". If not given, it is
                datetime.max per default.

        Returns:
            None if no collocations were found. Otherwise, a xarray.Dataset
            with the collocated data in *compact* form. It consists of three
            groups (groups of variables containing */* in their name): the
            *primary*, *secondary* and the *Collocations* group. If you
            passed `primary` or `secondary` with own names, they will be used
            in the output. The *Collocations* group contains information
            about the found collocations. *Collocations/pairs* is a 2xN array
            where N is the number of found collocations. It contains the
            indices of the *primary* and *secondary* data points which are
            collocations. The indices refer to the data points stored in the
            *primary* or *secondary* group. *Collocations/interval* and
            *Collocations/distance* are the intervals and distances between
            the collocations in seconds and kilometers, respectively.
            Collocations in *compact* form are efficient when saving them to
            disk but they might be complicated to use directly. Consider
            applying :func:`~typhon.collocations.common.collapse` or
            :func:`~typhon.collocations.common.expand` on them.

        Examples:

            .. code-block:: python

                # TODO: Update this example!
                import numpy as np
                from typhon.collocations import Collocator

                # Create the data.
primary and secondary can also be # xarray.Dataset objects: primary = { "time": np.arange( "2018-01-01", "2018-01-02", dtype="datetime64[h]" ), "lat": 30.*np.sin(np.linspace(-3.14, 3.14, 24))+20, "lon": np.linspace(0, 90, 24), } secondary = { "time": np.arange( "2018-01-01", "2018-01-02", dtype="datetime64[h]" ), "lat": 30.*np.sin(np.linspace(-3.14, 3.14, 24)+1.)+20, "lon": np.linspace(0, 90, 24), } # Find collocations with a maximum distance of 300 kilometers # and a maximum interval of 1 hour collocator = Collocator() collocated = collocator.collocate( primary, secondary, max_distance="300km", max_interval="1h" ) print(collocated) """ if max_distance is None and max_interval is None: raise ValueError( "Either max_distance or max_interval must be given!" ) if max_interval is not None: max_interval = to_timedelta(max_interval, numbers_as="seconds") # The user can give strings instead of datetime objects: start = datetime.min if start is None else to_datetime(start) end = datetime.max if end is None else to_datetime(end) # Did the user give the datasets specific names? primary_name, primary, secondary_name, secondary = self._get_names( primary, secondary ) # Select the common time period of both datasets and flat them. primary, secondary = self._prepare_data( primary, secondary, max_interval, start, end ) # Maybe there is no data left after selection? if primary is None: return self.empty self.bin_factor = bin_factor self.magnitude_factor = magnitude_factor self.tunnel_limit = tunnel_limit self.leaf_size = leaf_size timer = Timer().start() # We cannot allow NaNs in the time, lat or lon fields not_nans1 = self._get_not_nans(primary) not_nans2 = self._get_not_nans(secondary) # Retrieve the important fields from the data. To avoid any overhead by # xarray, we use the plain numpy.arrays and do not use the isel method # (see https://github.com/pydata/xarray/issues/2227). We rather use # index arrays that we use later to select the rest of the data lat1 = primary.lat.values[not_nans1] lon1 = primary.lon.values[not_nans1] time1 = primary.time.values[not_nans1] lat2 = secondary.lat.values[not_nans2] lon2 = secondary.lon.values[not_nans2] time2 = secondary.time.values[not_nans2] original_indices = [ np.arange(primary.time.size)[not_nans1], np.arange(secondary.time.size)[not_nans2] ] self._debug(f"{timer} for filtering NaNs") # We can search for spatial collocations (max_interval=None), temporal # collocations (max_distance=None) or both. if max_interval is None: # Search for spatial collocations only: pairs, distances = self.spatial_search( lat1, lon1, lat2, lon2, max_distance, ) intervals = self._get_intervals( time1[pairs[0]], time2[pairs[1]] ) return self._create_return( primary, secondary, primary_name, secondary_name, self._to_original(pairs, original_indices), intervals, distances, max_interval, max_distance ) elif max_distance is None: # Search for temporal collocations only: pairs, intervals = self.temporal_search( time1, time2, max_interval ) distances = self._get_distances( lat1[pairs[0]], lon1[pairs[0]], lat2[pairs[1]], lon2[pairs[1]], ) return self._create_return( primary, secondary, primary_name, secondary_name, self._to_original(pairs, original_indices), intervals, distances, max_interval, max_distance ) # The user wants to use both criteria and search for spatial and # temporal collocations. At first, we do a coarse temporal pre-binning # so that we only search for collocations between points that might # also be temporally collocated. 
Unfortunately, this also produces an # overhead that is only negligible if we have a lot of data: data_magnitude = time1.size * time2.size if data_magnitude > 100_0000: # We have enough data, do temporal pre-binning! pairs, distances = self.spatial_search_with_temporal_binning( {"lat": lat1, "lon": lon1, "time": time1}, {"lat": lat2, "lon": lon2, "time": time2}, max_distance, max_interval ) else: # We do not have enough data to justify that whole pre-binning. # Simply do it directly! pairs, distances = self.spatial_search( lat1, lon1, lat2, lon2, max_distance, ) # Did we find any spatial collocations? if not pairs.any(): return self.empty # Check now whether the spatial collocations really pass the temporal # condition: passed_temporal_check, intervals = self._temporal_check( time1[pairs[0]], time2[pairs[1]], max_interval ) # Return only the values that passed the time check return self._create_return( primary, secondary, primary_name, secondary_name, self._to_original( pairs[:, passed_temporal_check], original_indices), intervals, distances[passed_temporal_check], max_interval, max_distance ) @staticmethod def _to_original(pairs, original_indices): return np.array([ original_indices[i][pair_array] for i, pair_array in enumerate(pairs) ]) @staticmethod def _get_names(primary, secondary): # Check out whether the user gave the primary and secondary any name: if isinstance(primary, (tuple, list)): primary_name, primary = primary else: primary_name = "primary" if isinstance(secondary, (tuple, list)): secondary_name, secondary = secondary else: secondary_name = "secondary" return primary_name, primary, secondary_name, secondary def _prepare_data(self, primary, secondary, max_interval, start, end): """Prepare the data for the collocation search This method selects the time period which should be searched for collocations and flats the input datasets if they have gridded variables. Returns: The datasets constraint to the common time period, sorted by time and flattened. If no common time period could be found, two None objects are returned. """ if max_interval is not None: timer = Timer().start() # We do not have to collocate everything, just the common time # period expanded by max_interval and limited by the global start # and end parameter: primary_period, secondary_period = self._get_common_time_period( primary, secondary, max_interval, start, end ) # Check whether something is left: if not primary_period.size or not secondary_period.size: return None, None # We need everything sorted by the time, otherwise xarray's stack # method makes problems: primary_period = primary_period.sortby(primary_period) primary_dim = primary_period.dims[0] secondary_period = secondary_period.sortby(secondary_period) secondary_dim = secondary_period.dims[0] # Select the common time period and while using sorted indices: primary = primary.sel(**{primary_dim: primary_period[primary_dim]}) secondary = secondary.sel( **{secondary_dim: secondary_period[secondary_dim]} ) # Check whether something is left: if not primary_period.size or not secondary_period.size: return None, None self._debug(f"{timer} for selecting common time period") # Flat the data: For collocating, we need a flat data structure. # Fortunately, xarray provides the very convenient stack method # where we can flat multiple dimensions to one. Which dimensions do # we have to stack together? We need the fields *time*, *lat* and # *lon* to be flat. So we choose their dimensions to be stacked. 
timer = Timer().start() primary = self._flat_to_main_coord(primary) secondary = self._flat_to_main_coord(secondary) self._debug(f"{timer} for flatting data") return primary, secondary @staticmethod def _get_common_time_period( primary, secondary, max_interval, start, end): max_interval = pd.Timedelta(max_interval) # We want to select a common time window from both datasets, # aligned to the primary's time coverage. Because xarray has a # very annoying bug in time retrieving # (https://github.com/pydata/xarray/issues/1240), this is a # little bit cumbersome: common_start = max( start, pd.Timestamp(primary.time.values.min().item(0)).tz_localize(None) - max_interval, pd.Timestamp(secondary.time.values.min().item(0)).tz_localize(None) - max_interval ) common_end = min( end, pd.Timestamp(primary.time.values.max().item(0)).tz_localize(None) + max_interval, pd.Timestamp(secondary.time.values.max().item(0)).tz_localize(None) + max_interval ) primary_period = primary.time.where( (primary.time.values >= np.datetime64(common_start)) & (primary.time.values <= np.datetime64(common_end)) ).dropna(primary.time.dims[0]) secondary_period = secondary.time.where( (secondary.time.values >= np.datetime64(common_start)) & (secondary.time.values <= np.datetime64(common_end)) ).dropna(secondary.time.dims[0]) return primary_period, secondary_period @staticmethod def _get_not_nans(dataset): return dataset.lat.notnull().values & dataset.lon.notnull().values @staticmethod def _flat_to_main_coord(data): """Make the dataset flat despite of its original structure We need a flat dataset structure for the collocation algorithms, i.e. time, lat and lon are not allowed to be gridded, they must be 1-dimensional and share the same dimension (namely *collocation*). There are three groups of original data structures that this method can handle: * linear (e.g. ship track measurements): time, lat and lon have the same dimension and are all 1-dimensional. Fulfills all criteria from above. No action has to be taken. * gridded_coords (e.g. instruments on satellites with gridded swaths): lat or lon are gridded (they have multiple dimensions). Stack the coordinates of them together to a new shared dimension. Args: data: xr.Dataset object Returns: A xr.Dataset where time, lat and lon are aligned on one shared dimension. """ # Flat: shared_dims = list( set(data.time.dims) | set(data.lat.dims) | set(data.lon.dims) ) # Check whether the dataset is flat (time, lat and lon share the same # dimension size and are 1-dimensional) if len(shared_dims) == 1: if shared_dims[0] in ("time", "lat", "lon"): # One of the key variables is the main dimension! Change this: data["collocation"] = shared_dims[0], np.arange( data[shared_dims[0]].size) data = data.swap_dims({shared_dims[0]: "collocation"}) data = data.reset_coords(shared_dims[0]) # So far, collocation is a coordinate. We want to make it to a # dimension, so drop its values: return data.drop_vars("collocation") return data.rename({ shared_dims[0]: "collocation" }) # The coordinates are gridded: # Some field might be more deeply stacked than another. Choose the # dimensions of the most deeply stacked variable: dims = max( data["time"].dims, data["lat"].dims, data["lon"].dims, key=lambda x: len(x) ) # We want to be able to retrieve additional fields after collocating. # Therefore, we give each dimension that is no coordinate yet a value # to use them as indices later. 
for dim in dims: if dim not in data.coords: data[dim] = dim, np.arange(data.dims[dim]) # We assume that coordinates must be unique! Otherwise, we would have # to use this ugly work-around: # Replace the former coordinates with new coordinates that have unique # values. # new_dims = [] # for dim in dims: # new_dim = f"__replacement_{dim}" # data[new_dim] = dim, np.arange(data.dims[dim]) # data = data.swap_dims({dim: new_dim}) # new_dims.append(new_dim) return data.stack(collocation=dims) def _create_return( self, primary, secondary, primary_name, secondary_name, original_pairs, intervals, distances, max_interval, max_distance ): if not original_pairs.any(): return self.empty pairs = [] output = {} names = [primary_name, secondary_name] for i, dataset in enumerate([primary, secondary]): # name of the current dataset (primary or secondary) name = names[i] # These are the indices of the points in the original data that # have collocations. We remove the duplicates since we want to copy # the required data only once. They are called original_indices # because they are the indices in the original data array: original_indices = pd.unique(original_pairs[i]) # After selecting the collocated data, the original indices cannot # be applied any longer. We need new indices that indicate the # pairs in the collocated data. new_indices = np.empty(original_indices.max() + 1, dtype=int) new_indices[original_indices] = np.arange( original_indices.size ) collocation_indices = new_indices[original_pairs[i]] # Save the collocation indices in the metadata group: pairs.append(collocation_indices) output[names[i]] = dataset.isel(collocation=original_indices) # We have to convert the MultiIndex to a normal index because we # cannot store it to a file otherwise. We can convert it by simply # setting it to new values, but we are losing the sub-level # coordinates (the dimenisons that we stacked to create the # multi-index in the first place) with that step. Hence, we store # the sub-level coordinates in additional dataset to preserve them. main_coord_is_multiindex = isinstance( output[name].get_index("collocation"), pd.core.indexes.multi.MultiIndex ) if main_coord_is_multiindex: stacked_dims_data = xr.merge([ xr.DataArray( output[name][dim].values, name=dim, dims=["collocation"] ) for dim in output[name].get_index("collocation").names ]) # Okay, actually we want to get rid of the main coordinate. It # should stay as a dimension name but without own labels. I.e. we # want to drop it. Because it still may a MultiIndex, we cannot # drop it directly but we have to set it to something different. output[name]["collocation"] = \ np.arange(output[name]["collocation"].size) if main_coord_is_multiindex: # Now, since we unstacked the multi-index, we can add the # stacked dimensions back to the dataset: output[name] = xr.merge( [output[name], stacked_dims_data], ) # For the flattening we might have created temporal variables, # also collect them to drop: vars_to_drop = [ var for var in output[name].variables.keys() if var.startswith("__replacement_") ] output[name] = output[name].drop_vars([ f"collocation", *vars_to_drop ]) # Merge all datasets into one: output = add_xarray_groups( xr.Dataset(), **output ) # This holds the collocation information (pairs, intervals and # distances): metadata = xr.Dataset() metadata["pairs"] = xr.DataArray( np.array(pairs, dtype=int), dims=("group", "collocation"), attrs={ "max_interval": f"Max. interval in secs: {max_interval}", "max_distance": f"Max. 
distance in kilometers: {max_distance}",
                "primary": primary_name,
                "secondary": secondary_name,
            }
        )
        metadata["interval"] = xr.DataArray(
            intervals, dims=("collocation", ),
            attrs={
                "max_interval": f"Max. interval in secs: {max_interval}",
                "max_distance": f"Max. distance in kilometers: {max_distance}",
                "primary": primary_name,
                "secondary": secondary_name,
            }
        )
        metadata["distance"] = xr.DataArray(
            distances, dims=("collocation",),
            attrs={
                "max_interval": f"Max. interval in secs: {max_interval}",
                "max_distance": f"Max. distance in kilometers: {max_distance}",
                "primary": primary_name,
                "secondary": secondary_name,
                "units": "kilometers",
            }
        )
        metadata["group"] = xr.DataArray(
            [primary_name, secondary_name], dims=("group",),
            attrs={
                "max_interval": f"Max. interval in secs: {max_interval}",
                "max_distance": f"Max. distance in kilometers: {max_distance}",
            }
        )

        output = add_xarray_groups(
            output, Collocations=metadata
        )

        start = pd.Timestamp(
            output[primary_name+"/time"].values.min().item(0)
        )
        end = pd.Timestamp(
            output[primary_name+"/time"].values.max().item(0)
        )
        output.attrs = {
            "start_time": str(start),
            "end_time": str(end),
        }

        return output

    @staticmethod
    def get_meta_group():
        return "Collocations"

    def spatial_search_with_temporal_binning(
            self, primary, secondary, max_distance, max_interval
    ):
        # For time-binning purposes, pandas DataFrame objects are a good
        # choice:
        primary = pd.DataFrame(primary).set_index("time")
        secondary = pd.DataFrame(secondary).set_index("time")

        # Now let's split the two datasets along their time coordinate so
        # we avoid searching for spatial collocations that do not fulfill
        # the temporal condition in the first place. However, the overhead
        # of the finding algorithm must be considered too (for example the
        # BallTree creation time). This can be adjusted by the parameter
        # bin_factor:
        bin_duration = self.bin_factor * max_interval

        # The binning is more efficient if we use the largest dataset as
        # primary:
        swapped_datasets = secondary.size > primary.size
        if swapped_datasets:
            primary, secondary = secondary, primary

        # Let's bin the primaries along their time axis and search for the
        # corresponding secondary bins:
        bin_pairs = (
            self._bin_pairs(start, chunk, primary, secondary, max_interval)
            for start, chunk in primary.groupby(pd.Grouper(freq=bin_duration))
        )

        # Add arguments to the bins (we need them for the spatial search
        # function):
        bins_with_args = (
            [self, max_distance, *bin_pair]
            for bin_pair in bin_pairs
        )

        # Unfortunately, a first attempt parallelizing this using threads
        # worsened the performance. Update: The BallTree code from
        # scikit-learn does not release the GIL. But apparently there will
        # be a new version coming that solves this problem, see this
        # scikit-learn issue:
        # https://github.com/scikit-learn/scikit-learn/pull/10887. So stay
        # tuned!
        # threads = 1 if self.threads is None else self.threads

        t = Timer(verbose=False).start()
        # with ThreadPoolExecutor(max_workers=2) as pool:
        #     results = list(pool.map(
        #         Collocator._spatial_search_bin, bins_with_args
        #     ))
        results = list(map(
            Collocator._spatial_search_bin, bins_with_args
        ))
        self._debug(f"Collocated {len(results)} bins in {t.stop()}")

        pairs_list, distances_list = zip(*results)
        pairs = np.hstack(pairs_list)

        # No collocations were found.
if not pairs.any(): return self.no_pairs, self.no_distances # Stack the rest of the results together: distances = np.hstack(distances_list) if swapped_datasets: # Swap the rows of the results pairs[[0, 1]] = pairs[[1, 0]] return pairs.astype("int64"), distances @staticmethod def _bin_pairs(chunk1_start, chunk1, primary, secondary, max_interval): """""" chunk2_start = chunk1_start - max_interval chunk2_end = chunk1.index.max() + max_interval offset1 = primary.index.searchsorted(chunk1_start) offset2 = secondary.index.searchsorted(chunk2_start) chunk2 = secondary.loc[chunk2_start:chunk2_end] return offset1, chunk1, offset2, chunk2 @staticmethod def _spatial_search_bin(args): self, max_distance, offset1, data1, offset2, data2 = args if data1.empty or data2.empty: return self.no_pairs, self.no_distances pairs, distances = self.spatial_search( data1["lat"].values, data1["lon"].values, data2["lat"].values, data2["lon"].values, max_distance ) pairs[0] += offset1 pairs[1] += offset2 return pairs, distances def spatial_search(self, lat1, lon1, lat2, lon2, max_distance): # Finding collocations is expensive, therefore we want to optimize it # and have to decide which points to use for the index building. index_with_primary = self._choose_points_to_build_index( [lat1, lon1], [lat2, lon2], ) self.index_with_primary = index_with_primary if index_with_primary: build_points = lat1, lon1 query_points = lat2, lon2 else: build_points = lat2, lon2 query_points = lat1, lon1 self.index = self._build_spatial_index(*build_points) pairs, distances = self.index.query(*query_points, r=max_distance) # No collocations were found. if not pairs.any(): # We return empty arrays to have consistent return values: return self.no_pairs, self.no_distances if not index_with_primary: # The primary indices should be in the first row, the secondary # indices in the second: pairs[[0, 1]] = pairs[[1, 0]] return pairs, distances def _build_spatial_index(self, lat, lon): # Find out whether the cached index still works with the new points: if self._spatial_is_cached(lat, lon): self._debug("Spatial index is cached and can be reused") return self.index return GeoIndex(lat, lon, leaf_size=self.leaf_size) def _spatial_is_cached(self, lat, lon): """Return True if the cached ball tree is still applicable to the new data""" if self.index is None: return False try: return np.allclose(lat, self.index.lat) \ & np.allclose(lon, self.index.lon) except ValueError: # The shapes are different return False def _choose_points_to_build_index(self, primary, secondary): """Choose which points should be used for tree building This method helps to optimize the performance. Args: primary: Converted primary points secondary: Converted secondary points Returns: True if primary points should be used for tree building. False otherwise. """ # There are two options to optimize the performance: # A) Cache the index and reuse it if either the primary or the # secondary points have not changed (that is the case for data with a # fixed grid). Building the tree is normally very expensive, so it # should never be done without a reason. # B) Build the tree with the larger set of points and query it with the # smaller set. # Which option should be used if A and B cannot be applied at the same # time? If the magnitude of one point set is much larger (by # `magnitude factor` larger) than the other point set, we strictly # follow B. Otherwise, we prioritize A. 
        if primary[0].size > secondary[0].size * self.magnitude_factor:
            # Use primary points
            return True
        elif secondary[0].size > primary[0].size * self.magnitude_factor:
            # Use secondary points
            return False

        # Apparently, neither dataset is much larger than the other. So just
        # check whether we still have a cached tree. If we used the primary
        # points last time and they still fit, use them again:
        if self.index_with_primary and self._spatial_is_cached(*primary):
            return True

        # Check the same for the secondary data:
        if not self.index_with_primary and self._spatial_is_cached(*secondary):
            return False

        # Otherwise, just use the larger dataset:
        return primary[0].size > secondary[0].size

    def temporal_search(self, primary, secondary, max_interval):
        raise NotImplementedError("Not yet implemented!")
        # return self.no_pairs, self.no_intervals

    def _temporal_check(
            self, primary_time, secondary_time, max_interval
    ):
        """Check whether the current collocations fulfill temporal conditions

        Returns:
            A boolean mask of the collocations that passed the check and the
            time intervals of the collocations that passed.
        """
        intervals = self._get_intervals(primary_time, secondary_time)

        # Check whether the time differences are less than the temporal
        # boundary:
        passed_time_check = intervals < max_interval

        return passed_time_check, intervals[passed_time_check]

    @staticmethod
    def _get_intervals(time1, time2):
        return np.abs((time1 - time2)).astype("timedelta64[s]")

    @staticmethod
    def _get_distances(lat1, lon1, lat2, lon2):
        return great_circle_distance(lat1, lon1, lat2, lon2)


def concat_collocations(collocations):
    """Concatenate compact collocations

    Compact collocations cannot be concatenated directly because the indices
    in *Collocations/pairs* would no longer be correct afterwards. This
    concatenation function fixes that problem.

    Args:
        collocations: A list of xarray.Dataset objects with compact
            collocations.

    Returns:
        One xarray.Dataset object
    """
    # We need to increment the pair indices when concatenating the datasets
    primary = collocations[0]["Collocations/group"].item(0)
    secondary = collocations[0]["Collocations/group"].item(1)
    primary_size = 0
    secondary_size = 0
    collocation_coord = {
        "Collocations": "Collocations/collocation",
        primary: f"{primary}/collocation",
        secondary: f"{secondary}/collocation",
    }

    # Collect all collocations for each single group:
    groups = defaultdict(list)
    for obj in collocations:
        for group, data in get_xarray_groups(obj).items():
            if group == "Collocations":
                # Correct the indices:
                data["Collocations/pairs"][0, :] += primary_size
                data["Collocations/pairs"][1, :] += secondary_size
                data = data.drop_vars("Collocations/group")
            groups[group].append(data)

        primary_size += obj.dims[f"{primary}/collocation"]
        secondary_size += obj.dims[f"{secondary}/collocation"]

    starts = []
    ends = []
    for group, data_list in groups.items():
        groups[group] = xr.concat(
            data_list,
            dim=collocation_coord[group]
        )

    start = pd.Timestamp(groups[primary][primary+"/time"].min().item(0))
    end = pd.Timestamp(groups[primary][primary+"/time"].max().item(0))
    merged = xr.merge(groups.values())
    merged.attrs = {
        "start_time": str(start),
        "end_time": str(end),
    }
    merged["Collocations/group"] = collocations[0]["Collocations/group"]
    return merged


class InvalidCollocationData(Exception):
    """Error when trying to collapse / expand invalid collocation data
    """

    def __init__(self, message, *args):
        Exception.__init__(self, message, *args)


def check_collocation_data(dataset):
    """Check whether the dataset fulfills the standard of collocated data

    Args:
        dataset: A xarray.Dataset object

    Raises:
        An InvalidCollocationData error if the dataset did not pass the test.
""" mandatory_fields = ["Collocations/pairs", "Collocations/group"] for mandatory_field in mandatory_fields: if mandatory_field not in dataset.variables: raise InvalidCollocationData( f"Could not find the field '{mandatory_field}'!" )
atmtools/typhon
typhon/collocations/collocator.py
Python
mit
60,801
[ "DIRAC", "Dalton" ]
2335e4f07f2dc7420c411dc8853be877a06397c0f062d77385fbf4010f6f8025
#!/usr/bin/python3

#This program produces temperature vs density, and pressure vs temperature phase diagrams
#from data stored in *best.dat (or *best_tensor.dat) data files generated by figs/new-melting.cpp
#and found in deft/papers/fuzzy-fmt/data/phase-diagram (edit later - currently files in newdata/phase-diagram and newdata_tensor/phasediagram)

#NOTE: Run this plot script from directory deft/papers/fuzzy-fmt
#with command ./plot-phasediagram.py [Optional: --tensor]

from __future__ import print_function, division

import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os, glob
import argparse
import sys

parser = argparse.ArgumentParser("Plots phase diagrams: p-vs-T, p-vs-n, and T-vs-n.")
parser.add_argument('--tensor', action='store_true', help='use tensor weight')
args = parser.parse_args()

p_at_freezing = []    #pressure at freezing (intersection point between homogeneous and crystal plots)
n_homogeneous_at_freezing = []
n_crystal_at_freezing = []
kT_homogeneous_at_freezing = []
kT_crystal_at_freezing = []
kT_in_plot = []
kT_data = []
density_data = []     #index corresponds to kT
pressure_data = []    #index corresponds to kT

#for kT in np.arange(0.1, 1.15, 0.05):   #data files with these temperatures will be plotted
#for kT in np.arange(0.1, 2.05, 0.05):   #original
#for kT in np.arange(0.4, 2.05, 0.05):   # new normal
#for kT in (1, 2, 4, 6, 8, 10, 12, 14, 16, 18):
#for kT in (0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2):   #for paper
#for kT in (1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 26, 28, 30, 32, 34, 35, 37, 38):   #use for kT from 1 to 38 and 0.5
for kT in (0.5, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 26, 28, 30, 33, 34, 37, 38):   #THESIS-AS for kT from 1 to 38 and 0.5 (35 does not work)
#for kT in (5, 7, 9):   #THESIS-p-vs-n plot
#for kT in (13, 15, 17, 19, 21, 26, 33):   #THESIS-P-vs-V/atom plot for kT from 1 to 38 and 0.5 (35 does not work)
#for kT in (1, 3, 5, 7, 9, 11):   #THESIS-MC for kT from 1 to 38 and 0.5 (35 does not work)
#for kT in (2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38):   #use for kT from 1 to 38 and 0.5
#for kT in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 38):   #use for kT from 1 to 38 and 0.5
#for kT in np.arange(0.1, 1.05, 0.05):   #data files with these temperatures will be plotted DEBUG
                                         #values above and below this range do not currently work DEBUG
    n = []
    invn = []
    hfe = []
    cfe = []

    if args.tensor :
        #files = sorted(list(glob.glob('data/phase-diagram/kT%.3f_n*_best_tensor.dat' % kT)))
        files = sorted(list(glob.glob('newdata_tensor/phase-diagram/kT%.3f_n*_best_tensor.dat' % kT)))   #remove 2 at the end of phase-diagram when done comparing new data
    else :
        files = sorted(list(glob.glob('newdata/phase-diagram/kT%.3f_n*_best.dat' % kT)))   #remove 2 at the end of phase-diagram when done comparing new data
        #files = sorted(list(glob.glob('data/phase-diagram/kT%.3f_n*_best.dat' % kT)))
    #files = sorted(list(glob.glob('crystallization/kT%.3f_n*_best.dat' % kT)))
    if len(files) == 0:
        continue
    for f in files:
        data = np.loadtxt(f)
        n.append(data[1])      #density
        invn.append(1/data[1])
        hfe.append(data[4])    #homogeneous free energy/atom
        cfe.append(data[5])    #crystal free energy/atom
    hfe = np.array(hfe)
    cfe = np.array(cfe)
    invn = np.array(invn)
    n = np.array(n)

    functions = np.vstack((np.ones_like(invn), invn**-1, invn**-2, invn**-3, invn**-4, invn**-5, invn**-6)).T
    pressure_functions = 
np.vstack((np.zeros_like(invn), invn**-2, 2*invn**-3, 3*invn**-4, 4*invn**-5, 5*invn**-6, 6*invn**-7)).T A = np.linalg.lstsq(functions, cfe) coeff = A[0] #print('residuals', A[1]) #print('coeff', coeff) fit_cfe = np.dot(functions, coeff) dhfe=np.diff(hfe) #Caution: depends on order of data files! dcfe=np.diff(cfe) #Caution: depends on order of data files! dinvn=np.diff(invn) #Caution: depends on order of data files! mid_invn=invn[0:len(invn)-1]+dinvn/2 hpressure = -(dhfe/dinvn) #for fixed N and Te cpressure = -(dcfe/dinvn) #for fixed N and Te fit_p = np.dot(pressure_functions, coeff) mid_hfe = 0.5*(hfe[1:] + hfe[:-1]) mid_cfe = 0.5*(cfe[1:] + cfe[:-1]) mid_h_gibbs = mid_hfe + mid_invn*hpressure mid_c_gibbs = mid_cfe + mid_invn*cpressure fit_c_gibbs = fit_cfe + invn*fit_p #Find pressure at point of intersection def find_first_intersection(p1, g1, p2, g2): for i in range(1,len(g1)-1): m1=(g1[i+1]-g1[i])/(p1[i+1]-p1[i]) for j in range(1,len(g2)-1): m2=(g2[j+1]-g2[j])/(p2[j+1]-p2[j]) #print(m2) #debug ASK! if m1!=m2 : P_inter=(g2[j] - m2*p2[j] -g1[i] + m1*p1[i])/(m1-m2) if p1[i] < P_inter < p1[i+1] and p2[j] < P_inter < p2[j+1]: g_inter=m1*P_inter+g1[i]-m1*p1[i] if g1[i] < g_inter < g1[i+1] and g2[j] < g_inter < g2[j+1] : return P_inter, g_inter p_inter, g_inter = find_first_intersection(hpressure, mid_h_gibbs, cpressure, mid_c_gibbs) pf_inter, gf_inter = find_first_intersection(hpressure, mid_h_gibbs, fit_p, fit_c_gibbs) #Find homogeneous and crystal densities at p_inter def find_densities(p_inter, pressure, invn): for i in range(1,len(pressure)-1): if pressure[i] > p_inter : pressureabove=pressure[i] invnabove=invn[i] pressurebelow=pressure[i-1] invnbelow=invn[i-1] m=(pressureabove-pressurebelow)/(invnabove-invnbelow) invn_inter=invnabove-((pressureabove-p_inter)/m) return invn_inter invnh=find_densities(p_inter, hpressure, mid_invn) invnc=find_densities(p_inter, cpressure, mid_invn) p_at_freezing.append(p_inter) n_homogeneous_at_freezing.append(1/invnh) n_crystal_at_freezing.append(1/invnc) # compute the actual physical pressure as a function of density, and skip over coexistence actual_pressure = [] actual_density = [] for i in range(len(mid_invn)): if hpressure[i] >= p_inter: break # if the pressure is too high, then we should just stop, since we have left the fluid actual_pressure.append(hpressure[i]) actual_density.append(1/mid_invn[i]) actual_pressure.append(p_inter) actual_density.append(1/invnh) actual_pressure.append(p_inter) actual_density.append(1/invnc) for i in range(len(mid_invn)): if cpressure[i] < 0 and mid_invn[i] <= invnc: break # when the pressure is negative, we know we are in the crazy part where our dft fails. 
if cpressure[i] > p_inter: actual_pressure.append(cpressure[i]) actual_density.append(1/mid_invn[i]) actual_pressure = np.array(actual_pressure) actual_density = np.array(actual_density) #print (kT, p_inter, 1/invnh, 1/invnc) #Use >> phase_diagram_data.dat (or phase_diagram_data-tensor.dat) to store data for reference kT_data.append(kT) #holds all values of kT in a list density_data.append(actual_density) pressure_data.append(actual_pressure) n_homogeneous_at_freezing = np.array(n_homogeneous_at_freezing) n_crystal_at_freezing = np.array(n_crystal_at_freezing) p_at_freezing = np.array(p_at_freezing) plt.figure('T-vs-n at fixed P') plt.fill_betweenx(kT_data, n_homogeneous_at_freezing, n_crystal_at_freezing, color='#eeeeee') #Plot T vs n at constant P #for p in [2,5,10,20]: #paper for p in [20, 60, 100, 200, 600, 1000, 2000, 6000]: #use for kT from 1 to 38 and 0.5 n_mid_at_p_list = [] kT_at_p_list = [] for i in range(0, len(kT_data)) : #number of temperatures kT for j in range(0, len(density_data[i])-1) : #number of elements of n at some kT if pressure_data[i][j] < p < pressure_data[i][j+1] : phi = pressure_data[i][j+1] plo = pressure_data[i][j] nhi = density_data[i][j+1] nlo = density_data[i][j] n_mid_at_p_list.append((nlo*(phi - p) + nhi*(p - plo))/(phi - plo)) kT_at_p_list.append(kT_data[i]) plt.plot(n_mid_at_p_list, kT_at_p_list, label= 'P=%g' % p) #plt.title("Temperature vs Number Density at fixed Pressure") plt.legend(loc='best') plt.xlabel('n*') plt.ylabel('T*') # - OR - uncomment the plot you want #Plot n vs T at constant P #plt.plot(kT_at_p_list, n_mid_at_p_list, '.-', label= 'P=%g' % p) #plt.title("Number Density vs Temperature at fixed Pressure") #plt.legend(loc='best') #plt.ylabel('Number Density') #plt.xlabel('Temperature') plt.figure('p-vs-n at fixed T') plt.fill_betweenx(p_at_freezing, n_homogeneous_at_freezing, n_crystal_at_freezing, color='#eeeeee') for i in range(len(kT_data)): if kT_data[i] in [0.1, 0.2, 0.5, 1.0] or True: #Plot P vs n at constant kT plt.plot(density_data[i], pressure_data[i], label= 'kT=%g' % kT_data[i]) #plt.title("Pressure vs Number Density at kT") plt.legend(loc='best') #plt.ylim(0, 26) #plt.ylim(0, 500) #plt.ylim(0, 45) #paper plt.ylim(0, 1000) #use for kT from 1 to 38 and 0.5 #plt.xlim(0, 1.1) #plt.xlim(0, 1.8) #plt.xlim(0, 1.1) #paper plt.xlim(0.65, 1.8) #use for kT from 1 to 38 and 0.5 plt.xlabel('n*') plt.ylabel('p*') plt.figure('p-vs-V at fixed T') #Plot P vs 1/n (or V) at constant kT plt.fill_betweenx(p_at_freezing, 1/n_homogeneous_at_freezing, 1/n_crystal_at_freezing, color='#eeeeee') for i in range(len(kT_data)): if kT_data[i] in [0.1, 0.2, 0.5, 1.0] or True: plt.plot(1/density_data[i], pressure_data[i], label= 'kT=%g' % kT_data[i]) #plt.title("Pressure vs volume at kT") plt.legend(loc='best') #plt.ylim(0, 26) #plt.xlim(0.95, 1.6) plt.ylim(0, 6000) plt.xlim(0.45, 0.7) plt.xlabel('Volume per atom') plt.ylabel('p*') plt.figure('p-vs-T at fixed n') #--------------NEW #Plot P vs T at constant n #for n in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]: #densities to show on the plot #for n in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1, 1.05, 1.1]: #densities to show on the plot #for n in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]: #densities to show on the plot - paper #for n in [0.7, 0.8, 0.9, 1, 1.1, 1.2, 1.4, 1.6]: #densities to show on the plot - use for kT from 1 to 38 and 0.5 for n in [0.7, 0.8, 0.9, 1, 1.1, 1.2]: #densities to show on the plot - use for kT from 1 to 38 and 0.5 p_mid_at_n_list = [] 
kT_at_n_list = [] for i in range(0, len(kT_data)) : #number of temperatures kT for j in range(0, len(pressure_data[i])-1) : #number of elements of P at some kT if density_data[i][j] < n < density_data[i][j+1] : phi = pressure_data[i][j+1] plo = pressure_data[i][j] nhi = density_data[i][j+1] nlo = density_data[i][j] p_mid_at_n_list.append((plo*(nhi - n) + phi*(n - nlo))/(nhi - nlo)) kT_at_n_list.append(kT_data[i]) #plt.plot(kT_at_n_list, p_mid_at_n_list, '.-', label= 'n=%g' % n) plt.plot(kT_at_n_list, p_mid_at_n_list, label= 'n=%g' % n) #plt.title("Pressure vs Temperature at fixed n") plt.legend(loc='best') #plt.ylim(0, 2500) plt.ylabel('p*') plt.xlabel('T*') # - OR - uncomment the plot you want ##Plot T vs P at constant n #plt.plot(kT_at_n_list, p_mid_at_n_list, '.-', label= 'n=%g' % n) #plt.title("Temperature vs Pressure at fixed n") #plt.legend(loc='best') #plt.xlabel('Pressure') #plt.ylabel('Temperature') #--------------end NEW plt.figure('Phase Diagram of T vs n') #Temperature vs Density Phase Diagram plt.plot(n_homogeneous_at_freezing, kT_data, label='liquid', color='red') plt.plot(n_crystal_at_freezing, kT_data, label='solid', color='blue') #plt.fill_betweenx(kT_data, .1, n_homogeneous_at_freezing, color='red') #paper plt.fill_betweenx(kT_data, .6, n_homogeneous_at_freezing, color='red') #use THESIS-AS for kT from 1 to 38 and 0.5 #plt.fill_betweenx(kT_data, .4, n_homogeneous_at_freezing, color='red') #use THESIS-MC for kT from 1 to 38 and 0.5 plt.fill_betweenx(kT_data, n_homogeneous_at_freezing, n_crystal_at_freezing, color='gray') #plt.fill_betweenx(kT_data, n_crystal_at_freezing, 1.6, color='blue') #plt.fill_betweenx(kT_data, n_crystal_at_freezing, 1.8, color='blue') #paper plt.fill_betweenx(kT_data, n_crystal_at_freezing, 2.14, color='blue') #use for kT from 1 to 38 and 0.5 #plt.title("Temperature vs Number Density") #plt.legend(loc='best') plt.xlabel('n*') plt.ylabel('T*') ##plt.plot([0.88, 0.90, 0.91, 0.92, 1.04, 1.12],[0.7, 0.8, 0.9, 1.0, 2.0, 3.0], label='chris_l', color='green') ##plt.plot([0.96, 0.98, 0.99, 1.00, 1.11, 1.19],[0.7, 0.8, 0.9, 1.0, 2.0, 3.0], label='chris_s', color='green') #plt.plot([0.88, 0.90, 0.91, 0.92, 1.04, 1.12, 1.24, 1.44],[0.7, 0.8, 0.9, 1.0, 2.0, 3,5,10], label='chris_l', color='green') #plt.plot([0.96, 0.98, 0.99, 1.00, 1.11, 1.19, 1.31, 1.51],[0.7, 0.8, 0.9, 1.0, 2.0, 3, 5, 10], label='chris_s', color='green') #plt.plot([0.88, 0.90, 0.91, 0.92, 1.04, 1.12, 1.24, 1.44],[0.7, 0.8, 0.9, 1.0, 2.0, 3, 5, 10], label='MC_l', color='yellow') #plt.plot([0.96, 0.98, 0.99, 1.00, 1.11, 1.19, 1.31, 1.51],[0.7, 0.8, 0.9, 1.0, 2.0, 3, 5, 10], label='MC_s', color='yellow') plt.plot([0.87, 0.91, 0.95, 1.07, 1.17, 1.29, 1.41, 1.51, 1.66, 1.92],[0.5, 0.7, 1, 2, 3.6, 5.4, 8, 10.5, 15.4, 28.6], label='AS_l', color='white') plt.plot([0.94, 0.98, 1.016, 1.14, 1.22, 1.34, 1.48, 1.58, 1.73, 2.0],[0.5, 0.7, 1, 2, 3.6, 5.4, 8, 10.5, 15.4, 28.6], label='AS_s', color='white') plt.legend() plt.figure('Phase Diagram of P vs T') ##Pressure vs Temperature Phase Diagram plt.fill_between(kT_data, 0, p_at_freezing, color='red') #plt.fill_between(kT_data, p_at_freezing, 50, color='blue') #FIX - change 30 plt.fill_between(kT_data, p_at_freezing, 6500, color='blue') #use THESIS-AS for kT 1 to 38 and 0.5 #plt.fill_between(kT_data, p_at_freezing, 1000, color='blue') #use THESIS-MC for kT 1 to 38 and 0.5 #plt.fill_between(kT_data, p_at_freezing, 50, color='blue') #paper plt.plot(kT_data, p_at_freezing, color='black') #plt.ylim(0, 40) #plt.xlim(kT_data.min(), kT_data.max()) #FIX! 
#plt.title("Pressure vs Temperature") plt.xlabel('T*') plt.ylabel('p*') #plt.plot([0.7, 0.8,0.9,1.0,2.0,3.0], [6.24, 7.62, 8.78, 9.99, 25.5,43.8], label='chris_l', color='green') ##plt.plot([0.7, 0.8,0.9,1.0,2.0, 3, 5, 10], [6.24, 7.62, 8.78, 9.99, 25.5,43.8, 85.6, 210], label='chris_l', color='green') #plt.plot([0.7, 0.8,0.9,1.0,2.0, 3, 5, 10], [6.24, 7.62, 8.78, 9.99, 25.5, 43.8, 85.6, 210], label='MC', color='yellow') plt.plot([3.6, 5.4, 8, 10.5, 15.4, 28.6], [66.3, 113.48, 191.1, 274.7, 450.9, 1021.9], label='AS', color='white') plt.legend() plt.show()
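For reference, the coexistence search above boils down to intersecting the Gibbs free energies g(p) of the two phases. A minimal sketch with invented straight-line data (not output of this script) shows the segment-pair intersection logic used by find_first_intersection:

# Hedged sketch of the Gibbs-crossing idea: coexistence is where the
# g(p) curves of the two phases intersect. The linear toy curves below
# are made up for illustration.
import numpy as np

p = np.linspace(1.0, 10.0, 50)
g_fluid = 2.0 + 1.00*p    # toy fluid Gibbs free energy per atom
g_solid = 3.5 + 0.75*p    # toy solid Gibbs free energy per atom

def first_intersection(p1, g1, p2, g2):
    # Walk segment pairs; solve the two local linear models for a common
    # point and accept it only if it lies inside both segments.
    for i in range(len(p1) - 1):
        m1 = (g1[i+1] - g1[i]) / (p1[i+1] - p1[i])
        for j in range(len(p2) - 1):
            m2 = (g2[j+1] - g2[j]) / (p2[j+1] - p2[j])
            if m1 == m2:
                continue    # parallel segments never cross
            p_x = (g2[j] - m2*p2[j] - g1[i] + m1*p1[i]) / (m1 - m2)
            if p1[i] <= p_x <= p1[i+1] and p2[j] <= p_x <= p2[j+1]:
                return p_x, m1*p_x + g1[i] - m1*p1[i]
    return None

print(first_intersection(p, g_fluid, p, g_solid))    # ~ (6.0, 8.0)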
droundy/deft
papers/fuzzy-fmt/plot-phasediagram.py
Python
gpl-2.0
15,275
[ "CRYSTAL" ]
5d046e8659fb2f0f707c39ea4bfc6dd01cdedf95645233897b2878682198f594
#!/usr/bin/env python
__author__ = 'Mike McCann,Duane Edgington,Reiko Michisaki'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'duane at mbari.org'

__doc__ = '''
cron loader for CANON wave gliders slocum, OA and TEX in September 2013

Mike McCann; Modified by Duane Edgington and Reiko Michisaki
MBARI 02 September 2013

@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''

import os
import sys
import datetime  # needed for glider data
import time      # for startdate, enddate args

project_dir = os.path.dirname(__file__)
# the next line makes it possible to find CANON, one directory up
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../"))

from CANON import CANONLoader

# building input data sources object
from socket import gethostname
hostname = gethostname()
print(hostname)
if hostname == 'odss-test.shore.mbari.org':
    cl = CANONLoader('stoqs_september2011', 'CANON - September 2011')
else:
    cl = CANONLoader('stoqs_september2013', 'CANON - September 2013')

# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'

######################################################################
#  GLIDERS
######################################################################
# Set start and end dates for all glider loads.
# startdate defaults to 12.2 hours before now, but both dates are then
# overridden with the fixed campaign window below:
ts = time.time() - (12.2*60*60)
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M')
t = time.strptime(st, "%Y-%m-%d %H:%M")
t = time.strptime("2013-09-03 0:01", "%Y-%m-%d %H:%M")
startdate = t[:6]
t = time.strptime("2013-10-15 0:01", "%Y-%m-%d %H:%M")
enddate = t[:6]

# WG Tex Eco Puck
cl.wg_tex_ctd_base = cl.dodsBase + 'CANON_september2013/Platforms/Gliders/WG_Tex/NetCDF/'
cl.wg_tex_ctd_files = [ 'WG_Tex_eco.nc']
cl.wg_tex_ctd_parms = ['chlorophyll','backscatter650','backscatter470']
cl.wg_tex_ctd_startDatetime = datetime.datetime(*startdate[:])
cl.wg_tex_ctd_endDatetime = datetime.datetime(*enddate[:])

###################################################################################################################
# Execute the load
cl.process_command_line()

if cl.args.test:
    cl.load_wg_tex_ctd(stride=1)
elif cl.args.optimal_stride:
    cl.load_wg_tex_ctd(stride=1)
#    cl.loadStella204(stride=1)
else:
    cl.load_wg_tex_ctd(stride=1)
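A small sketch of the date-handling pattern used above, assuming nothing beyond the standard library: strptime yields a struct_time, the first six fields (year through second) are sliced off, and datetime.datetime is rebuilt from them for the loader's time window:

# Hedged sketch of the startdate/enddate construction pattern.
import time
import datetime

t = time.strptime("2013-09-03 0:01", "%Y-%m-%d %H:%M")
startdate = t[:6]                           # (2013, 9, 3, 0, 1, 0)
start_dt = datetime.datetime(*startdate)    # datetime(2013, 9, 3, 0, 1)
print(start_dt.isoformat())                 # 2013-09-03T00:01:00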
duane-edgington/stoqs
stoqs/loaders/CANON/puck_loadsep2013.py
Python
gpl-3.0
2,446
[ "NetCDF" ]
e74e8dacb0294ee32dbd964aba797a61bb4086ef4a76d493de434ee9d67d6898
########################################################################## # # Copyright (c) 2012, John Haddon. All rights reserved. # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import os import unittest import six import imath import IECore import IECoreScene import Gaffer import GafferImageTest import GafferScene import GafferSceneTest class SceneTestCase( GafferImageTest.ImageTestCase ) : def setUp( self ) : GafferImageTest.ImageTestCase.setUp( self ) sanitiser = GafferSceneTest.ContextSanitiser() sanitiser.__enter__() self.addCleanup( sanitiser.__exit__, None, None, None ) def assertSceneValid( self, scenePlug, assertBuiltInSetsComplete=True ) : def walkScene( scenePath ) : # at least pull on the attributes, even though we don't have any test cases for that right now attributes = scenePlug.attributes( scenePath ) thisBound = scenePlug.bound( scenePath ) o = scenePlug.object( scenePath, _copy = False ) if isinstance( o, IECoreScene.VisibleRenderable ) : if not IECore.BoxAlgo.contains( thisBound, o.bound() ) : self.fail( "Bound %s does not contain object %s at %s" % ( thisBound, o.bound(), scenePath ) ) if isinstance( o, IECoreScene.Primitive ) : if "P" in o : if not isinstance( o["P"].data, IECore.V3fVectorData ) : self.fail( "Object %s has incorrect type %s for primitive variable \"P\"" % ( scenePath, o["P"].data.typeName() ) ) if o["P"].data.getInterpretation() != IECore.GeometricData.Interpretation.Point : self.fail( "Object %s has primitive variable \"P\" with incorrect interpretation" % scenePath ) if not o.arePrimitiveVariablesValid() : self.fail( "Object %s has invalid primitive variables" % scenePath ) childNames = scenePlug.childNames( scenePath, _copy = False ) self.assertEqual( len( childNames ), len( set( childNames ) ) ) unionOfTransformedChildBounds = imath.Box3f() for childName in childNames : childPath = IECore.InternedStringVectorData( scenePath ) childPath.append( childName ) childBound = scenePlug.bound( 
childPath ) childTransform = scenePlug.transform( childPath ) childBound = childBound * childTransform unionOfTransformedChildBounds.extendBy( childBound ) walkScene( childPath ) if not IECore.BoxAlgo.contains( thisBound, unionOfTransformedChildBounds ) : self.fail( "Bound ( %s ) does not contain children ( %s ) at %s" % ( thisBound, unionOfTransformedChildBounds, scenePath ) ) # check that the root doesn't have any properties it shouldn't self.assertEqual( scenePlug.attributes( "/" ), IECore.CompoundObject() ) self.assertEqual( scenePlug.transform( "/" ), imath.M44f() ) self.assertEqual( scenePlug.object( "/" ), IECore.NullObject() ) # then walk the scene to check the bounds walkScene( IECore.InternedStringVectorData() ) self.assertSetsValid( scenePlug ) if assertBuiltInSetsComplete : self.assertBuiltInSetsComplete( scenePlug ) def assertPathExists( self, scenePlug, path ) : if isinstance( path, str ) : path = GafferScene.ScenePlug.stringToPath( path ) for i in range( 0, len( path ) ) : self.assertTrue( path[i] in scenePlug.childNames( path[:i] ), "\"{childName}\" in {scene}.childNames( \"{location}\" )".format( childName = path[i], scene = scenePlug.relativeName( scenePlug.ancestor( Gaffer.ScriptNode ) ), location = GafferScene.ScenePlug.pathToString( path[:i] ) ) ) ## Checks that all paths referenced by sets do exist. def assertSetsValid( self, scenePlug ) : for setName in scenePlug["setNames"].getValue() : s = scenePlug.set( setName ) for path in s.value.paths() : self.assertPathExists( scenePlug, path ) ## Checks that all lights, coordinate systems and cameras # in the scene are in the appropriate built-in sets. def assertBuiltInSetsComplete( self, scenePlug ) : setNames = scenePlug["setNames"].getValue() lightSet = scenePlug.set( "__lights" ) if "__lights" in setNames else IECore.PathMatcherData() cameraSet = scenePlug.set( "__cameras" ) if "__cameras" in setNames else IECore.PathMatcherData() coordinateSystemSet = scenePlug.set( "__coordinateSystems" ) if "__coordinateSystems" in setNames else IECore.PathMatcherData() def walkScene( scenePath ) : object = scenePlug.object( scenePath, _copy = False ) if isinstance( object, IECoreScene.Camera ) : self.assertTrue( cameraSet.value.match( scenePath ) & IECore.PathMatcher.Result.ExactMatch, scenePath + " in __cameras set" ) elif isinstance( object, IECoreScene.CoordinateSystem ) : self.assertTrue( coordinateSystemSet.value.match( scenePath ) & IECore.PathMatcher.Result.ExactMatch, scenePath + " in __coordinateSystems set" ) attributes = scenePlug.attributes( scenePath, _copy = False ) if any( [ n == "light" or n.endswith( ":light" ) for n in attributes.keys() ] ) : self.assertTrue( lightSet.value.match( scenePath ) & IECore.PathMatcher.Result.ExactMatch, scenePath + " in __lights set" ) childNames = scenePlug.childNames( scenePath, _copy = False ) for childName in childNames : walkScene( os.path.join( scenePath, str( childName ) ) ) walkScene( "/" ) allPathChecks = { "bound", "transform", "attributes", "object", "childNames" } allSceneChecks = allPathChecks | { "sets", "globals" } def assertPathsEqual( self, scenePlug1, scenePath1, scenePlug2, scenePath2, checks = allPathChecks ) : assert( checks.issubset( self.allPathChecks ) ) for childPlugName in checks : value1 = getattr( scenePlug1, childPlugName )( scenePath1 ) value2 = getattr( scenePlug2, childPlugName )( scenePath2 ) self.assertEqual( value1, value2, "{0} != {1} : comparing {childPlugName} at {paths}".format( unittest.util.safe_repr( value1 ), unittest.util.safe_repr( value2 ), 
childPlugName = childPlugName, paths = self.__formatPaths( scenePath1, scenePath2 ) ) ) def assertScenesEqual( self, scenePlug1, scenePlug2, scenePlug2PathPrefix = "", pathsToPrune = (), checks = allSceneChecks ) : assert( checks.issubset( self.allSceneChecks ) ) def walkScene( scenePath1, scenePath2 ) : if pathsToPrune and self.__pathToString( scenePath1 ) in pathsToPrune : return self.assertPathsEqual( scenePlug1, scenePath1, scenePlug2, scenePath2, checks.intersection( self.allPathChecks ) ) # Sometimes we may not want to fail because of different orders of childNames, so we may not # put "childNames" in `checks` - but we still need to visit the same locations regardless of # which scene comes first, otherwise the rest of the checks don't make any sense self.assertEqual( set( scenePlug1.childNames( scenePath1 ) ), set( scenePlug2.childNames( scenePath2 ) ) ) childNames = scenePlug1.childNames( scenePath1 ) for childName in childNames : childPath1 = IECore.InternedStringVectorData( scenePath1 ) childPath1.append( childName ) childPath2 = IECore.InternedStringVectorData( scenePath2 ) childPath2.append( childName ) walkScene( childPath1, childPath2 ) scenePath1 = IECore.InternedStringVectorData() scenePath2 = IECore.InternedStringVectorData() if scenePlug2PathPrefix : scenePath2.extend( GafferScene.ScenePlug.stringToPath( scenePlug2PathPrefix ) ) walkScene( scenePath1, scenePath2 ) if "globals" in checks : self.assertEqual( scenePlug1.globals(), scenePlug2.globals() ) if "sets" in checks : self.assertEqual( scenePlug1.setNames(), scenePlug2.setNames() ) for setName in scenePlug1.setNames() : set1 = scenePlug1.set( setName ).value set2 = scenePlug2.set( setName ).value for p in pathsToPrune : set1.prune( p ) set2.prune( p ) if scenePlug2PathPrefix : set2 = set2.subTree( scenePlug2PathPrefix ) self.assertEqual( set1, set2 ) def assertPathHashesEqual( self, scenePlug1, scenePath1, scenePlug2, scenePath2, checks = allPathChecks ) : assert( checks.issubset( self.allPathChecks ) ) for childPlugName in checks : hash1 = getattr( scenePlug1, childPlugName + "Hash" )( scenePath1 ) hash2 = getattr( scenePlug2, childPlugName + "Hash" )( scenePath2 ) self.assertEqual( hash1, hash2, "{0} != {1} : comparing {childPlugName}Hash at {paths}".format( unittest.util.safe_repr( hash1 ), unittest.util.safe_repr( hash2 ), childPlugName = childPlugName, paths = self.__formatPaths( scenePath1, scenePath2 ) ) ) def assertPathHashesNotEqual( self, scenePlug1, scenePath1, scenePlug2, scenePath2, checks = allPathChecks ) : assert( checks.issubset( self.allPathChecks ) ) for childPlugName in checks : hash1 = getattr( scenePlug1, childPlugName + "Hash" )( scenePath1 ) hash2 = getattr( scenePlug2, childPlugName + "Hash" )( scenePath2 ) self.assertNotEqual( hash1, hash2, "{0} == {1} : comparing {childPlugName}Hash at {paths}".format( unittest.util.safe_repr( hash1 ), unittest.util.safe_repr( hash2 ), childPlugName = childPlugName, paths = self.__formatPaths( scenePath1, scenePath2 ) ) ) def assertSceneHashesEqual( self, scenePlug1, scenePlug2, scenePlug2PathPrefix = "", pathsToPrune = (), checks = allSceneChecks ) : assert( checks.issubset( self.allSceneChecks ) ) def walkScene( scenePath1, scenePath2 ) : if pathsToPrune and self.__pathToString( scenePath1 ) in pathsToPrune : return self.assertPathHashesEqual( scenePlug1, scenePath1, scenePlug2, scenePath2, checks.intersection( self.allPathChecks ) ) childNames = scenePlug1.childNames( scenePath1 ) for childName in childNames : childPath1 = IECore.InternedStringVectorData( 
scenePath1 )
				childPath1.append( childName )
				childPath2 = IECore.InternedStringVectorData( scenePath2 )
				childPath2.append( childName )
				walkScene( childPath1, childPath2 )

		scenePath1 = IECore.InternedStringVectorData()
		scenePath2 = IECore.InternedStringVectorData()
		if scenePlug2PathPrefix :
			scenePath2.extend( GafferScene.ScenePlug.stringToPath( scenePlug2PathPrefix ) )

		walkScene( scenePath1, scenePath2 )

		if "globals" in checks :
			self.assertEqual( scenePlug1.globalsHash(), scenePlug2.globalsHash() )

		if "sets" in checks :
			self.assertEqual( scenePlug1.setNamesHash(), scenePlug2.setNamesHash() )
			for setName in scenePlug1.setNames() :
				self.assertEqual( scenePlug1.setHash( setName ), scenePlug2.setHash( setName ) )

	def assertSceneHashesNotEqual( self, scenePlug1, scenePlug2, scenePlug2PathPrefix = "", pathsToPrune = (), checks = allSceneChecks ) :

		def walkScene( scenePath1, scenePath2 ) :

			if pathsToPrune and self.__pathToString( scenePath1 ) in pathsToPrune :
				return

			pathChecks = checks.intersection( self.allPathChecks )
			if len( scenePath1 ) == 0 :
				# Hashes will automatically be equal for these plugs at the root,
				# because they are dealt with automatically in the SceneNode base class.
				pathChecks -= { "attributes", "object", "transform" }

			self.assertPathHashesNotEqual( scenePlug1, scenePath1, scenePlug2, scenePath2, pathChecks )

			childNames = scenePlug1.childNames( scenePath1 )
			for childName in childNames :

				childPath1 = IECore.InternedStringVectorData( scenePath1 )
				childPath1.append( childName )

				childPath2 = IECore.InternedStringVectorData( scenePath2 )
				childPath2.append( childName )

				walkScene( childPath1, childPath2 )

		scenePath1 = IECore.InternedStringVectorData()
		scenePath2 = IECore.InternedStringVectorData()
		if scenePlug2PathPrefix :
			scenePath2.extend( GafferScene.ScenePlug.stringToPath( scenePlug2PathPrefix ) )

		walkScene( scenePath1, scenePath2 )

		if "globals" in checks :
			self.assertNotEqual( scenePlug1.globalsHash(), scenePlug2.globalsHash() )

		if "sets" in checks :
			self.assertNotEqual( scenePlug1.setNamesHash(), scenePlug2.setNamesHash() )
			for setName in scenePlug1.setNames() :
				self.assertNotEqual( scenePlug1.setHash( setName ), scenePlug2.setHash( setName ) )

	def assertBoxesEqual( self, box1, box2 ) :

		for n in "min", "max" :
			v1 = getattr( box1, n )
			v2 = getattr( box2, n )
			for i in range( 0, 3 ) :
				self.assertEqual( v1[i], v2[i] )

	def assertBoxesAlmostEqual( self, box1, box2, places ) :

		for n in "min", "max" :
			v1 = getattr( box1, n )()
			v2 = getattr( box2, n )()
			for i in range( 0, 3 ) :
				self.assertAlmostEqual( v1[i], v2[i], places )

	__uniqueInts = {}
	@classmethod
	def uniqueInt( cls, key ) :

		value = cls.__uniqueInts.get( key, 0 )
		value += 1
		cls.__uniqueInts[key] = value

		return value

	def __pathToString( self, path ) :

		return "/" + "/".join( [ p.value() for p in path ] )

	def __formatPaths( self, path1, path2 ) :

		if not isinstance( path1, six.string_types ) :
			path1 = self.__pathToString( path1 )

		if not isinstance( path2, six.string_types ) :
			path2 = self.__pathToString( path2 )

		if path1 == path2 :
			return "\"{0}\"".format( path1 )
		else :
			return "\"{0}\" and \"{1}\"".format( path1, path2 )
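The assertions above all share one recursive traversal pattern: start at the root, run the per-location checks, then recurse into childNames(). A library-free sketch of that walkScene pattern over an invented dict-based scene (not actual Gaffer API):

# Hedged sketch: depth-first scene walk, with a plain dict standing in
# for a ScenePlug. The toy scene and helper are made up for illustration.
toy_scene = {"group": {"sphere": {}, "camera": {}}, "plane": {}}

def walk(children, path=()):
    # Visit every location depth-first, yielding its "/a/b/c"-style path,
    # just as the test helpers recurse via childNames().
    for name, grandchildren in children.items():
        child_path = path + (name,)
        yield "/" + "/".join(child_path)
        yield from walk(grandchildren, child_path)

print(list(walk(toy_scene)))
# ['/group', '/group/sphere', '/group/camera', '/plane']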
hradec/gaffer
python/GafferSceneTest/SceneTestCase.py
Python
bsd-3-clause
14,718
[ "VisIt" ]
9e08638f210db588ae5e41baff10a6f5be2b84499aa7431f8f23be43060e951c
#! compare MemJK and DiskJK import psi4 import numpy as np import random psi4.set_output_file("output.dat", False) mol = psi4.geometry(""" O H 1 1.00 H 1 1.00 2 103.1 """) psi4.set_num_threads(6) memory = 50000 primary = psi4.core.BasisSet.build(mol, "ORBITAL", "cc-pVDZ") aux = psi4.core.BasisSet.build(mol, "ORBITAL", "cc-pVDZ-jkfit") nbf = primary.nbf() naux = aux.nbf() # construct spaces names = ['C1', 'C2', 'C3', 'C4', 'C5'] sizes = [16, 16, 20, 20, 30] spaces = {names[ind]: psi4.core.Matrix.from_array(np.random.rand(nbf, size)) for ind, size in enumerate(sizes)} space_pairs = [[0, 0], [0, 1], [1, 1], [2, 2], [3, 2], [3, 3], [4, 4]] # space vectors C_vectors = [[spaces[names[left]], spaces[names[right]]] for left, right in space_pairs] # now construct DiskJK and symm_JK objects # DiskJK psi4.set_options({"SCF_TYPE" : "DISK_DF"}) DiskJK = psi4.core.JK.build_JK(primary, aux) DiskJK.initialize() DiskJK.print_header() # symm_JK psi4.set_options({"SCF_TYPE" : "MEM_DF"}) MemJK = psi4.core.JK.build_JK(primary, aux) MemJK.initialize() MemJK.print_header() # add C matrices for Cleft, Cright in C_vectors: DiskJK.C_left_add(Cleft) MemJK.C_left_add(Cleft) DiskJK.C_right_add(Cright) MemJK.C_right_add(Cright) # compute DiskJK.compute() MemJK.compute() # get integrals DiskJK_ints = [DiskJK.J(), DiskJK.K()] MemJK_ints = [MemJK.J(), MemJK.K()] # compare for j, t in enumerate(['J', 'K']): for i in range(len(DiskJK_ints[0])): psi4.compare_arrays(np.asarray(DiskJK_ints[j][i]), np.asarray(MemJK_ints[j][i]), 9, t + str(i))
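For context, compare_arrays checks agreement to a number of decimal places; the equivalent check with plain numpy looks like the sketch below. The random matrices are stand-ins for illustration, not actual J/K integrals:

# Hedged sketch of the final comparison step: agreement to `places`
# decimal places is roughly an absolute tolerance of 10**-places.
import numpy as np

places = 9
a = np.random.rand(4, 4)
b = a + np.random.uniform(-1e-12, 1e-12, (4, 4))  # "same" matrix from another backend

np.testing.assert_allclose(a, b, atol=10.0**(-places))
print("matrices agree to", places, "decimal places")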
amjames/psi4
tests/python/memdfjk/input.py
Python
lgpl-3.0
1,603
[ "Psi4" ]
79e65da11b4b0ecfe94bdb479cf2e1480a445c3a725d155c8c13e7dbe5eb5d6a
#!/usr/bin/env python #=============================================================================# # # # NAME: do_RMsynth_1D.py # # # # PURPOSE: Run RM-synthesis on an ASCII Stokes I, Q & U spectrum. # # # # MODIFIED: 31-Jan-2018 by C. Purcell # # # #=============================================================================# # # # The MIT License (MIT) # # # # Copyright (c) 2015 - 2018 Cormac R. Purcell # # # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the "Software"), # # to deal in the Software without restriction, including without limitation # # the rights to use, copy, modify, merge, publish, distribute, sublicense, # # and/or sell copies of the Software, and to permit persons to whom the # # Software is furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in # # all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # # DEALINGS IN THE SOFTWARE. # # # #=============================================================================# import sys import os import time import argparse import traceback import json import math as m import numpy as np import matplotlib.pyplot as plt from RMutils.util_RM import do_rmsynth from RMutils.util_RM import do_rmsynth_planes from RMutils.util_RM import get_rmsf_planes from RMutils.util_RM import measure_FDF_parms from RMutils.util_RM import measure_qu_complexity from RMutils.util_RM import measure_fdf_complexity from RMutils.util_misc import nanmedian from RMutils.util_misc import toscalar from RMutils.util_misc import create_frac_spectra from RMutils.util_misc import poly5 from RMutils.util_misc import MAD from RMutils.util_plotTk import plot_Ipqu_spectra_fig from RMutils.util_plotTk import plot_rmsf_fdf_fig from RMutils.util_plotTk import plot_complexity_fig from RMutils.util_plotTk import CustomNavbar from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax C = 2.997924538e8 # Speed of light [m/s] #-----------------------------------------------------------------------------# def main(): """ Start the function to perform RM-synthesis if called from the command line. """ # Help string to be shown using the -h option descStr = """ Run RM-synthesis on Stokes I, Q and U spectra (1D) stored in an ASCII file. The Stokes I spectrum is first fit with a polynomial and the resulting model used to create fractional q = Q/I and u = U/I spectra. 
""" # Parse the command line options parser = argparse.ArgumentParser(description=descStr, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument("dataFile", metavar="dataFile.dat", nargs=1, help="ASCII file containing Stokes spectra & errors.") parser.add_argument("-t", dest="fitRMSF", action="store_true", help="fit a Gaussian to the RMSF [False]") parser.add_argument("-l", dest="phiMax_radm2", type=float, default=None, help="absolute max Faraday depth sampled [Auto].") parser.add_argument("-d", dest="dPhi_radm2", type=float, default=None, help="width of Faraday depth channel [Auto].") parser.add_argument("-s", dest="nSamples", type=float, default=10, help="number of samples across the RMSF lobe [10].") parser.add_argument("-w", dest="weightType", default="variance", help="weighting [variance] or 'natural' (all 1s).") parser.add_argument("-o", dest="polyOrd", type=int, default=2, help="polynomial order to fit to I spectrum [2].") parser.add_argument("-i", dest="noStokesI", action="store_true", help="ignore the Stokes I spectrum [False].") parser.add_argument("-b", dest="bit64", action="store_true", help="use 64-bit floating point precision [False]") parser.add_argument("-p", dest="showPlots", action="store_true", help="show the plots [False].") parser.add_argument("-D", dest="debug", action="store_true", help="turn on debugging messages & plots [False].") args = parser.parse_args() # Sanity checks if not os.path.exists(args.dataFile[0]): print "File does not exist: '%s'." % args.dataFile[0] sys.exit() dataDir, dummy = os.path.split(args.dataFile[0]) # Set the floating point precision nBits = 32 if args.bit64: nBits = 64 # Run RM-synthesis on the spectra run_rmsynth(dataFile = args.dataFile[0], polyOrd = args.polyOrd, phiMax_radm2 = args.phiMax_radm2, dPhi_radm2 = args.dPhi_radm2, nSamples = args.nSamples, weightType = args.weightType, fitRMSF = args.fitRMSF, noStokesI = args.noStokesI, nBits = nBits, showPlots = args.showPlots, debug = args.debug) #-----------------------------------------------------------------------------# def run_rmsynth(dataFile, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType="variance", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False): """ Read the I, Q & U data from the ASCII file and run RM-synthesis. """ # Default data types dtFloat = "float" + str(nBits) dtComplex = "complex" + str(2*nBits) # Output prefix is derived from the input file name prefixOut, ext = os.path.splitext(dataFile) # Read the data-file. Format=space-delimited, comments="#". print "Reading the data file '%s':" % dataFile # freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy try: print "> Trying [freq_Hz, I_Jy, Q_Jy, U_Jy, dI_Jy, dQ_Jy, dU_Jy]", (freqArr_Hz, IArr_Jy, QArr_Jy, UArr_Jy, dIArr_Jy, dQArr_Jy, dUArr_Jy) = \ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) print "... success." except Exception: print "...failed." # freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy try: print "> Trying [freq_Hz, q_Jy, u_Jy, dq_Jy, du_Jy]", (freqArr_Hz, QArr_Jy, UArr_Jy, dQArr_Jy, dUArr_Jy) = \ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) print "... success." noStokesI = True except Exception: print "...failed." if debug: print traceback.format_exc() sys.exit() print "Successfully read in the Stokes spectra." # If no Stokes I present, create a dummy spectrum = unity if noStokesI: print "Warn: no Stokes I data in use." 
IArr_Jy = np.ones_like(QArr_Jy) dIArr_Jy = np.zeros_like(QArr_Jy) # Convert to GHz and mJy for convenience freqArr_GHz = freqArr_Hz / 1e9 IArr_mJy = IArr_Jy * 1e3 QArr_mJy = QArr_Jy * 1e3 UArr_mJy = UArr_Jy * 1e3 dIArr_mJy = dIArr_Jy * 1e3 dQArr_mJy = dQArr_Jy * 1e3 dUArr_mJy = dUArr_Jy * 1e3 dQUArr_mJy = (dQArr_mJy + dUArr_mJy)/2.0 dQUArr_Jy = dQUArr_mJy / 1e3 # Fit the Stokes I spectrum and create the fractional spectra IModArr, qArr, uArr, dqArr, duArr, fitDict = \ create_frac_spectra(freqArr = freqArr_GHz, IArr = IArr_mJy, QArr = QArr_mJy, UArr = UArr_mJy, dIArr = dIArr_mJy, dQArr = dQArr_mJy, dUArr = dUArr_mJy, polyOrd = polyOrd, verbose = True, debug = debug) # Plot the data and the Stokes I model fit if showPlots: print "Plotting the input data and spectral index fit." freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr_mJy = poly5(fitDict["p"])(freqHirArr_Hz/1e9) specFig = plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz, IArr_mJy = IArr_mJy, qArr = qArr, uArr = uArr, dIArr_mJy = dIArr_mJy, dqArr = dqArr, duArr = duArr, freqHirArr_Hz = freqHirArr_Hz, IModArr_mJy = IModHirArr_mJy, fig = specFig) # Use the custom navigation toolbar (does not work on Mac OS X) try: specFig.canvas.toolbar.pack_forget() CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) except Exception: pass # Display the figure specFig.show() # DEBUG (plot the Q, U and average RMS spectrum) if debug: rmsFig = plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9, dQUArr_mJy, marker='o', color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz/1e9, dQArr_mJy, marker='o', color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz/1e9, dUArr_mJy, marker='o', color='r', lw=0.5, label='rms U') xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05, np.max(freqArr_Hz)/1e9 + xRange*0.05) ax.set_xlabel('$\\nu$ (GHz)') ax.set_ylabel('RMS (mJy bm$^{-1}$)') ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra") rmsFig.show() #-------------------------------------------------------------------------# # Calculate some wavelength parameters lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2) ) dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the Faraday depth range fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2 if dPhi_radm2 is None: dPhi_radm2 = fwhmRMSF_radm2 / nSamples if phiMax_radm2 is None: phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2, 600.0) # Force the minimum phiMax # Faraday depth sampling. Zero always centred on middle channel nChanRM = round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0 startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0 stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0 phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat) print "PhiArr = %.2f to %.2f by %.2f (%d chans)." % (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM) # Calculate the weighting as 1/sigma^2 or all 1s (natural) if weightType=="variance": weightArr = 1.0 / np.power(dQUArr_mJy, 2.0) else: weightType = "natural" weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat) print "Weight type is '%s'." 
% weightType startTime = time.time() # Perform RM-synthesis on the spectrum dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr, dataU = uArr, lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, nBits = nBits, verbose = True) # Calculate the Rotation Measure Spread Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, mskArr = np.isnan(qArr), lam0Sq_m2 = lam0Sq_m2, double = True, fitRMSF = fitRMSF, fitRMSFreal = False, nBits = nBits, verbose = True) fwhmRMSF = float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \ # do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr) #-------------------------------------------------------------------------# endTime = time.time() cputime = (endTime - startTime) print "> RM-synthesis completed in %.2f seconds." % cputime # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model # Multiply the dirty FDF by Ifreq0 to recover the PI in Jy freq0_Hz = C / m.sqrt(lam0Sq_m2) Ifreq0_mJybm = poly5(fitDict["p"])(freq0_Hz/1e9) dirtyFDF *= (Ifreq0_mJybm / 1e3) # FDF is in Jy # Calculate the theoretical noise in the FDF dFDFth_Jybm = np.sqrt(1./np.sum(1./dQUArr_Jy**2.)) # Measure the parameters of the dirty FDF # Use the theoretical noise to calculate uncertainties mDict = measure_FDF_parms(FDF = dirtyFDF, phiArr = phiArr_radm2, fwhmRMSF = fwhmRMSF, dFDF = dFDFth_Jybm, lamSqArr_m2 = lambdaSqArr_m2, lam0Sq = lam0Sq_m2) mDict["Ifreq0_mJybm"] = toscalar(Ifreq0_mJybm) mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]]) mDict["IfitStat"] = fitDict["fitStatus"] mDict["IfitChiSqRed"] = fitDict["chiSqRed"] mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2) mDict["freq0_Hz"] = toscalar(freq0_Hz) mDict["fwhmRMSF"] = toscalar(fwhmRMSF) mDict["dQU_Jybm"] = toscalar(nanmedian(dQUArr_Jy)) mDict["dFDFth_Jybm"] = toscalar(dFDFth_Jybm) # Measure the complexity of the q and u spectra mDict["fracPol"] = mDict["ampPeakPIfit_Jybm"]/(Ifreq0_mJybm/1e3) mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz, qArr = qArr, uArr = uArr, dqArr = dqArr, duArr = duArr, fracPol = mDict["fracPol"], psi0_deg = mDict["polAngle0Fit_deg"], RM_radm2 = mDict["phiPeakPIfit_rm2"]) mDict.update(mD) # Debugging plots for spectral complexity measure if debug: tmpFig = plot_complexity_fig(xArr=pD["xArrQ"], qArr=pD["yArrQ"], dqArr=pD["dyArrQ"], sigmaAddqArr=pD["sigmaAddArrQ"], chiSqRedqArr=pD["chiSqRedArrQ"], probqArr=pD["probArrQ"], uArr=pD["yArrU"], duArr=pD["dyArrU"], sigmaAdduArr=pD["sigmaAddArrU"], chiSqReduArr=pD["chiSqRedArrU"], probuArr=pD["probArrU"], mDict=mDict) tmpFig.show() # Save the dirty FDF, RMSF and weight array to ASCII files print "Saving the dirty FDF, RMSF weight arrays to ASCII files." outFile = prefixOut + "_FDFdirty.dat" print "> %s" % outFile np.savetxt(outFile, zip(phiArr_radm2, dirtyFDF.real, dirtyFDF.imag)) outFile = prefixOut + "_RMSF.dat" print "> %s" % outFile np.savetxt(outFile, zip(phi2Arr_radm2, RMSFArr.real, RMSFArr.imag)) outFile = prefixOut + "_weight.dat" print "> %s" % outFile np.savetxt(outFile, zip(freqArr_Hz, weightArr)) # Save the measurements to a "key=value" text file print "Saving the measurements on the FDF in 'key=val' and JSON formats." 
outFile = prefixOut + "_RMsynth.dat" print "> %s" % outFile FH = open(outFile, "w") for k, v in mDict.iteritems(): FH.write("%s=%s\n" % (k, v)) FH.close() outFile = prefixOut + "_RMsynth.json" print "> %s" % outFile json.dump(dict(mDict), open(outFile, "w")) # Print the results to the screen print print '-'*80 print 'RESULTS:\n' print 'FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]) print 'Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"], mDict["dPolAngleFit_deg"]) print 'Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"], mDict["dPolAngle0Fit_deg"]) print 'Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"]) print 'freq0_GHz = %.4g ' % (mDict["freq0_Hz"]/1e9) print 'I freq0 = %.4g mJy/beam' % (mDict["Ifreq0_mJybm"]) print 'Peak PI = %.4g (+/-%.4g) mJy/beam' % (mDict["ampPeakPIfit_Jybm"]*1e3, mDict["dAmpPeakPIfit_Jybm"]*1e3) print 'QU Noise = %.4g mJy/beam' % (mDict["dQU_Jybm"]*1e3) print 'FDF Noise (theory) = %.4g mJy/beam' % (mDict["dFDFth_Jybm"]*1e3) print 'FDF SNR = %.4g ' % (mDict["snrPIfit"]) print 'sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"], mDict["dSigmaAddMinusQ"]) print 'sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddU"], mDict["dSigmaAddPlusU"], mDict["dSigmaAddMinusU"]) print print '-'*80 # Plot the RM Spread Function and dirty FDF if showPlots: fdfFig = plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr = phiArr_radm2, FDF = dirtyFDF, phi2Arr = phi2Arr_radm2, RMSFArr = RMSFArr, fwhmRMSF = fwhmRMSF, vLine = mDict["phiPeakPIfit_rm2"], fig = fdfFig) # Use the custom navigation toolbar try: fdfFig.canvas.toolbar.pack_forget() CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) except Exception: pass # Display the figure fdfFig.show() # Pause if plotting enabled if showPlots or debug: print "Press <RETURN> to exit ...", raw_input() #-----------------------------------------------------------------------------# if __name__ == "__main__": main()
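A standalone sketch of the Faraday-depth sampling rules implemented above: the RMSF width follows from the lambda-squared coverage, the channel width from the number of samples across the RMSF lobe, and the phi grid is built so that zero sits on the central channel. The band edges here are invented for illustration:

# Hedged sketch of the phi-grid construction (Python 3 syntax).
import numpy as np

C = 2.997924538e8                       # speed of light [m/s]
freq_Hz = np.linspace(1.1e9, 3.1e9, 300)
lam2 = (C / freq_Hz)**2                 # wavelength squared [m^2]

fwhm_rmsf = 2.0 * np.sqrt(3.0) / (lam2.max() - lam2.min())
d_phi = fwhm_rmsf / 10.0                # 10 samples across the RMSF lobe
phi_max = max(np.sqrt(3.0) / np.abs(np.diff(lam2)).max(), 600.0)

n_chan = int(round(abs(phi_max / d_phi))) * 2 + 1
phi = np.linspace(-(n_chan - 1) * d_phi / 2, (n_chan - 1) * d_phi / 2, n_chan)
print("FWHM = %.1f rad/m^2, %d channels" % (fwhm_rmsf, n_chan))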
crpurcell/RM-tools
RMtools_1D/do_RMsynth_1D.py
Python
mit
21,366
[ "Gaussian" ]
fe2cb15dd6caf83f4c66bb678766bee34d7ee450be32bbd2fde08bfd71cdd7b6
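# The script above obtains its dirty FDF from do_rmsynth_planes. As a minimal,
# self-contained sketch of the same RM-synthesis sum (toy code, not RM-tools;
# all names below are made up for illustration):
import numpy as np

def dirty_fdf(q, u, lam_sq, phi, weights=None):
    """F(phi) = K * sum_j w_j (Q_j + iU_j) exp(-2i phi (lam2_j - lam0_2))."""
    p = q + 1j * u
    w = np.ones_like(lam_sq) if weights is None else weights
    lam0_sq = np.sum(w * lam_sq) / np.sum(w)            # weighted mean lambda^2
    phase = np.exp(-2j * np.outer(phi, lam_sq - lam0_sq))
    return phase.dot(w * p) / np.sum(w), lam0_sq

# Toy check: a Faraday-thin source at RM = +50 rad/m^2 peaks where expected.
c = 2.998e8
freqs = np.linspace(1.1e9, 1.4e9, 64)
lam_sq = (c / freqs) ** 2
p_true = np.exp(2j * 50.0 * lam_sq)
phi = np.linspace(-500.0, 500.0, 1001)
fdf, _ = dirty_fdf(p_true.real, p_true.imag, lam_sq, phi)
print("FDF peak at phi =", phi[np.argmax(np.abs(fdf))])  # ~ +50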
#
# AtHomePowerlineServer - networked server for various controllers
# Copyright © 2019 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#

import logging

from drivers.Dummy import Dummy
from drivers.XTB232 import XTB232
# from drivers.tplink import TPLinkDriver
from drivers.py_kasa import PyKasaDriver
# from drivers.meross import MerossDriver
from drivers.meross_v4 import MerossDriverV4
from database.managed_devices import ManagedDevices

logger = logging.getLogger("server")


class DeviceDriverManager():
    """
    Singleton class for managing current configuration of device drivers
    """
    # device name to device driver look up table
    driver_list = {}

    # All of the supported X10 devices and drivers
    X10_DEVICE_LIST = []
    X10_DRIVER_LIST = ["xtb232", "xtb-232", "cm11a", "cm11"]

    # All of the supported TPLink/Kasa devices and drivers
    TPLINK_DEVICE_LIST = []
    TPLINK_DRIVER_LIST = ["tplink"]

    # All of the supported Meross devices and drivers
    MEROSS_DEVICE_LIST = []
    MEROSS_DRIVER_LIST = ["meross"]

    # Driver list for creating a custom device name
    # DriverName:DriverClass
    DRIVER_LIST = {
        "xtb232": XTB232,
        "tplink": PyKasaDriver,
        "meross": MerossDriverV4,
        "dummy": Dummy
    }

    # Tracks drivers in use. We basically treat a driver as a singleton.
    used_driver_list = {
        "xtb232": None,
        "tplink": None,
        "meross": None,
        "dummy": None
    }

    # Build list of supported devices
    # The point is to have one list of devices in the Devices model
    for device, device_mfg in ManagedDevices.VALID_DEVICE_LIST.items():
        if device_mfg == "x10":
            X10_DEVICE_LIST.append(device)
        elif device_mfg == "tplink":
            TPLINK_DEVICE_LIST.append(device)
        elif device_mfg == "meross":
            MEROSS_DEVICE_LIST.append(device)

    @classmethod
    def init(cls, driver_list_config):
        for device_name, driver_name in driver_list_config.items():
            device_name = device_name.lower()
            driver_name = driver_name.lower()
            # X10 devices
            if device_name in cls.X10_DEVICE_LIST:
                if driver_name in cls.X10_DRIVER_LIST:
                    # All X10 driver aliases share the single XTB232 instance,
                    # tracked under the "xtb232" key (aliases such as "cm11a"
                    # are not keys in used_driver_list).
                    if not cls.used_driver_list["xtb232"]:
                        cls.used_driver_list["xtb232"] = XTB232()
                    cls.driver_list[device_name] = cls.used_driver_list["xtb232"]
                    logger.info("Device %s using driver %s", device_name, driver_name)
                elif driver_name == "dummy":
                    if not cls.used_driver_list[driver_name]:
                        cls.used_driver_list[driver_name] = Dummy()
                    cls.driver_list[device_name] = cls.used_driver_list[driver_name]
                    logger.info("Device %s using driver %s", device_name, driver_name)
                else:
                    logger.error("Configuration error: unrecognized driver name %s for device %s",
                                 driver_name, device_name)
                    logger.error("Defaulting to Dummy driver for device %s", device_name)
                    # Fall back under the "dummy" key; the unrecognized
                    # driver_name is not a key in used_driver_list.
                    if not cls.used_driver_list["dummy"]:
                        cls.used_driver_list["dummy"] = Dummy()
                    cls.driver_list[device_name] = cls.used_driver_list["dummy"]
                    logger.info("Device %s using driver %s", device_name, "dummy")
            # TPLink/Kasa devices
            elif device_name in cls.TPLINK_DEVICE_LIST:
                if driver_name in cls.TPLINK_DRIVER_LIST:
                    if not cls.used_driver_list[driver_name]:
                        cls.used_driver_list[driver_name] = cls.DRIVER_LIST["tplink"]()
                    cls.driver_list[device_name] = cls.used_driver_list[driver_name]
                    logger.info("Device %s using driver %s", device_name, driver_name)
                else:
                    logger.error("Configuration error: unrecognized driver name %s for device %s",
                                 driver_name, device_name)
                    logger.error("Defaulting to Dummy driver for device %s", device_name)
                    if not cls.used_driver_list["dummy"]:
                        cls.used_driver_list["dummy"] = Dummy()
                    cls.driver_list[device_name] = cls.used_driver_list["dummy"]
                    logger.info("Device %s using driver %s", device_name, "dummy")
            # Meross devices
            elif device_name in cls.MEROSS_DEVICE_LIST:
                if driver_name in cls.MEROSS_DRIVER_LIST:
                    if not cls.used_driver_list[driver_name]:
                        cls.used_driver_list[driver_name] = MerossDriverV4()
                    cls.driver_list[device_name] = cls.used_driver_list[driver_name]
                    logger.info("Device %s using driver %s", device_name, driver_name)
                else:
                    logger.error("Configuration error: unrecognized driver name %s for device %s",
                                 driver_name, device_name)
                    logger.error("Defaulting to Dummy driver for device %s", device_name)
                    if not cls.used_driver_list["dummy"]:
                        cls.used_driver_list["dummy"] = Dummy()
                    cls.driver_list[device_name] = cls.used_driver_list["dummy"]
                    logger.info("Device %s using driver %s", device_name, "dummy")
            # Custom device name
            else:
                if driver_name in cls.DRIVER_LIST.keys():
                    # TODO This won't work if the driver is a true singleton like XTB232
                    if not cls.used_driver_list[driver_name]:
                        cls.used_driver_list[driver_name] = cls.DRIVER_LIST[driver_name]()
                    cls.driver_list[device_name] = cls.used_driver_list[driver_name]
                    logger.info("Custom device-to-driver mapping created: %s/%s",
                                device_name, driver_name)
                else:
                    logger.error("Configuration error: unrecognized device name %s", device_name)
                    logger.error("Defaulting to Dummy driver for device %s", device_name)
                    if not cls.used_driver_list["dummy"]:
                        cls.used_driver_list["dummy"] = Dummy()
                    cls.driver_list[device_name] = cls.used_driver_list["dummy"]
                    logger.info("Device %s using driver %s", device_name, "dummy")

        # Open all used drivers
        for dn, driver in cls.used_driver_list.items():
            if driver:
                driver.open()

    @classmethod
    def close_drivers(cls):
        # Call each driver's close method
        for dn, driver in cls.used_driver_list.items():
            if driver:
                logger.debug("Closing driver %s", dn)
                driver.close()

    @classmethod
    def get_driver(cls, device_name):
        if device_name in cls.driver_list.keys():
            return cls.driver_list[device_name]
        return None

    @classmethod
    def discover_devices(cls):
        for device_name, driver in cls.driver_list.items():
            driver.discover_devices()
dhocker/athomepowerlineserver
drivers/device_driver_manager.py
Python
gpl-3.0
7,405
[ "xTB" ]
2e55cf28b9cff8f90e817e1afa67a9efaffc9b134ff301664066516cdfb08f05
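# init() above binds each device to one shared driver instance and falls back
# to Dummy for unrecognized names. The same pattern in isolation (class and
# registry names here are hypothetical, not part of the server):
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo")

class DummyDriver:
    def set_state(self, device, on):
        log.info("dummy: %s -> %s", device, on)

DRIVERS = {"dummy": DummyDriver}   # name -> class, like DRIVER_LIST
_instances = {}                    # name -> shared instance, like used_driver_list
device_to_driver = {}              # device -> instance, like driver_list

def bind(device_name, driver_name):
    if driver_name not in DRIVERS:
        log.error("unknown driver %s for %s; defaulting to dummy", driver_name, device_name)
        driver_name = "dummy"
    # setdefault keeps exactly one instance per driver (the singleton behavior)
    device_to_driver[device_name] = _instances.setdefault(driver_name,
                                                          DRIVERS[driver_name]())

bind("porch-light", "no-such-driver")
device_to_driver["porch-light"].set_state("porch-light", True)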
# -*- coding: utf-8 -*- # FAO Translators: # First of all thank you for your interest in translating this game, # I will be grateful if you could share it with the community - # if possible please send it back to my email, and I'll add it to the next version. # The translation does not have to be exact as long as it makes sense and fits in its location # (if it doesn't I'll try to either make the font smaller or make the area wider - where possible). # The colour names in other languages than English are already in smaller font. d = dict() dp = dict() # messages with pronunciation exceptions - this dictionary will override entries in a copy of d # word lists numbers = ['yksi', 'kaksi', 'kolme', 'neljä', 'viisi', 'kuusi', 'seitsemän', 'kahdeksan', 'yhdeksän', 'kymmenen', 'yksitoista', 'kaksitoista', 'kolmetoista', 'neljätoista', 'viisitoista', 'kuusitoista', 'seitsemäntoista', 'kahdeksantoista', 'yhdeksäntoista', 'kaksikymmentä', 'kaksikymmentäyksi', 'kaksikymmentäkaksi', 'kaksikymmentäkolme', 'kaksikymmentäneljä', 'kaksikymmentäviisi', 'kaksikymmentäkuusi', 'kaksikymmentäseitsemän', 'kaksikymmentäkahdeksan', 'kaksikymmentäyhdeksän'] numbers2090 = ['kaksikymmentä', 'kolmekymmentä', 'neljäkymmentä', 'viiskymmentä', 'kuusikymmentä', 'seitsemänkymmentä', 'kahdeksankymmentä', 'yhdeksänkymmentä'] # The following 2 lines are not to be translated but replaced with a sequence of words starting in each of the letters of your alphabet in order, best if these words have a corresponding picture in images/flashcard_images.jpg. The second line has the number of the image that the word describes. # The images are numbered from left to bottom such that the top left is numbered 0, the last image is 73, if none of the available things have names that start with any of the letters we can add new pictures. dp['abc_flashcards_word_sequence'] = ['Avain', 'Banaani', 'Cheddar', 'Delfiini', 'Elefantti', 'Flyygeli', 'Gnuu', 'Hiiri', 'Ikkuna', 'Jooga', 'Kirahvi', 'Leipä', 'Muurahainen', 'Näyttö', 'Omena', 'Papukaija', 'Q', 'Riippumatto', 'Seepra', 'Talo', 'Uuni', 'Vene', 'Watti', 'Xylofoni', 'Yö', 'Z', 'Å', 'Ämpäri', 'Öinen'] d['abc_flashcards_word_sequence'] = ['<1>A<2>v<1>a<2>in', '<1>B<2>anaani', '<1>C<2>heddar', '<1>D<2>elfiini', '<1>E<2>l<1>e<2>fantti', '<1>F<2>lyygeli', '<1>G<2>nuu', '<1>H<2>iiri', '<1>I<2>kkuna', '<1>J<2>ooga', '<1>K<2>irahvi', '<1>L<2>eipä', '<1>M<2>uurahainen', '<1>N<2>äyttö', '<1>O<2>mena', '<1>P<2>a<1>p<2>ukaija', '<1>Q<2> ', '<1>R<2>iippumatto', '<1>S<2>eepra', '<1>T<2>alo', '<1>U<2>uni', '<1>V<2>ene', '<1>W<2>atti', '<1>X<2>ylofoni', '<1>Y<2>ö', '<1>Z<2> ', '<1>Å<2> ', '<1>Ä<2>mp<1>ä<2>ri', '<1>Ö<2>inen'] d['abc_flashcards_frame_sequence'] = [10, 71, 57, 59, 4, 34, 70, 12, 22, 32, 30, 35, 0, 40, 42, 15, 43, 56, 25, 7, 67, 1, 18, 23, 54, 43, 43, 73, 54] # alphabet en alphabet_lc = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'å', 'ä', 'ö'] alphabet_uc = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'Å', 'Ä', 'Ö'] # correction of eSpeak pronounciation of single letters if needed letter_names = [] # letters that may exist in words but are not part of the officail alphabet accents_lc = ['š', 'ž', '-'] accents_uc = ['Š', 'Ž'] def n2txt(n, twoliner=False): "takes a number from 1 - 99 and returns it back in a word form, ie: 63 returns 'sixty three'." 
if 0 < n < 30: return numbers[n - 1] elif 30 <= n < 100: m = n % 10 tens = numbers2090[(n // 10) - 2] if m == 0: return tens elif m > 0: ones = numbers[m - 1] if twoliner: return [tens + "-", ones] else: return tens + ones elif n == 0: return "nolla" elif n == 100: return "sata" return "" def time2str(h, m): 'takes 2 variables: h - hour, m - minute, returns time as a string, ie. five to seven - for 6:55' if m > 30: if h == 12: h = 1 else: h += 1 if m == 0: return "tasan %s" % n2txt(h) elif m == 1: return "minuutin yli %s" % n2txt(h) elif m == 15: return "vartin yli %s" % n2txt(h) elif m == 30: return "puoli %s" % n2txt(h + 1) elif m == 45: return "varttia vaille %s" % n2txt(h) elif m == 59: return "minuutin vaille %s" % n2txt(h) elif m < 30: return "%s yli %s" % (n2txt(m), n2txt(h)) elif m > 30: return "%s vaille %s" % (n2txt(60 - m), n2txt(h)) return "" d["a4a_animals"] = ["cow", "turkey", "shrimp", "wolf", "panther", "panda", "magpie", "clam", "pony", "mouse", "pug", "koala", "frog", "ladybug", "gorilla", "llama", "vulture", "hamster", "bird", "starfish", "crow", "parakeet", "caterpillar", "tiger", "hummingbird", "piranha", "pig", "scorpion", "fox", "leopard", "iguana", "dolphin", "bat", "chick", "crab", "hen", "wasp", "chameleon", "whale", "hedgehog", "fawn", "moose", "bee", "viper", "shrike", "donkey", "guinea pig", "sloth", "horse", "penguin", "otter", "bear", "zebra", "ostrich", "camel", "antelope", "lemur", "pigeon", "lama", "mole", "ray", "ram", "skunk", "jellyfish", "sheep", "shark", "kitten", "deer", "snail", "flamingo", "rabbit", "oyster", "beaver", "sparrow", "dove", "eagle", "beetle", "hippopotamus", "owl", "cobra", "salamander", "goose", "kangaroo", "dragonfly", "toad", "pelican", "squid", "lion cub", "jaguar", "duck", "lizard", "rhinoceros", "hyena", "ox", "peacock", "parrot", "elk", "alligator", "ant", "goat", "baby rabbit", "lion", "squirrel", "opossum", "chimp", "doe", "gopher", "elephant", "giraffe", "spider", "puppy", "jay", "seal", "rooster", "turtle", "bull", "cat", "lamb", "rat", "slug", "buffalo", "blackbird", "swan", "lobster", "dog", "mosquito", "snake", "chicken", "anteater"] d["a4a_sport"] = ["judo", "pool", "ride", "stretch", "helmet", "ice skating", "walk", "ran", "run", "swim", "hop", "hike", "boxing", "hockey", "race", "throw", "skate", "win", "squat", "ski", "golf", "whistle", "torch", "sailing", "stand", "tennis", "jump", "rowing", "jog", "rope"] d["a4a_body"] = ["teeth", "cheek", "ankle", "knee", "toe", "muscle", "mouth", "feet", "hand", "elbow", "hair", "eyelash", "beard", "belly button", "thumb", "breast", "nostril", "nose", "hip", "arm", "eyebrow", "fist", "neck", "wrist", "throat", "eye", "leg", "spine", "ear", "finger", "foot", "braid", "face", "back", "chin", "bottom", "thigh", "belly"] d["a4a_people"] = ["girl", "male", "son", "mates", "friends", "baby", "child", "dad", "mom", "twin boys", "brothers", "man", "mother", "grandfather", "family", "female", "wife", "husband", "bride", "madam", "grandmother", "couple", "lad", "twin girls", "tribe", "boy", "sisters", "woman", "lady"] d["a4a_food"] = ["candy", "sausage", "hamburger", "steak", "fudge", "doughnut", "coconut", "rice", "ice cream", "jelly", "yoghurt", "dessert", "pretzel", "peanut", "jam", "feast", "cookie", "bacon", "spice", "coffee", "pie", "lemonade", "chocolate", "water bottle", "lunch", "ice", "sugar", "sauce", "soup", "juice", "fries", "cake", "mashed potatoes", "tea", "bun", "cheese", "beef", "sandwich", "slice", "sprinkle", "pizza", "flour", "gum", "spaghetti", "roast", "drink", 
"stew", "spread", "meat", "milk", "meal", "corn", "bread", "walnut", "egg", "hot dog", "ham"] d["a4a_clothes_n_accessories"] = ["jewellery", "sock", "jacket", "heel", "smock", "shorts", "pocket", "necklace", "sweatshirt", "uniform", "raincoat", "trousers", "sunglasses", "coat", "pullover", "shirt", "sandals", "suit", "pyjamas", "skirt", "zip", "shoes", "jewel", "tie", "slippers", "gloves", "hat", "sleeve", "cap", "swimming suit", "trainer", "vest", "glasses", "shoelace", "patch", "scarf", "shoe", "button", "dress", "sash", "shoe sole", "robe", "pants", "kimono", "overalls"] d["a4a_actions"] = ["lick", "slam", "beg", "fell", "scratch", "touch", "sniff", "see", "climb", "dig", "howl", "sleep", "explore", "draw", "hug", "teach", "nap", "clay", "catch", "clap", "cry", "sing", "meet", "sell", "peck", "beat", "kneel", "find", "dance", "cough", "cut", "think", "bark", "speak", "cheer", "bake", "write", "punch", "strum", "study", "plow", "dream", "post", "dive", "whisper", "sob", "shake", "feed", "crawl", "camp", "spill", "clean", "scream", "tear", "float", "pull", "ate", "kiss", "sit", "hatch", "blink", "hear", "smooch", "play", "wash", "chat", "drive", "drink", "fly", "juggle", "bit", "sweep", "look", "knit", "lift", "fetch", "read", "croak", "stare", "eat"] d["a4a_construction"] = ["lighthouse", "door", "circus", "church", "kennel", "temple", "smoke", "chimney", "brick", "well", "street", "castle", "store", "staircase", "school", "farm", "bridge", "dam", "pyramid", "barn", "mill", "window", "cabin", "step", "shop", "shed", "roof", "steeple", "garage", "mosque", "hospital", "tent", "house", "wall", "bank", "shutter", "hut"] d["a4a_nature"] = ["land", "cliff", "hill", "canyon", "rock", "sea", "lake", "coast", "shore", "mountain", "pond", "peak", "lava", "cave", "dune", "island", "forest", "desert", "iceberg"] d["a4a_jobs"] = ["clown", "engineer", "priest", "vet", "judge", "chef", "athlete", "librarian", "juggler", "police", "plumber", "badge", "queen", "farmer", "magic", "knight", "doctor", "bricklayer", "cleaner", "teacher", "hunter", "soldier", "musician", "lawyer", "fisherman", "princess", "fireman", "nun", "chief", "pirate", "cowboy", "electrician", "nurse", "king", "president", "office", "carpenter", "jockey", "worker", "mechanic", "pilot", "actor", "cook", "student", "butcher", "accountant", "prince", "pope", "sailor", "boxer", "ballet", "coach", "astronaut", "painter", "anaesthesiologist", "scientist"] d["a4a_fruit_n_veg"] = ["carrot", "blackberries", "celery", "turnip", "cacao", "peach", "melon", "grapefruit", "broccoli", "grapes", "spinach", "fig", "kernel", "radish", "tomato", "kiwi", "asparagus", "olives", "cucumbers", "beans", "strawberry", "peppers", "raspberry", "apricot", "potatoes", "peas", "cabbage", "cherries", "squash", "blueberries", "pear", "orange", "pumpkin", "avocado", "garlic", "onion", "apple", "lime", "cauliflower", "mango", "lettuce", "lemon", "aubergine", "artichokes", "plums", "leek", "bananas", "papaya"] d["a4a_transport"] = ["sail", "taxi", "car", "bike", "raft", "pedal", "bus", "handlebar", "boat", "truck", "sleigh", "carpet", "motorcycle", "train", "ship", "van", "canoe", "rocket", "mast", "sledge", "bicycle"]
imiolek-ireneusz/pysiogame
i18n/custom/fi.py
Python
gpl-3.0
12,144
[ "Elk", "Jaguar", "MOOSE" ]
33edd52863eeb1c7511ca5917b98f5a0b637d48fa484d9b9e3d8751f019d02ea
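# For 30-99, n2txt() above composes the tens word and the ones word with no
# space between them. A standalone check of just that branch, re-declaring
# only the two lists it reads (simplified: no twoliner handling):
numbers = ['yksi', 'kaksi', 'kolme', 'neljä', 'viisi',
           'kuusi', 'seitsemän', 'kahdeksan', 'yhdeksän', 'kymmenen']
numbers2090 = ['kaksikymmentä', 'kolmekymmentä', 'neljäkymmentä', 'viiskymmentä',
               'kuusikymmentä', 'seitsemänkymmentä', 'kahdeksankymmentä',
               'yhdeksänkymmentä']

def tens_and_ones(n):
    # valid for 30 <= n <= 99 only
    tens = numbers2090[(n // 10) - 2]
    m = n % 10
    return tens if m == 0 else tens + numbers[m - 1]

assert tens_and_ones(63) == 'kuusikymmentäkolme'
assert tens_and_ones(40) == 'neljäkymmentä'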
import os from time import sleep from django.core.files import File from lettuce import world, step, after from mock import mock_open, patch from questionnaire.features.pages.questionnaires import QuestionnairePage from questionnaire.features.pages.uploads import UploadDocumentPage, DeleteDocumentPage from questionnaire.models import SupportDocument @step(u'And I click the upload support document link') def and_i_click_the_upload_support_document_link(step): world.page.click_by_id('upload-file') @step(u'Then I should see the upload form') def then_i_should_see_the_upload_form(step): world.page = UploadDocumentPage(world.browser, world.questionnaire) world.page.validate_url() world.page.validate_upload_form({'Support document': 'path'}) @step(u'When I select a file to upload') def when_i_select_a_file_to_upload(step): world.page.input_file(world.filename) @step(u'And I click upload button') def and_i_click_upload_button(step): world.page.click_by_id('upload-btn') @step(u'Then I should see the file was uploaded successfully') def then_i_should_see_the_file_was_uploaded_successfully(step): world.page.is_text_present(world.filename) world.page.is_text_present("File was uploaded successfully") @step(u'And I have a pdf document') def and_i_have_a_pdf_document(step): world.filename = 'sample_document.pdf' m = mock_open() with patch('__main__.open', m, create=True): with open(world.filename, 'w') as document: document.write("Some stuff") @step(u'And I should be able to download the file') def and_i_should_be_able_to_download_the_file(step): world.page.is_text_present(world.filename) @step(u'And I have a zip file') def and_i_have_a_zip_file(step): world.filename = 'sample_file.zip' m = mock_open() with patch('__main__.open', m, create=True): with open(world.filename, 'w') as document: document.write("Some stuff") @step(u'Then I should see an error that the file type is not supported') def then_i_should_see_an_error_that_the_file_type_is_not_supported(step): world.page.is_element_present_by_css('.error') world.page.is_text_present('file type is not an allowed') @step(u'And I visit the attachments page') def and_i_visit_the_attachments_page(step): world.page.click_by_id('id_attachments') @step(u'And I have an attached file') def and_i_have_an_attached_file(step): world.filename = 'sample_document.pdf' m = mock_open() with patch('__main__.open', m, create=True): with open(world.filename, 'w') as document: document.write("Some stuff") document = open(world.filename, 'rb') world.document = SupportDocument.objects.create(path=File(document), country=world.uganda, questionnaire=world.questionnaire) @step(u'I click delete button next to that file') def and_i_click_delete_button_next_to_that_file(step): world.page.click_by_css('.glyphicon-trash') sleep(3) @step(u'Then I should see a warning dialog') def then_i_should_see_a_warning_dialog(step): world.page = DeleteDocumentPage(world.browser, world.document) world.page.is_text_present("Are you sure you want to delete this document?") @step(u'When I click confirm') def when_i_click_confirm(step): world.page.click_by_id('confirm-delete-document-%s' % world.document.id) @step(u'Then I should see that file was deleted') def then_i_should_see_that_file_was_deleted(step): world.page.is_text_present(os.path.basename(world.document.path.url), status=False) world.page.is_text_present("Attachment was deleted successfully") @step(u'And I clean up the files') def and_i_clean_up_the_files(step): os.system("rm -rf %s" % world.filename) @step(u'And I visit the questionnaire 
section page') def and_i_visit_the_questionnaire_section_page(step): world.page = QuestionnairePage(world.browser, world.section_1) world.page.visit() @step(u'And I should see the number of attachments indicated in the attachments link') def and_i_should_see_the_number_of_attachments_indicated_in_the_attachments_link(step): world.page.validate_number_of_attachments(1) @step(u'And the number of attachments indicated in the attachments link should be updated') def and_the_number_of_attachments_indicated_in_the_attachments_link_should_be_updated(step): world.page = UploadDocumentPage(world.browser, world.questionnaire) world.page.validate_number_of_attachments(0)
eJRF/ejrf
questionnaire/features/upload_steps.py
Python
bsd-3-clause
4,524
[ "VisIt" ]
50fd0865727b34dba779fea69a7319162c5f658fff3ab6dba6491f829448f5d7
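# The steps above patch '__main__.open' with mock_open; that only intercepts
# open() calls resolved through the patched namespace, which is presumably why
# the suite still needs a cleanup step for files it actually writes. A small
# illustration of targeting the namespace explicitly (unittest.mock is the
# Python 3 stdlib equivalent of the mock package imported above):
from unittest.mock import mock_open, patch

def save_notes(path, text):
    with open(path, "w") as fh:
        fh.write(text)

m = mock_open()
# save_notes lives in this module, so patch this module's 'open'; when run
# as a script that namespace is '__main__', and no real file is created.
with patch("__main__.open", m, create=True):
    save_notes("sample_document.pdf", "Some stuff")

m.assert_called_once_with("sample_document.pdf", "w")
m().write.assert_called_once_with("Some stuff")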
#!/usr/bin/env python """ Script to update pilot version in CS """ from __future__ import absolute_import from __future__ import division from __future__ import print_function __RCSID__ = "$Id$" import DIRAC from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script @Script() def main(): Script.registerSwitch( "v:", "vo=", "Location of pilot version in CS /Operations/<vo>/Pilot/Version" " (default value specified in CS under /DIRAC/DefaultSetup)", ) # Registering arguments will automatically add their description to the help menu Script.registerArgument("version: pilot version you want to update to") Script.parseCommandLine(ignoreErrors=False) # parseCommandLine show help when mandatory arguments are not specified or incorrect argument version = Script.getPositionalArgs(group=True) vo = None for switch in Script.getUnprocessedSwitches(): if switch[0] == "v" or switch[0] == "vo": vo = switch[1] from DIRAC import S_OK, S_ERROR from DIRAC import gConfig, gLogger from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI def updatePilot(version, vo): """ Update in the CS the pilot version used, If only one version present in CS it's overwritten. If two versions present, the new one is added and the last removed :param version: version vArBpC of pilot you want to use :param vo: Location of pilot version in CS /Operations/<vo>/Pilot/Version """ setup = vo if not vo: setup = gConfig.getValue("/DIRAC/DefaultSetup") if not setup: return S_ERROR("No value set for /DIRAC/DefaultSetup in CS") pilotVersion = gConfig.getValue("Operations/%s/Pilot/Version" % setup, []) if not pilotVersion: return S_ERROR("No pilot version set under Operations/%s/Pilot/Version in CS" % setup) pilotVersion.pop() pilotVersion.insert(0, version) api = CSAPI() api.setOption("Operations/%s/Pilot/Version" % setup, ", ".join(pilotVersion)) result = api.commit() if not result["OK"]: gLogger.fatal("Could not commit new version of pilot!") return result newVersion = gConfig.getValue("Operations/%s/Pilot/Version" % setup) return S_OK("New version of pilot set to %s" % newVersion) result = updatePilot(version, vo) if not result["OK"]: gLogger.fatal(result["Message"]) DIRAC.exit(1) gLogger.notice(result["Value"]) DIRAC.exit(0) if __name__ == "__main__": main()
ic-hep/DIRAC
src/DIRAC/FrameworkSystem/scripts/dirac_admin_update_pilot.py
Python
gpl-3.0
2,636
[ "DIRAC" ]
bb489b95da890a596069d04fc31b750f49ab583f28a77773f7ff36f06d9a5146
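# updatePilot() above keeps the CS value as a short newest-first list: pop()
# drops the oldest entry and insert(0, ...) prepends the new one, so a
# single-entry list is simply overwritten. That list handling on its own
# (version strings here are made up):
def rotate_versions(current, new):
    versions = list(current)
    versions.pop()            # drop the last (oldest) version
    versions.insert(0, new)   # newest first
    return versions

print(rotate_versions(["v7r3p1", "v7r3p0"], "v7r3p2"))  # ['v7r3p2', 'v7r3p1']
print(rotate_versions(["v7r3p1"], "v7r3p2"))            # ['v7r3p2']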
# -*- coding:utf-8 -*- # Copyright (c) 2015, Galaxy Authors. All Rights Reserved # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # # Author: wangtaize@baidu.com # Date: 2015-03-30 import logging from common import shell from galaxy import sdk LOG = logging.getLogger("console") class Galaxy(object): def __init__(self, master_addr,bin_path): self.master_addr = master_addr self.shell_helper = shell.ShellHelper() self.bin_path = bin_path def create_task(self,name,url,cmd_line,replicate_count,mem_limit,cpu_quota, deploy_step_size=-1, one_task_per_host=False, restrict_tags = []): galaxy_sdk = sdk.GalaxySDK(self.master_addr) status,job_id = galaxy_sdk.make_job(name,'ftp',url,cmd_line, replicate_num = replicate_count, mem_limit = mem_limit, cpu_limit = cpu_quota, deploy_step_size=deploy_step_size, one_task_per_host=one_task_per_host, restrict_tags = restrict_tags) return status,job_id def list_task_by_job_id(self,job_id): galaxy_sdk = sdk.GalaxySDK(self.master_addr) status ,task_list = galaxy_sdk.list_task_by_job_id(int(job_id)) if not status: return False,[] ret_task_list = [] for task in task_list: ret_task_list.append(task.__dict__) return True, ret_task_list def list_task_by_host(self,agent): galaxy_sdk = sdk.GalaxySDK(self.master_addr) status ,task_list = galaxy_sdk.list_task_by_host(str(agent)) if not status: return False,[] ret_task_list = [] for task in task_list: ret_task_list.append(task.__dict__) return True, ret_task_list def job_history(self, job_id): galaxy_sdk = sdk.GalaxySDK(self.master_addr) status ,task_list = galaxy_sdk.get_scheduled_history(int(job_id)) if not status: return False,[] ret_task_list = [] for task in task_list: ret_task_list.append(task.__dict__) return True, ret_task_list def list_node(self): galaxy_sdk = sdk.GalaxySDK(self.master_addr) node_list = galaxy_sdk.list_all_node() return node_list def kill_job(self,job_id): galaxy_sdk = sdk.GalaxySDK(self.master_addr) galaxy_sdk.kill_job(job_id) def update_job(self,job_id,replicate_num): galaxy_sdk = sdk.GalaxySDK(self.master_addr) return galaxy_sdk.update_job(job_id,replicate_num) def list_jobs(self): galaxy_sdk = sdk.GalaxySDK(self.master_addr) status,job_list = galaxy_sdk.list_all_job() return status ,job_list def tag_agent(self, tag, agent_set): galaxy_sdk = sdk.GalaxySDK(self.master_addr) status = galaxy_sdk.tag_agent(tag, agent_set) return status def list_tag(self): galaxy_sdk = sdk.GalaxySDK(self.master_addr) return galaxy_sdk.list_tag()
fxsjy/galaxy
console/backend/src/galaxy/wrapper.py
Python
bsd-3-clause
3,231
[ "Galaxy" ]
696cbf9386eea94346fc55f662bd5441e288b87bc744496ad8d04d8deb414a6f
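# Every list_* method above flattens the SDK's result objects into plain dicts
# via __dict__ before returning them to the console. That conversion in
# isolation (the Task class is a stand-in, not part of the galaxy SDK):
class Task(object):
    def __init__(self, task_id, state):
        self.task_id = task_id
        self.state = state

def to_dicts(objs):
    return [o.__dict__ for o in objs]

print(to_dicts([Task(1, "RUNNING"), Task(2, "PENDING")]))
# -> [{'task_id': 1, 'state': 'RUNNING'}, {'task_id': 2, 'state': 'PENDING'}]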
# coding: utf-8 from __future__ import unicode_literals, division import json from monty.json import MontyEncoder, MontyDecoder """ Created on Dec 6, 2012 """ import os import shutil from unittest import TestCase import unittest from pkg_resources import parse_version import pymatgen from custodian.qchem.handlers import QChemErrorHandler from custodian.qchem.jobs import QchemJob __author__ = "Xiaohui Qu" __version__ = "0.1" __maintainer__ = "Xiaohui Qu" __email__ = "xqu@lbl.gov" __date__ = "Dec 6, 2013" test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_files", "qchem") # noinspection PyUnresolvedReferences scr_dir = os.path.join(test_dir, "scr") @unittest.skipIf(parse_version(pymatgen.__version__) <= parse_version('3.0.11'), "Folding comment text is a feature after " "version 3.0.11") class QChemErrorHandlerTest(TestCase): def setUp(self): os.makedirs(scr_dir) os.chdir(scr_dir) def test_scf_rca(self): shutil.copyfile(os.path.join(test_dir, "hf_rca.inp"), os.path.join(scr_dir, "hf_rca.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['increase_iter']}) with open(os.path.join(test_dir, "hf_rca_tried_0.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_0.inp"), os.path.join(scr_dir, "hf_rca_tried_0.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_0.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['rca_diis']}) with open(os.path.join(test_dir, "hf_rca_tried_1.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_tried_0.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_1.inp"), os.path.join(scr_dir, "hf_rca_tried_1.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_1.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['gwh']}) with open(os.path.join(test_dir, "hf_rca_tried_2.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_tried_1.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_2.inp"), os.path.join(scr_dir, "hf_rca_tried_2.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_2.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular 
charge is not found'], 'actions': ['gdm']}) with open(os.path.join(test_dir, "hf_rca_tried_3.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_tried_2.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_3.inp"), os.path.join(scr_dir, "hf_rca_tried_3.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_3.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['rca']}) with open(os.path.join(test_dir, "hf_rca_tried_4.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_tried_3.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_4.inp"), os.path.join(scr_dir, "hf_rca_tried_4.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_4.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['core+rca']}) with open(os.path.join(test_dir, "hf_rca_tried_5.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_tried_4.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_5.inp"), os.path.join(scr_dir, "hf_rca_tried_5.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_5.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': None}) def test_negative_eigen(self): shutil.copyfile(os.path.join(test_dir, "negative_eigen.qcinp"), os.path.join(scr_dir, "negative_eigen.qcinp")) shutil.copyfile(os.path.join(test_dir, "negative_eigen.qcout"), os.path.join(scr_dir, "negative_eigen.qcout")) h = QChemErrorHandler(input_file="negative_eigen.qcinp", output_file="negative_eigen.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Molecular charge is not found', 'Negative Eigen'], 'actions': ['use tight integral threshold']}) with open(os.path.join(test_dir, "negative_eigen_tried_1.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "negative_eigen.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "negative_eigen_tried_1.qcinp"), os.path.join(scr_dir, "negative_eigen_tried_1.qcinp")) shutil.copyfile(os.path.join(test_dir, "negative_eigen.qcout"), os.path.join(scr_dir, "negative_eigen.qcout")) h = QChemErrorHandler(input_file="negative_eigen_tried_1.qcinp", output_file="negative_eigen.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Molecular charge is not found', 
'Negative Eigen'], 'actions': ['use even tighter integral threshold']}) with open(os.path.join(test_dir, "negative_eigen_tried_2.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "negative_eigen_tried_1.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_no_error(self): shutil.copyfile(os.path.join(test_dir, "hf_no_error.inp"), os.path.join(scr_dir, "hf_no_error.inp")) shutil.copyfile(os.path.join(test_dir, "hf_no_error.out"), os.path.join(scr_dir, "hf_no_error.out")) h = QChemErrorHandler(input_file="hf_no_error.inp", output_file="hf_no_error.out") has_error = h.check() self.assertFalse(has_error) def test_scf_reset(self): shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_1.inp"), os.path.join(scr_dir, "hf_scf_reset.inp")) shutil.copyfile(os.path.join(test_dir, "hf_scf_reset.out"), os.path.join(scr_dir, "hf_scf_reset.out")) h = QChemErrorHandler(input_file="hf_scf_reset.inp", output_file="hf_scf_reset.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed'], 'actions': ['reset']}) with open(os.path.join(test_dir, "hf_scf_reset.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_scf_reset.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_unable_to_determine_lambda(self): shutil.copyfile(os.path.join(test_dir, "unable_to_determine_lambda_in_geom_opt.qcinp"), os.path.join(scr_dir, "unable_to_determine_lambda_in_geom_opt.qcinp")) shutil.copyfile(os.path.join(test_dir, "unable_to_determine_lambda_in_geom_opt.qcout"), os.path.join(scr_dir, "unable_to_determine_lambda_in_geom_opt.qcout")) h = QChemErrorHandler(input_file="unable_to_determine_lambda_in_geom_opt.qcinp", output_file="unable_to_determine_lambda_in_geom_opt.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Geometry optimization failed', 'Lamda Determination Failed'], 'actions': ['reset']}) with open(os.path.join(test_dir, "unable_to_determine_lambda_in_geom_opt_reset.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "unable_to_determine_lambda_in_geom_opt.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_scf_gdm(self): shutil.copyfile(os.path.join(test_dir, "hf_gdm.inp"), os.path.join(scr_dir, "hf_gdm.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['increase_iter']}) with open(os.path.join(test_dir, "hf_gdm_tried_0.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_0.inp"), os.path.join(scr_dir, "hf_gdm_tried_0.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_0.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 
'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['diis_gdm']}) with open(os.path.join(test_dir, "hf_gdm_tried_1.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm_tried_0.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_1.inp"), os.path.join(scr_dir, "hf_gdm_tried_1.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_1.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['gwh']}) with open(os.path.join(test_dir, "hf_gdm_tried_2.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm_tried_1.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_2.inp"), os.path.join(scr_dir, "hf_gdm_tried_2.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_2.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['rca']}) with open(os.path.join(test_dir, "hf_gdm_tried_3.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm_tried_2.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_3.inp"), os.path.join(scr_dir, "hf_gdm_tried_3.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_3.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['gdm']}) with open(os.path.join(test_dir, "hf_gdm_tried_4.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm_tried_3.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_4.inp"), os.path.join(scr_dir, "hf_gdm_tried_4.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_4.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['core+gdm']}) with open(os.path.join(test_dir, "hf_gdm_tried_5.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm_tried_4.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_5.inp"), os.path.join(scr_dir, "hf_gdm_tried_5.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_5.inp", output_file="hf_gdm.out") has_error = 
h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': None}) def test_opt_failed(self): shutil.copyfile(os.path.join(test_dir, "hf_opt_failed.qcinp"), os.path.join(scr_dir, "hf_opt_failed.qcinp")) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed.qcout"), os.path.join(scr_dir, "hf_opt_failed.qcout")) h = QChemErrorHandler(input_file="hf_opt_failed.qcinp", output_file="hf_opt_failed.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Geometry optimization failed'], 'actions': ['increase_iter']}) with open(os.path.join(test_dir, "hf_opt_failed_tried_0.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_opt_failed.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed_tried_0.qcinp"), os.path.join(scr_dir, "hf_opt_failed_tried_0.qcinp")) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed.qcout"), os.path.join(scr_dir, "hf_opt_failed.qcout")) h = QChemErrorHandler(input_file="hf_opt_failed_tried_0.qcinp", output_file="hf_opt_failed.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Geometry optimization failed'], 'actions': ['GDIIS']}) with open(os.path.join(test_dir, "hf_opt_failed_tried_1.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_opt_failed_tried_0.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed_tried_1.qcinp"), os.path.join(scr_dir, "hf_opt_failed_tried_1.qcinp")) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed.qcout"), os.path.join(scr_dir, "hf_opt_failed.qcout")) h = QChemErrorHandler(input_file="hf_opt_failed_tried_1.qcinp", output_file="hf_opt_failed.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Geometry optimization failed'], 'actions': ['CartCoords']}) with open(os.path.join(test_dir, "hf_opt_failed_tried_2.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_opt_failed_tried_1.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed_tried_2.qcinp"), os.path.join(scr_dir, "hf_opt_failed_tried_2.qcinp")) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed.qcout"), os.path.join(scr_dir, "hf_opt_failed.qcout")) h = QChemErrorHandler(input_file="hf_opt_failed_tried_2.qcinp", output_file="hf_opt_failed.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Geometry optimization failed'], 'actions': None}) def test_autoz_error(self): shutil.copyfile(os.path.join(test_dir, "qunino_vinyl.qcinp"), os.path.join(scr_dir, "qunino_vinyl.qcinp")) shutil.copyfile(os.path.join(test_dir, "qunino_vinyl.qcout"), os.path.join(scr_dir, "qunino_vinyl.qcout")) h = QChemErrorHandler(input_file="qunino_vinyl.qcinp", output_file="qunino_vinyl.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found', 'autoz error'], 'actions': ['disable symmetry']}) with open(os.path.join(test_dir, "qunino_vinyl_nosymm.qcinp")) 
as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "qunino_vinyl.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "qunino_vinyl_nosymm.qcinp"), os.path.join(scr_dir, "qunino_vinyl_nosymm.qcinp")) shutil.copyfile(os.path.join(test_dir, "qunino_vinyl.qcout"), os.path.join(scr_dir, "qunino_vinyl.qcout")) h = QChemErrorHandler(input_file="qunino_vinyl_nosymm.qcinp", output_file="qunino_vinyl.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found', 'autoz error'], 'actions': None}) def test_nan_error(self): shutil.copyfile(os.path.join(test_dir, "thiane_nan.inp"), os.path.join(scr_dir, "thiane_nan.inp")) shutil.copyfile(os.path.join(test_dir, "thiane_nan.out"), os.path.join(scr_dir, "thiane_nan.out")) h = QChemErrorHandler(input_file="thiane_nan.inp", output_file="thiane_nan.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['NAN values'], 'actions': ['use tighter grid']}) with open(os.path.join(test_dir, "thiane_nan_dense_grid.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "thiane_nan.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "thiane_nan_dense_grid.inp"), os.path.join(scr_dir, "thiane_nan_dense_grid.inp")) shutil.copyfile(os.path.join(test_dir, "thiane_nan.out"), os.path.join(scr_dir, "thiane_nan.out")) h = QChemErrorHandler(input_file="thiane_nan_dense_grid.inp", output_file="thiane_nan.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['NAN values'], 'actions': None}) shutil.copyfile(os.path.join(test_dir, "h2o_nan.qcinp"), os.path.join(scr_dir, "h2o_nan.qcinp")) shutil.copyfile(os.path.join(test_dir, "h2o_nan.qcout"), os.path.join(scr_dir, "h2o_nan.qcout")) h = QChemErrorHandler(input_file="h2o_nan.qcinp", output_file="h2o_nan.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['NAN values'], 'actions': ['use tighter grid']}) with open(os.path.join(test_dir, "h2o_nan_dense_grid.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "h2o_nan.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_no_input_text(self): shutil.copyfile(os.path.join(test_dir, "no_reading.qcinp"), os.path.join(scr_dir, "no_reading.qcinp")) shutil.copyfile(os.path.join(test_dir, "no_reading.qcout"), os.path.join(scr_dir, "no_reading.qcout")) h = QChemErrorHandler(input_file="no_reading.qcinp", output_file="no_reading.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Exit Code 134', 'Molecular charge is not found', 'No input text'], 'actions': ['disable symmetry']}) with open(os.path.join(test_dir, "no_reading_nosymm.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "no_reading.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_exit_code_134(self): shutil.copyfile(os.path.join(test_dir, "exit_code_134.qcinp"), os.path.join(scr_dir, "exit_code_134.qcinp")) shutil.copyfile(os.path.join(test_dir, "exit_code_134.qcout"), os.path.join(scr_dir, 
"exit_code_134.qcout")) h = QChemErrorHandler(input_file="exit_code_134.qcinp", output_file="exit_code_134.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Exit Code 134', 'Molecular charge is not found'], 'actions': ['use tight integral threshold']}) with open(os.path.join(test_dir, "exit_code_134_tight_thresh.qcinp"))\ as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "exit_code_134.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_exit_code_134_after_scf_fix(self): shutil.copyfile(os.path.join(test_dir, "exit_134_after_scf_fix.qcinp"), os.path.join(scr_dir, "exit_134_after_scf_fix.qcinp")) shutil.copyfile(os.path.join(test_dir, "exit_134_after_scf_fix.qcout"), os.path.join(scr_dir, "exit_134_after_scf_fix.qcout")) h = QChemErrorHandler(input_file="exit_134_after_scf_fix.qcinp", output_file="exit_134_after_scf_fix.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Exit Code 134', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['use tight integral threshold']}) with open(os.path.join(test_dir, "exit_134_after_scf_fix_tight_thresh.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "exit_134_after_scf_fix.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "exit_134_after_scf_fix_tight_thresh.qcinp"), os.path.join(scr_dir, "exit_134_after_scf_fix_tight_thresh.qcinp")) shutil.copyfile(os.path.join(test_dir, "exit_134_after_scf_fix.qcout"), os.path.join(scr_dir, "exit_134_after_scf_fix.qcout")) qchem_job = QchemJob(qchem_cmd="qchem -np 24", input_file="exit_134_after_scf_fix_tight_thresh.qcinp", output_file="exit_134_after_scf_fix.qcout", alt_cmd={"half_cpus": "qchem -np 12", "openmp": "qchem -nt 24"}) h = QChemErrorHandler(input_file="exit_134_after_scf_fix_tight_thresh.qcinp", output_file="exit_134_after_scf_fix.qcout", qchem_job=qchem_job) has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Exit Code 134', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['half_cpus']}) def test_ts_opt(self): shutil.copyfile(os.path.join(test_dir, "ts_cf3_leave.qcinp"), os.path.join(scr_dir, "ts_cf3_leave.qcinp")) shutil.copyfile(os.path.join(test_dir, "ts_cf3_leave.qcout"), os.path.join(scr_dir, "ts_cf3_leave.qcout")) h = QChemErrorHandler(input_file="ts_cf3_leave.qcinp", output_file="ts_cf3_leave.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Exit Code 134', 'Geometry optimization failed'], 'actions': ['increase_iter']}) with open(os.path.join(test_dir, "ts_cf3_leave_reset_first_step_mol.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "ts_cf3_leave.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_scf_in_aimd_reset(self): shutil.copyfile(os.path.join(test_dir, "h2o_aimd.qcinp"), os.path.join(scr_dir, "h2o_aimd.qcinp")) shutil.copyfile(os.path.join(test_dir, "h2o_aimd.qcout"), os.path.join(scr_dir, "h2o_aimd.qcout")) h = QChemErrorHandler(input_file="h2o_aimd.qcinp", output_file="h2o_aimd.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() 
self.assertEqual(d, {'errors': ['Bad SCF convergence'], 'actions': ['reset']}) with open(os.path.join(test_dir, "h2o_aimd_reset.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "h2o_aimd.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_json_serializable(self): q1 = QChemErrorHandler() str1 = json.dumps(q1, cls=MontyEncoder) q2 = json.loads(str1, cls=MontyDecoder) self.assertEqual(q1.as_dict(), q2.as_dict()) shutil.copyfile(os.path.join(test_dir, "qunino_vinyl.qcinp"), os.path.join(scr_dir, "qunino_vinyl.qcinp")) shutil.copyfile(os.path.join(test_dir, "qunino_vinyl.qcout"), os.path.join(scr_dir, "qunino_vinyl.qcout")) q3 = QChemErrorHandler(input_file="qunino_vinyl.qcinp", output_file="qunino_vinyl.qcout") q3.check() q3.correct() for od in q3.outdata: od.pop("input") str3 = json.dumps(q3, cls=MontyEncoder) q4 = json.loads(str3, cls=MontyDecoder) self.assertEqual(q3.as_dict(), q4.as_dict()) def tearDown(self): shutil.rmtree(scr_dir) pass if __name__ == "__main__": unittest.main()
alberthxf/custodian
custodian/qchem/tests/test_handlers.py
Python
mit
35,404
[ "pymatgen" ]
31004221fec859387c4da13664e9bcdf16a4ce8094203cb9667bf34f6bdeb996
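# The tests above all drive the same loop: check() flags an error, correct()
# applies the next fix in a fixed ladder and reports it in d['actions'], and
# actions becomes None once the ladder is exhausted. A toy handler with that
# shape (not the real custodian API, just the contract the assertions exercise):
class ToyHandler(object):
    fixes = ["increase_iter", "rca_diis", "gwh"]   # tried in order

    def __init__(self):
        self.attempt = 0

    def check(self, output_text):
        return "SCF failed" in output_text

    def correct(self):
        if self.attempt >= len(self.fixes):
            # ladder exhausted: report the error with no remaining action
            return {"errors": ["Bad SCF convergence"], "actions": None}
        action = self.fixes[self.attempt]
        self.attempt += 1
        return {"errors": ["Bad SCF convergence"], "actions": [action]}

h = ToyHandler()
while h.check("SCF failed"):
    d = h.correct()
    print(d)
    if d["actions"] is None:
        break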
#!/usr/bin/python

#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger

#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.

#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

import sys
import optparse
import json
from bitstream import Byte_Bank,Bitbuffer,last_element

#This takes Huffman code tables in JSON format, like the following:
#
# [[1], 0,
#  [0, 1], 1,
#  [0, 0, 1], 2,
#  [0, 0, 0], 3]
#
#Where the first value in each pair is the leading bits
#and each trailing value is the Huffman value.
#The order of each pair in the list is irrelevant.
#
#It outputs a 2-dimensional array with a variable number of rows,
#each containing 512 columns of bs_huffman_table structs.
#Each row is a non-leaf node in the Huffman tree
#and each column is a bitstream reader context state.
#The value of the bs_table_struct is the next context/next node
#(encoded as a single int, to save space) and/or the final leaf value.
#
#Walking the tree is a simple matter of starting from table[0][context]
#(reading in a new context from the byte stream, if necessary)
#and continuing along table[node][context] until the next node is 0
#before returning the final value.
class Counter:
    def __init__(self):
        self.value = 0

    def __int__(self):
        value = self.value
        self.value += 1
        return value


class Huffman_Node:
    def __init__(self, value=None, bit_0=None, bit_1=None):
        self.value = value
        self.id = 0
        self.bit_0 = bit_0
        self.bit_1 = bit_1

    def is_leaf(self):
        return self.value is not None

    def __repr__(self):
        if (self.value is not None):
            return "Huffman_Node(value=%s)" % (repr(self.value))
        else:
            return "Huffman_Node(bit_0=%s, bit_1=%s)" % \
                (repr(self.bit_0), repr(self.bit_1))

    def enumerate_nodes(self, counter=None):
        if (not self.is_leaf()):
            if (counter is None):
                counter = Counter()
            self.id = int(counter)
            self.bit_0.enumerate_nodes(counter)
            self.bit_1.enumerate_nodes(counter)

    def populate_jump_table(self, little_endian=False):
        if (self.value is None):
            self.jump_table = ([(0, 0, None),    #input context 0
                                (0, 0, None)] +  #input context 1
                               [next_read_huffman_state(context.bitbuffer(),
                                                        self,
                                                        little_endian)
                                for context in Byte_Bank.contexts()
                                if (context.size > 0)])
            #pass the endianness down so child tables match the root's
            self.bit_0.populate_jump_table(little_endian)
            self.bit_1.populate_jump_table(little_endian)

    def jump_tables(self):
        if (not self.is_leaf()):
            yield (self.id, self.jump_table)
            for table in self.bit_0.jump_tables():
                yield table
            for table in self.bit_1.jump_tables():
                yield table


def build_huffman_tree(frequencies, bits=tuple()):
    if (bits in frequencies):
        return Huffman_Node(value=frequencies[bits])
    else:
        return Huffman_Node(bit_0=build_huffman_tree(frequencies, bits + (0,)),
                            bit_1=build_huffman_tree(frequencies, bits + (1,)))


def next_read_huffman_state(bit_stream, tree, little_endian):
    if (tree.is_leaf()):
        #reached a leaf node, so return byte bank and node
        return (int(bit_stream.byte_bank()), tree.id, tree.value)
    elif (len(bit_stream) == 0):
        #exhausted byte bank, so return empty bank and node
        return (0, tree.id, None)
    elif (little_endian):
        #progress through bit stream in little-endian order
        if (bit_stream[0]):
            return next_read_huffman_state(bit_stream[1:], tree.bit_1,
                                           little_endian)
        else:
            return next_read_huffman_state(bit_stream[1:], tree.bit_0,
                                           little_endian)
    else:
        #progress through bit stream in big-endian order
        if (bit_stream[-1]):
            return next_read_huffman_state(bit_stream[:-1], tree.bit_1,
                                           little_endian)
        else:
            return next_read_huffman_state(bit_stream[:-1], tree.bit_0,
                                           little_endian)


def encode_huffman_value(value, next_node, next_context):
    if (value is not None):
        return ("{0x%X, %d}" % ((next_node << Byte_Bank.size()) | next_context,
                                value))
    else:
        return ("{0x%X, 0}" % ((next_node << Byte_Bank.size()) | next_context))


if (__name__ == '__main__'):
    parser = optparse.OptionParser()
    parser.add_option("-i", dest='input', help='input JSON file')
    parser.add_option('--le', dest='little_endian', action='store_true',
                      default=False, help='generate a little-endian jump table')
    (options, args) = parser.parse_args()
    if (options.input is None):
        print "a JSON file is required"
        sys.exit(1)

    json_data = json.loads(open(options.input, "r").read())
    tree = build_huffman_tree(dict([(tuple(bits), value)
                                    for (bits, value) in
                                    zip(json_data[::2], json_data[1::2])]))
    tree.enumerate_nodes()
    tree.populate_jump_table(options.little_endian)
    jump_tables = dict(tree.jump_tables())
    print "{"
    for (last_row, row) in last_element([jump_tables[key]
                                         for key in sorted(jump_tables.keys())]):
        print "  {"
        for (last_col, col) in last_element(row):
            (next_context, next_node, value) = col
            sys.stdout.write("    %s" % (encode_huffman_value(value,
                                                              next_node,
                                                              next_context)))
            if (last_col):
                print ""
            else:
                print ","
        if (last_row):
            print "  }"
        else:
            print "  },"
    print "}"
    print >>sys.stderr,"%d rows total" % (len(jump_tables.keys()))
Excito/audiotools
src/huffman.py
Python
gpl-2.0
7,440
[ "Brian" ]
28e4dd289c986813a3a147fb2591abf93abcc56e1686aa3a28a67645ab2a9ecc
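The huffman.py record above flattens a Huffman tree into per-context jump tables over audiotools' Byte_Bank of buffered bits. A minimal Python 3 sketch of the underlying (bit-path -> value) tree walk, independent of the Byte_Bank machinery; all names below are illustrative, not from audiotools itself:

# Build a tree from the same JSON-style layout the script reads:
# a mapping from tuples of bits to decoded values.
def build_tree(frequencies, bits=()):
    if bits in frequencies:
        return ('leaf', frequencies[bits])
    return ('node',
            build_tree(frequencies, bits + (0,)),
            build_tree(frequencies, bits + (1,)))

def decode(tree, bitstream):
    node = tree
    for bit in bitstream:
        node = node[1] if bit == 0 else node[2]
        if node[0] == 'leaf':
            yield node[1]
            node = tree  # restart at the root for the next symbol

table = {(0,): 'a', (1, 0): 'b', (1, 1): 'c'}
tree = build_tree(table)
print(''.join(decode(tree, [0, 1, 0, 1, 1])))  # -> 'abc'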
# Author: Prabhu Ramachandran <prabhu_r at users dot sf dot net>
# Copyright (c) 2006, Enthought, Inc.
# License: BSD Style.

# Enthought library imports.
from traits.api import Instance
from tvtk.api import tvtk

# Local imports
from mayavi.filters.poly_data_normals import PolyDataNormals
from mayavi.core.pipeline_info import PipelineInfo


######################################################################
# `WarpScalar` class.
######################################################################
class WarpScalar(PolyDataNormals):
    """Warps the input data along a particular direction (either the
    normals or a specified direction) with a scale specified by the
    local scalar value.  Useful for making carpet plots.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The actual TVTK filter that this class manages.
    filter = Instance(tvtk.WarpScalar, args=(), allow_none=False,
                      record=True)

    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['scalars'])

    output_info = PipelineInfo(datasets=['any'],
                               attribute_types=['any'],
                               attributes=['any'])
dmsurti/mayavi
mayavi/filters/warp_scalar.py
Python
bsd-3-clause
1,273
[ "Mayavi" ]
05da72f2e57930b3b1601acf6485d58159e3d8751f1f612f2eb000f764e7f0cd
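WarpScalar above wraps tvtk.WarpScalar as a Mayavi pipeline filter. A hedged usage sketch, assuming a working mayavi install; mlab.pipeline.array2d_source and mlab.pipeline.warp_scalar are the standard mlab pipeline helpers that reach this filter:

import numpy as np
from mayavi import mlab

i, j = np.mgrid[-3:3:100j, -3:3:100j]
z = np.sin(i ** 2 + j ** 2)

src = mlab.pipeline.array2d_source(z)    # scalar field as an image source
warped = mlab.pipeline.warp_scalar(src)  # the WarpScalar filter wrapped above
mlab.pipeline.surface(warped)            # carpet plot of the warped data
mlab.show()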
"""Reads the RCV1-v2 dataset for Multi-label Classification Important: Train and test sets are _switched_, since the original split leaves the sides unbalanced. Visit: http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/lyrl2004_rcv1v2_README.htm""" # noqa import os import gzip import collections import anna.data.utils as utils from anna.data.api import Doc NAME = "rcv1-v2" HOST = "http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/" FILE_URL_FORMAT = HOST + "a12-token-files/{}" TOPIC_URL_FORMAT = HOST + "a08-topic-qrels/{}" TEST_FILES = ["lyrl2004_tokens_test_pt0.dat.gz", "lyrl2004_tokens_test_pt1.dat.gz", "lyrl2004_tokens_test_pt2.dat.gz", "lyrl2004_tokens_test_pt3.dat.gz"] TRAIN_FILE = "lyrl2004_tokens_train.dat.gz" TOPICS_FILE = "rcv1-v2.topics.qrels.gz" FILE_URLS = {f: FILE_URL_FORMAT.format(f) for f in TEST_FILES + [TRAIN_FILE]} FILE_URLS[TOPICS_FILE] = TOPIC_URL_FORMAT.format(TOPICS_FILE) TEST_FINAL = "test.dat" TRAIN_FINAL = "train.dat" TOPICS_FINAL = "topics.dat" def fetch_and_parse(data_dir): """ Fetches and parses the RCV1-v2 dataset. Args: data_dir (str): absolute path to the dir where datasets are stored Returns: train_docs (tf.data.Dataset): annotated articles for training test_docs (tf.data.Dataset): annotated articles for testing unused_docs (tf.data.Dataset): unused docs labels (list[str]): final list of labels, from most to least frequent """ rcv_dir = os.path.join(data_dir, NAME) return utils.mlc_tfrecords(rcv_dir, lambda: parse(fetch(data_dir))) def parse(rcv1_dir): """ Parses the RCV1-v2 dataset. Args: rcv1_dir (str): absolute path to the extracted RCV1-v2 dir Returns: train_docs (list[Doc]): annotated articles for training test_docs (list[Doc]): annotated articles for testing unused_docs (list[Doc]): unused docs labels (list[str]): final list of labels, from most to least frequent """ train_docs = [] test_docs = [] unused_docs = [] label_counts = collections.Counter() topics = collections.defaultdict(set) topics_path = os.path.join(rcv1_dir, TOPICS_FINAL) with open(topics_path, "r") as f: for line in f: split = line.split(" ") topic = split[0].strip() doc_id = split[1].strip() topics[doc_id].add(topic) # IMPORTANT: we switch the order of original train and test for path, docs, is_train in [(TEST_FINAL, train_docs, True), (TRAIN_FINAL, test_docs, False)]: path = os.path.join(rcv1_dir, path) with open(path, "r") as f: doc_id = None text = None for line in f: if line == "\n": if not doc_id or not text: print("What!") exit(0) labels = list(topics[doc_id]) if is_train: label_counts.update(labels) docs.append(Doc(doc_id, None, None, None, text, labels)) doc_id = None text = None elif line.startswith(".I"): doc_id = line.split(" ")[1].strip() elif line.startswith(".W"): pass elif not text: text = line else: text += " " + line # Get list of labels, from frequent to rare labels = [l[0] for l in label_counts.most_common()] return train_docs, test_docs, unused_docs, labels def fetch(data_dir): """ Fetches the tokenized RCV1-v2 dataset. 
Args: data_dir (str): absolute path to the folder where datasets are stored Returns: final_dir (str): absolute path where RCV1-v2 was extracted """ # Create folder rcv1_dir = os.path.join(data_dir, NAME) utils.create_folder(rcv1_dir) # Download all datasets for f, url in FILE_URLS.items(): path = os.path.join(rcv1_dir, f) if not os.path.exists(path): utils.urlretrieve(url, path) # Extract topics path = os.path.join(rcv1_dir, TOPICS_FINAL) if not os.path.exists(path): src = os.path.join(rcv1_dir, TOPICS_FILE) with open(path, "wb") as o, gzip.open(src, "rb") as i: o.write(i.read()) # Extract train path = os.path.join(rcv1_dir, TRAIN_FINAL) if not os.path.exists(path): src = os.path.join(rcv1_dir, TRAIN_FILE) with open(path, "wb") as o, gzip.open(src, "rb") as i: o.write(i.read()) # Extract test path = os.path.join(rcv1_dir, TEST_FINAL) if not os.path.exists(path): with open(path, "wb") as o: for p in TEST_FILES: src = os.path.join(rcv1_dir, p) with gzip.open(src, "rb") as i: o.write(i.read()) return rcv1_dir
jpbottaro/anna
anna/data/dataset/rcv1.py
Python
mit
5,033
[ "VisIt" ]
f47f2dbb2f70a69f13f8fcb3860e60b5eebdba85a18e7f51578ab08a37c15879
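The parse() loop above walks the lyrl2004 token format, where ".I <id>" starts a document, ".W" precedes the text, and a blank line ends the record. A self-contained sketch of the same loop over an in-memory sample (the document id and tokens are made up):

import io

sample = io.StringIO(
    ".I 2286\n"
    ".W\n"
    "recov recov recov excel\n"
    "ghana cocoa crop\n"
    "\n"
)

docs = []
doc_id, text = None, None
for line in sample:
    if line == "\n":
        docs.append((doc_id, text))
        doc_id, text = None, None
    elif line.startswith(".I"):
        doc_id = line.split(" ")[1].strip()
    elif line.startswith(".W"):
        pass
    elif not text:
        text = line
    else:
        text += " " + line

print(docs)  # [('2286', 'recov recov recov excel\n ghana cocoa crop\n')]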
from __future__ import (absolute_import, division, print_function) # pylint: disable=no-init,invalid-name,too-few-public-methods,unused-import from mantid.kernel import * from mantid.simpleapi import * from mantid.api import * from mantid.geometry import * import os class PoldiCompound(object): """Small helper class to handle the results from PoldiCrystalFileParser.""" def __init__(self, name, elements): self._spacegroup = "" self._atomString = "" self._cellDict = "" self._name = name self.assign(elements) def assign(self, elements): for c in elements: if c[0] == "atoms": self._atomString = ';'.join(c[1:]) elif c[0] == "lattice": cellNames = ['a', 'b', 'c', 'alpha', 'beta', 'gamma'] self._cellDict = dict(list(zip(cellNames, c[1:]))) elif c[0] == "spacegroup": self._spacegroup = c[1] def getAtomString(self): return self._atomString def getCellParameters(self): return self._cellDict def getSpaceGroup(self): return self._spacegroup def getName(self): return self._name def raiseParseErrorException(message): raise ParseException(message) # pylint: disable=too-many-instance-attributes class PoldiCrystalFileParser(object): """Small parser for crystal structure files used at POLDI This class encapsulates a small parser for crystal structure files that are used at POLDI. The files contains information about the lattice, the space group and the basis (atoms in the asymmetric unit). The file format is defined as follows: Compound_1 { Lattice: [1 - 6 floats] => a, b, c, alpha, beta, gamma Spacegroup: [valid space group symbol] Atoms; { Element x y z [occupancy [U_eq]] Element x y z [occupancy [U_eq]] } } Compound_2 { ... } The parser returns a list of PoldiCompound objects with the compounds that were found in the file. These are then processed by PoldiCreatePeaksFromFile to generate arguments for calling PoldiCreatePeaksFromCell. 
""" def __init__(self): self.elementSymbol = Word(alphas, min=1, max=2).setFailAction( lambda o, s, loc, token: raiseParseErrorException("Element symbol must be one or two characters.")) self.integerNumber = Word(nums) self.decimalSeparator = Word('./', max=1) self.floatNumber = Combine( self.integerNumber + Optional(self.decimalSeparator + Optional(self.integerNumber)) ) self.whiteSpace = Suppress(White()) self.atomLine = Combine( self.elementSymbol + self.whiteSpace + delimitedList(self.floatNumber, delim=White()), joinString=' ' ) self.keyValueSeparator = Suppress(Literal(":")) self.groupOpener = Suppress(Literal('{')) self.groupCloser = Suppress(Literal('}')) self.atomsGroup = Group(CaselessLiteral("atoms") + self.keyValueSeparator + self.groupOpener + delimitedList(self.atomLine, delim=lineEnd) + self.groupCloser) self.unitCell = Group(CaselessLiteral("lattice") + self.keyValueSeparator + delimitedList( self.floatNumber, delim=White())) self.spaceGroup = Group(CaselessLiteral("spacegroup") + self.keyValueSeparator + Word( alphanums + "-" + ' ' + '/')) self.compoundContent = Each([self.atomsGroup, self.unitCell, self.spaceGroup]).setFailAction( lambda o, s, loc, token: raiseParseErrorException( "One of 'Lattice', 'SpaceGroup', 'Atoms' is missing or contains errors.")) self.compoundName = Word(alphanums + '_') self.compound = Group(self.compoundName + Optional(self.whiteSpace) + self.groupOpener + self.compoundContent + self.groupCloser) self.comment = Suppress(Literal('#') + restOfLine) self.compounds = Optional(self.comment) + OneOrMore(self.compound).ignore(self.comment) + stringEnd def __call__(self, contentString): parsedContent = None if os.path.isfile(contentString): parsedContent = self._parseFile(contentString) else: parsedContent = self._parseString(contentString) return [PoldiCompound(x[0], x[1:]) for x in parsedContent] def _parseFile(self, filename): return self.compounds.parseFile(filename) def _parseString(self, stringContent): return self.compounds.parseString(stringContent) class PoldiCreatePeaksFromFile(PythonAlgorithm): _parser=None def category(self): return "SINQ\\Poldi" def seeAlso(self): return [ "PoldiCreatePeaksFromCell" ] def name(self): return "PoldiLoadCrystalData" def summary(self): return ("The algorithm reads a POLDI crystal structure file and creates a WorkspaceGroup that contains tables" "with the expected reflections.") def PyInit(self): self.declareProperty( FileProperty(name="InputFile", defaultValue="", action=FileAction.Load, extensions=["dat"]), doc="A file with POLDI crystal data.") self.declareProperty("LatticeSpacingMin", 0.5, direction=Direction.Input, doc="Lowest allowed lattice spacing.") self.declareProperty("LatticeSpacingMax", 0.0, direction=Direction.Input, doc="Largest allowed lattice spacing.") self.declareProperty( WorkspaceProperty(name="OutputWorkspace", defaultValue="", direction=Direction.Output), doc="WorkspaceGroup with reflection tables.") self._parser = PoldiCrystalFileParser() def PyExec(self): crystalFileName = self.getProperty("InputFile").value try: # Try parsing the supplied file using PoldiCrystalFileParser compounds = self._parser(crystalFileName) dMin = self.getProperty("LatticeSpacingMin").value dMax = self.getProperty("LatticeSpacingMax").value workspaces = [] # Go through found compounds and run "_createPeaksFromCell" for each of them # If two compounds have the same name, a warning is written to the log. 
for compound in compounds: if compound.getName() in workspaces: self.log().warning("A compound with the name '" + compound.getName() + "' has already been created. Please check the file '" + crystalFileName + "'") else: workspaces.append(self._createPeaksFromCell(compound, dMin, dMax)) self.setProperty("OutputWorkspace", GroupWorkspaces(workspaces)) # All parse errors are caught here and logged as errors except ParseException as error: errorString = "Could not parse input file '" + crystalFileName + "'.\n" errorString += "The parser reported the following error:\n\t" + str(error) self.log().error(errorString) def _createPeaksFromCell(self, compound, dMin, dMax): if not SpaceGroupFactory.isSubscribedSymbol(compound.getSpaceGroup()): raise RuntimeError("SpaceGroup '" + compound.getSpaceGroup() + "' is not registered.") PoldiCreatePeaksFromCell(SpaceGroup=compound.getSpaceGroup(), Atoms=compound.getAtomString(), LatticeSpacingMin=dMin, LatticeSpacingMax=dMax, OutputWorkspace=compound.getName(), **compound.getCellParameters()) return compound.getName() try: from pyparsing import * AlgorithmFactory.subscribe(PoldiCreatePeaksFromFile) except ImportError: logger.debug('Failed to subscribe algorithm PoldiCreatePeaksFromFile; Python package pyparsing' 'may be missing (https://pypi.python.org/pypi/pyparsing)')
ScreamingUdder/mantid
Framework/PythonInterface/plugins/algorithms/PoldiCreatePeaksFromFile.py
Python
gpl-3.0
8,381
[ "CRYSTAL" ]
c241b862f4ceef9515afd0ba8fa9259dd2ef3a0127185e94a8cbec455e7e0f4c
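PoldiCrystalFileParser builds its grammar from pyparsing combinators. A minimal sketch in the same style for just the lattice line, assuming pyparsing is installed; Regex stands in for the Combine-based float number in the original:

from pyparsing import CaselessLiteral, OneOrMore, Regex, Suppress

float_number = Regex(r"[0-9]+(\.[0-9]*)?")
lattice = (CaselessLiteral("lattice") + Suppress(":")
           + OneOrMore(float_number))

print(lattice.parseString("Lattice: 5.431 5.431 5.431 90 90 90"))
# -> ['lattice', '5.431', '5.431', '5.431', '90', '90', '90']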
#!/usr/bin/python
# -*- coding: utf-8 -*-

import sys

from collections import OrderedDict

from gkeys.action_map import Action_Map, Available_Actions

__version__ = '0.3'
__license__ = 'GPLv2'


if sys.version_info[0] >= 3:
    py_input = input
    _unicode = str
else:
    py_input = raw_input
    _unicode = unicode


subdata = OrderedDict()
for cmd in Available_Actions:
    subdata[cmd] = Action_Map[cmd]['desc']

Gkeys_Map = {
    'options': ['help', 'config', 'debug', 'version'],
    'desc': 'OpenPGP/GPG key management tool',
    'long_desc': '''Gentoo Keys (gkeys) is a Python based project that aims to manage
the GPG keys used for validation on users and Gentoo's infrastructure servers.
Gentoo Keys is able to verify GPG keys used for Gentoo's release media,
such as installation CD's, Live DVD's, packages and other GPG signed documents.''',
    'sub-cmds': subdata,
    'authors': ['Brian Dolbec <dolsen@gentoo.org>',
                'Pavlos Ratis <dastergon@gentoo.org>',
                'aeroniero33 <justthisthing@gmail.com>'],
}
gentoo/gentoo-keys
gkeys/gkeys/__init__.py
Python
gpl-2.0
1,017
[ "Brian" ]
9c30bd39a2593dc5d5d6706be2bbe56894b6bd1d53fc6c356abf5b50540baf5c
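The gkeys module above builds its sub-command table by copying each action's desc out of Action_Map. The same pattern with hypothetical stand-in data (the action names below are illustrative, not gkeys' real ones):

from collections import OrderedDict

Available_Actions = ['list-seeds', 'fetch-seeds']
Action_Map = {
    'list-seeds': {'desc': 'List seed file contents'},
    'fetch-seeds': {'desc': 'Download the selected seed file(s)'},
}

subdata = OrderedDict()
for cmd in Available_Actions:
    subdata[cmd] = Action_Map[cmd]['desc']

print(list(subdata.items()))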
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import (
    List, Typed, ForwardTyped, ForwardInstance, observe, set_default
)

from enaml.core.declarative import d_

from .control import Control, ProxyControl


#: Delay the import of vtk until needed. This removes the hard dependency
#: on vtk for the rest of the Enaml code base.
def vtkRenderer():
    from vtk import vtkRenderer
    return vtkRenderer


class ProxyVTKCanvas(ProxyControl):
    """ The abstract definition of a proxy VTKCanvas object.

    """
    #: A reference to the VTKCanvas declaration.
    declaration = ForwardTyped(lambda: VTKCanvas)

    def set_renderer(self, renderer):
        raise NotImplementedError

    def set_renderers(self, renderers):
        raise NotImplementedError

    def render(self):
        raise NotImplementedError


class VTKCanvas(Control):
    """ A control which can be used to embed vtk renderers.

    """
    #: The vtk renderer to display in the window. This should be used
    #: if only a single renderer is required for the scene.
    renderer = d_(ForwardInstance(vtkRenderer))

    #: The list of vtk renderers to display in the window. This should
    #: be used if multiple renderers are required for the scene.
    renderers = d_(List(ForwardInstance(vtkRenderer)))

    #: A VTKCanvas expands freely in height and width by default.
    hug_width = set_default('ignore')
    hug_height = set_default('ignore')

    #: A reference to the ProxyVTKCanvas object.
    proxy = Typed(ProxyVTKCanvas)

    def render(self):
        """ Request a render of the underlying scene.

        """
        if self.proxy_is_active:
            self.proxy.render()

    #--------------------------------------------------------------------------
    # Observers
    #--------------------------------------------------------------------------
    @observe('renderer', 'renderers')
    def _update_proxy(self, change):
        """ An observer which sends state change to the proxy.

        """
        # The superclass handler implementation is sufficient.
        super(VTKCanvas, self)._update_proxy(change)
viz4biz/PyDataNYC2015
enaml/vtk_canvas.py
Python
apache-2.0
2,430
[ "VTK" ]
c998fe959591398dc24a93ae7fca24331ffc852f94e4369c2072a2a42bd5cb82
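VTKCanvas above defers the vtk import and accepts ready-made vtkRenderer instances. A sketch of building such a renderer on the Python side; binding it to the `renderer` attribute of a VTKCanvas would happen in an .enaml view, which is not shown here:

from vtk import vtkActor, vtkConeSource, vtkPolyDataMapper, vtkRenderer

cone = vtkConeSource()
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(cone.GetOutputPort())

actor = vtkActor()
actor.SetMapper(mapper)

renderer = vtkRenderer()
renderer.AddActor(actor)
# This renderer object is what a VTKCanvas declaration would receive.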
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import os import libcst as cst import pathlib import sys from typing import (Any, Callable, Dict, List, Sequence, Tuple) def partition( predicate: Callable[[Any], bool], iterator: Sequence[Any] ) -> Tuple[List[Any], List[Any]]: """A stable, out-of-place partition.""" results = ([], []) for i in iterator: results[int(predicate(i))].append(i) # Returns trueList, falseList return results[1], results[0] class redisCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'create_instance': ('parent', 'instance_id', 'instance', ), 'delete_instance': ('name', ), 'export_instance': ('name', 'output_config', ), 'failover_instance': ('name', 'data_protection_mode', ), 'get_instance': ('name', ), 'import_instance': ('name', 'input_config', ), 'list_instances': ('parent', 'page_size', 'page_token', ), 'update_instance': ('update_mask', 'instance', ), 'upgrade_instance': ('name', 'redis_version', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: try: key = original.func.attr.value kword_params = self.METHOD_TO_PARAMS[key] except (AttributeError, KeyError): # Either not a method from the API or too convoluted to be sure. return updated # If the existing code is valid, keyword args come after positional args. # Therefore, all positional args must map to the first parameters. args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) if any(k.keyword.value == "request" for k in kwargs): # We've already fixed this file, don't fix it again. return updated kwargs, ctrl_kwargs = partition( lambda a: a.keyword.value not in self.CTRL_PARAMS, kwargs ) args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) request_arg = cst.Arg( value=cst.Dict([ cst.DictElement( cst.SimpleString("'{}'".format(name)), cst.Element(value=arg.value) ) # Note: the args + kwargs looks silly, but keep in mind that # the control parameters had to be stripped out, and that # those could have been passed positionally or by keyword. for name, arg in zip(kword_params, args + kwargs)]), keyword=cst.Name("request") ) return updated.with_changes( args=[request_arg] + ctrl_kwargs ) def fix_files( in_dir: pathlib.Path, out_dir: pathlib.Path, *, transformer=redisCallTransformer(), ): """Duplicate the input dir to the output dir, fixing file method calls. Preconditions: * in_dir is a real directory * out_dir is a real, empty directory """ pyfile_gen = ( pathlib.Path(os.path.join(root, f)) for root, _, files in os.walk(in_dir) for f in files if os.path.splitext(f)[1] == ".py" ) for fpath in pyfile_gen: with open(fpath, 'r') as f: src = f.read() # Parse the code and insert method call fixes. 
tree = cst.parse_module(src) updated = tree.visit(transformer) # Create the path and directory structure for the new file. updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) updated_path.parent.mkdir(parents=True, exist_ok=True) # Generate the updated source file at the corresponding path. with open(updated_path, 'w') as f: f.write(updated.code) if __name__ == '__main__': parser = argparse.ArgumentParser( description="""Fix up source that uses the redis client library. The existing sources are NOT overwritten but are copied to output_dir with changes made. Note: This tool operates at a best-effort level at converting positional parameters in client method calls to keyword based parameters. Cases where it WILL FAIL include A) * or ** expansion in a method call. B) Calls via function or method alias (includes free function calls) C) Indirect or dispatched calls (e.g. the method is looked up dynamically) These all constitute false negatives. The tool will also detect false positives when an API method shares a name with another method. """) parser.add_argument( '-d', '--input-directory', required=True, dest='input_dir', help='the input directory to walk for python files to fix up', ) parser.add_argument( '-o', '--output-directory', required=True, dest='output_dir', help='the directory to output files fixed via un-flattening', ) args = parser.parse_args() input_dir = pathlib.Path(args.input_dir) output_dir = pathlib.Path(args.output_dir) if not input_dir.is_dir(): print( f"input directory '{input_dir}' does not exist or is not a directory", file=sys.stderr, ) sys.exit(-1) if not output_dir.is_dir(): print( f"output directory '{output_dir}' does not exist or is not a directory", file=sys.stderr, ) sys.exit(-1) if os.listdir(output_dir): print( f"output directory '{output_dir}' is not empty", file=sys.stderr, ) sys.exit(-1) fix_files(input_dir, output_dir)
googleapis/gapic-generator-python
tests/integration/goldens/redis/scripts/fixup_redis_v1_keywords.py
Python
apache-2.0
6,396
[ "VisIt" ]
f811e72e1894296dd9b9e9c6ddf4d8d09eb24be8fc4e16fb4c41ac7cacfd91f2
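The libcst transformer above folds positional and keyword arguments into a single request dict while keeping retry/timeout/metadata as control kwargs. A sketch of the intended rewrite, plus a runnable demo of the partition helper it relies on:

# What the transformer does, conceptually:
#
#   client.create_instance(parent, instance_id, instance, timeout=10)
#
# becomes
#
#   client.create_instance(
#       request={'parent': parent, 'instance_id': instance_id,
#                'instance': instance},
#       timeout=10,
#   )

def partition(predicate, iterator):
    """A stable, out-of-place partition (same helper as above)."""
    results = ([], [])
    for i in iterator:
        results[int(predicate(i))].append(i)
    return results[1], results[0]  # trueList, falseList

evens, odds = partition(lambda n: n % 2 == 0, range(10))
print(evens, odds)  # [0, 2, 4, 6, 8] [1, 3, 5, 7, 9]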
#!/usr/bin/env python ############################################################################## # # Usage example for the procedure PPXF, which # implements the Penalized Pixel-Fitting (pPXF) method by # Cappellari M., & Emsellem E., 2004, PASP, 116, 138. # The example also shows how to include a library of templates # and how to mask gas emission lines if present. # The example is specialized for a fit to a SDSS spectrum. # # MODIFICATION HISTORY: # V1.0.0: Written by Michele Cappellari, Leiden 11 November 2003 # V1.1.0: Log rebin the galaxy spectrum. Show how to correct the velocity # for the difference in starting wavelength of galaxy and templates. # MC, Vicenza, 28 December 2004 # V1.1.1: Included explanation of correction for instrumental resolution. # After feedback from David Valls-Gabaud. MC, Venezia, 27 June 2005 # V2.0.0: Included example routine to determine the goodPixels vector # by masking known gas emission lines. MC, Oxford, 30 October 2008 # V2.0.1: Included instructions for high-redshift usage. Thanks to Paul Westoby # for useful feedback on this issue. MC, Oxford, 27 November 2008 # V2.0.2: Included example for obtaining the best-fitting redshift. # MC, Oxford, 14 April 2009 # V2.1.0: Bug fix: Force PSF_GAUSSIAN to produce a Gaussian with an odd # number of elements centered on the middle one. Many thanks to # Harald Kuntschner, Eric Emsellem, Anne-Marie Weijmans and # Richard McDermid for reporting problems with small offsets # in systemic velocity. MC, Oxford, 15 February 2010 # V2.1.1: Added normalization of galaxy spectrum to avoid numerical # instabilities. After feedback from Andrea Cardullo. # MC, Oxford, 17 March 2010 # V2.2.0: Perform templates convolution in linear wavelength. # This is useful for spectra with large wavelength range. # MC, Oxford, 25 March 2010 # V2.2.1: Updated for Coyote Graphics. MC, Oxford, 11 October 2011 # V2.3.0: Specialized for SDSS spectrum following requests from users. # Renamed PPXF_KINEMATICS_EXAMPLE_SDSS. MC, Oxford, 12 January 2012 # V3.0.0: Translated from IDL into Python. MC, Oxford, 10 December 2013 # V3.0.1: Uses MILES models library. MC, Oxford 11 December 2013 # V3.0.2: Support both Python 2.6/2.7 and Python 3.x. MC, Oxford, 25 May 2014 # ############################################################################## from __future__ import print_function import pyfits from scipy import ndimage import numpy as np import glob from time import clock from ppxf import ppxf import ppxf_util as util def ppxf_kinematics_example_sdss(): # Read SDSS DR8 galaxy spectrum taken from here http://www.sdss3.org/dr8/ # The spectrum is *already* log rebinned by the SDSS DR8 # pipeline and log_rebin should not be used in this case. # file = 'spectra/NGC3522_SDSS.fits' hdu = pyfits.open(file) t = hdu[1].data z = float(hdu[1].header["Z"]) # SDSS redshift estimate # Only use the wavelength range in common between galaxy and stellar library. # mask = (t.field('wavelength') > 3540) & (t.field('wavelength') < 7409) galaxy = t[mask].field('flux')/np.median(t[mask].field('flux')) # Normalize spectrum to avoid numerical issues wave = t[mask].field('wavelength') noise = galaxy*0 + 0.0156 # Assume constant noise per pixel here # The velocity step was already chosen by the SDSS pipeline # and we convert it below to km/s # c = 299792.458 # speed of light in km/s velscale = np.log(wave[1]/wave[0])*c FWHM_gal = 2.76 # SDSS has an instrumental resolution FWHM of 2.76A. 
# If the galaxy is at a significant redshift (z > 0.03), one would need to apply # a large velocity shift in PPXF to match the template to the galaxy spectrum. # This would require a large initial value for the velocity (V > 1e4 km/s) # in the input parameter START = [V,sig]. This can cause PPXF to stop! # The solution consists of bringing the galaxy spectrum roughly to the # rest-frame wavelength, before calling PPXF. In practice there is no # need to modify the spectrum in any way, given that a red shift # corresponds to a linear shift of the log-rebinned spectrum. # One just needs to compute the wavelength range in the rest-frame # and adjust the instrumental resolution of the galaxy observations. # This is done with the following three commented lines: # # z = 1.23 # Initial estimate of the galaxy redshift # wave = wave/(1+z) # Compute approximate restframe wavelength # FWHM_gal = FWHM_gal/(1+z) # Adjust resolution in Angstrom # Read the list of filenames from the Single Stellar Population library # by Vazdekis (2010, MNRAS, 404, 1639) http://miles.iac.es/. A subset # of the library is included for this example with permission # vazdekis = glob.glob('miles_models/Mun1.30Z*.fits') FWHM_tem = 2.51 # Vazdekis+10 spectra have a resolution FWHM of 2.51A. # Extract the wavelength range and logarithmically rebin one spectrum # to the same velocity scale of the SDSS galaxy spectrum, to determine # the size needed for the array which will contain the template spectra. # hdu = pyfits.open(vazdekis[0]) ssp = hdu[0].data h2 = hdu[0].header lamRange2 = h2['CRVAL1'] + np.array([0.,h2['CDELT1']*(h2['NAXIS1']-1)]) sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp, velscale=velscale) templates = np.empty((sspNew.size,len(vazdekis))) # Convolve the whole Vazdekis library of spectral templates # with the quadratic difference between the SDSS and the # Vazdekis instrumental resolution. Logarithmically rebin # and store each template as a column in the array TEMPLATES. # Quadratic sigma difference in pixels Vazdekis --> SDSS # The formula below is rigorously valid if the shapes of the # instrumental spectral profiles are well approximated by Gaussians. # FWHM_dif = np.sqrt(FWHM_gal**2 - FWHM_tem**2) sigma = FWHM_dif/2.355/h2['CDELT1'] # Sigma difference in pixels for j in range(len(vazdekis)): hdu = pyfits.open(vazdekis[j]) ssp = hdu[0].data ssp = ndimage.gaussian_filter1d(ssp,sigma) sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp, velscale=velscale) templates[:,j] = sspNew/np.median(sspNew) # Normalizes templates # The galaxy and the template spectra do not have the same starting wavelength. # For this reason an extra velocity shift DV has to be applied to the template # to fit the galaxy spectrum. We remove this artificial shift by using the # keyword VSYST in the call to PPXF below, so that all velocities are # measured with respect to DV. This assume the redshift is negligible. # In the case of a high-redshift galaxy one should de-redshift its # wavelength to the rest frame before using the line below (see above). # c = 299792.458 dv = (logLam2[0]-np.log(wave[0]))*c # km/s vel = c*z # Initial estimate of the galaxy velocity in km/s goodpixels = util.determine_goodpixels(np.log(wave),lamRange2,vel) # Here the actual fit starts. The best fit is plotted on the screen. # Gas emission lines are excluded from the pPXF fit using the GOODPIXELS keyword. # start = [vel, 180.] 
# (km/s), starting guess for [V,sigma] t = clock() pp = ppxf(templates, galaxy, noise, velscale, start, goodpixels=goodpixels, plot=True, moments=4, degree=10, vsyst=dv, clean=False) print("Formal errors:") print(" dV dsigma dh3 dh4") print("".join("%8.2g" % f for f in pp.error*np.sqrt(pp.chi2))) print('Elapsed time in PPXF: %.2f s' % (clock() - t)) # If the galaxy is at significant redshift z and the wavelength has been # de-redshifted with the three lines "z = 1.23..." near the beginning of # this procedure, the best-fitting redshift is now given by the following # commented line (equation 2 of Cappellari et al. 2009, ApJ, 704, L34): # #print, 'Best-fitting redshift z:', (z + 1)*(1 + sol[0]/c) - 1 #------------------------------------------------------------------------------ if __name__ == '__main__': ppxf_kinematics_example_sdss()
zpace/SparsePak-SFH
ppxf_kinematics_example_sdss.py
Python
mit
8,349
[ "Galaxy", "Gaussian" ]
7072d4b7b26f651920ec558004b811e257901a3098bc23ac32472bb8587b242e
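The template preparation above broadens the Vazdekis spectra by the quadratic FWHM difference between the two instrumental resolutions. The arithmetic as a standalone check; cdelt here is an illustrative wavelength step, whereas the script reads the real value from the FITS header key CDELT1:

import numpy as np

FWHM_gal = 2.76   # SDSS instrumental resolution (Angstrom)
FWHM_tem = 2.51   # Vazdekis template resolution (Angstrom)
cdelt = 0.9       # illustrative template wavelength step (Angstrom/pixel)

# Quadratic difference, valid when both profiles are close to Gaussian.
FWHM_dif = np.sqrt(FWHM_gal**2 - FWHM_tem**2)

# For a Gaussian, FWHM = 2*sqrt(2*ln 2)*sigma ~= 2.355*sigma.
sigma = FWHM_dif / (2.0 * np.sqrt(2.0 * np.log(2.0))) / cdelt
print("broadening sigma = %.3f pixels" % sigma)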
from chainkey.util import print_error import httplib, urllib import socket import threading import hashlib import json from urlparse import urlparse, parse_qs try: import PyQt4 except Exception: sys.exit("Error: Could not import PyQt4 on Linux systems, you may try 'sudo apt-get install python-qt4'") from PyQt4.QtGui import * from PyQt4.QtCore import * import PyQt4.QtCore as QtCore import PyQt4.QtGui as QtGui import aes import base64 import chainkey from chainkey.plugins import BasePlugin, hook from chainkey.i18n import _ from chainkey_gui.qt import HelpButton, EnterButton class Plugin(BasePlugin): target_host = 'labelectrum.herokuapp.com' encode_password = None def fullname(self): return _('Label Sync') def description(self): return '%s\n\n%s%s%s' % (_("This plugin can sync your labels across multiple Electrum installs by using a remote database to save your data. Labels, transactions ids and addresses are encrypted before they are sent to the remote server. This code might increase the load of your wallet with a few microseconds as it will sync labels on each startup."), _("To get started visit"), " http://labelectrum.herokuapp.com/ ", _(" to sign up for an account.")) def version(self): return "0.2.1" def encode(self, message): encrypted = electrum.bitcoin.aes_encrypt_with_iv(self.encode_password, self.iv, message.encode('utf8')) encoded_message = base64.b64encode(encrypted) return encoded_message def decode(self, message): decoded_message = electrum.bitcoin.aes_decrypt_with_iv(self.encode_password, self.iv, base64.b64decode(message)).decode('utf8') return decoded_message @hook def init_qt(self, gui): self.window = gui.main_window if not self.auth_token(): # First run, throw plugin settings in your face self.load_wallet(self.window.wallet) if self.settings_dialog(): self.set_enabled(True) return True else: self.set_enabled(False) return False @hook def load_wallet(self, wallet): self.wallet = wallet mpk = self.wallet.get_master_public_key() self.encode_password = hashlib.sha1(mpk).digest().encode('hex')[:32] self.iv = hashlib.sha256(self.encode_password).digest()[:16] self.wallet_id = hashlib.sha256(mpk).digest().encode('hex') addresses = [] for account in self.wallet.accounts.values(): for address in account.get_addresses(0): addresses.append(address) self.addresses = addresses if self.auth_token(): # If there is an auth token we can try to actually start syncing threading.Thread(target=self.do_full_pull).start() def auth_token(self): return self.config.get("plugin_label_api_key") def is_available(self): # Disabled until compatibility is ensured return False return True def requires_settings(self): return True @hook def set_label(self, item,label, changed): if self.encode_password is None: return if not changed: return try: bundle = {"label": {"external_id": self.encode(item), "text": self.encode(label)}} params = json.dumps(bundle) connection = httplib.HTTPConnection(self.target_host) connection.request("POST", ("/api/wallets/%s/labels.json?auth_token=%s" % (self.wallet_id, self.auth_token())), params, {'Content-Type': 'application/json'}) response = connection.getresponse() if response.reason == httplib.responses[httplib.NOT_FOUND]: return response = json.loads(response.read()) except socket.gaierror as e: print_error('Error connecting to service: %s ' % e) return False def settings_widget(self, window): return EnterButton(_('Settings'), self.settings_dialog) def settings_dialog(self): def check_for_api_key(api_key): if api_key and len(api_key) > 12: 
self.config.set_key("plugin_label_api_key", str(self.auth_token_edit.text())) self.upload.setEnabled(True) self.download.setEnabled(True) self.accept.setEnabled(True) else: self.upload.setEnabled(False) self.download.setEnabled(False) self.accept.setEnabled(False) d = QDialog() layout = QGridLayout(d) layout.addWidget(QLabel("API Key: "),0,0) self.auth_token_edit = QLineEdit(self.auth_token()) self.auth_token_edit.textChanged.connect(check_for_api_key) layout.addWidget(QLabel("Label sync options: "),2,0) layout.addWidget(self.auth_token_edit, 0,1,1,2) decrypt_key_text = QLineEdit(self.encode_password) decrypt_key_text.setReadOnly(True) layout.addWidget(decrypt_key_text, 1,1) layout.addWidget(QLabel("Decryption key: "),1,0) layout.addWidget(HelpButton("This key can be used on the LabElectrum website to decrypt your data in case you want to review it online."),1,2) self.upload = QPushButton("Force upload") self.upload.clicked.connect(self.full_push) layout.addWidget(self.upload, 2,1) self.download = QPushButton("Force download") self.download.clicked.connect(self.full_pull) layout.addWidget(self.download, 2,2) c = QPushButton(_("Cancel")) c.clicked.connect(d.reject) self.accept = QPushButton(_("Done")) self.accept.clicked.connect(d.accept) layout.addWidget(c,3,1) layout.addWidget(self.accept,3,2) check_for_api_key(self.auth_token()) self.window.labelsChanged.connect(self.done_processing) if d.exec_(): return True else: return False def done_processing(self): QMessageBox.information(None, _("Labels synchronised"), _("Your labels have been synchronised.")) def full_push(self): threading.Thread(target=self.do_full_push).start() def full_pull(self): threading.Thread(target=self.do_full_pull, args=([True])).start() def do_full_push(self): try: bundle = {"labels": {}} for key, value in self.wallet.labels.iteritems(): try: encoded_key = self.encode(key) except: print_error('cannot encode', repr(key)) continue try: encoded_value = self.encode(value) except: print_error('cannot encode', repr(value)) continue bundle["labels"][encoded_key] = encoded_value params = json.dumps(bundle) connection = httplib.HTTPConnection(self.target_host) connection.request("POST", ("/api/wallets/%s/labels/batch.json?auth_token=%s" % (self.wallet_id, self.auth_token())), params, {'Content-Type': 'application/json'}) response = connection.getresponse() if response.reason == httplib.responses[httplib.NOT_FOUND]: print_error('404 error' % e) return try: response = json.loads(response.read()) except ValueError as e: print_error('Error loading labelsync response: %s' % e) return False if "error" in response: print_error('Error loading labelsync response.') return False except socket.gaierror as e: print_error('Error connecting to service: %s ' % e) return False self.window.labelsChanged.emit() def do_full_pull(self, force = False): connection = httplib.HTTPConnection(self.target_host) connection.request("GET", ("/api/wallets/%s/labels.json?auth_token=%s" % (self.wallet_id, self.auth_token())),"", {'Content-Type': 'application/json'}) response = connection.getresponse() if response.status != 200: print_error("Cannot retrieve labels:", response.status, response.reason) return response = json.loads(response.read()) if "error" in response: raise BaseException(_("Could not sync labels: %s" % response["error"])) for label in response: try: key = self.decode(label["external_id"]) except: continue try: value = self.decode(label["text"]) except: continue try: json.dumps(key) json.dumps(value) except: print_error('error: no json', key) 
continue if force or not self.wallet.labels.get(key): self.wallet.labels[key] = value self.wallet.storage.put('labels', self.wallet.labels) print_error("received %d labels"%len(response)) self.window.labelsChanged.emit()
Kefkius/encompass
plugins/labels.py
Python
gpl-3.0
9,203
[ "VisIt" ]
985cd2d9e9a9f39e1fcb880c680bdde15fbc912de9d02638eb411cb12c792916
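The labels plugin above derives its AES key, IV, and wallet id deterministically from the wallet's master public key. The derivation alone, translated to Python 3, with an illustrative mpk value standing in for wallet.get_master_public_key():

import hashlib

# Illustrative master public key; the plugin reads the real one
# from the loaded wallet.
mpk = b"xpub-example-master-public-key"

encode_password = hashlib.sha1(mpk).hexdigest()[:32]
iv = hashlib.sha256(encode_password.encode()).digest()[:16]
wallet_id = hashlib.sha256(mpk).hexdigest()

print(encode_password, wallet_id[:16], len(iv))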
""" RKS via SGM (squared gradient minimization) """ import numpy as np from frankenstein.sgscf import rks, sgrhf """ Methods for output """ def update_sghf(mf): pass class SGRKS(rks.RKS): # methods for output print_info = sgrhf.print_info # methods for SCF update_sghf = update_sghf get_grad_L = sgrhf.get_grad_L update_all = sgrhf.update_all # methods for err/conv check check_conv = sgrhf.check_conv # methods for GDM get_value_gdm = sgrhf.get_value_gdm get_grad_gdm = sgrhf.get_grad_gdm get_prec = sgrhf.get_prec def __init__(self, pymol): rks.RKS.__init__(self, pymol) self.orb_swap = None self.fd_grad = True # not supported for xc for now self.grad_L = None @property def L(self): grad_E = self.get_grad_E() if self.grad_E is None else self.grad_E return grad_E.ravel()@grad_E.ravel() @property def err_grad_L(self): grad_L = self.get_grad_L() if self.grad_L is None else self.grad_L return np.mean(grad_L**2.)**0.5 if __name__ == "__main__": import sys try: geom = sys.argv[1] basis = sys.argv[2] xc = sys.argv[3] ii = int(sys.argv[4]) aa = int(sys.argv[5]) except: print("Usage: geom, basis, xc, ii, aa") sys.exit(1) from frankenstein.tools.pyscf_utils import get_pymol pymol = get_pymol(geom, basis, verbose=3) pymol.verbose = 4 rmf = rks.RKS(pymol) rmf.xc = xc rmf.kernel() mf = SGRKS(pymol) mf.xc = xc mf.orb_swap = [[mf.no-1-ii, mf.no-1+aa]] mf.kernel(mo_coeff0=rmf.mo_coeff.copy()) eex = (mf.e_tot - rmf.e_tot) * 27.211399 print("Eex (%s) = %.3f eV" % (xc, eex))
hongzhouye/frankenstein
sgscf/sgrks.py
Python
bsd-3-clause
1,750
[ "PyMOL" ]
8b69430a6a8aeef66da1163b20f1c85e2883a9071a33719ac0eed887c529dbc3
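SGRKS above minimizes the squared-gradient objective L = |grad E|^2 and checks convergence with an RMS gradient measure. The two expressions on a stand-in numpy array (the record applies the RMS form to grad L rather than grad E; the arithmetic is the same):

import numpy as np

# Illustrative orbital-gradient array standing in for get_grad_E().
grad_E = np.array([[0.02, -0.01],
                   [0.00,  0.03]])

L = grad_E.ravel() @ grad_E.ravel()   # the objective: |grad E|^2
err = np.mean(grad_E**2.0) ** 0.5     # RMS gradient, the convergence measure
print(L, err)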
import sys file1=sys.argv[1] file2=sys.argv[2] file3=sys.argv[3] def file_len_fasta(fname): with open(fname) as f: for i, l in enumerate(f): pass return (i + 1)/2 from Bio import SeqIO from Bio.SeqIO.FastaIO import FastaWriter records_r = SeqIO.parse(file2, "fasta") records_f = SeqIO.parse(file1, "fasta") destination = open(file3,'w') destination.close() # normal biopython writer wraps every 60 characterrs, but mothur and qiime are not happy with that. regular_forward_dict={} forward_list=[] for record in records_f: #print record.id forward_id = record.id.split("_1_N")[0] # pulls out part that is the beginning and should be #print forward_id forward_list.append(forward_id) regular_forward_dict[record.id] = record.seq regular_reverse_dict={} reverse_list=[] for record in records_r: #print record.id reverse_id = record.id.split("_2_N")[0] #print reverse_id reverse_list.append(reverse_id) regular_reverse_dict[record.id]=record.seq records_r.close() records_f.close() forward_set = set(forward_list) reverse_set = set(reverse_list) # print "reverse set" #print forward_set # ## find ids that are in both forward and reverse matching_keys = forward_set.intersection(reverse_set) #print matching_keys matching_set =set(matching_keys) #print matching_set # def add_to_overall_dict(sequence_dictionary, matching_list, new_overall_dict): # length_keys=len(sequence_dictionary.keys()) # count = 0 # for key, value in sequence_dictionary.iteritems(): # count = count + 1 # # print count, "/", length_keys # for key_short in matching_list: # if key.startswith(key_short): # # print "yay" # # .setdefault workks with there is no key existing. # new_overall_dict.setdefault(key_short,[]).extend([key, value]) # return new_overall_dict def add_to_overall_dict_with_set(sequence_dictionary, matching_set, new_overall_dict): length_keys=len(sequence_dictionary.keys()) count = 0 for key, value in sequence_dictionary.iteritems(): count = count + 1 #print count, "/", length_keys for key_short in matching_set: if key.startswith(key_short): print "yay" # .setdefault workks with there is no key existing. new_overall_dict.setdefault(key_short,[]).extend([key, value]) return new_overall_dict overall_dict={} matching_forward = add_to_overall_dict_with_set(regular_forward_dict, matching_set, overall_dict) #print matching_forward matching_forward_and_reverse = add_to_overall_dict_with_set(regular_reverse_dict, matching_set, matching_forward) #print matching_forward_and_reverse print "Now getting ready to write that big file!!! Concatenation here we come!" ## So now dictionary structure is short_key: [forwardid, forward_seq, reverseid, reverse_seq] concat_temp=open(file3, 'w') concat_temp.close() concat_temp=open(file3, 'a') writer = FastaWriter(concat_temp, wrap=None) writer.write_header() ### maybe could make it faster by having the reverse and forward as a series of lists in a dictionary. Short_key: [keyF, keyR, seqF, seqR] count=0 length_overall_keys=len(matching_forward_and_reverse.keys()) import difflib for key, value in matching_forward_and_reverse.iteritems(): count = count + 1 print count, "/", length_overall_keys forward_key = value[0] print forward_key reverse_key = value[2] print reverse_key forward_seq = value[1] print forward_seq reverse_seq = value[3] print reverse_seq ## I have had a problem with seuqences in the R1 and R2 being almost identical when translated. 
I don't know how this would happen, ## but it is seriously messing with my data and I start to lose a lot of sequences because I have to get rid of those 10 and greater ## and this is quite problematic. Thus I have used this section to compare R1 and R2 to each other and if they are less than 40% similar ## they are kept. I based the 40% on looking at some test cases and blasting the resulting sequences. seq=difflib.SequenceMatcher(a=forward_seq.lower(), b=reverse_seq.lower()) print seq.ratio() if seq.ratio() < 0.40: if forward_key.endswith("+"): print "the plus side" R1_comes_first = SeqIO.SeqRecord(seq= forward_seq + reverse_seq, id = forward_key + "joined_with" + reverse_key, description = "") writer.write_record(R1_comes_first) elif forward_key.endswith("-"): print "the minus side" R2_comes_first = SeqIO.SeqRecord(seq= reverse_seq +forward_seq, id = reverse_key + "joined_with" + forward_key, description = "") writer.write_record(R2_comes_first) else: print "what the hell Julia!" else: "sequences show a weird similarity" writer.write_footer() concat_temp.close()
jooolia/phylo_temporal_jericho
sequence_processing/concatenate_R1_and_R2_for_translated_non_merging_primers.py
Python
mit
5,052
[ "Biopython" ]
93989c77af869caa86a98fec5f865dacd443c44f1f034f5bb9448ab27e9f5079
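The concatenation script above keeps an R1/R2 pair only when difflib's similarity ratio falls below 0.40, to drop suspiciously near-identical read pairs. The filter in isolation, on made-up sequences:

import difflib

r1 = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"
r2 = "GQLNDIFEAQKIEWHEDTGGSGSGSGSGSATG"

ratio = difflib.SequenceMatcher(a=r1.lower(), b=r2.lower()).ratio()
if ratio < 0.40:
    print("keep pair (ratio %.2f)" % ratio)
else:
    print("discard: R1/R2 suspiciously similar (ratio %.2f)" % ratio)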
from ..utils import type_from_ast, is_valid_literal_value from ..error import GraphQLError from ..type.definition import is_composite_type, is_input_type, is_leaf_type, GraphQLNonNull from ..language import ast from ..language.visitor import Visitor, visit from ..language.printer import print_ast class ValidationRule(Visitor): def __init__(self, context): self.context = context class UniqueOperationNames(ValidationRule): def __init__(self, context): super(UniqueOperationNames, self).__init__(context) self.known_operation_names = {} def enter_OperationDefinition(self, node, *args): operation_name = node.name if operation_name: if operation_name.value in self.known_operation_names: return GraphQLError( self.message(operation_name.value), [self.known_operation_names[operation_name.value], operation_name] ) self.known_operation_names[operation_name.value] = operation_name @staticmethod def message(operation_name): return 'There can only be one operation named "{}".'.format(operation_name) class LoneAnonymousOperation(ValidationRule): def __init__(self, context): super(LoneAnonymousOperation, self).__init__(context) self._op_count = 0 def enter_Document(self, node, *args): n = 0 for definition in node.definitions: if isinstance(definition, ast.OperationDefinition): n += 1 self._op_count = n def enter_OperationDefinition(self, node, *args): if not node.name and self._op_count > 1: return GraphQLError(self.message(), [node]) @staticmethod def message(): return 'This anonymous operation must be the only defined operation.' class KnownTypeNames(ValidationRule): def enter_NamedType(self, node, *args): type_name = node.name.value type = self.context.get_schema().get_type(type_name) if not type: return GraphQLError(self.message(type_name), [node]) @staticmethod def message(type): return 'Unknown type "{}".'.format(type) class FragmentsOnCompositeTypes(ValidationRule): def enter_InlineFragment(self, node, *args): type = self.context.get_type() if type and not is_composite_type(type): return GraphQLError( self.inline_message(print_ast(node.type_condition)), [node.type_condition] ) def enter_FragmentDefinition(self, node, *args): type = self.context.get_type() if type and not is_composite_type(type): return GraphQLError( self.message(node.name.value, print_ast(node.type_condition)), [node.type_condition] ) @staticmethod def inline_message(type): return 'Fragment cannot condition on non composite type "{}".'.format(type) @staticmethod def message(frag_name, type): return 'Fragment "{}" cannot condition on non composite type "{}".'.format(frag_name, type) class VariablesAreInputTypes(ValidationRule): def enter_VariableDefinition(self, node, *args): type = type_from_ast(self.context.get_schema(), node.type) if type and not is_input_type(type): variable_name = node.variable.name.value return GraphQLError( self.message(variable_name, print_ast(node.type)), [node.type] ) @staticmethod def message(variable_name, type_name): return 'Variable "${}" cannot be non-input type "{}".'.format(variable_name, type_name) class ScalarLeafs(ValidationRule): def enter_Field(self, node, *args): type = self.context.get_type() if type: if is_leaf_type(type): if node.selection_set: return GraphQLError( self.not_allowed_message(node.name.value, type), [node.selection_set] ) elif not node.selection_set: return GraphQLError( self.required_message(node.name.value, type), [node] ) @staticmethod def not_allowed_message(field, type): return 'Field "{}" of type "{}" must not have a sub selection.'.format(field, type) @staticmethod def 
required_message(field, type): return 'Field "{}" of type "{}" must have a sub selection.'.format(field, type) class FieldsOnCorrectType(ValidationRule): def enter_Field(self, node, *args): type = self.context.get_parent_type() if type: field_def = self.context.get_field_def() if not field_def: return GraphQLError( self.message(node.name.value, type.name), [node] ) @staticmethod def message(field_name, type): return 'Cannot query field "{}" on "{}".'.format(field_name, type) class UniqueFragmentNames(ValidationRule): def __init__(self, context): super(UniqueFragmentNames, self).__init__(context) self.known_fragment_names = {} def enter_FragmentDefinition(self, node, *args): fragment_name = node.name.value if fragment_name in self.known_fragment_names: return GraphQLError( self.duplicate_fragment_name_message(fragment_name), [self.known_fragment_names[fragment_name], node.name] ) self.known_fragment_names[fragment_name] = node.name @staticmethod def duplicate_fragment_name_message(field): return 'There can only be one fragment named {}'.format(field) class KnownFragmentNames(ValidationRule): def enter_FragmentSpread(self, node, *args): fragment_name = node.name.value fragment = self.context.get_fragment(fragment_name) if not fragment: return GraphQLError( self.unknown_fragment_message(fragment_name), [node.name] ) @staticmethod def unknown_fragment_message(fragment_name): return 'Unknown fragment "{}".'.format(fragment_name) class NoUnusedFragments(ValidationRule): def __init__(self, context): super(NoUnusedFragments, self).__init__(context) self.fragment_definitions = [] self.spreads_within_operation = [] self.fragment_adjacencies = {} self.spread_names = set() def enter_OperationDefinition(self, *args): self.spread_names = set() self.spreads_within_operation.append(self.spread_names) def enter_FragmentDefinition(self, node, *args): self.fragment_definitions.append(node) self.spread_names = set() self.fragment_adjacencies[node.name.value] = self.spread_names def enter_FragmentSpread(self, node, *args): self.spread_names.add(node.name.value) def leave_Document(self, *args): fragment_names_used = set() def reduce_spread_fragments(spreads): for fragment_name in spreads: if fragment_name in fragment_names_used: continue fragment_names_used.add(fragment_name) if fragment_name in self.fragment_adjacencies: reduce_spread_fragments(self.fragment_adjacencies[fragment_name]) for spreads in self.spreads_within_operation: reduce_spread_fragments(spreads) errors = [ GraphQLError( self.unused_fragment_message(fragment_definition.name.value), [fragment_definition] ) for fragment_definition in self.fragment_definitions if fragment_definition.name.value not in fragment_names_used ] if errors: return errors @staticmethod def unused_fragment_message(fragment_name): return 'Fragment "{}" is never used.'.format(fragment_name) class PossibleFragmentSpreads(ValidationRule): pass class NoFragmentCycles(ValidationRule): def __init__(self, context): super(NoFragmentCycles, self).__init__(context) self.spreads_in_fragment = { node.name.value: self.gather_spreads(node) for node in context.get_ast().definitions if isinstance(node, ast.FragmentDefinition) } self.known_to_lead_to_cycle = set() def enter_FragmentDefinition(self, node, *args): errors = [] initial_name = node.name.value spread_path = [] # This will convert the ast.FragmentDefinition to something that we can add # to a set. Otherwise we get a `unhashable type: dict` error. 
        # This makes it so that we can define a way to uniquely identify a FragmentDefinition
        # within a set.
        fragment_node_to_hashable = lambda fs: (fs.loc['start'], fs.loc['end'], fs.name.value)

        def detect_cycle_recursive(fragment_name):
            spread_nodes = self.spreads_in_fragment[fragment_name]

            for spread_node in spread_nodes:
                if fragment_node_to_hashable(spread_node) in self.known_to_lead_to_cycle:
                    continue

                if spread_node.name.value == initial_name:
                    cycle_path = spread_path + [spread_node]
                    self.known_to_lead_to_cycle |= set(map(fragment_node_to_hashable, cycle_path))

                    errors.append(GraphQLError(
                        self.cycle_error_message(initial_name, [s.name.value for s in spread_path]),
                        cycle_path
                    ))
                    continue

                if any(spread is spread_node for spread in spread_path):
                    continue

                spread_path.append(spread_node)
                detect_cycle_recursive(spread_node.name.value)
                spread_path.pop()

        detect_cycle_recursive(initial_name)

        if errors:
            return errors

    @staticmethod
    def cycle_error_message(fragment_name, spread_names):
        via = ' via {}'.format(', '.join(spread_names)) if spread_names else ''
        return 'Cannot spread fragment "{}" within itself{}.'.format(fragment_name, via)

    @classmethod
    def gather_spreads(cls, node):
        visitor = cls.CollectFragmentSpreadNodesVisitor()
        visit(node, visitor)
        return visitor.collect_fragment_spread_nodes()

    class CollectFragmentSpreadNodesVisitor(Visitor):
        def __init__(self):
            self.spread_nodes = []

        def enter_FragmentSpread(self, node, *args):
            self.spread_nodes.append(node)

        def collect_fragment_spread_nodes(self):
            return self.spread_nodes


class NoUndefinedVariables(ValidationRule):
    def __init__(self, context):
        self.operation = None
        self.visited_fragment_names = {}
        self.defined_variable_names = {}
        self.visit_spread_fragments = True
        super(NoUndefinedVariables, self).__init__(context)

    @staticmethod
    def undefined_var_message(var_name):
        return 'Variable "${}" is not defined.'.format(var_name)

    @staticmethod
    def undefined_var_by_op_message(var_name, op_name):
        return 'Variable "${}" is not defined by operation "{}".'.format(
            var_name, op_name
        )

    def enter_OperationDefinition(self, node, *args):
        self.operation = node
        self.visited_fragment_names = {}
        self.defined_variable_names = {}

    def enter_VariableDefinition(self, node, *args):
        self.defined_variable_names[node.variable.name.value] = True

    def enter_Variable(self, variable, key, parent, path, ancestors):
        var_name = variable.name.value
        if var_name not in self.defined_variable_names:
            is_fragment = lambda node: isinstance(node, ast.FragmentDefinition)
            within_fragment = any(is_fragment(node) for node in ancestors)

            if within_fragment and self.operation and self.operation.name:
                return GraphQLError(
                    self.undefined_var_by_op_message(var_name, self.operation.name.value),
                    [variable, self.operation]
                )

            return GraphQLError(
                self.undefined_var_message(var_name),
                [variable]
            )

    def enter_FragmentSpread(self, spread_ast, *args):
        if spread_ast.name.value in self.visited_fragment_names:
            return False

        self.visited_fragment_names[spread_ast.name.value] = True


class NoUnusedVariables(ValidationRule):
    visited_fragment_names = None
    variable_definitions = None
    variable_name_used = None
    visit_spread_fragments = True

    def __init__(self, context):
        super(NoUnusedVariables, self).__init__(context)

    def enter_OperationDefinition(self, *args):
        self.visited_fragment_names = set()
        self.variable_definitions = []
        self.variable_name_used = set()

    def leave_OperationDefinition(self, *args):
        errors = [
            GraphQLError(
                self.unused_variable_message(variable_definition.variable.name.value),
                [variable_definition]
            )
            for variable_definition in self.variable_definitions
            if variable_definition.variable.name.value not in self.variable_name_used
        ]

        if errors:
            return errors

    def enter_VariableDefinition(self, node, *args):
        if self.variable_definitions is not None:
            self.variable_definitions.append(node)

        return False

    def enter_Variable(self, node, *args):
        if self.variable_name_used is not None:
            self.variable_name_used.add(node.name.value)

    def enter_FragmentSpread(self, node, *args):
        if self.visited_fragment_names is not None:
            spread_name = node.name.value
            if spread_name in self.visited_fragment_names:
                return False

            self.visited_fragment_names.add(spread_name)

    @staticmethod
    def unused_variable_message(variable_name):
        return 'Variable "${}" is never used.'.format(variable_name)


class KnownDirectives(ValidationRule):
    def enter_Directive(self, node, key, parent, path, ancestors):
        directive_def = None
        for definition in self.context.get_schema().get_directives():
            if definition.name == node.name.value:
                directive_def = definition
                break

        if not directive_def:
            return GraphQLError(
                self.message(node.name.value),
                [node]
            )

        applied_to = ancestors[-1]

        if isinstance(applied_to, ast.OperationDefinition) and not directive_def.on_operation:
            return GraphQLError(
                self.misplaced_directive_message(node.name.value, 'operation'),
                [node]
            )

        if isinstance(applied_to, ast.Field) and not directive_def.on_field:
            return GraphQLError(
                self.misplaced_directive_message(node.name.value, 'field'),
                [node]
            )

        if (isinstance(applied_to, (ast.FragmentSpread, ast.InlineFragment, ast.FragmentDefinition)) and
                not directive_def.on_fragment):
            return GraphQLError(
                self.misplaced_directive_message(node.name.value, 'fragment'),
                [node]
            )

    @staticmethod
    def message(directive_name):
        return 'Unknown directive "{}".'.format(directive_name)

    @staticmethod
    def misplaced_directive_message(directive_name, placement):
        return 'Directive "{}" may not be used on "{}".'.format(directive_name, placement)


class KnownArgumentNames(ValidationRule):
    def enter_Argument(self, node, key, parent, path, ancestors):
        argument_of = ancestors[-1]

        if isinstance(argument_of, ast.Field):
            field_def = self.context.get_field_def()
            if field_def:
                field_arg_def = None
                for arg in field_def.args:
                    if arg.name == node.name.value:
                        field_arg_def = arg
                        break

                if not field_arg_def:
                    parent_type = self.context.get_parent_type()
                    assert parent_type
                    return GraphQLError(
                        self.message(node.name.value, field_def.name, parent_type.name),
                        [node]
                    )

        elif isinstance(argument_of, ast.Directive):
            directive = self.context.get_directive()
            if directive:
                directive_arg_def = None
                for arg in directive.args:
                    if arg.name == node.name.value:
                        directive_arg_def = arg
                        break

                if not directive_arg_def:
                    return GraphQLError(
                        self.directive_message(node.name.value, directive.name),
                        [node]
                    )

    @staticmethod
    def message(arg_name, field_name, type):
        return 'Unknown argument "{}" on field "{}" of type "{}".'.format(arg_name, field_name, type)

    @staticmethod
    def directive_message(arg_name, directive_name):
        return 'Unknown argument "{}" on directive "@{}".'.format(arg_name, directive_name)


class UniqueArgumentNames(ValidationRule):
    def __init__(self, context):
        super(UniqueArgumentNames, self).__init__(context)
        self.known_arg_names = {}

    def enter_Field(self, node, *args):
        self.known_arg_names = {}

    def enter_Directive(self, node, key, parent, path, ancestors):
        self.known_arg_names = {}

    def enter_Argument(self, node, *args):
        arg_name = node.name.value

        if arg_name in self.known_arg_names:
            return GraphQLError(
                self.duplicate_arg_message(arg_name),
                [self.known_arg_names[arg_name], node.name]
            )

        self.known_arg_names[arg_name] = node.name

    @staticmethod
    def duplicate_arg_message(field):
        return 'There can only be one argument named {}'.format(field)


class ArgumentsOfCorrectType(ValidationRule):
    pass


class ProvidedNonNullArguments(ValidationRule):
    def leave_Field(self, node, key, parent, path, ancestors):
        field_def = self.context.get_field_def()
        if not field_def:
            return False

        errors = []
        arg_asts = node.arguments or []
        arg_ast_map = {arg.name.value: arg for arg in arg_asts}

        for arg_def in field_def.args:
            arg_ast = arg_ast_map.get(arg_def.name, None)
            if not arg_ast and isinstance(arg_def.type, GraphQLNonNull):
                errors.append(GraphQLError(
                    self.missing_field_arg_message(node.name.value, arg_def.name, arg_def.type),
                    [node]
                ))

        if errors:
            return errors

    def leave_Directive(self, node, key, parent, path, ancestors):
        directive_def = self.context.get_directive()
        if not directive_def:
            return False

        errors = []
        arg_asts = node.arguments or []
        arg_ast_map = {arg.name.value: arg for arg in arg_asts}

        for arg_def in directive_def.args:
            arg_ast = arg_ast_map.get(arg_def.name, None)
            if not arg_ast and isinstance(arg_def.type, GraphQLNonNull):
                errors.append(GraphQLError(
                    self.missing_directive_arg_message(node.name.value, arg_def.name, arg_def.type),
                    [node]
                ))

        if errors:
            return errors

    @staticmethod
    def missing_field_arg_message(name, arg_name, type):
        return 'Field "{}" argument "{}" of type "{}" is required but not provided.'.format(name, arg_name, type)

    @staticmethod
    def missing_directive_arg_message(name, arg_name, type):
        return 'Directive "{}" argument "{}" of type "{}" is required but not provided.'.format(name, arg_name, type)


class DefaultValuesOfCorrectType(ValidationRule):
    def enter_VariableDefinition(self, node, *args):
        name = node.variable.name.value
        default_value = node.default_value
        type = self.context.get_input_type()

        if isinstance(type, GraphQLNonNull) and default_value:
            return GraphQLError(
                self.default_for_non_null_arg_message(name, type, type.of_type),
                [default_value]
            )

        if type and default_value and not is_valid_literal_value(type, default_value):
            return GraphQLError(
                self.bad_value_for_default_arg_message(name, type, print_ast(default_value)),
                [default_value]
            )

    @staticmethod
    def default_for_non_null_arg_message(var_name, type, guess_type):
        return 'Variable "${}" of type "{}" is required and will not use the default value. ' \
               'Perhaps you meant to use type "{}".'.format(var_name, type, guess_type)

    @staticmethod
    def bad_value_for_default_arg_message(var_name, type, value):
        return 'Variable "${}" of type "{}" has invalid default value: {}.'.format(var_name, type, value)


class VariablesInAllowedPosition(ValidationRule):
    pass


class OverlappingFieldsCanBeMerged(ValidationRule):
    pass
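# Usage sketch, assuming this package's validate() entry point accepts a
# schema, a parsed document and a list of rule classes such as those above
# (the exact call is hypothetical, not taken from this file):
#   errors = validate(schema, parse('{ field }'),
#                     [KnownDirectives, UniqueArgumentNames])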
woodb/graphql-py
graphql/core/validation/rules.py
Python
mit
21,570
[ "VisIt" ]
53bea2f2f3f052c3a8afa4c34fe6e8ad278d33059fc5f1297d13d5408d383edb
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import vtk


def main():
    colors = vtk.vtkNamedColors()

    points = vtk.vtkPoints()
    points.InsertNextPoint(0, 0, 0)

    vertex = vtk.vtkVertex()
    vertex.GetPointIds().SetId(0, 0)

    vertices = vtk.vtkCellArray()
    vertices.InsertNextCell(vertex)

    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)
    polydata.SetVerts(vertices)

    # Setup actor and mapper
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(polydata)

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetPointSize(10)
    actor.GetProperty().SetColor(colors.GetColor3d("Cyan"))

    # Setup render window, renderer, and interactor
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetWindowName("Vertex")
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)
    renderer.AddActor(actor)
    renderer.SetBackground(colors.GetColor3d("DarkGreen"))

    renderWindow.Render()
    renderWindowInteractor.Start()


if __name__ == '__main__':
    main()
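# Optional capture sketch (standard VTK classes, not part of the original
# example): placed inside main() after renderWindow.Render(), this would save
# the rendered scene to a PNG instead of relying on the interactive window.
#   w2if = vtk.vtkWindowToImageFilter()
#   w2if.SetInput(renderWindow)
#   w2if.Update()
#   png = vtk.vtkPNGWriter()
#   png.SetFileName("vertex.png")
#   png.SetInputConnection(w2if.GetOutputPort())
#   png.Write()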
lorensen/VTKExamples
src/Python/GeometricObjects/Vertex.py
Python
apache-2.0
1,179
[ "VTK" ]
ec4ad03c7ee49bc060d8dae4d47f1fdff420c8c2aa6de5e7b9d020a310079d6d
#!/usr/bin/env python ############################################################################################## # # # regrid_emissions_N96e.py # # # Requirements: # Iris 1.10, time, cf_units, numpy # # # This Python script has been written by N.L. Abraham as part of the UKCA Tutorials: # http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4 # # Copyright (C) 2015 University of Cambridge # # This is free software: you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # It is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. # # You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>. # # Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk> # Modified by Marcus Koehler 2017-10-12 <mok21@cam.ac.uk> # # ############################################################################################## # preamble import time import iris import cf_units import numpy # --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. --- # name of file containing an ENDGame grid, e.g. your model output # NOTE: all the fields in the file should be on the same horizontal # grid, as the field used MAY NOT be the first in order of STASH grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec' # # name of emissions file emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1960-2020/0.5x0.5/combined_sources_HCHO_1960-2020_greg.nc' # # STASH code emissions are associated with # 301-320: surface # m01s00i304: HCHO surface emissions # # 321-340: full atmosphere # stash='m01s00i304' # --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED --- species_name='HCHO' # this is the grid we want to regrid to, e.g. N96 ENDGame grd=iris.load(grid_file)[0] grd.coord(axis='x').guess_bounds() grd.coord(axis='y').guess_bounds() # This is the original data ems=iris.load_cube(emissions_file) # make intersection between 0 and 360 longitude to ensure that # the data is regridded correctly nems = ems.intersection(longitude=(0, 360)) # make sure that we use the same coordinate system, otherwise regrid won't work nems.coord(axis='x').coord_system=grd.coord_system() nems.coord(axis='y').coord_system=grd.coord_system() # now guess the bounds of the new grid prior to regridding nems.coord(axis='x').guess_bounds() nems.coord(axis='y').guess_bounds() # now regrid ocube=nems.regrid(grd,iris.analysis.AreaWeighted()) # now add correct attributes and names to netCDF file ocube.var_name='emissions_'+str.strip(species_name) ocube.long_name=str.strip(species_name)+' surf emissions' ocube.standard_name='tendency_of_atmosphere_mass_content_of_formaldehyde_due_to_emission' ocube.units=cf_units.Unit('kg m-2 s-1') ocube.attributes['vertical_scaling']='surface' ocube.attributes['um_stash_source']=stash ocube.attributes['tracer_name']=str.strip(species_name) # global attributes, so don't set in local_keys # NOTE: all these should be strings, including the numbers! # basic emissions type ocube.attributes['emission_type']='1' # time series ocube.attributes['update_type']='1' # same as above ocube.attributes['update_freq_in_hours']='120' # i.e. 
5 days ocube.attributes['um_version']='10.6' # UM version ocube.attributes['source']='combined_sources_HCHO_1960-2020_greg.nc' ocube.attributes['title']='Time-varying monthly surface emissions of formaldehyde from 1960 to 2020' ocube.attributes['File_version']='v2' ocube.attributes['File_creation_date']=time.ctime(time.time()) ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)' ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history'] ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.' ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010' del ocube.attributes['file_creation_date'] del ocube.attributes['description'] # rename and set time coord - mid-month from 1960-Jan to 2020-Dec # this bit is annoyingly fiddly ocube.coord(axis='t').var_name='time' ocube.coord(axis='t').standard_name='time' ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='gregorian') ocube.coord(axis='t').points=numpy.array([15.5, 45.5, 75.5, 106, 136.5, 167, 197.5, 228.5, 259, 289.5, 320, 350.5, 381.5, 411, 440.5, 471, 501.5, 532, 562.5, 593.5, 624, 654.5, 685, 715.5, 746.5, 776, 805.5, 836, 866.5, 897, 927.5, 958.5, 989, 1019.5, 1050, 1080.5, 1111.5, 1141, 1170.5, 1201, 1231.5, 1262, 1292.5, 1323.5, 1354, 1384.5, 1415, 1445.5, 1476.5, 1506.5, 1536.5, 1567, 1597.5, 1628, 1658.5, 1689.5, 1720, 1750.5, 1781, 1811.5, 1842.5, 1872, 1901.5, 1932, 1962.5, 1993, 2023.5, 2054.5, 2085, 2115.5, 2146, 2176.5, 2207.5, 2237, 2266.5, 2297, 2327.5, 2358, 2388.5, 2419.5, 2450, 2480.5, 2511, 2541.5, 2572.5, 2602, 2631.5, 2662, 2692.5, 2723, 2753.5, 2784.5, 2815, 2845.5, 2876, 2906.5, 2937.5, 2967.5, 2997.5, 3028, 3058.5, 3089, 3119.5, 3150.5, 3181, 3211.5, 3242, 3272.5, 3303.5, 3333, 3362.5, 3393, 3423.5, 3454, 3484.5, 3515.5, 3546, 3576.5, 3607, 3637.5, 3668.5, 3698, 3727.5, 3758, 3788.5, 3819, 3849.5, 3880.5, 3911, 3941.5, 3972, 4002.5, 4033.5, 4063, 4092.5, 4123, 4153.5, 4184, 4214.5, 4245.5, 4276, 4306.5, 4337, 4367.5, 4398.5, 4428.5, 4458.5, 4489, 4519.5, 4550, 4580.5, 4611.5, 4642, 4672.5, 4703, 4733.5, 4764.5, 4794, 4823.5, 4854, 4884.5, 4915, 4945.5, 4976.5, 5007, 5037.5, 5068, 5098.5, 5129.5, 5159, 5188.5, 5219, 5249.5, 5280, 5310.5, 5341.5, 5372, 5402.5, 5433, 5463.5, 5494.5, 5524, 5553.5, 5584, 5614.5, 5645, 5675.5, 5706.5, 5737, 5767.5, 5798, 5828.5, 5859.5, 5889.5, 5919.5, 5950, 5980.5, 6011, 6041.5, 6072.5, 6103, 6133.5, 6164, 6194.5, 6225.5, 6255, 6284.5, 6315, 6345.5, 6376, 6406.5, 6437.5, 6468, 6498.5, 6529, 6559.5, 6590.5, 6620, 6649.5, 6680, 6710.5, 6741, 6771.5, 6802.5, 6833, 6863.5, 6894, 6924.5, 6955.5, 6985, 7014.5, 7045, 7075.5, 7106, 7136.5, 7167.5, 7198, 7228.5, 7259, 7289.5, 7320.5, 7350.5, 7380.5, 7411, 7441.5, 7472, 7502.5, 7533.5, 7564, 7594.5, 7625, 7655.5, 7686.5, 7716, 7745.5, 7776, 7806.5, 7837, 7867.5, 7898.5, 7929, 7959.5, 7990, 8020.5, 8051.5, 8081, 8110.5, 8141, 8171.5, 8202, 8232.5, 8263.5, 8294, 8324.5, 8355, 8385.5, 8416.5, 8446, 8475.5, 8506, 8536.5, 8567, 8597.5, 8628.5, 8659, 8689.5, 8720, 8750.5, 8781.5, 8811.5, 8841.5, 8872, 8902.5, 8933, 8963.5, 8994.5, 9025, 9055.5, 9086, 9116.5, 9147.5, 9177, 9206.5, 9237, 9267.5, 9298, 9328.5, 9359.5, 9390, 9420.5, 9451, 9481.5, 9512.5, 9542, 9571.5, 9602, 9632.5, 9663, 9693.5, 9724.5, 9755, 9785.5, 9816, 9846.5, 9877.5, 9907, 9936.5, 9967, 9997.5, 10028, 10058.5, 10089.5, 10120, 10150.5, 10181, 10211.5, 10242.5, 
10272.5, 10302.5, 10333, 10363.5, 10394, 10424.5, 10455.5, 10486, 10516.5, 10547, 10577.5, 10608.5, 10638, 10667.5, 10698, 10728.5, 10759, 10789.5, 10820.5, 10851, 10881.5, 10912, 10942.5, 10973.5, 11003, 11032.5, 11063, 11093.5, 11124, 11154.5, 11185.5, 11216, 11246.5, 11277, 11307.5, 11338.5, 11368, 11397.5, 11428, 11458.5, 11489, 11519.5, 11550.5, 11581, 11611.5, 11642, 11672.5, 11703.5, 11733.5, 11763.5, 11794, 11824.5, 11855, 11885.5, 11916.5, 11947, 11977.5, 12008, 12038.5, 12069.5, 12099, 12128.5, 12159, 12189.5, 12220, 12250.5, 12281.5, 12312, 12342.5, 12373, 12403.5, 12434.5, 12464, 12493.5, 12524, 12554.5, 12585, 12615.5, 12646.5, 12677, 12707.5, 12738, 12768.5, 12799.5, 12829, 12858.5, 12889, 12919.5, 12950, 12980.5, 13011.5, 13042, 13072.5, 13103, 13133.5, 13164.5, 13194.5, 13224.5, 13255, 13285.5, 13316, 13346.5, 13377.5, 13408, 13438.5, 13469, 13499.5, 13530.5, 13560, 13589.5, 13620, 13650.5, 13681, 13711.5, 13742.5, 13773, 13803.5, 13834, 13864.5, 13895.5, 13925, 13954.5, 13985, 14015.5, 14046, 14076.5, 14107.5, 14138, 14168.5, 14199, 14229.5, 14260.5, 14290, 14319.5, 14350, 14380.5, 14411, 14441.5, 14472.5, 14503, 14533.5, 14564, 14594.5, 14625.5, 14655.5, 14685.5, 14716, 14746.5, 14777, 14807.5, 14838.5, 14869, 14899.5, 14930, 14960.5, 14991.5, 15021, 15050.5, 15081, 15111.5, 15142, 15172.5, 15203.5, 15234, 15264.5, 15295, 15325.5, 15356.5, 15386, 15415.5, 15446, 15476.5, 15507, 15537.5, 15568.5, 15599, 15629.5, 15660, 15690.5, 15721.5, 15751, 15780.5, 15811, 15841.5, 15872, 15902.5, 15933.5, 15964, 15994.5, 16025, 16055.5, 16086.5, 16116.5, 16146.5, 16177, 16207.5, 16238, 16268.5, 16299.5, 16330, 16360.5, 16391, 16421.5, 16452.5, 16482, 16511.5, 16542, 16572.5, 16603, 16633.5, 16664.5, 16695, 16725.5, 16756, 16786.5, 16817.5, 16847, 16876.5, 16907, 16937.5, 16968, 16998.5, 17029.5, 17060, 17090.5, 17121, 17151.5, 17182.5, 17212, 17241.5, 17272, 17302.5, 17333, 17363.5, 17394.5, 17425, 17455.5, 17486, 17516.5, 17547.5, 17577.5, 17607.5, 17638, 17668.5, 17699, 17729.5, 17760.5, 17791, 17821.5, 17852, 17882.5, 17913.5, 17943, 17972.5, 18003, 18033.5, 18064, 18094.5, 18125.5, 18156, 18186.5, 18217, 18247.5, 18278.5, 18308, 18337.5, 18368, 18398.5, 18429, 18459.5, 18490.5, 18521, 18551.5, 18582, 18612.5, 18643.5, 18673, 18702.5, 18733, 18763.5, 18794, 18824.5, 18855.5, 18886, 18916.5, 18947, 18977.5, 19008.5, 19038.5, 19068.5, 19099, 19129.5, 19160, 19190.5, 19221.5, 19252, 19282.5, 19313, 19343.5, 19374.5, 19404, 19433.5, 19464, 19494.5, 19525, 19555.5, 19586.5, 19617, 19647.5, 19678, 19708.5, 19739.5, 19769, 19798.5, 19829, 19859.5, 19890, 19920.5, 19951.5, 19982, 20012.5, 20043, 20073.5, 20104.5, 20134, 20163.5, 20194, 20224.5, 20255, 20285.5, 20316.5, 20347, 20377.5, 20408, 20438.5, 20469.5, 20499.5, 20529.5, 20560, 20590.5, 20621, 20651.5, 20682.5, 20713, 20743.5, 20774, 20804.5, 20835.5, 20865, 20894.5, 20925, 20955.5, 20986, 21016.5, 21047.5, 21078, 21108.5, 21139, 21169.5, 21200.5, 21230, 21259.5, 21290, 21320.5, 21351, 21381.5, 21412.5, 21443, 21473.5, 21504, 21534.5, 21565.5, 21595, 21624.5, 21655, 21685.5, 21716, 21746.5, 21777.5, 21808, 21838.5, 21869, 21899.5, 21930.5, 21960.5, 21990.5, 22021, 22051.5, 22082, 22112.5, 22143.5, 22174, 22204.5, 22235, 22265.5]) # make z-direction. 
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number', units='1',attributes={'positive':'up'}) ocube.add_aux_coord(zdims) ocube=iris.util.new_axis(ocube, zdims) # now transpose cube to put Z 2nd ocube.transpose([1,0,2,3]) # make coordinates 64-bit ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64') ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64') #ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64') # for some reason, longitude_bounds are double, but latitude_bounds are float ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64') # add forecast_period & forecast_reference_time # forecast_reference_time frt=numpy.array([15.5, 45.5, 75.5, 106, 136.5, 167, 197.5, 228.5, 259, 289.5, 320, 350.5, 381.5, 411, 440.5, 471, 501.5, 532, 562.5, 593.5, 624, 654.5, 685, 715.5, 746.5, 776, 805.5, 836, 866.5, 897, 927.5, 958.5, 989, 1019.5, 1050, 1080.5, 1111.5, 1141, 1170.5, 1201, 1231.5, 1262, 1292.5, 1323.5, 1354, 1384.5, 1415, 1445.5, 1476.5, 1506.5, 1536.5, 1567, 1597.5, 1628, 1658.5, 1689.5, 1720, 1750.5, 1781, 1811.5, 1842.5, 1872, 1901.5, 1932, 1962.5, 1993, 2023.5, 2054.5, 2085, 2115.5, 2146, 2176.5, 2207.5, 2237, 2266.5, 2297, 2327.5, 2358, 2388.5, 2419.5, 2450, 2480.5, 2511, 2541.5, 2572.5, 2602, 2631.5, 2662, 2692.5, 2723, 2753.5, 2784.5, 2815, 2845.5, 2876, 2906.5, 2937.5, 2967.5, 2997.5, 3028, 3058.5, 3089, 3119.5, 3150.5, 3181, 3211.5, 3242, 3272.5, 3303.5, 3333, 3362.5, 3393, 3423.5, 3454, 3484.5, 3515.5, 3546, 3576.5, 3607, 3637.5, 3668.5, 3698, 3727.5, 3758, 3788.5, 3819, 3849.5, 3880.5, 3911, 3941.5, 3972, 4002.5, 4033.5, 4063, 4092.5, 4123, 4153.5, 4184, 4214.5, 4245.5, 4276, 4306.5, 4337, 4367.5, 4398.5, 4428.5, 4458.5, 4489, 4519.5, 4550, 4580.5, 4611.5, 4642, 4672.5, 4703, 4733.5, 4764.5, 4794, 4823.5, 4854, 4884.5, 4915, 4945.5, 4976.5, 5007, 5037.5, 5068, 5098.5, 5129.5, 5159, 5188.5, 5219, 5249.5, 5280, 5310.5, 5341.5, 5372, 5402.5, 5433, 5463.5, 5494.5, 5524, 5553.5, 5584, 5614.5, 5645, 5675.5, 5706.5, 5737, 5767.5, 5798, 5828.5, 5859.5, 5889.5, 5919.5, 5950, 5980.5, 6011, 6041.5, 6072.5, 6103, 6133.5, 6164, 6194.5, 6225.5, 6255, 6284.5, 6315, 6345.5, 6376, 6406.5, 6437.5, 6468, 6498.5, 6529, 6559.5, 6590.5, 6620, 6649.5, 6680, 6710.5, 6741, 6771.5, 6802.5, 6833, 6863.5, 6894, 6924.5, 6955.5, 6985, 7014.5, 7045, 7075.5, 7106, 7136.5, 7167.5, 7198, 7228.5, 7259, 7289.5, 7320.5, 7350.5, 7380.5, 7411, 7441.5, 7472, 7502.5, 7533.5, 7564, 7594.5, 7625, 7655.5, 7686.5, 7716, 7745.5, 7776, 7806.5, 7837, 7867.5, 7898.5, 7929, 7959.5, 7990, 8020.5, 8051.5, 8081, 8110.5, 8141, 8171.5, 8202, 8232.5, 8263.5, 8294, 8324.5, 8355, 8385.5, 8416.5, 8446, 8475.5, 8506, 8536.5, 8567, 8597.5, 8628.5, 8659, 8689.5, 8720, 8750.5, 8781.5, 8811.5, 8841.5, 8872, 8902.5, 8933, 8963.5, 8994.5, 9025, 9055.5, 9086, 9116.5, 9147.5, 9177, 9206.5, 9237, 9267.5, 9298, 9328.5, 9359.5, 9390, 9420.5, 9451, 9481.5, 9512.5, 9542, 9571.5, 9602, 9632.5, 9663, 9693.5, 9724.5, 9755, 9785.5, 9816, 9846.5, 9877.5, 9907, 9936.5, 9967, 9997.5, 10028, 10058.5, 10089.5, 10120, 10150.5, 10181, 10211.5, 10242.5, 10272.5, 10302.5, 10333, 10363.5, 10394, 10424.5, 10455.5, 10486, 10516.5, 10547, 10577.5, 10608.5, 10638, 10667.5, 10698, 10728.5, 10759, 10789.5, 10820.5, 10851, 10881.5, 10912, 10942.5, 10973.5, 11003, 11032.5, 11063, 11093.5, 11124, 11154.5, 11185.5, 11216, 11246.5, 11277, 
11307.5, 11338.5, 11368, 11397.5, 11428, 11458.5, 11489, 11519.5, 11550.5, 11581, 11611.5, 11642, 11672.5, 11703.5, 11733.5, 11763.5, 11794, 11824.5, 11855, 11885.5, 11916.5, 11947, 11977.5, 12008, 12038.5, 12069.5, 12099, 12128.5, 12159, 12189.5, 12220, 12250.5, 12281.5, 12312, 12342.5, 12373, 12403.5, 12434.5, 12464, 12493.5, 12524, 12554.5, 12585, 12615.5, 12646.5, 12677, 12707.5, 12738, 12768.5, 12799.5, 12829, 12858.5, 12889, 12919.5, 12950, 12980.5, 13011.5, 13042, 13072.5, 13103, 13133.5, 13164.5, 13194.5, 13224.5, 13255, 13285.5, 13316, 13346.5, 13377.5, 13408, 13438.5, 13469, 13499.5, 13530.5, 13560, 13589.5, 13620, 13650.5, 13681, 13711.5, 13742.5, 13773, 13803.5, 13834, 13864.5, 13895.5, 13925, 13954.5, 13985, 14015.5, 14046, 14076.5, 14107.5, 14138, 14168.5, 14199, 14229.5, 14260.5, 14290, 14319.5, 14350, 14380.5, 14411, 14441.5, 14472.5, 14503, 14533.5, 14564, 14594.5, 14625.5, 14655.5, 14685.5, 14716, 14746.5, 14777, 14807.5, 14838.5, 14869, 14899.5, 14930, 14960.5, 14991.5, 15021, 15050.5, 15081, 15111.5, 15142, 15172.5, 15203.5, 15234, 15264.5, 15295, 15325.5, 15356.5, 15386, 15415.5, 15446, 15476.5, 15507, 15537.5, 15568.5, 15599, 15629.5, 15660, 15690.5, 15721.5, 15751, 15780.5, 15811, 15841.5, 15872, 15902.5, 15933.5, 15964, 15994.5, 16025, 16055.5, 16086.5, 16116.5, 16146.5, 16177, 16207.5, 16238, 16268.5, 16299.5, 16330, 16360.5, 16391, 16421.5, 16452.5, 16482, 16511.5, 16542, 16572.5, 16603, 16633.5, 16664.5, 16695, 16725.5, 16756, 16786.5, 16817.5, 16847, 16876.5, 16907, 16937.5, 16968, 16998.5, 17029.5, 17060, 17090.5, 17121, 17151.5, 17182.5, 17212, 17241.5, 17272, 17302.5, 17333, 17363.5, 17394.5, 17425, 17455.5, 17486, 17516.5, 17547.5, 17577.5, 17607.5, 17638, 17668.5, 17699, 17729.5, 17760.5, 17791, 17821.5, 17852, 17882.5, 17913.5, 17943, 17972.5, 18003, 18033.5, 18064, 18094.5, 18125.5, 18156, 18186.5, 18217, 18247.5, 18278.5, 18308, 18337.5, 18368, 18398.5, 18429, 18459.5, 18490.5, 18521, 18551.5, 18582, 18612.5, 18643.5, 18673, 18702.5, 18733, 18763.5, 18794, 18824.5, 18855.5, 18886, 18916.5, 18947, 18977.5, 19008.5, 19038.5, 19068.5, 19099, 19129.5, 19160, 19190.5, 19221.5, 19252, 19282.5, 19313, 19343.5, 19374.5, 19404, 19433.5, 19464, 19494.5, 19525, 19555.5, 19586.5, 19617, 19647.5, 19678, 19708.5, 19739.5, 19769, 19798.5, 19829, 19859.5, 19890, 19920.5, 19951.5, 19982, 20012.5, 20043, 20073.5, 20104.5, 20134, 20163.5, 20194, 20224.5, 20255, 20285.5, 20316.5, 20347, 20377.5, 20408, 20438.5, 20469.5, 20499.5, 20529.5, 20560, 20590.5, 20621, 20651.5, 20682.5, 20713, 20743.5, 20774, 20804.5, 20835.5, 20865, 20894.5, 20925, 20955.5, 20986, 21016.5, 21047.5, 21078, 21108.5, 21139, 21169.5, 21200.5, 21230, 21259.5, 21290, 21320.5, 21351, 21381.5, 21412.5, 21443, 21473.5, 21504, 21534.5, 21565.5, 21595, 21624.5, 21655, 21685.5, 21716, 21746.5, 21777.5, 21808, 21838.5, 21869, 21899.5, 21930.5, 21960.5, 21990.5, 22021, 22051.5, 22082, 22112.5, 22143.5, 22174, 22204.5, 22235, 22265.5],dtype='float64') frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time', units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='gregorian')) ocube.add_aux_coord(frt_dims,data_dims=0) ocube.coord('forecast_reference_time').guess_bounds() # forecast_period fp=numpy.array([-360],dtype='float64') fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period', units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64')) ocube.add_aux_coord(fp_dims,data_dims=None) # add-in cell_methods ocube.cell_methods = [iris.coords.CellMethod('mean', 
'time')] # set _FillValue fillval=1e+20 ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32') # output file name, based on species outpath='ukca_emiss_'+species_name+'.nc' # don't want time to be cattable, as is a periodic emissions file iris.FUTURE.netcdf_no_unlimited=True # annoying hack to set a missing_value attribute as well as a _FillValue attribute dict.__setitem__(ocube.attributes, 'missing_value', fillval) # now write-out to netCDF saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC') saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION) saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name']) # end of script
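# Dimension-order note (derived from the new_axis and transpose([1,0,2,3])
# steps above, added for clarity): the saved cube ends up ordered
# (time, model_level_number, latitude, longitude), which is the layout the
# rest of this script assumes when attaching the time-based aux coordinates.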
acsis-project/emissions
emissions/python/timeseries_1960-2020/regrid_HCHO_emissions_n96e_greg.py
Python
gpl-3.0
19,014
[ "NetCDF" ]
81d6e534051cfcc1d019a3c0900e5391a37f9a81b35e5110d54cbf574e33a77c
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RAffycontam(RPackage):
    """structured corruption of cel file data to demonstrate QA
    effectiveness."""

    homepage = "https://www.bioconductor.org/packages/affyContam/"
    url = "https://git.bioconductor.org/packages/affyContam"

    version('1.34.0', git='https://git.bioconductor.org/packages/affyContam',
            commit='03529f26d059c19e069cdda358dbf7789b6d4c40')

    depends_on('r@3.4.0:3.4.9', when=('@1.34.0'))
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-affy', type=('build', 'run'))
    depends_on('r-affydata', type=('build', 'run'))
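# Usage sketch once this recipe is in a Spack repository (the package name
# follows from the directory path above):
#   spack install r-affycontam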
EmreAtes/spack
var/spack/repos/builtin/packages/r-affycontam/package.py
Python
lgpl-2.1
1,838
[ "Bioconductor" ]
b70dabd3f1a5202396ed9075d376caba73f48bd6c6adf288e7001659c349e631
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit


# function to model and create data: Two-Gaussian
def func(x, a0, b0, c0, a1, b1, c1):
    return a0 * np.exp(-(x - b0)**2 / (2 * c0**2)) \
        + a1 * np.exp(-(x - b1)**2 / (2 * c1**2))

# clean data
x = np.linspace(0, 20, 200)
y = func(x, 1, 3, 1, -2, 15, 0.5)

# add noise to data
yn = y + 0.2 * np.random.normal(size=len(x))

# fit noisy data providing guesses
guesses = [1, 3, 1, 1, 15, 1]
popt, pcov = curve_fit(func, x, yn, p0=guesses)

# print best fit and variances (diagonal elements of the covariance matrix)
for i in range(0, 6):
    print("(", popt[i], "+/-", pcov[i, i], ")")

# plot
plt.title('Fitting two Gaussians')
plt.plot(x, y, label='Function')
plt.scatter(x, yn)
yfit = func(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5])
plt.plot(x, yfit, '--', label='Best Fit')
plt.legend()
plt.xlabel('x')
plt.ylabel('y = f(x)')
plt.show()
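# The diagonal of pcov holds variances; the 1-sigma parameter uncertainties
# are their square roots. The derived printout below is an added sketch, not
# part of the original script's output.
perr = np.sqrt(np.diag(pcov))
print("1-sigma uncertainties:", perr)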
casep/Molido
fitting-two-gaussians.py
Python
gpl-2.0
947
[ "Gaussian" ]
5035bba5b8014e3c977369c97fd6cfab5862332cf284ed4fcca5b24fac724663
from __future__ import annotations import inspect import json import logging import os import pathlib from xia2.Handlers.Streams import banner logger = logging.getLogger("xia2.Schema.Interfaces.Scaler") class Scaler: """An interface to present scaling functionality in a similar way to the integrater interface.""" def __init__(self, base_path=None): # key this by the epoch, if available, else will need to # do something different. self._scalr_integraters = {} self._scalr_corrections = False self._scalr_correct_decay = None self._scalr_correct_modulation = None self._scalr_correct_absorption = None # integraters have the following methods for pulling interesting # information out: # # get_integrater_project_info() - pname, xname, dname # get_integrater_epoch() - measurement of first frame self.scaler_reset() self._scalr_reference_reflection_file = None self._scalr_freer_file = None # user input to guide spacegroup choices self._scalr_input_spacegroup = None self._scalr_input_pointgroup = None # places to hold the output # this should be a dictionary keyed by datset / format, or # format / dataset self._scalr_scaled_reflection_files = None # this needs to be a dictionary keyed by dataset etc, e.g. # key = pname, xname, dname self._scalr_statistics = None # and also the following keys: self._scalr_statistics_keys = [ "High resolution limit", "Low resolution limit", "Completeness", "Multiplicity", "I/sigma", "Rmerge(I)", "Rmerge(I+/I-)", "Rmeas(I)", "Rmeas(I+/-)", "Rpim(I)", "Rpim(I+/-)", "CC half", "Wilson B factor", "Partial bias", "Anomalous completeness", "Anomalous multiplicity", "Anomalous correlation", "Anomalous slope", "dF/F", "dI/s(dI)", "Total observations", "Total unique", ] # information for returning "interesting facts" about the data self._scalr_highest_resolution = 0.0 self._scalr_cell = None self._scalr_cell_esd = None self._scalr_cell_dict = {} self._scalr_likely_spacegroups = [] self._scalr_unlikely_spacegroups = [] # do we want anomalous pairs separating? self._scalr_anomalous = False # admin junk self._base_path = base_path self._working_directory = os.getcwd() self._scalr_pname = None self._scalr_xname = None # link to parent xcrystal self._scalr_xcrystal = None self._scalr_resolution_limits = {} # serialization functions def to_dict(self): obj = {} obj["__id__"] = "Scaler" obj["__module__"] = self.__class__.__module__ obj["__name__"] = self.__class__.__name__ if self._base_path: obj["_base_path"] = self._base_path.__fspath__() attributes = inspect.getmembers(self, lambda m: not inspect.isroutine(m)) for a in attributes: if a[0] == "_scalr_xcrystal": # XXX I guess we probably want this? 
continue elif a[0] == "_scalr_integraters": d = {} for k, v in a[1].items(): d[k] = v.to_dict() obj[a[0]] = d elif a[0] == "_scalr_statistics" and a[1] is not None: # dictionary has tuples as keys - json can't handle this so serialize # keys in place d = {} for k, v in a[1].items(): k = json.dumps(k) d[k] = v obj[a[0]] = d elif a[0] == "_scalr_resolution_limits": d = {} for k, v in a[1].items(): k = json.dumps(k) d[k] = v obj[a[0]] = d elif a[0].startswith("_scalr_"): obj[a[0]] = a[1] return obj @classmethod def from_dict(cls, obj): assert obj["__id__"] == "Scaler" base_path = obj.get("_base_path") if base_path: base_path = pathlib.Path(base_path) else: base_path = None return_obj = cls(base_path=base_path) for k, v in obj.items(): if k == "_scalr_integraters": for k_, v_ in v.items(): from libtbx.utils import import_python_object integrater_cls = import_python_object( import_path=".".join((v_["__module__"], v_["__name__"])), error_prefix="", target_must_be="", where_str="", ).object v[k_] = integrater_cls.from_dict(v_) elif k == "_scalr_statistics" and v is not None: d = {} for k_, v_ in v.items(): k_ = tuple(str(s) for s in json.loads(k_)) d[k_] = v_ v = d elif k == "_scalr_resolution_limits": d = {} for k_, v_ in v.items(): k_ = tuple(str(s) for s in json.loads(k_)) d[k_] = v_ v = d elif k == "_base_path": continue setattr(return_obj, k, v) return return_obj def as_json(self, filename=None, compact=False): obj = self.to_dict() if compact: text = json.dumps( obj, skipkeys=False, separators=(",", ":"), ensure_ascii=True ) else: text = json.dumps(obj, skipkeys=False, indent=2, ensure_ascii=True) # If a filename is set then dump to file otherwise return string if filename is not None: with open(filename, "w") as outfile: outfile.write(text) else: return text @classmethod def from_json(cls, filename=None, string=None): assert [filename, string].count(None) == 1 if filename is not None: with open(filename) as f: string = f.read() obj = json.loads(string) return cls.from_dict(obj) def _scale_prepare(self): raise NotImplementedError("overload me") def _scale(self): raise NotImplementedError("overload me") def _scale_finish(self): pass def set_working_directory(self, working_directory): self._working_directory = working_directory def get_working_directory(self): return self._working_directory def set_scaler_input_spacegroup(self, spacegroup): self._scalr_input_spacegroup = spacegroup def set_scaler_input_pointgroup(self, pointgroup): self._scalr_input_pointgroup = pointgroup def set_scaler_xcrystal(self, xcrystal): self._scalr_xcrystal = xcrystal def get_scaler_xcrystal(self): return self._scalr_xcrystal def set_scaler_project_info(self, pname, xname): """Set the project and crystal this scaler is working with.""" self._scalr_pname = pname self._scalr_xname = xname def set_scaler_reference_reflection_file(self, reference_reflection_file): self._scalr_reference_reflection_file = reference_reflection_file def get_scaler_reference_reflection_file(self): return self._scalr_reference_reflection_file def set_scaler_freer_file(self, freer_file): self._scalr_freer_file = freer_file def get_scaler_freer_file(self): return self._scalr_freer_file def get_scaler_resolution_limits(self): return self._scalr_resolution_limits def set_scaler_prepare_done(self, done=True): frm = inspect.stack()[1] mod = inspect.getmodule(frm[0]) logger.debug( "Called scaler prepare done from %s %d (%s)" % (mod.__name__, frm[0].f_lineno, done) ) self._scalr_prepare_done = done def set_scaler_done(self, done=True): frm = 
inspect.stack()[1] mod = inspect.getmodule(frm[0]) logger.debug( "Called scaler done from %s %d (%s)" % (mod.__name__, frm[0].f_lineno, done) ) self._scalr_done = done def set_scaler_finish_done(self, done=True): frm = inspect.stack()[1] mod = inspect.getmodule(frm[0]) logger.debug( "Called scaler finish done from %s %d (%s)" % (mod.__name__, frm[0].f_lineno, done) ) self._scalr_finish_done = done def set_scaler_anomalous(self, anomalous): self._scalr_anomalous = anomalous def get_scaler_anomalous(self): return self._scalr_anomalous def scaler_reset(self): logger.debug("Scaler reset") self._scalr_done = False self._scalr_prepare_done = False self._scalr_finish_done = False self._scalr_result = None # getters for the scaling model which was used - first see that the # corrections were applied, then the individual getters for the # separate corrections # getters of the status - note how the gets cascade to ensure that # everything is up-to-date... def get_scaler_prepare_done(self): return self._scalr_prepare_done def get_scaler_done(self): if not self.get_scaler_prepare_done(): logger.debug("Resetting Scaler done as prepare not done") self.set_scaler_done(False) return self._scalr_done def get_scaler_finish_done(self): if not self.get_scaler_done(): logger.debug("Resetting scaler finish done as scaling not done") self.set_scaler_finish_done(False) return self._scalr_finish_done def add_scaler_integrater(self, integrater): """Add an integrater to this scaler, to provide the input.""" # epoch values are trusted as long as they are unique. # if a collision is detected, all epoch values are replaced by an # integer series, starting with 0 if 0 in self._scalr_integraters: epoch = len(self._scalr_integraters) else: epoch = integrater.get_integrater_epoch() # FIXME This is now probably superflous? if epoch == 0 and self._scalr_integraters: raise RuntimeError("multi-sweep integrater has epoch 0") if epoch in self._scalr_integraters: logger.debug( "integrater with epoch %d already exists. will not trust epoch values" % epoch ) # collision. Throw away all epoch keys, and replace with integer series self._scalr_integraters = dict( enumerate(self._scalr_integraters.values()) ) epoch = len(self._scalr_integraters) self._scalr_integraters[epoch] = integrater self.scaler_reset() def scale(self): """Actually perform the scaling - this is delegated to the implementation.""" if self._scalr_integraters == {}: raise RuntimeError("no Integrater implementations assigned for scaling") xname = self._scalr_xcrystal.get_name() while not self.get_scaler_finish_done(): while not self.get_scaler_done(): while not self.get_scaler_prepare_done(): logger.notice(banner("Preparing %s" % xname)) self._scalr_prepare_done = True self._scale_prepare() logger.notice(banner("Scaling %s" % xname)) self._scalr_done = True self._scalr_result = self._scale() self._scalr_finish_done = True self._scale_finish() return self._scalr_result def get_scaled_reflections(self, format): """Get a specific format of scaled reflection files. 
This may trigger transmogrification of files.""" if format not in ("mtz", "sca", "mtz_unmerged", "sca_unmerged"): raise RuntimeError("format %s unknown" % format) self.scale() if format in self._scalr_scaled_reflection_files: return self._scalr_scaled_reflection_files[format] raise RuntimeError("unknown format %s" % format) def get_scaled_merged_reflections(self): """Return the reflection files and so on.""" self.scale() return self._scalr_scaled_reflection_files def get_scaler_statistics(self): """Return the overall scaling statistics.""" self.scale() return self._scalr_statistics def get_scaler_cell(self): """Return the final unit cell from scaling.""" self.scale() return self._scalr_cell def get_scaler_cell_esd(self): """Return the estimated standard deviation of the final unit cell.""" self.scale() return self._scalr_cell_esd def get_scaler_likely_spacegroups(self): """Return a list of likely spacegroups - you should try using the first in this list first.""" self.scale() return self._scalr_likely_spacegroups def get_scaler_highest_resolution(self): """Get the highest resolution achieved by the crystal.""" self.scale() return self._scalr_highest_resolution
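# Minimal subclass sketch (hypothetical names, not from this file): concrete
# scalers supply the _scale_prepare()/_scale()/_scale_finish() hooks that the
# scale() driver loop above calls in order.
#   class MyScaler(Scaler):
#       def _scale_prepare(self):
#           pass
#       def _scale(self):
#           self._scalr_scaled_reflection_files = {"mtz": "scaled.mtz"}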
xia2/xia2
src/xia2/Schema/Interfaces/Scaler.py
Python
bsd-3-clause
13,708
[ "CRYSTAL" ]
945f82418b4e804707d962adc206153c48a01d44ec159905c482194a6a559e78
# # Handle the special case of the first scenario # self.notebook.switchScenario(0,scenarioType="Powder") # # # tab = self.notebook.mainTab tab.settings['Program'] = 'castep' tab.settings['Output file name'] = 'phonon.castep' tab.settings['Excel file name'] = 'application_note_br_permittivity.xlsx' tab.settings['Script file name'] = 'application_note_br_permittivity.py' tab.settings['QM program'] = '' # # tab = self.notebook.settingsTab tab.settings['Eckart flag'] = True tab.settings['Neutral Born charges'] = False tab.settings['Sigma value'] = 10.0 tab.settings['Mass definition'] = 'average' tab.settings['Optical permittivity edited'] = False tab.sigmas_cm1 = [10.0, 10.0, 10.0, 10.0, 10.0, 10.0] # # tab = self.notebook.scenarios[0] tab.settings['Legend'] = 'Eps=1' tab.settings['Scenario type'] = 'Powder' tab.settings['Matrix'] = 'ptfe' tab.settings['Matrix density'] = 2.2 tab.settings['Matrix permittivity'] = 1.0 tab.settings['Bubble radius'] = 30.0 tab.settings['Bubble volume fraction'] = 0.0 tab.settings['Mass fraction'] = 0.01579304466235449 tab.settings['Volume fraction'] = 0.009999999999999998 tab.settings['Particle size(mu)'] = 0.0001 tab.settings['Particle size distribution sigma(mu)'] = 0.0 tab.settings['Ellipsoid a/b'] = 1.0 tab.settings['Unique direction - h'] = 0 tab.settings['Unique direction - k'] = 0 tab.settings['Unique direction - l'] = 1 tab.settings['Mass or volume fraction'] = 'volume' tab.settings['ATR material refractive index'] = 4.0 tab.settings['ATR theta'] = 45.0 tab.settings['ATR S polarisation fraction'] = 0.5 tab.settings['Effective medium method'] = 'Bruggeman' tab.settings['Particle shape'] = 'Sphere' # # self.notebook.addScenario(scenarioType="Powder") tab = self.notebook.scenarios[1] tab.settings['Legend'] = 'Eps=2' tab.settings['Scenario type'] = 'Powder' tab.settings['Matrix'] = 'ptfe' tab.settings['Matrix density'] = 2.2 tab.settings['Matrix permittivity'] = 2.0 tab.settings['Bubble radius'] = 30.0 tab.settings['Bubble volume fraction'] = 0.0 tab.settings['Mass fraction'] = 0.01579304466235449 tab.settings['Volume fraction'] = 0.009999999999999998 tab.settings['Particle size(mu)'] = 0.0001 tab.settings['Particle size distribution sigma(mu)'] = 0.0 tab.settings['Ellipsoid a/b'] = 1.0 tab.settings['Unique direction - h'] = 0 tab.settings['Unique direction - k'] = 0 tab.settings['Unique direction - l'] = 1 tab.settings['Mass or volume fraction'] = 'volume' tab.settings['ATR material refractive index'] = 4.0 tab.settings['ATR theta'] = 45.0 tab.settings['ATR S polarisation fraction'] = 0.5 tab.settings['Effective medium method'] = 'Bruggeman' tab.settings['Particle shape'] = 'Sphere' # # self.notebook.addScenario(scenarioType="Powder") tab = self.notebook.scenarios[2] tab.settings['Legend'] = 'Eps=3' tab.settings['Scenario type'] = 'Powder' tab.settings['Matrix'] = 'ptfe' tab.settings['Matrix density'] = 2.2 tab.settings['Matrix permittivity'] = 3.0 tab.settings['Bubble radius'] = 30.0 tab.settings['Bubble volume fraction'] = 0.0 tab.settings['Mass fraction'] = 0.01579304466235449 tab.settings['Volume fraction'] = 0.009999999999999998 tab.settings['Particle size(mu)'] = 0.0001 tab.settings['Particle size distribution sigma(mu)'] = 0.0 tab.settings['Ellipsoid a/b'] = 1.0 tab.settings['Unique direction - h'] = 0 tab.settings['Unique direction - k'] = 0 tab.settings['Unique direction - l'] = 1 tab.settings['Mass or volume fraction'] = 'volume' tab.settings['ATR material refractive index'] = 4.0 tab.settings['ATR theta'] = 45.0 tab.settings['ATR S polarisation 
fraction'] = 0.5 tab.settings['Effective medium method'] = 'Bruggeman' tab.settings['Particle shape'] = 'Sphere' # # self.notebook.addScenario(scenarioType="Powder") tab = self.notebook.scenarios[3] tab.settings['Legend'] = 'Eps=5' tab.settings['Scenario type'] = 'Powder' tab.settings['Matrix'] = 'ptfe' tab.settings['Matrix density'] = 2.2 tab.settings['Matrix permittivity'] = 5.0 tab.settings['Bubble radius'] = 30.0 tab.settings['Bubble volume fraction'] = 0.0 tab.settings['Mass fraction'] = 0.01579304466235449 tab.settings['Volume fraction'] = 0.009999999999999998 tab.settings['Particle size(mu)'] = 0.0001 tab.settings['Particle size distribution sigma(mu)'] = 0.0 tab.settings['Ellipsoid a/b'] = 1.0 tab.settings['Unique direction - h'] = 0 tab.settings['Unique direction - k'] = 0 tab.settings['Unique direction - l'] = 1 tab.settings['Mass or volume fraction'] = 'volume' tab.settings['ATR material refractive index'] = 4.0 tab.settings['ATR theta'] = 45.0 tab.settings['ATR S polarisation fraction'] = 0.5 tab.settings['Effective medium method'] = 'Bruggeman' tab.settings['Particle shape'] = 'Sphere' # # self.notebook.addScenario(scenarioType="Powder") tab = self.notebook.scenarios[4] tab.settings['Legend'] = 'Eps=10' tab.settings['Scenario type'] = 'Powder' tab.settings['Matrix'] = 'ptfe' tab.settings['Matrix density'] = 2.2 tab.settings['Matrix permittivity'] = 10.0 tab.settings['Bubble radius'] = 30.0 tab.settings['Bubble volume fraction'] = 0.0 tab.settings['Mass fraction'] = 0.01579304466235449 tab.settings['Volume fraction'] = 0.009999999999999998 tab.settings['Particle size(mu)'] = 0.0001 tab.settings['Particle size distribution sigma(mu)'] = 0.0 tab.settings['Ellipsoid a/b'] = 1.0 tab.settings['Unique direction - h'] = 0 tab.settings['Unique direction - k'] = 0 tab.settings['Unique direction - l'] = 1 tab.settings['Mass or volume fraction'] = 'volume' tab.settings['ATR material refractive index'] = 4.0 tab.settings['ATR theta'] = 45.0 tab.settings['ATR S polarisation fraction'] = 0.5 tab.settings['Effective medium method'] = 'Bruggeman' tab.settings['Particle shape'] = 'Sphere' # # self.notebook.addScenario(scenarioType="Powder") tab = self.notebook.scenarios[5] tab.settings['Legend'] = 'Eps=50' tab.settings['Scenario type'] = 'Powder' tab.settings['Matrix'] = 'ptfe' tab.settings['Matrix density'] = 2.2 tab.settings['Matrix permittivity'] = 50.0 tab.settings['Bubble radius'] = 30.0 tab.settings['Bubble volume fraction'] = 0.0 tab.settings['Mass fraction'] = 0.01579304466235449 tab.settings['Volume fraction'] = 0.009999999999999998 tab.settings['Particle size(mu)'] = 0.0001 tab.settings['Particle size distribution sigma(mu)'] = 0.0 tab.settings['Ellipsoid a/b'] = 1.0 tab.settings['Unique direction - h'] = 0 tab.settings['Unique direction - k'] = 0 tab.settings['Unique direction - l'] = 1 tab.settings['Mass or volume fraction'] = 'volume' tab.settings['ATR material refractive index'] = 4.0 tab.settings['ATR theta'] = 45.0 tab.settings['ATR S polarisation fraction'] = 0.5 tab.settings['Effective medium method'] = 'Bruggeman' tab.settings['Particle shape'] = 'Sphere' # # tab = self.notebook.analysisTab tab.settings['Minimum frequency'] = -1 tab.settings['Maximum frequency'] = 400 tab.settings['title'] = 'Analysis' tab.settings['Covalent radius scaling'] = 1.1 tab.settings['Bonding tolerance'] = 0.1 tab.settings['Bar width'] = 0.5 # # tab = self.notebook.viewerTab tab.settings['Atom scaling'] = 0.5 tab.settings['Maximum displacement'] = 1.0 tab.settings['Bond colour'] = [80, 80, 80, 255] 
tab.settings['Bond radius'] = 0.1 tab.settings['Cell colour'] = [255, 0, 0, 255] tab.settings['Cell radius'] = 0.1 tab.settings['Background colour'] = [120, 120, 120, 255] tab.settings['Arrow colour'] = [0, 255, 0, 255] tab.settings['Arrow radius'] = 0.07 tab.settings['Number of phase steps'] = 41 tab.settings['Super Cell'] = [1, 1, 1] # # tab = self.notebook.fitterTab tab.settings['Excel file name'] = '' tab.settings['Plot title'] = 'Experimental and Calculated Spectral Comparison' tab.settings['Fitting type'] = 'Minimise x-correlation' tab.settings['Number of iterations'] = 20 tab.settings['Frequency scaling factor'] = 1.0 tab.settings['Optimise frequency scaling'] = False tab.settings['Spectrum scaling'] = False tab.settings['Spectrum scaling factor'] = 1.0 tab.settings['Independent y-axes'] = True tab.settings['Spectral difference threshold'] = 0.05 tab.settings['HPFilter lambda'] = 7.0 tab.settings['Baseline removal'] = False tab.settings['Scenario index'] = 0 # # tab = self.notebook.plottingTab tab.settings['Minimum frequency'] = 0 tab.settings['Maximum frequency'] = 800 tab.settings['Frequency increment'] = 0.2 tab.settings['Molar definition'] = 'Unit cells' tab.settings['Number of atoms'] = 1 tab.settings['Plot type'] = 'Powder Molar Absorption' tab.settings['concentration'] = 86.71312720248292 tab.settings['cell concentration'] = 86.71312720248292
JohnKendrick/PDielec
Examples/Castep/MgO/application_note_br_permittivity.py
Python
mit
8,612
[ "CASTEP" ]
eab1705bc45bed66699ae78b4c2190c9135f2ccb59a16743ae31b61f393331cf
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# sssadmin - [insert a few words of module description on this line]
# Copyright (C) 2003-2009  The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#

import cgi
import cgitb
cgitb.enable()

from shared.functionality.sssadmin import main
from shared.cgiscriptstub import run_cgi_script_possibly_with_cert

run_cgi_script_possibly_with_cert(main)
heromod/migrid
mig/cgi-bin/sssadmin.py
Python
gpl-2.0
1,142
[ "Brian" ]
ca145dd893a6e4f0c9a3b15f9787e1954e36f116fe126d41fa43b7324cd8f49d
# coding: utf-8
import io
import struct
from .. import common
from .. import vmd


def write(ios, motion):
    """Write a vmd.Motion to a binary stream in VMD format."""
    assert(isinstance(ios, io.IOBase))
    assert(isinstance(motion, vmd.Motion))

    writer = common.BinaryWriter(ios)
    # 30 bytes
    writer.write_bytes(b"Vocaloid Motion Data 0002", 30)
    # 20 bytes
    writer.write_bytes(b"", 20)

    print(len(motion.motions))
    writer.write_uint(len(motion.motions), 4)
    for m in motion.motions:
        """
        One frame's worth of data (111 bytes)
        """
        writer.write_bytes(m.name, 15)
        writer.write_uint(m.frame, 4)
        writer.write_float(m.pos.x)
        writer.write_float(m.pos.y)
        writer.write_float(m.pos.z)
        writer.write_float(m.q.x)
        writer.write_float(m.q.y)
        writer.write_float(m.q.z)
        writer.write_float(m.q.w)
        writer.write_bytes(m.complement, 64)

    # ToDo
    writer.write_uint(0, 4)
    writer.write_uint(0, 4)
    writer.write_uint(0, 4)

    return True
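# Frame-size check (derived from the writes above, assuming 4-byte floats):
# 15 (name) + 4 (frame number) + 7 * 4 (position + quaternion) + 64
# (interpolation block) = 111 bytes per frame, matching the docstring.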
GRGSIBERIA/mmd-transporter
mmd-transporter/pymeshio/vmd/writer.py
Python
gpl-2.0
1,015
[ "VMD" ]
259d2fdb4fb57f30ee3419a54fb74de886ad724002537568d9e0490b3677a849
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html

import os
import sys
from optparse import OptionParser, OptionValueError
import math
import vtk

cohesion = 1.0
friction_angle = 20 * math.pi / 180.0
sinphi = math.sin(friction_angle)
cosphi = math.cos(friction_angle)  # fixed: was math.sin, but cosphi needs cos
cohcos = cohesion * cosphi

dp_c = 3.0
dp_phi = math.pi / 6.0
dp_sinphi = math.sin(dp_phi)
dp_cosphi = math.cos(dp_phi)
dp_t = 0.0
dp_tc = 2.0

def ismoother(f_diff):
    # smoothing correction: nonzero only when the two yield functions are
    # within smoothing_tol of each other, making the max() below C1-continuous
    if (abs(f_diff) >= opts.smoothing_tol):
        return 0.0
    return 0.5 * (opts.smoothing_tol - abs(f_diff)) - opts.smoothing_tol / math.pi * math.cos(0.5 * math.pi * f_diff / opts.smoothing_tol)

def yield_function_2(yf1, yf2):
    return max(yf1, yf2) + ismoother(yf1 - yf2)

def yield_function(x, y, z):
    yfs = []
    if opts.twoD_example:
        yfs += [- x, - y, y - 1.0, - y - 0.5 + 0.5 * x]
    if opts.twoD_example_alternative:
        yfs += [y - 1.0, - y - 0.5 + 0.5 * x, - x, - y]
    if opts.dp:
        yfs += [y + x * dp_sinphi - dp_c * dp_cosphi, x - dp_t, -x - dp_tc]
    if opts.tensile:
        yfs += [x - opts.tensile_strength, y - opts.tensile_strength, z - opts.tensile_strength]
    if opts.mc:
        yfs += [0.5 * (x - z) + 0.5 * (x + z) * sinphi - cohcos,
                0.5 * (y - z) + 0.5 * (y + z) * sinphi - cohcos,
                0.5 * (x - y) + 0.5 * (x + y) * sinphi - cohcos,
                0.5 * (y - x) + 0.5 * (x + y) * sinphi - cohcos,
                0.5 * (z - y) + 0.5 * (y + z) * sinphi - cohcos,
                0.5 * (z - x) + 0.5 * (x + z) * sinphi - cohcos]
    yf = yfs[0]
    for i in range(1, len(yfs)):
        yf = yield_function_2(yf, yfs[i])
    return yf

# parse command line
p = OptionParser(usage="""usage: %prog [options] <vtk_file>
Inserts yield function values into <vtk_file>.
Only 3D input is accepted: this program assumes that the individual yield functions are functions of x, y, z.
""")
p.add_option("-v", action="store_true", dest="verbose", help="Verbose")
p.add_option("--name", action="store", type="string", default="yield_function", dest="name",
             help="The pointdata produced will have this name.  Default=%default")
p.add_option("--smoothing_tol", action="store", type="float", default=0.1, dest="smoothing_tol",
             help="The smoothing tolerance (a) parameter.  Default=%default")
p.add_option("-t", action="store_true", dest="tensile",
             help="Yield function will contain contributions from tensile (Rankine) failure")
p.add_option("--tensile_strength", action="store", type="float", default=0.7, dest="tensile_strength",
             help="Tensile strength")
p.add_option("-m", action="store_true", dest="mc",
             help="Yield function will contain contributions from Mohr-Coulomb failure")
p.add_option("-d", action="store_true", dest="dp",
             help="Yield function will contain contributions from Drucker-Prager failure")
p.add_option("-e", action="store_true", dest="twoD_example",
             help="Yield function will contain contributions from an example 2D yield function")
p.add_option("-a", action="store_true", dest="twoD_example_alternative",
             help="Yield function will contain contributions from an alternative example 2D yield function")
(opts, args) = p.parse_args()

# get the vtk filename
if len(args) != 1:
    p.print_help()
    sys.exit(1)
in_file = args[0]

if opts.verbose:
    print("Reading", in_file)

if in_file.endswith(".vtp"):
    indata = vtk.vtkXMLPolyDataReader()
    writer = vtk.vtkXMLPolyDataWriter()
elif in_file.endswith(".vtu"):
    indata = vtk.vtkXMLUnstructuredGridReader()
    writer = vtk.vtkXMLUnstructuredGridWriter()
elif in_file.endswith(".vtr"):
    indata = vtk.vtkXMLRectilinearGridReader()
    writer = vtk.vtkXMLRectilinearGridWriter()
else:
    print("This program has not yet been configured to read files of type", in_file)
    sys.exit(2)

indata.SetFileName(in_file)
indata.Update()
indata = indata.GetOutput()

if opts.verbose:
    print("Generating", opts.name)

yf = vtk.vtkDoubleArray()
yf.SetName(opts.name)
yf.SetNumberOfValues(indata.GetNumberOfPoints())
for ptid in range(indata.GetNumberOfPoints()):
    (x, y, z) = indata.GetPoint(ptid)
    yf.SetValue(ptid, yield_function(x, y, z))
indata.GetPointData().AddArray(yf)

if opts.verbose:
    print("Writing", in_file)
writer.SetFileName(in_file)
writer.SetDataModeToBinary()
writer.SetInputData(indata)  # GetProducerPort() was removed in VTK 6
writer.Write()

sys.exit(0)
nuclear-wizard/moose
modules/tensor_mechanics/doc/tests/yf.py
Python
lgpl-2.1
4,579
[ "MOOSE", "VTK" ]
831739af17ea65b5c5eaf99e07ec2f0d25cf6f6ef4372777327604267d04434a
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RSummarizedexperiment(RPackage):
    """The SummarizedExperiment container contains one or more assays, each
    represented by a matrix-like object of numeric or other mode. The rows
    typically represent genomic ranges of interest and the columns represent
    samples."""

    homepage = "https://bioconductor.org/packages/SummarizedExperiment/"
    url = "https://bioconductor.org/packages/3.5/bioc/src/contrib/SummarizedExperiment_1.6.5.tar.gz"
    list_url = homepage

    version('1.6.5', '8f7d534e37cfda1e3e145ec7609c61f5')

    depends_on('r-genomicranges', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-delayedarray', type=('build', 'run'))
    depends_on('r-matrix', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-genomeinfodb', type=('build', 'run'))
    depends_on('r@3.4.0:3.4.9', when='@1.6.5')
lgarren/spack
var/spack/repos/builtin/packages/r-summarizedexperiment/package.py
Python
lgpl-2.1
2,232
[ "Bioconductor" ]
669b9ca4534699b1d86bbd14bbec9c20b1efb81660180ea181410ef855ce2706
from PIL import Image, ImageFilter
import scipy
from scipy import ndimage
import numpy
from skimage import filter
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
import gdalnumeric
from skimage.io import imread
from skimage.segmentation import quickshift, slic, felzenszwalb
from skimage.filter.rank import entropy
from PIL import Image, ImageEnhance
from osgeo import gdal
from gdalconst import *

basepath = "/home/vortex/Desktop/LAYERS/lorenzo/tiles/2003_02/LE71690612003035SGS00/rgb/"
file = "projected_5.tif"
image = Image.open(basepath + file)
path = basepath + "contrast_proj5/"

src_dataset = gdal.Open(path + "contrast0.tif")
geotransform = src_dataset.GetGeoTransform()
spatialreference = src_dataset.GetProjection()
ncol = src_dataset.RasterXSize
nrow = src_dataset.RasterYSize
nband = 1

# print "GaussianBlur"
# file = path + 'FIND_EDGES.tif'
# image = image.filter(ImageFilter.GaussianBlur)
# image.save(file)
#
# print "Median_files"
# file = path + 'MedianFilter.tif'
# image = image.filter(ImageFilter.MedianFilter)
# image.save(file)
#
# print "SMOOTH"
# file = path + 'SMOOTH.tif'
# image = image.filter(ImageFilter.SMOOTH)
# image.save(file)

# FILTERS
# Brightness and Contrast

# print "Median_files"
# file = path + 'MedianFilter.tif'
# image = image.filter(ImageFilter.MedianFilter)
# image.save(file)
#
# print "Median_files2"
# image = Image.open(path + 'MedianFilter.tif')
# file = path + 'MedianFilter2.tif'
# image = image.filter(ImageFilter.MedianFilter)
# image.save(file)
#
# print "Median_files3"
# image = Image.open(path + 'MedianFilter2.tif')
# file = path + 'MedianFilter3.tif'
# image = image.filter(ImageFilter.MedianFilter)
# image.save(file)

# lastfile = None
# for i in range(14, 15):
#     print "gaussian" + str(i)
#     image = Image.open(path + 'gaussian' + str(i) + '.tif')
#     file = path + 'gaussian' + str(i+1) + '.tif'
#     image = image.filter(ImageFilter.GaussianBlur)
#     image.save(file)
#     lastfile = file

# SOBELFILTER
# im = scipy.misc.imread(lastfile)
# im = im.astype('int32')
#
# dx = ndimage.sobel(im, 0)  # horizontal derivative
# dy = ndimage.sobel(im, 1)  # vertical derivative
# mag = numpy.hypot(dx, dy)  # magnitude
# mag *= 255.0 / numpy.max(mag)  # normalize (Q&D)
# scipy.misc.imsave(path + "sobelfile2_.tif", mag)

lastfile = None
for i in range(0, 3):
    print "contrast" + str(i)
    image = Image.open(path + 'contrast' + str(i) + '.tif')
    file = path + 'contrast' + str(i+1) + '.tif'
    image = image.convert('L')
    processed_image = ImageEnhance.Contrast(image)
    im = processed_image.enhance(2)
    # bright = ImageEnhance.Brightness(im)
    # im = bright.enhance(1)
    im.save(file, "tiff")
    fmt = 'GTiff'
    driver = gdal.GetDriverByName(fmt)
    dst_dataset_sobel = gdal.Open(file, GA_Update)
    dst_dataset_sobel.SetGeoTransform(geotransform)
    dst_dataset_sobel.SetProjection(spatialreference)
    lastfile = file

# SOBELFILTER
print "SOBEL"
im = scipy.misc.imread(lastfile)
im = im.astype('int32')
#
dx = ndimage.sobel(im, 0)  # horizontal derivative
dy = ndimage.sobel(im, 1)  # vertical derivative
mag = numpy.hypot(dx, dy)  # magnitude
mag *= 255.0 / numpy.max(mag)  # normalize (Q&D)
scipy.misc.imsave(path + "sobelfile32222_.tif", mag)

fmt = 'GTiff'
driver = gdal.GetDriverByName(fmt)
dst_dataset_sobel = gdal.Open(path + "sobelfile32222_.tif", GA_Update)
dst_dataset_sobel.SetGeoTransform(geotransform)
dst_dataset_sobel.SetProjection(spatialreference)

# print "CANNY"
# from osgeo import gdal
# ds = gdal.Open(path + "sobelfile32_.tif")
# im = np.array(ds.GetRasterBand(1).ReadAsArray())
# # im = gdalnumeric.LoadFile(path + "gaussian_filter.tif")
# print "canny"
# print im
#
# # edges = filter.canny(im)
# # edges2 = filter.canny(im, sigma=3)
# edges = filter.canny(im, 3.0, 0.1, 0.5)
#
# print edges
#
# print "saving canny"
# scipy.misc.imsave(path + "gaussian_filter_cannys2333.tif", edges)
# # scipy.misc.imsave(path + "gaussian_filter_edges2.tif", edges2)

# lastfile = None
# for i in range(30, 30):
#     print "Median_files" + str(i)
#     image = Image.open(path + 'MedianFilter' + str(i) + '.tif')
#     file = path + 'MedianFilter' + str(i+1) + '.tif'
#     image = image.filter(ImageFilter.MedianFilter)
#     image.save(file)
#     lastfile = file

# for i in range(32, 50):
#     print "Median_files" + str(i)
#     image = Image.open(path + 'MedianFilter' + str(i) + '.tif')
#     file = path + 'MedianFilter' + str(i+1) + '.tif'
#     image = image.filter(ImageFilter.MaxFilter)
#     image.save(file)
#     lastfile = file

# for i in range(33, 34):
#     print "Median_files" + str(i)
#     image = Image.open(path + 'MedianFilter' + str(i) + '.tif')
#     file = path + 'MedianFilter' + str(i+1) + '.tif'
#     image = image.filter(ImageFilter.MedianFilter)
#     image.save(file)
#     lastfile = file

# im = scipy.misc.imread(lastfile)
# im = im.astype('int32')
#
# im = ndimage.gaussian_filter(im, 4)
#
# scipy.misc.imsave(path + "gaussian_filter_test.tif", im)

# im = scipy.misc.imread(file)
# im = im.astype('int32')
#
# im = ndimage.gaussian_filter(im, 4)
#
# scipy.misc.imsave(path + "gaussian_filter.tif", im)

# from osgeo import gdal
# ds = gdal.Open(path + "gaussian_filter.tif")
# im = np.array(ds.GetRasterBand(1).ReadAsArray())
# # im = gdalnumeric.LoadFile(path + "gaussian_filter.tif")
# print "canny"
# print im
#
# # edges = filter.canny(im)
# # edges2 = filter.canny(im, sigma=3)
# edges = filter.canny(im, 3.0, 0.1, 0.5)
#
# print edges
#
# print "saving canny"
# scipy.misc.imsave(path + "gaussian_filter_cannys23.tif", edges)
# # scipy.misc.imsave(path + "gaussian_filter_edges2.tif", edges2)
#
# print "DONE!"

# SOBELFILTER
# dx = ndimage.sobel(im, 0)  # horizontal derivative
# dy = ndimage.sobel(im, 1)  # vertical derivative
# mag = numpy.hypot(dx, dy)  # magnitude
# mag *= 255.0 / numpy.max(mag)  # normalize (Q&D)
# scipy.misc.imsave(path + "sobelfile_SMOOTH.tif", mag)

# print "canny filter"
# # Compute the Canny filter for two values of sigma
# edges1 = filter.canny(im)
# edges2 = filter.canny(im, sigma=3)
#
# # display results
# fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3))
#
# ax1.imshow(im, cmap=plt.cm.jet)
# ax1.axis('off')
# ax1.set_title('noisy image', fontsize=20)
#
# ax2.imshow(edges1, cmap=plt.cm.gray)
# ax2.axis('off')
# ax2.set_title('Canny filter, $\sigma=1$', fontsize=20)
#
# ax3.imshow(edges2, cmap=plt.cm.gray)
# ax3.axis('off')
# ax3.set_title('Canny filter, $\sigma=3$', fontsize=20)
#
# fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
#                     bottom=0.02, left=0.02, right=0.98)
#
# plt.show()

print "DONE!"

# print "GaussianBlur"
# image = image.filter(ImageFilter.FIND_EDGES)
# image.save(path + 'FIND_EDGES.tif')
#
# print "GaussianBlur"
# image = image.filter(ImageFilter.GaussianBlur)
# image.save(path + 'projected_5_GaussianBlur.tif')
#
# print "EDGE_ENHANCE_MORE"
# image = image.filter(ImageFilter.EDGE_ENHANCE_MORE)
# image.save(path + 'rgb_true_color.tif')
#
# print "EDGE_ENHANCE"
# image = image.filter(ImageFilter.EDGE_ENHANCE)
# image.save(path + 'EDGE_ENHANCE.tif')
#
# print "DETAIL"
# image = image.filter(ImageFilter.DETAIL)
# image.save(path + 'DETAIL.tif')
#
# print "RANK"
# image = image.filter(ImageFilter.RankFilter)
# image.save(path + 'RANK.tif')
#
# print "SHARPEN"
# image = Image.open(basepath + 'projected_5.tif')
# image = image.filter(ImageFilter.SHARPEN)
# image.save(path + 'SHARPEN.tif')
#
# image = Image.open(path + 'SHARPEN.tif')
# image = image.filter(ImageFilter.SMOOTH_MORE)
# image.save(path + 'SMOOTH_MORE.tif')
geobricks/Playground
playground/clustering/image_processing_landsat_example/threshold_detection.py
Python
gpl-2.0
7,725
[ "Gaussian" ]
44274212033e157c3f23a9ee1af490a6e7b7b8ffc0b5f7a825e3bf3b02a8ae5a
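A minimal, self-contained sketch of the record's core step (Sobel gradient magnitude on a raster while keeping its georeferencing). This is not the record's code: scipy.misc.imread/imsave have since been removed from SciPy, so the sketch reads and writes through GDAL instead, and the file names are hypothetical.

import numpy as np
from scipy import ndimage
from osgeo import gdal

src = gdal.Open("input.tif")  # hypothetical input raster
band = src.GetRasterBand(1).ReadAsArray().astype("float64")

dx = ndimage.sobel(band, axis=0)  # horizontal derivative
dy = ndimage.sobel(band, axis=1)  # vertical derivative
mag = np.hypot(dx, dy)            # gradient magnitude
mag *= 255.0 / mag.max()          # quick-and-dirty normalization

drv = gdal.GetDriverByName("GTiff")
dst = drv.Create("edges.tif", src.RasterXSize, src.RasterYSize, 1,
                 gdal.GDT_Float32)
dst.SetGeoTransform(src.GetGeoTransform())  # copy georeferencing in one pass
dst.SetProjection(src.GetProjection())
dst.GetRasterBand(1).WriteArray(mag)
dst.FlushCache()

Writing through GDAL avoids the record's pattern of saving with PIL/SciPy and then reopening the file in update mode just to re-attach the geotransform and projection.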
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2012 - 2013 Daniel Reis
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Project Issue related Tasks',
    'summary': 'Use Tasks to support Issue resolution reports',
    'version': '1.1',
    'category': 'Project Management',
    'description': """\
Support for the use case where solving an Issue means a Task should be done,
such as an on-site visit, and a report must be made to document the work done.
This is a common scenario in technical field services.

The Issue form already has a "Task" field, allowing one to create a Task
related to an Issue. This module adds some usability improvements:

* "Create Task" button on the Issue form
* Automatically close the Issue when the Task is closed
* Automatically cancel the Task when the Issue is cancelled
* Make the Task also visible to all followers of the related Issue
""",
    'author': 'Daniel Reis',
    'depends': [
        'project_issue',
    ],
    'data': [
        'project_issue_view.xml',
        'project_task_cause_view.xml',
        'project_task_view.xml',
        'security/ir.model.access.csv',
        'security/project_security.xml',
    ],
    'installable': True,
}
hugosdsantos/project-service
project_issue_task/__openerp__.py
Python
agpl-3.0
2,012
[ "VisIt" ]
490298e55440db8dad0c6b76c5291170dd5aded5c683e238e7e64def0630f8e7
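Because a manifest like the one above is a single Python dict literal, it can be inspected without importing the addon. A hedged sketch using only the standard library (the file path is hypothetical):

import ast

with open("project_issue_task/__openerp__.py") as f:
    tree = ast.parse(f.read())

manifest = ast.literal_eval(tree.body[0].value)  # the dict literal
print(manifest["name"])
print("depends on:", ", ".join(manifest["depends"]))

Parsing with ast rather than importing keeps the inspection side-effect free, which matters when scanning many addons at once.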
# Copyright (C) 2010-2018 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import print_function import sys import unittest as ut import numpy as np import espressomd @ut.skipIf(not espressomd.has_features("LENNARD_JONES"), "Skipped because LENNARD_JONES turned off.") class AnalyzeDistributions(ut.TestCase): system = espressomd.System(box_l=[1.0, 1.0, 1.0]) np.random.seed(1234) num_part = 10 @classmethod def setUpClass(self): box_l = 20.0 # start with a small bo self.system.box_l = np.array([box_l, box_l, box_l]) self.system.cell_system.set_n_square(use_verlet_lists=False) for p in range(self.num_part): self.system.part.add( id=p, pos=np.random.random() * self.system.box_l) def calc_rdf(self, r, bins): # this generates indices for all i<j combinations ij = np.triu_indices(len(r), k=1) r_ij = r[ij[0]] - r[ij[1]] dist = np.sqrt(np.sum(r_ij**2, axis=1)) hist = np.histogram(dist, bins=bins, density=False)[0] return hist def calc_min_distribution(self, bins, type_list_a): dist = [] for i in range(self.num_part): dist.append(self.system.analysis.dist_to(id=i)) hist = np.histogram(dist, bins=bins, density=False)[0] return hist / (float(np.sum(hist))) # test system.analysis.rdf() def test_rdf(self): # increase PBC for remove mirror images old_pos = self.system.part[:].pos.copy() self.system.box_l = self.system.box_l * 2. self.system.part[:].pos = old_pos r_min = 0.0 r_max = 100.0 r_bins = 10 bin_width = (r_max - r_min) / r_bins bins = np.arange(r_min, r_max + bin_width, bin_width) bin_volume = 4. / 3. * np.pi * (bins[1:]**3 - bins[:-1]**3) box_volume = self.system.box_l[ 0] * self.system.box_l[ 1] * self.system.box_l[ 2] # all the same type core_rdf = self.system.analysis.rdf(rdf_type='rdf', type_list_a=[0], type_list_b=[0], r_min=r_min, r_max=r_max, r_bins=r_bins) num_pair = 0.5 * (self.num_part) * (self.num_part - 1) r = self.system.part[:].pos # bins self.assertTrue(np.allclose(core_rdf[0], (bins[1:] + bins[:-1]) * 0.5)) # rdf self.assertTrue( np.allclose(core_rdf[1] * bin_volume * num_pair / box_volume, self.calc_rdf(r, bins))) # change one type self.system.part[0].type = 1 r = self.system.part[1:].pos core_rdf = self.system.analysis.rdf(rdf_type='rdf', type_list_a=[0], type_list_b=[0], r_min=r_min, r_max=r_max, r_bins=r_bins) num_pair = 0.5 * (self.num_part - 1) * (self.num_part - 2) self.assertTrue( np.allclose(core_rdf[1] * bin_volume * num_pair / box_volume, self.calc_rdf(r, bins))) # compare with type core_rdf = self.system.analysis.rdf(rdf_type='rdf', type_list_a=[1], type_list_b=[0], r_min=r_min, r_max=r_max, r_bins=r_bins) num_pair = (self.num_part - 1) dist = np.sqrt( np.sum((self.system.part[1:].pos - self.system.part[0].pos)**2, axis=1)) hist = np.histogram(dist, bins=bins, density=False)[0] self.assertTrue( np.allclose(core_rdf[1] * bin_volume * num_pair / box_volume, hist)) # restore PBC self.system.box_l = self.system.box_l / 2. 
self.system.part[:].pos = old_pos # test system.analysis.distribution(), all the same particle types def test_distribution_lin(self): # increase PBC for remove mirror images old_pos = self.system.part[:].pos.copy() self.system.box_l = self.system.box_l * 2. self.system.part[:].pos = old_pos r_min = 0.0 r_max = 100.0 r_bins = 100 bins = np.linspace(r_min, r_max, num=r_bins + 1, endpoint=True) bin_volume = 4. / 3. * np.pi * (bins[1:]**3 - bins[:-1]**3) box_volume = self.system.box_l[ 0] * self.system.box_l[ 1] * self.system.box_l[ 2] # no int flag core_rdf = self.system.analysis.distribution(type_list_a=[0], type_list_b=[0], r_min=r_min, r_max=r_max, r_bins=r_bins, log_flag=0, int_flag=0) # bins self.assertTrue(np.allclose(core_rdf[0], (bins[1:] + bins[:-1]) * 0.5)) # rdf self.assertTrue(np.allclose(core_rdf[1], self.calc_min_distribution(bins, type_list_a=[0]))) # with int flag core_rdf = self.system.analysis.distribution(type_list_a=[0], type_list_b=[0], r_min=r_min, r_max=r_max, r_bins=r_bins, log_flag=0, int_flag=1) self.assertTrue(np.allclose(core_rdf[1], np.cumsum(self.calc_min_distribution(bins, type_list_a=[0])))) if __name__ == "__main__": print("Features: ", espressomd.features()) ut.main()
hmenke/espresso
testsuite/python/analyze_distribution.py
Python
gpl-3.0
6,990
[ "ESPResSo" ]
aff717b1dc2da7738807ca9e1721f94ddf29e4e2d8065f4953a4f0b69f0e6514
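The reference the test above checks rdf() against is a plain NumPy pairwise-distance histogram (its calc_rdf helper). Restated as a standalone sketch, with the same 10-particle, 20-unit box setup:

import numpy as np

def pair_distance_histogram(pos, bins):
    # all i < j index pairs, then the distance for each pair
    i, j = np.triu_indices(len(pos), k=1)
    dist = np.linalg.norm(pos[i] - pos[j], axis=1)
    return np.histogram(dist, bins=bins, density=False)[0]

pos = np.random.random((10, 3)) * 20.0  # 10 particles in a 20^3 box
bins = np.linspace(0.0, 100.0, 11)      # r_min=0, r_max=100, 10 bins
print(pair_distance_histogram(pos, bins))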
#!/usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#

import sys, os

root = os.path.dirname(os.path.realpath(__file__))

# => Driver Code <= #

if __name__ == '__main__':

    # > Working Dirname < #

    if len(sys.argv) == 1:
        dirname = '.'
    elif len(sys.argv) == 2:
        dirname = sys.argv[1]
    else:
        raise Exception('Usage: fsapt.py [dirname]')

    # > Copy Files < #

    os.system('cp %s/pymol/*pymol %s' % (root, dirname))
ashutoshvt/psi4
psi4/share/psi4/fsapt/copy_pymol.py
Python
lgpl-3.0
1,338
[ "Psi4", "PyMOL" ]
427a8e2665a29bdea65ef4bf6cfc3ba59289545c65ec5f6f1b14e6da274117be
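A hedged alternative sketch of the same copy step without shelling out to cp, using glob and shutil from the standard library (this is not what the Psi4 script does, just a portable equivalent):

import glob
import os
import shutil
import sys

root = os.path.dirname(os.path.realpath(__file__))
dirname = sys.argv[1] if len(sys.argv) == 2 else '.'

for src in glob.glob(os.path.join(root, 'pymol', '*pymol')):
    shutil.copy(src, dirname)  # copy each matching file into dirname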
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ Created on Nov 10, 2012 @author: shyue """ from __future__ import division, unicode_literals from pymatgen.util.testing import PymatgenTest __author__ = "Shyue Ping Ong" __copyright__ = "Copyright 2011, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "shyuep@gmail.com" __status__ = "Production" __date__ = "Nov 10, 2012" import unittest from pymatgen.core.periodic_table import Element from pymatgen.core.composition import Composition, CompositionError, \ ChemicalPotential import random class CompositionTest(PymatgenTest): def setUp(self): self.comp = list() self.comp.append(Composition("Li3Fe2(PO4)3")) self.comp.append(Composition("Li3Fe(PO4)O")) self.comp.append(Composition("LiMn2O4")) self.comp.append(Composition("Li4O4")) self.comp.append(Composition("Li3Fe2Mo3O12")) self.comp.append(Composition("Li3Fe2((PO4)3(CO3)5)2")) self.comp.append(Composition("Li1.5Si0.5")) self.comp.append(Composition("ZnOH")) self.indeterminate_comp = [] self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("Co1", True) ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("Co1", False) ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("co2o3") ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("ncalu") ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("calun") ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula( "liCoo2n (pO4)2") ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula( "(co)2 (PO)4") ) self.indeterminate_comp.append( Composition.ranked_compositions_from_indeterminate_formula("Fee3")) def test_immutable(self): try: self.comp[0]["Fe"] = 1 except Exception as ex: self.assertIsInstance(ex, TypeError) try: del self.comp[0]["Fe"] except Exception as ex: self.assertIsInstance(ex, TypeError) def test_in(self): self.assertIn("Fe", self.comp[0]) self.assertNotIn("Fe", self.comp[2]) self.assertIn(Element("Fe"), self.comp[0]) self.assertEqual(self.comp[0]["Fe"], 2) self.assertEqual(self.comp[0]["Mn"], 0) self.assertRaises(TypeError, self.comp[0].__getitem__, "Hello") self.assertRaises(TypeError, self.comp[0].__getitem__, "Vac") def test_hill_formula(self): c = Composition("CaCO3") self.assertEqual(c.hill_formula, "C Ca O3") c = Composition("C2H5OH") self.assertEqual(c.hill_formula, "C2 H6 O") def test_init_(self): self.assertRaises(CompositionError, Composition, {"H": -0.1}) f = {'Fe': 4, 'Li': 4, 'O': 16, 'P': 4} self.assertEqual("Li4 Fe4 P4 O16", Composition(f).formula) f = {None: 4, 'Li': 4, 'O': 16, 'P': 4} self.assertRaises(TypeError, Composition, f) f = {1: 2, 8: 1} self.assertEqual("H2 O1", Composition(f).formula) self.assertEqual("Na2 O1", Composition(Na=2, O=1).formula) c = Composition({'S': Composition.amount_tolerance / 2}) self.assertEqual(len(c.elements), 0) def test_average_electroneg(self): val = [2.7224999999999997, 2.4160000000000004, 2.5485714285714285, 2.21, 2.718, 3.08, 1.21, 2.43] for i, c in enumerate(self.comp): self.assertAlmostEqual(c.average_electroneg, val[i]) def test_formula(self): correct_formulas = ['Li3 Fe2 P3 O12', 'Li3 Fe1 P1 O5', 'Li1 Mn2 O4', 'Li4 O4', 'Li3 Fe2 Mo3 O12', 'Li3 Fe2 P6 C10 O54', 'Li1.5 Si0.5', 'Zn1 H1 O1'] all_formulas = 
[c.formula for c in self.comp] self.assertEqual(all_formulas, correct_formulas) self.assertRaises(CompositionError, Composition, "(co2)(po4)2") self.assertEqual(Composition("K Na 2").reduced_formula, "KNa2") self.assertEqual(Composition("K3 Na 2").reduced_formula, "K3Na2") self.assertEqual(Composition("Na 3 Zr (PO 4) 3").reduced_formula, "Na3Zr(PO4)3") def test_mixed_valence(self): comp = Composition({"Fe2+": 2, "Fe3+": 4, "Li+": 8}) self.assertEqual(comp.reduced_formula, "Li4Fe3") self.assertEqual(comp.alphabetical_formula, "Fe6 Li8") self.assertEqual(comp.formula, "Li8 Fe6") def test_indeterminate_formula(self): correct_formulas = [["Co1"], ["Co1", "C1 O1"], ["Co2 O3", "C1 O5"], ["N1 Ca1 Lu1", "U1 Al1 C1 N1"], ["N1 Ca1 Lu1", "U1 Al1 C1 N1"], ["Li1 Co1 P2 N1 O10", "Li1 P2 C1 N1 O11", "Li1 Co1 Po8 N1 O2", "Li1 Po8 C1 N1 O3"], ["Co2 P4 O4", "Co2 Po4", "P4 C2 O6", "Po4 C2 O2"], []] for i, c in enumerate(correct_formulas): self.assertEqual([Composition(comp) for comp in c], self.indeterminate_comp[i]) def test_alphabetical_formula(self): correct_formulas = ['Fe2 Li3 O12 P3', 'Fe1 Li3 O5 P1', 'Li1 Mn2 O4', 'Li4 O4', 'Fe2 Li3 Mo3 O12', 'C10 Fe2 Li3 O54 P6', 'Li1.5 Si0.5', 'H1 O1 Zn1'] all_formulas = [c.alphabetical_formula for c in self.comp] self.assertEqual(all_formulas, correct_formulas) def test_reduced_composition(self): correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4', 'Li2O2', 'Li3Fe2(MoO4)3', 'Li3Fe2P6(C5O27)2', 'Li1.5Si0.5', 'ZnHO'] for i in range(len(self.comp)): self.assertEqual(self.comp[i] .get_reduced_composition_and_factor()[0], Composition(correct_reduced_formulas[i])) def test_reduced_formula(self): correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4', 'Li2O2', 'Li3Fe2(MoO4)3', 'Li3Fe2P6(C5O27)2', 'Li1.5Si0.5', 'ZnHO'] all_formulas = [c.reduced_formula for c in self.comp] self.assertEqual(all_formulas, correct_reduced_formulas) # test rounding c = Composition({'Na': 2 - Composition.amount_tolerance / 2, 'Cl': 2}) self.assertEqual('NaCl', c.reduced_formula) def test_integer_formula(self): correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4', 'Li2O2', 'Li3Fe2(MoO4)3', 'Li3Fe2P6(C5O27)2', 'Li3Si', 'ZnHO'] all_formulas = [c.get_integer_formula_and_factor()[0] for c in self.comp] self.assertEqual(all_formulas, correct_reduced_formulas) self.assertEqual(Composition('Li0.5O0.25').get_integer_formula_and_factor(), ('Li2O', 0.25)) self.assertEqual(Composition('O0.25').get_integer_formula_and_factor(), ('O2', 0.125)) formula, factor = Composition("Li0.16666667B1.0H1.0").get_integer_formula_and_factor() self.assertEqual(formula, 'Li(BH)6') self.assertAlmostEqual(factor, 1 / 6) def test_num_atoms(self): correct_num_atoms = [20, 10, 7, 8, 20, 75, 2, 3] all_natoms = [c.num_atoms for c in self.comp] self.assertEqual(all_natoms, correct_num_atoms) def test_weight(self): correct_weights = [417.427086, 187.63876199999999, 180.81469, 91.7616, 612.3258, 1302.430172, 24.454250000000002, 82.41634] all_weights = [c.weight for c in self.comp] self.assertArrayAlmostEqual(all_weights, correct_weights, 5) def test_get_atomic_fraction(self): correct_at_frac = {"Li": 0.15, "Fe": 0.1, "P": 0.15, "O": 0.6} for el in ["Li", "Fe", "P", "O"]: self.assertEqual(self.comp[0].get_atomic_fraction(el), correct_at_frac[el], "Wrong computed atomic fractions") self.assertEqual(self.comp[0].get_atomic_fraction("S"), 0, "Wrong computed atomic fractions") def test_anonymized_formula(self): expected_formulas = ['A2B3C3D12', 'ABC3D5', 'AB2C4', 'AB', 'A2B3C3D12', 'A2B3C6D10E54', 
'A0.5B1.5', 'ABC'] for i in range(len(self.comp)): self.assertEqual(self.comp[i].anonymized_formula, expected_formulas[i]) def test_get_wt_fraction(self): correct_wt_frac = {"Li": 0.0498841610868, "Fe": 0.267567687258, "P": 0.222604831158, "O": 0.459943320496} for el in ["Li", "Fe", "P", "O"]: self.assertAlmostEqual(correct_wt_frac[el], self.comp[0].get_wt_fraction(el), 5, "Wrong computed weight fraction") self.assertEqual(self.comp[0].get_wt_fraction(Element("S")), 0, "Wrong computed weight fractions") def test_from_dict(self): sym_dict = {"Fe": 6, "O": 8} self.assertEqual(Composition.from_dict(sym_dict).reduced_formula, "Fe3O4", "Creation form sym_amount dictionary failed!") comp = Composition({"Fe2+": 2, "Fe3+": 4, "O2-": 8}) comp2 = Composition.from_dict(comp.as_dict()) self.assertEqual(comp, comp2) def test_as_dict(self): c = Composition.from_dict({'Fe': 4, 'O': 6}) d = c.as_dict() correct_dict = {'Fe': 4.0, 'O': 6.0} self.assertEqual(d['Fe'], correct_dict['Fe']) self.assertEqual(d['O'], correct_dict['O']) correct_dict = {'Fe': 2.0, 'O': 3.0} d = c.to_reduced_dict self.assertEqual(d['Fe'], correct_dict['Fe']) self.assertEqual(d['O'], correct_dict['O']) def test_pickle(self): for c in self.comp: self.serialize_with_pickle(c, test_eq=True) def test_add(self): self.assertEqual((self.comp[0] + self.comp[2]).formula, "Li4 Mn2 Fe2 P3 O16", "Incorrect composition after addition!") self.assertEqual((self.comp[3] + {"Fe": 4, "O": 4}).formula, "Li4 Fe4 O8", "Incorrect composition after addition!") def test_sub(self): self.assertEqual((self.comp[0] - Composition("Li2O")).formula, "Li1 Fe2 P3 O11", "Incorrect composition after addition!") self.assertEqual((self.comp[0] - {"Fe": 2, "O": 3}).formula, "Li3 P3 O9") self.assertRaises(CompositionError, Composition('O').__sub__, Composition('H')) #check that S is completely removed by subtraction c1 = Composition({'S': 1 + Composition.amount_tolerance / 2, 'O': 1}) c2 = Composition({'S': 1}) self.assertEqual(len((c1 - c2).elements), 1) def test_mul(self): self.assertEqual((self.comp[0] * 4).formula, "Li12 Fe8 P12 O48") self.assertEqual((3 * self.comp[1]).formula, "Li9 Fe3 P3 O15") def test_div(self): self.assertEqual((self.comp[0] / 4).formula, 'Li0.75 Fe0.5 P0.75 O3') def test_equals(self): random_z = random.randint(1, 92) fixed_el = Element.from_Z(random_z) other_z = random.randint(1, 92) while other_z == random_z: other_z = random.randint(1, 92) comp1 = Composition({fixed_el: 1, Element.from_Z(other_z): 0}) other_z = random.randint(1, 92) while other_z == random_z: other_z = random.randint(1, 92) comp2 = Composition({fixed_el: 1, Element.from_Z(other_z): 0}) self.assertEqual(comp1, comp2, "Composition equality test failed. 
" + "%s should be equal to %s" % (comp1.formula, comp2.formula)) self.assertEqual(comp1.__hash__(), comp2.__hash__(), "Hashcode equality test failed!") def test_comparisons(self): c1 = Composition({'S': 1}) c1_1 = Composition({'S': 1.00000000000001}) c2 = Composition({'S': 2}) c3 = Composition({'O': 1}) c4 = Composition({'O': 1, 'S': 1}) self.assertFalse(c1 > c2) self.assertFalse(c1_1 > c1) self.assertFalse(c1_1 < c1) self.assertTrue(c1 > c3) self.assertTrue(c3 < c1) self.assertTrue(c4 > c1) self.assertEqual(sorted([c1, c1_1, c2, c4, c3]), [c3, c1, c1_1, c4, c2]) def test_almost_equals(self): c1 = Composition({'Fe': 2.0, 'O': 3.0, 'Mn': 0}) c2 = Composition({'O': 3.2, 'Fe': 1.9, 'Zn': 0}) c3 = Composition({'Ag': 2.0, 'O': 3.0}) c4 = Composition({'Fe': 2.0, 'O': 3.0, 'Ag': 2.0}) self.assertTrue(c1.almost_equals(c2, rtol=0.1)) self.assertFalse(c1.almost_equals(c2, rtol=0.01)) self.assertFalse(c1.almost_equals(c3, rtol=0.1)) self.assertFalse(c1.almost_equals(c4, rtol=0.1)) def test_equality(self): self.assertTrue(self.comp[0].__eq__(self.comp[0])) self.assertFalse(self.comp[0].__eq__(self.comp[1])) self.assertFalse(self.comp[0].__ne__(self.comp[0])) self.assertTrue(self.comp[0].__ne__(self.comp[1])) def test_fractional_composition(self): for c in self.comp: self.assertAlmostEqual(c.fractional_composition.num_atoms, 1) def test_init_numerical_tolerance(self): self.assertEqual(Composition({'B':1, 'C':-1e-12}), Composition('B')) def test_negative_compositions(self): self.assertEqual(Composition('Li-1(PO-1)4', allow_negative=True).formula, 'Li-1 P4 O-4') self.assertEqual(Composition('Li-1(PO-1)4', allow_negative=True).reduced_formula, 'Li-1(PO-1)4') self.assertEqual(Composition('Li-2Mg4', allow_negative=True).reduced_composition, Composition('Li-1Mg2', allow_negative=True)) self.assertEqual(Composition('Li-2.5Mg4', allow_negative=True).reduced_composition, Composition('Li-2.5Mg4', allow_negative=True)) #test math c1 = Composition('LiCl', allow_negative=True) c2 = Composition('Li') self.assertEqual(c1 - 2 * c2, Composition({'Li': -1, 'Cl': 1}, allow_negative=True)) self.assertEqual((c1 + c2).allow_negative, True) self.assertEqual(c1 / -1, Composition('Li-1Cl-1', allow_negative=True)) #test num_atoms c1 = Composition('Mg-1Li', allow_negative=True) self.assertEqual(c1.num_atoms, 2) self.assertEqual(c1.get_atomic_fraction('Mg'), 0.5) self.assertEqual(c1.get_atomic_fraction('Li'), 0.5) self.assertEqual(c1.fractional_composition, Composition('Mg-0.5Li0.5', allow_negative=True)) #test copy self.assertEqual(c1.copy(), c1) #test species c1 = Composition({'Mg':1, 'Mg2+':-1}, allow_negative=True) self.assertEqual(c1.num_atoms, 2) self.assertEqual(c1.element_composition, Composition()) self.assertEqual(c1.average_electroneg, 1.31) def test_special_formulas(self): special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2", "HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2", "O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2", "H": "H2"} for k, v in special_formulas.items(): self.assertEqual(Composition(k).reduced_formula, v) def test_oxi_state_guesses(self): self.assertEqual(Composition("LiFeO2").oxi_state_guesses(), [{"Li": 1, "Fe": 3, "O": -2}]) self.assertEqual(Composition("Fe4O5").oxi_state_guesses(), [{"Fe": 2.5, "O": -2}]) self.assertEqual(Composition("V2O3").oxi_state_guesses(), [{"V": 3, "O": -2}]) # all_oxidation_states produces *many* possible responses self.assertEqual(len(Composition("MnO").oxi_state_guesses( all_oxi_states=True)), 4) # can't balance b/c missing V4+ 
self.assertEqual(Composition("VO2").oxi_state_guesses( oxi_states_override={"V": [2, 3, 5]}), []) # missing V4+, but can balance due to additional sites self.assertEqual(Composition("V2O4").oxi_state_guesses( oxi_states_override={"V": [2, 3, 5]}), [{"V": 4, "O": -2}]) # multiple solutions - Mn/Fe = 2+/4+ or 3+/3+ or 4+/2+ self.assertEqual(len(Composition("MnFeO3").oxi_state_guesses( oxi_states_override={"Mn": [2, 3, 4], "Fe": [2, 3, 4]})), 3) # multiple solutions prefers 3/3 over 2/4 or 4/2 self.assertEqual(Composition("MnFeO3").oxi_state_guesses( oxi_states_override={"Mn": [2, 3, 4], "Fe": [2, 3, 4]})[0], {"Mn": 3, "Fe": 3, "O": -2}) # target charge of 1 self.assertEqual(Composition("V2O6").oxi_state_guesses( oxi_states_override={"V": [2, 3, 4, 5]}, target_charge=-2), [{"V": 5, "O": -2}]) # max_sites for very large composition - should timeout if incorrect self.assertEqual(Composition("Li10000Fe10000P10000O40000"). oxi_state_guesses(max_sites=7)[0], {"Li": 1, "Fe": 2, "P": 5, "O": -2}) # max_sites for very large composition - should timeout if incorrect self.assertEqual(Composition("Li10000Fe10000P10000O40000"). oxi_state_guesses(max_sites=-1)[0], {"Li": 1, "Fe": 2, "P": 5, "O": -2}) self.assertRaises(ValueError, Composition("V2O3"). oxi_state_guesses, max_sites=1) class ChemicalPotentialTest(unittest.TestCase): def test_init(self): d = {'Fe': 1, Element('Fe'): 1} self.assertRaises(ValueError, ChemicalPotential, d) for k in ChemicalPotential(Fe=1).keys(): self.assertIsInstance(k, Element) def test_math(self): fepot = ChemicalPotential({'Fe': 1}) opot = ChemicalPotential({'O': 2.1}) pots = ChemicalPotential({'Fe': 1, 'O': 2.1}) potsx2 = ChemicalPotential({'Fe': 2, 'O': 4.2}) feo2 = Composition('FeO2') # test get_energy() self.assertAlmostEqual(pots.get_energy(feo2), 5.2) self.assertAlmostEqual(fepot.get_energy(feo2, False), 1) self.assertRaises(ValueError, fepot.get_energy, feo2) # test multiplication self.assertRaises(TypeError, lambda: (pots * pots)) self.assertDictEqual(pots * 2, potsx2) self.assertDictEqual(2 * pots, potsx2) # test division self.assertDictEqual(potsx2 / 2, pots) self.assertRaises(TypeError, lambda: (pots / pots)) self.assertRaises(TypeError, lambda: (pots / feo2)) # test add/subtract self.assertDictEqual(pots + pots, potsx2) self.assertDictEqual(potsx2 - pots, pots) self.assertDictEqual(fepot + opot, pots) self.assertDictEqual(fepot - opot, pots - opot - opot) if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] unittest.main()
matk86/pymatgen
pymatgen/core/tests/test_composition.py
Python
mit
20,378
[ "pymatgen" ]
12695ce21e6663a81f05589e33c11d5a7c63f3e3cbbf2d230dfc9f5ef77ee86c
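A short usage sketch of the API under test, with expected values taken directly from the assertions above (pymatgen must be installed):

from pymatgen.core.composition import Composition

c = Composition("Li3Fe2(PO4)3")
print(c.formula)                    # 'Li3 Fe2 P3 O12'
print(c.reduced_formula)            # 'Li3Fe2(PO4)3'
print(c.num_atoms)                  # 20.0
print(c.get_atomic_fraction("Li"))  # 0.15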
#!/usr/bin/python # # Created on Aug 25, 2016 # @author: Gaurav Rastogi (grastogi@avinetworks.com) # Eric Anderson (eanderson@avinetworks.com) # module_check: supported # Avi Version: 17.1.1 # # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: avi_stringgroup author: Gaurav Rastogi (grastogi@avinetworks.com) short_description: Module for setup of StringGroup Avi RESTful Object description: - This module is used to configure StringGroup object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] version_added: "2.4" options: state: description: - The state that should be applied on the entity. default: present choices: ["absent","present"] description: description: - User defined description for the object. kv: description: - Configure key value in the string group. name: description: - Name of the string group. required: true tenant_ref: description: - It is a reference to an object of type tenant. type: description: - Type of stringgroup. - Enum options - SG_TYPE_STRING, SG_TYPE_KEYVAL. - Default value when not specified in API or module is interpreted by Avi Controller as SG_TYPE_STRING. required: true url: description: - Avi controller URL of the object. uuid: description: - Uuid of the string group. extends_documentation_fragment: - avi ''' EXAMPLES = ''' - name: Create a string group configuration avi_stringgroup: controller: '' password: '' username: '' kv: - key: text/html - key: text/xml - key: text/plain - key: text/css - key: text/javascript - key: application/javascript - key: application/x-javascript - key: application/xml - key: application/pdf name: System-Compressible-Content-Types tenant_ref: admin type: SG_TYPE_STRING ''' RETURN = ''' obj: description: StringGroup (api/stringgroup) object returned: success, changed type: dict ''' from ansible.module_utils.basic import AnsibleModule try: from ansible.module_utils.avi import ( avi_common_argument_spec, HAS_AVI, avi_ansible_api) except ImportError: HAS_AVI = False def main(): argument_specs = dict( state=dict(default='present', choices=['absent', 'present']), description=dict(type='str',), kv=dict(type='list',), name=dict(type='str', required=True), tenant_ref=dict(type='str',), type=dict(type='str', required=True), url=dict(type='str',), uuid=dict(type='str',), ) argument_specs.update(avi_common_argument_spec()) module = AnsibleModule( argument_spec=argument_specs, supports_check_mode=True) if not HAS_AVI: return module.fail_json(msg=( 'Avi python API SDK (avisdk>=17.1) is not installed. ' 'For more details visit https://github.com/avinetworks/sdk.')) return avi_ansible_api(module, 'stringgroup', set([])) if __name__ == '__main__': main()
e-gob/plataforma-kioscos-autoatencion
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_stringgroup.py
Python
bsd-3-clause
4,045
[ "VisIt" ]
0e1b0db9cd05bfccca73c6b2eae76f2f8a233df7dc048561f8b5119ac328cde3
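Stripped of the Avi-specific plumbing, the module above follows the standard AnsibleModule pattern: declare an argument spec, let AnsibleModule parse and validate the task parameters, then exit with a JSON result. A minimal hedged skeleton of that pattern (not a working Avi module):

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['absent', 'present']),
            name=dict(type='str', required=True),
        ),
        supports_check_mode=True,
    )
    # A real module would now call the vendor API; here we just echo input.
    module.exit_json(changed=False, name=module.params['name'])


if __name__ == '__main__':
    main()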
# # Copyright (C) 2009, Brian Tanner # #http://rl-glue-ext.googlecode.com/ # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # $Revision: 999 $ # $Date: 2009-02-09 09:39:12 -0700 (Mon, 09 Feb 2009) $ # $Author: brian@tannerpages.com $ # $HeadURL: http://rl-library.googlecode.com/svn/trunk/projects/packages/examples/mines-sarsa-python/sample_mines_environment.py $ import random import sys from rlglue.environment.Environment import Environment from rlglue.environment import EnvironmentLoader as EnvironmentLoader from rlglue.types import Observation from rlglue.types import Action from rlglue.types import Reward_observation_terminal # This is a very simple discrete-state, episodic grid world that has # exploding mines in it. If the agent steps on a mine, the episode # ends with a large negative reward. # # The reward per step is -1, with +10 for exiting the game successfully # and -100 for stepping on a mine. # TO USE THIS Environment [order doesn't matter] # NOTE: I'm assuming the Python codec is installed an is in your Python path # - Start the rl_glue executable socket server on your computer # - Run the SampleSarsaAgent and SampleExperiment from this or a # different codec (Matlab, Python, Java, C, Lisp should all be fine) # - Start this environment like: # $> python sample_mines_environment.py class mines_environment(Environment): WORLD_FREE = 0 WORLD_OBSTACLE = 1 WORLD_MINE = 2 WORLD_GOAL = 3 randGenerator=random.Random() fixedStartState=False startRow=1 startCol=1 currentState=10 def env_init(self): self.map=[ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 0, 0, 0, 0, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] #The Python task spec parser is not yet able to build task specs programmatically return "VERSION RL-Glue-3.0 PROBLEMTYPE episodic DISCOUNTFACTOR 1 OBSERVATIONS INTS (0 107) ACTIONS INTS (0 3) REWARDS (-100.0 10.0) EXTRA SampleMinesEnvironment(C/C++) by Brian Tanner." def env_start(self): if self.fixedStartState: stateValid=self.setAgentState(self.startRow,self.startCol) if not stateValid: print "The fixed start state was NOT valid: "+str(int(self.startRow))+","+str(int(self.startRow)) self.setRandomState() else: self.setRandomState() returnObs=Observation() returnObs.intArray=[self.calculateFlatState()] return returnObs def env_step(self,thisAction): # Make sure the action is valid assert len(thisAction.intArray)==1,"Expected 1 integer action." 
assert thisAction.intArray[0]>=0, "Expected action to be in [0,3]" assert thisAction.intArray[0]<4, "Expected action to be in [0,3]" self.updatePosition(thisAction.intArray[0]) theObs=Observation() theObs.intArray=[self.calculateFlatState()] returnRO=Reward_observation_terminal() returnRO.r=self.calculateReward() returnRO.o=theObs returnRO.terminal=self.checkCurrentTerminal() return returnRO def env_cleanup(self): pass def env_message(self,inMessage): # Message Description # 'set-random-start-state' #Action: Set flag to do random starting states (the default) if inMessage.startswith("set-random-start-state"): self.fixedStartState=False; return "Message understood. Using random start state."; # Message Description # 'set-start-state X Y' # Action: Set flag to do fixed starting states (row=X, col=Y) if inMessage.startswith("set-start-state"): splitString=inMessage.split(" "); self.startRow=int(splitString[1]); self.startCol=int(splitString[2]); self.fixedStartState=True; return "Message understood. Using fixed start state."; # Message Description # 'print-state' # Action: Print the map and the current agent location if inMessage.startswith("print-state"): self.printState(); return "Message understood. Printed the state."; return "SamplesMinesEnvironment(Python) does not respond to that message."; def setAgentState(self,row, col): self.agentRow=row self.agentCol=col return self.checkValid(row,col) and not self.checkTerminal(row,col) def setRandomState(self): numRows=len(self.map) numCols=len(self.map[0]) startRow=self.randGenerator.randint(0,numRows-1) startCol=self.randGenerator.randint(0,numCols-1) while not self.setAgentState(startRow,startCol): startRow=self.randGenerator.randint(0,numRows-1) startCol=self.randGenerator.randint(0,numCols-1) def checkValid(self,row, col): valid=False numRows=len(self.map) numCols=len(self.map[0]) if(row < numRows and row >= 0 and col < numCols and col >= 0): if self.map[row][col] != self.WORLD_OBSTACLE: valid=True return valid def checkTerminal(self,row,col): if (self.map[row][col] == self.WORLD_GOAL or self.map[row][col] == self.WORLD_MINE): return True return False def checkCurrentTerminal(self): return self.checkTerminal(self.agentRow,self.agentCol) def calculateFlatState(self): numRows=len(self.map) return self.agentCol * numRows + self.agentRow def updatePosition(self, theAction): # When the move would result in hitting an obstacles, the agent simply doesn't move newRow = self.agentRow; newCol = self.agentCol; if (theAction == 0):#move down newCol = self.agentCol - 1; if (theAction == 1): #move up newCol = self.agentCol + 1; if (theAction == 2):#move left newRow = self.agentRow - 1; if (theAction == 3):#move right newRow = self.agentRow + 1; #Check if new position is out of bounds or inside an obstacle if(self.checkValid(newRow,newCol)): self.agentRow = newRow; self.agentCol = newCol; def calculateReward(self): if(self.map[self.agentRow][self.agentCol] == self.WORLD_GOAL): return 10.0; if(self.map[self.agentRow][self.agentCol] == self.WORLD_MINE): return -100.0; return -1.0; def printState(self): numRows=len(self.map) numCols=len(self.map[0]) print "Agent is at: "+str(self.agentRow)+","+str(self.agentCol) print "Columns:0-10 10-17" print "Col ", for col in range(0,numCols): print col%10, for row in range(0,numRows): print print "Row: "+str(row)+" ", for col in range(0,numCols): if self.agentRow==row and self.agentCol==col: print "A", else: if self.map[row][col] == self.WORLD_GOAL: print "G", if self.map[row][col] == self.WORLD_MINE: print "M", if 
self.map[row][col] == self.WORLD_OBSTACLE: print "*", if self.map[row][col] == self.WORLD_FREE: print " ", print if __name__=="__main__": EnvironmentLoader.loadEnvironment(mines_environment())
mguzdial3/MineCode
python-codec/examples/mines-sarsa-example/sample_mines_environment.py
Python
apache-2.0
7,414
[ "Brian" ]
78be8f7c6935577125118e3a51d7873b626f227e90b19802b1436b2168115b41
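The environment's observation is its calculateFlatState() encoding: a (row, col) grid position flattened to one integer as col * numRows + row. A standalone sketch of the encoding and its inverse:

def flat_state(row, col, num_rows):
    # same encoding as calculateFlatState() above
    return col * num_rows + row

def unflatten(state, num_rows):
    return state % num_rows, state // num_rows

# round-trips on the 6-row map used by the environment
assert unflatten(flat_state(3, 7, num_rows=6), num_rows=6) == (3, 7)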
# -*- coding: utf-8 -*- import sys from math import pi, log import numpy as np from numpy.linalg import eigh from scipy.special import gamma from scipy.linalg import solve_banded import ase.units as units from ase.utils import devnull, prnt from ase.data import atomic_numbers, atomic_names, chemical_symbols from gpaw.xc import XC from gpaw.gaunt import make_gaunt from gpaw.utilities import _fact as fac from gpaw.atom.configurations import configurations from gpaw.atom.radialgd import AERadialGridDescriptor # Velocity of light in atomic units: c = 2 * units._hplanck / (units._mu0 * units._c * units._e**2) # Colors for s, p, d, f, g: colors = 'krgbycmmmmm' class GaussianBasis: def __init__(self, l, alpha_B, rgd, eps=1.0e-7): """Guassian basis set for spherically symmetric atom. l: int Angular momentum quantum number. alpha_B: ndarray Exponents. rgd: GridDescriptor Grid descriptor. eps: float Cutoff for eigenvalues of overlap matrix.""" self.l = l self.alpha_B = alpha_B self.rgd = rgd self.eps = eps A_BB = np.add.outer(alpha_B, alpha_B) M_BB = np.multiply.outer(alpha_B, alpha_B) # Overlap matrix: S_BB = (2 * M_BB**0.5 / A_BB)**(l + 1.5) # Kinetic energy matrix: T_BB = 2**(l + 2.5) * M_BB**(0.5 * l + 0.75) / gamma(l + 1.5) * ( gamma(l + 2.5) * M_BB / A_BB**(l + 2.5) - 0.5 * (l + 1) * gamma(l + 1.5) / A_BB**(l + 0.5) + 0.25 * (l + 1) * (2 * l + 1) * gamma(l + 0.5) / A_BB**(l + 0.5)) # Derivative matrix: D_BB = 2**(l + 2.5) * M_BB**(0.5 * l + 0.75) / gamma(l + 1.5) * ( 0.5 * (l + 1) * gamma(l + 1) / A_BB**(l + 1) - gamma(l + 2) * alpha_B / A_BB**(l + 2)) # 1/r matrix: K_BB = 2**(l + 2.5) * M_BB**(0.5 * l + 0.75) / gamma(l + 1.5) * ( 0.5 * gamma(l + 1) / A_BB**(l + 1)) # Find set of linearly independent functions. # We have len(alpha_B) gaussians (index B) and self.nbasis # linearly independent functions (index b). 
s_B, U_BB = eigh(S_BB) self.nbasis = int((s_B > eps).sum()) Q_Bb = np.dot(U_BB[:, -self.nbasis:], np.diag(s_B[-self.nbasis:]**-0.5)) self.T_bb = np.dot(np.dot(Q_Bb.T, T_BB), Q_Bb) self.D_bb = np.dot(np.dot(Q_Bb.T, D_BB), Q_Bb) self.K_bb = np.dot(np.dot(Q_Bb.T, K_BB), Q_Bb) r_g = rgd.r_g # Avoid errors in debug mode from division by zero: old_settings = np.seterr(divide='ignore') self.basis_bg = (np.dot( Q_Bb.T, (2 * (2 * alpha_B[:, None])**(l + 1.5) / gamma(l + 1.5))**0.5 * np.exp(-np.multiply.outer(alpha_B, r_g**2))) * r_g**l) np.seterr(**old_settings) def __len__(self): return self.nbasis def expand(self, C_xb): return np.dot(C_xb, self.basis_bg) def calculate_potential_matrix(self, vr_g): vr2dr_g = vr_g * self.rgd.r_g * self.rgd.dr_g V_bb = np.inner(self.basis_bg[:, 1:], self.basis_bg[:, 1:] * vr2dr_g[1:]) return V_bb def coefs(rgd, l, vr_g, e, scalar_relativistic): d2gdr2_g = rgd.d2gdr2() r_g = rgd.r_g x0_g = 2 * (e * r_g - vr_g) x1_g = 2 * (l + 1) / rgd.dr_g + r_g * rgd.d2gdr2() x2_g = r_g / rgd.dr_g**2 if scalar_relativistic: r_g = r_g.copy() r_g[0] = 1.0 v_g = vr_g / r_g M_g = 1 + (e - v_g) / (2 * c**2) kappa_g = (rgd.derivative(vr_g) - v_g) / r_g / (2 * c**2 * M_g) x0_g *= M_g x0_g += l * kappa_g x1_g += r_g * kappa_g / rgd.dr_g cm1_g = x2_g - x1_g / 2 c0_g = x0_g - 2 * x2_g cp1_g = x2_g + x1_g / 2 return cm1_g, c0_g, cp1_g class Channel: def __init__(self, l, s=0, f_n=(), basis=None): self.l = l self.s = s self.basis = basis self.C_nb = None # eigenvectors self.e_n = None # eigenvalues self.f_n = np.array(f_n, dtype=float) # occupation numbers self.phi_ng = None # wave functions self.name = 'spdfg'[l] def solve(self, vr_g): """Diagonalize Schrödinger equation in basis set.""" H_bb = self.basis.calculate_potential_matrix(vr_g) H_bb += self.basis.T_bb self.e_n, C_bn = eigh(H_bb) self.C_nb = C_bn.T self.phi_ng = self.basis.expand(self.C_nb[:len(self.f_n)]) def solve2(self, vr_g, scalar_relativistic=False): rgd = self.basis.rgd r_g = rgd.r_g l = self.l u_g = rgd.empty() for n in range(len(self.f_n)): e = self.e_n[n] # Find classical turning point: x = vr_g * r_g + 0.5 * l * (l + 1) - e * r_g**2 g0 = rgd.round(4.0) while x[g0] > 0: g0 -= 1 iter = 0 ok = False while True: du1dr = self.integrate_outwards(u_g, rgd, vr_g, g0, e, scalar_relativistic) u1 = u_g[g0] du2dr = self.integrate_inwards(u_g, rgd, vr_g, g0, e, scalar_relativistic) u2 = u_g[g0] A = du1dr / u1 - du2dr / u2 u_g[g0:] *= u1 / u2 u_g /= (rgd.integrate(u_g**2, -2) / (4 * pi))**0.5 if abs(A) < 1e-5: ok = True break e += 0.5 * A * u_g[g0]**2 if e > 0: break iter += 1 assert iter < 400, (n, l, e) if ok: self.e_n[n] = e self.phi_ng[n, 1:] = u_g[1:] / r_g[1:] if self.l == 0: self.phi_ng[n, 0] = self.phi_ng[n, 1] def calculate_density(self, n=None): """Calculate density.""" if n is None: n_g = 0.0 for n, f in enumerate(self.f_n): n_g += f * self.calculate_density(n) else: n_g = self.phi_ng[n]**2 / (4 * pi) return n_g def get_eigenvalue_sum(self): f_n = self.f_n return np.dot(f_n, self.e_n[:len(f_n)]) def integrate_outwards(self, u_g, rgd, vr_g, g0, e, scalar_relativistic=False, pt_g=None): l = self.l r_g = rgd.r_g cm1_g, c0_g, cp1_g = coefs(rgd, l, vr_g, e, scalar_relativistic) c_xg = np.zeros((3, g0 + 2)) c_xg[0, :2] = 1.0 c_xg[0, 2:] = cp1_g[1:g0 + 1] c_xg[1, 1:-1] = c0_g[1:g0 + 1] c_xg[2, :-2] = cm1_g[1:g0 + 1] b_g = np.zeros(g0 + 2) if pt_g is not None: b_g[2:] = -2 * pt_g[1:g0 + 1] * r_g[1:g0 + 1]**(1 - l) a0 = pt_g[1] / r_g[1]**l / (vr_g[1] / r_g[1] - e) else: a0 = 1 a1 = a0 + vr_g[0] * rgd.dr_g[0] b_g[:2] = [a0, a1] a_g = 
solve_banded((2, 0), c_xg, b_g, overwrite_ab=True, overwrite_b=True) r = r_g[g0] dr = rgd.dr_g[g0] da = 0.5 * (a_g[g0 + 1] - a_g[g0 - 1]) dudr = (l + 1) * r**l * a_g[g0] + r**(l + 1) * da / dr u_g[:g0 + 2] = a_g * r_g[:g0 + 2]**(l + 1) return dudr def integrate_inwards(self, u_g, rgd, vr_g, g0, e, scalar_relativistic=False): l = self.l r_g = rgd.r_g cm1_g, c0_g, cp1_g = coefs(rgd, l, vr_g, e, scalar_relativistic) cm1_g[:g0] = 1.0 # prevent division by zero c0_g /= -cm1_g cp1_g /= -cm1_g g = len(u_g) - 2 agp1 = 1.0 u_g[-1] = agp1 * r_g[-1]**(l + 1) ag = np.exp(-(-2 * e)**0.5 * (rgd.r_g[-2] - rgd.r_g[-1])) while True: u_g[g] = ag * r_g[g]**(l + 1) if ag > 1e50: u_g[g:] /= 1e50 ag = ag / 1e50 agp1 = agp1 / 1e50 agm1 = agp1 * cp1_g[g] + ag * c0_g[g] if g == g0: break g -= 1 agp1 = ag ag = agm1 r = r_g[g] dr = rgd.dr_g[g] da = 0.5 * (agp1 - agm1) dudr = (l + 1) * r**l * ag + r**(l + 1) * da / dr return dudr class DiracChannel(Channel): def __init__(self, k, f_n, basis): l = (abs(2 * k + 1) - 1) // 2 Channel.__init__(self, l, 0, f_n, basis) self.k = k self.j = abs(k) - 0.5 self.c_nb = None # eigenvectors (small component) self.name += '(%d/2)' % (2 * self.j) def solve(self, vr_g): """Solve Dirac equation in basis set.""" nb = len(self.basis) V_bb = self.basis.calculate_potential_matrix(vr_g) H_bb = np.zeros((2 * nb, 2 * nb)) H_bb[:nb, :nb] = V_bb H_bb[nb:, nb:] = V_bb - 2 * c**2 * np.eye(nb) H_bb[nb:, :nb] = -c * (-self.basis.D_bb.T + self.k * self.basis.K_bb) e_n, C_bn = eigh(H_bb) if self.k < 0: n0 = nb else: n0 = nb + 1 self.e_n = e_n[n0:].copy() self.C_nb = C_bn[:nb, n0:].T.copy() # large component self.c_nb = C_bn[nb:, n0:].T.copy() # small component def calculate_density(self, n=None): """Calculate density.""" if n is None: n_g = Channel.calculate_density(self) else: n_g = (self.basis.expand(self.C_nb[n])**2 + self.basis.expand(self.c_nb[n])**2) / (4 * pi) if self.basis.l < 0: n_g[0] = n_g[1] return n_g class AllElectronAtom: def __init__(self, symbol, xc='LDA', spinpol=False, dirac=False, log=sys.stdout): """All-electron calculation for spherically symmetric atom. symbol: str (or int) Chemical symbol (or atomic number). xc: str Name of XC-functional. spinpol: bool If true, do spin-polarized calculation. Default is spin-paired. dirac: bool Solve Dirac equation instead of Schrödinger equation. 
log: stream Text output.""" if isinstance(symbol, int): symbol = chemical_symbols[symbol] self.symbol = symbol self.Z = atomic_numbers[symbol] self.nspins = 1 + int(bool(spinpol)) self.dirac = bool(dirac) self.scalar_relativistic = False if isinstance(xc, str): self.xc = XC(xc) else: self.xc = xc if log is None: log = devnull self.fd = log self.vr_sg = None # potential * r self.n_sg = 0.0 # density self.rgd = None # radial grid descriptor # Energies: self.ekin = None self.eeig = None self.eH = None self.eZ = None self.channels = None self.initialize_configuration() self.log('Z: ', self.Z) self.log('Name: ', atomic_names[self.Z]) self.log('Symbol: ', symbol) self.log('XC-functional: ', self.xc.name) self.log('Equation: ', ['Schrödinger', 'Dirac'][self.dirac]) self.method = 'Gaussian basis-set' def log(self, *args, **kwargs): prnt(file=self.fd, *args, **kwargs) def initialize_configuration(self): self.f_lsn = {} for n, l, f, e in configurations[self.symbol][1]: if l not in self.f_lsn: self.f_lsn[l] = [[] for s in range(self.nspins)] if self.nspins == 1: self.f_lsn[l][0].append(f) else: # Use Hund's rule: f0 = min(f, 2 * l + 1) self.f_lsn[l][0].append(f0) self.f_lsn[l][1].append(f - f0) def add(self, n, l, df=+1, s=None): """Add (remove) electrons.""" if s is None: if self.nspins == 1: s = 0 else: self.add(n, l, 0.5 * df, 0) self.add(n, l, 0.5 * df, 1) return if l not in self.f_lsn: self.f_lsn[l] = [[] for x in range(self.nspins)] f_n = self.f_lsn[l][s] if len(f_n) < n - l: f_n.extend([0] * (n - l - len(f_n))) f_n[n - l - 1] += df def initialize(self, ngpts=2000, rcut=50.0, alpha1=0.01, alpha2=None, ngauss=50, eps=1.0e-7): """Initialize basis sets and radial grid. ngpts: int Number of grid points for radial grid. rcut: float Cutoff for radial grid. alpha1: float Smallest exponent for gaussian. alpha2: float Largest exponent for gaussian. ngauss: int Number of gaussians. 
eps: float Cutoff for eigenvalues of overlap matrix.""" if alpha2 is None: alpha2 = 50.0 * self.Z**2 # Use grid with r(0)=0, r(1)=a and r(ngpts)=rcut: a = 1 / alpha2**0.5 / 20 b = (rcut - a * ngpts) / (rcut * ngpts) b = 1 / round(1 / b) self.rgd = AERadialGridDescriptor(a, b, ngpts) self.log('Grid points: %d (%.5f, %.5f, %.5f, ..., %.3f, %.3f)' % ((self.rgd.N,) + tuple(self.rgd.r_g[[0, 1, 2, -2, -1]]))) # Distribute exponents between alpha1 and alpha2: alpha_B = alpha1 * (alpha2 / alpha1)**np.linspace(0, 1, ngauss) self.log('Exponents: %d (%.3f, %.3f, ..., %.3f, %.3f)' % ((ngauss,) + tuple(alpha_B[[0, 1, -2, -1]]))) # Maximum l value: lmax = max(self.f_lsn.keys()) self.channels = [] nb_l = [] if not self.dirac: for l in range(lmax + 1): basis = GaussianBasis(l, alpha_B, self.rgd, eps) nb_l.append(len(basis)) for s in range(self.nspins): self.channels.append(Channel(l, s, self.f_lsn[l][s], basis)) else: for K in range(1, lmax + 2): leff = (K**2 - (self.Z / c)**2)**0.5 - 1 basis = GaussianBasis(leff, alpha_B, self.rgd, eps) nb_l.append(len(basis)) for k, l in [(-K, K - 1), (K, K)]: if l > lmax: continue f_n = self.f_lsn[l][0] j = abs(k) - 0.5 f_n = (2 * j + 1) / (4 * l + 2) * np.array(f_n) self.channels.append(DiracChannel(k, f_n, basis)) self.log('Basis functions: %s (%s)' % (', '.join([str(nb) for nb in nb_l]), ', '.join('spdf'[:lmax + 1]))) self.vr_sg = self.rgd.zeros(self.nspins) self.vr_sg[:] = -self.Z def solve(self): """Diagonalize Schrödinger equation.""" self.eeig = 0.0 for channel in self.channels: if self.method == 'Gaussian basis-set': channel.solve(self.vr_sg[channel.s]) else: channel.solve2(self.vr_sg[channel.s], self.scalar_relativistic) self.eeig += channel.get_eigenvalue_sum() def calculate_density(self): """Calculate elctron density and kinetic energy.""" self.n_sg = self.rgd.zeros(self.nspins) for channel in self.channels: self.n_sg[channel.s] += channel.calculate_density() def calculate_electrostatic_potential(self): """Calculate electrostatic potential and energy.""" n_g = self.n_sg.sum(0) self.vHr_g = self.rgd.poisson(n_g) self.eH = 0.5 * self.rgd.integrate(n_g * self.vHr_g, -1) self.eZ = -self.Z * self.rgd.integrate(n_g, -1) def calculate_xc_potential(self): self.vxc_sg = self.rgd.zeros(self.nspins) self.exc = self.xc.calculate_spherical(self.rgd, self.n_sg, self.vxc_sg) def step(self): self.solve() self.calculate_density() self.calculate_electrostatic_potential() self.calculate_xc_potential() self.vr_sg = self.vxc_sg * self.rgd.r_g self.vr_sg += self.vHr_g self.vr_sg -= self.Z self.ekin = (self.eeig - self.rgd.integrate((self.vr_sg * self.n_sg).sum(0), -1)) def run(self, mix=0.4, maxiter=117, dnmax=1e-9): if self.channels is None: self.initialize() if self.dirac: equation = 'Dirac' elif self.scalar_relativistic: equation = 'scalar-relativistic Schrödinger' else: equation = 'non-relativistic Schrödinger' self.log('\nSolving %s equation using %s:' % (equation, self.method)) dn = self.Z for iter in range(maxiter): self.log('.', end='') self.fd.flush() if iter > 0: self.vr_sg *= mix self.vr_sg += (1 - mix) * vr_old_sg dn = self.rgd.integrate(abs(self.n_sg - n_old_sg).sum(0)) if dn <= dnmax: self.log('\nConverged in', iter, 'steps') break vr_old_sg = self.vr_sg n_old_sg = self.n_sg self.step() self.summary() if dn > dnmax: raise RuntimeError('Did not converge!') def refine(self): self.method = 'finite difference' self.run(dnmax=1e-6, mix=0.14, maxiter=200) def summary(self): self.write_states() self.write_energies() def write_states(self): self.log('\n state occupation 
eigenvalue <r>') if self.dirac: self.log(' nl(j) [Hartree] [eV] [Bohr]') else: self.log(' nl [Hartree] [eV] [Bohr]') self.log('-----------------------------------------------------') states = [] for ch in self.channels: for n, f in enumerate(ch.f_n): states.append((ch.e_n[n], ch, n)) states.sort() for e, ch, n in states: name = str(n + ch.l + 1) + ch.name if self.nspins == 2: name += '(%s)' % '+-'[ch.s] n_g = ch.calculate_density(n) rave = self.rgd.integrate(n_g, 1) self.log(' %-7s %6.3f %13.6f %13.5f %6.3f' % (name, ch.f_n[n], e, e * units.Hartree, rave)) def write_energies(self): self.log('\nEnergies: [Hartree] [eV]') self.log('--------------------------------------------') for text, e in [('kinetic ', self.ekin), ('coulomb (e-e)', self.eH), ('coulomb (e-n)', self.eZ), ('xc ', self.exc), ('total ', self.ekin + self.eH + self.eZ + self.exc)]: self.log(' %s %+13.6f %+13.5f' % (text, e, units.Hartree * e)) self.calculate_exx() self.log('\nExact exchange energy: %.6f Hartree, %.5f eV' % (self.exx, self.exx * units.Hartree)) def get_channel(self, l=None, s=0, k=None): if self.dirac: for channel in self.channels: if channel.k == k: return channel else: for channel in self.channels: if channel.l == l and channel.s == s: return channel raise ValueError def get_orbital(self, n, l=None, s=0, k=None): channel = self.get_channel(l, s, k) return channel.basis.expand(channel.C_nb[n]) def plot_wave_functions(self, rc=4.0): import matplotlib.pyplot as plt for ch in self.channels: for n in range(len(ch.f_n)): fr_g = ch.basis.expand(ch.C_nb[n]) * self.rgd.r_g name = str(n + ch.l + 1) + ch.name lw = 2 if self.nspins == 2: name += '(%s)' % '+-'[ch.s] if ch.s == 1: lw = 1 if self.dirac and ch.k > 0: lw = 1 ls = ['-', '--', '-.', ':'][ch.l] n_g = ch.calculate_density(n) rave = self.rgd.integrate(n_g, 1) gave = self.rgd.round(rave) fr_g *= cmp(fr_g[gave], 0) plt.plot(self.rgd.r_g, fr_g, ls=ls, lw=lw, color=colors[n + ch.l], label=name) plt.legend(loc='best') plt.xlabel('r [Bohr]') plt.ylabel('$r\\phi(r)$') plt.axis(xmax=rc) plt.show() def logarithmic_derivative(self, l, energies, rcut): ch = Channel(l) gcut = self.rgd.round(rcut) u_g = self.rgd.empty() logderivs = [] for e in energies: dudr = ch.integrate_outwards(u_g, self.rgd, self.vr_sg[0], gcut, e, self.scalar_relativistic) logderivs.append(dudr / u_g[gcut]) return logderivs def calculate_exx(self, s=None): if s is None: self.exx = sum(self.calculate_exx(s) for s in range(self.nspins)) / self.nspins return self.exx states = [] lmax = 0 for ch in self.channels: l = ch.l for n, phi_g in enumerate(ch.phi_ng): f = ch.f_n[n] if f > 0 and ch.s == s: states.append((l, f * self.nspins / 2.0 / (2 * l + 1), phi_g)) if l > lmax: lmax = l G_LLL = make_gaunt(lmax) exx = 0.0 j1 = 0 for l1, f1, phi1_g in states: f = 1.0 for l2, f2, phi2_g in states[j1:]: n_g = phi1_g * phi2_g for l in range((l1 + l2) % 2, l1 + l2 + 1, 2): G = (G_LLL[l1**2:(l1 + 1)**2, l2**2:(l2 + 1)**2, l**2:(l + 1)**2]**2).sum() vr_g = self.rgd.poisson(n_g, l) e = f * self.rgd.integrate(vr_g * n_g, -1) / 4 / pi exx -= e * G * f1 * f2 f = 2.0 j1 += 1 return exx def build_parser(): from optparse import OptionParser parser = OptionParser(usage='%prog [options] element', version='%prog 0.1') parser.add_option('-f', '--xc-functional', type='string', default='LDA', help='Exchange-Correlation functional ' + '(default value LDA)', metavar='<XC>') parser.add_option('-a', '--add', metavar='states', help='Add electron(s). 
Use "1s0.5a" to add 0.5 1s ' + 'electrons to the alpha-spin channel (use "b" for ' + 'beta-spin). The number of electrons defaults to ' + 'one. Examples: "1s", "2p2b", "4f0.1b,3d-0.1a".') parser.add_option('--spin-polarized', action='store_true') parser.add_option('-d', '--dirac', action='store_true') parser.add_option('-p', '--plot', action='store_true') parser.add_option('-e', '--exponents', help='Exponents a: exp(-a*r^2). Use "-e 0.1:20.0:30" ' + 'to get 30 exponents from 0.1 to 20.0.') parser.add_option('-l', '--logarithmic-derivatives', metavar='spdfg,e1:e2:de,radius', help='Plot logarithmic derivatives. ' + 'Example: -l spdf,-1:1:0.05,1.3. ' + 'Energy range and/or radius can be left out.') parser.add_option('-r', '--refine', action='store_true') parser.add_option('-s', '--scalar-relativistic', action='store_true') return parser def parse_ld_str(s, energies=None, r=2.0): parts = s.split(',') lvalues = ['spdfg'.find(x) for x in parts.pop(0)] if parts: e1, e2, de = (float(x) for x in parts.pop(0).split(':')) else: e1, e2, de = energies if parts: r = float(parts.pop()) energies = np.linspace(e1, e2, int((e2 - e1) / de) + 1) return lvalues, energies, r def main(): parser = build_parser() opt, args = parser.parse_args() if len(args) != 1: parser.error('Incorrect number of arguments') symbol = args[0] nlfs = [] if opt.add: for x in opt.add.split(','): n = int(x[0]) l = 'spdfg'.find(x[1]) x = x[2:] if x and x[-1] in 'ab': s = int(x[-1] == 'b') opt.spin_polarized = True x = x[:-1] else: s = None if x: f = float(x) else: f = 1 nlfs.append((n, l, f, s)) aea = AllElectronAtom(symbol, xc=opt.xc_functional, spinpol=opt.spin_polarized, dirac=opt.dirac) kwargs = {} if opt.exponents: parts = opt.exponents.split(':') kwargs['alpha1'] = float(parts[0]) if len(parts) > 1: kwargs['alpha2'] = float(parts[1]) if len(parts) > 2: kwargs['ngauss'] = int(parts[2]) for n, l, f, s in nlfs: aea.add(n, l, f, s) aea.initialize(**kwargs) aea.run() if opt.refine: aea.refine() if opt.scalar_relativistic: aea.scalar_relativistic = True aea.refine() if opt.logarithmic_derivatives: lvalues, energies, r = parse_ld_str(opt.logarithmic_derivatives, (-1, 1, 0.05)) import matplotlib.pyplot as plt for l in lvalues: ld = aea.logarithmic_derivative(l, energies, r) plt.plot(energies, ld, colors[l]) plt.show() if opt.plot: aea.plot_wave_functions() if __name__ == '__main__': main()
robwarm/gpaw-symm
gpaw/atom/aeatom.py
Python
gpl-3.0
26,539
[ "ASE", "DIRAC", "GPAW", "Gaussian" ]
a1a25c1ab4839aef558a099f07516fc4d8d9907124c3791e6874f7b6cb5164ba
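The GaussianBasis class above builds its basis from the analytic overlap matrix S_ij = (2*sqrt(a_i*a_j)/(a_i + a_j))**(l + 1.5) and discards near-degenerate directions via an eigenvalue cutoff. A hedged standalone sketch of just that overlap construction (exponent values illustrative only):

import numpy as np

def gaussian_overlap(alpha, l):
    # S_ij = (2*sqrt(a_i*a_j) / (a_i + a_j))**(l + 1.5), as in GaussianBasis
    A = np.add.outer(alpha, alpha)
    M = np.multiply.outer(alpha, alpha)
    return (2.0 * np.sqrt(M) / A) ** (l + 1.5)

alpha = 0.01 * (5000.0 / 0.01) ** np.linspace(0.0, 1.0, 5)  # geometric spread
S = gaussian_overlap(alpha, l=0)
print(np.linalg.eigvalsh(S))  # tiny eigenvalues signal near-linear dependence

Small eigenvalues of S are exactly what the eps cutoff in GaussianBasis filters out before diagonalizing the Hamiltonian.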