| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
"""
Migration script to add the cleanup_event* tables.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import sys, logging
from galaxy.model.custom_types import *
from sqlalchemy.exc import *
import datetime
now = datetime.datetime.utcnow
log = logging.getLogger( __name__ )
log.setLevel( logging.DEBUG )
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
# New table to log cleanup events
CleanupEvent_table = Table( "cleanup_event", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "message", TrimmedString( 1024 ) ) )
CleanupEventDatasetAssociation_table = Table( "cleanup_event_dataset_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ),
Column( "dataset_id", Integer, ForeignKey( "dataset.id" ), index=True ) )
CleanupEventMetadataFileAssociation_table = Table( "cleanup_event_metadata_file_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ),
Column( "metadata_file_id", Integer, ForeignKey( "metadata_file.id" ), index=True ) )
CleanupEventHistoryAssociation_table = Table( "cleanup_event_history_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ),
Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ) )
CleanupEventHistoryDatasetAssociationAssociation_table = Table( "cleanup_event_hda_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ),
Column( "hda_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ) )
CleanupEventLibraryAssociation_table = Table( "cleanup_event_library_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ),
Column( "library_id", Integer, ForeignKey( "library.id" ), index=True ) )
CleanupEventLibraryFolderAssociation_table = Table( "cleanup_event_library_folder_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ),
Column( "library_folder_id", Integer, ForeignKey( "library_folder.id" ), index=True ) )
CleanupEventLibraryDatasetAssociation_table = Table( "cleanup_event_library_dataset_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ),
Column( "library_dataset_id", Integer, ForeignKey( "library_dataset.id" ), index=True ) )
CleanupEventLibraryDatasetDatasetAssociationAssociation_table = Table( "cleanup_event_ldda_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ),
Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ) )
CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table = Table( "cleanup_event_icda_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ),
Column( "icda_id", Integer, ForeignKey( "implicitly_converted_dataset_association.id" ), index=True ) )
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
try:
CleanupEvent_table.create()
CleanupEventDatasetAssociation_table.create()
CleanupEventMetadataFileAssociation_table.create()
CleanupEventHistoryAssociation_table.create()
CleanupEventHistoryDatasetAssociationAssociation_table.create()
CleanupEventLibraryAssociation_table.create()
CleanupEventLibraryFolderAssociation_table.create()
CleanupEventLibraryDatasetAssociation_table.create()
CleanupEventLibraryDatasetDatasetAssociationAssociation_table.create()
CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table.create()
except Exception, e:
log.debug( "Creating table failed: %s" % str( e ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
try:
CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table.drop()
CleanupEventLibraryDatasetDatasetAssociationAssociation_table.drop()
CleanupEventLibraryDatasetAssociation_table.drop()
CleanupEventLibraryFolderAssociation_table.drop()
CleanupEventLibraryAssociation_table.drop()
CleanupEventHistoryDatasetAssociationAssociation_table.drop()
CleanupEventHistoryAssociation_table.drop()
CleanupEventMetadataFileAssociation_table.drop()
CleanupEventDatasetAssociation_table.drop()
CleanupEvent_table.drop()
except Exception, e:
log.debug( "Dropping table failed: %s" % str( e ) )
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/model/migrate/versions/0105_add_cleanup_event_table.py | Python | gpl-3.0 | 6,015 | ["Galaxy"] | 08eed784f0e4acd93fb9b6201c856163bcdc4725f07df9b2bdaa9b46251f6a9c |
# Copyright (C) 2007, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os, subprocess, tempfile
from zeroinstall import SafeException
from logging import info, warn
from support import unpack_tarball
class SCM:
def __init__(self, root_dir, options):
self.options = options
self.root_dir = root_dir
assert type(root_dir) == str, root_dir
class GIT(SCM):
def _run(self, args, **kwargs):
info("Running git %s (in %s)", ' '.join(args), self.root_dir)
return subprocess.Popen(["git"] + args, cwd = self.root_dir, **kwargs)
def _run_check(self, args, **kwargs):
child = self._run(args, **kwargs)
code = child.wait()
if code:
raise SafeException("Git %s failed with exit code %d" % (repr(args), code))
def _run_stdout(self, args, **kwargs):
child = self._run(args, stdout = subprocess.PIPE, encoding = 'utf-8', **kwargs)
stdout, unused = child.communicate()
if child.returncode:
raise SafeException('Failed to get current branch! Exit code %d: %s' % (child.returncode, stdout))
return stdout
def ensure_versioned(self, path):
"""Ensure path is a file tracked by the version control system.
@raise SafeException: if file is not tracked"""
out = self._run_stdout(['ls-tree', 'HEAD', path]).strip()
if not out:
raise SafeException("File '%s' is not under version control, according to git-ls-tree" % path)
def reset_hard(self, revision):
self._run_check(['reset', '--hard', revision])
def ensure_committed(self):
child = self._run(["status", "--porcelain", "-uno"], stdout = subprocess.PIPE, encoding = 'utf-8')
stdout, unused = child.communicate()
if child.returncode == 0:
# Git >= 1.7
if stdout.strip():
raise SafeException('Uncommitted changes! Use "git-commit -a" to commit them. Changes are:\n' + stdout)
return
else:
# Old Git
child = self._run(["status", "-a"], stdout = subprocess.PIPE, encoding = 'utf-8')
stdout, unused = child.communicate()
if not child.returncode:
raise SafeException('Uncommitted changes! Use "git-commit -a" to commit them. Changes are:\n' + stdout)
for scm in self._submodules():
scm.ensure_committed()
def _submodules(self):
for line in self._run_stdout(['submodule', 'status']).split('\n'):
if not line: continue
r, subdir = line.strip().split(' ')[:2]
scm = GIT(os.path.join(self.root_dir, subdir), self.options)
scm.rev = r
scm.rel_path = subdir
yield scm
def make_tag(self, version):
return 'v' + version
def tag(self, version, revision):
tag = self.make_tag(version)
if self.options.key:
key_opts = ['-u', self.options.key]
else:
key_opts = []
self._run_check(['tag', '-s'] + key_opts + ['-m', 'Release %s' % version, tag, revision])
print("Tagged as %s" % tag)
def get_current_branch(self):
current_branch = self._run_stdout(['symbolic-ref', 'HEAD']).strip()
info("Current branch is %s", current_branch)
return current_branch
def get_tagged_versions(self):
child = self._run(['tag', '-l', 'v*'], stdout = subprocess.PIPE, encoding = 'utf-8')
stdout, unused = child.communicate()
status = child.wait()
if status:
raise SafeException("git tag failed with exit code %d" % status)
return [v[1:] for v in stdout.split('\n') if v]
def delete_branch(self, branch):
self._run_check(['branch', '-D', branch])
def push_head_and_release(self, version):
self._run_check(['push', self.options.public_scm_repository, self.make_tag(version), self.get_current_branch()])
def ensure_no_tag(self, version):
tag = self.make_tag(version)
child = self._run(['tag', '-l', tag], stdout = subprocess.PIPE, encoding = 'utf-8')
stdout, unused = child.communicate()
if tag in stdout.split('\n'):
raise SafeException(("Release %s is already tagged! If you want to replace it, do\n" +
"git tag -d %s") % (version, tag))
def export(self, prefix, archive_file, revision):
child = self._run(['archive', '--format=tar', '--prefix=' + prefix + os.sep, revision], stdout = subprocess.PIPE)
with open(archive_file, 'wb') as stream:
subprocess.check_call(['bzip2', '-'], stdin = child.stdout, stdout = stream)
status = child.wait()
if status:
if os.path.exists(archive_file):
os.unlink(archive_file)
raise SafeException("git-archive failed with exit code %d" % status)
def export_submodules(self, target):
# Export all sub-modules under target
cwd = os.getcwd()
target = os.path.abspath(target)
for scm in self._submodules():
tmp = tempfile.NamedTemporaryFile(prefix = '0release-')
try:
scm.export(prefix = '.', archive_file = tmp.name, revision = scm.rev)
os.chdir(os.path.join(target, scm.rel_path))
unpack_tarball(tmp.name)
finally:
tmp.close()
os.chdir(cwd)
def commit(self, message, branch, parent):
self._run_check(['add', '-u']) # Commit all changed tracked files to index
tree = self._run_stdout(['write-tree']).strip()
child = self._run(['commit-tree', tree, '-p', parent], stdin = subprocess.PIPE, stdout = subprocess.PIPE, encoding = 'utf-8')
stdout, unused = child.communicate(message)
commit = stdout.strip()
info("Committed as %s", commit)
self._run_check(['branch', '-f', branch, commit])
return commit
def get_head_revision(self):
proc = self._run(['rev-parse', 'HEAD'], stdout = subprocess.PIPE, encoding = 'utf-8')
stdout, unused = proc.communicate()
if proc.returncode:
raise Exception("git rev-parse failed with exit code %d" % proc.returncode)
head = stdout.strip()
assert head
return head
def export_changelog(self, last_release_version, head, stream):
if last_release_version:
self._run_check(['log', 'refs/tags/v' + last_release_version + '..' + head], stdout = stream)
else:
self._run_check(['log', head], stdout = stream)
def grep(self, pattern):
child = self._run(['grep', pattern])
child.wait()
if child.returncode in [0, 1]:
return
warn("git grep returned exit code %d", child.returncode)
def has_submodules(self):
return os.path.isfile(os.path.join(self.root_dir, '.gitmodules'))
def get_scm(local_feed, options):
start_dir = os.path.dirname(os.path.abspath(local_feed.local_path))
current = start_dir
while True:
if os.path.exists(os.path.join(current, '.git')):
return GIT(current, options)
parent = os.path.dirname(current)
if parent == current:
raise SafeException("Unable to determine which version control system is being used. Couldn't find .git in %s or any parent directory." % start_dir)
current = parent
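# Illustrative sketch (not part of the original file): GIT is driven by an
# options object; `key` and `public_scm_repository` are the attributes this
# module actually reads, everything else in the example is hypothetical.
#
#     class Options:
#         key = None                       # GPG key id for signed tags, or None
#         public_scm_repository = 'origin'
#
#     scm = GIT('/path/to/checkout', Options())
#     scm.ensure_committed()               # raises SafeException on a dirty tree
#     scm.tag('1.2', scm.get_head_revision())
#     scm.push_head_and_release('1.2')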
| 0install/0release | scm.py | Python | lgpl-2.1 | 6,517 | ["VisIt"] | dfff801e727f292a1a3036161700a39db6297244d0bab4859aaf0c1d4433fae5 |
# ----------------------------------------------------------------------------
# Copyright (c) 2014--, burrito development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from future.utils import implements_iterator
import os
from os import remove, system, mkdir, getcwd
from os.path import isabs, exists
from random import choice
from tempfile import gettempdir
from copy import deepcopy
from itertools import product
from burrito.parameters import Parameters, FilePath
# the following are used to create temp file names
from string import ascii_letters, digits
_all_chars = ascii_letters + digits
def which(executable_name, env_var='PATH'):
"""Equivalent to ``which executable_name`` in a *nix environment.
Will return ``None`` if ``executable_name`` cannot be found in ``env_var``
or if ``env_var`` is not set. Otherwise will return the first match in
``env_var``.
Code taken and modified from:
http://www.velocityreviews.com/forums/
t689526-python-library-call-equivalent-to-which-command.html
http://stackoverflow.com/a/379535
"""
exec_fp = None
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
def ext_candidates(fpath):
yield fpath
for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
yield fpath + ext
if env_var in os.environ:
for path in os.environ.get(env_var, "").split(os.pathsep):
for curr_exec_fp in ext_candidates(os.path.join(path, executable_name)):
if is_exe(curr_exec_fp):
exec_fp = curr_exec_fp
break
return exec_fp
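# Illustrative usage (not part of the original file): which() returns the first
# matching executable on the named environment variable, or None.
#
#     >>> which('sh') is not None          # assumes an `sh` binary is on PATH
#     True
#     >>> which('no-such-binary-xyz') is None
#     True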
class ApplicationError(OSError):
pass
class ApplicationNotFoundError(ApplicationError):
pass
class ResultPath(object):
""" Hold a file path a boolean value specifying whether file was written
"""
def __init__(self, Path, IsWritten=True):
""" Initialize the ResultPath object
Path: a string representing the absolute or relative path where
the file can be found
IsWritten: a boolean specifying whether the file has been written,
default = True
"""
self.Path = FilePath(Path)
self.IsWritten = IsWritten
class CommandLineAppResult(dict):
""" Class for holding the result of a CommandLineApplication run """
def __init__(self, out, err, exit_status, result_paths):
"""Initialization of CommandLineAppResult
out: a file handler to the file containing the stdout
err: a file handler to the file containing the stderr
exit_status: the exit status of the program, 0 if run ok, 1 else.
result_paths: dictionary containing ResultPath objects for each
output file that could be written
"""
self['StdOut'] = out
self['StdErr'] = err
self['ExitStatus'] = exit_status
self.file_keys = result_paths.keys()
for key, value in result_paths.items():
if value.IsWritten:
try:
self[key] = open(value.Path)
except IOError:
raise ApplicationError('Could not open %s' % value.Path)
else:
self[key] = None
def cleanUp(self):
""" Delete files that are written by CommandLineApplication from disk
WARNING: after cleanUp() you may still have access to part of
your result data, but you should be aware that if the file
size exceeds the size of the buffer you will only have part
of the file. To be safe, you should not use cleanUp() until
you are done with the file or have copied it to a different
location.
"""
file_keys = self.file_keys
for item in file_keys:
if self[item] is not None:
self[item].close()
remove(self[item].name)
# remove input handler temp files
if hasattr(self, "_input_filename"):
remove(self._input_filename)
def __del__(self):
""" Delete temporary files created by the CommandLineApplication
"""
if self['StdOut'] is not None:
remove(self['StdOut'].name)
if self['StdErr'] is not None:
remove(self['StdErr'].name)
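# Illustrative note (not part of the original file): a CommandLineAppResult is a
# dict with 'StdOut', 'StdErr' and 'ExitStatus' keys plus one open file handle
# per ResultPath, so typical access looks like
#
#     result = app(data)                   # app is a CommandLineApplication
#     status = result['ExitStatus']
#     text = result['StdOut'].read()       # None if stdout was suppressed
#     result.cleanUp()                     # close/remove the app's output files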
class Application(object):
""" Generic Class for controlling an application """
_command = None
_command_delimiter = ' '
_parameters = {}
_synonyms = {}
def __init__(self, params=None):
"""
params: a dict of parameters which should be turned on where the
key is either the parameter id or a synonym for the parameter
and the value is either the value for the parameter or None
"""
self.Parameters = Parameters(self._parameters, self._synonyms)
if params:
for key, v in params.items():
try:
self.Parameters[key].on(v)
except TypeError:
self.Parameters[key].on()
class CommandLineApplication(Application):
""" Generic class for controlling command line applications
"""
_input_handler = '_input_as_string'
_suppress_stderr = False
_suppress_stdout = False
_working_dir = None
def __init__(self, params=None, InputHandler=None, SuppressStderr=None,
SuppressStdout=None, WorkingDir=None, TmpDir=gettempdir(),
TmpNameLen=20, HALT_EXEC=False):
""" Initialize the CommandLineApplication object
params: a dictionary mapping the Parameter id or synonym to its
value (or None for FlagParameters or MixedParameters in flag
mode) for Parameters that should be turned on
InputHandler: this is the method to be run on data when it is
passed into call. This should be a string containing the
method name. The default is _input_as_string which casts data
to a string before appending it to the command line argument
SuppressStderr: if set to True, will route standard error to
/dev/null, False by default
SuppressStdout: if set to True, will route standard out to
/dev/null, False by default
WorkingDir: the directory where you want the application to run,
default is the current working directory, but is useful to
change in cases where the program being run creates output
to its current working directory and you either don't want
it to end up where you are running the program, or the user
running the script doesn't have write access to the current
working directory
WARNING: WorkingDir MUST be an absolute path!
TmpDir: the directory where temp files will be created
TmpNameLen: the length of the temp file name
HALT_EXEC: if True, raises exception w/ command output just
before execution, doesn't clean up temp files. Default False.
"""
# Determine if the application is installed, and raise an error if not
self._error_on_missing_application(params)
# set attributes to parameter that was passed in or class default
if InputHandler is not None:
self.InputHandler = InputHandler
else:
self.InputHandler = self._input_handler
if SuppressStderr is not None:
self.SuppressStderr = SuppressStderr
else:
self.SuppressStderr = self._suppress_stderr
if SuppressStdout is not None:
self.SuppressStdout = SuppressStdout
else:
self.SuppressStdout = self._suppress_stdout
if WorkingDir is not None:
working_dir = WorkingDir
else:
working_dir = self._working_dir or getcwd()
self.WorkingDir = FilePath(working_dir)
if not TmpDir.endswith("/"):
TmpDir += "/"
self.TmpDir = FilePath(TmpDir)
self.TmpNameLen = TmpNameLen
self.HaltExec = HALT_EXEC
# create a variable to hold the name of the file being used as
# input to the application. this is important especially when
# you are using an input handler which creates a temporary file
# and the output filenames are based on the input filenames
self._input_filename = None
super(CommandLineApplication, self).__init__(params=params)
def __call__(self, data=None, remove_tmp=True):
"""Run the application with the specified kwargs on data
data: anything that can be cast into a string or written out to
a file. Usually either a list of things or a single string or
number. input_handler will be called on this data before it
is passed as part of the command-line argument, so by creating
your own input handlers you can customize what kind of data
you want your application to accept
remove_tmp: if True, removes tmp files
"""
input_handler = self.InputHandler
suppress_stdout = self.SuppressStdout
suppress_stderr = self.SuppressStderr
if suppress_stdout:
outfile = FilePath('/dev/null')
else:
outfile = self.getTmpFilename(self.TmpDir)
if suppress_stderr:
errfile = FilePath('/dev/null')
else:
errfile = FilePath(self.getTmpFilename(self.TmpDir))
if data is None:
input_arg = ''
else:
input_arg = getattr(self, input_handler)(data)
# Build up the command, consisting of a BaseCommand followed by
# input and output (file) specifications
command = self._command_delimiter.join(filter(None,
[self.BaseCommand,
str(input_arg),
'>', str(outfile),
'2>', str(errfile)]))
if self.HaltExec:
raise AssertionError("Halted exec with command:\n" + command)
# The return value of system is a 16-bit number containing the signal
# number that killed the process, and then the exit status.
# We only want to keep the exit status so do a right bitwise shift to
# get rid of the signal number byte
exit_status = system(command) >> 8
# Determine if error should be raised due to exit status of
        # application
if not self._accept_exit_status(exit_status):
raise ApplicationError('Unacceptable application exit ' +
'status: %s\n' % str(exit_status) +
'Command:\n%s\n' % command +
'StdOut:\n%s\n' % open(outfile).read() +
'StdErr:\n%s\n' % open(errfile).read())
# open the stdout and stderr if not being suppressed
out = None
if not suppress_stdout:
out = open(outfile, "r")
err = None
if not suppress_stderr:
err = open(errfile, "r")
result_paths = self._get_result_paths(data)
try:
result = \
CommandLineAppResult(out, err, exit_status,
result_paths=result_paths)
except ApplicationError:
result = \
self._handle_app_result_build_failure(out, err, exit_status,
result_paths)
# Clean up the input file if one was created
if remove_tmp:
if self._input_filename:
remove(self._input_filename)
self._input_filename = None
return result
def _handle_app_result_build_failure(
self,
out,
err,
exit_status,
result_paths):
"""Called if ApplicationError raised on building CommandLineAppResult
This is useful for checking log files or other special handling
in cases when expected files aren't present.
"""
raise ApplicationError("Error constructing CommandLineAppResult.")
def _input_as_string(self, data):
""" Return data as a string """
return str(data)
def _input_as_multiline_string(self, data):
"""Write a multiline string to a temp file and return the filename.
data: a multiline string to be written to a file.
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
"""
filename = self._input_filename = \
FilePath(self.getTmpFilename(self.TmpDir))
data_file = open(filename, 'w')
data_file.write(data)
data_file.close()
return filename
def _input_as_lines(self, data):
""" Write a seq of lines to a temp file and return the filename string
data: a sequence to be written to a file, each element of the
sequence will compose a line in the file
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
* Note: '\n' will be stripped off the end of each sequence element
before writing to a file in order to avoid multiple new lines
                accidentally being written to a file
"""
filename = self._input_filename = \
FilePath(self.getTmpFilename(self.TmpDir))
filename = FilePath(filename)
data_file = open(filename, 'w')
data_to_file = '\n'.join([str(d).strip('\n') for d in data])
data_file.write(data_to_file)
data_file.close()
return filename
def _input_as_path(self, data):
""" Return data as string with the path wrapped in quotes
data: path or filename, most likely as a string
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
"""
return FilePath(data)
def _input_as_paths(self, data):
""" Return data as a space delimited string with each path quoted
data: paths or filenames, most likely as a list of
strings
"""
return self._command_delimiter.join(
map(str, map(self._input_as_path, data)))
def _absolute(self, path):
""" Convert a filename to an absolute path """
path = FilePath(path)
if isabs(path):
return path
else:
# these are both Path objects, so joining with + is acceptable
return self.WorkingDir + path
def _get_base_command(self):
""" Returns the full command string
input_arg: the argument to the command which represents the input
to the program, this will be a string, either
representing input or a filename to get input from
tI"""
command_parts = []
# Append a change directory to the beginning of the command to change
# to self.WorkingDir before running the command
# WorkingDir should be in quotes -- filenames might contain spaces
cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
if self._command is None:
raise ApplicationError('_command has not been set.')
command = self._command
parameters = self.Parameters
command_parts.append(cd_command)
command_parts.append(command)
command_parts.append(self._command_delimiter.join(filter(
None, (map(str, parameters.values())))))
return self._command_delimiter.join(command_parts).strip()
BaseCommand = property(_get_base_command)
def _get_WorkingDir(self):
"""Gets the working directory"""
return self._curr_working_dir
def _set_WorkingDir(self, path):
"""Sets the working directory
Appends a slash to the end of path
The reasoning behind this is that the user may or may not pass
in a path with a '/' at the end. Since having multiple
            '/' at the end doesn't hurt anything, it's convenient to
be able to rely on it, and not have to check for it
"""
self._curr_working_dir = FilePath(path) + '/'
try:
mkdir(self.WorkingDir)
except OSError:
# Directory already exists
pass
WorkingDir = property(_get_WorkingDir, _set_WorkingDir)
def _error_on_missing_application(self, params):
""" Raise an ApplicationNotFoundError if the app is not accessible
This method checks in the system path (usually $PATH) or for
the existence of self._command. If self._command is not found
in either place, an ApplicationNotFoundError is raised to
inform the user that the application they are trying to access is
not available.
This method should be overwritten when self._command does not
represent the relevant executable (e.g., self._command = 'prog -a')
or in more complex cases where the file to be executed may be
passed as a parameter (e.g., with java jar files, where the
jar file is passed to java via '-jar'). It can also be overwritten
to by-pass testing for application presence by never raising an
error.
"""
command = self._command
# strip off " characters, in case we got a FilePath object
found_in_path = which(command.strip('"')) is not None
if not (exists(command) or found_in_path):
raise ApplicationNotFoundError("Cannot find %s. Is it installed? "
"Is it in your path?" % command)
def _accept_exit_status(self, exit_status):
""" Return False to raise an error due to exit_status of applciation
This method should be overwritten if you'd like to raise an error
based on certain exit statuses of the application that was run. The
default is that no value of exit_status will raise an error.
"""
return True
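    # Illustrative sketch (not part of the original file): a wrapper that treats
    # any non-zero exit status as an error would override this hook, e.g.
    #
    #     class HypotheticalApp(CommandLineApplication):
    #         _command = 'hypothetical-tool'   # assumed binary name
    #
    #         def _accept_exit_status(self, exit_status):
    #             return exit_status == 0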
def _get_result_paths(self, data):
""" Return dict of ResultPath objects representing all possible output
This method should be overwritten if the application creates
output other than stdout and stderr. This dictionary will have
keys based on the name that you'd like to access the file by in
the CommandLineAppResult object that will be created, and the
values which are ResultPath objects. For an example of how this
should be written see the rnaview or vienna_package classes.
WARNING: be sure that the path that you give a file is accurate
from any directory where the program could be running. For
that reason, absolute paths are very good. Relative paths
can also be used as long as you are careful. For cases where
the application leaves files in the current working directory,
you should append self.WorkingDir to the beginning of the file
name. It would be a very bad idea to just use a file name as
the path, in some cases that you might not be testing for.
"""
return {}
def getTmpFilename(self, tmp_dir=None, prefix='tmp', suffix='.txt',
include_class_id=False, result_constructor=FilePath):
""" Return a temp filename
tmp_dir: directory where temporary files will be stored
prefix: text to append to start of file name
suffix: text to append to end of file name
include_class_id: if True, will append a class identifier (built
from the class name) to the filename following prefix. This is
False by default b/c there is some string processing overhead
in getting the class name. This will probably be most useful for
testing: if temp files are being left behind by tests, you can
turn this on in here (temporarily) to find out which tests are
leaving the temp files.
result_constructor: the constructor used to build the result
(default: cogent.app.parameters.FilePath). Note that joining
FilePath objects with one another or with strings, you must use
the + operator. If this causes trouble, you can pass str as the
the result_constructor.
"""
# check not none
if not tmp_dir:
tmp_dir = self.TmpDir
# if not current directory, append "/" if not already on path
elif not tmp_dir.endswith("/"):
tmp_dir += "/"
if include_class_id:
# Append the classname to the prefix from the class name
# so any problematic temp files can be associated with
# the class that created them. This should be especially
# useful for testing, but is turned off by default to
# avoid the string-parsing overhead.
class_id = str(self.__class__())
prefix = ''.join([prefix,
class_id[class_id.rindex('.') + 1:
class_id.index(' ')]])
try:
mkdir(tmp_dir)
except OSError:
# Directory already exists
pass
# note: it is OK to join FilePath objects with +
return result_constructor(tmp_dir) + result_constructor(prefix) + \
result_constructor(''.join([choice(_all_chars)
for i in range(self.TmpNameLen)])) +\
result_constructor(suffix)
@implements_iterator
class ParameterIterBase(object):
"""Base class for parameter iteration objects
This class provides base functionality for parameter iteration objects.
A parameter iteration object acts like a generator and returns
parameter dicts of varying values. The specific keys and ranges of values
can be specified. Subclasses of this object implement the way in which
the parameter values are chosen."""
def __init__(self, Application, Parameters, AlwaysOn=None):
"""Initialize the ParameterIterBase
Application : A CommandLineApplication subclass
        Parameters : A dict keyed by the application parameter, value by
the range of parameters to enumerate over. For
FlagParameters, unless specified in AlwaysOn, the value
will cycle between True/False (on/off). For
MixedParameters, include [None] specifically to utilize
flag functionality.
AlwaysOn : List of parameters that will always be on
Parameters is checked against the applications known parameters, but
only performed superficially: only keys are validated. AlwaysOn
values must have entries within Parameters.
NOTE: If the parameter is not specified in AlwaysOn, a False value
is appended so that the parameter can be turned off. Multiple False
states for a parameter will result if False is specified without
adding the parameter to AlwaysOn. If a parameter has a default value,
then that parameter is implicitly always on.
"""
self.AppParams = Application._parameters
# Validate Parameters
param_set = set(Parameters.keys())
app_param_set = set(self.AppParams.keys())
if not param_set.issubset(app_param_set):
not_present = str(param_set.difference(app_param_set))
raise ValueError(
"Parameter(s) %s not present in app" %
not_present)
# Validate AlwaysOn
alwayson_set = set(AlwaysOn)
if not alwayson_set.issubset(param_set):
not_present = str(alwayson_set.difference(param_set))
raise ValueError("AlwaysOn value(s) %s not in Parameters" %
not_present)
# Make sure all values are lists
for k, v in Parameters.items():
if not isinstance(v, list):
Parameters[k] = [v]
_my_params = Parameters
# Append "off states" to relevant parameters
for k in param_set.difference(alwayson_set):
_my_params[k].append(False)
        # Create separate key/value lists preserving index relation
self._keys, self._values = zip(*sorted(_my_params.items()))
# Construct generator
self._generator = self._init_generator()
def _init_generator(self):
"""Must be implemented in the subclass"""
pass
def _make_app_params(self, values):
"""Returns app's param dict with values set as described by values
"""
# A deep copy is necessary. Otherwise the dict values refer to
# the same object.
app_params = deepcopy(self.AppParams)
for key, value in zip(self._keys, values):
if value is False:
app_params[key].off()
elif value is True:
app_params[key].on()
else:
app_params[key].on(value)
return app_params
def __iter__(self):
return self
def __next__(self):
return next(self._generator)
def reset(self):
self._generator = self._init_generator()
class ParameterCombinations(ParameterIterBase):
"""Iterates over all combinations of parameters lexiographically"""
def _init_generator(self):
"""Iterates over all possible combinations of parameters
This method iterates over the cartesian product of parameter values
"""
for vals in product(*self._values):
yield self._make_app_params(vals)
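# Illustrative sketch (not part of the original file; SomeApp and the '-e'
# parameter id are hypothetical):
#
#     param_iter = ParameterCombinations(SomeApp,
#                                        Parameters={'-e': [0.001, 0.01]},
#                                        AlwaysOn=['-e'])
#     for app_params in param_iter:
#         pass    # each app_params is a deep copy of SomeApp._parameters with
#                 # '-e' switched on to one of the listed values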
def cmdline_generator(param_iter, PathToBin=None, PathToCmd=None,
PathsToInputs=None, PathToOutput=None,
PathToStderr='/dev/null', PathToStdout='/dev/null',
UniqueOutputs=False, InputParam=None,
OutputParam=None):
"""Generates command lines that can be used in a cluster environment
param_iter : ParameterIterBase subclass instance
PathToBin : Absolute location primary command (i.e. Python)
PathToCmd : Absolute location of the command
PathsToInputs : Absolute location(s) of input file(s)
PathToOutput : Absolute location of output file
PathToStderr : Path to stderr
PathToStdout : Path to stdout
UniqueOutputs : Generate unique tags for output files
InputParam : Application input parameter (if not specified, assumes
stdin is to be used)
OutputParam : Application output parameter (if not specified, assumes
stdout is to be used)
"""
# Make sure we have input(s) and output
if not PathsToInputs:
raise ValueError("No input file(s) specified.")
if not PathToOutput:
raise ValueError("No output file specified.")
if not isinstance(PathsToInputs, list):
PathsToInputs = [PathsToInputs]
# PathToBin and PathToCmd can be blank
if PathToBin is None:
PathToBin = ''
if PathToCmd is None:
PathToCmd = ''
# stdout_ and stderr_ do not have to be redirected
if PathToStdout is None:
stdout_ = ''
else:
stdout_ = '> "%s"' % PathToStdout
if PathToStderr is None:
stderr_ = ''
else:
stderr_ = '2> "%s"' % PathToStderr
# Output can be redirected to stdout or specified output argument
if OutputParam is None:
output = '> "%s"' % PathToOutput
stdout_ = ''
else:
output_param = param_iter.AppParams[OutputParam]
output_param.on('"%s"' % PathToOutput)
output = str(output_param)
output_param.off()
output_count = 0
base_command = ' '.join([PathToBin, PathToCmd])
for params in param_iter:
# Support for multiple input files
for inputfile in PathsToInputs:
cmdline = [base_command]
cmdline.extend(sorted(filter(None, map(str, params.values()))))
# Input can come from stdin or specified input argument
if InputParam is None:
input = '< "%s"' % inputfile
else:
input_param = params[InputParam]
input_param.on('"%s"' % inputfile)
input = str(input_param)
input_param.off()
cmdline.append(input)
if UniqueOutputs:
cmdline.append(''.join([output, str(output_count)]))
output_count += 1
else:
cmdline.append(output)
cmdline.append(stdout_)
cmdline.append(stderr_)
yield ' '.join(cmdline)
def get_tmp_filename(tmp_dir=gettempdir(), prefix="tmp", suffix=".txt",
result_constructor=FilePath):
""" Generate a temporary filename and return as a FilePath object
tmp_dir: the directory to house the tmp_filename
prefix: string to append to beginning of filename
Note: It is very useful to have prefix be descriptive of the
process which is creating the temporary file. For example, if
your temp file will be used to build a temporary blast database,
you might pass prefix=TempBlastDB
suffix: the suffix to be appended to the temp filename
result_constructor: the constructor used to build the result filename
(default: cogent.app.parameters.FilePath). Note that joining
FilePath objects with one another or with strings, you must use
the + operator. If this causes trouble, you can pass str as the
the result_constructor.
"""
# check not none
if not tmp_dir:
tmp_dir = ""
# if not current directory, append "/" if not already on path
elif not tmp_dir.endswith("/"):
tmp_dir += "/"
chars = "abcdefghigklmnopqrstuvwxyz"
picks = chars + chars.upper() + "0123456790"
return result_constructor(tmp_dir) + result_constructor(prefix) +\
result_constructor("%s%s" %
(''.join([choice(picks) for i in range(20)]),
suffix))
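# Illustrative usage (not part of the original file): the pieces are simply
# concatenated, so a call such as
#
#     get_tmp_filename(tmp_dir='/tmp', prefix='blastdb_', suffix='.fasta')
#
# returns a FilePath like '/tmp/blastdb_<20 random alphanumerics>.fasta'.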
def guess_input_handler(seqs, add_seq_names=False):
"""Returns the name of the input handler for seqs."""
if isinstance(seqs, str):
if '\n' in seqs: # can't be a filename...
return '_input_as_multiline_string'
else: # assume it was a filename
return '_input_as_string'
if isinstance(seqs, list) and len(seqs) and isinstance(seqs[0], tuple):
return '_input_as_seq_id_seq_pairs'
if add_seq_names:
return '_input_as_seqs'
return '_input_as_lines'
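# Illustrative usage (not part of the original file):
#
#     >>> guess_input_handler('>seq1\nACGT')           # embedded newline
#     '_input_as_multiline_string'
#     >>> guess_input_handler('/path/to/seqs.fasta')   # assumed to be a filename
#     '_input_as_string'
#     >>> guess_input_handler(['ACGT', 'TTGA'], add_seq_names=True)
#     '_input_as_seqs'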
| ssorgatem/burrito | burrito/util.py | Python | bsd-3-clause | 31,476 | ["BLAST"] | f80f5f67d614eba3be116c2c7a8ad2f54c1e2fae1ba8054544129d5030a1bcc9 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
from six.moves import xrange, zip # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function as framework_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework.func_graph import FuncGraph
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_state
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import object_identity
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
def _MarkReachedOps(from_ops, reached_ops, func_graphs):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: set of Operations.
func_graphs: list of FuncGraphs. This method will traverse through
these functions if they capture from_ops or any reachable ops.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if op not in reached_ops:
reached_ops.add(op)
for output in op.outputs:
if _IsBackpropagatable(output):
queue.extend(_Consumers(output, func_graphs))
def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, func_graphs,
xs_set):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op]' indicates the number of backprop inputs
to this operation.
Args:
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
func_graphs: list of FuncGraphs. This method will traverse through
these functions if they capture from_ops or any reachable ops. This is
useful if to_ops occur in a function and from_ops are in an outer function
or graph.
xs_set: ObjectIdentitySet of Tensors.
Returns:
A tuple containing: (1) the subset of to_ops reachable from from_ops by a
path of zero or more backpropagatable tensors, (2) a mapping from operation
to the number of backprop inputs to that op, and (3) a ControlFlowState
object which is not None if the ops between from_ops and to_ops contain
control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = set()
_MarkReachedOps(from_ops, reached_ops, func_graphs)
# X in reached_ops iff X is reachable from from_ops by a path of zero or more
# backpropagatable tensors.
reachable_to_ops = set(op for op in to_ops if op in reached_ops)
# Mark between ops.
between_ops = set()
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if op in reached_ops:
between_ops.add(op)
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops.remove(op)
for inp in _NonEagerInputs(op, xs_set):
queue.append(inp.op)
# X in between_ops iff X is on a path of zero or more backpropagatable tensors
# between from_ops and to_ops
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_state.MaybeCreateControlFlowState(
between_op_list, between_ops, colocate_gradients_with_ops)
# Initialize pending count for between ops.
pending_count = collections.defaultdict(int)
for op in between_op_list:
for x in _NonEagerInputs(op, xs_set):
if x.op in between_ops:
pending_count[x.op] += 1
return reachable_to_ops, pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys,
ys,
colocate_gradients_with_ops,
gradient_uid="__unsupported__"):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gradient_uid: A unique identifier within the graph indicating
which invocation of gradients is being executed. Used to cluster
ops for compilation.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If sizes of gradients and inputs don't match
TypeError: If type of any gradient is not valid for its input.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
new_grad_ys = []
for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):
with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):
if grad_y is None:
if y.dtype.is_complex:
raise TypeError(
"Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
y.dtype)
new_grad_ys.append(
array_ops.fill(
array_ops.shape(y),
constant_op.constant(1, dtype=y.dtype, name="grad_ys_%d" % i)))
continue
if y.dtype.is_floating or y.dtype.is_integer:
if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
raise TypeError(
"Gradient type %s generated for real or "
"integer-valued tensor %s with type %s must be "
"real or integer" % (dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
elif y.dtype.is_complex:
if not grad_y.dtype.is_complex:
raise TypeError(
"Gradient type %s generated for complex-valued "
"tensor %s with type %s must be real" % (dtypes.as_dtype(
grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))
elif y.dtype == dtypes.variant:
if grad_y.dtype != dtypes.variant:
raise TypeError(
"Gradient type %s generated for variant "
"tensor %s with type %s must be variant" % (dtypes.as_dtype(
grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))
elif y.dtype == dtypes.resource:
# We assume y is the handle of a ResourceVariable. The gradient of a
# ResourceVariable should be a numeric value, not another resource.
if grad_y.dtype == dtypes.resource:
raise TypeError("Input gradient %s for resource tensor %s should not "
"be a resource" % (grad_y, y))
else:
raise TypeError(
"Tensor %s with type %s must be numeric "
"to obtain a default gradient" % (y, dtypes.as_dtype(y.dtype).name))
# Create a grad_y tensor in the name scope of the gradient.
# Required for TensorArrays to identify which gradient call a
# grad_y value is coming from.
if isinstance(grad_y, ops.IndexedSlices):
new_grad_ys.append(
ops.IndexedSlices(
indices=(array_ops.identity(
grad_y.indices, name="grad_ys_%d_indices" % i)
if isinstance(grad_y.indices, ops.Tensor) else
grad_y.indices),
values=(array_ops.identity(
grad_y.values, name="grad_ys_%d_values" % i) if isinstance(
grad_y.values, ops.Tensor) else grad_y.values),
dense_shape=(array_ops.identity(
grad_y.dense_shape, name="grad_ys_%d_shape" % i)
if isinstance(grad_y.dense_shape, ops.Tensor) else
grad_y.dense_shape)))
else:
new_grad_ys.append(array_ops.identity(grad_y, name="grad_ys_%d" % i))
return new_grad_ys
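# Illustrative note (not part of the original file): when a grad_y is None the
# seed gradient is a tensor of ones shaped like y, which is why
# tf.gradients(ys, xs) with no grad_ys behaves like differentiating sum(ys)
# with respect to xs.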
def _IsBackpropagatable(tensor):
if backprop_util.IsTrainable(tensor):
return True
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype == dtypes.bfloat16
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
    op: Operation for which the gradients were generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
# While ops have inputs added to them during the gradient computation, so we
# skip the below check. See while_v2 for details.
if op.type == "While" or op.type == "StatelessWhile":
return
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
def _StopOps(from_ops, stop_gradient_ops, pending_count, xs_set):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op] > 0.
In addition, none of `stop_gradient_ops` will be differentiated.
Args:
from_ops: list of Operations.
stop_gradient_ops: list of Operations never to backprop through.
pending_count: mapping from operation to number of backprop inputs.
xs_set: ObjectIdentitySet of Tensors.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in _NonEagerInputs(op, xs_set):
if pending_count[inp.op] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op)
stop_ops.update(op for op in stop_gradient_ops)
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops): # pylint: disable=invalid-name
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops._colocate_with_for_gradient(op, gradient_uid): # pylint: disable=protected-access
yield
else:
yield
def _IsPartitionedCall(op):
return op.type == "PartitionedCall" or op.type == "StatefulPartitionedCall"
def _SymGrad(op, out_grads):
"""Backprop through a function call node op given its outputs' gradients."""
f_in = [x for x in op.inputs] + out_grads
f_types = [default_gradient.get_zeros_dtype(x) for x in op.inputs]
f = attr_value_pb2.NameAttrList()
if _IsPartitionedCall(op):
f.name = op.get_attr("f").name
else:
f.name = op.type
for k in op.node_def.attr:
f.attr[k].CopyFrom(op.node_def.attr[k])
in_grads = functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f)
return in_grads
def _MaybeCompile(scope, op, func, grad_fn):
"""Compile the calculation in grad_fn if op was marked as compiled."""
scope = scope.rstrip("/").replace("/", "_")
if func is not None:
xla_compile = func.definition.attr["_XlaCompile"].b
xla_separate_compiled_gradients = func.definition.attr[
"_XlaSeparateCompiledGradients"].b
xla_scope = func.definition.attr["_XlaScope"].s.decode()
else:
try:
xla_compile = op.get_attr("_XlaCompile")
xla_separate_compiled_gradients = op.get_attr(
"_XlaSeparateCompiledGradients")
xla_scope = op.get_attr("_XlaScope").decode()
except ValueError:
xla_compile = False
if not xla_compile:
return grad_fn() # Exit early
# If the gradients are supposed to be compiled separately, we give them a
# _XlaScope name that is based on the name_scope of the gradients. Otherwise
# they just inherit the existing _XlaScope name, which lets them be merged
# together with the non-gradient computation.
if xla_separate_compiled_gradients:
xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
else:
xla_grad_scope = xla_scope
attrs = {
"_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
"_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
}
with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access
return grad_fn()
def _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs_set):
"""Raises an error if we backprop through a loop var."""
# Find the nearest 'to_op' reachable from 'op' to provide a more helpful error
# message.
target_op = None
queue = collections.deque([op])
visited = set()
while queue:
curr_op = queue.popleft()
if curr_op in visited: continue
visited.add(curr_op)
if curr_op in from_ops:
target_op = curr_op
break
queue.extend(t.op for t in _NonEagerInputs(curr_op, xs_set))
assert target_op
raise ValueError(
"Cannot compute gradient inside while loop with respect to op '%s'. "
"We do not support taking the gradient wrt or through the initial value "
"of a loop variable. Gradients can be computed through loop invariants "
"or wrt the input parameters to the loop body."
% target_op.name)
def _IsFunction(graph):
return (isinstance(graph, FuncGraph) or
isinstance(graph, framework_function._FuncGraph)) # pylint: disable=protected-access
def _Captures(func_graph):
if isinstance(func_graph, FuncGraph):
return func_graph.captures
else:
assert isinstance(func_graph, framework_function._FuncGraph) # pylint: disable=protected-access
return func_graph.captures
def _MaybeCaptured(t):
"""If t is a captured value placeholder, returns the original captured value.
Args:
t: Tensor
Returns:
A tensor, potentially from a different Graph/FuncGraph.
"""
# pylint: disable=protected-access
if (not isinstance(t, ops.EagerTensor) and
_IsFunction(t.op.graph) and t.op.type == "Placeholder"):
for input_t, placeholder_t in _Captures(t.op.graph):
if t is placeholder_t:
return _MaybeCaptured(input_t)
# pylint: enable=protected-access
return t
def _NonEagerInputs(op, xs_set):
"""Returns the inputs of op, crossing closure boundaries where necessary.
Does not return any captured EagerTensors, i.e., the number of tensors
  returned may be less than the actual number of inputs.
Args:
op: Operation
xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.
Returns:
A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op
is in a FuncGraph and has captured inputs.
"""
return [t for t in _Inputs(op, xs_set) if not isinstance(t, ops.EagerTensor)]
# TODO(skyewm): plumbing xs through everywhere is ugly, consider making
# _GradientsHelper a class with xs as a member variable.
def _Inputs(op, xs_set):
"""Returns the inputs of op, crossing closure boundaries where necessary.
Args:
op: Operation
xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.
Returns:
A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op
is in a FuncGraph and has captured inputs.
"""
if _IsFunction(op.graph): # pylint: disable=protected-access
inputs = []
for t in op.inputs:
# If we're differentiating w.r.t. `t`, do not attempt to traverse through
# it to a captured value. The algorithm needs to "see" `t` in this case,
# even if it's a function input for a captured value, whereas usually we'd
# like to traverse through these closures as if the captured value was the
# direct input to op.
if t not in xs_set:
t = _MaybeCaptured(t)
inputs.append(t)
return inputs
else:
return op.inputs
def _Consumers(t, func_graphs):
"""Returns the consumers of t, crossing closure boundaries where necessary.
Args:
t: Tensor
func_graphs: a list of FuncGraphs that may have captured t.
Returns:
A list of tensors. The tensors will be from the current graph and/or
func_graphs.
"""
consumers = t.consumers()
for func in func_graphs:
for input_t, placeholder in _Captures(func):
if input_t is t:
consumers.extend(_Consumers(placeholder, func_graphs))
return consumers
def _GradientsHelper(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None,
stop_gradients=None,
unconnected_gradients=UnconnectedGradients.NONE,
src_graph=None):
"""Implementation of gradients()."""
if context.executing_eagerly():
raise RuntimeError("tf.gradients is not supported when eager execution "
"is enabled. Use tf.GradientTape instead.")
if src_graph is None:
src_graph = ops.get_default_graph()
try:
unconnected_gradients = UnconnectedGradients(unconnected_gradients)
except ValueError:
raise ValueError(
"Unknown value for unconnected_gradients: %r" % unconnected_gradients)
# If src_graph is a _FuncGraph (i.e. a function body), gather it and all
# ancestor graphs. This is necessary for correctly handling captured values.
func_graphs = []
curr_graph = src_graph
while _IsFunction(curr_graph):
func_graphs.append(curr_graph)
if isinstance(curr_graph, FuncGraph):
curr_graph = curr_graph.outer_graph
else:
assert isinstance(curr_graph, framework_function._FuncGraph) # pylint: disable=protected-access
curr_graph = curr_graph._outer_graph # pylint: disable=protected-access
ys = _AsList(ys)
xs = _AsList(xs)
stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.name_scope(
name, "gradients",
list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:
# Get a uid for this call to gradients that can be used to help
# cluster ops for compilation.
gradient_uid = ops.get_default_graph().unique_name("uid")
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = [
x.handle if resource_variable_ops.is_resource_variable(x) else x
for x in xs
]
xs = ops.internal_convert_n_to_tensor_or_indexed_slices(
xs, name="x", as_ref=True)
xs_set = object_identity.ObjectIdentitySet(xs)
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops,
gradient_uid)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
stop_gradient_ops = [t.op for t in stop_gradients]
reachable_to_ops, pending_count, loop_state = _PendingCount(
to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs_set)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
ready = (pending_count[op] == 0)
if ready and op not in to_ops_set and op in reachable_to_ops:
to_ops_set.add(op)
queue.append(op)
if loop_state:
loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
for y in loop_exits:
if backprop_util.IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count, xs_set)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state,
aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
func_call = None
is_partitioned_call = _IsPartitionedCall(op)
# pylint: disable=protected-access
is_func_call = (
src_graph._is_function(op.type) or is_partitioned_call)
# pylint: enable=protected-access
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op not in stop_ops):
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
if is_func_call:
if is_partitioned_call:
func_name = compat.as_bytes(op.get_attr("f").name)
func_call = src_graph._get_function( # pylint: disable=protected-access
func_name)
# When a graph is imported, the FunctionDefs are not copied over
# to each sub-graph so we recursively search the outer graphs
# for the FunctionDef.
if not func_call and hasattr(src_graph, "outer_graph"):
graph = src_graph.outer_graph
while graph is not None:
func_call = graph._get_function(func_name) # pylint: disable=protected-access
if func_call is not None:
break
if hasattr(graph, "outer_graph"):
graph = graph.outer_graph
else:
break
else:
func_call = src_graph._get_function(op.type) # pylint: disable=protected-access
# Note that __defun is not set if the graph is
# imported. If it's set, we prefer to access the original
# defun.
func_call = getattr(op, "__defun", func_call)
grad_fn = func_call.python_grad_func
else:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
# NOTE(skyewm): We don't support computing gradients wrt a loop variable
# unless it's within the context of a single iteration (i.e. the
        # gradient is wrt the loop parameter in the body function, not wrt or
# through the initial value). This means if we're in a while loop
# context, we should never see a switch node from this context.
# pylint: disable=protected-access
if (control_flow_util.IsSwitch(op) and
op._control_flow_context is not None and
op._control_flow_context.IsWhileContext() and
op._control_flow_context ==
ops.get_default_graph()._get_control_flow_context()):
_RaiseNoGradWrtInitialLoopValError(op, from_ops, xs_set)
# pylint: enable=protected-access
if (grad_fn or is_func_call) and has_out_grads:
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor) and not out_grad) and (
(not grad_fn and is_func_call)
or backprop_util.IsTrainable(op.outputs[i])):
# Only trainable outputs or outputs for a function call that
# will use SymbolicGradient get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
# TODO(apassos) gradients of resource handles might be an
# issue here because of zeros.
if loop_state:
out_grads[i] = loop_state.ZerosLikeV1WhileLoop(op, i)
elif default_gradient.supports_default_grad(op.outputs[i]):
# TODO(b/143286622): The supports_default_grad check is needed
# because While op emits non-differentiable resource tensors
# as outputs. Remove this check when that is not the case.
out_grads[i] = control_flow_state.ZerosLike(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with src_graph._original_op(op):
# pylint: enable=protected-access
if grad_fn:
# If grad_fn was found, do not use SymbolicGradient even for
# functions.
in_grads = _MaybeCompile(grad_scope, op, func_call,
lambda: grad_fn(op, *out_grads))
else:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
in_grads = _MaybeCompile(grad_scope, op, func_call,
lambda: _SymGrad(op, out_grads))
in_grads = _AsList(in_grads)
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len([x for x in in_grads
if x is not None]) > 1:
with ops.device(None):
with ops._colocate_with_for_gradient( # pylint: disable=protected-access
None,
gradient_uid,
ignore_existing=True):
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagate a list of None backwards.
in_grads = [None] * len(_Inputs(op, xs_set))
# Note: we don't filter out eager inputs here because the inputs need to
# line up with in_grads.
for i, (t_in, in_grad) in enumerate(zip(_Inputs(op, xs_set), in_grads)):
if in_grad is not None:
if (isinstance(in_grad, ops.Tensor) and
t_in.dtype != dtypes.resource):
try:
in_grad.set_shape(t_in.get_shape())
except ValueError:
raise ValueError(
"Incompatible shapes between op input and calculated "
"input gradient. Forward operation: %s. Input index: %d. "
"Original input shape: %s. "
"Calculated input gradient shape: %s" %
(op.name, i, t_in.shape, in_grad.shape))
if not isinstance(t_in, ops.EagerTensor):
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# Update pending count for the inputs of op and enqueue ready ops.
_UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
xs_set)
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x, unconnected_gradients) for x in xs]
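# --- Hedged usage sketch (editor's addition, not part of TensorFlow) --------
# Illustrates the `unconnected_gradients` handling implemented above: with
# 'zero', xs that do not influence ys receive a zeros tensor instead of None.
# Assumes a TF build with tf.compat.v1 and graph-mode execution; the helper
# name below is hypothetical.
def _example_unconnected_gradients():
  import tensorflow.compat.v1 as tf1
  tf1.disable_eager_execution()
  x = tf1.constant(2.0)
  z = tf1.constant(5.0)  # z does not contribute to y
  y = x * x
  # Returns [dy/dx, zeros_like(z)] rather than [dy/dx, None].
  return tf1.gradients([y], [x, z], unconnected_gradients='zero')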
def _HasAnyNotNoneGrads(grads, op):
"""Return true iff op has real gradient."""
out_grads = _GetGrads(grads, op)
for out_grad in out_grads:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
return True
if out_grad and isinstance(out_grad, collections_abc.Sequence):
if any(g is not None for g in out_grad):
return True
return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
xs_set):
"""Update pending count for the inputs of op and enqueue ready ops."""
for x in _NonEagerInputs(op, xs_set):
pending_count[x.op] -= 1
ready = (pending_count[x.op] == 0)
if loop_state and not ready:
ready = pending_count[x.op] > 0 and control_flow_util.IsLoopSwitch(x.op)
if ready:
if control_flow_util.IsLoopExit(x.op):
        # if x is an exit without real gradient, defer processing it.
grad_state = loop_state.GetGradState(x.op, before=False)
grad_state.deferred_exits.append(x)
grad_state.pending_exits_count -= 1
if grad_state.pending_exits_count == 0:
# We now have all the exits so process them.
has_not_none_grad = False
for y in grad_state.deferred_exits:
if _HasAnyNotNoneGrads(grads, y.op):
has_not_none_grad = True
queue.append(y.op)
else:
grad_state.unused_exits.append(y)
if has_not_none_grad:
# For an unused exit, if it has trainable outputs, backprop
# a zero gradient. Otherwise, just ignore it.
for y in grad_state.unused_exits:
if backprop_util.IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
else:
# All exits are "unused" so use None as gradient.
for y in grad_state.unused_exits:
queue.append(y.op)
else:
queue.append(x.op)
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_util.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _ZerosLike(t):
t_dtype = default_gradient.get_zeros_dtype(t)
if t.dtype == dtypes.resource:
return array_ops.zeros(
resource_variable_ops.variable_shape(t), dtype=t_dtype)
else:
return array_ops.zeros_like(t, dtype=t_dtype)
def _GetGrad(grads, t, unconnected_gradients):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
if unconnected_gradients == UnconnectedGradients.ZERO:
return _ZerosLike(t)
elif unconnected_gradients == UnconnectedGradients.NONE:
return None
else:
raise ValueError(
"Unknown value for unconnected_gradients: %r" % unconnected_gradients)
t_grad = op_grads[t.value_index]
# This can happen if some other output of `t.op` has non-None grad.
if unconnected_gradients == UnconnectedGradients.ZERO and t_grad is None:
return _ZerosLike(t)
assert not isinstance(
t_grad, list), ("gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join(x.name for x in out_grads if _FilterGrad(x)))
logging.vlog(1, " out --> %s",
", ".join(x.name for x in in_grads if _FilterGrad(x)))
def _MultiDeviceAddN(tensor_list, gradient_uid):
"""Adds tensors from potentially multiple devices."""
# Basic function structure comes from control_flow_ops.group().
# Sort tensors according to their devices.
tensors_on_device = collections.defaultdict(lambda: [])
for tensor in tensor_list:
tensors_on_device[tensor.device].append(tensor)
# For each device, add the tensors on that device first.
# Then gather the partial sums from multiple devices.
# TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
# E.g., aggregate per GPU, then per task, and so on.
summands = []
def DeviceKey(dev):
return "" if dev is None else dev
for dev in sorted(tensors_on_device, key=DeviceKey):
tensors = tensors_on_device[dev]
with ops._colocate_with_for_gradient( # pylint: disable=protected-access
tensors[0].op,
gradient_uid,
ignore_existing=True):
summands.append(math_ops.add_n(tensors))
return math_ops.add_n(summands)
@tf_export("AggregationMethod")
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph.
The following aggregation methods are part of the stable API for
aggregating gradients:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op (see `tf.add_n`). This
method has the property that all gradients must be ready and
buffered separately in memory before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
The following aggregation methods are experimental and may not
be supported in future releases:
  * `EXPERIMENTAL_TREE`: Gradient terms are summed in pairs using
    the "AddN" op. This method of summing gradients may reduce
performance, but it can improve memory utilization because the
gradients can be released earlier.
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2 # An alias for EXPERIMENTAL_ADD_N = 1
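# --- Hedged usage sketch (editor's addition, not part of TensorFlow) --------
# Shows how an AggregationMethod value reaches _AggregatedGrads below: it is
# simply forwarded through tf.gradients. Assumes graph mode via tf.compat.v1;
# the helper name is hypothetical.
def _example_tree_aggregation():
  import tensorflow.compat.v1 as tf1
  tf1.disable_eager_execution()
  x = tf1.placeholder(tf1.float32, [4])
  # Using x in several terms makes several gradient contributions flow back
  # to it, which the chosen aggregation method then combines.
  y = x * x + 3.0 * x
  return tf1.gradients(
      [y], [x], aggregation_method=tf1.AggregationMethod.EXPERIMENTAL_TREE)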
def _AggregatedGrads(grads,
op,
gradient_uid,
loop_state,
aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
gradient_uid: A unique identifier within the graph indicating
which invocation of gradients is being executed. Used to cluster
ops for compilation.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
    A list of gradients, one per output of `op`. If the gradients
    for a particular output are a list, this function aggregates them
    before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [
AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
raise ValueError(
"Invalid aggregation_method specified %s." % aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_util.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (isinstance(out_grad, collections_abc.Sequence) and not all(
isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in out_grad
if g is not None)):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all(isinstance(g, ops.Tensor) for g in out_grad if g is not None):
tensor_shape = _AccumulatorShape(out_grad)
if aggregation_method in [
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid)
logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad),
tensor_shape, used)
else:
out_grads[i] = backprop.aggregate_indexed_slices_gradients(out_grad) # pylint: disable=protected-access
else: # not out_grad
# out_grads[i] is [], thus its aggregation is simply None.
out_grads[i] = None
return out_grads
# Represents the output of TFE_Py_TapeSetPossibleGradientTypes. Real enums are
# unfortunately too slow to use here.
POSSIBLE_GRADIENT_TYPES_NONE = 0
POSSIBLE_GRADIENT_TYPES_FIRST_ORDER = 1
POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER = 2
def PossibleTapeGradientTypes(tensors):
"""Determines whether and how `args` may require tape gradients."""
return pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes(tensors)
|
aam-at/tensorflow
|
tensorflow/python/ops/gradients_util.py
|
Python
|
apache-2.0
| 40,282
|
[
"VisIt"
] |
f088e7db9134439fcdf16c16b37dcca3f3aa91589ca54da3ab5c5df0421d25ad
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering.
This goes on top of the skflow API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
class GMM(estimator.Estimator, TransformerMixin):
"""GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
batch_size=128,
steps=10,
continue_training=False,
config=None,
verbose=1):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
batch_size: See TensorFlowEstimator
steps: See TensorFlowEstimator
continue_training: See TensorFlowEstimator
config: See TensorFlowEstimator
verbose: See TensorFlowEstimator
"""
super(GMM, self).__init__(
model_dir=model_dir,
config=config)
self.batch_size = batch_size
self.steps = steps
self.continue_training = continue_training
self.verbose = verbose
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
def fit(self, x, y=None, monitors=None, logdir=None, steps=None):
"""Trains a GMM clustering on x.
    Note: See TensorFlowEstimator for the logic governing continuous training
    and graph construction across multiple calls to fit.
Args:
x: training input matrix of shape [n_samples, n_features].
y: labels. Should be None.
monitors: List of `Monitor` objects to print training progress and
invoke early stopping.
logdir: the directory to save the log file that can be used for optional
visualization.
steps: number of training steps. If not None, overrides the value passed
in constructor.
Returns:
Returns self.
"""
if logdir is not None:
self._model_dir = logdir
self._data_feeder = data_feeder.setup_train_data_feeder(
x, None, self._num_clusters, self.batch_size)
self._train_model(input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps or self.steps,
monitors=monitors,
init_feed_fn=self._data_feeder.get_feed_dict_fn())
return self
def predict(self, x, batch_size=None):
"""Predict cluster id for each element in x.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, containing cluster ids.
"""
return np.array([
prediction[GMM.ASSIGNMENTS] for prediction in
super(GMM, self).predict(x=x, batch_size=batch_size, as_iterable=True)])
def score(self, x, batch_size=None):
"""Predict total sum of distances to nearest clusters.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total score.
"""
return np.sum(self.evaluate(x=x, batch_size=batch_size)[GMM.SCORES])
def transform(self, x, batch_size=None):
"""Transforms each element in x to distances to cluster centers.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
return np.array([
prediction[GMM.ALL_SCORES] for prediction in
super(GMM, self).predict(x=x, batch_size=batch_size, as_iterable=True)])
def clusters(self):
"""Returns cluster centers."""
clusters = tf.contrib.framework.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return tf.contrib.framework.load_variable(
self.model_dir,
gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat(1, [features[k] for k in sorted(features.keys())])
return features
def _get_train_ops(self, features, _):
(_,
_,
losses,
training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
loss = tf.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
return training_op, loss
def _get_predict_ops(self, features):
(all_scores,
model_predictions,
_,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
def _get_eval_ops(self, features, _, unused_metrics):
(_,
_,
losses,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {
GMM.SCORES: tf.reduce_sum(losses),
}
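# --- Hedged usage sketch (editor's addition, not part of this module) -------
# Minimal illustration of the estimator interface documented above, on
# synthetic data. Assumes a TF 1.x build where tf.contrib is importable; the
# helper name is hypothetical.
def _example_gmm_usage():
  data = np.random.rand(200, 2).astype(np.float32)  # 200 points in 2-D
  gmm = GMM(num_clusters=3, batch_size=32, steps=10)
  gmm.fit(data)
  assignments = gmm.predict(data)  # cluster id per input row
  distances = gmm.transform(data)  # per-cluster scores per input row
  return assignments, distances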
|
neilhan/tensorflow
|
tensorflow/contrib/factorization/python/ops/gmm.py
|
Python
|
apache-2.0
| 7,521
|
[
"Gaussian"
] |
c63b2de54613dc01f7c9a4f19fd3756427afe473f94f8bbc2336407f94142a21
|
""" Main class for doing consistency checks, between files in:
- File Catalog
- TransformationSystem
Should be extended to include the Storage (in DIRAC)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import os
import time
import sys
import re
import errno
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.Core.Utilities.Adler import compareAdler
class ConsistencyInspector(object):
""" A class for handling some consistency checks
"""
def __init__(self, interactive=True, transClient=None, dm=None, fc=None, dic=None):
""" c'tor
        interactive: Data Manager (True) or DIRAC Agent (False)
transClient: TransformationClient() if None, else transClient params
dm: DataManager() if None, else dm params
fc: FileCatalog() if None, else fc params
One object for every production/directoriesList...
"""
self.interactive = interactive
self.transClient = TransformationClient() if transClient is None else transClient
self.dataManager = dm if dm else DataManager()
self.fileCatalog = fc if fc else FileCatalog()
self.dic = dic if dic else DataIntegrityClient()
self.dirac = Dirac()
# Base elements from which to start the consistency checks
self._prod = 0
self._bkQuery = None
self._fileType = []
self._fileTypesExcluded = []
self._lfns = []
self.directories = []
# Accessory elements
self.runsList = []
self.runStatus = None
self.fromProd = None
self.transType = ''
self.cachedReplicas = {}
self.prcdWithDesc = []
self.prcdWithoutDesc = []
self.prcdWithMultDesc = []
self.nonPrcdWithDesc = []
self.nonPrcdWithoutDesc = []
self.nonPrcdWithMultDesc = []
self.descForPrcdLFNs = []
self.descForNonPrcdLFNs = []
self.removedFiles = []
self.absentLFNsInFC = []
self.existLFNsNoSE = {}
self.existLFNsBadReplicas = {}
self.existLFNsBadFiles = {}
self.existLFNsNotExisting = {}
self.commonAncestors = {}
self.multipleDescendants = {}
self.ancestors = {}
self._verbose = False
def __logVerbose(self, msg, msg1=''):
""" logger helper for verbose information """
if self._verbose:
      newMsg = '[ConsistencyChecks] ' + (('[%s] ' % str(self.prod)) if self.prod else '')
# Add that prefix to all lines of the message
newMsg1 = msg1.replace('\n', '\n' + newMsg)
newMsg += msg.replace('\n', '\n' + newMsg)
gLogger.notice(newMsg, newMsg1)
else:
gLogger.verbose(msg, msg1)
##########################################################################
  def checkFC2SE(self):
    """ check files vs SE information """
    res = self.compareChecksum(self.lfns)
    if not res['OK']:
      return res
    repDict = res['Value']
    self.existLFNsNoSE = repDict['MissingReplica']
    self.existLFNsNotExisting = repDict['MissingAllReplicas']
    self.existLFNsBadReplicas = repDict['SomeReplicasCorrupted']
    self.existLFNsBadFiles = repDict['AllReplicasCorrupted']
def getReplicasPresence(self, lfns):
""" get the replicas using the standard FileCatalog.getReplicas()
"""
present = set()
notPresent = set()
chunkSize = 100
printProgress = (len(lfns) > chunkSize)
startTime = time.time()
self.__write("Checking replicas for %d files%s" %
(len(lfns), (' (chunks of %d)' % chunkSize) if printProgress else '... '))
for chunk in breakListIntoChunks(lfns, chunkSize):
if printProgress:
self.__write('.')
for _ in range(1, 10):
res = self.fileCatalog.getReplicas(chunk)
if res['OK']:
present.update(res['Value']['Successful'])
self.cachedReplicas.update(res['Value']['Successful'])
notPresent.update(res['Value']['Failed'])
break
else:
time.sleep(0.1)
self.__write(' (%.1f seconds)\n' % (time.time() - startTime))
if notPresent:
self.__logVerbose("Files without replicas:",
'\n'.join([''] + sorted(notPresent)))
return list(present), list(notPresent)
##########################################################################
def getReplicasPresenceFromDirectoryScan(self, lfns):
""" Get replicas scanning the directories. Might be faster.
"""
dirs = {}
present = []
notPresent = []
compare = True
for lfn in lfns:
dirN = os.path.dirname(lfn)
if lfn == dirN + '/':
compare = False
dirs.setdefault(dirN, []).append(lfn)
if compare:
self.__write("Checking File Catalog for %d files from %d directories " % (
len(lfns), len(dirs)))
else:
self.__write("Getting files from %d directories " % len(dirs))
startTime = time.time()
for dirN in sorted(dirs):
startTime1 = time.time()
self.__write('.')
lfnsFound = self._getFilesFromDirectoryScan(dirN)
gLogger.verbose("Obtained %d files in %.1f seconds" %
(len(lfnsFound), time.time() - startTime1))
if compare:
pr, notPr = self.__compareLFNLists(dirs[dirN], lfnsFound)
notPresent += notPr
present += pr
else:
present += lfnsFound
self.__write(' (%.1f seconds)\n' % (time.time() - startTime))
gLogger.info("Found %d files with replicas and %d without" %
(len(present), len(notPresent)))
return present, notPresent
##########################################################################
def __compareLFNLists(self, lfns, lfnsFound):
""" return files in both lists and files in lfns and not in lfnsFound
"""
present = []
notPresent = lfns
startTime = time.time()
self.__logVerbose("Comparing list of %d LFNs with second list of %d" % (
len(lfns), len(lfnsFound)))
if lfnsFound:
setLfns = set(lfns)
setLfnsFound = set(lfnsFound)
present = list(setLfns & setLfnsFound)
notPresent = list(setLfns - setLfnsFound)
self.__logVerbose("End of comparison: %.1f seconds" %
(time.time() - startTime))
return present, notPresent
def _getFilesFromDirectoryScan(self, dirs):
""" calls dm.getFilesFromDirectory
"""
level = gLogger.getLevel()
gLogger.setLevel('FATAL')
res = self.dataManager.getFilesFromDirectory(dirs)
gLogger.setLevel(level)
if not res['OK']:
if 'No such file or directory' not in res['Message']:
gLogger.error("Error getting files from directories %s:" %
dirs, res['Message'])
return []
if res['Value']:
lfnsFound = res['Value']
else:
lfnsFound = []
return lfnsFound
##########################################################################
def _getTSFiles(self):
""" Helper function - get files from the TS
"""
selectDict = {'TransformationID': self.prod}
if self._lfns:
selectDict['LFN'] = self._lfns
elif self.runStatus and self.fromProd:
res = self.transClient.getTransformationRuns(
{'TransformationID': self.fromProd, 'Status': self.runStatus})
if not res['OK']:
gLogger.error("Failed to get runs for transformation %d" % self.prod)
else:
if res['Value']:
self.runsList.extend(
[run['RunNumber'] for run in res['Value'] if run['RunNumber'] not in self.runsList])
gLogger.notice("%d runs selected" % len(res['Value']))
elif not self.runsList:
gLogger.notice("No runs selected, check completed")
DIRAC.exit(0)
if not self._lfns and self.runsList:
selectDict['RunNumber'] = self.runsList
res = self.transClient.getTransformation(self.prod)
if not res['OK']:
gLogger.error("Failed to find transformation %s" % self.prod)
return [], [], []
status = res['Value']['Status']
if status not in ('Active', 'Stopped', 'Completed', 'Idle'):
gLogger.notice("Transformation %s in status %s, will not check if files are processed" % (
self.prod, status))
processedLFNs = []
nonProcessedLFNs = []
nonProcessedStatuses = []
if self._lfns:
processedLFNs = self._lfns
else:
res = self.transClient.getTransformationFiles(selectDict)
if not res['OK']:
gLogger.error("Failed to get files for transformation %d" %
self.prod, res['Message'])
return [], [], []
else:
processedLFNs = [item['LFN']
for item in res['Value'] if item['Status'] == 'Processed']
nonProcessedLFNs = [item['LFN']
for item in res['Value'] if item['Status'] != 'Processed']
nonProcessedStatuses = list(
set(item['Status'] for item in res['Value'] if item['Status'] != 'Processed'))
return processedLFNs, nonProcessedLFNs, nonProcessedStatuses
def __getDirectories(self):
""" get the directories where to look into (they are either given, or taken from the transformation ID
"""
if self.directories:
directories = []
printout = False
for directory in self.directories:
if not directory.endswith('...'):
directories.append(directory)
else:
printout = True
topDir = os.path.dirname(directory)
res = self.fileCatalog.listDirectory(topDir)
if not res['OK']:
# DError(errno.ENOENT, res['Message'] )
return S_ERROR(errno.ENOENT, res['Message'])
else:
matchDir = directory.split('...')[0]
directories += [d for d in res['Value']['Successful'].get(topDir, {}).get('SubDirs', [])
if d.startswith(matchDir)]
if printout:
gLogger.always('Expanded list of %d directories:\n%s' %
(len(directories), '\n'.join(directories)))
return directories
else:
return S_ERROR(errno.ENOENT, 'Need to specify the directories')
##########################################################################
def __write(self, text):
if self.interactive:
sys.stdout.write(text)
sys.stdout.flush()
##########################################################################
def _selectByFileType(self, lfnDict, fileTypes=None, fileTypesExcluded=None):
""" Select only those files from the values of lfnDict that have a certain type
"""
if not lfnDict:
return {}
if not fileTypes:
fileTypes = self.fileType
if not fileTypesExcluded:
fileTypesExcluded = self.fileTypesExcluded
else:
fileTypesExcluded += [
ft for ft in self.fileTypesExcluded if ft not in fileTypesExcluded]
    # lfnDict is a dictionary of dictionaries including the metadata; take a
    # copy that can be modified
ancDict = dict(lfnDict)
if fileTypes == ['']:
fileTypes = []
# and loop on the original dictionaries
for ancestor in lfnDict:
for desc in list(lfnDict[ancestor]):
ft = lfnDict[ancestor][desc]['FileType']
if ft in fileTypesExcluded or (fileTypes and ft not in fileTypes):
ancDict[ancestor].pop(desc)
if not len(ancDict[ancestor]):
ancDict.pop(ancestor)
return ancDict
@staticmethod
def _getFileTypesCount(lfnDict):
""" return file types count
"""
ft_dict = {}
for ancestor in lfnDict:
t_dict = {}
for desc in lfnDict[ancestor]:
ft = lfnDict[ancestor][desc]['FileType']
t_dict[ft] = t_dict.setdefault(ft, 0) + 1
ft_dict[ancestor] = t_dict
return ft_dict
def __getLFNsFromFC(self):
""" Check if a list of LFNs is in the FC or not """
if not self.lfns:
directories = []
for dirName in self.__getDirectories():
if not dirName.endswith('/'):
dirName += '/'
directories.append(dirName)
present, notPresent = self.getReplicasPresenceFromDirectoryScan(
directories)
else:
present, notPresent = self.getReplicasPresence(self.lfns)
return present, notPresent
def compareChecksum(self, lfns):
"""compare the checksum of the file in the FC and the checksum of the physical replicas.
Returns a dictionary containing 3 sub-dictionaries: one with files with missing PFN, one with
files with all replicas corrupted, and one with files with some replicas corrupted and at least
one good replica
"""
retDict = {'AllReplicasCorrupted': {},
'SomeReplicasCorrupted': {},
'MissingReplica': {},
'MissingAllReplicas': {},
'NoReplicas': {}}
chunkSize = 100
replicas = {}
setLfns = set(lfns)
cachedLfns = setLfns & set(self.cachedReplicas)
for lfn in cachedLfns:
replicas[lfn] = self.cachedReplicas[lfn]
lfnsLeft = list(setLfns - cachedLfns)
if lfnsLeft:
self.__write("Get replicas for %d files (chunks of %d): " %
(len(lfnsLeft), chunkSize))
for lfnChunk in breakListIntoChunks(lfnsLeft, chunkSize):
self.__write('.')
replicasRes = self.fileCatalog.getReplicas(lfnChunk)
if not replicasRes['OK']:
gLogger.error("error: %s" % replicasRes['Message'])
return S_ERROR(errno.ENOENT, "error: %s" % replicasRes['Message'])
replicasRes = replicasRes['Value']
if replicasRes['Failed']:
retDict['NoReplicas'].update(replicasRes['Failed'])
replicas.update(replicasRes['Successful'])
self.__write("Get FC metadata for %d files to be checked: " % len(lfns))
metadata = {}
for lfnChunk in breakListIntoChunks(replicas, chunkSize):
self.__write('.')
res = self.fileCatalog.getFileMetadata(lfnChunk)
if not res['OK']:
return S_ERROR(errno.ENOENT, "error %s" % res['Message'])
metadata.update(res['Value']['Successful'])
gLogger.notice("Check existence and compare checksum file by file...")
csDict = {}
seFiles = {}
# Reverse the LFN->SE dictionary
nReps = 0
for lfn in replicas:
csDict.setdefault(lfn, {})['FCChecksum'] = metadata.get(
lfn, {}).get('Checksum')
for se in replicas[lfn]:
seFiles.setdefault(se, []).append(lfn)
nReps += 1
gLogger.notice('Getting checksum of %d replicas in %d SEs' %
(nReps, len(seFiles)))
checkSum = {}
lfnNotExisting = {}
lfnNoInfo = {}
logLevel = gLogger.getLevel()
gLogger.setLevel('FATAL')
for num, se in enumerate(sorted(seFiles)):
self.__write('\n%d. At %s (%d files): ' % (num, se, len(seFiles[se])))
oSe = StorageElement(se)
notFound = 0
for surlChunk in breakListIntoChunks(seFiles[se], chunkSize):
self.__write('.')
metadata = oSe.getFileMetadata(surlChunk)
if not metadata['OK']:
gLogger.error("Error: getFileMetadata returns %s. Ignore those replicas" % (
metadata['Message']))
# Remove from list of replicas as we don't know whether it is OK or
# not
for lfn in seFiles[se]:
lfnNoInfo.setdefault(lfn, []).append(se)
else:
metadata = metadata['Value']
notFound += len(metadata['Failed'])
for lfn in metadata['Failed']:
lfnNotExisting.setdefault(lfn, []).append(se)
for lfn in metadata['Successful']:
checkSum.setdefault(
lfn, {})[se] = metadata['Successful'][lfn]['Checksum']
if notFound:
gLogger.error('%d files not found' % notFound)
gLogger.setLevel(logLevel)
gLogger.notice('Verifying checksum of %d files' % len(replicas))
for lfn in replicas:
# get the lfn checksum from the FC
replicaDict = replicas[lfn]
oneGoodReplica = False
allGoodReplicas = True
fcChecksum = csDict[lfn].pop('FCChecksum')
for se in replicaDict:
# If replica doesn't exist skip check
if se in lfnNotExisting.get(lfn, []):
allGoodReplicas = False
continue
if se in lfnNoInfo.get(lfn, []):
# If there is no info, a priori it could be good
oneGoodReplica = True
continue
# get the surls metadata and compare the checksum
surlChecksum = checkSum.get(lfn, {}).get(se, '')
if not surlChecksum or not compareAdler(fcChecksum, surlChecksum):
# if fcChecksum does not match surlChecksum
csDict[lfn][se] = {'PFNChecksum': surlChecksum}
gLogger.info("ERROR!! checksum mismatch at %s for LFN %s: FC checksum: %s , PFN checksum : %s "
% (se, lfn, fcChecksum, surlChecksum))
allGoodReplicas = False
else:
oneGoodReplica = True
if not oneGoodReplica:
if lfn in lfnNotExisting:
gLogger.info("=> All replicas are missing", lfn)
retDict['MissingAllReplicas'][lfn] = 'All'
else:
gLogger.info("=> All replicas have bad checksum", lfn)
retDict['AllReplicasCorrupted'][lfn] = csDict[lfn]
elif not allGoodReplicas:
if lfn in lfnNotExisting:
gLogger.info("=> At least one replica missing", lfn)
retDict['MissingReplica'][lfn] = lfnNotExisting[lfn]
else:
gLogger.info("=> At least one replica with good Checksum", lfn)
retDict['SomeReplicasCorrupted'][lfn] = csDict[lfn]
return S_OK(retDict)
##########################################################################
# properties
def set_prod(self, value):
""" Setter """
if value:
value = int(value)
res = self.transClient.getTransformation(value, extraParams=False)
if not res['OK']:
        gLogger.error("Couldn't find transformation %d: %s" %
                      (value, res['Message']))
else:
self.transType = res['Value']['Type']
if self.interactive:
gLogger.info("Production %d has type %s" % (value, self.transType))
else:
value = 0
self._prod = value
def get_prod(self):
""" Getter """
return self._prod
prod = property(get_prod, set_prod)
def set_fileType(self, value):
""" Setter """
self._fileType = [ft.upper() for ft in value]
def get_fileType(self):
""" Getter """
return self._fileType
fileType = property(get_fileType, set_fileType)
def set_fileTypesExcluded(self, value):
""" Setter """
self._fileTypesExcluded = [ft.upper() for ft in value]
def get_fileTypesExcluded(self):
""" Getter """
return self._fileTypesExcluded
fileTypesExcluded = property(get_fileTypesExcluded, set_fileTypesExcluded)
def set_lfns(self, value):
""" Setter """
if isinstance(value, six.string_types):
value = [value]
value = [v.replace(' ', '').replace('//', '/') for v in value]
self._lfns = value
def get_lfns(self):
""" Getter """
return self._lfns
lfns = property(get_lfns, set_lfns)
##########################################################################
#
# This part was backported from DataIntegrityClient
#
#
# This section contains the specific methods for File Catalog->SE checks
#
def catalogDirectoryToSE(self, lfnDir):
""" This obtains the replica and metadata information from the catalog
for the supplied directory and checks against the storage elements.
"""
gLogger.info("-" * 40)
gLogger.info("Performing the FC->SE check")
gLogger.info("-" * 40)
if isinstance(lfnDir, six.string_types):
lfnDir = [lfnDir]
res = self._getCatalogDirectoryContents(lfnDir)
if not res['OK']:
return res
replicas = res['Value']['Replicas']
catalogMetadata = res['Value']['Metadata']
res = self.checkPhysicalFiles(replicas, catalogMetadata)
if not res['OK']:
return res
resDict = {'CatalogMetadata': catalogMetadata, 'CatalogReplicas': replicas}
return S_OK(resDict)
def catalogFileToSE(self, lfns):
""" This obtains the replica and metadata information from the catalog and checks against the storage elements.
"""
gLogger.info("-" * 40)
gLogger.info("Performing the FC->SE check")
gLogger.info("-" * 40)
if isinstance(lfns, six.string_types):
lfns = [lfns]
res = self._getCatalogMetadata(lfns)
if not res['OK']:
return res
catalogMetadata, _missingCatalogFiles, _zeroSizeFiles = res['Value']
res = self._getCatalogReplicas(list(catalogMetadata))
if not res['OK']:
return res
replicas, _zeroReplicaFiles = res['Value']
res = self.checkPhysicalFiles(replicas, catalogMetadata)
if not res['OK']:
return res
resDict = {'CatalogMetadata': catalogMetadata, 'CatalogReplicas': replicas}
return S_OK(resDict)
def checkPhysicalFiles(self, replicas, catalogMetadata, ses=None):
""" This method takes the supplied replica and metadata information obtained
from the catalog and checks against the storage elements.
"""
# FIXME: we better use the compareChecksum function instead of this one!
# or maybe directly checkFC2SE
gLogger.info("-" * 40)
gLogger.info("Performing the FC->SE check")
gLogger.info("-" * 40)
seLfns = {}
for lfn, replicaDict in replicas.items():
for se, _url in replicaDict.items():
if (ses) and (se not in ses):
continue
seLfns.setdefault(se, []).append(lfn)
gLogger.info('%s %s' % ('Storage Element'.ljust(20), 'Replicas'.rjust(20)))
for se in sorted(seLfns):
files = len(seLfns[se])
gLogger.info('%s %s' % (se.ljust(20), str(files).rjust(20)))
lfns = seLfns[se]
sizeMismatch = []
res = self.__checkPhysicalFileMetadata(lfns, se)
if not res['OK']:
gLogger.error('Failed to get physical file metadata.', res['Message'])
return res
for lfn, metadata in res['Value'].items():
if lfn in catalogMetadata:
# and ( metadata['Size'] != 0 ):
if metadata['Size'] != catalogMetadata[lfn]['Size']:
sizeMismatch.append(
(lfn, 'deprecatedUrl', se, 'CatalogPFNSizeMismatch'))
if sizeMismatch:
self.dic.reportProblematicReplicas(
sizeMismatch, se, 'CatalogPFNSizeMismatch')
return S_OK()
def __checkPhysicalFileMetadata(self, lfns, se):
""" Check obtain the physical file metadata and check the files are available
"""
gLogger.info('Checking the integrity of %s physical files at %s' %
(len(lfns), se))
res = StorageElement(se).getFileMetadata(lfns)
if not res['OK']:
gLogger.error('Failed to get metadata for lfns.', res['Message'])
return res
pfnMetadata = res['Value']['Successful']
# If the replicas are completely missing
missingReplicas = []
for lfn, reason in res['Value']['Failed'].items():
if re.search('File does not exist', reason):
missingReplicas.append((lfn, 'deprecatedUrl', se, 'PFNMissing'))
if missingReplicas:
self.dic.reportProblematicReplicas(missingReplicas, se, 'PFNMissing')
lostReplicas = []
unavailableReplicas = []
zeroSizeReplicas = []
# If the files are not accessible
for lfn, metadata in pfnMetadata.items():
if metadata.get('Lost'):
lostReplicas.append((lfn, 'deprecatedUrl', se, 'PFNLost'))
if metadata.get('Unavailable') or not metadata['Accessible']:
unavailableReplicas.append(
(lfn, 'deprecatedUrl', se, 'PFNUnavailable'))
if not metadata['Size']:
zeroSizeReplicas.append((lfn, 'deprecatedUrl', se, 'PFNZeroSize'))
if lostReplicas:
self.dic.reportProblematicReplicas(lostReplicas, se, 'PFNLost')
if unavailableReplicas:
self.dic.reportProblematicReplicas(
unavailableReplicas, se, 'PFNUnavailable')
if zeroSizeReplicas:
self.dic.reportProblematicReplicas(zeroSizeReplicas, se, 'PFNZeroSize')
gLogger.info(
'Checking the integrity of physical files at %s complete' % se)
return S_OK(pfnMetadata)
##########################################################################
#
# This section contains the specific methods for SE->File Catalog checks
#
def _getCatalogDirectoryContents(self, lfnDirs):
""" Obtain the contents of the supplied directory, recursively
"""
def _getDirectoryContent(directory):
""" Inner function: recursively scan a directory, returns list of LFNs
"""
filesInDirectory = {}
gLogger.debug("Examining %s" % directory)
res = self.fileCatalog.listDirectory(directory)
if not res['OK']:
gLogger.error('Failed to get directory contents', res['Message'])
return res
if directory in res['Value']['Failed']:
gLogger.error('Failed to get directory content', '%s %s' %
(directory, res['Value']['Failed'][directory]))
return S_ERROR('Failed to get directory content')
if directory not in res['Value']['Successful']:
return S_ERROR('Directory not existing?')
# first, adding the files found in the current directory
gLogger.debug("Files in %s: %d" % (directory, len(
res['Value']['Successful'][directory]['Files'])))
filesInDirectory.update(res['Value']['Successful'][directory]['Files'])
# then, looking for subDirectories content
if res['Value']['Successful'][directory]['SubDirs']:
for l_dir in res['Value']['Successful'][directory]['SubDirs']:
# recursion here
subDirContent = _getDirectoryContent(l_dir)
if not subDirContent['OK']:
return subDirContent
else:
filesInDirectory.update(subDirContent['Value'])
return S_OK(filesInDirectory)
gLogger.info(
'Obtaining the catalog contents for %d directories' % len(lfnDirs))
allFiles = {}
for lfnDir in lfnDirs:
dirContent = _getDirectoryContent(lfnDir)
if not dirContent['OK']:
return dirContent
else:
gLogger.debug("Content of directory %s: %d files" %
(lfnDir, len(dirContent['Value'])))
allFiles.update(dirContent['Value'])
gLogger.debug("Content of directories examined: %d files" % len(allFiles))
replicas = self.fileCatalog.getReplicas(list(allFiles))
if not replicas['OK']:
return replicas
if replicas['Value']['Failed']:
return S_ERROR("Failures in replicas discovery")
return S_OK({'Metadata': allFiles, 'Replicas': replicas['Value']['Successful']})
def _getCatalogReplicas(self, lfns):
""" Obtain the file replicas from the catalog while checking that there are replicas
"""
gLogger.info('Obtaining the replicas for %s files' % len(lfns))
zeroReplicaFiles = []
res = self.fileCatalog.getReplicas(lfns, allStatus=True)
if not res['OK']:
gLogger.error('Failed to get catalog replicas', res['Message'])
return res
allReplicas = res['Value']['Successful']
for lfn, error in res['Value']['Failed'].items():
if re.search('File has zero replicas', error):
zeroReplicaFiles.append(lfn)
gLogger.info('Obtaining the replicas for files complete')
return S_OK((allReplicas, zeroReplicaFiles))
def _getCatalogMetadata(self, lfns):
""" Obtain the file metadata from the catalog while checking they exist
"""
if not lfns:
return S_OK({})
gLogger.info('Obtaining the catalog metadata for %s files' % len(lfns))
missingCatalogFiles = []
zeroSizeFiles = []
res = self.fileCatalog.getFileMetadata(lfns)
if not res['OK']:
gLogger.error('Failed to get catalog metadata', res['Message'])
return res
allMetadata = res['Value']['Successful']
for lfn, error in res['Value']['Failed'].items():
if re.search('No such file or directory', error):
missingCatalogFiles.append(lfn)
gLogger.info('Obtaining the catalog metadata complete')
return S_OK((allMetadata, missingCatalogFiles, zeroSizeFiles))
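# --- Hedged usage sketch (editor's addition, not part of DIRAC) -------------
# Minimal illustration of the FC vs SE checksum comparison implemented above.
# The LFNs are placeholders and a configured DIRAC client environment is
# assumed; the helper name is hypothetical.
def _exampleCompareChecksum():
  ci = ConsistencyInspector(interactive=False)
  lfns = ['/some/vo/prod/file_1.dst', '/some/vo/prod/file_2.dst']  # placeholder LFNs
  res = ci.compareChecksum(lfns)
  if not res['OK']:
    return res
  report = res['Value']
  # Files whose replicas all disagree with the catalog checksum
  return S_OK(report['AllReplicasCorrupted'])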
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/Client/ConsistencyInspector.py
|
Python
|
gpl-3.0
| 28,520
|
[
"DIRAC"
] |
3d9617b2fa596905c035d4d221a40bb17bd9c8d4669b2f7cc0871eb868c4d87d
|
"""Polynomial factorization routines in characteristic zero. """
from sympy.polys.galoistools import (
gf_from_int_poly, gf_to_int_poly,
gf_degree, gf_from_dict,
gf_lshift, gf_add_mul, gf_mul,
gf_div, gf_rem,
gf_gcd, gf_gcdex,
gf_sqf_p,
gf_factor_sqf, gf_factor)
from sympy.polys.densebasic import (
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC, dmp_TC, dmp_ground_TC,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dup_from_dict, dmp_from_dict,
dmp_zero, dmp_zero_p,
dmp_one, dmp_one_p,
dmp_nest, dmp_raise,
dup_strip, dmp_strip,
dmp_ground,
dup_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd)
from sympy.polys.densearith import (
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr, dmp_sqr,
dup_pow, dmp_pow,
dup_div, dmp_div,
dup_rem, dmp_rem,
dup_quo, dmp_quo,
dup_expand, dmp_expand,
dup_add_mul, dmp_add_mul,
dup_sub_mul, dmp_sub_mul,
dup_lshift, dup_rshift,
dup_max_norm, dmp_max_norm,
dup_l1_norm, dmp_l1_norm,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_trunc, dmp_ground_trunc,
dup_content, dmp_ground_content,
dup_monic, dmp_ground_monic,
dup_primitive, dmp_ground_primitive,
dup_eval, dmp_eval_tail,
dmp_eval_in, dmp_diff_eval_in,
dup_compose, dmp_compose,
dup_shift, dup_mirror)
from sympy.polys.euclidtools import (
dmp_primitive,
dup_gcd, dmp_gcd,
dup_inner_gcd, dmp_inner_gcd)
from sympy.polys.sqfreetools import (
dup_sqf_p, dmp_sqf_p,
dup_sqf_norm, dmp_sqf_norm,
dup_sqf_part, dmp_sqf_part)
from sympy.polys.polyutils import _sort_factors
from sympy.polys.polyconfig import query
from sympy.polys.polyerrors import (
ExtraneousFactors, DomainError, CoercionFailed, EvaluationFailed)
from sympy.ntheory import nextprime, isprime, factorint
from sympy.utilities import subsets, cythonized
from math import ceil, log
from random import randint
@cythonized("k")
def dup_trial_division(f, factors, K):
"""Determine multiplicities of factors using trial division. """
result = []
for factor in factors:
k = 0
while True:
q, r = dup_div(f, factor, K)
if not r:
f, k = q, k+1
else:
break
result.append((factor, k))
return _sort_factors(result)
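# --- Hedged usage sketch (editor's addition, not part of sympy) -------------
# Illustrates dup_trial_division above: for f = (x - 1)**2*(x + 2), i.e.
# x**3 - 3*x + 2, trial division against x - 1 and x + 2 recovers the
# multiplicities 2 and 1. The helper name is hypothetical.
def _example_trial_division():
    from sympy.polys.domains import ZZ
    f = [ZZ(1), ZZ(0), ZZ(-3), ZZ(2)]            # x**3 - 3*x + 2
    factors = [[ZZ(1), ZZ(-1)], [ZZ(1), ZZ(2)]]  # x - 1 and x + 2
    return dup_trial_division(f, factors, ZZ)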
@cythonized("u,k")
def dmp_trial_division(f, factors, u, K):
"""Determine multiplicities of factors using trial division. """
result = []
for factor in factors:
k = 0
while True:
q, r = dmp_div(f, factor, u, K)
if dmp_zero_p(r, u):
f, k = q, k+1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dup_zz_mignotte_bound(f, K):
"""Mignotte bound for univariate polynomials in `K[x]`. """
a = dup_max_norm(f, K)
b = abs(dup_LC(f, K))
n = dup_degree(f)
return K.sqrt(K(n+1))*2**n*a*b
def dmp_zz_mignotte_bound(f, u, K):
"""Mignotte bound for multivariate polynomials in `K[X]`. """
a = dmp_max_norm(f, u, K)
b = abs(dmp_ground_LC(f, u, K))
n = sum(dmp_degree_list(f, u))
return K.sqrt(K(n+1))*2**n*a*b
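# --- Hedged usage sketch (editor's addition, not part of sympy) -------------
# Worked instance of the univariate Mignotte bound above: for
# f = x**2 + 3*x + 2 over ZZ we have max-norm a = 3, |lc(f)| = 1 and n = 2,
# so the bound is K.sqrt(K(3))*2**2*3*1. The helper name is hypothetical.
def _example_mignotte_bound():
    from sympy.polys.domains import ZZ
    f = [ZZ(1), ZZ(3), ZZ(2)]  # dense coefficients of x**2 + 3*x + 2
    return dup_zz_mignotte_bound(f, ZZ)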
def dup_zz_hensel_step(m, f, g, h, s, t, K):
"""
One step in Hensel lifting in `Z[x]`.
Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
and `t` such that::
f == g*h (mod m)
s*g + t*h == 1 (mod m)
lc(f) is not a zero divisor (mod m)
lc(h) == 1
deg(f) == deg(g) + deg(h)
deg(s) < deg(h)
deg(t) < deg(g)
returns polynomials `G`, `H`, `S` and `T`, such that::
f == G*H (mod m**2)
        S*G + T*H == 1 (mod m**2)
**References**
1. [Gathen99]_
"""
M = m**2
e = dup_sub_mul(f, g, h, K)
e = dup_trunc(e, M, K)
q, r = dup_div(dup_mul(s, e, K), h, K)
q = dup_trunc(q, M, K)
r = dup_trunc(r, M, K)
u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
G = dup_trunc(dup_add(g, u, K), M, K)
H = dup_trunc(dup_add(h, r, K), M, K)
u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
b = dup_trunc(dup_sub(u, [K.one], K), M, K)
c, d = dup_div(dup_mul(s, b, K), H, K)
c = dup_trunc(c, M, K)
d = dup_trunc(d, M, K)
u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
S = dup_trunc(dup_sub(s, d, K), M, K)
T = dup_trunc(dup_sub(t, u, K), M, K)
return G, H, S, T
@cythonized("l,r,k,d")
def dup_zz_hensel_lift(p, f, f_list, l, K):
"""
Multifactor Hensel lifting in `Z[x]`.
Given a prime `p`, polynomial `f` over `Z[x]` such that `lc(f)`
is a unit modulo `p`, monic pair-wise coprime polynomials `f_i`
over `Z[x]` satisfying::
f = lc(f) f_1 ... f_r (mod p)
and a positive integer `l`, returns a list of monic polynomials
`F_1`, `F_2`, ..., `F_r` satisfying::
f = lc(f) F_1 ... F_r (mod p**l)
F_i = f_i (mod p), i = 1..r
**References**
1. [Gathen99]_
"""
r = len(f_list)
lc = dup_LC(f, K)
if r == 1:
F = dup_mul_ground(f, K.gcdex(lc, p**l)[0], K)
return [ dup_trunc(F, p**l, K) ]
m = p
k = r // 2
d = int(ceil(log(l, 2)))
g = gf_from_int_poly([lc], p)
for f_i in f_list[:k]:
g = gf_mul(g, gf_from_int_poly(f_i, p), p, K)
h = gf_from_int_poly(f_list[k], p)
for f_i in f_list[k+1:]:
h = gf_mul(h, gf_from_int_poly(f_i, p), p, K)
s, t, _ = gf_gcdex(g, h, p, K)
g = gf_to_int_poly(g, p)
h = gf_to_int_poly(h, p)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
for _ in range(1, d+1):
(g, h, s, t), m = dup_zz_hensel_step(m, f, g, h, s, t, K), m**2
return dup_zz_hensel_lift(p, g, f_list[:k], l, K) \
+ dup_zz_hensel_lift(p, h, f_list[k:], l, K)
@cythonized("l,s")
def dup_zz_zassenhaus(f, K):
"""Factor primitive square-free polynomials in `Z[x]`. """
n = dup_degree(f)
if n == 1:
return [f]
A = dup_max_norm(f, K)
b = dup_LC(f, K)
B = int(abs(K.sqrt(K(n+1))*2**n*A*b))
C = int((n+1)**(2*n)*A**(2*n-1))
gamma = int(ceil(2*log(C, 2)))
bound = int(2*gamma*log(gamma))
for p in xrange(3, bound+1):
if not isprime(p) or b % p == 0:
continue
p = K.convert(p)
F = gf_from_int_poly(f, p)
if gf_sqf_p(F, p, K):
break
l = int(ceil(log(2*B + 1, p)))
modular = []
for ff in gf_factor_sqf(F, p, K)[1]:
modular.append(gf_to_int_poly(ff, p))
g = dup_zz_hensel_lift(p, f, modular, l, K)
T = set(range(len(g)))
factors, s = [], 1
while 2*s <= len(T):
for S in subsets(T, s):
G, H = [b], [b]
S = set(S)
for i in S:
G = dup_mul(G, g[i], K)
for i in T-S:
H = dup_mul(H, g[i], K)
G = dup_trunc(G, p**l, K)
H = dup_trunc(H, p**l, K)
G_norm = dup_l1_norm(G, K)
H_norm = dup_l1_norm(H, K)
if G_norm*H_norm <= B:
T = T - S
G = dup_primitive(G, K)[1]
f = dup_primitive(H, K)[1]
factors.append(G)
b = dup_LC(f, K)
break
else:
s += 1
return factors + [f]
def dup_zz_irreducible_p(f, K):
"""Test irreducibility using Eisenstein's criterion. """
lc = dup_LC(f, K)
tc = dup_TC(f, K)
e_fc = dup_content(f[1:], K)
if e_fc:
e_ff = factorint(int(e_fc))
for p in e_ff.iterkeys():
if (lc % p) and (tc % p**2):
return True
@cythonized("n,i")
def dup_zz_cyclotomic_p(f, K, irreducible=False):
"""
    Efficiently test if ``f`` is a cyclotomic polynomial.
**Examples**
>>> from sympy.polys.factortools import dup_zz_cyclotomic_p
>>> from sympy.polys.domains import ZZ
>>> f = [1, 0, 1, 0, 0, 0,-1, 0, 1, 0,-1, 0, 0, 0, 1, 0, 1]
>>> dup_zz_cyclotomic_p(f, ZZ)
False
>>> g = [1, 0, 1, 0, 0, 0,-1, 0,-1, 0,-1, 0, 0, 0, 1, 0, 1]
>>> dup_zz_cyclotomic_p(g, ZZ)
True
"""
if K.is_QQ:
try:
K0, K = K, K.get_ring()
f = dup_convert(f, K0, K)
except CoercionFailed:
return False
elif not K.is_ZZ:
return False
lc = dup_LC(f, K)
tc = dup_TC(f, K)
if lc != 1 or (tc != -1 and tc != 1):
return False
if not irreducible:
coeff, factors = dup_factor_list(f, K)
if coeff != K.one or factors != [(f, 1)]:
return False
n = dup_degree(f)
g, h = [], []
for i in xrange(n, -1, -2):
g.insert(0, f[i])
for i in xrange(n-1, -1, -2):
h.insert(0, f[i])
g = dup_sqr(dup_strip(g), K)
h = dup_sqr(dup_strip(h), K)
F = dup_sub(g, dup_lshift(h, 1, K), K)
if K.is_negative(dup_LC(F, K)):
F = dup_neg(F, K)
if F == f:
return True
g = dup_mirror(f, K)
if K.is_negative(dup_LC(g, K)):
g = dup_neg(g, K)
if F == g and dup_zz_cyclotomic_p(g, K):
return True
G = dup_sqf_part(F, K)
if dup_sqr(G, K) == F and dup_zz_cyclotomic_p(G, K):
return True
return False
@cythonized("n,p,k")
def dup_zz_cyclotomic_poly(n, K):
"""Efficiently generate n-th cyclotomic polnomial. """
h = [K.one,-K.one]
for p, k in factorint(n).iteritems():
h = dup_quo(dup_inflate(h, p, K), h, K)
h = dup_inflate(h, p**(k-1), K)
return h
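# --- Hedged usage sketch (editor's addition, not part of sympy) -------------
# Illustrates dup_zz_cyclotomic_poly above: the 6th cyclotomic polynomial is
# x**2 - x + 1, i.e. the dense coefficient list [1, -1, 1] over ZZ. The helper
# name is hypothetical.
def _example_cyclotomic_poly():
    from sympy.polys.domains import ZZ
    return dup_zz_cyclotomic_poly(6, ZZ)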
@cythonized("n,p,k,i")
def _dup_cyclotomic_decompose(n, K):
H = [[K.one,-K.one]]
for p, k in factorint(n).iteritems():
Q = [ dup_quo(dup_inflate(h, p, K), h, K) for h in H ]
H.extend(Q)
for i in xrange(1, k):
Q = [ dup_inflate(q, p, K) for q in Q ]
H.extend(Q)
return H
@cythonized("n")
def dup_zz_cyclotomic_factor(f, K):
"""
Efficiently factor polynomials `x**n - 1` and `x**n + 1` in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` returns a list of factors
of `f`, provided that `f` is in the form `x**n - 1` or `x**n + 1` for
`n >= 1`. Otherwise returns None.
    Factorization is performed using cyclotomic decomposition of `f`,
    which makes this method much faster than any other direct factorization
approach (e.g. Zassenhaus's).
**References**
1. [Weisstein09]_
"""
lc_f, tc_f = dup_LC(f, K), dup_TC(f, K)
if dup_degree(f) <= 0:
return None
if lc_f != 1 or tc_f not in [-1, 1]:
return None
if any([ bool(cf) for cf in f[1:-1] ]):
return None
n = dup_degree(f)
F = _dup_cyclotomic_decompose(n, K)
if not K.is_one(tc_f):
return F
else:
H = []
for h in _dup_cyclotomic_decompose(2*n, K):
if h not in F:
H.append(h)
return H
@cythonized("n")
def dup_zz_factor_sqf(f, K):
"""Factor square-free (non-primitive) polyomials in `Z[x]`. """
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [(g, 1)]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [(g, 1)]
factors = None
if query('USE_CYCLOTOMIC_FACTOR'):
factors = dup_zz_cyclotomic_factor(g, K)
if factors is None:
factors = dup_zz_zassenhaus(g, K)
return cont, _sort_factors(factors, multiple=False)
@cythonized("n,k")
def dup_zz_factor(f, K):
"""
Factor (non square-free) polynomials in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
The factorization is computed by reducing the input polynomial
into a primitive square-free polynomial and factoring it using
Zassenhaus algorithm. Trial division is used to recover the
multiplicities of factors.
The result is returned as a tuple consisting of::
        (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Consider polynomial `f = 2*x**4 - 2`::
>>> from sympy.polys.factortools import dup_zz_factor
>>> from sympy.polys.domains import ZZ
>>> dup_zz_factor([2, 0, 0, 0, -2], ZZ)
(2, [([1, -1], 1), ([1, 1], 1), ([1, 0, 1], 1)])
    As a result we get the following factorization::
f = 2 (x - 1) (x + 1) (x**2 + 1)
Note that this is a complete factorization over integers,
however over Gaussian integers we can factor the last term.
By default, polynomials `x**n - 1` and `x**n + 1` are factored
using cyclotomic decomposition to speedup computations. To
disable this behaviour set cyclotomic=False.
**References**
1. [Gathen99]_
"""
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [(g, 1)]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [(g, 1)]
g = dup_sqf_part(g, K)
H, factors = None, []
if query('USE_CYCLOTOMIC_FACTOR'):
H = dup_zz_cyclotomic_factor(g, K)
if H is None:
H = dup_zz_zassenhaus(g, K)
for h in H:
k = 0
while True:
q, r = dup_div(f, h, K)
if not r:
f, k = q, k+1
else:
break
factors.append((h, k))
return cont, _sort_factors(factors)
def dmp_zz_wang_non_divisors(E, cs, ct, K):
"""Wang/EEZ: Compute a set of valid divisors. """
result = [ cs*ct ]
for q in E:
q = abs(q)
for r in reversed(result):
while r != 1:
r = K.gcd(r, q)
q = q // r
if K.is_one(q):
return None
result.append(q)
return result[1:]
@cythonized("u,v")
def dmp_zz_wang_test_points(f, T, ct, A, u, K):
"""Wang/EEZ: Test evaluation points for suitability. """
if not dmp_eval_tail(dmp_LC(f, K), A, u-1, K):
raise EvaluationFailed('no luck')
g = dmp_eval_tail(f, A, u, K)
if not dup_sqf_p(g, K):
raise EvaluationFailed('no luck')
c, h = dup_primitive(g, K)
if K.is_negative(dup_LC(h, K)):
c, h = -c, dup_neg(h, K)
v = u-1
E = [ dmp_eval_tail(t, A, v, K) for t, _ in T ]
D = dmp_zz_wang_non_divisors(E, c, ct, K)
if D is not None:
return c, h, E
else:
raise EvaluationFailed('no luck')
@cythonized("u,v,i,j,k")
def dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K):
"""Wang/EEZ: Compute correct leading coefficients. """
C, J, v = [], [0]*len(E), u-1
for h in H:
c = dmp_one(v, K)
d = dup_LC(h, K)*cs
for i in reversed(xrange(len(E))):
k, e, (t, _) = 0, E[i], T[i]
while not (d % e):
d, k = d//e, k+1
if k != 0:
c, J[i] = dmp_mul(c, dmp_pow(t, k, v, K), v, K), 1
C.append(c)
if any([ not j for j in J ]):
raise ExtraneousFactors # pragma: no cover
CC, HH = [], []
for c, h in zip(C, H):
d = dmp_eval_tail(c, A, v, K)
lc = dup_LC(h, K)
if K.is_one(cs):
cc = lc//d
else:
g = K.gcd(lc, d)
d, cc = d//g, lc//g
h, cs = dup_mul_ground(h, d, K), cs//d
c = dmp_mul_ground(c, cc, v, K)
CC.append(c)
HH.append(h)
if K.is_one(cs):
return f, HH, CC
CCC, HHH = [], []
for c, h in zip(CC, HH):
CCC.append(dmp_mul_ground(c, cs, v, K))
HHH.append(dmp_mul_ground(h, cs, 0, K))
f = dmp_mul_ground(f, cs**(len(H)-1), u, K)
return f, HHH, CCC
@cythonized("m")
def dup_zz_diophantine(F, m, p, K):
"""Wang/EEZ: Solve univariate Diophantine equations. """
if len(F) == 2:
a, b = F
f = gf_from_int_poly(a, p)
g = gf_from_int_poly(b, p)
s, t, G = gf_gcdex(g, f, p, K)
s = gf_lshift(s, m, K)
t = gf_lshift(t, m, K)
q, s = gf_div(s, f, p, K)
t = gf_add_mul(t, q, g, p, K)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
result = [s, t]
else:
G = [F[-1]]
for f in reversed(F[1:-1]):
G.insert(0, dup_mul(f, G[0], K))
S, T = [], [[1]]
for f, g in zip(F, G):
t, s = dmp_zz_diophantine([g, f], T[-1], [], 0, p, 1, K)
T.append(t)
S.append(s)
result, S = [], S + [T[-1]]
for s, f in zip(S, F):
s = gf_from_int_poly(s, p)
f = gf_from_int_poly(f, p)
r = gf_rem(gf_lshift(s, m, K), f, p, K)
s = gf_to_int_poly(r, p)
result.append(s)
return result
@cythonized("u,v,d,n,i,j,k")
def dmp_zz_diophantine(F, c, A, d, p, u, K):
"""Wang/EEZ: Solve multivariate Diophantine equations. """
if not A:
S = [ [] for _ in F ]
n = dup_degree(c)
for i, coeff in enumerate(c):
if not coeff:
continue
T = dup_zz_diophantine(F, n-i, p, K)
for j, (s, t) in enumerate(zip(S, T)):
t = dup_mul_ground(t, coeff, K)
S[j] = dup_trunc(dup_add(s, t, K), p, K)
else:
n = len(A)
e = dmp_expand(F, u, K)
a, A = A[-1], A[:-1]
B, G = [], []
for f in F:
B.append(dmp_quo(e, f, u, K))
G.append(dmp_eval_in(f, a, n, u, K))
C = dmp_eval_in(c, a, n, u, K)
v = u - 1
S = dmp_zz_diophantine(G, C, A, d, p, v, K)
S = [ dmp_raise(s, 1, v, K) for s in S ]
for s, b in zip(S, B):
c = dmp_sub_mul(c, s, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
m = dmp_nest([K.one, -a], n, K)
M = dmp_one(n, K)
for k in xrange(0, d):
if dmp_zero_p(c, u):
break
M = dmp_mul(M, m, u, K)
C = dmp_diff_eval_in(c, k+1, a, n, u, K)
if not dmp_zero_p(C, v):
C = dmp_quo_ground(C, K.factorial(k+1), v, K)
T = dmp_zz_diophantine(G, C, A, d, p, v, K)
for i, t in enumerate(T):
T[i] = dmp_mul(dmp_raise(t, 1, v, K), M, u, K)
for i, (s, t) in enumerate(zip(S, T)):
S[i] = dmp_add(s, t, u, K)
for t, b in zip(T, B):
c = dmp_sub_mul(c, t, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
S = [ dmp_ground_trunc(s, p, u, K) for s in S ]
return S
@cythonized("u,v,d,dj,n,i,j,k,w")
def dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K):
"""Wang/EEZ: Parallel Hensel lifting algorithm. """
S, n, v = [f], len(A), u-1
H = list(H)
for i, a in enumerate(reversed(A[1:])):
s = dmp_eval_in(S[0], a, n-i, u-i, K)
S.insert(0, dmp_ground_trunc(s, p, v-i, K))
d = max(dmp_degree_list(f, u)[1:])
for j, s, a in zip(xrange(2, n+2), S, A):
G, w = list(H), j-1
I, J = A[:j-2], A[j-1:]
for i, (h, lc) in enumerate(zip(H, LC)):
lc = dmp_ground_trunc(dmp_eval_tail(lc, J, v, K), p, w-1, K)
H[i] = [lc] + dmp_raise(h[1:], 1, w-1, K)
m = dmp_nest([K.one, -a], w, K)
M = dmp_one(w, K)
c = dmp_sub(s, dmp_expand(H, w, K), w, K)
dj = dmp_degree_in(s, w, w)
for k in xrange(0, dj):
if dmp_zero_p(c, w):
break
M = dmp_mul(M, m, w, K)
C = dmp_diff_eval_in(c, k+1, a, w, w, K)
if not dmp_zero_p(C, w-1):
C = dmp_quo_ground(C, K.factorial(k+1), w-1, K)
T = dmp_zz_diophantine(G, C, I, d, p, w-1, K)
for i, (h, t) in enumerate(zip(H, T)):
h = dmp_add_mul(h, dmp_raise(t, 1, w-1, K), M, w, K)
H[i] = dmp_ground_trunc(h, p, w, K)
h = dmp_sub(s, dmp_expand(H, w, K), w, K)
c = dmp_ground_trunc(h, p, w, K)
if dmp_expand(H, u, K) != f:
raise ExtraneousFactors # pragma: no cover
else:
return H
@cythonized("u,mod,i,j,s_arg,negative")
def dmp_zz_wang(f, u, K, mod=None):
"""
Factor primitive square-free polynomials in `Z[X]`.
Given a multivariate polynomial `f` in `Z[x_1,...,x_n]`, which
is primitive and square-free in `x_1`, computes factorization
of `f` into irreducibles over integers.
The procedure is based on Wang's Enhanced Extended Zassenhaus
algorithm. The algorithm works by viewing `f` as a univariate
polynomial in `Z[x_2,...,x_n][x_1]`, for which an evaluation
mapping is computed::
x_2 -> a_2, ..., x_n -> a_n
where `a_i`, for `i = 2, ..., n`, are carefully chosen integers.
The mapping is used to transform `f` into a univariate polynomial
in `Z[x_1]`, which can be factored efficiently using Zassenhaus
algorithm. The last step is to lift univariate factors to obtain
true multivariate factors. For this purpose a parallel Hensel
lifting procedure is used.
**References**
1. [Wang78]_
2. [Geddes92]_
"""
ct, T = dmp_zz_factor(dmp_LC(f, K), u-1, K)
b = dmp_zz_mignotte_bound(f, u, K)
p = K(nextprime(b))
if mod is None:
if u == 1:
mod = 2
else:
mod = 1
history, configs, A, r = set([]), [], [K.zero]*u, None
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
_, H = dup_zz_factor_sqf(s, K)
r = len(H)
if r == 1:
return [f]
bad_points = set([tuple(A)])
configs = [(s, cs, E, H, A)]
except EvaluationFailed:
pass
eez_num_configs = query('EEZ_NUMBER_OF_CONFIGS')
eez_num_tries = query('EEZ_NUMBER_OF_TRIES')
eez_mod_step = query('EEZ_MODULUS_STEP')
while len(configs) < eez_num_configs:
for _ in xrange(eez_num_tries):
A = [ K(randint(-mod, mod)) for _ in xrange(u) ]
if tuple(A) not in history:
history.add(tuple(A))
else:
continue
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
except EvaluationFailed:
continue
_, H = dup_zz_factor_sqf(s, K)
rr = len(H)
if r is not None:
if rr != r: # pragma: no cover
if rr < r:
configs, r = [], rr
else:
continue
else:
r = rr
if r == 1:
return [f]
configs.append((s, cs, E, H, A))
if len(configs) == eez_num_configs:
break
else:
mod += eez_mod_step
s_norm, s_arg, i = None, 0, 0
for s, _, _, _, _ in configs:
_s_norm = dup_max_norm(s, K)
if s_norm is not None:
if _s_norm < s_norm:
s_norm = _s_norm
s_arg = i
else:
s_norm = _s_norm
i += 1
_, cs, E, H, A = configs[s_arg]
try:
f, H, LC = dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K)
factors = dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K)
except ExtraneousFactors: # pragma: no cover
if query('EEZ_RESTART_IF_NEEDED'):
return dmp_zz_wang(f, u, K, mod+1)
else:
raise ExtraneousFactors("we need to restart algorithm with better parameters")
negative, result = 0, []
for f in factors:
_, f = dmp_ground_primitive(f, u, K)
if K.is_negative(dmp_ground_LC(f, u, K)):
f = dmp_neg(f, u, K)
result.append(f)
return result
@cythonized("u,d,k")
def dmp_zz_factor(f, u, K):
"""
Factor (non square-free) polynomials in `Z[X]`.
Given a multivariate polynomial `f` in `Z[X]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
The factorization is computed by reducing the input polynomial
into a primitive square-free polynomial and factoring it using
Enhanced Extended Zassenhaus (EEZ) algorithm. Trial division
is used to recover the multiplicities of factors.
The result is returned as a tuple consisting of::
(content(f), [(f_1, k_1), ..., (f_n, k_n)])
Consider polynomial `f = 2*(x**2 - y**2)`::
>>> from sympy.polys.factortools import dmp_zz_factor
>>> from sympy.polys.domains import ZZ
>>> dmp_zz_factor([[2], [], [-2, 0, 0]], 1, ZZ)
(2, [([[1], [-1, 0]], 1), ([[1], [1, 0]], 1)])
As a result we get the following factorization::
f = 2 (x - y) (x + y)
**References**
1. [Gathen99]_
"""
if not u:
return dup_zz_factor(f, K)
if dmp_zero_p(f, u):
return K.zero, []
cont, g = dmp_ground_primitive(f, u, K)
if dmp_ground_LC(g, u, K) < 0:
cont, g = -cont, dmp_neg(g, u, K)
if all([ d <= 0 for d in dmp_degree_list(g, u) ]):
return cont, []
G, g = dmp_primitive(g, u, K)
factors = []
if dmp_degree(g, u) > 0:
g = dmp_sqf_part(g, u, K)
H = dmp_zz_wang(g, u, K)
for h in H:
k = 0
while True:
q, r = dmp_div(f, h, u, K)
if dmp_zero_p(r, u):
f, k = q, k+1
else:
break
factors.append((h, k))
for g, k in dmp_zz_factor(G, u-1, K)[1]:
factors.insert(0, ([g], k))
return cont, _sort_factors(factors)
def dup_ext_factor(f, K):
"""Factor univariate polynomials over algebraic number fields. """
n, lc = dup_degree(f), dup_LC(f, K)
f = dup_monic(f, K)
if n <= 0:
return lc, []
if n == 1:
return lc, [(f, 1)]
f, F = dup_sqf_part(f, K), f
s, g, r = dup_sqf_norm(f, K)
factors = dup_factor_list_include(r, K.dom)
if len(factors) == 1:
return lc, [(f, n//dup_degree(f))]
H = s*K.unit
for i, (factor, _) in enumerate(factors):
h = dup_convert(factor, K.dom, K)
h, _, g = dup_inner_gcd(h, g, K)
h = dup_shift(h, H, K)
factors[i] = h
factors = dup_trial_division(F, factors, K)
return lc, factors
@cythonized("u")
def dmp_ext_factor(f, u, K):
"""Factor multivariate polynomials over algebraic number fields. """
if not u:
return dup_ext_factor(f, K)
lc = dmp_ground_LC(f, u, K)
f = dmp_ground_monic(f, u, K)
if all([ d <= 0 for d in dmp_degree_list(f, u) ]):
return lc, []
f, F = dmp_sqf_part(f, u, K), f
s, g, r = dmp_sqf_norm(f, u, K)
factors = dmp_factor_list_include(r, u, K.dom)
if len(factors) == 1:
coeff, factors = lc, [f]
else:
H = dmp_raise([K.one, s*K.unit], u, 0, K)
for i, (factor, _) in enumerate(factors):
h = dmp_convert(factor, u, K.dom, K)
h, _, g = dmp_inner_gcd(h, g, u, K)
h = dmp_compose(h, H, u, K)
factors[i] = h
return lc, dmp_trial_division(F, factors, u, K)
@cythonized("i")
def dup_gf_factor(f, K):
"""Factor univariate polynomials over finite fields. """
f = dup_convert(f, K, K.dom)
coeff, factors = gf_factor(f, K.mod, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K.dom, K), k)
return K.convert(coeff, K.dom), factors
def dmp_gf_factor(f, u, K):
"""Factor multivariate polynomials over finite fields. """
raise DomainError('multivariate polynomials over %s' % K)
@cythonized("i,k,u")
def dup_factor_list(f, K0):
"""Factor polynomials into irreducibles in `K[x]`. """
j, f = dup_terms_gcd(f, K0)
if not K0.has_CharacteristicZero:
coeff, factors = dup_gf_factor(f, K0)
elif K0.is_Algebraic:
coeff, factors = dup_ext_factor(f, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dup_convert(f, K0_inexact, K0)
else:
K0_inexact = None
if K0.has_Field:
K = K0.get_ring()
denom, f = dup_clear_denoms(f, K0, K)
f = dup_convert(f, K0, K)
else:
K = K0
if K.is_ZZ:
coeff, factors = dup_zz_factor(f, K)
elif K.is_Poly:
f, u = dmp_inject(f, 0, K)
coeff, factors = dmp_factor_list(f, u, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, u, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.has_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K, K0), k)
coeff = K0.convert(coeff, K)
denom = K0.convert(denom, K)
coeff = K0.quo(coeff, denom)
if K0_inexact is not None:
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K0, K0_inexact), k)
coeff = K0_inexact.convert(coeff, K0)
if j:
factors.insert(0, ([K0.one, K0.zero], j))
return coeff, _sort_factors(factors)
def dup_factor_list_include(f, K):
"""Factor polynomials into irreducibles in `K[x]`. """
coeff, factors = dup_factor_list(f, K)
if not factors:
return [(dup_strip([coeff]), 1)]
else:
g = dup_mul_ground(factors[0][0], coeff, K)
return [(g, factors[0][1])] + factors[1:]
@cythonized("u,v,i,k")
def dmp_factor_list(f, u, K0):
"""Factor polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list(f, K0)
J, f = dmp_terms_gcd(f, u, K0)
if not K0.has_CharacteristicZero: # pragma: no cover
coeff, factors = dmp_gf_factor(f, u, K0)
elif K0.is_Algebraic:
coeff, factors = dmp_ext_factor(f, u, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dmp_convert(f, u, K0_inexact, K0)
else:
K0_inexact = None
if K0.has_Field:
K = K0.get_ring()
denom, f = dmp_clear_denoms(f, u, K0, K)
f = dmp_convert(f, u, K0, K)
else:
K = K0
if K.is_ZZ:
levels, f, v = dmp_exclude(f, u, K)
coeff, factors = dmp_zz_factor(f, v, K)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_include(f, levels, v, K), k)
elif K.is_Poly:
f, v = dmp_inject(f, u, K)
coeff, factors = dmp_factor_list(f, v, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, v, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.has_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_convert(f, u, K, K0), k)
coeff = K0.convert(coeff, K)
denom = K0.convert(denom, K)
coeff = K0.quo(coeff, denom)
if K0_inexact is not None:
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_convert(f, u, K0, K0_inexact), k)
coeff = K0_inexact.convert(coeff, K0)
for i, j in enumerate(reversed(J)):
if not j:
continue
term = {(0,)*(u-i) + (1,) + (0,)*i: K0.one}
factors.insert(0, (dmp_from_dict(term, u, K0), j))
return coeff, _sort_factors(factors)
@cythonized("u")
def dmp_factor_list_include(f, u, K):
"""Factor polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list_include(f, K)
coeff, factors = dmp_factor_list(f, u, K)
if not factors:
return [(dmp_ground(coeff, u), 1)]
else:
g = dmp_mul_ground(factors[0][0], coeff, u, K)
return [(g, factors[0][1])] + factors[1:]
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sympy/polys/factortools.py
|
Python
|
agpl-3.0
| 32,721
|
[
"Gaussian"
] |
f550c3714d85c6b2d96cc7dd157d4582be6aad628d8d71dc9d506067fe00eff5
|
"""A module containing convenient methods for general machine learning"""
from __future__ import print_function
from builtins import object
__author__ = 'wittawat'
import autograd.numpy as np
import time
class ContextTimer(object):
"""
A class used to time the execution of a code snippet.
Use it with ``with ... as ...``.
For example,
with ContextTimer() as t:
# do something
time_spent = t.secs
From https://www.huyng.com/posts/python-performance-analysis
"""
def __init__(self, verbose=False):
self.verbose = verbose
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
if self.verbose:
print('elapsed time: %f ms' % (self.secs*1000))
# end class ContextTimer
class NumpySeedContext(object):
"""
A context manager to reset the random seed by numpy.random.seed(..).
Set the seed back at the end of the block.
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
rstate = np.random.get_state()
self.cur_state = rstate
np.random.seed(self.seed)
return self
def __exit__(self, *args):
np.random.set_state(self.cur_state)
# end class NumpySeedContext
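# Illustrative sketch (not part of the original module): two draws made under
# the same seed inside the context manager should agree, while the global
# random state outside the block is restored afterwards, e.g.
#
#     >>> with NumpySeedContext(seed=7):
#     ...     a = np.random.randn(3)
#     >>> with NumpySeedContext(seed=7):
#     ...     b = np.random.randn(3)
#     >>> np.allclose(a, b)
#     True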
class ChunkIterable(object):
"""
Construct an Iterable such that each call to its iterator returns a tuple
of two indices (f, t) where f is the starting index, and t is the ending
index of a chunk. f and t are (chunk_size) apart except for the last tuple
which will always cover the rest.
"""
def __init__(self, start, end, chunk_size):
self.start = start
self.end = end
self.chunk_size = chunk_size
def __iter__(self):
s = self.start
e = self.end
c = self.chunk_size
# Probably not a good idea to use list. Waste memory.
L = list(range(s, e, c))
L.append(e)
return zip(L, L[1:])
# end ChunkIterable
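# Illustrative sketch (not part of the original module): chunking the range
# [0, 10) with chunk_size=4 should give index pairs covering the whole range,
# with the last chunk absorbing the remainder, e.g.
#
#     >>> list(ChunkIterable(0, 10, 4))
#     [(0, 4), (4, 8), (8, 10)]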
def constrain(val, min_val, max_val):
return min(max_val, max(min_val, val))
def dist_matrix(X, Y):
"""
Construct a pairwise Euclidean distance matrix of size X.shape[0] x Y.shape[0]
"""
sx = np.sum(X**2, 1)
sy = np.sum(Y**2, 1)
D2 = sx[:, np.newaxis] - 2.0*np.dot(X, Y.T) + sy[np.newaxis, :]
# to prevent numerical errors from taking sqrt of negative numbers
D2[D2 < 0] = 0
D = np.sqrt(D2)
return D
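# Illustrative sketch (not part of the original module): for the two points
# (0, 0) and (1, 1) the pairwise distance matrix should be
# [[0, sqrt(2)], [sqrt(2), 0]], e.g.
#
#     >>> X = np.array([[0., 0.], [1., 1.]])
#     >>> np.allclose(dist_matrix(X, X), [[0., np.sqrt(2.)], [np.sqrt(2.), 0.]])
#     True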
def meddistance(X, subsample=None, mean_on_fail=True):
"""
Compute the median of pairwise distances (not distance squared) of points
in the matrix. Useful as a heuristic for setting Gaussian kernel's width.
Parameters
----------
X : n x d numpy array
mean_on_fail: True/False. If True, use the mean when the median distance is 0.
This can happen especially when the data are discrete, e.g., 0/1, and
there are slightly more 0s than 1s, in which case the median distance can be 0.
Return
------
median distance
"""
if subsample is None:
D = dist_matrix(X, X)
Itri = np.tril_indices(D.shape[0], -1)
Tri = D[Itri]
med = np.median(Tri)
if med <= 0:
# use the mean
return np.mean(Tri)
return med
else:
assert subsample > 0
rand_state = np.random.get_state()
np.random.seed(9827)
n = X.shape[0]
ind = np.random.choice(n, min(subsample, n), replace=False)
np.random.set_state(rand_state)
# recurse just once on the subsample
return meddistance(X[ind, :], None, mean_on_fail)
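# Illustrative sketch (not part of the original module): a common use is the
# median heuristic for a Gaussian kernel bandwidth on data X (n x d), e.g.
#
#     >>> X = np.random.randn(500, 3)
#     >>> gwidth2 = meddistance(X, subsample=1000)**2  # squared kernel width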
def is_real_num(x):
"""return true if x is a real number"""
try:
float(x)
return not (np.isnan(x) or np.isinf(x))
except ValueError:
return False
def tr_te_indices(n, tr_proportion, seed=9282 ):
"""Get two logical vectors for indexing train/test points.
Return (tr_ind, te_ind)
"""
rand_state = np.random.get_state()
np.random.seed(seed)
Itr = np.zeros(n, dtype=bool)
tr_ind = np.random.choice(n, int(tr_proportion*n), replace=False)
Itr[tr_ind] = True
Ite = np.logical_not(Itr)
np.random.set_state(rand_state)
return (Itr, Ite)
def subsample_ind(n, k, seed=28):
"""
Return a list of indices to choose k out of n without replacement
"""
rand_state = np.random.get_state()
np.random.seed(seed)
ind = np.random.choice(n, k, replace=False)
np.random.set_state(rand_state)
return ind
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
"""
Fit a multivariate normal to the data X (n x d) and draw J points
from the fit.
- reg: regularizer to use with the covariance matrix
- eig_pow: raise eigenvalues of the covariance matrix to this power to construct
a new covariance matrix before drawing samples. Useful to shrink the spread
of the variance.
"""
with NumpySeedContext(seed=seed):
d = X.shape[1]
mean_x = np.mean(X, 0)
cov_x = np.cov(X.T)
if d==1:
cov_x = np.array([[cov_x]])
[evals, evecs] = np.linalg.eig(cov_x)
evals = np.maximum(0, np.real(evals))
assert np.all(np.isfinite(evals))
evecs = np.real(evecs)
shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
return V
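# Illustrative sketch (not part of the original module): drawing J points from
# a Gaussian fitted to X (n x d) should return an array of shape (J, d), e.g.
#
#     >>> X = np.random.randn(200, 2)
#     >>> V = fit_gaussian_draw(X, J=5, seed=3)
#     >>> V.shape
#     (5, 2)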
|
wittawatj/interpretable-test
|
freqopttest/util.py
|
Python
|
mit
| 5,510
|
[
"Gaussian"
] |
6ea2cc8fc08f6e2b8d2f0633336b9ec06a72df73f6113007841da1c1223e33d1
|
import numpy as np
from numba import vectorize, float32, float64
from math import exp
def hist2d(x, y, nbins=30, norm=False, rx=0.08):
''' Computes the 2D histogram of the data and the x,y coordinates
of the middle of the bins. OPTIONS: nbins (number of bins), norm,
rx (range in x as % of the min/max, previous default 0.08).'''
# Increments in x and y
xinc = (np.max(x) - np.min(x)) / nbins
yinc = (np.max(y) - np.min(y)) / nbins
# Define bin edges: either from rx or 1.5 increments,
# whichever is wider
xlow = min(np.min(x) * (1 - rx), np.min(x) - 1.5 * xinc)
xhigh = max(np.max(x) * (1 + rx), np.max(x) + 1.5 * xinc)
ylow = min(np.min(y) * (1 - rx), np.min(y) - 1.5 * yinc)
yhigh = max(np.max(y) * (1 + rx), np.max(y) + 1.5 * yinc)
r = [[xlow, xhigh], [ylow, yhigh]]
hist, xi, yi = np.histogram2d(x, y, bins=nbins, range=r, normed=norm)
# Take the middle point of the bins
xbin = (xi[1:] + xi[:-1]) / 2.
ybin = (yi[1:] + yi[:-1]) / 2.
return np.transpose(hist), xbin, ybin
def stat2d(x, y, x_range=None, nbins=10, percentiles=[25, 50, 75]):
"""
Computes the median and two quartiles for 2D data by binning in the
x axis. Range of binning set by x_range, number of bins by nbins.
Parameters
----------
x: 1-D array
Abscissa for the data.
y: 1-D array
Values for the data.
x_range: 2-element list/array
Min and max for the binning. If not set, will use x.max() and x.min()
nbins: integer
Number of bins to use.
percentiles: list with 3 elements
Percentile values for q1, q2, q3
Returns
-------
xbins: 1-D array
Contains the abscissa for the bins
q1: 1-D array
First quartile
q2: 1-D array
Second quartile (median)
q3: 1-D array
Third quartile.
"""
if x_range is None:
x_range = [np.min(x), np.max(x)]
bins = np.linspace(x_range[0], x_range[1], nbins + 1)
xbins = 0.5 * (bins[1:] + bins[:-1])
q1 = np.zeros(nbins)
q2 = np.zeros(nbins)
q3 = np.zeros(nbins)
for i in range(nbins):
idx = (x >= bins[i]) & (x < bins[i + 1])
sdata = y[idx]
q1[i] = np.percentile(sdata, percentiles[0])
q2[i] = np.percentile(sdata, percentiles[1])
q3[i] = np.percentile(sdata, percentiles[2])
return xbins, q1, q2, q3
def planck(w, T, units='cgs_AA'):
''' Returns the Planck function for wavelength in nm and T in Kelvin.
Units depend on input:
cgs_AA: erg s^-1 cm^-2 AA^-1 sr^-1
cgs_nm: erg s^-1 cm^-2 nm^-1 sr^-1
Hz : J s^-1 m^-2 Hz^-1 sr^-1
If using the brightness temperature units, then w must be a single value
(T can be an array).
For the solid-angle-integrated quantity, multiply by pi.'''
from scipy.constants import c, h, k
JOULE_TO_ERG = 1.e7
CM_TO_M = 1.e-2
NM_TO_M = 1.e-9
AA_TO_M = 1.e-10
if units in ['cgs_AA', 'cgs_nm']:
wave = w * 10. # to AA
c /= AA_TO_M
h *= JOULE_TO_ERG
k *= JOULE_TO_ERG
iplanck = 2 * h * c**2 / wave**5 / (np.exp(h * c / (wave * k * T)) - 1)
# convert from AA-2 to cm-2
iplanck *= (1e8)**2
if units == 'cgs_nm':
iplanck *= 10.
elif units == 'Hz':
wave = w * NM_TO_M # wave in m
iplanck = 2 * h * c / wave**3 / (np.exp(h * c / (wave * k * T)) - 1)
else:
raise ValueError('planck: invalid units (%s)' % units)
return iplanck
def int2bt(inu, w):
''' Converts from radiation intensity (in J s^-1 m^-2 Hz^-1 sr^-1 units)
to brightness temperature units (in K), at a given wavelength w
(in nm). '''
from scipy.constants import c, h, k
return h * c / (w * 1e-9 * k * np.log(2 * h * c / ((w * 1e-9)**3 * inu) + 1))
def trapz2d(z, x=None, y=None, dx=1., dy=1.):
''' Integrates a regularly spaced 2D grid using the composite
trapezium rule.
IN:
z : 2D array
x : (optional) grid values for x (1D array)
y : (optional) grid values for y (1D array)
dx: if x is not supplied, set it to the x grid interval
dy: if y is not supplied, set it to the y grid interval
--Tiago, 20090501
'''
if x is not None:
dx = (x[-1] - x[0]) / (np.shape(x)[0] - 1)
if y is not None:
dy = (y[-1] - y[0]) / (np.shape(y)[0] - 1)
s1 = z[0, 0] + z[-1, 0] + z[0, -1] + z[-1, -1]
s2 = np.sum(z[1:-1, 0]) + np.sum(z[1:-1, -1]) + \
np.sum(z[0, 1:-1]) + np.sum(z[-1, 1:-1])
s3 = np.sum(z[1:-1, 1:-1])
return 0.25 * dx * dy * (s1 + 2 * s2 + 4 * s3)
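# Illustrative sketch (not part of the original module): integrating a constant
# z = 1 on a 3 x 3 grid with unit spacing covers the square [0, 2] x [0, 2],
# so the result should be 4, e.g.
#
#     >>> trapz2d(np.ones((3, 3)), dx=1., dy=1.)
#     4.0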
def translate(data, z, mu, phi, dx=1, dy=1):
"""
Horizontally rotates a 3D array with periodic horizontal boundaries
by a polar and azimuthal angle. Uses cubic splines, modifies data in-place
(therefore the rotation leads to an array with the same dimensions).
Parameters
----------
data : 3D array, 32-bit float, F contiguous
Array with values. Last index should be height, the
non-periodic dimension. The rotation keeps the top and
bottom layers
z : 1D array, 32-bit float
Array with heights.
mu : float
Cosine of polar angle.
phi : float
Azimuthal angle in radians.
dx : float, optional
Grid separation in x dimension (same units as height). Default is 1.
dy : float, optional
Grid separation in y dimension (same units as height). Default is 1.
Returns
-------
None, data are modified in-place.
"""
from math import acos, sin, cos
from .trnslt import trnslt
assert data.shape[-1] == z.shape[0]
assert data.flags['F_CONTIGUOUS']
assert data.dtype == np.dtype("float32")
theta = acos(mu)
sinth = sin(theta)
tanth = sinth / mu
cosphi = cos(phi)
sinphi = sin(phi)
dxdz = tanth * cosphi
dydz = tanth * sinphi
trnslt(dx, dy, z, data, dxdz, dydz)
@vectorize([float32(float32, float32), float64(float64, float64)])
def voigt(a, v):
"""
Returns the Voigt function:
H(a,v) = a/pi * \int_{-Inf}^{+Inf} exp(-y**2)/[(v-y)**2 + a**2] dy
Based on approximation from old Fortran routine voigtv from Aake Nordlund.
Makes use of numba vectorize, can be used as numpy ufunc.
Parameters
----------
a : scalar or n-D array (float)
Parameter 'a' in Voigt function, typically a scalar. If n-D, must
have the same shape as v.
v : scalar or n-D array (float)
Velocity or Doppler value for Voigt function, typically a 1D array.
Returns
-------
h : scalar or n-D array (float)
Voigt function. Same shape and type as inputs.
"""
a0 = 122.607931777104326
a1 = 214.382388694706425
a2 = 181.928533092181549
a3 = 93.155580458138441
a4 = 30.180142196210589
a5 = 5.912626209773153
a6 = 0.564189583562615
b0 = 122.607931773875350
b1 = 352.730625110963558
b2 = 457.334478783897737
b3 = 348.703917719495792
b4 = 170.354001821091472
b5 = 53.992906912940207
b6 = 10.479857114260399
if a == 0:
return exp(-v ** 2)
z = v * 1j + a
h = (((((((a6 * z + a5) * z + a4) * z + a3) * z + a2) * z + a1) * z + a0) /
(((((((z + b6) * z + b5) * z + b4) * z + b3) * z + b2) * z + b1) * z + b0))
return h.real
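# Illustrative sketch (not part of the original module): with a = 0 the damping
# vanishes and the profile reduces to a pure Gaussian exp(-v**2), e.g.
#
#     >>> round(float(voigt(0.0, 1.0)), 6)   # exp(-1)
#     0.367879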
def voigt_sigma(sigma, gamma, r):
''' Returns the Voigt function, defined in terms of sigma (Gaussian sdev)
and gamma (Lorentzian FWHM). '''
tt = np.sqrt(2) * sigma
v = r / tt
a = gamma / (2 * tt)
return voigt(a, v) / (tt * np.sqrt(np.pi))
def stat(a):
''' Returns some statistics on a given array '''
mm = np.nanmean(a)
ss = float(np.nanstd(a)) # float for the memmap bug
mi = np.nanmin(a)
ma = np.nanmax(a)
print(('aver = %.3e' % mm))
print(('rms = %.3e rms/aver = %.3e' % (ss, ss / mm)))
print(('min = %.3e min/aver = %.3e' % (mi, mi / mm)))
print(('max = %.3e max/aver = %.3e' % (ma, ma / mm)))
def bin_quantities(x, y, bins, func, *args, **kwargs):
"""
Perform a certain function on x-bins of a x/y relation.
Parameters
----------
x - n-D array
Array with the abscissa. If more than 1D, it will be flattened.
y - n-D array
Array with the coordinates. If more than 1D, it will be flattened.
bins - array-like (1D)
Values for the abscissa bins.
func - [numpy] function
Function to operate. Must work on arrays.
*args, **kwargs: arguments and keyword arguments for func.
Returns
-------
result - 1D array
Array with same shape as bins, containing the results of running
func in the different regions.
"""
xx = x.ravel()
yy = y.ravel()
idx = np.digitize(xx, bins)
result = np.zeros(len(bins))
for i in range(len(bins)):
if np.sum(idx == i) > 0:
result[i] = func(yy[idx == i], *args, **kwargs)
return result
def peakdetect(y_axis, x_axis=None, lookahead=300, delta=0):
"""
Tiago: downloaded from https://gist.github.com/1178136
Converted from/based on a MATLAB script at:
http://billauer.co.il/peakdet.html
function for detecting local maxima and minima in a signal.
Discovers peaks by searching for values which are surrounded by lower
or larger values for maxima and minima respectively
keyword arguments:
y_axis -- A list containing the signal over which to find peaks
x_axis -- (optional) A x-axis whose values correspond to the y_axis list
and is used in the return to specify the position of the peaks. If
omitted an index of the y_axis is used. (default: None)
lookahead -- (optional) distance to look ahead from a peak candidate to
determine if it is the actual peak (default: 200)
'(sample / period) / f' where '4 >= f >= 1.25' might be a good value
delta -- (optional) this specifies a minimum difference between a peak and
the following points, before a peak may be considered a peak. Useful
to hinder the function from picking up false peaks towards the end of
the signal. To work well delta should be set to delta >= RMSnoise * 5.
(default: 0)
delta function causes a 20% decrease in speed, when omitted
Correctly used it can double the speed of the function
return -- two lists [max_peaks, min_peaks] containing the positive and
negative peaks respectively. Each cell of the lists contains a tuple
of: (position, peak_value)
to get the average peak value do: np.mean(max_peaks, 0)[1] on the
results to unpack one of the lists into x, y coordinates do:
x, y = zip(*tab)
"""
max_peaks = []
min_peaks = []
dump = [] # Used to pop the first hit which almost always is false
# Store data length for later use
length = len(y_axis)
# if x_axis was omitted, use the indices of y_axis (as documented above)
if x_axis is None: x_axis = np.arange(length)
# Perform some checks
if lookahead < 1:
raise ValueError("Lookahead must be '1' or above in value")
if not (np.isscalar(delta) and delta >= 0):
raise ValueError("delta must be a positive number")
# maxima and minima candidates are temporarily stored in
# mx and mn respectively
mn, mx = np.Inf, -np.Inf
# Only detect peak if there is 'lookahead' amount of points after it
for index, (x, y) in enumerate(zip(x_axis[:-lookahead],
y_axis[:-lookahead])):
if y > mx:
mx = y
mxpos = x
if y < mn:
mn = y
mnpos = x
# look for max
if y < mx - delta and mx != np.Inf:
# Maxima peak candidate found
# look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index + lookahead].max() < mx:
max_peaks.append([mxpos, mx])
dump.append(True)
# set algorithm to only find minima now
mx = np.Inf
mn = np.Inf
if index + lookahead >= length:
# end is within lookahead no more peaks can be found
break
continue
# look for min
if y > mn + delta and mn != -np.Inf:
# Minima peak candidate found
# look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index + lookahead].min() > mn:
min_peaks.append([mnpos, mn])
dump.append(False)
# set algorithm to only find maxima now
mn = -np.Inf
mx = -np.Inf
if index + lookahead >= length:
# end is within lookahead no more peaks can be found
break
# Remove the false hit on the first value of the y_axis
try:
if dump[0]:
max_peaks.pop(0)
else:
min_peaks.pop(0)
del dump
except IndexError:
# no peaks were found
pass
return [np.array(max_peaks).T, np.array(min_peaks).T]
def lclxtrem(vec, in_width, maxima=False):
"""
Finds peaks in data. Converted from lclxtrem.
"""
width = abs(in_width)
# First derivative
vecp = np.diff(vec)
# Collapse the derivative to just +1, 0, or -1
vecps = np.zeros(vecp.shape, dtype='i')
vecps[vecp > 0.] = 1
vecps[vecp < 0.] = -1
# Derivative of the sign vectors
vecpps = np.diff(vecps)
# Keep the appropriate extremum
if maxima:
z = np.where(vecpps < 0)[0]
else:
z = np.where(vecpps > 0)[0]
nidx = len(z)
flags = np.ones(nidx, dtype=bool)
# Create an index vector with just the good points.
if nidx == 0:
if maxima:
idx = (vec == np.max(vec))
else:
idx = (vec == np.min(vec))
else:
idx = z + 1
# Sort the extrema (actually, the absolute value)
sidx = idx[np.argsort(np.abs(vec[idx]))[::-1]]
# Scan down the list of extrema, start with the brightest and take out
# all extrema within width of the position. Any that are too close should
# be removed from further consideration.
if width > 1:
i = 0
for i in range(nidx - 1):
if flags[i]:
flags[i + 1:][np.abs(sidx[i + 1:] - sidx[i]) <= width] = False
# The ones that survive are returned.
return np.sort(sidx[flags])
def peakdetect_lcl(y_axis, x_axis=None, lookahead=300, delta=0):
"""
Wrapper to lclxtrem to mimic the behaviour of peakdetect.
"""
maxima = lclxtrem(y_axis, lookahead, maxima=True)
minima = lclxtrem(y_axis, lookahead, maxima=False)
return np.array([x_axis[maxima], y_axis[maxima]]), \
np.array([x_axis[minima], y_axis[minima]])
def pinterp3d(x, y, new_x):
"""
pinterp3d(x, y, new_x)
Performs linear interpolation over the last dimension of a 3D array,
according to new values from a 2D array new_x. Thus, interpolate
y[i, j, :] for new_x[i, j].
Parameters
----------
x : 1-D ndarray (double type)
Array containing the x (abscissa) values. Must be monotonically
increasing.
y : 3-D ndarray (double type)
Array containing the y values to interpolate.
new_x: 2-D ndarray (double type)
Array with the new abscissas to interpolate at.
Returns
-------
new_y : 3-D ndarray
Interpolated values.
"""
nx = y.shape[0]
ny = y.shape[1]
nz = y.shape[2]
new_y = np.zeros((nx, ny))
for i in range(nx):
for j in range(ny):
for k in range(1, nz):
if x[k] > new_x[i, j]:
new_y[i, j] = (y[i, j, k] - y[i, j, k - 1]) * \
(new_x[i, j] - x[k - 1]) / \
(x[k] - x[k - 1]) + y[i, j, k - 1]
break
return new_y
def pystat2d_idx(x, idx_low, idx_high):
"""
pystat2d_idx(x, idx_low, idx_high)
"""
nx = x.shape[0]
ny = x.shape[1]
nz = x.shape[2]
res = np.empty((4, nx, ny), dtype='d')
for i in range(nx):
for j in range(ny):
if (idx_low[i, j] < idx_high[i, j]) and \
(idx_low[i, j] >= 0) and (idx_high[i, j] < nz):
arr = x[i, j, idx_low[i, j]:idx_high[i, j]]
res[0, i, j] = np.max(arr)
res[1, i, j] = np.min(arr)
res[2, i, j] = np.mean(arr)
res[3, i, j] = np.std(arr)
return res
def madmax(image):
"""
Computes a multidirectional maximum of (weighted second order difference)
using 8 directions step=2 in horizontal and vertical directions
weight=distance between extreme pixels.
Uses the algorithm from Koutchmy, O. and Koutchmy, S. (1988),
Proc. of the 10th NSO/SPO, 1989, 217, O. von der Luhe Ed.
Adapted from madmax.pro.
"""
from scipy import ndimage
nx, ny = image.shape
# Determine some constants and arrays.
h1 = 0.5
h2 = 0.2 * np.sqrt(5.)
h3 = 0.25 * np.sqrt(2.)
d = np.empty((nx, ny, 8))
mat = image.copy()
shifts = [[(0, -2), (0, 2)], [(-1, -2), (1, 2)], [(-2, -2), (2, 2)],
[(-2, -1), (2, 1)], [(-2, 0), (2, 0)], [(-2, 1), (2, -1)],
[(-2, 2), (2, -2)], [(-1, 2), (1, -2)]]
hh = [h1, h2, h3, h2, h1, h2, h3, h2]
for i, h, shft in zip(list(range(8)), hh, shifts):
s1, s2 = shft
d[..., i] = h * (mat - 0.5 *
(np.roll(np.roll(mat, s1[0], axis=0), s1[1], axis=1)
+ np.roll(np.roll(mat, s2[0], axis=0), s2[1], axis=1)))
mat = d.max(-1)
del d
# border
mat = ndimage.map_coordinates(mat[4:-4, 4:-4],
np.mgrid[0:nx - 9:nx * 1j, 0:ny - 9:ny * 1j],
order=3)
return mat - mat.min() # make matrix always positive
def make_composite_array(f1, f2, f3, l1, l2, l3):
"""
Makes a composite RGB image based on the arrays f1, f2, f3 at saturations
l1, l2, l3.
"""
a1 = f1.copy()
a1[a1 > l1] = l1
a1[a1 < 0] = 0.
a1 /= a1.max()
a2 = f2.copy()
a2[a2 > l2] = l2
a2[a2 < 0] = 0.
a2 /= a2.max()
a3 = f3.copy()
a3[a3 > l3] = l3
a3[a3 < 0] = 0.
a3 /= a3.max()
return np.transpose(np.array([a1, a2, a3]), axes=[1, 2, 0])
def make_composite_array2(f1, f2, l1, l2, color=None, negative=False):
"""
Makes a composite RGB image based on two arrays f1, f2 at saturations
l1, l2. Colour can be set as RGB tuple (each value from 0 to 255),
otherwise a default will be used. "color" sets the colour of the first
array f1, the second array f2 will have its complementary.
"""
a1 = f1.copy()
a1[a1 > l1] = l1
a1[a1 < 0] = 0.
a1 /= a1.max()
a2 = f2.copy()
a2[a2 > l2] = l2
a2[a2 < 0] = 0.
a2 /= a2.max()
if color is None:
if negative:
result = np.transpose(np.array([a1, a2, a2]), axes=[1, 2, 0])
else:
result = np.transpose(np.array([a2, a1, a1]), axes=[1, 2, 0])
else:
result = np.zeros(f1.shape + (3,))
if negative:
a1 = 1 - a1
a2 = 1 - a2
result[..., 0] = a1 * color[0] + a2 * (255 - color[0])
result[..., 1] = a1 * color[1] + a2 * (255 - color[1])
result[..., 2] = a1 * color[2] + a2 * (255 - color[2])
result /= 255.
return result
def make_composite_array3(f1, f2, f3, l1, l2, l3, color1=None, color2=None,
negative=False):
"""
Makes a composite RGB image based on three arrays f1, f2, f3, at
saturations l1, l2, l3. Colours for first two can be set as RGB tuple
(each value from 0 to 255). The third array f3 will have the complementary
colour.
"""
a1 = f1.copy()
a1[a1 > l1] = l1
a1[a1 < 0] = 0.
a1 /= a1.max()
a2 = f2.copy()
a2[a2 > l2] = l2
a2[a2 < 0] = 0.
a2 /= a2.max()
a3 = f3.copy()
a3[a3 > l3] = l3
a3[a3 < 0] = 0.
a3 /= a3.max()
# color to complement
color3 = 255 - (np.array(color1) + np.array(color2))
color3[color3 > 255] = 255.
color3[color3 < 0] = 0.
print(color3)
result = np.zeros(f1.shape + (3,))
if negative:
a1 = 1 - a1
a2 = 1 - a2
a3 = 1 - a3
result[..., 0] = a1 * color1[0] + a2 * color2[0] + a3 * color3[0]
result[..., 1] = a1 * color1[1] + a2 * color2[1] + a3 * color3[1]
result[..., 2] = a1 * color1[2] + a2 * color2[2] + a3 * color3[2]
result[result > 255] = 255.
result /= 255.
return result
def get_equidistant_points(x, y, scale=1., npts=100, order=3):
"""
Returns a set of (x, y) points approximately equidistant along a smoothed
cubic spline fit to the original curve. Equidistance is approximate (within
1% of scale) due to the numerical scheme used.
Parameters
----------
x : 1D array
Input x axis.
y : 1D array
Input y axis
scale : float, optional
Distance between points (in pixels). Default is 1.
npts : int, optional
Number of points to use. Default is 100. If npts implies total length
larger than the distance given by (x, y), then it is truncated.
Returns
-------
result : 2D array
Array of points. First index is coordinate (x, y), second index is
point number.
"""
from scipy import interpolate as interp
incr = 0.01
newy = np.arange(y.min(), y.max(), incr / scale)
newx = interp.splev(newy, interp.splrep(y, x, k=order, s=3))
res = []
st = 0
for i in range(npts):
d = np.sqrt((newx - newx[st]) ** 2 + (newy - newy[st]) ** 2)
idx = np.argmin(np.abs(d[st + 1:] - scale))
st += idx
res.append([newx[st], newy[st]])
if (newx.shape[0] - st < scale / incr): # limit of points reached
break
return np.array(res).T
def doppler_shift(wave, data, vel, order="linear"):
"""
Doppler shifts a quantity that is a function of wavelength.
Parameters
----------
wave : 1-D array
Wavelength values in nm.
data : ndarray (1-D or 2-D)
Data to shift. The last dimension should correspond to wavelength.
vel : number or 1-D array
Velocities in km/s.
order : string, optional
Interpolation order. Could be 'linear' (default), 'nearest', 'slinear',
'quadratic', or 'cubic'. The last three refer to spline interpolation of
first, second, and third order.
Returns
-------
data_shift : ndarray
Shifted values, same shape as data.
"""
from scipy.constants import c
from scipy.interpolate import interp1d
wave_shift = wave * (1. + 1.e3 / c * vel)
fill = {"linear": "extrapolate", "nearest": 0., "slinear": 0.,
"quadratic": 0., "cubic": 0}
f = interp1d(wave, data, kind=order, bounds_error=False,
fill_value=fill[order])
return f(wave_shift)
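# Illustrative sketch (not part of the original module): shifting a spectrum by
# +10 km/s resamples the data onto the red-shifted wavelength grid, e.g.
#
#     >>> wave = np.linspace(500., 501., 101)            # nm
#     >>> spec = np.exp(-(wave - 500.5)**2 / 0.01)       # toy line profile
#     >>> shifted = doppler_shift(wave, spec, vel=10.0)  # same shape as spec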
|
M1kol4j/helita
|
helita/utils/utilsmath.py
|
Python
|
bsd-3-clause
| 23,032
|
[
"Gaussian"
] |
0c09fd39878bc463c212bd2c5a7eb7a755a93cdec8b7c882be6865a801d038fd
|
# $Id$
# Copyright (c) 2009 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
Template for a plugin
======================
You can use this file to write plugins that conform to the plugin API.
Names that are supposed to be changed to more sensible values have
*TEMPLATE* in their name.
.. note::
This plugin is the canonical example for how to structure plugins that
conform to the plugin API (see docs :mod:`gromacs.analysis.core` for
details).
Plugin class
------------
.. autoclass:: TEMPLATEplugin
:members: worker_class
:undoc-members:
Worker class
------------
The worker class performs the analysis.
.. autoclass:: _TEMPLATEplugin
:members:
"""
from __future__ import with_statement
__docformat__ = "restructuredtext en"
import os.path
import warnings
import gromacs
from gromacs.utilities import AttributeDict
from gromacs.analysis.core import Worker, Plugin
import logging
logger = logging.getLogger('gromacs.analysis.plugins.TEMPLATE')
# Worker classes that are registered via Plugins (see below)
# ----------------------------------------------------------
# These must be defined before the plugins.
class _TEMPLATEplugin(Worker):
"""TEMPLATE worker class."""
def __init__(self,**kwargs):
"""Set up TEMPLATE analysis.
This is the worker class; this is where all the real analysis is done.
:Arguments:
*keyword_1*
description
*keyword_2*
description
"""
# specific arguments: take them before calling the super class that
# does not know what to do with them
## x1 = kwargs.pop('keyword_1',None)
## x2 = kwargs.pop('keyword_2', 1.234) # nm
# super class init: do this before doing anything else
# (also sets up self.parameters and self.results)
super(_TEMPLATEplugin, self).__init__(**kwargs)
# process specific parameters now and set instance variables
# ....
# self.parameters.filenames = { 'xxx': 'yyy', ....}
# ....
# self.simulation might have been set by the super class
# already; just leave this snippet at the end. Do all
# initialization that requires the simulation class in the
# _register_hook() method.
if self.simulation is not None:
self._register_hook()
def _register_hook(self, **kwargs):
"""Run when registering; requires simulation."""
super(_TEMPLATEplugin, self)._register_hook(**kwargs)
assert self.simulation is not None
# EXAMPLES:
# filename of the index file that we generate for the cysteines
## self.parameters.ndx = self.plugindir('cys.ndx')
# output filenames for g_dist, indexed by Cys resid
## self.parameters.filenames = dict(\
## [(resid, self.plugindir('Cys%d_OW_dist.txt.bz2' % resid))
## for resid in self.parameters.cysteines])
# default filename for the combined plot
## self.parameters.figname = self.figdir('mindist_S_OW')
# override 'API' methods of base class
def run(self, cutoff=None, force=False, **gmxargs):
"""Short description of what is performed.
The run method typically processes trajectories and writes data files.
"""
# filename = self.parameters.filenames['XXXX']
# if not self.check_file_exists(filename, resolve='warning') or force:
# logger.info("Analyzing TEMPLATE...")
pass
def analyze(self,**kwargs):
"""Short description of postprocessing.
The analyze method typically postprocesses the data files
generated by run. Splitting the complete analysis task into
two parts (*run* and *analyze*) is advantageous because in
this way parameters of postprocessing steps can be easily
changed without having to rerun the time consuming trajectory
analysis.
:Keywords:
*kw1*
description
:Returns: a dictionary of the results and also sets ``self.results``.
"""
from gromacs.formats import XVG
results = AttributeDict()
# - Do postprocessing here.
# - Store results of calculation in results[key] where key can be chosen freely
# but *must* be provided so that other functions can uniformly access results.
# - You are encouraged to store class instances with a plot() method; if you do
#   this then you don't have to change the plot() method below.
# For instance you can use gromacs.formats.XVG(filename) to create
# a object from a xvg file that knows how to plot itself.
self.results = results
return results
def plot(self, **kwargs):
"""Plot all results in one graph, labelled by the result keys.
:Keywords:
figure
- ``True``: save figures in the given formats
- "name.ext": save figure under this filename (``ext`` -> format)
- ``False``: only show on screen
formats : sequence
sequence of all formats that should be saved [('png', 'pdf')]
plotargs
keyword arguments for pylab.plot()
"""
import pylab
figure = kwargs.pop('figure', False)
extensions = kwargs.pop('formats', ('pdf','png'))
for name,result in self.results.items():
kwargs['label'] = name
try:
result.plot(**kwargs) # This requires result classes with a plot() method!!
except AttributeError:
warnings.warn("Sorry, plotting of result %(name)r is not implemented" % vars(),
category=UserWarning)
pylab.legend(loc='best')
if figure is True:
for ext in extensions:
self.savefig(ext=ext)
elif figure:
self.savefig(filename=figure)
# Public classes that register the worker classes
#------------------------------------------------
class TEMPLATEplugin(Plugin):
"""*TEMPLATE* plugin.
Describe the plugin in detail here. This is what the user will
see. Add citations etc.
# explicitly describe the call/init signature of the plugin here;
# note that *all* arguments are technically keyword arguments
# (this is a requirement of the API) but if there are required
# parameters feel free to write them without square brackets in
# the call signature as done for parameter_1 below.
#
# The name and simulation parameters are always present.
.. class:: TEMPLATEplugin(parameter_1[, kwparameter_2[, name[, simulation]]])
:Arguments:
*parameter_1*
required, otherwise the plugin won't be able to do anything
*kwparameter_2*
this optional parameter tunes the frobbnification
*name* : string
plugin name (used to access it)
*simulation* : instance
The :class:`gromacs.analysis.Simulation` instance that owns the plugin.
"""
worker_class = _TEMPLATEplugin
|
pslacerda/GromacsWrapper
|
gromacs/analysis/plugins/template_plugin.py
|
Python
|
gpl-3.0
| 7,235
|
[
"Gromacs"
] |
8d8909121f876a7d8bd6f54ca1354f7902fc05ec84c63415018e3343db88a14b
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
from pyscf import lib
from pyscf.tdscf import uks
from pyscf.pbc.tdscf.uhf import TDA
from pyscf.pbc.tdscf.uhf import TDHF as TDDFT
RPA = TDUKS = TDDFT
class TDDFTNoHybrid(uks.TDDFTNoHybrid):
def gen_vind(self, mf):
vind, hdiag = uks.TDDFTNoHybrid.gen_vind(self, mf)
def vindp(x):
with lib.temporary_env(mf, exxdiv=None):
return vind(x)
return vindp, hdiag
def nuc_grad_method(self):
raise NotImplementedError
def tddft(mf):
'''Driver to create TDDFT or TDDFTNoHybrid object'''
if mf._numint.libxc.is_hybrid_xc(mf.xc):
return TDDFT(mf)
else:
return TDDFTNoHybrid(mf)
from pyscf.pbc import dft
dft.uks.UKS.TDA = lib.class_as_method(TDA)
dft.uks.UKS.TDHF = None
dft.uks.UKS.TDDFT = tddft
#dft.rks.RKS.dTDA = lib.class_as_method(dTDA)
#dft.rks.RKS.dRPA = lib.class_as_method(dRPA)
|
sunqm/pyscf
|
pyscf/pbc/tdscf/uks.py
|
Python
|
apache-2.0
| 1,608
|
[
"PySCF"
] |
5b60785a6dd8905439f32ad1e6efd8eb00e6a685eadb48d48667965ee683fc0a
|
# Copyright 2013 by Zheng Ruan (zruan1991@gmail.com).
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code for Codon Alphabet.
CodonAlphabet class is inherited from Alphabet class. It is an
alphabet for CodonSeq class.
"""
import copy
try:
from itertools import izip
except ImportError:
izip = zip
from Bio.Alphabet import IUPAC, Gapped, HasStopCodon, Alphabet
from Bio.Data.CodonTable import generic_by_id
default_codon_table = copy.deepcopy(generic_by_id[1])
def get_codon_alphabet(alphabet, gap="-", stop="*"):
"""Gets alignment alphabet for codon alignment.
Only nucleotide alphabet is accepted. Raise an error when the type of
alphabet is incompatible.
"""
from Bio.Alphabet import NucleotideAlphabet
if isinstance(alphabet, NucleotideAlphabet):
alpha = alphabet
if gap:
alpha = Gapped(alpha, gap_char=gap)
if stop:
alpha = HasStopCodon(alpha, stop_symbol=stop)
else:
raise TypeError("Only Nuclteotide Alphabet is accepted!")
return alpha
default_alphabet = get_codon_alphabet(IUPAC.unambiguous_dna)
class CodonAlphabet(Alphabet):
"""Generic Codon Alphabet with a size of three"""
size = 3
letters = None
name = ''
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.names[0])
def get_codon_alphabet(codon_table, gap_char="-"):
letters = list(codon_table.forward_table.keys())
letters.extend(codon_table.stop_codons)
letters.extend(codon_table.start_codons)
if gap_char:
letters.append(gap_char * 3)
generic_codon_alphabet = CodonAlphabet()
generic_codon_alphabet.letters = letters
generic_codon_alphabet.gap_char = '-'
generic_codon_alphabet.names = codon_table.names
return generic_codon_alphabet
default_codon_alphabet = get_codon_alphabet(default_codon_table)
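# Illustrative sketch (not part of the original module): the generated alphabet
# lists codons as three-letter strings, so a start codon such as 'ATG' should
# be among its letters, e.g.
#
#     >>> alpha = get_codon_alphabet(default_codon_table, gap_char="-")
#     >>> 'ATG' in alpha.letters
#     True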
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
zjuchenyuan/BioWeb
|
Lib/Bio/codonalign/codonalphabet.py
|
Python
|
mit
| 2,100
|
[
"Biopython"
] |
1f310d89e20af243d2112849ad25874ad014aca76de9b10be77433e141a11034
|
#########################################################
# YAM(BO)PY(THON) Library
#
# Generation of Yambo input files using python
#
# Authors: A Molina-Sanchez, HPC Miranda
#
# January 2016
#########################################################
# Bethe-Salpeter spectra calculations at given times
# of a Real-Time simulation
# Author: Alejandro Molina-Sanchez
#########################################################
#########################################################
from __future__ import print_function
from yambopy import *
from schedulerpy import *
############## SETTINGS ##############
yambo_module = 'yambo/intel-4.1'
yambo_rt = 'yambo_rt'
source = 'QSSIN-1e+03-70.0fs-2.0eV-0K'
folder_rt = 'rt-24x24'
folder_gw = 'gw-24x24' # Where the ndb.QP is located (see l. 93)
BSRTmode = 'XRK' # X: Screening, R: Residuals, K: Kernel
CSRTmode = 'XG' # X: Screening, G: GFs
QPdata = 'N' # E: Equilibrium QPs, N: Non-Equilibrium QPs, L: Scissor operator
bs_nodes = 1
bs_cores = 12
time_probe = range(0,610,150)
#time_probe=(0,)
print(time_probe)
##########################################################
# Scissors Operator
# GW for 6x6 12x12 18x18 24x24 30x30 42x42 51x51
gw = [2.0, 1.41334, 1.16886, 1.04588, 0.98218, 0.9214, 0.8995]
##########################################################
bs = YamboIn('%s -r -b -o b -k sex -y d -Q'%yambo_rt,folder=folder_rt) # BS
#bs = YamboIn('%s -o g -Q'%yambo_rt,vim=False) # BS
bs['DBsIOoff'] = ''
bs['BSKmod'] = "SEX" # [BSE] IP/Hartree/HF/ALDA/SEX/BSfxc
bs['X_all_q_ROLEs'] = 'q.k.c.v'
bs['X_all_q_CPU'] = '1.%d.2.1'%(bs_nodes*bs_cores/2) # bs_nodes bs_cores
bs['BS_ROLEs'] = 'k.eh.t'
bs['BS_CPU'] = '%d.1.1'%(bs_nodes*bs_cores)
bs['RandQpts'] = 1000000
bs['RandGvec'] = [ 1, 'RL' ]
bs['CUTGeo'] = "box z"
bs['CUTBox'] = [ 0.00, 0.00, 38.0]
# Common variables
#bs['KfnQP_E'] = [gw[3], 1.00, 1.00] # [EXTQP BSK BSS] E parameters (c/v) eV|adim|adim
bs['KfnQP_E'] = [0.0, 1.00, 1.00] # [EXTQP BSK BSS] E parameters (c/v) eV|adim|adim
bs['FFTGvecs'] = [ 20 , 'Ha' ]
bs['BSENGexx'] = [ 20 , 'Ha' ]
bs['BSENGBlk'] = [ 1000 , 'mHa' ]
bs['NGsBlkXs'] = [ 1000 , 'mHa' ]
bs['BndsRnXs'] = [1 ,60]
bs['BSEBands'] = [25,28]
bs['Gauge'] = "length"
bs['BSEmod'] = "causal"
bs['BSSmod'] = "d"
bs['BEnRange'] = [ [0.0 , 4.0] , 'eV' ] # Energy range spectra
bs['BLongDir'] = [ 1.0, 0.0, 0.0 ] # [BSS] [cc] Electric Field
bs['BDmRange'] = [ [0.100 , 0.100] , 'eV' ] # Width
bs['BEnSteps'] = 1000 # Energy steps
bs.arguments.append('WRbsWF')
#bs.arguments.append('ForceEqTrans')
# Submission of the jobs for each time
print('Running BSE calculations...')
print('RT source calculation: %s \n' % source)
for time in time_probe:
print('Time of carriers database %d' % time)
if 'X' in BSRTmode:
bs['XfnRTdb'] = 'f @ %d fs < ./pulse/ndb.RT_carriers' % ( time )
if 'R' in BSRTmode:
bs['RfnRTdb'] = 'f @ %d fs < ./pulse/ndb.RT_carriers' % ( time )
if 'K' in BSRTmode:
bs['KfnRTdb'] = 'f @ %d fs < ./pulse/ndb.RT_carriers' % ( time )
if 'E' in QPdata:
bs['KfnQPdb'] = 'E < ../../%s/ndb.QP' % folder_gw # GW database
namebs = 'B-%s-%s-t%d' % ( BSRTmode, QPdata, time )
if 'N' in QPdata:
namebs = 'B-%s-%s-t%d' % ( BSRTmode, CSRTmode, time )
name_merged = 'M-%s-t%d' % ( CSRTmode, time )
bs['KfnQPdb'] = 'E < ./%s/ndb.QP' % name_merged # GW + NEQ_COHSEX - EQ_COHSEX
if 'L' in QPdata:
namebs = 'B-L-t%d' % ( time )
print('Use LDA+Scissor')
bs.write('%s/%s/%s.in' %(folder_rt, source, namebs))
yambo = oarsub(nodes=bs_nodes,core=bs_cores,dependent=0,name='bse-rt',walltime="10:00:00")
yambo.add_command('module load %s'%yambo_module)
yambo.add_command('export OMP_NUM_THREADS=1')
yambo.add_command('cd %s/%s ; mpirun -hostfile \$OAR_NODEFILE %s -F %s.in -J %s -C %s -I \'../\''%(folder_rt,source,yambo_rt,namebs,namebs,namebs))
yambo.write('%s/%s/%s.ll' % (folder_rt, source, namebs))
yambo.run()
|
henriquemiranda/yambopy
|
scripts/realtime/rt-bse.py
|
Python
|
bsd-3-clause
| 4,146
|
[
"Yambo"
] |
4c2ebec02fb921f8c4a8d608b96a4fbfa600939c9335f75ce4c3f627dc16369c
|
# -*- coding:utf-8 -*-
## src/chat_control.py
##
## Copyright (C) 2006 Dimitur Kirov <dkirov AT gmail.com>
## Copyright (C) 2006-2017 Yann Leboulanger <asterix AT lagaule.org>
## Copyright (C) 2006-2008 Jean-Marie Traissard <jim AT lapin.org>
## Nikos Kouremenos <kourem AT gmail.com>
## Travis Shirk <travis AT pobox.com>
## Copyright (C) 2007 Lukas Petrovicky <lukas AT petrovicky.net>
## Julien Pivotto <roidelapluie AT gmail.com>
## Copyright (C) 2007-2008 Brendan Taylor <whateley AT gmail.com>
## Stephan Erb <steve-e AT h3c.de>
## Copyright (C) 2008 Jonathan Schleifer <js-gajim AT webkeks.org>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
import os
import time
import gtk
import pango
import gobject
import gtkgui_helpers
import gui_menu_builder
import message_control
import dialogs
import history_window
import notify
import re
from common import events
from common import gajim
from common import helpers
from common import exceptions
from common import ged
from common import i18n
from message_control import MessageControl
from conversation_textview import ConversationTextview
from message_textview import MessageTextView
from common.stanza_session import EncryptedStanzaSession, ArchivingStanzaSession
from common.contacts import GC_Contact
from common.logger import constants
from nbxmpp.protocol import NS_XHTML, NS_XHTML_IM, NS_FILE, NS_MUC
from nbxmpp.protocol import NS_ESESSION
from nbxmpp.protocol import NS_JINGLE_RTP_AUDIO, NS_JINGLE_RTP_VIDEO
from nbxmpp.protocol import NS_JINGLE_ICE_UDP, NS_JINGLE_FILE_TRANSFER
from nbxmpp.protocol import NS_CHATSTATES
from common.connection_handlers_events import MessageOutgoingEvent
from common.exceptions import GajimGeneralException
from command_system.implementation.middleware import ChatCommandProcessor
from command_system.implementation.middleware import CommandTools
from command_system.implementation.hosts import ChatCommands
# Here we load the module with the standard commands, so they are being detected
# and dispatched.
from command_system.implementation.standard import StandardChatCommands
from command_system.implementation.execute import Execute, Show
try:
import gtkspell
HAS_GTK_SPELL = True
except ImportError:
HAS_GTK_SPELL = False
################################################################################
class ChatControlBase(MessageControl, ChatCommandProcessor, CommandTools):
"""
A base class containing a banner, ConversationTextview, MessageTextView
"""
keymap = gtk.gdk.keymap_get_default()
try:
keycode_c = keymap.get_entries_for_keyval(gtk.keysyms.c)[0][0]
except TypeError:
keycode_c = 54
try:
keycode_ins = keymap.get_entries_for_keyval(gtk.keysyms.Insert)[0][0]
except TypeError:
keycode_ins = 118
def make_href(self, match):
url_color = gajim.config.get('urlmsgcolor')
url = match.group()
if not '://' in url:
url = 'http://' + url
return '<a href="%s"><span color="%s">%s</span></a>' % (url,
url_color, match.group())
def get_font_attrs(self):
"""
Get pango font attributes for banner from theme settings
"""
theme = gajim.config.get('roster_theme')
bannerfont = gajim.config.get_per('themes', theme, 'bannerfont')
bannerfontattrs = gajim.config.get_per('themes', theme, 'bannerfontattrs')
if bannerfont:
font = pango.FontDescription(bannerfont)
else:
font = pango.FontDescription('Normal')
if bannerfontattrs:
# B attribute is set by default
if 'B' in bannerfontattrs:
font.set_weight(pango.WEIGHT_HEAVY)
if 'I' in bannerfontattrs:
font.set_style(pango.STYLE_ITALIC)
font_attrs = 'font_desc="%s"' % font.to_string()
# in case there is no font specified we use x-large font size
if font.get_size() == 0:
font_attrs = '%s size="x-large"' % font_attrs
font.set_weight(pango.WEIGHT_NORMAL)
font_attrs_small = 'font_desc="%s" size="small"' % font.to_string()
return (font_attrs, font_attrs_small)
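# Number of pending (unread) events for this contact, resource-qualified for
# private chats; both 'printed_<type>' and raw '<type>' events are counted.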
def get_nb_unread(self):
jid = self.contact.jid
if self.resource:
jid += '/' + self.resource
type_ = self.type_id
return len(gajim.events.get_events(self.account, jid, ['printed_' + type_,
type_]))
def draw_banner(self):
"""
Draw the fat line at the top of the window that houses the icon, jid, etc
Derived types MAY implement this.
"""
self.draw_banner_text()
self._update_banner_state_image()
gajim.plugin_manager.gui_extension_point('chat_control_base_draw_banner',
self)
def update_toolbar(self):
"""
update state of buttons in toolbar
"""
self._update_toolbar()
gajim.plugin_manager.gui_extension_point(
'chat_control_base_update_toolbar', self)
def draw_banner_text(self):
"""
Derived types SHOULD implement this
"""
pass
def update_ui(self):
"""
Derived types SHOULD implement this
"""
self.draw_banner()
def repaint_themed_widgets(self):
"""
Derived types MAY implement this
"""
self._paint_banner()
self.draw_banner()
def _update_banner_state_image(self):
"""
Derived types MAY implement this
"""
pass
def _update_toolbar(self):
"""
Derived types MAY implement this
"""
pass
def _nec_our_status(self, obj):
if self.account != obj.conn.name:
return
if obj.show == 'offline' or (obj.show == 'invisible' and \
obj.conn.is_zeroconf):
self.got_disconnected()
else:
# Other code rejoins all GCs, so we don't do it here
if not self.type_id == message_control.TYPE_GC:
self.got_connected()
if self.parent_win:
self.parent_win.redraw_tab(self)
def _nec_ping_sent(self, obj):
if self.contact != obj.contact:
return
self.print_conversation(_('Ping?'), 'status')
def _nec_ping_error(self, obj):
if self.contact != obj.contact:
return
self.print_conversation(_('Error.'), 'status')
def handle_message_textview_mykey_press(self, widget, event_keyval,
event_keymod):
"""
Derived types SHOULD implement this rather than connecting to the event
itself
"""
event = gtk.gdk.Event(gtk.gdk.KEY_PRESS)
event.keyval = event_keyval
event.state = event_keymod
event.time = 0
_buffer = widget.get_buffer()
start, end = _buffer.get_bounds()
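# Tab completion for chat commands: when the buffer holds a single word that
# starts with the command prefix (a doubled prefix escapes it), cycle through
# the command names matching what was typed on successive Tab presses.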
if event.keyval == gtk.keysyms.Tab:
position = _buffer.get_insert()
end = _buffer.get_iter_at_mark(position)
text = _buffer.get_text(start, end, False)
text = text.decode('utf8')
splitted = text.split()
if (text.startswith(self.COMMAND_PREFIX) and not
text.startswith(self.COMMAND_PREFIX * 2) and len(splitted) == 1):
text = splitted[0]
bare = text.lstrip(self.COMMAND_PREFIX)
if len(text) == 1:
self.command_hits = []
for command in self.list_commands():
for name in command.names:
self.command_hits.append(name)
else:
if (self.last_key_tabs and self.command_hits and
self.command_hits[0].startswith(bare)):
self.command_hits.append(self.command_hits.pop(0))
else:
self.command_hits = []
for command in self.list_commands():
for name in command.names:
if name.startswith(bare):
self.command_hits.append(name)
if self.command_hits:
_buffer.delete(start, end)
_buffer.insert_at_cursor(self.COMMAND_PREFIX + self.command_hits[0] + ' ')
self.last_key_tabs = True
return True
self.last_key_tabs = False
def status_url_clicked(self, widget, url):
helpers.launch_browser_mailer('url', url)
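# Security labels (XEP-0258): the combo box is filled from the label
# catalogue the server provides for this contact. The catalogue request is
# asynchronous, so the combo stays hidden until on_seclabels_ready() fires.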
def setup_seclabel(self, combo):
self.seclabel_combo = combo
self.seclabel_combo.hide()
self.seclabel_combo.set_no_show_all(True)
lb = gtk.ListStore(str)
self.seclabel_combo.set_model(lb)
cell = gtk.CellRendererText()
cell.set_property('xpad', 5) # padding for status text
self.seclabel_combo.pack_start(cell, True)
# text to show is in the first column of the liststore
self.seclabel_combo.add_attribute(cell, 'text', 0)
if gajim.connections[self.account].seclabel_supported:
gajim.connections[self.account].seclabel_catalogue(self.contact.jid, self.on_seclabels_ready)
def on_seclabels_ready(self):
lb = self.seclabel_combo.get_model()
lb.clear()
i = 0
sel = 0
catalogue = gajim.connections[self.account].seclabel_catalogues[
self.contact.jid]
for label in catalogue[2]:
lb.append([label])
if label == catalogue[3]:
sel = i
i += 1
self.seclabel_combo.set_active(sel)
self.seclabel_combo.set_no_show_all(False)
self.seclabel_combo.show_all()
def __init__(self, type_id, parent_win, widget_name, contact, acct,
resource=None):
# Undo needs this variable to know if space has been pressed.
# Initialize it to True so empty textview is saved in undo list
self.space_pressed = True
if resource is None:
# We very likely got a contact with a random resource.
# This is bad, we need the highest for caps etc.
c = gajim.contacts.get_contact_with_highest_priority(acct,
contact.jid)
if c and not isinstance(c, GC_Contact):
contact = c
MessageControl.__init__(self, type_id, parent_win, widget_name,
contact, acct, resource=resource)
widget = self.xml.get_object('history_button')
# set document-open-recent icon for history button
if gtkgui_helpers.gtk_icon_theme.has_icon('document-open-recent'):
if widget_name == 'groupchat_control':
img = self.xml.get_object('image8')
else:
img = self.xml.get_object('image5')
img.set_from_icon_name('document-open-recent', gtk.ICON_SIZE_MENU)
id_ = widget.connect('clicked', self._on_history_menuitem_activate)
self.handlers[id_] = widget
# when/if we do XHTML we will put formatting buttons back
widget = self.xml.get_object('emoticons_button')
widget.set_sensitive(False)
id_ = widget.connect('clicked', self.on_emoticons_button_clicked)
self.handlers[id_] = widget
# Create banner and connect signals
widget = self.xml.get_object('banner_eventbox')
id_ = widget.connect('button-press-event',
self._on_banner_eventbox_button_press_event)
self.handlers[id_] = widget
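# Regex used to linkify URLs in the status line: it matches either a bare
# 'www.' host or any 'scheme://' prefix and deliberately leaves trailing
# punctuation out of the match, e.g. in 'see www.example.org/page).' only
# 'www.example.org/page' is linked.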
self.urlfinder = re.compile(
r"(www\.(?!\.)|[a-z][a-z0-9+.-]*://)[^\s<>'\"]+[^!,\.\s<>\)'\"\]]")
self.banner_status_label = self.xml.get_object('banner_label')
id_ = self.banner_status_label.connect('populate_popup',
self.on_banner_label_populate_popup)
self.handlers[id_] = self.banner_status_label
# Init DND
self.TARGET_TYPE_URI_LIST = 80
self.dnd_list = [('text/uri-list', 0, self.TARGET_TYPE_URI_LIST),
('MY_TREE_MODEL_ROW', gtk.TARGET_SAME_APP, 0)]
id_ = self.widget.connect('drag_data_received',
self._on_drag_data_received)
self.handlers[id_] = self.widget
self.widget.drag_dest_set(gtk.DEST_DEFAULT_MOTION |
gtk.DEST_DEFAULT_HIGHLIGHT |
gtk.DEST_DEFAULT_DROP,
self.dnd_list, gtk.gdk.ACTION_COPY)
# Create textviews and connect signals
self.conv_textview = ConversationTextview(self.account)
id_ = self.conv_textview.connect('quote', self.on_quote)
self.handlers[id_] = self.conv_textview.tv
id_ = self.conv_textview.tv.connect('key_press_event',
self._conv_textview_key_press_event)
self.handlers[id_] = self.conv_textview.tv
# FIXME: DND on non editable TextView, find a better way
self.drag_entered = False
id_ = self.conv_textview.tv.connect('drag_data_received',
self._on_drag_data_received)
self.handlers[id_] = self.conv_textview.tv
id_ = self.conv_textview.tv.connect('drag_motion', self._on_drag_motion)
self.handlers[id_] = self.conv_textview.tv
id_ = self.conv_textview.tv.connect('drag_leave', self._on_drag_leave)
self.handlers[id_] = self.conv_textview.tv
self.conv_textview.tv.drag_dest_set(gtk.DEST_DEFAULT_MOTION |
gtk.DEST_DEFAULT_HIGHLIGHT |
gtk.DEST_DEFAULT_DROP,
self.dnd_list, gtk.gdk.ACTION_COPY)
self.conv_scrolledwindow = self.xml.get_object(
'conversation_scrolledwindow')
self.conv_scrolledwindow.add(self.conv_textview.tv)
widget = self.conv_scrolledwindow.get_vadjustment()
id_ = widget.connect('value-changed',
self.on_conversation_vadjustment_value_changed)
self.handlers[id_] = widget
id_ = widget.connect('changed',
self.on_conversation_vadjustment_changed)
self.handlers[id_] = widget
self.scroll_to_end_id = None
self.was_at_the_end = True
self.correcting = False
self.last_sent_msg = None
self.last_sent_txt = None
self.last_received_txt = {} # one per name
self.last_received_id = {} # one per name
# add MessageTextView to UI and connect signals
self.msg_scrolledwindow = self.xml.get_object('message_scrolledwindow')
self.msg_textview = MessageTextView()
id_ = self.msg_textview.connect('mykeypress',
self._on_message_textview_mykeypress_event)
self.handlers[id_] = self.msg_textview
self.msg_scrolledwindow.add(self.msg_textview)
id_ = self.msg_textview.connect('key_press_event',
self._on_message_textview_key_press_event)
self.handlers[id_] = self.msg_textview
id_ = self.msg_textview.connect('size-request', self.size_request)
self.handlers[id_] = self.msg_textview
id_ = self.msg_textview.connect('populate_popup',
self.on_msg_textview_populate_popup)
self.handlers[id_] = self.msg_textview
# Setup DND
id_ = self.msg_textview.connect('drag_data_received',
self._on_drag_data_received)
self.handlers[id_] = self.msg_textview
self.msg_textview.drag_dest_set(gtk.DEST_DEFAULT_MOTION |
gtk.DEST_DEFAULT_HIGHLIGHT,
self.dnd_list, gtk.gdk.ACTION_COPY)
self.update_font()
# Hook up send button
widget = self.xml.get_object('send_button')
id_ = widget.connect('clicked', self._on_send_button_clicked)
widget.set_sensitive(False)
self.handlers[id_] = widget
widget = self.xml.get_object('formattings_button')
id_ = widget.connect('clicked', self.on_formattings_button_clicked)
self.handlers[id_] = widget
# the following vars are used to keep history of user's messages
self.sent_history = []
self.sent_history_pos = 0
self.received_history = []
self.received_history_pos = 0
self.orig_msg = None
# Emoticons menu
# set the image whether or not the user currently wants emoticons
# (so toggle works ok)
img = self.xml.get_object('emoticons_button_image')
img.set_from_file(os.path.join(gajim.DATA_DIR, 'emoticons', 'static',
'smile.png'))
self.toggle_emoticons()
# Attach speller
if gajim.config.get('use_speller') and HAS_GTK_SPELL:
self.set_speller()
self.conv_textview.tv.show()
self._paint_banner()
# For XEP-0172
self.user_nick = None
self.smooth = True
self.command_hits = []
self.last_key_tabs = False
# PluginSystem: adding GUI extension point for ChatControlBase
# instance object (also subclasses, eg. ChatControl or GroupchatControl)
gajim.plugin_manager.gui_extension_point('chat_control_base', self)
gajim.ged.register_event_handler('our-show', ged.GUI1,
self._nec_our_status)
gajim.ged.register_event_handler('ping-sent', ged.GUI1,
self._nec_ping_sent)
gajim.ged.register_event_handler('ping-reply', ged.GUI1,
self._nec_ping_reply)
gajim.ged.register_event_handler('ping-error', ged.GUI1,
self._nec_ping_error)
# This is basically a very nasty hack to work around the inability
# to properly use super(), because of the old code.
CommandTools.__init__(self)
def set_speller(self):
# now set the one the user selected
per_type = 'contacts'
if self.type_id == message_control.TYPE_GC:
per_type = 'rooms'
lang = gajim.config.get_per(per_type, self.contact.jid,
'speller_language')
if not lang:
# use the default one
lang = gajim.config.get('speller_language')
if not lang:
lang = gajim.LANG
if lang:
try:
gtkspell.Spell(self.msg_textview, lang)
self.msg_textview.lang = lang
except (gobject.GError, RuntimeError, TypeError, OSError):
dialogs.AspellDictError(lang)
def on_banner_label_populate_popup(self, label, menu):
"""
Override the default context menu and add our own menu items
"""
item = gtk.SeparatorMenuItem()
menu.prepend(item)
menu2 = self.prepare_context_menu()
i = 0
for item in menu2:
menu2.remove(item)
menu.prepend(item)
menu.reorder_child(item, i)
i += 1
menu.show_all()
def shutdown(self):
super(ChatControlBase, self).shutdown()
# PluginSystem: removing GUI extension points connected with ChatControlBase
# instance object
gajim.plugin_manager.remove_gui_extension_point('chat_control_base',
self)
gajim.plugin_manager.remove_gui_extension_point(
'chat_control_base_draw_banner', self)
gajim.plugin_manager.remove_gui_extension_point('print_special_text',
self)
gajim.ged.remove_event_handler('our-show', ged.GUI1,
self._nec_our_status)
def on_msg_textview_populate_popup(self, textview, menu):
"""
Override the default context menu and prepend an option to switch
languages
"""
def _on_select_dictionary(widget, lang):
per_type = 'contacts'
if self.type_id == message_control.TYPE_GC:
per_type = 'rooms'
if not gajim.config.get_per(per_type, self.contact.jid):
gajim.config.add_per(per_type, self.contact.jid)
gajim.config.set_per(per_type, self.contact.jid, 'speller_language',
lang)
spell = gtkspell.get_from_text_view(self.msg_textview)
self.msg_textview.lang = lang
spell.set_language(lang)
widget.set_active(True)
item = gtk.ImageMenuItem(gtk.STOCK_UNDO)
menu.prepend(item)
id_ = item.connect('activate', self.msg_textview.undo)
self.handlers[id_] = item
item = gtk.SeparatorMenuItem()
menu.prepend(item)
item = gtk.ImageMenuItem(gtk.STOCK_CLEAR)
menu.prepend(item)
id_ = item.connect('activate', self.msg_textview.clear)
self.handlers[id_] = item
menu.show_all()
def on_quote(self, widget, text):
text = '>' + text.replace('\n', '\n>') + '\n'
message_buffer = self.msg_textview.get_buffer()
message_buffer.insert_at_cursor(text)
# moved from ChatControl
def _on_banner_eventbox_button_press_event(self, widget, event):
"""
If right-clicked, show popup
"""
if event.button == 3: # right click
self.parent_win.popup_menu(event)
def _on_send_button_clicked(self, widget):
"""
When send button is pressed: send the current message
"""
message_buffer = self.msg_textview.get_buffer()
start_iter = message_buffer.get_start_iter()
end_iter = message_buffer.get_end_iter()
message = message_buffer.get_text(start_iter, end_iter, 0).decode('utf-8')
xhtml = self.msg_textview.get_xhtml()
# send the message
self.send_message(message, xhtml=xhtml)
def _paint_banner(self):
"""
Repaint banner with theme color
"""
theme = gajim.config.get('roster_theme')
bgcolor = gajim.config.get_per('themes', theme, 'bannerbgcolor')
textcolor = gajim.config.get_per('themes', theme, 'bannertextcolor')
# the backgrounds are colored by using an eventbox by
# setting the bg color of the eventbox and the fg of the name_label
banner_eventbox = self.xml.get_object('banner_eventbox')
banner_name_label = self.xml.get_object('banner_name_label')
self.disconnect_style_event(banner_name_label)
self.disconnect_style_event(self.banner_status_label)
if bgcolor:
banner_eventbox.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.color_parse(bgcolor))
default_bg = False
else:
default_bg = True
if textcolor:
banner_name_label.modify_fg(gtk.STATE_NORMAL,
gtk.gdk.color_parse(textcolor))
self.banner_status_label.modify_fg(gtk.STATE_NORMAL,
gtk.gdk.color_parse(textcolor))
default_fg = False
else:
default_fg = True
if default_bg or default_fg:
self._on_style_set_event(banner_name_label, None, default_fg,
default_bg)
if self.banner_status_label.flags() & gtk.REALIZED:
# Widget is realized
self._on_style_set_event(self.banner_status_label, None, default_fg,
default_bg)
def disconnect_style_event(self, widget):
# Try to find the event_id
for id_ in self.handlers.keys():
if self.handlers[id_] == widget:
widget.disconnect(id_)
del self.handlers[id_]
break
def connect_style_event(self, widget, set_fg=False, set_bg=False):
self.disconnect_style_event(widget)
id_ = widget.connect('style-set', self._on_style_set_event, set_fg,
set_bg)
self.handlers[id_] = widget
def _on_style_set_event(self, widget, style, *opts):
"""
Set style of widget from style class *.Frame.Eventbox
opts[0] == True -> set fg color
opts[1] == True -> set bg color
"""
banner_eventbox = self.xml.get_object('banner_eventbox')
self.disconnect_style_event(widget)
if opts[1]:
bg_color = widget.style.bg[gtk.STATE_SELECTED]
banner_eventbox.modify_bg(gtk.STATE_NORMAL, bg_color)
if opts[0]:
fg_color = widget.style.fg[gtk.STATE_SELECTED]
widget.modify_fg(gtk.STATE_NORMAL, fg_color)
self.connect_style_event(widget, opts[0], opts[1])
def _conv_textview_key_press_event(self, widget, event):
# translate any layout to latin_layout
keymap = gtk.gdk.keymap_get_default()
keycode = keymap.get_entries_for_keyval(event.keyval)[0][0]
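# Let Ctrl+C / Ctrl+Insert (copy) and Shift+Page Up/Down pass through to the
# conversation view; any other keypress is forwarded to the parent window's
# notebook instead.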
if (event.state & gtk.gdk.CONTROL_MASK and keycode in (self.keycode_c,
self.keycode_ins)) or (event.state & gtk.gdk.SHIFT_MASK and \
event.keyval in (gtk.keysyms.Page_Down, gtk.keysyms.Page_Up)):
return False
self.parent_win.notebook.emit('key_press_event', event)
return True
def show_emoticons_menu(self):
if not gajim.config.get('emoticons_theme'):
return
def set_emoticons_menu_position(w, msg_tv=self.msg_textview):
window = msg_tv.get_window(gtk.TEXT_WINDOW_WIDGET)
# get the window position
origin = window.get_origin()
size = window.get_size()
buf = msg_tv.get_buffer()
# get the cursor position
cursor = msg_tv.get_iter_location(buf.get_iter_at_mark(
buf.get_insert()))
cursor = msg_tv.buffer_to_window_coords(gtk.TEXT_WINDOW_TEXT,
cursor.x, cursor.y)
x = origin[0] + cursor[0]
y = origin[1] + size[1]
menu_height = gajim.interface.emoticons_menu.size_request()[1]
#FIXME: get_line_count is not so good
#get the iter of cursor, then tv.get_line_yrange
# so we know at which y we are typing (not how many lines we have)
# then go show just above the current cursor line for up
# or just below the current cursor line for down
#TEST with having 3 lines and writing in the 2nd
if y + menu_height > gtk.gdk.screen_height():
# move menu just above cursor
y -= menu_height + (msg_tv.allocation.height / buf.get_line_count())
#else: # move menu just below cursor
# y -= (msg_tv.allocation.height / buf.get_line_count())
return (x, y, True) # push_in True
gajim.interface.emoticon_menuitem_clicked = self.append_emoticon
gajim.interface.emoticons_menu.popup(None, None,
set_emoticons_menu_position, 1, 0)
def _on_message_textview_key_press_event(self, widget, event):
if event.keyval == gtk.keysyms.space:
self.space_pressed = True
elif (self.space_pressed or self.msg_textview.undo_pressed) and \
event.keyval not in (gtk.keysyms.Control_L, gtk.keysyms.Control_R) and \
not (event.keyval == gtk.keysyms.z and event.state & gtk.gdk.CONTROL_MASK):
# If the space key was pressed and now it isn't,
# we save the buffer into the undo list. But be careful we're not
# pressing Control again (as in Ctrl+Z)
_buffer = widget.get_buffer()
start_iter, end_iter = _buffer.get_bounds()
self.msg_textview.save_undo(_buffer.get_text(start_iter, end_iter))
self.space_pressed = False
# Ctrl [+ Shift] + Tab are not forwarded to notebook. We handle it here
if self.widget_name == 'groupchat_control':
if event.keyval not in (gtk.keysyms.ISO_Left_Tab, gtk.keysyms.Tab):
self.last_key_tabs = False
if event.state & gtk.gdk.SHIFT_MASK:
# CTRL + SHIFT + TAB
if event.state & gtk.gdk.CONTROL_MASK and \
event.keyval == gtk.keysyms.ISO_Left_Tab:
self.parent_win.move_to_next_unread_tab(False)
return True
# SHIFT + PAGE_[UP|DOWN]: send to conv_textview
elif event.keyval == gtk.keysyms.Page_Down or \
event.keyval == gtk.keysyms.Page_Up:
self.conv_textview.tv.emit('key_press_event', event)
return True
elif event.state & gtk.gdk.CONTROL_MASK:
if event.keyval == gtk.keysyms.Tab: # CTRL + TAB
self.parent_win.move_to_next_unread_tab(True)
return True
return False
def _on_message_textview_mykeypress_event(self, widget, event_keyval,
event_keymod):
"""
When a key is pressed: if enter is pressed without the shift key, message
(if not empty) is sent and printed in the conversation
"""
# NOTE: handles mykeypress which is custom signal connected to this
# CB in new_tab(). For this signal see message_textview.py
message_textview = widget
message_buffer = message_textview.get_buffer()
start_iter, end_iter = message_buffer.get_bounds()
message = message_buffer.get_text(start_iter, end_iter, False).decode(
'utf-8')
xhtml = self.msg_textview.get_xhtml()
# construct event instance from binding
event = gtk.gdk.Event(gtk.gdk.KEY_PRESS) # it's always a key-press here
event.keyval = event_keyval
event.state = event_keymod
event.time = 0 # assign current time
if event.keyval == gtk.keysyms.Up:
if event.state == gtk.gdk.CONTROL_MASK: # Ctrl+UP
self.scroll_messages('up', message_buffer, 'sent')
# Ctrl+Shift+UP
elif event.state == (gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK):
self.scroll_messages('up', message_buffer, 'received')
elif event.keyval == gtk.keysyms.Down:
if event.state == gtk.gdk.CONTROL_MASK: # Ctrl+Down
self.scroll_messages('down', message_buffer, 'sent')
# Ctrl+Shift+Down
elif event.state == (gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK):
self.scroll_messages('down', message_buffer, 'received')
elif event.keyval == gtk.keysyms.Return or \
event.keyval == gtk.keysyms.KP_Enter: # ENTER
# NOTE: SHIFT + ENTER does not need to be emulated as it is not
# a binding at all (the textview's default action is a newline)
if gajim.config.get('send_on_ctrl_enter'):
# here, we emulate GTK default action on ENTER (add new line)
# normally I would add it in keypress but it gets way too complex
# to get instant result on changing this advanced setting
if event.state == 0: # no ctrl, no shift just ENTER add newline
end_iter = message_buffer.get_end_iter()
message_buffer.insert_at_cursor('\n')
send_message = False
elif event.state & gtk.gdk.CONTROL_MASK: # CTRL + ENTER
send_message = True
else: # send on Enter, do newline on Ctrl Enter
if event.state & gtk.gdk.CONTROL_MASK: # Ctrl + ENTER
end_iter = message_buffer.get_end_iter()
message_buffer.insert_at_cursor('\n')
send_message = False
else: # ENTER
send_message = True
if gajim.connections[self.account].connected < 2 and send_message:
# we are not connected
dialogs.ErrorDialog(_('A connection is not available'),
_('Your message can not be sent until you are connected.'),
transient_for=self.parent_win.window)
send_message = False
if send_message:
self.send_message(message, xhtml=xhtml) # send the message
elif event.keyval == gtk.keysyms.z: # CTRL+z
if event.state & gtk.gdk.CONTROL_MASK:
self.msg_textview.undo()
else:
# Give the control itself a chance to process
self.handle_message_textview_mykey_press(widget, event_keyval,
event_keymod)
def _on_drag_data_received(self, widget, context, x, y, selection,
target_type, timestamp):
"""
Derived types SHOULD implement this
"""
pass
def _on_drag_leave(self, widget, context, time):
# FIXME: DND on non editable TextView, find a better way
self.drag_entered = False
self.conv_textview.tv.set_editable(False)
def _on_drag_motion(self, widget, context, x, y, time):
# FIXME: DND on non editable TextView, find a better way
if not self.drag_entered:
# We drag new data over the TextView, make it editable to catch dnd
self.drag_entered_conv = True
self.conv_textview.tv.set_editable(True)
def get_seclabel(self):
label = None
if self.seclabel_combo is not None:
idx = self.seclabel_combo.get_active()
if idx != -1:
cat = gajim.connections[self.account].seclabel_catalogues[self.contact.jid]
lname = cat[2][idx]
label = cat[1][lname]
return label
def send_message(self, message, keyID='', type_='chat', chatstate=None,
msg_id=None, resource=None, xhtml=None, callback=None, callback_args=[],
process_commands=True, attention=False):
"""
Send the given message to the active tab. Empty messages (or a lone newline) are ignored.
"""
if not message or message == '\n':
return None
if process_commands and self.process_as_command(message):
return
label = self.get_seclabel()
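# _cb remembers the stanza and text of the last sent message (needed when a
# later message is sent as a correction of it) before chaining to the
# caller-provided callback.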
def _cb(msg, cb, *cb_args):
self.last_sent_msg = msg
self.last_sent_txt = cb_args[0]
if cb:
cb(msg, *cb_args)
if self.correcting and self.last_sent_msg:
correction_msg = self.last_sent_msg
else:
correction_msg = None
gajim.nec.push_outgoing_event(MessageOutgoingEvent(None,
account=self.account, jid=self.contact.jid, message=message,
keyID=keyID, type_=type_, chatstate=chatstate, msg_id=msg_id,
resource=resource, user_nick=self.user_nick, xhtml=xhtml,
label=label, callback=_cb, callback_args=[callback] + callback_args,
control=self, attention=attention, correction_msg=correction_msg, automatic_message=False))
# Record the history of sent messages
self.save_message(message, 'sent')
# Be sure to send the user nickname only once, according to XEP-0172
self.user_nick = None
# Clear msg input
message_buffer = self.msg_textview.get_buffer()
message_buffer.set_text('') # clear message buffer (and tv of course)
def save_message(self, message, msg_type):
# save the message, so the user can scroll through the list with key up/down
if msg_type == 'sent':
history = self.sent_history
pos = self.sent_history_pos
else:
history = self.received_history
pos = self.received_history_pos
size = len(history)
scroll = False if pos == size else True # are we scrolling?
# we don't want the size of the buffer to grow indefinitely
max_size = gajim.config.get('key_up_lines')
for i in xrange(size - max_size + 1):
if pos == 0:
break
history.pop(0)
pos -= 1
history.append(message)
if not scroll or msg_type == 'sent':
pos = len(history)
if msg_type == 'sent':
self.sent_history_pos = pos
self.orig_msg = None
else:
self.received_history_pos = pos
def print_conversation_line(self, text, kind, name, tim,
other_tags_for_name=[], other_tags_for_time=[], other_tags_for_text=[],
count_as_new=True, subject=None, xhtml=None, simple=False,
xep0184_id=None, graphics=True, displaymarking=None, msg_log_id=None,
correct_id=None):
"""
Print 'chat' type messages
correct_id = (message_id, correct_id)
"""
jid = self.contact.jid
full_jid = self.get_full_jid()
textview = self.conv_textview
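# 'end' is True when the view is already scrolled to the bottom or the
# message is outgoing; together with the window/tab being active this decides
# further down whether the message is queued as an unread event.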
end = False
if self.was_at_the_end or kind == 'outgoing':
end = True
old_txt = ''
if name in self.last_received_txt:
old_txt = self.last_received_txt[name]
if correct_id and correct_id[1] and \
name in self.conv_textview.last_received_message_marks and \
correct_id[1] == self.last_received_id[name]:
self.conv_textview.correct_last_received_message(text, xhtml,
name, old_txt)
elif correct_id and correct_id[1] and \
self.conv_textview.last_sent_message_marks[0] and \
correct_id[1] == self.last_received_id[name]:
# this is for carbon copied messages that are sent from another
# resource
self.conv_textview.correct_last_sent_message(text, xhtml,
self.get_our_nick(), old_txt)
else:
textview.print_conversation_line(text, jid, kind, name, tim,
other_tags_for_name, other_tags_for_time, other_tags_for_text,
subject, xhtml, simple=simple, graphics=graphics,
displaymarking=displaymarking)
if xep0184_id is not None:
textview.add_xep0184_mark(xep0184_id)
if not count_as_new:
return
if kind in ('incoming', 'incoming_queue', 'outgoing'):
self.last_received_txt[name] = text
if correct_id:
self.last_received_id[name] = correct_id[0]
if kind == 'incoming':
if not self.type_id == message_control.TYPE_GC or \
gajim.config.get('notify_on_all_muc_messages') or \
'marked' in other_tags_for_text:
# it's a normal message, or a MUC message we want to be
# notified about if quitting just after
# other_tags_for_text == ['marked'] --> highlighted gc message
gajim.last_message_time[self.account][full_jid] = time.time()
if kind in ('incoming', 'incoming_queue'):
# Record the history of received messages
self.save_message(text, 'received')
if kind in ('incoming', 'incoming_queue', 'error'):
gc_message = False
if self.type_id == message_control.TYPE_GC:
gc_message = True
if ((self.parent_win and (not self.parent_win.get_active_control() or \
self != self.parent_win.get_active_control() or \
not self.parent_win.is_active() or not end)) or \
(gc_message and \
jid in gajim.interface.minimized_controls[self.account])) and \
kind in ('incoming', 'incoming_queue', 'error'):
# we want to save this message in the events list
# other_tags_for_text == ['marked'] --> highlighted gc message
if gc_message:
if 'marked' in other_tags_for_text:
event_type = events.PrintedMarkedGcMsgEvent
else:
event_type = events.PrintedGcMsgEvent
event = 'gc_message_received'
else:
if self.type_id == message_control.TYPE_CHAT:
event_type = events.PrintedChatEvent
else:
event_type = events.PrintedPmEvent
event = 'message_received'
show_in_roster = notify.get_show_in_roster(event,
self.account, self.contact, self.session)
show_in_systray = notify.get_show_in_systray(event,
self.account, self.contact, event_type.type_)
event = event_type(text, subject, self, msg_log_id,
show_in_roster=show_in_roster,
show_in_systray=show_in_systray)
gajim.events.add_event(self.account, full_jid, event)
# We need to redraw contact if we show in roster
if show_in_roster:
gajim.interface.roster.draw_contact(self.contact.jid,
self.account)
if not self.parent_win:
return
if (not self.parent_win.get_active_control() or \
self != self.parent_win.get_active_control() or \
not self.parent_win.is_active() or not end) and \
kind in ('incoming', 'incoming_queue', 'error'):
self.parent_win.redraw_tab(self)
if not self.parent_win.is_active():
self.parent_win.show_title(True, self) # Enabled Urgent hint
else:
self.parent_win.show_title(False, self) # Disabled Urgent hint
def toggle_emoticons(self):
"""
Hide or show emoticons_button and make sure emoticons_menu is always there
when needed
"""
emoticons_button = self.xml.get_object('emoticons_button')
if gajim.config.get('emoticons_theme'):
emoticons_button.show()
emoticons_button.set_no_show_all(False)
else:
emoticons_button.hide()
emoticons_button.set_no_show_all(True)
def append_emoticon(self, str_):
buffer_ = self.msg_textview.get_buffer()
if buffer_.get_char_count():
buffer_.insert_at_cursor(' %s ' % str_)
else: # we are at the beginning of the buffer
buffer_.insert_at_cursor('%s ' % str_)
self.msg_textview.grab_focus()
def on_emoticons_button_clicked(self, widget):
"""
Popup emoticons menu
"""
gajim.interface.emoticon_menuitem_clicked = self.append_emoticon
gajim.interface.popup_emoticons_under_button(widget, self.parent_win)
def on_formattings_button_clicked(self, widget):
"""
Popup formattings menu
"""
menu = gtk.Menu()
menuitems = ((_('Bold'), 'bold'),
(_('Italic'), 'italic'),
(_('Underline'), 'underline'),
(_('Strike'), 'strike'))
active_tags = self.msg_textview.get_active_tags()
for menuitem in menuitems:
item = gtk.CheckMenuItem(menuitem[0])
if menuitem[1] in active_tags:
item.set_active(True)
else:
item.set_active(False)
item.connect('activate', self.msg_textview.set_tag,
menuitem[1])
menu.append(item)
item = gtk.SeparatorMenuItem() # separator
menu.append(item)
item = gtk.ImageMenuItem(_('Color'))
icon = gtk.image_new_from_stock(gtk.STOCK_SELECT_COLOR, gtk.ICON_SIZE_MENU)
item.set_image(icon)
item.connect('activate', self.on_color_menuitem_activale)
menu.append(item)
item = gtk.ImageMenuItem(_('Font'))
icon = gtk.image_new_from_stock(gtk.STOCK_SELECT_FONT, gtk.ICON_SIZE_MENU)
item.set_image(icon)
item.connect('activate', self.on_font_menuitem_activale)
menu.append(item)
item = gtk.SeparatorMenuItem() # separator
menu.append(item)
item = gtk.ImageMenuItem(_('Clear formatting'))
icon = gtk.image_new_from_stock(gtk.STOCK_CLEAR, gtk.ICON_SIZE_MENU)
item.set_image(icon)
item.connect('activate', self.msg_textview.clear_tags)
menu.append(item)
menu.show_all()
gtkgui_helpers.popup_emoticons_under_button(menu, widget,
self.parent_win)
def on_color_menuitem_activale(self, widget):
color_dialog = gtk.ColorSelectionDialog('Select a color')
color_dialog.connect('response', self.msg_textview.color_set,
color_dialog.colorsel)
color_dialog.show_all()
def on_font_menuitem_activale(self, widget):
font_dialog = gtk.FontSelectionDialog('Select a font')
font_dialog.connect('response', self.msg_textview.font_set,
font_dialog.fontsel)
font_dialog.show_all()
def on_actions_button_clicked(self, widget):
"""
Popup action menu
"""
menu = self.prepare_context_menu(hide_buttonbar_items=True)
menu.show_all()
gtkgui_helpers.popup_emoticons_under_button(menu, widget,
self.parent_win)
def update_font(self):
font = pango.FontDescription(gajim.config.get('conversation_font'))
self.conv_textview.tv.modify_font(font)
self.msg_textview.modify_font(font)
def update_tags(self):
self.conv_textview.update_tags()
def clear(self, tv):
buffer_ = tv.get_buffer()
start, end = buffer_.get_bounds()
buffer_.delete(start, end)
def _on_history_menuitem_activate(self, widget=None, jid=None):
"""
When history menuitem is pressed: call history window
"""
if not jid:
jid = self.contact.jid
if 'logs' in gajim.interface.instances:
gajim.interface.instances['logs'].window.present()
gajim.interface.instances['logs'].open_history(jid, self.account)
else:
gajim.interface.instances['logs'] = \
history_window.HistoryWindow(jid, self.account)
def _on_send_file(self, gc_contact=None):
"""
gc_contact can be set when we are in a groupchat control
"""
def _on_ok(c):
gajim.interface.instances['file_transfers'].show_file_send_request(
self.account, c)
if self.TYPE_ID == message_control.TYPE_PM:
gc_contact = self.gc_contact
if gc_contact:
# gc or pm
gc_control = gajim.interface.msg_win_mgr.get_gc_control(
gc_contact.room_jid, self.account)
self_contact = gajim.contacts.get_gc_contact(self.account,
gc_control.room_jid, gc_control.nick)
if gc_control.is_anonymous and gc_contact.affiliation not in ['admin',
'owner'] and self_contact.affiliation in ['admin', 'owner']:
contact = gajim.contacts.get_contact(self.account, gc_contact.jid)
if not contact or contact.sub not in ('both', 'to'):
prim_text = _('Really send file?')
sec_text = _('If you send a file to %s, he/she will know your '
'real Jabber ID.') % gc_contact.name
dialog = dialogs.NonModalConfirmationDialog(prim_text,
sec_text, on_response_ok=(_on_ok, gc_contact))
dialog.popup()
return
_on_ok(gc_contact)
return
_on_ok(self.contact)
def on_minimize_menuitem_toggled(self, widget):
"""
When a groupchat is minimized, unparent the tab, put it in the roster, etc.
"""
old_value = False
minimized_gc = gajim.config.get_per('accounts', self.account,
'minimized_gc').split()
if self.contact.jid in minimized_gc:
old_value = True
minimize = widget.get_active()
if minimize and not self.contact.jid in minimized_gc:
minimized_gc.append(self.contact.jid)
if not minimize and self.contact.jid in minimized_gc:
minimized_gc.remove(self.contact.jid)
if old_value != minimize:
gajim.config.set_per('accounts', self.account, 'minimized_gc',
' '.join(minimized_gc))
def set_control_active(self, state):
if state:
jid = self.contact.jid
if self.was_at_the_end:
# we are at the end
type_ = ['printed_' + self.type_id]
if self.type_id == message_control.TYPE_GC:
type_ = ['printed_gc_msg', 'printed_marked_gc_msg']
if not gajim.events.remove_events(self.account, self.get_full_jid(),
types=type_):
# There were events to remove
self.redraw_after_event_removed(jid)
def bring_scroll_to_end(self, textview, diff_y=0):
"""
Scroll to the end of textview if end is not visible
"""
if self.scroll_to_end_id:
# a scroll is already planned
return
buffer_ = textview.get_buffer()
end_iter = buffer_.get_end_iter()
end_rect = textview.get_iter_location(end_iter)
visible_rect = textview.get_visible_rect()
# scroll only if expected end is not visible
if end_rect.y >= (visible_rect.y + visible_rect.height + diff_y):
self.scroll_to_end_id = gobject.idle_add(self.scroll_to_end_iter,
textview)
def scroll_to_end_iter(self, textview):
buffer_ = textview.get_buffer()
end_iter = buffer_.get_end_iter()
textview.scroll_to_iter(end_iter, 0, False, 1, 1)
self.scroll_to_end_id = None
return False
def size_request(self, msg_textview, requisition):
"""
When message_textview changes its size: if the new height would enlarge
the window, enable the automatic scrollbar policy. Also enable the
automatic policy for the horizontal scrollbar if the message in
message_textview is too wide
"""
if msg_textview.window is None:
return
min_height = self.conv_scrolledwindow.get_property('height-request')
conversation_height = self.conv_textview.tv.window.get_size()[1]
message_height = msg_textview.window.get_size()[1]
message_width = msg_textview.window.get_size()[0]
# new tab is not exposed yet
if conversation_height < 2:
return
if conversation_height < min_height:
min_height = conversation_height
# we don't want to always resize message_textview in height,
# so we keep a minimum height on conversation_textview's scrolled window;
# but we also want to avoid resizing the window, so if we reach that
# minimum for conversation_textview and the maximum for message_textview
# we switch the scrollbar policy to automatic
diff_y = message_height - requisition.height
if diff_y != 0:
if conversation_height + diff_y < min_height:
if message_height + conversation_height - min_height > min_height:
policy = self.msg_scrolledwindow.get_property(
'vscrollbar-policy')
if policy != gtk.POLICY_AUTOMATIC:
self.msg_scrolledwindow.set_property('vscrollbar-policy',
gtk.POLICY_AUTOMATIC)
self.msg_scrolledwindow.set_property('height-request',
message_height + conversation_height - min_height)
else:
self.msg_scrolledwindow.set_property('vscrollbar-policy',
gtk.POLICY_NEVER)
self.msg_scrolledwindow.set_property('height-request', -1)
self.smooth = True # reinit the flag
# enable scrollbar automatic policy for horizontal scrollbar
# if message we have in message_textview is too big
if requisition.width > message_width:
self.msg_scrolledwindow.set_property('hscrollbar-policy',
gtk.POLICY_AUTOMATIC)
else:
self.msg_scrolledwindow.set_property('hscrollbar-policy',
gtk.POLICY_NEVER)
return True
def on_conversation_vadjustment_changed(self, adjustment):
# used to stay at the end of the textview when we shrink conversation
# textview.
if self.was_at_the_end:
if self.conv_textview.at_the_end():
# we are at the end
self.conv_textview.bring_scroll_to_end(-18)
else:
self.conv_textview.bring_scroll_to_end(-18, use_smooth=False)
self.was_at_the_end = (adjustment.upper - adjustment.value - \
adjustment.page_size) < 18
def on_conversation_vadjustment_value_changed(self, adjustment):
# stop automatic scroll when we manually scroll
if not self.conv_textview.auto_scrolling:
self.conv_textview.stop_scrolling()
self.was_at_the_end = (adjustment.upper - adjustment.value - \
adjustment.page_size) < 18
if self.resource:
jid = self.contact.get_full_jid()
else:
jid = self.contact.jid
types_list = []
type_ = self.type_id
if type_ == message_control.TYPE_GC:
type_ = 'gc_msg'
types_list = ['printed_' + type_, type_, 'printed_marked_gc_msg']
else: # Not a GC
types_list = ['printed_' + type_, type_]
if not len(gajim.events.get_events(self.account, jid, types_list)):
return
if not self.parent_win:
return
if self.conv_textview.at_the_end() and \
self.parent_win.get_active_control() == self and \
self.parent_win.window.is_active():
# we are at the end
if self.type_id == message_control.TYPE_GC:
if not gajim.events.remove_events(self.account, jid,
types=types_list):
self.redraw_after_event_removed(jid)
elif self.session and self.session.remove_events(types_list):
# There were events to remove
self.redraw_after_event_removed(jid)
def redraw_after_event_removed(self, jid):
"""
We just removed a 'printed_*' event, redraw contact in roster or
gc_roster and titles in roster and msg_win
"""
self.parent_win.redraw_tab(self)
self.parent_win.show_title()
# TODO : get the contact and check notify.get_show_in_roster()
if self.type_id == message_control.TYPE_PM:
room_jid, nick = gajim.get_room_and_nick_from_fjid(jid)
groupchat_control = gajim.interface.msg_win_mgr.get_gc_control(
room_jid, self.account)
if room_jid in gajim.interface.minimized_controls[self.account]:
groupchat_control = \
gajim.interface.minimized_controls[self.account][room_jid]
contact = gajim.contacts.get_contact_with_highest_priority(
self.account, room_jid)
if contact:
gajim.interface.roster.draw_contact(room_jid, self.account)
if groupchat_control:
groupchat_control.draw_contact(nick)
if groupchat_control.parent_win:
groupchat_control.parent_win.redraw_tab(groupchat_control)
else:
gajim.interface.roster.draw_contact(jid, self.account)
gajim.interface.roster.show_title()
def scroll_messages(self, direction, msg_buf, msg_type):
if msg_type == 'sent':
history = self.sent_history
pos = self.sent_history_pos
self.received_history_pos = len(self.received_history)
else:
history = self.received_history
pos = self.received_history_pos
self.sent_history_pos = len(self.sent_history)
size = len(history)
if self.orig_msg is None:
# user was typing something and then went into history, so save
# whatever is already typed
start_iter = msg_buf.get_start_iter()
end_iter = msg_buf.get_end_iter()
self.orig_msg = msg_buf.get_text(start_iter, end_iter, 0).decode(
'utf-8')
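# Pressing 'up' while already at the end of the sent history enters
# correction mode: the input background is tinted and the last sent message
# is loaded into the buffer for editing (commands other than /me excluded).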
if pos == size and size > 0 and direction == 'up' and \
msg_type == 'sent' and not self.correcting and (not \
history[pos - 1].startswith('/') or history[pos - 1].startswith('/me')):
self.correcting = True
self.old_message_tv_color = self.msg_textview.get_style().base[0]
self.msg_textview.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse(
'PaleGoldenrod'))
message = history[pos - 1]
msg_buf.set_text(message)
return
if self.correcting:
# We were previously correcting
self.msg_textview.modify_base(gtk.STATE_NORMAL,
self.old_message_tv_color)
self.correcting = False
pos += -1 if direction == 'up' else +1
if pos == -1:
return
if pos >= size:
pos = size
message = self.orig_msg
self.orig_msg = None
else:
message = history[pos]
if msg_type == 'sent':
self.sent_history_pos = pos
else:
self.received_history_pos = pos
if self.orig_msg is not None:
message = '> %s\n' % message.replace('\n', '\n> ')
msg_buf.set_text(message)
def lighten_color(self, color):
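# Blend each channel toward 'mask' (0 here) by factor p; with these values
# the colour is in fact darkened, despite the method name.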
p = 0.4
mask = 0
color.red = int((color.red * p) + (mask * (1 - p)))
color.green = int((color.green * p) + (mask * (1 - p)))
color.blue = int((color.blue * p) + (mask * (1 - p)))
return color
def widget_set_visible(self, widget, state):
"""
Show or hide a widget
"""
# make the last message visible, when changing to "full view"
if not state:
gobject.idle_add(self.conv_textview.scroll_to_end_iter)
widget.set_no_show_all(state)
if state:
widget.hide()
else:
widget.show_all()
def chat_buttons_set_visible(self, state):
"""
Toggle chat buttons
"""
MessageControl.chat_buttons_set_visible(self, state)
self.widget_set_visible(self.xml.get_object('actions_hbox'), state)
def got_connected(self):
self.msg_textview.set_sensitive(True)
self.msg_textview.set_editable(True)
self.update_toolbar()
def got_disconnected(self):
self.msg_textview.set_sensitive(False)
self.msg_textview.set_editable(False)
self.conv_textview.tv.grab_focus()
self.no_autonegotiation = False
self.update_toolbar()
################################################################################
class ChatControl(ChatControlBase):
"""
A control for standard 1-1 chat
"""
(
JINGLE_STATE_NULL,
JINGLE_STATE_CONNECTING,
JINGLE_STATE_CONNECTION_RECEIVED,
JINGLE_STATE_CONNECTED,
JINGLE_STATE_ERROR
) = range(5)
TYPE_ID = message_control.TYPE_CHAT
# Set a command host to bind to. Every command given through a chat will be
# processed with this command host.
COMMAND_HOST = ChatCommands
def __init__(self, parent_win, contact, acct, session, resource=None):
ChatControlBase.__init__(self, self.TYPE_ID, parent_win,
'chat_control', contact, acct, resource)
self.gpg_is_active = False
self.last_recv_message_id = None
self.last_recv_message_marks = None
# for muc use:
# widget = self.xml.get_object('muc_window_actions_button')
self.actions_button = self.xml.get_object('message_window_actions_button')
id_ = self.actions_button.connect('clicked',
self.on_actions_button_clicked)
self.handlers[id_] = self.actions_button
self._formattings_button = self.xml.get_object('formattings_button')
self._add_to_roster_button = self.xml.get_object(
'add_to_roster_button')
id_ = self._add_to_roster_button.connect('clicked',
self._on_add_to_roster_menuitem_activate)
self.handlers[id_] = self._add_to_roster_button
self._audio_button = self.xml.get_object('audio_togglebutton')
id_ = self._audio_button.connect('toggled', self.on_audio_button_toggled)
self.handlers[id_] = self._audio_button
# add a special img
gtkgui_helpers.add_image_to_button(self._audio_button,
'gajim-mic_inactive')
self._video_button = self.xml.get_object('video_togglebutton')
id_ = self._video_button.connect('toggled', self.on_video_button_toggled)
self.handlers[id_] = self._video_button
# add a special img
gtkgui_helpers.add_image_to_button(self._video_button,
'gajim-cam_inactive')
self._send_file_button = self.xml.get_object('send_file_button')
# add a special img for send file button
pixbuf = gtkgui_helpers.get_icon_pixmap('document-send', quiet=True)
img = gtk.image_new_from_pixbuf(pixbuf)
self._send_file_button.set_image(img)
id_ = self._send_file_button.connect('clicked',
self._on_send_file_menuitem_activate)
self.handlers[id_] = self._send_file_button
self._convert_to_gc_button = self.xml.get_object(
'convert_to_gc_button')
id_ = self._convert_to_gc_button.connect('clicked',
self._on_convert_to_gc_menuitem_activate)
self.handlers[id_] = self._convert_to_gc_button
self._contact_information_button = self.xml.get_object(
'contact_information_button')
id_ = self._contact_information_button.connect('clicked',
self._on_contact_information_menuitem_activate)
self.handlers[id_] = self._contact_information_button
compact_view = gajim.config.get('compact_view')
self.chat_buttons_set_visible(compact_view)
self.widget_set_visible(self.xml.get_object('banner_eventbox'),
gajim.config.get('hide_chat_banner'))
self.authentication_button = self.xml.get_object(
'authentication_button')
id_ = self.authentication_button.connect('clicked',
self._on_authentication_button_clicked)
self.handlers[id_] = self.authentication_button
# Add lock image to show chat encryption
self.lock_image = self.xml.get_object('lock_image')
# Convert to GC icon
img = self.xml.get_object('convert_to_gc_button_image')
img.set_from_pixbuf(gtkgui_helpers.load_icon(
'muc_active').get_pixbuf())
self._audio_banner_image = self.xml.get_object('audio_banner_image')
self._video_banner_image = self.xml.get_object('video_banner_image')
self.audio_sid = None
self.audio_state = self.JINGLE_STATE_NULL
self.audio_available = False
self.video_sid = None
self.video_state = self.JINGLE_STATE_NULL
self.video_available = False
self.update_toolbar()
self._pep_images = {}
self._pep_images['mood'] = self.xml.get_object('mood_image')
self._pep_images['activity'] = self.xml.get_object('activity_image')
self._pep_images['tune'] = self.xml.get_object('tune_image')
self._pep_images['location'] = self.xml.get_object('location_image')
self.update_all_pep_types()
# keep timeout id and window obj for possible big avatar
# it is on enter-notify and leave-notify so no need to be
# per jid
self.show_bigger_avatar_timeout_id = None
self.bigger_avatar_window = None
self.show_avatar()
# chatstate timers and state
self.reset_kbd_mouse_timeout_vars()
self._schedule_activity_timers()
# Hook up signals
id_ = self.parent_win.window.connect('motion-notify-event',
self._on_window_motion_notify)
self.handlers[id_] = self.parent_win.window
message_tv_buffer = self.msg_textview.get_buffer()
id_ = message_tv_buffer.connect('changed',
self._on_message_tv_buffer_changed)
self.handlers[id_] = message_tv_buffer
widget = self.xml.get_object('avatar_eventbox')
widget.set_property('height-request', gajim.config.get(
'chat_avatar_height'))
id_ = widget.connect('enter-notify-event',
self.on_avatar_eventbox_enter_notify_event)
self.handlers[id_] = widget
id_ = widget.connect('leave-notify-event',
self.on_avatar_eventbox_leave_notify_event)
self.handlers[id_] = widget
id_ = widget.connect('button-press-event',
self.on_avatar_eventbox_button_press_event)
self.handlers[id_] = widget
widget = self.xml.get_object('location_eventbox')
id_ = widget.connect('button-release-event',
self.on_location_eventbox_button_release_event)
self.handlers[id_] = widget
id_ = widget.connect('enter-notify-event',
self.on_location_eventbox_enter_notify_event)
self.handlers[id_] = widget
id_ = widget.connect('leave-notify-event',
self.on_location_eventbox_leave_notify_event)
self.handlers[id_] = widget
for key in ('1', '2', '3', '4', '5', '6', '7', '8', '9', '*', '0', '#'):
widget = self.xml.get_object(key + '_button')
id_ = widget.connect('pressed', self.on_num_button_pressed, key)
self.handlers[id_] = widget
id_ = widget.connect('released', self.on_num_button_released)
self.handlers[id_] = widget
self.dtmf_window = self.xml.get_object('dtmf_window')
self.dtmf_window.get_child().set_direction(gtk.TEXT_DIR_LTR)
id_ = self.dtmf_window.connect('focus-out-event',
self.on_dtmf_window_focus_out_event)
self.handlers[id_] = self.dtmf_window
widget = self.xml.get_object('dtmf_button')
id_ = widget.connect('clicked', self.on_dtmf_button_clicked)
self.handlers[id_] = widget
widget = self.xml.get_object('mic_hscale')
id_ = widget.connect('value_changed', self.on_mic_hscale_value_changed)
self.handlers[id_] = widget
widget = self.xml.get_object('sound_hscale')
id_ = widget.connect('value_changed', self.on_sound_hscale_value_changed)
self.handlers[id_] = widget
self.info_bar = gtk.InfoBar()
content_area = self.info_bar.get_content_area()
self.info_bar_label = gtk.Label()
self.info_bar_label.set_use_markup(True)
self.info_bar_label.set_alignment(0, 0)
content_area.add(self.info_bar_label)
self.info_bar.set_no_show_all(True)
widget = self.xml.get_object('vbox2')
widget.pack_start(self.info_bar, expand=False, padding=5)
widget.reorder_child(self.info_bar, 1)
# List of waiting infobar messages
self.info_bar_queue = []
self.subscribe_events()
if not session:
# Don't use a previous session if we want to talk to a specific resource
# and it's not the same
if not resource:
resource = contact.resource
session = gajim.connections[self.account].find_controlless_session(
self.contact.jid, resource)
self.setup_seclabel(self.xml.get_object('label_selector'))
if session:
session.control = self
self.session = session
if session.enable_encryption:
self.print_esession_details()
# Enable encryption if needed
self.no_autonegotiation = False
e2e_is_active = self.session and self.session.enable_encryption
gpg_pref = gajim.config.get_per('contacts', contact.jid, 'gpg_enabled')
# try GPG first
if not e2e_is_active and gpg_pref and \
gajim.config.get_per('accounts', self.account, 'keyid') and \
gajim.connections[self.account].USE_GPG:
self.gpg_is_active = True
gajim.encrypted_chats[self.account].append(contact.jid)
msg = _('OpenPGP encryption enabled')
ChatControlBase.print_conversation_line(self, msg, 'status', '',
None)
if self.session:
self.session.loggable = gajim.config.get_per('accounts',
self.account, 'log_encrypted_sessions')
# GPG is always authenticated as we use GPG's WoT
self._show_lock_image(self.gpg_is_active, 'OpenPGP',
self.gpg_is_active, self.session and self.session.is_loggable(),
True)
self.update_ui()
# restore previous conversation
self.restore_conversation()
self.msg_textview.grab_focus()
gajim.ged.register_event_handler('pep-received', ged.GUI1,
self._nec_pep_received)
gajim.ged.register_event_handler('vcard-received', ged.GUI1,
self._nec_vcard_received)
gajim.ged.register_event_handler('failed-decrypt', ged.GUI1,
self._nec_failed_decrypt)
gajim.ged.register_event_handler('chatstate-received', ged.GUI1,
self._nec_chatstate_received)
gajim.ged.register_event_handler('caps-received', ged.GUI1,
self._nec_caps_received)
# PluginSystem: adding GUI extension point for this ChatControl
# instance object
gajim.plugin_manager.gui_extension_point('chat_control', self)
def subscribe_events(self):
"""
Register listeners to the events class
"""
gajim.events.event_added_subscribe(self.on_event_added)
gajim.events.event_removed_subscribe(self.on_event_removed)
def unsubscribe_events(self):
"""
Unregister listeners to the events class
"""
gajim.events.event_added_unsubscribe(self.on_event_added)
gajim.events.event_removed_unsubscribe(self.on_event_removed)
def _update_toolbar(self):
if (gajim.connections[self.account].connected > 1 and not \
self.TYPE_ID == 'pm') or (self.contact.show != 'offline' and \
self.TYPE_ID == 'pm'):
emoticons_button = self.xml.get_object('emoticons_button')
emoticons_button.set_sensitive(True)
send_button = self.xml.get_object('send_button')
send_button.set_sensitive(True)
# Formatting
if self.contact.supports(NS_XHTML_IM) and not self.gpg_is_active:
self._formattings_button.set_sensitive(True)
self._formattings_button.set_tooltip_text(_(
'Show a list of formattings'))
else:
self._formattings_button.set_sensitive(False)
if self.contact.supports(NS_XHTML_IM):
self._formattings_button.set_tooltip_text(_('Formattings are '
'not available when GPG is active'))
else:
self._formattings_button.set_tooltip_text(_('This contact does '
'not support HTML'))
# Add to roster
if not isinstance(self.contact, GC_Contact) \
and _('Not in Roster') in self.contact.groups and \
gajim.connections[self.account].roster_supported:
self._add_to_roster_button.show()
else:
self._add_to_roster_button.hide()
# Jingle detection
if self.contact.supports(NS_JINGLE_ICE_UDP) and \
gajim.HAVE_FARSTREAM and self.contact.resource:
self.audio_available = self.contact.supports(NS_JINGLE_RTP_AUDIO)
self.video_available = self.contact.supports(NS_JINGLE_RTP_VIDEO)
else:
if self.video_available or self.audio_available:
self.stop_jingle()
self.video_available = False
self.audio_available = False
# Audio buttons
self._audio_button.set_sensitive(self.audio_available)
# Video buttons
self._video_button.set_sensitive(self.video_available)
# change tooltip text for audio and video buttons if python-farstream is
# not installed
audio_tooltip_text = _('Toggle audio session') + '\n'
video_tooltip_text = _('Toggle video session') + '\n'
if not gajim.HAVE_FARSTREAM:
ext_text = _('Feature not available, see Help->Features')
self._audio_button.set_tooltip_text(audio_tooltip_text + ext_text)
self._video_button.set_tooltip_text(video_tooltip_text + ext_text)
elif not self.audio_available:
ext_text = _('Feature not supported by remote client')
self._audio_button.set_tooltip_text(audio_tooltip_text + ext_text)
self._video_button.set_tooltip_text(video_tooltip_text + ext_text)
else:
self._audio_button.set_tooltip_text(audio_tooltip_text[:-1])
self._video_button.set_tooltip_text(video_tooltip_text[:-1])
# Send file
if ((self.contact.supports(NS_FILE) or \
self.contact.supports(NS_JINGLE_FILE_TRANSFER)) and \
(self.type_id == 'chat' or self.gc_contact.resource)) and \
self.contact.show != 'offline':
self._send_file_button.set_sensitive(True)
self._send_file_button.set_tooltip_text(_('Send files'))
else:
self._send_file_button.set_sensitive(False)
if not (self.contact.supports(NS_FILE) or self.contact.supports(
NS_JINGLE_FILE_TRANSFER)):
self._send_file_button.set_tooltip_text(_(
"This contact does not support file transfer."))
else:
self._send_file_button.set_tooltip_text(
_("You need to know the real JID of the contact to send "
"him or her a file."))
# Convert to GC
if gajim.config.get_per('accounts', self.account, 'is_zeroconf'):
self._convert_to_gc_button.set_no_show_all(True)
self._convert_to_gc_button.hide()
else:
if self.contact.supports(NS_MUC):
self._convert_to_gc_button.set_sensitive(True)
else:
self._convert_to_gc_button.set_sensitive(False)
# Information
if gajim.account_is_disconnected(self.account):
self._contact_information_button.set_sensitive(False)
else:
self._contact_information_button.set_sensitive(True)
def update_all_pep_types(self):
for pep_type in self._pep_images:
self.update_pep(pep_type)
def update_pep(self, pep_type):
if isinstance(self.contact, GC_Contact):
return
if pep_type not in self._pep_images:
return
pep = self.contact.pep
img = self._pep_images[pep_type]
if pep_type in pep:
img.set_from_pixbuf(pep[pep_type].asPixbufIcon())
img.set_tooltip_markup(pep[pep_type].asMarkupText())
img.show()
else:
img.hide()
def _nec_pep_received(self, obj):
if obj.conn.name != self.account:
return
if obj.jid != self.contact.jid:
return
if obj.pep_type == 'nickname':
self.update_ui()
self.parent_win.redraw_tab(self)
self.parent_win.show_title()
else:
self.update_pep(obj.pep_type)
def _update_jingle(self, jingle_type):
if jingle_type not in ('audio', 'video'):
return
banner_image = getattr(self, '_' + jingle_type + '_banner_image')
state = getattr(self, jingle_type + '_state')
if state == self.JINGLE_STATE_NULL:
banner_image.hide()
else:
banner_image.show()
if state == self.JINGLE_STATE_CONNECTING:
banner_image.set_from_stock(
gtk.STOCK_CONVERT, 1)
elif state == self.JINGLE_STATE_CONNECTION_RECEIVED:
banner_image.set_from_stock(
gtk.STOCK_NETWORK, 1)
elif state == self.JINGLE_STATE_CONNECTED:
banner_image.set_from_stock(
gtk.STOCK_CONNECT, 1)
elif state == self.JINGLE_STATE_ERROR:
banner_image.set_from_stock(
gtk.STOCK_DIALOG_WARNING, 1)
self.update_toolbar()
def update_audio(self):
self._update_jingle('audio')
hbox = self.xml.get_object('audio_buttons_hbox')
if self.audio_state == self.JINGLE_STATE_CONNECTED:
# Set volume from config
input_vol = gajim.config.get('audio_input_volume')
output_vol = gajim.config.get('audio_output_volume')
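# Clamp the configured volumes to the 0-100 range expected by the sliders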
input_vol = max(min(input_vol, 100), 0)
output_vol = max(min(output_vol, 100), 0)
self.xml.get_object('mic_hscale').set_value(input_vol)
self.xml.get_object('sound_hscale').set_value(output_vol)
# Show vbox
hbox.set_no_show_all(False)
hbox.show_all()
elif not self.audio_sid:
hbox.set_no_show_all(True)
hbox.hide()
def update_video(self):
self._update_jingle('video')
def change_resource(self, resource):
old_full_jid = self.get_full_jid()
self.resource = resource
new_full_jid = self.get_full_jid()
# update gajim.last_message_time
if old_full_jid in gajim.last_message_time[self.account]:
gajim.last_message_time[self.account][new_full_jid] = \
gajim.last_message_time[self.account][old_full_jid]
# update events
gajim.events.change_jid(self.account, old_full_jid, new_full_jid)
# update MessageWindow._controls
self.parent_win.change_jid(self.account, old_full_jid, new_full_jid)
def stop_jingle(self, sid=None, reason=None):
if self.audio_sid and sid in (self.audio_sid, None):
self.close_jingle_content('audio')
if self.video_sid and sid in (self.video_sid, None):
self.close_jingle_content('video')
def _set_jingle_state(self, jingle_type, state, sid=None, reason=None):
if jingle_type not in ('audio', 'video'):
return
if state in ('connecting', 'connected', 'stop', 'error') and reason:
txt = _('%(type)s state : %(state)s, reason: %(reason)s') % {
'type': jingle_type.capitalize(), 'state': state, 'reason': reason}
self.print_conversation(txt, 'info')
states = {'connecting': self.JINGLE_STATE_CONNECTING,
'connection_received': self.JINGLE_STATE_CONNECTION_RECEIVED,
'connected': self.JINGLE_STATE_CONNECTED,
'stop': self.JINGLE_STATE_NULL,
'error': self.JINGLE_STATE_ERROR}
jingle_state = states[state]
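# Do nothing on no-op transitions; an 'error' is only reported in the
# conversation above and leaves the stored state untouched. A 'stop' that
# refers to a session id other than the active one is ignored as well.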
if getattr(self, jingle_type + '_state') == jingle_state or state == 'error':
return
if state == 'stop' and getattr(self, jingle_type + '_sid') not in (None, sid):
return
setattr(self, jingle_type + '_state', jingle_state)
if jingle_state == self.JINGLE_STATE_NULL:
setattr(self, jingle_type + '_sid', None)
if state in ('connection_received', 'connecting'):
setattr(self, jingle_type + '_sid', sid)
getattr(self, '_' + jingle_type + '_button').set_active(jingle_state != self.JINGLE_STATE_NULL)
getattr(self, 'update_' + jingle_type)()
def set_audio_state(self, state, sid=None, reason=None):
self._set_jingle_state('audio', state, sid=sid, reason=reason)
def set_video_state(self, state, sid=None, reason=None):
self._set_jingle_state('video', state, sid=sid, reason=reason)
def _get_audio_content(self):
session = gajim.connections[self.account].get_jingle_session(
self.contact.get_full_jid(), self.audio_sid)
return session.get_content('audio')
def on_num_button_pressed(self, widget, num):
self._get_audio_content()._start_dtmf(num)
def on_num_button_released(self, released):
self._get_audio_content()._stop_dtmf()
def on_dtmf_button_clicked(self, widget):
self.dtmf_window.show_all()
def on_dtmf_window_focus_out_event(self, widget, event):
self.dtmf_window.hide()
def on_mic_hscale_value_changed(self, widget, value):
self._get_audio_content().set_mic_volume(value / 100)
# Save volume to config
gajim.config.set('audio_input_volume', value)
def on_sound_hscale_value_changed(self, widget, value):
self._get_audio_content().set_out_volume(value / 100)
# Save volume to config
gajim.config.set('audio_output_volume', value)
def on_avatar_eventbox_enter_notify_event(self, widget, event):
"""
Mouse entered the avatar eventbox; if a bigger avatar is available, add a
timeout to show it after 0.5 sec
"""
jid = self.contact.jid
avatar_pixbuf = gtkgui_helpers.get_avatar_pixbuf_from_cache(jid)
if avatar_pixbuf in ('ask', None):
return
avatar_w = avatar_pixbuf.get_width()
avatar_h = avatar_pixbuf.get_height()
scaled_buf = self.xml.get_object('avatar_image').get_pixbuf()
scaled_buf_w = scaled_buf.get_width()
scaled_buf_h = scaled_buf.get_height()
# do we have something bigger to show?
if avatar_w > scaled_buf_w or avatar_h > scaled_buf_h:
# wait for 0.5 sec in case we leave earlier
if self.show_bigger_avatar_timeout_id is not None:
gobject.source_remove(self.show_bigger_avatar_timeout_id)
self.show_bigger_avatar_timeout_id = gobject.timeout_add(500,
self.show_bigger_avatar, widget)
def on_avatar_eventbox_leave_notify_event(self, widget, event):
"""
Mouse left the eventbox area that holds the avatar image
"""
# did we add a timeout? if yes remove it
if self.show_bigger_avatar_timeout_id is not None:
gobject.source_remove(self.show_bigger_avatar_timeout_id)
self.show_bigger_avatar_timeout_id = None
def on_avatar_eventbox_button_press_event(self, widget, event):
"""
If right-clicked, show popup
"""
if event.button == 3: # right click
menu = gtk.Menu()
menuitem = gtk.ImageMenuItem(gtk.STOCK_SAVE_AS)
id_ = menuitem.connect('activate',
gtkgui_helpers.on_avatar_save_as_menuitem_activate,
self.contact.jid, self.contact.get_shown_name())
self.handlers[id_] = menuitem
menu.append(menuitem)
menu.connect('selection-done', lambda w: w.destroy())
# show the menu
menu.show_all()
menu.popup(None, None, None, event.button, event.time)
return True
def on_location_eventbox_button_release_event(self, widget, event):
if 'location' in self.contact.pep:
location = self.contact.pep['location']._pep_specific_data
if ('lat' in location) and ('lon' in location):
uri = 'http://www.openstreetmap.org/?' + \
'mlat=%(lat)s&mlon=%(lon)s&zoom=16' % {'lat': location['lat'],
'lon': location['lon']}
helpers.launch_browser_mailer('url', uri)
def on_location_eventbox_leave_notify_event(self, widget, event):
"""
Just left the location eventbox area, so reset the cursor to the default pointer
"""
cursor = gtk.gdk.Cursor(gtk.gdk.LEFT_PTR)
self.parent_win.window.window.set_cursor(cursor)
def on_location_eventbox_enter_notify_event(self, widget, event):
cursor = gtk.gdk.Cursor(gtk.gdk.HAND2)
self.parent_win.window.window.set_cursor(cursor)
def _on_window_motion_notify(self, widget, event):
"""
It gets called no matter if it is the active window or not
"""
if self.parent_win.get_active_jid() == self.contact.jid:
# if window is the active one, change vars assisting chatstate
self.mouse_over_in_last_5_secs = True
self.mouse_over_in_last_30_secs = True
def _schedule_activity_timers(self):
self.possible_paused_timeout_id = gobject.timeout_add_seconds(5,
self.check_for_possible_paused_chatstate, None)
self.possible_inactive_timeout_id = gobject.timeout_add_seconds(30,
self.check_for_possible_inactive_chatstate, None)
def update_ui(self):
# The name banner is drawn here
ChatControlBase.update_ui(self)
self.update_toolbar()
def _update_banner_state_image(self):
contact = gajim.contacts.get_contact_with_highest_priority(self.account,
self.contact.jid)
if not contact or self.resource:
# For transient contacts
contact = self.contact
show = contact.show
jid = contact.jid
# Set banner image
img_32 = gajim.interface.roster.get_appropriate_state_images(jid,
size='32', icon_name=show)
img_16 = gajim.interface.roster.get_appropriate_state_images(jid,
icon_name=show)
if show in img_32 and img_32[show].get_pixbuf():
# we have 32x32! use it!
banner_image = img_32[show]
use_size_32 = True
else:
banner_image = img_16[show]
use_size_32 = False
banner_status_img = self.xml.get_object('banner_status_image')
if banner_image.get_storage_type() == gtk.IMAGE_ANIMATION:
banner_status_img.set_from_animation(banner_image.get_animation())
else:
pix = banner_image.get_pixbuf()
if pix is not None:
if use_size_32:
banner_status_img.set_from_pixbuf(pix)
else: # we need to scale 16x16 to 32x32
scaled_pix = pix.scale_simple(32, 32,
gtk.gdk.INTERP_BILINEAR)
banner_status_img.set_from_pixbuf(scaled_pix)
def draw_banner_text(self):
"""
Draw the text in the fat line at the top of the window that houses the
name, jid
"""
contact = self.contact
jid = contact.jid
banner_name_label = self.xml.get_object('banner_name_label')
name = contact.get_shown_name()
if self.resource:
name += '/' + self.resource
if self.TYPE_ID == message_control.TYPE_PM:
name = i18n.direction_mark + _(
'%(nickname)s from group chat %(room_name)s') % \
{'nickname': name, 'room_name': self.room_name}
name = i18n.direction_mark + gobject.markup_escape_text(name)
# We know our contacts nick, but if another contact has the same nick
# in another account we need to also display the account.
# except if we are talking to two different resources of the same contact
acct_info = ''
for account in gajim.contacts.get_accounts():
if account == self.account:
continue
if acct_info: # We already found a contact with same nick
break
for jid in gajim.contacts.get_jid_list(account):
other_contact_ = \
gajim.contacts.get_first_contact_from_jid(account, jid)
if other_contact_.get_shown_name() == self.contact.get_shown_name():
acct_info = i18n.direction_mark + ' (%s)' % \
gobject.markup_escape_text(self.account)
break
status = contact.status
if status is not None:
banner_name_label.set_ellipsize(pango.ELLIPSIZE_END)
self.banner_status_label.set_ellipsize(pango.ELLIPSIZE_END)
status_reduced = helpers.reduce_chars_newlines(status, max_lines=1)
else:
status_reduced = ''
status_escaped = gobject.markup_escape_text(status_reduced)
font_attrs, font_attrs_small = self.get_font_attrs()
st = gajim.config.get('displayed_chat_state_notifications')
cs = contact.chatstate
if cs and st in ('composing_only', 'all'):
if contact.show == 'offline':
chatstate = ''
elif st == 'all' or cs == 'composing':
chatstate = helpers.get_uf_chatstate(cs)
else:
chatstate = ''
label_text = '<span %s>%s</span><span %s>%s %s</span>' \
% (font_attrs, name, font_attrs_small, acct_info, chatstate)
if acct_info:
acct_info = i18n.direction_mark + ' ' + acct_info
label_tooltip = '%s%s %s' % (name, acct_info, chatstate)
else:
# weight="heavy" size="x-large"
label_text = '<span %s>%s</span><span %s>%s</span>' % \
(font_attrs, name, font_attrs_small, acct_info)
if acct_info:
acct_info = i18n.direction_mark + ' ' + acct_info
label_tooltip = '%s%s' % (name, acct_info)
if status_escaped:
status_text = self.urlfinder.sub(self.make_href, status_escaped)
status_text = '<span %s>%s</span>' % (font_attrs_small, status_text)
self.banner_status_label.set_tooltip_text(status)
self.banner_status_label.set_no_show_all(False)
self.banner_status_label.show()
else:
status_text = ''
self.banner_status_label.hide()
self.banner_status_label.set_no_show_all(True)
self.banner_status_label.set_markup(status_text)
# setup the label that holds name and jid
banner_name_label.set_markup(label_text)
banner_name_label.set_tooltip_text(label_tooltip)
def close_jingle_content(self, jingle_type):
sid = getattr(self, jingle_type + '_sid')
if not sid:
return
setattr(self, jingle_type + '_sid', None)
setattr(self, jingle_type + '_state', self.JINGLE_STATE_NULL)
session = gajim.connections[self.account].get_jingle_session(
self.contact.get_full_jid(), sid)
if session:
content = session.get_content(jingle_type)
if content:
session.remove_content(content.creator, content.name)
getattr(self, '_' + jingle_type + '_button').set_active(False)
getattr(self, 'update_' + jingle_type)()
def on_jingle_button_toggled(self, widget, jingle_type):
img_name = 'gajim-%s_%s' % ({'audio': 'mic', 'video': 'cam'}[jingle_type],
{True: 'active', False: 'inactive'}[widget.get_active()])
path_to_img = gtkgui_helpers.get_icon_path(img_name)
if widget.get_active():
if getattr(self, jingle_type + '_state') == \
self.JINGLE_STATE_NULL:
if jingle_type == 'video':
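# For video we hand the native window handles of the outgoing/incoming
# drawing areas (HWND on Windows, XID on X11) to the connection so the
# video streams can be rendered into them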
video_hbox = self.xml.get_object('video_hbox')
video_hbox.set_no_show_all(False)
if gajim.config.get('video_see_self'):
fixed = self.xml.get_object('outgoing_fixed')
fixed.set_no_show_all(False)
video_hbox.show_all()
if os.name == 'nt':
out_xid = self.xml.get_object(
'outgoing_drawingarea').window.handle
else:
out_xid = self.xml.get_object(
'outgoing_drawingarea').window.xid
else:
out_xid = None
video_hbox.show_all()
if os.name == 'nt':
in_xid = self.xml.get_object('incoming_drawingarea').\
window.handle
else:
in_xid = self.xml.get_object('incoming_drawingarea').\
window.xid
sid = gajim.connections[self.account].start_video(
self.contact.get_full_jid(), in_xid, out_xid)
else:
sid = getattr(gajim.connections[self.account],
'start_' + jingle_type)(self.contact.get_full_jid())
getattr(self, 'set_' + jingle_type + '_state')('connecting', sid)
else:
video_hbox = self.xml.get_object('video_hbox')
video_hbox.set_no_show_all(True)
video_hbox.hide()
fixed = self.xml.get_object('outgoing_fixed')
fixed.set_no_show_all(True)
self.close_jingle_content(jingle_type)
img = getattr(self, '_' + jingle_type + '_button').get_property('image')
img.set_from_file(path_to_img)
def on_audio_button_toggled(self, widget):
self.on_jingle_button_toggled(widget, 'audio')
def on_video_button_toggled(self, widget):
self.on_jingle_button_toggled(widget, 'video')
def _toggle_gpg(self):
if not self.gpg_is_active and not self.contact.keyID:
dialogs.ErrorDialog(_('No OpenPGP key assigned'),
_('No OpenPGP key is assigned to this contact. So you cannot '
'encrypt messages with OpenPGP.'))
return
ec = gajim.encrypted_chats[self.account]
if self.gpg_is_active:
# Disable encryption
ec.remove(self.contact.jid)
self.gpg_is_active = False
loggable = False
msg = _('OpenPGP encryption disabled')
ChatControlBase.print_conversation_line(self, msg, 'status', '',
None)
if self.session:
self.session.loggable = True
else:
# Enable encryption
ec.append(self.contact.jid)
self.gpg_is_active = True
msg = _('OpenPGP encryption enabled')
ChatControlBase.print_conversation_line(self, msg, 'status', '',
None)
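# Whether the encrypted session is logged depends on the per-account
# 'log_encrypted_sessions' setting and, when no session exists, additionally
# on the general logging policy for this contact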
loggable = gajim.config.get_per('accounts', self.account,
'log_encrypted_sessions')
if self.session:
self.session.loggable = loggable
loggable = self.session.is_loggable()
else:
loggable = loggable and gajim.config.should_log(self.account,
self.contact.jid)
if loggable:
msg = _('Session WILL be logged')
else:
msg = _('Session WILL NOT be logged')
ChatControlBase.print_conversation_line(self, msg,
'status', '', None)
gajim.config.set_per('contacts', self.contact.jid,
'gpg_enabled', self.gpg_is_active)
self._show_lock_image(self.gpg_is_active, 'OpenPGP',
self.gpg_is_active, loggable, True)
def _show_lock_image(self, visible, enc_type='', enc_enabled=False,
chat_logged=False, authenticated=False):
"""
Set lock icon visibility and create tooltip
"""
#encryption %s active
status_string = enc_enabled and _('is') or _('is NOT')
#chat session %s be logged
logged_string = chat_logged and _('will') or _('will NOT')
if authenticated:
#About encrypted chat session
authenticated_string = _('and authenticated')
img_path = gtkgui_helpers.get_icon_path('security-high')
else:
#About encrypted chat session
authenticated_string = _('and NOT authenticated')
img_path = gtkgui_helpers.get_icon_path('security-low')
self.lock_image.set_from_file(img_path)
#status will become 'is' or 'is NOT', authenticated will become
#'and authenticated' or 'and NOT authenticated', logged will become
#'will' or 'will NOT'
tooltip = _('%(type)s encryption %(status)s active %(authenticated)s.\n'
'Your chat session %(logged)s be logged.') % {'type': enc_type,
'status': status_string, 'authenticated': authenticated_string,
'logged': logged_string}
self.authentication_button.set_tooltip_text(tooltip)
self.widget_set_visible(self.authentication_button, not visible)
self.lock_image.set_sensitive(enc_enabled)
def _on_authentication_button_clicked(self, widget):
if self.gpg_is_active:
dialogs.GPGInfoWindow(self, self.parent_win.window)
elif self.session and self.session.enable_encryption:
dialogs.ESessionInfoWindow(self.session, self.parent_win.window)
def send_message(self, message, keyID='', chatstate=None, xhtml=None,
process_commands=True, attention=False):
"""
Send a message to contact
"""
message = helpers.remove_invalid_xml_chars(message)
if message in ('', None, '\n'):
return None
# refresh timers
self.reset_kbd_mouse_timeout_vars()
contact = self.contact
encrypted = bool(self.session) and self.session.enable_encryption
keyID = ''
if self.gpg_is_active:
keyID = contact.keyID
encrypted = True
if not keyID:
keyID = 'UNKNOWN'
if self.contact.jid == gajim.get_jid_from_account(self.account):
chatstates_on = False
else:
chatstates_on = gajim.config.get('outgoing_chat_state_notifications') != \
'disabled'
chatstate_to_send = None
if chatstates_on and contact is not None:
if contact.supports(NS_CHATSTATES):
# send active chatstate on every message (as XEP says)
chatstate_to_send = 'active'
contact.our_chatstate = 'active'
gobject.source_remove(self.possible_paused_timeout_id)
gobject.source_remove(self.possible_inactive_timeout_id)
self._schedule_activity_timers()
def _on_sent(msg_stanza, message, encrypted, xhtml, label, old_txt):
id_ = msg_stanza.getID()
xep0184_id = None
if self.contact.jid != gajim.get_jid_from_account(self.account):
if gajim.config.get_per('accounts', self.account, 'request_receipt'):
xep0184_id = id_
if label:
displaymarking = label.getTag('displaymarking')
else:
displaymarking = None
if self.correcting and \
self.conv_textview.last_sent_message_marks[0]:
self.conv_textview.correct_last_sent_message(message, xhtml,
self.get_our_nick(), old_txt)
self.correcting = False
self.msg_textview.modify_base(gtk.STATE_NORMAL,
self.old_message_tv_color)
return
self.print_conversation(message, self.contact.jid,
encrypted=encrypted, xep0184_id=xep0184_id, xhtml=xhtml,
displaymarking=displaymarking)
ChatControlBase.send_message(self, message, keyID, type_='chat',
chatstate=chatstate_to_send, xhtml=xhtml, callback=_on_sent,
callback_args=[message, encrypted, xhtml, self.get_seclabel(),
self.last_sent_txt], process_commands=process_commands,
attention=attention)
def check_for_possible_paused_chatstate(self, arg):
"""
Did we move the mouse over that window or write something in the message
textview in the last 5 seconds? If yes - we go active for mouse, composing
for keyboard. If not - we go paused if we were previously composing
"""
contact = self.contact
jid = contact.jid
current_state = contact.our_chatstate
if current_state is False: # jid doesn't support chatstates
return False # stop looping
message_buffer = self.msg_textview.get_buffer()
if self.kbd_activity_in_last_5_secs and message_buffer.get_char_count():
# Only composing if the keyboard activity was in text entry
self.send_chatstate('composing')
elif self.mouse_over_in_last_5_secs and current_state == 'inactive' and\
jid == self.parent_win.get_active_jid():
self.send_chatstate('active')
else:
if current_state == 'composing':
self.send_chatstate('paused') # pause composing
# assume no activity and let the motion-notify or 'insert-text' handlers set
# them back to True; refresh the 30-second vars too or else it's 30 - 5 = 25 seconds!
self.reset_kbd_mouse_timeout_vars()
return True # loop forever
def check_for_possible_inactive_chatstate(self, arg):
"""
Did we move the mouse over that window or write something in the message
textview in the last 30 seconds? If yes - we go active. If not - we go inactive
"""
contact = self.contact
current_state = contact.our_chatstate
if current_state is False: # jid doesn't support chatstates
return False # stop looping
if self.mouse_over_in_last_5_secs or self.kbd_activity_in_last_5_secs:
return True # loop forever
if not (self.mouse_over_in_last_30_secs or \
self.kbd_activity_in_last_30_secs):
self.send_chatstate('inactive', contact)
# assume no activity and let the motion-notify or 'insert-text' handlers set
# them back to True; refresh the 30-second vars too or else it's 30 - 5 = 25 seconds!
self.reset_kbd_mouse_timeout_vars()
return True # loop forever
def reset_kbd_mouse_timeout_vars(self):
self.kbd_activity_in_last_5_secs = False
self.mouse_over_in_last_5_secs = False
self.mouse_over_in_last_30_secs = False
self.kbd_activity_in_last_30_secs = False
def on_cancel_session_negotiation(self):
msg = _('Session negotiation cancelled')
ChatControlBase.print_conversation_line(self, msg, 'status', '', None)
def print_archiving_session_details(self):
"""
Print esession settings to textview
"""
archiving = bool(self.session) and isinstance(self.session,
ArchivingStanzaSession) and self.session.archiving
if archiving:
msg = _('This session WILL be archived on server')
else:
msg = _('This session WILL NOT be archived on server')
ChatControlBase.print_conversation_line(self, msg, 'status', '', None)
def print_esession_details(self):
"""
Print esession settings to textview
"""
e2e_is_active = bool(self.session) and self.session.enable_encryption
if e2e_is_active:
msg = _('This session is encrypted')
if self.session.is_loggable():
msg += _(' and WILL be logged')
else:
msg += _(' and WILL NOT be logged')
ChatControlBase.print_conversation_line(self, msg, 'status', '', None)
if not self.session.verified_identity:
ChatControlBase.print_conversation_line(self, _("Remote contact's identity not verified. Click the shield button for more details."), 'status', '', None)
else:
msg = _('E2E encryption disabled')
ChatControlBase.print_conversation_line(self, msg, 'status', '', None)
self._show_lock_image(e2e_is_active, 'E2E', e2e_is_active, self.session and \
self.session.is_loggable(), self.session and self.session.verified_identity)
def print_session_details(self, old_session=None):
if isinstance(self.session, EncryptedStanzaSession) or \
(old_session and isinstance(old_session, EncryptedStanzaSession)):
self.print_esession_details()
elif isinstance(self.session, ArchivingStanzaSession):
self.print_archiving_session_details()
def get_our_nick(self):
return gajim.nicks[self.account]
def print_conversation(self, text, frm='', tim=None, encrypted=False,
subject=None, xhtml=None, simple=False, xep0184_id=None,
displaymarking=None, msg_log_id=None, correct_id=None):
"""
Print a line in the conversation
If frm is set to status: it's a status message.
If frm is set to error: it's an error message. The difference between
status and error is mainly that with error, the message counts as a new
message (in systray and in control).
If frm is set to info: it's an information message.
If frm is set to print_queue: it's incoming from the queue.
If frm is set to another value: it's an outgoing message.
If frm is not set: it's an incoming message.
"""
contact = self.contact
if frm == 'status':
if not gajim.config.get('print_status_in_chats'):
return
kind = 'status'
name = ''
elif frm == 'error':
kind = 'error'
name = ''
elif frm == 'info':
kind = 'info'
name = ''
else:
if self.session and self.session.enable_encryption:
# ESessions
if not encrypted:
msg = _('The following message was NOT encrypted')
ChatControlBase.print_conversation_line(self, msg, 'status',
'', tim)
else:
# GPG encryption
if encrypted and not self.gpg_is_active:
msg = _('The following message was encrypted')
ChatControlBase.print_conversation_line(self, msg, 'status',
'', tim)
# turn on OpenPGP if this was in fact a XEP-0027 encrypted
# message
if encrypted == 'xep27':
self._toggle_gpg()
elif not encrypted and self.gpg_is_active:
msg = _('The following message was NOT encrypted')
ChatControlBase.print_conversation_line(self, msg, 'status',
'', tim)
if not frm:
kind = 'incoming'
name = contact.get_shown_name()
elif frm == 'print_queue': # incoming message, but do not update time
kind = 'incoming_queue'
name = contact.get_shown_name()
else:
kind = 'outgoing'
name = self.get_our_nick()
if not xhtml and not (encrypted and self.gpg_is_active) and \
gajim.config.get('rst_formatting_outgoing_messages'):
from common.rst_xhtml_generator import create_xhtml
xhtml = create_xhtml(text)
if xhtml:
xhtml = '<body xmlns="%s">%s</body>' % (NS_XHTML, xhtml)
ChatControlBase.print_conversation_line(self, text, kind, name, tim,
subject=subject, xhtml=xhtml,
simple=simple, xep0184_id=xep0184_id, displaymarking=displaymarking,
msg_log_id=msg_log_id, correct_id=correct_id)
def get_tab_label(self, chatstate):
unread = ''
if self.resource:
jid = self.contact.get_full_jid()
else:
jid = self.contact.jid
num_unread = len(gajim.events.get_events(self.account, jid,
['printed_' + self.type_id, self.type_id]))
if num_unread == 1 and not gajim.config.get('show_unread_tab_icon'):
unread = '*'
elif num_unread > 1:
unread = '[' + unicode(num_unread) + ']'
# Draw tab label using chatstate
theme = gajim.config.get('roster_theme')
color = None
if not chatstate:
chatstate = self.contact.chatstate
if chatstate is not None:
if chatstate == 'composing':
color = gajim.config.get_per('themes', theme,
'state_composing_color')
elif chatstate == 'inactive':
color = gajim.config.get_per('themes', theme,
'state_inactive_color')
elif chatstate == 'gone':
color = gajim.config.get_per('themes', theme,
'state_gone_color')
elif chatstate == 'paused':
color = gajim.config.get_per('themes', theme,
'state_paused_color')
if color:
# We set the color for when it's the current tab or not
color = gtk.gdk.colormap_get_system().alloc_color(color)
# In an inactive tab, make the color lighter against the darker inactive
# background
if chatstate in ('inactive', 'gone') and\
self.parent_win.get_active_control() != self:
color = self.lighten_color(color)
else: # active or not chatstate, get color from gtk
color = self.parent_win.notebook.style.fg[gtk.STATE_ACTIVE]
name = self.contact.get_shown_name()
if self.resource:
name += '/' + self.resource
label_str = gobject.markup_escape_text(name)
if num_unread: # if unread, text in the label becomes bold
label_str = '<b>' + unread + label_str + '</b>'
return (label_str, color)
def get_tab_image(self, count_unread=True):
if self.resource:
jid = self.contact.get_full_jid()
else:
jid = self.contact.jid
if count_unread:
num_unread = len(gajim.events.get_events(self.account, jid,
['printed_' + self.type_id, self.type_id]))
else:
num_unread = 0
# Set tab image (always 16x16); unread messages show the 'event' image
tab_img = None
if num_unread and gajim.config.get('show_unread_tab_icon'):
img_16 = gajim.interface.roster.get_appropriate_state_images(
self.contact.jid, icon_name='event')
tab_img = img_16['event']
else:
contact = gajim.contacts.get_contact_with_highest_priority(
self.account, self.contact.jid)
if not contact or self.resource:
# For transient contacts
contact = self.contact
img_16 = gajim.interface.roster.get_appropriate_state_images(
self.contact.jid, icon_name=contact.show)
tab_img = img_16[contact.show]
return tab_img
def prepare_context_menu(self, hide_buttonbar_items=False):
"""
Set the compact view menuitem active state; set the active and sensitivity
state for toggle_gpg_menuitem; set sensitivity for history_menuitem (False
for transports) and file_transfer_menuitem; and hide()/show()
add_to_roster_menuitem
"""
if gajim.jid_is_transport(self.contact.jid):
menu = gui_menu_builder.get_transport_menu(self.contact,
self.account)
else:
menu = gui_menu_builder.get_contact_menu(self.contact, self.account,
use_multiple_contacts=False, show_start_chat=False,
show_encryption=True, control=self,
show_buttonbar_items=not hide_buttonbar_items)
return menu
def send_chatstate(self, state, contact=None):
"""
Send OUR chatstate as a STANDALONE chat state message (i.e. no body)
to contact only if the new chatstate is different from the previous one.
If jid is not specified, send to the active tab
"""
# XEP-0085 does not allow resending the same chatstate;
# this function checks for that and just returns, so it's safe to call it
# with the same state.
# This function also guards against violations in state transitions
# (more on that: http://xmpp.org/extensions/xep-0085.html#statechart)
# Do not send anything if we have chat state notifications disabled;
# that means we won't reply to the <active/> from the other peer,
# so we do not advertise XEP-0085 capabilities
chatstate_setting = gajim.config.get('outgoing_chat_state_notifications')
if chatstate_setting == 'disabled':
return
if self.contact.jid == gajim.get_jid_from_account(self.account):
return
elif chatstate_setting == 'composing_only' and state != 'active' and\
state != 'composing':
return
if contact is None:
contact = self.parent_win.get_active_contact()
if contact is None:
# contact was from pm in MUC, and left the room so contact is None
# so we cannot send chatstate anymore
return
# Don't send chatstates to offline contacts
if contact.show == 'offline':
return
if not contact.supports(NS_CHATSTATES):
return
if contact.our_chatstate is False:
return
# if the new state we wanna send (state) equals
# the current state (contact.our_chatstate) then return
if contact.our_chatstate == state:
return
# if we're inactive, prevent composing (XEP violation)
if contact.our_chatstate == 'inactive' and state == 'composing':
# go active before
gajim.nec.push_outgoing_event(MessageOutgoingEvent(None,
account=self.account, jid=self.contact.jid, chatstate='active',
control=self))
contact.our_chatstate = 'active'
self.reset_kbd_mouse_timeout_vars()
gajim.nec.push_outgoing_event(MessageOutgoingEvent(None,
account=self.account, jid=self.contact.jid, chatstate=state,
msg_id=contact.msg_log_id, control=self))
contact.our_chatstate = state
if state == 'active':
self.reset_kbd_mouse_timeout_vars()
def shutdown(self):
# PluginSystem: removing GUI extension points connected with ChatControl
# instance object
gajim.plugin_manager.remove_gui_extension_point('chat_control', self)
gajim.ged.remove_event_handler('pep-received', ged.GUI1,
self._nec_pep_received)
gajim.ged.remove_event_handler('vcard-received', ged.GUI1,
self._nec_vcard_received)
gajim.ged.remove_event_handler('failed-decrypt', ged.GUI1,
self._nec_failed_decrypt)
gajim.ged.remove_event_handler('chatstate-received', ged.GUI1,
self._nec_chatstate_received)
gajim.ged.remove_event_handler('caps-received', ged.GUI1,
self._nec_caps_received)
self.unsubscribe_events()
# Send 'gone' chatstate
self.send_chatstate('gone', self.contact)
self.contact.chatstate = None
self.contact.our_chatstate = None
for jingle_type in ('audio', 'video'):
self.close_jingle_content(jingle_type)
# disconnect self from session
if self.session:
self.session.control = None
# Disconnect timer callbacks
gobject.source_remove(self.possible_paused_timeout_id)
gobject.source_remove(self.possible_inactive_timeout_id)
# Remove bigger avatar window
if self.bigger_avatar_window:
self.bigger_avatar_window.destroy()
# Clean events
gajim.events.remove_events(self.account, self.get_full_jid(),
types=['printed_' + self.type_id, self.type_id])
# Remove contact instance if contact has been removed
key = (self.contact.jid, self.account)
roster = gajim.interface.roster
if key in roster.contacts_to_be_removed.keys() and \
not roster.contact_has_pending_roster_events(self.contact,
self.account):
backend = roster.contacts_to_be_removed[key]['backend']
del roster.contacts_to_be_removed[key]
roster.remove_contact(self.contact.jid, self.account, force=True,
backend=backend)
# remove all register handlers on widgets, created by self.xml
# to prevent circular references among objects
for i in self.handlers.keys():
if self.handlers[i].handler_is_connected(i):
self.handlers[i].disconnect(i)
del self.handlers[i]
self.conv_textview.del_handlers()
if gajim.config.get('use_speller') and HAS_GTK_SPELL:
spell_obj = gtkspell.get_from_text_view(self.msg_textview)
if spell_obj:
spell_obj.detach()
self.msg_textview.destroy()
# PluginSystem: calling shutdown of super class (ChatControlBase) to let
# it remove it's GUI extension points
super(ChatControl, self).shutdown()
def minimizable(self):
return False
def safe_shutdown(self):
return False
def allow_shutdown(self, method, on_yes, on_no, on_minimize):
if time.time() - gajim.last_message_time[self.account]\
[self.get_full_jid()] < 2:
# 2 seconds
def on_ok():
on_yes(self)
def on_cancel():
on_no(self)
dialogs.ConfirmationDialog(
#%s is being replaced in the code with JID
_('You just received a new message from "%s"') % \
self.contact.jid,
_('If you close this tab and you have history disabled, '\
'this message will be lost.'), on_response_ok=on_ok,
on_response_cancel=on_cancel,
transient_for=self.parent_win.window)
return
on_yes(self)
def _nec_chatstate_received(self, obj):
"""
Handle incoming chatstate that jid SENT TO us
"""
self.draw_banner_text()
# update chatstate in tab for this chat
self.parent_win.redraw_tab(self, self.contact.chatstate)
def _nec_caps_received(self, obj):
if obj.conn.name != self.account:
return
if self.TYPE_ID == 'chat' and obj.jid != self.contact.jid:
return
if self.TYPE_ID == 'pm' and obj.fjid != self.contact.jid:
return
self.update_ui()
def _nec_ping_reply(self, obj):
if obj.control:
if obj.control != self:
return
else:
if self.contact != obj.contact:
return
self.print_conversation(_('Pong! (%s s.)') % obj.seconds, 'status')
def set_control_active(self, state):
ChatControlBase.set_control_active(self, state)
# send chatstate inactive to the one we're leaving
# and active to the one we visit
if state:
message_buffer = self.msg_textview.get_buffer()
if message_buffer.get_char_count():
self.send_chatstate('paused', self.contact)
else:
self.send_chatstate('active', self.contact)
self.reset_kbd_mouse_timeout_vars()
gobject.source_remove(self.possible_paused_timeout_id)
gobject.source_remove(self.possible_inactive_timeout_id)
self._schedule_activity_timers()
else:
self.send_chatstate('inactive', self.contact)
# Hide bigger avatar window
if self.bigger_avatar_window:
self.bigger_avatar_window.destroy()
self.bigger_avatar_window = None
# Re-show the small avatar
self.show_avatar()
def show_avatar(self):
if not gajim.config.get('show_avatar_in_chat'):
return
jid_with_resource = self.contact.get_full_jid()
pixbuf = gtkgui_helpers.get_avatar_pixbuf_from_cache(jid_with_resource)
if pixbuf == 'ask':
# we don't have the vcard
if self.TYPE_ID == message_control.TYPE_PM:
if self.gc_contact.jid:
# We know the real jid of this contact
real_jid = self.gc_contact.jid
if self.gc_contact.resource:
real_jid += '/' + self.gc_contact.resource
else:
real_jid = jid_with_resource
gajim.connections[self.account].request_vcard(real_jid,
jid_with_resource)
else:
gajim.connections[self.account].request_vcard(jid_with_resource)
return
elif pixbuf:
scaled_pixbuf = gtkgui_helpers.get_scaled_pixbuf(pixbuf, 'chat')
else:
scaled_pixbuf = None
image = self.xml.get_object('avatar_image')
image.set_from_pixbuf(scaled_pixbuf)
image.show_all()
def _nec_vcard_received(self, obj):
if obj.conn.name != self.account:
return
j = gajim.get_jid_without_resource(self.contact.jid)
if obj.jid != j:
return
self.show_avatar()
def _on_drag_data_received(self, widget, context, x, y, selection,
target_type, timestamp):
if not selection.data:
return
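# Two kinds of drops are handled: a list of file URIs (start a file
# transfer for each file) and a roster contact row (invite the dropped
# contact together with this one into a new group chat)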
if self.TYPE_ID == message_control.TYPE_PM:
c = self.gc_contact
else:
c = self.contact
if target_type == self.TARGET_TYPE_URI_LIST:
if not c.resource: # If no resource is known, we can't send a file
return
uri = selection.data.strip()
uri_splitted = uri.split() # we may have more than one file dropped
for uri in uri_splitted:
path = helpers.get_file_path_from_dnd_dropped_uri(uri)
if os.path.isfile(path): # is it file?
ft = gajim.interface.instances['file_transfers']
ft.send_file(self.account, c, path)
return
# chat2muc
treeview = gajim.interface.roster.tree
model = treeview.get_model()
data = selection.data
path = treeview.get_selection().get_selected_rows()[1][0]
iter_ = model.get_iter(path)
type_ = model[iter_][2]
if type_ != 'contact': # source is not a contact
return
dropped_jid = data.decode('utf-8')
dropped_transport = gajim.get_transport_name_from_jid(dropped_jid)
c_transport = gajim.get_transport_name_from_jid(c.jid)
if dropped_transport or c_transport:
return # transport contacts cannot be invited
dialogs.TransformChatToMUC(self.account, [c.jid], [dropped_jid])
def _on_message_tv_buffer_changed(self, textbuffer):
self.kbd_activity_in_last_5_secs = True
self.kbd_activity_in_last_30_secs = True
if textbuffer.get_char_count():
self.send_chatstate('composing', self.contact)
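# While the user is typing, optionally auto-negotiate an encrypted ESession
# (if enabled and supported by the contact) or server-side archiving
# (XEP-0136) for the conversation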
e2e_is_active = self.session and \
self.session.enable_encryption
e2e_pref = gajim.config.get_per('accounts', self.account,
'enable_esessions') and gajim.config.get_per('accounts',
self.account, 'autonegotiate_esessions') and gajim.config.get_per(
'contacts', self.contact.jid, 'autonegotiate_esessions')
want_e2e = not e2e_is_active and not self.gpg_is_active \
and e2e_pref
if want_e2e and not self.no_autonegotiation \
and gajim.HAVE_PYCRYPTO and self.contact.supports(NS_ESESSION):
self.begin_e2e_negotiation()
elif (not self.session or not self.session.status) and \
gajim.connections[self.account].archiving_136_supported:
self.begin_archiving_negotiation()
else:
self.send_chatstate('active', self.contact)
def restore_conversation(self):
jid = self.contact.jid
# don't restore lines if it's a transport
if gajim.jid_is_transport(jid):
return
# number of messages that are in queue and are already logged, we want
# to avoid duplication
pending_how_many = len(gajim.events.get_events(self.account, jid,
['chat', 'pm']))
if self.resource:
pending_how_many += len(gajim.events.get_events(self.account,
self.contact.get_full_jid(), ['chat', 'pm']))
try:
rows = gajim.logger.get_last_conversation_lines(
jid, pending_how_many, self.account)
except exceptions.DatabaseMalformed:
import common.logger
dialogs.ErrorDialog(_('Database Error'),
_('The database file (%s) cannot be read. Try to repair it or '
'remove it (all history will be lost).') % common.logger.LOG_DB_PATH)
rows = []
self.conv_textview.just_cleared = True
for row in rows: # row[0] time, row[1] has kind, row[2] the message
msg = row[2]
if not msg: # message is empty, we don't print it
continue
if row[1] in (constants.KIND_CHAT_MSG_SENT,
constants.KIND_SINGLE_MSG_SENT):
kind = 'outgoing'
name = self.get_our_nick()
elif row[1] in (constants.KIND_SINGLE_MSG_RECV,
constants.KIND_CHAT_MSG_RECV):
kind = 'incoming'
name = self.contact.get_shown_name()
elif row[1] == constants.KIND_ERROR:
kind = 'status'
name = self.contact.get_shown_name()
tim = time.localtime(float(row[0]))
if gajim.config.get('restored_messages_small'):
small_attr = ['small']
else:
small_attr = []
xhtml = None
if msg.startswith('<body '):
xhtml = msg
if row[3]:
msg = _('Subject: %(subject)s\n%(message)s') % \
{'subject': row[3], 'message': msg}
ChatControlBase.print_conversation_line(self, msg, kind, name,
tim, small_attr, small_attr + ['restored_message'],
small_attr + ['restored_message'], False, xhtml=xhtml)
if len(rows):
self.conv_textview.print_empty_line()
def read_queue(self):
"""
Read queue and print messages contained in it
"""
jid = self.contact.jid
jid_with_resource = jid
if self.resource:
jid_with_resource += '/' + self.resource
events = gajim.events.get_events(self.account, jid_with_resource)
# list of message ids which should be marked as read
message_ids = []
for event in events:
if event.type_ != self.type_id:
continue
if event.kind == 'error':
kind = 'info'
else:
kind = 'print_queue'
if event.sent_forwarded:
kind = 'out'
self.print_conversation(event.message, kind, tim=event.time,
encrypted=event.encrypted, subject=event.subject,
xhtml=event.xhtml, displaymarking=event.displaymarking,
correct_id=event.correct_id)
if isinstance(event.msg_log_id, int):
message_ids.append(event.msg_log_id)
if event.session and not self.session:
self.set_session(event.session)
if message_ids:
gajim.logger.set_read_messages(message_ids)
gajim.events.remove_events(self.account, jid_with_resource,
types=[self.type_id])
typ = 'chat' # Is it a normal chat or a pm ?
# reset to status image in gc if it is a pm
# Is it a pm ?
room_jid, nick = gajim.get_room_and_nick_from_fjid(jid)
control = gajim.interface.msg_win_mgr.get_gc_control(room_jid,
self.account)
if control and control.type_id == message_control.TYPE_GC:
control.update_ui()
control.parent_win.show_title()
typ = 'pm'
self.redraw_after_event_removed(jid)
if (self.contact.show in ('offline', 'error')):
show_offline = gajim.config.get('showoffline')
show_transports = gajim.config.get('show_transports_group')
if (not show_transports and gajim.jid_is_transport(jid)) or \
(not show_offline and typ == 'chat' and \
len(gajim.contacts.get_contacts(self.account, jid)) < 2):
gajim.interface.roster.remove_to_be_removed(self.contact.jid,
self.account)
elif typ == 'pm':
control.remove_contact(nick)
def show_bigger_avatar(self, small_avatar):
"""
Resize the avatar, if needed, so that it is at most half the screen size,
and show it
"""
if not small_avatar.window:
# Tab has been closed since we hovered the avatar
return
avatar_pixbuf = gtkgui_helpers.get_avatar_pixbuf_from_cache(
self.contact.jid)
if avatar_pixbuf in ('ask', None):
return
# Hide the small avatar
# this code hides the small avatar when we show a bigger one in case
# the avatar has a transparency hole in the middle
# so when we show the big one we avoid seeing the small one behind.
# It's why I set it transparent.
image = self.xml.get_object('avatar_image')
pixbuf = image.get_pixbuf()
pixbuf.fill(0xffffff00L) # RGBA
image.queue_draw()
screen_w = gtk.gdk.screen_width()
screen_h = gtk.gdk.screen_height()
avatar_w = avatar_pixbuf.get_width()
avatar_h = avatar_pixbuf.get_height()
half_scr_w = screen_w / 2
half_scr_h = screen_h / 2
if avatar_w > half_scr_w:
avatar_w = half_scr_w
if avatar_h > half_scr_h:
avatar_h = half_scr_h
window = gtk.Window(gtk.WINDOW_POPUP)
self.bigger_avatar_window = window
pixmap, mask = avatar_pixbuf.render_pixmap_and_mask()
window.set_size_request(avatar_w, avatar_h)
# we should make the cursor visible
# gtk+ doesn't make use of the motion notify on gtkwindow by default
# so this line adds that
window.set_events(gtk.gdk.POINTER_MOTION_MASK)
window.set_app_paintable(True)
window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_TOOLTIP)
window.realize()
window.window.set_back_pixmap(pixmap, False) # make it transparent
window.window.shape_combine_mask(mask, 0, 0)
# make the bigger avatar window show up centered
x0, y0 = small_avatar.window.get_origin()
x0 += small_avatar.allocation.x
y0 += small_avatar.allocation.y
center_x = x0 + (small_avatar.allocation.width / 2)
center_y = y0 + (small_avatar.allocation.height / 2)
pos_x, pos_y = center_x - (avatar_w / 2), center_y - (avatar_h / 2)
window.move(pos_x, pos_y)
# make the cursor invisible so we can see the image
invisible_cursor = gtkgui_helpers.get_invisible_cursor()
window.window.set_cursor(invisible_cursor)
# we should hide the window
window.connect('leave_notify_event',
self._on_window_avatar_leave_notify_event)
window.connect('motion-notify-event',
self._on_window_motion_notify_event)
window.show_all()
def _on_window_avatar_leave_notify_event(self, widget, event):
"""
Just left the popup window that holds avatar
"""
self.bigger_avatar_window.destroy()
self.bigger_avatar_window = None
# Re-show the small avatar
self.show_avatar()
def _on_window_motion_notify_event(self, widget, event):
"""
Just moved the mouse so show the cursor
"""
cursor = gtk.gdk.Cursor(gtk.gdk.LEFT_PTR)
self.bigger_avatar_window.window.set_cursor(cursor)
def _on_send_file_menuitem_activate(self, widget):
self._on_send_file()
def _on_add_to_roster_menuitem_activate(self, widget):
dialogs.AddNewContactWindow(self.account, self.contact.jid)
def _on_contact_information_menuitem_activate(self, widget):
gajim.interface.roster.on_info(widget, self.contact, self.account)
def _on_toggle_gpg_menuitem_activate(self, widget):
self._toggle_gpg()
def _on_convert_to_gc_menuitem_activate(self, widget):
"""
User wants to invite some friends to chat
"""
dialogs.TransformChatToMUC(self.account, [self.contact.jid])
def _on_toggle_e2e_menuitem_activate(self, widget):
if self.session and self.session.enable_encryption:
# e2e was enabled, disable it
jid = str(self.session.jid)
thread_id = self.session.thread_id
self.session.terminate_e2e()
gajim.connections[self.account].delete_session(jid, thread_id)
# presumably the user had a good reason to shut it off, so
# disable autonegotiation too
self.no_autonegotiation = True
else:
self.begin_e2e_negotiation()
def begin_negotiation(self):
self.no_autonegotiation = True
if not self.session:
fjid = self.contact.get_full_jid()
new_sess = gajim.connections[self.account].make_new_session(fjid, type_=self.type_id)
self.set_session(new_sess)
def begin_e2e_negotiation(self):
self.begin_negotiation()
self.session.resource = self.contact.resource
self.session.negotiate_e2e(False)
def begin_archiving_negotiation(self):
self.begin_negotiation()
self.session.negotiate_archiving()
def _nec_failed_decrypt(self, obj):
if obj.session != self.session:
return
details = _('Unable to decrypt message from %s\nIt may have been '
'tampered with.') % obj.fjid
self.print_conversation_line(details, 'status', '', obj.timestamp)
# terminate the session
thread_id = self.session.thread_id
self.session.terminate_e2e()
obj.conn.delete_session(obj.fjid, thread_id)
# restart the session
self.begin_e2e_negotiation()
# Stop emission so it doesn't go to gui_interface
return True
def got_connected(self):
ChatControlBase.got_connected(self)
# Refreshing contact
contact = gajim.contacts.get_contact_with_highest_priority(
self.account, self.contact.jid)
if isinstance(contact, GC_Contact):
contact = contact.as_contact()
if contact:
self.contact = contact
self.draw_banner()
emoticons_button = self.xml.get_object('emoticons_button')
emoticons_button.set_sensitive(True)
send_button = self.xml.get_object('send_button')
send_button.set_sensitive(True)
def got_disconnected(self):
# Emoticons button
emoticons_button = self.xml.get_object('emoticons_button')
emoticons_button.set_sensitive(False)
send_button = self.xml.get_object('send_button')
send_button.set_sensitive(False)
# Add to roster
self._add_to_roster_button.hide()
# Audio button
self._audio_button.set_sensitive(False)
# Video button
self._video_button.set_sensitive(False)
# Send file button
self._send_file_button.set_tooltip_text('')
self._send_file_button.set_sensitive(False)
# Convert to GC button
self._convert_to_gc_button.set_sensitive(False)
ChatControlBase.got_disconnected(self)
def update_status_display(self, name, uf_show, status):
"""
Print the contact's status and update the status/GPG image
"""
self.update_ui()
self.parent_win.redraw_tab(self)
self.print_conversation(_('%(name)s is now %(status)s') % {'name': name,
'status': uf_show}, 'status')
if status:
self.print_conversation(' (', 'status', simple=True)
self.print_conversation('%s' % (status), 'status', simple=True)
self.print_conversation(')', 'status', simple=True)
def _info_bar_show_message(self):
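# Show the first queued InfoBar message; messages are displayed one at a
# time and the next one is popped via idle_add when its event is removed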
if self.info_bar.get_visible():
# A message is already shown
return
if not self.info_bar_queue:
return
markup, buttons, args, type_ = self.info_bar_queue[0]
self.info_bar_label.set_markup(markup)
# Remove old buttons
area = self.info_bar.get_action_area()
for b in area.get_children():
area.remove(b)
# Add new buttons
for button in buttons:
self.info_bar.add_action_widget(button, 0)
self.info_bar.set_message_type(type_)
self.info_bar.set_no_show_all(False)
self.info_bar.show_all()
def _add_info_bar_message(self, markup, buttons, args,
type_=gtk.MESSAGE_INFO):
self.info_bar_queue.append((markup, buttons, args, type_))
self._info_bar_show_message()
def _get_file_props_event(self, file_props, type_):
evs = gajim.events.get_events(self.account, self.contact.jid, [type_])
for ev in evs:
if ev.file_props == file_props:
return ev
return None
def _on_accept_file_request(self, widget, file_props):
gajim.interface.instances['file_transfers'].on_file_request_accepted(
self.account, self.contact, file_props)
ev = self._get_file_props_event(file_props, 'file-request')
if ev:
gajim.events.remove_events(self.account, self.contact.jid, event=ev)
def _on_cancel_file_request(self, widget, file_props):
gajim.connections[self.account].send_file_rejection(file_props)
ev = self._get_file_props_event(file_props, 'file-request')
if ev:
gajim.events.remove_events(self.account, self.contact.jid, event=ev)
def _got_file_request(self, file_props):
"""
Show an InfoBar on top of control
"""
markup = '<b>%s:</b> %s' % (_('File transfer'), file_props.name)
if file_props.desc:
markup += ' (%s)' % file_props.desc
markup += '\n%s: %s' % (_('Size'), helpers.convert_bytes(
file_props.size))
b1 = gtk.Button(_('_Accept'))
b1.connect('clicked', self._on_accept_file_request, file_props)
b2 = gtk.Button(stock=gtk.STOCK_CANCEL)
b2.connect('clicked', self._on_cancel_file_request, file_props)
self._add_info_bar_message(markup, [b1, b2], file_props,
gtk.MESSAGE_QUESTION)
def _on_open_ft_folder(self, widget, file_props):
path = os.path.split(file_props.file_name)[0]
if os.path.exists(path) and os.path.isdir(path):
helpers.launch_file_manager(path)
ev = self._get_file_props_event(file_props, 'file-completed')
if ev:
gajim.events.remove_events(self.account, self.contact.jid, event=ev)
def _on_ok(self, widget, file_props, type_):
ev = self._get_file_props_event(file_props, type_)
if ev:
gajim.events.remove_events(self.account, self.contact.jid, event=ev)
def _got_file_completed(self, file_props):
markup = '<b>%s:</b> %s' % (_('File transfer completed'),
file_props.name)
if file_props.desc:
markup += ' (%s)' % file_props.desc
b1 = gtk.Button(_('_Open Containing Folder'))
b1.connect('clicked', self._on_open_ft_folder, file_props)
b2 = gtk.Button(stock=gtk.STOCK_OK)
b2.connect('clicked', self._on_ok, file_props, 'file-completed')
self._add_info_bar_message(markup, [b1, b2], file_props)
def _got_file_error(self, file_props, type_, pri_txt, sec_txt):
markup = '<b>%s:</b> %s' % (pri_txt, sec_txt)
b = gtk.Button(stock=gtk.STOCK_OK)
b.connect('clicked', self._on_ok, file_props, type_)
self._add_info_bar_message(markup, [b], file_props, gtk.MESSAGE_ERROR)
def _on_accept_gc_invitation(self, widget, event):
try:
if event.is_continued:
gajim.interface.join_gc_room(self.account, event.room_jid,
gajim.nicks[self.account], event.password,
is_continued=True)
else:
dialogs.JoinGroupchatWindow(self.account, event.room_jid)
except GajimGeneralException:
pass
gajim.events.remove_events(self.account, self.contact.jid, event=event)
def _on_cancel_gc_invitation(self, widget, event):
gajim.events.remove_events(self.account, self.contact.jid, event=event)
def _get_gc_invitation(self, event):
markup = '<b>%s:</b> %s' % (_('Groupchat Invitation'), event.room_jid)
if event.comment:
markup += ' (%s)' % event.comment
b1 = gtk.Button(_('_Join'))
b1.connect('clicked', self._on_accept_gc_invitation, event)
b2 = gtk.Button(stock=gtk.STOCK_CANCEL)
b2.connect('clicked', self._on_cancel_gc_invitation, event)
self._add_info_bar_message(markup, [b1, b2], (event.room_jid,
event.comment), gtk.MESSAGE_QUESTION)
def on_event_added(self, event):
if event.account != self.account:
return
if event.jid != self.contact.jid:
return
if event.type_ == 'file-request':
self._got_file_request(event.file_props)
elif event.type_ == 'file-completed':
self._got_file_completed(event.file_props)
elif event.type_ in ('file-error', 'file-stopped'):
msg_err = ''
if event.file_props.error == -1:
msg_err = _('Remote contact stopped transfer')
elif event.file_props.error == -6:
msg_err = _('Error opening file')
self._got_file_error(event.file_props, event.type_,
_('File transfer stopped'), msg_err)
elif event.type_ in ('file-request-error', 'file-send-error'):
self._got_file_error(event.file_props, event.type_,
_('File transfer cancelled'),
_('Connection with peer cannot be established.'))
elif event.type_ == 'gc-invitation':
self._get_gc_invitation(event)
def on_event_removed(self, event_list):
"""
Called when one or more events are removed from the event list
"""
for ev in event_list:
if ev.account != self.account:
continue
if ev.jid != self.contact.jid:
continue
if ev.type_ not in ('file-request', 'file-completed', 'file-error',
'file-stopped', 'file-request-error', 'file-send-error',
'gc-invitation'):
continue
i = 0
removed = False
for ib_msg in self.info_bar_queue:
if ev.type_ == 'gc-invitation':
if ev.room_jid == ib_msg[2][0]:
self.info_bar_queue.remove(ib_msg)
removed = True
else: # file-*
if ib_msg[2] == ev.file_props:
self.info_bar_queue.remove(ib_msg)
removed = True
if removed:
if i == 0:
# We are removing the one currently displayed
self.info_bar.set_no_show_all(True)
self.info_bar.hide()
# show next one?
gobject.idle_add(self._info_bar_show_message)
break
i += 1
|
jabber-at/gajim
|
src/chat_control.py
|
Python
|
gpl-3.0
| 144,734
|
[
"VisIt"
] |
4d00427ba86765daf7933bf90b1971a8ac6edd2570fb8e5f0e2143b9331b864c
|
from random import randint
from time import sleep
from lettuce import *
from rapidsms.contrib.locations.models import Location
from survey.features.page_objects.question import BatchQuestionsListPage, AddQuestionPage, ListAllQuestionsPage, CreateNewQuestionPage, CreateNewSubQuestionPage, EditQuestionPage
from survey.models import Batch, QuestionModule, BatchQuestionOrder
from survey.models.question import Question, QuestionOption
from survey.models.householdgroups import HouseholdMemberGroup
from survey.models.answer_rule import AnswerRule
@step(u'And I have 100 questions under the batch')
def and_i_have_100_questions_under_the_batch(step):
for i in xrange(100):
q = Question.objects.create(text="some questions %d" % i,
answer_type=Question.NUMBER, identifier='ID %d' % i, order=i)
q.batches.add(world.batch)
BatchQuestionOrder.objects.create(batch=world.batch, question=q, order=i)
@step(u'And I visit questions listing page of the batch')
def and_i_visit_questions_listing_page_of_the_batch(step):
world.page = BatchQuestionsListPage(world.browser, world.batch)
world.page.visit()
@step(u'Then I should see the questions list paginated')
def then_i_should_see_the_questions_list_paginated(step):
world.page.validate_fields()
world.page.validate_pagination()
world.page.validate_fields()
@step(u'When I change to 100 questions per page')
def when_i_change_to_100_questions_per_page(step):
world.page.fill_valid_values({'number_of_questions_per_page':100})
world.page.click_by_css('#a-question-list')
@step(u'Then I should not see pagination')
def then_i_should_not_see_pagination(step):
world.page.validate_pagination(False)
@step(u'And I have no questions under the batch')
def and_i_have_no_questions_under_the_batch(step):
Question.objects.filter(batches=world.batch).delete()
@step(u'Then I should see error message on the page')
def then_i_should_see_error_message_on_the_page(step):
world.page.is_text_present("There are no questions associated with this batch yet.")
@step(u'And I click add question button')
def and_i_click_add_question_button(step):
world.page.click_link_by_text("Select Question")
@step(u'Then I should see a add question page')
def then_i_should_see_a_add_question_page(step):
world.page = AddQuestionPage(world.browser, world.batch)
world.page.validate_url()
@step(u'When I fill the details for add question form')
def when_i_fill_the_details_for_add_question_form(step):
data = {'module': world.module.id,
'text': 'hritik question',
'answer_type': Question.NUMBER,
'identifier': 'ID 1'}
world.page.fill_valid_values(data)
@step(u'Then I should go back to questions listing page')
def then_i_should_go_back_to_questions_listing_page(step):
world.page = BatchQuestionsListPage(world.browser, world.batch)
world.page.validate_url()
@step(u'And I should see question successfully added message')
def and_i_should_see_question_successfully_added_message(step):
world.page.is_text_present("Question successfully added.")
@step(u'And I have a member group')
def and_i_have_a_member_group(step):
world.household_member_group = HouseholdMemberGroup.objects.create(name='Age 4-5', order=1)
@step(u'And I visit add new question page of the batch')
def and_i_visit_add_new_question_page_of_the_batch(step):
world.page = AddQuestionPage(world.browser, world.batch)
world.page.visit()
@step(u'And I fill the details for question')
def and_i_fill_the_details_for_question(step):
world.page.fill_valid_values({'identifier': 'ID 1', 'module': world.module.id, 'text': 'hritik question'})
world.page.select('group', [world.household_member_group.pk])
@step(u'When I select multichoice for answer type')
def when_i_select_multichoice_for_answer_type(step):
world.page.select('answer_type', [Question.MULTICHOICE])
@step(u'Then I should see one option field')
def then_i_should_see_one_option_field(step):
world.page.see_one_option_field("Option 1")
world.page.see_option_add_and_remove_buttons(1)
@step(u'When I click add-option icon')
def when_i_click_add_option_icon(step):
world.page.click_by_css(".icon-plus")
@step(u'Then I should see two options field')
def then_i_should_see_two_options_field(step):
world.page.see_one_option_field("Option 1")
world.page.see_one_option_field("Option 2")
world.page.see_option_add_and_remove_buttons(2)
@step(u'When I click remove-option icon')
def when_i_click_remove_option_icon(step):
world.page.click_by_css(".icon-remove")
@step(u'Then I should see only one option field')
def then_i_should_see_only_one_option_field(step):
world.page.see_one_option_field("Option 1")
world.page.see_option_add_and_remove_buttons(1)
world.page.option_not_present("Option 2")
@step(u'And I fill an option question')
def and_i_fill_an_option_question(step):
world.option = {'options': 'some option question text'}
world.page.fill_valid_values(world.option)
@step(u'And I have more than 50 questions')
def and_i_have_100_questions(step):
for i in xrange(100):
Question.objects.create(text="some questions %d" % i, answer_type=Question.NUMBER, identifier='ID %d' % i,
order=i)
@step(u'And I visit questions list page')
def and_i_visit_questions_list_page(step):
world.page = ListAllQuestionsPage(world.browser)
world.page.visit()
@step(u'And If I click create new question link')
def and_if_i_click_create_new_question_link(step):
world.page.click_link_by_text("Create New Question")
@step(u'Then I should see create new question page')
def then_i_should_see_create_new_question_page(step):
world.page = CreateNewQuestionPage(world.browser)
world.page.validate_url()
@step(u'And I visit create new question page')
def and_i_visit_create_new_question_page(step):
world.page = CreateNewQuestionPage(world.browser)
world.page.visit()
@step(u'And I have a multichoice question')
def and_i_have_a_multichoice_question(step):
world.multi_choice_question = Question.objects.create(module=world.module, text="Are these insecticide?",
answer_type=Question.MULTICHOICE, order=6,
group=world.household_member_group, identifier='ID 1')
world.option1 = QuestionOption.objects.create(question=world.multi_choice_question, text="Yes", order=1)
world.option2 = QuestionOption.objects.create(question=world.multi_choice_question, text="No", order=2)
world.option3 = QuestionOption.objects.create(question=world.multi_choice_question, text="Dont Know", order=3)
@step(u'And I click on view options link')
def and_i_click_on_view_options_link(step):
world.page.click_link_by_partial_href("#view_options_%d" % world.multi_choice_question.id)
@step(u'Then I should see the question options in a modal')
def then_i_should_see_the_question_options_in_a_modal(step):
world.page.validate_fields_present([world.multi_choice_question.text, "Text", "Order"])
@step(u'And when I click the close button')
def and_when_i_click_the_close_button(step):
world.page.click_link_by_text("Close")
@step(u'Then I should be back to questions list page')
def then_i_should_see_questions_list_page(step):
sleep(2)
world.page.validate_fields()
@step(u'And I click on view add subquestion link')
def and_i_click_on_view_add_subquestion_link(step):
world.browser.click_link_by_text("Add Subquestion")
@step(u'Then I should go to add subquestion page')
def then_i_should_go_to_add_subquestion_page(step):
world.page = CreateNewSubQuestionPage(world.browser, question=world.multi_choice_question)
world.page.validate_url()
@step(u'When I fill in subquestion details')
def when_i_fill_in_subquestion_details(step):
world.page.fill_valid_values({'module': world.module.id, 'text': 'hritik question', 'identifier': 'Q001'})
world.page.select('group', [world.household_member_group.pk])
world.page.select('answer_type', [Question.NUMBER])
@step(u'And I should see subquestion successfully added message')
def and_i_should_see_subquestion_successfully_added_message(step):
world.page.see_success_message('Sub question', 'added')
@step(u'And I fill the invalid details details for question')
def and_i_fill_the_invalid_details_details_for_question(step):
a_very_long_text = "Is there something here I'm missing? Is uni_form " \
"overriding the setting somehow? If not, any advice as " \
"to what I might look for in debug to see where/why this is happening?"
world.page.fill_valid_values({'text': a_very_long_text})
@step(u'And I should see question was not added')
def and_i_should_see_question_was_not_added(step):
world.page.see_message("Question was not added.")
@step(u'And I should see that option in the form')
def and_i_should_see_that_option_in_the_form(step):
world.page.see_option_text(world.option['options'], 'options')
@step(u'And I visit question listing page')
def and_i_visit_question_listing_page(step):
world.page = ListAllQuestionsPage(world.browser)
world.page.visit()
@step(u'And I click the edit question link')
def and_i_click_the_edit_question_link(step):
world.page.click_link_by_text(" Edit")
@step(u'Then I should see the edit question page')
def then_i_should_see_the_edit_question_page(step):
world.page = EditQuestionPage(world.browser, world.multi_choice_question)
world.page.validate_url()
@step(u'And I see the question form with values')
def and_i_see_the_question_form_with_values(step):
world.form = {'module': 'Module',
'text': 'Text',
'group': 'Group',
'answer_type': 'Answer type'}
form_values = {'module': world.module.id,
'text': world.multi_choice_question.text,
'group': world.multi_choice_question.group.id,
'answer_type': world.multi_choice_question.answer_type}
world.page.validate_form_present(world.form)
world.page.validate_form_values(form_values)
@step(u'When I fill in edited question details')
def when_i_fill_in_edited_question_details(step):
world.edited_question_details = {'module': world.module.id,
'text': 'edited question',
'group': world.multi_choice_question.group.id
}
world.page.see_select_option(['Number'], 'answer_type')
world.page.fill_valid_values(world.edited_question_details)
@step(u'Then I should see the question successfully edited')
def then_i_should_see_the_question_successfully_edited(step):
world.page.see_success_message("Question", "edited")
@step(u'And I click on delete question link')
def and_i_click_on_delete_question_link(step):
world.page.click_link_by_partial_href("#delete_question_%d" % world.multi_choice_question.id)
@step(u'Then I should see a delete question confirmation modal')
def then_i_should_see_a_delete_question_confirmation_modal(step):
world.page.see_confirm_modal_message(world.multi_choice_question.text)
@step(u'Then I should see that the question was deleted successfully')
def then_i_should_see_that_the_question_was_deleted_successfully(step):
world.page.see_success_message("Question", "deleted")
@step(u'And I have a sub question for that question')
def and_i_have_a_sub_question_for_that_question(step):
world.sub_question = Question.objects.create(module=world.module, parent=world.multi_choice_question,
text="Sub Question 2?",
answer_type=Question.NUMBER, subquestion=True, identifier='Q101')
@step(u'Then I should not see the sub question')
def then_i_should_not_see_the_sub_question(step):
world.page.is_text_present(world.sub_question.text, False)
@step(u'And I have a non multichoice question')
def and_i_have_a_non_multi_choice_question(step):
world.multi_choice_question = Question.objects.create(module=world.module, text="Are these insecticide?",
answer_type=Question.NUMBER, order=7,
group=world.household_member_group, identifier='Q921')
world.multi_choice_question.batches.add(world.batch)
BatchQuestionOrder.objects.create(batch=world.batch, question=world.multi_choice_question, order=1)
@step(u'When I click on the question')
def and_i_click_on_the_question(step):
world.page.click_link_by_text(world.multi_choice_question.text)
@step(u'Then I should see the sub question below the question')
def then_i_should_see_the_sub_question_below_the_question(step):
world.page.is_text_present("Subquestion")
world.page.is_text_present(world.sub_question.text)
@step(u'And I have a rule linking one option with that subquestion')
def and_i_have_a_rule_linking_one_option_with_that_subquestion(step):
world.answer_rule = AnswerRule.objects.create(question=world.multi_choice_question,
action=AnswerRule.ACTIONS['ASK_SUBQUESTION'],
condition=AnswerRule.CONDITIONS['EQUALS_OPTION'],
validate_with_option=world.option3, next_question=world.sub_question)
@step(u'And I have a subquestion under that question')
def and_i_have_a_subquestion_under_that_question(step):
world.sub_question = Question.objects.create(module=world.module, subquestion=True,
parent=world.multi_choice_question,
text="this is a subquestion", identifier='Q022')
@step(u'When I fill in duplicate subquestion details')
def when_i_fill_in_duplicate_subquestion_details(step):
world.page.fill_valid_values({'module': world.module.id, 'identifier': 'ID 1', 'text': world.sub_question.text})
world.page.select('group', [world.household_member_group.pk])
world.page.select('answer_type', [Question.NUMBER])
@step(u'And I should see subquestion not added message')
def and_i_should_see_subquestion_not_added_message(step):
world.page.is_text_present("Sub question not saved.")
@step(u'And I have a rule on value with that subquestion')
def and_i_have_a_rule_on_value_with_that_subquestion(step):
world.answer_rule = AnswerRule.objects.create(question=world.multi_choice_question, validate_with_value=1,
condition=AnswerRule.CONDITIONS['EQUALS'],
action=AnswerRule.ACTIONS['ASK_SUBQUESTION'],
next_question=world.sub_question, batch=world.batch)
@step(u'And I click on view logic link')
def and_i_click_on_view_logic_link(step):
world.page.click_link_by_partial_href("#view_logic_%d" % world.multi_choice_question.id)
@step(u'Then I should see the logic in a modal')
def then_i_should_see_the_logic_in_a_modal(step):
world.page.validate_fields_present(
[world.multi_choice_question.text, "Eligibility Criteria", "Question/Value/Option", "Action"])
@step(u'Then I should see delete logic icon')
def then_i_should_delete_logic_icon(step):
world.browser.find_by_css('.icon-trash')
@step(u'When I click delete logic icon')
def when_i_click_delete_logic_icon(step):
world.page.click_by_css('#delete-icon-%s' % world.answer_rule.id)
@step(u'And I click confirm delete')
def and_i_click_confirm_delete(step):
world.page.click_by_css('#delete-logic-%s' % world.answer_rule.id)
@step(u'Then I should redirected to batch question page')
def then_i_should_redirected_to_batch_question_page(step):
world.page = BatchQuestionsListPage(world.browser, world.batch)
world.page.validate_url()
@step(u'Then I should see special characters message')
def and_i_should_see_special_characters_message(step):
special_characters = "Please note that the following special characters will be removed ["
for character in Question.IGNORED_CHARACTERS:
special_characters = special_characters + character + " "
special_characters = special_characters.strip() + "]"
world.page.is_text_present(special_characters)
@step(u'And I click delete sub question link')
def and_i_click_delete_sub_question_link(step):
sleep(3)
world.page.click_delete_subquestion()
@step(u'Then I should see a confirm delete subqestion modal')
def then_i_should_see_a_confirm_delete_subqestion_modal(step):
world.page.see_confirm_modal_message(world.sub_question.text)
@step(u'Then I should see the sub question deleted successfully')
def then_i_should_see_the_sub_question_deleted_successfully(step):
world.page.see_success_message("Sub question", "deleted")
@step(u'When I click confirm delete')
def when_i_click_confirm_delete(step):
world.page.click_by_css("#delete-subquestion-%s" % world.sub_question.id)
@step(u'And I click edit sub question link')
def and_i_click_edit_sub_question_link(step):
sleep(3)
world.page.click_by_css("#edit_subquestion_%s" % world.sub_question.id)
@step(u'Then I see the sub question form with values')
def then_i_see_the_sub_question_form_with_values(step):
form_values = {'module': world.module.id, 'text': world.sub_question.text,
'group': world.multi_choice_question.group.id,
'identifier': "Q101",
'answer_type': world.sub_question.answer_type}
world.page.validate_form_values(form_values)
@step(u'When I fill in edited sub question details')
def when_i_fill_in_edited_sub_question_details(step):
world.edited_sub_question_details = {'identifier': 'Q101', 'module': world.module.id, 'text': 'edited question',
'group': world.multi_choice_question.group.id
}
world.page.see_select_option(['Number'], 'answer_type')
world.page.fill_valid_values(world.edited_sub_question_details)
@step(u'Then I should see the sub question successfully edited')
def then_i_should_see_the_sub_question_successfully_edited(step):
world.page.see_success_message("Sub question", "edited")
@step(u'And I click delete question rule')
def and_i_click_delete_question_rule(step):
sleep(2)
world.page.click_by_css('#delete-icon-%s' % world.answer_rule.id)
@step(u'And I should see that the logic was deleted successfully')
def and_i_should_see_that_the_logic_was_deleted_successfully(step):
world.page.see_success_message("Logic", "deleted")
@step(u'And I select multichoice question in batch')
def and_i_select_multichoice_question_in_batch(step):
world.batch = Batch.objects.create(order=1, name="Batch A", description='description', survey=world.survey)
world.multi_choice_question.batches.add(world.batch)
BatchQuestionOrder.objects.create(batch=world.batch, question=world.multi_choice_question, order=1)
@step(u'And I have a module')
def and_i_have_a_module(step):
world.module = QuestionModule.objects.create(name="Education")
@step(u'And I have a location')
def and_i_have_a_location(step):
world.kampala = Location.objects.create(name="Kampala")
@step(u'And I have an open batch in that location')
def and_i_have_an_open_batch_in_that_location(step):
world.batch = Batch.objects.create(order=1, name="Batch A", description='description', survey=world.survey)
world.batch.open_for_location(world.kampala)
@step(u'Then I should see question list with only view options action')
def then_i_should_see_question_list_with_only_view_options_action(step):
world.page.validate_only_view_options_action_exists()
@step(u'And I have a multichoice and numeric questions with logics')
def and_i_have_a_multichoice_and_numeric_questions(step):
world.numeric_question = Question.objects.create(text="some questions", answer_type=Question.NUMBER,
identifier='ID', order=1)
world.multi_choice_question = Question.objects.create(text="Are these insecticide?",
answer_type=Question.MULTICHOICE, order=6, identifier='ID 1')
world.option3 = QuestionOption.objects.create(text="haha", order=1, question=world.multi_choice_question)
world.numeric_question.batches.add(world.batch)
world.multi_choice_question.batches.add(world.batch)
BatchQuestionOrder.objects.create(batch=world.batch, question=world.numeric_question, order=1)
BatchQuestionOrder.objects.create(batch=world.batch, question=world.multi_choice_question, order=2)
AnswerRule.objects.create(batch=world.batch, question=world.multi_choice_question,
action=AnswerRule.ACTIONS['END_INTERVIEW'],
condition=AnswerRule.CONDITIONS['EQUALS_OPTION'],
validate_with_option=world.option3)
    AnswerRule.objects.create(batch=world.batch, question=world.numeric_question,
                              action=AnswerRule.ACTIONS['END_INTERVIEW'],
                              condition=AnswerRule.CONDITIONS['EQUALS'],
                              validate_with_value=2)
@step(u'Then I should see field required error message')
def then_i_should_see_field_required_error_message(step):
world.page.is_text_present("This field is required.")
@step(u'And I should be able to export questions')
def and_i_should_be_able_to_export_questions(step):
world.page.find_by_css("#export_question", "Export Questions")
|
antsmc2/mics
|
survey/features/Question-steps.py
|
Python
|
bsd-3-clause
| 21,831
|
[
"VisIt"
] |
4a2e335a508fdf338376cf435e2fbd69bc505fedea04f9f45e9676be4f936c46
|
import os
import click
from cli.form import CLIFormWrapper
from errors import MissingCredentialsError
from medicover import Medicover
VISIT_PREFERENCE_PARAMS_DATA = (
('time_from', 'Not before hour? FORMAT: HH:MM e.g. 12:00'),
('time_to', 'Not after hour? FORMAT: HH:MM e.g. 12:00'),
('date_from', 'Not before date? FORMAT: DD.MM.YYYY eg. 01.01.2016'),
('date_to', 'Not after date? FORMAT: DD.MM.YYYY eg. 01.01.2016'),
('weekday', 'What day of the week? FORMAT: full weekday name e.g. wednesday')
)
@click.group()
@click.option('-u', default=lambda: os.environ.get('MEDICOVER_USER', None), show_default=False,
help='Your Medicover card number')
@click.option('-p', default=lambda: os.environ.get('MEDICOVER_PASSWORD', None), show_default=False,
help='Your Medicover password')
@click.pass_context
def medicover(ctx, u, p):
if not u or not p:
raise MissingCredentialsError
ctx.obj['M'] = Medicover(user=u, password=p)
@medicover.command()
@click.pass_context
def form(ctx):
m = ctx.obj['M']
cli_form = CLIFormWrapper(m.form)
cli_form.start()
# not needed for now
def prompt_for_visit_preference_data():
click.echo('Please provide visit preference details. Press ENTER to skip a field')
params = {}
for param_name, help_text in VISIT_PREFERENCE_PARAMS_DATA:
value = click.prompt(help_text, default='')
if value:
params[param_name] = value
return params
def main():
return medicover(obj={})
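# Minimal usage sketch (assumed; the console-script wiring lives in the
# project's packaging, not in this module). Credentials may also be supplied
# through the environment instead of the -u/-p options, e.g.:
#
#   export MEDICOVER_USER=<card number>
#   export MEDICOVER_PASSWORD=<password>
#   python -m medicover.main form
if __name__ == '__main__':
    main()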
|
jakubkbak/medicover-cli
|
medicover/main.py
|
Python
|
mit
| 1,524
|
[
"VisIt"
] |
8f28e541f70858572e43c4557d3f9a1ace3ae73c9561d0b5062c381272c20783
|
"""moose_to_neuron.py:
Convert moose model to NEURON.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2015, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import moose
import moose.utils as mu
import numpy as np
import re
compts_ = set()
nrn_txt_ = {}
# This is output sampling time.
dt_ = 1e3*5e-5
plot_dt_ = 1e3*1e-4
def nrn_name(compt):
assert type(compt) != moose.vec, compt
    path = re.sub(r'\[\d+\]', "", compt.path)
path = path.split('/')[-1]
return path.translate(None, "[]/")
def moose_compt_to_nrn_section_params(mooseCompt):
"""Convert moose compartment properties to NEURON section propterties """
length = mooseCompt.length
diameter = mooseCompt.diameter
sarea = np.pi * diameter * length
ra = mooseCompt.Ra * (np.pi * diameter * diameter / 4.0) / length
props = {}
props['L'] = length * 1e6
props['diam'] = diameter * 1e6
props['Ra'] = ra * 1e2 # m to cm
props['cm'] = 1e2 * mooseCompt.Cm / sarea # F/m^2 -> uF/cm^2
props['Rm'] = mooseCompt.Rm * sarea / 1e-4
props['sarea'] = sarea
return props
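# A small worked example (hypothetical compartment object, not part of the
# original script) illustrating the unit conversions above: MOOSE values are
# in SI units, while NEURON expects um, ohm*cm and uF/cm^2.
def _demo_section_params():
    class _FakeCompt(object):
        length = 100e-6    # 100 um, in metres
        diameter = 2e-6    # 2 um, in metres
        Ra = 1.0           # total axial resistance, ohm
        Rm = 1e8           # total membrane resistance, ohm
        Cm = 1e-12         # total membrane capacitance, F
    props = moose_compt_to_nrn_section_params(_FakeCompt())
    assert abs(props['L'] - 100.0) < 1e-9     # metres -> micrometres
    assert abs(props['diam'] - 2.0) < 1e-9
    return props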
def write_mechanism_line(chan, props):
mech = chan.name
param = {}
line = [ "insert %s {" % mech ]
gbar, ek = chan.Gbar, chan.Ek
gbar = gbar / props['sarea']
nrn_gbar = gbar * 1e-4
param['gbar_%s' % mech] = nrn_gbar
param['e_%s' % mech] = 1e3*float(ek)
if "na" in mech.lower():
param['e_na'] = 1e3*float(ek)
elif "k" in mech.lower():
param['e_k'] = 1e3*float(ek)
line += ["%s=%s" % (k, param[k]) for k in param]
line.append("}")
return " ".join(line)
def create_section_in_neuron(mooseCompt):
secname = nrn_name(mooseCompt)
text = [ "create %s" % secname ]
params = [ "%s { " % secname ]
# Here we fill in the mechanism.
params += [ "nseg = 1" ]
props = moose_compt_to_nrn_section_params(mooseCompt)
params += [ "%s = %s" % (p, props[p]) for p in ["L", "diam", "cm", "Ra", "Rm"]]
params.append('insert pas { g_pas=1/Rm Ra=Ra cm=cm e_pas=-60.0 }')
channels = mooseCompt.neighbors['channel']
    # In this particular script, we just add the HH mechanism and nothing else.
    # Make sure that MOOSE only has the HH mechanism loaded. Passive properties
    # must be the same as in the HH mechanism.
for chanVec in channels:
for chan in chanVec:
mech = chan.name
params.append(write_mechanism_line(chan, props))
text.append("\n\t".join(params))
text.append("}\n\n")
return "\n".join(text)
def connect_neuron_sections(compt):
    global nrn_txt_
srcSec = nrn_name(compt)
context = []
neighbours = compt.neighbors['axial']
for c in neighbours:
for tgt in c:
tgtSec = nrn_name(tgt)
context.append('connect %s(0), %s(1)' % (tgtSec, srcSec))
context.append("\n")
return "\n".join(context)
def insert_pulsegen(stim):
stimname = nrn_name(stim)
text = []
text.append('\nobjectvar %s' % stimname)
for comptVecs in stim.neighbors['output']:
for compt in comptVecs:
targetName = nrn_name(compt)
text.append("%s %s = new IClamp(0.5)" % (targetName, stimname))
text.append("%s.amp = %s" % (stimname, stim.level[0] * 1e9))
text.append("%s.del = %s" % (stimname, stim.delay[0] * 1e3))
text.append("%s.dur = %s" % (stimname, (stim.delay[0] + stim.width[0])*1e3))
return "\n".join(text)+"\n"
def insert_record(index, table):
global dt_, plot_dt_
text = []
for targetVecs in table.neighbors['requestOut']:
for target in targetVecs:
targetName = nrn_name(target)
tableName = "table_%s" % targetName
text.append("objref %s" % tableName)
text.append("%s = new Vector()" % tableName)
text.append('%s.record(&%s.v(0.5))'%(tableName, targetName))
return "\n".join(text), tableName
def stimulus_text():
stimtext = [ 'load_file("stdrun.hoc")' ]
mu.info(" Default sim time is 0.1 second. Change it in script.")
#stimtext.append('dt=%s' % plot_dt_)
stimtext.append('tstop=%s' % 100)
stimtext.append('cvode.active(1)')
stimtext.append('finitialize()')
stimtext.append('run()')
stimtext.append("\n")
stimtext = "\n".join(stimtext)
return stimtext
def plot_text(tableList):
plottext = ["objref outF"]
plottext.append("outF = new File()")
plottext.append('outF.wopen("nrn_out.dat")')
plottext.append('outF.printf("time,%s\\n")' % ",".join(tableList))
plottext.append('for i=0,rect.size()-1 {\n')
glist, plotlist = ["%g"], ["rect.x(i)"]
for t in tableList:
glist.append("%g")
plotlist.append("%s.x(i)" % t)
plottext.append('\toutF.printf("%s\\n", %s)' % (",".join(glist), ",".join(plotlist)))
plottext.append("}")
plottext.append("\n")
return "\n".join(plottext)
def to_neuron(path, **kwargs):
moose.reinit()
mooseCompts = moose.wildcardFind('%s/##[TYPE=Compartment]' % path)
    zombies = moose.wildcardFind('%s/##[TYPE=ZombieCompartment]'% path)
    compts = set(mooseCompts).union(set(zombies))
headerText = []
comptText = []
for c in compts:
comptText.append(create_section_in_neuron(c))
connectionText = []
for c in compts:
connectionText.append(connect_neuron_sections(c))
pulsetext = []
for stim in moose.wildcardFind('%s/##[TYPE=PulseGen]' % path):
pulsetext.append(insert_pulsegen(stim))
recordText, tableList = [], []
text = []
text.append('objref rect')
text.append('rect = new Vector()')
text.append('rect.record(&t)')
recordText.append("\n".join(text))
for i, table in enumerate(moose.wildcardFind('%s/##[TYPE=Table]' % path)):
text, tableName = insert_record(i, table)
recordText.append(text)
tableList.append(tableName)
stimtext = stimulus_text()
plottext = plot_text(tableList)
outfile = kwargs.get('outfile', 'moose_to_neuron.hoc')
mu.info("Writing neuron model to %s" % outfile)
with open(outfile, "w") as f:
f.writelines(headerText)
f.writelines(comptText)
f.writelines(connectionText)
f.writelines(recordText)
f.writelines(pulsetext)
f.writelines(stimtext)
f.writelines(plottext)
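# Hypothetical driver sketch (not part of the original file): load a model
# into MOOSE and emit the equivalent hoc script. The model file name and the
# '/model' path are placeholders and depend on how the model was built.
def _example_convert(model_file, outfile='moose_to_neuron.hoc'):
    moose.loadModel(model_file, '/model')
    to_neuron('/model', outfile=outfile)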
|
BhallaLab/benchmarks
|
moose_nrn_equivalence_testing/comparision_with_simple_HH_model_additional_mechanism/moose_to_neuron.py
|
Python
|
gpl-2.0
| 6,581
|
[
"MOOSE",
"NEURON"
] |
f1bf485d12b663ff1f876e7587537f713efa5f8d0a6161791453ea656a0ea65e
|
# -*- coding: utf-8 -*-
#
# Calls tracer after every successful transaction.
# Also supplies the 'tracer' command.
#
# Copyright (C) 2015 Jakub Kadlčík
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
import time
import traceback
import dnf.cli
import dnf.util
import dnfpluginsextras
from tracer import Query, Package
from tracer.views.default import DefaultView
_ = dnfpluginsextras._
class Tracer(dnf.Plugin):
"""DNF plugin for `tracer` command"""
name = "tracer"
def __init__(self, base, cli):
super(Tracer, self).__init__(base, cli)
self.timestamp = time.time()
self.base = base
self.cli = cli
def transaction(self):
"""
Call after successful transaction
See https://rpm-software-management.github.io/dnf/api_transaction.html
"""
# Don't run tracer when uninstalling it
if dnfpluginsextras.is_erasing(self.base.transaction,
"tracer"):
return
# Don't run tracer when preparing chroot for mock
if self.base.conf.installroot != "/":
return
# Don't run tracer when "nothing to do"
if not len(self.base.transaction):
return
installed = set([package.name for package in
self.base.transaction.install_set])
erased = set([package.name for package in
self.base.transaction.remove_set])
packages = [Package(p, time.time()) for p in list(installed | erased)]
try:
tracer = TracerFacade(packages)
tracer.render()
if len(tracer.apps) != 0:
print("\n" + _("For more information run:"))
print(" sudo tracer -iat " + str(self.timestamp))
except Exception:
render_error(traceback.format_exc())
class TracerFacade(object):
def __init__(self, packages, args=None):
self.apps = self.get_apps(packages)
self.args = args
def get_apps(self, packages):
query = Query()
return query.from_packages(packages).now().affected_applications().get()
def render(self):
# @TODO It is not in the Tracer API yet
args = self.args if self.args else dnf.util.Bunch(all=False, quiet=False)
view = DefaultView()
view.assign("applications", self.apps)
view.assign("args", args)
return view.render()
def render_error(err):
print("Tracer:")
print(" " + _("Call to Tracer API ended unexpectedly:") + "\n")
print(err)
print(_("Please visit https://github.com/FrostyX/tracer/issues "
"and submit the issue. Thank you"))
print(_("We apologize for any inconvenience"))
|
jsilhan/dnf-plugins-extras
|
plugins/tracer.py
|
Python
|
gpl-2.0
| 3,649
|
[
"VisIt"
] |
c6e8b55032683887c8ce3e20515a530198201eb2fcf1190d7155850889ca65f9
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.kpath import KPathLatimerMunro
from pymatgen.analysis.magnetism.analyzer import CollinearMagneticStructureAnalyzer
test_dir_structs = os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_files")
class KPathLatimerMunroTest(PymatgenTest):
def test_kpath_generation(self):
triclinic = [1, 2]
monoclinic = range(3, 16)
orthorhombic = range(16, 75)
tetragonal = range(75, 143)
rhombohedral = range(143, 168)
hexagonal = range(168, 195)
cubic = range(195, 231)
species = ["K", "La", "Ti"]
coords = [[0.345, 5, 0.77298], [0.1345, 5.1, 0.77298], [0.7, 0.8, 0.9]]
for i in range(230):
sg_num = i + 1
if sg_num in triclinic:
lattice = Lattice(
[[3.0233057319441246, 1, 0], [0, 7.9850357844548681, 1], [0, 1.2, 8.1136762279561818]]
)
elif sg_num in monoclinic:
lattice = Lattice.monoclinic(2, 9, 1, 99)
elif sg_num in orthorhombic:
lattice = Lattice.orthorhombic(2, 9, 1)
elif sg_num in tetragonal:
lattice = Lattice.tetragonal(2, 9)
elif sg_num in rhombohedral:
lattice = Lattice.hexagonal(2, 95)
elif sg_num in hexagonal:
lattice = Lattice.hexagonal(2, 9)
elif sg_num in cubic:
lattice = Lattice.cubic(2)
struct = Structure.from_spacegroup(sg_num, lattice, species, coords)
kpath = KPathLatimerMunro(struct) # Throws error if something doesn't work, causing test to fail.
struct_file_path = os.path.join(test_dir_structs, "AgO_kpath_test.cif")
struct = Structure.from_file(struct_file_path)
kpath = KPathLatimerMunro(struct) # Throws error if something doesn't work, causing test to fail.
def test_kpath_acentered(self):
species = ["K", "La", "Ti"]
coords = [[0.345, 5, 0.77298], [0.1345, 5.1, 0.77298], [0.7, 0.8, 0.9]]
lattice = Lattice.orthorhombic(2, 9, 1)
struct = Structure.from_spacegroup(38, lattice, species, coords)
sga = SpacegroupAnalyzer(struct)
struct_prim = sga.get_primitive_standard_structure(international_monoclinic=False)
kpath = KPathLatimerMunro(struct_prim)
kpoints = kpath._kpath["kpoints"]
labels = list(kpoints.keys())
self.assertEqual(
sorted(labels), sorted(["a", "b", "c", "d", "d_{1}", "e", "f", "q", "q_{1}", "Γ"]),
)
self.assertAlmostEqual(kpoints["a"][0], 0.0)
self.assertAlmostEqual(kpoints["a"][1], 0.4999999999999997)
self.assertAlmostEqual(kpoints["a"][2], 0.0)
self.assertAlmostEqual(kpoints["f"][0], -0.49999999999999933)
self.assertAlmostEqual(kpoints["f"][1], 0.4999999999999992)
self.assertAlmostEqual(kpoints["f"][2], 0.4999999999999999)
self.assertAlmostEqual(kpoints["c"][0], 0.0)
self.assertAlmostEqual(kpoints["c"][1], 0.0)
self.assertAlmostEqual(kpoints["c"][2], 0.5)
self.assertAlmostEqual(kpoints["b"][0], -0.5000000000000002)
self.assertAlmostEqual(kpoints["b"][1], 0.500000000000000)
self.assertAlmostEqual(kpoints["b"][2], 0.0)
self.assertAlmostEqual(kpoints["Γ"][0], 0)
self.assertAlmostEqual(kpoints["Γ"][1], 0)
self.assertAlmostEqual(kpoints["Γ"][2], 0)
self.assertAlmostEqual(kpoints["e"][0], 0.0)
self.assertAlmostEqual(kpoints["e"][1], 0.49999999999999956)
self.assertAlmostEqual(kpoints["e"][2], 0.5000000000000002)
d = False
if np.allclose(kpoints["d_{1}"], [0.2530864197530836, 0.25308641975308915, 0.0], atol=1e-5) or np.allclose(
kpoints["d"], [0.2530864197530836, 0.25308641975308915, 0.0], atol=1e-5
):
d = True
self.assertTrue(d)
q = False
if np.allclose(kpoints["q_{1}"], [0.2530864197530836, 0.25308641975308915, 0.5], atol=1e-5) or np.allclose(
kpoints["q"], [0.2530864197530836, 0.25308641975308915, 0.5], atol=1e-5
):
q = True
self.assertTrue(q)
def test_magnetic_kpath_generation(self):
struct_file_path = os.path.join(test_dir_structs, "LaMnO3_magnetic.mcif")
struct = Structure.from_file(struct_file_path)
mga = CollinearMagneticStructureAnalyzer(struct)
col_spin_orig = mga.get_structure_with_spin()
col_spin_orig.add_spin_by_site([0.0] * 20)
col_spin_sym = SpacegroupAnalyzer(col_spin_orig)
col_spin_prim = col_spin_sym.get_primitive_standard_structure(international_monoclinic=False)
magmom_vec_list = [np.zeros(3) for site in col_spin_prim]
magmom_vec_list[4:8] = [
np.array([3.87, 3.87, 0.0]),
np.array([3.87, 3.87, 0.0]),
np.array([-3.87, -3.87, 0.0]),
np.array([-3.87, -3.87, 0.0]),
]
col_spin_prim.add_site_property("magmom", magmom_vec_list)
kpath = KPathLatimerMunro(col_spin_prim, has_magmoms=True)
kpoints = kpath._kpath["kpoints"]
labels = list(kpoints.keys())
self.assertEqual(
sorted(labels), sorted(["a", "b", "c", "d", "d_{1}", "e", "f", "g", "g_{1}", "Γ"]),
)
self.assertAlmostEqual(kpoints["e"][0], -0.4999999999999998)
self.assertAlmostEqual(kpoints["e"][1], 0.0)
self.assertAlmostEqual(kpoints["e"][2], 0.5000000000000002)
self.assertAlmostEqual(kpoints["g"][0], -0.4999999999999999)
self.assertAlmostEqual(kpoints["g"][1], -0.49999999999999994)
self.assertAlmostEqual(kpoints["g"][2], 0.5000000000000002)
self.assertAlmostEqual(kpoints["a"][0], -0.4999999999999999)
self.assertAlmostEqual(kpoints["a"][1], 0.0)
self.assertAlmostEqual(kpoints["a"][2], 0.0)
self.assertAlmostEqual(kpoints["g_{1}"][0], 0.4999999999999999)
self.assertAlmostEqual(kpoints["g_{1}"][1], -0.5)
self.assertAlmostEqual(kpoints["g_{1}"][2], 0.5000000000000001)
self.assertAlmostEqual(kpoints["f"][0], 0.0)
self.assertAlmostEqual(kpoints["f"][1], -0.5)
self.assertAlmostEqual(kpoints["f"][2], 0.5000000000000002)
self.assertAlmostEqual(kpoints["c"][0], 0.0)
self.assertAlmostEqual(kpoints["c"][1], 0.0)
self.assertAlmostEqual(kpoints["c"][2], 0.5000000000000001)
self.assertAlmostEqual(kpoints["b"][0], 0.0)
self.assertAlmostEqual(kpoints["b"][1], -0.5)
self.assertAlmostEqual(kpoints["b"][2], 0.0)
self.assertAlmostEqual(kpoints["Γ"][0], 0)
self.assertAlmostEqual(kpoints["Γ"][1], 0)
self.assertAlmostEqual(kpoints["Γ"][2], 0)
d = False
if np.allclose(kpoints["d_{1}"], [-0.5, -0.5, 0.0], atol=1e-5) or np.allclose(
kpoints["d"], [-0.5, -0.5, 0.0], atol=1e-5
):
d = True
self.assertTrue(d)
g = False
if np.allclose(kpoints["g_{1}"], [-0.5, -0.5, 0.5], atol=1e-5) or np.allclose(
kpoints["g"], [-0.5, -0.5, 0.5], atol=1e-5
):
g = True
self.assertTrue(g)
if __name__ == "__main__":
unittest.main()
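# A minimal usage sketch (assumption, not part of this test module): the same
# class can be used directly to obtain a k-path for any structure, e.g.
#
#   from pymatgen.core.structure import Structure
#   from pymatgen.symmetry.kpath import KPathLatimerMunro
#   struct = Structure.from_file("POSCAR")
#   kpath = KPathLatimerMunro(struct)
#   print(kpath.kpath["kpoints"])   # label -> fractional reciprocal coordinates
#   print(kpath.kpath["path"])      # ordered segments between labels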
|
mbkumar/pymatgen
|
pymatgen/symmetry/tests/test_kpath_lm.py
|
Python
|
mit
| 7,626
|
[
"pymatgen"
] |
bc2db52c5fa7b435f20441eb163e2f69d83862c5664c4d031289416aca623209
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import copy
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import mooseutils
from .ParameterInfo import ParameterInfo
class BlockInfo(object):
"""
Holds information about a block.
"""
def __init__(self, parent, path, hard=False, description=""):
"""
Input:
parent[BlockInfo]: Parent of this block
path[str]: Path of this block
hard[bool]: Whether this is a hard path.
description[str]: Description of this block.
"""
super(BlockInfo, self).__init__()
self.parameters = {}
self.parameters_list = []
self.parameters_write_first = []
self.user_added = False
self.star = False
self.star_node = None
self.types = {}
self.included = False
self.comments = ""
self.name = os.path.basename(path)
self.path = path
self.children = {}
self.children_list = []
self.children_write_first = []
self.hard = hard
        self.description = description
self.parent = parent
self.changed_by_user = False
def checkInactive(self):
return not self.included and self.wantsToSave()
def wantsToSave(self):
return self.changed_by_user or self.user_added or self.included or self.childrenWantToSave()
def childrenWantToSave(self):
for key in self.children_list:
if self.children[key].wantsToSave():
return True
return False
def getParamInfo(self, param):
"""
Gets a ParameterInfo with the given name.
This looks in the current type if applicable.
Input:
param[str]: Parameter name
Return:
ParameterInfo if found else None
"""
param_info = self.parameters.get(param)
if param_info:
return param_info
type_block = self.getTypeBlock()
if type_block:
return type_block.parameters.get(param)
def orderedParameters(self):
"""
Utility function to get a list of ParameterInfos in
the same order as in self.parameters_list
Return:
list[ParameterInfo]
"""
ps = []
for p in self.parameters_list:
ps.append(self.parameters[p])
return ps
def getTypeBlock(self):
"""
Gets the block for the current type.
Return:
BlockInfo if found else None
"""
if self.types and self.parameters.get("type"):
type_info = self.parameters["type"]
if type_info.value and type_info.value in self.types:
return self.types[type_info.value]
def paramValue(self, param):
"""
Gets the value of a parameter.
Input:
param[str]: Name of the parameter
Return:
str: Value of the parameter or None
"""
param_info = self.getParamInfo(param)
if param_info:
return param_info.value
def setParamValue(self, param, val):
"""
Sets the value of a parameter.
Input:
param[str]: Name of the parameter
val[str]: New value of the parameter
"""
param_info = self.getParamInfo(param)
if param_info:
param_info.value = val
def blockType(self):
"""
Gets the current type for this block.
Return:
block type [str] if found else None
"""
pinfo = self.parameters.get("type")
if pinfo:
return pinfo.value
def setBlockType(self, type_name):
"""
Set the type for this block.
Input:
type_name[str]: New type name
"""
pinfo = self.parameters.get("type")
if pinfo:
pinfo.value = type_name
def addChildBlock(self, child_info):
"""
Add a new child to this block.
Input:
child_info[BlockInfo]: Child to be added
"""
child_info.parent = self
self.children[child_info.name] = child_info
self.children_list.append(child_info.name)
child_info.updatePaths()
def updatePaths(self):
"""
Make sure this node and all of its children have the correct paths
"""
self.path = os.path.join(self.parent.path, self.name)
for c in self.children.values():
c.updatePaths()
def removeChildBlock(self, name):
"""
Remove a child.
Input:
name[str]: Name of the child to remove
Return:
BlockInfo of the removed child
"""
child = self.children.get(name)
if child:
del self.children[name]
self.children_list.remove(name)
child.parent = None
return child
def renameChildBlock(self, oldname, newname):
"""
Rename one of the children
Input:
oldname[str]: Current name of the child
newname[str]: New name of the child
"""
tmp_child = self.children.get(newname)
if tmp_child:
mooseutils.mooseWarning("Tried to rename %s to %s but %s already exists." % (oldname, newname, newname))
return
child = self.children.get(oldname)
if child:
del self.children[oldname]
self.children[newname] = child
idx = self.children_list.index(oldname)
self.children_list.remove(oldname)
self.children_list.insert(idx, newname)
child.name = newname
child.updatePaths()
def addParameter(self, param):
"""
Adds a parameter.
Input:
param[ParameterInfo]: New parameter to be added
"""
param.parent = self
self.parameters[param.name] = param
if param not in self.parameters_list:
self.parameters_list.append(param.name)
def addUserParam(self, param, value):
"""
Adds a user parameter.
Input:
param[str]: Name of the parameter to add
value[str]: Initial value of the parameter
Return:
ParameterInfo: The new parameter
"""
pinfo = self.getParamInfo(param)
if pinfo:
mooseutils.mooseWarning("Tried to add a user parameter when that name already exists: %s:%s" % (self.path, param))
return
pinfo = ParameterInfo(self, param)
pinfo.user_added = True
pinfo.value = value
self.addParameter(pinfo)
return pinfo
def removeUserParam(self, name, force=False):
"""
Remove a user added parameter.
Input:
name[str]: Name of the parameter to remove.
"""
pinfo = self.getParamInfo(name)
if pinfo and (pinfo.user_added or force):
del self.parameters[pinfo.name]
self.parameters_list.remove(name)
pinfo.parent = None
def renameUserParam(self, oldname, newname):
"""
Rename a user parameter.
Input:
oldname[str]: Current name of the parameter.
newname[str]: New name of the parameter.
"""
pinfo = self.getParamInfo(oldname)
if pinfo and pinfo.user_added:
del self.parameters[oldname]
orig_index = self.parameters_list.index(oldname)
self.parameters_list.insert(orig_index, newname)
self.parameters_list.remove(oldname)
pinfo.name = newname
self.parameters[newname] = pinfo
def moveUserParam(self, param, new_index):
"""
Move a user parameter. This just changes the order in which it will be written out.
Input:
param[str]: Name of the parameter.
new_index[int]: New index in the parameter list
"""
pinfo = self.getParamInfo(param)
if pinfo:
self.parameters_list.remove(param)
self.parameters_list.insert(new_index, param)
def moveChildBlock(self, name, new_index):
"""
Moves a child block. This just changes the order in which it will be written out.
Input:
name[str]: Name of the child block.
new_index[int]: New index of the child.
"""
cinfo = self.children.get(name)
if cinfo:
self.children_list.remove(name)
self.children_list.insert(new_index, name)
def copy(self, parent):
"""
Makes a copy of this node.
Makes a recursive copy of all children, types, star node, etc.
Input:
parent[BlockInfo]: Parent of the copied block.
Return:
BlockInfo: A copy of this block
"""
new = copy.copy(self)
new.parent = parent
new.children = {}
new.children_list = []
new.children_write_first = []
for key in self.children_list:
c = self.children[key]
new.children_list.append(c.name)
new.children[key] = c.copy(new)
if self.star_node:
new.star_node = self.star_node.copy(new)
new.types = {}
for key, val in self.types.items():
new.types[key] = val.copy(new)
new.parameters = {}
new.parameters_list = []
new.parameters_write_first = []
for key in self.parameters_list:
p = self.parameters[key]
new.parameters_list.append(p.name)
new.parameters[p.name] = p.copy(new)
return new
def addBlockType(self, type_info):
"""
Adds a new type block.
Input:
type_info[BlockInfo]: A new type block
"""
self.types[type_info.name] = type_info
type_info.parent = self
def setStarInfo(self, star_info):
"""
Sets the star node for this block
Input:
star_info[BlockInfo]: The star block
"""
self.star_node = star_info
self.star = True
star_info.parent = self
def toolTip(self):
"""
A suitable description that could be used in a tool tip.
Return:
str: A description of this block.
"""
return self.description
def findFreeChildName(self):
"""
Tries to find a free node name.
        Starts from 0 and looks for a child block name of the form New_{number} that is not already taken
Returns:
str: freely available node name
"""
for i in range(10000):
name = "New_%s" % i
if name not in self.children_list:
return name
def dump(self, indent=0, sep=' '):
"""
Provides a description of this block with all of its children, types, etc.
Input:
indent[int]: Current indentation level
sep[str]: The indent string
Return:
str: The dump of this block.
"""
o = StringIO()
i_str = sep*indent
o.write("%sPath: %s\n" % (i_str, self.path))
indent += 1
if self.parent:
o.write("%sParent: %s\n" % (i_str, self.parent.path))
o.write("%sChildren: %s\n" % (i_str, self.children_list))
o.write("%sTypes: %s\n" % (i_str, self.types.keys()))
o.write("%sHard: %s\n" % (i_str, self.hard))
o.write("%sUser: %s\n" % (i_str, self.user_added))
o.write("%sStar: %s\n" % (i_str, self.star))
o.write("%sIncluded: %s\n" % (i_str, self.included))
o.write("%sDescription: %s\n" % (i_str, self.description))
o.write("%sParameters:\n" % i_str)
for p in self.parameters.values():
o.write("%s%s:\n" % ((indent+1)*sep, p.name))
p.dump(o, indent+2, sep)
o.write("%sChildren:\n" % (indent*sep))
for name in self.children_list:
c = self.children[name]
o.write(c.dump(indent+1, sep))
o.write("%sStar node:\n" % (indent*sep))
if self.star_node:
o.write(self.star_node.dump(indent+1, sep))
o.write("%sType nodes:\n" % (indent*sep))
for t in self.types.values():
o.write(t.dump(indent+1, sep))
return o.getvalue()
def getParamNames(self):
"""
Get the parameter names in the required order.
Parameter names specified in the input file are printed
out in the same order as in the original input file,
followed by any other parameters that were changed.
Return:
list[str]: List of parameter names
"""
return self._orderedNames(self.parameters_write_first, self.parameters_list)
def getChildNames(self):
"""
Get the child names in the required order.
Child names specified in the input file are printed
out in the same order as in the original input file,
followed by any other children that were changed.
Return:
list[str]: List of child names
"""
return self._orderedNames(self.children_write_first, self.children_list)
def _orderedNames(self, first, complete):
"""
Add in elements from the list "complete" to the end
of the "first" if they are not already in "first"
Input:
first[list]: These elements will be first in the returned list
complete[list]: These elements will come after first
Return:
list: The elements in "complete" with elements in "first" first.
"""
l = first[:]
for x in complete:
if x not in l:
l.append(x)
return l
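# Minimal usage sketch (hypothetical, not part of peacock): build a tiny block
# tree by hand, attach a user parameter, and inspect the textual dump.
def _example_block_tree():
    root = BlockInfo(None, "/", hard=True, description="root block")
    mesh = BlockInfo(root, "/Mesh", hard=True, description="mesh block")
    root.addChildBlock(mesh)
    mesh.addUserParam("dim", "2")
    return root.dump()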
|
nuclear-wizard/moose
|
python/peacock/Input/BlockInfo.py
|
Python
|
lgpl-2.1
| 14,068
|
[
"MOOSE"
] |
f27e6bd6a9034b9a1622f18cebdc4a4c65fac255b87008ad0447b862f39c5348
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from subprocess import call, Popen
import random
import re
import tempfile
import flask
import numpy as np
import werkzeug.exceptions
from .forms import ImageClassificationModelForm
from .job import ImageClassificationModelJob
import digits
from digits import frameworks
from digits import utils
from digits.config import config_value
from digits.dataset import ImageClassificationDatasetJob
from digits.inference import ImageInferenceJob
from digits.status import Status
from digits.utils import filesystem as fs
from digits.utils.forms import fill_form_if_cloned, save_form_to_job
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import app, scheduler
blueprint = flask.Blueprint(__name__, __name__)
# TODO: Move this somewhere else!
import h5py
from datetime import datetime
"""
Read image list
"""
def read_image_list(image_list, image_folder, num_test_images):
paths = []
ground_truths = []
for line in image_list.readlines():
line = line.strip()
if not line:
continue
# might contain a numerical label at the end
match = re.match(r'(.*\S)\s+(\d+)$', line)
if match:
path = match.group(1)
ground_truth = int(match.group(2))
else:
path = line
ground_truth = None
if not utils.is_url(path) and image_folder and not os.path.isabs(path):
path = os.path.join(image_folder, path)
paths.append(path)
ground_truths.append(ground_truth)
if num_test_images is not None and len(paths) >= num_test_images:
break
return paths, ground_truths
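# A small format sketch (assumption, mirroring the parsing above): each line is
# an image path optionally followed by an integer ground-truth label; relative
# paths are resolved against image_folder, URLs are kept as-is.
def _example_read_image_list():
    import StringIO
    listing = StringIO.StringIO("images/cat_01.jpg 0\n"
                                "images/dog_07.jpg 1\n"
                                "http://example.com/bird.png\n")
    # -> (['/data/images/cat_01.jpg', '/data/images/dog_07.jpg',
    #      'http://example.com/bird.png'], [0, 1, None])
    return read_image_list(listing, '/data', None)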
@blueprint.route('/new', methods=['GET'])
@utils.auth.requires_login
def new():
"""
Return a form for a new ImageClassificationModelJob
"""
form = ImageClassificationModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = get_standard_networks()
form.standard_networks.default = get_default_standard_network()
form.previous_networks.choices = get_previous_networks()
prev_network_snapshots = get_previous_network_snapshots()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
return flask.render_template('models/images/classification/new.html',
form = form,
frameworks = frameworks.get_frameworks(),
previous_network_snapshots = prev_network_snapshots,
previous_networks_fullinfo = get_previous_networks_fulldetails(),
multi_gpu = config_value('caffe_root')['multi_gpu'],
)
@blueprint.route('.json', methods=['POST'])
@blueprint.route('', methods=['POST'], strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create():
"""
Create a new ImageClassificationModelJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = ImageClassificationModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = get_standard_networks()
form.standard_networks.default = get_default_standard_network()
form.previous_networks.choices = get_previous_networks()
prev_network_snapshots = get_previous_network_snapshots()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
if not form.validate_on_submit():
if request_wants_json():
return flask.jsonify({'errors': form.errors}), 400
else:
return flask.render_template('models/images/classification/new.html',
form = form,
frameworks = frameworks.get_frameworks(),
previous_network_snapshots = prev_network_snapshots,
previous_networks_fullinfo = get_previous_networks_fulldetails(),
multi_gpu = config_value('caffe_root')['multi_gpu'],
), 400
datasetJob = scheduler.get_job(form.dataset.data)
if not datasetJob:
raise werkzeug.exceptions.BadRequest(
'Unknown dataset job_id "%s"' % form.dataset.data)
    # sweeps will be a list of the permutations of swept fields
# Get swept learning_rate
sweeps = [{'learning_rate': v} for v in form.learning_rate.data]
add_learning_rate = len(form.learning_rate.data) > 1
# Add swept batch_size
sweeps = [dict(s.items() + [('batch_size', bs)]) for bs in form.batch_size.data for s in sweeps[:]]
add_batch_size = len(form.batch_size.data) > 1
n_jobs = len(sweeps)
jobs = []
for sweep in sweeps:
# Populate the form with swept data to be used in saving and
# launching jobs.
form.learning_rate.data = sweep['learning_rate']
form.batch_size.data = sweep['batch_size']
# Augment Job Name
extra = ''
if add_learning_rate:
extra += ' learning_rate:%s' % str(form.learning_rate.data[0])
if add_batch_size:
extra += ' batch_size:%d' % form.batch_size.data[0]
job = None
try:
job = ImageClassificationModelJob(
username = utils.auth.get_username(),
name = form.model_name.data + extra,
dataset_id = datasetJob.id(),
)
# get handle to framework object
fw = frameworks.get_framework_by_id(form.framework.data)
pretrained_model = None
if form.method.data == 'standard':
found = False
# can we find it in standard networks?
network_desc = fw.get_standard_network_desc(form.standard_networks.data)
if network_desc:
found = True
network = fw.get_network_from_desc(network_desc)
if not found:
raise werkzeug.exceptions.BadRequest(
'Unknown standard model "%s"' % form.standard_networks.data)
elif form.method.data == 'previous':
old_job = scheduler.get_job(form.previous_networks.data)
if not old_job:
raise werkzeug.exceptions.BadRequest(
'Job not found: %s' % form.previous_networks.data)
use_same_dataset = (old_job.dataset_id == job.dataset_id)
network = fw.get_network_from_previous(old_job.train_task().network, use_same_dataset)
for choice in form.previous_networks.choices:
if choice[0] == form.previous_networks.data:
epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
if epoch == 0:
pass
elif epoch == -1:
pretrained_model = old_job.train_task().pretrained_model
else:
for filename, e in old_job.train_task().snapshots:
if e == epoch:
pretrained_model = filename
break
if pretrained_model is None:
raise werkzeug.exceptions.BadRequest(
"For the job %s, selected pretrained_model for epoch %d is invalid!"
% (form.previous_networks.data, epoch))
if not (os.path.exists(pretrained_model)):
raise werkzeug.exceptions.BadRequest(
"Pretrained_model for the selected epoch doesn't exists. May be deleted by another user/process. Please restart the server to load the correct pretrained_model details")
break
elif form.method.data == 'custom':
network = fw.get_network_from_desc(form.custom_network.data)
pretrained_model = form.custom_network_snapshot.data.strip()
else:
raise werkzeug.exceptions.BadRequest(
'Unrecognized method: "%s"' % form.method.data)
policy = {'policy': form.lr_policy.data}
if form.lr_policy.data == 'fixed':
pass
elif form.lr_policy.data == 'step':
policy['stepsize'] = form.lr_step_size.data
policy['gamma'] = form.lr_step_gamma.data
elif form.lr_policy.data == 'multistep':
policy['stepvalue'] = form.lr_multistep_values.data
policy['gamma'] = form.lr_multistep_gamma.data
elif form.lr_policy.data == 'exp':
policy['gamma'] = form.lr_exp_gamma.data
elif form.lr_policy.data == 'inv':
policy['gamma'] = form.lr_inv_gamma.data
policy['power'] = form.lr_inv_power.data
elif form.lr_policy.data == 'poly':
policy['power'] = form.lr_poly_power.data
elif form.lr_policy.data == 'sigmoid':
policy['stepsize'] = form.lr_sigmoid_step.data
policy['gamma'] = form.lr_sigmoid_gamma.data
else:
raise werkzeug.exceptions.BadRequest(
'Invalid learning rate policy')
if config_value('caffe_root')['multi_gpu']:
if form.select_gpus.data:
selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
gpu_count = None
elif form.select_gpu_count.data:
gpu_count = form.select_gpu_count.data
selected_gpus = None
else:
gpu_count = 1
selected_gpus = None
else:
if form.select_gpu.data == 'next':
gpu_count = 1
selected_gpus = None
else:
selected_gpus = [str(form.select_gpu.data)]
gpu_count = None
# Python Layer File may be on the server or copied from the client.
fs.copy_python_layer_file(
bool(form.python_layer_from_client.data),
job.dir(),
(flask.request.files[form.python_layer_client_file.name]
if form.python_layer_client_file.name in flask.request.files
else ''), form.python_layer_server_file.data)
job.tasks.append(fw.create_train_task(
job = job,
dataset = datasetJob,
train_epochs = form.train_epochs.data,
snapshot_interval = form.snapshot_interval.data,
learning_rate = form.learning_rate.data[0],
lr_policy = policy,
gpu_count = gpu_count,
selected_gpus = selected_gpus,
batch_size = form.batch_size.data[0],
batch_accumulation = form.batch_accumulation.data,
val_interval = form.val_interval.data,
pretrained_model = pretrained_model,
crop_size = form.crop_size.data,
use_mean = form.use_mean.data,
network = network,
random_seed = form.random_seed.data,
solver_type = form.solver_type.data,
shuffle = form.shuffle.data,
)
)
## Save form data with the job so we can easily clone it later.
save_form_to_job(job, form)
jobs.append(job)
scheduler.add_job(job)
if n_jobs == 1:
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for('digits.model.views.show', job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
if request_wants_json():
return flask.jsonify(jobs=[job.json_dict() for job in jobs])
# If there are multiple jobs launched, go to the home page.
return flask.redirect('/')
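# Endpoint sketch (assumption: in stock DIGITS this blueprint is mounted under
# /models/images/classification, so the JSON route above would be reachable
# roughly like this; the exact form fields come from
# ImageClassificationModelForm and may differ in this fork):
#
#   curl -X POST http://localhost:5000/models/images/classification.json \
#        -d method=standard -d standard_networks=lenet -d framework=caffe \
#        -d model_name=example -d dataset=<dataset_job_id> \
#        -d train_epochs=30 -d learning_rate=0.01 -d batch_size=64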
def show(job, related_jobs=None):
"""
Called from digits.model.views.models_show()
"""
return flask.render_template('models/images/classification/show.html', job=job, framework_ids = [fw.get_id() for fw in frameworks.get_frameworks()], related_jobs=related_jobs)
@blueprint.route('/large_graph', methods=['GET'])
def large_graph():
"""
Show the loss/accuracy graph, but bigger
"""
job = job_from_request()
return flask.render_template('models/images/classification/large_graph.html', job=job)
@blueprint.route('/layer_visualizations', methods=['POST', 'GET'])
def layer_visualizations():
# Get initial visualizations from job request:
model_job = job_from_request()
task = model_job.train_task()
net = task.get_net(None,0)
o = "digits/layer_outputs/"
# Copy files to layer_outputs (as this is the directory visualizations
# will read from:
prototxt_path = o+"deploy.prototxt"
call(["cp", task.get_depoly_prototxt(), prototxt_path])
call(["cp", task.get_caffemodel(), o+"model.caffemodel"])
# Add input param force_backward: true to enable deconv:
with file(prototxt_path, 'r') as original: data = original.read()
if 'force_backward' not in data:
with file(prototxt_path, 'w') as modified: modified.write("force_backward: true\n" + data)
# Get list of pre-trained models available for visualizations:
lst = os.listdir(o+"jobs")
pretrained = []
for path in lst:
f = h5py.File(o+'jobs/'+path+'/vis_data.hdf5')
jobname = f['jobname'].attrs['jobname']
pretrained.append({'jobname': jobname, 'path': path})
# Render view:
prototxt = open(prototxt_path,'r').read()
return flask.render_template('models/images/classification/layer_visualizations.html',
model_job = model_job,
prototxt = prototxt,
pretrained = pretrained
)
@blueprint.route('/run_model.json', methods=['POST', 'GET'])
@blueprint.route('/run_model', methods=['POST', 'GET'])
def run_model():
"""
Save a pre-trained model
prototxt <file> -- deploy.prototxt file
caffemodel <file> -- ***.caffemodel
jobname <form> -- what to save job as
"""
timestamp = datetime.now().strftime('%Y-%m-%d%H-%M-%S')
# Get prototxt (model definition) from form:
prototxt = tempfile.mkstemp(suffix='.prototxt')
flask.request.files['prototxt'].save(prototxt[1])
prototxt_path = prototxt[1]
os.close(prototxt[0])
# Get weights from form:
caffemodel = tempfile.mkstemp(suffix='.caffemodel')
flask.request.files['weights'].save(caffemodel[1])
caffemodel_path = caffemodel[1]
os.close(caffemodel[0])
outputs_path = os.path.abspath(digits.__path__[0])+"/layer_outputs/"
# Move the model definition and weights into the layer_outputs folder:
call(["mv", prototxt_path, outputs_path+"deploy.prototxt"])
call(["mv", caffemodel_path, outputs_path+"model.caffemodel"])
# Add force_backward to input parameters for deconv:
filename = outputs_path+"deploy.prototxt"
with file(filename, 'r') as original: data = original.read()
if 'force_backward' not in data:
with file(filename, 'w') as modified: modified.write("force_backward: true\n" + data)
o_proto = outputs_path+"jobs/"+timestamp+"/deploy.prototxt"
o_caffmodel = outputs_path+"jobs/"+timestamp+"/model.caffemodel"
# Add prototxt and caffe model to jobs directory:
call(["mkdir", outputs_path+"jobs/"+timestamp])
call(["cp", outputs_path+"deploy.prototxt", o_proto])
call(["cp", outputs_path+"model.caffemodel", o_caffmodel])
# Write a database to hold vis information:
f = h5py.File('digits/layer_outputs/jobs/'+timestamp+'/vis_data.hdf5','w-')
dset = f.create_dataset("jobname", (1,) , dtype="i")
dset.attrs['jobname'] = flask.request.form['jobname']
dset.attrs['timestamp'] = timestamp
# Save Weights:
p = Popen(["python", outputs_path+"get_weights.py", o_proto, o_caffmodel, outputs_path+"jobs/"+timestamp])
p.wait()
# Render new model def in view:
prototxt = open(filename,'r').read()
return flask.jsonify({'data': {"prototxt": prototxt}})
@blueprint.route('/load_pretrained_model.json', methods=['POST'])
@blueprint.route('/load_pretrained_model', methods=['POST', 'GET'])
def load_pretrained_model():
"""
Load a pre-trained model
path <Args> -- path to job
"""
path = "jobs/"+flask.request.args['path'] + "/"
outputs_path = os.path.abspath(digits.__path__[0]) + "/layer_outputs/"
job_path = outputs_path + path
prototxt_path = job_path+"deploy.prototxt"
caffemodel_path = job_path+"model.caffemodel"
# Get all the images stored for this job:
f = h5py.File(job_path+'/activations.hdf5','a')
image_data = []
for key in f:
image_data.append({"key": key , "img": f[key]['data'][:].tolist()})
# Get last backprop job:
b = h5py.File(job_path+'/backprops.hdf5','a')
if "0" in b:
backprop_info = {
"data": b["0"]["info"][:].tolist(),
"attrs": dict(b["0"]["info"].attrs.items())
}
else:
backprop_info = {}
# Render new model def in view:
prototxt = open(prototxt_path,'r').read()
return flask.jsonify({'data': {"prototxt": prototxt, "backprop": backprop_info}, 'images': image_data})
@blueprint.route('/get_backprop_from_neuron_in_layer.json', methods=['POST'])
@blueprint.route('/get_backprop_from_neuron_in_layer', methods=['POST', 'GET'])
def get_backprop_from_neuron_in_layer():
"""
Runs backprop from a neuron in a specified layer
path <Args> -- path to job
image_key <Args> -- image key in activations.hdf5 (0..N = num images)
layer_name <Args> -- layer that neuron resides in
neuron_index <Args> -- index of selected neuron
"""
# Get layer and neuron from inputs:
delchars = ''.join(c for c in map(chr, range(256)) if not c.isalnum())
o = os.path.abspath(digits.__path__[0])+"/layer_outputs/"
# Get params from flask args:
path = o+"jobs/"+flask.request.args['path']
layer_name = str(flask.request.args['layer_name'])
neuron_index = str(flask.request.args['neuron_index'])
image_key = str(flask.request.args['image_key'])
# Run the backprop script:
p = Popen(["python", o+"get_backprops.py", path, layer_name, neuron_index])
p.wait()
# Return data for this layer:
return flask.jsonify(backprops(path,image_key,layer_name))
@blueprint.route('/deconv_neuron_in_layer.json', methods=['POST'])
@blueprint.route('/deconv_neuron_in_layer', methods=['POST', 'GET'])
def deconv_neuron_in_layer():
"""
Sends the neuron features from a given layer into Deconv Network
and returns the output of the 'data' layer
path <Args> -- path to job
image_key <Args> -- key to image ("0" .. num of images in db)
layer_name <Args> -- layer that neuron resides in
layer_type <Args> -- type of the layer that the neuron resides in
neuron_index <Args> -- index of selected neuron
"""
delchars = ''.join(c for c in map(chr, range(256)) if not c.isalnum())
o = os.path.abspath(digits.__path__[0])+"/layer_outputs/"
# Get params from flask args:
path = o+"jobs/"+flask.request.args['path']
image_key = flask.request.args['image_key']
layer_name = str(flask.request.args['layer_name'])
neuron_index = str(flask.request.args['neuron_index'])
# Run the deconvolution script
p = Popen(["python", o+"get_deconv.py", path, image_key, layer_name, neuron_index])
p.wait()
# Get outputs:
deconv_path = o+"deconv/" + layer_name.translate(None,delchars)+".npy"
data = np.load(deconv_path)
return flask.jsonify({'data': data.tolist()})
@blueprint.route('/get_outputs.json', methods=['POST', 'GET'])
@blueprint.route('/get_outputs', methods=['POST', 'GET'])
def get_outputs():
"""
Return the outputs of weights and activations for a given layer
path <Args> -- path to job
layer_name <Args> -- name of layer
image_key <Args> -- key to image ("0" .. num of images in db)
"""
path = os.path.abspath(digits.__path__[0])+"/layer_outputs/jobs/"+ str(flask.request.args['path'])+"/"
image_key = flask.request.args['image_key']
layer_name = flask.request.args['layer_name']
layers = []
layers.append({'type':'weights', 'data': weights(path,layer_name)})
layers.append({'type':'activations', 'data': activations(path,image_key,layer_name)})
layers.append({'type':'backprops', 'data': backprops(path,image_key,layer_name)["data"]})
return flask.jsonify({'layers': layers})
@blueprint.route('/get_weights.json', methods=['POST', 'GET'])
@blueprint.route('/get_weights', methods=['POST', 'GET'])
def get_weights():
"""
Returns the weights for a selected layer
path <Args> -- path to job
layer_name <Args> -- name of layer
"""
# Get Path and Layer Name from input arguments:
path = os.path.abspath(digits.__path__[0])+"/layer_outputs/jobs/"+ str(flask.request.args['path'])+"/"
layer_name = str(flask.request.args['layer_name'])
return flask.jsonify({'data': weights(path, layer_name)})
@blueprint.route('/get_activations.json', methods=['POST', 'GET'])
@blueprint.route('/get_activations', methods=['POST', 'GET'])
def get_activations():
"""
Return the activations for a specific layer and image
path <Args> -- path to job
layer_name <Args> -- name of layer
image_key <Args> -- key to image ("0" .. num of images in db)
"""
# Get Path and Layer Name from input arguments:
path = os.path.abspath(digits.__path__[0])+"/layer_outputs/jobs/"+ str(flask.request.args['path'])+"/"
layer_name = str(flask.request.args['layer_name'])
image_key = str(flask.request.args['image_key'])
return flask.jsonify({'data': activations(path,image_key,layer_name)})
@blueprint.route('/get_backprops.json', methods=['POST', 'GET'])
@blueprint.route('/get_backprops', methods=['POST', 'GET'])
def get_backprops():
"""
Return the backprops for a specific layer and image
path <Args> -- path to job
layer_name <Args> -- name of layer
image_key <Args> -- key to image ("0" .. num of images in db)
"""
# Get Path and Layer Name from input arguments:
path = os.path.abspath(digits.__path__[0])+"/layer_outputs/jobs/"+ str(flask.request.args['path'])+"/"
layer_name = str(flask.request.args['layer_name'])
image_key = str(flask.request.args['image_key'])
return flask.jsonify({'data': backprops(path,image_key,layer_name)["data"]})
def backprops(path,image_key,layer_name):
# Read backprops file, and group containing activations for given image:
f = h5py.File(path+'/backprops.hdf5','a')
data = []
info = {}
if image_key in f:
if layer_name in f[image_key]:
s = int(f[image_key][layer_name].shape[1]/40 +1)
data = f[image_key][layer_name][:100,::s,::s].tolist()
info = {
"data": f[image_key]["info"][:].tolist(),
"attrs": dict(f[image_key]["info"].attrs.items())
}
return {"data": data, "info":info}
def activations(path,image_key,layer_name):
# Read activations file, and group containing activations for given image:
f = h5py.File(path+'activations.hdf5','a')
data = []
# Return activations of first 100 neurons:
# TODO: Resolution should be an input parameter
if image_key in f:
grp = f[image_key]
if layer_name in grp:
s = int(f[image_key][layer_name].shape[1]/40 +1)
data = grp[layer_name][:100,::s,::s].tolist()
return data
def weights(path,layer_name):
# Read weights file:
f = h5py.File(path+'weights.hdf5','r')
if layer_name in f:
data = f[layer_name][:100].tolist()
else:
data = []
return data
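# --- Illustrative sketch, not part of the upstream views.py ---
# A minimal example of the HDF5 layout that the weights()/activations()/backprops()
# helpers above appear to assume: activations.hdf5 holds one group per image key
# ("0", "1", ...) with one dataset per layer, and weights.hdf5 holds one dataset
# per layer at the top level. The layer name, shapes and contents below are
# made-up assumptions for illustration only.
def _demo_layer_outputs_layout(tmpdir):
    import h5py
    import numpy as np
    # Write a fake activations file: image "0", layer "conv1", 100 maps of 8x8
    with h5py.File(tmpdir + '/activations.hdf5', 'w') as f:
        f.create_group("0").create_dataset("conv1", data=np.random.rand(100, 8, 8))
    # Write a fake weights file: layer "conv1", 100 filters of 5x5
    with h5py.File(tmpdir + '/weights.hdf5', 'w') as f:
        f.create_dataset("conv1", data=np.random.rand(100, 5, 5))
    # Read it back the same way activations() above does (stride derived from map width)
    with h5py.File(tmpdir + '/activations.hdf5', 'r') as f:
        s = int(f["0"]["conv1"].shape[1] / 40 + 1)
        return f["0"]["conv1"][:100, ::s, ::s].shape   # -> (100, 8, 8)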
@blueprint.route('/send_params.json', methods=['POST', 'GET'])
@blueprint.route('/send_params', methods=['POST', 'GET'])
def send_params():
"""
Load image & Save activations
image_file <file> -- image to run through the network
path <Args> -- job directory to save the outputs to
"""
# Set job path from flask input arguments
o = os.path.abspath(digits.__path__[0])+"/layer_outputs/"
path = o+"jobs/"+ str(flask.request.args['path'])
# Store image file temporarily:
outfile = tempfile.mkstemp(suffix='.png')
flask.request.files['image_file'].save(outfile[1])
image_path = outfile[1]
os.close(outfile[0])
# Save Activations:
p = Popen(["python", o+"get_activations.py", image_path, path])
p.wait()
return flask.jsonify({'data': "success!"})
@blueprint.route('/classify_one.json', methods=['POST'])
@blueprint.route('/classify_one', methods=['POST', 'GET'])
def classify_one():
"""
Classify one image and return the top 5 classifications
Returns JSON when requested: {predictions: {category: confidence,...}}
"""
model_job = job_from_request()
remove_image_path = False
image = None
if 'image_path' in flask.request.form and flask.request.form['image_path']:
image_path = flask.request.form['image_path']
elif 'image_file' in flask.request.files and flask.request.files['image_file']:
outfile = tempfile.mkstemp(suffix='.png')
flask.request.files['image_file'].save(outfile[1])
image_path = outfile[1]
os.close(outfile[0])
remove_image_path = True
else:
raise werkzeug.exceptions.BadRequest('must provide image_path or image_file')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
layers = 'none'
if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
layers = 'all'
# create inference job
inference_job = ImageInferenceJob(
username = utils.auth.get_username(),
name = "Classify One Image",
model = model_job,
images = [image_path],
epoch = epoch,
layers = layers
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, visualizations = inference_job.get_data()
# set return status code
status_code = 500 if inference_job.status == 'E' else 200
# delete job
scheduler.delete_job(inference_job)
if remove_image_path:
os.remove(image_path)
predictions = []
prototxt = model_job.train_task().get_network_desc()
if inputs is not None and len(inputs['data']) == 1:
image = utils.image.embed_image_html(inputs['data'][0])
# convert to class probabilities for viewing
last_output_name, last_output_data = outputs.items()[-1]
if len(last_output_data) == 1:
scores = last_output_data[0].flatten()
indices = (-scores).argsort()
labels = model_job.train_task().get_labels()
predictions = []
for i in indices:
# ignore prediction if we don't have a label for the corresponding class
# the user might have set the final fully-connected layer's num_output to
# too high a value
if i < len(labels):
predictions.append( (labels[i], scores[i]) )
predictions = [(p[0], round(100.0*p[1],2)) for p in predictions[:5]]
if request_wants_json():
return flask.jsonify({'predictions': predictions}), status_code
else:
return flask.render_template('models/images/classification/classify_one.html',
model_job = model_job,
job = inference_job,
image_src = image,
image_data = inputs['data'][0],
predictions = predictions,
visualizations = visualizations,
prototxt = prototxt,
total_parameters= sum(v['param_count'] for v in visualizations if v['vis_type'] == 'Weights'),
), status_code
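# --- Illustrative sketch, not part of the upstream views.py ---
# A minimal client-side example of calling the /classify_one.json endpoint above
# with the `requests` library. The base URL and blueprint mount point, the job id
# value and the 'job_id' parameter name (assumed to be what job_from_request()
# reads) are assumptions; 'image_file' and 'snapshot_epoch' follow the handler.
def _classify_one_client_example():
    import requests
    url = 'http://localhost:5000/models/images/classification/classify_one.json'
    with open('example.png', 'rb') as image_file:
        response = requests.post(
            url,
            files={'image_file': image_file},
            data={'job_id': '20160101-000000-abcd',  # hypothetical model job id
                  'snapshot_epoch': '30'})
    # Expected payload shape per the handler: {"predictions": [[label, confidence], ...]}
    return response.json()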
@blueprint.route('/classify_many.json', methods=['POST'])
@blueprint.route('/classify_many', methods=['POST', 'GET'])
def classify_many():
"""
Classify many images and return the top 5 classifications for each
Returns JSON when requested: {classifications: {filename: [[category,confidence],...],...}}
"""
model_job = job_from_request()
image_list = flask.request.files.get('image_list')
if not image_list:
raise werkzeug.exceptions.BadRequest('image_list is a required field')
if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
image_folder = flask.request.form['image_folder']
if not os.path.exists(image_folder):
raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
else:
image_folder = None
if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
num_test_images = int(flask.request.form['num_test_images'])
else:
num_test_images = None
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
paths, ground_truths = read_image_list(image_list, image_folder, num_test_images)
# create inference job
inference_job = ImageInferenceJob(
username = utils.auth.get_username(),
name = "Classify Many Images",
model = model_job,
images = paths,
epoch = epoch,
layers = 'none'
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, _ = inference_job.get_data()
# set return status code
status_code = 500 if inference_job.status == 'E' else 200
# delete job
scheduler.delete_job(inference_job)
if outputs is not None and len(outputs) < 1:
# an error occurred
outputs = None
if inputs is not None:
# retrieve path and ground truth of images that were successfully processed
paths = [paths[idx] for idx in inputs['ids']]
ground_truths = [ground_truths[idx] for idx in inputs['ids']]
# defaults
classifications = None
show_ground_truth = None
top1_accuracy = None
top5_accuracy = None
confusion_matrix = None
per_class_accuracy = None
labels = None
if outputs is not None:
# convert to class probabilities for viewing
last_output_name, last_output_data = outputs.items()[-1]
if len(last_output_data) < 1:
raise werkzeug.exceptions.BadRequest(
'Unable to classify any image from the file')
scores = last_output_data
# take top 5
indices = (-scores).argsort()[:, :5]
labels = model_job.train_task().get_labels()
n_labels = len(labels)
# remove invalid ground truth
ground_truths = [x if x is not None and (0 <= x < n_labels) else None for x in ground_truths]
# how many pieces of ground truth do we have?
n_ground_truth = len([1 for x in ground_truths if x is not None])
show_ground_truth = n_ground_truth > 0
# compute classifications and statistics
classifications = []
n_top1_accurate = 0
n_top5_accurate = 0
confusion_matrix = np.zeros((n_labels,n_labels), dtype=np.dtype(int))
for image_index, index_list in enumerate(indices):
result = []
if ground_truths[image_index] is not None:
if ground_truths[image_index] == index_list[0]:
n_top1_accurate += 1
if ground_truths[image_index] in index_list:
n_top5_accurate += 1
if (0 <= ground_truths[image_index] < n_labels) and (0 <= index_list[0] < n_labels):
confusion_matrix[ground_truths[image_index], index_list[0]] += 1
for i in index_list:
# `i` is a category in labels and also an index into scores
# ignore prediction if we don't have a label for the corresponding class
# the user might have set the final fully-connected layer's num_output to
# too high a value
if i < len(labels):
result.append((labels[i], round(100.0*scores[image_index, i],2)))
classifications.append(result)
# accuracy
if show_ground_truth:
top1_accuracy = round(100.0 * n_top1_accurate / n_ground_truth, 2)
top5_accuracy = round(100.0 * n_top5_accurate / n_ground_truth, 2)
per_class_accuracy = []
for x in xrange(n_labels):
n_examples = sum(confusion_matrix[x])
per_class_accuracy.append(round(100.0 * confusion_matrix[x,x] / n_examples, 2) if n_examples > 0 else None)
else:
top1_accuracy = None
top5_accuracy = None
per_class_accuracy = None
# replace ground truth indices with labels
ground_truths = [labels[x] if x is not None and (0 <= x < n_labels ) else None for x in ground_truths]
if request_wants_json():
joined = dict(zip(paths, classifications))
return flask.jsonify({'classifications': joined}), status_code
else:
return flask.render_template('models/images/classification/classify_many.html',
model_job = model_job,
job = inference_job,
paths = paths,
classifications = classifications,
show_ground_truth = show_ground_truth,
ground_truths = ground_truths,
top1_accuracy = top1_accuracy,
top5_accuracy = top5_accuracy,
confusion_matrix = confusion_matrix,
per_class_accuracy = per_class_accuracy,
labels = labels,
), status_code
@blueprint.route('/top_n', methods=['POST'])
def top_n():
"""
Classify many images and show the top N images per category by confidence
"""
model_job = job_from_request()
image_list = flask.request.files['image_list']
if not image_list:
raise werkzeug.exceptions.BadRequest('File upload not found')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
if 'top_n' in flask.request.form and flask.request.form['top_n'].strip():
top_n = int(flask.request.form['top_n'])
else:
top_n = 9
if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
image_folder = flask.request.form['image_folder']
if not os.path.exists(image_folder):
raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
else:
image_folder = None
if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
num_test_images = int(flask.request.form['num_test_images'])
else:
num_test_images = None
paths, _ = read_image_list(image_list, image_folder, num_test_images)
# create inference job
inference_job = ImageInferenceJob(
username = utils.auth.get_username(),
name = "TopN Image Classification",
model = model_job,
images = paths,
epoch = epoch,
layers = 'none'
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, _ = inference_job.get_data()
# delete job
scheduler.delete_job(inference_job)
results = None
if outputs is not None and len(outputs) > 0:
# convert to class probabilities for viewing
last_output_name, last_output_data = outputs.items()[-1]
scores = last_output_data
if scores is None:
raise RuntimeError('An error occurred while processing the images')
labels = model_job.train_task().get_labels()
images = inputs['data']
indices = (-scores).argsort(axis=0)[:top_n]
results = []
# Can't have more images per category than the number of images
images_per_category = min(top_n, len(images))
# Can't have more categories than the number of labels or the number of outputs
n_categories = min(indices.shape[1], len(labels))
for i in xrange(n_categories):
result_images = []
for j in xrange(images_per_category):
result_images.append(images[indices[j][i]])
results.append((
labels[i],
utils.image.embed_image_html(
utils.image.vis_square(np.array(result_images),
colormap='white')
)
))
return flask.render_template('models/images/classification/top_n.html',
model_job = model_job,
job = inference_job,
results = results,
)
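# --- Illustrative sketch, not part of the upstream views.py ---
# The top_n view above selects, for every category column, the indices of the
# images with the highest scores via (-scores).argsort(axis=0)[:top_n].
# A tiny standalone example with a made-up 4-image x 2-category score matrix:
def _top_n_selection_example():
    import numpy as np
    scores = np.array([[0.9, 0.1],
                       [0.2, 0.8],
                       [0.6, 0.4],
                       [0.1, 0.7]])
    indices = (-scores).argsort(axis=0)[:2]   # top 2 image indices per category
    return indices   # -> array([[0, 1], [2, 3]])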
def get_datasets():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationDatasetJob) and (j.status.is_running() or j.status == Status.DONE)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_standard_networks():
return [
('lenet', 'LeNet'),
('alexnet', 'AlexNet'),
#('vgg-16', 'VGG (16-layer)'), #XXX model won't learn
('googlenet', 'GoogLeNet'),
]
def get_default_standard_network():
return 'alexnet'
def get_previous_networks():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationModelJob)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_previous_networks_fulldetails():
return [(j) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationModelJob)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_previous_network_snapshots():
prev_network_snapshots = []
for job_id, _ in get_previous_networks():
job = scheduler.get_job(job_id)
e = [(0, 'None')] + [(epoch, 'Epoch #%s' % epoch)
for _, epoch in reversed(job.train_task().snapshots)]
if job.train_task().pretrained_model:
e.insert(0, (-1, 'Previous pretrained model'))
prev_network_snapshots.append(e)
return prev_network_snapshots
|
Lucaszw/DIGITS
|
digits/model/images/classification/views.py
|
Python
|
bsd-3-clause
| 39,839
|
[
"NEURON"
] |
6fe504d03e8618e03e8edb1da3d8aac30e233fdd1e84d80ba783a0cbf69aa972
|
''' EmailAction
This action writes all the necessary data to a cache file ( cache.db ) that
will be used later by the EmailAgent in order to send the emails for each site.
'''
__RCSID__ = '$Id$'
import os
import sqlite3
from DIRAC import S_ERROR, S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.Actions.BaseAction import BaseAction
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
from DIRAC.Core.Utilities.SiteCEMapping import getSiteForCE
class EmailAction(BaseAction):
def __init__(self, name, decisionParams, enforcementResult, singlePolicyResults,
clients=None):
super(EmailAction, self).__init__(name, decisionParams, enforcementResult,
singlePolicyResults, clients)
if 'DIRAC' in os.environ:
self.cacheFile = os.path.join(
os.getenv('DIRAC'), 'work/ResourceStatus/cache.db')
else:
self.cacheFile = os.path.realpath('cache.db')
def run(self):
''' Checks that it has the parameters it needs and writes the data to a cache file.
'''
# Minor security checks
element = self.decisionParams['element']
if element is None:
return S_ERROR('element should not be None')
name = self.decisionParams['name']
if name is None:
return S_ERROR('name should not be None')
statusType = self.decisionParams['statusType']
if statusType is None:
return S_ERROR('statusType should not be None')
previousStatus = self.decisionParams['status']
if previousStatus is None:
return S_ERROR('status should not be None')
status = self.enforcementResult['Status']
if status is None:
return S_ERROR('status should not be None')
reason = self.enforcementResult['Reason']
if reason is None:
return S_ERROR('reason should not be None')
if self.decisionParams['element'] == 'Site':
siteName = self.decisionParams['name']
else:
elementType = self.decisionParams['elementType']
if elementType == 'StorageElement':
siteName = getSitesForSE(name)
elif elementType == 'ComputingElement':
siteName = getSiteForCE(name)
else:
siteName = {'OK': True, 'Value': 'Unassigned'}
if not siteName['OK']:
self.log.error('Resource %s does not exist at any site: %s' % (name, siteName['Message']))
siteName = "Unassigned Resources"
elif not siteName['Value']:
siteName = "Unassigned Resources"
else:
siteName = siteName['Value'] if isinstance(siteName['Value'], basestring) else siteName['Value'][0]
with sqlite3.connect(self.cacheFile) as conn:
try:
conn.execute('''CREATE TABLE IF NOT EXISTS ResourceStatusCache(
SiteName VARCHAR(64) NOT NULL,
ResourceName VARCHAR(64) NOT NULL,
Status VARCHAR(8) NOT NULL DEFAULT "",
PreviousStatus VARCHAR(8) NOT NULL DEFAULT "",
StatusType VARCHAR(128) NOT NULL DEFAULT "all",
Time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);''')
insertQuery = "INSERT INTO ResourceStatusCache (SiteName, ResourceName, Status, PreviousStatus, StatusType)"
insertQuery += " VALUES ('%s', '%s', '%s', '%s', '%s' ); " % (siteName, name, status,
previousStatus, statusType)
conn.execute(insertQuery)
conn.commit()
except sqlite3.OperationalError:
self.log.error('Email cache database is locked')
return S_OK()
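# --- Illustrative sketch, not part of the upstream EmailAction.py ---
# The INSERT above is built by string interpolation. This is a minimal example of
# the same write using sqlite3 parameter binding, which avoids quoting and
# injection issues; the table and column names are taken from the CREATE TABLE
# statement above, everything else is a placeholder.
def _cache_status_example(cacheFile, siteName, name, status, previousStatus, statusType):
    import sqlite3
    with sqlite3.connect(cacheFile) as conn:
        conn.execute(
            "INSERT INTO ResourceStatusCache "
            "(SiteName, ResourceName, Status, PreviousStatus, StatusType) "
            "VALUES (?, ?, ?, ?, ?)",
            (siteName, name, status, previousStatus, statusType))
        conn.commit()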
|
andresailer/DIRAC
|
ResourceStatusSystem/PolicySystem/Actions/EmailAction.py
|
Python
|
gpl-3.0
| 3,602
|
[
"DIRAC"
] |
347a7932616bffaf473c214f5ebf5649959dbc162dca952c127cb439e6f82e28
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import math
from hyperspy.component import Component
sqrt2pi = math.sqrt(2 * math.pi)
def voigt(x, FWHM=1, gamma=1, center=0, scale=1):
"""Voigt lineshape.
The voigt peak is the convolution of a Lorentz peak with a Gaussian peak.
The formula used to calculate this is::
z(x) = (x + 1j gamma) / (sqrt(2) sigma)
w(z) = exp(-z**2) erfc(-1j z) / (sqrt(2 pi) sigma)
V(x) = scale Re(w(z(x-center)))
Parameters
----------
gamma : real
The half-width half-maximum of the Lorentzian
FWHM : real
The FWHM of the Gaussian
center : real
Location of the center of the peak
scale : real
Amplitude of the profile; the lineshape is area-normalised, so this is the
integrated intensity rather than the value at the peak
Notes
-----
Ref: W.I.F. David, J. Appl. Cryst. (1986). 19, 63-64
adjusted to use stddev and HWHM rather than FWHM parameters
"""
# wofz function = w(z) = Fad[d][e][y]eva function = exp(-z**2)erfc(-iz)
from scipy.special import wofz
sigma = FWHM / 2.3548200450309493
z = (np.asarray(x) - center + 1j * gamma) / (sigma * math.sqrt(2))
V = wofz(z) / (math.sqrt(2 * np.pi) * sigma)
return scale * V.real
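# --- Illustrative sketch, not part of the upstream voigt.py ---
# A quick numerical sanity check of voigt() above: with gamma = 0 the profile
# reduces to a normalised Gaussian, so for scale = 1 the value at the centre is
# 1 / (sqrt(2*pi) * sigma) with sigma = FWHM / 2.3548200450309493. The grid and
# tolerance below are arbitrary choices.
def _voigt_gaussian_limit_check():
    x = np.linspace(-5, 5, 1001)
    FWHM = 1.0
    sigma = FWHM / 2.3548200450309493
    profile = voigt(x, FWHM=FWHM, gamma=0.0, center=0.0, scale=1.0)
    expected_peak = 1.0 / (sqrt2pi * sigma)
    return abs(profile.max() - expected_peak) < 1e-6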
class Voigt(Component):
"""Voigt profile component with support for shirley background,
non_isochromaticity,transmission_function corrections and spin orbit
splitting specially suited for Photoemission spectroscopy data analysis.
f(x) = G(x)*L(x) where G(x) is the Gaussian function and L(x) is the
Lorentzian function
Attributes
----------
area : Parameter
centre: Parameter
FWHM : Parameter
gamma : Parameter
resolution : Parameter
shirley_background : Parameter
non_isochromaticity : Parameter
transmission_function : Parameter
spin_orbit_splitting : Bool
spin_orbit_branching_ratio : float
spin_orbit_splitting_energy : float
"""
def __init__(self):
Component.__init__(self, (
'area',
'centre',
'FWHM',
'gamma',
'resolution',
'shirley_background',
'non_isochromaticity',
'transmission_function'))
self._position = self.centre
self.FWHM.value = 1
self.gamma.value = 0
self.area.value = 1
self.resolution.value = 0
self.resolution.free = False
self.shirley_background.free = False
self.non_isochromaticity.value = 0
self.non_isochromaticity.free = False
self.transmission_function.value = 1
self.transmission_function.free = False
# Options
self.shirley_background.active = False
self.spin_orbit_splitting = False
self.spin_orbit_branching_ratio = 0.5
self.spin_orbit_splitting_energy = 0.61
self.isbackground = False
self.convolved = True
def function(self, x):
area = self.area.value * self.transmission_function.value
centre = self.centre.value
ab = self.non_isochromaticity.value
if self.resolution.value == 0:
FWHM = self.FWHM.value
else:
FWHM = math.sqrt(self.FWHM.value ** 2 + self.resolution.value ** 2)
gamma = self.gamma.value
k = self.shirley_background.value
f = voigt(x,
FWHM=FWHM, gamma=gamma, center=centre - ab, scale=area)
if self.spin_orbit_splitting is True:
ratio = self.spin_orbit_branching_ratio
shift = self.spin_orbit_splitting_energy
f2 = voigt(x, FWHM=FWHM, gamma=gamma,
center=centre - ab - shift, scale=area * ratio)
f += f2
if self.shirley_background.active:
cf = np.cumsum(f)
cf = cf[-1] - cf
self.cf = cf
return cf * k + f
else:
return f
def estimate_parameters(self, signal, E1, E2, only_current=False):
"""Estimate the voigt function by calculating the momenta of the gaussian.
Parameters
----------
signal : Signal instance
E1 : float
Defines the left limit of the spectral range to use for the
estimation.
E2 : float
Defines the right limit of the spectral range to use for the
estimation.
only_current : bool
If False estimates the parameters for the full dataset.
Returns
-------
bool
Notes
-----
Adapted from http://www.scipy.org/Cookbook/FittingData
Examples
--------
>>> g = hs.model.components.Gaussian()
>>> x = np.arange(-10,10, 0.01)
>>> data = np.zeros((32,32,2000))
>>> data[:] = g.function(x).reshape((1,1,2000))
>>> s = hs.signals.Spectrum({'data' : data})
>>> s.axes_manager.axes[-1].offset = -10
>>> s.axes_manager.axes[-1].scale = 0.01
>>> g.estimate_parameters(s, -10,10, False)
"""
super(Voigt, self)._estimate_parameters(signal)
axis = signal.axes_manager.signal_axes[0]
energy2index = axis._get_index
i1 = energy2index(E1) if energy2index(E1) else 0
i2 = energy2index(E2) if energy2index(E2) else len(axis.axis) - 1
X = axis.axis[i1:i2]
if only_current is True:
data = signal()[i1:i2]
X_shape = (len(X),)
i = 0
center_shape = (1,)
else:
# TODO: write the rest of the code to estimate the parameters of
# the full dataset
i = axis.index_in_array
data_gi = [slice(None), ] * len(signal.data.shape)
data_gi[axis.index_in_array] = slice(i1, i2)
data = signal.data[data_gi]
X_shape = [1, ] * len(signal.data.shape)
X_shape[axis.index_in_array] = data.shape[i]
center_shape = list(data.shape)
center_shape[i] = 1
center = np.sum(X.reshape(X_shape) * data, i
) / np.sum(data, i)
sigma = np.sqrt(np.abs(np.sum((X.reshape(X_shape) - center.reshape(
center_shape)) ** 2 * data, i) / np.sum(data, i)))
height = data.max(i)
if only_current is True:
self.centre.value = center
self.FWHM.value = sigma * 2.3548200450309493
self.area.value = height * sigma * sqrt2pi
return True
else:
if self.area.map is None:
self.create_arrays(signal.axes_manager.navigation_shape)
self.area.map['values'][:] = height * sigma * sqrt2pi
self.area.map['is_set'][:] = True
self.FWHM.map['values'][:] = sigma * 2.3548200450309493
self.FWHM.map['is_set'][:] = True
self.centre.map['values'][:] = center
self.centre.map['is_set'][:] = True
return True
|
to266/hyperspy
|
hyperspy/_components/voigt.py
|
Python
|
gpl-3.0
| 7,618
|
[
"Gaussian"
] |
19b14b2275f3c1361a06c710450ef614eab37444993af4b0d582978b5991062f
|
"""
Functions to run curve shifting techniques on lightcurves produced by sim.multidraw.
We define the class runresults that "holds" results obtained by curve shifting techniques (saved into pickles).
"""
#import pycs.gen.lc
import pycs.sim.draw
import pycs.gen.util
import pycs.gen.lc
import numpy as np
import os
import time
#import pycs.sim.frk
import copy as pythoncopy
from glob import glob
import random
import os, sys
def applyopt(optfct, lcslist, **kwargs):
"""
Applies optfct (an optimizing function that takes a list of lightcurves as single argument)
to all the elements (list of lightcurves)
of lcslist (a list of lists of lightcurves).
Optimizes the lightcurves themselves, in place, and returns a list of the outputs of the optimizers, corresponding to the lcslist.
For instance, if the optfct output is a spline, it also contains the final r2s, that I will later save into the pkls !
About multi cpu :
First try using the multiprocessing module -> failed, as the optfct cannot be pickled. Perhaps need to rethink the
strategy.
Second try using good old forkmap... it works !
ncpu : None = I will use all CPUs, -1 = I will use all - 1 CPUs, and otherwise I will use ncpu CPUs.
"""
# Ouch, we cannot pickle optfct, hence multiprocessing module does not work...
# Then I tried forkmap
# Big bug ... border effect ? It works, but messes up the delays.
# Could not solve this ... back to 1 cpu for now.
# def retfct(lcs):
# """
# Used only for forkmap stuff, as we cannot modifiy the lcslist without
# making crap, it seems.
# """
# mylcs = [l.copy() for l in lcs] # Is this needed ? Not even sure, but better logic.
# optfctout = optfct(mylcs)
# return (optfctout, mylcs)
ncpu = 1
verbose = True
if ncpu == 1:
if verbose:
print "Starting the curve shifting on a single CPU, no multiprocessing..."
start = time.time()
kwargs_vec = [kwargs for k in lcslist]
# optfctouts = [optfct(lcs, **kwargs_vec[i]) for i,lcs in enumerate(lcslist)] # Ok to use optfct directly
optfctouts = []
sucess_dic = {'success':True, 'failed_id':[], 'error_list':[]}
for i, lcs in enumerate(lcslist):
try :
optout = optfct(lcs, **kwargs_vec[i])
except Exception as e:
print "WARNING : I have a problem with curve number %i."%(i)
sucess_dic['failed_id'].append(i)
sucess_dic['success'] = False
sucess_dic['error_list'].append(e)
else :
optfctouts.append(optout)
print "Shifted %i simulations, using 1 CPU, time : %s" % (len(lcslist), pycs.gen.util.strtd(time.time() - start))
# else:
# ncpuava = pycs.sim.frk.nprocessors()
# if ncpu == None:
# ncpu = ncpuava
# if ncpu == -1:
# ncpu = ncpuava - 1
# if verbose:
# print "Starting the curve shifting on %i/%i CPUs." % (ncpu, ncpuava)
# start = time.time()
# # Bug :
# #optfctouts = pycs.sim.frk.map(optfct, lcslist, n=ncpu)
#
# retfctouts = pycs.sim.frk.map(retfct, lcslist, n=ncpu)
# optfctouts = [retfctout[0] for retfctout in retfctouts]
# optlcslist = [retfctout[1] for retfctout in retfctouts]
# # And now we use the fact that lcslist is mutable ...
# assert len(lcslist) == len(optlcslist)
# for lcs, optlcs in zip(lcslist, optlcslist):
# lcs = optlcs
#
# print "Shifted %i simulations on %i/%i CPUs, time : %s" % (len(lcslist), ncpu, ncpuava, pycs.gen.util.strtd(time.time() - start))
if len(optfctouts) ==0 :
print("### WARNING : it seems that your optfct does not return anything ! ###")
# if optfctouts[0] == None:
# print("### WARNING : it seems that your optfct does not return anything ! ###")
return optfctouts, sucess_dic
# """
# sys.exit("Sorry, mp does not work yet...")
# import multiprocessing
# # And using multiprocessing :
# ncpuava = multiprocessing.cpu_count()
# print "We have %i CPUs on this machine." % (ncpuava)
# if ncpu == None:
# ncpu = ncpuava
# print "I will use %i CPUs." % (ncpu)
# start = time.time()
# pool = multiprocessing.Pool(processes=ncpu)
# optlcslist = pool.map(optfct, lcslist) # Here we use this retfct
# lcslist = optlcslist
# print "Done on %i CPUs, time : %s" % (ncpu, pycs.gen.util.strtd(time.time() - start))
# """
class runresults:
"""
Summarizes the huge list of lists of lightcurves as a numpy array of timeshifts and some further info,
to serve as input for plots, and actual time delay determinations.
This replaces the old "boot.pkl" files ...
The TRUE shifts are also saved (if available)
All this is not related to a particular optimization technique.
Please also provide the success_dic to remove the curves where the optimiser failed.
Note the mask functionality.
"""
def __init__(self, lcslist, qs=None, name="None", plotcolour = "#008800", success_dic = None):
"""
lcslist may or may not have "truetimeshifts". If not, I will put 0.0 as true shifts.
qs should be a numpy array (as long as lcslist) that contains some chi2, r2, or d2 stuff to quantify how good the fit was.
All the lcs in lcslist should be in the same order (I will check this, even if slow).
I will not sort these lcs (as you might want them in an unsorted order).
"""
if qs is not None:
self.qs = qs
if qs.shape[0] != len(lcslist):
raise RuntimeError("These qs don't have the right length !")
else:
# We put zeros...
self.qs = np.zeros(len(lcslist))
if len(lcslist) == 0:
raise RuntimeError("Should this happen ?")
self.tsarray = np.vstack(np.array([l.timeshift for l in lcs]) for lcs in lcslist)
# First index selects the simulation, second index selects the timeshifts of the curves in each lcs.
# We build a similar array for the true shifts (value = 0.0 if the curve was not drawn)
self.truetsarray = np.vstack(np.array([getattr(l, "truetimeshift", 0.0) for l in lcs]) for lcs in lcslist)
# We check the ordering of the lcs in lcslist
objectstringsasset = set(["/".join([l.object for l in lcs]) for lcs in lcslist])
if len(objectstringsasset) != 1:
raise RuntimeError("Ouch, your lcs in lcslist are not identical/ordered !")
self.labels = [l.object for l in lcslist[0]]
self.name = name
self.autoname = name
self.plottrue = False # By default we plot the measured delays.
self.plotgauss = False
self.plotcolour = plotcolour
self.success_dic = success_dic
self.check()
def __len__(self):
"""
The number of runs
"""
return self.tsarray.shape[0]
def nimages(self):
"""
The number of images (4 for a quad, 2 for a double) ...
"""
return self.tsarray.shape[1]
def __str__(self):
return "Runresults '%s' (%i)" % (getattr(self, "name", "untitled"), len(self))
def copy(self):
return pythoncopy.deepcopy(self)
def check(self):
if self.qs.shape[0] != len(self):
raise RuntimeError("qs length error")
#print self.truetsarray
#print self.tsarray
if self.tsarray.shape != self.truetsarray.shape:
raise RuntimeError("tsarray shape error")
def applymask(self, mask):
"""
Removes some of the runresults according to your mask.
"""
self.tsarray = self.tsarray[mask]
self.truetsarray = self.truetsarray[mask]
self.qs = self.qs[mask]
self.check()
def gettruets(self):
"""
Returns some summary stats about the true delays.
Used to find histogram ranges for plots, etc.
"""
#print self.truetsarray
ret = {}
ret["center"] = np.median(self.truetsarray, axis=0)
ret["max"] = np.max(self.truetsarray, axis=0)
ret["min"] = np.min(self.truetsarray, axis=0)
spans = ret["max"] - ret["min"]
ret["type"] = "distribution"
if np.all(spans < 0.00001): # Then all true delays are identical
ret["type"] = "same"
if np.all(np.absolute(ret["center"]) < 0.00001): # Then we have no true delays (i.e., they are all 0).
ret["type"] = "none"
return ret
def getts(self):
"""
A bit similar to gettruets, we return the median of the measured ts...
Used for plots etc, not for calculations.
"""
ret = {}
ret["center"] = np.median(self.tsarray, axis=0)
ret["max"] = np.max(self.tsarray, axis=0)
ret["min"] = np.min(self.tsarray, axis=0)
ret["type"] = "distribution"
return ret
def get_delays_from_ts(self):
"""
Return the time delays, from the timeshifts. I do not account for the true timeshift.
:return: dictionary containing the median, max, and min delays + delay labels
"""
n = len(self.labels)
couples = [(self.tsarray[:, i], self.tsarray[:, j]) for i in range(n) for j in range(n) if i < j]
label_couple = [self.labels[i] + self.labels[j] for i in range(n) for j in range(n) if i < j]
ret = {"center": [np.median(lcs2 - lcs1) for (lcs1, lcs2) in couples]}
ret["max"] = [np.max(lcs2 - lcs1) for (lcs1, lcs2) in couples]
ret["min"] = [np.min(lcs2 - lcs1) for (lcs1, lcs2) in couples]
ret["delay_label"] = label_couple
ret["type"] = "delay distribution"
return ret
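# --- Illustrative sketch, not part of the upstream run.py ---
# A tiny standalone example of the pairwise-delay bookkeeping used by
# runresults.get_delays_from_ts() above: for curves A, B, C the delays are the
# column differences of the timeshift array, labelled "AB", "AC", "BC".
# The numbers below are made up.
def _pairwise_delay_example():
    import numpy as np
    labels = ["A", "B", "C"]
    tsarray = np.array([[0.0, 5.0, 12.0],
                        [0.5, 4.5, 11.0]])   # two runs, three curves
    n = len(labels)
    couples = [(tsarray[:, i], tsarray[:, j]) for i in range(n) for j in range(n) if i < j]
    delay_labels = [labels[i] + labels[j] for i in range(n) for j in range(n) if i < j]
    medians = [float(np.median(b - a)) for (a, b) in couples]
    return dict(zip(delay_labels, medians))   # -> {"AB": 4.5, "AC": 11.25, "BC": 6.75}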
def joinresults(rrlist):
"""
Give me a list of runresults objects, I join those into a single one an return the latter.
"""
if len(rrlist) == 0:
raise RuntimeError("Your rrlist is empty !")
joined = rrlist[0].copy() # Just to get an object, with labels from the first rr.
# Perform lots of test if it is ok to join these results ...
for rr in rrlist:
if rr.labels != joined.labels:
raise RuntimeError("Don't ask me to join runresults of different objects !")
#joined.name = "+".join(list(set([getattr(rr, 'simset', 'NoName') for rr in rrlist])))
joined.name = "+".join(list(set([getattr(rr, 'name', 'NoName') for rr in rrlist])))
joined.autoname = "%s" % (joined.name)
joined.tsarray = np.vstack([rr.tsarray for rr in rrlist])
joined.truetsarray = np.vstack([rr.truetsarray for rr in rrlist])
joined.qs = np.concatenate([rr.qs for rr in rrlist])
joined.check()
return joined
def collect(directory = "./test", plotcolour="#008800", name=None):
"""
Collects the runresult objects from a directory (typically from a multirun run),
and returns the joined runresults.
"""
#directory = "simset_%s" % (simset)
if not os.path.isdir(directory):
raise RuntimeError("I cannot find the directory %s" % directory)
pklfiles = sorted(glob(os.path.join(directory, "*_runresults.pkl")))
if len(pklfiles) == 0:
raise RuntimeError("I couldn't find pkl files in directory %s" % directory)
print "Reading %i runresult pickles..." % (len(pklfiles))
rrlist = [pycs.gen.util.readpickle(pklfile, verbose=False) for pklfile in pklfiles]
#for rr in rrlist:
# if not hasattr(rr, "qs"):
# rr.qs = None
jrr = pycs.sim.run.joinresults(rrlist)
jrr.plotcolour = plotcolour
if name is not None:
jrr.name = name
print "OK, I have collected %i runs from %s" % (len(jrr), jrr.name)
return jrr
def multirun(simset, lcs, optfct, kwargs_optim=None, optset="multirun", tsrand=10.0, analyse = True, shuffle=True, keepopt=False, trace=False, verbose=True, destpath = "./"):
"""
Top level wrapper to get delay "histograms" : I will apply the optfct to optimize the shifts
between curves that you got from :py:func:`pycs.sim.draw.multidraw`, and save the results in
form of runresult pickles.
.. note: Remove my ".workingon" file and I will finish the current pkl and skip the remaining ones !
This is useful to stop me cleanly.
It is perfectly ok to launch several instances of myself on the same simset, to go faster.
I will process every pkl of the simset only once, and prevent other instances from processing the same files.
You can use me for a lot of different tasks. (note from VB : not to make coffee apparently)
:param simset: The name of the simulations to run on. Those are in a directory called ``sims_name``.
:param lcs: Lightcurves that define the initial shifts and microlensings you want to use.
I will take the lightcurves from the simset, and put these shifts and ML on them.
:param kwargs_optim: kwargs to be passed to your optfct
:param optset: A new name for the optimisation.
:param optfct: The optimizing function that takes lcs as single argument, fully optimizes the curves,
and returns a spline, or a d2 value.
Can be None if argument analyse is False (used for tests).
:type optfct: function
:param tsrand: I will randomly shift the simulated curves before running the optfct
This randomizes the initial conditions.
(uniform distrib from -tsrand to tsrand)
:param shuffle: if True, I will shuffle the curves before running optfct on them, and then sort them immediately afterwards.
:param keepopt: a bit similar to Trace, but simpler : we write the optimized lightcurves as well as the output of the optimizers into one pickle file per input pickle file.
{"optfctoutlist":optfctouts, "optlcslist":simlcslist}
"""
if kwargs_optim is None :
kwargs_optim = {}
# We look for the sims directory OH GOD THIS IS SO UGLY !
simdir = destpath + "sims_%s" % (simset)
if not os.path.isdir(simdir):
raise RuntimeError("Sorry, I cannot find the directory %s" % simdir)
simpkls = sorted(glob(os.path.join(simdir, "*.pkl")))
if verbose:
print "I have found %i simulation pickles in %s." % (len(simpkls), simdir)
# We prepare the destination directory
destdir = destpath+"sims_%s_opt_%s" % (simset, optset)
if verbose:
print "I'll write my results into the directory %s." % (destdir)
if not os.path.isdir(destdir):
os.mkdir(destdir)
else:
if verbose:
print "(The latter already exists.)"
# The initial conditions that I will set to the sims
if verbose:
print "Initial conditions : "
for l in lcs:
print l
success_dic = {'success': True, 'failed_id': [], 'error_list': []}
for simpkl in simpkls:
# First we test if this simpkl is already processed (or if another multirun is working on it).
simpklfilebase = os.path.splitext(os.path.basename(simpkl))[0]
workingonfilepath = os.path.join(destdir, simpklfilebase+".workingon")
resultsfilepath = os.path.join(destdir, simpklfilebase+"_runresults.pkl")
optfilepath = os.path.join(destdir, simpklfilebase+"_opt.pkl")
if os.path.exists(workingonfilepath) or os.path.exists(resultsfilepath):
continue
# Ok, we start, hence we want to avoid other instances to work on the same pkl ...
os.system("date > %s" % workingonfilepath)
print "--- Casino running on simset %s, optset %s ---" % (simset, optset)
simlcslist = pycs.gen.util.readpickle(simpkl)
print "Working for %s, %i simulations." % (resultsfilepath, len(simlcslist))
# We set the initial conditions for the curves to analyse, based on the lcs argument as reference.
for simlcs in simlcslist:
pycs.sim.draw.transfershifts(simlcs, lcs)
# Now we add uniform noise to the initial time shifts
if tsrand != 0.0:
for simlcs in simlcslist:
for simlc in simlcs:
simlc.shifttime(float(np.random.uniform(low=-tsrand, high=tsrand, size=1)))
else:
if verbose:
print "I do NOT randomize initial conditions for the time shifts !"
# And to the actual shifting, that will take most of the time
if analyse:
if shuffle:
for simlcs in simlcslist:
pycs.gen.lc.shuffle(simlcs)
optfctouts, success_dic = applyopt(optfct, simlcslist, **kwargs_optim)
if shuffle: # We sort them, as they will be passed the constructor of runresuts.
for simlcs in simlcslist:
pycs.gen.lc.objsort(simlcs, verbose=False)
else:
# Else, we just skip this step, and save the results anyway.
if verbose:
print "I do NOT analyse the curves !"
optfctouts = [None]*len(simlcslist)
# And now we want to save the results.
# If the optfct was a spline optimization, this optfctouts is a list of splines.
# Else it might be something different, we deal with this now.
if hasattr(optfctouts[0], "lastr2nostab"): # then it's a spline, and we will collect these lastr2nostab values.
tracesplinelists = [[optfctout] for optfctout in optfctouts] # just for the trace
qs = np.array([s.lastr2nostab for s in optfctouts])
if np.all(qs < 1.0):
print "### WARNING : qs values are very small, did you fit that spline ? ###"
else:
try:
qs = np.array(map(float, optfctouts)) # Then it's some kind of chi2, or a d2 : easy !
tracesplinelists = [[]]*len(simlcslist) # just for the trace
except Exception as e :
print e
tracesplinelists = [[]]*len(simlcslist) # just for the trace
qs = None
if analyse == True:
print type(optfctouts[0]), optfctouts
print "Oh no, I don't know what to do with the optfctouts !"
# Trace after shifting
if trace:
print "Saving trace of optimized curves ..."
tracedir = "trace_sims_%s_opt_%s" % (simset, optset)
for (simlcs, tracesplinelist) in zip(simlcslist, tracesplinelists):
pycs.gen.util.trace(lclist=simlcs, splist=tracesplinelist, tracedir = tracedir)
clean_simlcslist = clean_simlist(simlcslist, success_dic)
if keepopt:
# A bit similar to trace, we save the optimized lcs in a pickle file.
outopt = {"optfctoutlist":optfctouts, "optlcslist":clean_simlcslist}
pycs.gen.util.writepickle(outopt, optfilepath)
# Saving the results
rr = runresults(clean_simlcslist, qs = qs, name="sims_%s_opt_%s" % (simset, optset), success_dic = success_dic)
pycs.gen.util.writepickle(rr, resultsfilepath)
# We remove the lock for this pkl file.
# If the files does not exist we stop !
if not os.path.exists(workingonfilepath):
print "WORKINGON FILE REMOVED -> I STOP HERE"
break
else:
os.remove(workingonfilepath)
return success_dic
def clean_simlist(simlcslist, success_dic):
for i in reversed(success_dic['failed_id']):
print "remove simlcs ",i
del simlcslist[i]
return simlcslist
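# --- Illustrative sketch, not part of the upstream run.py ---
# clean_simlist() above drops the entries whose optimisation failed, walking the
# failed indices in reverse so that earlier deletions do not shift later ones.
# A tiny standalone example with made-up placeholder entries:
def _clean_simlist_example():
    simlcslist = ["sim0", "sim1", "sim2", "sim3"]
    success_dic = {'success': False, 'failed_id': [1, 3], 'error_list': ['e1', 'e3']}
    for i in reversed(success_dic['failed_id']):
        del simlcslist[i]
    return simlcslist   # -> ["sim0", "sim2"]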
|
COSMOGRAIL/PyCS
|
pycs/sim/run.py
|
Python
|
gpl-3.0
| 17,656
|
[
"CASINO"
] |
0db8a59358ae87d530b02b8503845a82f833d311f8acb2bae8b101bf18673816
|
import unittest
from mjhmc.samplers.markov_jump_hmc import ContinuousTimeHMC, HMCBase, MarkovJumpHMC, HMC, ControlHMC
from mjhmc.misc.distributions import TestGaussian, Gaussian
import numpy as np
from mjhmc.misc.utils import overrides
n_seed = 1
eps = .05
class TestControl(unittest.TestCase):
"""
Serves as the base class since unittests messes up inheritance
"""
def setUp(self):
np.random.seed(n_seed)
self.sampler_to_test = HMCBase
def test_1d_gaussian(self):
"""
Checks to see that {} can sample from a 1d gaussian
""".format(self.sampler_to_test.__name__)
n_samples = 10000
gaussian_1d = TestGaussian(ndims=1)
sampler = self.sampler_to_test(
gaussian_1d.Xinit,
gaussian_1d.E,
gaussian_1d.dEdX
)
sampler.burn_in()
samples = sampler.sample(n_samples)
mean = np.mean(samples)
std = np.std(samples)
self.assertTrue(np.abs(mean) < eps,
msg='mean: {} is not within tolerance for {}'.format(
mean,
self.sampler_to_test.__name__))
self.assertTrue(np.abs(std - 1) < eps,
msg='std: {} is not within tolerance for {}'.format(
std,
self.sampler_to_test.__name__))
def test_ill_conditioned_gaussian(self):
"""
Checks to see that {} can sample from an ill-conditioned gaussian
""".format(self.sampler_to_test.__name__)
n_samples = 100000
ic_gaussian_2d = Gaussian(ndims=2, log_conditioning=1)
target_cov = np.linalg.inv(ic_gaussian_2d.J)
sampler = self.sampler_to_test(
ic_gaussian_2d.Xinit,
ic_gaussian_2d.E,
ic_gaussian_2d.dEdX
)
sampler.burn_in()
samples = sampler.sample(n_samples)
sample_cov = np.cov(samples)
self.assertTrue(self.approx_equal(sample_cov, target_cov),
msg=(" samples covariance: \n {} is not within tolerance to "
"target cov: \n {}. \n I am {}").format(
sample_cov,
target_cov,
self.sampler_to_test.__name__))
def approx_equal(self, arr1, arr2):
return np.linalg.norm(arr1 - arr2) < eps
def test_hyperparameter_setting(self):
"""
Checks to see that hyperparameters are properly set
"""
beta = np.random.random()
epsilon = np.random.random() * 5
num_leapfrop_steps = np.random.randint(10)
gauss = Gaussian()
sampler = self.sampler_to_test(
distribution=gauss,
beta=beta,
epsilon=epsilon,
num_leapfropg_steps=num_leapfrop_steps
)
self.assertTrue(sampler.beta == beta)
self.assertTrue(sampler.epsilon == epsilon)
self.assertTrue(sampler.num_leapfropg_steps == num_leapfrop_steps)
class TestHMC(TestControl):
@overrides(TestControl)
def setUp(self):
np.random.seed(n_seed)
self.sampler_to_test = HMC
class TestControlHMC(TestControl):
@overrides(TestControl)
def setUp(self):
np.random.seed(n_seed)
self.sampler_to_test = ControlHMC
# class TestContinuousHMC(TestControl):
# @overrides(TestControl)
# def setUp(self):
# np.random.seed(n_seed)
# self.sampler_to_test = ContinuousTimeHMC
class TestMJHMC(TestControl):
@overrides(TestControl)
def setUp(self):
np.random.seed(n_seed)
self.sampler_to_test = MarkovJumpHMC
|
rueberger/MJHMC
|
mjhmc/tests/test_continuous_samplers.py
|
Python
|
gpl-2.0
| 3,727
|
[
"Gaussian"
] |
169f814c78e1b2b274914541d1eb33c417f7c81846156ab534ac2dd0c29a263f
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest2 as unittest
import json
import os
from monty.json import MontyDecoder
from pymatgen.analysis.defects.dilute_solution_model import *
import random
try:
import sympy
except ImportError:
sympy = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
with open(
os.path.join(test_dir, 'mp1048_defect_formation_energies.json')) as fp:
formation_energy_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, 'mp1048_raw_defect_energies.json')) as fp:
raw_energy_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, 'mp1487_raw_defect_energies.json')) as fp:
mp1487_raw_energy_dict = json.load(fp, cls=MontyDecoder)
# TODO (from SP): You MUST redo this entire test. The whole test is
# monstrously slow. It takes more than 10 mins to get through this test alone.
@unittest.skipIf((not sympy) or random.randint(0, 10) % 10 != 0,
"sympy not present or random skip.")
class DiluteSolutionModelTest(unittest.TestCase):
def setUp(self):
"""
Setup mandatory inputs for dilute_solution_model
"""
self.e0 = raw_energy_dict['bulk_energy']
self.asites = raw_energy_dict['antisites']
self.vac = raw_energy_dict['vacancies']
self.struct = raw_energy_dict['structure']
self.T = 600
self.trial_mu = formation_energy_dict[str(self.T)]['chemical_potential']
def test_formation_energies_without_chem_pot(self):
"""
Should generate formation energies without input chempot
"""
energies, chem_pot = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
generate='energy')
self.assertIsNotNone(energies)
self.assertIsNotNone(chem_pot)
def test_formation_energies_with_chem_pot(self):
energies, chem_pot = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='energy')
self.assertIsNotNone(energies)
self.assertIsNotNone(chem_pot)
def test_plot_data_without_chem_pot(self):
conc_data, en_data, mu_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
generate='plot')
self.assertIsNotNone(conc_data)
self.assertIsNotNone(en_data)
self.assertIsNotNone(mu_data)
for key, value in conc_data.items():
self.assertIsNotNone(value)
for key, value in mu_data.items():
self.assertIsNotNone(value)
for key, value in en_data.items():
self.assertIsNotNone(value)
def test_plot_data_with_chem_pot(self):
conc_data, en_data, mu_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='plot')
self.assertIsNotNone(conc_data)
self.assertIsNotNone(en_data)
self.assertIsNotNone(mu_data)
for key, value in conc_data.items():
self.assertIsNotNone(value)
for key, value in mu_data.items():
self.assertIsNotNone(value)
for key, value in en_data.items():
self.assertIsNotNone(value)
# print(plot_data['y'])
@unittest.skipIf((not sympy) or random.randint(0, 10) % 10 != 0,
"sympy not present or random skip.")
class SoluteSiteFinderTest(unittest.TestCase):
def setUp(self):
"""
Setup mandatory inputs for dilute_solution_model
"""
self.e0 = mp1487_raw_energy_dict['bulk_energy']
self.asites = mp1487_raw_energy_dict['antisites']
self.vac = mp1487_raw_energy_dict['vacancies']
self.solutes = mp1487_raw_energy_dict['solutes']
self.struct = mp1487_raw_energy_dict['structure']
self.T = 1000
def test_plot_data_without_chem_pot(self):
plot_data = solute_site_preference_finder(
self.struct, self.e0, self.T, self.vac, self.asites, self.solutes,
solute_concen=0.01)
self.assertIsNotNone(plot_data)
def still_wait_plot_data_with_chem_pot(self):
plot_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='plot')
self.assertIsNotNone(plot_data)
for key, value in plot_data.items():
self.assertIsNotNone(value)
if __name__ == "__main__":
unittest.main()
|
aykol/pymatgen
|
pymatgen/analysis/defects/tests/test_dilute_solution_model.py
|
Python
|
mit
| 4,733
|
[
"pymatgen"
] |
82b6a4e0a2425d62ce1f6d6c3a0a30d0d9501954411a5e38797876caf106dc30
|
#!/usr/bin/env python2
desc="""Scan alignments (SAM/BAM) for structural variants.
Detection of deletions, duplications and inversions from paired reads is implemented.
In addition, deletions and duplications are detected from deviations from mean depth
of coverage.
By default, the program dumps reads of interest and depth of coverage information. This
speeds up recalculation by a factor of ~20X and should take <1% of the BAM file size.
To be done:
+ SV detection
-- insertions testing
-- translocations
-- inversions
+ split read alignment
+ rlen learning
+ check coverage over entire region to detect alignment issues
Dependencies:
- pysam (sudo easy_install -U pysam)
"""
epilog="""Author: l.p.pryszcz+git@gmail.com
Mizerow, 13/03/2014
"""
import os, sys
import pickle, pysam, resource
from datetime import datetime
import numpy as np
from scipy import stats, signal
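# --- Illustrative sketch, not part of the upstream script ---
# A minimal example of the insert-size classification used further down in
# SVs._prepare_handles()/add_read(): read pairs whose insert size is well above
# the mean suggest a deletion, well below the mean an insertion. The
# mean +/- 2*stdev thresholds follow the code below; the numbers are made up.
def _classify_isize_example(isize, isize_mean=500.0, isize_stdev=50.0):
    del_isize = isize_mean + 2 * isize_stdev
    ins_isize = isize_mean - 2 * isize_stdev
    if isize > del_isize:
        return "deletion-supporting"
    elif isize < ins_isize:
        return "insertion-supporting"
    return "concordant"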
class SVs(object):
"""Store BAM related data. Take BAM file name as input."""
def __init__(self, bam, **kwargs):
#set variables
self._set_variables(kwargs)
#open handles
self._prepare_handles(bam)
#prepare storages
self._init_storages()
def _set_variables(self, kwargs):
"""Set parameters"""
#define mapq
if 'mapq' in kwargs:
self.mapq = kwargs['mapq']
else:
self.mapq = 20
#ploidy
if 'ploidy' in kwargs:
self.ploidy = kwargs['ploidy']
else:
self.ploidy = 2
#q - percentile
if 'q' in kwargs:
self.q = kwargs['q']
else:
self.q = 1.0
#min coverage change
if 'covD' in kwargs:
self.covD = kwargs['covD']
else:
self.covD = 0.33
#set read length
if 'rlen' in kwargs:
self.rlen = kwargs['rlen']
else:
self.rlen = None
#prepare logging
if 'log' in kwargs:
self.log = kwargs['log']
elif 'verbose' in kwargs and kwargs['verbose']:
self.log = sys.stderr
else:
self.log = None
#no dump
if 'nodump' in kwargs:
self.nodump = kwargs['nodump']
else:
self.nodump = False
#merge by depth of coverage variants
if 'merge' in kwargs:
self.merge = kwargs['merge']
else:
self.merge = False
#out
if 'out' in kwargs:
self.out = kwargs['out']
else:
self.out = sys.stdout
#coverage fraction
if 'cov_frac' in kwargs:
self.cov_frac = kwargs['cov_frac']
else:
self.cov_frac = 0.75
#min dup
if 'dup_isize_frac' in kwargs:
self.dup_isize_frac = kwargs['dup_isize_frac']
else:
self.dup_isize_frac = 0.9
#min cnv size from depth of coverage
if 'cnv_size' in kwargs:
self.cnv_size = kwargs['cnv_size']
else:
self.cnv_size = 1000
#min cnv size from depth of coverage
if 'w' in kwargs:
self.w = kwargs['w']
else:
self.w = 100
def _prepare_handles(self, bam):
"""Open BAM file for reading and set scanning parameters."""
#define internal sam and shortcuts
self.bam = bam
self.bamdump = self.bam + ".sv_algs.bam"
if os.path.isfile(self.bamdump):
if self.log:
self.log.write("Loading data from dump: %s ...\n" % self.bamdump)
self.sam = pysam.Samfile(self.bamdump)
self.chr2cov = pickle.load(open(self.bamdump+'.chr2cov.pickle'))
self.isize_mean = float(self.sam.header['PG'][0]['VN'])
self.isize_stdev = float(self.sam.header['PG'][1]['VN'])
self.isize_median = float(self.sam.header['PG'][2]['VN'])
self.pairs = [int(x) for x in self.sam.header['PG'][3]['VN'].split(",")]
self.nalgs = int(self.sam.header['PG'][4]['VN'])
else:
self.sam = pysam.Samfile(self.bam)
self.nalgs = self.sam.mapped + self.sam.unmapped
#shortcuts
self.refs = self.sam.references
self.lengths = self.sam.lengths
#coverage data storage
if not os.path.isfile(self.bamdump):
self.chr2cov = [np.zeros(l, dtype='int') for l in self.lengths]
#estimate insert size statistics
self.get_isize_stats()
#select main orientation
self.orientations = ("FF", "FR", "RF", "RR")
self.orientation = self.pairs.index(max(self.pairs))
#define deletion and insertion thresholds
self.del_isize = self.isize_mean + 2*self.isize_stdev
self.ins_isize = self.isize_mean - 2*self.isize_stdev
if self.log:
self.log.write(" FF/FR/RF/RR: %s/%s/%s/%s\n" % tuple(self.pairs))
self.log.write(" %s chosen\n" % self.orientations[self.orientation])
info = " median: %.2f mean: %.2f +- %.2f\n"
self.log.write(info%(self.isize_median, self.isize_mean, self.isize_stdev))
info = " deletion: isize >%.2f\n insertion: isize <%.2f\n"
self.log.write(info%(self.del_isize, self.ins_isize))
def _init_storages(self):
"""Initialise storages."""
#reads storages
self.delReads = []
self.dupReads = []
self.insReads = []
self.invReads = []
self.traReads = []
#variants storages
self.dels = [[] for r in self.refs]
self.dups = [[] for r in self.refs]
self.inss = [[] for r in self.refs]
self.invs = [[] for r in self.refs]
self.tras = [[] for r in self.refs]
def alg2orientation(self, alg):
"""Return pair orientation: FF: 0; FR: 1; RF: 2; RR: 3."""
##FR/RF
if alg.is_reverse != alg.mate_is_reverse:
#FR
if alg.is_read1 and not alg.is_reverse or \
alg.is_read2 and not alg.is_reverse:
return 1
#RF
else:
return 2
#RR - both reads map to the reverse strand
elif alg.is_reverse:
return 3
#FF - both reads map to the forward strand
else:
return 0
def get_isize_stats(self, limit=1e5):
"""Estimate insert size median, mean and stdev.
Also count pair orientations and select main.
"""
if self.log:
self.log.write("Estimating insert size stats...\n")
isizes = []
self.pairs = [0, 0, 0, 0]
for alg in pysam.Samfile(self.bam):
#take only reads with good alg quality and one read per pair
if alg.mapq < self.mapq or alg.isize < 1:
continue
#store isize
isizes.append(alg.isize)
#store pair orientation
self.pairs[self.alg2orientation(alg)] += 1
#stop if limit reached
if len(isizes) >= limit:
break
#trim the top and bottom q percentiles of insert sizes
maxins = stats.scoreatpercentile(isizes, 100-self.q)
minins = stats.scoreatpercentile(isizes, self.q)
isizes = filter(lambda x: minins<x<maxins, isizes)
#store
self.isize_median = np.median(isizes)
self.isize_mean = np.mean(isizes)
self.isize_stdev = np.std(isizes)
def add_read(self, alg):
"""Update handles for coverage, insert size, etc."""
#update coverage - read count rather than depth of coverage!
if not os.path.isfile(self.bamdump):
#self.chr2cov[alg.rname][alg.pos:alg.pos+alg.rlen] += 1
self.chr2cov[alg.rname][alg.pos] += 1
#skip the rightmost read of each pair (isize<0) and low-quality alignments
if alg.isize<0 or alg.mapq<self.mapq: # or alg.mrnm < alg.rname
return
orient = self.alg2orientation(alg)
##insertion/deletion
#correct pairing
if orient == self.orientation:
##deletion if significantly larger distance
if alg.isize > self.del_isize:
self.delReads.append(alg)
##insertion if significantly smaller distance
elif alg.isize < self.ins_isize:
self.insReads.append(alg)
##segmental duplication
#RF <--> FR or FF <--> RR??
elif self.orientation in (1, 2) and orient in (1, 2) or \
self.orientation in (0, 3) and orient in (0, 3):
self.dupReads.append(alg)
##inversion
#FR/RF -> FF/RR or FF/RR --> FR/RF
elif self.orientation in (1, 2) and orient in (0, 3) or \
self.orientation in (0, 3) and orient in (1, 2):
self.invReads.append(alg)
##translocation -- note, some putative deletions may also be translocations
#orientation doesn't matter
if alg.rname != alg.mrnm:
self.traReads.append(alg)
def sv2bam(self):
"""Dump all alignments important for SVs"""
if self.log:
self.log.write("Dumping info to: %s ...\n"%self.bamdump)
#open out sam/bam handle
header = self.sam.header
nalgs = len(self.delReads) + len(self.dupReads) + len(self.insReads) + len(self.invReads) + len(self.traReads)
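#stash insert size stats, pair orientation counts and read count in @PG
#header records so _prepare_handles() can restore them from the dump
#without re-scanning the original BAM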
isize_info = [{"ID": "isize_mean", "VN": self.isize_mean},
{"ID": "isize_stdev", "VN": self.isize_stdev},
{"ID": "isize_median", "VN": self.isize_median},
{"ID": "pairs", "PN": "/".join(self.orientations),
"VN": ",".join(str(x) for x in self.pairs)},
{"ID": "nalgs", "VN": nalgs}]
header['PG'] = isize_info
out = pysam.Samfile(self.bamdump, "wb", header=header)
#store per base depth of coverage
with open(self.bamdump+".chr2cov.pickle", "wb") as f:
pickle.dump(self.chr2cov, f, 2)
#dump individual algs
for algs in (self.delReads, self.dupReads, self.insReads, self.invReads, self.traReads):
for alg in algs:
out.write(alg)
out.close()
def get_clusters(self, algs):
"""Return clustered algs."""
#collapse dels by chromosome
#chr2dels = {i: [] for i, ref in enumerate(self.refs)}
#py2.6 compatible
chr2dels = {}
for i, ref in enumerate(self.refs):
chr2dels[i] = []
for alg in algs:
chr2dels[alg.rname].append(alg)
clusters = []
#process each chromosome
for chri in chr2dels:
clusters.append([])
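#bin read start positions into w-sized windows; windows with an excess
#of discordant pairs show up as peaks in this histogram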
hist = np.zeros((self.lengths[chri]/self.w)+1, dtype=int)
for alg in chr2dels[chri]:
hist[alg.pos/self.w] += 1
#get peaks
peaks = self.get_peaks(hist, 2*self.w)
if not peaks:
continue
#generate clusters
i = 0
for alg in chr2dels[chri]:
#before current peak
if alg.pos < peaks[i][0] - self.rlen:
continue
#after current peak
elif alg.pos > peaks[i][1]:
#skip peaks until next is after current read
while i < len(peaks) and alg.pos > peaks[i][1]:
i += 1
if i >= len(peaks):
break
#add fresh cluster
clusters.append([])
#store alg to cluster if within peak
if peaks[i][0] - self.rlen <= alg.pos <= peaks[i][1]:
clusters[-1].append(alg)
#filter by min reads
clusters = filter(lambda x: len(x) > self.minReads, clusters)#; print len(clusters)
return clusters
def get_algs_features(self, algs):
"""Return algs starts, mate starts, isizes, r"""
#filter by isize percentile
isizes = [alg.isize for alg in algs]
min_isize = stats.scoreatpercentile(isizes, self.q)
max_isize = stats.scoreatpercentile(isizes, 100-self.q)
algs = filter(lambda x: min_isize <= x.isize <= max_isize, algs)
#get sizes etc
isizes = [alg.isize for alg in algs]
starts = [alg.pos for alg in algs]
mstarts = [alg.mpos for alg in algs]
#rlen = np.mean([alg.rlen for alg in algs])
#get chromosome info
chrnames = [alg.rname for alg in algs]
mchrnames = [alg.mrnm for alg in algs]
return isizes, starts, mstarts, chrnames, mchrnames
def cnvs_from_pairs(self, reads, storage, cnvType, m=1):
"""Call deletions for paired reads. """
if not reads:
return
#get read clusters
rlen = self.rlen
for algs in self.get_clusters(reads):
isizes, starts, mstarts, chrnames, mchrnames = self.get_algs_features(algs)
#correct by the read length
try:
chri = int(np.median(chrnames))
except Exception:
sys.stderr.write("Cannot get chromosome number: %s\n"%str(chrnames))
continue
chrname = self.refs[chri]
#get left/right mate starts
leftSt = np.median(starts)
rightSt = np.median(mstarts)
if rightSt<leftSt:
leftSt, rightSt = rightSt, leftSt
#check local depth and if enough reads
nreads = len(algs)
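#require the cluster to contain at least cov_frac of the local read-start
#density per w-sized window (scaled by m) as supporting pairs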
minLcCov = self.w * self.cov_frac * m * self.chr2cov[chri][min(starts):max(starts)+1].mean()
if nreads < minLcCov:
continue
#if too high coverage for deletion
if cnvType == "DEL":
start = int(leftSt + self.isize_mean/2.0)
if start<0: start = 0
end = int(rightSt - self.isize_mean/2.0 + rlen) + 1
size = end-start
#check coverage difference - adjust by read start
cov_obs = self.chr2cov[chri][start-rlen/2:end-rlen/2].mean()
cov_ratio = cov_obs / self.cov_mean
if cov_ratio > 1 - self.covD:
continue
#if too low coverage for duplication
elif cnvType == "DUP":
start = int(leftSt - self.isize_mean/2.0 + rlen)
if start<0: start = 0
end = int(rightSt + self.isize_mean/2.0) + 1
size = end-start
#check dup size
if size < self.dup_isize_frac*self.isize_mean:
continue
#check coverage difference
cov_obs = self.chr2cov[chri][start:end].mean()
cov_ratio = cov_obs / self.cov_mean
if cov_ratio < 1 + self.covD:
continue
#insertion
else:
start = int(leftSt + self.isize_mean/2.0 - rlen/2)
if start<0: start = 0
end = start + rlen/2
size = int(self.isize_mean - np.median(isizes))
cov_obs = self.chr2cov[chri][start:end].mean()
cov_ratio = cov_obs / self.cov_mean
#print min(starts), max(mstarts), leftSt, rightSt, cov_obs, cov_ratio
if not 1-self.covD < cov_ratio < 1+self.covD:
continue
#define ploidy
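#copy number estimate: baseline ploidy scaled by observed/expected coverage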
ploidy = self.ploidy * cov_ratio
if end<=start:
info = "[Warning] End before start: \n %s:%s-%s reads: %s ploidy: %s\n %s\n %s\n %s\n"
sys.stderr.write(info%(chrname, start, end, nreads, ploidy, \
str(isizes), str(starts), str(mstarts)))
continue
#store del
storage[chri].append((start, end, nreads, ploidy, size))
def get_peaks(self, hist, offset, th=None):
"""Return collapsed peaks."""
if th is None:
th = np.mean(hist) + np.std(hist)
#collapse neighbours
peaks = []
for p, v in enumerate(hist):
if v<th:
continue
#adjust with window
s = p*self.w
e = s+self.w
#extend previous peak if overlap
if peaks and s <= pe+offset:
peaks[-1][1] = e
else:
peaks.append([s, e])
pe = e
return peaks
def _cov2cnv(self, chri, covHist, cnvType, storage, m):
"""Computes CNVs from depth of coverage."""
th = (m + self.covD*2) * self.cov_mean
for start, end in self.get_peaks(covHist*m, 2*self.w, th):
chrname = self.refs[chri]
size = end-start
nreads = 0
if size < self.cnv_size:
continue
cov_obs = self.chr2cov[chri][start:end].mean()
cov_ratio = cov_obs / self.cov_mean
ploidy = self.ploidy * cov_ratio#; print cov_obs, cov_ratio, ploidy
# check read depth
if cnvType=="DUP":
if cov_ratio < 1+self.covD:
continue
# cnvType=="DEL" and
elif cov_ratio > 1-self.covD:
continue
#skip if already in storage
overlapping = filter(lambda x: x[0]<start<x[1] or x[0]<end<x[1] \
or start<x[0]<end or start<x[1]<end, \
storage[chri])
if overlapping:
txt = "already reported as"
if self.merge:
txt = "replaced"
#get positions of overlapping elements
idx = [storage[chri].index(o) for o in overlapping]
nreads = sum(o[2] for o in overlapping)
idx.sort(reverse=True)
#remove these events starting from last
for i in idx:
storage[chri].pop(i)
#store event
storage[chri].append((start, end, nreads, ploidy, size))
if self.log:
mtmp = " %s:%s-%s reads:%s ploidy:%.2f"
mstr = "\n".join(mtmp%(chrname, o[0], o[1], o[2], o[3]) \
for o in overlapping)
info = " %s %s:%s-%s ploidy:%.2f %s:\n%s\n"
self.log.write(info%(cnvType, chrname, start, end, ploidy, txt, \
mstr))
#skip adding
continue
#store del
storage[chri].append((start, end, nreads, ploidy, size))
def cnvs_from_depth(self):
""" """
for chri, chrname in enumerate(self.refs):
#coverage hist in w size windows
covHist = np.array([self.chr2cov[chri][i:i+self.w].mean() \
for i in range(0, self.lengths[chri]+1, self.w)])
#duplications and deletions
for cnvType, storage, m in zip(("DUP", "DEL"), (self.dups, self.dels), \
(1, -1)):
self._cov2cnv(chri, covHist, cnvType, storage, m)
def call_variants(self):
"""Call structural variants"""
#get expected coverage
self.cov_mean = 1.0 * sum(sum(c) for c in self.chr2cov) / sum(self.lengths)
self.minReads = self.cov_frac * self.cov_mean * self.w
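#minReads: expected read starts per w-sized window scaled by cov_frac,
#i.e. the minimum number of supporting pairs per cluster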
if self.log:
info = "Calling variants...\n Expected read coverage: %.3f\n"
self.log.write(info % self.cov_mean)
#write header
self.out.write("#chrom\tstart\tend\tname\treads pairs\tploidy\tsize\n")
#set output formats
self.cnvline = "%s\t%s\t%s\t%s\t%s\t%.2f\t%s\n"
#call deletions and duplications
for reads, storage, cnvType in zip((self.delReads, self.dupReads), \
(self.dels, self.dups), ("DEL", "DUP")):
#call from paired reads
self.cnvs_from_pairs(reads, storage, cnvType)
#call from read depth
self.cnvs_from_depth()
#insertions
self.cnvs_from_pairs(self.insReads, self.inss, "INS", 4)
##report
for sv, cnvType in zip((self.dels, self.dups, self.inss), \
("DEL", "DUP", "INS")):
i = 0
for chri, variants in enumerate(sv):
variants.sort()
for i, (start, end, nreads, ploidy, size) in enumerate(variants, i+1):
chrname = self.refs[chri]
#define name
name = "%s%3i" % (cnvType, i)
name = name.replace(" ","0")
#write output
self.out.write(self.cnvline%(chrname, start, end, name, nreads, \
ploidy, size))
if self.log:
sys.stderr.write(" %ss: %s\n" % (cnvType, i))
def parse(self, test=0):
"""Parse sam alignments and store info"""
#parse algs
if self.log:
self.log.write("Parsing alignments...\n")
pchrom = ""
for i, alg in enumerate(self.sam, 1):
if test and i > test:
break
#write log
if self.log and not i % 1e5:
info = " %s [%.1f%s] reads for dels: %s dups: %s ins: %s invs: %s trans: %s [%s Mb]\r"
self.log.write(info % (i, i*100.0/self.nalgs, '%', len(self.delReads), \
len(self.dupReads), len(self.insReads), len(self.invReads), len(self.traReads), \
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024))
#skip unmapped and secondary alignments
if alg.rname<0 or alg.is_secondary:
continue
#add read
self.add_read(alg)
if self.log:
self.log.write(" %s alignments parsed. \n"%i)
#dump all important info
if not self.nodump and not os.path.isfile(self.bamdump):
self.sv2bam()
#get mean rlen
if not self.rlen:
self.rlen = np.mean([alg.rlen for alg in self.delReads])
if self.log:
self.log.write(" Mean read length: %.2f \n"%self.rlen)
#call variants
self.call_variants()
def main():
import argparse
usage = "%(prog)s -v" #usage=usage,
parser = argparse.ArgumentParser(description=desc, epilog=epilog, \
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version='1.0b')
parser.add_argument("-v", "--verbose", default=False, action="store_true",
help="verbose")
parser.add_argument("-i", "--bam", required=True,
help="BAM file")
parser.add_argument("-o", "--output", default=sys.stdout, type=argparse.FileType('w'),
help="output stream [stdout]")
parser.add_argument("-p", "--ploidy", default=2, type=int,
help="ploidy [%(default)s]")
parser.add_argument("-q", "--mapq", default=20, type=int,
help="min mapping quality for variants [%(default)s]")
parser.add_argument("--rlen", default=None, type=int,
help="read length [get from data]")
parser.add_argument("-c", "--covD", default=0.33, type=float,
help="min coverage change to call deletion/duplication [%(default)s]")
parser.add_argument("--cov_frac", default=0.1, type=float,
help="min fraction of local depth to call variation [%(default)s]")
parser.add_argument("--dup_isize_frac", default=0.9, type=float,
help="min duplication size as insert size fraction [%(default)s]")
parser.add_argument("--cnv_size", default=1000, type=int,
help="min CNV size from depth of coverage [%(default)s]")
parser.add_argument("--merge", default=False, action="store_true",
help="merge read pairs variants using depth of coverage variants")
parser.add_argument("--nodump", default=False, action="store_true",
help="dump SV reads for faster recalculations")
o = parser.parse_args()
if o.verbose:
sys.stderr.write("Options: %s\n"%str(o))
#initialise structural variants
sv = SVs(o.bam, out=o.output, mapq=o.mapq, ploidy=o.ploidy, covD=o.covD, \
cov_frac=o.cov_frac, rlen=o.rlen, dup_isize_frac=o.dup_isize_frac, \
cnv_size=o.cnv_size, merge=o.merge, \
nodump=o.nodump, verbose=o.verbose)
#call variants in all chromosomes
sv.parse()
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
except IOError as e:
sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
dt = datetime.now()-t0
sys.stderr.write("#Time elapsed: %s\n"%dt)
|
lpryszcz/bin
|
bam2sv.py
|
Python
|
gpl-3.0
| 25,590
|
[
"pysam"
] |
64d7f7d5f214adce4186e73bf7202ba142682e5e309a2e60d48a92d7a9949a36
|
# -*- coding: utf-8 -*-
import os
import sqlite3
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # New stdlib location in 3.0
from gpn.tests import _unittest as unittest
from gpn.tests.common import MkdtempTestCase
from gpn.node import Node
from gpn.connector import _schema
from gpn.connector import _SharedConnection
from gpn import IN_MEMORY
from gpn import TEMP_FILE
from gpn import READ_ONLY
class TestInstantiation(MkdtempTestCase):
def _make_node(self, filename):
global _schema
self._existing_node = filename
connection = sqlite3.connect(self._existing_node)
cursor = connection.cursor()
cursor.execute('PRAGMA synchronous=OFF')
for operation in _schema:
cursor.execute(operation)
cursor.execute('PRAGMA synchronous=FULL')
connection.close()
def test_existing_node(self):
"""Existing node should load without errors."""
filename = 'temp_node.node'
self._make_node(filename)
ptn = Node(self._existing_node) # Use existing file.
self.assertEqual(ptn.name, 'temp_node')
@unittest.skipIf(sqlite3.sqlite_version_info < (3, 8, 0),
'The query_only PRAGMA was added to SQLite in version 3.8.0')
def test_read_only_node(self):
"""The READ_ONLY flag should open a Node in read-only mode."""
self._make_node('existing_node')
ptn = Node(self._existing_node, mode=READ_ONLY)
connection = ptn._connect()
cursor = connection.cursor()
regex = 'attempt to write a readonly database'
with self.assertRaisesRegex((sqlite3.OperationalError,
sqlite3.IntegrityError), regex):
cursor.execute('INSERT INTO cell DEFAULT VALUES')
def test_new_node(self):
"""Named nodes that do not exist should be created."""
filepath = 'new_node.node'
self.assertFalse(os.path.exists(filepath))
ptn = Node(filepath) # Create new file.
del ptn
self.assertTrue(os.path.exists(filepath))
def test_subdirectory(self):
"""Subdirectory reference should also be supported."""
os.mkdir('subdir')
filepath = 'subdir/new_node.node'
self.assertFalse(os.path.exists(filepath))
ptn = Node(filepath) # Create new file.
self.assertEqual(ptn.name, 'subdir/new_node')
del ptn
self.assertTrue(os.path.exists(filepath))
def test_path_name_error(self):
"""If a path is specified, it should be used to set the node name.
If a `name` attribute is also provided, it must not be accepted.
"""
regex = 'Cannot specify both path and name.'
with self.assertRaisesRegex(AssertionError, regex):
Node('some_path.node', name='some_name')
def test_temporary_node(self):
"""Unnamed nodes should be temporary (in memory or tempfile)."""
# In memory.
ptn = Node()
self.assertFalse(ptn._connect._init_as_temp)
self.assertIsInstance(ptn._connect._dbsrc, _SharedConnection)
self.assertIsNone(ptn.name)
# On disk.
ptn = Node(mode=TEMP_FILE)
self.assertTrue(ptn._connect._init_as_temp)
self.assertTrue(os.path.isfile(ptn._connect._dbsrc))
self.assertIsNone(ptn.name)
def test_named_temporary_nodes(self):
# In memory.
node_name = 'temp_with_name'
ptn = Node(name=node_name)
self.assertFalse(ptn._connect._init_as_temp)
self.assertIsInstance(ptn._connect._dbsrc, _SharedConnection)
self.assertEqual(ptn.name, node_name)
# On disk.
ptn = Node(name=node_name, mode=TEMP_FILE)
self.assertTrue(ptn._connect._init_as_temp)
self.assertTrue(os.path.isfile(ptn._connect._dbsrc))
self.assertEqual(ptn.name, node_name)
class TestHash(unittest.TestCase):
def test_get_hash(self):
node = Node(mode=IN_MEMORY)
connection = node._connect()
cursor = connection.cursor()
# Hash of empty node should be None.
result = node._get_hash(cursor)
self.assertIsNone(result)
# Build node.
cursor.execute("INSERT INTO hierarchy VALUES (1, 'state', 0)")
cursor.execute("INSERT INTO hierarchy VALUES (2, 'county', 1)")
cursor.execute("INSERT INTO cell VALUES (1, 0)")
cursor.execute("INSERT INTO label VALUES (1, 1, 'Indiana')")
cursor.execute("INSERT INTO label VALUES (2, 2, 'LaPorte')")
cursor.execute("INSERT INTO cell_label VALUES (1, 1, 1, 1)")
cursor.execute("INSERT INTO cell_label VALUES (2, 1, 2, 2)")
# Expected hash of "11Indiana12LaPorte" (independently verified).
expected = 'a0eadc7b0547b9405dae9e3c50e038a550d9a718af10b53e567995a9378c22d7'
result = node._get_hash(cursor)
self.assertEqual(expected, result)
class TestTransactionHandling(unittest.TestCase):
def setUp(self):
self._node = Node(mode=IN_MEMORY)
connection = self._node._connect()
cursor = connection.cursor()
cursor.executescript("""
INSERT INTO hierarchy VALUES (1, 'country', 0);
INSERT INTO hierarchy VALUES (2, 'region', 1);
INSERT INTO cell VALUES (1, 0);
INSERT INTO label VALUES (1, 1, 'USA');
INSERT INTO label VALUES (2, 2, 'Northeast');
INSERT INTO cell_label VALUES (1, 1, 1, 1);
INSERT INTO cell_label VALUES (2, 1, 2, 2);
INSERT INTO cell VALUES (2, 0);
INSERT INTO label VALUES (3, 2, 'Midwest');
INSERT INTO cell_label VALUES (3, 2, 1, 1);
INSERT INTO cell_label VALUES (4, 2, 2, 3);
""")
def test_commit(self):
with self._node._connect() as connection:
connection.isolation_level = None
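# isolation_level=None puts sqlite3 in autocommit mode, so transaction
# boundaries are set by the explicit BEGIN below and the commit issued
# when the "with connection" block exits.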
cursor = connection.cursor()
cursor.execute('BEGIN TRANSACTION')
cursor.execute('INSERT INTO cell VALUES (3, 0)') # <- Change.
cursor.execute('SELECT COUNT(*) FROM cell')
msg = 'Changes should be committed.'
self.assertEqual([(3,)], cursor.fetchall(), msg)
def test_rollback(self):
try:
with self._node._connect() as connection:
connection.isolation_level = None # <- REQUIRED!
cursor = connection.cursor() # <- REQUIRED!
cursor.execute('BEGIN TRANSACTION') # <- REQUIRED!
cursor.execute('DROP TABLE cell_label') # <- Change.
cursor.execute('INSERT INTO cell VALUES (3, 0)') # <- Change.
cursor.execute('This is not valid SQL -- operational error!') # <- Error!
except sqlite3.OperationalError:
pass
connection = self._node._connect()
cursor = connection.cursor()
msg = 'Changes should be rolled back.'
cursor.execute('SELECT COUNT(*) FROM cell')
self.assertEqual([(2,)], cursor.fetchall(), msg)
cursor.execute('SELECT COUNT(*) FROM cell_label')
self.assertEqual([(4,)], cursor.fetchall(), msg)
class TestInsert(unittest.TestCase):
def test_insert_one_cell(self):
node = Node(mode=IN_MEMORY)
connection = node._connect()
cursor = connection.cursor()
cursor.execute("INSERT INTO hierarchy VALUES (1, 'state', 0)")
cursor.execute("INSERT INTO hierarchy VALUES (2, 'county', 1)")
cursor.execute("INSERT INTO hierarchy VALUES (3, 'town', 2)")
items = [('state', 'OH'), ('county', 'Franklin'), ('town', 'Columbus')]
node._insert_one_cell(cursor, items) # <- Inserting here!
# Cell table.
cursor.execute('SELECT * FROM cell ORDER BY cell_id')
expected = [(1, 0)]
self.assertEqual(expected, cursor.fetchall())
# Label table.
cursor.execute('SELECT * FROM label ORDER BY label_id')
expected = [(1, 1, 'OH'),
(2, 2, 'Franklin'),
(3, 3, 'Columbus')]
self.assertEqual(expected, cursor.fetchall())
# Cell_label table.
expected = [(1, 1, 1, 1), (2, 1, 2, 2), (3, 1, 3, 3)]
cursor.execute('SELECT * FROM cell_label ORDER BY cell_label_id')
self.assertEqual(expected, cursor.fetchall())
def test_insert_cells(self):
self.maxDiff = None
fh = StringIO('state,county,town\n'
'OH,Allen,Lima\n'
'OH,Cuyahoga,Cleveland\n'
'OH,Franklin,Columbus\n'
'OH,Hamilton,Cincinnati\n'
'OH,Montgomery,Dayton\n')
node = Node(mode=IN_MEMORY)
node._insert_cells(fh) # <- Inserting here!
connection = node._connect()
cursor = connection.cursor()
# Hierarchy table.
cursor.execute('SELECT * FROM hierarchy ORDER BY hierarchy_level')
expected = [(1, 'state', 0), (2, 'county', 1), (3, 'town', 2)]
self.assertEqual(expected, cursor.fetchall())
# Cell table.
cursor.execute('SELECT * FROM cell ORDER BY cell_id')
expected = [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0)]
self.assertEqual(expected, cursor.fetchall())
# Label table.
cursor.execute('SELECT * FROM label ORDER BY label_id')
expected = [(1, 1, 'OH'), (2, 2, 'Allen'),
(3, 3, 'Lima'), (4, 2, 'Cuyahoga'),
(5, 3, 'Cleveland'), (6, 2, 'Franklin'),
(7, 3, 'Columbus'), (8, 2, 'Hamilton'),
(9, 3, 'Cincinnati'), (10, 2, 'Montgomery'),
(11, 3, 'Dayton'), (12, 1, 'UNMAPPED'),
(13, 2, 'UNMAPPED'), (14, 3, 'UNMAPPED')]
self.assertEqual(expected, cursor.fetchall())
# Cell_label table.
cursor.execute('SELECT * FROM cell_label ORDER BY cell_label_id')
expected = [(1, 1, 1, 1), (2, 1, 2, 2), (3, 1, 3, 3),
(4, 2, 1, 1), (5, 2, 2, 4), (6, 2, 3, 5),
(7, 3, 1, 1), (8, 3, 2, 6), (9, 3, 3, 7),
(10, 4, 1, 1), (11, 4, 2, 8), (12, 4, 3, 9),
(13, 5, 1, 1), (14, 5, 2, 10), (15, 5, 3, 11),
(16, 6, 1, 12), (17, 6, 2, 13), (18, 6, 3, 14)]
self.assertEqual(expected, cursor.fetchall())
# Node table (hash should be set).
cursor.execute('SELECT node_id, node_hash FROM node')
hashval = '71eeab7a5b4609a1978bd5c19e7d490556c5e42c503b39480c504bbaf99efe30'
self.assertEqual([(1, hashval)], cursor.fetchall())
def test_insert_cells_multiple_files(self):
"""Insert should accept multiple files."""
node = Node(mode=IN_MEMORY)
fh = StringIO('state,county,town\n'
'OH,Allen,Lima\n')
node._insert_cells(fh) # <- Inserting.
fh = StringIO('state,county,town\n'
'OH,Cuyahoga,Cleveland\n')
node._insert_cells(fh) # <- Inserting second file.
connection = node._connect()
cursor = connection.cursor()
# Hierarchy table.
cursor.execute('SELECT * FROM hierarchy ORDER BY hierarchy_level')
expected = [(1, 'state', 0), (2, 'county', 1), (3, 'town', 2)]
self.assertEqual(expected, cursor.fetchall())
# Cell table.
cursor.execute('SELECT * FROM cell ORDER BY cell_id')
expected = [(1, 0), (2, 0), (3, 0)]
self.assertEqual(expected, cursor.fetchall())
# Label table.
cursor.execute('SELECT * FROM label ORDER BY label_id')
expected = [(1, 1, 'OH'), (2, 2, 'Allen'),
(3, 3, 'Lima'), (4, 1, 'UNMAPPED'),
(5, 2, 'UNMAPPED'), (6, 3, 'UNMAPPED'),
(7, 2, 'Cuyahoga'), (8, 3, 'Cleveland')]
self.assertEqual(expected, cursor.fetchall())
# Node table should have two hashes.
cursor.execute('SELECT node_id, node_hash FROM node')
expected = [(1, '5011d6c33da25f6a98422461595f275f'
'289a7a745a9e89ab6b4d36675efd944b'),
(2, '9184abbd5461828e01fe82209463221a'
'65d4c21b40287d633cf7e324a27475f5')]
self.assertEqual(expected, cursor.fetchall())
def test_insert_cells_bad_header(self):
"""Files must have the same header"""
node = Node(mode=IN_MEMORY)
fh = StringIO('state,county,town\n'
'OH,Hamilton,Cincinnati\n')
node._insert_cells(fh)
regex = 'Fieldnames must match hierarchy values.'
with self.assertRaisesRegex(AssertionError, regex):
fh = StringIO('state,county\n'
'OH,Montgomery\n')
node._insert_cells(fh)
def test_insert_cells_duplicate(self):
"""Duplicate rows should fail and rollback to previous state."""
fh = StringIO('state,county,town\n'
'OH,Cuyahoga,Cleveland\n')
node = Node(mode=IN_MEMORY)
node._insert_cells(fh) # <- First insert!
regex = 'duplicate label set'
with self.assertRaisesRegex(sqlite3.IntegrityError, regex):
fh = StringIO('state,county,town\n'
'OH,Franklin,Columbus\n'
'OH,Hamilton,Cincinnati\n'
'OH,Hamilton,Cincinnati\n')
node._insert_cells(fh) # <- Second insert!
connection = node._connect()
cursor = connection.cursor()
# Cell table should include only values from first insert.
cursor.execute('SELECT * FROM cell ORDER BY cell_id')
expected = [(1, 0), (2, 0)]
self.assertEqual(expected, cursor.fetchall())
# Label table should include only values from first insert.
cursor.execute('SELECT * FROM label ORDER BY label_id')
expected = [(1, 1, 'OH'), (2, 2, 'Cuyahoga'), (3, 3, 'Cleveland'),
(4, 1, 'UNMAPPED'), (5, 2, 'UNMAPPED'), (6, 3, 'UNMAPPED')]
self.assertEqual(expected, cursor.fetchall())
def test_unmapped_levels(self):
"""Unmapped cells must have valid hierarchy levels."""
fh = StringIO('state,county,town\n'
'OH,Cuyahoga,Cleveland\n')
node = Node(mode=IN_MEMORY)
node._insert_cells(fh) # <- First insert!
regex = 'invalid unmapped level'
with self.assertRaisesRegex(sqlite3.IntegrityError, regex):
fh = StringIO('state,county,town\n'
'OH,Franklin,Columbus\n'
'OH,UNMAPPED,Cincinnati\n')
node._insert_cells(fh) # <- Second insert!
connection = node._connect()
cursor = connection.cursor()
# Cell table should include only values from first insert.
cursor.execute('SELECT * FROM cell ORDER BY cell_id')
expected = [(1, 0), (2, 0)]
self.assertEqual(expected, cursor.fetchall())
# Label table should include only values from first insert.
cursor.execute('SELECT * FROM label ORDER BY label_id')
expected = [(1, 1, 'OH'), (2, 2, 'Cuyahoga'), (3, 3, 'Cleveland'),
(4, 1, 'UNMAPPED'), (5, 2, 'UNMAPPED'), (6, 3, 'UNMAPPED')]
self.assertEqual(expected, cursor.fetchall())
class TestSelect(unittest.TestCase):
def setUp(self):
fh = StringIO('country,region,state,city\n' # cell_ids
'USA,Midwest,IL,Chicago\n' # 1
'USA,Northeast,NY,New York\n' # 2
'USA,Northeast,PA,Philadelphia\n' # 3
'USA,South,TX,Dallas\n' # 4
'USA,South,TX,Houston\n' # 5
'USA,South,TX,San Antonio\n' # 6
'USA,West,AZ,Phoenix\n' # 7
'USA,West,CA,Los Angeles\n' # 8
'USA,West,CA,San Diego\n' # 9
'USA,West,CA,San Jose\n') # 10
self.node = Node(mode=IN_MEMORY)
self.node._insert_cells(fh)
def test_select_cell_id(self):
""" """
connection = self.node._connect()
cursor = connection.cursor()
result = self.node._select_cell_id(cursor, region='Northeast')
self.assertEqual([2, 3], list(result))
result = self.node._select_cell_id(cursor, region='West', state='CA')
self.assertEqual([8, 9, 10], list(result))
kwds = {'region': 'West', 'state': 'CA'}
result = self.node._select_cell_id(cursor, **kwds)
self.assertEqual([8, 9, 10], list(result))
result = self.node._select_cell_id(cursor, state='XX')
self.assertEqual([], list(result))
#result = node._select_cell_id()
#self.assertEqual([], list(result))
def test_select_cell(self):
result = self.node.select_cell(region='West', state='CA')
expected = [
{'country': 'USA', 'region': 'West', 'state': 'CA', 'city': 'Los Angeles'},
{'country': 'USA', 'region': 'West', 'state': 'CA', 'city': 'San Diego'},
{'country': 'USA', 'region': 'West', 'state': 'CA', 'city': 'San Jose'},
]
self.assertEqual(expected, list(result))
class TestFileImportExport(MkdtempTestCase):
def setUp(self):
super(TestFileImportExport, self).setUp()
fh = StringIO('country,region,state,city\n'
'USA,Midwest,IL,Chicago\n'
'USA,Northeast,NY,New York\n'
'USA,Northeast,PA,Philadelphia\n')
node = Node(mode=IN_MEMORY)
node._insert_cells(fh)
self.node = node
def test_export(self):
filename = 'tempexport.csv'
self.node.export_cells(filename)
with open(filename) as fh:
file_contents = fh.read()
expected_contents = ('cell_id,country,region,state,city\n'
'1,USA,Midwest,IL,Chicago\n'
'2,USA,Northeast,NY,New York\n'
'3,USA,Northeast,PA,Philadelphia\n'
'4,UNMAPPED,UNMAPPED,UNMAPPED,UNMAPPED\n')
self.assertEqual(expected_contents, file_contents)
def test_already_exists(self):
filename = 'tempexport.csv'
with open(filename, 'w') as fh:
fh.write('foo\n1\n2\n3')
regex = filename + ' already exists'
with self.assertRaisesRegex(AssertionError, regex):
self.node.export_cells(filename)
class TestRepr(unittest.TestCase):
def test_empty(self):
node = Node()
expected = ("<class 'gpn.node.Node'>\n"
"Name: None\n"
"Cells: None\n"
"Hierarchy: None\n"
"Edges: None")
self.assertEqual(expected, repr(node))
def test_basic(self):
fh = StringIO('country,region,state,city\n'
'USA,Midwest,IL,Chicago\n'
'USA,Northeast,NY,New York\n'
'USA,Northeast,PA,Philadelphia\n')
node = Node(mode=IN_MEMORY, name='newptn')
node._insert_cells(fh)
expected = ("<class 'gpn.node.Node'>\n"
"Name: newptn\n"
"Cells: 4\n"
"Hierarchy: country (USA), region, state, city\n"
"Edges: None")
self.assertEqual(expected, repr(node))
if __name__ == '__main__':
unittest.main()
|
shawnbrown/gpn
|
gpn/tests/test_node.py
|
Python
|
mit
| 19,713
|
[
"COLUMBUS"
] |
729574ccf7b1a68a3d38eeff4d19e49f2c2b0eb0d281f1cc00fb2a47220304e3
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import TemplateView
from django.urls import reverse_lazy
from rest_framework.decorators import api_view
from catmaid.control import (authentication, user, group, log, message, client,
common, deeplink, project, stack, stackgroup, tile, tracing, stats,
annotation, textlabel, label, link, connector,
neuron, node, treenode, suppressed_virtual_treenode, skeleton,
skeletonexport, treenodeexport, cropping, data_view, ontology,
classification, notifications, roi, clustering, volume, noop,
useranalytics, user_evaluation, search, graphexport, transaction,
graph2, circles, analytics, review, wiringdiagram, object, sampler,
similarity, nat, origin, point, landmarks, project_token, pointcloud, pointset)
from catmaid.history import record_request_action as record_view
from catmaid.views import CatmaidView
from catmaid.views.admin import ProjectDeletion
# A regular expression matching floating point and integer numbers
num = r'[-+]?[0-9]*\.?[0-9]+'
integer = r'[-+]?[0-9]+'
# A regular expression matching lists of integers with comma as delimiter
intlist = r'[0-9]+(,[0-9]+)*'
# A regular expression matching comma-separated lists of words
wordlist = r'\w+(,\w+)*'
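# These fragments are interpolated into the route patterns below, e.g.
# url(r'^(?P<project_id>%s)/ontology/list$' % (integer), ...)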
app_name = 'catmaid'
# Add the main index.html page at the root:
urlpatterns = [
url(r'^$', ensure_csrf_cookie(CatmaidView.as_view(template_name='catmaid/index.html')), name="home"),
url(r'^version$', common.get_catmaid_version),
url(r'^neuroglancer$', ensure_csrf_cookie(CatmaidView.as_view(template_name='catmaid/neuroglancer.html'))),
]
# Additional administration views
urlpatterns += [
url(r'^admin/catmaid/project/delete-with-data$', ProjectDeletion.as_view(),
name="delete-projects-with-data"),
]
# Authentication and permissions
urlpatterns += [
url(r'^accounts/login$', authentication.login_user),
url(r'^accounts/logout$', authentication.logout_user),
url(r'^accounts/anonymous-api-token$', authentication.get_anonymous_token),
url(r'^accounts/(?P<project_id>\d+)/all-usernames$', authentication.all_usernames),
url(r'^permissions$', authentication.user_project_permissions),
url(r'^classinstance/(?P<ci_id>\d+)/permissions$', authentication.get_object_permissions),
url(r'^register$', authentication.register, name="register"),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
authentication.activate, name='activate'),
]
# Users
urlpatterns += [
url(r'^user-list$', user.user_list),
url(r'^user-table-list$', user.user_list_datatable),
url(r'^user-profile/update$', user.update_user_profile),
url(r'^user/password_change/$', user.NonAnonymousPasswordChangeView.as_view(
success_url=reverse_lazy('catmaid:home'), raise_exception=False)),
]
# Groups
urlpatterns += [
url(r'^groups/$', group.GroupList.as_view()),
url(r'^(?P<project_id>\d+)/groups/memberships/$', group.GroupMemberships.as_view()),
]
# Log
urlpatterns += [
url(r'^(?P<project_id>\d+)/logs/list$', log.list_logs),
url(r'^log/(?P<level>(info|error|debug))$', log.log_frontent_event),
]
# Transaction history
urlpatterns += [
url(r'^(?P<project_id>\d+)/transactions/$', transaction.transaction_collection),
url(r'^(?P<project_id>\d+)/transactions/location$', transaction.get_location),
]
# Project permissions
urlpatterns += [
url(r'^permissions/$', authentication.list_project_permissions),
url(r'^(?P<project_id>\d+)/permissions/project-user$', authentication.project_user_permission_set),
url(r'^(?P<project_id>\d+)/permissions/project-group$', authentication.project_group_permission_set),
]
# Project tokens
urlpatterns += [
url(r'^(?P<project_id>\d+)/project-tokens/$', project_token.ProjectTokenList.as_view()),
url(r'^(?P<project_id>\d+)/user-project-tokens/$', project_token.UserProjectTokenList.as_view()),
url(r'^(?P<project_id>\d+)/project-tokens/revoke$', project_token.ProjectTokenRevoker.as_view()),
url(r'^project-tokens/apply$', project_token.ProjectTokenApplicator.as_view()),
]
# Messages
urlpatterns += [
url(r'^messages/list$', message.list_messages),
url(r'^messages/(?P<message_id>\d+)/mark_read$', message.read_message),
url(r'^messages/latestunreaddate', message.get_latest_unread_date),
]
# CATMAID client datastore and data access
urlpatterns += [
url(r'^client/datastores/$', client.ClientDatastoreList.as_view()),
url(r'^client/datastores/(?P<name>[\w-]+)$', client.ClientDatastoreDetail.as_view()),
url(r'^client/datastores/(?P<name>[\w-]+)/$', client.ClientDataList.as_view()),
]
# General project model access
urlpatterns += [
url(r'^projects/$', project.projects),
url(r'^projects/export$', project.export_projects),
url(r'^(?P<project_id>\d+)/$', project.ProjectDetail.as_view()),
url(r'^(?P<project_id>\d+)/interpolatable-sections/$', project.interpolatable_sections),
url(r'^(?P<project_id>\d+)/fork$', project.fork),
url(r'^(?P<project_id>\d+)/favorite$', project.ProjectFavorite.as_view()),
]
# Deep links
urlpatterns += [
url(r'^(?P<project_id>\d+)/links/$', deeplink.DeepLinkList.as_view()),
url(r'^(?P<project_id>\d+)/links/(?P<alias>[0-9A-Za-z_\-]+)$', deeplink.DeepLinkSelector.as_view()),
url(r'^(?P<project_id>\d+)/links/(?P<alias>[0-9A-Za-z_\-]+)/details$', deeplink.DeepLinkDetails.as_view()),
url(r'^(?P<project_id>\d+)/links/by-id/(?P<link_id>[0-9]+)$', deeplink.DeepLinkByIdSelector.as_view()),
]
# General stack model access
urlpatterns += [
url(r'^(?P<project_id>\d+)/stacks$', stack.stacks),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/info$', stack.stack_info),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/groups$', stack.stack_groups),
]
# General stack group access
urlpatterns += [
url(r'^(?P<project_id>\d+)/stackgroup/(?P<stackgroup_id>\d+)/info$', stackgroup.get_stackgroup_info),
]
# Tile access
urlpatterns += [
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tile$', tile.get_tile),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/put_tile$', tile.put_tile),
]
# Tracing general
urlpatterns += [
url(r'^(?P<project_id>\d+)/tracing/setup/rebuild$', tracing.rebuild_tracing_setup_view),
url(r'^(?P<project_id>\d+)/tracing/setup/test$', tracing.check_tracing_setup_view),
url(r'^(?P<project_id>\d+)/tracing/setup/validate$', tracing.validate_tracing_setup),
]
# Reconstruction sampling
urlpatterns += [
url(r'^(?P<project_id>\d+)/samplers/$', sampler.list_samplers),
url(r'^(?P<project_id>\d+)/samplers/add$', sampler.add_sampler),
url(r'^(?P<project_id>\d+)/samplers/domains/types/$', sampler.list_domain_types),
url(r'^(?P<project_id>\d+)/samplers/domains/intervals/states/$', sampler.list_interval_states),
url(r'^(?P<project_id>\d+)/samplers/domains/(?P<domain_id>\d+)/details$', sampler.get_domain_details),
url(r'^(?P<project_id>\d+)/samplers/domains/(?P<domain_id>\d+)/intervals/$', sampler.list_domain_intervals),
url(r'^(?P<project_id>\d+)/samplers/domains/(?P<domain_id>\d+)/intervals/add-all$', sampler.add_all_intervals),
url(r'^(?P<project_id>\d+)/samplers/domains/intervals/(?P<interval_id>\d+)/details$', sampler.get_interval_details),
url(r'^(?P<project_id>\d+)/samplers/domains/intervals/(?P<interval_id>\d+)/set-state$', sampler.set_interval_state),
url(r'^(?P<project_id>\d+)/samplers/(?P<sampler_id>\d+)/$', sampler.SamplerDetail.as_view()),
url(r'^(?P<project_id>\d+)/samplers/(?P<sampler_id>\d+)/delete$', sampler.delete_sampler),
url(r'^(?P<project_id>\d+)/samplers/(?P<sampler_id>\d+)/domains/$', sampler.list_sampler_domains),
url(r'^(?P<project_id>\d+)/samplers/(?P<sampler_id>\d+)/domains/add$', sampler.add_sampler_domain),
url(r'^(?P<project_id>\d+)/samplers/(?P<sampler_id>\d+)/domains/add-all$', sampler.add_multiple_sampler_domains),
url(r'^(?P<project_id>\d+)/samplers/connectors/$', sampler.list_connectors),
url(r'^(?P<project_id>\d+)/samplers/connectors/states/$', sampler.list_connector_states),
url(r'^(?P<project_id>\d+)/samplers/domains/intervals/(?P<interval_id>\d+)/connectors/(?P<connector_id>\d+)/set-state$',
sampler.set_connector_state),
url(r'^(?P<project_id>\d+)/samplers/states/$', sampler.list_sampler_states),
]
# Statistics
urlpatterns += [
url(r'^(?P<project_id>\d+)/stats/aggregates$', stats.ProjectAggStats.as_view()),
url(r'^(?P<project_id>\d+)/stats/cable-length$', stats.stats_cable_length),
url(r'^(?P<project_id>\d+)/stats/nodecount$', stats.stats_nodecount),
url(r'^(?P<project_id>\d+)/stats/editor$', stats.stats_editor),
url(r'^(?P<project_id>\d+)/stats/summary$', stats.stats_summary),
url(r'^(?P<project_id>\d+)/stats/history$', stats.stats_history),
url(r'^(?P<project_id>\d+)/stats/user-history$', stats.stats_user_history),
url(r'^(?P<project_id>\d+)/stats/user-activity$', stats.stats_user_activity),
url(r'^(?P<project_id>\d+)/stats/server$', stats.ServerStats.as_view()),
]
# Annotations
urlpatterns += [
url(r'^(?P<project_id>\d+)/annotations/$', annotation.list_annotations),
url(r'^(?P<project_id>\d+)/annotations/query$', annotation.annotations_for_entities),
url(r'^(?P<project_id>\d+)/annotations/forskeletons$', annotation.annotations_for_skeletons),
url(r'^(?P<project_id>\d+)/annotations/table-list$', annotation.list_annotations_datatable),
url(r'^(?P<project_id>\d+)/annotations/add$', record_view("annotations.add")(annotation.annotate_entities)),
url(r'^(?P<project_id>\d+)/annotations/add-neuron-names$', record_view("annotations.addneuronname")(annotation.add_neuron_name_annotations)),
url(r'^(?P<project_id>\d+)/annotations/remove$', record_view("annotations.remove")(annotation.remove_annotations)),
url(r'^(?P<project_id>\d+)/annotations/replace$', record_view("annotations.replace")(annotation.replace_annotations)),
url(r'^(?P<project_id>\d+)/annotations/(?P<annotation_id>\d+)/remove$', record_view("annotations.remove")(annotation.remove_annotation)),
url(r'^(?P<project_id>\d+)/annotations/query-targets$', annotation.query_annotated_classinstances),
]
# Text labels
urlpatterns += [
url(r'^(?P<project_id>\d+)/textlabel/create$', record_view("textlabels.create")(textlabel.create_textlabel)),
url(r'^(?P<project_id>\d+)/textlabel/delete$', record_view("textlabels.delete")(textlabel.delete_textlabel)),
url(r'^(?P<project_id>\d+)/textlabel/update$', record_view("textlabels.update")(textlabel.update_textlabel)),
url(r'^(?P<project_id>\d+)/textlabel/all', textlabel.textlabels),
]
# Treenode labels
urlpatterns += [
url(r'^(?P<project_id>\d+)/labels/$', label.labels_all),
url(r'^(?P<project_id>\d+)/labels/detail$', label.labels_all_detail),
url(r'^(?P<project_id>\d+)/labels/stats$', label.get_label_stats),
url(r'^(?P<project_id>\d+)/labels-for-nodes$', label.labels_for_nodes),
url(r'^(?P<project_id>\d+)/labels/(?P<node_type>(treenode|location|connector))/(?P<node_id>\d+)/$', label.labels_for_node),
url(r'^(?P<project_id>\d+)/label/(?P<ntype>(treenode|location|connector))/(?P<location_id>\d+)/update$', record_view("labels.update")(label.label_update)),
url(r'^(?P<project_id>\d+)/label/(?P<ntype>(treenode|location|connector))/(?P<location_id>\d+)/remove$', record_view("labels.remove")(label.remove_label_link)),
url(r'^(?P<project_id>\d+)/label/remove$', record_view("labels.remove_unused")(label.label_remove)),
]
# Links
urlpatterns += [
url(r'^(?P<project_id>\d+)/link/create$', record_view("links.create")(link.create_link)),
url(r'^(?P<project_id>\d+)/link/delete$', record_view("links.remove")(link.delete_link)),
]
# Connector access
urlpatterns += [
url(r'^(?P<project_id>\d+)/connector/create$', record_view("connectors.create")(connector.create_connector)),
url(r'^(?P<project_id>\d+)/connector/delete$', record_view("connectors.remove")(connector.delete_connector)),
url(r'^(?P<project_id>\d+)/connector/list/graphedge$', connector.graphedge_list),
url(r'^(?P<project_id>\d+)/connector/list/one_to_many$', connector.one_to_many_synapses),
url(r'^(?P<project_id>\d+)/connector/list/many_to_many$', connector.many_to_many_synapses),
url(r'^(?P<project_id>\d+)/connector/list/completed$', connector.list_completed),
url(r'^(?P<project_id>\d+)/connector/list/linked-to-nodes$', connector.connectors_from_treenodes),
url(r'^(?P<project_id>\d+)/connector/skeletons$', connector.connector_skeletons),
url(r'^(?P<project_id>\d+)/connector/edgetimes$', connector.connector_associated_edgetimes),
url(r'^(?P<project_id>\d+)/connector/info$', connector.connectors_info),
url(r'^(?P<project_id>\d+)/connectors/$', connector.list_connectors),
url(r'^(?P<project_id>\d+)/connectors/links/$', connector.list_connector_links),
url(r'^(?P<project_id>\d+)/connectors/link-pairs/$', connector.list_connector_link_pairs),
url(r'^(?P<project_id>\d+)/connectors/(?P<connector_id>\d+)/$',
connector.connector_detail),
url(r'^(?P<project_id>\d+)/connectors/user-info$', connector.connector_user_info),
url(r'^(?P<project_id>\d+)/connectors/types/$', connector.connector_types),
url(r'^(?P<project_id>\d+)/connectors/in-bounding-box$', connector.connectors_in_bounding_box),
]
# Neuron access
urlpatterns += [
url(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/get-all-skeletons$', neuron.get_all_skeletons_of_neuron),
url(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/give-to-user$', record_view("neurons.give_to_user")(neuron.give_neuron_to_other_user)),
url(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/delete$', record_view("neurons.remove")(neuron.delete_neuron)),
url(r'^(?P<project_id>\d+)/neurons/(?P<neuron_id>\d+)/rename$', record_view("neurons.rename")(neuron.rename_neuron)),
url(r'^(?P<project_id>\d+)/neurons/$', neuron.list_neurons),
url(r'^(?P<project_id>\d+)/neurons/from-models$', neuron.get_neuron_ids_from_models),
url(r'^(?P<project_id>\d+)/neurons/rename$', neuron.rename_neurons),
url(r'^(?P<project_id>\d+)/neurons/all-skeletons$', neuron.list_all_skeletons),
]
# Node access
urlpatterns += [
url(r'^(?P<project_id>\d+)/node/(?P<node_id>\d+)/reviewed$', record_view("nodes.add_or_update_review")(node.update_location_reviewer)),
url(r'^(?P<project_id>\d+)/nodes/most-recent$', node.most_recent_treenode),
url(r'^(?P<project_id>\d+)/nodes/location$', node.get_locations),
url(r'^(?P<project_id>\d+)/nodes/nearest$', node.node_nearest),
url(r'^(?P<project_id>\d+)/node/update$', record_view("nodes.update_location")(node.node_update)),
url(r'^(?P<project_id>\d+)/node/list$', node.node_list_tuples),
url(r'^(?P<project_id>\d+)/node/get_location$', node.get_location),
url(r'^(?P<project_id>\d+)/node/user-info$', node.user_info),
url(r'^(?P<project_id>\d+)/nodes/find-labels$', node.find_labels),
url(r'^(?P<project_id>\d+)/nodes/$', api_view(['POST'])(node.node_list_tuples)),
]
# Treenode access
urlpatterns += [
url(r'^(?P<project_id>\d+)/treenode/create$', record_view("treenodes.create")(treenode.create_treenode)),
url(r'^(?P<project_id>\d+)/treenode/insert$', record_view("treenodes.insert")(treenode.insert_treenode)),
url(r'^(?P<project_id>\d+)/treenode/delete$', record_view("treenodes.remove")(treenode.delete_treenode)),
url(r'^(?P<project_id>\d+)/treenodes/compact-detail$', treenode.compact_detail_list),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/info$', treenode.treenode_info),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/compact-detail$', treenode.compact_detail),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/children$', treenode.find_children),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/confidence$', record_view("treenodes.update_confidence")(treenode.update_confidence)),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/parent$', record_view("treenodes.update_parent")(treenode.update_parent)),
url(r'^(?P<project_id>\d+)/treenode/(?P<treenode_id>\d+)/radius$', record_view("treenodes.update_radius")(treenode.update_radius)),
url(r'^(?P<project_id>\d+)/treenodes/radius$', record_view("treenodes.update_radius")(treenode.update_radii)),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/previous-branch-or-root$', treenode.find_previous_branchnode_or_root),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/next-branch-or-end$', treenode.find_next_branchnode_or_end),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/importing-user$', treenode.importing_user),
]
# Suppressed virtual treenode access
urlpatterns += [
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/suppressed-virtual/$',
record_view("treenodes.suppress_virtual_node", "POST")(suppressed_virtual_treenode.SuppressedVirtualTreenodeList.as_view())),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/suppressed-virtual/(?P<suppressed_id>\d+)$',
record_view("treenodes.unsuppress_virtual_node", "DELETE")(suppressed_virtual_treenode.SuppressedVirtualTreenodeDetail.as_view())),
]
# General skeleton access
urlpatterns += [
url(r'^(?P<project_id>\d+)/skeletons/$', skeleton.list_skeletons),
url(r'^(?P<project_id>\d+)/skeletons/cable-length$', skeleton.cable_lengths),
url(r'^(?P<project_id>\d+)/skeletons/summary$', skeleton.summary),
url(r'^(?P<project_id>\d+)/skeletons/connectivity-counts$', skeleton.connectivity_counts),
url(r'^(?P<project_id>\d+)/skeletons/completeness$', skeleton.completeness),
url(r'^(?P<project_id>\d+)/skeletons/validity$', skeleton.validity),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/node_count$', skeleton.node_count),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/neuronname$', skeleton.neuronname),
url(r'^(?P<project_id>\d+)/skeleton/neuronnames$', skeleton.neuronnames),
url(r'^(?P<project_id>\d+)/skeleton/node/(?P<treenode_id>\d+)/node_count$', skeleton.node_count),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/review/reset-own$', record_view("skeletons.reset_own_reviews")(skeleton.reset_own_reviewer_ids)),
url(r'^(?P<project_id>\d+)/skeletons/connectivity$', skeleton.skeleton_info_raw),
url(r'^(?P<project_id>\d+)/skeletons/in-bounding-box$', skeleton.skeletons_in_bounding_box),
url(r'^(?P<project_id>\d+)/skeleton/connectivity_matrix$', skeleton.connectivity_matrix),
url(r'^(?P<project_id>\d+)/skeletons/connectivity_matrix/csv$', skeleton.connectivity_matrix_csv),
url(r'^(?P<project_id>\d+)/skeletons/review-status$', skeleton.review_status),
url(r'^(?P<project_id>\d+)/skeletons/from-origin$', skeleton.from_origin),
url(r'^(?P<project_id>\d+)/skeletons/origin$', skeleton.origin_info),
url(r'^(?P<project_id>\d+)/skeletons/import-info$', skeleton.import_info),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/statistics$', skeleton.skeleton_statistics),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/contributor_statistics$', skeleton.contributor_statistics),
url(r'^(?P<project_id>\d+)/skeleton/contributor_statistics_multiple$', skeleton.contributor_statistics_multiple),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/id$', record_view('skeletons.update_id')(skeleton.SkeletonIdDetails.as_view())),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/find-labels$', skeleton.find_labels),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/open-leaves$', skeleton.open_leaves),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/root$', skeleton.root_for_skeleton),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/sampler-count$', skeleton.sampler_count),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/cable-length$', skeleton.cable_length),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/neuron-details$', skeleton.neurondetails),
url(r'^(?P<project_id>\d+)/skeleton/split$', record_view("skeletons.split")(skeleton.split_skeleton)),
url(r'^(?P<project_id>\d+)/skeleton/ancestry$', skeleton.skeleton_ancestry),
url(r'^(?P<project_id>\d+)/skeleton/join$', record_view("skeletons.merge")(skeleton.join_skeleton)),
url(r'^(?P<project_id>\d+)/skeleton/reroot$', record_view("skeletons.reroot")(skeleton.reroot_skeleton)),
url(r'^(?P<project_id>\d+)/skeletons/sampler-count$', skeleton.list_sampler_count),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/permissions$', skeleton.get_skeleton_permissions),
url(r'^(?P<project_id>\d+)/skeletons/import$', record_view("skeletons.import")(skeleton.import_skeleton)),
url(r'^(?P<project_id>\d+)/skeleton/annotationlist$', skeleton.annotation_list),
url(r'^(?P<project_id>\d+)/skeletons/within-spatial-distance$', skeleton.within_spatial_distance),
url(r'^(?P<project_id>\d+)/skeletons/node-labels$', skeleton.skeletons_by_node_labels),
url(r'^(?P<project_id>\d+)/skeletons/change-history$', skeleton.change_history),
url(r'^(?P<project_id>\d+)/skeletongroup/adjacency_matrix$', skeleton.adjacency_matrix),
url(r'^(?P<project_id>\d+)/skeletongroup/skeletonlist_subgraph', skeleton.skeletonlist_subgraph),
url(r'^(?P<project_id>\d+)/skeletongroup/all_shared_connectors', skeleton.all_shared_connectors),
]
urlpatterns += [
url(r'^(?P<project_id>\d+)/origins/$', origin.OriginCollection.as_view()),
]
# Skeleton export
urlpatterns += [
url(r'^(?P<project_id>\d+)/neuroml/neuroml_level3_v181$', skeletonexport.export_neuroml_level3_v181),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/swc$', skeletonexport.skeleton_swc),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/eswc$', skeletonexport.skeleton_eswc),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/neuroml$', skeletonexport.skeletons_neuroml),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/json$', skeletonexport.skeleton_with_metadata),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/compact-json$', skeletonexport.skeleton_for_3d_viewer),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/nrrd$', nat.r.export_nrrd),
url(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_nodes>\d)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-arbor$', skeletonexport.compact_arbor),
url(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_nodes>\d)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-arbor-with-minutes$', skeletonexport.compact_arbor_with_minutes),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/review$', skeletonexport.export_review_skeleton),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/reviewed-nodes$', skeletonexport.export_skeleton_reviews),
url(r'^(?P<project_id>\d+)/skeletons/measure$', skeletonexport.measure_skeletons),
url(r'^(?P<project_id>\d+)/skeleton/connectors-by-partner$', skeletonexport.skeleton_connectors_by_partner),
url(r'^(?P<project_id>\d+)/skeletons/partners-by-connector$', skeletonexport.partners_by_connector),
url(r'^(?P<project_id>\d+)/skeletons/connector-polyadicity$', skeletonexport.connector_polyadicity),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/compact-detail$', skeletonexport.compact_skeleton_detail),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/neuroglancer$', skeletonexport.neuroglancer_skeleton),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/node-overview$', skeletonexport.treenode_overview),
url(r'^(?P<project_id>\d+)/skeletons/compact-detail$', skeletonexport.compact_skeleton_detail_many),
# Marked as deprecated, but kept for backwards compatibility
url(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-skeleton$', skeletonexport.compact_skeleton),
]
# Treenode and Connector image stack archive export
urlpatterns += [
url(r'^(?P<project_id>\d+)/connectorarchive/export$', treenodeexport.export_connectors),
url(r'^(?P<project_id>\d+)/treenodearchive/export$', treenodeexport.export_treenodes),
]
# Pointclouds
urlpatterns += [
url(r'^(?P<project_id>\d+)/pointclouds/$', pointcloud.PointCloudList.as_view()),
url(r'^(?P<project_id>\d+)/pointclouds/(?P<pointcloud_id>\d+)/$', pointcloud.PointCloudDetail.as_view()),
url(r'^(?P<project_id>\d+)/pointclouds/(?P<pointcloud_id>\d+)/images/(?P<image_id>\d+)/$', pointcloud.PointCloudImageDetail.as_view()),
]
# Pointsets
urlpatterns += [
url(r'^(?P<project_id>\d+)/pointsets/$', pointset.PointSetList.as_view()),
url(r'^(?P<project_id>\d+)/pointsets/(?P<pointset_id>\d+)/$', pointset.PointSetDetail.as_view()),
]
urlpatterns += [
url(r'^(?P<project_id>\d+)/similarity/configs/$', similarity.ConfigurationList.as_view()),
url(r'^(?P<project_id>\d+)/similarity/configs/(?P<config_id>\d+)/$', similarity.ConfigurationDetail.as_view()),
url(r'^(?P<project_id>\d+)/similarity/configs/(?P<config_id>\d+)/recompute$', similarity.recompute_config),
url(r'^(?P<project_id>\d+)/similarity/queries/$', similarity.SimilarityList.as_view()),
url(r'^(?P<project_id>\d+)/similarity/queries/similarity$', similarity.compare_skeletons),
url(r'^(?P<project_id>\d+)/similarity/queries/(?P<similarity_id>\d+)/$', similarity.SimilarityDetail.as_view()),
url(r'^(?P<project_id>\d+)/similarity/queries/(?P<similarity_id>\d+)/recompute$', similarity.recompute_similarity),
url(r'^(?P<project_id>\d+)/similarity/test-setup$', similarity.test_setup),
]
# Cropping
urlpatterns += [
url(r'^(?P<project_id>\d+)/crop', cropping.crop),
url(r'^crop/download/(?P<file_path>.*)/$', cropping.download_crop)
]
# Tagging
urlpatterns += [
url(r'^(?P<project_id>\d+)/tags/list$', project.list_project_tags),
url(r'^(?P<project_id>\d+)/tags/clear$', record_view("projects.clear_tags")(project.update_project_tags)),
url(r'^(?P<project_id>\d+)/tags/(?P<tags>.*)/update$', record_view("projects.update_tags")(project.update_project_tags)),
]
urlpatterns += [
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/list$', stack.list_stack_tags),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/clear$', record_view("stacks.clear_tags")(stack.update_stack_tags)),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/(?P<tags>.*)/update$', record_view("stacks.update_tags")(stack.update_stack_tags)),
]
# Data views
urlpatterns += [
url(r'^dataviews/list$', data_view.get_available_data_views, name='list_dataviews'),
url(r'^dataviews/default$', data_view.get_default_properties, name='default_dataview'),
url(r'^dataviews/(?P<data_view_id>\d+)/$', data_view.get_detail, name='detail_dataview'),
url(r'^dataviews/(?P<data_view_id>\d+)/make-home-view$', data_view.make_home_view, name='make_home_view'),
url(r'^dataviews/show/(?P<data_view_id>\d+)$', data_view.get_data_view, name='show_dataview'),
url(r'^dataviews/show/default$', data_view.get_default_data_view, name='show_default_dataview'),
url(r'^dataviews/type/comment$', data_view.get_data_view_type_comment, name='get_dataview_type_comment'),
url(r'^dataviews/type/(?P<data_view_id>\d+)$', data_view.get_data_view_type, name='get_dataview_type'),
]
# Ontologies
urlpatterns += [
url(r'^ontology/knownroots$', ontology.get_known_ontology_roots),
url(r'^(?P<project_id>%s)/ontology/roots/$' % (integer), ontology.get_existing_roots),
url(r'^(?P<project_id>%s)/ontology/list$' % (integer), ontology.list_ontology),
url(r'^(?P<project_id>%s)/ontology/relations$' % (integer), ontology.get_available_relations),
url(r'^(?P<project_id>%s)/ontology/relations/add$' % (integer), record_view("ontologies.add_relation")(ontology.add_relation_to_ontology)),
url(r'^(?P<project_id>%s)/ontology/relations/rename$' % (integer), record_view("ontologies.rename_relation")(ontology.rename_relation)),
url(r'^(?P<project_id>%s)/ontology/relations/remove$' % (integer), record_view("ontologies.remove_relation")(ontology.remove_relation_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/relations/removeall$' % (integer), record_view("ontologies.remove_all_relations")(ontology.remove_all_relations_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/relations/list$' % (integer), ontology.list_available_relations),
url(r'^(?P<project_id>%s)/ontology/classes$' % (integer), ontology.get_available_classes),
url(r'^(?P<project_id>%s)/ontology/classes/add$' % (integer), record_view("ontologies.add_class")(ontology.add_class_to_ontology)),
url(r'^(?P<project_id>%s)/ontology/classes/rename$' % (integer), record_view("ontologies.rename_class")(ontology.rename_class)),
url(r'^(?P<project_id>%s)/ontology/classes/remove$' % (integer), record_view("ontologies.remove_class")(ontology.remove_class_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/classes/removeall$' % (integer), record_view("ontologies.remove_all_classes")(ontology.remove_all_classes_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/classes/list$' % (integer), ontology.list_available_classes),
url(r'^(?P<project_id>%s)/ontology/links/add$' % (integer), record_view("ontologies.add_link")(ontology.add_link_to_ontology)),
url(r'^(?P<project_id>%s)/ontology/links/remove$' % (integer), record_view("ontologies.remove_link")(ontology.remove_link_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/links/removeselected$' % (integer), record_view("ontologies.remove_link")(ontology.remove_selected_links_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/links/removeall$' % (integer), record_view("ontologies.remove_all_links")(ontology.remove_all_links_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/restrictions/add$' % (integer), record_view("ontologies.add_restriction")(ontology.add_restriction)),
url(r'^(?P<project_id>%s)/ontology/restrictions/remove$' % (integer), record_view("ontologies.remove_restriction")(ontology.remove_restriction)),
url(r'^(?P<project_id>%s)/ontology/restrictions/(?P<restriction>[^/]*)/types$' % (integer), ontology.get_restriction_types),
]
# Classification
urlpatterns += [
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/roots/$',
classification.get_classification_roots),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/setup/test$',
classification.check_classification_setup_view, name='test_classification_setup'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/setup/rebuild$',
record_view("classifications.rebuild_env")(classification.rebuild_classification_setup_view), name='rebuild_classification_setup'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/new$',
record_view("classifications.add_graph")(classification.add_classification_graph), name='add_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/list$',
classification.list_classification_graph, name='list_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/list/(?P<link_id>\d+)$',
classification.list_classification_graph, name='list_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/(?P<link_id>\d+)/remove$',
record_view("classifications.remove_graph")(classification.remove_classification_graph), name='remove_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/instance-operation$',
record_view("classifications.update_graph")(classification.classification_instance_operation), name='classification_instance_operation'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/(?P<link_id>\d+)/autofill$',
record_view("classifications.autofill_graph")(classification.autofill_classification_graph), name='autofill_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/link$',
record_view("classifications.link_graph")(classification.link_classification_graph), name='link_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/stack/(?P<stack_id>{integer})/linkroi/(?P<ci_id>{integer})/$',
record_view("classifications.link_roi")(classification.link_roi_to_classification), name='link_roi_to_classification'),
url(rf'^classification/(?P<workspace_pid>{integer})/export$',
classification.export, name='export_classification'),
url(rf'^classification/(?P<workspace_pid>{integer})/export/excludetags/(?P<exclusion_tags>{wordlist})/$',
classification.export, name='export_classification'),
url(rf'^classification/(?P<workspace_pid>{integer})/search$',
classification.search, name='search_classifications'),
url(rf'^classification/(?P<workspace_pid>{integer})/export_ontology$',
classification.export_ontology, name='export_ontology'),
]
# Notifications
urlpatterns += [
url(r'^(?P<project_id>\d+)/notifications/list$', notifications.list_notifications),
url(r'^(?P<project_id>\d+)/changerequest/approve$', record_view("change_requests.approve")(notifications.approve_change_request)),
url(r'^(?P<project_id>\d+)/changerequest/reject$', record_view("change_requests.reject")(notifications.reject_change_request)),
]
# Regions of interest
urlpatterns += [
url(rf'^(?P<project_id>{integer})/roi/(?P<roi_id>{integer})/info$', roi.get_roi_info, name='get_roi_info'),
url(rf'^(?P<project_id>{integer})/roi/link/(?P<relation_id>{integer})/stack/(?P<stack_id>{integer})/ci/(?P<ci_id>{integer})/$',
record_view("rois.create_link")(roi.link_roi_to_class_instance), name='link_roi_to_class_instance'),
url(rf'^(?P<project_id>{integer})/roi/(?P<roi_id>{integer})/remove$', record_view("rois.remove_link")(roi.remove_roi_link), name='remove_roi_link'),
url(rf'^(?P<project_id>{integer})/roi/(?P<roi_id>{integer})/image$', roi.get_roi_image, name='get_roi_image'),
url(rf'^(?P<project_id>{integer})/roi/add$', record_view("rois.create")(roi.add_roi), name='add_roi'),
]
# General points
urlpatterns += [
url(rf'^(?P<project_id>{integer})/points/$', point.PointList.as_view()),
url(rf'^(?P<project_id>{integer})/points/(?P<point_id>[0-9]+)/$', point.PointDetail.as_view()),
]
# Landmarks
urlpatterns += [
url(rf'^(?P<project_id>{integer})/landmarks/$', landmarks.LandmarkList.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/(?P<landmark_id>[0-9]+)/$', landmarks.LandmarkDetail.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/(?P<landmark_id>[0-9]+)/locations/$',
landmarks.LandmarkLocationList.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/(?P<landmark_id>[0-9]+)/locations/(?P<location_id>[0-9]+)/$',
landmarks.LandmarkLocationDetail.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/(?P<landmark_id>[0-9]+)/groups/(?P<group_id>[0-9]+)/$',
landmarks.LandmarkAndGroupkLocationDetail.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/$', landmarks.LandmarkGroupList.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/import$', landmarks.LandmarkGroupImport.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/materialize$', landmarks.LandmarkGroupMaterializer.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/links/$', landmarks.LandmarkGroupLinks.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/links/(?P<link_id>[0-9]+)/$',
landmarks.LandmarkGroupLinkDetail.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/(?P<landmarkgroup_id>[0-9]+)/$', landmarks.LandmarkGroupDetail.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/(?P<landmarkgroup_id>[0-9]+)/transitively-linked$',
landmarks.LandmarkGroupLinkage.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/(?P<landmarkgroup_id>[0-9]+)/locations/(?P<location_id>[0-9]+)/$',
landmarks.LandmarkGroupLocationList.as_view()),
]
# Clustering
urlpatterns += [
url(r'^clustering/(?P<workspace_pid>\d+)/setup$',
record_view("clusterings.setup_env")(clustering.setup_clustering), name='clustering_setup'),
url(r'^clustering/(?P<workspace_pid>\d+)/show$',
TemplateView.as_view(template_name="catmaid/clustering/display.html"),
name="clustering_display"),
]
# Volumes
urlpatterns += [
url(r'^(?P<project_id>\d+)/volumes/$', volume.volume_collection),
url(r'^(?P<project_id>\d+)/volumes/add$', record_view("volumes.create")(volume.add_volume)),
url(r'^(?P<project_id>\d+)/volumes/from-origin$', volume.from_origin),
url(r'^(?P<project_id>\d+)/volumes/from-entities$', volume.from_entities),
url(r'^(?P<project_id>\d+)/volumes/import$', record_view("volumes.create")(volume.import_volumes)),
url(r'^(?P<project_id>\d+)/volumes/entities/$', volume.get_volume_entities),
url(r'^(?P<project_id>\d+)/volumes/skeleton-innervations$', volume.get_skeleton_innervations),
url(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/$', volume.VolumeDetail.as_view()),
url(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/intersect$', volume.intersects),
url(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/export\.(?P<extension>\w+)', volume.export_volume),
url(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/update-meta-info$', volume.update_meta_information),
]
# Analytics
urlpatterns += [
url(r'^(?P<project_id>\d+)/analytics/skeletons$', analytics.analyze_skeletons),
url(r'^(?P<project_id>\d+)/analytics/broken-section-nodes$', analytics.list_broken_section_nodes)
]
# Front-end tests, disabled by default
if settings.FRONT_END_TESTS_ENABLED:
urlpatterns += [
url(r'^tests$', login_required(CatmaidView.as_view(template_name="catmaid/tests.html")), name="frontend_tests"),
]
# Collection of various parts of the CATMAID API. These methods are usually
# one- or two-liners and having them in a separate statement would not improve
# readability. Therefore, they are all declared in this general statement.
urlpatterns += [
# User analytics and proficiency
url(r'^(?P<project_id>\d+)/useranalytics$', useranalytics.plot_useranalytics),
url(r'^(?P<project_id>\d+)/userproficiency$', user_evaluation.evaluate_user),
url(r'^(?P<project_id>\d+)/graphexport/json$', graphexport.export_jsongraph),
# Graphs
url(r'^(?P<project_id>\d+)/skeletons/confidence-compartment-subgraph', graph2.skeleton_graph),
# Circles
url(r'^(?P<project_id>\d+)/graph/circlesofhell', circles.circles_of_hell),
url(r'^(?P<project_id>\d+)/graph/directedpaths', circles.find_directed_paths),
url(r'^(?P<project_id>\d+)/graph/dps', circles.find_directed_path_skeletons),
# Review
url(r'^(?P<project_id>\d+)/user/reviewer-whitelist$', review.reviewer_whitelist),
# Search
url(r'^(?P<project_id>\d+)/search$', search.search),
# Wiring diagram export
url(r'^(?P<project_id>\d+)/wiringdiagram/json$', wiringdiagram.export_wiring_diagram),
url(r'^(?P<project_id>\d+)/wiringdiagram/nx_json$', wiringdiagram.export_wiring_diagram_nx),
# Annotation graph export
url(r'^(?P<project_id>\d+)/annotationdiagram/nx_json$', object.convert_annotations_to_networkx),
]
# Patterns for Janelia render web service access
from catmaid.control.janelia_render import (
project as janelia_render_project,
review as janelia_render_review,
stack as janelia_render_stack)
urlpatterns += [
url(r'^janelia-render/projects/$', janelia_render_project.projects),
url(r'^(?P<project_id>.+)/user/reviewer-whitelist$', janelia_render_review.reviewer_whitelist),
url(r'^(?P<project_id>.+)/interpolatable-sections/$', noop.interpolatable_sections),
url(r'^janelia-render/(?P<project_id>.+)/stack/(?P<stack_id>.+)/info$', janelia_render_stack.stack_info),
url(r'^janelia-render/(?P<project_id>.+)/stacks$', janelia_render_stack.stacks),
url(r'^janelia-render/(?P<project_id>.+)/annotations/$', noop.list_annotations),
url(r'^janelia-render/(?P<project_id>.+)/annotations/query-targets$', noop.query_annotation_targets),
url(r'^janelia-render/client/datastores/(?P<name>[\w-]+)/$', noop.datastore_settings),
]
# Patterns for DVID access
from catmaid.control.dvid import (project as dvidproject,
review as dvidreview, stack as dvidstack)
urlpatterns += [
url(r'^dvid/projects/$', dvidproject.projects),
url(r'^(?P<project_id>.+)/user/reviewer-whitelist$', dvidreview.reviewer_whitelist),
url(r'^(?P<project_id>.+)/interpolatable-sections/$', noop.interpolatable_sections),
url(r'^dvid/(?P<project_id>.+)/stack/(?P<stack_id>.+)/info$', dvidstack.stack_info),
url(r'^dvid/(?P<project_id>.+)/stacks$', dvidstack.stacks),
url(r'^dvid/(?P<project_id>.+)/annotations/$', noop.list_annotations),
url(r'^dvid/(?P<project_id>.+)/annotations/query-targets$', noop.query_annotation_targets),
url(r'^dvid/client/datastores/(?P<name>[\w-]+)/$', noop.datastore_settings),
]
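# --- Illustrative sketch (not part of the original urls.py) ---
# The named patterns above (e.g. 'list_dataviews', 'show_dataview') can be
# resolved with Django's reverse(); the helper name and the data_view_id value
# below are hypothetical and only show the call shape, assuming this module is
# included in the project's root URLconf.
def _example_reverse_dataview_urls():
    from django.urls import reverse
    return (reverse('list_dataviews'),
            reverse('show_dataview', kwargs={'data_view_id': 1}))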
|
catmaid/CATMAID
|
django/applications/catmaid/urls.py
|
Python
|
gpl-3.0
| 41,647
|
[
"NEURON"
] |
9bedfa82be5eeeb50d709286d95b7447e526f5b3dc6214297f6930e1ba5363c0
|
import numpy as np
import scipy
import scipy.stats as stats
from scipy.misc import factorial
import cPickle  # needed by Distribution.dump_to_file
class Distribution(object):
def dim(self):
raise NotImplementedError('abstract base class')
def predict(self, cond=None):
raise NotImplementedError('abstract base class')
def sample(self, cond=None, key_prefix=""):
raise NotImplementedError('abstract base class')
def log_p(self, x, cond=None, key_prefix=""):
raise NotImplementedError('abstract base class')
def deriv_log_p(self, x, idx=None, cond=None, cond_key=None, cond_idx=None, lp0=None, eps=1e-4, **kwargs):
"""
Derivative of log P(X = x | cond = cond) with
respect to x_idx (if idx is not None) or with
respect to cond[cond_key]_{cond_idx} (if those
quantities are not None).
The default implementation computes a numerical
approximation to the derivative:
df/dx ~= (f(x + eps) - f(x)) / eps
"""
lp0 = lp0 if lp0 else self.log_p(x=x, cond=cond, **kwargs)
if cond_key is None:
# we're computing df/dx
if idx is None:
# assume x is scalar
deriv = ( self.log_p(x = x + eps, cond=cond, **kwargs) - lp0 ) / eps
else:
x[idx] += eps
deriv = ( self.log_p(x = x, cond=cond, **kwargs) - lp0 ) / eps
x[idx] -= eps
else:
# we're computing df/dcond[cond_key]
if cond_idx is None:
cond[cond_key] += eps
deriv = ( self.log_p(x = x, cond=cond, **kwargs) - lp0 ) / eps
cond[cond_key] -= eps
else:
cond[cond_key][cond_idx] += eps
deriv = ( self.log_p(x = x, cond=cond, **kwargs) - lp0 ) / eps
cond[cond_key][cond_idx] -= eps
return deriv
def dump_to_file(self, fname):
with open(fname, 'wb') as f:
cPickle.dump(self, f, cPickle.HIGHEST_PROTOCOL)
@staticmethod
def load_from_file(fname):
raise NotImplementedError('abstract base class')
def save_to_db(self, dbconn):
raise NotImplementedError('abstract base class')
@staticmethod
def load_from_db(dbconn, return_extra=False):
raise NotImplementedError('abstract base class')
class Gamma(Distribution):
def __init__(self, alpha, beta):
self.alpha = alpha
self.beta = beta
def log_p(self, x, **kwargs):
alpha = self.alpha
beta = self.beta
if x < 0.0: return np.log(1e-300)
# the special case of an exponential distribution is defined even when x==0
if alpha == 1: return np.log(beta) - beta*x
if x == 0.0: return np.log(1e-300)
lp = alpha*np.log(beta) - scipy.special.gammaln(alpha) + (alpha-1)*np.log(x) - beta*x
if np.isnan(lp):
lp = np.float("-inf")
return lp
def deriv_log_p(self, x):
alpha = self.alpha
beta = self.beta
if x == 0.0: return 1000.0
return (alpha-1)/x - beta
def predict(self, **kwargs):
return self.alpha/self.beta
def sample(self, **kwargs):
return stats.gamma.rvs(self.alpha, scale=1.0/self.beta, loc=0., size=1)
class InvGamma(Distribution):
# mean: beta/(alpha-1)
# mode: beta/(alpha+1)
# variance: beta^2 / ( (alpha-1)^2 (alpha-2) )
def __init__(self, alpha, beta):
self.alpha = alpha
self.beta = beta
def predict(self):
return self.beta / (self.alpha+1) # return the mode, since the mean isn't always defined
def log_p(self, x):
alpha = self.alpha
beta = self.beta
if alpha <= 0 or beta <= 0:
return np.float("-inf")
if x == 0.0: return np.log(1e-300)
lp = alpha*np.log(beta) - scipy.special.gammaln(alpha) - (alpha+1)*np.log(x) - beta/x
if np.isnan(lp):
lp = np.float("-inf")
return lp
def deriv_log_p(self, x):
alpha = self.alpha
beta = self.beta
if x == 0.0: return 1000.0
return beta/(x**2) - (alpha+1)/x
class LogNormal(Distribution):
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def log_p(self, x):
mu = self.mu
sigma = self.sigma
if x == 0.0: return np.log(1e-300)
lp = -1 * np.log(x) - .5 * np.log(2*np.pi) - np.log(sigma) - .5 * (np.log(x) - mu)**2 / sigma**2
if np.isnan(lp):
lp = np.float("-inf")
return lp
def deriv_log_p(self, x):
mu = self.mu
sigma = self.sigma
if x == 0.0: return 1000.0
return (-1 -(np.log(x) - mu)/(sigma**2)) / x
def predict(self):
return np.exp(self.mu)
class LogUniform(Distribution):
def log_p(self, x):
return -np.log(x)
def deriv_log_p(self, x):
return -1.0/x
class Uniform(Distribution):
def __init__(self, lbound, rbound):
assert ( lbound < rbound)
self.lbound = lbound
self.rbound = rbound
def log_p(self, x, **kwargs):
if self.lbound <= x <= self.rbound:
return -np.log(self.rbound - self.lbound)
else:
return np.float("-inf")
def predict(self, **kwargs):
return self.lbound + (self.rbound - self.lbound) / 2
def sample(self, **kwargs):
return self.lbound + np.random.rand() * (self.rbound - self.lbound)
class Gaussian(Distribution):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def log_p(self, x, **kwargs):
mu = self.mean
sigma = self.std
lp = -.5 * np.log(2*np.pi*sigma*sigma) - .5 * (x - mu)**2 / sigma**2
if np.isnan(lp):
lp = np.float("-inf")
return lp
def predict(self, **kwargs):
return self.mean
def sample(self, **kwargs):
return self.mean + np.random.randn() * self.std
class Laplacian(Distribution):
def __init__(self, center, scale):
self.center = center
self.scale = scale
def log_p(self, x, **kwargs):
center = self.center
scale = self.scale
lp = -np.log(2*scale) - np.abs(x-center)/scale
if np.isnan(lp):
lp = np.float("-inf")
return lp
def predict(self, **kwargs):
return self.center
def sample(self, **kwargs):
u = np.random.rand() - 0.5
return self.center - self.scale * np.sign(u) * np.log(1 - 2 * np.abs(u))
class Exponential(Distribution):
def __init__(self, rate, min_value=0.0):
self.rate = float(rate)
self.min_value = min_value
def log_p(self, x, **kwargs):
rate = self.rate
x = x - self.min_value
if x < 0:
return np.float("-inf")
lp = np.log(rate) - rate * x
return lp
def predict(self, **kwargs):
return 1.0/self.rate + self.min_value
def sample(self, **kwargs):
u = np.random.rand()
return -np.log(u) / self.rate + self.min_value
class Poisson(Distribution):
def __init__(self, mu):
self.mu = mu
def log_p(self, x, **kwargs):
return -self.mu + x * np.log(self.mu) - scipy.special.gammaln(x+1)
def predict(self, **kwargs):
return self.mu
def sample(self, **kwargs):
return stats.poisson.rvs(self.mu, size=1)
class Bernoulli(Distribution):
def __init__(self, p):
self.p = p
def log_p(self, x, **kwargs):
if x:
return np.log(self.p)
else:
return np.log(1-self.p)
def predict(self, **kwargs):
return self.p >= .5
def sample(self, **kwargs):
u = np.random.rand()
return u < self.p
class Negate(Distribution):
def __init__(self, dist):
self.dist = dist
def dim(self, *args, **kwargs):
return self.dist.dim( *args, **kwargs)
def predict(self, *args, **kwargs):
return -1 * self.dist.predict(*args, **kwargs)
def sample(self, *args, **kwargs):
return -1 * self.dist.sample(*args, **kwargs)
def log_p(self, x, *args, **kwargs):
return self.dist.log_p(-x, *args, **kwargs)
def deriv_log_p(self, x, *args, **kwargs):
return self.dist.deriv_log_p(-x, *args, **kwargs)
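# --- Usage sketch (not part of the original module) ---
# Exercises the finite-difference fallback in Distribution.deriv_log_p against
# the analytic gradient of a Gaussian log-density; the tolerance is arbitrary.
if __name__ == '__main__':
    g = Gaussian(mean=0.0, std=2.0)
    x0 = 1.5
    numeric = g.deriv_log_p(x0)                 # falls back to (f(x+eps)-f(x))/eps
    analytic = -(x0 - g.mean) / g.std ** 2      # d/dx log N(x; mean, std)
    assert abs(numeric - analytic) < 1e-3
    print(numeric, analytic)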
|
davmre/treegp
|
distributions.py
|
Python
|
gpl-3.0
| 8,292
|
[
"Gaussian"
] |
b749f3c0f3b8b5c03a526a77d780c1028390bf53b40dc3e7243c6afc7a305eac
|
#!/usr/bin/env python
import sys
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-A' and i < len(sys.argv)-1:
sys.path = sys.path + [sys.argv[i+1]]
import vtk
from vtk.util.misc import vtkRegressionTestImage, vtkGetDataRoot
# create a stream surface from streamlines seeded along a line
# Create the RenderWindow, Renderer
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren )
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create pipeline
#
pl3d = vtk.vtkPLOT3DReader()
pl3d.SetXYZFileName( vtkGetDataRoot() + '/Data/combxyz.bin' )
pl3d.SetQFileName( vtkGetDataRoot() + '/Data/combq.bin' )
pl3d.SetScalarFunctionNumber( 100 )
pl3d.SetVectorFunctionNumber( 202 )
pl3d.Update()
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputConnection(pl3d.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
seeds = vtk.vtkLineSource()
seeds.SetPoint1(15, -5, 32)
seeds.SetPoint2(15, 5, 32)
seeds.SetResolution(10)
integ = vtk.vtkRungeKutta4()
sl = vtk.vtkStreamLine()
sl.SetIntegrator(integ)
sl.SetInputConnection(pl3d.GetOutputPort())
sl.SetSource(seeds.GetOutput())
sl.SetMaximumPropagationTime(0.1)
sl.SetIntegrationStepLength(0.1)
sl.SetIntegrationDirectionToBackward()
sl.SetStepLength(0.001)
scalarSurface = vtk.vtkRuledSurfaceFilter ()
scalarSurface.SetInputConnection(sl.GetOutputPort())
scalarSurface.SetOffset(0)
scalarSurface.SetOnRatio(2)
scalarSurface.PassLinesOn()
scalarSurface.SetRuledModeToResample()
scalarSurface.SetResolution(100,1)
scalarSurface.SetDistanceFactor(30)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(scalarSurface.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
mmapper = vtk.vtkPolyDataMapper()
mmapper.SetInputConnection(seeds.GetOutputPort())
mactor = vtk.vtkActor()
mactor.SetMapper(mmapper)
ren.AddActor(mactor)
ren.AddActor(actor)
ren.AddActor(outlineActor)
cam=ren.GetActiveCamera()
cam.SetClippingRange( 3.95297, 50 )
cam.SetFocalPoint( 8.88908, 0.595038, 29.3342 )
cam.SetPosition( -12.3332, 31.7479, 41.2387 )
cam.SetViewUp( 0.060772, -0.319905, 0.945498 )
renWin.Render()
retVal = vtkRegressionTestImage(renWin)
sys.exit( not retVal )
|
sgh/vtk
|
Graphics/Testing/Python/streamSurface2.py
|
Python
|
bsd-3-clause
| 2,307
|
[
"VTK"
] |
ac89606bd6e54c898a8f116d6756553f9d621393fce5e635fb4892ddcaea5878
|
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
import collections
S2N_SSLv3 = 30
S2N_TLS10 = 31
S2N_TLS11 = 32
S2N_TLS12 = 33
S2N_TLS13 = 34
# namedtuple makes iterating through ciphers across client libraries easier. The openssl_1_1_1_compatible flag is for
# s_client tests: s_client cannot negotiate ciphers where this flag is False.
S2N_CIPHER = collections.namedtuple('S2N_CIPHER', 'openssl_name gnutls_priority_str min_tls_vers openssl_1_1_1_compatible openssl_fips_compatible')
# Specifying a single cipher suite in GnuTLS requires specifying a "priority string" that removes all cipher suites,
# and then adds each algorithm(kx,auth,enc,mac) for a given suite. See https://www.gnutls.org/manual/html_node/Priority-Strings.html
S2N_GNUTLS_PRIORITY_PREFIX="NONE:+COMP-NULL:+CTYPE-ALL:+CURVE-ALL"
ALL_TEST_CIPHERS = [
S2N_CIPHER("RC4-MD5", S2N_GNUTLS_PRIORITY_PREFIX + ":+RSA:+ARCFOUR-128:+MD5", S2N_SSLv3, False, False),
S2N_CIPHER("RC4-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+RSA:+ARCFOUR-128:+SHA1", S2N_SSLv3, False, False),
S2N_CIPHER("DES-CBC3-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+RSA:+3DES-CBC:+SHA1", S2N_SSLv3, False, True),
S2N_CIPHER("DHE-RSA-DES-CBC3-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+DHE-RSA:+3DES-CBC:+SHA1", S2N_SSLv3, False, False),
S2N_CIPHER("AES128-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+RSA:+AES-128-CBC:+SHA1", S2N_SSLv3, True, True),
S2N_CIPHER("DHE-RSA-AES128-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+DHE-RSA:+AES-128-CBC:+SHA1", S2N_SSLv3, True, False),
S2N_CIPHER("AES256-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+RSA:+AES-256-CBC:+SHA1", S2N_SSLv3, True, True),
S2N_CIPHER("DHE-RSA-AES256-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+DHE-RSA:+AES-256-CBC:+SHA1", S2N_SSLv3, True, False),
S2N_CIPHER("AES128-SHA256", S2N_GNUTLS_PRIORITY_PREFIX + ":+RSA:+AES-128-CBC:+SHA256", S2N_TLS12, True, True),
S2N_CIPHER("AES256-SHA256", S2N_GNUTLS_PRIORITY_PREFIX + ":+RSA:+AES-256-CBC:+SHA256", S2N_TLS12, True, True),
S2N_CIPHER("DHE-RSA-AES128-SHA256", S2N_GNUTLS_PRIORITY_PREFIX + ":+DHE-RSA:+AES-128-CBC:+SHA256", S2N_TLS12, True, True),
S2N_CIPHER("DHE-RSA-AES256-SHA256", S2N_GNUTLS_PRIORITY_PREFIX + ":+DHE-RSA:+AES-256-CBC:+SHA256", S2N_TLS12, True, True),
S2N_CIPHER("AES128-GCM-SHA256", S2N_GNUTLS_PRIORITY_PREFIX + ":+RSA:+AES-128-GCM:+AEAD", S2N_TLS12, True, True),
S2N_CIPHER("AES256-GCM-SHA384", S2N_GNUTLS_PRIORITY_PREFIX + ":+RSA:+AES-256-GCM:+AEAD", S2N_TLS12, True, True),
S2N_CIPHER("DHE-RSA-AES128-GCM-SHA256", S2N_GNUTLS_PRIORITY_PREFIX + ":+DHE-RSA:+AES-128-GCM:+AEAD", S2N_TLS12, True, True),
S2N_CIPHER("DHE-RSA-AES256-GCM-SHA384", S2N_GNUTLS_PRIORITY_PREFIX + ":+DHE-RSA:+AES-256-GCM:+AEAD", S2N_TLS12, True, True),
S2N_CIPHER("ECDHE-ECDSA-AES128-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-ECDSA:+AES-128-CBC:+SHA1", S2N_SSLv3, True, False),
S2N_CIPHER("ECDHE-ECDSA-AES256-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-ECDSA:+AES-256-CBC:+SHA1", S2N_SSLv3, True, False),
S2N_CIPHER("ECDHE-ECDSA-AES128-SHA256", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-ECDSA:+AES-128-CBC:+SHA256", S2N_TLS12, True, True),
S2N_CIPHER("ECDHE-ECDSA-AES256-SHA384", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-ECDSA:+AES-256-CBC:+SHA384", S2N_TLS12, True, True),
S2N_CIPHER("ECDHE-ECDSA-AES128-GCM-SHA256", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-ECDSA:+AES-128-GCM:+AEAD", S2N_TLS12, True, True),
S2N_CIPHER("ECDHE-ECDSA-AES256-GCM-SHA384", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-ECDSA:+AES-256-GCM:+AEAD", S2N_TLS12, True, True),
S2N_CIPHER("ECDHE-RSA-DES-CBC3-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-RSA:+3DES-CBC:+SHA1", S2N_SSLv3, False, False),
S2N_CIPHER("ECDHE-RSA-AES128-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-RSA:+AES-128-CBC:+SHA1", S2N_SSLv3, True, False),
S2N_CIPHER("ECDHE-RSA-AES256-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-RSA:+AES-256-CBC:+SHA1", S2N_SSLv3, True, False),
S2N_CIPHER("ECDHE-RSA-RC4-SHA", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-RSA:+ARCFOUR-128:+SHA1", S2N_SSLv3, False, False),
S2N_CIPHER("ECDHE-RSA-AES128-SHA256", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-RSA:+AES-128-CBC:+SHA256", S2N_TLS12, True, True),
S2N_CIPHER("ECDHE-RSA-AES256-SHA384", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-RSA:+AES-256-CBC:+SHA384", S2N_TLS12, True, True),
S2N_CIPHER("ECDHE-RSA-AES128-GCM-SHA256", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-RSA:+AES-128-GCM:+AEAD", S2N_TLS12, True, True),
S2N_CIPHER("ECDHE-RSA-AES256-GCM-SHA384", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-RSA:+AES-256-GCM:+AEAD", S2N_TLS12, True, True),
S2N_CIPHER("ECDHE-RSA-CHACHA20-POLY1305", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-RSA:+CHACHA20-POLY1305:+AEAD", S2N_TLS12, True, False),
S2N_CIPHER("ECDHE-ECDSA-CHACHA20-POLY1305", S2N_GNUTLS_PRIORITY_PREFIX + ":+ECDHE-ECDSA:+CHACHA20-POLY1305:+AEAD", S2N_TLS12, True, False),
S2N_CIPHER("DHE-RSA-CHACHA20-POLY1305", S2N_GNUTLS_PRIORITY_PREFIX + ":+DHE-RSA:+CHACHA20-POLY1305:+AEAD", S2N_TLS12, True, False),
]
# Expected preferences for SignatureAlgorithms in GnuTLS priority string format
# See https://github.com/awslabs/s2n/blob/master/tls/s2n_tls_digest_preferences.h
EXPECTED_RSA_SIGNATURE_ALGORITHM_PREFS = [
"SIGN-RSA-SHA256",
"SIGN-RSA-SHA384",
"SIGN-RSA-SHA512",
"SIGN-RSA-SHA224",
"SIGN-RSA-SHA1",
]
EXPECTED_ECDSA_SIGNATURE_ALGORITHM_PREFS = [
"SIGN-ECDSA-SHA256",
"SIGN-ECDSA-SHA384",
"SIGN-ECDSA-SHA512",
"SIGN-ECDSA-SHA224",
"SIGN-ECDSA-SHA1",
]
# Test ciphers to use when s2n built with Openssl 1.1.1 libcrypto. All ciphers should be available.
OPENSSL_1_1_1_TEST_CIPHERS = ALL_TEST_CIPHERS
# Test ciphers to use when s2n is built with Openssl 1.0.2 libcrypto. 1.0.2 does not have the
# ChaCha20-Poly1305 cipher.
OPENSSL_1_0_2_TEST_CIPHERS = list(filter(lambda x: "CHACHA20" not in x.openssl_name, ALL_TEST_CIPHERS))
# Test ciphers to use when s2n is built with Openssl 1.0.2 libcrypto that is linked with a FIPS module.
OPENSSL_1_0_2_FIPS_TEST_CIPHERS = list(filter(lambda x: x.openssl_fips_compatible == True, ALL_TEST_CIPHERS))
# Test ciphers to use when s2n is built with LibreSSL libcrypto. s2n does not implement the
# ChaCha20-Poly1305 cipher offered by LibreSSL.
LIBRESSL_TEST_CIPHERS = list(filter(lambda x: "CHACHA20" not in x.openssl_name, ALL_TEST_CIPHERS))
# Dictionary to look up ciphers to use by libcrypto s2n is built with.
# Libcrypto string will be an argument to test scripts.
S2N_LIBCRYPTO_TO_TEST_CIPHERS = {
"openssl-1.1.1" : OPENSSL_1_1_1_TEST_CIPHERS,
"openssl-1.0.2" : OPENSSL_1_0_2_TEST_CIPHERS,
"openssl-1.0.2-fips" : OPENSSL_1_0_2_FIPS_TEST_CIPHERS,
"libressl" : LIBRESSL_TEST_CIPHERS,
}
S2N_PROTO_VERS_TO_STR = {
S2N_SSLv3 : "SSLv3",
S2N_TLS10 : "TLSv1.0",
S2N_TLS11 : "TLSv1.1",
S2N_TLS12 : "TLSv1.2",
S2N_TLS13 : "TLSv1.3",
}
S2N_PROTO_VERS_TO_GNUTLS = {
S2N_SSLv3 : "VERS-SSL3.0",
S2N_TLS10 : "VERS-TLS1.0",
S2N_TLS11 : "VERS-TLS1.1",
S2N_TLS12 : "VERS-TLS1.2",
S2N_TLS13 : "VERS-TLS1.3",
}
TEST_CERT_DIRECTORY="../pems/"
TEST_RSA_CERT=TEST_CERT_DIRECTORY + "rsa_2048_sha256_wildcard_cert.pem"
TEST_RSA_KEY=TEST_CERT_DIRECTORY + "rsa_2048_sha256_wildcard_key.pem"
TEST_ECDSA_CERT=TEST_CERT_DIRECTORY + "ecdsa_p384_pkcs1_cert.pem"
TEST_ECDSA_KEY=TEST_CERT_DIRECTORY + "ecdsa_p384_pkcs1_key.pem"
TEST_DH_PARAMS=TEST_CERT_DIRECTORY + "dhparams_2048.pem"
# cert, key, and ocsp response for OCSP stapling tests
TEST_OCSP_CERT_DIRECTORY="../pems/ocsp/"
TEST_OCSP_CERT=TEST_OCSP_CERT_DIRECTORY + "server_cert.pem"
TEST_OCSP_KEY=TEST_OCSP_CERT_DIRECTORY + "server_key.pem"
TEST_OCSP_RESPONSE_FILE=TEST_OCSP_CERT_DIRECTORY + "ocsp_response.der"
TEST_OCSP_ECDSA_CERT=TEST_OCSP_CERT_DIRECTORY + "server_ecdsa_cert.pem"
TEST_OCSP_ECDSA_KEY=TEST_OCSP_CERT_DIRECTORY + "server_ecdsa_key.pem"
TEST_OCSP_ECDSA_RESPONSE_FILE=TEST_OCSP_CERT_DIRECTORY + "ocsp_ecdsa_response.der"
DEFAULT_CLIENT_CERT_PATH = TEST_CERT_DIRECTORY + "rsa_2048_sha256_client_cert.pem"
DEFAULT_CLIENT_KEY_PATH = TEST_CERT_DIRECTORY + "rsa_2048_sha256_client_key.pem"
TEST_SNI_CERT_DIRECTORY="../pems/sni/"
# Server certificates used to test matching domain names client with server_name
# ( cert_path, private_key_path, domains[] )
SNI_CERTS = {
"alligator" : ( TEST_SNI_CERT_DIRECTORY + "alligator_cert.pem", TEST_SNI_CERT_DIRECTORY + "alligator_key.pem",
["www.alligator.com"]),
"second_alligator_rsa" : ( TEST_SNI_CERT_DIRECTORY + "second_alligator_rsa_cert.pem", TEST_SNI_CERT_DIRECTORY + "second_alligator_rsa_key.pem",
["www.alligator.com"]),
"alligator_ecdsa" : ( TEST_SNI_CERT_DIRECTORY + "alligator_ecdsa_cert.pem", TEST_SNI_CERT_DIRECTORY +
"alligator_ecdsa_key.pem", ["www.alligator.com"]),
"beaver" : ( TEST_SNI_CERT_DIRECTORY + "beaver_cert.pem", TEST_SNI_CERT_DIRECTORY + "beaver_key.pem",
["www.beaver.com"]),
"many_animals" : (TEST_SNI_CERT_DIRECTORY + "many_animal_sans_rsa_cert.pem", TEST_SNI_CERT_DIRECTORY + "many_animal_sans_rsa_key.pem",
["www.catfish.com",
"www.dolphin.com",
"www.elephant.com",
"www.falcon.com",
"www.gorilla.com",
"www.horse.com",
"www.impala.com",
# "Simple hostname"
"Jackal",
"k.e.e.l.b.i.l.l.e.d.t.o.u.c.a.n",
# SAN on this cert is actually "ladybug.ladybug"
# Verify case insensitivity works as expected.
"LADYBUG.LADYBUG",
"com.penguin.macaroni"
]),
"narwhal_cn" : ( TEST_SNI_CERT_DIRECTORY + "narwhal_cn_cert.pem", TEST_SNI_CERT_DIRECTORY + "narwhal_cn_key.pem",
["www.narwhal.com"]),
"octopus_cn_platypus_san" : ( TEST_SNI_CERT_DIRECTORY + "octopus_cn_platypus_san_cert.pem", TEST_SNI_CERT_DIRECTORY
+ "octopus_cn_platypus_san_key.pem", ["www.platypus.com"]),
"quail_cn_rattlesnake_cn" : ( TEST_SNI_CERT_DIRECTORY + "quail_cn_rattlesnake_cn_cert.pem", TEST_SNI_CERT_DIRECTORY
+ "quail_cn_rattlesnake_cn_key.pem", ["www.quail.com", "www.rattlesnake.com"]),
"many_animals_mixed_case" : (TEST_SNI_CERT_DIRECTORY + "many_animal_sans_mixed_case_rsa_cert.pem", TEST_SNI_CERT_DIRECTORY + "many_animal_sans_mixed_case_rsa_key.pem",
["alligator.com",
"beaver.com",
"catFish.com",
"WWW.dolphin.COM",
"www.ELEPHANT.com",
"www.Falcon.Com",
"WWW.gorilla.COM",
"www.horse.com",
"WWW.IMPALA.COM",
"WwW.jAcKaL.cOm"]),
"embedded_wildcard" : ( TEST_SNI_CERT_DIRECTORY + "embedded_wildcard_rsa_cert.pem", TEST_SNI_CERT_DIRECTORY
+ "embedded_wildcard_rsa_key.pem", ["www.labelstart*labelend.com"]),
"non_empty_label_wildcard" : ( TEST_SNI_CERT_DIRECTORY + "non_empty_label_wildcard_rsa_cert.pem", TEST_SNI_CERT_DIRECTORY
+ "non_empty_label_wildcard_rsa_key.pem", ["WILD*.middle.end"]),
"trailing_wildcard" : ( TEST_SNI_CERT_DIRECTORY + "trailing_wildcard_rsa_cert.pem", TEST_SNI_CERT_DIRECTORY
+ "trailing_wildcard_rsa_key.pem", ["the.prefix.*"]),
"wildcard_insect" : ( TEST_SNI_CERT_DIRECTORY + "wildcard_insect_rsa_cert.pem", TEST_SNI_CERT_DIRECTORY
+ "wildcard_insect_rsa_key.pem",
["ant.insect.hexapod",
"BEE.insect.hexapod",
"wasp.INSECT.hexapod",
"butterfly.insect.hexapod",
]),
"termite" : ( TEST_SNI_CERT_DIRECTORY + "termite_rsa_cert.pem", TEST_SNI_CERT_DIRECTORY + "termite_rsa_key.pem",
[ "termite.insect.hexapod" ]),
"underwing" : ( TEST_SNI_CERT_DIRECTORY + "underwing_ecdsa_cert.pem", TEST_SNI_CERT_DIRECTORY + "underwing_ecdsa_key.pem",
[ "underwing.insect.hexapod" ])
}
# Test cases for certificate selection.
# Test inputs: server certificates to load into s2nd, client SNI and capabilities, outputs are selected server cert
# and negotiated cipher.
MultiCertTest = collections.namedtuple('MultiCertTest', 'description server_certs client_sni client_ciphers expected_cert expect_matching_hostname')
MULTI_CERT_TEST_CASES= [
MultiCertTest(
description="Test basic SNI match for default cert.",
server_certs=[SNI_CERTS["alligator"], SNI_CERTS["beaver"], SNI_CERTS["alligator_ecdsa"]],
client_sni="www.alligator.com",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["alligator"],
expect_matching_hostname=True),
MultiCertTest(
description="Test basic SNI matches for non-default cert.",
server_certs=[SNI_CERTS["alligator"], SNI_CERTS["beaver"], SNI_CERTS["alligator_ecdsa"]],
client_sni="www.beaver.com",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["beaver"],
expect_matching_hostname=True),
MultiCertTest(
description="Test default cert is selected when there are no SNI matches.",
server_certs=[SNI_CERTS["alligator"], SNI_CERTS["beaver"], SNI_CERTS["alligator_ecdsa"]],
client_sni="not.a.match",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["alligator"],
expect_matching_hostname=False),
MultiCertTest(
description="Test default cert is selected when no SNI is sent.",
server_certs=[SNI_CERTS["alligator"], SNI_CERTS["beaver"], SNI_CERTS["alligator_ecdsa"]],
client_sni=None,
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["alligator"],
expect_matching_hostname=False),
MultiCertTest(
description="Test ECDSA cert is selected with matching domain and client only supports ECDSA.",
server_certs=[SNI_CERTS["alligator"], SNI_CERTS["beaver"], SNI_CERTS["alligator_ecdsa"]],
client_sni="www.alligator.com",
client_ciphers="ECDHE-ECDSA-AES128-SHA",
expected_cert=SNI_CERTS["alligator_ecdsa"],
expect_matching_hostname=True),
MultiCertTest(
description="Test ECDSA cert selected when: domain matches for both ECDSA+RSA, client supports ECDSA+RSA "\
" ciphers, ECDSA is higher priority on server side.",
server_certs=[SNI_CERTS["alligator"], SNI_CERTS["beaver"], SNI_CERTS["alligator_ecdsa"]],
client_sni="www.alligator.com",
client_ciphers="ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA",
expected_cert=SNI_CERTS["alligator_ecdsa"],
expect_matching_hostname=True),
MultiCertTest(
description="Test domain match is highest priority. Domain matching ECDSA certificate should be selected"\
" even if domain mismatched RSA certificate is available and RSA cipher is higher priority.",
server_certs=[SNI_CERTS["beaver"], SNI_CERTS["alligator_ecdsa"]],
client_sni="www.alligator.com",
client_ciphers="ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA256",
expected_cert=SNI_CERTS["alligator_ecdsa"],
expect_matching_hostname=True),
MultiCertTest(
description="Test certificate with single SAN entry matching is selected before mismatched multi SAN cert",
server_certs=[SNI_CERTS["many_animals"] , SNI_CERTS["alligator"]],
client_sni="www.alligator.com",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["alligator"],
expect_matching_hostname=True),
# many_animals was the first cert added
MultiCertTest(
description="Test default cert with multiple sans and no SNI sent.",
server_certs=[SNI_CERTS["many_animals"] , SNI_CERTS["alligator"]],
client_sni=None,
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["many_animals"],
expect_matching_hostname=False),
MultiCertTest(
description="Test certificate match with CN",
server_certs=[ SNI_CERTS["alligator"], SNI_CERTS["narwhal_cn"] ],
client_sni="www.narwhal.com",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["narwhal_cn"],
expect_matching_hostname=True),
MultiCertTest(
description="Test SAN+CN cert can match using SAN.",
server_certs=[ SNI_CERTS["alligator"], SNI_CERTS["octopus_cn_platypus_san"] ],
client_sni="www.platypus.com",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["octopus_cn_platypus_san"],
expect_matching_hostname=True),
MultiCertTest(
description="Test that CN is not considered for matching if the certificate contains SANs.",
server_certs=[ SNI_CERTS["alligator"], SNI_CERTS["octopus_cn_platypus_san"] ],
client_sni="www.octopus.com",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["alligator"],
expect_matching_hostname=False),
MultiCertTest(
description="Test certificate with multiple CNs can match.",
server_certs=[ SNI_CERTS["alligator"], SNI_CERTS["quail_cn_rattlesnake_cn"] ],
client_sni="www.rattlesnake.com",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["quail_cn_rattlesnake_cn"],
expect_matching_hostname=False),
MultiCertTest(
description="Test cert with embedded wildcard is not treated as a wildcard.",
server_certs=[ SNI_CERTS["alligator"], SNI_CERTS["embedded_wildcard"] ],
client_sni="www.labelstartWILDCARDlabelend.com",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["alligator"],
expect_matching_hostname=False),
MultiCertTest(
description="Test non empty left label wildcard cert is not treated as a wildcard."\
" s2n only supports wildcards with a single * as the left label",
server_certs=[ SNI_CERTS["alligator"], SNI_CERTS["non_empty_label_wildcard"] ],
client_sni="WILDCARD.middle.end",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["alligator"],
expect_matching_hostname=False),
MultiCertTest(
description="Test cert with trailing * is not treated as wildcard.",
server_certs=[ SNI_CERTS["alligator"], SNI_CERTS["trailing_wildcard"] ],
client_sni="the.prefix.WILDCARD",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["alligator"],
expect_matching_hostname=False),
MultiCertTest(
description="Certificate with exact sni match(termite.insect.hexapod) is preferred over wildcard"\
" *.insect.hexapod",
server_certs=[ SNI_CERTS["wildcard_insect"], SNI_CERTS["alligator"], SNI_CERTS["termite"] ],
client_sni="termite.insect.hexapod",
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["termite"],
expect_matching_hostname=True),
MultiCertTest(
description="ECDSA Certificate with exact sni match(underwing.insect.hexapod) is preferred over RSA wildcard"\
" *.insect.hexapod when RSA ciphers are higher priority than ECDSA in sever preferences.",
server_certs=[ SNI_CERTS["wildcard_insect"], SNI_CERTS["alligator"], SNI_CERTS["underwing"] ],
client_sni="underwing.insect.hexapod",
# AES128-GCM-SHA256 is prioritized above ECDHE-ECDSA-AES128-SHA in
# the "test_all" server cipher preferences
client_ciphers="AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA",
expected_cert=SNI_CERTS["underwing"],
expect_matching_hostname=True),
MultiCertTest(
description="Firstly loaded matching certificate should be selected among certificates with the same domain names",
server_certs=[ SNI_CERTS["alligator"], SNI_CERTS["second_alligator_rsa"] ],
client_sni="www.alligator.com",
client_ciphers="AES128-GCM-SHA256",
expected_cert=SNI_CERTS["alligator"],
expect_matching_hostname=True),
MultiCertTest(
description="Firstly loaded matching certificate should be selected among matching+non-matching certificates",
server_certs=[ SNI_CERTS["beaver"], SNI_CERTS["alligator"], SNI_CERTS["second_alligator_rsa"] ],
client_sni="www.alligator.com",
client_ciphers="AES128-GCM-SHA256",
expected_cert=SNI_CERTS["alligator"],
expect_matching_hostname=True)]
# Positive test for wildcard matches
MULTI_CERT_TEST_CASES.extend([MultiCertTest(
description="Test wildcard *.insect.hexapod matches subdomain " + specific_insect_domain,
server_certs=[ SNI_CERTS["alligator"], SNI_CERTS["wildcard_insect"] ],
client_sni=specific_insect_domain,
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["wildcard_insect"],
expect_matching_hostname=True) for specific_insect_domain in SNI_CERTS["wildcard_insect"][2]])
# Positive test for basic SAN matches
MULTI_CERT_TEST_CASES.extend([MultiCertTest(
description="Match SAN " + many_animal_domain + " in many_animals cert",
server_certs= [ SNI_CERTS["alligator"], SNI_CERTS["many_animals"] ],
client_sni=many_animal_domain,
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["many_animals"],
expect_matching_hostname=True) for many_animal_domain in SNI_CERTS["many_animals"][2]])
# Positive test for mixed cased SAN matches
MULTI_CERT_TEST_CASES.extend([MultiCertTest(
description="Match SAN " + many_animal_domain + " in many_animals_mixed_case cert",
server_certs= [SNI_CERTS["alligator"] , SNI_CERTS["many_animals_mixed_case"]],
client_sni=many_animal_domain,
client_ciphers="ECDHE-RSA-AES128-SHA",
expected_cert=SNI_CERTS["many_animals_mixed_case"],
expect_matching_hostname=True) for many_animal_domain in SNI_CERTS["many_animals_mixed_case"][2]])
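# --- Illustrative helpers (not part of the original constants module) ---
# Sketch of how a test harness might combine the tables above: pick the ciphers
# a given libcrypto build supports and pin a cipher's GnuTLS priority string to
# one protocol version. Both helper names are hypothetical.
def example_ciphers_for(libcrypto="openssl-1.1.1", vers=S2N_TLS12):
    """Ciphers for the given libcrypto whose minimum TLS version does not exceed vers."""
    return [c for c in S2N_LIBCRYPTO_TO_TEST_CIPHERS[libcrypto] if c.min_tls_vers <= vers]

def example_gnutls_priority(cipher, protocol_version):
    """Full GnuTLS priority string for one cipher pinned to one protocol version."""
    return cipher.gnutls_priority_str + ":+" + S2N_PROTO_VERS_TO_GNUTLS[protocol_version]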
|
raycoll/s2n
|
tests/integration/s2n_test_constants.py
|
Python
|
apache-2.0
| 22,191
|
[
"Octopus"
] |
f3eb5f057ec7f460b4fd7d6f8ca96d44e4999a053677ef55c5278b024fdd3a8c
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2008-2012 Wolfgang Rohdewald <wolfgang@rohdewald.de>
kajongg is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from __future__ import print_function
import logging, socket, logging.handlers, traceback, os, datetime, shutil
import time
from locale import getpreferredencoding
from sys import stdout
try:
STDOUTENCODING = stdout.encoding
except AttributeError:
STDOUTENCODING = None
if not STDOUTENCODING:
STDOUTENCODING = getpreferredencoding()
SERVERMARK = '&&SERVER&&'
# util must not import twisted or we need to change kajongg.py
from common import Options, Internal, Debug
if Internal.haveKDE:
from kde import i18n, i18nc, Sorry, Information, NoPrompt
else:
# a server might not have KDE4
def i18n(englishIn, *args):
"""dummy for server"""
result = englishIn
if '%' in result:
for idx, arg in enumerate(args):
arg = xToUtf8(arg)
result = result.replace('%' + str(idx+1), unicode(arg))
if '%' in result:
for ignore in ['numid', 'filename']:
result = result.replace('<%s>' % ignore, '')
result = result.replace('</%s>' % ignore, '')
return result
def i18nc(dummyContext, englishIn, *args):
"""dummy for server"""
return i18n(englishIn, *args)
if not Internal.isServer:
from kde import KGlobal
else:
class PrintFirstArg(object):
"""just print the first argument"""
def __init__(self, *args):
kprint(args[0])
Sorry = Information = PrintFirstArg # pylint: disable=C0103
def appdataDir():
"""the per user directory with kajongg application information like the database"""
if Internal.isServer:
# the server might or might not have KDE installed, so to be on
# the safe side we use our own .kajonggserver directory
# the following code moves an existing kajonggserver.db to .kajonggserver
# but only if .kajonggserver does not yet exist
kdehome = os.environ.get('KDEHOME', '~/.kde')
oldPath = os.path.expanduser(kdehome + '/share/apps/kajongg/kajonggserver.db')
if not os.path.exists(oldPath):
oldPath = os.path.expanduser('~/.kde4/share/apps/kajongg/kajonggserver.db')
newPath = os.path.expanduser('~/.kajonggserver/')
if os.path.exists(oldPath) and not os.path.exists(newPath):
# upgrading an old kajonggserver installation
os.makedirs(newPath)
shutil.move(oldPath, newPath)
logInfo('moved %s to %s' % (oldPath, newPath))
if not os.path.exists(newPath):
try:
os.makedirs(newPath)
except OSError:
pass
return newPath
else:
result = os.path.dirname(unicode(KGlobal.dirs().locateLocal("appdata", ""))) + '/'
return result
def cacheDir():
"""the cache directory for this user"""
if Internal.isServer:
result = os.path.join(appdataDir(), 'cache')
else:
result = os.path.dirname(unicode(KGlobal.dirs().locateLocal("cache", "")))
result = os.path.join(result, 'kajongg')
if not os.path.exists(result):
try:
os.makedirs(result)
except OSError:
pass
return result
ENGLISHDICT = {}
LOGGER = None
def english(i18nstring):
"""translate back from local language"""
return ENGLISHDICT.get(i18nstring, i18nstring)
def translateServerMessage(msg):
"""because a PB exception can not pass a list of arguments, the server
encodes them into one string using SERVERMARK as separator. That
string is always english. Here we unpack and translate it into the
client language."""
if msg.find(SERVERMARK) >= 0:
return m18n(*tuple(msg.split(SERVERMARK)[1:-1]))
return msg
def stack(msg, limit=6):
"""returns a list of lines with msg as prefix"""
result = []
for idx, values in enumerate(traceback.extract_stack(limit=limit+2)[:-2]):
fileName, line, function, txt = values
result.append('%2d: %s %s/%d %s: %s' % (idx, msg, os.path.splitext(os.path.basename(fileName))[0],
line, function, txt))
return result
def initLog(logName):
"""init the loggers"""
global LOGGER # pylint: disable=W0603
LOGGER = logging.getLogger(logName)
try:
handler = logging.handlers.SysLogHandler('/dev/log')
except (AttributeError, socket.error):
handler = logging.handlers.RotatingFileHandler('kajongg.log', maxBytes=100000000, backupCount=10)
LOGGER.addHandler(handler)
LOGGER.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(name)s: %(levelname)s %(message)s")
handler.setFormatter(formatter)
def __logUnicodeMessage(prio, msg):
"""if we can encode the unicode msg to ascii, do so.
Otherwise convert the unicode object into an utf-8 encoded
str object.
The logger module would log the unicode object with the
marker feff at the beginning of every message, we do not want that."""
msg = msg.encode(getpreferredencoding(), 'ignore')[:4000]
kprint(msg)
LOGGER.log(prio, msg)
def xToUtf8(msg, args=None):
"""makes sure msg and all args are utf-8"""
if isinstance(msg, unicode):
msg = msg.encode('utf-8')
if args:
args = list(args[:])
for idx, arg in enumerate(args):
if isinstance(arg, unicode):
args[idx] = arg.encode('utf-8')
elif not isinstance(arg, str):
args[idx] = str(arg)
return msg, args
else:
return msg
def logMessage(msg, prio, showDialog, showStack=False, withGamePrefix=True):
"""writes info message to log and to stdout"""
if isinstance(msg, Exception):
msg = ' '.join(unicode(x.decode(getpreferredencoding()) \
if isinstance(x, str) else unicode(x)) for x in msg.args if x is not None)
if isinstance(msg, str):
msg = unicode(msg, 'utf-8')
elif not isinstance(msg, unicode):
msg = unicode(str(msg), 'utf-8')
msg = translateServerMessage(msg)
logMsg = msg
if withGamePrefix and Internal.logPrefix:
if Debug.process:
logMsg = '%s%d: %s' % (Internal.logPrefix, os.getpid(), msg)
else:
logMsg = '%s: %s' % (Internal.logPrefix, msg)
__logUnicodeMessage(prio, logMsg)
if showStack:
for line in traceback.format_stack()[2:-3]:
if not 'logException' in line:
__logUnicodeMessage(prio, ' ' + line.strip())
if showDialog:
if prio == logging.INFO:
return Information(msg)
else:
return Sorry(msg)
return NoPrompt(msg)
def logInfo(msg, showDialog=False, withGamePrefix=True):
"""log an info message"""
return logMessage(msg, logging.INFO, showDialog, withGamePrefix=withGamePrefix)
def logError(msg, withGamePrefix=True):
"""log an error message"""
return logMessage(msg, logging.ERROR, True, showStack=True, withGamePrefix=withGamePrefix)
def logDebug(msg, showStack=False, withGamePrefix=True, btIndent=None):
"""log this message and show it on stdout
if btIndent is set, message is indented by depth(backtrace)-btIndent"""
if btIndent:
depth = traceback.extract_stack()
msg = ' ' * (len(depth) - btIndent) + msg
return logMessage(msg, logging.DEBUG, False, showStack=showStack, withGamePrefix=withGamePrefix)
def logWarning(msg, withGamePrefix=True):
"""log this message and show it on stdout"""
return logMessage(msg, logging.WARNING, True, withGamePrefix=withGamePrefix)
def logException(exception, withGamePrefix=True):
"""logs error message and re-raises exception"""
logMessage(exception, logging.ERROR, True, showStack=True, withGamePrefix=withGamePrefix)
if isinstance(exception, (str, unicode)):
msg = exception.encode('utf-8', 'replace')
exception = Exception(msg)
raise exception
def m18n(englishText, *args):
"""wrapper around i18n converting QString into a Python unicode string"""
englishText = xToUtf8(englishText)
result = unicode(i18n(englishText, *args))
if not args:
ENGLISHDICT[result] = englishText
return result
def m18nc(context, englishText, *args):
"""wrapper around i18nc converting QString into a Python unicode string"""
englishText = xToUtf8(englishText)
result = unicode(i18nc(context, englishText, *args))
if not args:
ENGLISHDICT[result] = englishText
return result
def m18nE(englishText):
"""use this if you want to get the english text right now but still have the string translated"""
return englishText
def m18ncE(dummyContext, englishText):
"""use this if you want to get the english text right now but still have the string translated"""
return englishText
def socketName():
"""client and server process use this socket to talk to each other"""
serverDir = os.path.expanduser('~/.kajonggserver')
if not os.path.exists(serverDir):
appdataDir() # allocate the directory and possibly move old databases there
if Options.socket:
return Options.socket
else:
return os.path.join(serverDir, 'socket')
def which(program):
"""returns the full path for the binary or None"""
for path in os.environ['PATH'].split(os.pathsep):
fullName = os.path.join(path, program)
if os.path.exists(fullName):
return fullName
def removeIfExists(filename):
"""remove file if it exists. Returns True if it existed"""
exists = os.path.exists(filename)
if exists:
os.remove(filename)
return exists
def uniqueList(seq):
"""makes list content unique, keeping only the first occurrence"""
seen = set()
seen_add = seen.add
return [ x for x in seq if x not in seen and not seen_add(x)]
import gc
def _getr(slist, olist, seen):
"""Recursively expand slist's objects into olist, using seen to track
already processed objects."""
for elment in slist:
if id(elment) in seen:
continue
seen[id(elment)] = None
olist.append(elment)
tlist = gc.get_referents(elment)
if tlist:
_getr(tlist, olist, seen)
# The public function.
def get_all_objects():
"""Return a list of all live Python objects, not including the
list itself. This may be used in Duration to show where
objects are leaking."""
gc.collect()
gcl = gc.get_objects()
olist = []
seen = {}
# Just in case:
seen[id(gcl)] = None
seen[id(olist)] = None
seen[id(seen)] = None
# _getr does the real work.
_getr(gcl, olist, seen)
return olist
def kprint(*args, **kwargs):
"""a wrapper around print, always encoding unicode to something sensible"""
newArgs = []
for arg in args:
try:
arg = arg.decode('utf-8')
except BaseException:
arg = repr(arg)
arg = arg.encode(STDOUTENCODING, 'ignore')
newArgs.append(arg)
# we need * magic: pylint: disable=W0142
try:
print(*newArgs, sep=kwargs.get('sep', ' '), end=kwargs.get('end', '\n'), file=kwargs.get('file'))
except IOError as exception:
# very big konsole, busy system: sometimes Python says
# resource temporarily not available
time.sleep(0.1)
print(exception)
print(*newArgs, sep=kwargs.get('sep', ' '), end=kwargs.get('end', '\n'), file=kwargs.get('file'))
class Duration(object):
"""a helper class for checking code execution duration"""
def __init__(self, name, threshold=None, bug=False):
"""name describes where in the source we are checking
threshold in seconds: do not warn below
if bug is True, throw an exception if threshold is exceeded"""
self.name = name
self.threshold = threshold or 1.0
self.bug = bug
self.__start = datetime.datetime.now()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trback):
"""now check time passed"""
diff = datetime.datetime.now() - self.__start
if diff > datetime.timedelta(seconds=self.threshold):
msg = '%s took %d.%06d seconds' % (self.name, diff.seconds, diff.microseconds)
if self.bug:
logException(msg)
else:
logDebug(msg)
def checkMemory():
"""as the name says"""
#pylint: disable=R0912
if not Debug.gc:
return
gc.set_threshold( 0 )
gc.set_debug( gc.DEBUG_LEAK )
gc.enable()
logDebug('collecting {{{')
gc.collect() # we want to eliminate all output
logDebug('}}} done')
# code like this may help to find specific things
if True:
interesting = ('Client', 'Player', 'Game')
for obj in gc.garbage:
if hasattr(obj, 'cell_contents'):
obj = obj.cell_contents
if not any(x in repr(obj) for x in interesting):
continue
for referrer in gc.get_referrers(obj):
if referrer is gc.garbage:
continue
if hasattr(referrer, 'cell_contents'):
referrer = referrer.cell_contents
if referrer.__class__.__name__ in interesting:
for referent in gc.get_referents(referrer):
logDebug('%s refers to %s' % (referrer, referent))
else:
logDebug('referrer of %s/%s is: id=%s type=%s %s' % (
type(obj), obj, id(referrer), type(referrer), referrer))
logDebug('unreachable:%s' % gc.collect())
gc.set_debug(0)
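# --- Usage sketch (not part of the original module) ---
# Duration is a context manager: it logs a debug message (or raises via
# logException when bug=True) only if the wrapped block exceeds its threshold.
# The function name and sleep value below are purely illustrative.
def _exampleDurationUsage():
    with Duration('example block', threshold=0.5):
        time.sleep(0.1)  # well under the threshold, so nothing is logged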
|
ospalh/kajongg-fork
|
src/util.py
|
Python
|
gpl-2.0
| 14,387
|
[
"FEFF"
] |
bcd7a201ccdc496714fda536d77866c45c9b1201f0b5481589faae1a8aa26c56
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
from __future__ import absolute_import
import math
import numpy as np
import itertools as it
from psi4 import core
# Import driver helpers
from psi4.driver import p4util
from psi4.driver import constants
from psi4.driver.p4util.exceptions import *
### Math helper functions
def nCr(n, r):
f = math.factorial
return f(n) / f(r) / f(n-r)
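# Illustrative alternative (not part of the original driver): nCr above uses
# true division, so under Python 3 it returns a float; an integer-valued
# variant (hypothetical helper name) would use floor division instead.
def nCr_int(n, r):
    f = math.factorial
    return f(n) // (f(r) * f(n - r))  # e.g. nCr_int(5, 2) == 10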
### Begin CBS gufunc data
def _sum_cluster_ptype_data(ptype, ptype_dict, compute_list, fragment_slice_dict, fragment_size_dict, ret, vmfc=False):
"""
Sums gradient and hessian data from compute_list.
compute_list comes in as a tuple(frag, basis)
"""
if len(compute_list) == 0:
return
sign = 1
# Do ptype
if ptype == 'gradient':
for fragn, basisn in compute_list:
start = 0
grad = np.asarray(ptype_dict[(fragn, basisn)])
if vmfc:
sign = ((-1) ** (n - len(fragn)))
for bas in basisn:
end = start + fragment_size_dict[bas]
ret[fragment_slice_dict[bas]] += sign * grad[start:end]
start += fragment_size_dict[bas]
elif ptype == 'hessian':
for fragn, basisn in compute_list:
hess = np.asarray(ptype_dict[(fragn, basisn)])
if vmfc:
raise Exception("VMFC for hessian NYI")
# Build up start and end slices
abs_start, rel_start = 0, 0
abs_slices, rel_slices = [], []
for bas in basisn:
rel_end = rel_start + 3 * fragment_size_dict[bas]
rel_slices.append(slice(rel_start, rel_end))
rel_start += 3 * fragment_size_dict[bas]
tmp_slice = fragment_slice_dict[bas]
abs_slices.append(slice(tmp_slice.start * 3, tmp_slice.stop * 3))
for abs_sl1, rel_sl1 in zip(abs_slices, rel_slices):
for abs_sl2, rel_sl2 in zip(abs_slices, rel_slices):
ret[abs_sl1, abs_sl2] += hess[rel_sl1, rel_sl2]
else:
raise KeyError("ptype can only be gradient or hessian How did you end up here?")
def _print_nbody_energy(energy_body_dict, header):
core.print_out("""\n ==> N-Body: %s energies <==\n\n""" % header)
core.print_out(""" n-Body Total Energy [Eh] I.E. [kcal/mol] Delta [kcal/mol]\n""")
previous_e = energy_body_dict[1]
nbody_range = list(energy_body_dict)
nbody_range.sort()
for n in nbody_range:
delta_e = (energy_body_dict[n] - previous_e)
delta_e_kcal = delta_e * constants.hartree2kcalmol
int_e_kcal = (energy_body_dict[n] - energy_body_dict[1]) * constants.hartree2kcalmol
core.print_out(""" %4s %20.12f %20.12f %20.12f\n""" %
(n, energy_body_dict[n], int_e_kcal, delta_e_kcal))
previous_e = energy_body_dict[n]
core.print_out("\n")
def nbody_gufunc(func, method_string, **kwargs):
"""
Computes the nbody interaction energy, gradient, or Hessian depending on input.
    This is a generalized universal function for computing interaction quantities.
:returns: *return type of func* |w--w| The interaction data.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| interaction data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.
:type func: function
:param func: ``energy`` || etc.
        Python function that accepts method_string and a molecule. Returns an
        energy, gradient, or Hessian as requested.
:type method_string: string
:param method_string: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, lowercase and usually unlabeled. Indicates the computational
method to be passed to func.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element of a tuple.
:type bsse_type: string or list
:param bsse_type: ``'cp'`` || ``['nocp', 'vmfc']`` || |dl| ``None`` |dr| || etc.
        Type of BSSE correction to compute: CP, NoCP, or VMFC. The result for the
        first entry in the list is the one returned by this function. When left at
        the default of ``None``, this n-body wrapper is not invoked.
:type max_nbody: int
:param max_nbody: ``3`` || etc.
        Maximum n-body to compute, cannot exceed the number of fragments in the molecule.
:type ptype: string
:param ptype: ``'energy'`` || ``'gradient'`` || ``'hessian'``
Type of the procedure passed in.
:type return_total_data: :ref:`boolean <op_py_boolean>`
:param return_total_data: ``'on'`` || |dl| ``'off'`` |dr|
If True returns the total data (energy/gradient/etc) of the system,
otherwise returns interaction data.
"""
### ==> Parse some kwargs <==
kwargs = p4util.kwargs_lower(kwargs)
return_wfn = kwargs.pop('return_wfn', False)
ptype = kwargs.pop('ptype', None)
return_total_data = kwargs.pop('return_total_data', False)
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
core.clean_variables()
if ptype not in ['energy', 'gradient', 'hessian']:
        raise ValidationError("""N-Body driver: The ptype '%s' is not recognized.""" % ptype)
# Figure out BSSE types
do_cp = False
do_nocp = False
do_vmfc = False
return_method = False
# Must be passed bsse_type
bsse_type_list = kwargs.pop('bsse_type')
if bsse_type_list is None:
raise ValidationError("N-Body GUFunc: Must pass a bsse_type")
if not isinstance(bsse_type_list, list):
bsse_type_list = [bsse_type_list]
for num, btype in enumerate(bsse_type_list):
if btype.lower() == 'cp':
do_cp = True
if (num == 0): return_method = 'cp'
elif btype.lower() == 'nocp':
do_nocp = True
if (num == 0): return_method = 'nocp'
elif btype.lower() == 'vmfc':
do_vmfc = True
if (num == 0): return_method = 'vmfc'
else:
raise ValidationError("N-Body GUFunc: bsse_type '%s' is not recognized" % btype.lower())
max_nbody = kwargs.get('max_nbody', -1)
max_frag = molecule.nfragments()
if max_nbody == -1:
max_nbody = molecule.nfragments()
else:
max_nbody = min(max_nbody, max_frag)
# What levels do we need?
nbody_range = range(1, max_nbody + 1)
fragment_range = range(1, max_frag + 1)
# Flip this off for now, needs more testing
# If we are doing CP lets save them integrals
#if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
# # Set to save RI integrals for repeated full-basis computations
# ri_ints_io = core.get_global_option('DF_INTS_IO')
# # inquire if above at all applies to dfmp2 or just scf
# core.set_global_option('DF_INTS_IO', 'SAVE')
# psioh = core.IOManager.shared_object()
# psioh.set_specific_retention(97, True)
bsse_str = bsse_type_list[0]
    if len(bsse_type_list) > 1:
bsse_str = str(bsse_type_list)
core.print_out("\n\n")
core.print_out(" ===> N-Body Interaction Abacus <===\n")
core.print_out(" BSSE Treatment: %s\n" % bsse_str)
cp_compute_list = {x:set() for x in nbody_range}
nocp_compute_list = {x:set() for x in nbody_range}
vmfc_compute_list = {x:set() for x in nbody_range}
vmfc_level_list = {x:set() for x in nbody_range} # Need to sum something slightly different
# Build up compute sets
if do_cp:
# Everything is in dimer basis
basis_tuple = tuple(fragment_range)
for nbody in nbody_range:
for x in it.combinations(fragment_range, nbody):
cp_compute_list[nbody].add( (x, basis_tuple) )
if do_nocp:
# Everything in monomer basis
for nbody in nbody_range:
for x in it.combinations(fragment_range, nbody):
nocp_compute_list[nbody].add( (x, x) )
if do_vmfc:
# Like a CP for all combinations of pairs or greater
for nbody in nbody_range:
for cp_combos in it.combinations(fragment_range, nbody):
basis_tuple = tuple(cp_combos)
for interior_nbody in nbody_range:
for x in it.combinations(cp_combos, interior_nbody):
combo_tuple = (x, basis_tuple)
vmfc_compute_list[interior_nbody].add( combo_tuple )
vmfc_level_list[len(basis_tuple)].add( combo_tuple )
# Build a comprehensive compute_range
compute_list = {x:set() for x in nbody_range}
for n in nbody_range:
compute_list[n] |= cp_compute_list[n]
compute_list[n] |= nocp_compute_list[n]
compute_list[n] |= vmfc_compute_list[n]
core.print_out(" Number of %d-body computations: %d\n" % (n, len(compute_list[n])))
# Build size and slices dictionaries
fragment_size_dict = {frag: molecule.extract_subsets(frag).natom() for
frag in range(1, max_frag+1)}
start = 0
fragment_slice_dict = {}
for k, v in fragment_size_dict.items():
fragment_slice_dict[k] = slice(start, start + v)
start += v
molecule_total_atoms = sum(fragment_size_dict.values())
# Now compute the energies
energies_dict = {}
ptype_dict = {}
for n in compute_list.keys():
core.print_out("\n ==> N-Body: Now computing %d-body complexes <==\n\n" % n)
total = len(compute_list[n])
for num, pair in enumerate(compute_list[n]):
core.print_out("\n N-Body: Computing complex (%d/%d) with fragments %s in the basis of fragments %s.\n\n" %
(num + 1, total, str(pair[0]), str(pair[1])))
ghost = list(set(pair[1]) - set(pair[0]))
current_mol = molecule.extract_subsets(list(pair[0]), ghost)
ptype_dict[pair] = func(method_string, molecule=current_mol, **kwargs)
energies_dict[pair] = core.get_variable("CURRENT ENERGY")
core.print_out("\n N-Body: Complex Energy (fragments = %s, basis = %s: %20.14f)\n" %
(str(pair[0]), str(pair[1]), energies_dict[pair]))
# Flip this off for now, needs more testing
#if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
# core.set_global_option('DF_INTS_IO', 'LOAD')
core.clean()
# Final dictionaries
cp_energy_by_level = {n: 0.0 for n in nbody_range}
nocp_energy_by_level = {n: 0.0 for n in nbody_range}
cp_energy_body_dict = {n: 0.0 for n in nbody_range}
nocp_energy_body_dict = {n: 0.0 for n in nbody_range}
vmfc_energy_body_dict = {n: 0.0 for n in nbody_range}
# Build out ptype dictionaries if needed
if ptype != 'energy':
if ptype == 'gradient':
arr_shape = (molecule_total_atoms, 3)
elif ptype == 'hessian':
arr_shape = (molecule_total_atoms * 3, molecule_total_atoms * 3)
else:
raise KeyError("N-Body: ptype '%s' not recognized" % ptype)
cp_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}
nocp_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}
vmfc_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}
cp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
nocp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
vmfc_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
else:
cp_ptype_by_level, cp_ptype_body_dict = None, None
nocp_ptype_by_level, nocp_ptype_body_dict = None, None
vmfc_ptype_body_dict = None
# Sum up all of the levels
for n in nbody_range:
# Energy
cp_energy_by_level[n] = sum(energies_dict[v] for v in cp_compute_list[n])
nocp_energy_by_level[n] = sum(energies_dict[v] for v in nocp_compute_list[n])
# Special vmfc case
if n > 1:
vmfc_energy_body_dict[n] = vmfc_energy_body_dict[n - 1]
for tup in vmfc_level_list[n]:
vmfc_energy_body_dict[n] += ((-1) ** (n - len(tup[0]))) * energies_dict[tup]
# Do ptype
if ptype != 'energy':
_sum_cluster_ptype_data(ptype, ptype_dict, cp_compute_list[n],
fragment_slice_dict, fragment_size_dict,
cp_ptype_by_level[n])
_sum_cluster_ptype_data(ptype, ptype_dict, nocp_compute_list[n],
fragment_slice_dict, fragment_size_dict,
nocp_ptype_by_level[n])
_sum_cluster_ptype_data(ptype, ptype_dict, vmfc_level_list[n],
fragment_slice_dict, fragment_size_dict,
vmfc_ptype_by_level[n], vmfc=True)
# Compute cp energy and ptype
if do_cp:
for n in nbody_range:
if n == max_frag:
cp_energy_body_dict[n] = cp_energy_by_level[n]
if ptype != 'energy':
cp_ptype_body_dict[n][:] = cp_ptype_by_level[n]
continue
for k in range(1, n + 1):
take_nk = nCr(max_frag - k - 1, n - k)
sign = ((-1) ** (n - k))
value = cp_energy_by_level[k]
cp_energy_body_dict[n] += take_nk * sign * value
if ptype != 'energy':
value = cp_ptype_by_level[k]
cp_ptype_body_dict[n] += take_nk * sign * value
_print_nbody_energy(cp_energy_body_dict, "Counterpoise Corrected (CP)")
cp_interaction_energy = cp_energy_body_dict[max_nbody] - cp_energy_body_dict[1]
core.set_variable('Counterpoise Corrected Total Energy', cp_energy_body_dict[max_nbody])
core.set_variable('Counterpoise Corrected Interaction Energy', cp_interaction_energy)
for n in nbody_range[1:]:
var_key = 'CP-CORRECTED %d-BODY INTERACTION ENERGY' % n
core.set_variable(var_key, cp_energy_body_dict[n] - cp_energy_body_dict[1])
# Compute nocp energy and ptype
if do_nocp:
for n in nbody_range:
if n == max_frag:
nocp_energy_body_dict[n] = nocp_energy_by_level[n]
if ptype != 'energy':
nocp_ptype_body_dict[n][:] = nocp_ptype_by_level[n]
continue
for k in range(1, n + 1):
take_nk = nCr(max_frag - k - 1, n - k)
sign = ((-1) ** (n - k))
value = nocp_energy_by_level[k]
nocp_energy_body_dict[n] += take_nk * sign * value
if ptype != 'energy':
value = nocp_ptype_by_level[k]
nocp_ptype_body_dict[n] += take_nk * sign * value
_print_nbody_energy(nocp_energy_body_dict, "Non-Counterpoise Corrected (NoCP)")
nocp_interaction_energy = nocp_energy_body_dict[max_nbody] - nocp_energy_body_dict[1]
core.set_variable('Non-Counterpoise Corrected Total Energy', nocp_energy_body_dict[max_nbody])
core.set_variable('Non-Counterpoise Corrected Interaction Energy', nocp_interaction_energy)
for n in nbody_range[1:]:
var_key = 'NOCP-CORRECTED %d-BODY INTERACTION ENERGY' % n
core.set_variable(var_key, nocp_energy_body_dict[n] - nocp_energy_body_dict[1])
# Compute vmfc energy and ptype
if do_vmfc:
        _print_nbody_energy(vmfc_energy_body_dict, "Valiron-Mayer Function Counterpoise (VMFC)")
        vmfc_interaction_energy = vmfc_energy_body_dict[max_nbody] - vmfc_energy_body_dict[1]
        core.set_variable('Valiron-Mayer Function Counterpoise Total Energy', vmfc_energy_body_dict[max_nbody])
        core.set_variable('Valiron-Mayer Function Counterpoise Interaction Energy', vmfc_interaction_energy)
for n in nbody_range[1:]:
var_key = 'VMFC-CORRECTED %d-BODY INTERACTION ENERGY' % n
core.set_variable(var_key, vmfc_energy_body_dict[n] - vmfc_energy_body_dict[1])
if return_method == 'cp':
ptype_body_dict = cp_ptype_body_dict
energy_body_dict = cp_energy_body_dict
elif return_method == 'nocp':
ptype_body_dict = nocp_ptype_body_dict
energy_body_dict = nocp_energy_body_dict
elif return_method == 'vmfc':
ptype_body_dict = vmfc_ptype_body_dict
energy_body_dict = vmfc_energy_body_dict
else:
raise ValidationError("N-Body Wrapper: Invalid return type. Should never be here, please post this error on github.")
# Figure out and build return types
if return_total_data:
ret_energy = energy_body_dict[max_nbody]
else:
ret_energy = energy_body_dict[max_nbody]
ret_energy -= energy_body_dict[1]
if ptype != 'energy':
if return_total_data:
np_final_ptype = ptype_body_dict[max_nbody].copy()
else:
np_final_ptype = ptype_body_dict[max_nbody].copy()
np_final_ptype -= ptype_body_dict[1]
ret_ptype = core.Matrix.from_array(np_final_ptype)
else:
ret_ptype = ret_energy
# Build and set a wavefunction
wfn = core.Wavefunction.build(molecule, 'sto-3g')
wfn.nbody_energy = energies_dict
wfn.nbody_ptype = ptype_dict
wfn.nbody_body_energy = energy_body_dict
wfn.nbody_body_ptype = ptype_body_dict
if ptype == 'gradient':
wfn.set_gradient(ret_ptype)
elif ptype == 'hessian':
wfn.set_hessian(ret_ptype)
core.set_variable("CURRENT ENERGY", ret_energy)
if return_wfn:
return (ret_ptype, wfn)
else:
return ret_ptype
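# A hedged usage sketch (not from the original module): how nbody_gufunc might be
# invoked directly. The molecule handle ``water_dimer`` and importing ``energy``
# from the driver are illustrative assumptions; in practice this wrapper is reached
# through the driver when a ``bsse_type`` keyword is supplied.
#
#     from psi4.driver.driver import energy
#     e_int = nbody_gufunc(energy, 'scf', bsse_type=['cp', 'nocp'], max_nbody=2,
#                          ptype='energy', molecule=water_dimer)
#
# The counterpoise-corrected interaction energy is returned because 'cp' is the
# first entry in bsse_type; pass return_total_data=True for the total energy instead.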
|
andysim/psi4
|
psi4/driver/driver_nbody.py
|
Python
|
gpl-2.0
| 19,208
|
[
"Psi4"
] |
4adf7c885632a0c5bd8d83e9d2ea827a54daa25907df2976d4aed625dbd16519
|
# -*- coding: utf-8 -*-
import lettuce
import journal
@lettuce.step("I am not logged in")
def i_am_not_logged_in(step):
with journal.app.test_client() as client:
lettuce.world.client = client
@lettuce.step("I am logged in")
def i_am_logged_in(step):
with journal.app.test_client() as client:
with client.session_transaction() as sess:
sess['logged_in'] = True
lettuce.world.client = client
@lettuce.step("I visit the URI '(.*)'")
def i_visit_the_uri(step, a_string):
lettuce.world.response = lettuce.world.client.get(a_string)
@lettuce.step("the response should not contain '(.*)'")
def the_response_should_not_contain(step, a_string):
assert a_string not in lettuce.world.response.data
@lettuce.step("I should be redirected")
def i_should_be_redirected(step):
    assert lettuce.world.response.status_code // 100 == 3  # any 3xx redirect status
@lettuce.step("the response should contain '(.*)'")
def the_response_should_contain(step, a_string):
assert a_string in lettuce.world.response.data
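# A hedged sketch (not from the original module): a Gherkin scenario that would
# exercise the steps above. The feature wording and the '/add' URI are illustrative
# assumptions.
#
#     Scenario: Anonymous users cannot reach the add-entry page
#         Given I am not logged in
#         When I visit the URI '/add'
#         Then I should be redirected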
|
jonathansiebert/learning_journal
|
features/steps.py
|
Python
|
mit
| 1,049
|
[
"VisIt"
] |
3bea94d7f9a42cd48ba0048d6610770949234afd3e0fc891697f6e8f1f58fdc4
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
"""
CSV import classes
"""
import csv
import datetime
import time
from stoqlib.database.runtime import new_store
from stoqlib.importers.importer import Importer
from stoqlib.lib.dateutils import localdate
class CSVRow(object):
"""A row in a CSV file
"""
def __init__(self, item, field_names):
self.fields = []
for i, field in enumerate(item):
# XXX: we expect to receive unicode data
setattr(self, field_names[i], unicode(field, 'utf-8'))
self.fields.append(field_names[i])
def __repr__(self):
return '<CSV line %s>' % ', '.join(
['%s=%r' % (f, getattr(self, f)) for f in self.fields])
class CSVImporter(Importer):
"""Class to assist the process of importing csv files.
:cvar fields: field names, a list of strings
:cvar optional_fields: optional field names, a list of strings
:cvar dialect: optional, csv dialect, defaults to excel
"""
fields = []
optional_fields = []
dialect = 'excel'
def __init__(self, lines=500, dry=False):
"""
Create a new CSVImporter object.
:param lines: see :class:`set_lines_per_commit`
:param dry: see :class:`set_dry`
"""
Importer.__init__(self, items=lines, dry=dry)
self.lines = lines
#
# Public API
#
def feed(self, fp, filename='<stdin>'):
store = new_store()
self.before_start(store)
store.commit(close=True)
        self.lineno = 1
        # keep the filename so error messages in process_item() can refer to it
        self.filename = filename
        self.rows = list(csv.reader(fp, dialect=self.dialect))
def get_n_items(self):
return len(self.rows)
def process_item(self, store, item_no):
t = time.time()
item = self.rows[item_no]
if not item or item[0].startswith('%'):
self.lineno += 1
return False
if len(item) < len(self.fields):
raise ValueError(
"line %d in file %s has %d fields, but we need at "
"least %d fields to be able to process it" % (self.lineno,
self.filename,
len(item),
len(self.fields)))
field_names = self.fields + self.optional_fields
if len(item) > len(field_names):
raise ValueError(
"line %d in file %s has %d fields, but we can at most "
"handle %d fields, fields=%r" % (self.lineno,
self.filename,
len(item),
len(field_names),
item))
row = CSVRow(item, field_names)
try:
self.process_one(row, row.fields, store)
except Exception:
print()
print('Error while processing row %d %r' % (self.lineno, row, ))
print()
raise
if self.items != -1:
if self.lineno % self.items == 0:
t2 = time.time()
print('%s Imported %d entries in %2.2f sec total=%d' % (
datetime.datetime.now().strftime('%T'), self.items,
t2 - t, self.lineno))
t = t2
self.lineno += 1
return True
def parse_date(self, data):
return localdate(*map(int, data.split('-')))
def parse_multi(self, domain_class, field, store):
if field == '*':
field_values = store.find(domain_class)
else:
items = store.find(domain_class).order_by(domain_class.te_id)
field_values = [items[int(field_id) - 1]
for field_id in field.split('|')]
return field_values
#
# Override this in a subclass
#
def process_one(self, row, fields, store):
"""Processes one line in a csv file, you can access the columns
using attributes on the data object.
:param row: object representing a row in the input
:param fields: a list of fields set in data
:param store: a store
"""
raise NotImplementedError
def read(self, iterable):
"""This can be overridden by as subclass which wishes to specialize
the CSV reader.
:param iterable: a sequence of lines which are going to be read
:returns: a sequence of parsed items
"""
|
tiagocardosos/stoq
|
stoqlib/importers/csvimporter.py
|
Python
|
gpl-2.0
| 5,393
|
[
"VisIt"
] |
6629d240d16d0ecbff549ae6b273bae2d21ed4d48ef81ffa0d1861cd81d95853
|
#!/usr/bin/env python
########################################################################
# File : dirac-info
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Report info about local DIRAC installation
"""
__RCSID__ = "$Id$"
import os
import DIRAC
from DIRAC import gConfig
from DIRAC.Core.Base import Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.PrettyPrint import printTable
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s [option|cfgfile] ... Site' % Script.scriptName, ]))
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
records = []
records.append(('Setup', gConfig.getValue('/DIRAC/Setup', 'Unknown')))
records.append(('ConfigurationServer', gConfig.getValue('/DIRAC/Configuration/Servers', [])))
records.append(('Installation path', DIRAC.rootPath))
if os.path.exists(os.path.join(DIRAC.rootPath, DIRAC.getPlatform(), 'bin', 'mysql')):
records.append(('Installation type', 'server'))
else:
records.append(('Installation type', 'client'))
records.append(('Platform', DIRAC.getPlatform()))
ret = getProxyInfo(disableVOMS=True)
if ret['OK']:
if 'group' in ret['Value']:
vo = getVOForGroup(ret['Value']['group'])
else:
vo = getVOForGroup('')
if not vo:
vo = "None"
records.append(('VirtualOrganization', vo))
if 'identity' in ret['Value']:
records.append(('User DN', ret['Value']['identity']))
if 'secondsLeft' in ret['Value']:
records.append(('Proxy validity, secs', {'Value': str(ret['Value']['secondsLeft']), 'Just': 'L'}))
if gConfig.getValue('/DIRAC/Security/UseServerCertificate', True):
records.append(('Use Server Certificate', 'Yes'))
else:
records.append(('Use Server Certificate', 'No'))
if gConfig.getValue('/DIRAC/Security/SkipCAChecks', False):
records.append(('Skip CA Checks', 'Yes'))
else:
records.append(('Skip CA Checks', 'No'))
try:
import gfalthr # pylint: disable=import-error
records.append(('gfal version', gfalthr.gfal_version()))
except BaseException:
pass
try:
import lcg_util # pylint: disable=import-error
records.append(('lcg_util version', lcg_util.lcg_util_version()))
except BaseException:
pass
records.append(('DIRAC version', DIRAC.version))
fields = ['Option', 'Value']
print
printTable(fields, records, numbering=False)
print
|
arrabito/DIRAC
|
Core/scripts/dirac-info.py
|
Python
|
gpl-3.0
| 2,553
|
[
"DIRAC"
] |
449489a8bd811654b3bf1b29b98f5d2a984fe2e69d6ba8b21dc282e53f4f41f6
|
#!/usr/bin/env python
import vtk
def main():
fileName = get_program_parameters()
colors = vtk.vtkNamedColors()
# Set the furniture colors.
colors.SetColor("Furniture", [204, 204, 153, 255])
scalarRange = [0.0, 0.0]
maxTime = 0
aren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(aren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
#
# Read the data.
#
reader = vtk.vtkStructuredGridReader()
reader.SetFileName(fileName)
reader.Update() # Force a read to occur.
reader.GetOutput().GetLength()
if reader.GetOutput().GetPointData().GetScalars():
reader.GetOutput().GetPointData().GetScalars().GetRange(scalarRange)
if reader.GetOutput().GetPointData().GetVectors():
maxVelocity = reader.GetOutput().GetPointData().GetVectors().GetMaxNorm()
maxTime = 4.0 * reader.GetOutput().GetLength() / maxVelocity
#
# Outline around the data.
#
outlineF = vtk.vtkStructuredGridOutlineFilter()
outlineF.SetInputConnection(reader.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outlineF.GetOutputPort())
outline = vtk.vtkActor()
outline.SetMapper(outlineMapper)
outline.GetProperty().SetColor(colors.GetColor3d("LampBlack"))
#
# Set up shaded surfaces (i.e., supporting geometry).
#
doorGeom = vtk.vtkStructuredGridGeometryFilter()
doorGeom.SetInputConnection(reader.GetOutputPort())
doorGeom.SetExtent(27, 27, 14, 18, 0, 11)
mapDoor = vtk.vtkPolyDataMapper()
mapDoor.SetInputConnection(doorGeom.GetOutputPort())
mapDoor.ScalarVisibilityOff()
door = vtk.vtkActor()
door.SetMapper(mapDoor)
door.GetProperty().SetColor(colors.GetColor3d("Burlywood"))
window1Geom = vtk.vtkStructuredGridGeometryFilter()
window1Geom.SetInputConnection(reader.GetOutputPort())
window1Geom.SetExtent(0, 0, 9, 18, 6, 12)
mapWindow1 = vtk.vtkPolyDataMapper()
mapWindow1.SetInputConnection(window1Geom.GetOutputPort())
mapWindow1.ScalarVisibilityOff()
window1 = vtk.vtkActor()
window1.SetMapper(mapWindow1)
window1.GetProperty().SetColor(colors.GetColor3d("SkyBlue"))
window1.GetProperty().SetOpacity(.6)
window2Geom = vtk.vtkStructuredGridGeometryFilter()
window2Geom.SetInputConnection(reader.GetOutputPort())
window2Geom.SetExtent(5, 12, 23, 23, 6, 12)
mapWindow2 = vtk.vtkPolyDataMapper()
mapWindow2.SetInputConnection(window2Geom.GetOutputPort())
mapWindow2.ScalarVisibilityOff()
window2 = vtk.vtkActor()
window2.SetMapper(mapWindow2)
window2.GetProperty().SetColor(colors.GetColor3d("SkyBlue"))
window2.GetProperty().SetOpacity(.6)
klower1Geom = vtk.vtkStructuredGridGeometryFilter()
klower1Geom.SetInputConnection(reader.GetOutputPort())
klower1Geom.SetExtent(17, 17, 0, 11, 0, 6)
mapKlower1 = vtk.vtkPolyDataMapper()
mapKlower1.SetInputConnection(klower1Geom.GetOutputPort())
mapKlower1.ScalarVisibilityOff()
klower1 = vtk.vtkActor()
klower1.SetMapper(mapKlower1)
klower1.GetProperty().SetColor(colors.GetColor3d("EggShell"))
klower2Geom = vtk.vtkStructuredGridGeometryFilter()
klower2Geom.SetInputConnection(reader.GetOutputPort())
klower2Geom.SetExtent(19, 19, 0, 11, 0, 6)
mapKlower2 = vtk.vtkPolyDataMapper()
mapKlower2.SetInputConnection(klower2Geom.GetOutputPort())
mapKlower2.ScalarVisibilityOff()
klower2 = vtk.vtkActor()
klower2.SetMapper(mapKlower2)
klower2.GetProperty().SetColor(colors.GetColor3d("EggShell"))
klower3Geom = vtk.vtkStructuredGridGeometryFilter()
klower3Geom.SetInputConnection(reader.GetOutputPort())
klower3Geom.SetExtent(17, 19, 0, 0, 0, 6)
mapKlower3 = vtk.vtkPolyDataMapper()
mapKlower3.SetInputConnection(klower3Geom.GetOutputPort())
mapKlower3.ScalarVisibilityOff()
klower3 = vtk.vtkActor()
klower3.SetMapper(mapKlower3)
klower3.GetProperty().SetColor(colors.GetColor3d("EggShell"))
klower4Geom = vtk.vtkStructuredGridGeometryFilter()
klower4Geom.SetInputConnection(reader.GetOutputPort())
klower4Geom.SetExtent(17, 19, 11, 11, 0, 6)
mapKlower4 = vtk.vtkPolyDataMapper()
mapKlower4.SetInputConnection(klower4Geom.GetOutputPort())
mapKlower4.ScalarVisibilityOff()
klower4 = vtk.vtkActor()
klower4.SetMapper(mapKlower4)
klower4.GetProperty().SetColor(colors.GetColor3d("EggShell"))
klower5Geom = vtk.vtkStructuredGridGeometryFilter()
klower5Geom.SetInputConnection(reader.GetOutputPort())
klower5Geom.SetExtent(17, 19, 0, 11, 0, 0)
mapKlower5 = vtk.vtkPolyDataMapper()
mapKlower5.SetInputConnection(klower5Geom.GetOutputPort())
mapKlower5.ScalarVisibilityOff()
klower5 = vtk.vtkActor()
klower5.SetMapper(mapKlower5)
klower5.GetProperty().SetColor(colors.GetColor3d("EggShell"))
klower6Geom = vtk.vtkStructuredGridGeometryFilter()
klower6Geom.SetInputConnection(reader.GetOutputPort())
klower6Geom.SetExtent(17, 19, 0, 7, 6, 6)
mapKlower6 = vtk.vtkPolyDataMapper()
mapKlower6.SetInputConnection(klower6Geom.GetOutputPort())
mapKlower6.ScalarVisibilityOff()
klower6 = vtk.vtkActor()
klower6.SetMapper(mapKlower6)
klower6.GetProperty().SetColor(colors.GetColor3d("EggShell"))
klower7Geom = vtk.vtkStructuredGridGeometryFilter()
klower7Geom.SetInputConnection(reader.GetOutputPort())
klower7Geom.SetExtent(17, 19, 9, 11, 6, 6)
mapKlower7 = vtk.vtkPolyDataMapper()
mapKlower7.SetInputConnection(klower7Geom.GetOutputPort())
mapKlower7.ScalarVisibilityOff()
klower7 = vtk.vtkActor()
klower7.SetMapper(mapKlower7)
klower7.GetProperty().SetColor(colors.GetColor3d("EggShell"))
hood1Geom = vtk.vtkStructuredGridGeometryFilter()
hood1Geom.SetInputConnection(reader.GetOutputPort())
hood1Geom.SetExtent(17, 17, 0, 11, 11, 16)
mapHood1 = vtk.vtkPolyDataMapper()
mapHood1.SetInputConnection(hood1Geom.GetOutputPort())
mapHood1.ScalarVisibilityOff()
hood1 = vtk.vtkActor()
hood1.SetMapper(mapHood1)
hood1.GetProperty().SetColor(colors.GetColor3d("Silver"))
hood2Geom = vtk.vtkStructuredGridGeometryFilter()
hood2Geom.SetInputConnection(reader.GetOutputPort())
hood2Geom.SetExtent(19, 19, 0, 11, 11, 16)
mapHood2 = vtk.vtkPolyDataMapper()
mapHood2.SetInputConnection(hood2Geom.GetOutputPort())
mapHood2.ScalarVisibilityOff()
hood2 = vtk.vtkActor()
hood2.SetMapper(mapHood2)
hood2.GetProperty().SetColor(colors.GetColor3d("Furniture"))
hood3Geom = vtk.vtkStructuredGridGeometryFilter()
hood3Geom.SetInputConnection(reader.GetOutputPort())
hood3Geom.SetExtent(17, 19, 0, 0, 11, 16)
mapHood3 = vtk.vtkPolyDataMapper()
mapHood3.SetInputConnection(hood3Geom.GetOutputPort())
mapHood3.ScalarVisibilityOff()
hood3 = vtk.vtkActor()
hood3.SetMapper(mapHood3)
hood3.GetProperty().SetColor(colors.GetColor3d("Furniture"))
hood4Geom = vtk.vtkStructuredGridGeometryFilter()
hood4Geom.SetInputConnection(reader.GetOutputPort())
hood4Geom.SetExtent(17, 19, 11, 11, 11, 16)
mapHood4 = vtk.vtkPolyDataMapper()
mapHood4.SetInputConnection(hood4Geom.GetOutputPort())
mapHood4.ScalarVisibilityOff()
hood4 = vtk.vtkActor()
hood4.SetMapper(mapHood4)
hood4.GetProperty().SetColor(colors.GetColor3d("Furniture"))
hood6Geom = vtk.vtkStructuredGridGeometryFilter()
hood6Geom.SetInputConnection(reader.GetOutputPort())
hood6Geom.SetExtent(17, 19, 0, 11, 16, 16)
mapHood6 = vtk.vtkPolyDataMapper()
mapHood6.SetInputConnection(hood6Geom.GetOutputPort())
mapHood6.ScalarVisibilityOff()
hood6 = vtk.vtkActor()
hood6.SetMapper(mapHood6)
hood6.GetProperty().SetColor(colors.GetColor3d("Furniture"))
cookingPlateGeom = vtk.vtkStructuredGridGeometryFilter()
cookingPlateGeom.SetInputConnection(reader.GetOutputPort())
cookingPlateGeom.SetExtent(17, 19, 7, 9, 6, 6)
mapCookingPlate = vtk.vtkPolyDataMapper()
mapCookingPlate.SetInputConnection(cookingPlateGeom.GetOutputPort())
mapCookingPlate.ScalarVisibilityOff()
cookingPlate = vtk.vtkActor()
cookingPlate.SetMapper(mapCookingPlate)
cookingPlate.GetProperty().SetColor(colors.GetColor3d("Tomato"))
filterGeom = vtk.vtkStructuredGridGeometryFilter()
filterGeom.SetInputConnection(reader.GetOutputPort())
filterGeom.SetExtent(17, 19, 7, 9, 11, 11)
mapFilter = vtk.vtkPolyDataMapper()
mapFilter.SetInputConnection(filterGeom.GetOutputPort())
mapFilter.ScalarVisibilityOff()
sgfilter = vtk.vtkActor()
sgfilter.SetMapper(mapFilter)
sgfilter.GetProperty().SetColor(colors.GetColor3d("Furniture"))
#
# regular streamlines
#
line = vtk.vtkLineSource()
line.SetResolution(39)
line.SetPoint1(0.08, 2.50, 0.71)
line.SetPoint2(0.08, 4.50, 0.71)
rakeMapper = vtk.vtkPolyDataMapper()
rakeMapper.SetInputConnection(line.GetOutputPort())
rake = vtk.vtkActor()
rake.SetMapper(rakeMapper)
streamers = vtk.vtkStreamTracer()
# streamers.DebugOn()
streamers.SetInputConnection(reader.GetOutputPort())
streamers.SetSourceConnection(line.GetOutputPort())
streamers.SetMaximumPropagation(maxTime)
streamers.SetInitialIntegrationStep(.5)
streamers.SetMinimumIntegrationStep(.1)
streamers.SetIntegratorType(2)
streamers.Update()
streamersMapper = vtk.vtkPolyDataMapper()
streamersMapper.SetInputConnection(streamers.GetOutputPort())
streamersMapper.SetScalarRange(scalarRange)
lines = vtk.vtkActor()
lines.SetMapper(streamersMapper)
lines.GetProperty().SetColor(colors.GetColor3d("Black"))
aren.TwoSidedLightingOn()
aren.AddActor(outline)
aren.AddActor(door)
aren.AddActor(window1)
aren.AddActor(window2)
aren.AddActor(klower1)
aren.AddActor(klower2)
aren.AddActor(klower3)
aren.AddActor(klower4)
aren.AddActor(klower5)
aren.AddActor(klower6)
aren.AddActor(klower7)
aren.AddActor(hood1)
aren.AddActor(hood2)
aren.AddActor(hood3)
aren.AddActor(hood4)
aren.AddActor(hood6)
aren.AddActor(cookingPlate)
aren.AddActor(sgfilter)
aren.AddActor(lines)
aren.AddActor(rake)
aren.SetBackground(colors.GetColor3d("SlateGray"))
aCamera = vtk.vtkCamera()
aren.SetActiveCamera(aCamera)
aren.ResetCamera()
aCamera.SetFocalPoint(3.505, 2.505, 1.255)
aCamera.SetPosition(3.505, 24.6196, 1.255)
aCamera.SetViewUp(0, 0, 1)
aCamera.Azimuth(60)
aCamera.Elevation(30)
aCamera.Dolly(1.5)
aren.ResetCameraClippingRange()
renWin.SetSize(640, 512)
renWin.Render()
# interact with data
iren.Start()
def get_program_parameters():
import argparse
description = 'Flow velocity computed for a small kitchen (top and side view).'
epilogue = '''
Forty streamlines start along the rake positioned under the window.
Some eventually travel over the hot stove and are convected upwards.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', help='kitchen.vtk.')
args = parser.parse_args()
return args.filename
if __name__ == '__main__':
main()
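# A hedged refactoring sketch (not part of the original example): the repeated
# geometry-filter / mapper / actor blocks in main() all follow one pattern and
# could be produced by a small helper without changing the pipeline:
#
#     def make_extent_actor(source, extent, color, opacity=1.0):
#         geom = vtk.vtkStructuredGridGeometryFilter()
#         geom.SetInputConnection(source.GetOutputPort())
#         geom.SetExtent(*extent)
#         mapper = vtk.vtkPolyDataMapper()
#         mapper.SetInputConnection(geom.GetOutputPort())
#         mapper.ScalarVisibilityOff()
#         actor = vtk.vtkActor()
#         actor.SetMapper(mapper)
#         actor.GetProperty().SetColor(color)
#         actor.GetProperty().SetOpacity(opacity)
#         return actor
#
#     door = make_extent_actor(reader, (27, 27, 14, 18, 0, 11),
#                              colors.GetColor3d("Burlywood"))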
|
lorensen/VTKExamples
|
src/Python/Visualization/Kitchen.py
|
Python
|
apache-2.0
| 11,425
|
[
"VTK"
] |
c65ac60e38776c1f05185e90e109842d2d54e9ca3226c6979ba3c32e51ed231b
|
# finite-difference implementation of the diffusion equation with first-order
# explicit time discretization
#
# We are solving phi_t = k phi_xx
#
# We run at several resolutions and compute the error. This uses a
# cell-centered finite-difference grid
#
# M. Zingale (2013-04-07)
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
# Use LaTeX for rendering
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 'medium'
mpl.rcParams['figure.titlesize'] = 'small'
class Grid1d(object):
def __init__(self, nx, ng=1, xmin=0.0, xmax=1.0):
""" grid class initialization """
self.nx = nx
self.ng = ng
self.xmin = xmin
self.xmax = xmax
self.ilo = ng
self.ihi = ng+nx-1
self.dx = (xmax - xmin)/nx
self.x = xmin + (np.arange(nx+2*ng) -ng + 0.5)*self.dx
# storage for the solution
self.phi = np.zeros((nx+2*ng), dtype=np.float64)
def scratch_array(self):
return np.zeros((2*self.ng+self.nx), dtype=np.float64)
def fill_BCs(self):
""" fill the Neumann BCs """
self.phi[0:self.ilo] = self.phi[self.ilo]
self.phi[self.ihi+1:] = self.phi[self.ihi]
def phi_a(self, t, k, t0, phi1, phi2):
""" analytic solution for the diffusion of a Gaussian """
xc = 0.5*(self.xmin + self.xmax)
return (phi2 - phi1)*np.sqrt(t0/(t + t0)) * \
np.exp(-0.25*(self.x-xc)**2/(k*(t + t0))) + phi1
def norm(self, e):
""" return the norm of quantity e which lives on the grid """
if not len(e) == (2*self.ng + self.nx):
return None
return np.sqrt(self.dx*np.sum(e[self.ilo:self.ihi+1]**2))
class Simulation(object):
def __init__(self, grid, k=1.0):
self.grid = grid
self.t = 0.0
self.k = k # diffusion coefficient
def init_cond(self, name, *args):
# initialize the data
if name == "gaussian":
t0, phi1, phi2 = args
self.grid.phi[:] = self.grid.phi_a(0.0, self.k, t0, phi1, phi2)
def evolve(self, C, tmax):
gr = self.grid
# time info
dt = C*0.5*gr.dx**2/self.k
phinew = gr.scratch_array()
while self.t < tmax:
# make sure we end right at tmax
if self.t + dt > tmax:
dt = tmax - self.t
# fill the boundary conditions
gr.fill_BCs()
alpha = self.k*dt/gr.dx**2
# loop over zones
for i in range(gr.ilo, gr.ihi+1):
# explicit diffusion
phinew[i] = gr.phi[i] + \
alpha*(gr.phi[i+1] - 2.0*gr.phi[i] + gr.phi[i-1])
# store the updated solution
gr.phi[:] = phinew[:]
self.t += dt
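# Note on the scheme above: the zone loop implements the first-order explicit
# (FTCS) update  phi_i^{n+1} = phi_i^n + alpha (phi_{i+1}^n - 2 phi_i^n + phi_{i-1}^n)
# with alpha = k dt / dx^2, which is stable only for alpha <= 1/2; that is why the
# timestep is chosen as dt = C * 0.5 * dx^2 / k with C <= 1 (see the C = 2.0 run
# below for what happens otherwise). A vectorized equivalent of the loop would be:
#
#     phinew[gr.ilo:gr.ihi+1] = gr.phi[gr.ilo:gr.ihi+1] + \
#         alpha*(gr.phi[gr.ilo+1:gr.ihi+2] - 2.0*gr.phi[gr.ilo:gr.ihi+1] +
#                gr.phi[gr.ilo-1:gr.ihi])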
if __name__ == "__main__":
#-----------------------------------------------------------------------------
# diffusion coefficient
k = 1.0
# reference time
t0 = 1.e-4
# state coeffs
phi1 = 1.0
phi2 = 2.0
# solution at multiple times
    # a characteristic timescale for diffusion is L^2/k
tmax = 0.0008
nx = 64
C = 0.8
ntimes = 4
tend = tmax/10.0**ntimes
c = ["C0", "C1", "C2", "C3", "C4"]
while tend <= tmax:
g = Grid1d(nx, ng=2)
s = Simulation(g, k=k)
s.init_cond("gaussian", t0, phi1, phi2)
s.evolve(C, tend)
phi_analytic = g.phi_a(tend, k, t0, phi1, phi2)
color = c.pop()
plt.plot(g.x[g.ilo:g.ihi+1], g.phi[g.ilo:g.ihi+1],
"x", color=color, label="$t = %g$ s" % (tend))
plt.plot(g.x[g.ilo:g.ihi+1], phi_analytic[g.ilo:g.ihi+1],
color=color, ls=":")
tend = 10.0*tend
plt.xlim(0.35,0.65)
plt.legend(frameon=False, fontsize="medium")
plt.xlabel("$x$", fontsize="large")
plt.ylabel(r"$\phi$", fontsize="large")
plt.title("explicit diffusion, nx = {}, C = {:3.2f}".format(nx, C), fontsize="small")
plt.savefig("diff-explicit-{}.pdf".format(nx))
#-----------------------------------------------------------------------------
# convergence
plt.clf()
# a characteristic timescale for diffusion is L^2/k
tmax = 0.005
t0 = 1.e-4
phi1 = 1.0
phi2 = 2.0
k = 1.0
N = [16, 32, 64, 128, 256, 512]
# CFL number
C = 0.8
err = []
for nx in N:
        # the present explicit discretization
g = Grid1d(nx, ng=1)
s = Simulation(g, k=k)
s.init_cond("gaussian", t0, phi1, phi2)
s.evolve(C, tmax)
phi_analytic = g.phi_a(tmax, k, t0, phi1, phi2)
err.append(g.norm(g.phi - phi_analytic))
plt.plot(g.x[g.ilo:g.ihi+1], g.phi[g.ilo:g.ihi+1], label="N = %d" % (nx))
plt.legend(frameon=False)
plt.xlabel("$x$", fontsize="large")
plt.ylabel(r"$\phi$", fontsize="large")
plt.title("Explicit diffusion, C = {:3.2f}, t = {:5.2g}".format(C, tmax), fontsize="small")
plt.savefig("diffexplicit-res.pdf")
plt.clf()
N = np.array(N, dtype=np.float64)
err = np.array(err)
plt.scatter(N, err, color="C1", label="explicit diffusion")
plt.loglog(N, err[len(N)-1]*(N[len(N)-1]/N)**2, color="C0", label="$\mathcal{O}(\Delta x^2)$")
plt.xlabel(r"$N$", fontsize="large")
plt.ylabel(r"L2 norm of absolute error")
plt.title("Convergence of Explicit Diffusion, C = %3.2f, t = %5.2g" % (C, tmax), fontsize="small")
plt.ylim(1.e-6, 1.e-1)
plt.legend(frameon=False, fontsize="small")
plt.savefig("diffexplicit-converge-{}.pdf".format(C))
#-----------------------------------------------------------------------------
# exceed the timestep limit
plt.clf()
# a characteristic timescale for diffusion is L^2/k
tmax = 0.005
nx = 64
C = 2.0
g = Grid1d(nx, ng=2)
s = Simulation(g, k=k)
s.init_cond("gaussian", t0, phi1, phi2)
    s.evolve(C, tmax)
    phi_analytic = g.phi_a(tmax, k, t0, phi1, phi2)
    plt.plot(g.x[g.ilo:g.ihi+1], g.phi[g.ilo:g.ihi+1],
             "x-", color="C0", label="$t = %g$ s" % (tmax))
plt.plot(g.x[g.ilo:g.ihi+1], phi_analytic[g.ilo:g.ihi+1],
color="0.5", ls=":")
plt.xlim(0.35,0.65)
plt.xlabel("$x$", fontsize="large")
plt.ylabel(r"$\phi$", fontsize="large")
plt.title("explicit diffusion, nx = %d, C = %3.2f, t = %5.2g" % (nx, C, tmax), fontsize="small")
ax = plt.gca()
ax.xaxis.set_major_formatter(plt.ScalarFormatter(useMathText=True))
ax.yaxis.set_major_formatter(plt.ScalarFormatter(useMathText=True))
plt.savefig("diff-explicit-64-bad.pdf")
|
zingale/hydro_examples
|
diffusion/diffusion_explicit.py
|
Python
|
bsd-3-clause
| 6,767
|
[
"Gaussian"
] |
a9eec0659af17c2c578a6d34caecb0f5e5e2a0d5e92501fbdd49526a623a3c03
|
"""
Creating Occupancy grid using LIDAR sensor
Includes error defined below
"""
# TODO Stopping condition
# TODO Increase cell size of resultant map
# TODO Set botL = botW = 60 and edit move_bot() to move to centroid of unvisited cells
from numpy import array, add, subtract
from os import listdir
from PIL import Image
from pygame_arena import colors
from merge_images import merge_BW
import pygame
import pygame.locals
from random import gauss
from math import sin, cos, radians
specs = (500, 3, 1, 1, 300, 5)
"""
specs[0] - Frequency of LIDAR in Hz
specs[1] - Time taken for servo motor to complete 1 revolution
specs[2] - Bot Length (Y Axis, using standard cartesian Coordinates)
specs[3] - Bot Width (X Axis)
specs[4] - Max Distance LIDAR can measure, in cm. Set it to 40,000 cm later
specs[5] - Grey Area - Min Distance for bot to turn back from an obstacle
"""
errors = (0, 5)
"""
errors[0] - measurement error, per 100 cm
errors[1] - motion error, per 100 cm
"""
reality = []
all_points = [] # Points which are detected as obstacle or Out Of Sensor Bounds
OOB = [] # Out of Bound Area
real_coordinates = [] # bot's coordinates in real world
bot_center = []
direction = -1j
def init():
"""
    Loads 'map.jpg' into the reality array
    Initializes the bot's starting coordinates and center
"""
global reality, real_coordinates, bot_center
im = Image.open('map.jpg')
reality = array(im)
# TODO Starting Point Issue
real_coordinates.append((reality.shape[1] / 2, reality.shape[0] / 2))
bot_center = (0, 0)
def move_bot():
"""
Core of Motion Part
Decides motion of the bot and tries to move it
Includes Gaussian error
"""
    global reality, direction, real_coordinates, specs, bot_center, errors
(x, y) = (real_coordinates[-1][0], real_coordinates[-1][1])
# given code will update bot's position
# note that i'm going in anticlockwise sense using 4 complex roots of 1 viz. 1,-1,i,-i
delta = 1
while delta < specs[4]:
if specs[5] <= x < reality.shape[1] - specs[5] and specs[5] <= y < reality.shape[0] - specs[5] \
and 0 == reality[y][x]:
x += 1 * int(direction.real)
y += 1 * int(direction.imag)
delta += 1
else:
break
(x, y) = real_coordinates[-1]
delta -= 1 # At this point, I'm assuming delta will always be greater than what we want
x = int(x + delta * int(direction.real) + gauss(0, errors[1]))
y = int(y + delta * int(direction.imag) + gauss(0, errors[1]))
if specs[5] > x:
x = specs[5]
print 'Exceeding X limit'
elif x >= reality.shape[1] - specs[5]:
x = reality.shape[1] - specs[5] - 1
print 'Exceeding X limit'
if specs[5] > y:
y = specs[5]
print 'Exceeding Y limit'
elif y >= reality.shape[0] - specs[5]:
y = reality.shape[0] - specs[5] - 1
print 'Exceeding Y limit'
real_coordinates.append((x, y))
bot_center = add(bot_center, subtract(real_coordinates[-1], real_coordinates[-2]))
# Finally changing the direction
direction *= 1j
def get_readings():
"""
Core of measurement taking
"""
global real_coordinates, reality, all_points, OOB
n = specs[0] * specs[1]
(x, y) = (int(real_coordinates[-1][0]), int(real_coordinates[-1][1]))
for i in range(1, n):
r = 1
(_x, _y) = (x - int(cos(radians(i * 360 / n)) * r), y - int(sin(radians(i * 360 / n)) * r))
while 0 <= _x < reality.shape[1] and 0 <= _y < reality.shape[0] and r < specs[4]:
if reality[_y][_x] != 0:
if (_x, _y) not in all_points: # to avoid same point being detected again in same session
all_points.append((_x, _y))
break
r += 1
(_x, _y) = (x - int(cos(radians(i * 360 / n)) * r), y - int(sin(radians(i * 360 / n)) * r))
# TODO Out Of Bounds Issue
# This is the situation when sensor's reading reach the max value
(_x, _y) = (x - int(cos(radians(i * 360 / n)) * (r - 1)), y - int(sin(radians(i * 360 / n)) * (r - 1)))
if r == specs[4]:
OOB.append((_x, _y))
if (_x, _y) not in all_points:
all_points.append((_x, _y))
def update_image():
"""
Merges the (short-lived, error-free) metric map to (global, error-prone) perspective map
Also creates the grey-area for bot's motion
"""
global all_points, OOB, real_coordinates, reality, bot_center
pygame.init()
screen = pygame.display.set_mode((2 * specs[4], 2 * specs[4]))
screen.fill(colors['BLACK'])
for i in range(0, len(all_points)):
# TODO Image Merge Issue
all_points[i] = add(all_points[i], subtract((specs[4], specs[4]), real_coordinates[-1]))
pygame.draw.polygon(screen, colors['WHITE'], all_points)
for i in range(0, len(OOB)):
# TODO Image Merge Issue
OOB[i] = add(OOB[i], subtract((specs[4], specs[4]), real_coordinates[-1]))
# TODO Grey Area Issue
pygame.draw.circle(screen, colors['LGREY'], OOB[i], specs[5])
pygame.image.save(screen, 'temp.jpg')
pygame.quit()
if len(real_coordinates) > 1:
bot_center = merge_BW('result.jpg', 'temp.jpg', bot_center)
else:
Image.open('temp.jpg').convert('L').save('result.jpg')
bot_center = (specs[4], specs[4])
all_points = [] # this is important else u'll be using points of previous readings again
OOB = []
def finish():
"""
    Prints the resultant map
"""
if 'result.jpg' in listdir('.'):
# TODO Invert Color Issue
# ImageOps.invert(Image.open('result.jpg')).show()
Image.open('result.jpg').show()
def main():
init()
start = raw_input("Press 'y' to start: ").startswith('y')
while start:
print "Started scanning at ", real_coordinates[-1]
get_readings()
update_image()
move_bot()
print "Ended Scanning. Bot moved ", subtract(real_coordinates[-1], real_coordinates[-2]), "\nNow at", \
real_coordinates[-1]
start = raw_input("\nPress 'y' to continue scanning: ").startswith('y')
finish()
if __name__ == '__main__':
main()
|
AKS1996/VOCOWA
|
SLAM_Lidar_BW_withError.py
|
Python
|
mit
| 6,243
|
[
"Gaussian"
] |
8df18678cc524602df295824d22dca81c5e794b4b7eaefebc3b8b666dfdaf9e3
|
#!/usr/bin/env python
# coding=utf-8
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import redis
# Initialize the database connection:
engine = create_engine('mysql://root:root@localhost:3306/crystal?charset=utf8')
# Create the DBSession session factory:
DBSession = sessionmaker(bind=engine)
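# A hedged usage sketch (not from the original module): how callers would typically
# use the DBSession factory defined above. The ``User`` model class is an
# illustrative assumption.
#
#     session = DBSession()
#     try:
#         user = session.query(User).filter_by(id=1).first()
#     finally:
#         session.close()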
|
x-hansong/Crystal
|
model/db_config.py
|
Python
|
mit
| 300
|
[
"CRYSTAL"
] |
9274e79d1f1a2aed82d5700174e1e0a304eb5dec95813128483005e1d780767c
|
#!/usr/bin/env python
import numpy as np
from ase.io import write
from gpaw.io.tar import Reader
from gpaw.tddft import TDDFT
from gpaw.tddft.fourier import DensityFourierTransform
from gpaw.tddft.units import aufrequency_to_eV, autime_to_attosec
# -------------------------------------------------------------------
if __name__ == '__main__':
import os
import sys
def printOptions():
scriptname = sys.argv[0].rsplit('/', 1)[-1]
print 'Usage:'
print ' %s FTDFILE GPWFILE' % scriptname
print ''
print 'Arguments:'
print ' FTDFILE Fourier transformed density tar-file.'
print ' GPWFILE GPAW calculation tar-file (optional).'
print ''
try:
assert len(sys.argv) == 3, 'Incorrect number of arguments.'
ftd_filename = sys.argv[1]
assert ftd_filename.endswith('.ftd'), 'Invalid FTD tarfile.'
assert os.path.isfile(ftd_filename), 'FTD tarfile not found.'
prefix = ftd_filename.rsplit('.ftd', 1)[0]
tar = Reader(ftd_filename)
try:
timestep = tar['TimeStep']
sigma = tar['Width']
except KeyError:
timestep = 1
sigma = None
omega_w = tar.get('Frequency')
gamma_w = tar.get('PhaseFactor')
Fnt_wsG = tar.get('FourierTransform')
Ant_sG = tar.get('Average')
atoms = None
del tar
gpw_filename = sys.argv[2]
assert gpw_filename.endswith('.gpw'), 'Invalid GPW tarfile.'
assert os.path.isfile(gpw_filename), 'GPW tarfile not found.'
calc = TDDFT(gpw_filename, txt=None)
obs = DensityFourierTransform(timestep * autime_to_attosec,
omega_w * aufrequency_to_eV,
(sigma is not None and sigma \
* aufrequency_to_eV or None))
obs.initialize(calc)
atoms = calc.get_atoms()
del calc
obs.read(ftd_filename, idiotproof=False)
try:
sys.stdout.write('Select grid refinement [1*/2]: ')
gdref = int(sys.stdin.readline().strip())
except:
gdref = 1
getall = slice(None) #hack to obtain all frequencies/spins
Fnt_wsG = obs.get_fourier_transform(getall, getall, gdref)
Ant_sG = obs.get_average(getall, gdref)
del obs
# Save modulus and phase as .cube files for all frequencies/spins
for w, Fnt_sG in enumerate(Fnt_wsG):
for s, Fnt_G in enumerate(Fnt_sG):
filename = '%s_Fnt_w%d_s%d_mod.cube' % (prefix,w,s)
print 'Saving %s (omega=%5.2f eV)...' \
% (filename, omega_w[w]*aufrequency_to_eV)
write(filename, atoms, data=np.abs(Fnt_G))
filename = '%s_Fnt_w%d_s%d_arg.cube' % (prefix,w,s)
print 'Saving %s (omega=%5.2f eV)...' \
% (filename, omega_w[w]*aufrequency_to_eV)
write(filename, atoms, data=np.arctan2(Fnt_G.imag, Fnt_G.real))
# Save mean density as .cube file for each spin
for s, Ant_G in enumerate(Ant_sG):
filename = '%s_Ant_s%d.cube' % (prefix,s)
print 'Saving %s...' % filename
write(filename, atoms, data=Ant_G)
except AssertionError, e:
printOptions()
print 'ERROR:', e
exit(-1)
|
qsnake/gpaw
|
tools/extract_ftd.py
|
Python
|
gpl-3.0
| 3,448
|
[
"ASE",
"GPAW"
] |
2b169e69ce9956bc72f8ef3695882fe2568004a1a841c3338d5d71060bc45f7f
|
from .lpi_filter import inverse, wiener, LPIFilter2D
from ._gaussian import gaussian
from .edges import (sobel, hsobel, vsobel, sobel_h, sobel_v,
scharr, hscharr, vscharr, scharr_h, scharr_v,
prewitt, hprewitt, vprewitt, prewitt_h, prewitt_v,
roberts, roberts_positive_diagonal,
roberts_negative_diagonal, roberts_pos_diag,
roberts_neg_diag, laplace)
from ._rank_order import rank_order
from ._gabor import gabor_kernel, gabor
from .thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,
threshold_isodata, threshold_li)
from . import rank
from .rank import median
from .._shared.utils import deprecated, copy_func
from .. import restoration
denoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\
(restoration.denoise_bilateral)
denoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\
(restoration.denoise_tv_bregman)
denoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\
(restoration.denoise_tv_chambolle)
gaussian_filter = copy_func(gaussian, name='gaussian_filter')
gaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter)
gabor_filter = copy_func(gabor, name='gabor_filter')
gabor_filter = deprecated('skimage.filters.gabor')(gabor_filter)
# Backward compatibility v<0.11
@deprecated('skimage.feature.canny')
def canny(*args, **kwargs):
# Hack to avoid circular import
from ..feature._canny import canny as canny_
return canny_(*args, **kwargs)
__all__ = ['inverse',
'wiener',
'LPIFilter2D',
'gaussian',
'median',
'canny',
'sobel',
'hsobel',
'vsobel',
'sobel_h',
'sobel_v',
'scharr',
'hscharr',
'vscharr',
'scharr_h',
'scharr_v',
'prewitt',
'hprewitt',
'vprewitt',
'prewitt_h',
'prewitt_v',
'roberts',
'roberts_positive_diagonal',
'roberts_negative_diagonal',
'roberts_pos_diag',
'roberts_neg_diag',
'laplace',
'denoise_tv_chambolle',
'denoise_bilateral',
'denoise_tv_bregman',
'rank_order',
'gabor_kernel',
'gabor',
'threshold_adaptive',
'threshold_otsu',
'threshold_yen',
'threshold_isodata',
'threshold_li',
'rank']
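# A hedged note (not from the original module): the deprecated()/copy_func pairs
# above keep the pre-0.11 names importable while steering users to the new ones,
# e.g. the old spelling still works but warns:
#
#     from skimage.filters import gaussian_filter   # deprecated alias
#     smoothed = gaussian_filter(image, sigma=2)    # emits a deprecation warning
#
# New code should call skimage.filters.gaussian(image, sigma=2) directly.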
|
WarrenWeckesser/scikits-image
|
skimage/filters/__init__.py
|
Python
|
bsd-3-clause
| 2,594
|
[
"Gaussian"
] |
2f4dbf8b9040aa16c545794c51c1820d4acb76781cf4f73bba9d63bbc1773391
|
''' file name : boundingrect.py
Description : This sample shows how to find the bounding rectangle and minimum enclosing circle of a contour
This is Python version of this tutorial : http://opencv.itseez.com/doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.html
Level : Beginner
Benefits : Learn to use 1) cv2.boundingRect() and 2) cv2.minEnclosingCircle()
Usage : python boundingrect.py
Written by : Abid K. (abidrahman2@gmail.com) , Visit opencvpython.blogspot.com for more tutorials'''
import cv2
import numpy as np
def thresh_callback(thresh):
edges = cv2.Canny(blur,thresh,thresh*2)
drawing = np.zeros(img.shape,np.uint8) # Image to draw the contours
contours,hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
bx,by,bw,bh = cv2.boundingRect(cnt)
(cx,cy),radius = cv2.minEnclosingCircle(cnt)
cv2.drawContours(drawing,[cnt],0,(0,255,0),1) # draw contours in green color
cv2.circle(drawing,(int(cx),int(cy)),int(radius),(0,0,255),2) # draw circle in red color
        cv2.rectangle(drawing,(bx,by),(bx+bw,by+bh),(255,0,0),3) # draw rectangle in blue color
cv2.imshow('output',drawing)
cv2.imshow('input',img)
img = cv2.imread('messi5.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
cv2.namedWindow('input')
thresh = 100
max_thresh = 255
cv2.createTrackbar('canny thresh:','input',thresh,max_thresh,thresh_callback)
thresh_callback(0)
if cv2.waitKey(0) == 27:
cv2.destroyAllWindows()
### For more details & feature extraction on contours, visit : http://opencvpython.blogspot.com/2012/04/contour-features.html
|
asrob-uc3m/rpc_rpi
|
src/python/opencv_python_tutorials/Official_Tutorial_Python_Codes/3_imgproc/boundingrect.py
|
Python
|
gpl-3.0
| 1,723
|
[
"VisIt"
] |
576c3ac65c09aa04b157ec1134ca4e50d514855e8d0477abfc81c072d6dc35e5
|
"""
TornadoClient is the equivalent of the RPCClient, but over HTTPS.
Usage of TornadoClient is the same as RPCClient: you can instantiate TornadoClient with
a complete url (https://host:port/System/Component) or just "System/Component". Like RPCClient,
you can use all the methods defined in your service; your call will be automatically transformed
into an RPC.
It also exposes the same interface for receiving files as the TransferClient.
Main changes:
- KeepAliveLapse is removed; the requests library manages it itself.
- nbOfRetry (defined as a private attribute) is removed; the requests library manages it itself.
- Underneath it uses the HTTP POST protocol and JSON. See :ref:`httpsTornado` for details.
Example::
from DIRAC.Core.Tornado.Client.TornadoClient import TornadoClient
myService = TornadoClient("Framework/MyService")
myService.doSomething() #Returns S_OK/S_ERROR
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
# pylint: disable=broad-except
from DIRAC.Core.Tornado.Client.private.TornadoBaseClient import TornadoBaseClient
from DIRAC.Core.Utilities.JEncode import encode
class TornadoClient(TornadoBaseClient):
"""
Client for calling tornado services
Interface is based on RPCClient interface
"""
def __getattr__(self, attrname):
"""
Return the RPC call procedure
:param str attrname: Name of the procedure we are trying to call
:return: RPC procedure as function
"""
def call(*args):
"""
Just returns the right function for RPC Call
"""
return self.executeRPC(attrname, *args)
return call
# Name from RPCClient Interface
def executeRPC(self, method, *args):
"""
Calls a remote service
:param str method: remote procedure name
:param args: list of arguments
:returns: decoded response from server, server may return S_OK or S_ERROR
"""
rpcCall = {'method': method, 'args': encode(args)}
# Start request
retVal = self._request(**rpcCall)
retVal['rpcStub'] = (self._getBaseStub(), method, list(args))
return retVal
def receiveFile(self, destFile, *args):
"""
Equivalent of :py:meth:`~DIRAC.Core.DISET.TransferClient.TransferClient.receiveFile`
In practice, it calls the remote method `streamToClient` and stores the raw result in a file
:param str destFile: path where to store the result
:param args: list of arguments
:returns: S_OK/S_ERROR
"""
rpcCall = {'method': 'streamToClient', 'args': encode(args)}
# Start request
retVal = self._request(outputFile=destFile, **rpcCall)
return retVal
def executeRPCStub(rpcStub):
"""
Playback a stub
# Copy-paste from DIRAC.Core.DISET.RPCClient with RPCClient changed into TornadoClient
"""
# Generate a client with the same parameters
client = TornadoClient(rpcStub[0][0], **rpcStub[0][1])
# Get a functor to execute the RPC call
rpcFunc = getattr(client, rpcStub[1])
# Reproduce the call
return rpcFunc(*rpcStub[2])
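# A hedged sketch (not from the original module): as the indexing in
# executeRPCStub shows, an rpcStub is a tuple ((serviceUrl, constructorKwargs),
# methodName, argList), i.e. the value stored in retVal['rpcStub'] by executeRPC,
# so a previous call can be replayed with:
#
#     result = executeRPCStub(previousResult['rpcStub'])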
|
yujikato/DIRAC
|
src/DIRAC/Core/Tornado/Client/TornadoClient.py
|
Python
|
gpl-3.0
| 3,089
|
[
"DIRAC"
] |
8aeb011a15019753d8b003ed24a59d9f6f2104b8d089e4943adf81093b5e8a7f
|
from tethys_sdk.base import TethysAppBase, url_map_maker
from tethys_sdk.handoff import HandoffHandler
from tethys_sdk.jobs import CondorJobTemplate
from tethys_sdk.compute import get_scheduler
class NetcdfToGsshaInput(TethysAppBase):
"""
Tethys app class for NetCDF to GSSHA Input.
"""
name = 'Convert NetCDF to GSSHA Input'
index = 'netcdf_to_gssha:home'
icon = 'netcdf_to_gssha/images/icon.png'
package = 'netcdf_to_gssha'
root_url = 'netcdf-to-gssha'
color = '#34495e'
description = 'Converts a gridded NetCDF variable to an ascii grid for use in GSSHA. Both Arc Info and Grass ascii formats are supported.'
def url_maps(self):
"""
Add controllers
"""
UrlMap = url_map_maker(self.root_url)
url_maps = (UrlMap(name='home',
url='netcdf-to-gssha',
controller='netcdf_to_gssha.controllers.home'),
UrlMap(name='jobs',
url='netcdf-to-gssha/jobs',
controller='netcdf_to_gssha.controllers.jobs'),
UrlMap(name='results',
url='netcdf-to-gssha/{job_id}/results',
controller='netcdf_to_gssha.controllers.results'),
UrlMap(name='download',
url='netcdf-to-gssha/{job_id}/download',
controller='netcdf_to_gssha.controllers.download')
)
return url_maps
def handoff_handlers(self):
"""
Add handoff handlers
"""
handoff_handlers = (HandoffHandler(name='convert-netcdf',
handler='netcdf_to_gssha.handoff.convert_netcdf',
# internal=True
),
HandoffHandler(name='old-convert-netcdf',
handler='handoff:convert_netcdf')
)
return handoff_handlers
def job_templates(self):
"""
Define job templates
"""
# demo = get_scheduler('Demo')
job_templates = (CondorJobTemplate(name='convert_to_ascii',
parameters={'executable': '$(APP_WORKSPACE)/netcdf_to_ascii.py',
'condorpy_template_name': 'vanilla_transfer_files',
# 'attributes': {'transfer_output_files': ('$(job_name).nc',),},
# 'scheduler': demo,
# 'remote_input_files': ('$(APP_WORKSPACE)/netcdf_to_ascii.py',),
}
),
)
return job_templates
|
CI-WATER/tethysapp-netcdf_to_gssha
|
tethysapp/netcdf_to_gssha/app.py
|
Python
|
bsd-2-clause
| 2,921
|
[
"NetCDF"
] |
f9eed8922d80f28f513bc72cecbbdc14c8a925ca173c6392ed1a3044a89a320b
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import os, uuid
from copy import deepcopy
from datetime import datetime
from traceback import format_exc
# SQLAlchemy
from sqlalchemy.exc import IntegrityError
# Zato
from zato.cli import ZatoCommand, common_logging_conf_contents, common_odb_opts, kvdb_opts
from zato.common import CONTENT_TYPE, SERVER_JOIN_STATUS
from zato.common.defaults import http_plain_server_port
from zato.common.odb.model import Cluster, Server
from zato.common.util import encrypt
server_conf_template = """[main]
gunicorn_bind=localhost:{{port}}
gunicorn_worker_class=gevent
gunicorn_workers={{gunicorn_workers}}
gunicorn_timeout=240
gunicorn_user=
gunicorn_group=
gunicorn_proc_name=
gunicorn_logger_class=
deployment_lock_expires=1073741824 # 2 ** 30 seconds ≅ 34 years
deployment_lock_timeout=180
token={{token}}
service_sources=./service-sources.txt
[crypto]
use_tls=False
tls_protocol=TLSv1
tls_ciphers=EECDH+AES:EDH+AES:-SHA1:EECDH+RC4:EDH+RC4:RC4-SHA:EECDH+AES256:EDH+AES256:AES256-SHA:!aNULL:!eNULL:!EXP:!LOW:!MD5
tls_client_certs=optional
priv_key_location=zato-server-priv-key.pem
pub_key_location=zato-server-pub-key.pem
cert_location=zato-server-cert.pem
ca_certs_location=zato-server-ca-certs.pem
[odb]
db_name={{odb_db_name}}
engine={{odb_engine}}
extra=
host={{odb_host}}
port={{odb_port}}
password={{odb_password}}
pool_size={{odb_pool_size}}
username={{odb_user}}
use_async_driver=True
[hot_deploy]
pickup_dir=../../pickup-dir
work_dir=../../work
backup_history=100
backup_format=bztar
delete_after_pick_up=False
# These three are relative to work_dir
current_work_dir=./hot-deploy/current
backup_work_dir=./hot-deploy/backup
last_backup_work_dir=./hot-deploy/backup/last
[deploy_patterns_allowed]
order=true_false
*=True
[invoke_patterns_allowed]
order=true_false
*=True
[invoke_target_patterns_allowed]
order=true_false
*=True
[singleton]
initial_sleep_time=2500
# If a server doesn't update its keep alive data in
# connector_server_keep_alive_job_time * grace_time_multiplier seconds
# it will be considered down and another server from the cluster will assume
# the control of connectors
connector_server_keep_alive_job_time=30 # In seconds
grace_time_multiplier=3
[spring]
context_class=zato.server.spring_context.ZatoContext
[misc]
return_internal_objects=False
internal_services_may_be_deleted=False
initial_cluster_name={{initial_cluster_name}}
initial_server_name={{initial_server_name}}
queue_build_cap=30 # All queue-based connections need to initialize in that many seconds
http_proxy=
locale=
ensure_sql_connections_exist=True
http_server_header=Zato
zeromq_connect_sleep=0.1
aws_host=
use_soap_envelope=True
[stats]
expire_after=168 # In hours, 168 = 7 days = 1 week
[kvdb]
host={{kvdb_host}}
port={{kvdb_port}}
unix_socket_path=
password={{kvdb_password}}
db=0
socket_timeout=
charset=
errors=
use_redis_sentinels=False
redis_sentinels=
redis_sentinels_master=
shadow_password_in_logs=True
log_connection_info_sleep_time=5 # In seconds
[startup_services_first_worker]
zato.helpers.input-logger=Sample payload for a startup service (first worker)
zato.notif.init-notifiers=
zato.pubsub.move-to-target-queues=
zato.pubsub.delete-expired=
zato.pubsub.invoke-callbacks=
zato.kvdb.log-connection-info=
[startup_services_any_worker]
zato.helpers.input-logger=Sample payload for a startup service (any worker)
[pubsub]
move_to_target_queues_interval=3 # In seconds
delete_expired_interval=180 # In seconds
invoke_callbacks_interval=2 # In seconds
[profiler]
enabled=False
profiler_dir=profiler
log_filename=profiler.log
cachegrind_filename=cachegrind.out
discard_first_request=True
flush_at_shutdown=True
url_path=/zato-profiler
unwind=False
[user_config]
# All paths are either absolute or relative to the directory server.conf is in
user=./user.conf
[newrelic]
config=
environment=
ignore_errors=
log_file=
log_level=
[sentry]
dsn=
timeout=5
level=WARN
[rbac]
custom_auth_list_service=
[component_enabled]
stats=True
slow_response=True
[content_type]
json = {JSON}
plain_xml = {PLAIN_XML}
soap11 = {SOAP11}
soap12 = {SOAP12}
[os_environ]
sample_key=sample_value
""".format(**CONTENT_TYPE).encode('utf-8')
service_sources_contents = """# Visit https://zato.io/docs for more information.
# All paths are relative to server root so that, for instance,
# ./my-services will resolve to /opt/zato/server1/my-services if a server has been
# installed into /opt/zato/server1
# List your service sources below, each on a separate line.
# Recommended to be always the very last line so all services that have been
# hot-deployed are picked up last.
./work/hot-deploy/current
# Visit https://zato.io/docs for more information."""
user_conf_contents = """[sample_section]
string_key=sample_string
list_key=sample,list
"""
lua_zato_rename_if_exists = """
-- Checks whether a from_key exists and if it does renames it to to_key.
-- Returns an error code otherwise.
-- Return codes:
-- 10 = Ok, renamed from_key -> to_key
-- 11 = No such from_key
local from_key = KEYS[1]
local to_key = KEYS[2]
if redis.call('exists', from_key) == 1 then
redis.call('rename', from_key, to_key)
return 10
else
return 11
end
"""
default_odb_pool_size = 1
directories = (
'config',
'config/repo',
'logs',
'pickup-dir',
'profiler',
'work',
'work/hot-deploy',
'work/hot-deploy/current',
'work/hot-deploy/backup',
'work/hot-deploy/backup/last',
'config/repo/lua',
'config/repo/lua/internal',
'config/repo/lua/user',
'config/repo/static',
'config/repo/tls',
'config/repo/tls/keys-certs',
'config/repo/tls/ca-certs',
)
files = {
'config/repo/logging.conf':common_logging_conf_contents.format(log_path='./logs/server.log'),
'config/repo/service-sources.txt':service_sources_contents,
'config/repo/lua/internal/zato.rename_if_exists.lua':lua_zato_rename_if_exists
}
priv_key_location = './config/repo/config-priv.pem'
pub_key_location = './config/repo/config-pub.pem'
class Create(ZatoCommand):
""" Creates a new Zato server
"""
needs_empty_dir = True
allow_empty_secrets = True
opts = deepcopy(common_odb_opts)
opts.extend(kvdb_opts)
opts.append({'name':'pub_key_path', 'help':"Path to the server's public key in PEM"})
opts.append({'name':'priv_key_path', 'help':"Path to the server's private key in PEM"})
opts.append({'name':'cert_path', 'help':"Path to the server's certificate in PEM"})
opts.append({'name':'ca_certs_path', 'help':"Path to a PEM list of certificates the server will trust"})
opts.append({'name':'cluster_name', 'help':'Name of the cluster to join'})
opts.append({'name':'server_name', 'help':"Server's name"})
def __init__(self, args):
super(Create, self).__init__(args)
self.target_dir = os.path.abspath(args.path)
self.dirs_prepared = False
self.token = uuid.uuid4().hex
def prepare_directories(self, show_output):
if show_output:
self.logger.debug('Creating directories..')
for d in sorted(directories):
d = os.path.join(self.target_dir, d)
if show_output:
self.logger.debug('Creating {d}'.format(d=d))
os.mkdir(d)
self.dirs_prepared = True
def execute(self, args, port=http_plain_server_port, show_output=True):
engine = self._get_engine(args)
session = self._get_session(engine)
cluster = session.query(Cluster).\
filter(Cluster.name == args.cluster_name).\
first()
if not cluster:
msg = "Cluster [{}] doesn't exist in the ODB".format(args.cluster_name)
self.logger.error(msg)
return self.SYS_ERROR.NO_SUCH_CLUSTER
server = Server()
server.cluster_id = cluster.id
server.name = args.server_name
server.token = self.token
server.last_join_status = SERVER_JOIN_STATUS.ACCEPTED
server.last_join_mod_by = self._get_user_host()
server.last_join_mod_date = datetime.utcnow()
session.add(server)
try:
if not self.dirs_prepared:
self.prepare_directories(show_output)
repo_dir = os.path.join(self.target_dir, 'config', 'repo')
self.copy_server_crypto(repo_dir, args)
priv_key = open(os.path.join(repo_dir, 'zato-server-priv-key.pem')).read()
if show_output:
self.logger.debug('Created a Bazaar repo in {}'.format(repo_dir))
self.logger.debug('Creating files..')
for file_name, contents in sorted(files.items()):
file_name = os.path.join(self.target_dir, file_name)
if show_output:
self.logger.debug('Creating {}'.format(file_name))
f = open(file_name, 'w')
f.write(contents)
f.close()
logging_conf_loc = os.path.join(self.target_dir, 'config/repo/logging.conf')
logging_conf = open(logging_conf_loc).read()
open(logging_conf_loc, 'w').write(logging_conf.format(
log_path=os.path.join(self.target_dir, 'logs', 'zato.log')))
if show_output:
self.logger.debug('Logging configuration stored in {}'.format(logging_conf_loc))
odb_engine=args.odb_type
if odb_engine.startswith('postgresql'):
odb_engine = 'postgresql+pg8000'
server_conf_loc = os.path.join(self.target_dir, 'config/repo/server.conf')
server_conf = open(server_conf_loc, 'w')
server_conf.write(
server_conf_template.format(
port=port,
gunicorn_workers=2,
odb_db_name=args.odb_db_name or args.sqlite_path,
odb_engine=odb_engine,
odb_host=args.odb_host or '',
odb_port=args.odb_port or '',
odb_password=encrypt(args.odb_password, priv_key) if args.odb_password else '',
odb_pool_size=default_odb_pool_size,
odb_user=args.odb_user or '',
token=self.token,
kvdb_host=args.kvdb_host,
kvdb_port=args.kvdb_port,
kvdb_password=encrypt(args.kvdb_password, priv_key) if args.kvdb_password else '',
initial_cluster_name=args.cluster_name,
initial_server_name=args.server_name,
))
server_conf.close()
user_conf_loc = os.path.join(self.target_dir, 'config/repo/user.conf')
user_conf = open(user_conf_loc, 'w')
user_conf.write(user_conf_contents)
user_conf.close()
if show_output:
self.logger.debug('Core configuration stored in {}'.format(server_conf_loc))
# Initial info
self.store_initial_info(self.target_dir, self.COMPONENTS.SERVER.code)
session.commit()
except IntegrityError, e:
msg = 'Server name [{}] already exists'.format(args.server_name)
if self.verbose:
msg += '. Caught an exception:[{}]'.format(format_exc(e))
self.logger.error(msg)
session.rollback()
return self.SYS_ERROR.SERVER_NAME_ALREADY_EXISTS
except Exception, e:
msg = 'Could not create the server, e:[{}]'.format(format_exc(e))
self.logger.error(msg)
session.rollback()
else:
if show_output:
self.logger.debug('Server added to the ODB')
if show_output:
if self.verbose:
msg = """Successfully created a new server.
You can now start it with the 'zato start {}' command.""".format(self.target_dir)
self.logger.debug(msg)
else:
self.logger.info('OK')
|
AmrnotAmr/zato
|
code/zato-cli/src/zato/cli/create_server.py
|
Python
|
gpl-3.0
| 12,381
|
[
"VisIt"
] |
288f418d8b375c1e6a330f8ff6f5dce8777ac2a942ddb5d0649bf167f4e3d32f
|
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Aric Hagberg (hagberg@lanl.gov)
# Dan Schult (dschult@colgate.edu)
# Ben Edwards (BJEdwards@gmail.com)
# Arya McCarthy (admccarthy@smu.edu)
"""Generators for geometric graphs.
"""
from __future__ import division
from bisect import bisect_left
from itertools import combinations
from itertools import product
from math import sqrt
import math
import random
from random import uniform
try:
from scipy.spatial import cKDTree as KDTree
except ImportError:
_is_scipy_available = False
else:
_is_scipy_available = True
import networkx as nx
from networkx.utils import nodes_or_number
__all__ = ['geographical_threshold_graph', 'waxman_graph',
'navigable_small_world_graph', 'random_geometric_graph']
def euclidean(x, y):
"""Returns the Euclidean distance between the vectors ``x`` and ``y``.
Each of ``x`` and ``y`` can be any iterable of numbers. The
iterables must be of the same length.
"""
return sqrt(sum((a - b) ** 2 for a, b in zip(x, y)))
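# Quick sanity check (illustrative, not part of the module): the 3-4-5 right
# triangle gives euclidean((0, 0), (3, 4)) == 5.0.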
def _fast_construct_edges(G, radius, p):
"""Construct edges for random geometric graph.
Requires scipy to be installed.
"""
pos = nx.get_node_attributes(G, 'pos')
nodes, coords = list(zip(*pos.items()))
kdtree = KDTree(coords) # Cannot provide generator.
edge_indexes = kdtree.query_pairs(radius, p)
edges = ((nodes[u], nodes[v]) for u, v in edge_indexes)
G.add_edges_from(edges)
def _slow_construct_edges(G, radius, p):
"""Construct edges for random geometric graph.
Works without scipy, but in `O(n^2)` time.
"""
# TODO This can be parallelized.
for (u, pu), (v, pv) in combinations(G.nodes(data='pos'), 2):
if sum(abs(a - b) ** p for a, b in zip(pu, pv)) <= radius ** p:
G.add_edge(u, v)
@nodes_or_number(0)
def random_geometric_graph(n, radius, dim=2, pos=None, p=2):
"""Returns a random geometric graph in the unit cube.
The random geometric graph model places `n` nodes uniformly at
random in the unit cube. Two nodes are joined by an edge if the
distance between the nodes is at most `radius`.
Edges are determined using a KDTree when SciPy is available.
This reduces the time complexity from :math:`O(n^2)` to roughly :math:`O(n \log n)`.
Parameters
----------
n : int or iterable
Number of nodes or iterable of nodes
radius: float
Distance threshold value
dim : int, optional
Dimension of graph
pos : dict, optional
A dictionary keyed by node with node positions as values.
p : float
Which Minkowski distance metric to use. `p` has to meet the condition
``1 <= p <= infinity``.
If this argument is not specified, the :math:`L^2` metric (the Euclidean
distance metric) is used.
This should not be confused with the `p` of an Erdős-Rényi random
graph, which represents probability.
Returns
-------
Graph
A random geometric graph, undirected and without self-loops.
Each node has a node attribute ``'pos'`` that stores the
position of that node in Euclidean space as provided by the
``pos`` keyword argument or, if ``pos`` was not provided, as
generated by this function.
Examples
--------
Create a random geometric graph on twenty nodes where nodes are joined by
an edge if their distance is at most 0.1::
>>> G = nx.random_geometric_graph(20, 0.1)
Notes
-----
This uses a *k*-d tree to build the graph.
The `pos` keyword argument can be used to specify node positions so you
can create an arbitrary distribution and domain for positions.
For example, to use a 2D Gaussian distribution of node positions with mean
(0, 0) and standard deviation 2::
>>> import random
>>> n = 20
>>> p = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)}
>>> G = nx.random_geometric_graph(n, 0.2, pos=p)
References
----------
.. [1] Penrose, Mathew, *Random Geometric Graphs*,
Oxford Studies in Probability, 5, 2003.
"""
# TODO Is this function just a special case of the geographical
# threshold graph?
#
# n_name, nodes = n
# half_radius = {v: radius / 2 for v in nodes}
# return geographical_threshold_graph(nodes, theta=1, alpha=1,
# weight=half_radius)
#
n_name, nodes = n
G = nx.Graph()
G.name = 'random_geometric_graph({}, {}, {})'.format(n, radius, dim)
G.add_nodes_from(nodes)
# If no positions are provided, choose uniformly random vectors in
# Euclidean space of the specified dimension.
if pos is None:
pos = {v: [random.random() for i in range(dim)] for v in nodes}
nx.set_node_attributes(G, 'pos', pos)
if _is_scipy_available:
_fast_construct_edges(G, radius, p)
else:
_slow_construct_edges(G, radius, p)
return G
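# Illustrative usage (not part of the module): every edge of the returned graph
# respects the distance threshold, whichever construction path was taken.
#   G = random_geometric_graph(50, 0.25)
#   positions = nx.get_node_attributes(G, 'pos')
#   assert all(euclidean(positions[u], positions[v]) <= 0.25
#              for u, v in G.edges())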
@nodes_or_number(0)
def geographical_threshold_graph(n, theta, alpha=2, dim=2, pos=None,
weight=None, metric=None):
r"""Returns a geographical threshold graph.
The geographical threshold graph model places `n` nodes uniformly at
random in a rectangular domain. Each node `u` is assigned a weight
:math:`w_u`. Two nodes `u` and `v` are joined by an edge if
.. math::
w_u + w_v \ge \theta r^{\alpha}
where `r` is the distance between `u` and `v`, and :math:`\theta`,
:math:`\alpha` are parameters.
Parameters
----------
n : int or iterable
Number of nodes or iterable of nodes
theta: float
Threshold value
alpha: float, optional
Exponent of distance function
dim : int, optional
Dimension of graph
pos : dict
Node positions as a dictionary of tuples keyed by node.
weight : dict
Node weights as a dictionary of numbers keyed by node.
metric : function
A metric on vectors of numbers (represented as lists or
tuples). This must be a function that accepts two lists (or
tuples) as input and yields a number as output. The function
must also satisfy the four requirements of a `metric`_.
Specifically, if *d* is the function and *x*, *y*,
and *z* are vectors in the graph, then *d* must satisfy
1. *d*(*x*, *y*) ≥ 0,
2. *d*(*x*, *y*) = 0 if and only if *x* = *y*,
3. *d*(*x*, *y*) = *d*(*y*, *x*),
4. *d*(*x*, *z*) ≤ *d*(*x*, *y*) + *d*(*y*, *z*).
If this argument is not specified, the Euclidean distance metric is
used.
.. _metric: https://en.wikipedia.org/wiki/Metric_%28mathematics%29
Returns
-------
Graph
A random geographic threshold graph, undirected and without
self-loops.
Each node has a node attribute ``'pos'`` that stores the
position of that node in Euclidean space as provided by the
``pos`` keyword argument or, if ``pos`` was not provided, as
generated by this function. Similarly, each node has a node
attribute ``'weight'`` that stores the weight of that node as
provided or as generated.
Examples
--------
Specify an alternate distance metric using the ``metric`` keyword
argument. For example, to use the "`taxicab metric`_" instead of the
default `Euclidean metric`_::
>>> dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y))
>>> G = nx.geographical_threshold_graph(10, 0.1, metric=dist)
.. _taxicab metric: https://en.wikipedia.org/wiki/Taxicab_geometry
.. _Euclidean metric: https://en.wikipedia.org/wiki/Euclidean_distance
Notes
-----
If weights are not specified they are assigned to nodes by drawing randomly
from the exponential distribution with rate parameter :math:`\lambda=1`.
To specify weights from a different distribution, use the `weight` keyword
argument::
>>> import random
>>> n = 20
>>> w = {i: random.expovariate(5.0) for i in range(n)}
>>> G = nx.geographical_threshold_graph(20, 50, weight=w)
If node positions are not specified they are randomly assigned from the
uniform distribution.
References
----------
.. [1] Masuda, N., Miwa, H., Konno, N.:
Geographical threshold graphs with small-world and scale-free
properties.
Physical Review E 71, 036108 (2005)
.. [2] Milan Bradonjić, Aric Hagberg and Allon G. Percus,
Giant component and connectivity in geographical threshold graphs,
in Algorithms and Models for the Web-Graph (WAW 2007),
Antony Bonato and Fan Chung (Eds), pp. 209--216, 2007
"""
n_name, nodes = n
G = nx.Graph()
G.add_nodes_from(nodes)
# If no weights are provided, choose them from an exponential
# distribution.
if weight is None:
weight = {v: random.expovariate(1) for v in G}
# If no positions are provided, choose uniformly random vectors in
# Euclidean space of the specified dimension.
if pos is None:
pos = {v: [random.random() for i in range(dim)] for v in nodes}
# If no distance metric is provided, use Euclidean distance.
if metric is None:
metric = euclidean
nx.set_node_attributes(G, 'weight', weight)
nx.set_node_attributes(G, 'pos', pos)
# Returns ``True`` if and only if the nodes whose attributes are
# ``du`` and ``dv`` should be joined, according to the threshold
# condition.
def should_join(pair):
u, v = pair
u_pos, v_pos = pos[u], pos[v]
u_weight, v_weight = weight[u], weight[v]
return theta * metric(u_pos, v_pos) ** alpha <= u_weight + v_weight
G.add_edges_from(filter(should_join, combinations(G, 2)))
return G
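# Illustrative usage (not part of the module): with explicit weights, every
# returned edge satisfies w_u + w_v >= theta * d(u, v)**alpha (alpha defaults
# to 2 and the metric to Euclidean distance).
#   n = 30
#   w = {i: random.expovariate(1) for i in range(n)}
#   G = geographical_threshold_graph(n, theta=20, weight=w)
#   positions = nx.get_node_attributes(G, 'pos')
#   assert all(w[u] + w[v] >= 20 * euclidean(positions[u], positions[v]) ** 2
#              for u, v in G.edges())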
@nodes_or_number(0)
def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0, 0, 1, 1),
metric=None):
r"""Return a Waxman random graph.
The Waxman random graph model places `n` nodes uniformly at random
in a rectangular domain. Each pair of nodes at distance `d` is
joined by an edge with probability
.. math::
p = \alpha \exp(-d / \beta L).
This function implements both Waxman models, using the `L` keyword
argument.
* Waxman-1: if `L` is not specified, it is set to be the maximum distance
between any pair of nodes.
* Waxman-2: if `L` is specified, the distance between a pair of nodes is
chosen uniformly at random from the interval `[0, L]`.
Parameters
----------
n : int or iterable
Number of nodes or iterable of nodes
alpha: float
Model parameter
beta: float
Model parameter
L : float, optional
Maximum distance between nodes. If not specified, the actual distance
is calculated.
domain : four-tuple of numbers, optional
Domain size, given as a tuple of the form `(x_min, y_min, x_max,
y_max)`.
metric : function
A metric on vectors of numbers (represented as lists or
tuples). This must be a function that accepts two lists (or
tuples) as input and yields a number as output. The function
must also satisfy the four requirements of a `metric`_.
Specifically, if *d* is the function and *x*, *y*,
and *z* are vectors in the graph, then *d* must satisfy
1. *d*(*x*, *y*) ≥ 0,
2. *d*(*x*, *y*) = 0 if and only if *x* = *y*,
3. *d*(*x*, *y*) = *d*(*y*, *x*),
4. *d*(*x*, *z*) ≤ *d*(*x*, *y*) + *d*(*y*, *z*).
If this argument is not specified, the Euclidean distance metric is
used.
.. _metric: https://en.wikipedia.org/wiki/Metric_%28mathematics%29
Returns
-------
Graph
A random Waxman graph, undirected and without self-loops. Each
node has a node attribute ``'pos'`` that stores the position of
that node in Euclidean space as generated by this function.
Examples
--------
Specify an alternate distance metric using the ``metric`` keyword
argument. For example, to use the "`taxicab metric`_" instead of the
default `Euclidean metric`_::
>>> dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y))
>>> G = nx.waxman_graph(10, 0.5, 0.1, metric=dist)
.. _taxicab metric: https://en.wikipedia.org/wiki/Taxicab_geometry
.. _Euclidean metric: https://en.wikipedia.org/wiki/Euclidean_distance
References
----------
.. [1] B. M. Waxman, *Routing of multipoint connections*.
IEEE J. Select. Areas Commun. 6(9),(1988) 1617--1622.
"""
n_name, nodes = n
G = nx.Graph()
G.add_nodes_from(nodes)
(xmin, ymin, xmax, ymax) = domain
# Each node gets a uniformly random position in the given rectangle.
pos = {v: (uniform(xmin, xmax), uniform(ymin, ymax)) for v in G}
nx.set_node_attributes(G, 'pos', pos)
# If no distance metric is provided, use Euclidean distance.
if metric is None:
metric = euclidean
# If the maximum distance L is not specified (that is, we are in the
# Waxman-1 model), then find the maximum distance between any pair
# of nodes.
#
# In the Waxman-1 model, join nodes randomly based on distance. In
# the Waxman-2 model, join randomly based on random l.
if L is None:
L = max(metric(x, y) for x, y in combinations(pos.values(), 2))
dist = lambda u, v: metric(pos[u], pos[v])
else:
dist = lambda u, v: random.random() * L
# `pair` is the pair of nodes to decide whether to join.
def should_join(pair):
return random.random() < alpha * math.exp(-dist(*pair) / (beta * L))
G.add_edges_from(filter(should_join, combinations(G, 2)))
return G
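# Illustrative usage (not part of the module): Waxman-1 computes L from the
# generated positions, while Waxman-2 takes L and draws pair distances at
# random from [0, L].
#   G1 = waxman_graph(50, alpha=0.4, beta=0.1)          # Waxman-1
#   G2 = waxman_graph(50, alpha=0.4, beta=0.1, L=1.0)   # Waxman-2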
def navigable_small_world_graph(n, p=1, q=1, r=2, dim=2, seed=None):
r"""Return a navigable small-world graph.
A navigable small-world graph is a directed grid with additional long-range
connections that are chosen randomly.
[...] we begin with a set of nodes [...] that are identified with the set
of lattice points in an :math:`n \times n` square,
:math:`\{(i, j): i \in \{1, 2, \ldots, n\}, j \in \{1, 2, \ldots, n\}\}`,
and we define the *lattice distance* between two nodes `(i, j)` and
`(k, l)` to be the number of "lattice steps" separating them:
`d((i, j), (k, l)) = |k - i| + |l - j|`.
For a universal constant `p >= 1`, the node `u` has a directed edge to
every other node within lattice distance `p` --- these are its *local
contacts*. For universal constants `q >= 0` and `r >= 0` we also
construct directed edges from `u` to `q` other nodes (the *long-range
contacts*) using independent random trials; the `i`th directed edge from
`u` has endpoint `v` with probability proportional to `[d(u,v)]^{-r}`.
-- [1]_
Parameters
----------
n : int
The length of one side of the lattice; the number of nodes in
the graph is therefore :math:`n^2`.
p : int
The diameter of short range connections. Each node is joined with every
other node within this lattice distance.
q : int
The number of long-range connections for each node.
r : float
Exponent for decaying probability of connections. The probability of
connecting to a node at lattice distance `d` is `1/d^r`.
dim : int
Dimension of grid
seed : int, optional
Seed for random number generator (default=None).
References
----------
.. [1] J. Kleinberg. The small-world phenomenon: An algorithmic
perspective. Proc. 32nd ACM Symposium on Theory of Computing, 2000.
"""
if (p < 1):
raise nx.NetworkXException("p must be >= 1")
if (q < 0):
raise nx.NetworkXException("q must be >= 0")
if (r < 0):
raise nx.NetworkXException("r must be >= 1")
if seed is not None:
random.seed(seed)
G = nx.DiGraph()
nodes = list(product(range(n), repeat=dim))
for p1 in nodes:
probs = [0]
for p2 in nodes:
if p1 == p2:
continue
d = sum((abs(b - a) for a, b in zip(p1, p2)))
if d <= p:
G.add_edge(p1, p2)
probs.append(d**-r)
cdf = list(nx.utils.accumulate(probs))
for _ in range(q):
target = nodes[bisect_left(cdf, random.uniform(0, cdf[-1]))]
G.add_edge(p1, target)
return G
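# Illustrative usage (not part of the module): a 5 x 5 lattice (25 nodes) with
# one long-range contact per node and inverse-square decay; the seed makes the
# long-range choices reproducible.
#   G = navigable_small_world_graph(5, p=1, q=1, r=2, seed=42)
#   assert len(G) == 25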
|
jfinkels/networkx
|
networkx/generators/geometric.py
|
Python
|
bsd-3-clause
| 16,745
|
[
"Gaussian"
] |
d205ee12bc7474c63c94c11d84777c33a036fed236f64ebb425ec0422304d6be
|
# -*- coding: utf-8 -*-
"""
This Python module contains configurations(aka settings) of the application.
Updated since version 1.1:
1. Added path to paraview script template file.
Updated since version 1.2 (OpenWarp - Add Logging Functionality):
1. Added support for logging.
"""
__author__ = "caoweiquan322, yedtoss"
__copyright__ = "Copyright (C) 2014-2016 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import sys
import os
# Represents the suffix of executables in different platforms.
bin_suffix = '.exe' if sys.platform == 'win32' else ''
# Represents the directory of this application.
app_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Represents the application home in the user home
user_home = os.path.join(os.path.expanduser("~"), "OpenWarpFiles")
# Represents the full path of the "nglib-mesh" executable binary.
# Required, non-empty.
if sys.platform == 'win32':
MESH_GENERATOR_BIN = os.path.join(app_dir, 'bundled', 'mesh-generator', 'bin', 'nglib-mesh' + bin_suffix)
elif sys.platform == "darwin":
MESH_GENERATOR_BIN = os.path.join(app_dir, 'bundled', 'mesh-generator', 'build', 'nglib-mesh' + bin_suffix)
else:
MESH_GENERATOR_BIN = os.path.join(app_dir, 'bundled', 'mesh-generator', 'bin', 'nglib_mesh' + bin_suffix)
# Represents the full path of the "preProcessor" executable binary.
# Required, non-empty.
PREPROCESSOR_BIN = os.path.join(app_dir, 'bundled', 'simulation', 'bin', 'preProcessor' + bin_suffix)
# Represents the full path of the "Solver" executable binary.
# Required, non-empty.
SOLVER_BIN = os.path.join(app_dir, 'bundled', 'simulation', 'bin', 'Solver' + bin_suffix)
# Represents the full path of the "postProcessor" executable binary.
# Required, non-empty.
POSTPROCESSOR_BIN = os.path.join(app_dir, 'bundled', 'simulation', 'bin', 'postProcessor' + bin_suffix)
# Represents the full path of the ParaView executable binary.
# Required, non-empty.
if sys.platform == 'win32':
PARAVIEW_BIN = os.path.join(app_dir, 'bundled', 'paraview', 'bin', 'paraview' + bin_suffix)
elif sys.platform == "darwin":
PARAVIEW_BIN = os.path.join(app_dir, 'bundled', 'paraview.app/Contents/MacOS/paraview')
else:
PARAVIEW_BIN = os.path.join(app_dir, 'bundled', 'paraview_linux/bin/paraview')
# Represents the full path of the ParaView script template for loading data files.
# Required, non-empty.
PARAVIEW_SCRIPT_TEMPLATE = os.path.join(app_dir, 'bundled', 'paraview', 'paraview_script_template.py')
# Represents the root directory for user data.
# Required, non-empty.
USER_DATA_DIRECTORY = os.path.join(user_home, 'user_data')
# Represents the root directory for temporary files
# Required, non-empty.
TEMP_DATA_DIRECTORY = os.path.join(user_home, 'temp')
# Represents the accepted file extensions for paraview to visualize.
VISUALIZATION_FILE_EXTENSIONS = ['stl', 'vtk', 'tec']
# Represents the web server port.
# Required. It must be a positive integer in range of [0, 65535].
WEB_SERVER_PORT = 8386
LOG_DIR = os.path.join(user_home, 'logs')
LOG_FILE = os.path.join(LOG_DIR, 'logs.log')
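# Illustrative sketch (not part of the original settings module): the
# directories referenced above are presumably created by the application at
# startup; a hedged way to do so would be:
#   for _d in (USER_DATA_DIRECTORY, TEMP_DATA_DIRECTORY, LOG_DIR):
#       if not os.path.exists(_d):
#           os.makedirs(_d)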
|
NREL/OpenWARP
|
source/openwarpgui/openwarp/settings.py
|
Python
|
apache-2.0
| 3,165
|
[
"ParaView",
"VTK"
] |
90f2bf9058b6c27dd81c2cf35beae4e7fb2a95bff9b78e6294ea567afcc4f727
|
# -*- coding: utf-8 -*-
'''
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for software and other kinds of works.
The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.
Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and modification follow.
TERMS AND CONDITIONS
0. Definitions.
“This License” refers to version 3 of the GNU General Public License.
“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.
“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.
To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.
A “covered work” means either the unmodified Program or a work based on the Program.
To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.
To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.
1. Source Code.
The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.
A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.
The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.
The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.
The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
The Corresponding Source for a work in source code form is that same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:
* a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
* b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”.
* c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
* d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:
* a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
* b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
* c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
* d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
* e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.
“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.
7. Additional Terms.
“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:
* a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
* b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
* c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
* d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
* e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
* f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.
All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).
However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.
An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.
11. Patents.
A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”.
A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.
In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.
A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.
Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
'''
# fix a bug in uuid, import it first !!
import uuid
import os
import sys
import ConfigParser
#sys.path.append('/usr/lib/python/')
#sys.path.append('/usr/lib64/python2.4/site-packages')
#sys.path.append('/usr/lib64/python2.4/site-packages/gtk-2.0')
#sys.path.append(os.environ['CUON_PATH'])
#try:
import pygtk
#except:
# print 'No python-module pygtk found. please install first'
# sys.exit(0)
import os.path
import shlex, subprocess
pygtk.require('2.0')
import gtk
import gtk.glade
import gobject
import cuon.Addresses.addresses
import cuon.Addresses.SingleAddress
import cuon.Addresses.SinglePartner
import cuon.Addresses.SingleScheduling
print 'import Address'
import cuon.Articles.articles
import cuon.Bank.bank
import cuon.Addresses.contact
try:
import cuon.Clients.clients
except Exception, params:
print 'import failed'
print Exception, params
import cuon.Leasing.leasing
import cuon.Order.order
import cuon.Proposal.proposal
import cuon.User.user
import cuon.Preferences.preferences
import cuon.PrefsFinance.prefsFinance
import cuon.Stock.stock
import cuon.XML.MyXML
from cuon.TypeDefs.typedefs import typedefs
from cuon.Windows.windows import windows
import cPickle
import cuon.Databases.dumps
from cuon.TypeDefs.typedefs_server import typedefs_server
import cuon.Databases.cyr_load_table
#import threading
import cuon.Staff.staff
import cuon.Project.project
import commands
import cuon.Databases.SingleDataTreeModel
import cuon.Databases.SingleCuon
import cuon.Finances.invoicebook
import cuon.Finances.bookkeeping
import cuon.Finances.cashAccountBook
import cuon.Calendar.calendar
try:
import cuon.Web2.web2
except:
print 'no Module Web2'
try:
from PIL import Image
except:
print 'no PIL Image found'
try:
import cuon.SQL_Shell.sql_shell
except:
pass
# localisation
import locale, gettext
import time
import cuon.E_Mail.imap_dms
try:
#import gtkhtml2
import gtkmozembed as moz
except:
print 'gtkhtml not found'
#http connections
import httplib, urllib
#try:
# import profile
#except:
# print "no Profile"
import cuon.DMS.documentTools
import bz2
class MainWindow(windows):
"""
@author: Juergen Hamel
@organization: Cyrus-Computer GmbH, D-32584 Loehne
@copyright: by Juergen Hamel
@license: GPL ( GNU GENERAL PUBLIC LICENSE )
@contact: jh@cyrus.de
"""
def __init__(self, sT):
windows.__init__(self)
self.sStartType = sT
self.Version = {'Major': 11, 'Minor': 10, 'Rev': 2, 'Species': 0, 'Maschine': 'Linux,BSD,Windows,Mac'}
self.sTitle = `self.Version['Major']` + '.' + `self.Version['Minor']` + '.' + `self.Version['Rev']`
self.t0 = None
self.t1 = None
self.t2 = None
self.t3 = None
self.allTables = {}
self.sDebug = 'NO'
self.ModulNumber = self.MN['Mainwindow']
self.extMenucommand = {}
self.store = None
self.connectTreeId = None
self.singleAddress = None
self.singlePartner = None
self.singleSchedul = None
self.schedulHash1 = None
self.schedulHash2 = None
self.schedulHash3 = None
self.ClientID = 0
self.firstGtkMozStart = True
self.mapmoz = None
#self.extMenucommand['ext1'] = 'Test'
#set this Functions to None
def loadUserInfo(self):
pass
def checkClient(self):
pass
def delete_event(self, widget, event, data=None):
self.on_end1_activate(None)
return False
def destroy(self, widget, data=None):
print "destroy signal occurred"
self.on_end1_activate(None)
def on_end1_activate(self,event):
print "exit cuon"
#clean up the tmp-files
try:
os.system( 'rm ' + os.path.normpath(self.td.cuon_path + '/cuon__*' ))
except Exception, params:
print 'prm1', Exception, params
#pass
try:
os.system( 'rm ' + os.path.normpath(self.td.cuon_path + '/cuon_data/dms/cuon__*' ))
except Exception, params:
print 'prm1', Exception, params
#pass
try:
os.system( 'rm ' + os.path.normpath(self.td.cuon_path + '/cuon_data/dms/*__dms*' ))
except Exception, params:
print 'prm1', Exception, params
#pass
try:
os.system( 'rm ' + os.path.normpath(self.dicUser['prefPath']['tmp'] + '/*__dms*' ))
except Exception, params:
print 'prm2',Exception, params
#pass
try:
os.system( 'rm ' + os.path.normpath( self.td.cuon_path + '/*__dms*' ))
except Exception, params:
print 'prm3',Exception, params
#pass
try:
os.system( 'rm ' + os.path.normpath( './*__dms*' ))
except Exception, params:
print 'prm3',Exception, params
self.on_logout1_activate(None)
self.gtk_main_quit()
def on_databases1_activate(self,event):
import cuon.Databases.databases
daba = cuon.Databases.databases.databaseswindow()
def on_login1_activate(self,event):
import cuon.Login.login
print 'lgi client id = ', self.ClientID
lgi = cuon.Login.login.loginwindow( [self.getWidget('eUserName')], None, Username, PASSWORD, self.ClientID)
self.openDB()
self.oUser = self.loadObject('User')
self.closeDB()
if self.oUser.getUserName()== 'EMPTY':
pass
else:
self.getWidget('eServer').set_text(self.td.server)
#choose the client
#sys.exit()
self.on_clients1_activate(None)
print 'Hallo - client'
self.checkMenus()
print 'ShowNews = ', self.dicUser['showNews']
if self.dicUser['showNews'] :
self.activateClick('onlineNews')
def checkMenus(self):
liModullist = self.rpc.callRP('User.getModulList', self.oUser.getSqlDicUser())
#print liModullist
if self.sStartType == 'server':
self.enableMenuItem('serverMode')
self.disableMenuItem('user')
self.enableMenuItem('login')
misc_menu = False
#print 'LI_MODULELIST'
print liModullist
for iL in liModullist:
#print iL
if 'all' in iL:
#print 'key all found'
#data
self.addEnabledMenuItems('work','mi_addresses1')
self.addEnabledMenuItems('work','mi_articles1')
self.addEnabledMenuItems('work','mi_bibliographic')
self.addEnabledMenuItems('work','mi_clients1')
self.addEnabledMenuItems('work','contracts1')
self.addEnabledMenuItems('work','mi_leasing1')
print 'enableMenuItem staff'
self.addEnabledMenuItems('work','mi_staff1')
print 'enableMenuItem staff end'
#action
self.addEnabledMenuItems('work','mi_order1')
self.addEnabledMenuItems('work','mi_stock1')
self.addEnabledMenuItems('work','mi_dms1')
self.addEnabledMenuItems('work','mi_supportticket1')
#accounting
self.addEnabledMenuItems('work','mi_cash_account_book1')
# extras
self.addEnabledMenuItems('work','mi_expert_system1')
self.addEnabledMenuItems('work','mi_project1')
## self.addEnabledMenuItems('work','mi_forms1')
## self.addEnabledMenuItems('work','mi_forms_addresses1')
#tools
self.addEnabledMenuItems('work','mi_preferences1')
self.addEnabledMenuItems('work','mi_user1')
self.addEnabledMenuItems('work','mi_finances1')
#self.addEnabledMenuItems('work','mi_project1')
self.addEnabledMenuItems('work','mi_import_data1')
self.enableMenuItem('work')
if iL.has_key('addresses'):
self.addEnabledMenuItems('misc','mi_addresses1')
misc_menu = True
if iL.has_key('articles'):
self.addEnabledMenuItems('misc','mi_articles1')
misc_menu = True
if iL.has_key('biblio'):
self.addEnabledMenuItems('misc','mi_bibliographic')
misc_menu = True
if iL.has_key('clients'):
self.addEnabledMenuItems('misc','mi_clients1')
misc_menu = True
if iL.has_key('staff'):
self.addEnabledMenuItems('misc','mi_staff1')
misc_menu = True
if iL.has_key('order'):
self.addEnabledMenuItems('misc','mi_order1')
misc_menu = True
if iL.has_key('stock'):
self.addEnabledMenuItems('misc','mi_stock1')
misc_menu = True
if iL.has_key('dms'):
self.addEnabledMenuItems('misc','mi_dms1')
misc_menu = True
if iL.has_key('account_book'):
self.addEnabledMenuItems('misc','mi_cash_account_book1')
misc_menu = True
if iL.has_key('expert_system'):
self.addEnabledMenuItems('misc','mi_expert_system1')
misc_menu = True
if iL.has_key('project'):
print 'key project found '
self.addEnabledMenuItems('misc','mi_project1')
misc_menu = True
if iL.has_key('web2'):
print 'key web2 found '
self.addEnabledMenuItems('misc','web2')
misc_menu = True
if iL.has_key('forms'):
print 'key forms found '
self.addEnabledMenuItems('misc','forms1')
misc_menu = True
print '-----------------------'
if iL.has_key('forms_addresses'):
print 'key forms_addresses found '
self.addEnabledMenuItems('misc','forms_addresses1')
self.addEnabledMenuItems('misc','mi_addresses_notes_misc1')
self.addEnabledMenuItems('misc','mi_addresses_notes_contacter1')
self.addEnabledMenuItems('misc','mi_addresses_notes_representant1')
self.addEnabledMenuItems('misc','mi_addresses_notes_salesman1')
misc_menu = True
if iL.has_key('experimental'):
print 'key experimental found'
self.addEnabledMenuItems('experimental','mi_mayavi1')
self.addEnabledMenuItems('experimental','mi_test1')
self.enableMenuItem('experimental')
if iL.has_key('extendet_gpl'):
try:
liExtGpl = iL['extendet_gpl']
print 'Ext.GPL =', liExtGpl
for newProgram in liExtGpl:
print newProgram
mi1 = self.addMenuItem(self.getWidget(newProgram['MenuItem']['Main']),newProgram['MenuItem']['Sub'])
try:
print 'new Item = ', `mi1`
if newProgram['MenuItem']['ExternalNumber'] == 'ext1':
mi1.connect("activate", self.on_ext1_activate)
elif newProgram['MenuItem']['ExternalNumber'] == 'ext2':
mi1.connect("activate", self.on_ext2_activate)
elif newProgram['MenuItem']['ExternalNumber'] == 'ext3':
mi1.connect("activate", self.on_ext3_activate)
elif newProgram['MenuItem']['ExternalNumber'] == 'ext4':
mi1.connect("activate", self.on_ext4_activate)
if newProgram.has_key('Imports'):
newImports = newProgram['Imports']
for nI in newImports:
try:
print 'import ext Module 1', nI
exec('import ' + nI)
print 'import extendet module 2', nI
except Exception, params:
print Exception, params
if newProgram.has_key('MenuStart'):
print 'MenuStart = ', newProgram['MenuItem']['ExternalNumber']
self.extMenucommand[newProgram['MenuItem']['ExternalNumber']] = newProgram['MenuStart']
if newProgram.has_key('Start'):
exec(newProgram['Start'])
print 'EXEC = ', newProgram['Start']
except Exception,params:
print Exception,params
except Exception,params:
print Exception,params
if misc_menu:
self.enableMenuItem('misc')
def on_logout1_activate(self, event):
print 'Logout'
try:
self.rpc.callRP('Database.logout', self.oUser.getUserName())
except:
print 'Exception'
self.disableMenuItem('login')
self.enableMenuItem('user')
def on_eUserName_changed(self, event):
if self.getWidget('eUserName').get_text() != 'EMPTY':
print 'User changed 22'
self.openDB()
self.oUser = self.loadObject('User')
print 'sDebug (Cuon) = ' + self.sDebug
self.oUser.setDebug(self.sDebug)
self.saveObject('User', self.oUser)
self.closeDB()
# self.openDB()
#if self.startProgressBar():
if not self.allTables:
self.generateLocalSqlObjects()
# self.stopProgressBar()
#print self.oUser.getDicUser()
# now start scheduling
print 'Client = ', self.oUser.getSqlDicUser()['client']
def generateSqlObjects(self):
self.setProgressBar( 0.4)
entryList = self.rpc.callRP('Database.executeNormalQuery',"select skey from cuon where skey ~* 'entry_' ")
#print entryList
self.openDB()
for i in entryList:
#print i['skey']
sk = self.rpc.callRP('Database.getInfo', i['skey'])
self.saveObject(i['skey'],sk)
#print sk
self.closeDB()
#self.rpc.callRP('src.Databases.py_getInfoOfTable', 'allTables')
at = self.rpc.callRP('Database.getInfo', 'allTables')
#print 'at23 = ', `at`
self.setProgressBar( 3.0)
liAllTables = cPickle.loads(eval(self.doDecode(at)))
#sys.exit(0)
#print 'liAllTables = '
#print liAllTables
iCount = len(liAllTables)
for i in range(iCount):
self.loadSqlDefs(liAllTables, i)
self.setProgressBar( (float(i) * 1.0/float(iCount) * 100.0) + 5.0)
#print 'Progress-Value = ' + str(float(i) * 1.0/float(iCount) * 100.0)
#print self.allTables
def generateLocalSqlObjects(self):
at = self.rpc.callRP('Database.getInfo', 'allTables')
#print 'at24 = ', `at`
liAllTables = cPickle.loads(eval(self.doDecode(at)))
#liAllTables = cPickle.loads(self.rpc.callRP('src.Databases.py_getInfoOfTable', 'allTables'))
#print 'liAllTables = ', liAllTables
#print liAllTables
iCount = len(liAllTables)
#print 'iCount = ', iCount
for i in range(iCount):
self.loadLocalSqlDefs(liAllTables, i)
#self.setProgressBar(float(i) * 1.0/float(iCount) * 100.0)
#print 'Progress-Value = ' + str(float(i) * 1.0/float(iCount) * 100.0)
#print self.allTables
def loadSqlDefs(self, liAllTables, i ):
try:
clt = cuon.Databases.cyr_load_table.cyr_load_table()
print 'Table0 = ', liAllTables[i]
if liAllTables[i].find('_history') < 0:
print 'Table = ', liAllTables[i]
self.allTables[liAllTables[i]] = clt.loadTable(liAllTables[i])
except Exception, param:
print 'ERROR SQL Defs'
print Exception
print param
print liAllTables[i]
def loadLocalSqlDefs(self, liAllTables, i ):
#print 'loadLocalSQL1 ', liAllTables
#print 'loadLocalSQL2 ', i
clt = cuon.Databases.cyr_load_table.cyr_load_table()
self.allTables[liAllTables[i]] = clt.loadLocalTable(liAllTables[i])
#print 'loadLocalSQL3 ', `self.allTables`
# Data-Menu
#-->
def on_addresses1_activate(self,event):
adr = cuon.Addresses.addresses.addresswindow(self.allTables)
def on_articles1_activate(self,event):
art = cuon.Articles.articles.articleswindow(self.allTables)
def on_bank1_activate(self,event):
bank = cuon.Bank.bank.bankwindow(self.allTables)
def on_bibliographic_activate(self, event):
import cuon.Biblio.biblio
bib = cuon.Biblio.biblio.bibliowindow(self.allTables)
def on_clients1_activate(self, event):
#print self.allTables
self.dicUser = self.oUser.getDicUser()
if event:
self.ClientID = 0
print 'cli = ', self.ClientID
cli = cuon.Clients.clients.clientswindow(self.allTables, self.ClientID, eClient = self.getWidget('eClient'))
def on_staff1_activate(self, event):
staff = cuon.Staff.staff.staffwindow(self.allTables)
# submenu contracts1
def on_leasing1_activate(self, event):
leasing = cuon.Leasing.leasing.leasingwindow(self.allTables)
# Action-Menu
def on_proposal1_activate(self,event):
ord = cuon.Proposal.proposal.proposalwindow(self.allTables)
def on_order1_activate(self,event):
ord = cuon.Order.order.orderwindow(self.allTables)
def on_stock1_activate(self,event):
ord = cuon.Stock.stock.stockwindow(self.allTables)
def on_mi_supportticket1_activate(self, event):
import cuon.SupportTicket.supportTicket
supt = cuon.SupportTicket.supportTicket.supportticketwindow(self.allTables)
def on_dms1_activate(self,event):
import cuon.DMS.dms
dms = cuon.DMS.dms.dmswindow(self.allTables)
# Finances
# Cash Account Book
def on_cash_account_book1_activate(self, event):
cab = cuon.Finances.cashAccountBook.cashAccountBookwindow(self.allTables)
def on_bookkeeping1_activate(self, event):
bk = cuon.Finances.bookkeeping.bookkeepingwindow(self.allTables)
def on_listOfInvoices1_activate(self, event):
loi = cuon.Finances.invoicebook.invoicebookwindow(self.allTables)
def on_analyse_costs1_activate(self, event ):
pass
# Extras
def on_expert_system1_activate(self, event):
import cuon.AI.ai
cai = cuon.AI.ai.aiwindow(self.allTables)
def on_project1_activate(self, event):
cpro = cuon.Project.project.projectwindow(self.allTables)
def on_web2_activate(self, event):
web2 = cuon.Web2.web2.web2window(self.allTables)
def on_stats1_activate(self, event):
import cuon.Stats.stats
stats = cuon.Stats.stats.statswindow(self.allTables)
def on_calendar_activate(self, event):
ccal = cuon.Calendar.calendar.calendarwindow(self.allTables)
def on_mindmap1_activate(self, event):
import cuon.Think.think
think = cuon.Think.think.thinkwindow(self.allTables)
# Tools
def on_addresses_notes_misc1_activate(self,event):
dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Misc'])
def on_addresses_notes_contacter1_activate(self,event):
dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Contacter'])
def on_addresses_notes_representant1_activate(self,event):
dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Rep'])
def on_addresses_notes_salesman1_activate(self,event):
dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Salesman'])
def on_update1_activate(self, event):
self.updateVersion()
def on_pref_user1_activate(self,event):
prefs = cuon.Preferences.preferences.preferenceswindow(self.allTables)
def on_prefs_finances_activate(self,event):
prefs = cuon.PrefsFinance.prefsFinance.prefsFinancewindow(self.allTables)
def on_webshop1_activate(self,event):
import cuon.WebShop.webshop
print 'Webshop'
prefs = cuon.WebShop.webshop.webshopwindow(self.allTables)
def updateVersion(self):
if self.startProgressBar():
self.generateSqlObjects()
self.writeAllGladeFiles()
self.stopProgressBar()
def on_import_data1_activate(self, event):
import cuon.Databases.import_generic1
imp1 = cuon.Databases.import_generic1.import_generic1(self.allTables)
def on_ExportData_activate(self, event):
print 'export Data'
import cuon.Databases.export_generic1
exp1 = cuon.Databases.export_generic1.export_generic1(self.allTables)
def on_sql_shell_activated(self, event):
sqlw = cuon.SQL_Shell.sql_shell.sql_shell()
def on_test1_activate(self, event):
import cuon.VTK.mainLogo
import cuon.VTK.test
te = cuon.VTK.test.test()
te.show()
# Logs
def on_logs_mail1_activate(self, event):
import cuon.Editor.editor
dicFile = {'TYPE':'FILE','NAME':os.path.normpath(self.td.cuon_path + '/' + 'cuonmail.log'),'Rights':'RO'}
em = cuon.Editor.editor.editorwindow(dicFile)
# help and info
def on_about1_activate(self, event):
about1 = self.getWidget('aCuon')
about1.show()
def on_onlinehelp_activate(self, event):
import cuon.Help.help
he1 = cuon.Help.help.helpwindow()
# hide about-info
def on_okAbout1_clicked(self, event):
about1 = self.getWidget('aCuon')
about1.hide()
# extendet Menu
def on_ext1_activate(self, event):
print 'ext1 menu activated !!!!!'
ext1 = eval(self.extMenucommand['ext1'])
try:
ext1.start()
except:
print 'No StartModule'
def on_ext2_activate(self, event):
print 'ext2 menu activated !!!!!'
ext2 = eval(self.extMenucommand['ext2'])
try:
ext2.start()
except:
print 'No StartModule'
def on_ext3_activate(self, event):
print 'ext3 menu activated !!!!!'
ext3 = eval(self.extMenucommand['ext3'])
try:
ext3.start()
except:
print 'No StartModule'
def on_ext4_activate(self, event):
print 'ext4 menu activated !!!!!'
print self.extMenucommand['ext4']
ext4 = eval(self.extMenucommand['ext4'])
try:
ext4.start()
except:
print 'No StartModule ext4'
def getNewClientSoftware(self, id):
cuonpath = '..'
self.infoMsg('C.U.O.N. will now try to load the new client version.')
shellcommand = 'rm ' + cuonpath + '/newclient'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
shellcommand = 'rm -R ' + cuonpath + '/iClient'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
sc = cuon.Databases.SingleCuon.SingleCuon(self.allTables)
sc.saveNewVersion(id)
shellcommand = 'cd '+cuonpath+' ; tar -xvjf newclient'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
#shellcommand = 'sh ' + cuonpath + '/iClient/iCuon '
#liStatus = commands.getstatusoutput(shellcommand)
#print shellcommand, liStatus
f = open('newversion','a')
f.write(`self.Version`)
f.close()
# Plugins
# Dia
shellcommand = 'if [ ! -d ~/.dia/python ] ; then mkdir ~/.dia/python ; fi '
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
shellcommand = 'cd ' + cuonpath +'/Plugins/Dia ; cp cuon_dia.py ~/.dia/python '
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
self.infoMsg('Update complete. Please restart C.U.O.N.')
def startT0(self):
try:
print 'First T0'
self.openDB()
oUser = self.loadObject('User')
self.closeDB()
if oUser:
#print 'T0 Client = ', oUser.client
if oUser.client > 0:
self.singleAddress = cuon.Addresses.SingleAddress.SingleAddress(self.allTables)
self.singlePartner = cuon.Addresses.SinglePartner.SinglePartner(self.allTables)
self.singleSchedul = cuon.Addresses.SingleScheduling.SingleScheduling(self.allTables)
self.startTiming()
except Exception, params:
print Exception, params
return True
def startTiming(self):
#'print start Timer'
# 60*1000 = 1 minute
time_contact = 2*60*1000
time_schedul = 15*60*1000
time_imap_dms = 30*60*1000
if self.t0:
gobject.source_remove(self.t0)
if self.t1:
gobject.source_remove(self.t1)
if self.t2:
gobject.source_remove(self.t2)
if self.t3:
gobject.source_remove(self.t3)
try:
if not self.t1:
self.startChecking()
self.t1 = gobject.timeout_add(time_contact, self.startChecking)
except Exception, params:
print Exception, params
try:
if not self.t2:
self.setSchedulTree()
self.t2 = gobject.timeout_add(time_schedul,self.setSchedulTree)
except Exception, params:
print Exception, params
try:
if not self.t3:
self.checkImapDMS()
self.t3 = gobject.timeout_add(time_imap_dms,self.checkImapDMS)
except Exception, params:
print Exception, params
def checkImapDMS(self):
#print '######################################### EMail #########################'
self.openDB()
oUser = self.loadObject('User')
self.closeDB()
imapD = cuon.E_Mail.imap_dms.imap_dms(self.allTables, oUser.getDicUser())
imapD.run()
#print '######################################### EMail END #########################'
return True
def startChecking(self):
#gtk.gdk.threads_enter()
try:
#print 'start scheduling'
#print self.Version
self.openDB()
oUser = self.loadObject('User')
liSchedul = self.loadObject('Scheduling')
self.closeDB()
#print `self.oUser.getDicUser()`
#print 'Client = ', oUser.getDicUser()['client']
liContacts = self.rpc.callRP('Address.getAllActiveContacts', oUser.getSqlDicUser())
#print liContacts
try:
if not liSchedul:
liSchedul = []
for contacts in liContacts:
ok = False
for oldSchedul in liSchedul:
if oldSchedul == contacts['id']:
ok = True
if not ok:
cuon.Addresses.contact.contactwindow(self.allTables, contacts['address_id'], contacts['partner_id'])
liSchedul.append(contacts['id'])
except Exception, params:
print Exception, params
self.openDB()
self.saveObject('Scheduling', liSchedul)
self.closeDB()
#cuon.Addresses.contact.contactwindow(self.allTables)
finally:
# print 'Ende'
pass
return True
#gtk.gdk.threads_leave()
#self.startTimer(10)
def on_rbScheduls_activate(self, event):
print 'rbScheduls clicked'
self.setSchedulTree()
def disconnectTree(self):
try:
self.getWidget('treeSchedul').get_selection().disconnect(self.connectTreeId)
except:
pass
def connectTree(self):
try:
self.connectTreeId = self.getWidget('treeSchedul').get_selection().connect("changed", self.tree_select_callback)
except:
pass
def tree_select_callback(self, treeSelection):
listStore, iter = treeSelection.get_selected()
print listStore,iter
if listStore and len(listStore) > 0:
row = listStore[0]
else:
row = -1
if iter != None:
sNewId = listStore.get_value(iter, 0)
print sNewId
try:
newID = int(sNewId[sNewId.find('###')+ 3:])
self.setDateValues(newID)
except:
pass
#self.fillEntries(newId)
def on_treeSchedul_row_activated(self, event):
print 'event'
self.on_bGotoAddress_clicked(event)
def setSchedulTree(self):
self.openDB()
oUser = self.loadObject('User')
self.closeDB()
# Data
sChoice = 'All'
if self.getWidget('rbSchedulsNew').get_active():
sChoice = 'New'
elif self.getWidget('rbSchedulsCancel').get_active():
sChoice = 'Cancel'
elif self.getWidget('rbSchedulsActualWeek').get_active():
sChoice = 'actualWeek'
print 'sChoice = ', sChoice
liDates, newHash = self.rpc.callRP('Address.getAllActiveSchedul', oUser.getSqlDicUser(),'Name','All', sChoice, self.schedulHash1)
#print 'lidates = ', liDates
#print 'newHash = ', newHash
if liDates == ['NO_NEW_DATA']:
print 'liDates = no Data'
return True
# new data arrived, go on
#liststore = gtk.ListStore(str)
self.schedulHash1 = newHash
self.disconnectTree()
treeview = self.getWidget('treeSchedul')
#treeview.set_model(liststore)
#renderer = gtk.CellRendererText()
#column = gtk.TreeViewColumn("Scheduls", renderer, text=0)
#treeview.append_column(column)
treestore = gtk.TreeStore(object)
treestore = gtk.TreeStore(str)
## renderer = gtk.CellRendererText()
##
## column = gtk.TreeViewColumn("Zweite Spalte", renderer, text=0)
## treeview.append_column(column)
treeview.set_model(treestore)
print 'Schedul by names: ', liDates
if liDates:
lastRep = None
lastSalesman = None
Schedulname = None
lastSchedulname = None
iter = treestore.append(None,[_('Names')])
iter2 = None
iter3 = None
for oneDate in liDates:
Schedulname = oneDate['schedul_name']
if lastSchedulname != Schedulname:
lastSchedulname = Schedulname
iter2 = treestore.insert_after(iter,None,[lastSchedulname])
sTime = self.getTimeString(oneDate['time_begin'] )
sTime2 = self.getTimeString(oneDate['time_end'] )
iter3 = treestore.insert_after(iter2,None,[oneDate['date'] +'--' + sTime + '-' +sTime2 +', ' + oneDate['a_lastname'] + ', ' + oneDate['a_city'] + ' ###' + `oneDate['id']`])
## try:
## iter = treestore.append(None,['Names'])
## iter2 = treestore.insert_after(iter,None,['jhamel'])
## iter3 = treestore.insert_after(iter2,None,['termin1'])
## iter = treestore.append(None,['Scheduls'])
## iter2 = treestore.insert_after(iter,None,['date'])
## iter3 = treestore.insert_after(iter2,None,['termin1'])
## except Exception,params:
## print Exception,params
##
#liDates, self.schedulHash2 = self.rpc.callRP('Address.getAllActiveSchedul', oUser.getSqlDicUser(),'Schedul','All',sChoice)
#liTest.sort(key=(lambda x: (x['test1'], lambda x: x['testA']) ))
liDates.sort(key=(lambda x: (x['date_norm'], x['schedul_name'], x['time_begin'] )), reverse = True)
#print 'Schedul by schedul_date 2 : ', liDates
if liDates:
lastRep = None
lastSalesman = None
Schedulname = None
lastSchedulname = None
iter = treestore.append(None,[_('Schedul')])
iter2 = None
iter3 = None
for oneDate in liDates:
Schedulname = oneDate['date']
if lastSchedulname != Schedulname:
lastSchedulname = Schedulname
iter2 = treestore.insert_after(iter,None,[lastSchedulname])
sTime = self.getTimeString(oneDate['time_begin'] )
sTime2 = self.getTimeString(oneDate['time_end'] )
iter3 = treestore.insert_after(iter2,None,[oneDate['schedul_name'] +'--' + sTime + '-' +sTime2 +', ' + oneDate['a_lastname'] + ', ' + oneDate['a_city'] +' ###' + `oneDate['id']`])
# reps and Saleman
# #liDates, self.schedulHash3 = self.rpc.callRP('Address.getAllActiveSchedul', oUser.getSqlDicUser(),'rep_salesman','All', sChoice)
#liDates.sort(key=(lambda x: (x['date_norm'], lambda x: x['rep_lastname'], lambda x: x['salesman_lastname'], lambda x: x['date_norm'])), reverse = True)
liDates.sort(key=(lambda x: (x['salesman_lastname'], x['rep_lastname'], x['date_norm']) ), reverse = True)
#print 'Schedul by names: 3', liDates
if liDates and liDates != ['NONE']:
lastRep = None
lastSalesman = None
Schedulname = None
lastSchedulname = None
iter = treestore.append(None,[_('Salesman')])
iter2 = None
iter3 = None
for oneDate in liDates:
Schedulname = oneDate['schedul_name']
if lastSchedulname != Schedulname:
lastSchedulname = Schedulname
iter2 = treestore.insert_after(iter,None,[lastSchedulname])
sTime = self.getTimeString(oneDate['time_begin'] )
sTime2 = self.getTimeString(oneDate['time_end'] )
iter3 = treestore.insert_after(iter2,None,[oneDate['date'] +'--' + sTime + '-' +sTime2 +', ' + oneDate['a_lastname'] + ', ' + oneDate['a_city'] + ' ###' + `oneDate['id']`])
treeview.show()
self.connectTree()
return True
def setDateValues(self, id):
widgetTVAddress = self.getWidget('tvAddress')
widgetTVPartner = self.getWidget('tvPartner')
widgetEShortRemark = self.getWidget('eShortRemark')
widgetTvEvent = self.getWidget('tvEvent')
self.singleSchedul.load(id)
partnerid = self.singleSchedul.getPartnerID()
self.singlePartner.load(partnerid)
addressid = self.singlePartner.getAddressID()
self.singleAddress.load(addressid)
print partnerid
print addressid
s = self.singleSchedul.getShortRemark()
print 's=', s
if s:
widgetEShortRemark.set_text(s)
else:
widgetEShortRemark.set_text('')
s = self.singleSchedul.getNotes()
print 's=', s
if s:
self.add2Textbuffer(widgetTvEvent,s,'Overwrite')
else:
self.add2Textbuffer(widgetTvEvent,' ','Overwrite')
s = self.singleAddress.getMailAddress()
if s:
self.add2Textbuffer(widgetTVAddress,s,'Overwrite')
else:
self.add2Textbuffer(widgetTVAddress,' ','Overwrite')
s = self.singlePartner.getMailAddress()
if s:
self.add2Textbuffer(widgetTVPartner,s,'Overwrite')
else:
self.add2Textbuffer(widgetTVPartner,' ','Overwrite')
def on_bGotoAddress_clicked(self, event):
if self.singleAddress.ID > 0:
adr = cuon.Addresses.addresses.addresswindow(self.allTables, addrid = self.singleAddress.ID)
def on_bChat_clicked(self, event):
print self.dicUser
shellcommand = shlex.split(self.dicUser['Communications']['textChat'] )
liStatus = subprocess.Popen(shellcommand)
#liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
def on_b3DChat_clicked(self, event):
shellcommand = shlex.split(self.dicUser['Communications']['3DChat'])
liStatus = subprocess.Popen(shellcommand)
#liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
def on_bEmail_clicked(self, event):
shellcommand = shlex.split(self.dicUser['Communications']['emailPrg'])
liStatus = subprocess.Popen(shellcommand)
#liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
#def startTimer(self, seconds):
# self.t1 = threading.Timer(seconds, self.startChecking)
# self.t1.start()
def on_eClient_changed(self, event):
''' client ID changed '''
try:
dt = cuon.DMS.documentTools.documentTools()
sFile = dt.load_mainwindow_logo(self.allTables)
if sFile:
print "image found"
logo = self.getWidget("company_logo")
#
# newIm = Image.fromstring('RGB',[1024, 1024], bz2.decompress( image))
# newIm.thumbnail([208,208])
# sFile = self.dicUser['prefPath']['tmp'] + 'cuon_mainwindow_logo.png'
# save(sFile)
print 'sFile = ', sFile
pixbuf = gtk.gdk.pixbuf_new_from_file(sFile)
scaled_buf = pixbuf.scale_simple(208,208,gtk.gdk.INTERP_BILINEAR)
logo.set_from_pixbuf(scaled_buf)
logo.show()
except:
pass
#logo.set_from_file(sFile)
def on_onlineNews_activate(self, event):
self.winNews.remove(self.swMap)
self.mapmoz = None
self.mapmoz = moz.MozEmbed()
self.viewMap = gtk.Viewport()
self.swMap = gtk.ScrolledWindow()
self.viewMap.add(self.mapmoz)
self.swMap.add(self.viewMap)
self.winNews.add(self.swMap)
print 'mapmoz = ', self.mapmoz
if self.mapmoz:
if self.dicUser['Locales'].lower() == 'de':
sUrl = 'http://cuon.org/Cuon/news.html'
else:
sUrl = 'http://cuon.org/en_Cuon/news.html'
print sUrl
self.mapmoz.load_url(sUrl)
#self.mapmoz.set_size_request(816,600)
self.viewMap.show()
self.swMap.show()
self.mapmoz.show()
self.winNews.show_all()
def closeOnlineNews(self, event, data=None):
self.winNews.hide()
def startMain(self, sStartType, sDebug,sLocal='NO', Username='EMPTY', PASSWORD='Test', ClientID=0):
#ML = cuon.VTK.mainLogo.mainLogo()
#ML.startLogo()
self.ClientID = ClientID
if sDebug:
self.sDebug = sDebug
else:
self.sDebug = 'NO'
if sStartType == 'server':
print 'Server-Modus'
td = typedefs_server()
# create widget tree ...
self.gladeName = '/usr/share/cuon/glade/cuon.glade2'
self.loadGladeFile(self.gladeName)
else:
id, version = self.rpc.callRP('Database.getLastVersion')
print 'Version', version
print 'id', id
## self.openDB()
## version = self.loadObject('ProgramVersion')
## self.closeDB()
##
print 'Version:' + str(version)
print self.Version['Major'], version['Major']
print self.Version['Minor'], version['Minor']
print self.Version['Rev'], version['Rev']
print self.Version, version
self.openDB()
oUser = self.loadObject('User')
if not oUser:
oUser = cuon.User.user.User()
oUser.client = 0
self.saveObject('User',oUser)
self.closeDB()
if not version:
print 'no Version, please inform Cuon-Administrator'
sys.exit(0)
if self.rpc.callRP('Database.checkVersion', self.Version, version) == 'Wrong':
print 'version mismatch'
print 'load new version of pyCuon'
self.getNewClientSoftware(id)
cuonpath = '..'
shellcommand = 'rm ' + cuonpath + '/cuonObjects'
liStatus = commands.getstatusoutput(shellcommand)
print shellcommand, liStatus
self.openDB()
version = self.saveObject('newClientVersion',True)
self.closeDB()
sys.exit(0)
self.openDB()
newClientExist = self.loadObject('newClientVersion')
self.closeDB()
if newClientExist:
self.updateVersion()
self.openDB()
self.saveObject('ProgramVersion', self.Version)
version = self.saveObject('newClientVersion',False)
self.closeDB()
version = self.rpc.callRP('Database.getLastVersion')
print 'Version', version
if sLocal != 'NO' and self.rpc.callRP('Database.checkVersion', self.Version, version[1]) == 'Wrong':
self.getNewClientSoftware(id)
sys.exit(0)
# create widget tree ...
# self.gladeName = td.main_glade_name
self.loadGlade('cuon.xml','window1')
self.win1 = self.getWidget("window1")
self.win1.connect("delete_event", self.delete_event)
self.win1.connect("destroy", self.destroy)
# Online news
self.winNews= self.getWidget('OnlineNews')
self.winNews.connect("delete_event", self.closeOnlineNews)
self.swMap = self.getWidget('swOnlineNews')
# Menu-items
self.initMenuItemsMain()
self.disableAllMenuItems()
self.addEnabledMenuItems('login','logout1')
self.addEnabledMenuItems('login','data')
self.addEnabledMenuItems('login','action1')
self.addEnabledMenuItems('login','accounting1')
self.addEnabledMenuItems('login','extras')
self.addEnabledMenuItems('login','tools')
self.addEnabledMenuItems('serverMode','databases1')
self.addEnabledMenuItems('user','login1')
self.addEnabledMenuItems('user','tools')
self.addEnabledMenuItems('user','update1')
self.disableMenuItem('login')
self.disableMenuItem('serverMode')
self.enableMenuItem('user')
sTitle = self.getWidget('window1').get_title() + self.sTitle
self.setTitle('window1',sTitle)
self.openDB()
oUser = self.loadObject('User')
if not oUser:
oUser = cuon.User.user.User()
oUser.client = 0
self.saveObject('User',oUser)
self.saveObject('Scheduling', [])
self.closeDB()
# set initial columns
treeview = self.getWidget('treeSchedul')
#treeview.set_model(liststore)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Scheduls", renderer, text=0)
treeview.append_column(column)
self.t0 = gobject.timeout_add(2000, self.startT0)
if Username != "empty":
print "Username = ", Username
self.activateClick("login1")
def gtk_main_quit(self):
if self.t1:
gobject.source_remove(self.t1)
gtk.main_quit()
#gtk.gdk.threads_init()
def getConfigOption(cpParser, section, option):
value = None
if cpParser.has_option(section,option):
value = cpParser.get(section, option)
print 'getConfigOption', section + ', ' + option + ' = ' + value
if not value:
value = " "
return value
sStartType = 'client'
sDebug = 'NO'
sLocal = 'NO'
print sys.argv
# Args:
# 1 server http://host:port
# 2 client/server
# 3 Debug = ON/OFF
# 4 Path to Locale/ default
# 5 cuon_path
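# Illustrative invocation (server URL, locale path and cuon_path below are
# placeholder values, not shipped defaults). Positions 6-9, read further down,
# optionally override the GUI name, user name, password and client id:
#
#   python Cuon.py http://cuonserver:7080 client NO /usr/share/locale /home/user/cuon
#
# For the server URL (arg 1), cuon_path (arg 5) and GUI name (arg 6), passing
# 'NO' keeps the value taken from cuon.ini.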
try:
cpParser = ConfigParser.ConfigParser()
sFile = 'cuon.ini'
f = open(sFile)
cpParser.readfp(f)
f.close()
except:
pass
Description = None
WorkingDir= 'NO'
Host = None
Port = None
Proto = None
sStartType = 'client'
sLocal = 'NO'
sDebug = 'NO'
AlternateGui = 'LINUX-Standard'
Username = "EMPTY"
PASSWORD = "TEST"
ClientID = 0
sSect = 'Client'
Description = getConfigOption(cpParser, sSect,'DESCRIPTION')
WorkingDir = getConfigOption(cpParser, sSect,'WORKINGDIR')
Host = getConfigOption(cpParser, sSect,'HOST')
Port = getConfigOption(cpParser, sSect,'PORT')
Proto = getConfigOption(cpParser, sSect,'PROTOCOL')
sStartType = getConfigOption(cpParser, sSect,'TYPE')
sLocal = WorkingDir + getConfigOption(cpParser, sSect,'LOCALE')
sDebug = getConfigOption(cpParser, sSect,'DEBUG')
AlternateGui = getConfigOption(cpParser, sSect,'ALTERNATEGUI')
Username = getConfigOption(cpParser, sSect, "USERNAME").strip()
PASSWORD = getConfigOption(cpParser, sSect, "PASSWORD").strip()
try:
ClientID = int(getConfigOption(cpParser, sSect, "CLIENT_ID"))
except:
ClientID = 0
if not Username or not PASSWORD or not ClientID:
Username = "EMPTY"
PASSWORD = "TEST"
ClientID = 0
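# A minimal illustrative cuon.ini [Client] section matching the options read
# above (all values are made-up examples, not shipped defaults):
#
#   [Client]
#   DESCRIPTION = Demo client
#   WORKINGDIR = /home/user/cuon
#   HOST = cuonserver
#   PORT = 7080
#   PROTOCOL = http
#   TYPE = client
#   LOCALE = /locale
#   DEBUG = NO
#   ALTERNATEGUI = NO
#   USERNAME = demo
#   PASSWORD = secret
#   CLIENT_ID = 1
#
# Note that sLocal is built as WORKINGDIR + LOCALE, so LOCALE is a path
# fragment appended to the working directory rather than a language code.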
print "AlternateGui = ", AlternateGui
if not AlternateGui or AlternateGui == 'NO' :
AlternateGui = 'LINUX-Standard'
if len(sys.argv) > 4:
if len(sys.argv[4]) > 1:
sLocal = sys.argv[4]
if len(sys.argv) > 3:
if len(sys.argv[3]) > 1:
sDebug = sys.argv[3]
if len(sys.argv) > 2:
if len(sys.argv[2]) > 1:
sStartType = sys.argv[2]
print sStartType
if sStartType == 'server':
td = cuon.TypeDefs.typedefs_server.typedefs_server()
else:
td = cuon.TypeDefs.typedefs.typedefs()
td.SystemName = AlternateGui
td.cuon_path = WorkingDir
td.server = Proto.strip() +'://' + Host.strip() +':' + Port.strip()
if len(sys.argv) > 1:
if len(sys.argv[1]) > 1:
if sys.argv[1] != 'NO':
td.server = sys.argv[1]
print 'td-server =', td.server
if len(sys.argv) > 5:
if len(sys.argv[5]) > 1:
if sys.argv[5] != 'NO':
td.cuon_path = sys.argv[5]
print 'td.cuon_path =', td.cuon_path
if len(sys.argv) > 6:
if len(sys.argv[6]) > 1:
if sys.argv[6] != 'NO':
td.SystemName = sys.argv[6]
print 'td.System =', td.SystemName
else:
td.SystemName = 'LINUX-Standard'
else:
td.SystemName = 'LINUX-Standard'
else:
td.SystemName = AlternateGui
print 'now -> ', td.SystemName
if len(sys.argv) > 7:
if len(sys.argv[7]) > 1:
Username = sys.argv[7]
print 'Username =', Username
print len(sys.argv)
if len(sys.argv) > 8:
if len(sys.argv[8]) > 1:
PASSWORD = sys.argv[8]
print 'password =', PASSWORD
if len(sys.argv) > 9:
if len(sys.argv[9]) > 0:
ClientID = int(sys.argv[9].strip())
print 'clientID =', ClientID
# set some pathes
try:
print 'WorkingDir', WorkingDir
if not os.path.exists(WorkingDir + '/cuon_data'):
print 'make dir cuon_data'
os.mkdir(WorkingDir +'/cuon_data')
if not os.path.exists(WorkingDir +'/cuon_data/dms'):
print 'make dir cuon_data/dms'
os.mkdir(WorkingDir +'/cuon_data/dms')
if not os.path.exists(WorkingDir +'/cuon_data/import'):
print 'make dir cuon_data/import'
os.mkdir(WorkingDir +'/cuon_data/import')
if not os.path.exists(WorkingDir +'/cuon_data/export'):
print 'make dir cuon_data/export'
os.mkdir(WorkingDir +'/cuon_data/export')
except Exception, params:
print Exception, params
d = cuon.Databases.dumps.dumps(td)
d.openDB()
d.saveObject('td', td)
d.closeDB()
if sLocal == 'NO':
DIR = '/usr/share/locale'
else:
DIR = sLocal
#locale.setlocale (locale.LC_ALL, '')
APP = 'cuon'
gettext.bindtextdomain (APP, DIR)
gettext.textdomain (APP)
gettext.install (APP, DIR, unicode=1)
gtk.glade.bindtextdomain(APP,DIR)
gtk.glade.textdomain(APP)
print _('Debug by C.U.O.N. = ' ), sDebug
m = MainWindow(sStartType)
m.startMain(sStartType, sDebug,sLocal, Username, PASSWORD, ClientID)
#profile.run('m.startMain(sStartType, sDebug,sLocal)','cuonprofile')
# Import Psyco if available
#try:
# import psyco
#psyco.full()
#print ' start psyco'
#except ImportError:
# print 'no psyco found'
#gtk.gdk.threads_enter()
gtk.main()
#gtk.gdk.threads_leave()
#gtk.main()
| BackupTheBerlios/cuon-svn | cuon_client/Cuon.py | Python | gpl-3.0 | 83,624 | ["VTK"] | 0ca5b9b6d59bb7b20cafedf510c0fd68b8de681760f7696cfdcabe51da746d4d |
# Copyright 2000-2004 Brad Chapman.
# Copyright 2001 Iddo Friedberg.
# Copyright 2007-2016 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes for generic sequence alignment.
Contains classes to deal with generic sequence alignment stuff not
specific to a particular program or format.
"""
from __future__ import print_function
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import Alphabet
class Alignment(object):
"""Represent a set of alignments (DEPRECATED).
This is a base class to represent alignments, which can be subclassed
to deal with an alignment in a specific format.
With the introduction of the MultipleSeqAlignment class in Bio.Align,
this base class is deprecated and is likely to be removed in future
releases of Biopython.
"""
def __init__(self, alphabet):
"""Initialize a new Alignment object.
Arguments:
- alphabet - The alphabet to use for the sequence objects that are
created. This alphabet must be a gapped type.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align.Generic import Alignment
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> print(align)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 3 rows and 12 columns
ACTGCTAGCTAG Alpha
ACT-CTAGCTAG Beta
ACTGCTAGATAG Gamma
"""
import warnings
import Bio
warnings.warn("With the introduction of the MultipleSeqAlignment "
"class in Bio.Align, this base class is deprecated "
"and is likely to be removed in a future release of "
"Biopython.", Bio.BiopythonDeprecationWarning)
if not (isinstance(alphabet, Alphabet.Alphabet) or
isinstance(alphabet, Alphabet.AlphabetEncoder)):
raise ValueError("Invalid alphabet argument")
self._alphabet = alphabet
# hold everything at a list of SeqRecord objects
self._records = []
def _str_line(self, record, length=50):
"""Returns a truncated string representation of a SeqRecord (PRIVATE).
This is a PRIVATE function used by the __str__ method.
"""
if record.seq.__class__.__name__ == "CodonSeq":
if len(record.seq) <= length:
return "%s %s" % (record.seq, record.id)
else:
return "%s...%s %s" \
% (record.seq[:length - 3], record.seq[-3:], record.id)
else:
if len(record.seq) <= length:
return "%s %s" % (record.seq, record.id)
else:
return "%s...%s %s" \
% (record.seq[:length - 6], record.seq[-3:], record.id)
def __str__(self):
"""Returns a multi-line string summary of the alignment.
This output is intended to be readable, but large alignments are
shown truncated. A maximum of 20 rows (sequences) and 50 columns
are shown, with the record identifiers. This should fit nicely on a
single screen. e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align.Generic import Alignment
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> print(align)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 3 rows and 12 columns
ACTGCTAGCTAG Alpha
ACT-CTAGCTAG Beta
ACTGCTAGATAG Gamma
See also the alignment's format method.
"""
rows = len(self._records)
lines = ["%s alignment with %i rows and %i columns"
% (str(self._alphabet), rows, self.get_alignment_length())]
if rows <= 20:
lines.extend(self._str_line(rec) for rec in self._records)
else:
lines.extend(self._str_line(rec) for rec in self._records[:18])
lines.append("...")
lines.append(self._str_line(self._records[-1]))
return "\n".join(lines)
def __repr__(self):
"""Returns a representation of the object for debugging.
The representation cannot be used with eval() to recreate the object,
which is usually possible with simple python objects. For example:
<Bio.Align.Generic.Alignment instance (2 records of length 14,
SingleLetterAlphabet()) at a3c184c>
The hex string is the memory address of the object, see help(id).
This provides a simple way to visually distinguish alignments of
the same size.
"""
# A doctest for __repr__ would be nice, but __class__ comes out differently
# if run via the __main__ trick.
return "<%s instance (%i records of length %i, %s) at %x>" % \
(self.__class__, len(self._records),
self.get_alignment_length(), repr(self._alphabet), id(self))
# This version is useful for doing eval(repr(alignment)),
# but it can be VERY long:
# return "%s(%s, %s)" \
# % (self.__class__, repr(self._records), repr(self._alphabet))
def format(self, format):
"""Returns the alignment as a string in the specified file format.
The format should be a lower case string supported as an output
format by Bio.AlignIO (such as "fasta", "clustal", "phylip",
"stockholm", etc), which is used to turn the alignment into a
string.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align.Generic import Alignment
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> print(align.format("fasta"))
>Alpha
ACTGCTAGCTAG
>Beta
ACT-CTAGCTAG
>Gamma
ACTGCTAGATAG
<BLANKLINE>
>>> print(align.format("phylip"))
3 12
Alpha ACTGCTAGCT AG
Beta ACT-CTAGCT AG
Gamma ACTGCTAGAT AG
<BLANKLINE>
For Python 2.6, 3.0 or later see also the built in format() function.
"""
# See also the __format__ added for Python 2.6 / 3.0, PEP 3101
# See also the SeqRecord class and its format() method using Bio.SeqIO
return self.__format__(format)
def __format__(self, format_spec):
"""Returns the alignment as a string in the specified file format.
This method supports the python format() function added in
Python 2.6/3.0. The format_spec should be a lower case
string supported by Bio.AlignIO as an output file format.
See also the alignment's format() method."""
if format_spec:
from Bio._py3k import StringIO
from Bio import AlignIO
handle = StringIO()
AlignIO.write([self], handle, format_spec)
return handle.getvalue()
else:
# Follow python convention and default to using __str__
return str(self)
def get_all_seqs(self):
"""Return all of the sequences involved in the alignment (DEPRECATED).
The return value is a list of SeqRecord objects.
This method is deprecated, as the Alignment object itself now offers
much of the functionality of a list of SeqRecord objects (e.g.
iteration or slicing to create a sub-alignment). Instead use the
Python builtin function list, i.e. my_list = list(my_align)
"""
import warnings
import Bio
warnings.warn("This method is deprecated, since the alignment object"
"now acts more like a list. Instead of calling "
"align.get_all_seqs() you can use list(align)",
Bio.BiopythonDeprecationWarning)
return self._records
def __iter__(self):
"""Iterate over alignment rows as SeqRecord objects.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align.Generic import Alignment
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> for record in align:
... print(record.id)
... print(record.seq)
Alpha
ACTGCTAGCTAG
Beta
ACT-CTAGCTAG
Gamma
ACTGCTAGATAG
"""
return iter(self._records)
def get_seq_by_num(self, number):
"""Retrieve a sequence by row number (DEPRECATED).
Returns:
- A Seq object for the requested sequence.
Raises:
- IndexError - If the specified number is out of range.
NOTE: This is a legacy method. In new code where you need to access
the rows of the alignment (i.e. the sequences) consider iterating
over them or accessing them as SeqRecord objects.
"""
import warnings
import Bio
warnings.warn("This is a legacy method and is likely to be removed "
"in a future release of Biopython. In new code where "
"you need to access the rows of the alignment (i.e. the "
"sequences) consider iterating over them or accessing "
"them as SeqRecord objects.",
Bio.BiopythonDeprecationWarning)
return self._records[number].seq
def __len__(self):
"""Returns the number of sequences in the alignment.
Use len(alignment) to get the number of sequences (i.e. the number of
rows), and alignment.get_alignment_length() to get the length of the
longest sequence (i.e. the number of columns).
This is easy to remember if you think of the alignment as being like a
list of SeqRecord objects.
"""
return len(self._records)
def get_alignment_length(self):
"""Return the maximum length of the alignment.
All objects in the alignment should (hopefully) have the same
length. This function will go through and find this length
by finding the maximum length of sequences in the alignment.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align.Generic import Alignment
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> align.get_alignment_length()
12
If you want to know the number of sequences in the alignment,
use len(align) instead:
>>> len(align)
3
"""
max_length = 0
for record in self._records:
if len(record.seq) > max_length:
max_length = len(record.seq)
return max_length
def add_sequence(self, descriptor, sequence, start=None, end=None,
weight=1.0):
"""Add a sequence to the alignment.
This doesn't do any kind of alignment, it just adds in the sequence
object, which is assumed to be prealigned with the existing
sequences.
Arguments:
- descriptor - The descriptive id of the sequence being added.
This will be used as the resulting SeqRecord's
.id property (and, for historical compatibility,
also the .description property)
- sequence - A string with sequence info.
- start - You can explicitly set the start point of the sequence.
This is useful (at least) for BLAST alignments, which can
just be partial alignments of sequences.
- end - Specify the end of the sequence, which is important
for the same reason as the start.
- weight - The weight to place on the sequence in the alignment.
By default, all sequences have the same weight. (0.0 =>
no weight, 1.0 => highest weight)
"""
new_seq = Seq(sequence, self._alphabet)
# We are now effectively using the SeqRecord's .id as
# the primary identifier (e.g. in Bio.SeqIO) so we should
# populate it with the descriptor.
# For backwards compatibility, also store this in the
# SeqRecord's description property.
new_record = SeqRecord(new_seq,
id=descriptor,
description=descriptor)
# hack! We really need to work out how to deal with annotations
# and features in biopython. Right now, I'll just use the
# generic annotations dictionary we've got to store the start
# and end, but we should think up something better. I don't know
# if I'm really a big fan of the LocatableSeq thing they've got
# in BioPerl, but I'm not positive what the best thing to do on
# this is...
if start:
new_record.annotations['start'] = start
if end:
new_record.annotations['end'] = end
# another hack to add weight information to the sequence
new_record.annotations['weight'] = weight
self._records.append(new_record)
def get_column(self, col):
"""Returns a string containing a given column.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align.Generic import Alignment
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> align.get_column(0)
'AAA'
>>> align.get_column(3)
'G-G'
"""
# TODO - Support negative indices?
col_str = ''
assert col >= 0 and col <= self.get_alignment_length()
for rec in self._records:
col_str += rec.seq[col]
return col_str
def __getitem__(self, index):
"""Access part of the alignment.
We'll use the following example alignment here for illustration:
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align.Generic import Alignment
>>> align = Alignment(Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> align.add_sequence("Delta", "ACTGCTTGCTAG")
>>> align.add_sequence("Epsilon", "ACTGCTTGATAG")
You can access a row of the alignment as a SeqRecord using an integer
index (think of the alignment as a list of SeqRecord objects here):
>>> first_record = align[0]
>>> print("%s %s" % (first_record.id, first_record.seq))
Alpha ACTGCTAGCTAG
>>> last_record = align[-1]
>>> print("%s %s" % (last_record.id, last_record.seq))
Epsilon ACTGCTTGATAG
You can also access use python's slice notation to create a sub-alignment
containing only some of the SeqRecord objects:
>>> sub_alignment = align[2:5]
>>> print(sub_alignment)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 3 rows and 12 columns
ACTGCTAGATAG Gamma
ACTGCTTGCTAG Delta
ACTGCTTGATAG Epsilon
This includes support for a step, i.e. align[start:end:step], which
can be used to select every second sequence:
>>> sub_alignment = align[::2]
>>> print(sub_alignment)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 3 rows and 12 columns
ACTGCTAGCTAG Alpha
ACTGCTAGATAG Gamma
ACTGCTTGATAG Epsilon
Or to get a copy of the alignment with the rows in reverse order:
>>> rev_alignment = align[::-1]
>>> print(rev_alignment)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 5 rows and 12 columns
ACTGCTTGATAG Epsilon
ACTGCTTGCTAG Delta
ACTGCTAGATAG Gamma
ACT-CTAGCTAG Beta
ACTGCTAGCTAG Alpha
Right now, these are the ONLY indexing operations supported. The use of
a second column based index is under discussion for a future update.
"""
if isinstance(index, int):
# e.g. result = align[x]
# Return a SeqRecord
return self._records[index]
elif isinstance(index, slice):
# e.g. sub_aling = align[i:j:k]
# Return a new Alignment using only the specified records.
# TODO - See Bug 2554 for changing the __init__ method
# to allow us to do this more cleanly.
sub_align = Alignment(self._alphabet)
sub_align._records = self._records[index]
return sub_align
elif len(index) == 2:
raise TypeError("Row and Column indexing is not currently supported,"
"but may be in future.")
else:
raise TypeError("Invalid index type.")
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
zjuchenyuan/BioWeb
|
Lib/Bio/Align/Generic.py
|
Python
|
mit
| 17,878
|
[
"BLAST",
"BioPerl",
"Biopython"
] |
d435d23eb2b6244ccd080579e50d075faf9e1d14b9eead2bceceeb06d3781afd
|
import json
import re
# See http://js.cytoscape.org/#style/node-body
ALLOWED_NODE_SHAPES = ['rectangle', 'roundrectangle', 'ellipse', 'triangle',
'pentagon', 'hexagon', 'heptagon', 'octagon', 'star',
'diamond', 'vee', 'rhomboid']
ALLOWED_NODE_BORDER_STYLES = ['solid', 'dotted', 'dashed', 'double']
ALLOWED_NODE_BACKGROUND_REPEAT = ['no-repeat', 'repeat-x', 'repeat-y', 'repeat']
ALLOWED_NODE_TEXT_TRANSFORM = ['none', 'uppercase', 'lowercase']
ALLOWED_NODE_TEXT_WRAP = ['none', 'wrap']
ALLOWED_TEXT_BACKROUND_SHAPE = ['rectangle', 'roundrectangle']
ALLOWED_TEXT_HALIGN = ['left', 'center', 'right']
ALLOWED_TEXT_VALIGN = ['top', 'center', 'bottom']
## See http://js.cytoscape.org/#style/labels
ALLOWED_TEXT_WRAP = ['wrap', 'none']
## See http://js.cytoscape.org/#style/edge-arrow
ALLOWED_ARROW_SHAPES = ['tee', 'triangle', 'triangle-tee', 'triangle-backcurve',
'square', 'circle', 'diamond', 'none']
## See http://js.cytoscape.org/#style/edge-line
ALLOWED_EDGE_STYLES = ['solid', 'dotted', 'dashed']
ALLOWED_ARROW_FILL = ['filled', 'hollow']
NODE_COLOR_ATTRIBUTES = ['background_color', 'border_color', 'color',
'text_outline_color', 'text_shadow_color',
'text_border_color']
EDGE_COLOR_ATTRIBUTES = ['line_color', 'source_arrow_color',
'mid_source_arrow_color', 'target_arrow_color',
'mid_target_arrow_color']
def verify_json(graph_json):
graph_json = json.loads(graph_json)
for node in graph_json["graph"]["nodes"]:
node = node["data"]
if "shape" in node:
shape = node["shape"].lower()
else:
shape = "ellipse"
if shape not in ALLOWED_NODE_SHAPES:
shape = "ellipse"
node["shape"] = shape
return json.dumps(graph_json)
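# Illustrative use of verify_json (the input graph is made up; "Cylinder" is
# not an allowed cytoscape.js node shape, so it is normalised to "ellipse"):
#
#   raw = '{"graph": {"nodes": [{"data": {"id": "n1", "shape": "Cylinder"}}], "edges": []}}'
#   cleaned = verify_json(raw)
#   json.loads(cleaned)["graph"]["nodes"][0]["data"]["shape"]  # -> "ellipse"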
def validate_clean_json(json_string):
"""
Validates JSON to see if all properties are consistent with API.
@param json_string: JSON string of the graph
@return: tuple of (cleaned graph JSON, error message or None)
"""
cleaned_json = json.loads(clean_graph_json(json_string))
if "graph" not in cleaned_json:
return "JSON of graph must have 'graph' property"
if "nodes" not in cleaned_json["graph"]:
return "JSON of graph must have 'nodes' property"
if not isinstance(cleaned_json["graph"]["nodes"], list):
return "Nodes property must contain an array"
if "edges" not in cleaned_json["graph"]:
return "JSON of graph must have 'edges' property"
if not isinstance(cleaned_json["graph"]["edges"], list):
return "Edges property must contain an array"
# Validate all node properties
nodes = cleaned_json["graph"]["nodes"]
error = validate_node_properties(nodes)
if error is not None:
return None, error
# Validate all edge properties
error = validate_edge_properties(cleaned_json["graph"]["edges"], nodes)
if error is not None:
return None, error
# Attach names to each edge so that every edge element can be addressed uniquely
cleaned_json = assign_edge_names(cleaned_json)
return cleaned_json, None
def validate_edge_properties(edges, nodes):
"""
Validates all edge properties.
@param edges: Array of edge objects (http://js.cytoscape.org)
"""
error = ""
edge_id = None
node_list = [node["data"]["id"] for node in nodes]
# Go through all edges to verify if edges contain valid properties
# recognized by CytoscapeJS
for edge in edges:
edge = edge["data"]
# Source and target properties are required; check for them first so the
# membership test below cannot raise a KeyError
if "source" not in edge or "target" not in edge:
return "All edges must have at least a source and target property. Please verify that all edges meet this requirement."
# Check if the source and target node of an edge exist in the JSON node list
if edge["source"] not in node_list or edge["target"] not in node_list:
return "For all edges source and target nodes should exist in node list"
# Check if source and target nodes are strings, integers or floats
if not (isinstance(edge["source"], (basestring, int, float)) and isinstance(edge["target"],
(basestring, int, float))):
return "Source and target nodes of the edge must be strings, integers or floats"
edge_id = "with source: " + str(edge["source"]) + "and target: " + str(edge["target"])
# If edge is directed, it must have a target_arrow_shape
if "directed" in edge and edge["directed"] == "true":
if "target_arrow_shape" not in edge:
return "Edge", edge_id, "must have a target_arrow_shape property if directed is set to true"
if "source_arrow_shape" in edge:
error += find_property_in_array("Edge", edge_id, edge, edge["source_arrow_shape"], ALLOWED_ARROW_SHAPES)
if "mid_source_arrow_shape" in edge:
error += find_property_in_array("Edge", edge_id, edge, edge["source_arrow_shape"], ALLOWED_ARROW_SHAPES)
if "target_arrow_shape" in edge:
error += find_property_in_array("Edge", edge_id, edge, edge["target_arrow_shape"], ALLOWED_ARROW_SHAPES)
if "mid_target_arrow_shape" in edge:
error += find_property_in_array("Edge", edge_id, edge, edge["mid_target_arrow_shape"], ALLOWED_ARROW_SHAPES)
if "line_style" in edge:
error += find_property_in_array("Edge", edge_id, edge, edge["line_style"], ALLOWED_EDGE_STYLES)
if "source_arrow_fill" in edge:
error += find_property_in_array("Edge", edge_id, edge, edge["source_arrow_fill"], ALLOWED_ARROW_FILL)
if "mid_source_arrow_fill" in edge:
error += find_property_in_array("Edge", edge_id, edge, edge["mid_source_arrow_fill"], ALLOWED_ARROW_FILL)
if "target_arrow_fill" in edge:
error += find_property_in_array("Edge", edge_id, edge, edge["target_arrow_fill"], ALLOWED_ARROW_FILL)
if "mid_target_arrow_fill" in edge:
error += find_property_in_array("Edge", edge_id, edge, edge["mid_target_arrow_fill"], ALLOWED_ARROW_FILL)
for attr in EDGE_COLOR_ATTRIBUTES:
if attr in edge:
error += check_color_hex(edge[attr])
if len(error) > 0:
return error
else:
return None
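# Hedged usage sketch (editor's addition; values are illustrative, not from the
# original module). validate_edge_properties returns None when all edges pass,
# otherwise an error string:
#
#     nodes = [{"data": {"id": "a"}}, {"data": {"id": "b"}}]
#     edges = [{"data": {"source": "a", "target": "b", "directed": "true",
#                        "target_arrow_shape": "triangle"}}]
#     validate_edge_properties(edges, nodes)   # -> None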
def validate_node_properties(nodes):
"""
Validates all node properties.
:param nodes: Array of node objects (http://js.cytoscape.org)
"""
unique_ids = set()
error = ""
# Go through all nodes to verify if the nodes contain valid properties
# recognized by CytoscapeJS
for node in nodes:
node = node["data"]
# Check that the node has an ID before inspecting it, so the isinstance
# check below cannot raise a KeyError
if "id" not in node:
return "All nodes must have a unique ID. Please verify that all nodes meet this requirement."
# Check the data type of the node ID, which should be an int, float or string
if not isinstance(node["id"], (basestring, int, float)):
return "All node IDs must be strings, integers or floats"
if node["id"] not in unique_ids:
unique_ids.add(node["id"])
else:
return "There are multiple nodes with ID: " + str(
node["id"]) + ". Please make sure all node IDs are unique."
# Checks shape of nodes to make sure it contains only legal shapes
if "shape" in node:
error += find_property_in_array("Node", node["id"], "shape", node["shape"], ALLOWED_NODE_SHAPES)
# If node contains a border-style property, check to make sure it is
# a legal value
if "border_style" in node:
error += find_property_in_array("Node", node["id"], "border_style", node["border_style"],
ALLOWED_NODE_BORDER_STYLES)
# If the node contains a border_blacken property, check to make sure
# its value lies in the range [-1, 1]
if "border_blacken" in node:
if not -1 <= node["border_blacken"] <= 1:
error += "Node: " + str(
node["id"]) + " contains illegal border_blacken value. Must be between [-1, 1]."
if "background_repeat" in node:
error += find_property_in_array("Node", node["id"], "background_repeat", node["background_repeat"],
ALLOWED_NODE_BACKGROUND_REPEAT)
if "text_transform" in node:
error += find_property_in_array("Node", node["id"], "text_transform", node["text_transform"],
ALLOWED_NODE_TEXT_TRANSFORM)
if "text_wrap" in node:
error += find_property_in_array("Node", node["id"], "text_wrap", node["text_wrap"], ALLOWED_NODE_TEXT_WRAP)
if "text_background_shape" in node:
error += find_property_in_array("Node", node["id"], "text_background_shape", node["text_background_shape"],
ALLOWED_NODE_SHAPES)
if "text_halign" in node:
error += find_property_in_array("Node", node["id"], "text_halign", node["text_halign"], ALLOWED_TEXT_HALIGN)
if "text_valign" in node:
error += find_property_in_array("Node", node["id"], "text_valign", node["text_valign"], ALLOWED_TEXT_VALIGN)
for attr in NODE_COLOR_ATTRIBUTES:
if attr in node:
error += check_color_hex(node[attr])
if len(error) > 0:
return error
else:
return None
def check_color_hex(color_code):
"""
Check the validity of the hexadecimal code of various node and edge color
related attributes.
This function returns an error if the hexadecimal code is not of the format
'#XXX' or '#XXXXXX', i.e. hexadecimal color code is not valid.
:param color_code: color code
"""
# if color name is given instead of hex code, no need to check its validity
if not color_code.startswith('#'):
return ""
valid = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', color_code)
if valid is None:
return color_code + ' is not a valid hex color code.'
else:
return ""
def find_property_in_array(elementType, key, prop, value, array):
"""
Goes through array to see if property is contained in the array.
:param elementType: Node or an Edge
:param key: ID of the node or edge being validated
:param prop: Name of the property being checked
:param value: Value of the property
:param array: Array of allowed values for the property
"""
if value not in array:
array_list = ",".join(array)
return elementType + " " + str(key) + " contains illegal value for property: " + prop + ". Value given for this property was: " + str(value) + ". Accepted values for property: " + prop + " are: [" + array_list + "]"
else:
return ""
def assign_edge_names(graph_json):
'''
Assigns each edge a name built from the names of the nodes it connects.
:param graph_json: JSON of graph
:return graph_json: JSON of graph having unique names for all edges
'''
ids = []
# Creates names for all of the edges by utilizing the source and target nodes
# The edge name has the form: source-target
for edge in graph_json['graph']['edges']:
# To make sure int and floats are also accepted as source and target nodes of an edge
source_node = str(edge['data']['source'])
target_node = str(edge['data']['target'])
edge['data']['name'] = source_node + '-' + target_node
# If the ID has not yet been seen (is unique), simply store the ID
# of that edge as source-target
if edge['data']['name'] not in ids:
ids.append(edge['data']['name'])
else:
# Otherwise if there are multiple edges with the same ID,
# append a number to the end of the ID so we can distinguish
# multiple edges having the same source and target.
# This needs to be done because HTML DOM needs unique IDs.
counter = 0
while edge['data']['name'] in ids:
counter += 1
edge['data']['name'] = edge['data']['name'] + str(counter)
ids.append(edge['data']['name'])
# Return JSON having all edges containing unique ID's
return graph_json
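# Hedged usage sketch (editor's addition): duplicate source/target pairs get a
# numeric suffix so that every edge name stays unique in the HTML DOM.
#
#     g = {"graph": {"edges": [
#         {"data": {"source": "a", "target": "b"}},
#         {"data": {"source": "a", "target": "b"}}]}}
#     assign_edge_names(g)
#     # first edge name  -> "a-b"
#     # second edge name -> "a-b1"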
# This file is a wrapper to communicate with sqlite3 database
# that does not need authentication for connection.
# It may be viewed as the controller to the database
def convert_json(original_json):
'''
Converts original_json that's used in Cytoscape Web
such that it is compatible with the new Cytoscape.js
See: http://cytoscape.github.io/cytoscape.js/
Original json structure used for Cytoscape Web:
{
"metadata": {
},
"graph": {
"data": {
"nodes": [
{ "id": "node1", "label": "n1", ... },
{ "id": "node2", "label": "n2", ... },
...
],
"edges": [
{ "id": "edge1", "label": "e1", ... },
{ "id": "edge2", "label": "e2", ... },
...
]
}
}
}
New json structure:
{
"metadata": {
},
"graph": {
"nodes": [
{"data": {"id": "node1", "content": "n1", ...}},
{"data": {"id": "node2", "content": "n2", ...}},
...
],
"edges": [
{"data": {"id": "edge1", "content": "e1", ...}},
{"data": {"id": "edge2", "content": "e2", ...}},
...
]
}
}
'''
# parse old json data
old_json = json.loads(original_json)
old_nodes = old_json['graph']['data']['nodes']
old_edges = old_json['graph']['data']['edges']
new_nodes, new_edges = [], []
# format node and edge data
for node in old_nodes:
# Used for backwards-compatibility since some JSON have label
# but new CytoscapeJS uses the content property
if 'label' in node:
node['content'] = node['label']
del node['label']
# If the node has any content inside of it, display that content; otherwise just make it an empty string
if 'content' not in node:
node['content'] = ""
new_nodes.append({"data": node})
for edge in old_edges:
new_edges.append({"data": edge})
# build the new json
new_json = {}
new_json['metadata'] = old_json['metadata']
new_json['graph'] = {}
new_json['graph']['nodes'] = new_nodes
new_json['graph']['edges'] = new_edges
return json.dumps(new_json, indent=4)
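# Hedged usage sketch (editor's addition): a minimal Cytoscape Web document and the
# shape of the converted output; values are illustrative.
#
#     old = json.dumps({"metadata": {},
#                       "graph": {"data": {"nodes": [{"id": "n1", "label": "n1"}],
#                                          "edges": []}}})
#     convert_json(old)
#     # -> nodes become [{"data": {"id": "n1", "content": "n1"}}], edges stay []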
def clean_graph_json(original_json_string):
'''
Converts original_json_string so that it is compatible with Cytoscape.js and GraphSpace's JSON format.
See: http://cytoscape.github.io/cytoscape.js/
Original json structure used for Cytoscape Web:
{
"metadata": {
},
"graph": {
"data": {
"nodes": [
{ "id": "node1", "label": "n1", ... },
{ "id": "node2", "label": "n2", ... },
...
],
"edges": [
{ "id": "edge1", "label": "e1", ... },
{ "id": "edge2", "label": "e2", ... },
...
]
}
}
}
New json structure:
{
"metadata": {
},
"graph": {
"nodes": [
{"data": {"id": "node1", "content": "n1", ...}},
{"data": {"id": "node2", "content": "n2", ...}},
...
],
"edges": [
{"data": {"id": "edge1", "content": "e1", ...}},
{"data": {"id": "edge2", "content": "e2", ...}},
...
]
}
}
'''
# parse old json data
old_json = json.loads(original_json_string)
if 'data' in old_json['graph']:
old_nodes = old_json['graph']['data']['nodes']
old_edges = old_json['graph']['data']['edges']
else:
old_nodes = [node['data'] for node in old_json['graph']['nodes']]
old_edges = [edge['data'] for edge in old_json['graph']['edges']]
new_nodes, new_edges = [], []
# format node and edge data
for node in old_nodes:
# Used for backwards-compatibility since some JSON have label
# but new CytoscapeJS uses the content property
if 'label' in node:
node['content'] = node['label']
del node['label']
# If the node has any content inside of it, display that content; otherwise just make it an empty string
if 'content' not in node:
node['content'] = ""
new_nodes.append({"data": node})
for edge in old_edges:
new_edges.append({"data": edge})
# build the new json
new_json = {}
new_json['metadata'] = old_json['metadata']
new_json['graph'] = {}
new_json['graph']['nodes'] = new_nodes
new_json['graph']['edges'] = new_edges
return json.dumps(new_json, indent=4)
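# Hedged usage sketch (editor's addition, not part of the original GraphSpace module).
# Runs only when the file is executed directly; the example graph is illustrative.
if __name__ == "__main__":
    example = json.dumps({
        "metadata": {"name": "demo"},
        "graph": {
            "nodes": [{"data": {"id": "a"}}, {"data": {"id": "b"}}],
            "edges": [{"data": {"source": "a", "target": "b"}}]
        }
    })
    cleaned, error = validate_clean_json(example)
    print(error if error else "graph JSON is valid")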
|
Murali-group/GraphSpace
|
applications/graphs/json_validator.py
|
Python
|
gpl-2.0
| 15,367
|
[
"Cytoscape"
] |
96b29a7b7944c466b392db5d5322892e74a1aa72696dacf00e47075d08ec4a66
|
############################################################################
##
## Copyright (C) 2006-2010 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
"""Modules for handling vtkRenderWindowInteractor events"""
from PyQt4 import QtCore, QtGui
from core.modules.basic_modules import String
from core.modules.vistrails_module import Module, NotCacheable
from core.modules.module_registry import get_module_registry
from core.modules.module_configure import StandardModuleConfigurationWidget
from core.modules.python_source_configure import PythonEditor
from core.vistrail.module_function import ModuleFunction, ModuleParam
import urllib
################################################################################
class vtkInteractionHandler(NotCacheable, Module):
"""
vtkInteractionHandler allows users to insert callback code for interacting
with the vtkRenderWindowInteractor InteractionEvent
"""
# Since vtkCommand is not wrapped in Python, we need to hardcode all event
# strings from vtkCommand.h
vtkEvents = [
'AnyEvent',
'DeleteEvent',
'StartEvent',
'EndEvent',
'RenderEvent',
'ProgressEvent',
'PickEvent',
'StartPickEvent',
'EndPickEvent',
'AbortCheckEvent',
'ExitEvent',
'LeftButtonPressEvent',
'LeftButtonReleaseEvent',
'MiddleButtonPressEvent',
'MiddleButtonReleaseEvent',
'RightButtonPressEvent',
'RightButtonReleaseEvent',
'EnterEvent',
'LeaveEvent',
'KeyPressEvent',
'KeyReleaseEvent',
'CharEvent',
'ExposeEvent',
'ConfigureEvent',
'TimerEvent',
'MouseMoveEvent',
'MouseWheelForwardEvent',
'MouseWheelBackwardEvent',
'ResetCameraEvent',
'ResetCameraClippingRangeEvent',
'ModifiedEvent',
'WindowLevelEvent',
'StartWindowLevelEvent',
'EndWindowLevelEvent',
'ResetWindowLevelEvent',
'SetOutputEvent',
'ErrorEvent',
'WarningEvent',
'StartInteractionEvent',
'InteractionEvent',
'EndInteractionEvent',
'EnableEvent',
'DisableEvent',
'CreateTimerEvent',
'DestroyTimerEvent',
'PlacePointEvent',
'PlaceWidgetEvent',
'CursorChangedEvent',
'ExecuteInformationEvent',
'RenderWindowMessageEvent',
'WrongTagEvent',
'StartAnimationCueEvent',
'AnimationCueTickEvent',
'EndAnimationCueEvent',
'VolumeMapperRenderEndEvent',
'VolumeMapperRenderProgressEvent',
'VolumeMapperRenderStartEvent',
'VolumeMapperComputeGradientsEndEvent',
'VolumeMapperComputeGradientsProgressEvent',
'VolumeMapperComputeGradientsStartEvent',
'WidgetModifiedEvent',
'WidgetValueChangedEvent',
'WidgetActivateEvent',
'ConnectionCreatedEvent',
'ConnectionClosedEvent',
'DomainModifiedEvent',
'PropertyModifiedEvent',
'UpdateEvent',
'RegisterEvent',
'UnRegisterEvent',
'UpdateInformationEvent']
def __init__(self):
Module.__init__(self)
self.observer = None
self.handler = None
self.shareddata = None
def compute(self):
""" compute() -> None
Actually compute nothing
"""
self.observer = self.force_get_input('Observer')
self.handler = self.force_get_input('Handler', '')
self.shareddata = self.force_get_input_list('SharedData')
if len(self.shareddata)==1:
self.shareddata = self.shareddata[0]
if self.observer:
source = urllib.unquote(self.handler)
observer = self.observer.vtkInstance
for e in vtkInteractionHandler.vtkEvents:
f = e[0].lower() + e[1:]
f = f.replace('Event', 'Handler')
source += ('\nif locals().has_key("%s"):\n' % f +
'\tobserver.AddObserver("%s", ' % e +
'self.eventHandler)\n')
exec(source)
if hasattr(self.observer.vtkInstance, 'PlaceWidget'):
self.observer.vtkInstance.PlaceWidget()
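# Hedged illustration (editor's addition): the 'Handler' input port is expected to
# carry URL-quoted Python source that defines functions named after vtk events,
# with the trailing 'Event' replaced by 'Handler'. A sketch of such a source:
#
#     def interactionHandler(obj, shareddata):
#         # called for every vtk InteractionEvent
#         print obj.GetInteractor().GetEventPosition()
#
# compute() scans vtkEvents above and registers self.eventHandler for every event
# whose corresponding *Handler function appears in the supplied source.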
def eventHandler(self, obj, event):
""" eventHandler(obj: vtkObject, event: str) -> None
A proxy for all vtk events to direct to the correct calls
"""
if self.handler!='':
source = urllib.unquote(self.handler)
f = event[0].lower() + event[1:]
f = f.replace('Event', 'Handler')
myGlobals = globals()
myGlobals.update({'self':self})
exec(source + ('\nif locals().has_key("%s"):\n' % f)+
('\t%s(obj, self.shareddata)' % f)) in myGlobals, locals()
def clear(self):
""" clear() -> None
Remove event handler so the object can be freed correctly
"""
# Remove all observers
if self.observer:
for e in vtkInteractionHandler.vtkEvents:
self.observer.vtkInstance.RemoveObservers(e)
Module.clear(self)
def repaintCells(self):
""" repaintCells() -> None
Redraw all cells on the current sheet
"""
from packages.spreadsheet.spreadsheet_controller \
import spreadsheetController
from packages.spreadsheet.spreadsheet_event \
import RepaintCurrentSheetEvent
spreadsheetController.postEventToSpreadsheet(RepaintCurrentSheetEvent())
class HandlerConfigurationWidget(StandardModuleConfigurationWidget):
"""
HandlerConfigurationWidget is similar to the PythonSource
configuration widget except that it doesn't allow adding/removing
ports. In this configuration widget, the user will enter their
Python code to handle a specific event
"""
def __init__(self, module, controller, parent=None):
""" HandlerConfigurationWidget(module: Module,
controller: VistrailController,
parent: QWidget)
-> HandlerConfigurationWidget
Setup the dialog to have a single python source editor and 2
buttons
"""
StandardModuleConfigurationWidget.__init__(self, module,
controller, parent)
self.setWindowTitle('Handler Python Script Editor')
self.setLayout(QtGui.QVBoxLayout())
self.layout().setMargin(0)
self.layout().setSpacing(0)
self.createEditor()
self.createButtonLayout()
def findHandlerFunction(self):
""" findHandlerFunction() -> int
Return the function id associated with input port 'source'
"""
fid = -1
for i in xrange(self.module.getNumFunctions()):
if self.module.functions[i].name=='Handler':
fid = i
break
return fid
def createEditor(self):
""" createEditor() -> None
Add a python editor into the widget layout
"""
self.codeEditor = PythonEditor(self)
fid = self.findHandlerFunction()
if fid!=-1:
f = self.module.functions[fid]
self.codeEditor.setPlainText(urllib.unquote(f.params[0].strValue))
self.codeEditor.document().setModified(False)
self.layout().addWidget(self.codeEditor, 1)
def createButtonLayout(self):
""" createButtonLayout() -> None
Construct Ok & Cancel button
"""
self.buttonLayout = QtGui.QHBoxLayout()
self.buttonLayout.setMargin(5)
self.okButton = QtGui.QPushButton('&OK', self)
self.okButton.setAutoDefault(False)
self.okButton.setFixedWidth(100)
self.buttonLayout.addWidget(self.okButton)
self.cancelButton = QtGui.QPushButton('&Cancel', self)
self.cancelButton.setAutoDefault(False)
self.cancelButton.setShortcut('Esc')
self.cancelButton.setFixedWidth(100)
self.buttonLayout.addWidget(self.cancelButton)
self.layout().addLayout(self.buttonLayout)
self.connect(self.okButton, QtCore.SIGNAL('clicked(bool)'), self.okTriggered)
self.connect(self.cancelButton, QtCore.SIGNAL('clicked(bool)'), self.close)
def sizeHint(self):
""" sizeHint() -> QSize
Return the recommended size of this widget
"""
return QtCore.QSize(512, 512)
def updateController(self, controller):
""" updateController() -> None
Based on the input of the python editor, update the vistrail
controller appropriately
"""
if self.codeEditor.document().isModified():
code = urllib.quote(str(self.codeEditor.toPlainText()))
functions = [('Handler', [code])]
self.controller.update_functions(self.module, functions)
def okTriggered(self, checked = False):
""" okTriggered(checked: bool) -> None
Update the vistrail controller (if necessary) then close the widget
"""
self.updateController(self.controller)
self.emit(QtCore.SIGNAL('doneConfigure()'))
self.close()
def registerSelf():
""" registerSelf() -> None
Register this module with the module registry
"""
registry = get_module_registry()
vIO = registry.get_descriptor_by_name(
'edu.utah.sci.vistrails.vtksnl',
'vtkInteractorObserver').module
registry.add_module(vtkInteractionHandler, configureWidgetType=HandlerConfigurationWidget)
registry.add_input_port(vtkInteractionHandler, 'Observer', vIO)
registry.add_input_port(vtkInteractionHandler, 'Handler', String, True)
registry.add_input_port(vtkInteractionHandler, 'SharedData', Module)
registry.add_output_port(vtkInteractionHandler, 'self',
vtkInteractionHandler)
|
VisTrails/vistrails-contrib-legacy
|
vtksnl/vtkhandler.py
|
Python
|
bsd-3-clause
| 10,922
|
[
"VTK"
] |
d5a22de7a68abad56cb2bfbd9fda68d02ff6354ba2866ecc508f802182f3adbb
|
""" UserProfileDB class is a front-end to the User Profile Database
"""
__RCSID__ = "$Id$"
import types
import os
import sys
import hashlib
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities import Time
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Base.DB import DB
class UserProfileDB( DB ):
""" UserProfileDB class is a front-end to the User Profile Database
"""
tableDict = { 'up_Users' : { 'Fields' : { 'Id' : 'INTEGER AUTO_INCREMENT NOT NULL',
'UserName' : 'VARCHAR(32) NOT NULL',
'LastAccess' : 'DATETIME',
},
'PrimaryKey' : 'Id',
'UniqueIndexes' : { 'U' : [ 'UserName' ] },
'Engine': 'InnoDB',
},
'up_Groups': { 'Fields' : { 'Id' : 'INTEGER AUTO_INCREMENT NOT NULL',
'UserGroup' : 'VARCHAR(32) NOT NULL',
'LastAccess' : 'DATETIME',
},
'PrimaryKey' : 'Id',
'UniqueIndexes' : { 'G' : [ 'UserGroup' ] },
'Engine': 'InnoDB',
},
'up_VOs': { 'Fields' : { 'Id' : 'INTEGER AUTO_INCREMENT NOT NULL',
'VO' : 'VARCHAR(32) NOT NULL',
'LastAccess' : 'DATETIME',
},
'PrimaryKey' : 'Id',
'UniqueIndexes' : { 'VO' : [ 'VO' ] },
'Engine': 'InnoDB',
},
'up_ProfilesData': { 'Fields' : { 'UserId' : 'INTEGER',
'GroupId' : 'INTEGER',
'VOId' : 'INTEGER',
'Profile' : 'VARCHAR(255) NOT NULL',
'VarName' : 'VARCHAR(255) NOT NULL',
'Data' : 'BLOB',
'ReadAccess' : 'VARCHAR(10) DEFAULT "USER"',
'PublishAccess' : 'VARCHAR(10) DEFAULT "USER"',
},
'PrimaryKey' : [ 'UserId', 'GroupId', 'Profile', 'VarName' ],
'Indexes' : { 'ProfileKey' : [ 'UserId', 'GroupId', 'Profile' ],
'UserKey' : [ 'UserId' ] ,
},
'Engine': 'InnoDB',
},
'up_HashTags': { 'Fields' : { 'UserId' : 'INTEGER',
'GroupId' : 'INTEGER',
'VOId' : 'INTEGER',
'HashTag' : 'VARCHAR(32) NOT NULL',
'TagName' : 'VARCHAR(255) NOT NULL',
'LastAccess' : 'DATETIME',
},
'PrimaryKey' : [ 'UserId', 'GroupId', 'TagName' ],
'Indexes' : { 'HashKey' : [ 'UserId', 'HashTag' ] },
'Engine': 'InnoDB',
},
}
def __init__( self ):
""" Constructor
"""
self.__permValues = [ 'USER', 'GROUP', 'VO', 'ALL' ]
self.__permAttrs = [ 'ReadAccess', 'PublishAccess' ]
DB.__init__(self, 'UserProfileDB', 'Framework/UserProfileDB')
retVal = self.__initializeDB()
if not retVal[ 'OK' ]:
raise Exception( "Can't create tables: %s" % retVal[ 'Message' ] )
def _checkTable( self ):
""" Make sure the tables are created
"""
return self.__initializeDB()
def __initializeDB( self ):
"""
Create the tables
"""
retVal = self._query( "show tables" )
if not retVal[ 'OK' ]:
return retVal
tablesInDB = [ t[0] for t in retVal[ 'Value' ] ]
tablesD = {}
if 'up_Users' not in tablesInDB:
tablesD[ 'up_Users' ] = self.tableDict['up_Users']
if 'up_Groups' not in tablesInDB:
tablesD[ 'up_Groups' ] = self.tableDict[ 'up_Groups']
if 'up_VOs' not in tablesInDB:
tablesD[ 'up_VOs' ] = self.tableDict['up_VOs']
if 'up_ProfilesData' not in tablesInDB:
tablesD[ 'up_ProfilesData' ] = self.tableDict['up_ProfilesData']
if 'up_HashTags' not in tablesInDB:
tablesD[ 'up_HashTags' ] = self.tableDict['up_HashTags']
return self._createTables( tablesD )
def __getUserId( self, userName, insertIfMissing = True ):
return self.__getObjId( userName, 'UserName', 'up_Users', insertIfMissing )
def __getGroupId( self, groupName, insertIfMissing = True ):
return self.__getObjId( groupName, 'UserGroup', 'up_Groups', insertIfMissing )
def __getVOId( self, voName, insertIfMissing = True ):
return self.__getObjId( voName, 'VO', 'up_VOs', insertIfMissing )
def __getObjId( self, objValue, varName, tableName, insertIfMissing = True ):
result = self.getFields( tableName, ['Id'], { varName: objValue } )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if len( data ) > 0:
objId = data[0][0]
self.updateFields( tableName, ['LastAccess'], ['UTC_TIMESTAMP()'], { 'Id': objId } )
return S_OK( objId )
if not insertIfMissing:
return S_ERROR( "No entry %s for %s defined in the DB" % ( objValue, varName ) )
result = self.insertFields( tableName, [ varName, 'LastAccess' ], [ objValue, 'UTC_TIMESTAMP()' ] )
if not result[ 'OK' ]:
return result
return S_OK( result[ 'lastRowId' ] )
def getUserGroupIds( self, userName, userGroup, insertIfMissing = True ):
result = self.__getUserId( userName, insertIfMissing )
if not result[ 'OK' ]:
return result
userId = result[ 'Value' ]
result = self.__getGroupId( userGroup, insertIfMissing )
if not result[ 'OK' ]:
return result
groupId = result[ 'Value' ]
userVO = Registry.getVOForGroup( userGroup )
if not userVO:
userVO = "undefined"
result = self.__getVOId( userVO, insertIfMissing )
if not result[ 'OK' ]:
return result
voId = result[ 'Value' ]
return S_OK( ( userId, groupId, voId ) )
def deleteUserProfile( self, userName, userGroup = False ):
"""
Delete the profiles for a user
"""
result = self.__getUserId( userName )
if not result[ 'OK' ]:
return result
userId = result[ 'Value' ]
condDict = { 'UserId': userId }
if userGroup:
result = self.__getGroupId( userGroup )
if not result[ 'OK' ]:
return result
groupId = result[ 'Value' ]
condDict['GroupId'] = groupId
result = self.deleteEntries( 'up_ProfilesData', condDict )
if not result[ 'OK' ] or not userGroup:
return result
return self.deleteEntries( 'up_Users', { 'Id': userId } )
def __webProfileUserDataCond( self, userIds, sqlProfileName = False, sqlVarName = False ):
condSQL = [ '`up_ProfilesData`.UserId=%s' % userIds[0],
'`up_ProfilesData`.GroupId=%s' % userIds[1],
'`up_ProfilesData`.VOId=%s' % userIds[2] ]
if sqlProfileName:
condSQL.append( '`up_ProfilesData`.Profile=%s' % sqlProfileName )
if sqlVarName:
condSQL.append( '`up_ProfilesData`.VarName=%s' % sqlVarName )
return " AND ".join( condSQL )
def __webProfileReadAccessDataCond( self, userIds, ownerIds, sqlProfileName, sqlVarName = False, match = False ):
permCondSQL = []
sqlCond = []
if match:
sqlCond.append( '`up_ProfilesData`.UserId = %s AND `up_ProfilesData`.GroupId = %s' % ( ownerIds[0], ownerIds[1] ) )
else:
permCondSQL.append( '`up_ProfilesData`.UserId = %s AND `up_ProfilesData`.GroupId = %s' % ( ownerIds[0], ownerIds[1] ) )
permCondSQL.append( '`up_ProfilesData`.GroupId=%s AND `up_ProfilesData`.ReadAccess="GROUP"' % userIds[1] )
permCondSQL.append( '`up_ProfilesData`.VOId=%s AND `up_ProfilesData`.ReadAccess="VO"' % userIds[2] )
permCondSQL.append( '`up_ProfilesData`.ReadAccess="ALL"' )
sqlCond.append( '`up_ProfilesData`.Profile = %s' % sqlProfileName )
if sqlVarName:
sqlCond.append( "`up_ProfilesData`.VarName = %s" % ( sqlVarName ) )
#Perms
sqlCond.append( "( ( %s ) )" % " ) OR ( ".join( permCondSQL ) )
return " AND ".join( sqlCond )
def __parsePerms( self, perms, addMissing = True ):
normPerms = {}
for pName in self.__permAttrs:
if not perms or pName not in perms:
if addMissing:
normPerms[ pName ] = self.__permValues[0]
continue
else:
permVal = perms[ pName ].upper()
for nV in self.__permValues:
if nV == permVal:
normPerms[ pName ] = nV
break
if pName not in normPerms and addMissing:
normPerms[ pName ] = self.__permValues[0]
return normPerms
def retrieveVarById( self, userIds, ownerIds, profileName, varName ):
"""
Get a data entry for a profile
"""
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
result = self._escapeString( varName )
if not result[ 'OK' ]:
return result
sqlVarName = result[ 'Value' ]
sqlCond = self.__webProfileReadAccessDataCond( userIds, ownerIds, sqlProfileName, sqlVarName, True )
#when we retrieve the user profile we have to take into account the user.
selectSQL = "SELECT data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query( selectSQL )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if len( data ) > 0:
return S_OK( data[0][0] )
return S_ERROR( "No data for userIds %s profileName %s varName %s" % ( userIds, profileName, varName ) )
def retrieveAllUserVarsById( self, userIds, profileName ):
"""
Get all data entries of a profile for a user
"""
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
sqlCond = self.__webProfileUserDataCond( userIds, sqlProfileName )
selectSQL = "SELECT varName, data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query( selectSQL )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
return S_OK( dict( data ) )
def retrieveUserProfilesById( self, userIds ):
"""
Get all profiles and data for a user
"""
sqlCond = self.__webProfileUserDataCond( userIds )
selectSQL = "SELECT Profile, varName, data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query( selectSQL )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
dataDict = {}
for row in data:
if row[0] not in dataDict:
dataDict[ row[0] ] = {}
dataDict[ row[0] ][ row[1] ] = row[2]
return S_OK( dataDict )
def retrieveVarPermsById( self, userIds, ownerIds, profileName, varName ):
"""
Get the access permissions of a data entry for a profile
"""
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
result = self._escapeString( varName )
if not result[ 'OK' ]:
return result
sqlVarName = result[ 'Value' ]
sqlCond = self.__webProfileReadAccessDataCond( userIds, ownerIds, sqlProfileName, sqlVarName )
selectSQL = "SELECT %s FROM `up_ProfilesData` WHERE %s" % ( ", ".join( self.__permAttrs ), sqlCond )
result = self._query( selectSQL )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if len( data ) > 0:
permDict = {}
for i in range( len( self.__permAttrs ) ):
permDict[ self.__permAttrs[ i ] ] = data[0][i]
return S_OK( permDict )
return S_ERROR( "No data for userIds %s profileName %s varName %s" % ( userIds, profileName, varName ) )
def deleteVarByUserId( self, userIds, profileName, varName ):
"""
Remove a data entry for a profile
"""
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
result = self._escapeString( varName )
if not result[ 'OK' ]:
return result
sqlVarName = result[ 'Value' ]
sqlCond = self.__webProfileUserDataCond( userIds, sqlProfileName, sqlVarName )
selectSQL = "DELETE FROM `up_ProfilesData` WHERE %s" % sqlCond
return self._update( selectSQL )
def storeVarByUserId( self, userIds, profileName, varName, data, perms ):
"""
Set a data entry for a profile
"""
sqlInsertValues = []
sqlInsertKeys = []
sqlInsertKeys.append( ( 'UserId', userIds[0] ) )
sqlInsertKeys.append( ( 'GroupId', userIds[1] ) )
sqlInsertKeys.append( ( 'VOId', userIds[2] ) )
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
sqlInsertKeys.append( ( 'Profile', sqlProfileName ) )
result = self._escapeString( varName )
if not result[ 'OK' ]:
return result
sqlVarName = result[ 'Value' ]
sqlInsertKeys.append( ( 'VarName', sqlVarName ) )
result = self._escapeString( data )
if not result[ 'OK' ]:
return result
sqlInsertValues.append( ( 'Data', result[ 'Value' ] ) )
normPerms = self.__parsePerms( perms )
for k in normPerms:
sqlInsertValues.append( ( k, '"%s"' % normPerms[ k ] ) )
sqlInsert = sqlInsertKeys + sqlInsertValues
insertSQL = "INSERT INTO `up_ProfilesData` ( %s ) VALUES ( %s )" % ( ", ".join( [ f[0] for f in sqlInsert ] ),
", ".join( [ str( f[1] ) for f in sqlInsert ] ) )
result = self._update( insertSQL )
if result[ 'OK' ]:
return result
#If error and not duplicate -> real error
if result[ 'Message' ].find( "Duplicate entry" ) == -1:
return result
updateSQL = "UPDATE `up_ProfilesData` SET %s WHERE %s" % ( ", ".join( [ "%s=%s" % f for f in sqlInsertValues ] ),
self.__webProfileUserDataCond( userIds,
sqlProfileName,
sqlVarName ) )
return self._update( updateSQL )
def setUserVarPermsById( self, userIds, profileName, varName, perms ):
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
result = self._escapeString( varName )
if not result[ 'OK' ]:
return result
sqlVarName = result[ 'Value' ]
nPerms = self.__parsePerms( perms, False )
if not nPerms:
return S_OK()
sqlPerms = ",".join( [ "%s='%s'" % ( k, nPerms[k] ) for k in nPerms ] )
updateSql = "UPDATE `up_ProfilesData` SET %s WHERE %s" % ( sqlPerms,
self.__webProfileUserDataCond( userIds,
sqlProfileName,
sqlVarName ) )
return self._update( updateSql )
def retrieveVar( self, userName, userGroup, ownerName, ownerGroup, profileName, varName ):
"""
Get a data entry for a profile
"""
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
result = self.getUserGroupIds( ownerName, ownerGroup )
if not result[ 'OK' ]:
return result
ownerIds = result[ 'Value' ]
return self.retrieveVarById( userIds, ownerIds, profileName, varName )
def retrieveUserProfiles( self, userName, userGroup ):
"""
Helper for getting data
"""
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.retrieveUserProfilesById( userIds )
def retrieveAllUserVars( self, userName, userGroup, profileName ):
"""
Helper for getting data
"""
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.retrieveAllUserVarsById( userIds, profileName )
def retrieveVarPerms( self, userName, userGroup, ownerName, ownerGroup, profileName, varName ):
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
result = self.getUserGroupIds( ownerName, ownerGroup, False )
if not result[ 'OK' ]:
return result
ownerIds = result[ 'Value' ]
return self.retrieveVarPermsById( userIds, ownerIds, profileName, varName )
def setUserVarPerms( self, userName, userGroup, profileName, varName, perms ):
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.setUserVarPermsById( userIds, profileName, varName, perms )
def storeVar( self, userName, userGroup, profileName, varName, data, perms = None ):
"""
Helper for setting data
"""
try:
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.storeVarByUserId( userIds, profileName, varName, data, perms = perms )
finally:
pass
def deleteVar( self, userName, userGroup, profileName, varName ):
"""
Helper for deleting data
"""
try:
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.deleteVarByUserId( userIds, profileName, varName )
finally:
pass
def __profilesCondGenerator( self, value, varType, initialValue = False ):
if type( value ) in types.StringTypes:
value = [ value ]
ids = []
if initialValue:
ids.append( initialValue )
for val in value:
if varType == 'user':
result = self.__getUserId( val, insertIfMissing = False )
elif varType == 'group':
result = self.__getGroupId( val, insertIfMissing = False )
else:
result = self.__getVOId( val, insertIfMissing = False )
if not result[ 'OK' ]:
continue
ids.append( result[ 'Value' ] )
if varType == 'user':
fieldName = 'UserId'
elif varType == 'group':
fieldName = 'GroupId'
else:
fieldName = 'VOId'
return "`up_ProfilesData`.%s in ( %s )" % ( fieldName, ", ".join( [ str( iD ) for iD in ids ] ) )
def listVarsById( self, userIds, profileName, filterDict = None ):
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
sqlCond = [ "`up_Users`.Id = `up_ProfilesData`.UserId",
"`up_Groups`.Id = `up_ProfilesData`.GroupId",
"`up_VOs`.Id = `up_ProfilesData`.VOId",
self.__webProfileReadAccessDataCond( userIds, userIds, sqlProfileName ) ]
if filterDict:
fD = {}
for k in filterDict:
fD[ k.lower() ] = filterDict[ k ]
filterDict = fD
for k in ( 'user', 'group', 'vo' ):
if k in filterDict:
sqlCond.append( self.__profilesCondGenerator( filterDict[ k ], k ) )
sqlVars2Get = [ "`up_Users`.UserName", "`up_Groups`.UserGroup", "`up_VOs`.VO", "`up_ProfilesData`.VarName" ]
sqlQuery = "SELECT %s FROM `up_Users`, `up_Groups`, `up_VOs`, `up_ProfilesData` WHERE %s" % ( ", ".join( sqlVars2Get ),
" AND ".join( sqlCond ) )
return self._query( sqlQuery )
def listVars( self, userName, userGroup, profileName, filterDict = None ):
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.listVarsById( userIds, profileName, filterDict )
def storeHashTagById( self, userIds, tagName, hashTag = False ):
"""
Store a hash tag for a user
"""
if not hashTag:
hashTag = hashlib.md5()
hashTag.update( "%s;%s;%s" % ( Time.dateTime(), userIds, tagName ) )
hashTag = hashTag.hexdigest()
result = self.insertFields( 'up_HashTags', [ 'UserId', 'GroupId', 'VOId', 'TagName', 'HashTag' ],
[ userIds[0], userIds[1], userIds[2], tagName, hashTag ] )
if result[ 'OK' ]:
return S_OK( hashTag )
#If error and not duplicate -> real error
if result[ 'Message' ].find( "Duplicate entry" ) == -1:
return result
result = self.updateFields( 'up_HashTags', ['HashTag'], [hashTag], { 'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2],
'TagName': tagName } )
if not result[ 'OK' ]:
return result
return S_OK( hashTag )
def retrieveHashTagById( self, userIds, hashTag ):
"""
Get the tag name for a hash tag of a user
"""
result = self.getFields( 'up_HashTags', ['TagName'], { 'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2],
'HashTag': hashTag } )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if len( data ) > 0:
return S_OK( data[0][0] )
return S_ERROR( "No data for combo userId %s hashTag %s" % ( userIds, hashTag ) )
def retrieveAllHashTagsById( self, userIds ):
"""
Get all hash tags for a user
"""
result = self.getFields( 'up_HashTags', ['HashTag', 'TagName'], { 'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2] } )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
return S_OK( dict( data ) )
def storeHashTag( self, userName, userGroup, tagName, hashTag = False ):
"""
Helper for storing HASH
"""
try:
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.storeHashTagById( userIds, tagName, hashTag )
finally:
pass
def retrieveHashTag( self, userName, userGroup, hashTag ):
"""
Helper for retrieving HASH
"""
try:
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.retrieveHashTagById( userIds, hashTag )
finally:
pass
def retrieveAllHashTags( self, userName, userGroup ):
"""
Helper for retrieving HASH
"""
try:
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.retrieveAllHashTagsById( userIds )
finally:
pass
def getUserProfileNames( self, permission ):
"""
it returns the available profile names, filtered by the given permissions (ReadAccess and PublishAccess)
"""
result = None
permissions = self.__parsePerms( permission, False )
if not permissions:
return S_OK()
condition = ",".join( [ "%s='%s'" % ( k, permissions[k] ) for k in permissions ] )
query = "SELECT distinct Profile from `up_ProfilesData` where %s" % condition
retVal = self._query( query )
if retVal['OK']:
result = S_OK( [i[0] for i in retVal['Value']] )
else:
result = retVal
return result
def testUserProfileDB():
""" Some test cases
"""
# building up some fake CS values
gConfig.setOptionValue( 'DIRAC/Setup', 'Test' )
gConfig.setOptionValue( '/DIRAC/Setups/Test/Framework', 'Test' )
host = '127.0.0.1'
user = 'Dirac'
pwd = 'Dirac'
db = 'AccountingDB'
gConfig.setOptionValue( '/Systems/Framework/Test/Databases/UserProfileDB/Host', host )
gConfig.setOptionValue( '/Systems/Framework/Test/Databases/UserProfileDB/DBName', db )
gConfig.setOptionValue( '/Systems/Framework/Test/Databases/UserProfileDB/User', user )
gConfig.setOptionValue( '/Systems/Framework/Test/Databases/UserProfileDB/Password', pwd )
db = UserProfileDB()
assert db._connect()['OK']
userName = 'testUser'
userGroup = 'testGroup'
profileName = 'testProfile'
varName = 'testVar'
tagName = 'testTag'
hashTag = '237cadc4af90277e9524e6386e264630'
data = 'testData'
perms = 'USER'
try:
if False:
for tableName in db.tableDict.keys():
result = db._update( 'DROP TABLE `%s`' % tableName )
assert result['OK']
gLogger.info( '\n Creating Table\n' )
# Make sure it is there and it has been created for this test
result = db._checkTable()
assert result == {'OK': True, 'Value': None }
result = db._checkTable()
assert result == {'OK': True, 'Value': 0}
gLogger.info( '\n Adding some data\n' )
result = db.storeVar( userName, userGroup, profileName, varName, data, perms )
assert result['OK']
assert result['Value'] == 1
gLogger.info( '\n Some queries\n' )
result = db.getUserGroupIds( userName, userGroup )
assert result['OK']
assert result['Value'] == ( 1, 1, 1 )
result = db.listVars( userName, userGroup, profileName )
assert result['OK']
assert result['Value'][0][3] == varName
result = db.retrieveUserProfiles( userName, userGroup )
assert result['OK']
assert result['Value'] == { profileName: { varName: data } }
result = db.storeHashTag( userName, userGroup, tagName, hashTag )
assert result['OK']
assert result['Value'] == hashTag
result = db.retrieveAllHashTags( userName, userGroup )
assert result['OK']
assert result['Value'] == { hashTag: tagName }
result = db.retrieveHashTag( userName, userGroup, hashTag )
assert result['OK']
assert result['Value'] == tagName
gLogger.info( '\n OK\n' )
except AssertionError:
print 'ERROR ',
if not result['OK']:
print result['Message']
else:
print result
sys.exit( 1 )
if __name__ == '__main__':
from DIRAC.Core.Base import Script
Script.parseCommandLine()
gLogger.setLevel( 'VERBOSE' )
if 'PYTHONOPTIMIZE' in os.environ and os.environ['PYTHONOPTIMIZE']:
gLogger.info( 'Unset python optimization "PYTHONOPTIMIZE"' )
sys.exit( 0 )
testUserProfileDB()
|
andresailer/DIRAC
|
FrameworkSystem/DB/UserProfileDB.py
|
Python
|
gpl-3.0
| 27,546
|
[
"DIRAC"
] |
e9d26550ad89d0da947d006cccef5c6e5c02509c04afe080f3b8869d5acebf6f
|
"""
# Majic
# Copyright (C) 2014 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
COMPLETED_SUBJECT_TEMPLATE = "Majic: {name} has completed"
COMPLETED_MESSAGE_TEMPLATE = \
"""
Hi {users_name},
Majic has successfully finished your model run.
Name: {name}
Description: {description}
Link: {url}
Regards,
Majic
"""
FAILED_SUBJECT_TEMPLATE = "Majic: {name} has encountered an error"
FAILED_MESSAGE_TEMPLATE = \
"""
Hi {users_name},
Majic has encountered an error when running your model run.
Name: {name}
Description: {description}
Link: {url}
Error: {error_message}
Regards,
Majic
"""
UNKNOWN_SUBJECT_TEMPLATE = "Majic: {name} has encountered an unexpected problem"
UNKNOWN_MESSAGE_TEMPLATE = \
"""
Hi {users_name},
Majic has encountered an unexpected problem when running your model run.
Name: {name}
Description: {description}
Link: {url}
Unknown problem: {error_message}
Regards,
Majic
"""
GROUP_SPACE_FULL_ALERT_SUBJECT = "Majic: The group space is almost full"
GROUP_SPACE_FULL_ALERT_MESSAGE = \
"""
Hi Admin,
The group space that Majic uses to store model runs is almost full. Please delete some data or request more group space.
The size of the group space is the quota allocated to the system user.
Current space in GB: {current_quota}
Current used space in GB: {used_space}
Regards,
Majic
"""
ACCOUNT_REQUESTED_USER = "Dear %s,\r\n\r\n" \
"Your request for a Majic account has been passed on to the Majic admin team. " \
"Once it has been approved you will receive an email letting you know that an " \
"account has been created for you," \
"\r\n\r\nThanks for registering your interest with Majic!"
ACCOUNT_REQUESTED_ADMIN = "Dear Majic admin,\r\n\r\nThe following request for a Majic " \
"account has been received:\r\n\r\n" \
"Name: {first_name} {last_name}\r\n" \
"Email: {email}\r\n" \
"Institution: {institution}\r\n" \
"Expected usage: {usage}\r\n\r\n" \
"Please follow this link to review this user's request: \r\n" \
"{link}\r\n"
ACCOUNT_REQUEST_FULL = "Dear %s,\r\n\r\n" \
"Unfortunately we are unable to accept any more account requests for today. We're " \
"sorry for the inconvenience, please try again tomorrow."
ACCOUNT_REQUEST_REJECTED_SUBJECT = "Rejection of your Majic account request"
ACCOUNT_REQUEST_REJECTED_MESSAGE = """
Dear {first_name} {last_name},
Sorry, your request for a Majic account has been rejected. The reason for rejection was:
{reason}
Regards,
Majic Admin Team
"""
ACCOUNT_REQUEST_ACCEPTED_SUBJECT = "Majic account creation"
ACCOUNT_REQUEST_ACCEPTED_MESSAGE = """
Dear {first_name} {last_name},
We have created you an account in Majic. To access the account please follow this link:
{link}
This link is only valid for the next 24 hours but if you visit this page after that time a new link will be sent to you.
If you did not request an account please ignore this email.
We hope you enjoy using the system.
Regards,
Majic Admin Team
"""
PASSWORD_RESET_SUBJECT = "Majic account password reset request"
PASSWORD_RESET_MESSAGE = """
Dear {name},
You may now reset your password in Majic. To do this please follow this link:
{link}
This link is only valid for the next 24 hours but if you visit this page after that time a new link will be sent to you.
If you did not request a password reset for this account please ignore this email.
Regards,
Majic Admin Team
"""
FAILED_SUBMIT_SUPPORT_SUBJECT_TEMPLATE = "Majic: Has failed to submit a job"
FAILED_SUBMIT_SUPPORT_MESSAGE_TEMPLATE = \
"""
Hi Support,
Majic has encountered an error when submitting a job:
ID: {id}
Name: {name}
Description: {description}
Error: {error_message}
This might be because the job runner is down temporarily or something more serious you should consider investigating.
Regards,
Majic
"""
|
NERC-CEH/jules-jasmin
|
majic/joj/utils/email_messages.py
|
Python
|
gpl-2.0
| 4,850
|
[
"VisIt"
] |
6aed037982b2b928791ddacb2d0c8f6dc8add915d0a1954c1d31681fc40eb94c
|
"""Classes for use with Quantum ESPRESSO
Representation of a spectrum and more
"""
import re
import os
import copy as cp
import numpy as np
from string import digits
import asetk.atomistic.fundamental as fu
import asetk.atomistic.constants as atc
from . import cube
#class Dispersion(object):
# """Holds k-points belonging to one spin"""
# def __init__(self, kpoints=None, kvectors=None):
# if kpoints is None:
# self.kpoints = []
# self.kvectors = []
# else:
# self.kpoints = kpoints
# self.kvectors = kvectors
#
# def addkpoint(self, kpoint, kvector):
# self.kpoints.append(kpoint)
# self.kvectors.append(kvector)
#
# def energylevels(self):
# s = fu.EnergyLevels()
# for l in self.kpoints:
# s += l
# return s
#
# def mergekpoints(self):
# self.kpoints = energylevels(self.kpoints)
#
# def __iadd__(self, s):
# """Merging two spins"""
# if len(self.kpoints) != len(s.kpoints):
# print("Unable to merge due to different number of kpoints")
# for i in range(len(self.kpoints)):
# self.kpoints[i] += s.kpoints[i]
# return self
#
# def shift(self, e):
# for l in self.kpoints:
# l.shift(e)
#
# @property
# def nbnd(self):
# nbnds = [len(k.levels) for k in self.kpoints]
# nbnd = np.unique(nbnds)
#
# if len( np.unique(nbnd) ) != 1:
# print("Warning: k-points have different numer of bands {}"\
# .format(nbnd))
# return nbnd[0]
#
# @property
# def nk(self):
# return len(self.kpoints)
class Spectrum(object):
"""A collection of dispersions, grouped by spin"""
def __init__(self, dispersions=None, spins=None):
"""Set up spectrum from list of dispersions."""
self.dispersions = dispersions
self.spins = spins
@classmethod
def from_save(cls, fname):
"""Creates Spectrum from QE save directory"""
tmp = Spectrum()
tmp.read_from_save(fname)
return tmp
@classmethod
def from_output(cls, fname):
"""Creates Spectrum from QE output"""
tmp = Spectrum()
tmp.read_from_output(fname)
return tmp
@property
def energies(self):
"""Returns list of all energy levels of all spins."""
list = [d.energies for d in self.dispersions]
return np.concatenate(list)
@property
def occupations(self):
"""Returns list of level occupations of all spins."""
os = np.array([])
for d in self.dispersions:
os = np.concatenate( (os, d.occupations))
return os
@property
def fermi(self):
"""Returns Fermi energy."""
fermis = [d.fermi for d in self.dispersions]
fermi = np.unique(fermis)
if len(fermi) == 1:
return fermi[0]
else:
print("There are Fermi energies {}".format(fermis))
print("Using the mean {}".format(np.mean(fermis)))
return np.mean(fermis)
@property
def nbnd(self):
nbnds = [d.nbnd for d in self.dispersions]
nbnd = np.unique(nbnds)
if len( np.unique(nbnd) ) != 1:
print("Warning: spins have different numer of bands {}"\
.format(nbnd))
return nbnd[0]
@property
def nkpt(self):
nkpts = [d.nkpt for d in self.dispersions]
nkpt = np.unique(nkpts)
if len( np.unique(nkpt) ) != 1:
print("Warning: spins have different numer of k-points {}"\
.format(nkpt))
return nkpt[0]
@property
def nspin(self):
return len(self.dispersions)
def copy(self, spectrum):
"""Performs deep copy of spectrum."""
self.dispersions = [ d.copy() for d in spectrum.dispersions ]
self.spins = cp.copy(spectrum.spins)
def shift(self, de):
for d in self.dispersions:
d.shift(de)
def __str__(self):
text = "Spectrum containing {} spins\n".format(len(self.dispersions))
for i in range(len(self.dispersions)):
d = self.dispersions[i]
s = self.spins[i]
text += 'spin {} : {}\n'.format(s+1, d.__str__())
return text
def __getitem__(self, index):
return self.dispersions[index]
def read_from_save(self, prefix):
"""Reads Spectrum from QE save directory"""
savedir = prefix + '.save'
if not os.path.exists(savedir):
raise IOError("Directory {s} not found.".format(s=savedir))
os.chdir(savedir)
dataxml = open('data-file.xml', 'r').read()
nspinregex = '<NUMBER_OF_SPIN_COMPONENTS.*?>\s*(\d+)'
nspin = int( re.search(nspinregex, dataxml, re.DOTALL).group(1) )
# should be able to match scientific and non-scientific notation
floatregex = '-?\d+\.\d+(?:[Ee][+\-]?\d+)?'
# get fermi energy
fermiregex = '<FERMI_ENERGY.*?>\s*({f})'.format(f=floatregex)
fermi = float( re.search(fermiregex, dataxml, re.DOTALL).group(1) )
fermi *= atc.Ha / atc.eV
#get lattice parameter
alatregex = '<LATTICE_PARAMETER.*?>\s*({f})'.format(f=floatregex)
alat = float( re.search(alatregex, dataxml, re.DOTALL).group(1) )
alat *= atc.a0
# get k-points
kptregex = '<K-POINT\.(\d+)\s+XYZ=\"({f}) ({f}) ({f})\"\s+WEIGHT=\"({f})\"/>'\
.format(f=floatregex)
kptdatas = re.findall(kptregex, dataxml)
self.dispersions = []
self.spins = []
for spin in range(nspin):
dispersion = fu.Dispersion()
for kpt in kptdatas:
kindex = int(kpt[0])
kvec = np.array([ kpt[1], kpt[2], kpt[3] ], dtype=float)
#kvec *= 2*np.pi / alat
kdir = 'K{k:05d}'.format(k=kindex)
if not os.path.exists(kdir):
raise IOError("Directory {s} not found.".format(s=kdir))
# Read energy levels
os.chdir(kdir)
# get correct file name
if nspin == 1:
eigf = 'eigenval.xml'
elif nspin == 2:
eigf = 'eigenval{}.xml'.format(spin+1)
else:
print("Error: Can only handle nspin=1, 2")
if not os.path.exists(eigf):
print("Error: Cannot find file {}".format(eigf))
eigenvalxml = open(eigf, 'r').read()
eigregex = '<EIGENVALUES.*?>(.*?)<'
eigstring = re.search(eigregex, eigenvalxml, re.DOTALL).group(1)
levelregex = '\-?\d+.*'
levelregex = floatregex
levels = np.array(re.findall(levelregex, eigstring), dtype = float)
levels *= atc.Ha / atc.eV
kpt = fu.KPoint(
energylevels=fu.EnergyLevels(energies=levels, fermi=fermi),
kvector=kvec
)
dispersion.kpoints.append(kpt)
os.chdir('..')
self.dispersions.append(dispersion)
self.spins.append(spin)
os.chdir('..')
def read_from_output(self, prefix):
return 0
class Atoms(fu.Atoms):
"""Atoms that can be read from QE output"""
@classmethod
def from_save(cls, prefix):
tmp = Atoms()
tmp.read_from_save(prefix)
return tmp
def read_from_save(self, prefix):
"""Read from .save directory"""
savedir = prefix + '.save'
if not os.path.exists(savedir):
raise IOError("Directory {s} not found.".format(s=savedir))
dataxml = open(savedir + '/data-file.xml', 'r').read()
# should be able to match scientific and non-scientific notation
floatregex = '-?\d+\.\d+(?:[Ee][+\-]?\d+)?'
#get lattice parameter
alatregex = '<LATTICE_PARAMETER.*?>\s*({f})'.format(f=floatregex)
alat = float( re.search(alatregex, dataxml, re.DOTALL).group(1) )
alat *= atc.a0
# get k-points
atregex = '<ATOM\.(\d+) SPECIES=\"(\w+\s*)\" INDEX=\"(\d+)\" tau=\"({f}) ({f}) ({f})\" if_pos=\"(\d+)\s+(\d+)\s+(\d+)\"/>'\
.format(f=floatregex)
atdatas = re.findall(atregex, dataxml)
symbols = []
positions = []
for at in atdatas:
at_num = at[0]
at_sym = at[1].strip()
at_index = at[2]
at_pos = np.array(at[3:6], dtype=float)
at_ifpos = np.array(at[6:9], dtype=int)
symbols.append(at_sym)
#positions.append(at_pos * atc.a0)
positions.append(at_pos * atc.a0 / atc.Angstrom)
self.__init__(positions=positions, symbols=symbols)
class QECube:
"""Intermediate cube file format written by pp.x
These files contain (squared) Kohn-Sham wave functions (?)
in a text-based format very similar to the Gaussian cube format.
Format specification
LINE FORMAT CONTENTS
===============================================================
1 A TITLE
2 8I8 NX NY NZ NX NY NZ #ATOMS #SPECIES
3 I8,6F16.8 IBRAV CELLDM(1:6)
4-6 3F16.8 CELL VECTORS IN ALAT UNITS, ONLY IF IBRAV=0
(OTHERWISE CONTINUE WITH LINE 7)
7 3F16.8,I8 ???
#SPECIES LINES OF ATOMIC SPECIES INFO:
... I4,S4,6.2F INDEX, LABEL, #VALENCE ELECTRONS OF PSEUDO
#ATOMS LINES OF ATOM COORDINATES:
... I5,3F12.6,I4 ATOM INDEX, X, Y, Z [ALAT UNITS], SPECIES INDEX
REST: 5E17.9 CUBE DATA (WITH X INCREMENT MOVING FASTEST, THEN
Y AND THEN Z)
ALL COORDINATES ARE GIVEN IN ATOMIC UNITS.
"""
def __init__(self, filename=None, title=None, cell=None, atoms=None,
data=None):
"""Standard constructor, all parameters default to None"""
self.filename = filename
self.title = title
self.cell = cell
self.atoms = atoms
self.data = data
self._shape = None # stores shape, if grid data isn't read
@classmethod
def from_file(cls, fname, read_data=False):
"""Creates Cube from cube file"""
tmp = QECube()
tmp.read_qe_cube_file(fname, read_data=read_data)
return tmp
def read_qe_cube_file(self, fname, read_data=False, v=1):
"""Reads header and/or data of cube file"""
#super(WfnCube, self).read_cube_file(fname, read_data, v)
self.filename = fname
b2A = atc.a0 / atc.Angstrom
f = open(fname, 'r')
readline = f.readline
# line 1
self.title = readline()
# self.comment = readline()
# axes = [0, 1, 2]
# line 2
line = readline().split()
nx, ny, nz, nxs, nys, nzs, natoms, nspecies = np.array(line, dtype=int)
shape = np.array([nx, ny, nz], dtype=int)
self._shape = shape
# line 3
line = readline().split()
ibrav = int(line[0])
alat = float(line[1])
celldm = np.array(line[2:],dtype=float)
if ibrav > 0:
raise ValueError("ibrav > 0 not yet implemented.")
# lines 4-6
cell = np.empty((3,3))
for i in range(3):
x, y, z = [float(s) for s in readline().split()]
cell[i] = np.array([x,y,z], dtype=float)
cell *= b2A * alat
# line 7
line = readline().split()
# species
species = np.empty(nspecies, dtype=str)
for i in range(nspecies):
sindex, symbol, valence_electrons = readline().split()
# removing any digits that may be part of the symbol
# such as C1, C2, ...
species[i] = symbol.translate(None, digits)
# atoms
at_positions = np.empty((natoms, 3))
at_symbols = np.empty(natoms, dtype=str)
for i in range(natoms):
line = readline().split()
at_positions[i] = [float(s) for s in line[1:4]]
at_symbols[i] = species[int(line[4])-1]
at_positions *= b2A * alat
pbc = [True, True, True]
self.atoms = fu.Atoms(symbols=at_symbols, positions=at_positions,
cell=cell, pbc=pbc)
if read_data:
# Note:
# This is already ~1.7x faster than ASE's version.
# However, parsing still dominates for reasonable disk speeds
# (parsing time = 8x reading time on 480 MB/s SSD)
# In order to parse quickly, use read_csv from the pandas module.
# read() pretty much maxes out the disk read speed.
# split() takes a considerable amount of time.
# The conversion to float is even more expensive.
self.data = np.array(f.read().split(), dtype=float)
# In QE's format, the fastest index is x, then y, then z
self.data = self.data.reshape(shape[::-1])
self.data = self.data.swapaxes(0,2)
f.close()
def write_cube_file(self, fname=None):
"""Writes object to Gaussian cube file
"""
tmp = self.to_cube()
tmp.write_cube_file(fname=fname)
def to_cube(self):
"""Converts object to Gaussian cube object
"""
tmp = cube.Cube(
filename=self.filename,
title=self.title,
comment="Converted from QE intermediate cube format\n",
origin = np.array([0,0,0]),
atoms = self.atoms,
data = self.data)
return tmp
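# --- Usage sketch (not part of the original module; file names are placeholders) ---
# Read the intermediate cube written by pp.x, inspect the parsed structure and
# convert it to a standard Gaussian cube file using the classes defined above.
if __name__ == '__main__':
    wfn = QECube.from_file('wfn.cube', read_data=True)
    print(wfn.atoms)                       # structure parsed from the header
    wfn.write_cube_file('wfn_gaussian.cube')
    # Structures can also be read directly from a <prefix>.save directory:
    # structure = Atoms.from_save('pwscf')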
|
ltalirz/asetk
|
asetk/format/qe.py
|
Python
|
mit
| 13,800
|
[
"ASE",
"Gaussian",
"Quantum ESPRESSO"
] |
ccc5c828202b8388ac309da6f0fb553c9e4171149aeea6687138b3a56f83dc32
|
# Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def gaussian(T, N, zero_momentum=True, seed=7654321):
"""
    Generate initial particle velocities for N particles at temperature T,
    drawing each Cartesian component from a Gaussian (Maxwell-Boltzmann)
    distribution. If zero_momentum is True, the net momentum is removed.
"""
# TODO: account for mass
import random
sqrtT = T**0.5
random.seed(seed)
vx = []
vy = []
vz = []
for i in xrange(N):
vx.append(sqrtT * random.gauss(0.0, 1.0))
vy.append(sqrtT * random.gauss(0.0, 1.0))
vz.append(sqrtT * random.gauss(0.0, 1.0))
if(zero_momentum):
# remove net momentum
sumvx = 0.0
sumvy = 0.0
sumvz = 0.0
for vx_, vy_, vz_ in zip(vx, vy, vz):
sumvx += vx_
sumvy += vy_
sumvz += vz_
sumvx = sumvx / N
sumvy = sumvy / N
sumvz = sumvz / N
for i in xrange(N):
vx[i] = vx[i] - sumvx
vy[i] = vy[i] - sumvy
vz[i] = vz[i] - sumvz
return vx, vy, vz
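# --- Usage sketch (not part of the original module) ---
# Draw velocities for 1000 particles at reduced temperature T = 1.0 and verify
# that the centre-of-mass velocity was removed (all sums should be ~1e-13).
if __name__ == '__main__':
    vx, vy, vz = gaussian(1.0, 1000)
    print "net velocity components:", sum(vx), sum(vy), sum(vz)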
|
fedepad/espressopp
|
src/tools/velocities.py
|
Python
|
gpl-3.0
| 1,736
|
[
"ESPResSo",
"Gaussian"
] |
992f9b88286e27c34cf9a1174dd687a463a3fa822087587d0fc2c034b61a9869
|
import vtk, os, sys
from vtk.test import Testing
ss = vtk.vtkSphereSource() #make mesh to test with
af = vtk.vtkElevationFilter() #add some attributes
af.SetInputConnection(ss.GetOutputPort())
ef = vtk.vtkExtractEdges() #make lines to test
ef.SetInputConnection(af.GetOutputPort())
gf = vtk.vtkGlyph3D() #make verts to test
pts = vtk.vtkPoints()
pts.InsertNextPoint(0,0,0)
verts = vtk.vtkCellArray()
avert = vtk.vtkVertex()
avert.GetPointIds().SetId(0, 0)
verts.InsertNextCell(avert)
onevertglyph = vtk.vtkPolyData()
onevertglyph.SetPoints(pts)
onevertglyph.SetVerts(verts)
gf.SetSourceData(onevertglyph)
gf.SetInputConnection(af.GetOutputPort())
testwrites = ["points","lines","mesh"]
failed = False
for datasetString in testwrites:
if datasetString == "points":
toshow=gf
elif datasetString == "lines":
toshow = ef
else:
toshow = af
gw = vtk.vtkGeoJSONWriter()
fname = "sphere_"+datasetString+".json"
gw.SetInputConnection(toshow.GetOutputPort())
gw.SetFileName(fname)
gw.Write()
if (os.path.exists(fname) and
os.path.isfile(fname)):
os.remove(fname)
else:
print "Failed to write " + fname + " to file"
failed = True
gw.WriteToOutputStringOn()
gw.Write()
gj = "['"+str(gw.RegisterAndGetOutputString()).replace('\n','')+"']"
if len(gj) <= 1000:
print "Failed to write " + fname + " to buffer"
failed = True
if failed:
sys.exit(1)
sys.exit(0)
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/IO/GeoJSON/Testing/Python/TestGeoJSONWriter.py
|
Python
|
mit
| 1,470
|
[
"VTK"
] |
f97693172beffde8a1c71a3862fc047d83577a2ac66ea3c88098c7e316fcc3b8
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import shutil
import sys
from spack import *
class Mfem(Package, CudaPackage, ROCmPackage):
"""Free, lightweight, scalable C++ library for finite element methods."""
tags = ['fem', 'finite-elements', 'high-order', 'amr', 'hpc', 'radiuss', 'e4s']
homepage = 'http://www.mfem.org'
git = 'https://github.com/mfem/mfem.git'
maintainers = ['v-dobrev', 'tzanio', 'acfisher',
'goxberry', 'markcmiller86']
test_requires_compiler = True
# Recommended mfem builds to test when updating this file: see the shell
# script 'test_builds.sh' in the same directory as this file.
# mfem is downloaded from a URL shortener at request of upstream
# author Tzanio Kolev <tzanio@llnl.gov>. See here:
# https://github.com/mfem/mfem/issues/53
#
# The following procedure should be used to verify security when a
# new version is added:
#
# 1. Verify that no checksums on old versions have changed.
#
# 2. Verify that the shortened URL for the new version is listed at:
# https://mfem.org/download/
#
# 3. Use http://getlinkinfo.com or similar to verify that the
    #    underlying download link for the latest version has the
# prefix: http://mfem.github.io/releases
#
# If this quick verification procedure fails, additional discussion
# will be required to verify the new version.
# 'develop' is a special version that is always larger (or newer) than any
# other version.
version('develop', branch='master')
version('4.3.0',
sha256='3a495602121b986049286ea0b23512279cdbdfb43c15c42a1511b521051fbe38',
url='https://bit.ly/mfem-4-3', extension='tar.gz')
version('4.2.0',
'4352a225b55948d2e73a5ee88cece0e88bdbe7ba6726a23d68b2736d3221a86d',
url='https://bit.ly/mfem-4-2', extension='tar.gz')
version('4.1.0',
'4c83fdcf083f8e2f5b37200a755db843cdb858811e25a8486ad36b2cbec0e11d',
url='https://bit.ly/mfem-4-1', extension='tar.gz')
# Tagged development version used by xSDK
version('4.0.1-xsdk', commit='c55c80d17b82d80de04b849dd526e17044f8c99a')
version('4.0.0',
'df5bdac798ea84a263979f6fbf79de9013e1c55562f95f98644c3edcacfbc727',
url='https://bit.ly/mfem-4-0', extension='tar.gz')
# Tagged development version used by the laghos package:
version('3.4.1-laghos-v2.0', tag='laghos-v2.0')
version('3.4.0',
sha256='4e73e4fe0482636de3c5dc983cd395839a83cb16f6f509bd88b053e8b3858e05',
url='https://bit.ly/mfem-3-4', extension='tar.gz')
version('3.3.2',
sha256='b70fa3c5080b9ec514fc05f4a04ff74322b99ac4ecd6d99c229f0ed5188fc0ce',
url='https://goo.gl/Kd7Jk8', extension='tar.gz')
# Tagged development version used by the laghos package:
version('3.3.1-laghos-v1.0', tag='laghos-v1.0')
version('3.3',
sha256='b17bd452593aada93dc0fee748fcfbbf4f04ce3e7d77fdd0341cc9103bcacd0b',
url='http://goo.gl/Vrpsns', extension='tar.gz')
version('3.2',
sha256='2938c3deed4ec4f7fd5b5f5cfe656845282e86e2dcd477d292390058b7b94340',
url='http://goo.gl/Y9T75B', extension='tar.gz')
version('3.1',
sha256='841ea5cf58de6fae4de0f553b0e01ebaab9cd9c67fa821e8a715666ecf18fc57',
url='http://goo.gl/xrScXn', extension='tar.gz')
variant('static', default=True,
description='Build static library')
variant('shared', default=False,
description='Build shared library')
variant('mpi', default=True,
description='Enable MPI parallelism')
    # Can we make the default value for 'metis' depend on the 'mpi' value?
variant('metis', default=True,
description='Enable METIS support')
variant('openmp', default=False,
description='Enable OpenMP parallelism')
# Note: '+cuda' and 'cuda_arch' variants are added by the CudaPackage
# Note: '+rocm' and 'amdgpu_target' variants are added by the ROCmPackage
variant('occa', default=False, description='Enable OCCA backend')
variant('raja', default=False, description='Enable RAJA backend')
variant('libceed', default=False, description='Enable libCEED backend')
variant('umpire', default=False, description='Enable Umpire support')
variant('amgx', default=False, description='Enable NVIDIA AmgX solver support')
variant('threadsafe', default=False,
description=('Enable thread safe features.'
' Required for OpenMP.'
' May cause minor performance issues.'))
variant('superlu-dist', default=False,
description='Enable MPI parallel, sparse direct solvers')
variant('strumpack', default=False,
description='Enable support for STRUMPACK')
variant('suite-sparse', default=False,
description='Enable serial, sparse direct solvers')
variant('petsc', default=False,
description='Enable PETSc solvers, preconditioners, etc.')
variant('slepc', default=False,
description='Enable SLEPc integration')
variant('sundials', default=False,
description='Enable Sundials time integrators')
variant('pumi', default=False,
description='Enable functionality based on PUMI')
variant('gslib', default=False,
description='Enable functionality based on GSLIB')
variant('mpfr', default=False,
description='Enable precise, 1D quadrature rules')
variant('lapack', default=False,
description='Use external blas/lapack routines')
variant('debug', default=False,
description='Build debug instead of optimized version')
variant('netcdf', default=False,
description='Enable Cubit/Genesis reader')
variant('conduit', default=False,
description='Enable binary data I/O using Conduit')
variant('zlib', default=True,
description='Support zip\'d streams for I/O')
variant('gnutls', default=False,
description='Enable secure sockets using GnuTLS')
variant('libunwind', default=False,
description='Enable backtrace on error support using Libunwind')
# TODO: SIMD, Ginkgo, ADIOS2, HiOp, MKL CPardiso, Axom/Sidre
variant('timer', default='auto',
values=('auto', 'std', 'posix', 'mac', 'mpi'),
description='Timing functions to use in mfem::StopWatch')
variant('examples', default=False,
description='Build and install examples')
variant('miniapps', default=False,
description='Build and install miniapps')
conflicts('+shared', when='@:3.3.2')
conflicts('~static~shared')
conflicts('~threadsafe', when='@:3+openmp')
conflicts('+cuda', when='@:3')
conflicts('+rocm', when='@:4.1')
conflicts('+cuda+rocm')
conflicts('+netcdf', when='@:3.1')
conflicts('+superlu-dist', when='@:3.1')
    # STRUMPACK support was added in mfem v3.3.2; however, here we allow only
    # strumpack v3+, support for which is available starting with mfem v4.0:
conflicts('+strumpack', when='@:3')
conflicts('+gnutls', when='@:3.1')
conflicts('+zlib', when='@:3.2')
conflicts('+mpfr', when='@:3.2')
conflicts('+petsc', when='@:3.2')
conflicts('+slepc', when='@:4.1')
conflicts('+sundials', when='@:3.2')
conflicts('+pumi', when='@:3.3.2')
conflicts('+gslib', when='@:4.0')
conflicts('timer=mac', when='@:3.3.0')
conflicts('timer=mpi', when='@:3.3.0')
conflicts('~metis+mpi', when='@:3.3.0')
conflicts('+metis~mpi', when='@:3.3.0')
conflicts('+conduit', when='@:3.3.2')
conflicts('+occa', when='mfem@:3')
conflicts('+raja', when='mfem@:3')
conflicts('+libceed', when='mfem@:4.0')
conflicts('+umpire', when='mfem@:4.0')
conflicts('+amgx', when='mfem@:4.1')
conflicts('+amgx', when='~cuda')
conflicts('+mpi~cuda ^hypre+cuda')
conflicts('+superlu-dist', when='~mpi')
conflicts('+strumpack', when='~mpi')
conflicts('+petsc', when='~mpi')
conflicts('+slepc', when='~petsc')
conflicts('+pumi', when='~mpi')
conflicts('timer=mpi', when='~mpi')
depends_on('mpi', when='+mpi')
depends_on('hypre@2.10.0:2.13', when='@:3.3+mpi')
depends_on('hypre@:2.20.0', when='@3.4:4.2+mpi')
depends_on('hypre@:2.23.0', when='@4.3.0+mpi')
depends_on('hypre', when='+mpi')
depends_on('metis', when='+metis')
depends_on('blas', when='+lapack')
depends_on('lapack@3.0:', when='+lapack')
depends_on('sundials@2.7.0', when='@:3.3.0+sundials~mpi')
depends_on('sundials@2.7.0+mpi+hypre', when='@:3.3.0+sundials+mpi')
depends_on('sundials@2.7.0:', when='@3.3.2:+sundials~mpi')
depends_on('sundials@2.7.0:+mpi+hypre', when='@3.3.2:+sundials+mpi')
depends_on('sundials@5.0.0:', when='@4.0.1-xsdk:+sundials~mpi')
depends_on('sundials@5.0.0:+mpi+hypre', when='@4.0.1-xsdk:+sundials+mpi')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('sundials@5.4.0:+cuda cuda_arch={0}'.format(sm_),
when='@4.2.0:+sundials+cuda cuda_arch={0}'.format(sm_))
depends_on('pumi@2.2.3:', when='@4.2.0:+pumi')
depends_on('pumi', when='+pumi~shared')
depends_on('pumi+shared', when='+pumi+shared')
depends_on('gslib@1.0.5:+mpi', when='+gslib+mpi')
depends_on('gslib@1.0.5:~mpi~mpiio', when='+gslib~mpi')
depends_on('suite-sparse', when='+suite-sparse')
depends_on('superlu-dist', when='+superlu-dist')
depends_on('strumpack@3.0.0:', when='+strumpack~shared')
depends_on('strumpack@3.0.0:+shared', when='+strumpack+shared')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('strumpack+cuda cuda_arch={0}'.format(sm_),
when='+strumpack+cuda cuda_arch={0}'.format(sm_))
# The PETSc tests in MFEM will fail if PETSc is not configured with
# SuiteSparse and MUMPS. On the other hand, if we require the variants
# '+suite-sparse+mumps' of PETSc, the xsdk package concretization fails.
depends_on('petsc@3.8:+mpi+double+hypre', when='+petsc')
depends_on('slepc@3.8.0:', when='+slepc')
# Recommended when building outside of xsdk:
# depends_on('petsc@3.8:+mpi+double+hypre+suite-sparse+mumps',
# when='+petsc')
depends_on('mpfr', when='+mpfr')
depends_on('netcdf-c@4.1.3:', when='+netcdf')
depends_on('unwind', when='+libunwind')
depends_on('zlib', when='+zlib')
depends_on('gnutls', when='+gnutls')
depends_on('conduit@0.3.1:,master:', when='+conduit')
depends_on('conduit+mpi', when='+conduit+mpi')
# The MFEM 4.0.0 SuperLU interface fails when using hypre@2.16.0 and
# superlu-dist@6.1.1. See https://github.com/mfem/mfem/issues/983.
# This issue was resolved in v4.1.
conflicts('+superlu-dist',
when='mfem@:4.0 ^hypre@2.16.0: ^superlu-dist@6:')
# The STRUMPACK v3 interface in MFEM seems to be broken as of MFEM v4.1
# when using hypre version >= 2.16.0.
# This issue is resolved in v4.2.
conflicts('+strumpack', when='mfem@4.0.0:4.1 ^hypre@2.16.0:')
conflicts('+strumpack ^strumpack+cuda', when='~cuda')
depends_on('occa@1.0.8:', when='@:4.1+occa')
depends_on('occa@1.1.0:', when='@4.2.0:+occa')
depends_on('occa+cuda', when='+occa+cuda')
# TODO: propagate '+rocm' variant to occa when it is supported
depends_on('raja@0.10.0:', when='@4.0.1:+raja')
depends_on('raja@0.7.0:0.9.0', when='@4.0.0+raja')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('raja+cuda cuda_arch={0}'.format(sm_),
when='+raja+cuda cuda_arch={0}'.format(sm_))
for gfx in ROCmPackage.amdgpu_targets:
depends_on('raja+rocm amdgpu_target={0}'.format(gfx),
when='+raja+rocm amdgpu_target={0}'.format(gfx))
depends_on('libceed@0.6:', when='@:4.1+libceed')
depends_on('libceed@0.7:', when='@4.2.0:+libceed')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('libceed+cuda cuda_arch={0}'.format(sm_),
when='+libceed+cuda cuda_arch={0}'.format(sm_))
for gfx in ROCmPackage.amdgpu_targets:
depends_on('libceed+rocm amdgpu_target={0}'.format(gfx),
when='+libceed+rocm amdgpu_target={0}'.format(gfx))
depends_on('umpire@2.0.0:', when='+umpire')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('umpire+cuda cuda_arch={0}'.format(sm_),
when='+umpire+cuda cuda_arch={0}'.format(sm_))
for gfx in ROCmPackage.amdgpu_targets:
depends_on('umpire+rocm amdgpu_target={0}'.format(gfx),
when='+umpire+rocm amdgpu_target={0}'.format(gfx))
# AmgX: propagate the cuda_arch and mpi settings:
for sm_ in CudaPackage.cuda_arch_values:
depends_on('amgx+mpi cuda_arch={0}'.format(sm_),
when='+amgx+mpi cuda_arch={0}'.format(sm_))
depends_on('amgx~mpi cuda_arch={0}'.format(sm_),
when='+amgx~mpi cuda_arch={0}'.format(sm_))
patch('mfem_ppc_build.patch', when='@3.2:3.3.0 arch=ppc64le')
patch('mfem-3.4.patch', when='@3.4.0')
patch('mfem-3.3-3.4-petsc-3.9.patch',
when='@3.3.0:3.4.0 +petsc ^petsc@3.9.0:')
patch('mfem-4.2-umpire.patch', when='@4.2.0+umpire')
patch('mfem-4.2-slepc.patch', when='@4.2.0+slepc')
patch('mfem-4.2-petsc-3.15.0.patch', when='@4.2.0+petsc ^petsc@3.15.0:')
patch('mfem-4.3-hypre-2.23.0.patch', when='@4.3.0')
patch('mfem-4.3-cusparse-11.4.patch', when='@4.3.0+cuda')
# Patch to fix MFEM makefile syntax error. See
# https://github.com/mfem/mfem/issues/1042 for the bug report and
# https://github.com/mfem/mfem/pull/1043 for the bugfix contributed
# upstream.
patch('mfem-4.0.0-makefile-syntax-fix.patch', when='@4.0.0')
phases = ['configure', 'build', 'install']
def setup_build_environment(self, env):
env.unset('MFEM_DIR')
env.unset('MFEM_BUILD_DIR')
#
# Note: Although MFEM does support CMake configuration, MFEM
# development team indicates that vanilla GNU Make is the
# preferred mode of configuration of MFEM and the mode most
# likely to be up to date in supporting *all* of MFEM's
# configuration options. So, don't use CMake
#
def configure(self, spec, prefix):
def yes_no(varstr):
return 'YES' if varstr in self.spec else 'NO'
# See also find_system_libraries in lib/spack/llnl/util/filesystem.py
# where the same list of paths is used.
sys_lib_paths = [
'/lib64',
'/lib',
'/usr/lib64',
'/usr/lib',
'/usr/local/lib64',
'/usr/local/lib']
def is_sys_lib_path(dir):
return dir in sys_lib_paths
xcompiler = ''
xlinker = '-Wl,'
if '+cuda' in spec:
xcompiler = '-Xcompiler='
xlinker = '-Xlinker='
cuda_arch = None if '~cuda' in spec else spec.variants['cuda_arch'].value
# We need to add rpaths explicitly to allow proper export of link flags
# from within MFEM.
# Similar to spec[pkg].libs.ld_flags but prepends rpath flags too.
# Also does not add system library paths as defined by 'sys_lib_paths'
# above -- this is done to avoid issues like this:
# https://github.com/mfem/mfem/issues/1088.
def ld_flags_from_library_list(libs_list):
flags = ['%s-rpath,%s' % (xlinker, dir)
for dir in libs_list.directories
if not is_sys_lib_path(dir)]
flags += ['-L%s' % dir for dir in libs_list.directories
if not is_sys_lib_path(dir)]
flags += [libs_list.link_flags]
return ' '.join(flags)
def ld_flags_from_dirs(pkg_dirs_list, pkg_libs_list):
flags = ['%s-rpath,%s' % (xlinker, dir) for dir in pkg_dirs_list
if not is_sys_lib_path(dir)]
flags += ['-L%s' % dir for dir in pkg_dirs_list
if not is_sys_lib_path(dir)]
flags += ['-l%s' % lib for lib in pkg_libs_list]
return ' '.join(flags)
def find_optional_library(name, prefix):
for shared in [True, False]:
for path in ['lib64', 'lib']:
lib = find_libraries(name, join_path(prefix, path),
shared=shared, recursive=False)
if lib:
return lib
return LibraryList([])
# Determine how to run MPI tests, e.g. when using '--test=root', when
# Spack is run inside a batch system job.
mfem_mpiexec = 'mpirun'
mfem_mpiexec_np = '-np'
if 'SLURM_JOBID' in os.environ:
mfem_mpiexec = 'srun'
mfem_mpiexec_np = '-n'
elif 'LSB_JOBID' in os.environ:
if 'LLNL_COMPUTE_NODES' in os.environ:
mfem_mpiexec = 'lrun'
mfem_mpiexec_np = '-n'
else:
mfem_mpiexec = 'jsrun'
mfem_mpiexec_np = '-p'
metis5_str = 'NO'
if ('+metis' in spec) and spec['metis'].satisfies('@5:'):
metis5_str = 'YES'
zlib_var = 'MFEM_USE_ZLIB' if (spec.satisfies('@4.1.0:')) else \
'MFEM_USE_GZSTREAM'
options = [
'PREFIX=%s' % prefix,
'MFEM_USE_MEMALLOC=YES',
'MFEM_DEBUG=%s' % yes_no('+debug'),
# NOTE: env['CXX'] is the spack c++ compiler wrapper. The real
# compiler is defined by env['SPACK_CXX'].
'CXX=%s' % env['CXX'],
'MFEM_USE_LIBUNWIND=%s' % yes_no('+libunwind'),
'%s=%s' % (zlib_var, yes_no('+zlib')),
'MFEM_USE_METIS=%s' % yes_no('+metis'),
'MFEM_USE_METIS_5=%s' % metis5_str,
'MFEM_THREAD_SAFE=%s' % yes_no('+threadsafe'),
'MFEM_USE_MPI=%s' % yes_no('+mpi'),
'MFEM_USE_LAPACK=%s' % yes_no('+lapack'),
'MFEM_USE_SUPERLU=%s' % yes_no('+superlu-dist'),
'MFEM_USE_STRUMPACK=%s' % yes_no('+strumpack'),
'MFEM_USE_SUITESPARSE=%s' % yes_no('+suite-sparse'),
'MFEM_USE_SUNDIALS=%s' % yes_no('+sundials'),
'MFEM_USE_PETSC=%s' % yes_no('+petsc'),
'MFEM_USE_SLEPC=%s' % yes_no('+slepc'),
'MFEM_USE_PUMI=%s' % yes_no('+pumi'),
'MFEM_USE_GSLIB=%s' % yes_no('+gslib'),
'MFEM_USE_NETCDF=%s' % yes_no('+netcdf'),
'MFEM_USE_MPFR=%s' % yes_no('+mpfr'),
'MFEM_USE_GNUTLS=%s' % yes_no('+gnutls'),
'MFEM_USE_OPENMP=%s' % yes_no('+openmp'),
'MFEM_USE_CONDUIT=%s' % yes_no('+conduit'),
'MFEM_USE_CUDA=%s' % yes_no('+cuda'),
'MFEM_USE_HIP=%s' % yes_no('+rocm'),
'MFEM_USE_OCCA=%s' % yes_no('+occa'),
'MFEM_USE_RAJA=%s' % yes_no('+raja'),
'MFEM_USE_AMGX=%s' % yes_no('+amgx'),
'MFEM_USE_CEED=%s' % yes_no('+libceed'),
'MFEM_USE_UMPIRE=%s' % yes_no('+umpire'),
'MFEM_MPIEXEC=%s' % mfem_mpiexec,
'MFEM_MPIEXEC_NP=%s' % mfem_mpiexec_np]
cxxflags = spec.compiler_flags['cxxflags']
if cxxflags:
# Add opt/debug flags if they are not present in global cxx flags
opt_flag_found = any(f in self.compiler.opt_flags
for f in cxxflags)
debug_flag_found = any(f in self.compiler.debug_flags
for f in cxxflags)
if '+debug' in spec:
if not debug_flag_found:
cxxflags.append('-g')
if not opt_flag_found:
cxxflags.append('-O0')
else:
if not opt_flag_found:
cxxflags.append('-O2')
cxxflags = [(xcompiler + flag) for flag in cxxflags]
if '+cuda' in spec:
cxxflags += [
'-x=cu --expt-extended-lambda -arch=sm_%s' % cuda_arch,
'-ccbin %s' % (spec['mpi'].mpicxx if '+mpi' in spec
else env['CXX'])]
if self.spec.satisfies('@4.0.0:'):
cxxflags.append(self.compiler.cxx11_flag)
# The cxxflags are set by the spack c++ compiler wrapper. We also
# set CXXFLAGS explicitly, for clarity, and to properly export the
# cxxflags in the variable MFEM_CXXFLAGS in config.mk.
options += ['CXXFLAGS=%s' % ' '.join(cxxflags)]
if '~static' in spec:
options += ['STATIC=NO']
if '+shared' in spec:
options += [
'SHARED=YES',
'PICFLAG=%s' % (xcompiler + self.compiler.cxx_pic_flag)]
if '+mpi' in spec:
options += ['MPICXX=%s' % spec['mpi'].mpicxx]
hypre = spec['hypre']
# The hypre package always links with 'blas' and 'lapack'.
all_hypre_libs = hypre.libs + hypre['lapack'].libs + \
hypre['blas'].libs
options += [
'HYPRE_OPT=-I%s' % hypre.prefix.include,
'HYPRE_LIB=%s' % ld_flags_from_library_list(all_hypre_libs)]
if '+metis' in spec:
options += [
'METIS_OPT=-I%s' % spec['metis'].prefix.include,
'METIS_LIB=%s' %
ld_flags_from_library_list(spec['metis'].libs)]
if '+lapack' in spec:
lapack_blas = spec['lapack'].libs + spec['blas'].libs
options += [
# LAPACK_OPT is not used
'LAPACK_LIB=%s' % ld_flags_from_library_list(lapack_blas)]
if '+superlu-dist' in spec:
lapack_blas = spec['lapack'].libs + spec['blas'].libs
options += [
'SUPERLU_OPT=-I%s -I%s' %
(spec['superlu-dist'].prefix.include,
spec['parmetis'].prefix.include),
'SUPERLU_LIB=%s %s' %
(ld_flags_from_dirs([spec['superlu-dist'].prefix.lib,
spec['parmetis'].prefix.lib],
['superlu_dist', 'parmetis']),
ld_flags_from_library_list(lapack_blas))]
if '+strumpack' in spec:
strumpack = spec['strumpack']
sp_opt = ['-I%s' % strumpack.prefix.include]
sp_lib = [ld_flags_from_library_list(strumpack.libs)]
# Parts of STRUMPACK use fortran, so we need to link with the
# fortran library and also the MPI fortran library:
if '~shared' in strumpack:
if os.path.basename(env['FC']) == 'gfortran':
gfortran = Executable(env['FC'])
libext = 'dylib' if sys.platform == 'darwin' else 'so'
libfile = os.path.abspath(gfortran(
'-print-file-name=libgfortran.%s' % libext,
output=str).strip())
gfortran_lib = LibraryList(libfile)
sp_lib += [ld_flags_from_library_list(gfortran_lib)]
if ('^mpich' in strumpack) or ('^mvapich2' in strumpack):
sp_lib += ['-lmpifort']
elif '^openmpi' in strumpack:
sp_lib += ['-lmpi_mpifh']
elif '^spectrum-mpi' in strumpack:
sp_lib += ['-lmpi_ibm_mpifh']
if '+openmp' in strumpack:
# The '+openmp' in the spec means strumpack will TRY to find
# OpenMP; if not found, we should not add any flags -- how do
# we figure out if strumpack found OpenMP?
if not self.spec.satisfies('%apple-clang'):
sp_opt += [xcompiler + self.compiler.openmp_flag]
if '^parmetis' in strumpack:
parmetis = strumpack['parmetis']
sp_opt += [parmetis.headers.cpp_flags]
sp_lib += [ld_flags_from_library_list(parmetis.libs)]
if '^netlib-scalapack' in strumpack:
scalapack = strumpack['scalapack']
sp_opt += ['-I%s' % scalapack.prefix.include]
sp_lib += [ld_flags_from_dirs([scalapack.prefix.lib],
['scalapack'])]
elif '^scalapack' in strumpack:
scalapack = strumpack['scalapack']
sp_opt += [scalapack.headers.cpp_flags]
sp_lib += [ld_flags_from_library_list(scalapack.libs)]
if '+butterflypack' in strumpack:
bp = strumpack['butterflypack']
sp_opt += ['-I%s' % bp.prefix.include]
sp_lib += [ld_flags_from_dirs([bp.prefix.lib],
['dbutterflypack',
'zbutterflypack'])]
if '+zfp' in strumpack:
zfp = strumpack['zfp']
sp_opt += ['-I%s' % zfp.prefix.include]
sp_lib += [ld_flags_from_dirs([zfp.prefix.lib], ['zfp'])]
if '+cuda' in strumpack:
# assuming also ('+cuda' in spec)
sp_lib += ['-lcusolver', '-lcublas']
options += [
'STRUMPACK_OPT=%s' % ' '.join(sp_opt),
'STRUMPACK_LIB=%s' % ' '.join(sp_lib)]
if '+suite-sparse' in spec:
ss_spec = 'suite-sparse:' + self.suitesparse_components
options += [
'SUITESPARSE_OPT=-I%s' % spec[ss_spec].prefix.include,
'SUITESPARSE_LIB=%s' %
ld_flags_from_library_list(spec[ss_spec].libs)]
if '+sundials' in spec:
sun_spec = 'sundials:' + self.sundials_components
options += [
'SUNDIALS_OPT=%s' % spec[sun_spec].headers.cpp_flags,
'SUNDIALS_LIB=%s' %
ld_flags_from_library_list(spec[sun_spec].libs)]
if '+petsc' in spec:
petsc = spec['petsc']
if '+shared' in petsc:
options += [
'PETSC_OPT=%s' % petsc.headers.cpp_flags,
'PETSC_LIB=%s' % ld_flags_from_library_list(petsc.libs)]
else:
options += ['PETSC_DIR=%s' % petsc.prefix]
if '+slepc' in spec:
slepc = spec['slepc']
options += [
'SLEPC_OPT=%s' % slepc.headers.cpp_flags,
'SLEPC_LIB=%s' % ld_flags_from_library_list(slepc.libs)]
if '+pumi' in spec:
pumi_libs = ['pumi', 'crv', 'ma', 'mds', 'apf', 'pcu', 'gmi',
'parma', 'lion', 'mth', 'apf_zoltan', 'spr']
options += [
'PUMI_OPT=-I%s' % spec['pumi'].prefix.include,
'PUMI_LIB=%s' %
ld_flags_from_dirs([spec['pumi'].prefix.lib], pumi_libs)]
if '+gslib' in spec:
options += [
'GSLIB_OPT=-I%s' % spec['gslib'].prefix.include,
'GSLIB_LIB=%s' %
ld_flags_from_dirs([spec['gslib'].prefix.lib], ['gs'])]
if '+netcdf' in spec:
lib_flags = ld_flags_from_dirs([spec['netcdf-c'].prefix.lib],
['netcdf'])
hdf5 = spec['hdf5:hl']
if hdf5.satisfies('~shared'):
hdf5_libs = hdf5.libs
hdf5_libs += LibraryList(find_system_libraries('libdl'))
lib_flags += " " + ld_flags_from_library_list(hdf5_libs)
options += [
'NETCDF_OPT=-I%s' % spec['netcdf-c'].prefix.include,
'NETCDF_LIB=%s' % lib_flags]
if '+zlib' in spec:
if "@:3.3.2" in spec:
options += ['ZLIB_DIR=%s' % spec['zlib'].prefix]
else:
options += [
'ZLIB_OPT=-I%s' % spec['zlib'].prefix.include,
'ZLIB_LIB=%s' %
ld_flags_from_library_list(spec['zlib'].libs)]
if '+mpfr' in spec:
options += [
'MPFR_OPT=-I%s' % spec['mpfr'].prefix.include,
'MPFR_LIB=%s' %
ld_flags_from_dirs([spec['mpfr'].prefix.lib], ['mpfr'])]
if '+gnutls' in spec:
options += [
'GNUTLS_OPT=-I%s' % spec['gnutls'].prefix.include,
'GNUTLS_LIB=%s' %
ld_flags_from_dirs([spec['gnutls'].prefix.lib], ['gnutls'])]
if '+libunwind' in spec:
libunwind = spec['unwind']
headers = find_headers('libunwind', libunwind.prefix.include)
headers.add_macro('-g')
libs = find_optional_library('libunwind', libunwind.prefix)
# When mfem uses libunwind, it also needs 'libdl'.
libs += LibraryList(find_system_libraries('libdl'))
options += [
'LIBUNWIND_OPT=%s' % headers.cpp_flags,
'LIBUNWIND_LIB=%s' % ld_flags_from_library_list(libs)]
if '+openmp' in spec:
options += [
'OPENMP_OPT=%s' % (xcompiler + self.compiler.openmp_flag)]
if '+cuda' in spec:
options += [
'CUDA_CXX=%s' % join_path(spec['cuda'].prefix, 'bin', 'nvcc'),
'CUDA_ARCH=sm_%s' % cuda_arch]
if '+rocm' in spec:
amdgpu_target = ','.join(spec.variants['amdgpu_target'].value)
options += [
'HIP_CXX=%s' % spec['hip'].hipcc,
'HIP_ARCH=%s' % amdgpu_target]
if '+occa' in spec:
options += ['OCCA_OPT=-I%s' % spec['occa'].prefix.include,
'OCCA_LIB=%s' %
ld_flags_from_dirs([spec['occa'].prefix.lib],
['occa'])]
if '+raja' in spec:
options += ['RAJA_OPT=-I%s' % spec['raja'].prefix.include,
'RAJA_LIB=%s' %
ld_flags_from_dirs([spec['raja'].prefix.lib],
['RAJA'])]
if '+amgx' in spec:
amgx = spec['amgx']
if '+shared' in amgx:
options += ['AMGX_OPT=-I%s' % amgx.prefix.include,
'AMGX_LIB=%s' %
ld_flags_from_library_list(amgx.libs)]
else:
options += ['AMGX_DIR=%s' % amgx.prefix]
if '+libceed' in spec:
options += ['CEED_OPT=-I%s' % spec['libceed'].prefix.include,
'CEED_LIB=%s' %
ld_flags_from_dirs([spec['libceed'].prefix.lib],
['ceed'])]
if '+umpire' in spec:
options += ['UMPIRE_OPT=-I%s' % spec['umpire'].prefix.include,
'UMPIRE_LIB=%s' %
ld_flags_from_library_list(spec['umpire'].libs)]
timer_ids = {'std': '0', 'posix': '2', 'mac': '4', 'mpi': '6'}
timer = spec.variants['timer'].value
if timer != 'auto':
options += ['MFEM_TIMER_TYPE=%s' % timer_ids[timer]]
if '+conduit' in spec:
conduit = spec['conduit']
headers = HeaderList(find(conduit.prefix.include, 'conduit.hpp',
recursive=True))
conduit_libs = ['libconduit', 'libconduit_relay',
'libconduit_blueprint']
libs = find_libraries(conduit_libs, conduit.prefix.lib,
shared=('+shared' in conduit))
libs += LibraryList(find_system_libraries('libdl'))
if '+hdf5' in conduit:
hdf5 = conduit['hdf5']
headers += find_headers('hdf5', hdf5.prefix.include)
libs += hdf5.libs
##################
# cyrush note:
##################
# spack's HeaderList is applying too much magic, undermining us:
#
# It applies a regex to strip back to the last "include" dir
# in the path. In our case we need to pass the following
# as part of the CONDUIT_OPT flags:
#
# -I<install_path>/include/conduit
#
# I tried several ways to present this path to the HeaderList,
# but the regex always kills the trailing conduit dir
# breaking build.
#
# To resolve the issue, we simply join our own string with
# the headers results (which are important b/c they handle
# hdf5 paths when enabled).
##################
# construct proper include path
conduit_include_path = conduit.prefix.include.conduit
# add this path to the found flags
conduit_opt_flags = "-I{0} {1}".format(conduit_include_path,
headers.cpp_flags)
options += [
'CONDUIT_OPT=%s' % conduit_opt_flags,
'CONDUIT_LIB=%s' % ld_flags_from_library_list(libs)]
make('config', *options, parallel=False)
make('info', parallel=False)
def build(self, spec, prefix):
make('lib')
@run_after('build')
def check_or_test(self):
# Running 'make check' or 'make test' may fail if MFEM_MPIEXEC or
# MFEM_MPIEXEC_NP are not set appropriately.
if not self.run_tests:
# check we can build ex1 (~mpi) or ex1p (+mpi).
make('-C', 'examples', 'ex1p' if ('+mpi' in self.spec) else 'ex1',
parallel=False)
# make('check', parallel=False)
else:
make('all')
make('test', parallel=False)
def install(self, spec, prefix):
make('install', parallel=False)
# TODO: The way the examples and miniapps are being installed is not
# perfect. For example, the makefiles do not work.
install_em = ('+examples' in spec) or ('+miniapps' in spec)
if install_em and ('+shared' in spec):
make('examples/clean', 'miniapps/clean')
# This is a hack to get the examples and miniapps to link with the
# installed shared mfem library:
with working_dir('config'):
os.rename('config.mk', 'config.mk.orig')
copy(str(self.config_mk), 'config.mk')
shutil.copystat('config.mk.orig', 'config.mk')
prefix_share = join_path(prefix, 'share', 'mfem')
if '+examples' in spec:
make('examples')
install_tree('examples', join_path(prefix_share, 'examples'))
if '+miniapps' in spec:
make('miniapps')
install_tree('miniapps', join_path(prefix_share, 'miniapps'))
if install_em:
install_tree('data', join_path(prefix_share, 'data'))
examples_src_dir = 'examples'
examples_data_dir = 'data'
@run_after('install')
def cache_test_sources(self):
"""Copy the example source files after the package is installed to an
install test subdirectory for use during `spack test run`."""
self.cache_extra_test_sources([self.examples_src_dir,
self.examples_data_dir])
def test(self):
test_dir = join_path(
self.test_suite.current_test_cache_dir,
self.examples_src_dir
)
# MFEM has many examples to serve as a suitable smoke check. ex10
# was chosen arbitrarily among the examples that work both with
        # MPI and without it.
test_exe = 'ex10p' if ('+mpi' in self.spec) else 'ex10'
self.run_test(
'make',
[
'CONFIG_MK={0}/share/mfem/config.mk'.format(self.prefix),
test_exe,
'parallel=False'
],
purpose='test: building {0}'.format(test_exe),
skip_missing=False,
work_dir=test_dir,
)
self.run_test(
'./{0}'.format(test_exe),
[
'--mesh',
'../{0}/beam-quad.mesh'.format(self.examples_data_dir)
],
[],
installed=False,
purpose='test: running {0}'.format(test_exe),
skip_missing=False,
work_dir=test_dir,
)
# this patch is only needed for mfem 4.1, where a few
# released files include byte order marks
@when('@4.1.0')
def patch(self):
# Remove the byte order mark since it messes with some compilers
files_with_bom = [
'fem/gslib.hpp', 'fem/gslib.cpp', 'linalg/hiop.hpp',
'miniapps/gslib/field-diff.cpp', 'miniapps/gslib/findpts.cpp',
'miniapps/gslib/pfindpts.cpp']
bom = '\xef\xbb\xbf' if sys.version_info < (3,) else u'\ufeff'
for f in files_with_bom:
filter_file(bom, '', f)
@property
def suitesparse_components(self):
"""Return the SuiteSparse components needed by MFEM."""
ss_comps = 'umfpack,cholmod,colamd,amd,camd,ccolamd,suitesparseconfig'
if self.spec.satisfies('@3.2:'):
ss_comps = 'klu,btf,' + ss_comps
return ss_comps
@property
def sundials_components(self):
"""Return the SUNDIALS components needed by MFEM."""
spec = self.spec
sun_comps = 'arkode,cvodes,nvecserial,kinsol'
if '+mpi' in spec:
if spec.satisfies('@4.2:'):
sun_comps += ',nvecparallel,nvecmpiplusx'
else:
sun_comps += ',nvecparhyp,nvecparallel'
if '+cuda' in spec and '+cuda' in spec['sundials']:
sun_comps += ',nveccuda'
return sun_comps
@property
def headers(self):
"""Export the main mfem header, mfem.hpp.
"""
hdrs = HeaderList(find(self.prefix.include, 'mfem.hpp',
recursive=False))
return hdrs or None
@property
def libs(self):
"""Export the mfem library file.
"""
libs = find_libraries('libmfem', root=self.prefix.lib,
shared=('+shared' in self.spec), recursive=False)
return libs or None
@property
def config_mk(self):
"""Export the location of the config.mk file.
This property can be accessed using spec['mfem'].package.config_mk
"""
dirs = [self.prefix, self.prefix.share.mfem]
for d in dirs:
f = join_path(d, 'config.mk')
if os.access(f, os.R_OK):
return FileList(f)
return FileList(find(self.prefix, 'config.mk', recursive=True))
@property
def test_mk(self):
"""Export the location of the test.mk file.
This property can be accessed using spec['mfem'].package.test_mk.
In version 3.3.2 and newer, the location of test.mk is also defined
inside config.mk, variable MFEM_TEST_MK.
"""
dirs = [self.prefix, self.prefix.share.mfem]
for d in dirs:
f = join_path(d, 'test.mk')
if os.access(f, os.R_OK):
return FileList(f)
return FileList(find(self.prefix, 'test.mk', recursive=True))
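# --- Usage sketch (not part of the Spack package) ---
# Illustrative spec strings exercising some of the variants declared above;
# exact concretization depends on the local Spack configuration:
#
#   spack install mfem+mpi+examples
#   spack install mfem@4.3.0+cuda cuda_arch=70
#   spack install mfem+petsc+superlu-dist
#
# Dependent packages can locate the installed makefile fragment via the
# 'config_mk' property above, i.e. spec['mfem'].package.config_mk.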
|
LLNL/spack
|
var/spack/repos/builtin/packages/mfem/package.py
|
Python
|
lgpl-2.1
| 39,670
|
[
"NetCDF"
] |
3a5659672becbf5b337270c74b50aff351cfe54431b0d805f42a8a4ff7222a64
|
# python
# EVE-Central.com Contribtastic
# Copyright (C) 2005-2010 Yann Ramin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License (in file COPYING) for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from evec_upload.version import *
from evec_upload.scanner import *
from evec_upload.uploader import get_uploader
from evec_upload.taskbar import *
import evec_upload.login
import evec_upload.options
from evec_upload.config import Config
import wx
import pickle
import os
import sys
import images
import urllib
import time
import wx.lib.newevent
(UpdateUploadEvent, EVT_UPDATE_UPLOAD) = wx.lib.newevent.NewEvent()
(DoneUploadEvent, EVT_DONE_UPLOAD) = wx.lib.newevent.NewEvent()
class MainFrame(wx.Frame):
MENU_SETTINGS = wx.NewId()
MENU_ABOUT = wx.NewId()
MENU_SCANNOW = wx.NewId()
MENU_LOCATE = wx.NewId()
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, -1, title,
pos=(150, 150), style = wx.CAPTION | wx.MINIMIZE_BOX | wx.RESIZE_BORDER | wx.SYSTEM_MENU | wx.CLOSE_BOX )# size=(350, 150))
try:
check_protocol()
r = check_client()
if r is not True:
dlg = wx.MessageDialog(self, 'Client outdated! New version ' + `r` + ' available! Visit EVE-Central.com to update!', 'Outdated client',
wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
os.system("explorer http://eve-central.com")
sys.exit(-1)
except IOError:
dlg = wx.MessageDialog(self, 'The network appears to be down. I cannot reach EVE-central.com. Check your firewall settings or internet connection',
'Can\'t communicate with EVE-Central.com',
wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
sys.exit(-1)
# Load config
config = Config()
r = config.reinit
if r == -1:
dlg = wx.MessageDialog(self, """The uploader client configuration has been reset since an old configuration file was found. Please check your configuration (such as path).""", 'Client Upgrade', wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.scanner_thread = ScannerThread()
self.scanner_thread.start()
def donecb(count, success, this=self):
evt = DoneUploadEvent(count = count, success = success)
wx.PostEvent(this, evt)
self.donecb = donecb
def updcb(typename, success, this=self):
#print "UPD: %s, %s" % (typename, success,)
evt = UpdateUploadEvent(typename = typename, success = success)
wx.PostEvent(this, evt)
self.updcb = updcb
self.uploader = get_uploader(config, updcb)
# Set icon
self.SetIcon(images.getIconIcon())
# Task Bar
self.tbicon = TaskBarIcon(self)
# Create the menubar
menuBar = wx.MenuBar()
# and a menu
menu = wx.Menu()
# option menu
opmenu = wx.Menu()
# help menu
helpmenu = wx.Menu()
# add an item to the menu, using \tKeyName automatically
# creates an accelerator, the third param is some help text
# that will show up in the statusbar
menu.Append(self.MENU_SCANNOW, "S&can now...")
menu.AppendSeparator()
#menu.Append(self.MENU_SETTINGS, "&Settings...")
#menu.Append(self.MENU_LOCATE, "&Locate cache folder...")
menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit")
helpmenu.Append(self.MENU_ABOUT, "&About")
# bind the menu event to an event handler
self.Bind(wx.EVT_MENU, self.OnTimer, id=self.MENU_SCANNOW)
self.Bind(wx.EVT_MENU, self.OnTimeToClose, id=wx.ID_EXIT)
self.Bind(wx.EVT_MENU, self.OnAbout, id = self.MENU_ABOUT)
self.Bind(wx.EVT_CLOSE, self.OnTimeToClose)
# and put the menu on the menubar
menuBar.Append(menu, "&File")
menuBar.Append(helpmenu, "&Help")
self.SetMenuBar(menuBar)
self.CreateStatusBar()
self.SetStatusText("Idle")
# Now create the Panel to put the other controls on.
panel = wx.Panel(self)
self.pathtext = wx.StaticText(panel, -1, "Please wait...")
self.pathtext_l = wx.StaticText(panel, -1, "Using folder: Autodetecting folders.")
#self.usertext_l = wx.StaticText(panel, -1, "Character name: ")
#self.usertext = wx.StaticText(panel, -1, "...")
self.uploadtext = wx.StaticText(panel, -1, "")
if config['character_id'] == 0:
self.uploads = long(0)
else:
self.uploads = long(0)
self.scans = 0
self.motd = wx.TextCtrl(panel, -1,
"",
size=(200, 100), style=wx.TE_MULTILINE|wx.TE_READONLY)
self.update_motd()
#text.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD))
sizer = wx.BoxSizer(wx.VERTICAL)
sizer_path = wx.FlexGridSizer(2,2)
#--
sizer_path.Add(self.pathtext_l, 2, wx.EXPAND|wx.ALL, 1)
sizer_path.Add(self.pathtext, 0, wx.ALL|wx.EXPAND, 1)
#sizer_path.Add(self.usertext_l, 2, wx.EXPAND|wx.ALL, 1)
#sizer_path.Add(self.usertext, 0, wx.ALL|wx.EXPAND, 1)
#--
sizer.Add(sizer_path, 0, wx.EXPAND | wx.ALL, 1)
line = wx.StaticLine(panel, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5)
sizer.Add(self.uploadtext, 0, wx.ALL, 1)
sizer.Add(self.motd, 4, wx.ALL|wx.EXPAND, 1)
panel.SetSizer(sizer)
panel.Layout()
self.timer = wx.Timer(self)
self.timer.Start(120000)
self.Bind(wx.EVT_TIMER, self.OnTimer)
self.Bind(EVT_UPDATE_UPLOAD, self.OnUploadUpdate)
self.Bind(EVT_DONE_UPLOAD, self.OnUploadDone)
self.load_infowidgets()
self.paths = []
self.paths_age = time.time()
def load_infowidgets(self):
config = Config()
self.pathtext.SetLabel("")
self.uploadtext.SetLabel("Uploads so far: " + `self.uploads`[:-1] + " Scans so far: " + `self.scans`)
def OnTimeToClose(self, evt):
"""Event handler for the button click."""
self.tbicon.OnTaskBarQuit(None)
self.Close()
sys.exit(0)
def OnUploadUpdate(self, evt):
self.uploads = self.uploads + 1
self.SetStatusText("Uploaded " + evt.typename)
self.load_infowidgets()
def OnUploadDone(self, evt):
self.load_infowidgets()
if evt.success == True:
self.SetStatusText("Idle - Uploaded " + `evt.count` + " last run")
self.scans += 1
self.load_infowidgets()
else:
self.SetStatusText("Error scanning directory! Check EVE path!")
def OnAbout(self, evt):
global ProgramVersionNice
dlg = wx.MessageDialog(self, 'Contribtastic! ' + ProgramVersionNice +"\n(c) 2006-2012 Yann Ramin. All Rights Reserved.\n\nSee EVE-Central.com for the latest updates and information.", 'About',
wx.OK
)
dlg.ShowModal()
dlg.Destroy()
def OnTimer(self, evt):
config = Config()
self.SetStatusText("Uploading...")
        # refresh the cached path list if it is empty or more than a day old
        if not self.paths or (time.time() - self.paths_age) > (60 * 60 * 24):
self.paths = default_locations()
self.paths_age = time.time()
for path in self.paths:
print "Scanning path ",path
job = ScannerPayload(path, self.uploader, self.donecb)
self.scanner_thread.trigger(job)
def update_motd(self):
motdf = urllib.urlopen("http://eve-central.com/motd.txt")
motd = ""
for line in motdf.readlines():
motd += line
motdf.close()
self.motd.WriteText(motd)
class EVEc_Upload(wx.App):
def OnInit(self):
frame = MainFrame(None, "Contribtastic!")
self.SetTopWindow(frame)
config = Config()
show = True
if len(sys.argv) > 1 and sys.argv[1] == "-hide":
show = False
print "Startup config: ",config.config_obj
if 'hide' in config:
show = not config['hide']
frame.Show(show)
return True
if __name__ == "__main__":
app = EVEc_Upload(redirect=False)
app.MainLoop()
|
theatrus/contribtastic
|
python/src/evec_upload/main.py
|
Python
|
gpl-2.0
| 9,305
|
[
"VisIt"
] |
708ff0c31dc8fd84b53a96fb4e98c4cdcdd26b9e61c018cc1d79822f53304869
|
#!/usr/bin/env python
"""
Script that emulates the behaviour of a shell to edit the CS config.
"""
import sys
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.ConfigurationSystem.Client.CSShellCLI import CSShellCLI
# Invariants:
# * root does not end with "/" or root is "/"
# * root starts with "/"
def main():
shell = CSShellCLI()
shell.cmdloop()
if __name__ == "__main__":
sys.exit( main() )
|
Andrew-McNab-UK/DIRAC
|
ConfigurationSystem/scripts/dirac-configuration-shell.py
|
Python
|
gpl-3.0
| 432
|
[
"DIRAC"
] |
cfe044fcb000b80b5cf7578ab33b6ca59c1228604fb12c77eae3108b5badaa01
|
# -*- coding: utf-8 -*-
"""
pint.util
~~~~~~~~~
Miscellaneous functions for pint.
:copyright: 2016 by Pint Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
from decimal import Decimal
import locale
import sys
import re
import operator
from numbers import Number
from fractions import Fraction
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from logging import NullHandler
import logging
from token import STRING, NAME, OP, NUMBER
from tokenize import untokenize
from .compat import string_types, tokenizer, lru_cache, maketrans, NUMERIC_TYPES
from .formatting import format_unit,siunitx_format_unit
from .pint_eval import build_eval_tree
from .errors import DefinitionSyntaxError
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
def matrix_to_string(matrix, row_headers=None, col_headers=None, fmtfun=lambda x: str(int(x))):
"""Takes a 2D matrix (as nested list) and returns a string.
"""
ret = []
if col_headers:
ret.append(('\t' if row_headers else '') + '\t'.join(col_headers))
if row_headers:
ret += [rh + '\t' + '\t'.join(fmtfun(f) for f in row)
for rh, row in zip(row_headers, matrix)]
else:
ret += ['\t'.join(fmtfun(f) for f in row)
for row in matrix]
return '\n'.join(ret)
def transpose(matrix):
"""Takes a 2D matrix (as nested list) and returns the transposed version.
"""
return [list(val) for val in zip(*matrix)]
def column_echelon_form(matrix, ntype=Fraction, transpose_result=False):
"""Calculates the column echelon form using Gaussian elimination.
:param matrix: a 2D matrix as nested list.
:param ntype: the numerical type to use in the calculation.
:param transpose_result: indicates if the returned matrix should be transposed.
:return: column echelon form, transformed identity matrix, swapped rows
"""
lead = 0
M = transpose(matrix)
_transpose = transpose if transpose_result else lambda x: x
rows, cols = len(M), len(M[0])
new_M = []
for row in M:
r = []
for x in row:
if isinstance(x, float):
x = ntype.from_float(x)
else:
x = ntype(x)
r.append(x)
new_M.append(r)
M = new_M
# M = [[ntype(x) for x in row] for row in M]
I = [[ntype(1) if n == nc else ntype(0) for nc in range(rows)] for n in range(rows)]
swapped = []
for r in range(rows):
if lead >= cols:
return _transpose(M), _transpose(I), swapped
i = r
while M[i][lead] == 0:
i += 1
if i != rows:
continue
i = r
lead += 1
if cols == lead:
return _transpose(M), _transpose(I), swapped
M[i], M[r] = M[r], M[i]
I[i], I[r] = I[r], I[i]
swapped.append(i)
lv = M[r][lead]
M[r] = [mrx / lv for mrx in M[r]]
I[r] = [mrx / lv for mrx in I[r]]
for i in range(rows):
if i == r:
continue
lv = M[i][lead]
M[i] = [iv - lv*rv for rv, iv in zip(M[r], M[i])]
I[i] = [iv - lv*rv for rv, iv in zip(I[r], I[i])]
lead += 1
return _transpose(M), _transpose(I), swapped
def pi_theorem(quantities, registry=None):
"""Builds dimensionless quantities using the Buckingham π theorem
:param quantities: mapping between variable name and units
:type quantities: dict
:return: a list of dimensionless quantities expressed as dicts
"""
# Preprocess input and build the dimensionality Matrix
quant = []
dimensions = set()
if registry is None:
getdim = lambda x: x
else:
getdim = registry.get_dimensionality
for name, value in quantities.items():
if isinstance(value, string_types):
value = ParserHelper.from_string(value)
if isinstance(value, dict):
dims = getdim(UnitsContainer(value))
elif not hasattr(value, 'dimensionality'):
dims = getdim(value)
else:
dims = value.dimensionality
if not registry and any(not key.startswith('[') for key in dims):
logger.warning('A non dimension was found and a registry was not provided. '
'Assuming that it is a dimension name: {}.'.format(dims))
quant.append((name, dims))
dimensions = dimensions.union(dims.keys())
dimensions = list(dimensions)
# Calculate dimensionless quantities
M = [[dimensionality[dimension] for name, dimensionality in quant]
for dimension in dimensions]
M, identity, pivot = column_echelon_form(M, transpose_result=False)
# Collect results
# Make all numbers integers and minimize the number of negative exponents.
# Remove zeros
results = []
for rowm, rowi in zip(M, identity):
if any(el != 0 for el in rowm):
continue
max_den = max(f.denominator for f in rowi)
neg = -1 if sum(f < 0 for f in rowi) > sum(f > 0 for f in rowi) else 1
results.append(dict((q[0], neg * f.numerator * max_den / f.denominator)
for q, f in zip(quant, rowi) if f.numerator != 0))
return results
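# Illustrative example (not part of the module): applying pi_theorem to a simple
# pendulum with period T, length L and gravitational acceleration g. The variable
# names and the registry are placeholders.
#
#     import pint
#     ureg = pint.UnitRegistry()
#     pi_theorem({'T': 's', 'L': 'm', 'g': 'm/s**2'}, registry=ureg)
#     # -> [{'T': 2.0, 'L': -1.0, 'g': 1.0}], i.e. T**2 * g / L is dimensionless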
def solve_dependencies(dependencies):
"""Solve a dependency graph.
:param dependencies: dependency dictionary. For each key, the value is
an iterable indicating its dependencies.
    :return: list of sets, each containing keys of independent tasks that depend
             only on tasks in the previous sets of the list.
"""
d = dict((key, set(dependencies[key])) for key in dependencies)
r = []
while d:
# values not in keys (items without dep)
t = set(i for v in d.values() for i in v) - set(d.keys())
# and keys without value (items without dep)
t.update(k for k, v in d.items() if not v)
# can be done right away
if not t:
raise ValueError('Cyclic dependencies exist among these items: {}'.format(', '.join(repr(x) for x in d.items())))
r.append(t)
# and cleaned up
d = dict(((k, v - t) for k, v in d.items() if v))
return r
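# Illustrative example (not part of the module):
#
#     solve_dependencies({'a': {'b', 'c'}, 'b': {'c'}, 'c': set()})
#     # -> [{'c'}, {'b'}, {'a'}]  ('c' depends on nothing, 'b' on 'c', 'a' on both)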
def find_shortest_path(graph, start, end, path=None):
path = (path or []) + [start]
if start == end:
return path
if not start in graph:
return None
shortest = None
for node in graph[start]:
if node not in path:
newpath = find_shortest_path(graph, node, end, path)
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath
return shortest
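# Illustrative example (not part of the module):
#
#     graph = {'a': ['b', 'c'], 'b': ['c'], 'c': []}
#     find_shortest_path(graph, 'a', 'c')   # -> ['a', 'c']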
def find_connected_nodes(graph, start, visited=None):
if not start in graph:
return None
visited = (visited or set())
visited.add(start)
for node in graph[start]:
if node not in visited:
find_connected_nodes(graph, node, visited)
return visited
class udict(dict):
""" Custom dict implementing __missing__.
"""
def __missing__(self, key):
return 0.
class UnitsContainer(Mapping):
"""The UnitsContainer stores the product of units and their respective
exponent and implements the corresponding operations.
UnitsContainer is a read-only mapping. All operations (even in place ones)
return new instances.
"""
__slots__ = ('_d', '_hash')
def __init__(self, *args, **kwargs):
d = udict(*args, **kwargs)
self._d = d
for key, value in d.items():
if not isinstance(key, string_types):
raise TypeError('key must be a str, not {}'.format(type(key)))
if not isinstance(value, Number):
raise TypeError('value must be a number, not {}'.format(type(value)))
if not isinstance(value, float):
d[key] = float(value)
self._hash = hash(frozenset(self._d.items()))
def copy(self):
return self.__copy__()
def add(self, key, value):
newval = self._d[key] + value
new = self.copy()
if newval:
new._d[key] = newval
else:
del new._d[key]
return new
def remove(self, keys):
""" Create a new UnitsContainer purged from given keys.
"""
d = udict(self._d)
return UnitsContainer(((key, d[key]) for key in d if key not in keys))
def rename(self, oldkey, newkey):
""" Create a new UnitsContainer in which an entry has been renamed.
"""
d = udict(self._d)
d[newkey] = d.pop(oldkey)
return UnitsContainer(d)
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
def __getitem__(self, key):
return self._d[key]
def __contains__(self, key):
return key in self._d
def __hash__(self):
return self._hash
def __getstate__(self):
return {'_d': self._d, '_hash': self._hash}
def __setstate__(self, state):
self._d = state['_d']
self._hash = state['_hash']
def __eq__(self, other):
if isinstance(other, UnitsContainer):
other = other._d
elif isinstance(other, string_types):
other = ParserHelper.from_string(other)
other = other._d
return dict.__eq__(self._d, other)
def __str__(self):
return self.__format__('')
def __repr__(self):
tmp = '{%s}' % ', '.join(["'{}': {}".format(key, value)
for key, value in sorted(self._d.items())])
return '<UnitsContainer({})>'.format(tmp)
def __format__(self, spec):
return format_unit(self, spec)
def format_babel(self, spec, **kwspec):
return format_unit(self, spec, **kwspec)
def __copy__(self):
return UnitsContainer(self._d)
def __mul__(self, other):
d = udict(self._d)
if not isinstance(other, self.__class__):
err = 'Cannot multiply UnitsContainer by {}'
raise TypeError(err.format(type(other)))
for key, value in other.items():
d[key] += value
keys = [key for key, value in d.items() if value == 0]
for key in keys:
del d[key]
return UnitsContainer(d)
__rmul__ = __mul__
def __pow__(self, other):
if not isinstance(other, NUMERIC_TYPES):
err = 'Cannot power UnitsContainer by {}'
raise TypeError(err.format(type(other)))
d = udict(self._d)
for key, value in d.items():
d[key] *= other
return UnitsContainer(d)
def __truediv__(self, other):
if not isinstance(other, self.__class__):
err = 'Cannot divide UnitsContainer by {}'
raise TypeError(err.format(type(other)))
d = udict(self._d)
for key, value in other.items():
d[key] -= value
keys = [key for key, value in d.items() if value == 0]
for key in keys:
del d[key]
return UnitsContainer(d)
def __rtruediv__(self, other):
if not isinstance(other, self.__class__) and other != 1:
err = 'Cannot divide {} by UnitsContainer'
raise TypeError(err.format(type(other)))
return self**-1
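# Illustrative example (not part of the module): UnitsContainer is an immutable
# mapping from unit name to exponent, and the operators combine exponents:
#
#     UnitsContainer({'meter': 1}) / UnitsContainer({'second': 2})
#     # -> <UnitsContainer({'meter': 1.0, 'second': -2.0})>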
class ParserHelper(UnitsContainer):
""" The ParserHelper stores in place the product of variables and
their respective exponent and implements the corresponding operations.
ParserHelper is a read-only mapping. All operations (even in place ones)
return new instances.
    WARNING: the hash value does not take the scale attribute into account,
    so be careful when using instances as dict keys: two unequal objects
    can have the same hash.
"""
__slots__ = ('scale', )
def __init__(self, scale=1, *args, **kwargs):
super(ParserHelper, self).__init__(*args, **kwargs)
self.scale = scale
@classmethod
def from_word(cls, input_word):
"""Creates a ParserHelper object with a single variable with exponent one.
        Equivalent to: ParserHelper(1, {input_word: 1})
"""
return cls(1, [(input_word, 1)])
@classmethod
def from_string(cls, input_string):
return cls._from_string(input_string)
@classmethod
def eval_token(cls, token, use_decimal=False):
token_type = token.type
token_text = token.string
if token_type == NUMBER:
try:
return int(token_text)
except ValueError:
if use_decimal:
return Decimal(token_text)
return float(token_text)
elif token_type == NAME:
return ParserHelper.from_word(token_text)
else:
raise Exception('unknown token type')
@classmethod
@lru_cache()
def _from_string(cls, input_string):
"""Parse linear expression mathematical units and return a quantity object.
"""
if not input_string:
return cls()
input_string = string_preprocessor(input_string)
if '[' in input_string:
input_string = input_string.replace('[', '__obra__').replace(']', '__cbra__')
reps = True
else:
reps = False
gen = tokenizer(input_string)
ret = build_eval_tree(gen).evaluate(cls.eval_token)
if isinstance(ret, Number):
return ParserHelper(ret)
if not reps:
return ret
return ParserHelper(ret.scale,
dict((key.replace('__obra__', '[').replace('__cbra__', ']'), value)
for key, value in ret.items()))
def __copy__(self):
return ParserHelper(scale=self.scale, **self)
def copy(self):
return self.__copy__()
def __hash__(self):
if self.scale != 1.0:
mess = 'Only scale 1.0 ParserHelper instance should be considered hashable'
raise ValueError(mess)
return self._hash
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.scale == other.scale and\
super(ParserHelper, self).__eq__(other)
elif isinstance(other, string_types):
return self == ParserHelper.from_string(other)
elif isinstance(other, Number):
return self.scale == other and not len(self._d)
else:
return self.scale == 1. and super(ParserHelper, self).__eq__(other)
def operate(self, items, op=operator.iadd, cleanup=True):
d = udict(self._d)
for key, value in items:
d[key] = op(d[key], value)
if cleanup:
keys = [key for key, value in d.items() if value == 0]
for key in keys:
del d[key]
return self.__class__(self.scale, d)
def __str__(self):
tmp = '{%s}' % ', '.join(["'{}': {}".format(key, value)
for key, value in sorted(self._d.items())])
return '{} {}'.format(self.scale, tmp)
def __repr__(self):
tmp = '{%s}' % ', '.join(["'{}': {}".format(key, value)
for key, value in sorted(self._d.items())])
return '<ParserHelper({}, {})>'.format(self.scale, tmp)
def __mul__(self, other):
if isinstance(other, string_types):
new = self.add(other, 1)
elif isinstance(other, Number):
new = self.copy()
new.scale *= other
elif isinstance(other, self.__class__):
new = self.operate(other.items())
new.scale *= other.scale
else:
new = self.operate(other.items())
return new
__rmul__ = __mul__
def __pow__(self, other):
d = self._d.copy()
for key in self._d:
d[key] *= other
return self.__class__(self.scale**other, d)
def __truediv__(self, other):
if isinstance(other, string_types):
new = self.add(other, -1)
elif isinstance(other, Number):
new = self.copy()
new.scale /= other
elif isinstance(other, self.__class__):
new = self.operate(other.items(), operator.sub)
new.scale /= other.scale
else:
new = self.operate(other.items(), operator.sub)
return new
__floordiv__ = __truediv__
def __rtruediv__(self, other):
new = self.__pow__(-1)
if isinstance(other, string_types):
new = new.add(other, 1)
elif isinstance(other, Number):
new.scale *= other
elif isinstance(other, self.__class__):
new = self.operate(other.items(), operator.add)
new.scale *= other.scale
else:
new = new.operate(other.items(), operator.add)
return new
#: List of regex substitution pairs.
_subs_re = [('\N{DEGREE SIGN}', " degree"),
(r"([\w\.\-\+\*\\\^])\s+", r"\1 "), # merge multiple spaces
(r"({}) squared", r"\1**2"), # Handle square and cube
(r"({}) cubed", r"\1**3"),
(r"cubic ({})", r"\1**3"),
(r"square ({})", r"\1**2"),
(r"sq ({})", r"\1**2"),
(r"\b([0-9]+\.?[0-9]*)(?=[e|E][a-zA-Z]|[a-df-zA-DF-Z])", r"\1*"), # Handle numberLetter for multiplication
(r"([\w\.\-])\s+(?=\w)", r"\1*"), # Handle space for multiplication
]
#: Compiles the regex and replace {} by a regex that matches an identifier.
_subs_re = [(re.compile(a.format(r"[_a-zA-Z][_a-zA-Z0-9]*")), b) for a, b in _subs_re]
_pretty_table = maketrans('⁰¹²³⁴⁵⁶⁷⁸⁹·⁻', '0123456789*-')
_pretty_exp_re = re.compile(r"⁻?[⁰¹²³⁴⁵⁶⁷⁸⁹]+(?:\.[⁰¹²³⁴⁵⁶⁷⁸⁹]*)?")
def string_preprocessor(input_string):
input_string = input_string.replace(",", "")
input_string = input_string.replace(" per ", "/")
for a, b in _subs_re:
input_string = a.sub(b, input_string)
# Replace pretty format characters
for pretty_exp in _pretty_exp_re.findall(input_string):
exp = '**' + pretty_exp.translate(_pretty_table)
input_string = input_string.replace(pretty_exp, exp)
input_string = input_string.translate(_pretty_table)
# Handle caret exponentiation
input_string = input_string.replace("^", "**")
return input_string
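# Illustrative sketch of what string_preprocessor is expected to produce,
# based on the substitution rules above (the exact outputs are an assumption,
# not verified behaviour):
#
#     string_preprocessor('meter per second squared')  # -> 'meter/second**2'
#     string_preprocessor('kg*m/s^2')                  # -> 'kg*m/s**2'
#     string_preprocessor('3 meter')                   # -> '3*meter'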
def _is_dim(name):
return name[0] == '[' and name[-1] == ']'
class SharedRegistryObject(object):
"""Base class for object keeping a refrence to the registree.
Such object are for now _Quantity and _Unit, in a number of places it is
that an object from this class has a '_units' attribute.
"""
def _check(self, other):
"""Check if the other object use a registry and if so that it is the
same registry.
Return True is both use a registry and they use the same, False is
other don't use a registry and raise ValueError if other don't use the
same unit registry.
"""
if self._REGISTRY is getattr(other, '_REGISTRY', None):
return True
elif isinstance(other, SharedRegistryObject):
mess = 'Cannot operate with {} and {} of different registries.'
raise ValueError(mess.format(self.__class__.__name__,
other.__class__.__name__))
else:
return False
class PrettyIPython(object):
"""Mixin to add pretty-printers for IPython"""
def _repr_html_(self):
if "~" in self.default_format:
return "{:~H}".format(self)
else:
return "{:H}".format(self)
def _repr_latex_(self):
if "~" in self.default_format:
return "${:~L}$".format(self)
else:
return "${:L}$".format(self)
def _repr_pretty_(self, p, cycle):
if "~" in self.default_format:
p.text("{:~P}".format(self))
else:
p.text("{:P}".format(self))
def to_units_container(unit_like, registry=None):
""" Convert a unit compatible type to a UnitsContainer.
"""
mro = type(unit_like).mro()
if UnitsContainer in mro:
return unit_like
elif SharedRegistryObject in mro:
return unit_like._units
elif string_types in mro:
if registry:
return registry._parse_units(unit_like)
else:
return ParserHelper.from_string(unit_like)
elif dict in mro:
return UnitsContainer(unit_like)
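# to_units_container dispatches on the type of its argument: a UnitsContainer
# is returned unchanged, a SharedRegistryObject (Quantity/Unit) contributes its
# '_units', a string is parsed (through the registry when one is given), and a
# plain dict is wrapped in a UnitsContainer. Anything else falls through and
# the function implicitly returns None.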
def infer_base_unit(q):
"""Return UnitsContainer of q with all prefixes stripped."""
d = udict()
parse = q._REGISTRY.parse_unit_name
for unit_name, power in q._units.items():
completely_parsed_unit = list(parse(unit_name))[-1]
_, base_unit, __ = completely_parsed_unit
d[base_unit] += power
return UnitsContainer(dict((k, v) for k, v in d.items() if v != 0)) # remove values that resulted in a power of 0
def fix_str_conversions(cls):
"""Enable python2/3 compatible behaviour for __str__."""
def __bytes__(self):
return self.__unicode__().encode(locale.getpreferredencoding())
cls.__unicode__ = __unicode__ = cls.__str__
cls.__bytes__ = __bytes__
if sys.version_info[0] == 2:
cls.__str__ = __bytes__
else:
cls.__str__ = __unicode__
return cls
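# Usage sketch (assumption, not part of the original module): fix_str_conversions
# is meant to be applied as a class decorator to classes defining a unicode
# __str__, e.g.
#
#     @fix_str_conversions
#     class Greeting(object):
#         def __str__(self):
#             return u'h\xe9llo'
#
# so that str() returns locale-encoded bytes on Python 2 and text on Python 3.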
class SourceIterator(object):
"""Iterator to facilitate reading the definition files.
    Accepts any sequence (such as a list of lines, a file or another SourceIterator).
    The iterator yields the line number and the line, skipping comments and
    empty lines and stripping whitespace.
for lineno, line in SourceIterator(sequence):
# do something here
"""
def __new__(cls, sequence):
if isinstance(sequence, SourceIterator):
return sequence
obj = object.__new__(cls)
if sequence is not None:
obj.internal = enumerate(sequence, 1)
obj.last = (None, None)
return obj
def __iter__(self):
return self
def __next__(self):
line = ''
while not line or line.startswith('#'):
lineno, line = next(self.internal)
line = line.split('#', 1)[0].strip()
self.last = lineno, line
return lineno, line
next = __next__
def block_iter(self):
"""Iterate block including header.
"""
return BlockIterator(self)
class BlockIterator(SourceIterator):
"""Like SourceIterator but stops when it finds '@end'
It also raises an error if another '@' directive is found inside.
"""
def __new__(cls, line_iterator):
obj = SourceIterator.__new__(cls, None)
obj.internal = line_iterator.internal
obj.last = line_iterator.last
obj.done_last = False
return obj
def __next__(self):
if not self.done_last:
self.done_last = True
return self.last
lineno, line = SourceIterator.__next__(self)
if line.startswith('@end'):
raise StopIteration
elif line.startswith('@'):
raise DefinitionSyntaxError('cannot nest @ directives', lineno=lineno)
return lineno, line
next = __next__
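# Minimal usage sketch of the iterators above (illustrative only; the sample
# lines are made up):
#
#     lines = ['# comment', 'metre = [length]', '', '@group us',
#              'inch = 2.54 cm', '@end']
#     for lineno, line in SourceIterator(lines):
#         ...  # yields (2, 'metre = [length]'), (4, '@group us'), ...
#
# After the '@group' header has been read, calling block_iter() on the same
# SourceIterator yields the header line again and then the block contents,
# stopping when '@end' is reached.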
|
EternityForest/KaithemAutomation
|
kaithem/src/thirdparty/pint/util.py
|
Python
|
gpl-3.0
| 23,567
|
[
"Gaussian"
] |
4833ea8506ebd1c08c408de2d7f4d04459d329f54ab10064066cfc234169b2c1
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.526524
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/deviceinfo.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class deviceinfo(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(deviceinfo, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_15845395 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2deviceinfo>
\t<e2enigmaversion>''')
_v = VFFSL(SL,"enigmaver",True) # u'$enigmaver' on line 4, col 19
if _v is not None: write(_filter(_v, rawExpr=u'$enigmaver')) # from line 4, col 19.
write(u'''</e2enigmaversion>
\t<e2imageversion>''')
_v = VFFSL(SL,"imagever",True) # u'$imagever' on line 5, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$imagever')) # from line 5, col 18.
write(u'''</e2imageversion>
\t<e2webifversion>''')
_v = VFFSL(SL,"webifver",True) # u'$webifver' on line 6, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$webifver')) # from line 6, col 18.
write(u'''</e2webifversion>
\t<e2fpversion>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"fp_version",True)) # u'$str($fp_version)' on line 7, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$str($fp_version)')) # from line 7, col 15.
write(u'''</e2fpversion>
\t<e2devicename>''')
_v = VFFSL(SL,"model",True) # u'$model' on line 8, col 16
if _v is not None: write(_filter(_v, rawExpr=u'$model')) # from line 8, col 16.
write(u'''</e2devicename>
\t<e2frontends>
''')
for tuner in VFFSL(SL,"tuners",True): # generated from line 10, col 3
write(u'''\t\t<e2frontend>
\t\t\t<e2name>''')
_v = VFFSL(SL,"tuner.name",True) # u'$tuner.name' on line 12, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$tuner.name')) # from line 12, col 12.
write(u'''</e2name>
\t\t\t<e2model>''')
_v = VFFSL(SL,"tuner.type",True) # u'$tuner.type' on line 13, col 13
if _v is not None: write(_filter(_v, rawExpr=u'$tuner.type')) # from line 13, col 13.
write(u'''</e2model>
\t\t</e2frontend>
''')
write(u'''\t</e2frontends>
\t<e2network>
''')
for iface in VFFSL(SL,"ifaces",True): # generated from line 18, col 3
write(u'''\t\t<e2interface>
\t\t\t<e2name>''')
_v = VFFSL(SL,"iface.name",True) # u'$iface.name' on line 20, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$iface.name')) # from line 20, col 12.
write(u'''</e2name>
\t\t\t<e2mac>''')
_v = VFFSL(SL,"iface.mac",True) # u'$iface.mac' on line 21, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$iface.mac')) # from line 21, col 11.
write(u'''</e2mac>
\t\t\t<e2dhcp>''')
_v = VFFSL(SL,"iface.dhcp",True) # u'$iface.dhcp' on line 22, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$iface.dhcp')) # from line 22, col 12.
write(u'''</e2dhcp>
\t\t\t<e2ip>''')
_v = VFFSL(SL,"iface.ip",True) # u'$iface.ip' on line 23, col 10
if _v is not None: write(_filter(_v, rawExpr=u'$iface.ip')) # from line 23, col 10.
write(u'''</e2ip>
\t\t\t<e2gateway>''')
_v = VFFSL(SL,"iface.gw",True) # u'$iface.gw' on line 24, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$iface.gw')) # from line 24, col 15.
write(u'''</e2gateway>
\t\t\t<e2netmask>''')
_v = VFFSL(SL,"iface.mask",True) # u'$iface.mask' on line 25, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$iface.mask')) # from line 25, col 15.
write(u'''</e2netmask>
\t\t</e2interface>
''')
write(u'''\t</e2network>
\t<e2hdds>
''')
for hd in VFFSL(SL,"hdd",True): # generated from line 30, col 3
write(u'''\t\t<e2hdd>
\t\t\t<e2model>''')
_v = VFFSL(SL,"hd.model",True) # u'$hd.model' on line 32, col 13
if _v is not None: write(_filter(_v, rawExpr=u'$hd.model')) # from line 32, col 13.
write(u'''</e2model>
\t\t\t<e2capacity>''')
_v = VFFSL(SL,"hd.capacity",True) # u'$hd.capacity' on line 33, col 16
if _v is not None: write(_filter(_v, rawExpr=u'$hd.capacity')) # from line 33, col 16.
write(u'''</e2capacity>
\t\t\t<e2free>''')
_v = VFFSL(SL,"hd.free",True) # u'$hd.free' on line 34, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$hd.free')) # from line 34, col 12.
write(u'''</e2free>
\t\t</e2hdd>
''')
write(u'''\t</e2hdds>
</e2deviceinfo>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_15845395
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_deviceinfo= 'respond'
## END CLASS DEFINITION
if not hasattr(deviceinfo, '_initCheetahAttributes'):
templateAPIClass = getattr(deviceinfo, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(deviceinfo)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=deviceinfo()).run()
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/web/deviceinfo.py
|
Python
|
gpl-2.0
| 9,021
|
[
"VisIt"
] |
7395c291b178c357e674f886c09830dc8d303ae019feb9e10d6bb90ab0fb1e34
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
if "jobs" in globals()["pyladamodules"]:
queues = "debug", "regular", "low"
""" List of slurm or pbs queues allowed for use.
This is used by ipython's %launch magic function.
It is not required for slurm systems.
If empty, then %launch will not have a queue option.
"""
|
pylada/pylada-light
|
config/carver_mpi.py
|
Python
|
gpl-3.0
| 1,456
|
[
"CRYSTAL",
"VASP"
] |
867e9cde5b6ef4c74b89d6199f83dd71d97a4083f18aa28ca2ce5e55df123a69
|
# -*- coding: utf-8 -*-
#
# CampbellSiegert.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Campbell & Siegert approximation example
----------------------------------------------
Example script that applies Campbell's theorem and Siegert's rate
approximation to an integrate-and-fire neuron.
This script calculates the firing rate of an integrate-and-fire neuron
in response to a series of Poisson generators, each specified with a
rate and a synaptic weight. The calculated rate is compared with a
simulation using the ``iaf_psc_alpha`` model.
References:
~~~~~~~~~~~~
.. [1] Papoulis A (1991). Probability, Random Variables, and
Stochastic Processes, McGraw-Hill
.. [2] Siegert AJ (1951). On the first passage time probability problem,
Phys Rev 81: 617-623
Authors
~~~~~~~~
S. Schrader, Siegert implementation by T. Tetzlaff
"""
###############################################################################
# First, we import all necessary modules for simulation and analysis. Scipy
# should be imported before nest.
from scipy.special import erf
from scipy.optimize import fmin
import numpy as np
import nest
###############################################################################
# We first set the parameters of neurons, noise and the simulation. First
# settings are with a single Poisson source, second is with two Poisson
# sources with half the rate of the single source. Both should lead to the
# same results.
weights = [0.1] # (mV) psp amplitudes
rates = [10000.] # (1/s) rate of Poisson sources
# weights = [0.1, 0.1] # (mV) psp amplitudes
# rates = [5000., 5000.] # (1/s) rate of Poisson sources
C_m = 250.0 # (pF) capacitance
E_L = -70.0 # (mV) resting potential
I_e = 0.0 # (nA) external current
V_reset = -70.0 # (mV) reset potential
V_th = -55.0 # (mV) firing threshold
t_ref = 2.0 # (ms) refractory period
tau_m = 10.0 # (ms) membrane time constant
tau_syn_ex = .5 # (ms) excitatory synaptic time constant
tau_syn_in = 2.0 # (ms) inhibitory synaptic time constant
simtime = 20000 # (ms) duration of simulation
n_neurons = 10 # number of simulated neurons
###############################################################################
# For convenience we define some units.
pF = 1e-12
ms = 1e-3
pA = 1e-12
mV = 1e-3
mu = 0.0
sigma2 = 0.0
J = []
assert(len(weights) == len(rates))
###############################################################################
# In the following we analytically compute the firing rate of the neuron
# based on Campbell's theorem [1]_ and Siegert's approximation [2]_.
for rate, weight in zip(rates, weights):
if weight > 0:
tau_syn = tau_syn_ex
else:
tau_syn = tau_syn_in
t_psp = np.arange(0., 10. * (tau_m * ms + tau_syn * ms), 0.0001)
# We define the form of a single PSP, which allows us to match the
    # maximal value to our chosen weight.
def psp(x):
return - ((C_m * pF) / (tau_syn * ms) * (1 / (C_m * pF)) *
(np.exp(1) / (tau_syn * ms)) *
(((-x * np.exp(-x / (tau_syn * ms))) /
(1 / (tau_syn * ms) - 1 / (tau_m * ms))) +
(np.exp(-x / (tau_m * ms)) - np.exp(-x / (tau_syn * ms))) /
((1 / (tau_syn * ms) - 1 / (tau_m * ms)) ** 2)))
min_result = fmin(psp, [0], full_output=1, disp=0)
# We need to calculate the PSC amplitude (i.e., the weight we set in NEST)
# from the PSP amplitude, that we have specified above.
fudge = -1. / min_result[1]
J.append(C_m * weight / (tau_syn) * fudge)
# We now use Campbell's theorem to calculate mean and variance of
# the input due to the Poisson sources. The mean and variance add up
# for each Poisson source.
mu += (rate * (J[-1] * pA) * (tau_syn * ms) *
np.exp(1) * (tau_m * ms) / (C_m * pF))
sigma2 += rate * (2 * tau_m * ms + tau_syn * ms) * \
(J[-1] * pA * tau_syn * ms * np.exp(1) * tau_m * ms /
(2 * (C_m * pF) * (tau_m * ms + tau_syn * ms))) ** 2
mu += (E_L * mV)
sigma = np.sqrt(sigma2)
###############################################################################
# Having calculated the mean and variance of the input, we can now employ
# Siegert's rate approximation.
num_iterations = 100
upper = (V_th * mV - mu) / (sigma * np.sqrt(2))
lower = (E_L * mV - mu) / (sigma * np.sqrt(2))
interval = (upper - lower) / num_iterations
tmpsum = 0.0
for cu in range(0, num_iterations + 1):
u = lower + cu * interval
f = np.exp(u ** 2) * (1 + erf(u))
tmpsum += interval * np.sqrt(np.pi) * f
r = 1. / (t_ref * ms + tau_m * ms * tmpsum)
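# The loop above evaluates the integral appearing in Siegert's formula,
# sqrt(pi) * \int_lower^upper exp(u**2) * (1 + erf(u)) du, with a simple
# Riemann sum over num_iterations + 1 points; the predicted firing rate then
# follows as r = 1 / (t_ref + tau_m * tmpsum), with all times in seconds.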
###############################################################################
# We now simulate neurons receiving Poisson spike trains as input,
# and compare the theoretical result to the empirical value.
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
neurondict = {'V_th': V_th,
'tau_m': tau_m,
'tau_syn_ex': tau_syn_ex,
'tau_syn_in': tau_syn_in,
'C_m': C_m,
'E_L': E_L,
't_ref': t_ref,
'V_m': E_L,
'V_reset': E_L}
###############################################################################
# Neurons and devices are instantiated. We set a high threshold as we want
# free membrane potential. In addition we choose a small resolution for
# recording the membrane to collect good statistics.
nest.SetDefaults('iaf_psc_alpha', neurondict)
n = nest.Create('iaf_psc_alpha', n_neurons)
n_free = nest.Create('iaf_psc_alpha', 1, {'V_th': 1e12})
pg = nest.Create('poisson_generator', len(rates),
[{'rate': float(rate_i)} for rate_i in rates])
vm = nest.Create('voltmeter', 1, {'interval': .1})
sd = nest.Create('spike_detector')
###############################################################################
# We connect devices and neurons and start the simulation.
for indx in range(len(pg)):
nest.Connect(pg[indx], n,
syn_spec={'weight': float(J[indx]), 'delay': 0.1})
nest.Connect(pg[indx], n_free, syn_spec={'weight': J[indx]})
nest.Connect(vm, n_free)
nest.Connect(n, sd)
nest.Simulate(simtime)
###############################################################################
# Here we read out the recorded membrane potential. The first 500 steps are
# omitted so initial transients do not perturb our results. We then print the
# results from theory and simulation.
v_free = vm.get('events', 'V_m')[500:-1]
print('mean membrane potential (actual / calculated): {0} / {1}'
.format(np.mean(v_free), mu * 1000))
print('variance (actual / calculated): {0} / {1}'
.format(np.var(v_free), sigma2 * 1e6))
print('firing rate (actual / calculated): {0} / {1}'
.format(nest.GetStatus(sd, 'n_events')[0] /
(n_neurons * simtime * ms), r))
|
tammoippen/nest-simulator
|
pynest/examples/CampbellSiegert.py
|
Python
|
gpl-2.0
| 7,544
|
[
"NEURON"
] |
22f764aa759afce7be0c2776f4056ccc6b404a7ce183ed8f720138bdd44777c2
|
# coding=utf-8
# qingfanyi - Chinese to English translation tool
# Copyright (C) 2016 Rohan McGovern <rohan@mcgovern.id.au>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyatspi.state import STATE_SHOWING
from qingfanyi import debug
def _is_showing(accessible_object):
return accessible_object.getState().contains(STATE_SHOWING)
def get_text_object(accessible_object):
"""
:param accessible_object: an AtSpi.Accessible
:return: the text interface for an accessible object if implemented, otherwise None.
"""
try:
return accessible_object.queryText()
except NotImplementedError:
return None
def visit_visible(root, callback, level=0):
"""
Visit every visible object in a hierarchy and invoke a provided callback.
:param root: an AtSpi.Accessible object
:param callback: invoked for each visible object with two parameters: the object,
and the distance from root (e.g. 0 == root, 1 == child of root,
2 == grandchild ...)
"""
debug('%s%s' % (' ' * level, root))
if not _is_showing(root):
debug('%s PRUNE' % (' ' * level))
return
callback(root, level)
for child in root:
visit_visible(child, callback, level + 1)
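# Usage sketch (not part of the original module; the helper name is made up):
# walk the visible hierarchy under `root` and print each object indented by
# its depth.
def print_visible_tree(root):
    def _print(obj, level):
        print('%s%s' % ('  ' * level, obj))
    visit_visible(root, _print)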
|
rohanpm/qingfanyi
|
qingfanyi/atspi.py
|
Python
|
gpl-3.0
| 1,863
|
[
"VisIt"
] |
3929c1b527a66917cd3adbeeb96d2bcda35d476d1b64175f8773ff2344c78d27
|
# -*- coding: utf-8 -*-
#
# lin_rate_ipn_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
''' Network of linear rate neurons
----------------------------------------
This script simulates an excitatory and an inhibitory population
of lin_rate_ipn neurons with delayed excitatory and instantaneous
inhibitory connections. The rate of all neurons is recorded using
a multimeter. The resulting rate for one excitatory and one
inhibitory neuron is plotted.
'''
import nest
import pylab
import numpy
'''
Assigning the simulation parameters to variables.
'''
dt = 0.1 # the resolution in ms
T = 100.0 # Simulation time in ms
'''
Definition of the number of neurons
'''
order = 50
NE = int(4 * order) # number of excitatory neurons
NI = int(1 * order) # number of inhibitory neurons
N = int(NE+NI) # total number of neurons
'''
Definition of the connections
'''
d_e = 5. # delay of excitatory connections in ms
g = 5.0 # ratio inhibitory weight/excitatory weight
epsilon = 0.1 # connection probability
w = 0.1/numpy.sqrt(N) # excitatory connection strength
KE = int(epsilon * NE) # number of excitatory synapses per neuron (outdegree)
KI = int(epsilon * NI) # number of inhibitory synapses per neuron (outdegree)
K_tot = int(KI + KE) # total number of synapses per neuron
connection_rule = 'fixed_outdegree' # connection rule
'''
Definition of the neuron model and its neuron parameters
'''
neuron_model = 'lin_rate_ipn' # neuron model
neuron_params = {'linear_summation': True,
# type of non-linearity (not affecting linear rate models)
'tau': 10.0,
# time constant of neuronal dynamics in ms
'mean': 2.0,
# mean of Gaussian white noise input
'std': 5.
# standard deviation of Gaussian white noise input
}
'''
Configuration of the simulation kernel by the previously defined time
resolution used in the simulation. Setting "print_time" to True prints
the already processed simulation time as well as its percentage of the
total simulation time.
'''
nest.ResetKernel()
nest.SetKernelStatus({"resolution": dt, "use_wfr": False,
"print_time": True,
"overwrite_files": True})
print("Building network")
'''
Configuration of the neuron model using SetDefaults().
'''
nest.SetDefaults(neuron_model, neuron_params)
'''
Creation of the nodes using `Create`.
'''
n_e = nest.Create(neuron_model, NE)
n_i = nest.Create(neuron_model, NI)
'''
To record from the rate neurons a multimeter is created and the
parameter `record_from` is set to `'rate'` as well as the recording
interval to `dt`
'''
mm = nest.Create('multimeter', params={'record_from': ['rate'],
'interval': dt})
'''
Specify synapse and connection dictionaries:
Connections originating from excitatory neurons are associated
with a delay d_e (rate_connection_delayed).
Connections originating from inhibitory neurons are not associated
with a delay (rate_connection_instantaneous).
'''
syn_e = {'weight': w, 'delay': d_e, 'model': 'rate_connection_delayed'}
syn_i = {'weight': -g*w, 'model': 'rate_connection_instantaneous'}
conn_e = {'rule': connection_rule, 'outdegree': KE}
conn_i = {'rule': connection_rule, 'outdegree': KI}
'''
Connect rate units
'''
nest.Connect(n_e, n_e, conn_e, syn_e)
nest.Connect(n_i, n_i, conn_i, syn_i)
nest.Connect(n_e, n_i, conn_i, syn_e)
nest.Connect(n_i, n_e, conn_e, syn_i)
'''
Connect recording device to rate units
'''
nest.Connect(mm, n_e+n_i)
'''
Simulate the network
'''
nest.Simulate(T)
'''
Plot rates of one excitatory and one inhibitory neuron
'''
data = nest.GetStatus(mm)[0]['events']
rate_ex = data['rate'][numpy.where(data['senders'] == n_e[0])]
rate_in = data['rate'][numpy.where(data['senders'] == n_i[0])]
times = data['times'][numpy.where(data['senders'] == n_e[0])]
pylab.figure()
pylab.plot(times, rate_ex, label='excitatory')
pylab.plot(times, rate_in, label='inhibitory')
pylab.xlabel('time (ms)')
pylab.ylabel('rate (a.u.)')
pylab.show()
|
apeyser/nest-simulator
|
pynest/examples/lin_rate_ipn_network.py
|
Python
|
gpl-2.0
| 4,759
|
[
"Gaussian",
"NEURON"
] |
e89eaa2543e1f75f1638ade6019aa094c5da7f0ef0aacf42492f218a199e49e5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Abinit Post Process Application
author: Martin Alexandre
last edited: May 2013
"""
import sys,os
#Gui
import gui.open_graph as OpenGraph
#Utility
import utility.write as Write
import utility.canvas as Canvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar2
from numpy import linspace,random
try :
from PyQt4 import Qt,QtGui,QtCore,Qwt5
except :
pass;
#---------------------------------------------------#
#---------------------------------------------------#
#----------------GRAPHICS---------------------------#
#---------------------------------------------------#
#---------------------------------------------------#
class graphic(QtGui.QMainWindow):
x = 0
y = 0
xlabel = ""
ylabel = ""
average = True
def __init__(self,pX,pY,pxlbl,pylbl,average=True, adjust=False,\
name = "Graphics",point = False,marker='.',marker_size=25, parent= None):
self.x = pX
self.y = pY
self.xlabel=pxlbl
self.ylabel=pylbl
self.average = average
self.initUI(parent,adjust,name,point,marker,marker_size)
self.raise_()
def initUI(self, parent,adjust,name,point,marker,marker_size):
QtGui.QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle(name)
self.resize(1000, 1000)
screen = QtGui.QDesktopWidget().screenGeometry()
self.move(random.randint(0, screen.height()),random.randint( 0, screen.width()))
self.open = QtGui.QAction('&Open', self)
self.open.setShortcut('Ctrl+O')
self.open.setStatusTip('import graphics')
self.connect(self.open, QtCore.SIGNAL('triggered()'),self.openGraph)
self.close = QtGui.QAction('&Exit', self)
self.close.setShortcut('Ctrl+Q')
self.close.setStatusTip('Exit application')
self.connect(self.close, QtCore.SIGNAL('triggered()'), QtCore.SLOT('close()'))
self.printG = QtGui.QAction('&Print', self)
self.printG.setShortcut('Ctrl+P')
self.printG.setStatusTip('Print graphics')
self.connect(self.printG, QtCore.SIGNAL('triggered()'),self.printGraph)
self.save = QtGui.QAction('&Save', self)
self.save.setShortcut('Ctrl+S')
self.save.setStatusTip('Save Graphic')
self.connect(self.save, QtCore.SIGNAL('triggered()'), self.showDialog)
self.setStatusBar(QtGui.QStatusBar())
self.menubar = self.menuBar()
self.fileMenu1 = self.menubar.addMenu('&File')
#self.fileMenu1.addAction(self.open)
self.fileMenu1.addAction(self.save)
self.fileMenu1.addAction(self.printG)
self.fileMenu1.addAction(self.close)
self.main_widget = QtGui.QWidget(self)
self.main_lay = QtGui.QHBoxLayout(self.main_widget)
        #Creation of the graphics widget
self.graph_widget = QtGui.QWidget(self)
self.graph_lay = QtGui.QVBoxLayout(self.graph_widget)
self.G = Canvas.CanvasQT(self.main_widget, width=6, height=4, dpi=100,\
x=self.x,y=self.y,pxlbl=self.xlabel,pylbl=self.ylabel\
,point=point,marker=marker,marker_size=marker_size)
self.navigation_toolbar = NavigationToolbar2(self.G, self)
#creation of the option widget
self.test = QtGui.QWidget()
self.test.hide()
self.option_layout = QtGui.QGridLayout(self.test)
self.lbl2 = QtGui.QLabel("select file :", self)
self.lbl2.setFixedSize(70,36)
self.tefile = QtGui.QTextEdit()
self.tefile.setReadOnly(True)
self.tefile.setFixedSize(100,36)
self.browse = QtGui.QPushButton('&Browse', self)
self.browse.setFixedSize(100,36)
# self.connect(self.browse ,QtCore.SIGNAL("clicked()"),self.openFile)
self.tbro = QtGui.QTextBrowser()
self.tbro.setFixedHeight(500)
self.tbro.setFixedWidth(200)
self.tbro.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.lbl3 = QtGui.QLabel("x :", self)
self.lbl3.setFixedSize(15,36)
self.CBox1 = QtGui.QComboBox()
self.CBox1.addItem("")
self.CBox1.setFixedSize(65,36)
# self.connect(self.CBox1,QtCore.SIGNAL('currentIndexChanged(const QString&)'),self.changeData)
self.lbl4 = QtGui.QLabel("y :", self)
self.lbl4.setFixedSize(15,36)
self.CBox2= QtGui.QComboBox()
self.CBox2.addItem("")
self.CBox2.setFixedSize(65,36)
# self.connect(self.CBox2,QtCore.SIGNAL('currentIndexChanged(const QString&)'),self.changeData)
self.imp = QtGui.QPushButton('&import', self)
self.imp.setFixedSize(100,36)
# self.connect(self.imp ,QtCore.SIGNAL("clicked()"),self.close)
self.option_layout.addWidget(self.lbl2 , 0, 0, 1, 1, QtCore.Qt.AlignCenter)
self.option_layout.addWidget(self.tefile , 0, 1, 1, 4,QtCore.Qt.AlignLeft)
self.option_layout.addWidget(self.browse , 0 ,5, 1, 1,QtCore.Qt.AlignCenter)
self.option_layout.addWidget(self.tbro , 1, 0, 10, 6, QtCore.Qt.AlignCenter)
self.option_layout.addWidget(self.lbl3 ,12, 0, 1, 1, QtCore.Qt.AlignRight)
self.option_layout.addWidget(self.CBox1 ,12, 1, 1, 1, QtCore.Qt.AlignLeft)
self.option_layout.addWidget(self.lbl4 ,12, 2, 1, 1, QtCore.Qt.AlignRight)
self.option_layout.addWidget(self.CBox2 ,12, 3, 1, 1, QtCore.Qt.AlignLeft)
self.option_layout.addWidget(self.imp ,12, 5, 1, 1, QtCore.Qt.AlignLeft)
self.graph_lay.addWidget(self.G)
self.graph_lay.addWidget(self.navigation_toolbar, 0)
self.main_lay.addWidget(self.graph_widget)
self.main_lay.addWidget(self.test)
if self.average:
self.checkbox_average =QtGui.QCheckBox("Show average Value")
self.connect(self.checkbox_average, QtCore.SIGNAL('clicked()'), self.addAverage)
self.lbl1 = QtGui.QLabel()
self.lbl1.setFixedWidth(250)
average = self.averageValue()
display = self.displayData(average,self.getStandardDeviation(self.y ,average))
self.lbl1.setText('Average : ' + display)
self.navigation_toolbar.addWidget(self.lbl1)
self.navigation_toolbar.addWidget(self.checkbox_average)
if adjust:
self.G.adjust_x_lim(self.x,self.y)
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
def showDialog(self):
fname = QtGui.QFileDialog.getSaveFileName(self,"Save Graphics",os.getcwd())
if fname !="":
Write.SaveFile(fname).saveGraph(self.x,self.y,self.xlabel,self.ylabel)
def openGraph(self):
self.importGraph = OpenGraph.OpenGraph()
self.importGraph.raise_()
if self.importGraph.exec_():
data = self.importGraph.importGraph()
else:
data = self.importGraph.importGraph()
if data !=0 :
self.addPlot(data[0],data[1],bar = False,point = False)
def setPlot(self,x,y,pxlbl,pylbl,adjust=False,point =False):
self.G.setPlot(x,y,pxlbl,pylbl,adjust=adjust,point = point)
def addGraph(self,px,py,bar = False,point = False,marker='.',marker_size=25):
self.G.addPlot(px,py,bar = bar,point = point, marker=marker,marker_size=marker_size)
def addPlot(self,px,py,bar = False,point = False,marker='.',marker_size=25):
self.G.addPlot(px,py,bar = bar,point = point, marker=marker,marker_size=marker_size)
def addLegend(self,legend,markerscale = 1):
self.G.addLegend(legend,markerscale=markerscale)
def addAverage(self):
if self.checkbox_average.isChecked():
ytemp = self.averageValue()
ya = []
for i in range(len(self.y)):
ya.insert(i,ytemp)
self.G.addPlot(self.x,ya)
self.addLegend([self.ylabel,self.lbl1.text()])
#beta self.test.show()
else:
#beta self.test.hide()
self.G.setPlot(self.x,self.y,self.xlabel,self.ylabel)
def closeEvent(self, event):
del self
def update(self,pX,pY,pxlbl,pylbl,adjust=False, name = '',point = False,marker='.', marker_size=25):
self.setWindowTitle(pylbl)
self.x = pX
self.y = pY
self.xlabel=pxlbl
self.ylabel=pylbl
if self.average:
average = self.averageValue()
display = self.displayData(average,self.getStandardDeviation(self.y ,average))
self.lbl1.setText('Average : ' + display)
self.G.setPlot(self.x,self.y,self.xlabel,self.ylabel,point=point,marker=marker, marker_size=marker_size)
if self.isCheckbox_averageChecked():
self.addAverage()
if adjust:
self.G.adjust_x_lim(self.x,self.y)
if name != '':
self.setWindowTitle(name)
def updatePos(self,pX,pY,pxlbl,pylbl,adjust=False, name = '',point = True,marker='.', marker_size=2):
self.setWindowTitle(pylbl)
self.x = pX
self.y = pY
self.xlabel=pxlbl
self.ylabel=pylbl
self.G.setPlot(self.x,self.y,self.xlabel,self.ylabel,point=point,marker=marker, marker_size=marker_size)
def averageValue(self):
ytemp = 0
for i in range(len(self.y)):
ytemp+=float(self.y[i])
ytemp/=len(self.y)
return ytemp
def getStandardDeviation(self,data ,averageData = 0):
res = 0
if averageData == 0:
            averageData = sum(data) / float(len(data))
for i in range(len(data)):
res += (data[i] - averageData)**2
res /= len(data)
res = res**0.5
return res
def displayData(self,data,deviation):
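        # Formats 'value +/- deviation': very small magnitudes (below 1e-04)
        # are shown with %.4g, larger ones are rounded to two decimals; if no
        # branch matches (small value with large deviation) an empty string is
        # returned.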
if(abs(data) < 1e-04 and deviation < 1e-04):
return str("%.4g" %data) + ' +/- ' + str("%.4g" %deviation)
if(abs(data) > 1e-04 and deviation < 1e-04 ):
return str(round(data,2)) + ' +/- ' + str("%.4g" %deviation)
if(abs(data) > 1e-04 and deviation > 1e-04 ):
return str(round(data,2)) + ' +/- ' + str(round(deviation,2))
return ''
def isCheckbox_averageChecked(self):
try:
return self.checkbox_average.isChecked()
except:
return False
def printGraph(self):
######BETA###########
#printer = Qt.QPrinter()
#printer.A4
#printer.HighResolution
#printer.Color
#anotherWidget= Qt.QPrintDialog(printer,self)
#print
#if(anotherWidget.exec_() != Qt.QDialog.Accepted):
#return
#print 'test' + printer.outputFileName()
#else:
#print 'test' + printer.outputFileName()
#self.G.print_figure(printer)
#print 'test' + printer.outputFileName()
#print printer.outputFileName()
#p = Qt.QPixmap.grabWidget(self.G)
#printLabel = Qt.QLabel()
#printLabel.setPixmap(p)
#painter = Qt.QPainter(printer)
#self.G.render(painter)
#printLabel.render(painter)
#painter.end()
printer = Qt.QPrinter(Qt.QPrinter.HighResolution)
printer.setOutputFileName('bode-example-%s.ps' % Qt.qVersion())
printer.setCreator('Bode example')
printer.setOrientation(Qt.QPrinter.Landscape)
printer.setColorMode(Qt.QPrinter.Color)
#docName = self.plot.title().text()
#if not docName.isEmpty():
#docName.replace(Qt.QRegExp(Qt.QString.fromLatin1('\n')), self.tr(' -- '))
#printer.setDocName(docName)
dialog = Qt.QPrintDialog(printer)
if dialog.exec_():
#filter = Qt.PrintFilter()
#if (QPrinter.GrayScale == printer.colorMode()):
#filter.setOptions(
#Qt.QwtPlotPrintFilter.PrintAll
#& ~QwtPlotPrintFilter.PrintBackground
#| QwtPlotPrintFilter.PrintFrameWithScales)
self.G.print_(printer)#, filter)
#---------------------------------------------------#
#---------------------------------------------------#
#---------------------------------------------------#
#---------------------------------------------------#
#---------------------------------------------------#
|
jmbeuken/abinit
|
scripts/post_processing/appa/gui/graph.py
|
Python
|
gpl-3.0
| 12,464
|
[
"ABINIT"
] |
ff1380d4759cbb49e5ec48cf6b37e6ac59b035efad1b79fca13a18e2a1f9e1ea
|
#!/Users/seb/Work/code/ParaView/build/bin/pvpython
import sys
from vtk import vtkMetaImageReader, vtkProgrammableFilter, vtkUnsignedCharArray, vtkJPEGWriter, vtkPNGWriter
basePath = '/Users/seb/Work/projects/NE-Phase2/'
reader = vtkMetaImageReader()
reader.SetFileName("/Users/seb/Work/projects/NE-Phase2/Patient05.mha")
reader.Update()
writer = vtkJPEGWriter() # vtkJPEGWriter()
filter = vtkProgrammableFilter()
def unfoldData():
inputDS = filter.GetInputDataObject(0, 0)
outputDS = filter.GetImageDataOutput()
dims = inputDS.GetDimensions()
# dims[1] * dims[2]
nbSlices = (dims[1] * dims[2]) / 2048
outputDS.SetDimensions(dims[0], dims[1] * dims[2] / nbSlices, nbSlices)
outputDS.SetOrigin(0,0,0)
outputDS.SetSpacing(1,1,1)
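    # The loop below packs each scalar value (assumed to fit in 24 bits) into
    # the three 8-bit components of an unsigned-char RGB array: low byte,
    # middle byte and high byte respectively, so each slice can be written out
    # as an ordinary 2D image and the original values reassembled later.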
for arrayIdx in range(inputDS.GetPointData().GetNumberOfArrays()):
array = inputDS.GetPointData().GetArray(arrayIdx)
size = dims[0] * dims[1] * dims[2]
print size, array.GetNumberOfTuples()
newArray = vtkUnsignedCharArray()
newArray.SetName(array.GetName())
newArray.SetNumberOfComponents(3)
newArray.SetNumberOfTuples(size)
outputDS.GetPointData().AddArray(newArray)
progress = int(size / 100)
count = 0
for idx in range(size):
value = array.GetValue(idx)
newArray.SetValue(idx * 3, int(value%256))
newArray.SetValue(idx * 3 + 1, int(value/256%256))
newArray.SetValue(idx * 3 + 2, int(value/256/256))
if idx % progress == 0:
count = count + 1
print count
# sys.stdout.write('.')
# sys.stdout.flush()
#sys. "\rProcessing %s: %d %s" % (array.GetName(), count, "/-\\|"[count%4])
print
filter.SetInputData(reader.GetOutput())
filter.SetExecuteMethod(unfoldData)
filter.Update()
dsToEncode = filter.GetOutput()
for arrayIdx in range(dsToEncode.GetPointData().GetNumberOfArrays()):
array = dsToEncode.GetPointData().GetArray(arrayIdx)
dsToEncode.GetPointData().SetActiveScalars(array.GetName())
writer.SetFilePattern(basePath + array.GetName() + '_%d.jpg')
writer.SetFileDimensionality(2)
writer.SetInputData(dsToEncode)
writer.Write()
|
Kitware/cinema
|
scripts/probe_data/medical_vtk_python-FIXME.py
|
Python
|
bsd-3-clause
| 2,200
|
[
"ParaView",
"VTK"
] |
ae8402bc2046f6a964ff67dc33e80cd772a7b34037aca07d44cc2303f1451e79
|
#!/usr/bin/env python
#
# Copyright (C) 2015, the ximpol team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Unit test for the core.spline module.
"""
import unittest
import scipy.special
from ximpol.core.spline import *
from ximpol.utils.logging_ import suppress_logging
suppress_logging()
class TestSplineLinear(unittest.TestCase):
"""Unit test for xInterpolatedUnivariateSplineLinear.
"""
@classmethod
def setUpClass(cls):
"""Setup.
Create a few objects to be used for testing.
"""
cls.num_points = 100
cls.x1 = numpy.linspace(0, 2*numpy.pi, cls.num_points)
cls.y1 = numpy.sin(cls.x1)
cls.x2 = numpy.linspace(0, numpy.pi, cls.num_points)
cls.y2 = numpy.sin(cls.x2)
cls.x3 = numpy.linspace(0, 10, 100)
cls.y3 = 3*cls.x3
cls.s1 = xInterpolatedUnivariateSplineLinear(cls.x1, cls.y1)
cls.s2 = xInterpolatedUnivariateSplineLinear(cls.x2, cls.y2)
cls.s3 = xInterpolatedUnivariateSplineLinear(cls.x3, cls.y3)
def test_len(self):
"""Test the basic object instantiation.
"""
# Check we get the number of points right.
self.assertEqual(len(self.s1), self.num_points)
def test_evaluation(self):
"""Test the object evaluation.
"""
# This is a linear interpolator, so the interpolated values must
# be identical, within rounding errors, to the original grid of
# values.
_delta = abs(self.s1(self.x1) - self.y1)
self.assertTrue(_delta.max() < 1e-9, 'max. diff. %.9f' % _delta.max())
# s1 and s2 are built with different sets of points, but with the same
# underlying function, so they should be fairly close at any given
# point.
_x = numpy.linspace(0, numpy.pi, 10)
_delta = abs(self.s1(_x) - self.s2(_x))
self.assertTrue(_delta.max() < 1e-3, 'max. diff. %.9f' % _delta.max())
def test_multiplication(self):
"""Test the interpolator multiplication.
"""
# Evaluate s1*s2 in x2 should give the same answer than multiplying
# s1(x2)*y2.
_m = self.s1*self.s2
_delta = abs(_m(self.x2) - self.s1(self.x2)*self.y2)
self.assertTrue(_delta.max() < 1e-9, 'max. diff. %.9f' % _delta.max())
# And the result of the multiplication should be an instance of
# the original operand class.
self.assertTrue(isinstance(_m, xInterpolatedUnivariateSplineLinear))
def test_sum(self):
"""Test the interpolator sum.
"""
# Evaluate s1 + s2 in x2 should give the same answer than adding
# s1(x2) + y2.
_s = self.s1 + self.s2
_delta = abs(_s(self.x2) - (self.s1(self.x2) + self.y2))
self.assertTrue(_delta.max() < 1e-9, 'max. diff. %.9f' % _delta.max())
# And the result of the multiplication should be an instance of
# the original operand class.
self.assertTrue(isinstance(_s, xInterpolatedUnivariateSplineLinear))
def test_extrapolation(self):
"""Test interpolator extrapolation.
"""
# Calculate one extrapolated value by hand and compare it to the
# value from the interpolator.
_xa = self.x1[-2]
_xb = self.x1[-1]
_ya = self.y1[-2]
_yb = self.y1[-1]
_x = _xb + 0.2
_y = _ya + (_yb - _ya)/(_xb - _xa)*(_x - _xa)
_delta = abs(self.s1(_x) - _y)
self.assertTrue(_delta < 1e-9, 'max. diff. %.9f' % _delta)
def test_norm(self):
"""Test the normalization calculation.
"""
_delta = abs(self.s3.norm() - 100.*3./2)
        self.assertTrue(_delta < 1e-6, 'norm. diff. %.9f' % _delta)
def test_cdf(self):
""" The cdf must be 0 at xmin and 1 at xmax.
"""
cdf = self.s3.build_cdf()
_delta = abs(cdf(self.s3.xmin()))
self.assertTrue(_delta < 1e-3, 'ppf(xmin) %.9f' % _delta)
_delta = abs(cdf(self.s3.xmax()) - 1.)
self.assertTrue(_delta < 1e-3, 'ppf(xmax) - 1 %.9f' % _delta)
def test_ppf(self):
""" The ppf must be defined between 0 and 1 (where is equal to the
xmin and xmax values of the original spline).
"""
ppf = self.s3.build_ppf()
_delta = abs(ppf.xmin())
self.assertTrue(_delta < 1e-3, 'ppf xmin %.9f' % _delta)
_delta = abs(ppf.xmax() - 1.)
self.assertTrue(_delta < 1e-3, 'ppf (xmax - 1) %.9f' % _delta)
_delta = abs(ppf(0) - self.s3.xmin())
self.assertTrue(_delta < 1e-3, 'ppf(0) - xmin %.9f' % _delta)
_delta = abs(ppf(1) - self.s3.xmax())
self.assertTrue(_delta < 1e-3, 'ppf(1) - xmax %.9f' % _delta)
def test_cdf_erf(self):
"""Test the cdf for a gaussian function.
"""
_x = numpy.linspace(-5, 5, 100)
_y = 1./numpy.sqrt(2.*numpy.pi)*numpy.exp(-0.5*_x**2)
pdf = xInterpolatedUnivariateSplineLinear(_x, _y)
cdf = pdf.build_cdf()
delta = abs(cdf(_x) - 0.5*(1. + scipy.special.erf(_x/numpy.sqrt(2.))))
max_delta = delta.max()
err_msg = 'maximum absolute delta %.4e' % max_delta
self.assertTrue(max_delta < 5e-4, err_msg)
def test_sort(self):
"""Test the automatic sorting functionality.
"""
_x = numpy.random.sample(100)
_y = _x**2
s = xInterpolatedUnivariateSplineLinear(_x, _y)
_x.sort()
self.assertTrue((s.x == _x).all())
self.assertTrue((s.y == _x**2).all())
def test_non_unique(self):
"""The spline constructor must fail when non-unique values are passed.
"""
_x = numpy.array([1, 1, 2, 3, 4])
_y = _x**2
with self.assertRaises(AssertionError):
s = xInterpolatedUnivariateSplineLinear(_x, _y)
if __name__ == '__main__':
unittest.main()
|
lucabaldini/ximpol
|
ximpol/test/test_spline.py
|
Python
|
gpl-3.0
| 6,545
|
[
"Gaussian"
] |
833a58eb92b77fd0497bf48aa34b96cf6bc667b072c91c8734965f45de09995a
|
import doctest
def test_symmetry():
"""
DocTests (pychemia.crystal.symmetry) :
"""
import pychemia
if pychemia.HAS_SPGLIB:
import pychemia.crystal
dt = doctest.testmod(pychemia.crystal.symmetry, verbose=True)
assert dt.failed == 0
def test_lattice():
"""
DocTests (pychemia.crystal.lattice) :
"""
import pychemia.crystal.lattice
dt = doctest.testmod(pychemia.crystal.lattice, verbose=True, optionflags=doctest.NORMALIZE_WHITESPACE)
assert dt.failed == 0
|
MaterialsDiscovery/PyChemia
|
tests/test_1_doctest_crystal.py
|
Python
|
mit
| 575
|
[
"CRYSTAL"
] |
4073414871e2b839f5c9f3dd4cf1d1724e30f36f52bf3599a8b2abe5b2890a37
|
#pylint: disable=C0111
#pylint: disable=W0621
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
#pylint: disable=W0401
# Disable the "Unused import %s from wildcard import" warning
#pylint: disable=W0614
# Disable the "unused argument" warning because lettuce uses "step"
#pylint: disable=W0613
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from lettuce.django import django_url
from nose.tools import assert_equals # pylint: disable=E0611
from logging import getLogger
logger = getLogger(__name__)
@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
world.wait(seconds)
@step('I reload the page$')
def reload_the_page(step):
world.wait_for_ajax_complete()
world.browser.reload()
world.wait_for_js_to_load()
@step('I press the browser back button$')
def browser_back(step):
world.browser.driver.back()
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
world.visit('/')
assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
world.visit('/dashboard')
assert world.is_css_present('section.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
assert world.is_css_present('section.container.dashboard')
assert world.browser.title == 'Dashboard'
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
world.visit('/courses')
assert world.is_css_present('section.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
assert world.url_equals(path)
@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
assert_equals(world.browser.title, title)
@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
assert(title in world.browser.title)
@step('I log in$')
def i_log_in(step):
world.log_in(username='robot', password='test')
@step('I am a logged in user$')
def i_am_logged_in_user(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
@step('I am not logged in$')
def i_am_not_logged_in(step):
world.visit('logout')
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
world.register_by_course_id(course_id, True)
@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
world.click_link(text)
@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
assert_equals(world.browser.url, url)
@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
assert len(world.browser.find_link_by_text(text)) > 0
@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
link = world.browser.find_by_id(link_id)
assert len(link) > 0
assert_equals(link.text, text)
@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
link = world.browser.find_link_by_text(text)
assert len(link) > 0
assert_equals(link.first["href"], django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
multiplier = 2
else:
multiplier = 1
if doesnt_appear:
assert world.browser.is_text_not_present(text, wait_time=5 * multiplier)
else:
assert world.browser.is_text_present(text, wait_time=5 * multiplier)
@step('I am logged in$')
def i_am_logged_in(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
world.browser.visit(django_url('/'))
dash_css = 'section.container.dashboard'
assert world.is_css_present(dash_css)
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
world.browser.visit(django_url(url))
@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
    world.browser.execute_script('window.prompt = function(){return "%s";}' % prompt)
@step('I run ipdb')
def run_ipdb(_step):
"""Run ipdb as step for easy debugging"""
import ipdb
ipdb.set_trace()
assert True
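# Example feature-file usage of the steps defined above (illustrative only;
# not part of this module):
#
#     Given I am a logged in user
#     When I visit the url "/dashboard"
#     Then the page title should be "Dashboard"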
|
TsinghuaX/edx-platform
|
common/djangoapps/terrain/steps.py
|
Python
|
agpl-3.0
| 6,516
|
[
"VisIt"
] |
4bf4d6003a2c9fd22d0f73ef804f53de33c52340fd01d91d823830f6c254019e
|
#!/usr/bin/python
"""
Copyright 2017 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import Cookie
import dbSession
import cgi
import MySQLdb
import time
from datetime import timedelta, datetime
import ghLists
import dbShared
import ghObjects
RES_STATS = ['ER','CR','CD','DR','FL','HR','MA','PE','OQ','SR','UT']
def getResourceSQL(wpCriteria, favCols, joinStr, orderCol, orderStr, criteriaStr, galaxyCriteriaStr, mine):
if mine != '':
criteriaStr += ' AND favGroup IS NOT NULL'
sqlStr1 = 'SELECT tResources.spawnID, spawnName, tResources.galaxy, tResources.entered, tResources.enteredBy, tResources.resourceType, rt1.resourceTypeName, rt1.resourceGroup,'
sqlStr1 += ' CR, CD, DR, FL, HR, MA, PE, OQ, SR, UT, ER,'
sqlStr1 += ' CASE WHEN rt1.CRmax > 0 THEN ((CR-rt1.CRmin) / (rt1.CRmax-rt1.CRmin))*100 ELSE NULL END AS CRperc, CASE WHEN rt1.CDmax > 0 THEN ((CD-rt1.CDmin) / (rt1.CDmax-rt1.CDmin))*100 ELSE NULL END AS CDperc, CASE WHEN rt1.DRmax > 0 THEN ((DR-rt1.DRmin) / (rt1.DRmax-rt1.DRmin))*100 ELSE NULL END AS DRperc, CASE WHEN rt1.FLmax > 0 THEN ((FL-rt1.FLmin) / (rt1.FLmax-rt1.FLmin))*100 ELSE NULL END AS FLperc, CASE WHEN rt1.HRmax > 0 THEN ((HR-rt1.HRmin) / (rt1.HRmax-rt1.HRmin))*100 ELSE NULL END AS HRperc, CASE WHEN rt1.MAmax > 0 THEN ((MA-rt1.MAmin) / (rt1.MAmax-rt1.MAmin))*100 ELSE NULL END AS MAperc,'
sqlStr1 += ' CASE WHEN rt1.PEmax > 0 THEN ((PE-rt1.PEmin) / (rt1.PEmax-rt1.PEmin))*100 ELSE NULL END AS PEperc, CASE WHEN rt1.OQmax > 0 THEN ((OQ-rt1.OQmin) / (rt1.OQmax-rt1.OQmin))*100 ELSE NULL END AS OQperc, CASE WHEN rt1.SRmax > 0 THEN ((SR-rt1.SRmin) / (rt1.SRmax-rt1.SRmin))*100 ELSE NULL END AS SRperc, CASE WHEN rt1.UTmax > 0 THEN ((UT-rt1.UTmin) / (rt1.UTmax-rt1.UTmin))*100 ELSE NULL END AS UTperc, CASE WHEN rt1.ERmax > 0 THEN ((ER-rt1.ERmin) / (rt1.ERmax-rt1.ERmin))*100 ELSE NULL END AS ERperc,'
sqlStr1 += ' rt1.containerType, tResources.verified, tResources.verifiedBy, tResources.unavailable, tResources.unavailableBy, rg1.groupName, rt1.resourceCategory, rg2.groupName AS categoryName, rt1.resourceGroup, (SELECT Max(concentration) FROM tWaypoint WHERE tWaypoint.spawnID=tResources.spawnID AND tWaypoint.unavailable IS NULL AND (' + wpCriteria + ')) AS wpMaxConc' + favCols + ', ' + orderCol + ' FROM tResources INNER JOIN tResourceType rt1 ON tResources.resourceType = rt1.resourceType INNER JOIN tResourceGroup rg1 ON rt1.resourceGroup = rg1.resourceGroup INNER JOIN tResourceGroup rg2 ON rt1.resourceCategory = rg2.resourceGroup' + joinStr + ' WHERE ' + criteriaStr + galaxyCriteriaStr
sqlStr1 += orderStr
if fetchSize.isdigit():
sqlStr1 += ' LIMIT ' + fetchSize
sqlStr1 += ';'
return sqlStr1
def getResourceCountSQL(wpCriteria, favCols, joinStr, orderCol, orderStr, criteriaStr, galaxyCriteriaStr, mine):
if mine != '':
criteriaStr += ' AND favGroup IS NOT NULL'
sqlStr1 = 'SELECT Count(*)'
sqlStr1 += ' FROM tResources INNER JOIN tResourceType rt1 ON tResources.resourceType = rt1.resourceType INNER JOIN tResourceGroup rg1 ON rt1.resourceGroup = rg1.resourceGroup INNER JOIN tResourceGroup rg2 ON rt1.resourceCategory = rg2.resourceGroup'
sqlStr1 += joinStr + ' WHERE ' + criteriaStr + galaxyCriteriaStr + ';'
return sqlStr1
def getResourceData(conn, resSQL, userReputation, activeUser, formatType):
# get resource data for given criteria
resourceHTML = ''
s = None
cursor = conn.cursor()
if (cursor):
lastValue = None
cursor.execute(resSQL)
row = cursor.fetchone()
if row == None:
resourceHTML = 'No resources found!'
while (row != None):
if s == None:
if formatType == 'json':
resourceHTML = ' [\n'
else:
resourceHTML = '<table width="100%" class=resourceStats>'
# populate this resource into an object and print it
s = ghObjects.resourceSpawn()
s.spawnID = row[0]
s.spawnName = row[1]
s.spawnGalaxy = row[2]
s.resourceType = row[5]
s.resourceTypeName = row[6]
s.containerType = row[30]
s.stats.CR = row[8]
s.stats.CD = row[9]
s.stats.DR = row[10]
s.stats.FL = row[11]
s.stats.HR = row[12]
s.stats.MA = row[13]
s.stats.PE = row[14]
s.stats.OQ = row[15]
s.stats.SR = row[16]
s.stats.UT = row[17]
s.stats.ER = row[18]
s.percentStats.CR = row[19]
s.percentStats.CD = row[20]
s.percentStats.DR = row[21]
s.percentStats.FL = row[22]
s.percentStats.HR = row[23]
s.percentStats.MA = row[24]
s.percentStats.PE = row[25]
s.percentStats.OQ = row[26]
s.percentStats.SR = row[27]
s.percentStats.UT = row[28]
s.percentStats.ER = row[29]
s.entered = row[3]
s.enteredBy = row[4]
s.verified = row[31]
s.verifiedBy = row[32]
s.unavailable = row[33]
s.unavailableBy = row[34]
s.maxWaypointConc = row[39]
if row[40] != None:
s.favorite = 1
s.despawnAlert = row[41]
if sort == "quality":
s.overallScore = row[42]
s.planets = dbShared.getSpawnPlanets(conn, row[0], False, row[2])
if formatType == 'json':
resourceHTML += ' {'
else:
resourceHTML += ' <tr><td>'
if formatType == 'mobile':
resourceHTML += s.getMobileHTML(activeUser)
elif formatType == 'compare':
resourceHTML += s.getHTML(1, '', activeUser, userReputation)
elif formatType == 'json':
resourceHTML += s.getJSON()
else:
resourceHTML += s.getHTML(0, '', activeUser, userReputation)
if formatType == 'json':
resourceHTML += ' },\n'
else:
resourceHTML += '</td></tr>'
lastValue = row[42]
row = cursor.fetchone()
if formatType == 'json':
resourceHTML += ' ]'
else:
resourceHTML += ' </table>'
if fetchSize.isdigit() and cursor.rowcount == int(fetchSize):
if formatType == 'json':
resourceHTML += ',\n "last_value" : ' + str(lastValue)
else:
resourceHTML += '<div style="display:none;">maxRowsReached' + str(lastValue) + '</div>'
cursor.close()
return resourceHTML
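# Paging note: results are keyset-paginated. When a full page of fetchSize
# rows comes back, getResourceData() appends either a hidden
# 'maxRowsReached<lastValue>' marker (HTML) or a "last_value" field (JSON),
# where lastValue is the ordering column of the final row. The client echoes
# that value back as the lastValue form parameter, and the next request adds
#   AND <orderCol> < <lastValue>
# to the criteria so the following page picks up where the previous one ended.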
# Get current url
try:
url = os.environ['SCRIPT_NAME']
except KeyError:
url = ''
form = cgi.FieldStorage()
# Get Cookies
useCookies = 1
cookies = Cookie.SimpleCookie()
try:
cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
useCookies = 0
if useCookies:
try:
currentUser = cookies['userID'].value
except KeyError:
currentUser = ''
try:
loginResult = cookies['loginAttempt'].value
except KeyError:
loginResult = 'success'
try:
sid = cookies['gh_sid'].value
except KeyError:
sid = form.getfirst('gh_sid', '')
else:
currentUser = ''
loginResult = 'success'
sid = form.getfirst('gh_sid', '')
galaxy = form.getfirst('galaxy', '')
planet = form.getfirst('planetSel', '')
planetName = form.getfirst('planetName', '')
resGroup = form.getfirst('resGroup', '')
resCategory = form.getfirst('resCategory', '')
resType = form.getfirst('resType', '')
sort = form.getfirst('sort', '')
formatType = form.getfirst('formatType', '')
userBy = form.getfirst('userBy', '')
userAction = form.getfirst('userAction', '')
minVals = form.getfirst('minVals', '')
maxVals = form.getfirst('maxVals', '')
percVals = form.getfirst('percVals', '')
available = form.getfirst('available', '')
verified = form.getfirst('verified', '')
lastValue = form.getfirst('lastValue', '')
compare = form.getfirst('compare', '')
favorite = form.getfirst('favorite', 'undefined')
fetchSize = form.getfirst('fetchSize', '20')
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
galaxy = dbShared.dbInsertSafe(galaxy)
planet = dbShared.dbInsertSafe(planet)
planetName = dbShared.dbInsertSafe(planetName)
resGroup = dbShared.dbInsertSafe(resGroup)
resCategory = dbShared.dbInsertSafe(resCategory)
resType = dbShared.dbInsertSafe(resType)
sort = dbShared.dbInsertSafe(sort)
userBy = dbShared.dbInsertSafe(userBy)
userAction = dbShared.dbInsertSafe(userAction)
minVals = dbShared.dbInsertSafe(minVals)
maxVals = dbShared.dbInsertSafe(maxVals)
percVals = dbShared.dbInsertSafe(percVals)
lastValue = dbShared.dbInsertSafe(lastValue)
favorite = dbShared.dbInsertSafe(favorite)
# Get a session
logged_state = 0
linkappend = ''
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
logged_state = 1
currentUser = sess
if (useCookies == 0):
linkappend = 'gh_sid=' + sid
joinStr = ""
criteriaStr = ""
galaxyCriteriaStr = ""
orderCol = "spawnID"
orderStr = " ORDER BY " + orderCol + " DESC"
groupType = 1
galaxyState = 0
errorStr = ""
percVals = percVals.replace("%","")
if galaxy == "":
errorStr = "Error: No Galaxy Specified"
else:
galaxyState = dbShared.galaxyState(galaxy)
if (resGroup != "any" and resGroup != ""):
joinStr = joinStr + " INNER JOIN (SELECT resourceType FROM tResourceTypeGroup WHERE resourceGroup='" + resGroup + "' GROUP BY resourceType) rtgg ON rt1.resourceType = rtgg.resourceType"
if (resCategory != "any" and resCategory != ""):
joinStr = joinStr + " INNER JOIN (SELECT resourceType FROM tResourceTypeGroup WHERE resourceGroup='" + resCategory + "' GROUP BY resourceType) rtgc ON rt1.resourceType = rtgc.resourceType"
criteriaStr = "tResources.galaxy=" + galaxy
if (planet == "" and planetName != ""):
planet = dbShared.getPlanetID(planetName)
if (planet != "any" and planet != ""):
joinStr = joinStr + " INNER JOIN tResourcePlanet ON tResources.spawnID = tResourcePlanet.spawnID"
criteriaStr += " AND planetID=" + planet
if (resType != "any" and resType != ""):
criteriaStr = criteriaStr + " AND tResources.resourceType='" + resType + "'"
if logged_state == 1:
# for later, when non-public waypoints are supported
#wpCriteria = 'shareLevel=256 OR owner="' + currentUser + '" OR (shareLevel=64 AND owner IN (SELECT f1.friendID FROM tUserFriends f1 INNER JOIN tUserFriends f2 ON f1.userID=f2.friendID WHERE f1.userID="' + currentUser + '")) OR waypointID IN (SELECT uw.waypointID FROM tUserWaypoints uw WHERE unlocked IS NOT NULL AND uw.userID="' + currentUser + '")'
wpCriteria = 'shareLevel=256'
if favorite == "on":
joinStr = joinStr + " INNER JOIN (SELECT itemID, favGroup, despawnAlert FROM tFavorites WHERE userID='" + currentUser + "' AND favType=1) favs ON tResources.spawnID = favs.itemID"
else:
joinStr = joinStr + ' LEFT JOIN (SELECT itemID, favGroup, despawnAlert FROM tFavorites WHERE userID="' + currentUser + '" AND favType=1) favs ON tResources.spawnID = favs.itemID'
favCols = ', favGroup, despawnAlert'
else:
wpCriteria = 'shareLevel=256'
favCols = ', NULL, NULL'
if (userBy != '' and userAction != ''):
galaxyCriteriaStr += (" AND {0}By='{1}'").format(userAction, userBy)
if available != 'undefined':
galaxyCriteriaStr += ' AND tResources.unavailable IS NULL'
else:
if not fetchSize.isdigit() or len(fetchSize) > 2:
errorStr = "Error: Invalid fetch size when including unavailable resources."
if verified != 'undefined':
galaxyCriteriaStr += ' AND tResources.verified IS NOT NULL'
mins = minVals.split(",")
if len(mins) == len(RES_STATS):
for i in range(len(mins)):
if mins[i].isdigit():
criteriaStr = criteriaStr + " AND " + RES_STATS[i] + " >= " + str(mins[i])
maxs = maxVals.split(",")
if len(maxs) == len(RES_STATS):
for i in range(len(maxs)):
if maxs[i].isdigit():
criteriaStr = criteriaStr + " AND " + RES_STATS[i] + " <= " + str(maxs[i])
if sort == "time":
orderCol = "UNIX_TIMESTAMP(tResources.entered)"
orderStr = " ORDER BY " + orderCol + " DESC"
if sort == "quality":
weightStr = ""
weights = percVals.split(",")
if len(RES_STATS) == len(weights):
for i in range(len(weights)):
if weights[i].isdigit():
weightStr = weightStr + ("+CASE WHEN {0}max > 0 THEN {0}*(" + str(weights[i]) + "/100) ELSE 0 END").format(RES_STATS[i])
if weightStr == "":
weightStr += ' ((CASE WHEN CRmax > 0 THEN CR*.06 ELSE 0 END + CASE WHEN CDmax > 0 THEN CD*12.74 ELSE 0 END + CASE WHEN DRmax > 0 THEN DR*12.26 ELSE 0 END + CASE WHEN FLmax > 0 THEN FL*3.22 ELSE 0 END + CASE WHEN HRmax > 0 THEN HR*1.27 ELSE 0 END + CASE WHEN MAmax > 0 THEN MA*5.1 ELSE 0 END + CASE WHEN PEmax > 0 THEN PE*9.34 ELSE 0 END + CASE WHEN OQmax > 0 THEN OQ*30.64 ELSE 0 END + CASE WHEN SRmax > 0 THEN SR*9.16 ELSE 0 END + CASE WHEN UTmax > 0 THEN UT*16.2 ELSE 0 END)'
weightStr += ' / (CASE WHEN CRmax > 0 THEN .06 ELSE 0 END + CASE WHEN CDmax > 0 THEN 12.74 ELSE 0 END + CASE WHEN DRmax > 0 THEN 12.26 ELSE 0 END + CASE WHEN FLmax > 0 THEN 3.22 ELSE 0 END + CASE WHEN HRmax > 0 THEN 1.27 ELSE 0 END + CASE WHEN MAmax > 0 THEN 5.1 ELSE 0 END + CASE WHEN PEmax > 0 THEN 9.34 ELSE 0 END + CASE WHEN OQmax > 0 THEN 30.64 ELSE 0 END + CASE WHEN SRmax > 0 THEN 9.16 ELSE 0 END + CASE WHEN UTmax > 0 THEN 16.2 ELSE 0 END))'
else:
weightStr = weightStr[1:]
orderCol = weightStr
orderStr = " ORDER BY " + orderCol + " DESC"
if lastValue != "":
criteriaStr = criteriaStr + (" AND {0} < {1}").format(orderCol,lastValue)
# Only show update tools if user logged in and has positive reputation
stats = dbShared.getUserStats(currentUser, galaxy).split(",")
userReputation = int(stats[2])
if formatType == 'json':
print 'Content-type: text/json\n'
else:
print 'Content-type: text/html\n'
if (errorStr == ""):
resData = ''
tokenPosition = -1
conn = dbShared.ghConn()
if (compare == '' or compare == 'undefined' or logged_state == 0):
resSQL = getResourceSQL(wpCriteria, favCols, joinStr, orderCol, orderStr, criteriaStr, galaxyCriteriaStr, '')
resData = getResourceData(conn, resSQL, userReputation, logged_state > 0 and galaxyState == 1, formatType)
tokenPosition = resData.find('maxRowsReached')
if tokenPosition > -1:
resData += '<div style="text-align:center;"><button id="nextResourcesButton" class="ghButton" style="margin:10px;" onclick="moreResources(\''+ resData[tokenPosition+14:resData.find('</div>', tokenPosition)] + '\', \'next\');">Next 20</button></div>'
else:
# Include side by side comparison of inventory
formatType = 'compare'
resData = '<div class="resourceCompareGroup">'
resData += '<div class="inlineBlock" style="width:50%">'
resData += '<h4>Galaxy</h4>'
resSQL = getResourceSQL(wpCriteria, favCols, joinStr, orderCol, orderStr, criteriaStr, galaxyCriteriaStr, '')
resData += getResourceData(conn, resSQL, userReputation, logged_state > 0 and galaxyState == 1, formatType)
resData += '</div><div class="inlineBlock" style="width:50%">'
resData += '<h4>My Inventory</h4>'
resSQL = getResourceSQL(wpCriteria, favCols, joinStr, orderCol, orderStr, criteriaStr, '', 'y')
resData += getResourceData(conn, resSQL, userReputation, logged_state > 0 and galaxyState == 1, formatType)
resData += '</div></div>'
tokenPosition = resData.find('maxRowsReached')
if tokenPosition > -1:
resData += '<div style="text-align:center;"><button id="nextResourcesButton" class="ghButton" style="margin:10px;" onclick="moreResources(\''+ resData[tokenPosition+14:resData.find('</div>', tokenPosition)] + '\', \'next\');">Next 20</button></div>'
# Include total results info for json service
if formatType == 'json':
responseData = '{\n'
resCountSQL = getResourceCountSQL(wpCriteria, favCols, joinStr, orderCol, orderStr, criteriaStr, galaxyCriteriaStr, '')
cursor = conn.cursor()
if (cursor):
cursor.execute(resCountSQL)
row = cursor.fetchone()
if row == None or row[0] == 0:
responseData += '"response" :\n { "total_results" : 0,\n "total_pages" : 0,\n'
resData = '"' + resData + '"'
else:
if fetchSize.isdigit():
responseData += '"response" :\n { "total_results" : ' + str(row[0]) + ',\n "total_pages" : ' + str("%.0f" % (float((row[0] / int(fetchSize)) + 1))) + ',\n'
else:
responseData += '"response" :\n { "total_results" : ' + str(row[0]) + ',\n "total_pages" : 1,\n'
cursor.close()
responseData += ' "server_time" : "' + datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S") + '",\n'
print responseData + ' "resources" : '
resData += '\n }\n}'
conn.close()
print resData
sys.exit(200)
else:
if formatType == 'json':
print '{ "response" : "' + errorStr + '" }'
else:
print '<h2>' + errorStr + '</h2>'
sys.exit(500)
|
druss316/G-Harvestor
|
html/getResources.py
|
Python
|
gpl-3.0
| 16,603
|
[
"Galaxy"
] |
d80be1cd175e426696479391c3f81b5f59452987c59791c9592e9ed04a02fd9e
|
from setuptools import setup, find_packages
setup(name = 'coevo',
version = '0.2.0',
description = 'library for comparing coevolution identifying methods',
url = 'in progress',
author = 'Aram Avila-Herrera',
author_email = 'Aram.Avila-Herrera@ucsf.edu',
license = 'GPL',
packages = find_packages(),
install_requires = ['pandas', 'biopython'],
scripts = [
'bin/fasta_to_phy.py',
'bin/fasta_to_psicov.py',
'bin/get_dists.py',
'bin/join_fastas.py',
'bin/make_attributes.py',
'bin/map_column_to_resnum.py',
'bin/min_dists.py',
'bin/split_faa_on_col.py'
],
zip_safe = False
)
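# Usage sketch: installing from the repository root with
#   pip install .
# pulls in the install_requires dependencies and puts the scripts listed in
# scripts= on the environment's bin/ path.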
|
aavilahe/coevo_analysis_pypackage
|
setup.py
|
Python
|
gpl-3.0
| 831
|
[
"Biopython"
] |
4bcf2513c8111d79088e77267d88a84a57381d70a13511741dc6f4cb251501e0
|
# creates: fcc100.png fcc110.png bcc100.png fcc111.png bcc110.png bcc111.png hcp0001.png fcc111o.png fcc211o.png bcc110o.png bcc111o.png hcp0001o.png ontop-site.png hollow-site.png fcc-site.png hcp-site.png bridge-site.png diamond100.png diamond111.png hcp10m10.png
from ase import Atoms
from ase.io import write
from ase.lattice.surface import (fcc100, fcc110, bcc100, fcc111,
bcc110, bcc111, hcp0001, hcp10m10,
diamond100, diamond111, add_adsorbate,
fcc211)
surfaces = ['fcc100', 'fcc110', 'bcc100', 'hcp10m10', 'diamond100',
'fcc111', 'bcc110', 'bcc111', 'hcp0001', 'diamond111', 'fcc211']
symbols = {'fcc': 'Cu', 'bcc': 'Fe', 'hcp': 'Ru', 'dia': 'C'}
radii = {'fcc': 1.1, 'bcc': 1.06, 'hcp': 1.08, 'dia': 0.5}
adsorbates = {'ontop': 'H', 'hollow': 'O', 'fcc': 'N', 'hcp': 'C',
'bridge': 'F'}
def save(name, slab):
print('save %s' % name)
write(name + '.png', slab, show_unit_cell=2, radii=radii[name[:3]],
scale=10)
for name in surfaces:
f = eval(name)
for kwargs in [{}, {'orthogonal': False}, {'orthogonal': True}]:
print name, kwargs
try:
slab = f(symbols[name[:3]], (3, 4, 5), vacuum=4, **kwargs)
except (TypeError, NotImplementedError):
continue
try:
for site in slab.adsorbate_info['sites']:
if site.endswith('bridge'):
h = 1.5
else:
h = 1.2
add_adsorbate(slab, adsorbates.get(site, 'F'), h, site)
except KeyError:
pass
if kwargs.get('orthogonal', None):
name += 'o'
save(name, slab)
for site, symbol in adsorbates.items():
write('%s-site.png' % site, Atoms(symbol), radii=1.08, scale=10)
|
askhl/ase
|
doc/ase/surface.py
|
Python
|
gpl-2.0
| 1,866
|
[
"ASE"
] |
9f53169683321f2cc8845a05f26d0e39bb3a7f0a6ef8fbd73b63eecd6da06569
|
# -*-python-*-
#
# Copyright (C) 1999-2013 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
#
# accept.py: parse/handle the various Accept headers from the client
#
# -----------------------------------------------------------------------
import re
import string
def language(hdr):
"Parse an Accept-Language header."
# parse the header, storing results in a _LanguageSelector object
return _parse(hdr, _LanguageSelector())
# -----------------------------------------------------------------------
_re_token = re.compile(r'\s*([^\s;,"]+|"[^"]*")+\s*')
_re_param = re.compile(r';\s*([^;,"]+|"[^"]*")+\s*')
_re_split_param = re.compile(r'([^\s=])\s*=\s*(.*)')
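# Illustration: for a header such as
#   fr, de;q=0.9, en-gb;q=0.7
# _re_token matches each language tag ("fr", "de", "en-gb"), _re_param
# consumes any ";q=0.9"-style parameter that follows a tag, and
# _re_split_param splits a parameter into its name and value so that _parse()
# below can record the quality ("q"/"qs") value on the corresponding item.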
def _parse(hdr, result):
# quick exit for empty or not-supplied header
if not hdr:
return result
pos = 0
while pos < len(hdr):
name = _re_token.match(hdr, pos)
if not name:
raise AcceptLanguageParseError()
a = result.item_class(string.lower(name.group(1)))
pos = name.end()
while 1:
# are we looking at a parameter?
match = _re_param.match(hdr, pos)
if not match:
break
param = match.group(1)
pos = match.end()
# split up the pieces of the parameter
match = _re_split_param.match(param)
if not match:
# the "=" was probably missing
continue
pname = string.lower(match.group(1))
if pname == 'q' or pname == 'qs':
try:
a.quality = float(match.group(2))
except ValueError:
# bad float literal
pass
elif pname == 'level':
try:
a.level = float(match.group(2))
except ValueError:
# bad float literal
pass
elif pname == 'charset':
a.charset = string.lower(match.group(2))
result.append(a)
if hdr[pos:pos+1] == ',':
pos = pos + 1
return result
class _AcceptItem:
def __init__(self, name):
self.name = name
self.quality = 1.0
self.level = 0.0
self.charset = ''
def __str__(self):
s = self.name
if self.quality != 1.0:
s = '%s;q=%.3f' % (s, self.quality)
if self.level != 0.0:
s = '%s;level=%.3f' % (s, self.level)
if self.charset:
s = '%s;charset=%s' % (s, self.charset)
return s
class _LanguageRange(_AcceptItem):
def matches(self, tag):
"Match the tag against self. Returns the qvalue, or None if non-matching."
if tag == self.name:
return self.quality
# are we a prefix of the available language-tag
name = self.name + '-'
if tag[:len(name)] == name:
return self.quality
return None
class _LanguageSelector:
"""Instances select an available language based on the user's request.
Languages found in the user's request are added to this object with the
append() method (they should be instances of _LanguageRange). After the
languages have been added, then the caller can use select_from() to
determine which user-request language(s) best matches the set of
available languages.
Strictly speaking, this class is general enough to handle more than just
language matching. It has been implemented to enable q-value based
matching between requests and availability. Some minor tweaks may be
necessary, but simply using a new 'item_class' should be sufficient
to allow the _parse() function to construct a selector which holds
the appropriate item implementations (e.g. _LanguageRange is the
concrete _AcceptItem class that handles matching of language tags).
"""
item_class = _LanguageRange
def __init__(self):
self.requested = [ ]
def select_from(self, avail):
"""Select one of the available choices based on the request.
Note: if there isn't a match, then the first available choice is
considered the default. Also, if a number of matches are equally
relevant, then the first-requested will be used.
avail is a list of language-tag strings of available languages
"""
# tuples of (qvalue, language-tag)
matches = [ ]
# try matching all pairs of desired vs available, recording the
# resulting qvalues. we also need to record the longest language-range
# that matches since the most specific range "wins"
for tag in avail:
longest = 0
final = 0.0
# check this tag against the requests from the user
for want in self.requested:
qvalue = want.matches(tag)
#print 'have %s. want %s. qvalue=%s' % (tag, want.name, qvalue)
if qvalue is not None and len(want.name) > longest:
# we have a match and it is longer than any we may have had.
# the final qvalue should be from this tag.
final = qvalue
longest = len(want.name)
# a non-zero qvalue is a potential match
if final:
matches.append((final, tag))
# if there are no matches, then return the default language tag
if not matches:
return avail[0]
# get the highest qvalue and its corresponding tag
matches.sort()
qvalue, tag = matches[-1]
# if the qvalue is zero, then we have no valid matches. return the
# default language tag.
if not qvalue:
return avail[0]
# if there are two or more matches, and the second-highest has a
# qvalue equal to the best, then we have multiple "best" options.
# select the one that occurs first in self.requested
if len(matches) >= 2 and matches[-2][0] == qvalue:
# remove non-best matches
while matches[0][0] != qvalue:
del matches[0]
#print "non-deterministic choice", matches
# sequence through self.requested, in order
for want in self.requested:
# try to find this one in our best matches
for qvalue, tag in matches:
if want.matches(tag):
# this requested item is one of the "best" options
### note: this request item could match *other* "best" options,
### so returning *this* one is rather non-deterministic.
### theoretically, we could go further here, and do another
### search based on the ordering in 'avail'. however, note
### that this generally means that we are picking from multiple
### *SUB* languages, so I'm all right with the non-determinism
### at this point. stupid client should send a qvalue if they
### want to refine.
return tag
# NOTREACHED
# return the best match
return tag
def append(self, item):
self.requested.append(item)
class AcceptLanguageParseError(Exception):
pass
def _test():
s = language('en')
assert s.select_from(['en']) == 'en'
assert s.select_from(['en', 'de']) == 'en'
assert s.select_from(['de', 'en']) == 'en'
# Netscape 4.x and early versions of Mozilla may not send a q value
s = language('en, ja')
assert s.select_from(['en', 'ja']) == 'en'
s = language('fr, de;q=0.9, en-gb;q=0.7, en;q=0.6, en-gb-foo;q=0.8')
assert s.select_from(['en']) == 'en'
assert s.select_from(['en-gb-foo']) == 'en-gb-foo'
assert s.select_from(['de', 'fr']) == 'fr'
assert s.select_from(['de', 'en-gb']) == 'de'
assert s.select_from(['en-gb', 'en-gb-foo']) == 'en-gb-foo'
assert s.select_from(['en-bar']) == 'en-bar'
assert s.select_from(['en-gb-bar', 'en-gb-foo']) == 'en-gb-foo'
# non-deterministic. en-gb;q=0.7 matches both avail tags.
#assert s.select_from(['en-gb-bar', 'en-gb']) == 'en-gb'
|
marcellodesales/svnedge-console
|
svn-server/lib/viewvc/accept.py
|
Python
|
agpl-3.0
| 7,744
|
[
"VisIt"
] |
ed67f3986b044f3f9afe3799260ec0004957b974eb922bc91749da4e1beca0a7
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkSelectionLink(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkSelectionLink(), 'Processing.',
(), ('vtkSelection',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkSelectionLink.py
|
Python
|
bsd-3-clause
| 474
|
[
"VTK"
] |
33efc7bc7f7c5465deaec97889efc6a06138f3feba196916f7e58dc7e4f0f0d0
|
import numpy as np
from math import sqrt
from ase import Atom, Atoms
from ase.optimize import QuasiNewton, FIRE
from ase.constraints import FixAtoms
from ase.neb import NEB
from ase.io import write, PickleTrajectory
from ase.visualize import view
from ase.calculators.emt import ASAP
# Distance between Cu atoms on a (100) surface:
d = 2.74
h1 = d * sqrt(3) / 2
h2 = d * sqrt(2.0 / 3)
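# Geometry note: with d the nearest-neighbour distance, h1 = d*sqrt(3)/2 is
# the row spacing within a close-packed layer and h2 = d*sqrt(2/3) is the
# spacing between successive (111)-type layers, so the cell vectors below
# describe an fcc-like stacking of hexagonal layers.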
initial = Atoms(symbols='Pt',
positions=[(0, 0, 0)],#(1.37,0.79,2.24),(2.74,1.58,4.48),(0,0,6.72),(1.37,0.79,8.96),(2.74,1.58,11.2)],
cell=([(d,0,0),(d/2,h1,0),(d/2,h1/3,-h2)]),
pbc=(True, True, True))
initial *= (7, 8, 6) # 5x5 (100) surface-cell
cell = initial.get_cell()
cell[2] = (0, 0, 22)
initial.set_cell(cell)
#initial.set_pbc((True,True,False))
# Approximate height of Ag atom on Cu(100) surfece:
h0 = 2.2373
initial += Atom('Pt', (10.96, 11.074, h0))
initial += Atom('Pt', (13.7, 11.074, h0))
initial += Atom('Pt', (9.59, 8.701, h0))
initial += Atom('Pt', (12.33, 8.701, h0))
initial += Atom('Pt', (15.07, 8.701, h0))
initial += Atom('Pt', (10.96, 6.328, h0))
initial += Atom('Pt', (13.7, 6.328, h0))
if 0:
view(initial)
# Make band:
images = [initial.copy() for i in range(7)]
neb = NEB(images)
# Set constraints and calculator:
indices = np.compress(initial.positions[:, 2] < -5.0, range(len(initial)))
constraint = FixAtoms(indices)
for image in images:
image.set_calculator(ASAP())
image.constraints.append(constraint)
# Displace last image:
for i in xrange(1,8,1):
images[-1].positions[-i] += (d/2, -h1/3, 0)
write('initial.traj', images[0])
# Relax height of Ag atom for initial and final states:
for image in [images[0], images[-1]]:
QuasiNewton(image).run(fmax=0.01)
if 0:
write('initial.pckl', images[0])
write('final.pckl', images[-1])
# Interpolate positions between initial and final states:
neb.interpolate()
for image in images:
print image.positions[-1], image.get_potential_energy()
traj = PickleTrajectory('mep.traj', 'w')
dyn = FIRE(neb, dt=0.1)
#dyn = MDMin(neb, dt=0.1)
#dyn = QuasiNewton(neb)
dyn.attach(neb.writer(traj))
dyn.run(fmax=0.01,steps=150)
for image in images:
print image.positions[-1], image.get_potential_energy()
|
grhawk/ASE
|
tools/ase/examples/Pt_island.py
|
Python
|
gpl-2.0
| 2,208
|
[
"ASE"
] |
7cae4702016db74368d5a18cb30678e25042b9193155677b34da5179fb9901e7
|
import json
from coalib.bearlib import deprecate_settings
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.NpmRequirement import NpmRequirement
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
from coala_utils.param_conversion import negate
@linter(executable='coffeelint',
use_stdin=True)
class CoffeeLintBear:
"""
Check CoffeeScript code for a clean and consistent style.
For more information about coffeelint, visit <http://www.coffeelint.org/>.
"""
LANGUAGES = {'CoffeeScript'}
REQUIREMENTS = {NpmRequirement('coffeelint', '1')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax', 'Formatting', 'Smell', 'Complexity', 'Duplication'}
severity_map = {'warn': RESULT_SEVERITY.NORMAL,
'error': RESULT_SEVERITY.MAJOR,
'ignore': RESULT_SEVERITY.INFO}
@staticmethod
def create_arguments(filename, file, config_file):
return '--reporter=raw', '--stdin', '-f', config_file
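# Note: combined with executable='coffeelint' and use_stdin=True above, the
# effective invocation is roughly
#   coffeelint --reporter=raw --stdin -f <generated config file>
# with the file contents supplied on stdin.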
@staticmethod
@deprecate_settings(indent_size='tab_width',
allow_increment=(
'no_decr_or_incrementation_operators', negate),
allow_no_parameters=(
'no_empty_parameter_list', negate),
allow_empty_functions=('no_empty_functions', negate),
allow_this_statements=('no_this', negate),
allow_implicit_parentheses=(
'no_implicit_parentheses', negate),
allow_interpolation_in_single_quotes=(
'no_interpolation_in_single_quotes', negate),
allow_stand_alone_at_sign=(
'no_stand_alone_at_sign', negate),
allow_throwing_strings=(
'disable_throwing_strings', negate),
allow_unnecessary_double_quotes=(
'no_unnecessary_double_quotes', negate),
allow_bitwise_operators=(
'use_english_operator', negate),
force_braces='no_implicit_braces')
def generate_config(filename, file,
max_line_length: int=79,
max_line_length_affect_comments: bool=True,
space_before_and_after_arrow: bool=True,
check_braces_spacing: bool=False,
braces_spacing_width: int=1,
spacing_in_empty_braces: int=0,
class_naming_camelCase: bool=True,
spaces_before_and_after_colon: bool=False,
spaces_before_colon: int=0,
spaces_after_colon: int=1,
enforce_newline_at_EOF: bool=True,
use_spaces: bool=True,
indent_size: int=2,
number_of_newlines_after_classes: int=2,
prohibit_embedding_javascript_snippet: bool=True,
force_braces: bool=False,
allow_implicit_parentheses: bool=True,
allow_interpolation_in_single_quotes: bool=True,
allow_stand_alone_at_sign: bool=False,
allow_throwing_strings: bool=False,
allow_trailing_semicolons: bool=False,
allow_trailing_whitespaces: bool=False,
allow_unnecessary_double_quotes: bool=True,
allow_bitwise_operators: bool=True,
spaces_around_operators: bool=True,
space_after_comma: bool=True,
cyclomatic_complexity: int=0,
prevent_duplicate_keys: bool=True,
consistent_line_endings_style: str='',
allow_this_statements: bool=True,
allow_increment: bool=True,
allow_no_parameters: bool=True,
allow_empty_functions: bool=False,
enforce_parentheses_on_non_empty_constructors:
bool=True
):
"""
:param max_line_length:
Maximum number of characters per line.
:param max_line_length_affect_comments:
Determines whether ``max_line_length`` should also apply to comments.
:param space_before_and_after_arrow:
Determines if spaces should be used before and after the arrow.
:param check_braces_spacing:
Checks if proper spacing is used inside curly braces.
:param braces_spacing_width:
Determines the number of blank spaces after the opening ``{`` and
before the closing brace ``}`` given that there is something within
the braces.
:param spacing_in_empty_braces:
Determines the number of blank spaces after the opening ``{`` and
before the closing brace ``}`` given empty content.
:param class_naming_camelCase:
Checks whether class names should be written in camel case.
:param spaces_before_and_after_colon:
Checks the number of spaces before and after colon.
:param spaces_before_colon:
Determines the number of blank spaces before the colon when
``spaces_before_and_after_colon == True``.
:param spaces_after_colon:
Determines the number of spaces after the colon when
``spaces_before_and_after_colon == True``.
:param enforce_newline_at_EOF:
Checks if the file ends with a single newline.
:param use_spaces:
Forbids tabs in indentation and applies two spaces for this
purpose.
:param indent_size:
Number of spaces per indentation level.
:param number_of_newlines_after_classes:
Determines the number of newlines that separate the class
definition and the rest of the code.
:param prohibit_embedding_javascript_snippet:
Prevents some JavaScript elements like ``eval`` to affect
CoffeeScript.
:param force_braces:
Prohibits implicit braces when declaring object literals.
Example: If ``force_braces = True`` then
```
1:2, 3:4
```
is prohibited, whereas
```
{1:2, 3:4}
```
is accepted.
:param allow_implicit_parentheses:
Allows implicit parentheses.
:param allow_interpolation_in_single_quotes:
Allows string interpolation in a single quoted string.
Example: If ``allow_interpolation_in_single_quotes = False`` then
```
f = '#{bar}'
```
is prohibited, whereas
```
f = "#{bar}"
```
is correct.
:param allow_stand_alone_at_sign:
Allows the use of stand alone ``@``.
Example: If ``allow_stand_alone_at_sign = False``
```
@ notok
not(@).ok
@::
```
are prohibited, whereas
```
@alright
@(fn)
@ok()
@[ok]
@ok()
```
are accepted.
:param allow_throwing_strings:
Allows throwing string literals or interpolation.
Example: If ``allow_throwing_strings = False``
```
throw 'my error'
throw "#{1234}"
```
will not be permitted.
:param allow_trailing_semicolons:
Prohibits trailing semicolons when ``False`` since they are
not useful. The semicolon is meaningful only if there's another
instruction on the same line.
Example: If ``allow_trailing_semicolons = False``
```
x = '1234'; console.log(x)
```
Here the semicolon is meaningful.
```
alert('end of line');
```
This semicolon is redundant.
:param allow_trailing_whitespaces:
Checks whether trailing whitespace is allowed in the code.
:param allow_unnecessary_double_quotes:
Allows enclosing strings in double quotes.
:param allow_bitwise_operators:
Determines if ``and``, ``or``, ``is`` and ``isnt`` should be used
instead of ``&&``, ``||``, ``==`` and ``!=``.
:param spaces_around_operators:
Enforces that operators have spaces around them.
:param space_after_comma:
Checks if there is a blank space after commas.
:param cyclomatic_complexity:
Maximum cyclomatic complexity of the file.
:param prevent_duplicate_keys:
Prevents defining duplicate keys in object literals and classes.
:param enforce_parentheses_on_non_empty_constructors:
Requires constructors with parameters to include parentheses.
Example:
```
class Foo
# Warn about missing parentheses here
a = new Foo
b = new bar.foo.Foo
# The parentheses make it clear no parameters are intended
c = new Foo()
d = new bar.foo.Foo()
e = new Foo 1, 2
f = new bar.foo.Foo 1, 2
```
:param consistent_line_endings_style:
The option to ``line_endings``, its value is either ``unix`` or
``windows``.
:param allow_this_statements:
Allows the use of ``this``. ``@`` should be used if ``False``.
:param allow_increment:
Allows the use of increment and decrement arithmetic operators.
:param allow_no_parameters:
Allows empty parameter lists in function definitions.
:param allow_empty_functions:
Allows declaring empty functions.
"""
coffee_configs = {'max_line_length':
{'value': max_line_length,
'level': 'error',
'limitComments':
max_line_length_affect_comments}}
coffee_configs['arrow_spacing'] = (
{'level': 'error' if space_before_and_after_arrow else 'ignore'})
if check_braces_spacing:
coffee_configs['braces_spacing'] = (
{'level': 'error',
'spaces': braces_spacing_width,
'empty_object_spaces': spacing_in_empty_braces})
if class_naming_camelCase:
coffee_configs['camel_case_classes'] = {'level': 'error'}
if spaces_before_and_after_colon:
coffee_configs['colon_assignment_spacing'] = (
{'level': 'error',
'spacing': {'left': spaces_before_colon,
'right': spaces_after_colon}})
coffee_configs['eol_last'] = (
{'level': 'error' if enforce_newline_at_EOF else 'ignore'})
coffee_configs['newlines_after_classes'] = (
{'value': number_of_newlines_after_classes,
'level': 'error'})
coffee_configs['no_backticks'] = (
{'level': 'error'
if prohibit_embedding_javascript_snippet else 'ignore'})
if force_braces:
coffee_configs['no_implicit_braces'] = (
{'level': 'error', 'strict': True})
if not allow_implicit_parentheses:
coffee_configs['no_implicit_parens'] = (
{'strict': True, 'level': 'error'})
coffee_configs['no_interpolation_in_single_quotes'] = (
{'level': 'error'
if not allow_interpolation_in_single_quotes else 'ignore'})
if not allow_stand_alone_at_sign:
coffee_configs['no_stand_alone_at'] = {'level': 'error'}
if use_spaces:
coffee_configs['no_tabs'] = {'level': 'error'}
coffee_configs['indentation'] = (
{'value': indent_size, 'level': 'error'})
coffee_configs['no_throwing_strings'] = (
{'level': 'error' if not allow_throwing_strings else 'ignore'})
coffee_configs['no_trailing_semicolons'] = (
{'level': 'error' if not allow_trailing_semicolons else 'ignore'})
if not allow_trailing_whitespaces:
coffee_configs['no_trailing_whitespace'] = (
{'level': 'error',
'allowed_in_comments': True,
'allowed_in_empty_lines': True})
if not allow_unnecessary_double_quotes:
coffee_configs['no_unnecessary_double_quotes'] = {'level': 'error'}
if not allow_bitwise_operators:
coffee_configs['prefer_english_operator'] = (
{'level': 'error', 'doubleNotLevel': 'ignore'})
if spaces_around_operators:
coffee_configs['space_operators'] = {'level': 'error'}
if space_after_comma:
coffee_configs['spacing_after_comma'] = {'level': 'warn'}
coffee_configs['cyclomatic_complexity'] = (
{'value': cyclomatic_complexity,
'level': ('error' if cyclomatic_complexity else 'ignore')})
coffee_configs['duplicate_key'] = (
{'level': 'error' if prevent_duplicate_keys else 'ignore'})
if enforce_parentheses_on_non_empty_constructors:
coffee_configs['non_empty_constructor_needs_parens'] = (
{'level': 'error'})
if consistent_line_endings_style:
coffee_configs['line_endings'] = (
{'level': 'error', 'value': consistent_line_endings_style})
if not allow_this_statements:
coffee_configs['no_this'] = {'level': 'error'}
if not allow_increment:
coffee_configs['no_plusplus'] = {'level': 'error'}
coffee_configs['no_empty_param_list'] = (
{'level': 'error' if not allow_no_parameters else 'ignore'})
coffee_configs['no_empty_functions'] = (
{'level': 'error' if not allow_empty_functions else 'ignore'})
return json.dumps(coffee_configs)
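# Illustration: with the default settings the returned JSON contains entries
# such as
#   "max_line_length": {"value": 79, "level": "error", "limitComments": true}
#   "no_tabs": {"level": "error"}
#   "indentation": {"value": 2, "level": "error"}
# which is the coffeelint configuration consumed through the -f option built
# in create_arguments().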
def process_output(self, output, filename, file):
output = json.loads(output)
assert len(output) == 1, (
'More than 1 file parsed, something went wrong')
for item in tuple(output.values())[0]:
yield Result.from_values(
origin='{} ({})'.format(self.name, item['rule']),
message=item['message'],
file=filename,
line=item.get('lineNumber', None),
end_line=item.get('lineNumberEnd', None),
severity=self.severity_map[item['level']],
additional_info=item.get('description',
item.get('context', '')))
|
sounak98/coala-bears
|
bears/coffee_script/CoffeeLintBear.py
|
Python
|
agpl-3.0
| 15,288
|
[
"VisIt"
] |
0f8e27856f867ee6c31c48699dfb4b3a15e47708aa3ef1898bd7f5b5c83b0bd7
|
# Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from math import exp
import sys
import ConfigParser as cfg
import os
import numpy as n
import numpy.random as nr
from math import ceil, floor
from ordereddict import OrderedDict
from os import linesep as NL
from options import OptionsParser
import re
class LayerParsingError(Exception):
pass
# A neuron that doesn't take parameters
class NeuronParser:
def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
self.type = type
self.func_str = func_str
self.uses_acts = uses_acts
self.uses_inputs = uses_inputs
def parse(self, type):
if type == self.type:
return {'type': self.type,
'params': {},
'usesActs': self.uses_acts,
'usesInputs': self.uses_inputs}
return None
# A neuron that takes parameters
class ParamNeuronParser(NeuronParser):
neuron_regex = re.compile(r'^\s*(\w+)\s*\[\s*(\w+(\s*,\w+)*)\s*\]\s*$')
def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
NeuronParser.__init__(self, type, func_str, uses_acts, uses_inputs)
m = self.neuron_regex.match(type)
self.base_type = m.group(1)
self.param_names = m.group(2).split(',')
assert len(set(self.param_names)) == len(self.param_names)
def parse(self, type):
m = re.match(r'^%s\s*\[([\d,\.\s\-e]*)\]\s*$' % self.base_type, type)
if m:
try:
param_vals = [float(v.strip()) for v in m.group(1).split(',')]
if len(param_vals) == len(self.param_names):
return {'type': self.base_type,
'params': dict(zip(self.param_names, param_vals)),
'usesActs': self.uses_acts,
'usesInputs': self.uses_inputs}
except TypeError:
pass
return None
class AbsTanhNeuronParser(ParamNeuronParser):
def __init__(self):
ParamNeuronParser.__init__(self, 'abstanh[a,b]', 'f(x) = a * |tanh(b * x)|')
def parse(self, type):
dic = ParamNeuronParser.parse(self, type)
# Make b positive, since abs(tanh(bx)) = abs(tanh(-bx)) and the C++ code
# assumes b is positive.
if dic:
dic['params']['b'] = abs(dic['params']['b'])
return dic
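# Illustration: ParamNeuronParser turns a declaration such as "abstanh[2,0.5]"
# into {'type': 'abstanh', 'params': {'a': 2.0, 'b': 0.5}, ...} by matching
# the base type and the comma-separated numeric parameters; AbsTanhNeuronParser
# additionally forces 'b' to be positive, as noted above.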
# Subclass that throws more convnet-specific exceptions than the default
class MyConfigParser(cfg.SafeConfigParser):
def safe_get(self, section, option, f=cfg.SafeConfigParser.get, typestr=None, default=None):
try:
return f(self, section, option)
except cfg.NoOptionError, e:
if default is not None:
return default
raise LayerParsingError("Layer '%s': required parameter '%s' missing" % (section, option))
except ValueError, e:
if typestr is None:
raise e
raise LayerParsingError("Layer '%s': parameter '%s' must be %s" % (section, option, typestr))
def safe_get_list(self, section, option, f=str, typestr='strings', default=None):
v = self.safe_get(section, option, default=default)
if type(v) == list:
return v
try:
return [f(x.strip()) for x in v.split(',')]
except:
raise LayerParsingError("Layer '%s': parameter '%s' must be ','-delimited list of %s" % (section, option, typestr))
def safe_get_int(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getint, typestr='int', default=default)
def safe_get_float(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getfloat, typestr='float', default=default)
def safe_get_bool(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getboolean, typestr='bool', default=default)
def safe_get_float_list(self, section, option, default=None):
return self.safe_get_list(section, option, float, typestr='floats', default=default)
def safe_get_int_list(self, section, option, default=None):
return self.safe_get_list(section, option, int, typestr='ints', default=default)
def safe_get_bool_list(self, section, option, default=None):
return self.safe_get_list(section, option, lambda x: x.lower() in ('true', '1'), typestr='bools', default=default)
# A class that implements part of the interface of MyConfigParser
class FakeConfigParser(object):
def __init__(self, dic):
self.dic = dic
def safe_get(self, section, option, default=None):
return self.dic[option]
class LayerParser:
def __init__(self):
self.dic = {}
self.set_defaults()
# Post-processing step -- this is called after all layers have been initialized
def optimize(self, layers):
self.dic['actsTarget'] = -1
self.dic['actsGradTarget'] = -1
# Add parameters from layer parameter file
def add_params(self, mcp):
dic, name = self.dic, self.dic['name']
#print "dropout" + name
dic['dropout'] = 0.0
if name in mcp.sections():
dic['dropout'] = mcp.safe_get_float(name, 'dropout', default=0.0)
#print "dropout = " + str(dic['dropout'])
#pass
def init(self, dic):
self.dic = dic
return self
def set_defaults(self):
self.dic['outputs'] = 0
self.dic['parser'] = self
self.dic['requiresParams'] = False
# Does this layer use its own activity matrix
# for some purpose other than computing its output?
# Usually, this will only be true for layers that require their
# own activity matrix for gradient computations. For example, layers
# with logistic units must compute the gradient y * (1 - y), where y is
# the activity matrix.
#
# Layers that do not use their own activity matrix should advertise
# this, since this will enable memory-saving matrix re-use optimizations.
#
# The default value of this property is True, for safety purposes.
# If a layer advertises that it does not use its own activity matrix when
# in fact it does, bad things will happen.
self.dic['usesActs'] = True
# Does this layer use the activity matrices of its input layers
# for some purpose other than computing its output?
#
# Again true by default for safety
self.dic['usesInputs'] = True
# Force this layer to use its own activity gradient matrix,
# instead of borrowing one from one of its inputs.
#
# This should be true for layers where the mapping from output
# gradient to input gradient is non-elementwise.
self.dic['forceOwnActs'] = True
# Does this layer need the gradient at all?
# Should only be true for layers with parameters (weights).
self.dic['gradConsumer'] = False
def parse(self, name, mcp, prev_layers, model=None):
self.prev_layers = prev_layers
self.dic['name'] = name
self.dic['type'] = mcp.safe_get(name, 'type')
return self.dic
def verify_float_range(self, v, param_name, _min, _max):
self.verify_num_range(v, param_name, _min, _max, strconv=lambda x: '%.3f' % x)
def verify_num_range(self, v, param_name, _min, _max, strconv=lambda x:'%d' % x):
if type(v) == list:
for i,vv in enumerate(v):
self._verify_num_range(vv, param_name, _min, _max, i, strconv=strconv)
else:
self._verify_num_range(v, param_name, _min, _max, strconv=strconv)
def _verify_num_range(self, v, param_name, _min, _max, input=-1, strconv=lambda x:'%d' % x):
layer_name = self.dic['name'] if input < 0 else '%s[%d]' % (self.dic['name'], input)
if _min is not None and _max is not None and (v < _min or v > _max):
raise LayerParsingError("Layer '%s': parameter '%s' must be in the range %s-%s" % (layer_name, param_name, strconv(_min), strconv(_max)))
elif _min is not None and v < _min:
raise LayerParsingError("Layer '%s': parameter '%s' must be greater than or equal to %s" % (layer_name, param_name, strconv(_min)))
elif _max is not None and v > _max:
raise LayerParsingError("Layer '%s': parameter '%s' must be smaller than or equal to %s" % (layer_name, param_name, strconv(_max)))
def verify_divisible(self, value, div, value_name, div_name=None, input_idx=0):
layer_name = self.dic['name'] if len(self.dic['inputs']) == 0 else '%s[%d]' % (self.dic['name'], input_idx)
if value % div != 0:
raise LayerParsingError("Layer '%s': parameter '%s' must be divisible by %s" % (layer_name, value_name, str(div) if div_name is None else "'%s'" % div_name))
def verify_str_in(self, value, lst):
if value not in lst:
raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (self.dic['name'], value, ", ".join("'%s'" % s for s in lst)))
def verify_int_in(self, value, lst):
if value not in lst:
raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (self.dic['name'], value, ", ".join("'%d'" % s for s in lst)))
# This looks for neuron=x arguments in various layers, and creates
# separate layer definitions for them.
@staticmethod
def detach_neuron_layers(layers):
layers_new = []
for i, l in enumerate(layers):
layers_new += [l]
if l['type'] != 'neuron' and 'neuron' in l and l['neuron']:
NeuronLayerParser().detach_neuron_layer(i, layers, layers_new)
return layers_new
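# Illustration: detach_neuron_layers() lets a layer definition specify its
# activation inline via a neuron= setting (e.g. neuron=relu, used here only
# as an example value); each such layer gets a separate layer of type
# 'neuron' added to the rebuilt list, so the activation function is parsed
# and executed as a layer of its own.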
# revised by dudalong, 20130129, layerwise training
@staticmethod
def parse_layers_layerwise(layer_cfg_path, param_cfg_path, model, layers=[]):
try:
if not os.path.exists(layer_cfg_path):
raise LayerParsingError("Layer definition file '%s' does not exist" % layer_cfg_path)
if not os.path.exists(param_cfg_path):
raise LayerParsingError("Layer parameter file '%s' does not exist" % param_cfg_path)
incLayers = []
if len(incLayers) == 0:
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.read([layer_cfg_path])
for name in mcp.sections():
if not mcp.has_option(name, 'type'):
raise LayerParsingError("Layer '%s': no type given" % name)
ltype = mcp.safe_get(name, 'type')
if ltype not in layer_parsers:
raise LayerParsingError("Layer '%s': Unknown layer type: '%s'" % (name, ltype))
incLayers += [layer_parsers[ltype]().parse(name, mcp, incLayers, model)]
incLayers = LayerParser.detach_neuron_layers(incLayers)
for l in incLayers:
lp = layer_parsers[l['type']]()
l['parser'].optimize(incLayers)
del l['parser']
for l in incLayers:
if not l['type'].startswith('cost.'):
found = max(l['name'] in [incLayers[n]['name'] for n in l2['inputs']] for l2 in incLayers if 'inputs' in l2)
if not found:
raise LayerParsingError("Layer '%s' of type '%s' is unused" % (l['name'], l['type']))
prev_names = [p['name'] for p in layers]
cur_names = [p['name'] for p in incLayers]
if set(prev_names) == set(cur_names):
incLayers = layers
else:
#for name in prev_names:
# if name not in cur_names:
# raise LayerParsingError("Layer '%s' is not in current layers" % (name))
_new = 0
for i, l in enumerate(layers):
if len(incLayers) > i and l['name'] == incLayers[i]['name'] and not _new:
incLayers[i] = l
else:
_new = 1
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.read([param_cfg_path])
for l in incLayers:
if not mcp.has_section(l['name']) and l['requiresParams']:
raise LayerParsingError("Layer '%s' of type '%s' requires extra parameters, but none given in file '%s'." % (l['name'], l['type'], param_cfg_path))
lp = layer_parsers[l['type']]().init(l)
lp.add_params(mcp)
lp.dic['conserveMem'] = model.op.get_value('conserve_mem')
except LayerParsingError, e:
print e
sys.exit(1)
return incLayers
@staticmethod
def parse_layers(layer_cfg_path, param_cfg_path, model, layers=[]):
try:
if not os.path.exists(layer_cfg_path):
raise LayerParsingError("Layer definition file '%s' does not exist" % layer_cfg_path)
if not os.path.exists(param_cfg_path):
raise LayerParsingError("Layer parameter file '%s' does not exist" % param_cfg_path)
if len(layers) == 0:
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.read([layer_cfg_path])
for name in mcp.sections():
if not mcp.has_option(name, 'type'):
raise LayerParsingError("Layer '%s': no type given" % name)
ltype = mcp.safe_get(name, 'type')
if ltype not in layer_parsers:
raise LayerParsingError("Layer '%s': Unknown layer type: '%s'" % (name, ltype))
layers += [layer_parsers[ltype]().parse(name, mcp, layers, model)]
layers = LayerParser.detach_neuron_layers(layers)
for l in layers:
lp = layer_parsers[l['type']]()
l['parser'].optimize(layers)
del l['parser']
for l in layers:
if not l['type'].startswith('cost.'):
found = max(l['name'] in [layers[n]['name'] for n in l2['inputs']] for l2 in layers if 'inputs' in l2)
if not found:
raise LayerParsingError("Layer '%s' of type '%s' is unused" % (l['name'], l['type']))
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.read([param_cfg_path])
for l in layers:
if not mcp.has_section(l['name']) and l['requiresParams']:
raise LayerParsingError("Layer '%s' of type '%s' requires extra parameters, but none given in file '%s'." % (l['name'], l['type'], param_cfg_path))
lp = layer_parsers[l['type']]().init(l)
lp.add_params(mcp)
lp.dic['conserveMem'] = model.op.get_value('conserve_mem')
print "==================================\nfinish parse_layers\n"
except LayerParsingError, e:
print e
sys.exit(1)
return layers
@staticmethod
def register_layer_parser(ltype, cls):
if ltype in layer_parsers:
raise LayerParsingError("Layer type '%s' already registered" % ltype)
layer_parsers[ltype] = cls
# Any layer that takes an input (i.e. non-data layer)
class LayerWithInputParser(LayerParser):
def __init__(self, num_inputs=-1):
LayerParser.__init__(self)
self.num_inputs = num_inputs
def verify_num_params(self, params):
for param in params:
if len(self.dic[param]) != len(self.dic['inputs']):
raise LayerParsingError("Layer '%s': %s list length does not match number of inputs" % (self.dic['name'], param))
def optimize(self, layers):
LayerParser.optimize(self, layers)
dic = self.dic
# Check if I have an input that no one else uses.
if not dic['forceOwnActs']:
for i, inp in enumerate(dic['inputs']):
l = layers[inp]
if l['outputs'] == dic['outputs'] and sum('inputs' in ll and inp in ll['inputs'] for ll in layers) == 1:
# I can share my activity matrix with this layer
# if it does not use its activity matrix, and I
# do not need to remember my inputs.
if not l['usesActs'] and not dic['usesInputs']:
dic['actsTarget'] = i
# print "Layer '%s' sharing activity matrix with layer '%s'" % (dic['name'], l['name'])
# I can share my gradient matrix with this layer.
dic['actsGradTarget'] = i
# print "Layer '%s' sharing activity gradient matrix with layer '%s'" % (dic['name'], l['name'])
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['inputs'] = [inp.strip() for inp in mcp.safe_get(name, 'inputs').split(',')]
prev_names = [p['name'] for p in prev_layers]
for inp in dic['inputs']:
if inp not in prev_names:
raise LayerParsingError("Layer '%s': input layer '%s' not defined" % (name, inp))
dic['inputs'] = [prev_names.index(inp) for inp in dic['inputs']]
dic['inputLayers'] = [prev_layers[inp] for inp in dic['inputs']]
for inp in dic['inputs']:
if prev_layers[inp]['outputs'] == 0:
raise LayerParsingError("Layer '%s': input layer '%s' does not produce any output" % (name, prev_names[inp]))
dic['numInputs'] = [prev_layers[i]['outputs'] for i in dic['inputs']]
# numInputs
print "dic['numInputs'] = " + str(dic['numInputs'])
# Layers can declare a neuron activation function to apply to their output, as a shortcut
# to avoid declaring a separate neuron layer above themselves.
dic['neuron'] = mcp.safe_get(name, 'neuron', default="")
if self.num_inputs > 0 and len(dic['numInputs']) != self.num_inputs:
raise LayerParsingError("Layer '%s': number of inputs must be %d", name, self.num_inputs)
# input_layers = [prev_layers[i] for i in dic['inputs']]
# dic['gradConsumer'] = any(l['gradConsumer'] for l in dic['inputLayers'])
# dic['usesActs'] = dic['gradConsumer'] # A conservative setting by default for layers with input
return dic
def verify_img_size(self):
dic = self.dic
if dic['numInputs'][0] % dic['imgPixels'] != 0 or dic['imgSize'] * dic['imgSize'] != dic['imgPixels']:
raise LayerParsingError("Layer '%s': has %-d dimensional input, not interpretable as %d-channel images" % (dic['name'], dic['numInputs'][0], dic['channels']))
@staticmethod
def grad_consumers_below(dic):
if dic['gradConsumer']:
return True
if 'inputLayers' in dic:
return any(LayerWithInputParser.grad_consumers_below(l) for l in dic['inputLayers'])
def verify_no_grads(self):
if LayerWithInputParser.grad_consumers_below(self.dic):
raise LayerParsingError("Layer '%s': layers of type '%s' cannot propagate gradient and must not be placed over layers with parameters." % (self.dic['name'], self.dic['type']))
class NailbedLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['stride'] = mcp.safe_get_int(name, 'stride')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputsX'] = (dic['imgSize'] + dic['stride'] - 1) / dic['stride']
dic['start'] = (dic['imgSize'] - dic['stride'] * (dic['outputsX'] - 1)) / 2
dic['outputs'] = dic['channels'] * dic['outputsX']**2
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_img_size()
print "Initialized bed-of-nails layer '%s', producing %dx%d %d-channel output" % (name, dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class GaussianBlurLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['filterSize'] = mcp.safe_get_int(name, 'filterSize')
dic['stdev'] = mcp.safe_get_float(name, 'stdev')
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_int_in(dic['filterSize'], [3, 5, 7, 9])
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['filter'] = n.array([exp(-(dic['filterSize']/2 - i)**2 / float(2 * dic['stdev']**2))
for i in xrange(dic['filterSize'])], dtype=n.float32).reshape(1, dic['filterSize'])
dic['filter'] /= dic['filter'].sum()
self.verify_img_size()
if dic['filterSize'] > dic['imgSize']:
raise LayerParsingError("Later '%s': filter size (%d) must be smaller than image size (%d)." % (dic['name'], dic['filterSize'], dic['imgSize']))
print "Initialized Gaussian blur layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class ResizeLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['scale'] = mcp.safe_get_float(name, 'scale')
dic['tgtSize'] = int(floor(dic['imgSize'] / dic['scale']))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Really not recommended to use this for such severe scalings
self.verify_float_range(dic['scale'], 'scale', 0.5, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized resize layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class RandomScaleLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['maxScale'] = mcp.safe_get_float(name, 'maxScale')
dic['tgtSize'] = int(floor(dic['imgSize'] / dic['maxScale']))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_float_range(dic['maxScale'], 'maxScale', 1, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized random scale layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class ColorTransformLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / 3
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['channels'] = 3
dic['outputs'] = dic['numInputs'][0]
self.verify_img_size()
self.verify_no_grads()
return dic
class RGBToYUVLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
print "Initialized RGB --> YUV layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class RGBToLABLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
dic['center'] = mcp.safe_get_bool(name, 'center', default=False)
print "Initialized RGB --> LAB layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class NeuronLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
@staticmethod
def get_unused_layer_name(layers, wish):
layer_names = set([l['name'] for l in layers])
if wish not in layer_names:
return wish
for i in xrange(1, 100):
name = '%s.%d' % (wish, i)
if name not in layer_names:
return name
raise LayerParsingError("This is insane.")
def parse_neuron(self, neuron_str):
for n in neuron_parsers:
p = n.parse(neuron_str)
if p: # Successfully parsed neuron, return it
self.dic['neuron'] = p
self.dic['usesActs'] = self.dic['neuron']['usesActs']
self.dic['usesInputs'] = self.dic['neuron']['usesInputs']
return
# Could not parse neuron
# Print available neuron types
colnames = ['Neuron type', 'Function']
m = max(len(colnames[0]), OptionsParser._longest_value(neuron_parsers, key=lambda x:x.type)) + 2
ntypes = [OptionsParser._bold(colnames[0].ljust(m))] + [n.type.ljust(m) for n in neuron_parsers]
fnames = [OptionsParser._bold(colnames[1])] + [n.func_str for n in neuron_parsers]
usage_lines = NL.join(ntype + fname for ntype,fname in zip(ntypes, fnames))
raise LayerParsingError("Layer '%s': unable to parse neuron type '%s'. Valid neuron types: %sWhere neurons have parameters, they must be floats." % (self.dic['name'], neuron_str, NL + usage_lines + NL))
def detach_neuron_layer(self, idx, layers, layers_new):
dic = self.dic
self.set_defaults()
dic['name'] = NeuronLayerParser.get_unused_layer_name(layers, '%s_neuron' % layers[idx]['name'])
dic['type'] = 'neuron'
dic['inputs'] = layers[idx]['name']
dic['neuron'] = layers[idx]['neuron']
dic = self.parse(dic['name'], FakeConfigParser(dic), layers_new)
# Link upper layers to this new one
for l in layers[idx+1:]:
if 'inputs' in l:
l['inputs'] = [i + (i >= len(layers_new) - 1) for i in l['inputs']]
if 'weightSourceLayerIndices' in l:
l['weightSourceLayerIndices'] = [i + (i >= len(layers_new)) for i in l['weightSourceLayerIndices']]
layers_new += [dic]
# print "Initialized implicit neuron layer '%s', producing %d outputs" % (dic['name'], dic['outputs'])
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
self.parse_neuron(dic['neuron'])
dic['forceOwnActs'] = False
print "Initialized neuron layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
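# Config sketch (hypothetical layer names and values) showing how the neuron strings
# parsed above typically look in a layer definition file:
#   [fc10]
#   type=fc
#   neuron=relu                 # parameter-free neuron
#   [fc11]
#   type=fc
#   neuron=tanh[1.0,0.5]        # parametric neuron: f(x) = a * tanh(b * x)
# Unparseable strings fall through to the error path in parse_neuron, which lists
# the valid types from neuron_parsers at the bottom of this file.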
class EltwiseSumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['coeffs'] = mcp.safe_get_float_list(name, 'coeffs', default=[1.0] * len(dic['inputs']))
print "Initialized elementwise sum layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class EltwiseMaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(dic['inputs']) < 2:
raise LayerParsingError("Layer '%s': elementwise max layer must have at least 2 inputs, got %d." % (name, len(dic['inputs'])))
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
print "Initialized elementwise max layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class WeightLayerParser(LayerWithInputParser):
LAYER_PAT = re.compile(r'^\s*([^\s\[]+)(?:\[(\d+)\])?\s*$') # matches things like layername[5], etc
def __init__(self):
LayerWithInputParser.__init__(self)
@staticmethod
def get_layer_name(name_str):
m = WeightLayerParser.LAYER_PAT.match(name_str)
if not m:
return None
return m.group(1), m.group(2)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['epsW'] = mcp.safe_get_float_list(name, 'epsW')
dic['epsB'] = mcp.safe_get_float(name, 'epsB')
dic['momW'] = mcp.safe_get_float_list(name, 'momW')
dic['momB'] = mcp.safe_get_float(name, 'momB')
dic['wc'] = mcp.safe_get_float_list(name, 'wc')
self.verify_num_params(['epsW', 'momW', 'wc'])
dic['gradConsumer'] = dic['epsB'] > 0 or any(w > 0 for w in dic['epsW'])
@staticmethod
def unshare_weights(layer, layers, matrix_idx=None):
def unshare(layer, layers, indices):
for i in indices:
if layer['weightSourceLayerIndices'][i] >= 0:
src_name = layers[layer['weightSourceLayerIndices'][i]]['name']
src_matrix_idx = layer['weightSourceMatrixIndices'][i]
layer['weightSourceLayerIndices'][i] = -1
layer['weightSourceMatrixIndices'][i] = -1
layer['weights'][i] = layer['weights'][i].copy()
layer['weightsInc'][i] = n.zeros_like(layer['weights'][i])
print "Unshared weight matrix %s[%d] from %s[%d]." % (layer['name'], i, src_name, src_matrix_idx)
else:
print "Weight matrix %s[%d] already unshared." % (layer['name'], i)
if 'weightSourceLayerIndices' in layer:
unshare(layer, layers, range(len(layer['inputs'])) if matrix_idx is None else [matrix_idx])
# Load weight/biases initialization module
def call_init_func(self, param_name, shapes, input_idx=-1):
dic = self.dic
func_pat = re.compile('^([^\.]+)\.([^\(\)]+)\s*(?:\(([^,]+(?:,[^,]+)*)\))?$')
m = func_pat.match(dic[param_name])
if not m:
raise LayerParsingError("Layer '%s': '%s' parameter must have format 'moduleName.functionName(param1,param2,...)'; got: %s." % (dic['name'], param_name, dic['initWFunc']))
module, func = m.group(1), m.group(2)
params = m.group(3).split(',') if m.group(3) is not None else []
try:
mod = __import__(module)
return getattr(mod, func)(dic['name'], input_idx, shapes, params=params) if input_idx >= 0 else getattr(mod, func)(dic['name'], shapes, params=params)
except (ImportError, AttributeError, TypeError), e:
raise LayerParsingError("Layer '%s': %s." % (dic['name'], e))
def make_weights(self, initW, rows, cols, order='C'):
dic = self.dic
dic['weights'], dic['weightsInc'] = [], []
if dic['initWFunc']: # Initialize weights from user-supplied python function
# Initialization function is supplied in the format
# module.func
for i in xrange(len(dic['inputs'])):
dic['weights'] += [self.call_init_func('initWFunc', (rows[i], cols[i]), input_idx=i)]
if type(dic['weights'][i]) != n.ndarray:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], i, dic['initWFunc'], type(dic['weights'][i])))
if dic['weights'][i].dtype != n.float32:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must weight matrices consisting of single-precision floats. Got: %s." % (dic['name'], i, dic['initWFunc'], dic['weights'][i].dtype))
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s[%d]': weight matrix returned by weight initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], i, dic['initWFunc'], (rows[i], cols[i]), dic['weights'][i].shape))
# Convert to desired order
dic['weights'][i] = n.require(dic['weights'][i], requirements=order)
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
print "Layer '%s[%d]' initialized weight matrices from function %s" % (dic['name'], i, dic['initWFunc'])
else:
for i in xrange(len(dic['inputs'])):
if dic['weightSourceLayerIndices'][i] >= 0: # Shared weight matrix
src_layer = self.prev_layers[dic['weightSourceLayerIndices'][i]] if dic['weightSourceLayerIndices'][i] < len(self.prev_layers) else dic
dic['weights'] += [src_layer['weights'][dic['weightSourceMatrixIndices'][i]]]
dic['weightsInc'] += [src_layer['weightsInc'][dic['weightSourceMatrixIndices'][i]]]
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s': weight sharing source matrix '%s' has shape %dx%d; should be %dx%d."
% (dic['name'], dic['weightSource'][i], dic['weights'][i].shape[0], dic['weights'][i].shape[1], rows[i], cols[i]))
print "Layer '%s' initialized weight matrix %d from %s" % (dic['name'], i, dic['weightSource'][i])
else:
print 'rows is ' + str(rows[i])
dic['weights'] += [n.array(initW[i] * nr.randn(rows[i], cols[i]), dtype=n.single, order=order)]
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
print ' finish make_weight'
def make_biases(self, rows, cols, order='C'):
dic = self.dic
if dic['initBFunc']:
dic['biases'] = self.call_init_func('initBFunc', (rows, cols))
if type(dic['biases']) != n.ndarray:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], dic['initBFunc'], type(dic['biases'])))
if dic['biases'].dtype != n.float32:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object consisting of single-precision floats. Got: %s." % (dic['name'], dic['initBFunc'], dic['biases'].dtype))
if dic['biases'].shape != (rows, cols):
raise LayerParsingError("Layer '%s': bias vector returned by bias initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], dic['initBFunc'], (rows, cols), dic['biases'].shape))
dic['biases'] = n.require(dic['biases'], requirements=order)
print "Layer '%s' initialized bias vector from function %s" % (dic['name'], dic['initBFunc'])
else:
dic['biases'] = dic['initB'] * n.ones((rows, cols), order='C', dtype=n.single)
dic['biasesInc'] = n.zeros_like(dic['biases'])
def make_biases_specific(self, numBiases, rows, cols, order='C'):
dic = self.dic
dic['biases'] = dic['initB'] * n.ones((numBiases * rows, cols), order='C', dtype=n.single)
dic['biasesInc'] = n.zeros((rows, cols), order='C', dtype=n.single)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['gradConsumer'] = True
dic['initW'] = mcp.safe_get_float_list(name, 'initW', default=0.01)
dic['initB'] = mcp.safe_get_float(name, 'initB', default=0)
dic['initWFunc'] = mcp.safe_get(name, 'initWFunc', default="")
dic['initBFunc'] = mcp.safe_get(name, 'initBFunc', default="")
# Find shared weight matrices
dic['weightSource'] = mcp.safe_get_list(name, 'weightSource', default=[''] * len(dic['inputs']))
#self.verify_num_params(['initW', 'weightSource'])
prev_names = map(lambda x: x['name'], prev_layers)
dic['weightSourceLayerIndices'] = []
dic['weightSourceMatrixIndices'] = []
for i, src_name in enumerate(dic['weightSource']):
src_layer_idx = src_layer_matrix_idx = -1
if src_name != '':
src_layer_match = WeightLayerParser.get_layer_name(src_name)
if src_layer_match is None:
raise LayerParsingError("Layer '%s': unable to parse weight sharing source '%s'. Format is layer[idx] or just layer, in which case idx=0 is used." % (name, src_name))
src_layer_name = src_layer_match[0]
src_layer_matrix_idx = int(src_layer_match[1]) if src_layer_match[1] is not None else 0
if prev_names.count(src_layer_name) == 0 and src_layer_name != name:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' does not exist." % (name, src_layer_name))
src_layer_idx = prev_names.index(src_layer_name) if src_layer_name != name else len(prev_names)
src_layer = prev_layers[src_layer_idx] if src_layer_name != name else dic
if src_layer['type'] != dic['type']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' is of type '%s'; should be '%s'." % (name, src_layer_name, src_layer['type'], dic['type']))
if src_layer_name != name and len(src_layer['weights']) <= src_layer_matrix_idx:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' has %d weight matrices, but '%s[%d]' requested." % (name, src_layer_name, len(src_layer['weights']), src_name, src_layer_matrix_idx))
if src_layer_name == name and src_layer_matrix_idx >= i:
raise LayerParsingError("Layer '%s': weight sharing source '%s[%d]' not defined yet." % (name, name, src_layer_matrix_idx))
dic['weightSourceLayerIndices'] += [src_layer_idx]
dic['weightSourceMatrixIndices'] += [src_layer_matrix_idx]
return dic
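# Hedged example (made-up layer names) of the weightSource syntax handled above:
#   weightSource=conv1          # share matrix 0 of layer 'conv1'
#   weightSource=conv1[1]       # share matrix 1 of layer 'conv1'
#   weightSource=,conv1[0]      # assuming comma-separated lists: first input
#                               # unshared, second shares conv1[0]
# A layer may also reference itself, but only matrices with a smaller input index
# than the one being defined (see the "not defined yet" check above).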
class FCLayerParser(WeightLayerParser):
def __init__(self):
print "__init the FCLAYER"
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
#print "dic['inputs] = " + str(dic['inputs'])
dic['usesActs'] = False
dic['outputs'] = mcp.safe_get_int(name, 'outputs')
# droprate
#dic['dropRate'] = mcp.safe_get_float(name, 'dropRate', default=0.0)
dic['numBiases'] = mcp.safe_get_int(name, 'numBiases', default=1)
dic['specBiases'] = mcp.safe_get_int(name, 'specBiases', default=0)
self.verify_num_range(dic['outputs'], 'outputs', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']] * len(dic['numInputs']), order='F')
print "finish make_weights"
if int(dic['specBiases']) == 0:
self.make_biases(1, dic['outputs'], order='C')
else:
self.make_biases_specific(dic['numBiases'], 1, dic['outputs'], order='C')
print "Initialized fully-connected layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class LocalLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
# Convert convolutional layer to unshared, locally-connected layer
@staticmethod
def conv_to_local(layers, idx):
layer = layers[idx]
if layer['type'] == 'conv':
layer['type'] = 'local'
for inp in xrange(len(layer['inputs'])):
src_layer_idx = layer['weightSourceLayerIndices'][inp]
if layer['weightSourceLayerIndices'][inp] >= 0:
src_layer = layers[src_layer_idx]
src_matrix_idx = layer['weightSourceMatrixIndices'][inp]
LocalLayerParser.conv_to_local(layers, src_layer_idx)
for w in ('weights', 'weightsInc'):
layer[w][inp] = src_layer[w][src_matrix_idx]
else:
layer['weights'][inp] = n.require(n.reshape(n.tile(n.reshape(layer['weights'][inp], (1, n.prod(layer['weights'][inp].shape))), (layer['modules'], 1)),
(layer['modules'] * layer['filterChannels'][inp] * layer['filterPixels'][inp], layer['filters'])),
requirements='C')
layer['weightsInc'][inp] = n.zeros_like(layer['weights'][inp])
if layer['sharedBiases']:
layer['biases'] = n.require(n.repeat(layer['biases'], layer['modules'], axis=0), requirements='C')
layer['biasesInc'] = n.zeros_like(layer['biases'])
print "Converted layer '%s' from convolutional to unshared, locally-connected" % layer['name']
# Also call this function on any layers sharing my weights
for i, l in enumerate(layers):
if 'weightSourceLayerIndices' in l and idx in l['weightSourceLayerIndices']:
LocalLayerParser.conv_to_local(layers, i)
return layer
# Returns (groups, filterChannels) array that represents the set
# of image channels to which each group is connected
def gen_rand_conns(self, groups, channels, filterChannels, inputIdx):
dic = self.dic
overSample = groups * filterChannels / channels
filterConns = [x for i in xrange(overSample) for x in nr.permutation(range(channels))]
if dic['initCFunc']: # Initialize connectivity from outside source
filterConns = self.call_init_func('initCFunc', (groups, channels, filterChannels), input_idx=inputIdx)
if len(filterConns) != overSample * channels:
raise LayerParsingError("Layer '%s[%d]': random connectivity initialization function %s must return list of length <groups> * <filterChannels> = %d; got: %d" % (dic['name'], inputIdx, dic['initCFunc'], len(filterConns)))
if any(c not in range(channels) for c in filterConns):
raise LayerParsingError("Layer '%s[%d]': random connectivity initialization function %s must return list of channel indices in the range 0-<channels-1> = 0-%d." % (dic['name'], inputIdx, dic['initCFunc'], channels-1))
# Every "channels" sub-slice should be a permutation of range(channels)
if any(len(set(c)) != len(c) for c in [filterConns[o*channels:(o+1)*channels] for o in xrange(overSample)]):
raise LayerParsingError("Layer '%s[%d]': random connectivity initialization function %s must return list of channel indices such that every non-overlapping sub-list of <channels> = %d elements is a permutation of the integers 0-<channels-1> = 0-%d." % (dic['name'], inputIdx, dic['initCFunc'], channels, channels-1))
elif dic['weightSourceLayerIndices'][inputIdx] >= 0: # Shared weight matrix
src_layer = self.prev_layers[dic['weightSourceLayerIndices'][inputIdx]] if dic['weightSourceLayerIndices'][inputIdx] < len(self.prev_layers) else dic
src_inp = dic['weightSourceMatrixIndices'][inputIdx]
if 'randSparse' not in src_layer or not src_layer['randSparse']:
raise LayerParsingError("Layer '%s[%d]': randSparse is true in this layer but false in weight sharing source layer '%s[%d]'." % (dic['name'], inputIdx, src_layer['name'], src_inp))
if (groups, channels, filterChannels) != (src_layer['groups'][src_inp], src_layer['channels'][src_inp], src_layer['filterChannels'][src_inp]):
raise LayerParsingError("Layer '%s[%d]': groups, channels, filterChannels set to %d, %d, %d, respectively. Does not match setting in weight sharing source layer '%s[%d]': %d, %d, %d." % (dic['name'], inputIdx, groups, channels, filterChannels, src_layer['name'], src_inp, src_layer['groups'][src_inp], src_layer['channels'][src_inp], src_layer['filterChannels'][src_inp]))
filterConns = src_layer['filterConns'][src_inp]
return filterConns
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesActs'] = False
# Supplied values
dic['channels'] = mcp.safe_get_int_list(name, 'channels')
dic['padding'] = mcp.safe_get_int_list(name, 'padding', default=[0]*len(dic['inputs']))
dic['stride'] = mcp.safe_get_int_list(name, 'stride', default=[1]*len(dic['inputs']))
dic['filterSize'] = mcp.safe_get_int_list(name, 'filterSize')
dic['filters'] = mcp.safe_get_int_list(name, 'filters')
dic['groups'] = mcp.safe_get_int_list(name, 'groups', default=[1]*len(dic['inputs']))
dic['randSparse'] = mcp.safe_get_bool_list(name, 'randSparse', default=[False]*len(dic['inputs']))
dic['initW'] = mcp.safe_get_float_list(name, 'initW')
dic['initCFunc'] = mcp.safe_get(name, 'initCFunc', default='')
self.verify_num_params(['channels', 'padding', 'stride', 'filterSize', \
'filters', 'groups', 'randSparse', 'initW'])
self.verify_num_range(dic['stride'], 'stride', 1, None)
self.verify_num_range(dic['filterSize'],'filterSize', 1, None)
self.verify_num_range(dic['padding'], 'padding', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['groups'], 'groups', 1, None)
# Computed values
dic['imgPixels'] = [numInputs/channels for numInputs,channels in zip(dic['numInputs'], dic['channels'])]
dic['imgSize'] = [int(n.sqrt(imgPixels)) for imgPixels in dic['imgPixels']]
self.verify_num_range(dic['imgSize'], 'imgSize', 1, None)
dic['filters'] = [filters*groups for filters,groups in zip(dic['filters'], dic['groups'])]
dic['filterPixels'] = [filterSize**2 for filterSize in dic['filterSize']]
print dic['imgSize']
dic['modulesX'] = [1 + int(ceil((2 * padding + imgSize - filterSize) / float(stride))) for padding,imgSize,filterSize,stride in zip(dic['padding'], dic['imgSize'], dic['filterSize'], dic['stride'])]
dic['filterChannels'] = [channels/groups for channels,groups in zip(dic['channels'], dic['groups'])]
if max(dic['randSparse']): # When randSparse is turned on for any input, filterChannels must be given for all of them
dic['filterChannels'] = mcp.safe_get_int_list(name, 'filterChannels', default=dic['filterChannels'])
self.verify_num_params(['filterChannels'])
if len(set(dic['modulesX'])) != 1 or len(set(dic['filters'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must produce equally-dimensioned output. Dimensions are: %s." % (name, ", ".join("%dx%dx%d" % (filters, modulesX, modulesX) for filters,modulesX in zip(dic['filters'], dic['modulesX']))))
dic['modulesX'] = dic['modulesX'][0]
dic['modules'] = dic['modulesX']**2
dic['filters'] = dic['filters'][0]
dic['outputs'] = dic['modules'] * dic['filters']
dic['filterConns'] = [[]] * len(dic['inputs'])
for i in xrange(len(dic['inputs'])):
if dic['numInputs'][i] % dic['imgPixels'][i] != 0 or dic['imgSize'][i] * dic['imgSize'][i] != dic['imgPixels'][i]:
raise LayerParsingError("Layer '%s[%d]': has %-d dimensional input, not interpretable as square %d-channel images" % (name, i, dic['numInputs'][i], dic['channels'][i]))
if dic['channels'][i] > 3 and dic['channels'][i] % 4 != 0:
raise LayerParsingError("Layer '%s[%d]': number of channels must be smaller than 4 or divisible by 4" % (name, i))
if dic['filterSize'][i] > 2 * dic['padding'][i] + dic['imgSize'][i]:
raise LayerParsingError("Layer '%s[%d]': filter size (%d) greater than image size + 2 * padding (%d)" % (name, i, dic['filterSize'][i], 2 * dic['padding'][i] + dic['imgSize'][i]))
if dic['randSparse'][i]: # Random sparse connectivity requires some extra checks
if dic['groups'][i] == 1:
raise LayerParsingError("Layer '%s[%d]': number of groups must be greater than 1 when using random sparse connectivity" % (name, i))
self.verify_divisible(dic['channels'][i], dic['filterChannels'][i], 'channels', 'filterChannels', input_idx=i)
self.verify_divisible(dic['filterChannels'][i], 4, 'filterChannels', input_idx=i)
self.verify_divisible( dic['groups'][i]*dic['filterChannels'][i], dic['channels'][i], 'groups * filterChannels', 'channels', input_idx=i)
dic['filterConns'][i] = self.gen_rand_conns(dic['groups'][i], dic['channels'][i], dic['filterChannels'][i], i)
else:
if dic['groups'][i] > 1:
self.verify_divisible(dic['channels'][i], 4*dic['groups'][i], 'channels', '4 * groups', input_idx=i)
self.verify_divisible(dic['channels'][i], dic['groups'][i], 'channels', 'groups', input_idx=i)
self.verify_divisible(dic['filters'], 16*dic['groups'][i], 'filters * groups', input_idx=i)
dic['padding'][i] = -dic['padding'][i]
dic['overSample'] = [groups*filterChannels/channels for groups,filterChannels,channels in zip(dic['groups'], dic['filterChannels'], dic['channels'])]
return dic
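# Worked example (assumed values, illustrative only) of the module geometry above:
# imgSize=32, filterSize=5, stride=1, padding=2 gives
#   modulesX = 1 + ceil((2*2 + 32 - 5) / 1.0) = 1 + 31 = 32
# so with 'same'-style padding the filter is applied at every pixel position and
# outputs = modules * filters = 32*32 * filters.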
class ConvLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
dic['partialSum'] = mcp.safe_get_int(name, 'partialSum')
dic['sharedBiases'] = mcp.safe_get_bool(name, 'sharedBiases', default=True)
if dic['partialSum'] != 0 and dic['modules'] % dic['partialSum'] != 0:
raise LayerParsingError("Layer '%s': convolutional layer produces %dx%d=%d outputs per filter, but given partialSum parameter (%d) does not divide this number" % (name, dic['modulesX'], dic['modulesX'], dic['modules'], dic['partialSum']))
num_biases = dic['filters'] if dic['sharedBiases'] else dic['modules']*dic['filters']
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
self.make_weights(dic['initW'], eltmult(dic['filterPixels'], dic['filterChannels']), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(num_biases, 1, order='C')
print "Initialized convolutional layer '%s', producing %dx%d %d-channel output" % (name, dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class LocalUnsharedLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
scmult = lambda x, lst: [x * l for l in lst]
self.make_weights(dic['initW'], scmult(dic['modules'], eltmult(dic['filterPixels'], dic['filterChannels'])), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(dic['modules'] * dic['filters'], 1, order='C')
print "Initialized locally-connected layer '%s', producing %dx%d %d-channel output" % (name, dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class DataLayerParser(LayerParser):
def __init__(self):
LayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['dataIdx'] = mcp.safe_get_int(name, 'dataIdx')
dic['outputs'] = model.train_data_provider.get_data_dims(idx=dic['dataIdx'])
print "data['outputs'] = " + str(dic['outputs'])
print "Initialized data layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class SoftmaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = prev_layers[dic['inputs'][0]]['outputs']
print "Initialized softmax layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class PoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputsX'] = mcp.safe_get_int(name, 'outputsX', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
# revised by dudalong, 20120126, revise 'pool' != 'avg' to dic['pool'] != 'avg'
# Avg pooler does not use its acts or inputs
dic['usesActs'] = dic['pool'] != 'avg'
dic['usesInputs'] = dic['pool'] != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['sizeX'])
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
if LayerWithInputParser.grad_consumers_below(dic):
self.verify_divisible(dic['channels'], 16, 'channels')
self.verify_str_in(dic['pool'], ['max', 'avg'])
self.verify_img_size()
if dic['outputsX'] <= 0:
dic['outputsX'] = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1
dic['outputs'] = dic['outputsX']**2 * dic['channels']
print "Initialized %s-pooling layer '%s', producing %dx%d %d-channel output" % (dic['pool'], name, dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
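# Worked example (assumed values) of the pooling geometry above: imgSize=32,
# sizeX=3, stride=2, start=0 gives
#   outputsX = ceil((32 - 0 - 3) / 2.0) + 1 = 15 + 1 = 16
# and outputs = 16*16 * channels. Supplying a positive outputsX in the config
# overrides this computed value.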
class NormLayerParser(LayerWithInputParser):
RESPONSE_NORM = 'response'
CONTRAST_NORM = 'contrast'
CROSSMAP_RESPONSE_NORM = 'cross-map response'
def __init__(self, norm_type):
LayerWithInputParser.__init__(self, num_inputs=1)
self.norm_type = norm_type
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['scale'] = mcp.safe_get_float(name, 'scale')
dic['scale'] /= dic['size'] if self.norm_type == self.CROSSMAP_RESPONSE_NORM else dic['size']**2
dic['pow'] = mcp.safe_get_float(name, 'pow')
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['blocked'] = mcp.safe_get_bool(name, 'blocked', default=False)
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
# Contrast normalization layer does not use its inputs
dic['usesInputs'] = self.norm_type != self.CONTRAST_NORM
print dic.keys()
self.verify_num_range(dic['channels'], 'channels', 1, None)
if self.norm_type == self.CROSSMAP_RESPONSE_NORM:
self.verify_num_range(dic['size'], 'size', 2, dic['channels'])
if dic['channels'] % 16 != 0:
raise LayerParsingError("Layer '%s': number of channels must be divisible by 16 when using crossMap" % name)
else:
self.verify_num_range(dic['size'], 'size', 1, dic['imgSize'])
if self.norm_type != self.CROSSMAP_RESPONSE_NORM and dic['channels'] > 3 and dic['channels'] % 4 != 0:
raise LayerParsingError("Layer '%s': number of channels must be smaller than 4 or divisible by 4" % name)
self.verify_img_size()
dic['outputs'] = dic['imgPixels'] * dic['channels']
print "Initialized %s-normalization layer '%s', producing %dx%d %d-channel output" % (self.norm_type, name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class CostParser(LayerWithInputParser):
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
del dic['neuron']
return dic
def add_params(self, mcp):
LayerWithInputParser.add_params(self,mcp)
dic, name = self.dic, self.dic['name']
dic['coeff'] = mcp.safe_get_float(name, 'coeff')
class LogregCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != 1: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of first input must be 1" % name)
if prev_layers[dic['inputs'][1]]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, prev_layers[dic['inputs'][1]]['name'], model.train_data_provider.get_num_classes()))
print "Initialized logistic regression cost '%s'" % name
return dic
class SumOfSquaresCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized sum-of-squares cost '%s'" % name
return dic
class CrossCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized sum-of-squares cost '%s'" % name
return dic
class CrossEntropyParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized CrossEntropy cost '%s'" % name
return dic
# All the layer parsers
layer_parsers = {'data': lambda : DataLayerParser(),
'fc': lambda : FCLayerParser(),
'conv': lambda : ConvLayerParser(),
'local': lambda : LocalUnsharedLayerParser(),
'softmax': lambda : SoftmaxLayerParser(),
'eltsum': lambda : EltwiseSumLayerParser(),
'eltmax': lambda : EltwiseMaxLayerParser(),
'neuron': lambda : NeuronLayerParser(),
'pool': lambda : PoolLayerParser(),
'rnorm': lambda : NormLayerParser(NormLayerParser.RESPONSE_NORM),
'cnorm': lambda : NormLayerParser(NormLayerParser.CONTRAST_NORM),
'cmrnorm': lambda : NormLayerParser(NormLayerParser.CROSSMAP_RESPONSE_NORM),
'nailbed': lambda : NailbedLayerParser(),
'blur': lambda : GaussianBlurLayerParser(),
'resize': lambda : ResizeLayerParser(),
'rgb2yuv': lambda : RGBToYUVLayerParser(),
'rgb2lab': lambda : RGBToLABLayerParser(),
'rscale': lambda : RandomScaleLayerParser(),
'cost.logreg': lambda : LogregCostParser(),
'cost.sum2': lambda : SumOfSquaresCostParser(),
'cost.cross': lambda : CrossCostParser(),
'cost.crossentropy': lambda : CrossEntropyParser()}
# All the neuron parsers
# This isn't a name --> parser mapping as the layer parsers above because neurons don't have fixed names.
# A user may write tanh[0.5,0.25], etc.
neuron_parsers = sorted([NeuronParser('ident', 'f(x) = x', uses_acts=False, uses_inputs=False),
NeuronParser('logistic', 'f(x) = 1 / (1 + e^-x)', uses_acts=True, uses_inputs=False),
NeuronParser('abs', 'f(x) = |x|', uses_acts=False, uses_inputs=True),
NeuronParser('relu', 'f(x) = max(0, x)', uses_acts=True, uses_inputs=False),
NeuronParser('softrelu', 'f(x) = log(1 + e^x)', uses_acts=True, uses_inputs=False),
NeuronParser('square', 'f(x) = x^2', uses_acts=False, uses_inputs=True),
NeuronParser('sqrt', 'f(x) = sqrt(x)', uses_acts=True, uses_inputs=False),
ParamNeuronParser('tanh[a,b]', 'f(x) = a * tanh(b * x)', uses_acts=True, uses_inputs=False),
ParamNeuronParser('brelu[a]', 'f(x) = min(a, max(0, x))', uses_acts=True, uses_inputs=False),
ParamNeuronParser('linear[a,b]', 'f(x) = a * x + b', uses_acts=True, uses_inputs=False)],
key=lambda x:x.type)
|
phecy/Deep-Neural-Network-Ranking-Dropout
|
train-ranknet/layer.py
|
Python
|
gpl-3.0
| 66,218
|
[
"Gaussian",
"NEURON"
] |
4f48973302bf122e14478c144c8b0796814ad029b8d57988e84c16be54200978
|
# -*- coding: UTF-8 -*-
"""
living
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
"""
__version__ = '4.0'
content = {
'living_section': ['Living', 'My life', 'Family', 'Home', 'Kids',
],
'living_headline': ['<#food_headline#>'],
'food_headline':[
'Fresh Takes from Island Favorites',
'Grab a Bite - Breakfast & Lunch',
u'Eating Out on Martha’s Vineyard',
'Empower your portal with Real-Time News Feeds and Search',
'Slow Food',
'1-year-old farmers market spreads under 200-year-old oak',
'Cheese Recall at <#town_us#> Farmers Market',
u'Farmers Market Vendor Spotlight: Norm’s Apiary',
'Farmers Market Report: Blueberries Today',
'Trend: Raspberries and Cabbage Today',
u'The Green Room: BBMG’s <#name#> Interviews Slow Food Founder',
'Prawn Singapore Rice Noodles!',
'The <#town_us#> NAPW Chapter met for dinner',
'Wine tasting with <#name#>',
'To market, to <#town_us#> market',
'Our list of 40 farmers markets across',
'Stork Craft Custom Tuscany Espresso Finish Glider and Ottoman',
'Free lower lumbar pillow, Chocolate Cushions',
'<#time_months#> 2-<#time_monthday#> at <#town_us#> farmers markets',
'Cheese Recall at <#town_us#> Farmers Market',
'Farmers market meeting set at <#town_us#>',
'At <#town_us#> Farmers Market Vendors are Tempting Taste Buds',
],
'living_ankeiler': [
u"""Scottish Bakehouse on Martha’s Vineyard is a favorite for egg sandwiches, fresh baked pastries, sandwiches, salads, soups and locally sourced main dishes offer islanders and visitors a one-stop option for any meal or snack daily. Offering gluten free options. Catering and take out available. Farmers market on site during the summer!""",
],
}
|
LettError/filibuster
|
Lib/filibuster/content/living.py
|
Python
|
mit
| 2,015
|
[
"ESPResSo"
] |
3c8949d5be7d54d5a7068c5c3f13327fa568026343aaae3ad2eed3d5c8a3c84a
|
# -*- coding: utf-8 -*-
import os
import re
import uuid
import urllib
import logging
import hashlib
import calendar
import datetime
import urlparse
import subprocess
import unicodedata
from HTMLParser import HTMLParser
from collections import OrderedDict
import pytz
import blinker
from flask import request
from dulwich.repo import Repo
from dulwich.object_store import tree_lookup_path
from modularodm import Q
from modularodm import fields
from modularodm.validators import MaxLengthValidator
from modularodm.exceptions import ValidationTypeError
from modularodm.exceptions import ValidationValueError
from framework import status
from framework.mongo import ObjectId
from framework.mongo import StoredObject
from framework.addons import AddonModelMixin
from framework.auth import get_user, User, Auth
from framework.exceptions import PermissionsError
from framework.guid.model import GuidStoredObject
from framework.auth.utils import privacy_info_handle
from framework.analytics import tasks as piwik_tasks
from framework.mongo.utils import to_mongo, to_mongo_key
from framework.analytics import (
get_basic_counters, increment_user_activity_counters
)
from website import language
from website import settings
from website.util import web_url_for
from website.util import api_url_for
from website.exceptions import NodeStateError
from website.util.permissions import expand_permissions
from website.util.permissions import CREATOR_PERMISSIONS
from website.project.metadata.schemas import OSF_META_SCHEMAS
from website.util.permissions import DEFAULT_CONTRIBUTOR_PERMISSIONS
html_parser = HTMLParser()
logger = logging.getLogger(__name__)
def utc_datetime_to_timestamp(dt):
return float(
str(calendar.timegm(dt.utctimetuple())) + '.' + str(dt.microsecond)
)
def normalize_unicode(ustr):
return unicodedata.normalize('NFKD', ustr)\
.encode('ascii', 'ignore')
def has_anonymous_link(node, auth):
"""check if the node is anonymous to the user
:param Node node: Node which the user wants to visit
:param str link: any view-only link in the current url
:return bool anonymous: Whether the node is anonymous to the user or not
"""
view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
if not view_only_link:
return False
if node.is_public:
return False
return any(
link.anonymous
for link in node.private_links_active
if link.key == view_only_link
)
signals = blinker.Namespace()
contributor_added = signals.signal('contributor-added')
unreg_contributor_added = signals.signal('unreg-contributor-added')
class MetaSchema(StoredObject):
_id = fields.StringField(default=lambda: str(ObjectId()))
name = fields.StringField()
schema = fields.DictionaryField()
category = fields.StringField()
# Version of the Knockout metadata renderer to use (e.g. if data binds
# change)
metadata_version = fields.IntegerField()
# Version of the schema to use (e.g. if questions, responses change)
schema_version = fields.IntegerField()
def ensure_schemas(clear=True):
"""Import meta-data schemas from JSON to database, optionally clearing
database first.
:param clear: Clear schema database before import
"""
if clear:
MetaSchema.remove()
for schema in OSF_META_SCHEMAS:
try:
MetaSchema.find_one(
Q('name', 'eq', schema['name']) &
Q('schema_version', 'eq', schema['schema_version'])
)
except:
schema['name'] = schema['name'].replace(' ', '_')
schema_obj = MetaSchema(**schema)
schema_obj.save()
class MetaData(GuidStoredObject):
_id = fields.StringField(primary=True)
target = fields.AbstractForeignField(backref='metadata')
data = fields.DictionaryField()
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
def validate_comment_reports(value, *args, **kwargs):
for key, val in value.iteritems():
if not User.load(key):
raise ValidationValueError('Keys must be user IDs')
if not isinstance(val, dict):
raise ValidationTypeError('Values must be dictionaries')
if 'category' not in val or 'text' not in val:
raise ValidationValueError(
'Values must include `category` and `text` keys'
)
class Comment(GuidStoredObject):
_id = fields.StringField(primary=True)
user = fields.ForeignField('user', required=True, backref='commented')
node = fields.ForeignField('node', required=True, backref='comment_owner')
target = fields.AbstractForeignField(required=True, backref='commented')
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
modified = fields.BooleanField()
is_deleted = fields.BooleanField(default=False)
content = fields.StringField()
# Dictionary field mapping user IDs to dictionaries of report details:
# {
# 'icpnw': {'category': 'hate', 'message': 'offensive'},
# 'cdi38': {'category': 'spam', 'message': 'godwins law'},
# }
reports = fields.DictionaryField(validate=validate_comment_reports)
@classmethod
def create(cls, auth, **kwargs):
comment = cls(**kwargs)
comment.save()
comment.node.add_log(
NodeLog.COMMENT_ADDED,
{
'project': comment.node.parent_id,
'node': comment.node._id,
'user': comment.user._id,
'comment': comment._id,
},
auth=auth,
save=False,
)
comment.node.save()
return comment
def edit(self, content, auth, save=False):
self.content = content
self.modified = True
self.node.add_log(
NodeLog.COMMENT_UPDATED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def delete(self, auth, save=False):
self.is_deleted = True
self.node.add_log(
NodeLog.COMMENT_REMOVED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def undelete(self, auth, save=False):
self.is_deleted = False
self.node.add_log(
NodeLog.COMMENT_ADDED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def report_abuse(self, user, save=False, **kwargs):
"""Report that a comment is abuse.
:param User user: User submitting the report
:param bool save: Save changes
:param dict kwargs: Report details
:raises: ValueError if the user submitting abuse is the same as the
user who posted the comment
"""
if user == self.user:
raise ValueError
self.reports[user._id] = kwargs
if save:
self.save()
def unreport_abuse(self, user, save=False):
"""Revoke report of abuse.
:param User user: User who submitted the report
:param bool save: Save changes
:raises: ValueError if user has not reported comment as abuse
"""
try:
self.reports.pop(user._id)
except KeyError:
raise ValueError('User has not reported comment as abuse')
if save:
self.save()
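# Hedged usage sketch (names are illustrative, not from this codebase's callers):
# the report kwargs become the per-user report dict validated by
# validate_comment_reports above, so both 'category' and 'text' must be present:
#
#   comment.report_abuse(reporting_user, save=True,
#                        category='spam', text='link farm in comment body')
#   comment.unreport_abuse(reporting_user, save=True)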
class ApiKey(StoredObject):
# The key is also its primary key
_id = fields.StringField(
primary=True,
default=lambda: str(ObjectId()) + str(uuid.uuid4())
)
# A display name
label = fields.StringField()
@property
def user(self):
return self.user__keyed[0] if self.user__keyed else None
@property
def node(self):
return self.node__keyed[0] if self.node__keyed else None
class NodeLog(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
date = fields.DateTimeField(default=datetime.datetime.utcnow)
action = fields.StringField()
params = fields.DictionaryField()
user = fields.ForeignField('user', backref='created')
api_key = fields.ForeignField('apikey', backref='created')
foreign_user = fields.StringField()
DATE_FORMAT = '%m/%d/%Y %H:%M UTC'
# Log action constants
CREATED_FROM = 'created_from'
PROJECT_CREATED = 'project_created'
PROJECT_REGISTERED = 'project_registered'
PROJECT_DELETED = 'project_deleted'
NODE_CREATED = 'node_created'
NODE_FORKED = 'node_forked'
NODE_REMOVED = 'node_removed'
POINTER_CREATED = 'pointer_created'
POINTER_FORKED = 'pointer_forked'
POINTER_REMOVED = 'pointer_removed'
WIKI_UPDATED = 'wiki_updated'
WIKI_DELETED = 'wiki_deleted'
WIKI_RENAMED = 'wiki_renamed'
CONTRIB_ADDED = 'contributor_added'
CONTRIB_REMOVED = 'contributor_removed'
CONTRIB_REORDERED = 'contributors_reordered'
PERMISSIONS_UPDATED = 'permissions_updated'
MADE_PRIVATE = 'made_private'
MADE_PUBLIC = 'made_public'
TAG_ADDED = 'tag_added'
TAG_REMOVED = 'tag_removed'
EDITED_TITLE = 'edit_title'
EDITED_DESCRIPTION = 'edit_description'
FILE_ADDED = 'file_added'
FILE_UPDATED = 'file_updated'
FILE_REMOVED = 'file_removed'
FILE_RESTORED = 'file_restored'
ADDON_ADDED = 'addon_added'
ADDON_REMOVED = 'addon_removed'
COMMENT_ADDED = 'comment_added'
COMMENT_REMOVED = 'comment_removed'
COMMENT_UPDATED = 'comment_updated'
MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
def __repr__(self):
return ('<NodeLog({self.action!r}, params={self.params!r}) '
'with id {self._id!r}>').format(self=self)
@property
def node(self):
"""Return the :class:`Node` associated with this log."""
return (
Node.load(self.params.get('node')) or
Node.load(self.params.get('project'))
)
@property
def tz_date(self):
'''Return the timezone-aware date.
'''
# Date should always be defined, but a few logs in production are
# missing dates; return None and log error if date missing
if self.date:
return self.date.replace(tzinfo=pytz.UTC)
logger.error('Date missing on NodeLog {}'.format(self._primary_key))
@property
def formatted_date(self):
'''Return the timezone-aware, ISO-formatted string representation of
this log's date.
'''
if self.tz_date:
return self.tz_date.isoformat()
def resolve_node(self, node):
"""A single `NodeLog` record may be attached to multiple `Node` records
(parents, forks, registrations, etc.), so the node that the log refers
to may not be the same as the node the user is viewing. Use
`resolve_node` to determine the relevant node to use for permission
checks.
:param Node node: Node being viewed
"""
if self.node == node or self.node in node.nodes:
return self.node
if node.is_fork_of(self.node) or node.is_registration_of(self.node):
return node
for child in node.nodes:
if child.is_fork_of(self.node) or child.is_registration_of(self.node):
return child
return False
def can_view(self, node, auth):
node_to_check = self.resolve_node(node)
if node_to_check:
return node_to_check.can_view(auth)
return False
def _render_log_contributor(self, contributor, anonymous=False):
user = User.load(contributor)
if not user:
return None
if self.node:
fullname = user.display_full_name(node=self.node)
else:
fullname = user.fullname
return {
'id': privacy_info_handle(user._primary_key, anonymous),
'fullname': privacy_info_handle(fullname, anonymous, name=True),
'registered': user.is_registered,
}
class Tag(StoredObject):
_id = fields.StringField(primary=True, validate=MaxLengthValidator(128))
def __repr__(self):
return '<Tag() with id {self._id!r}>'.format(self=self)
@property
def url(self):
return '/search/?tags={}'.format(self._id)
class Pointer(StoredObject):
"""A link to a Node. The Pointer delegates all but a few methods to its
contained Node. Forking and registration are overridden such that the
link is cloned, but its contained Node is not.
"""
#: Whether this is a pointer or not
primary = False
_id = fields.StringField()
node = fields.ForeignField('node', backref='_pointed')
_meta = {'optimistic': True}
def _clone(self):
if self.node:
clone = self.clone()
clone.node = self.node
clone.save()
return clone
def fork_node(self, *args, **kwargs):
return self._clone()
def register_node(self, *args, **kwargs):
return self._clone()
def use_as_template(self, *args, **kwargs):
return self._clone()
def resolve(self):
return self.node
def __getattr__(self, item):
"""Delegate attribute access to the node being pointed to.
"""
# Prevent backref lookups from being overridden by proxied node
try:
return super(Pointer, self).__getattr__(item)
except AttributeError:
pass
if self.node:
return getattr(self.node, item)
raise AttributeError(
'Pointer object has no attribute {0}'.format(
item
)
)
def get_pointer_parent(pointer):
"""Given a `Pointer` object, return its parent node.
"""
# The `parent_node` property of the `Pointer` schema refers to the parents
# of the pointed-at `Node`, not the parents of the `Pointer`; use the
# back-reference syntax to find the parents of the `Pointer`.
parent_refs = pointer.node__parent
assert len(parent_refs) == 1, 'Pointer must have exactly one parent'
return parent_refs[0]
def validate_category(value):
"""Validator for Node#category. Makes sure that the value is one of the
categories defined in CATEGORY_MAP.
"""
if value not in Node.CATEGORY_MAP.keys():
raise ValidationValueError('Invalid value for category.')
return True
def validate_title(value):
"""Validator for Node#title. Makes sure that the value exists.
"""
if value is None or not value.strip():
raise ValidationValueError('Title cannot be blank.')
return True
def validate_user(value):
if value != {}:
user_id = value.iterkeys().next()
if User.find(Q('_id', 'eq', user_id)).count() != 1:
raise ValidationValueError('User does not exist.')
return True
class Node(GuidStoredObject, AddonModelMixin):
redirect_mode = 'proxy'
#: Whether this is a pointer or not
primary = True
# Node fields that trigger an update to Solr on save
SOLR_UPDATE_FIELDS = {
'title',
'category',
'description',
'visible_contributor_ids',
'tags',
'is_fork',
'is_registration',
'is_public',
'is_deleted',
'wiki_pages_current',
}
# Maps category identifier => Human-readable representation for use in
# titles, menus, etc.
# Use an OrderedDict so that menu items show in the correct order
CATEGORY_MAP = OrderedDict([
('', 'Uncategorized'),
('project', 'Project'),
('hypothesis', 'Hypothesis'),
('methods and measures', 'Methods and Measures'),
('procedure', 'Procedure'),
('instrumentation', 'Instrumentation'),
('data', 'Data'),
('analysis', 'Analysis'),
('communication', 'Communication'),
('other', 'Other')
])
_id = fields.StringField(primary=True)
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
# Privacy
is_public = fields.BooleanField(default=False)
# User mappings
permissions = fields.DictionaryField()
visible_contributor_ids = fields.StringField(list=True)
# Project Organization
is_dashboard = fields.BooleanField(default=False)
is_folder = fields.BooleanField(default=False)
# Expanded: Dictionary field mapping user IDs to expand state of this node:
# {
# 'icpnw': True,
# 'cdi38': False,
# }
expanded = fields.DictionaryField(default={}, validate=validate_user)
is_deleted = fields.BooleanField(default=False)
deleted_date = fields.DateTimeField()
is_registration = fields.BooleanField(default=False)
registered_date = fields.DateTimeField()
registered_user = fields.ForeignField('user', backref='registered')
registered_schema = fields.ForeignField('metaschema', backref='registered')
registered_meta = fields.DictionaryField()
is_fork = fields.BooleanField(default=False)
forked_date = fields.DateTimeField()
title = fields.StringField(validate=validate_title)
description = fields.StringField()
category = fields.StringField(validate=validate_category)
# One of 'public', 'private'
# TODO: Add validator
comment_level = fields.StringField(default='private')
files_current = fields.DictionaryField()
files_versions = fields.DictionaryField()
wiki_pages_current = fields.DictionaryField()
wiki_pages_versions = fields.DictionaryField()
creator = fields.ForeignField('user', backref='created')
contributors = fields.ForeignField('user', list=True, backref='contributed')
users_watching_node = fields.ForeignField('user', list=True, backref='watched')
logs = fields.ForeignField('nodelog', list=True, backref='logged')
tags = fields.ForeignField('tag', list=True, backref='tagged')
# Tags for internal use
system_tags = fields.StringField(list=True, index=True)
nodes = fields.AbstractForeignField(list=True, backref='parent')
forked_from = fields.ForeignField('node', backref='forked')
registered_from = fields.ForeignField('node', backref='registrations')
# The node (if any) used as a template for this node's creation
template_node = fields.ForeignField('node', backref='template_node')
api_keys = fields.ForeignField('apikey', list=True, backref='keyed')
piwik_site_id = fields.StringField()
_meta = {
'optimistic': True,
}
def __init__(self, *args, **kwargs):
super(Node, self).__init__(*args, **kwargs)
# Crash if parent provided and not project
project = kwargs.get('project')
if project and project.category != 'project':
raise ValueError('Parent must be a project.')
if kwargs.get('_is_loaded', False):
return
if self.creator:
self.contributors.append(self.creator)
self.set_visible(self.creator, visible=True, log=False)
# Add default creator permissions
for permission in CREATOR_PERMISSIONS:
self.add_permission(self.creator, permission, save=False)
def __repr__(self):
return ('<Node(title={self.title!r}, category={self.category!r}) '
'with _id {self._id!r}>').format(self=self)
@property
def category_display(self):
"""The human-readable representation of this node's category."""
return self.CATEGORY_MAP[self.category]
@property
def private_links(self):
return self.privatelink__shared
@property
def private_links_active(self):
return [x for x in self.private_links if not x.is_deleted]
@property
def private_link_keys_active(self):
return [x.key for x in self.private_links if not x.is_deleted]
@property
def private_link_keys_deleted(self):
return [x.key for x in self.private_links if x.is_deleted]
def can_edit(self, auth=None, user=None):
"""Return if a user is authorized to edit this node.
Must specify one of (`auth`, `user`).
:param Auth auth: Auth object to check
:param User user: User object to check
:returns: Whether user has permission to edit this node.
"""
if not auth and not user:
raise ValueError('Must pass either `auth` or `user`')
if auth and user:
raise ValueError('Cannot pass both `auth` and `user`')
user = user or auth.user
if auth:
is_api_node = auth.api_node == self
else:
is_api_node = False
return (
(user and self.has_permission(user, 'write'))
or is_api_node
)
def can_view(self, auth):
if not auth and not self.is_public:
return False
return self.is_public or auth.user \
and self.has_permission(auth.user, 'read') \
or auth.private_key in self.private_link_keys_active
def is_expanded(self, user=None):
"""Return if a user is has expanded the folder in the dashboard view.
Must specify one of (`auth`, `user`).
:param User user: User object to check
:returns: Boolean if the folder is expanded.
"""
if user._id in self.expanded:
return self.expanded[user._id]
else:
return False
def expand(self, user=None):
self.expanded[user._id] = True
self.save()
def collapse(self, user=None):
self.expanded[user._id] = False
self.save()
def is_derived_from(self, other, attr):
derived_from = getattr(self, attr)
while True:
if derived_from is None:
return False
if derived_from == other:
return True
derived_from = getattr(derived_from, attr)
def is_fork_of(self, other):
return self.is_derived_from(other, 'forked_from')
def is_registration_of(self, other):
return self.is_derived_from(other, 'registered_from')
def add_permission(self, user, permission, save=False):
"""Grant permission to a user.
:param User user: User to grant permission to
:param str permission: Permission to grant
:param bool save: Save changes
:raises: ValueError if user already has permission
"""
if user._id not in self.permissions:
self.permissions[user._id] = [permission]
else:
if permission in self.permissions[user._id]:
raise ValueError('User already has permission {0}'.format(permission))
self.permissions[user._id].append(permission)
if save:
self.save()
def remove_permission(self, user, permission, save=False):
"""Revoke permission from a user.
:param User user: User to revoke permission from
:param str permission: Permission to revoke
:param bool save: Save changes
:raises: ValueError if user does not have permission
"""
try:
self.permissions[user._id].remove(permission)
except (KeyError, ValueError):
raise ValueError('User does not have permission {0}'.format(permission))
if save:
self.save()
def clear_permission(self, user, save=False):
"""Clear all permissions for a user.
:param User user: User to revoke permission from
:param bool save: Save changes
:raises: ValueError if user not in permissions
"""
try:
self.permissions.pop(user._id)
except KeyError:
raise ValueError(
'User {0} not in permissions list for node {1}'.format(
user._id, self._id,
)
)
if save:
self.save()
def set_permissions(self, user, permissions, save=False):
self.permissions[user._id] = permissions
if save:
self.save()
def has_permission(self, user, permission):
"""Check whether user has permission.
:param User user: User to test
:param str permission: Required permission
:returns: User has required permission
"""
if user is None:
logger.warn('User is ``None``.')
return False
try:
return permission in self.permissions[user._id]
except KeyError:
return False
def get_permissions(self, user):
"""Get list of permissions for user.
:param User user: User to check
:returns: List of permissions
:raises: ValueError if user not found in permissions
"""
return self.permissions.get(user._id, [])
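    # Sketch of the permission helpers above (hypothetical `node` and `user`;
    # shown as comments so nothing executes at import time):
    #
    #     node.add_permission(user, 'read')
    #     node.add_permission(user, 'write')
    #     node.has_permission(user, 'write')      # -> True
    #     node.get_permissions(user)              # -> ['read', 'write']
    #     node.remove_permission(user, 'read', save=True)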
def adjust_permissions(self):
for key in self.permissions.keys():
if key not in self.contributors:
self.permissions.pop(key)
@property
def visible_contributors(self):
return [
User.load(_id)
for _id in self.visible_contributor_ids
]
def get_visible(self, user):
if not self.is_contributor(user):
raise ValueError(u'User {0} not in contributors'.format(user))
return user._id in self.visible_contributor_ids
def update_visible_ids(self, save=False):
"""Update the order of `visible_contributor_ids`. Updating on making
a contributor visible is more efficient than recomputing order on
accessing `visible_contributors`.
"""
self.visible_contributor_ids = [
contributor._id
for contributor in self.contributors
if contributor._id in self.visible_contributor_ids
]
if save:
self.save()
def set_visible(self, user, visible, log=True, auth=None, save=False):
if not self.is_contributor(user):
raise ValueError(u'User {0} not in contributors'.format(user))
if visible and user._id not in self.visible_contributor_ids:
self.visible_contributor_ids.append(user._id)
self.update_visible_ids(save=False)
elif not visible and user._id in self.visible_contributor_ids:
self.visible_contributor_ids.remove(user._id)
else:
return
message = (
NodeLog.MADE_CONTRIBUTOR_VISIBLE
if visible
else NodeLog.MADE_CONTRIBUTOR_INVISIBLE
)
if log:
self.add_log(
message,
params={
'project': self.parent_id,
'node': self._id,
'contributors': [user._id],
},
auth=auth,
save=False,
)
if save:
self.save()
def can_comment(self, auth):
if self.comment_level == 'public':
return auth.logged_in and (
self.is_public or
(auth.user and self.has_permission(auth.user, 'read'))
)
return self.can_edit(auth)
def save(self, *args, **kwargs):
update_piwik = kwargs.pop('update_piwik', True)
self.adjust_permissions()
first_save = not self._is_loaded
if first_save and self.is_dashboard:
existing_dashboards = self.creator.node__contributed.find(
Q('is_dashboard', 'eq', True)
)
if existing_dashboards.count() > 0:
raise NodeStateError("Only one dashboard allowed per user.")
is_original = not self.is_registration and not self.is_fork
if 'suppress_log' in kwargs.keys():
suppress_log = kwargs['suppress_log']
del kwargs['suppress_log']
else:
suppress_log = False
saved_fields = super(Node, self).save(*args, **kwargs)
if first_save and is_original and not suppress_log:
#
# TODO: This logic also exists in self.use_as_template()
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.added_default:
self.add_addon(addon.short_name, auth=None, log=False)
#
if getattr(self, 'project', None):
# Append log to parent
self.project.nodes.append(self)
self.project.save()
# Define log fields for component
log_action = NodeLog.NODE_CREATED
log_params = {
'node': self._primary_key,
'project': self.project._primary_key,
}
else:
# Define log fields for non-component project
log_action = NodeLog.PROJECT_CREATED
log_params = {
'project': self._primary_key,
}
# Add log with appropriate fields
self.add_log(
log_action,
params=log_params,
auth=Auth(user=self.creator),
log_date=self.date_created,
save=True,
)
# Only update Solr if at least one stored field has changed, and if
# public or privacy setting has changed
need_update = bool(self.SOLR_UPDATE_FIELDS.intersection(saved_fields))
if not self.is_public:
if first_save or 'is_public' not in saved_fields:
need_update = False
if self.is_folder:
need_update = False
if need_update:
self.update_search()
# This method checks what has changed.
if settings.PIWIK_HOST and update_piwik:
piwik_tasks.update_node(self._id, saved_fields)
# Return expected value for StoredObject::save
return saved_fields
######################################
# Methods that return a new instance #
######################################
def use_as_template(self, auth, changes=None, top_level=True):
"""Create a new project, using an existing project as a template.
:param auth: The user to be assigned as creator
:param changes: A dictionary of changes, keyed by node id, which
override the attributes of the template project or its
children.
:return: The `Node` instance created.
"""
changes = changes or dict()
# build the dict of attributes to change for the new node
try:
attributes = changes[self._id]
# TODO: explicitly define attributes which may be changed.
except (AttributeError, KeyError):
attributes = dict()
new = self.clone()
# clear permissions, which are not cleared by the clone method
new.permissions = {}
new.visible_contributor_ids = []
# Clear quasi-foreign fields
new.files_current = {}
new.files_versions = {}
new.wiki_pages_current = {}
new.wiki_pages_versions = {}
# set attributes which may be overridden by `changes`
new.is_public = False
new.description = None
# apply `changes`
for attr, val in attributes.iteritems():
setattr(new, attr, val)
# set attributes which may NOT be overridden by `changes`
new.creator = auth.user
new.add_contributor(contributor=auth.user, log=False, save=False)
new.template_node = self
new.is_fork = False
new.is_registration = False
new.piwik_site_id = None
# If that title hasn't been changed, apply the default prefix (once)
if (new.title == self.title
and top_level
and language.TEMPLATED_FROM_PREFIX not in new.title):
new.title = ''.join((language.TEMPLATED_FROM_PREFIX, new.title, ))
# Slight hack - date_created is a read-only field.
new._fields['date_created'].__set__(
new,
datetime.datetime.utcnow(),
safe=True
)
new.save(suppress_log=True)
# Log the creation
new.add_log(
NodeLog.CREATED_FROM,
params={
'node': new._primary_key,
'template_node': {
'id': self._primary_key,
'url': self.url,
},
},
auth=auth,
log_date=new.date_created,
save=False,
)
# add mandatory addons
# TODO: This logic also exists in self.save()
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.added_default:
new.add_addon(addon.short_name, auth=None, log=False)
# deal with the children of the node, if any
new.nodes = [
x.use_as_template(auth, changes, top_level=False)
for x in self.nodes
if x.can_view(auth)
]
new.save()
return new
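    # Templating sketch (hypothetical objects; `auth.user` must be able to view
    # the template): attributes listed in `changes` override the copied values.
    #
    #     copy = template_project.use_as_template(
    #         auth=auth,
    #         changes={template_project._id: {'title': 'My new study'}},
    #     )
    #     # `copy.title` gets language.TEMPLATED_FROM_PREFIX prepended only when
    #     # the title is not overridden via `changes`.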
############
# Pointers #
############
def add_pointer(self, node, auth, save=True):
"""Add a pointer to a node.
:param Node node: Node to add
:param Auth auth: Consolidated authorization
:param bool save: Save changes
:return: Created pointer
"""
        # Fail if node already in nodes / pointers. Note: cast nodes and
        # pointers to primary keys to test for conflicts with both nodes and
        # pointers contained in `self.nodes`.
if node._id in self.node_ids:
raise ValueError(
'Pointer to node {0} already in list'.format(node._id)
)
# If a folder, prevent more than one pointer to that folder. This will prevent infinite loops on the Dashboard.
# Also, no pointers to the dashboard project, which could cause loops as well.
already_pointed = node.pointed
if node.is_folder and len(already_pointed) > 0:
raise ValueError(
'Pointer to folder {0} already exists. Only one pointer to any given folder allowed'.format(node._id)
)
if node.is_dashboard:
raise ValueError(
'Pointer to dashboard ({0}) not allowed.'.format(node._id)
)
# Append pointer
pointer = Pointer(node=node)
pointer.save()
self.nodes.append(pointer)
# Add log
self.add_log(
action=NodeLog.POINTER_CREATED,
params={
'project': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
# Optionally save changes
if save:
self.save()
return pointer
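    # Pointer sketch (hypothetical nodes): `add_pointer` wraps the target in a
    # `Pointer`, appends it to `self.nodes`, and logs the action.
    #
    #     pointer = project.add_pointer(other_node, auth=auth)
    #     project.rm_pointer(pointer, auth=auth)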
def rm_pointer(self, pointer, auth):
"""Remove a pointer.
:param Pointer pointer: Pointer to remove
:param Auth auth: Consolidated authorization
"""
if pointer not in self.nodes:
raise ValueError
# Remove `Pointer` object; will also remove self from `nodes` list of
# parent node
Pointer.remove_one(pointer)
# Add log
self.add_log(
action=NodeLog.POINTER_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
@property
def node_ids(self):
return [
node._id if node.primary else node.node._id
for node in self.nodes
]
@property
def nodes_primary(self):
return [
node
for node in self.nodes
if node.primary
]
@property
def nodes_pointer(self):
return [
node
for node in self.nodes
if not node.primary
]
@property
def has_pointers_recursive(self):
"""Recursively checks whether the current node or any of its nodes
contains a pointer.
"""
if self.nodes_pointer:
return True
for node in self.nodes_primary:
if node.has_pointers_recursive:
return True
return False
@property
def pointed(self):
return getattr(self, '_pointed', [])
def pointing_at(self, pointed_node_id):
"""This node is pointed at another node.
:param Node pointed_node_id: The node id of the node being pointed at.
:return: pointer_id
"""
for pointer in self.nodes_pointer:
node_id = pointer.node._id
if node_id == pointed_node_id:
return pointer._id
return None
def get_points(self, folders=False, deleted=False, resolve=True):
ret = []
for each in self.pointed:
pointer_node = get_pointer_parent(each)
if not folders and pointer_node.is_folder:
continue
if not deleted and pointer_node.is_deleted:
continue
if resolve:
ret.append(pointer_node)
else:
ret.append(each)
return ret
def resolve(self):
return self
def fork_pointer(self, pointer, auth, save=True):
"""Replace a pointer with a fork. If the pointer points to a project,
fork the project and replace the pointer with a new pointer pointing
to the fork. If the pointer points to a component, fork the component
and add it to the current node.
:param Pointer pointer:
:param Auth auth:
:param bool save:
:return: Forked node
"""
# Fail if pointer not contained in `nodes`
try:
index = self.nodes.index(pointer)
except ValueError:
raise ValueError('Pointer {0} not in list'.format(pointer._id))
# Get pointed node
node = pointer.node
# Fork into current node and replace pointer with forked component
forked = node.fork_node(auth)
if forked is None:
raise ValueError('Could not fork node')
self.nodes[index] = forked
# Add log
self.add_log(
NodeLog.POINTER_FORKED,
params={
'project': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
# Optionally save changes
if save:
self.save()
# Garbage-collect pointer. Note: Must save current node before
# removing pointer, else remove will fail when trying to remove
# backref from self to pointer.
Pointer.remove_one(pointer)
# Return forked content
return forked
def get_recent_logs(self, n=10):
"""Return a list of the n most recent logs, in reverse chronological
order.
:param int n: Number of logs to retrieve
"""
        return list(reversed(self.logs))[:n]
@property
def date_modified(self):
'''The most recent datetime when this node was modified, based on
the logs.
'''
try:
return self.logs[-1].date
except IndexError:
return None
def set_title(self, title, auth, save=False):
"""Set the title of this Node and log it.
:param str title: The new title.
:param auth: All the auth information including user, API key.
"""
if title is None or not title.strip():
raise ValidationValueError('Title cannot be blank.')
original_title = self.title
self.title = title
self.add_log(
action=NodeLog.EDITED_TITLE,
params={
'project': self.parent_id,
'node': self._primary_key,
'title_new': self.title,
'title_original': original_title,
},
auth=auth,
save=False,
)
if save:
self.save()
return None
def set_description(self, description, auth, save=False):
"""Set the description and log the event.
:param str description: The new description
        :param auth: All the auth information including user, API key.
:param bool save: Save self after updating.
"""
original = self.description
self.description = description
self.add_log(
action=NodeLog.EDITED_DESCRIPTION,
params={
'project': self.parent_node, # None if no parent
'node': self._primary_key,
'description_new': self.description,
'description_original': original
},
auth=auth,
save=False,
)
if save:
self.save()
return None
def update_search(self):
import website.search.search as search
search.update_node(self)
def remove_node(self, auth, date=None):
"""Marks a node as deleted.
TODO: Call a hook on addons
Adds a log to the parent node if applicable
:param auth: an instance of :class:`Auth`.
:param date: Date node was removed
:type date: `datetime.datetime` or `None`
"""
# TODO: rename "date" param - it's shadowing a global
if self.is_dashboard:
raise NodeStateError("Dashboards may not be deleted.")
if not self.can_edit(auth):
raise PermissionsError('{0!r} does not have permission to modify this {1}'.format(auth.user, self.category or 'node'))
#if this is a folder, remove all the folders that this is pointing at.
if self.is_folder:
for pointed in self.nodes_pointer:
if pointed.node.is_folder:
pointed.node.remove_node(auth=auth)
if [x for x in self.nodes_primary if not x.is_deleted]:
raise NodeStateError("Any child components must be deleted prior to deleting this project.")
# After delete callback
for addon in self.get_addons():
message = addon.after_delete(self, auth.user)
if message:
status.push_status_message(message)
log_date = date or datetime.datetime.utcnow()
# Add log to parent
if self.node__parent:
self.node__parent[0].add_log(
NodeLog.NODE_REMOVED,
params={
'project': self._primary_key,
},
auth=auth,
log_date=log_date,
save=True,
)
else:
self.add_log(
NodeLog.PROJECT_DELETED,
params={
'project': self._primary_key,
},
auth=auth,
log_date=log_date,
save=True,
)
self.is_deleted = True
self.deleted_date = date
self.save()
return True
def fork_node(self, auth, title='Fork of '):
"""Recursively fork a node.
:param Auth auth: Consolidated authorization
:param str title: Optional text to prepend to forked title
:return: Forked node
"""
user = auth.user
# Non-contributors can't fork private nodes
if not (self.is_public or self.has_permission(user, 'read')):
raise PermissionsError('{0!r} does not have permission to fork node {1!r}'.format(user, self._id))
folder_old = os.path.join(settings.UPLOADS_PATH, self._primary_key)
when = datetime.datetime.utcnow()
original = self.load(self._primary_key)
# Note: Cloning a node copies its `files_current` and
# `wiki_pages_current` fields, but does not clone the underlying
# database objects to which these dictionaries refer. This means that
# the cloned node must pass itself to its file and wiki objects to
# build the correct URLs to that content.
forked = original.clone()
forked.logs = self.logs
forked.tags = self.tags
# Recursively fork child nodes
for node_contained in original.nodes:
forked_node = None
try: # Catch the potential PermissionsError above
forked_node = node_contained.fork_node(auth=auth, title='')
except PermissionsError:
pass # If this exception is thrown omit the node from the result set
if forked_node is not None:
forked.nodes.append(forked_node)
forked.title = title + forked.title
forked.is_fork = True
forked.is_registration = False
forked.forked_date = when
forked.forked_from = original
forked.creator = user
forked.piwik_site_id = None
# Forks default to private status
forked.is_public = False
# Clear permissions before adding users
forked.permissions = {}
forked.visible_contributor_ids = []
forked.add_contributor(contributor=user, log=False, save=False)
forked.add_log(
action=NodeLog.NODE_FORKED,
params={
'project': original.parent_id,
'node': original._primary_key,
'registration': forked._primary_key,
},
auth=auth,
log_date=when,
save=False,
)
forked.save()
# After fork callback
for addon in original.get_addons():
_, message = addon.after_fork(original, forked, user)
if message:
status.push_status_message(message)
# TODO: Remove after migration to OSF Storage
if settings.COPY_GIT_REPOS and os.path.exists(folder_old):
folder_new = os.path.join(settings.UPLOADS_PATH, forked._primary_key)
Repo(folder_old).clone(folder_new)
return forked
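    # Forking sketch (hypothetical objects): children the forking user cannot
    # read are silently omitted from the fork.
    #
    #     fork = project.fork_node(auth=Auth(user=some_user))
    #     fork.is_fork             # -> True
    #     fork.forked_from._id     # -> project._id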
def register_node(self, schema, auth, template, data):
"""Make a frozen copy of a node.
:param schema: Schema object
:param auth: All the auth information including user, API key.
        :param template: Template name
        :param data: Form data
"""
# TODO: Throw error instead of returning?
if not self.can_edit(auth):
return
if self.is_folder:
raise NodeStateError("Folders may not be registered")
folder_old = os.path.join(settings.UPLOADS_PATH, self._primary_key)
template = urllib.unquote_plus(template)
template = to_mongo(template)
when = datetime.datetime.utcnow()
original = self.load(self._primary_key)
# Note: Cloning a node copies its `files_current` and
# `wiki_pages_current` fields, but does not clone the underlying
# database objects to which these dictionaries refer. This means that
# the cloned node must pass itself to its file and wiki objects to
# build the correct URLs to that content.
registered = original.clone()
registered.is_registration = True
registered.registered_date = when
registered.registered_user = auth.user
registered.registered_schema = schema
registered.registered_from = original
if not registered.registered_meta:
registered.registered_meta = {}
registered.registered_meta[template] = data
registered.contributors = self.contributors
registered.forked_from = self.forked_from
registered.creator = self.creator
registered.logs = self.logs
registered.tags = self.tags
registered.piwik_site_id = None
registered.save()
# After register callback
for addon in original.get_addons():
_, message = addon.after_register(original, registered, auth.user)
if message:
status.push_status_message(message)
# TODO: Remove after migration to OSF Storage
if settings.COPY_GIT_REPOS and os.path.exists(folder_old):
folder_new = os.path.join(settings.UPLOADS_PATH, registered._primary_key)
Repo(folder_old).clone(folder_new)
registered.nodes = []
for node_contained in original.nodes:
registered_node = node_contained.register_node(
schema, auth, template, data
)
if registered_node is not None:
registered.nodes.append(registered_node)
original.add_log(
action=NodeLog.PROJECT_REGISTERED,
params={
'project': original.parent_id,
'node': original._primary_key,
'registration': registered._primary_key,
},
auth=auth,
log_date=when,
save=False,
)
original.save()
registered.save()
for node in registered.nodes:
node.update_search()
return registered
def remove_tag(self, tag, auth, save=True):
if tag in self.tags:
self.tags.remove(tag)
self.add_log(
action=NodeLog.TAG_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
def add_tag(self, tag, auth, save=True):
if tag not in self.tags:
new_tag = Tag.load(tag)
if not new_tag:
new_tag = Tag(_id=tag)
new_tag.save()
self.tags.append(new_tag)
self.add_log(
action=NodeLog.TAG_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
# TODO: Move to NodeFile
def read_file_object(self, file_object):
folder_name = os.path.join(settings.UPLOADS_PATH, self._primary_key)
repo = Repo(folder_name)
tree = repo.commit(file_object.git_commit).tree
mode, sha = tree_lookup_path(repo.get_object, tree, file_object.path)
return repo[sha].data, file_object.content_type
def get_file(self, path, version):
#folder_name = os.path.join(settings.UPLOADS_PATH, self._primary_key)
file_object = self.get_file_object(path, version)
return self.read_file_object(file_object)
def get_file_object(self, path, version=None):
"""Return the :class:`NodeFile` object at the given path.
:param str path: Path to the file.
:param int version: Version number, 0-indexed.
"""
# TODO: Fix circular imports
from website.addons.osffiles.model import NodeFile
from website.addons.osffiles.exceptions import (
InvalidVersionError,
VersionNotFoundError,
)
folder_name = os.path.join(settings.UPLOADS_PATH, self._primary_key)
err_msg = 'Upload directory is not a git repo'
assert os.path.exists(os.path.join(folder_name, ".git")), err_msg
try:
file_versions = self.files_versions[path.replace('.', '_')]
# Default to latest version
version = version if version is not None else len(file_versions) - 1
except (AttributeError, KeyError):
raise ValueError('Invalid path: {}'.format(path))
if version < 0:
raise InvalidVersionError('Version number must be >= 0.')
try:
file_id = file_versions[version]
except IndexError:
raise VersionNotFoundError('Invalid version number: {}'.format(version))
except TypeError:
            raise InvalidVersionError('Invalid version type. Version number '
                                      'must be an integer >= 0.')
return NodeFile.load(file_id)
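    # Versioning sketch (hypothetical node and path): versions are 0-indexed
    # and the latest version is returned when `version` is omitted.
    #
    #     node.get_file_object('data.csv')              # newest revision
    #     node.get_file_object('data.csv', version=0)   # first uploaded revision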
def remove_file(self, auth, path):
'''Removes a file from the filesystem, NodeFile collection, and does a git delete ('git rm <file>')
        :param auth: All the auth information including user, API key.
:param path:
:raises: website.osffiles.exceptions.FileNotFoundError if file is not found.
'''
from website.addons.osffiles.model import NodeFile
from website.addons.osffiles.exceptions import FileNotFoundError
from website.addons.osffiles.utils import urlsafe_filename
file_name_key = urlsafe_filename(path)
repo_path = os.path.join(settings.UPLOADS_PATH, self._primary_key)
# TODO make sure it all works, otherwise rollback as needed
# Do a git delete, which also removes from working filesystem.
try:
subprocess.check_output(
['git', 'rm', path],
cwd=repo_path,
shell=False
)
repo = Repo(repo_path)
message = '{path} deleted'.format(path=path)
committer = self._get_committer(auth)
repo.do_commit(message, committer)
except subprocess.CalledProcessError as error:
if error.returncode == 128:
raise FileNotFoundError('File {0!r} was not found'.format(path))
raise
if file_name_key in self.files_current:
nf = NodeFile.load(self.files_current[file_name_key])
nf.is_deleted = True
nf.save()
self.files_current.pop(file_name_key, None)
if file_name_key in self.files_versions:
for i in self.files_versions[file_name_key]:
nf = NodeFile.load(i)
nf.is_deleted = True
nf.save()
self.files_versions.pop(file_name_key)
self.add_log(
action=NodeLog.FILE_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'path': path
},
auth=auth,
log_date=nf.date_modified,
save=False,
)
# Updates self.date_modified
self.save()
@staticmethod
def _get_committer(auth):
user = auth.user
api_key = auth.api_key
if api_key:
commit_key_msg = ':{}'.format(api_key.label)
if api_key.user:
commit_name = api_key.user.fullname
commit_id = api_key.user._primary_key
commit_category = 'user'
if api_key.node:
commit_name = api_key.node.title
commit_id = api_key.node._primary_key
commit_category = 'node'
elif user:
commit_key_msg = ''
commit_name = user.fullname
commit_id = user._primary_key
commit_category = 'user'
else:
raise Exception('Must provide either user or api_key.')
committer = u'{name}{key_msg} <{category}-{id}@osf.io>'.format(
name=commit_name,
key_msg=commit_key_msg,
category=commit_category,
id=commit_id,
)
committer = normalize_unicode(committer)
return committer
def add_file(self, auth, file_name, content, size, content_type):
"""
Instantiates a new NodeFile object, and adds it to the current Node as
necessary.
"""
from website.addons.osffiles.model import NodeFile
from website.addons.osffiles.exceptions import FileNotModified
# TODO: Reading the whole file into memory is not scalable. Fix this.
# This node's folder
folder_name = os.path.join(settings.UPLOADS_PATH, self._primary_key)
# TODO: This should be part of the build phase, not here.
# verify the upload root exists
if not os.path.isdir(settings.UPLOADS_PATH):
os.mkdir(settings.UPLOADS_PATH)
# Make sure the upload directory contains a git repo.
if os.path.exists(folder_name):
if os.path.exists(os.path.join(folder_name, ".git")):
repo = Repo(folder_name)
else:
# ... or create one
repo = Repo.init(folder_name)
else:
# if the Node's folder isn't there, create it.
os.mkdir(folder_name)
repo = Repo.init(folder_name)
# Is this a new file, or are we updating an existing one?
file_is_new = not os.path.exists(os.path.join(folder_name, file_name))
if not file_is_new:
# Get the hash of the old file
old_file_hash = hashlib.md5()
with open(os.path.join(folder_name, file_name), 'rb') as f:
for chunk in iter(
lambda: f.read(128 * old_file_hash.block_size),
b''
):
old_file_hash.update(chunk)
# If the file hasn't changed
if old_file_hash.digest() == hashlib.md5(content).digest():
raise FileNotModified()
# Write the content of the temp file into a new file
with open(os.path.join(folder_name, file_name), 'wb') as f:
f.write(content)
# Deal with git
repo.stage([str(file_name)])
committer = self._get_committer(auth)
commit_id = repo.do_commit(
message=unicode(file_name +
(' added' if file_is_new else ' updated')),
committer=committer,
)
# Deal with creating a NodeFile in the database
node_file = NodeFile(
path=file_name,
filename=file_name,
size=size,
node=self,
uploader=auth.user,
git_commit=commit_id,
content_type=content_type,
)
node_file.save()
# Add references to the NodeFile to the Node object
file_name_key = node_file.clean_filename
# Reference the current file version
self.files_current[file_name_key] = node_file._primary_key
# Create a version history if necessary
if file_name_key not in self.files_versions:
self.files_versions[file_name_key] = []
# Add reference to the version history
self.files_versions[file_name_key].append(node_file._primary_key)
self.add_log(
action=NodeLog.FILE_ADDED if file_is_new else NodeLog.FILE_UPDATED,
params={
'project': self.parent_id,
'node': self._primary_key,
'path': node_file.path,
'version': len(self.files_versions),
'urls': {
'view': node_file.url(self),
'download': node_file.download_url(self),
},
},
auth=auth,
log_date=node_file.date_uploaded,
save=False,
)
self.save()
return node_file
def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True):
user = auth.user if auth else None
api_key = auth.api_key if auth else None
log = NodeLog(
action=action,
user=user,
foreign_user=foreign_user,
api_key=api_key,
params=params,
)
if log_date:
log.date = log_date
log.save()
self.logs.append(log)
if save:
self.save()
if user:
increment_user_activity_counters(user._primary_key, action, log.date)
if self.node__parent:
parent = self.node__parent[0]
parent.logs.append(log)
parent.save()
return log
@property
def url(self):
return '/{}/'.format(self._primary_key)
def web_url_for(self, view_name, _absolute=False, _guid=False, *args, **kwargs):
# Note: Check `parent_node` rather than `category` to avoid database
# inconsistencies [jmcarp]
if self.parent_node is None:
return web_url_for(view_name, pid=self._primary_key, _absolute=_absolute,
_guid=_guid, *args, **kwargs)
else:
return web_url_for(view_name, pid=self.parent_node._primary_key,
nid=self._primary_key, _absolute=_absolute, _guid=_guid, *args, **kwargs)
def api_url_for(self, view_name, _absolute=False, *args, **kwargs):
# Note: Check `parent_node` rather than `category` to avoid database
# inconsistencies [jmcarp]
if self.parent_node is None:
return api_url_for(view_name, pid=self._primary_key, _absolute=_absolute,
*args, **kwargs)
else:
return api_url_for(view_name, pid=self.parent_node._primary_key,
nid=self._primary_key, _absolute=_absolute, *args, **kwargs)
@property
def absolute_url(self):
if not self.url:
logger.error("Node {0} has a parent that is not a project".format(self._id))
return None
return urlparse.urljoin(settings.DOMAIN, self.url)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
@property
def api_url(self):
if not self.url:
logger.error('Node {0} has a parent that is not a project'.format(self._id))
return None
return '/api/v1{0}'.format(self.deep_url)
@property
def deep_url(self):
if self.category == 'project':
return '/project/{}/'.format(self._primary_key)
else:
if self.node__parent and self.node__parent[0].category == 'project':
return '/project/{}/node/{}/'.format(
self.parent_id,
self._primary_key
)
logger.error("Node {0} has a parent that is not a project".format(self._id))
def author_list(self, and_delim='&'):
author_names = [
author.biblio_name
for author in self.visible_contributors
if author
]
if len(author_names) < 2:
return ' {0} '.format(and_delim).join(author_names)
if len(author_names) > 7:
author_names = author_names[:7]
author_names.append('et al.')
return ', '.join(author_names)
return u'{0}, {1} {2}'.format(
', '.join(author_names[:-1]),
and_delim,
author_names[-1]
)
@property
def templated_list(self):
return [
x
for x in self.node__template_node
if not x.is_deleted
]
@property
def citation_apa(self):
return u'{authors} ({year}). {title}. Retrieved from Open Science Framework, <a href="{url}">{display_url}</a>'.format(
authors=self.author_list(and_delim='&'),
year=self.logs[-1].date.year if self.logs else '?',
title=self.title,
url=self.url,
display_url=self.display_absolute_url,
)
@property
def citation_mla(self):
return u'{authors} "{title}." Open Science Framework, {year}. <a href="{url}">{display_url}</a>'.format(
authors=self.author_list(and_delim='and'),
year=self.logs[-1].date.year if self.logs else '?',
title=self.title,
url=self.url,
display_url=self.display_absolute_url,
)
@property
def citation_chicago(self):
return u'{authors} "{title}." Open Science Framework ({year}). <a href="{url}">{display_url}</a>'.format(
authors=self.author_list(and_delim='and'),
year=self.logs[-1].date.year if self.logs else '?',
title=self.title,
url=self.url,
display_url=self.display_absolute_url,
)
@property
def parent_node(self):
"""The parent node, if it exists, otherwise ``None``. Note: this
property is named `parent_node` rather than `parent` to avoid a
conflict with the `parent` back-reference created by the `nodes`
field on this schema.
"""
try:
if not self.node__parent[0].is_deleted:
return self.node__parent[0]
except IndexError:
pass
return None
@property
def watch_url(self):
return os.path.join(self.api_url, "watch/")
@property
def parent_id(self):
if self.node__parent:
return self.node__parent[0]._primary_key
return None
@property
def project_or_component(self):
return 'project' if self.category == 'project' else 'component'
def is_contributor(self, user):
return (
user is not None
and (
user._id in self.contributors
)
)
def add_addon(self, addon_name, auth, log=True, *args, **kwargs):
"""Add an add-on to the node. Do nothing if the addon is already
enabled.
:param str addon_name: Name of add-on
:param Auth auth: Consolidated authorization object
:param bool log: Add a log after adding the add-on
:return: A boolean, whether the addon was added
"""
ret = AddonModelMixin.add_addon(self, addon_name, auth=auth,
*args, **kwargs)
if ret and log:
config = settings.ADDONS_AVAILABLE_DICT[addon_name]
self.add_log(
action=NodeLog.ADDON_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'addon': config.full_name,
},
auth=auth,
save=False,
)
self.save() # TODO: here, or outside the conditional? @mambocab
return ret
def delete_addon(self, addon_name, auth, _force=False):
"""Delete an add-on from the node.
:param str addon_name: Name of add-on
:param Auth auth: Consolidated authorization object
:param bool _force: For migration testing ONLY. Do not set to True
in the application, or else projects will be allowed to delete
mandatory add-ons!
:return bool: Add-on was deleted
"""
rv = super(Node, self).delete_addon(addon_name, auth, _force)
if rv:
config = settings.ADDONS_AVAILABLE_DICT[addon_name]
self.add_log(
action=NodeLog.ADDON_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'addon': config.full_name,
},
auth=auth,
save=False,
)
self.save()
# TODO: save here or outside the conditional? @mambocab
return rv
def callback(self, callback, recursive=False, *args, **kwargs):
"""Invoke callbacks of attached add-ons and collect messages.
:param str callback: Name of callback method to invoke
:param bool recursive: Apply callback recursively over nodes
:return list: List of callback messages
"""
messages = []
for addon in self.get_addons():
method = getattr(addon, callback)
message = method(self, *args, **kwargs)
if message:
messages.append(message)
if recursive:
for child in self.nodes:
if not child.is_deleted:
messages.extend(
child.callback(
callback, recursive, *args, **kwargs
)
)
return messages
def replace_contributor(self, old, new):
for i, contrib in enumerate(self.contributors):
if contrib._primary_key == old._primary_key:
self.contributors[i] = new
# Remove unclaimed record for the project
if self._primary_key in old.unclaimed_records:
del old.unclaimed_records[self._primary_key]
old.save()
for permission in self.get_permissions(old):
self.add_permission(new, permission)
self.permissions.pop(old._id)
if old._id in self.visible_contributor_ids:
self.visible_contributor_ids.remove(old._id)
return True
return False
def remove_contributor(self, contributor, auth, log=True):
"""Remove a contributor from this node.
:param contributor: User object, the contributor to be removed
:param auth: All the auth information including user, API key.
"""
# remove unclaimed record if necessary
if self._primary_key in contributor.unclaimed_records:
del contributor.unclaimed_records[self._primary_key]
self.contributors.remove(contributor._id)
self.clear_permission(contributor)
if contributor._id in self.visible_contributor_ids:
self.visible_contributor_ids.remove(contributor._id)
# Node must have at least one registered admin user
# TODO: Move to validator or helper
admins = [
user for user in self.contributors
if self.has_permission(user, 'admin')
and user.is_registered
]
if not admins:
return False
# Clear permissions for removed user
self.permissions.pop(contributor._id, None)
# After remove callback
for addon in self.get_addons():
message = addon.after_remove_contributor(self, contributor)
if message:
status.push_status_message(message)
if log:
self.add_log(
action=NodeLog.CONTRIB_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributor': contributor._id,
},
auth=auth,
save=False,
)
self.save()
return True
def remove_contributors(self, contributors, auth=None, log=True, save=False):
results = []
removed = []
for contrib in contributors:
outcome = self.remove_contributor(
contributor=contrib, auth=auth, log=False,
)
results.append(outcome)
removed.append(contrib._id)
if log:
self.add_log(
action=NodeLog.CONTRIB_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': removed,
},
auth=auth,
save=False,
)
if save:
self.save()
if False in results:
return False
return True
def manage_contributors(self, user_dicts, auth, save=False):
"""Reorder and remove contributors.
:param list user_dicts: Ordered list of contributors represented as
dictionaries of the form:
{'id': <id>, 'permission': <One of 'read', 'write', 'admin'>, 'visible': bool}
:param Auth auth: Consolidated authentication information
:param bool save: Save changes
:raises: ValueError if any users in `users` not in contributors or if
no admin contributors remaining
"""
users = []
user_ids = []
permissions_changed = {}
to_retain = []
to_remove = []
for user_dict in user_dicts:
user = User.load(user_dict['id'])
if user is None:
raise ValueError('User not found')
if user not in self.contributors:
raise ValueError(
'User {0} not in contributors'.format(user.fullname)
)
permissions = expand_permissions(user_dict['permission'])
if set(permissions) != set(self.get_permissions(user)):
self.set_permissions(user, permissions, save=False)
permissions_changed[user._id] = permissions
self.set_visible(user, user_dict['visible'], auth=auth)
users.append(user)
user_ids.append(user_dict['id'])
for user in self.contributors:
if user._id in user_ids:
to_retain.append(user)
else:
to_remove.append(user)
# TODO: Move to validator or helper @jmcarp
admins = [
user for user in users
if self.has_permission(user, 'admin')
and user.is_registered
]
if users is None or not admins:
raise ValueError(
'Must have at least one registered admin contributor'
)
if to_retain != users:
self.add_log(
action=NodeLog.CONTRIB_REORDERED,
params={
'project': self.parent_id,
'node': self._id,
'contributors': [
user._id
for user in users
],
},
auth=auth,
save=False,
)
if to_remove:
self.remove_contributors(to_remove, auth=auth, save=False)
self.contributors = users
if permissions_changed:
self.add_log(
action=NodeLog.PERMISSIONS_UPDATED,
params={
'project': self.parent_id,
'node': self._id,
'contributors': permissions_changed,
},
auth=auth,
save=False,
)
# Update list of visible IDs
self.update_visible_ids()
if save:
self.save()
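    # Sketch of the `user_dicts` payload expected above (hypothetical ids):
    #
    #     node.manage_contributors(
    #         user_dicts=[
    #             {'id': 'abc12', 'permission': 'admin', 'visible': True},
    #             {'id': 'def34', 'permission': 'read', 'visible': False},
    #         ],
    #         auth=auth,
    #         save=True,
    #     )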
def add_contributor(self, contributor, permissions=None, visible=True,
auth=None, log=True, save=False):
"""Add a contributor to the project.
:param User contributor: The contributor to be added
:param list permissions: Permissions to grant to the contributor
:param bool visible: Contributor is visible in project dashboard
:param Auth auth: All the auth information including user, API key
:param bool log: Add log to self
:param bool save: Save after adding contributor
:returns: Whether contributor was added
"""
MAX_RECENT_LENGTH = 15
# If user is merged into another account, use master account
contrib_to_add = contributor.merged_by if contributor.is_merged else contributor
if contrib_to_add not in self.contributors:
self.contributors.append(contrib_to_add)
if visible:
self.set_visible(contrib_to_add, visible=True, log=False)
# Add default contributor permissions
permissions = permissions or DEFAULT_CONTRIBUTOR_PERMISSIONS
for permission in permissions:
self.add_permission(contrib_to_add, permission, save=False)
# Add contributor to recently added list for user
if auth is not None:
user = auth.user
if contrib_to_add in user.recently_added:
user.recently_added.remove(contrib_to_add)
user.recently_added.insert(0, contrib_to_add)
while len(user.recently_added) > MAX_RECENT_LENGTH:
user.recently_added.pop()
if log:
self.add_log(
action=NodeLog.CONTRIB_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': [contrib_to_add._primary_key],
},
auth=auth,
save=False,
)
if save:
self.save()
contributor_added.send(self, contributor=contributor, auth=auth)
return True
else:
return False
def add_contributors(self, contributors, auth=None, log=True, save=False):
"""Add multiple contributors
:param contributors: A list of User objects to add as contributors.
:param auth: All the auth information including user, API key.
:param log: Add log to self
:param save: Save after adding contributor
"""
for contrib in contributors:
self.add_contributor(
contributor=contrib['user'], permissions=contrib['permissions'],
visible=contrib['visible'], auth=auth, log=False, save=False,
)
if log and contributors:
self.add_log(
action=NodeLog.CONTRIB_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': [
contrib['user']._id
for contrib in contributors
],
},
auth=auth,
save=False,
)
if save:
self.save()
def add_unregistered_contributor(self, fullname, email, auth,
permissions=None, save=False):
"""Add a non-registered contributor to the project.
:param str fullname: The full name of the person.
:param str email: The email address of the person.
:param Auth auth: Auth object for the user adding the contributor.
:returns: The added contributor
:raises: DuplicateEmailError if user with given email is already in the database.
"""
# Create a new user record
contributor = User.create_unregistered(fullname=fullname, email=email)
contributor.add_unclaimed_record(node=self, referrer=auth.user,
given_name=fullname, email=email)
try:
contributor.save()
except ValidationValueError: # User with same email already exists
contributor = get_user(username=email)
# Unregistered users may have multiple unclaimed records, so
# only raise error if user is registered.
if contributor.is_registered or self.is_contributor(contributor):
raise
contributor.add_unclaimed_record(node=self, referrer=auth.user,
given_name=fullname, email=email)
contributor.save()
self.add_contributor(
contributor, permissions=permissions, auth=auth,
log=True, save=False,
)
self.save()
return contributor
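    # Invitation sketch (hypothetical values): creates an unregistered User
    # record, attaches an unclaimed record for this node, and adds the person
    # as a contributor.
    #
    #     unreg = node.add_unregistered_contributor(
    #         fullname='Jane Doe',
    #         email='jane@example.com',
    #         auth=auth,
    #     )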
def set_privacy(self, permissions, auth=None):
"""Set the permissions for this node.
:param permissions: A string, either 'public' or 'private'
        :param auth: All the auth information including user, API key.
"""
if permissions == 'public' and not self.is_public:
self.is_public = True
elif permissions == 'private' and self.is_public:
self.is_public = False
else:
return False
# After set permissions callback
for addon in self.get_addons():
message = addon.after_set_privacy(self, permissions)
if message:
status.push_status_message(message)
action = NodeLog.MADE_PUBLIC if permissions == 'public' else NodeLog.MADE_PRIVATE
self.add_log(
action=action,
params={
'project': self.parent_id,
'node': self._primary_key,
},
auth=auth,
save=False,
)
self.save()
return True
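    # Privacy sketch (hypothetical objects): the method is a no-op returning
    # False when the node is already in the requested state.
    #
    #     node.set_privacy('public', auth=auth)    # logs NodeLog.MADE_PUBLIC
    #     node.set_privacy('public', auth=auth)    # -> False, already public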
# TODO: Move to wiki add-on
def get_wiki_page(self, name=None, version=None, id=None):
from website.addons.wiki.model import NodeWikiPage
if name:
name = (name or '').strip()
key = to_mongo_key(name)
try:
if version:
id = self.wiki_pages_versions[key][version - 1]
else:
id = self.wiki_pages_current[key]
except (KeyError, IndexError):
return None
return NodeWikiPage.load(id)
# TODO: Move to wiki add-on
def update_node_wiki(self, name, content, auth):
"""Update the node's wiki page with new content.
        :param name: A string, the page's name, e.g. ``"home"``.
:param content: A string, the posted content.
:param auth: All the auth information including user, API key.
"""
from website.addons.wiki.model import NodeWikiPage
name = (name or '').strip()
key = to_mongo_key(name)
if key not in self.wiki_pages_current:
if key in self.wiki_pages_versions:
version = len(self.wiki_pages_versions[key]) + 1
else:
version = 1
else:
current = NodeWikiPage.load(self.wiki_pages_current[key])
current.is_current = False
version = current.version + 1
current.save()
new_page = NodeWikiPage(
page_name=name,
version=version,
user=auth.user,
is_current=True,
node=self,
content=content
)
new_page.save()
# check if the wiki page already exists in versions (existed once and is now deleted)
if key not in self.wiki_pages_versions:
self.wiki_pages_versions[key] = []
self.wiki_pages_versions[key].append(new_page._primary_key)
self.wiki_pages_current[key] = new_page._primary_key
self.add_log(
action=NodeLog.WIKI_UPDATED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': new_page.page_name,
'page_id': new_page._primary_key,
'version': new_page.version,
},
auth=auth,
log_date=new_page.date,
save=False,
)
self.save()
# TODO: Move to wiki add-on
def rename_node_wiki(self, name, new_name, auth):
"""Rename the node's wiki page with new name.
:param name: A string, the page's name, e.g. ``"My Page"``.
:param new_name: A string, the new page's name, e.g. ``"My Renamed Page"``.
:param auth: All the auth information including user, API key.
"""
# TODO: Fix circular imports
from website.addons.wiki.exceptions import (
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
)
name = (name or '').strip()
key = to_mongo_key(name)
new_name = (new_name or '').strip()
new_key = to_mongo_key(new_name)
page = self.get_wiki_page(name)
if key == 'home':
raise PageCannotRenameError('Cannot rename wiki home page')
if not page:
raise PageNotFoundError('Wiki page not found')
if (new_key in self.wiki_pages_current and key != new_key) or new_key == 'home':
raise PageConflictError(
'Page already exists with name {0}'.format(
new_name,
)
)
# rename the page first in case we hit a validation exception.
old_name = page.page_name
page.rename(new_name)
# TODO: merge historical records like update (prevents log breaks)
# transfer the old page versions/current keys to the new name.
if key != new_key:
self.wiki_pages_versions[new_key] = self.wiki_pages_versions[key]
del self.wiki_pages_versions[key]
self.wiki_pages_current[new_key] = self.wiki_pages_current[key]
del self.wiki_pages_current[key]
self.add_log(
action=NodeLog.WIKI_RENAMED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': page.page_name,
'page_id': page._primary_key,
'old_page': old_name,
'version': page.version,
},
auth=auth,
save=False,
)
self.save()
def delete_node_wiki(self, name, auth):
name = (name or '').strip()
key = to_mongo_key(name)
page = self.get_wiki_page(key)
del self.wiki_pages_current[key]
self.add_log(
action=NodeLog.WIKI_DELETED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': page.page_name,
'page_id': page._primary_key,
},
auth=auth,
save=False,
)
self.save()
def get_stats(self, detailed=False):
if detailed:
raise NotImplementedError(
'Detailed stats exist, but are not yet implemented.'
)
else:
return get_basic_counters('node:%s' % self._primary_key)
# TODO: Deprecate this; it duplicates much of what serialize_project already
# does
def serialize(self):
"""Dictionary representation of node that is nested within a NodeLog's
representation.
"""
# TODO: incomplete implementation
return {
'id': str(self._primary_key),
'category': self.category_display,
'node_type': self.project_or_component,
'url': self.url,
# TODO: Titles shouldn't contain escaped HTML in the first place
'title': html_parser.unescape(self.title),
'api_url': self.api_url,
'is_public': self.is_public,
'is_registration': self.is_registration
}
@Node.subscribe('before_save')
def validate_permissions(schema, instance):
"""Ensure that user IDs in `contributors` and `permissions` match.
"""
node = instance
contributor_ids = set([user._id for user in node.contributors])
permission_ids = set(node.permissions.keys())
mismatched_contributors = contributor_ids.difference(permission_ids)
if mismatched_contributors:
raise ValidationValueError(
'Contributors {0} missing from `permissions` on node {1}'.format(
', '.join(mismatched_contributors),
node._id,
)
)
mismatched_permissions = permission_ids.difference(contributor_ids)
if mismatched_permissions:
raise ValidationValueError(
'Permission keys {0} missing from `contributors` on node {1}'.format(
                ', '.join(mismatched_permissions),
node._id,
)
)
@Node.subscribe('before_save')
def validate_visible_contributors(schema, instance):
"""Ensure that user IDs in `contributors` and `visible_contributor_ids`
match.
"""
node = instance
for user_id in node.visible_contributor_ids:
if user_id not in node.contributors:
raise ValidationValueError(
('User {0} is in `visible_contributor_ids` but not in '
'`contributors` on node {1}').format(
user_id,
node._id,
)
)
class WatchConfig(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
node = fields.ForeignField('Node', backref='watched')
digest = fields.BooleanField(default=False)
immediate = fields.BooleanField(default=False)
def __repr__(self):
return '<WatchConfig(node="{self.node}")>'.format(self=self)
class MailRecord(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
data = fields.DictionaryField()
records = fields.AbstractForeignField(list=True, backref='created')
class PrivateLink(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
key = fields.StringField(required=True)
name = fields.StringField()
is_deleted = fields.BooleanField(default=False)
anonymous = fields.BooleanField(default=False)
nodes = fields.ForeignField('node', list=True, backref='shared')
creator = fields.ForeignField('user', backref='created')
@property
def node_ids(self):
node_ids = [node._id for node in self.nodes]
return node_ids
def node_scale(self, node):
# node may be None if previous node's parent is deleted
if node is None or node.parent_id not in self.node_ids:
return -40
else:
offset = 20 if node.parent_node is not None else 0
return offset + self.node_scale(node.parent_node)
def node_icon(self, node):
if node.category == 'project':
node_type = "reg-project" if node.is_registration else "project"
else:
node_type = "reg-component" if node.is_registration else "component"
return "/static/img/hgrid/{0}.png".format(node_type)
def to_json(self):
return {
"id": self._id,
"date_created": self.date_created.strftime('%m/%d/%Y %I:%M %p UTC'),
"key": self.key,
"name": self.name,
"creator": {'fullname': self.creator.fullname, 'url': self.creator.profile_url},
"nodes": [{'title': x.title, 'url': x.url, 'scale': str(self.node_scale(x)) + 'px', 'imgUrl': self.node_icon(x)}
for x in self.nodes if not x.is_deleted],
"anonymous": self.anonymous
}
|
AndrewSallans/osf.io
|
website/project/model.py
|
Python
|
apache-2.0
| 91,883
|
[
"VisIt"
] |
67eee369bf114609c08c74001181954773183772c648ef90396d391fbb84f17f
|
# -*- coding: utf-8 -*-
"""
Functions which model peaks and decorator to allow those functions
to be translated and scaled and have the bounds of their parameters
respected.
Unless otherwise noted, the formulation of how each function is
written is taken from DiMarco & Bombi 2001 J Chrom A
Example:
t = np.linspace(-10,10,200)
peak = gaussian(t, x=2, w=2)
fitted_peak = lorentzian(t, **fit_to(lorentzian, t, peak))
plt.plot(t, peak, 'k-', t, fitted_peak, 'r-')
plt.show()
"""
import inspect
from functools import wraps
import numpy as np
from numpy import exp, sqrt, abs, log
from scipy.special import erfc, i1, gamma
# These functions allow us to use commonsense notation
# in labelling parameter bounds. The sqrt is to allow
# the bounded parameter to be multiplied without fear of it
# rounding outside the bounds.
openhi = lambda i: np.nextafter(i, -1)
openlow = lambda i: i + np.sqrt(np.nextafter(i, 1) - i)
def bounds(**kwargs):
"""
Using this function as a decorator allow us to label which
parameters of a function shouldn't have their values allowed
to fall outside of a specific range.
"""
def wrap(f):
f._pbounds = kwargs
return f
return wrap
def peak_model(f):
"""
Given a function that models a peak, add scale and location arguments to
For all functions, v is vertical offset, h is height
x is horizontal offset (1st moment), w is width (2nd moment),
s is skewness (3rd moment), e is excess (4th moment)
"""
@wraps(f)
def wrapped_f(t, **kw):
# load kwargs with default values
# do this here instead of in the def because we want to parse
# all of kwargs later to copy values to pass into f
def_vals = {'v': 0.0, 'h': 1.0, 'x': 0.0, 'w': 1.0, 's': 1.1, 'e': 1.0}
for v in def_vals:
if v not in kw:
kw[v] = def_vals[v]
# this copies all of the defaults into what the peak function needs
anames, _, _, _ = inspect.getargspec(f)
fkw = dict([(arg, kw[arg]) for arg in anames if arg in kw])
# some functions use location or width parameters explicitly
# if not, adjust the timeseries accordingly
ta = t
if 'x' not in anames:
ta = ta - kw['x']
if 'w' not in anames:
ta = ta / kw['w']
# finally call the function
mod = f(ta, **fkw)
        # recalculate, making the peak maximize at x
mod = f(ta + ta[mod.argmax()], **fkw)
return kw['v'] + kw['h'] / max(mod) * mod
args = set(['v', 'h', 'x', 'w'])
anames, _, _, _ = inspect.getargspec(f)
wrapped_f._peakargs = list(args.union([a for a in anames \
if a not in ('t', 'r')]))
return wrapped_f
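# Decorator usage sketch (hypothetical model function; shown as comments only):
# @bounds stores its keyword arguments on the wrapped function as `_pbounds`,
# and @peak_model adds the common v/h/x/w keyword arguments.
#
#     @bounds(w=(openlow(0.), np.inf))
#     @peak_model
#     def my_peak(t, w):
#         return exp(-abs(t) / w)
#
#     my_peak._pbounds    # -> {'w': (...)}
#     my_peak._peakargs   # includes 'v', 'h', 'x', 'w'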
@peak_model
def bigaussian(t, w, s):
#for an example of use: http://www.biomedcentral.com/1471-2105/11/559
# Di Marco & Bombi use formulation with w1 & w2, but it
# looks better to use formulation with w and s
w1, w2 = w * exp(-s) / (1 + exp(-s)), w / (1 + exp(-s))
y = np.empty(len(t))
y[t < 0] = exp(-t[t < 0] ** 2 / (2 * w1 ** 2)) / sqrt(2 * np.pi)
y[t >= 0] = exp(-t[t >= 0] ** 2 / (2 * w2 ** 2)) / sqrt(2 * np.pi)
return y
@peak_model
def box(t):
y = np.zeros(len(t))
y[np.logical_and(t > -0.5, t < 0.5)] = 1.0
return y
@bounds(w=(openlow(0.), np.inf), s=(1., np.inf))
@peak_model
def exp_mod_gaussian(t, w, s):
#http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
exp_t = exp((w ** 2 - 2 * s * t) / (2 * s ** 2))
erf_t = erfc((w ** 2 - s * t) / (s * w))
return (w ** 1.5) / (1.414214 * s) * exp_t * erf_t
@peak_model
def extreme_value(t):
return exp(-exp(-t) - t + 1)
@bounds(w=(openlow(0.), np.inf), s=(1., np.inf))
@peak_model
def gamma_dist(t, w, s):
# from Wikipedia: not the same as Di Marco & Bombi's formulation
# s > 1
y = np.zeros(len(t))
y[t > 0] = t[t > 0] ** (s - 1) * exp(-t[t > 0] / w) / (w ** s * gamma(s))
return y
@peak_model
def gaussian(t):
"""
Gaussian
"""
return exp(-0.5 * t ** 2)
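# Call sketch (values are arbitrary): the @peak_model wrapper accepts the
# shared v/h/x/w keywords even though `gaussian` itself only takes `t`.
#
#     t = np.linspace(-10, 10, 200)
#     y = gaussian(t, x=2.0, w=1.5, h=3.0, v=0.1)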
@bounds(w=(openlow(0.), np.inf), x=(openlow(0.), np.inf))
@peak_model
def giddings(t, w, x):
print(w, x)
# w != 0
y = np.zeros(len(t))
y[t > 0] = (1. / w) * sqrt(x / t[t > 0]) * exp((t[t > 0] + x) / -w)
#TODO: "overflow encountered in i1"
#y[t > 0] *= i1(2. * sqrt(x * t[t > 0]) / w)
#trying to keep the shape, but not allow such high numbers?
y[t > 0] *= i1(np.linspace(2, 10, sum(t > 0)))
return y
@bounds(w=(openlow(0.1), np.inf), s=(openlow(0.), np.inf))
@peak_model
def haarhoffvanderlinde(t, w, s):
# s here = s * z in Di Marco & Bombi
# w, s != 0
y = w * exp(-0.5 * (t / w) ** 2) / (s * sqrt(2 * np.pi))
print(s, w)
y /= 1. / (exp(s / w ** 2) - 1.) + 0.5 * erfc(t / (w * sqrt(2)))
return y
@bounds(w=(openlow(0.), np.inf), s=(openlow(1.), np.inf))
@peak_model
def lognormal(t, w, s, r=2.):
# r is the ratio between h and the height at
# which s is computed: normally 2.
y = np.zeros(len(t))
#TODO: if log(s) rounds to 0, big problems here
lt = -log(r) / log(s) ** 2
# try to adjust timing so peak stays centered at 0
ta = t + (w - 1) / (1.12383 * s - 0.780647)
y[ta > 0] = exp(lt * log(ta[ta > 0] / w * (s ** 2 - 1) / s) ** 2)
return y
@peak_model
def lorentzian(t, a):
# from Wikipedia: not the same as Di Marco & Bombi's formulation
#return 1. / (1. + 4. * t ** 2)
return a / (np.pi * (a ** 2 + t ** 2))
@peak_model
def papai_pap(t, s, e):
#s is skewness, e is excess
y = np.zeros(len(t))
ft = t[t > 0]
y[t > 0] = 1 + (s / 6.) * (ft ** 2 - 3. * ft)
y[t > 0] -= (e / 24.) * (ft ** 4 - 6 * ft ** 2 + 3)
y[t > 0] *= exp(-0.5 * ft)
return y
@peak_model
def parabola(t):
y = np.zeros(len(t))
mask = np.logical_and(t > -1, t < 1)
y[mask] = 1 - t[mask] ** 2
return y
@bounds(a=(openlow(0.), np.inf))
@peak_model
def pearsonVII(t, a):
return (1 + 4 * t ** 2 * (2 ** (1 / a) - 1)) ** -a
@peak_model
def poisson(t, a):
# a > 1
y = np.zeros(len(t))
y[t > 0] = exp((1 - a) * (t[t > 0] - log(t[t > 0]) - 1))
return y
@bounds(s=(openlow(0.), np.inf))
@peak_model
def studentt(t, s):
# s != 0
return (1 + (t ** 2) / s) ** (-0.5 * (s + 1))
@peak_model
def triangle(t):
y = np.zeros(len(t))
mask = np.logical_and(t > -0.5, t < 0.5)
y[mask] = 1 - abs(t[mask]) / 0.5
return y
@bounds(a=(openlow(1.), np.inf))
@peak_model
def weibull3(t, a):
#TODO: doesn't work?
y = np.zeros(len(t))
at = (a - 1.) / a
tt = t[t > 0] + ((a - 1.) / a) ** (1. / a)
y[t > 0] = at ** at * tt ** (a - 1.) * exp(-tt ** a + at)
return y
## FUNCTIONS TO DO
#def chesler_cram_a(t, a, b, c, d):
#def chesler_cram_b(t, a, b, c, d, e):
#def cumulative(t, w, a):
#def f_variance(t, s1, s2):
#def gladney_dowden_a(t, w, s):
#def gladney_dowden_b(t, w, a, b):
#def haldna_phi(t, w, s):
#def intermediate(t, a, b):
#def li_a(t, w):
#def li_b(t, w, a1, a2, b1, b2):
#def losev(t, w1, w2, a):
#def nonlinearchromatography(t, x, w, s):
# # also r & v?
#def pearsonIV(t, w, s1, s2):
#def pearsonIVa(t, w, s):
#def pearsonIVb(t, w, s):
#def pseudovoight1(t, a):
#def pseudovoight2(t, s, a):
#def pulse(t, a):
def gram_charlier(t, w, *n):
#TODO: implement this; Berberan-Santos '07 has
# ways to calculate cumulant values
raise NotImplementedError
def edgeworth_cramer(t, *n):
#y = exp(-0.5 * t ** 2)
raise NotImplementedError
peak_models = [bigaussian, box, exp_mod_gaussian, extreme_value, gamma_dist,
gaussian, giddings, haarhoffvanderlinde, lognormal, lorentzian,
papai_pap, parabola, pearsonVII, poisson, studentt, triangle,
weibull3]
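# --- illustrative sketch (not part of the original module) ---
# The models above all operate on a unitless, centered time axis `t`; as a
# minimal, self-contained illustration, the bare Gaussian shape used by
# gaussian() can be reproduced directly with numpy (the wrapping added by
# the @peak_model decorator, defined earlier in this file, is not assumed):
#
#   import numpy as np
#   t = np.linspace(-5, 5, 101)
#   y = np.exp(-0.5 * t ** 2)   # same expression as in gaussian() above
#   # y peaks at 1.0 for t = 0 and decays symmetrically on both sides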
|
molliewebb/aston
|
aston/peaks/PeakModels.py
|
Python
|
gpl-3.0
| 7,849
|
[
"Gaussian"
] |
27972f11599adb407617b93e3bcaa4210f321e1bd34c3029250a3cc23a9dfdf4
|
"""Pylint plugin for checking quote type on strings."""
from __future__ import absolute_import
import tokenize
from pylint.__pkginfo__ import numversion as pylint_version
from pylint.checkers import BaseTokenChecker
from pylint.interfaces import IAstroidChecker, ITokenChecker
CONFIG_OPTS = ('single', 'double')
SMART_CONFIG_OPTS = tuple('%s-avoid-escape' % c for c in CONFIG_OPTS)
QUOTES = ('\'', '"')
SINGLE_QUOTE_OPTS = dict(zip(CONFIG_OPTS, QUOTES))
SMART_QUOTE_OPTS = dict(zip(CONFIG_OPTS + SMART_CONFIG_OPTS, QUOTES + QUOTES))
TRIPLE_QUOTE_OPTS = dict(zip(CONFIG_OPTS, [q * 3 for q in QUOTES]))
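# For reference, the resulting lookup tables are:
#   SINGLE_QUOTE_OPTS == {'single': "'", 'double': '"'}
#   SMART_QUOTE_OPTS maps 'single'/'single-avoid-escape' to "'" and
#                    'double'/'double-avoid-escape' to '"'
#   TRIPLE_QUOTE_OPTS == {'single': "'''", 'double': '"""'}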
class StringQuoteChecker(BaseTokenChecker):
"""Pylint checker for the consistent use of characters in strings.
This checker will check for quote consistency among string literals,
triple quoted strings, and docstrings. Each of those three can be
configured individually to use either single quotes (') or double
quotes (").
Additionally, string literals can be configured to avoid escaped quotes, e.g.
enforcing single quotes (') most of the time, except when the string itself
contains a single quote, in which case double quotes (") are enforced.
"""
__implements__ = (ITokenChecker, IAstroidChecker, )
name = 'string_quotes'
msgs = {
'C4001': (
'Invalid string quote %s, should be %s',
'invalid-string-quote',
'Used when the string quote character does not match the '
'value configured in the `string-quote` option.'
),
'C4002': (
'Invalid triple quote %s, should be %s',
'invalid-triple-quote',
'Used when the triple quote characters do not match the '
'value configured in the `triple-quote` option.'
),
'C4003': (
'Invalid docstring quote %s, should be %s',
'invalid-docstring-quote',
'Used when the docstring quote characters do not match the '
'value configured in the `docstring-quote` option.'
)
}
options = (
(
'string-quote',
dict(
type='choice',
metavar='<{0}, {1}, {2} or {3}>'.format(*CONFIG_OPTS + SMART_CONFIG_OPTS),
default=CONFIG_OPTS[0],
choices=CONFIG_OPTS + SMART_CONFIG_OPTS,
help='The quote character for string literals.'
)
),
(
'triple-quote',
dict(
type='choice',
metavar='<{0} or {1}>'.format(*CONFIG_OPTS),
default=CONFIG_OPTS[0],
choices=CONFIG_OPTS,
help='The quote character for triple-quoted strings (non-docstring).'
)
),
(
'docstring-quote',
dict(
type='choice',
metavar='<{0} or {1}>'.format(*CONFIG_OPTS),
default=CONFIG_OPTS[1],
choices=CONFIG_OPTS,
help='The quote character for triple-quoted docstrings.'
)
)
)
# we need to check quote usage via tokenization, as the AST walk will
# only tell us what the doc is, but not how it is quoted. we need to
# store any triple quotes found during tokenization and check against
# these when performing the walk. if a triple-quote string matches
# a node's docstring, it is checked and removed from this collection.
# once we leave the module, any remaining triple quotes in this collection
# are checked as regular triple quote strings.
_tokenized_triple_quotes = {}
def visit_module(self, node):
"""Visit module and check for docstring quote consistency.
Args:
node: the module node being visited.
"""
self._process_for_docstring(node, 'module')
# pylint: disable=unused-argument
def leave_module(self, node):
"""Leave module and check remaining triple quotes.
Args:
node: the module node we are leaving.
"""
for triple_quote in self._tokenized_triple_quotes.values():
self._check_triple_quotes(triple_quote)
# after we are done checking these, clear out the triple-quote
# tracking collection so nothing is left over for the next module.
self._tokenized_triple_quotes = {}
def visit_classdef(self, node):
"""Visit class and check for docstring quote consistency.
Args:
node: the class node being visited.
"""
self._process_for_docstring(node, 'class')
def visit_functiondef(self, node):
"""Visit function and check for docstring quote consistency.
Args:
node: the function node being visited.
"""
self._process_for_docstring(node, 'function')
def visit_asyncfunctiondef(self, node):
"""Visit an asynchronous function and check for docstring quote consistency.
Args:
node: the async function node being visited.
"""
self._process_for_docstring(node, 'function')
def _process_for_docstring(self, node, node_type):
"""Check for docstring quote consistency.
Args:
node: the AST node being visited.
node_type: the type of node being operated on.
"""
# if there is no docstring, don't need to do anything.
if node.doc is not None:
# the module is everything, so to find the docstring, we
# iterate line by line from the start until the first element
# to find the docstring, as it cannot appear after the first
# element in the body.
if node_type == 'module':
# if there are no nodes that make up the body, then all we
# have is the module docstring
if not node.body:
# in this case, we should only have the module docstring
# parsed in the node, so the only record in the
# self._tokenized_triple_quotes dict will correspond to
# the module comment. this can vary by index depending
# on the presence of a shebang, encoding, etc at the top
# of the file.
for key in list(self._tokenized_triple_quotes.keys()):
quote_record = self._tokenized_triple_quotes.get(key)
if quote_record:
self._check_docstring_quotes(quote_record)
del self._tokenized_triple_quotes[key]
else:
for i in range(0, node.body[0].lineno):
quote_record = self._tokenized_triple_quotes.get(i)
if quote_record:
self._check_docstring_quotes(quote_record)
del self._tokenized_triple_quotes[i]
break
else:
# the node has a docstring so we check the tokenized triple
# quotes to find a matching docstring token that follows the
# function/class definition.
if not node.body:
# if there is no body to the class, the class def only
# contains the docstring, so the only quotes we are
# tracking should correspond to the class docstring.
lineno = self._find_docstring_line_for_no_body(node.fromlineno)
quote_record = self._tokenized_triple_quotes.get(lineno)
if quote_record:
self._check_docstring_quotes(quote_record)
del self._tokenized_triple_quotes[lineno]
else:
doc_row = self._find_docstring_line(node.fromlineno, node.tolineno)
quote_record = self._tokenized_triple_quotes.get(doc_row)
if quote_record:
self._check_docstring_quotes(quote_record)
del self._tokenized_triple_quotes[doc_row]
def _find_docstring_line_for_no_body(self, start):
"""Find the docstring associated with a definition with no body
in the node.
In these cases, the provided start and end line number for that
element are the same, so we must get the docstring based on the
sequential position of known docstrings.
Args:
start: the row where the class / function starts.
Returns:
int: the row number where the docstring is found.
"""
tracked = sorted(list(self._tokenized_triple_quotes.keys()))
for i in tracked:
if min(start, i) == start:
return i
return None
def _find_docstring_line(self, start, end):
"""Find the row where a docstring starts in a function or class.
This will search for the first match of a triple quote token in
row sequence from the start of the class or function.
Args:
start: the row where the class / function starts.
end: the row where the class / function ends.
Returns:
int: the row number where the docstring is found.
"""
for i in range(start, end + 1):
if i in self._tokenized_triple_quotes:
return i
return None
def process_tokens(self, tokens):
"""Process the token stream.
This is required to override the parent class' implementation.
Args:
tokens: the tokens from the token stream to process.
"""
for tok_type, token, (start_row, start_col), _, _ in tokens:
if tok_type == tokenize.STRING:
# 'token' is the whole un-parsed token; we can look at the start
# of it to see whether it's a raw or unicode string etc.
self._process_string_token(token, start_row, start_col)
def _process_string_token(self, token, start_row, start_col):
"""Internal method for identifying and checking string tokens
from the token stream.
Args:
token: the token to check.
start_row: the line on which the token was found.
start_col: the column on which the token was found.
"""
for i, char in enumerate(token):
if char in QUOTES:
break
# pylint: disable=undefined-loop-variable
# ignore prefix markers like u, b, r
norm_quote = token[i:]
# triple-quote strings
if len(norm_quote) >= 3 and norm_quote[:3] in TRIPLE_QUOTE_OPTS.values():
self._tokenized_triple_quotes[start_row] = (token, norm_quote[:3], start_row, start_col)
return
# single quote strings
preferred_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)
# Smart case.
if self.config.string_quote in SMART_CONFIG_OPTS:
other_quote = next(q for q in QUOTES if q != preferred_quote)
# If using the other quote avoids escaping, we switch to the other quote.
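# e.g. with string-quote = single-avoid-escape, the token "don't" keeps its
# double quotes: the preferred quote (') appears in the string body while
# the other quote (") does not, so no escaping is required.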
if preferred_quote in token[i + 1:-1] and other_quote not in token[i + 1:-1]:
preferred_quote = other_quote
if norm_quote[0] != preferred_quote:
self._invalid_string_quote(
quote=norm_quote[0],
row=start_row,
correct_quote=preferred_quote,
col=start_col,
)
def _check_triple_quotes(self, quote_record):
"""Check if the triple quote from tokenization is valid.
Args:
quote_record: a tuple containing the info about the string
from tokenization, giving the (token, quote, row number, column).
"""
_, triple, row, col = quote_record
if triple != TRIPLE_QUOTE_OPTS.get(self.config.triple_quote):
self._invalid_triple_quote(triple, row, col)
def _check_docstring_quotes(self, quote_record):
"""Check if the docstring quote from tokenization is valid.
Args:
quote_record: a tuple containing the info about the string
from tokenization, giving the (token, quote, row number, column).
"""
_, triple, row, col = quote_record
if triple != TRIPLE_QUOTE_OPTS.get(self.config.docstring_quote):
self._invalid_docstring_quote(triple, row, col)
def _invalid_string_quote(self, quote, row, correct_quote=None, col=None):
"""Add a message for an invalid string literal quote.
Args:
quote: The quote characters that were found.
row: The row number the quote character was found on.
correct_quote: The quote characters that are required. If None
(default), will use the one from the config.
col: The column the quote characters were found on.
"""
if not correct_quote:
correct_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)
self.add_message(
'invalid-string-quote',
line=row,
args=(quote, correct_quote),
**self.get_offset(col)
)
@staticmethod
def get_offset(col):
"""Return kwargs to pass to add_message.
col_offset is not supported by add_message in all versions of pylint,
so attempt to determine whether it is supported; if so, return a
dictionary containing col_offset, otherwise return {}.
Args:
col: The integer column offset to possibly include in
the kwargs.
Returns:
dict: Keyword arguments to pass to add_message
"""
if (2, 2, 2) < pylint_version:
return {'col_offset': col}
return {}
def _invalid_triple_quote(self, quote, row, col=None):
"""Add a message for an invalid triple quote.
Args:
quote: The quote characters that were found.
row: The row number the quote characters were found on.
col: The column the quote characters were found on.
"""
self.add_message(
'invalid-triple-quote',
line=row,
args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.triple_quote)),
**self.get_offset(col)
)
def _invalid_docstring_quote(self, quote, row, col=None):
"""Add a message for an invalid docstring quote.
Args:
quote: The quote characters that were found.
row: The row number the quote characters were found on.
col: The column the quote characters were found on.
"""
self.add_message(
'invalid-docstring-quote',
line=row,
args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.docstring_quote)),
**self.get_offset(col)
)
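# Hedged sketch: a pylint plugin is normally exposed through a module-level
# ``register`` hook (for this package that hook most likely lives in its
# ``__init__.py``; the commented function below only illustrates the usual
# pattern and is not part of the original file).
#
# def register(linter):
#     """Register the checker with the running linter."""
#     linter.register_checker(StringQuoteChecker(linter))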
|
endlessm/chromium-browser
|
third_party/chromite/third_party/pylint-quotes/pylint_quotes/checker.py
|
Python
|
bsd-3-clause
| 14,978
|
[
"VisIt"
] |
45a9eb969887daee134b37a82f9a720a594f2191998e2091240ab94bb472d38d
|
# -*- coding: utf-8 -*-
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import numpy as np
import moose
import moose.fixXreacs as fixXreacs
def makeModel():
# create container for model
num = 1 # number of compartments
model = moose.Neutral( '/model' )
compartment = moose.CylMesh( '/model/compartment' )
compartment.x1 = 1.0e-6 # Set it to a 1 micron single-voxel cylinder
# create molecules and reactions
s = moose.Pool( '/model/compartment/s' )
rXfer = moose.Reac( '/model/compartment/rXfer' )
#####################################################################
# Put in endo compartment. Add molecule s
endo = moose.EndoMesh( '/model/endo' )
endo.isMembraneBound = True
endo.surround = compartment
es = moose.Pool( '/model/endo/s' )
#####################################################################
moose.connect( rXfer, 'sub', s, 'reac' )
moose.connect( rXfer, 'prd', es, 'reac' )
volRatio = compartment.volume / endo.volume
rXfer.Kf = 0.04 # 0.04/sec
rXfer.Kb = 0.02 # 0.02/sec
#####################################################################
fixXreacs.fixXreacs( '/model' )
#fixXreacs.restoreXreacs( '/model' )
#fixXreacs.fixXreacs( '/model' )
#####################################################################
# Make solvers
ksolve = moose.Ksolve( '/model/compartment/ksolve' )
dsolve = moose.Dsolve( '/model/dsolve' )
eksolve = moose.Ksolve( '/model/endo/ksolve' )
edsolve = moose.Dsolve( '/model/endo/dsolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = compartment
stoich.ksolve = ksolve
stoich.dsolve = dsolve
stoich.path = "/model/compartment/##"
assert( dsolve.numPools == 2 )
s.vec.concInit = [1.0]*num
estoich = moose.Stoich( '/model/endo/stoich' )
estoich.compartment = endo
estoich.ksolve = eksolve
estoich.dsolve = edsolve
estoich.path = "/model/endo/##"
assert( edsolve.numPools == 1 )
edsolve.buildMeshJunctions( dsolve )
plot1 = moose.Table2( '/model/plot1' )
plot2 = moose.Table2( '/model/plot2' )
moose.connect( '/model/plot1', 'requestOut', s, 'getN' )
moose.connect( '/model/plot2', 'requestOut', es, 'getN' )
plot3 = moose.Table2( '/model/plot3' )
plot4 = moose.Table2( '/model/plot4' )
moose.connect( '/model/plot3', 'requestOut', s, 'getConc' )
moose.connect( '/model/plot4', 'requestOut', es, 'getConc' )
def almostEq( a, b ):
return abs(a-b)/(a+b) < 5e-5
def test_xreac4():
runtime = 100
makeModel()
moose.reinit()
moose.start( runtime )
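# Expected steady state: for the simple reversible transfer s <-> es with
# Kf = 0.04/s and Kb = 0.02/s, the fluxes balance when Kf*[s] = Kb*[es],
# i.e. [es] = (Kf/Kb)*[s] = 2*[s], which is what the assertion below checks.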
assert( almostEq( 2.0 * moose.element( 'model/compartment/s' ).conc,
moose.element( '/model/endo/s' ).conc ) )
moose.delete( '/model' )
def main():
test_xreac4()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
dilawar/moose-core
|
tests/core/test_Xreacs4.py
|
Python
|
gpl-3.0
| 3,342
|
[
"MOOSE"
] |
0681460b5984f7b579e590cd6cb5bb712a3745c9aebba3ff4616374114571b7c
|
# Copyright (C) 2010-2021 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd.math
import espressomd.lb
import espressomd.lbboundaries
import espressomd.observables
import espressomd.shapes
import espressomd.accumulators
AGRID = .5
VISC = 2.7
DENS = 1.7
TIME_STEP = 0.1
BOX_L = 16.0
EFFECTIVE_RADIUS = BOX_L / 2.0 - 1.0
LB_PARAMS = {'agrid': AGRID,
'dens': DENS,
'visc': VISC,
'tau': TIME_STEP}
OBS_PARAMS = {'n_r_bins': 12,
'n_phi_bins': 1,
'n_z_bins': 1,
'min_r': 1.0,
'min_phi': -np.pi,
'min_z': 0.0,
'max_r': EFFECTIVE_RADIUS,
'max_phi': np.pi,
'max_z': BOX_L / 2.,
'sampling_density': 1.0}
def taylor_couette(v1, v2, r1, r2):
# Taylor-Couette equation
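# For laminar flow between concentric cylinders rotating at omega1 (inner,
# radius r1) and omega2 (outer, radius r2), the azimuthal velocity profile
# is v_phi(r) = a * r + b / r; this helper returns the coefficients (a, b),
# which the test later compares against the measured profile.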
omega1 = v1 / r1
omega2 = v2 / r2
eta = r1 / r2
a = (omega2 - omega1 * eta**2) / (1. - eta**2)
b = r1**2 * (omega1 - omega2) / (1. - eta**2)
return a, b
class LBCircularCouetteCommon:
"""
Check the lattice-Boltzmann velocity-driven flow in a cylindrical
constraint by comparing to the analytical solution.
"""
system = espressomd.System(box_l=[BOX_L, BOX_L, BOX_L / 2.])
system.time_step = TIME_STEP
system.cell_system.skin = 0.4 * AGRID
params = {'axis': [0, 0, 1],
'orientation': [1, 0, 0]}
def tearDown(self):
self.system.actors.clear()
self.system.lbboundaries.clear()
def test_taylor_couette_flow(self):
"""
Rotate a shell filled with fluid with a non-rotating rod at the center.
The solution to the Navier-Stokes equation, assuming an infinite rod,
is the Taylor-Couette equation.
"""
# disable periodicity except in the flow direction
self.system.periodicity = np.logical_not(self.params['axis'])
lbf = self.lb_class(**LB_PARAMS)
self.system.actors.add(lbf)
# create an outer cylinder that is rotating; this is achieved by
# creating an octagon with a slip velocity parallel to each face
sc = np.cos(np.pi / 4.)
normals = [
[-1, 0, 0],
[0, -1, 0],
[1, 0, 0],
[0, 1, 0],
[-sc, sc, 0],
[sc, -sc, 0],
[sc, sc, 0],
[-sc, -sc, 0],
]
dists = [
2. * AGRID - BOX_L,
2. * AGRID - BOX_L,
2. * AGRID,
2. * AGRID,
2. * AGRID - BOX_L / 2.,
2. * AGRID - BOX_L / 2.,
2. * AGRID + BOX_L * (np.sqrt(2.) - 1.) / 2.,
2. * AGRID - BOX_L * (1. + (np.sqrt(2.) - 1.) / 2.),
]
# outer cylinder with tangential slip velocity
slip_vel = 0.01
for normal, dist in zip(normals, dists):
self.system.lbboundaries.add(espressomd.lbboundaries.LBBoundary(
shape=espressomd.shapes.Wall(normal=normal, dist=dist),
velocity=slip_vel * np.cross(normal, self.params['axis'])))
# inner cylinder without slip velocity
self.system.lbboundaries.add(espressomd.lbboundaries.LBBoundary(
shape=espressomd.shapes.Cylinder(
center=self.system.box_l / 2.0, axis=self.params['axis'],
direction=1, radius=1., length=BOX_L * 1.5)))
# the system needs to be fully symmetric
mask = np.copy(lbf[:, :, :].boundary.astype(bool))
np.testing.assert_array_equal(mask, np.flip(mask, axis=0))
np.testing.assert_array_equal(mask, np.flip(mask, axis=1))
np.testing.assert_array_equal(mask, np.flip(mask, axis=2))
# the system needs to be closed in the x and y directions
np.testing.assert_array_equal(mask[0, :, :], 1)
np.testing.assert_array_equal(mask[-1, :, :], 1)
np.testing.assert_array_equal(mask[:, 0, :], 1)
np.testing.assert_array_equal(mask[:, -1, :], 1)
ctp = espressomd.math.CylindricalTransformationParameters(
center=[BOX_L / 2.0, BOX_L / 2.0, 0.0],
axis=self.params['axis'],
orientation=self.params['orientation'])
local_obs_params = OBS_PARAMS.copy()
local_obs_params['transform_params'] = ctp
obs = espressomd.observables.CylindricalLBVelocityProfile(
**local_obs_params)
# simulate until profile converges
mid_indices = [int((EFFECTIVE_RADIUS / AGRID) / 2) - 2,
int((BOX_L / AGRID) / 2), int((BOX_L / 2. / AGRID) / 2)]
diff = float("inf")
old_val = lbf[mid_indices].velocity[1]
while diff > 1e-6:
self.system.integrator.run(10)
new_val = lbf[mid_indices].velocity[1]
diff = abs(new_val - old_val)
old_val = new_val
r = obs.bin_centers()[:, :, :, 0].reshape(-1)
v_r, v_phi, v_z = np.copy(obs.calculate()).reshape([-1, 3]).T
# check velocity is zero for the radial and axial components
np.testing.assert_allclose(v_r, 0., atol=1e-6)
np.testing.assert_allclose(v_z, 0., atol=1e-8)
# check azimuthal velocity in the Couette regime
a_ref, b_ref = taylor_couette(
0.0, slip_vel, 1., BOX_L / 2. - 2. * AGRID)
v_phi_ref = a_ref * r + b_ref / r
v_phi_drift = np.mean(v_phi) - np.mean(v_phi_ref)
np.testing.assert_allclose(v_phi_drift, 0., atol=5e-4)
np.testing.assert_allclose(v_phi - v_phi_drift, v_phi_ref, atol=1e-3)
@utx.skipIfMissingFeatures(['LB_BOUNDARIES'])
class LBCPUCircularCouette(LBCircularCouetteCommon, ut.TestCase):
"""Test for the CPU implementation of the LB."""
lb_class = espressomd.lb.LBFluid
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(['LB_BOUNDARIES_GPU'])
class LBGPUCircularCouette(LBCircularCouetteCommon, ut.TestCase):
"""Test for the GPU implementation of the LB."""
lb_class = espressomd.lb.LBFluidGPU
if __name__ == '__main__':
ut.main()
|
espressomd/espresso
|
testsuite/python/lb_circular_couette.py
|
Python
|
gpl-3.0
| 6,742
|
[
"ESPResSo"
] |
f8faff72809c331223318f2abcc49617041a3bb29ce0205d7c2b86a7572e4bbe
|
#!/usr/bin/env python2
# Try to determine how much RAM is currently being used per program.
# Note per _program_, not per process. So for example this script
# will report RAM used by all httpd process together. In detail it reports:
# sum(private RAM for program processes) + sum(Shared RAM for program processes)
# The shared RAM is problematic to calculate, and this script automatically
# selects the most accurate method available for your kernel.
# Author: P@draigBrady.com
# Source: http://www.pixelbeat.org/scripts/ps_mem.py
# V1.0 06 Jul 2005 Initial release
# V1.1 11 Aug 2006 root permission required for accuracy
# V1.2 08 Nov 2006 Add total to output
# Use KiB,MiB,... for units rather than K,M,...
# V1.3 22 Nov 2006 Ignore shared col from /proc/$pid/statm for
# 2.6 kernels up to and including 2.6.9.
# There it represented the total file backed extent
# V1.4 23 Nov 2006 Remove total from output as it's meaningless
# (the shared values overlap with other programs).
# Display the shared column. This extra info is
# useful, especially as it overlaps between programs.
# V1.5 26 Mar 2007 Remove redundant recursion from human()
# V1.6 05 Jun 2007 Also report number of processes with a given name.
# Patch from riccardo.murri@gmail.com
# V1.7 20 Sep 2007 Use PSS from /proc/$pid/smaps if available, which
# fixes some over-estimation and allows totalling.
# Enumerate the PIDs directly rather than using ps,
# which fixes the possible race between reading
# RSS with ps, and shared memory with this program.
# Also we can show non truncated command names.
# V1.8 28 Sep 2007 More accurate matching for stats in /proc/$pid/smaps
# as otherwise could match libraries causing a crash.
# Patch from patrice.bouchand.fedora@gmail.com
# V1.9 20 Feb 2008 Fix invalid values reported when PSS is available.
# Reported by Andrey Borzenkov <arvidjaar@mail.ru>
# V2.0 15 Jan 2010 From a report by Brock Noland <brockn@gmail.com>
# about overreporting of RAM usage of his java progs,
# handle linux clones that have pids. I.E. that have
# CLONE_VM specified without CLONE_THREAD.
# V2.1 20 Jan 2010 Append [deleted] or [updated] to programs which are
# no longer on disk or have a new version available.
# Add a --split-args option to group programs based
# on the full command line, which could be used
# to monitor separate "pmon" processes for example:
# ps_mem.py | grep [p]mon
# V2.2 16 Feb 2010 Support python 3.
# Patch from Brian Harring <ferringb@gmail.com>
# Notes:
#
# All interpreted programs where the interpreter is started
# by the shell or with env, will be merged to the interpreter
# (as that's what's given to exec). For e.g. all python programs
# starting with "#!/usr/bin/env python" will be grouped under python.
# You can change this by using the full command line but that will
# have the undesirable affect of splitting up programs started with
# differing parameters (for e.g. mingetty tty[1-6]).
#
# For 2.6 kernels up to and including 2.6.13 and later 2.4 redhat kernels
# (rmap vm without smaps) it can not be accurately determined how many pages
# are shared between processes in general or within a program in our case:
# http://lkml.org/lkml/2005/7/6/250
# A warning is printed if overestimation is possible.
# In addition for 2.6 kernels up to 2.6.9 inclusive, the shared
# value in /proc/$pid/statm is the total file-backed extent of a process.
# We ignore that, introducing more overestimation, again printing a warning.
# Since kernel 2.6.23-rc8-mm1 PSS is available in smaps, which allows
# us to calculate a more accurate value for the total RAM used by programs.
#
# Programs that use CLONE_VM without CLONE_THREAD are discounted by assuming
# they're the only programs that have the same /proc/$PID/smaps file for
# each instance. This will fail if there are multiple real instances of a
# program that then use CLONE_VM without CLONE_THREAD, or if a clone changes
# its memory map while we're checksumming each /proc/$PID/smaps.
#
# I don't take account of memory allocated for a program
# by other programs. For e.g. memory used in the X server for
# a program could be determined, but is not.
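# Worked example (illustrative numbers only): if a program has three
# processes with private memory of 10, 12 and 8 MiB and a PSS-derived
# shared portion of 4 MiB each, the figure reported for that program is
# (10 + 12 + 8) + (4 + 4 + 4) = 42 MiB. Without PSS only the largest
# shared value is counted, i.e. (10 + 12 + 8) + 4 = 34 MiB.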
import sys, os, string
try:
# md5 module is deprecated on python 2.6
# so try the newer hashlib first
import hashlib
md5_new = hashlib.md5
except ImportError:
import md5
md5_new = md5.new
if os.geteuid() != 0:
sys.stderr.write("Sorry, root permission required.\n");
sys.exit(1)
split_args=False
if len(sys.argv)==2 and sys.argv[1] == "--split-args":
split_args = True
PAGESIZE=os.sysconf("SC_PAGE_SIZE")/1024 #KiB
our_pid=os.getpid()
#(major,minor,release)
def kernel_ver():
kv=open("/proc/sys/kernel/osrelease", "rt").readline().split(".")[:3]
for char in "-_":
kv[2]=kv[2].split(char)[0]
return (int(kv[0]), int(kv[1]), int(kv[2]))
kv=kernel_ver()
have_pss=0
#return Private,Shared
#Note shared is always a subset of rss (trs is not always)
def getMemStats(pid):
global have_pss
mem_id = pid #unique
Private_lines=[]
Shared_lines=[]
Pss_lines=[]
Rss=int(open("/proc/"+str(pid)+"/statm", "rt").readline().split()[1])*PAGESIZE
if os.path.exists("/proc/"+str(pid)+"/smaps"): #stat
digester = md5_new()
for line in open("/proc/"+str(pid)+"/smaps", "rb").readlines(): #open
# Note we checksum smaps as maps is usually but
# not always different for separate processes.
digester.update(line)
line = line.decode("ascii")
if line.startswith("Shared"):
Shared_lines.append(line)
elif line.startswith("Private"):
Private_lines.append(line)
elif line.startswith("Pss"):
have_pss=1
Pss_lines.append(line)
mem_id = digester.hexdigest()
Shared=sum([int(line.split()[1]) for line in Shared_lines])
Private=sum([int(line.split()[1]) for line in Private_lines])
#Note Shared + Private = Rss above
#The Rss in smaps includes video card mem etc.
if have_pss:
pss_adjust=0.5 #add 0.5 KiB as this is the average error due to truncation
Pss=sum([float(line.split()[1])+pss_adjust for line in Pss_lines])
Shared = Pss - Private
elif (2,6,1) <= kv <= (2,6,9):
Shared=0 #lots of overestimation, but what can we do?
Private = Rss
else:
Shared=int(open("/proc/"+str(pid)+"/statm", "rt").readline().split()[2])
Shared*=PAGESIZE
Private = Rss - Shared
return (Private, Shared, mem_id)
def getCmdName(pid):
cmdline = open("/proc/%d/cmdline" % pid, "rt").read().split("\0")
if cmdline[-1] == '' and len(cmdline) > 1:
cmdline = cmdline[:-1]
path = os.path.realpath("/proc/%d/exe" % pid) #exception for kernel threads
if split_args:
return " ".join(cmdline)
if path.endswith(" (deleted)"):
path = path[:-10]
if os.path.exists(path):
path += " [updated]"
else:
#The path could have prelink stuff so try cmdline
#which might have the full path present. This helped for:
#/usr/libexec/notification-area-applet.#prelink#.fX7LCT (deleted)
if os.path.exists(cmdline[0]):
path = cmdline[0] + " [updated]"
else:
path += " [deleted]"
exe = os.path.basename(path)
cmd = open("/proc/%d/status" % pid, "rt").readline()[6:-1]
if exe.startswith(cmd):
cmd=exe #show non truncated version
#Note because we show the non truncated name
#one can have separated programs as follows:
#584.0 KiB + 1.0 MiB = 1.6 MiB mozilla-thunder (exe -> bash)
# 56.0 MiB + 22.2 MiB = 78.2 MiB mozilla-thunderbird-bin
return cmd
cmds={}
shareds={}
mem_ids={}
count={}
for pid in os.listdir("/proc/"):
if not pid.isdigit():
continue
pid = int(pid)
if pid == our_pid:
continue
try:
cmd = getCmdName(pid)
except:
#permission denied or
#kernel threads don't have exe links or
#process gone
continue
try:
private, shared, mem_id = getMemStats(pid)
except:
continue #process gone
if shareds.get(cmd):
if have_pss: #add shared portion of PSS together
shareds[cmd]+=shared
elif shareds[cmd] < shared: #just take largest shared val
shareds[cmd]=shared
else:
shareds[cmd]=shared
cmds[cmd]=cmds.setdefault(cmd,0)+private
if cmd in count:
count[cmd] += 1
else:
count[cmd] = 1
mem_ids.setdefault(cmd,{}).update({mem_id:None})
#Add shared mem for each program
total=0
for cmd in cmds:
cmd_count = count[cmd]
if len(mem_ids[cmd]) == 1 and cmd_count > 1:
# Assume this program is using CLONE_VM without CLONE_THREAD
# so only account for one of the processes
cmds[cmd] /= cmd_count
if have_pss:
shareds[cmd] /= cmd_count
cmds[cmd]=cmds[cmd]+shareds[cmd]
total+=cmds[cmd] #valid if PSS available
if sys.version_info >= (2, 6):
sort_list = sorted(cmds.items(), key=lambda x:x[1])
else:
sort_list = cmds.items()
sort_list.sort(lambda x,y:cmp(x[1],y[1]))
# list wrapping is redundant on <py3k, but needed for >=py3k
sort_list=list(filter(lambda x:x[1],sort_list)) #get rid of zero sized processes
#The following matches "du -h" output
#see also human.py
def human(num, power="Ki"):
powers=["Ki","Mi","Gi","Ti"]
while num >= 1000: #4 digits
num /= 1024.0
power=powers[powers.index(power)+1]
return "%.1f %s" % (num,power)
def cmd_with_count(cmd, count):
if count>1:
return "%s (%u)" % (cmd, count)
else:
return cmd
sys.stdout.write(" Private + Shared = RAM used\tProgram \n\n")
for cmd in sort_list:
sys.stdout.write("%8sB + %8sB = %8sB\t%s\n" % (human(cmd[1]-shareds[cmd[0]]),
human(shareds[cmd[0]]), human(cmd[1]),
cmd_with_count(cmd[0], count[cmd[0]])))
if have_pss:
sys.stdout.write("%s\n%s%8sB\n%s\n" % ("-" * 33,
" " * 24, human(total), "=" * 33))
sys.stdout.write("\n Private + Shared = RAM used\tProgram \n\n")
#Warn of possible inaccuracies
#2 = accurate & can total
#1 = accurate only considering each process in isolation
#0 = some shared mem not reported
#-1= all shared mem not reported
def shared_val_accuracy():
"""http://wiki.apache.org/spamassassin/TopSharedMemoryBug"""
if kv[:2] == (2,4):
if open("/proc/meminfo", "rt").read().find("Inact_") == -1:
return 1
return 0
elif kv[:2] == (2,6):
if os.path.exists("/proc/"+str(os.getpid())+"/smaps"):
if open("/proc/"+str(os.getpid())+"/smaps", "rt").read().find("Pss:")!=-1:
return 2
else:
return 1
if (2,6,1) <= kv <= (2,6,9):
return -1
return 0
else:
return 1
vm_accuracy = shared_val_accuracy()
if vm_accuracy == -1:
sys.stderr.write(
"Warning: Shared memory is not reported by this system.\n"
)
sys.stderr.write(
"Values reported will be too large, and totals are not reported\n"
)
elif vm_accuracy == 0:
sys.stderr.write(
"Warning: Shared memory is not reported accurately by this system.\n"
)
sys.stderr.write(
"Values reported could be too large, and totals are not reported\n"
)
elif vm_accuracy == 1:
sys.stderr.write(
"Warning: Shared memory is slightly over-estimated by this system\n"
"for each program, so totals are not reported.\n"
)
|
philwo/pysk
|
tools/ps_mem.py
|
Python
|
apache-2.0
| 12,468
|
[
"Brian"
] |
f8e85213c59e83d3d290551784d1918c25265644a43c6bb5e728ab63fc7f7ccf
|
import glob
import pickle
from collections import deque
from itertools import product
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from moviepy.editor import VideoFileClip
from natsort import natsorted
from sklearn.linear_model import ElasticNet
sns.set_style("whitegrid", {'axes.grid': False})
def get_obj_img_pts(img_cal_names, num_x=9, num_y=6):
'''Generate object/image points given filenames of calibration images
params:
num_x = the number of inner corner points along the x-axis of the test grid
num_y = the number of inner corner points along the y-axis of the test grid
'''
# generate object points
obj_pt = np.array(list(product(range(num_y), range(num_x), range(1))), np.float32)
obj_pt[:, [0, 1]] = obj_pt[:, [1, 0]]
obj_pts = []
img_pts = []
img_cals = []
img_cal_names_ret = []
for idx, img_cal_name in enumerate(img_cal_names):
img_cal = mpimg.imread(img_cal_name)
img_gray = cv2.cvtColor(img_cal, cv2.COLOR_RGB2GRAY)
ret, img_pt = cv2.findChessboardCorners(img_gray, (num_x, num_y), None)
if ret:
print('corners_found: {}'.format(img_cal_name))
obj_pts.append(obj_pt)
img_pts.append(img_pt)
# visualize the image points on calibration images
cv2.drawChessboardCorners(img_cal, (num_x, num_y), img_pt, ret)
img_cals.append(img_cal)
img_cal_names_ret.append(img_cal_name)
return obj_pts, img_pts, img_cals, img_cal_names_ret
def correct_dist(img, obj_pts, img_pts):
'''Undistort an image given object/image points
'''
img_size = (img.shape[1], img.shape[0])
ret, mtx, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(obj_pts, img_pts, img_size, None, None)
return cv2.undistort(img, mtx, dist_coeffs, None, mtx)
def img_subplots(imgs, img_names=None, f_size=(12, 10), f_cols=4):
'''Create subplots of images and return figure handle
'''
assert img_names is None or len(imgs) == len(img_names)
f_rows = np.ceil(len(imgs) / f_cols).astype('int')
fig, f_axes = plt.subplots(f_rows, f_cols, figsize=f_size)
fig.set_tight_layout(True)
for idx, f_ax in enumerate(f_axes.reshape(-1)):
f_ax.axis("off")
if idx < len(imgs):
img = imgs[idx]
color_map = "gray" if len(img.shape) == 2 else None
f_ax.imshow(img, cmap=color_map)
if img_names is not None:
f_ax.set_title(img_names[idx])
return fig
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def region_of_interest(img, roi_vertex_scales):
"""Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
vertices shall have the shape (num_of_polygon, num_of_vertices, 2)
eg: vertices = np.array([[(wd*.45, ht*.53),(wd*.05, ht), (wd*.98, ht), (wd*.55, ht*.53)]], dtype=np.int32)
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
mask_color = (255,) * channel_count
ht, wd, _ = img.shape
else:
mask_color = 255
ht, wd = img.shape
vertices = np.int32([[(wd * wd_scale, ht * ht_scale) for (wd_scale, ht_scale) in roi_vertex_scales]])
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, line_thickness=2, line_color=(0, 255, 0)):
"""Returns an image with hough lines drawn.
`img` should be the output of a Canny transform.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((*img.shape, 3), dtype=np.uint8)
draw_lines(line_img, lines, thickness=line_thickness, color=line_color)
return line_img
def draw_lines(img, lines, thickness=2, color=(255, 0, 0)):
""" Draw interpolated lanes on img
"""
lane_1st, lane_2nd = [], []
height, width, _ = img.shape
# separate the line segments based on slope and their position in the image
for line in lines:
for x1, y1, x2, y2 in line:
if ((x2 - x1) != 0) and ((y2 - y1) / (x2 - x1) < 0) and ((x1 + x2) / 2 / width < 0.55):
lane_1st.append(line)
elif ((x2 - x1) != 0) and ((y2 - y1) / (x2 - x1) > 0) and ((x1 + x2) / 2 / width > 0.55):
lane_2nd.append(line)
# fit the left and right lane separately with ElasticNet
x_pred = np.arange(img.shape[1]).reshape(-1, 1)
for lane in [np.array(lane_1st), np.array(lane_2nd)]:
lane = lane.reshape(lane.shape[0] * 2, 2)
X, y = lane[:, 0], lane[:, 1]
reg = ElasticNet().fit(X.reshape(-1, 1), y)
y_pred = np.hstack((x_pred, reg.predict(x_pred).reshape(-1, 1)))
cv2.polylines(img, np.int32([y_pred]), False, color, thickness)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def select_color(img, colors):
''' Return img with specified color selected
colors is a list of (color_lower, color_upper) tuples
'''
img_color_select = np.zeros_like(img)
for color_lower, color_upper in colors:
color_mask = cv2.inRange(img, color_lower, color_upper)
img_color_select += cv2.bitwise_and(img, img, mask=color_mask)
return img_color_select
def sobel_thresh(img, th=(30, 100), kernel_size=3, op_dirs=(1, 0), debug=False):
'''Absolute gradient thresholding
'''
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img_sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, *op_dirs, ksize=kernel_size))
img_sobel_scaled = np.uint8(255 * img_sobel / np.max(img_sobel))
img_bin = img2binary(img_sobel_scaled, th)
if debug:
return img_sobel_scaled, img_bin
else:
return img_bin
def mag_thresh(img, th=(30, 100), kernel_size=3, debug=False):
'''Gradient magnitude thresholding
'''
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img_sobel_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=kernel_size)
img_sobel_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=kernel_size)
img_sobel_mag = np.sqrt(img_sobel_x ** 2 + img_sobel_y ** 2)
img_sobel_scaled = np.uint8(255 * img_sobel_mag / np.max(img_sobel_mag))
img_bin = img2binary(img_sobel_scaled, th)
if debug:
return img_sobel_scaled, img_bin
else:
return img_bin
def img2binary(img, th=(75, 225)):
'''Convert an image to a binary mask given thresholds
'''
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img_bin = np.zeros_like(img)
img_bin[(img > th[0]) & (img <= th[1])] = 1
return img_bin
def threshold_multi(img, roi_vertex_scales, colors_rgb, colors_hls, sobel_th=(80, 150), debug=False):
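'''Build a binary lane mask by combining RGB/HLS color selection with a
U-channel (YUV) Sobel gradient threshold, then restricting the result to
the region of interest.'''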
img = gaussian_blur(img, kernel_size=3)
img_rgb = img
img_hls = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HLS)
img_yuv = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2YUV)
# color selection in RGB and HLS space
img_rgb_bin = img2binary(select_color(img_rgb, colors_rgb), th=[0, 255])
img_hls_bin = img2binary(select_color(img_hls, colors_hls), th=(0, 255))
img_color_bin = img_rgb_bin | img_hls_bin
# U abs gradient th (YUV)
img_channel = img_yuv[:, :, 1]
img_u_sobel = sobel_thresh(img_channel, th=sobel_th, kernel_size=9, op_dirs=[1, 0])
# combine thresholded binary images
img_bin_combined = img_color_bin | img_u_sobel
img_bin_combined_roi = region_of_interest(img_bin_combined, roi_vertex_scales)
if debug:
return img_color_bin, img_u_sobel, img_bin_combined, img_bin_combined_roi
else:
return img_bin_combined_roi
def get_perspective_matrix(img, src_scales, dst_scales):
if len(img.shape) == 3:
ht, wd, _ = img.shape
elif len(img.shape) == 2:
ht, wd = img.shape
else:
raise Exception("Only 2D images are supported.")
src = np.float32([(wd * wd_scale, ht * ht_scale) for (wd_scale, ht_scale) in src_scales])
dst = np.float32([(wd * wd_scale, ht * ht_scale) for (wd_scale, ht_scale) in dst_scales])
M = cv2.getPerspectiveTransform(src, dst)
inv_M = cv2.getPerspectiveTransform(dst, src)
return M, inv_M, src
def get_binary_lane(img_strips, window_scale, offset=0.10):
'''Return a segmented lane using the sliding window method'''
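# Walk the horizontal strips from the bottom of the image upwards: keep the
# lit pixels inside the current window, re-center the window on their mean x
# position (with a half-width of `offset` * image width), and blank out any
# strip with too few lit columns. The strips are re-stacked top-to-bottom
# before returning.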
lane = []
window = np.array(window_scale) * img_strips[0].shape[1]
for img_strip in reversed(img_strips):
img_windowed = np.zeros_like(img_strip)
img_windowed[:, window[0]:window[1]] = img_strip[:, window[0]:window[1]]
lane_pts_x = np.where(np.sum(img_windowed, axis=0))
if len(lane_pts_x[0]) > 5:
lane.append(img_windowed)
lane_mean = np.mean(lane_pts_x)
lane_offset = offset * img_strip.shape[1]
window = [int(lane_mean - lane_offset), int(lane_mean + lane_offset)]
else:
lane.append(np.zeros_like(img_windowed))
return np.vstack(reversed(lane))
def fit_lane_pts(pts, y_fit_range=None, num_pts_y_fit=300):
'''Return fitted points or coefficients of the 2nd order fit x = F(y).
params:
pts: tuple of y_array and x_array `(y_array, x_array)`, e.g. as returned by np.where
'''
pts_x, pts_y = reversed(pts)
coeffs = np.polyfit(pts_y, pts_x, 2)
if y_fit_range is not None:
pts_y_fit = np.linspace(0, y_fit_range, num=num_pts_y_fit)
pts_x_fit = np.polyval(coeffs, pts_y_fit)
return pts_x_fit, pts_y_fit
else:
return coeffs
# def fit_lane_pts_with_coeffs(coeffs, y_fit_range, num_pts_y_fit=300):
# '''Return fitted points given coefficients
#
# params:
# coeffs: 2nd order fitting coefficients with the highest order first
# '''
#
# pts_y_fit = np.linspace(0, y_fit_range, num=num_pts_y_fit)
# pts_x_fit = np.polyval(coeffs, pts_y_fit)
#
# return pts_x_fit, pts_y_fit
def calc_curvature(pts, xm_per_pix=3.7 / 700, ym_per_pix=30 / 720):
'''Calculate curvature given scales from pixel space to real physical space'''
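# For the quadratic fit x = A*y**2 + B*y + C returned by fit_lane_pts, the
# radius of curvature is R = (1 + (2*A*y + B)**2)**1.5 / |2*A|; y is scaled
# to meters via ym_per_pix and x via xm_per_pix before fitting.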
pts = np.array(pts).T * np.array([ym_per_pix, xm_per_pix])
pts = (pts[:, 0], pts[:, 1])
coeffs = fit_lane_pts(pts)
y_eval = np.max(pts[0])  # evaluate at the largest y (bottom of the image)
curve_radius = ((1 + (2 * coeffs[0] * y_eval + coeffs[1]) ** 2) ** 1.5) / np.absolute(2 * coeffs[0])
return curve_radius
def lane_detection(img, ROI_vertex_scales,
src_scales, dst_scales,
colors_rgb,
colors_hls,
sobel_th=(80, 150),
num_bins=20,
window_L=(0.1, 0.45),
window_R=(0.6, 0.90),
draw_lane_color=(0, 255, 0),
debug=False):
img_corr = correct_dist(img, obj_pts, img_pts)
ht, wd, _ = img.shape
M, inv_M, pts_src = get_perspective_matrix(img, src_scales, dst_scales)
img_warped = cv2.warpPerspective(img_corr, M, (wd, ht), flags=cv2.INTER_LINEAR)
# thresholding corrected image and
# perspective transformation of the resulting binary image
img_bin = threshold_multi(img_corr, ROI_vertex_scales, colors_rgb=colors_rgb, colors_hls=colors_hls, sobel_th=sobel_th)
img_bin_warped = cv2.warpPerspective(img_bin, M, (wd, ht), flags=cv2.INTER_LINEAR)
# split perspective transformed binary image into multiple horizontal strips
# img_bin_blurred = gaussian_blur(img_bin_warped, kernel_size=blur_kernel_size)
img_bin_splits = np.vsplit(img_bin_warped, num_bins)
# isolate the left and right lane with sliding windows
lane_L = get_binary_lane(img_bin_splits, window_L)
lane_R = get_binary_lane(img_bin_splits, window_R)
pts_L = np.where(lane_L)
pts_R = np.where(lane_R)
if (len(pts_L[0]) < 3) | (len(pts_R[0]) < 3):
return img_corr
# calculate curvature for left/right lane
pts_fit_L = fit_lane_pts(pts_L, y_fit_range=img_bin.shape[0], num_pts_y_fit=300)
curve_radius_L = calc_curvature(pts_L)
pts_fit_R = fit_lane_pts(pts_R, y_fit_range=img_bin.shape[0], num_pts_y_fit=300)
curve_radius_R = calc_curvature(pts_R)
# [curve_radius_L, curve_radius_R]
# create an image to draw the lines on
lane_warped_color = np.zeros_like(img_corr, dtype=np.uint8)
# draw fitted points to a lane image
pts_draw = np.hstack([pts_fit_L, np.fliplr(pts_fit_R)]).T.reshape(-1, 1, 2).astype(np.int)
cv2.fillPoly(lane_warped_color, [pts_draw], draw_lane_color)
# inverse perspective transform of lane image
lane_color = cv2.warpPerspective(lane_warped_color, inv_M, (wd, ht), flags=cv2.INTER_LINEAR)
# overlay detected lanes with the undistorted image
img_combined = cv2.addWeighted(img_corr, 1, lane_color, 0.3, 0)
if debug:
print("The left curvature is {:.1f} m".format(curve_radius_L))
print("The right curvature is {:.1f} m".format(curve_radius_R))
print("")
pts_warp_roi = np.int32(pts_src.reshape([-1, 1, 2]))
pts_roi = np.int32([[(wd * wd_scale, ht * ht_scale) for (wd_scale, ht_scale) in ROI_vertex_scales]])
img_warp_roi = cv2.polylines(img_corr, [pts_warp_roi], True, (0, 255, 0), 5) # draw the warp region in green
img_warp_roi = cv2.polylines(img_warp_roi, [pts_roi], True, (0, 0, 255), 5) # draw the roi selection in blue
return img_warp_roi, img_warped, img_bin, img_bin_warped, lane_L, lane_R, lane_warped_color, img_combined
else:
return img_combined
# Define a class to receive the characteristics of each line detection
class LaneDetector:
def __init__(self, N=30, TOLERANCE_CURVATURE=2, TOLERANCE_PTS=100):
# append to history if the relative change in curvature, compared to the
# average over the last N confident frames, is less than TOLERANCE_CURVATURE
self.TOLERANCE_CURVATURE = TOLERANCE_CURVATURE
# proceed with lane curve fitting if detected points are greater than TOLERANCE_PTS
self.TOLERANCE_PTS = TOLERANCE_PTS
# x,y values and fitted polynomial coeffs of the last n fits
# assuming 30 frames per second
self.N = N
self.pts_fit_L_last_n = deque(maxlen=self.N)
self.pts_fit_R_last_n = deque(maxlen=self.N)
# average x,y values of the fitted lanes over the last n fit
self.pts_L_last = None
self.pts_R_last = None
# radius of curvature of the line in some units
self.curve_radius = 0
self.curve_radius_last_n = deque(maxlen=self.N)
self.curve_radius_avg = 0
self.curve_radius_diff = 0
# distance in meters of vehicle center from the line
self.vehicle_offset = None
self.vehicle_offset_last_n = deque(maxlen=self.N)
self.vehicle_offset_avg = None
# # difference in fit coefficients between last and new fits
# self.coeffs_L_last_n = deque(maxlen=self.N)
# self.coeffs_R_last_n = deque(maxlen=self.N)
# self.coeffs_L_avg = None
# self.coeffs_R_avg = None
# self.fit_coeffs_diffs = np.array([0, 0, 0], dtype='float')
# lane mask
self.lane_mask = None
self.lane_masks = []
# problematic frames
self.frame_N = 0
self.error_frame_N = 0
self.error_frames = []
def get_binary_lane(self, img_strips, window_scale, offset=0.10):
'''Return a segmented lane using the sliding window method'''
lane = []
img_window_masks = []
window = (np.array(window_scale) * img_strips[0].shape[1]).astype(np.int)
for img_strip in reversed(img_strips):
img_windowed = np.zeros_like(img_strip)
img_windowed[:, window[0]:window[1]] = img_strip[:, window[0]:window[1]]
img_window_mask = np.zeros_like(img_strip)
img_window_mask[:, window[0]:window[1]] = 1
img_window_masks.append(img_window_mask)
lane_pts_x = np.where(np.sum(img_windowed, axis=0))
if len(lane_pts_x[0]) > 5:
lane.append(img_windowed)
lane_mean = np.mean(lane_pts_x)
lane_offset = offset * img_strip.shape[1]
window = [int(lane_mean - lane_offset), int(lane_mean + lane_offset)]
else:
lane.append(np.zeros_like(img_windowed))
return np.vstack(reversed(lane)), np.vstack(reversed(img_window_masks))
def calc_curvature(self, pts, xm_per_pix=3.7 / 700, ym_per_pix=30 / 720):
'''Calculate curvature given scales from pixel space to real physical space'''
pts = np.array(pts).T * np.array([ym_per_pix, xm_per_pix])
pts = (pts[:, 0], pts[:, 1])
coeffs = fit_lane_pts(pts)
y_eval = np.max(pts[0])  # evaluate at the largest y (bottom of the image)
curve_radius = ((1 + (2 * coeffs[0] * y_eval + coeffs[1]) ** 2) ** 1.5) / np.absolute(2 * coeffs[0])
return curve_radius, coeffs
def lane_detection(self, img, ROI_vertex_scales,
src_scales, dst_scales,
colors_rgb,
colors_hls,
sobel_th=(80, 150),
num_bins=20,
window_L=(0.1, 0.45),
window_R=(0.6, 0.90),
draw_lane_color=(0, 255, 0),
debug=False):
img_corr = correct_dist(img, obj_pts, img_pts)
ht, wd, _ = img.shape
M, inv_M, pts_src = get_perspective_matrix(img, src_scales, dst_scales)
img_warped = cv2.warpPerspective(img_corr, M, (wd, ht), flags=cv2.INTER_LINEAR)
# thresholding corrected image and
# perspective transformation of the resulting binary image
img_bin = threshold_multi(img_corr, ROI_vertex_scales, colors_rgb=colors_rgb, colors_hls=colors_hls, sobel_th=sobel_th)
img_bin_warped = cv2.warpPerspective(img_bin, M, (wd, ht), flags=cv2.INTER_LINEAR)
# split perspective transformed binary image into multiple horizontal strips
img_bin_splits = np.vsplit(img_bin_warped, num_bins)
# isolate the left and right lane with masks generated with sliding windows
# if lane_mask is not defined, search the lane lines from scratch
# else use the previous window for lane lines detection
if not self.lane_mask:
lane_L, mask_L = self.get_binary_lane(img_bin_splits, window_L)
lane_R, mask_R = self.get_binary_lane(img_bin_splits, window_R)
self.lane_mask = [mask_L, mask_R]
else:
mask_L, mask_R = self.lane_mask
lane_L = cv2.bitwise_and(img_bin_warped, mask_L)
lane_R = cv2.bitwise_and(img_bin_warped, mask_R)
# get (i,j) coordinates for the lane points
pts_L = np.where(lane_L)
pts_R = np.where(lane_R)
# if the number of lane points detected is less than TOLERANCE_PTS for either lane,
# use the detected points from the last and current frame for subsequent fitting
if (len(pts_L[0]) < self.TOLERANCE_PTS) | (len(pts_R[0]) < self.TOLERANCE_PTS):
self.lane_mask = None
self.error_frame_N += 1
self.error_frames.append(img)
if self.pts_L_last is not None:
# concatenate (i,j) coordinates of points detected for the last and current frame
pts_L = [pts_last + pts for (pts_last, pts) in zip(self.pts_L_last, pts_L)]
pts_R = [pts_last + pts for (pts_last, pts) in zip(self.pts_R_last, pts_R)]
else:
return img_corr
else:
self.pts_L_last = pts_L
self.pts_R_last = pts_R
# calculate curvature for left/right lane
# the curve radius is estimated as the mean of left/right lane, which is smoothed over the last n frames
pts_fit_L = fit_lane_pts(pts_L, y_fit_range=img_bin.shape[0], num_pts_y_fit=ht)
curve_radius_L, coeffs_L = self.calc_curvature(pts_L)
pts_fit_R = fit_lane_pts(pts_R, y_fit_range=img_bin.shape[0], num_pts_y_fit=ht)
curve_radius_R, coeffs_R = self.calc_curvature(pts_R)
self.curve_radius = np.mean([curve_radius_L, curve_radius_R])
self.curve_radius_diff = np.abs((self.curve_radius - self.curve_radius_avg) / self.curve_radius_avg)
# if the lane curve difference is less than TOLERANCE_CURVATURE or is the first frame
# append the current curvature and coefficients to their respective double ended queue
if (self.curve_radius_diff < self.TOLERANCE_CURVATURE) or (self.frame_N == 0):
self.curve_radius_last_n.append(self.curve_radius)
self.curve_radius_avg = np.mean(self.curve_radius_last_n)
# self.coeffs_L_last_n.append(coeffs_L)
# self.coeffs_R_last_n.append(coeffs_R)
# self.coeffs_L_avg = np.mean(self.coeffs_L_last_n, axis=0)
# self.coeffs_R_avg = np.mean(self.coeffs_R_last_n, axis=0)
else:
self.lane_mask = None
# estimate vehicle offset from the center of the road
# using the x coordinates of the last 10 points from the bottom of the frame
xm_per_pix = 3.7 / 700 # meters per pixel
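# (3.7 m is the assumed US highway lane width, taken to span roughly 700
# pixels across the warped image; the same scale is used in calc_curvature)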
# here a negative sign is needed to measure offsets with respect to the center of the road
self.vehicle_offset = -xm_per_pix * (np.mean(pts_fit_L[0][-10:]) + np.mean(pts_fit_R[0][-10:]) - wd) / 2
self.vehicle_offset_last_n.append(self.vehicle_offset)
self.vehicle_offset_avg = np.mean(self.vehicle_offset_last_n)
# create an image to draw fitted points on
lane_warped_color = np.zeros_like(img_corr, dtype=np.uint8)
# draw fitted points to a lane image
pts_draw = np.hstack([pts_fit_L, np.fliplr(pts_fit_R)]).T.reshape(-1, 1, 2).astype(np.int)
cv2.fillPoly(lane_warped_color, [pts_draw], draw_lane_color)
# inverse perspective transform of lane image
lane_color = cv2.warpPerspective(lane_warped_color, inv_M,
(wd, ht), flags=cv2.INTER_LINEAR)
lane_color = region_of_interest(lane_color, ROI_vertex_scales)
# overlay detected lanes with the undistorted image
img_combined = cv2.addWeighted(img_corr, 1, lane_color, 0.3, 0)
# draw text onto the image
img_txt = "Radius of curvature: {:7.1f}m Offset from road center: {:7.3f}m Errors: {:3.0f} /{:5.0f}".format(self.curve_radius_avg,
self.vehicle_offset_avg,
self.error_frame_N,
self.frame_N)
img_txt_offset = (int(wd * 0.01), int(ht * 0.04))
pts_txt_bounding_box = np.int32([(0, 0), (wd, 0), (wd, ht * 0.05), (0, ht * 0.05)]).reshape([-1, 1, 2])
img_combined = cv2.fillPoly(img_combined, [pts_txt_bounding_box], (43, 43, 43))
cv2.putText(img_combined,
img_txt,
img_txt_offset,
cv2.FONT_HERSHEY_COMPLEX, 0.8,
(250, 250, 250), 1)
self.frame_N += 1
if debug:
print("The left curvature is {:.1f} m".format(curve_radius_L))
print("The right curvature is {:.1f} m".format(curve_radius_R))
print("")
# draw perspective warp and ROI bounding box
pts_warp_roi = np.int32(pts_src.reshape([-1, 1, 2]))
pts_roi = np.int32([[(wd * wd_scale, ht * ht_scale) for (wd_scale, ht_scale) in ROI_vertex_scales]])
img_warp_roi = cv2.polylines(img_corr, [pts_warp_roi], True, (0, 255, 0), 5) # green for perspective transform bounding box
img_warp_roi = cv2.polylines(img_warp_roi, [pts_roi], True, (0, 0, 255), 5) # blue for ROI bounding box
return img_warp_roi, img_warped, img_bin, img_bin_warped, lane_L, lane_R, lane_warped_color, img_combined
else:
return img_combined
def process_image(img):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# the solution is detailed in the function lane_detection above
return lane_detector.lane_detection(img, ROI_vertex_scales, src_scales, dst_scales,
colors_rgb, colors_hls,
window_L=window_L, window_R=window_R, debug=False)
NUM_X = 9
NUM_Y = 6
LOAD_OBJ_IMG_PTS = True
img_cal_names = natsorted(glob.glob('camera_cal/*.jpg'))
if not LOAD_OBJ_IMG_PTS:
obj_pts, img_pts, img_cals, img_cal_names_ret = get_obj_img_pts(img_cal_names, num_x=NUM_X, num_y=NUM_Y)
with open(r"obj_img_pts", "wb") as file_output:
pickle.dump([obj_pts, img_pts], file_output)
else:
with open(r"obj_img_pts", "rb") as file_input:
obj_pts, img_pts = pickle.load(file_input)
ROI_vertex_scales = [(0.48, 0.59), (0.52, 0.59), (0.65, 0.65), (0.95, 1), (0.05, 1), (0.35, 0.65)]
src_x_top, src_x_bot = 0.42, 0.065 # counting from the left edge
src_y_top, src_y_bot = 0.67, 0
dst_x_top, dst_x_bot = 0.2, 0.2 # counting from the left edge
dst_y_top, dst_y_bot = 0.2, 0
src_scales = [(src_x_top, src_y_top), (1 - src_x_top, src_y_top),
(1 - src_x_bot, 1 - src_y_bot), (src_x_bot, 1 - src_y_bot)]
dst_scales = [(dst_x_top, dst_y_top), (1 - dst_x_top, dst_y_top),
(1 - dst_x_top, 1 - dst_y_top), (dst_x_top, 1 - dst_y_bot)]
colors_rgb = [(np.uint8([190, 190, 190]), np.uint8([255, 255, 255]))]
colors_hls = [(np.uint8([0, 120, 150]), np.uint8([75, 255, 255])),
(np.uint8([75, 180, 0]), np.uint8([120, 255, 35]))]
window_L = (0.1, 0.45)
window_R = (0.6, 0.90)
lane_detector = LaneDetector()
# img_test_names = natsorted(glob.glob("test_images/*.jpg"))
# for img_test_name in img_test_names[0:2]:
# print(img_test_name)
# img = mpimg.imread(img_test_name)
#
# imgs = lane_detector.lane_detection(img, ROI_vertex_scales, src_scales, dst_scales,
# colors_rgb, colors_hls,
# window_L=window_L, window_R=window_R, debug=True)
#
# img_names = ["undistorted", "perspective warped", "thresholded", "thresholded warped",
# "left lane", "right lane", "warped detected lanes", "combined with detected lanes"]
#
# fig = img_subplots(imgs, img_names, f_size=(10, 12), f_cols=2)
# fig.suptitle(img_test_name, y=1.05, fontsize=16)
# plt.axis("on")
#
# plt.show()
# clip_files = ["test.mp4", "challenge_video.mp4", "harder_challenge_video.mp4"]
clip_files = ["project_video.mp4", "challenge_video.mp4", "harder_challenge_video.mp4"]
for clip_file in clip_files[0:1]:
clip = VideoFileClip(clip_file)
clip_out = clip.fl_image(process_image)
clip_out.write_videofile("z_sol_" + clip_file, audio=False)
print("======================================================")
|
jingzhehu/udacity_sdcnd
|
term1/P4_advanced_lane_lines/P4_advanced_lane_finding.py
|
Python
|
apache-2.0
| 27,866
|
[
"Gaussian"
] |
11c85154639317128aad374411711f6d7f267019464530b691a4aea29c042fe9
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
# Django Admin, use {% raw %}{% url 'admin:index' %}{% endraw %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', default_views.bad_request,
kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied,
kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found,
kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
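# Illustrative sketch (not part of the original file): the TemplateView imported
# above is typically wired up under the "Your stuff" marker, e.g.
#
#     url(r'^$', TemplateView.as_view(template_name='pages/home.html'),
#         name='home'),
#
# The template path and url name here are assumptions, not taken from this repo.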
|
shenyushun/cookiecutter-simple-django-cn
|
{{cookiecutter.project_slug}}/config/urls.py
|
Python
|
mit
| 1,166
|
[
"VisIt"
] |
40e6bbcb4c3dbc51609bc32248080e8e615e88e870540cf99fc17786e9a42e83
|
########################################################################
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2011/12/14 15:07:01
########################################################################
""" :mod: PfnTestCase
=======================
.. module: PfnTestCase
:synopsis: test case for Pfn module
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
test case for Pfn module
"""
__RCSID__ = "$Id $"
# #
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2011/12/14 15:07:12
# # imports
import unittest
# sut
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
########################################################################
class PfnTests( unittest.TestCase ):
"""
.. class:: PfnTests
"""
def setUp( self ):
self.default_pfns = {
None : {'Errno': 0, 'Message': "wrong 'pfn' argument value in function call, expected non-empty string, got <type 'NoneType'>", 'OK': False},
"" : { "OK" : False, 'Errno': 0, "Message" : "wrong 'pfn' argument value in function call, expected non-empty string, got <type 'NoneType'>"},
"/a/b/c" : { 'OK': True, 'Value': {'Protocol': '', 'WSUrl': '', 'FileName': 'c', 'Host': '', 'Path': '/a/b', 'Port': ''} },
"proto:/a/b/c" : {'OK': True, 'Value': {'Protocol': 'proto', 'WSUrl': '', 'FileName': 'c', 'Host': '', 'Path': '/a/b', 'Port': ''}},
"proto://host/a/b/c" : {'OK': True, 'Value': {'Protocol': 'proto', 'WSUrl': '', 'FileName': 'c', 'Host': 'host', 'Path': '/a/b', 'Port': ''}},
"proto://host:port/a/b/c" : {'OK': True, 'Value': {'Protocol': 'proto', 'WSUrl': '', 'FileName': 'c', 'Host': 'host', 'Path': '/a/b', 'Port': 'port'}},
"proto://host:port//a/b/c?SvcClass=toto" : {'OK': True, 'Value': {'Protocol': 'proto', 'WSUrl': '', 'FileName': 'c', 'Host': 'host', 'Path': '//a/b', 'Port': 'port', 'Options' : 'SvcClass=toto'}},
"proto://host:port/a/b/c?SvcClass=toto" : {'OK': True, 'Value': {'Protocol': 'proto', 'WSUrl': '', 'FileName': 'c', 'Host': 'host', 'Path': '/a/b', 'Port': 'port', 'Options' : 'SvcClass=toto'}},
}
    # We keep some standard non-SRM-specific URLs that used to be supported, just for backward compatibility
self.srm_pfns = {
None : {'Errno': 0, 'Message': "wrong 'pfn' argument value in function call, expected non-empty string, got <type 'NoneType'>", 'OK': False},
"" : { "OK" : False, 'Errno': 0, "Message" : "wrong 'pfn' argument value in function call, expected non-empty string, got <type 'NoneType'>"},
"/a/b/c" : { 'OK': True, 'Value': {'Protocol': '', 'WSUrl': '', 'FileName': 'c', 'Host': '', 'Path': '/a/b', 'Port': ''} },
"proto:/a/b/c" : {'OK': True, 'Value': {'Protocol': 'proto', 'WSUrl': '', 'FileName': 'c', 'Host': '', 'Path': '/a/b', 'Port': ''}},
"proto://host/a/b/c" : {'OK': True, 'Value': {'Protocol': 'proto', 'WSUrl': '', 'FileName': 'c', 'Host': 'host', 'Path': '/a/b', 'Port': ''}},
"proto://host:port/a/b/c" : {'OK': True, 'Value': {'Protocol': 'proto', 'WSUrl': '', 'FileName': 'c', 'Host': 'host', 'Path': '/a/b', 'Port': 'port'}},
"proto://host:port/wsurl?=/a/b/c" : {'OK': True, 'Value': {'Protocol': 'proto', 'WSUrl': '/wsurl?=', 'FileName': 'c', 'Host': 'host', 'Path': '/a/b', 'Port': 'port'}},
"proto://host:port/wsurl?blah=/a/b/c" : {'OK': True, 'Value': {'Protocol': 'proto', 'WSUrl': '/wsurl?blah=', 'FileName': 'c', 'Host': 'host', 'Path': '/a/b', 'Port': 'port'}},
}
def test_01_srm_parse( self ):
""" pfnparse and pfnparse_old
:param self: self reference
"""
for pfn, result in self.srm_pfns.iteritems():
parseResult = pfnparse( pfn )
self.assertEqual( parseResult['OK'], result['OK'] )
if result['OK']:
self.assertEqual( parseResult['Value'], result['Value'] )
def test_02_default_parse( self ):
""" pfnparse and pfnparse_old
:param self: self reference
"""
for pfn, result in self.default_pfns.iteritems():
parseResult = pfnparse( pfn, srmSpecific = False )
self.assertEqual( parseResult['OK'], result['OK'] )
if result['OK']:
self.assertEqual( parseResult['Value'], result['Value'] )
def test_03_srm_unparse( self ):
""" pfnunparse and pfnunparse_old
:param self: self reference
"""
for pfn, result in self.srm_pfns.items():
if result["OK"]:
unparseResult = pfnunparse( result["Value"] )
self.assertEqual( unparseResult, { "OK" : True, "Value" : pfn } )
self.assertEqual( pfnunparse( None )['OK'], False )
self.assertEqual( pfnunparse( "Path" )['OK'], False )
def test_03_default_unparse( self ):
""" pfnunparse and pfnunparse_old
:param self: self reference
"""
for pfn, result in self.default_pfns.items():
if result["OK"]:
unparseResult = pfnunparse( result["Value"], srmSpecific = False )
self.assertEqual( unparseResult, { "OK" : True, "Value" : pfn } )
self.assertEqual( pfnunparse( None )['OK'], False )
self.assertEqual( pfnunparse( "Path" )['OK'], False )
# # test execution
if __name__ == "__main__":
testLoader = unittest.TestLoader()
suite = testLoader.loadTestsFromTestCase( PfnTests )
unittest.TextTestRunner( verbosity = 3 ).run( suite )
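# Illustrative sketch (not part of the original test module): direct use of the
# functions under test, mirroring the expectations encoded in self.srm_pfns above.
# Assumes an importable DIRAC installation.
#
#   res = pfnparse( "proto://host:port/a/b/c" )
#   if res["OK"]:
#     fields = res["Value"]        # e.g. fields["Host"] == "host", fields["Port"] == "port"
#     print pfnunparse( fields )   # round-trips: {'OK': True, 'Value': 'proto://host:port/a/b/c'}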
|
andresailer/DIRAC
|
Core/Utilities/test/Test_Pfn.py
|
Python
|
gpl-3.0
| 5,256
|
[
"DIRAC"
] |
eb2d9a3ac4ed369e872f63bd997ec7254ee6feb4bfc3f88414635ad53f0c3b8a
|
import re
import numpy as np
import pandas as pd
import mdtraj as md
import simtk.unit as u
import simtk.openmm.app.element as element
baseline = 0.32 * u.angstrom ** 3
elemental_coefficients = dict(C=1.51, H=0.17, O=0.57, N=1.05, S=2.99, P=2.48, F=0.22, Cl=2.16, Br=3.29, I=5.45)
elemental_coefficients = {key:val * u.angstrom ** 3 for (key, val) in elemental_coefficients.items()}
def polarizability(traj, add_baseline=True):
"""Estimate the polarizabilty of a simulation box using the simple
elemental regression model (e.g. counting elements) of Sales, 2002.
Returns
-------
alpha : simtk.unit, [Angstroms^3]
The polarizability
"""
elements = [a.element.symbol for a in traj.top.atoms]
alpha = np.sum(elemental_coefficients[e] for e in elements)
if add_baseline:
alpha += baseline
return alpha
def dielectric_correction(traj):
"""Estimate the polarizabilty of a simulation box using the simple
"""
volume = traj.unitcell_volumes.mean() * (u.nanometers ** 3) # nm is mdtraj length unit system
return 4 * np.pi * polarizability(traj) / volume
def polarizability_from_formula(formula, add_baseline=True):
"""
Returns
-------
alpha : simtk.unit, [Angstroms^3]
The polarizability
"""
element_dict = formula_to_element_counts(formula)
#return([number * elemental_coefficients[e] for e, number in element_dict.items()])
alpha = np.sum([number * elemental_coefficients[e] for e, number in element_dict.items()])
if add_baseline:
alpha += baseline
return alpha
def dielectric_correction_from_formula(formula, mass_density, add_baseline=True):
alpha = polarizability_from_formula(formula, add_baseline)
element_dict = formula_to_element_counts(formula)
molar_mass = np.sum([number * element.Element.getBySymbol(e).mass for e, number in element_dict.items()])
molar_mass *= (u.kilograms / u.dalton)
molar_mass /= (u.AVOGADRO_CONSTANT_NA * u.mole)
molar_volume = (molar_mass / mass_density)
return 4 * np.pi * alpha / molar_volume
def formula_to_element_counts(test):
    """Parse a chemical formula string (e.g. 'C6H6') into a dict mapping
    element symbol to atom count."""
pattern = r'([A-Z][a-z]{0,2}\d*)'
pieces = re.split(pattern, test)
print "\ntest=%r pieces=%r" % (test, pieces)
data = pieces[1::2]
rubbish = filter(None, pieces[0::2])
pattern2 = r'([A-Z][a-z]{0,2})'
results = {}
for piece in data:
print(piece)
element, number = re.split(pattern2, piece)[1:]
try:
number = int(number)
except ValueError:
number = 1
results[element] = number
return results
element.Element.getBySymbol("Cl").mass
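# Illustrative sketch (not part of the original module): applying the regression
# model above to water. The mass density value is an assumption for demonstration.
if __name__ == "__main__":
    # H2O: 2 * 0.17 (H) + 0.57 (O) + 0.32 (baseline) = 1.23 Angstrom^3
    alpha_water = polarizability_from_formula("H2O")
    print(alpha_water)
    # dielectric correction from the molar volume implied by an assumed density
    # of ~0.997 g/cm^3 for liquid water at room temperature
    print(dielectric_correction_from_formula("H2O", 0.997 * u.grams / u.centimeter ** 3))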
|
jchodera/LiquidBenchmark
|
src/polarizability.py
|
Python
|
gpl-2.0
| 2,695
|
[
"Dalton",
"MDTraj",
"OpenMM"
] |
eb7302673c9f7b6cf4d8e5ae4433dfc6a3b961c80c07488c3774c7546e9f181e
|
#Copyright 2009 Erik Tollerud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the internals for the FitGui gui.
"""
#TODO: change single select to click-to-do-action
from __future__ import division,with_statement
import numpy as np
try: #this is the old-style import - below is for traits 4.x
from enthought.traits.api import HasTraits,Bool,Button,Float,Int,Color, \
Instance,Tuple,Array,List,Dict,Str,Property, \
on_trait_change,cached_property
from enthought.traits.ui.api import View,VGroup,HGroup,Item,TupleEditor, \
ListEditor
from enthought.tvtk.pyface.scene_editor import SceneEditor
from enthought.mayavi.tools.mlab_scene_model import MlabSceneModel
from enthought.mayavi.core.ui.mayavi_scene import MayaviScene
except ImportError:
from traits.api import HasTraits,Bool,Button,Float,Int,Color, \
Instance,Tuple,Array,List,Dict,Str,Property, \
on_trait_change,cached_property
from traitsui.api import View,VGroup,HGroup,Item,TupleEditor, \
ListEditor
from tvtk.pyface.scene_editor import SceneEditor
from mayavi.tools.mlab_scene_model import MlabSceneModel
from mayavi.core.ui.mayavi_scene import MayaviScene
from .fitgui import FitGui,TraitedModel
class MultiFitGui(HasTraits):
"""
data should be c x N where c is the number of data columns/axes and N is
the number of points
"""
doplot3d = Bool(False)
show3d = Button('Show 3D Plot')
replot3d = Button('Replot 3D')
scalefactor3d = Float(0)
do3dscale = Bool(False)
nmodel3d = Int(1024)
usecolor3d = Bool(False)
color3d = Color((0,0,0))
scene3d = Instance(MlabSceneModel,())
plot3daxes = Tuple(('x','y','z'))
data = Array(shape=(None,None))
weights = Array(shape=(None,))
curveaxes = List(Tuple(Int,Int))
axisnames = Dict(Int,Str)
invaxisnames = Property(Dict,depends_on='axisnames')
fgs = List(Instance(FitGui))
traits_view = View(VGroup(Item('fgs',editor=ListEditor(use_notebook=True,page_name='.plotname'),style='custom',show_label=False),
Item('show3d',show_label=False)),
resizable=True,height=900,buttons=['OK','Cancel'],title='Multiple Model Data Fitters')
plot3d_view = View(VGroup(Item('scene3d',editor=SceneEditor(scene_class=MayaviScene),show_label=False,resizable=True),
Item('plot3daxes',editor=TupleEditor(cols=3,labels=['x','y','z']),label='Axes'),
HGroup(Item('do3dscale',label='Scale by weight?'),
Item('scalefactor3d',label='Point scale'),
Item('nmodel3d',label='Nmodel')),
HGroup(Item('usecolor3d',label='Use color?'),Item('color3d',label='Relation Color',enabled_when='usecolor3d')),
Item('replot3d',show_label=False),springy=True),
resizable=True,height=800,width=800,title='Multiple Model3D Plot')
def __init__(self,data,names=None,models=None,weights=None,dofits=True,**traits):
"""
:param data: The data arrays
:type data: sequence of c equal-length arrays (length N)
        :param names: the names for each data axis/column
:type names: sequence of strings, length c
:param models:
The models to fit for each pair either as strings or
            :class:`astropysics.models.ParametricModel` objects.
:type models: sequence of models, length c-1
:param weights: the weights for each point or None for no weights
:type weights: array-like of size N or None
:param dofits:
If True, the data will be fit to the models when the object is
created, otherwise the models will be passed in as-is (or as
created).
:type dofits: bool
        extra keyword arguments get passed in as new traits, e.g.
        (r[finmask],m[finmask],l[finmask]),names='rh,Mh,Lh',weights=w[finmask],models=models,dofits=False)
"""
super(MultiFitGui,self).__init__(**traits)
self._lastcurveaxes = None
data = np.array(data,copy=False)
if weights is None:
self.weights = np.ones(data.shape[1])
else:
self.weights = np.array(weights)
self.data = data
if data.shape[0] < 2:
raise ValueError('Must have at least 2 columns')
if isinstance(names,basestring):
names = names.split(',')
if names is None:
if len(data) == 2:
self.axisnames = {0:'x',1:'y'}
elif len(data) == 3:
self.axisnames = {0:'x',1:'y',2:'z'}
else:
self.axisnames = dict((i,str(i)) for i in data)
elif len(names) == len(data):
self.axisnames = dict([t for t in enumerate(names)])
else:
raise ValueError("names don't match data")
#default to using 0th axis as parametric
self.curveaxes = [(0,i) for i in range(len(data))[1:]]
if models is not None:
if len(models) != len(data)-1:
raise ValueError("models don't match data")
for i,m in enumerate(models):
fg = self.fgs[i]
newtmodel = TraitedModel(m)
if dofits:
fg.tmodel = newtmodel
fg.fitmodel = True #should happen automatically, but this makes sure
else:
oldpard = newtmodel.model.pardict
fg.tmodel = newtmodel
fg.tmodel .model.pardict = oldpard
if dofits:
fg.fitmodel = True
def _data_changed(self):
self.curveaxes = [(0,i) for i in range(len(self.data))[1:]]
def _axisnames_changed(self):
for ax,fg in zip(self.curveaxes,self.fgs):
fg.plot.x_axis.title = self.axisnames[ax[0]] if ax[0] in self.axisnames else ''
fg.plot.y_axis.title = self.axisnames[ax[1]] if ax[1] in self.axisnames else ''
self.plot3daxes = (self.axisnames[0],self.axisnames[1],self.axisnames[2] if len(self.axisnames) > 2 else self.axisnames[1])
@on_trait_change('curveaxes[]')
def _curveaxes_update(self,names,old,new):
ax=[]
for t in self.curveaxes:
ax.append(t[0])
ax.append(t[1])
if set(ax) != set(range(len(self.data))):
self.curveaxes = self._lastcurveaxes
            return  # TODO: check for recursion
if self._lastcurveaxes is None:
self.fgs = [FitGui(self.data[t[0]],self.data[t[1]],weights=self.weights) for t in self.curveaxes]
for ax,fg in zip(self.curveaxes,self.fgs):
fg.plot.x_axis.title = self.axisnames[ax[0]] if ax[0] in self.axisnames else ''
fg.plot.y_axis.title = self.axisnames[ax[1]] if ax[1] in self.axisnames else ''
else:
for i,t in enumerate(self.curveaxes):
if self._lastcurveaxes[i] != t:
self.fgs[i] = fg = FitGui(self.data[t[0]],self.data[t[1]],weights=self.weights)
ax = self.curveaxes[i]
fg.plot.x_axis.title = self.axisnames[ax[0]] if ax[0] in self.axisnames else ''
fg.plot.y_axis.title = self.axisnames[ax[1]] if ax[1] in self.axisnames else ''
self._lastcurveaxes = self.curveaxes
def _show3d_fired(self):
self.edit_traits(view='plot3d_view')
self.doplot3d = True
self.replot3d = True
def _plot3daxes_changed(self):
self.replot3d = True
@on_trait_change('weights',post_init=True)
def weightsChanged(self):
for fg in self.fgs:
if fg.weighttype != 'custom':
fg.weighttype = 'custom'
fg.weights = self.weights
@on_trait_change('data','fgs','replot3d','weights')
def _do_3d(self):
if self.doplot3d:
M = self.scene3d.mlab
try:
xi = self.invaxisnames[self.plot3daxes[0]]
yi = self.invaxisnames[self.plot3daxes[1]]
zi = self.invaxisnames[self.plot3daxes[2]]
x,y,z = self.data[xi],self.data[yi],self.data[zi]
w = self.weights
M.clf()
if self.scalefactor3d == 0:
sf = x.max()-x.min()
sf *= y.max()-y.min()
sf *= z.max()-z.min()
sf = sf/len(x)/5
self.scalefactor3d = sf
else:
sf = self.scalefactor3d
glyph = M.points3d(x,y,z,w,scale_factor=sf)
glyph.glyph.scale_mode = 0 if self.do3dscale else 1
M.axes(xlabel=self.plot3daxes[0],ylabel=self.plot3daxes[1],zlabel=self.plot3daxes[2])
try:
xs = np.linspace(np.min(x),np.max(x),self.nmodel3d)
#find sequence of models to go from x to y and z
ymods,zmods = [],[]
for curri,mods in zip((yi,zi),(ymods,zmods)):
while curri != xi:
for i,(i1,i2) in enumerate(self.curveaxes):
if curri==i2:
curri = i1
mods.insert(0,self.fgs[i].tmodel.model)
break
else:
raise KeyError
ys = xs
for m in ymods:
ys = m(ys)
zs = xs
for m in zmods:
zs = m(zs)
if self.usecolor3d:
c = (self.color3d[0]/255,self.color3d[1]/255,self.color3d[2]/255)
M.plot3d(xs,ys,zs,color=c)
else:
M.plot3d(xs,ys,zs,np.arange(len(xs)))
except (KeyError,TypeError):
M.text(0.5,0.75,'Underivable relation')
except KeyError:
M.clf()
M.text(0.25,0.25,'Data problem')
@cached_property
def _get_invaxisnames(self):
d={}
for k,v in self.axisnames.iteritems():
d[v] = k
return d
def fit_data_multi(data,names=None,weights=None,models=None):
"""
    Fit a data set consisting of a variety of curves simultaneously. A GUI
    application instance must already exist (e.g. the interactive mode of
    IPython).

    Returns a tuple of models, e.g. [xvsy, xvsz], or None if the dialog is
    cancelled.
"""
if len(data.shape) !=2 or data.shape[0]<2:
raise ValueError('data must be 2D with first dimension >=2')
if models is not None and len(models) != data.shape[0]:
raise ValueError('Number of models does not match number of data sets')
mfg = MultiFitGui(data,names,models,weights=weights)
res = mfg.edit_traits(kind='livemodal')
if res:
return tuple([fg.tmodel.model for fg in mfg.fgs])
else:
return None
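# Illustrative sketch (not part of the original module): fit_data_multi expects a
# (c, N) array and needs a running GUI application (e.g. an interactive IPython
# session with a Traits backend). The synthetic relations below are assumptions
# made purely for demonstration.
#
#   x = np.linspace(1, 10, 200)
#   data = np.array([x, 2 * x + 1, x ** 2])       # columns: x, y, z
#   models = fit_data_multi(data, names='x,y,z')  # blocks until the dialog is closed
#   if models is not None:
#       xvsy, xvsz = models                       # one fitted model per non-x column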
|
eteq/pymodelfit
|
pymodelfit/multifitgui.py
|
Python
|
apache-2.0
| 11,844
|
[
"Mayavi"
] |
fd164fde264df48cbe4f0eb152c5deb65c5762f4cbf4b2da03bd075549f61db1
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import six
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.compute.monitors import test_monitors
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-PF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_7891',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
self._fake_instance_get_by_host_and_node)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _create_compute_node(self, values=None):
# This creates a db representation of a compute_node.
compute = {
"id": 1,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
if values:
compute.update(values)
return compute
def _create_compute_node_obj(self, context):
# Use the db representation of a compute node returned
# by _create_compute_node() to create an equivalent compute
# node object.
compute = self._create_compute_node()
compute_obj = objects.ComputeNode()
compute_obj = objects.ComputeNode._from_db_object(
context, compute_obj, compute)
return compute_obj
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'last_seen_up': None,
}
return service
def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
instance_uuid = str(uuid.uuid1())
instance = objects.Instance(context=self.context, uuid=instance_uuid,
flavor=flavor)
instance.update({
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': {},
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
})
if stash:
instance.old_flavor = flavor
instance.new_flavor = flavor
instance.numa_topology = kwargs.pop('numa_topology', None)
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_by_host_and_node(self, context, host, nodename,
expected_attrs=None):
return objects.InstanceList(
objects=[i for i in self._instances.values() if i['host'] == host])
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node_obj(self.context)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
@mock.patch('nova.objects.Instance.save')
def test_disabled_instance_context_claim(self, mock_save):
# instance context manager variation:
instance = self._fake_instance_obj()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance_obj(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.compute_node = None
self.tracker._get_service = mock.Mock(return_value=None)
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def assertEqualPciDevicePool(self, expected, observed):
self.assertEqual(expected.product_id, observed.product_id)
self.assertEqual(expected.vendor_id, observed.vendor_id)
self.assertEqual(expected.tags, observed.tags)
self.assertEqual(expected.count, observed.count)
def assertEqualPciDevicePoolList(self, expected, observed):
ex_objs = expected.objects
ob_objs = observed.objects
self.assertEqual(len(ex_objs), len(ob_objs))
for i in range(len(ex_objs)):
self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i])
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb)
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb)
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected = pci_device_pool.from_pci_stats(driver.pci_stats)
self.assertEqual(expected,
self.tracker.compute_node.pci_device_pools)
def test_set_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(self.context, inst)
mock_save.assert_called_once_with()
self.assertEqual(self.tracker.host, inst.host)
self.assertEqual(self.tracker.nodename, inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look for
        # differences rather than rely on the initial state being the same
        # as an update
urs_mock = self.tracker.scheduler_client.update_resource_stats
self.tracker._update(self.context)
urs_mock.reset_mock()
# change a compute node value to simulate a change
self.tracker.compute_node.local_gb_used += 1
self.tracker._update(self.context)
urs_mock.assert_called_once_with(self.tracker.compute_node)
def test_no_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look for
        # differences rather than rely on the initial state being the same
        # as an update
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
update.reset_mock()
self.tracker._update(self.context)
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
observed_pools = self.tracker.compute_node.pci_device_pools
self.assertEqualPciDevicePoolList(expected_pools, observed_pools)
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def test_set_empty_ext_resources(self):
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
self.assertEqual({}, resources.stats)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": "12"}
self.assertEqual(sorted(expected),
sorted(resources.stats))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
claim_topology = self._claim_topology(claim_mem / 2)
instance_topology = self._instance_topology(claim_mem / 2)
instance = self._fake_instance_obj(
flavor=flavor, task_state=None,
numa_topology=instance_topology)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance_obj(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance_obj(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_additive_claims(self, mock_save, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node.memory_mb_used)
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node.local_gb_used)
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node.vcpus_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_context_claim_with_exception(self, mock_save, mock_get):
instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_instance_context_claim(self, mock_get_all, mock_save, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
mock_get_all.return_value = [instance]
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
with mock.patch.object(instance, 'save'):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node.current_workload)
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.current_workload)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
migration_type='resize',
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_instances_with_live_migrations(self, mock_migration_list):
instance = self._fake_instance_obj()
migration = objects.Migration(context=self.context,
migration_type='live-migration',
instance_uuid=instance.uuid)
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
def fake_save():
self.assertEqual(set(['numa_topology', 'host', 'node',
'launched_on']),
inst.obj_what_changed())
mock_save.side_effect = fake_save
inst = objects.Instance(host=None, node=None, memory_mb=1024)
inst.obj_reset_changes()
numa = objects.InstanceNUMATopology()
claim = mock.MagicMock()
claim.claimed_numa_topology = numa
mock_claim.return_value = claim
with mock.patch.object(self.tracker, '_update_usage_from_instance'):
self.tracker.instance_claim(self.context, inst)
mock_save.assert_called_once_with()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_sets_instance_host_and_node(self, mock_get):
instance = self._fake_instance_obj()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.instance = self._fake_instance_obj()
self.instance_type = self._fake_flavor_create()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance_obj()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, {}, self.limits)
self.tracker.drop_resize_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance_obj(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance_obj(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance_obj(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance_obj()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
        self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
def test_get_host_metrics_one_failed(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class4 = test_monitors.FakeMonitorClass4(self.tracker)
self.tracker.monitors = [class1, class4]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertTrue(len(metrics) > 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
        self.flags(compute_monitors=['FakeMonitorClass1'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
self.tracker.monitors = [class1]
with mock.patch.object(class1, 'get_metrics',
side_effect=test.TestingException()):
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %s.', class1)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class2 = test_monitors.FakeMonitorClass2(self.tracker)
self.tracker.monitors = [class1, class2]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [{
'timestamp': 1232,
'name': 'key1',
'value': 2600,
'source': 'libvirt'
}, {
'name': 'key2',
'source': 'libvirt',
'timestamp': 123,
'value': 1600
}]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.iteritems():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.iteritems():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
|
kimjaejoong/nova
|
nova/tests/unit/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 59,398
|
[
"exciting"
] |
ebacdec1b72ead60f2692bbf16477087ec0a11ab808320add719f8968c861809
|
from pylab import plot,grid,title,subplot,xlabel,ylabel,text,subplots_adjust,fill_between,mean,connect,show
import shogun as sg
import util
util.set_title('PRC example')
util.DISTANCE=0.5
subplots_adjust(hspace=0.3)
pos = util.get_realdata(True)
neg = util.get_realdata(False)
features=util.get_realfeatures(pos, neg)
labels=util.get_labels()
# classifiers
gk = sg.GaussianKernel(features, features, 1.0)
svm = sg.LibSVM(1000.0, gk, labels)
svm.train()
lda = sg.LDA(1,features,labels)
lda.train()
## plot points
subplot(211)
plot(pos[0,:], pos[1,:], "r.")
plot(neg[0,:], neg[1,:], "b.")
grid(True)
title('Data',size=10)
# plot PRC for SVM
subplot(223)
PRC_evaluation = sg.PRCEvaluation()
PRC_evaluation.evaluate(svm.apply(),labels)
PRC = PRC_evaluation.get_PRC()
plot(PRC[0], PRC[1])
fill_between(PRC[0],PRC[1],0,alpha=0.1)
text(0.55,mean(PRC[1])/3,'auPRC = %.5f' % PRC_evaluation.get_auPRC())
grid(True)
xlabel('Precision')
ylabel('Recall')
title('LibSVM (Gaussian kernel, C=%.3f) PRC curve' % svm.get_C1(),size=10)
# plot PRC for LDA
subplot(224)
PRC_evaluation.evaluate(lda.apply(),labels)
PRC = PRC_evaluation.get_PRC()
plot(PRC[0], PRC[1])
fill_between(PRC[0],PRC[1],0,alpha=0.1)
text(0.55,mean(PRC[1])/3,'auPRC = %.5f' % PRC_evaluation.get_auPRC())
grid(True)
xlabel('Precision')
ylabel('Recall')
title('LDA (gamma=%.3f) PRC curve' % lda.get_gamma(),size=10)
connect('key_press_event', util.quit)
show()
|
shogun-toolbox/shogun
|
examples/undocumented/python/graphical/prc.py
|
Python
|
bsd-3-clause
| 1,418
|
[
"Gaussian"
] |
0d16314c8349e349e3a71d04fcf4f782c13964edfe523b87e6e831aa6eed4bdc
|
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import fields, models, tools
class ResCityItCode(models.Model):
#
# First...
    # FIXME: URL in class description is no longer useful...
#
# Visit:
# https://www.agenziaentrate.gov.it/wps/content/Nsilib/Nsi/
# Strumenti/Codici+attivita+e+tributo/Codici+territorio/
# ... and then click on "Consultazione Archivio Comuni e Stati esteri".
    # You will be redirected to:
# https://www.agenziaentrate.gov.it/wps/content/nsilib/nsi/schede/
# fabbricatiterreni/archivio+comuni+e+stati+esteri/
# consultazione+archivio+comuni+stati+esteri
    # Here, you can download the new, updated CSV file.
# (last update on 30/07/2018)
#
#
# ... BUT then...
# TODO: Find out how to import the new CSV without breaking existing data.
#
    # The new CSV has a new structure:
# - some records have been deleted.
# - some columns no longer exist.
# - ...
# - ... and so on...
# - ...
#
#
# Good luck! ;)
#
"""
To create res.city.it.code.csv:
http://www.agenziaentrate.gov.it/wps/content/Nsilib/Nsi/Strumenti/
Codici+attivita+e+tributo/Codici+territorio/Comuni+italia+esteri/
- download the file named: Codici Comuni d’Italia - xls
- open it in LibreOffice and save it as .ods
- some date cells contain a "'" to be removed using Calc's menu
Data / Text to columns
    - rows 216 and 1122 contain wrongly written dates
- dates format must be yyyy-mm-dd
- add first column with numeric ids
- change first row with column names from res.city.it.code model
id,national_code,cadastre_code,province,name,notes,national_code_var,
cadastre_code_var,province_var,name_var,creation_date,var_date
- save as csv in data/res.city.it.code.csv
"""
_name = "res.city.it.code"
_description = "National city codes"
national_code = fields.Char("National code", size=4)
cadastre_code = fields.Char("Belfiore cadastre code (not used anymore)", size=4)
province = fields.Char("Province", size=5)
name = fields.Char("Name")
notes = fields.Char("Notes", size=4)
national_code_var = fields.Char("National code variation", size=4)
cadastre_code_var = fields.Char("Cadastre code variation", size=4)
province_var = fields.Char("Province variation", size=5)
name_var = fields.Char("Name variation", size=100)
creation_date = fields.Date("Creation date")
var_date = fields.Date("Variation date")
class ResCityItCodeDistinct(models.Model):
_name = "res.city.it.code.distinct"
_description = "National city codes distinct"
_auto = False
name = fields.Char("Name", size=100)
def init(self):
tools.drop_view_if_exists(self.env.cr, self._table)
self.env.cr.execute(
"""
CREATE OR REPLACE VIEW res_city_it_code_distinct AS (
SELECT name, MAX(id) AS id FROM res_city_it_code
GROUP BY name)
"""
)
|
OCA/l10n-italy
|
l10n_it_fiscalcode/model/res_city_it_code.py
|
Python
|
agpl-3.0
| 3,137
|
[
"VisIt"
] |
1cd8c761bc097c15918087362746e9458e3465a77d1b18ce07630cdd1202de2f
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
exec(open('nanoget/version.py').read())
setup(
name='nanoget',
version=__version__,
description='Functions to extract information from Oxford Nanopore sequencing data and alignments.',
long_description=open(path.join(here, "README.md")).read(),
long_description_content_type="text/markdown",
url='https://github.com/wdecoster/nanoget',
author='Wouter De Coster',
author_email='decosterwouter@gmail.com',
license='GPLv3',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='nanopore sequencing plotting quality control',
python_requires='>=3',
packages=find_packages() + ['scripts'],
install_requires=['pandas>=0.22.0',
'numpy',
'biopython',
'pysam>0.10.0.0'],
package_dir={'nanoget': 'nanoget'},
data_files=[("", ["LICENSE"])])
|
wdecoster/nanoget
|
setup.py
|
Python
|
gpl-3.0
| 1,470
|
[
"Biopython",
"pysam"
] |
bcc733ef9dbb996f09ab884bee43626bd3fe0ed5ea1e442ab3e297af329c8949
|
"""
FEZ basic types
"""
from __future__ import print_function
from xnb_parse.xna_types.xna_primitive import Enum
class FaceOrientation(Enum):
__slots__ = ()
enum_values = dict(enumerate(['Left', 'Down', 'Back', 'Right', 'Top', 'Front']))
class LevelNodeType(Enum):
__slots__ = ()
enum_values = dict(enumerate(['Node', 'Hub', 'Lesser']))
xml_tag = 'NodeType'
class CollisionType(Enum):
__slots__ = ()
enum_values = dict(enumerate(['AllSides', 'TopOnly', 'None', 'Immaterial', 'TopNoStraightLedge']))
class Viewpoint(Enum):
__slots__ = ()
enum_values = dict(enumerate(['None', 'Front', 'Right', 'Back', 'Left', 'Up', 'Down', 'Perspective']))
class NpcAction(Enum):
__slots__ = ()
enum_values = dict(enumerate(['None', 'Idle', 'Idle2', 'Idle3', 'Walk', 'Turn', 'Talk', 'Burrow', 'Hide', 'ComeOut',
'TakeOff', 'Fly', 'Land']))
class ActorType(Enum):
__slots__ = ()
enum_values = dict(enumerate(['None', 'Ladder', 'Bouncer', 'Sign', 'GoldenCube', 'PickUp', 'Bomb', 'Destructible',
'DestructiblePermanent', 'Vase', 'Door', 'Heart', 'Watcher', 'Crystal', 'BlackHole',
'Vine', 'BigBomb', 'TntBlock', 'TntPickup', 'MotorBlock', 'Hurt', 'Checkpoint',
'TreasureChest', 'CubeShard', 'BigHeart', 'SkeletonKey', 'ExploSwitch', 'PushSwitch',
'EightBitDoor', 'PushSwitchSticky', 'PushSwitchPermanent', 'SuckBlock', 'WarpGate',
'OneBitDoor', 'SpinBlock', 'PivotHandle', 'FourBitDoor', 'LightningPlatform',
'LightningGhost', 'Tombstone', 'SplitUpCube', 'UnlockedDoor', 'Hole', 'Couch',
'Valve', 'Rumbler', 'Waterfall', 'Trickle', 'Drips', 'Geyser', 'ConnectiveRail',
'BoltHandle', 'BoltNutBottom', 'BoltNutTop', 'CodeMachine', 'NumberCube',
'LetterCube', 'TriSkull', 'Tome', 'SecretCube', 'LesserGate', 'Crumbler',
'LaserEmitter', 'LaserBender', 'LaserReceiver', 'RebuildingHexahedron', 'TreasureMap',
'Timeswitch', 'TimeswitchMovingPart', 'Mail', 'Mailbox', 'Bookcase', 'TwoBitDoor',
'SixteenBitDoor', 'ThirtyTwoBitDoor', 'SixtyFourBitDoor', 'Owl', 'Bell',
'RotatingGroup', 'BigWaterfall', 'Telescope', 'SinkPickup', 'QrCode', 'FpsPost',
'PieceOfHeart', 'SecretPassage', 'Piston']))
class SurfaceType(Enum):
__slots__ = ()
enum_values = dict(enumerate(['Grass', 'Metal', 'Stone', 'Wood']))
class LiquidType(Enum):
__slots__ = ()
enum_values = dict(enumerate(['None', 'Water', 'Blood', 'Lava', 'Sewer', 'Purple', 'Green']))
class PathEndBehavior(Enum):
__slots__ = ()
enum_values = dict(enumerate(['Bounce', 'Loop', 'Stop']))
class ComparisonOperator(Enum):
__slots__ = ()
enum_values = {-1: 'None', 0: 'Equal', 1: 'Greater', 2: 'GreaterEqual', 3: 'Less', 4: 'LessEqual', 5: 'NotEqual'}
class CodeInput(Enum):
__slots__ = ()
enum_values = {0: 'None', 1: 'Up', 2: 'Down', 4: 'Left', 8: 'Right', 16: 'SpinLeft', 32: 'SpinRight', 64: 'Jump'}
class VibrationMotor(Enum):
__slots__ = ()
enum_values = dict(enumerate(['None', 'LeftLow', 'RightHigh']))
|
fesh0r/xnb_parse
|
xnb_parse/xna_types/fez/fez_basic.py
|
Python
|
mit
| 3,464
|
[
"CRYSTAL"
] |
fdd41b3177a89be2259f74052a16abbcae42f9e0b44a937337628add4d1fc6d6
|
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
SIZE_RANGES = {
'Y': 1 << 80,
'Z': 1 << 70,
'E': 1 << 60,
'P': 1 << 50,
'T': 1 << 40,
'G': 1 << 30,
'M': 1 << 20,
'K': 1 << 10,
'B': 1,
}
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
PASS_VARS = {
'check_mode': 'check_mode',
'debug': '_debug',
'diff': '_diff',
'keep_remote_files': '_keep_remote_files',
'module_name': '_name',
'no_log': 'no_log',
'remote_tmp': '_remote_tmp',
'selinux_special_fs': '_selinux_special_fs',
'shell_executable': '_shell',
'socket': '_socket_path',
'syslog_facility': '_syslog_facility',
'tmpdir': '_tmpdir',
'verbosity': '_verbosity',
'version': 'ansible_version',
}
PASS_BOOLS = ('no_log', 'debug', 'diff')
# Ansible modules can be written in any language.
# The functions available here can be used to do many common tasks,
# to simplify development of Python modules.
import atexit
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from itertools import chain, repeat
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
else:
sj_version = json.__version__.split('.')
if sj_version < ['1', '6']:
# Version 1.5 released 2007-01-18 does not have the encoding parameter which we need
print('\n{"msg": "Error: Ansible requires the stdlib json or simplejson >= 1.6. Neither was found!", "failed": true}')
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.common._collections_compat import (
deque,
KeysView,
Mapping, MutableMapping,
Sequence, MutableSequence,
Set, MutableSet,
)
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequence type
SEQUENCETYPE = frozenset, KeysView, Sequence
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names.
# Using ansible.module_utils.six is preferred.
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
    # These are options we want for setting metadata (mode, ownership, permissions in general) on
# created files (these are used by set_fs_attributes_if_different and included in
# load_file_common_arguments)
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
attributes=dict(aliases=['attr']),
# The following are not about perms and should not be in a rewritten file_common_args
src=dict(), # Maybe dest or path would be appropriate but src is not
follow=dict(type='bool', default=False), # Maybe follow is appropriate because it determines whether to follow symlinks for permission purposes too
force=dict(type='bool'),
# not taken by the file module, but other action plugins call the file module so this ignores
# them for now. In the future, the caller should take care of removing these from the module
# arguments before calling the file module.
content=dict(no_log=True), # used by copy
backup=dict(), # Used by a few modules to create a remote backup before updating the file
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
PERM_BITS = 0o7777 # file mode permission bits
EXEC_PERM_BITS = 0o0111 # execute permission bits
DEFAULT_PERM = 0o0666 # default file permission bits
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
print(
'\n{"failed": true, '
'"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
)
sys.exit(1)
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
    __subclasses__ returns only direct subclasses. This one goes down the class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
# Appending all subclasses to visit and keep a reference of available class
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
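# Illustrative sketch (not part of the original code): with a hypothetical
# class tree, get_all_subclasses also returns the grandchildren that a plain
# __subclasses__() call would miss.
#
#   class Base(object): pass
#   class Child(Base): pass
#   class GrandChild(Child): pass
#
#   get_all_subclasses(Base)   # -> [Child, GrandChild] (order may vary)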
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
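# Illustrative sketch (not part of the original code): a class opts into the
# platform dispatch above by calling load_platform_subclass from __new__. The
# class names below are hypothetical, mirroring how the User module uses it.
#
#   class User(object):
#       platform = 'Generic'
#       distribution = None
#       def __new__(cls, *args, **kwargs):
#           return load_platform_subclass(User, *args, **kwargs)
#
#   class FreeBsdUser(User):
#       platform = 'FreeBSD'
#       distribution = None
#
#   # On a FreeBSD host, User() returns a FreeBsdUser instance; elsewhere it
#   # falls back to the base class.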
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to byte str
        Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, text_type):
return to_bytes(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to text (unicode) str
        Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, binary_type):
# Warning, can traceback
return to_text(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
else:
return d
def return_values(obj):
""" Return native stringified values from datastructures.
For use with removing sensitive values pre-jsonification."""
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
        ``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
    type, then strip those strings from all of its nested values as well."""
deferred_removals = deque()
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
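# Illustrative sketch (not part of the original code): remove_values walks
# nested containers and masks any value listed in no_log_strings. The sample
# data below is hypothetical.
#
#   remove_values({'user': 'admin', 'args': ['--password', 'hunter2']},
#                 ['hunter2'])
#   # -> {'user': 'admin',
#   #     'args': ['--password', 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER']}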
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
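# Illustrative sketch (not part of the original code): the heuristic masks the
# password portion of URL-style credentials. The sample string is hypothetical.
#
#   heuristic_log_sanitize('fetching http://bob:secret@example.com/repo')
#   # -> 'fetching http://bob:********@example.com/repo'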
def bytes_to_human(size, isbits=False, unit=None):
base = 'Bytes'
if isbits:
base = 'bits'
suffix = ''
for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
break
if limit != 1:
suffix += base[0]
else:
suffix = base
return '%.2f %s' % (size / limit, suffix)
def human_to_bytes(number, default_unit=None, isbits=False):
'''
Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument
ex:
human_to_bytes('10M') <=> human_to_bytes(10, 'M')
'''
m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
num = float(m.group(1))
except:
raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
unit = m.group(2)
if unit is None:
unit = default_unit
if unit is None:
''' No unit given, returning raw number '''
return int(round(num))
range_key = unit[0].upper()
try:
limit = SIZE_RANGES[range_key]
except:
raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
# default value
unit_class = 'B'
unit_class_name = 'byte'
# handling bits case
if isbits:
unit_class = 'b'
unit_class_name = 'bit'
# check unit value if more than one character (KB, MB)
if len(unit) > 1:
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
if unit_class_name in unit.lower():
pass
elif unit[1] != unit_class:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
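# Illustrative sketch (not part of the original code): converting sizes with
# the two helpers above (1K == 1024, per SIZE_RANGES).
#
#   human_to_bytes('10M')      # -> 10485760
#   human_to_bytes(10, 'M')    # -> 10485760
#   human_to_bytes('64', 'K')  # -> 65536
#   bytes_to_human(10485760)   # -> '10.00 MB'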
def is_executable(path):
'''is the given path executable?
Limitations:
* Does not account for FSACLs.
* Most times we really want to know "Can the current user execute this
file" This function does not tell us that, only if an execute bit is set.
'''
# These are all bitfields so first bitwise-or all the permissions we're
# looking for, then bitwise-and with the file's mode to determine if any
# execute bits are set.
return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
def _load_params():
    ''' read the module's parameters and store them globally.
    This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
        # This helper is used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
attribute_list = []
for attr in attributes:
if attr in FILE_ATTRIBUTES:
attribute_list.append(FILE_ATTRIBUTES[attr])
return attribute_list
def get_flags_from_attributes(attributes):
flags = []
for key, attr in FILE_ATTRIBUTES.items():
if attr in attributes:
flags.append(key)
return ''.join(flags)
def _json_encode_fallback(obj):
if isinstance(obj, Set):
return list(obj)
elif isinstance(obj, datetime.datetime):
return obj.isoformat()
raise TypeError("Cannot json serialize %s" % to_native(obj))
def jsonify(data, **kwargs):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding, default=_json_encode_fallback, **kwargs)
        # Old systems using an old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data, default=_json_encode_fallback, **kwargs)
except UnicodeDecodeError:
continue
raise UnicodeError('Invalid unicode encoding encountered')
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/* for examples
'''
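        # A minimal sketch (not part of the original code) of a module built on
        # this class; the argument names are hypothetical.
        #
        #   def main():
        #       module = AnsibleModule(
        #           argument_spec=dict(
        #               name=dict(type='str', required=True),
        #               state=dict(type='str', default='present',
        #                          choices=['present', 'absent']),
        #           ),
        #           supports_check_mode=True,
        #       )
        #       module.exit_json(changed=False, name=module.params['name'])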
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
# Check whether code set this explicitly for deprecation purposes
if check_invalid_arguments is None:
check_invalid_arguments = True
module_set_check_invalid_arguments = False
else:
module_set_check_invalid_arguments = True
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self._clean = {}
self.aliases = {}
self._legal_inputs = ['_ansible_%s' % k for k in PASS_VARS]
self._options_context = list()
self._tmpdir = None
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
# Do this at the end so that logging parameters have been set up
        # This is to warn third party module authors that the functionality is going away.
# We exclude uri and zfs as they have their own deprecation warnings for users and we'll
# make sure to update their code to stop using check_invalid_arguments when 2.9 rolls around
if module_set_check_invalid_arguments and self._name not in ('uri', 'zfs'):
self.deprecate('Setting check_invalid_arguments is deprecated and will be removed.'
                           ' Update the code for this module. In the future, AnsibleModule will'
' always check for invalid arguments.', version='2.9')
@property
def tmpdir(self):
# if _ansible_tmpdir was not set, the module needs to create it and
# clean it up once finished.
if self._tmpdir is None:
basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
if not os.path.exists(basedir):
self.warn("Module remote_tmp %s did not exist and was created "
"with a mode of 0700, this may cause issues when "
"running as another user. To avoid this, create the "
"remote_tmp dir with the correct permissions "
"manually" % basedir)
os.makedirs(basedir, mode=0o700)
basefile = "ansible-moduletmp-%s-" % time.time()
tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
if not self._keep_remote_files:
atexit.register(shutil.rmtree, tmpdir)
self._tmpdir = tmpdir
return self._tmpdir
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
        Many modules deal with files; this encapsulates the common
        options that the file module accepts so that they are directly
        available to all modules and the code can be shared.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
        Returns a tuple containing (True, selinux_context) if the given path is on an
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
return True
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
if owner is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
if group is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if self.check_file_absent_if_check_mode(b_path):
return True
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
                # prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
existing = self.get_file_attributes(b_path)
attr_mod = '='
if attributes.startswith(('-', '+')):
attr_mod = attributes[0]
attributes = attributes[1:]
if existing.get('attr_flags', '') != attributes or attr_mod == '-':
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = '%s%s' % (attr_mod, attributes)
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
self.fail_json(path=to_text(b_path), msg='chattr failed',
details=to_native(e), exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
This enables symbolic chmod string parsing as stated in the chmod man-page
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
            # The user(s) that the mode applies to is the first element in the
# 'permlist' list. Take that and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
# Now we have two lists of equal length: one with the requested
# permissions and one with the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask, if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
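# os.umask(0) returns the previous umask while clearing it; restoring it
# on the line above means this pair only *reads* the process umask.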
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
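# The table below maps each symbolic permission character to the octal bits
# it contributes for the given user class; the 'u', 'g' and 'o' entries copy
# the source class's existing rwx bits shifted into the target class's
# position, and 'r'/'w'/'x' are masked with the reversed umask when no user
# class was given.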
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
def check_file_absent_if_check_mode(self, file_path):
return self.check_mode and not os.path.exists(file_path)
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} # alias:canon
if param is None:
param = self.params
if spec is None:
spec = self.argument_spec
for (k, v) in spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
raise Exception('internal error: aliases must be a list or tuple')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in param:
param[k] = param[alias]
return aliases_results
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = param.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
if arg_opts.get('removed_in_version') is not None and arg_name in param:
self._deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
'version': arg_opts.get('removed_in_version')
})
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for (k, v) in list(param.items()):
if check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
elif k.startswith('_ansible_'):
# handle setting internal properties from internal ansible vars
key = k.replace('_ansible_', '')
if key in PASS_BOOLS:
setattr(self, PASS_VARS[key], self.boolean(v))
else:
setattr(self, PASS_VARS[key], v)
# clean up internal params:
del self.params[k]
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
count = 0
if param is None:
param = self.params
for term in check:
if term in param:
count += 1
return count
def _check_mutually_exclusive(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count > 1:
msg = "parameters are mutually exclusive: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count == 0:
msg = "one of the following is required: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
for check in spec:
counts = [self._count_terms([field], param) for field in check]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
msg = "parameters are required together: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_arguments(self, spec=None, param=None):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
msg = "missing required arguments: %s" % ", ".join(missing)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
''' ensure that conditionally required parameters are present '''
if spec is None:
return
if param is None:
param = self.params
for sp in spec:
missing = []
max_missing_count = 0
is_one_of = False
if len(sp) == 4:
key, val, requirements, is_one_of = sp
else:
key, val, requirements = sp
# If is_one_of is True, at least one requirement should be
# present; otherwise all requirements should be present.
if is_one_of:
max_missing_count = len(requirements)
term = 'any'
else:
term = 'all'
if key in param and param[key] == val:
for check in requirements:
count = self._count_terms((check,), param)
if count == 0:
missing.append(check)
if len(missing) and len(missing) >= max_missing_count:
msg = "%s is %s but %s of the following are missing: %s" % (key, val, term, ', '.join(missing))
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
# Allow one or more when type='list' param with choices
if isinstance(param[k], list):
diff_list = ", ".join([item for item in param[k] if item not in choices])
if diff_list:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
elif param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
# do not allow method calls to modules
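# literal_eval below only evaluates Python literal structures; the regex
# guards are an extra short-circuit for strings that look like method calls
# or imports.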
if not isinstance(value, string_types):
# already templated to a data structure, perhaps?
if include_exceptions:
return (value, None)
return value
if re.search(r'\w\.\w+\(', value):
if include_exceptions:
return (value, None)
return value
# do not allow imports
if re.search(r'import \w+', value):
if include_exceptions:
return (value, None)
return value
try:
result = literal_eval(value)
if include_exceptions:
return (result, None)
else:
return result
except Exception as e:
if include_exceptions:
return (value, e)
return value
def _check_type_str(self, value):
if isinstance(value, string_types):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [str(value)]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, string_types):
if value.startswith("{"):
try:
return json.loads(value)
except:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
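# Character-by-character scan: a backslash escapes the next character,
# quotes toggle quoted state, and an unquoted ',' or ' ' terminates the
# current key=value field.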
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (text_type, binary_type)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return self.jsonify(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_type_bytes(self, value):
try:
self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
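# For each dict-typed (or list-of-dict) parameter that declares sub
# 'options', run the same alias/no_log/type/requirement checks against
# every nested dict, recursing into deeper sub-specs.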
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if v.get('apply_defaults', False):
if spec is not None:
if params.get(k) is None:
params[k] = {}
else:
continue
elif spec is None or k not in params or params[k] is None:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if param[k] is None:
continue
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
try:
param[k] = type_checker(value)
except (TypeError, ValueError) as e:
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" %
(k, type(value), wanted, to_native(e)))
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
if HAS_SYSLOG:
# If syslog_facility specified, it needs to convert
# from the facility name to the facility code, and
# set it as SYSLOG_FACILITY argument of journal.send()
facility = getattr(syslog,
self._syslog_facility,
syslog.LOG_USER) >> 3
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
SYSLOG_FACILITY=facility,
**dict(journal_args))
else:
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
**dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
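# Usage sketch (assuming an AnsibleModule instance named 'module'):
# lsattr_path = module.get_bin_path('lsattr')                 # None if absent
# chattr_path = module.get_bin_path('chattr', required=True)  # fail_json if absent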
opt_dirs = [] if opt_dirs is None else opt_dirs
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
try:
return jsonify(data)
except UnicodeError as e:
self.fail_json(msg=to_text(e))
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
if 'msg' not in kwargs:
raise AssertionError("implementation error -- msg to explain the error is required")
kwargs['failed'] = True
# add traceback if debug or high verbosity and it is missing
# Note: badly named as exception, it has really always been 'traceback'
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we can not check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ', '.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
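# Usage sketch (the path is illustrative, assuming an AnsibleModule
# instance named 'module'):
# checksum = module.digest_from_file('/etc/hosts', 'sha1')  # hex digest or None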
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
'''make a date-marked backup of the specified file and return the backup path (an empty string if the file does not exist)'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
'''atomically move src to dest, copying attributes from dest; returns true on success.
It uses os.rename to ensure this, as it is an atomic operation; the rest of the function
works around limitations and corner cases and ensures the selinux context is saved if possible'''
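# In outline: copy mode/ownership/flags from an existing dest onto src, try a
# plain os.rename first, and on EPERM/EXDEV/EACCES/ETXTBSY/EBUSY fall back to
# writing a temp file beside dest and renaming that into place; _unsafe_writes
# is used only when unsafe_writes is enabled.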
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied),
# 16 (device or resource busy) and 26 (text file busy), which happen on vagrant synced folders and other 'exotic' non-POSIX file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
b_dest_dir = os.path.dirname(b_dest)
b_suffix = os.path.basename(b_dest)
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp',
dir=b_dest_dir, suffix=b_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
error_msg = ('Failed creating tmp file for atomic move. This usually happens when using Python3 less than Python3.5. '
'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tmpdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity; only if
# the user insists and we get the appropriate error do we update the file unsafely
try:
out_dest = in_src = None
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
def _clean_args(self, args):
if not self._clean:
# create a printable version of the command for use in reporting later,
# which strips out things like passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
return self._clean
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
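# Usage sketch (assuming an AnsibleModule instance named 'module'; the
# commands are illustrative):
# rc, out, err = module.run_command(['ls', '-l', '/tmp'], check_rc=True)
# rc, out, err = module.run_command('echo "$HOME"', use_unsafe_shell=True)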
# used by clean args later on
self._clean = None
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = " ".join([shlex_quote(x) for x in args])
# not set explicitly, check if set by controller
if executable:
args = [executable, '-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [self._shell, '-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand shellisms
args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + self._clean_args(args))
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
else:
stdout = stdout
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then is it safe to wait for the process to finish
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
|
JimCircadian/ansible
|
lib/ansible/module_utils/basic.py
|
Python
|
gpl-3.0
| 117,167
|
[
"VisIt"
] |
e6f788843f83fa16291f4ba1cc55ec6c8c4bb893778ace25098b5a7a6ac1f1de
|
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
class EntryParser(NodeVisitor):
def __init__(self, grammar, text):
self.entry = {}
ast = Grammar(grammar).parse(text)
self.visit(ast)
def visit_name(self, n, vc):
self.entry['name'] = n.text
def visit_gender(self, n, vc):
self.entry['gender'] = n.text
def visit_age(self, n, vc):
self.entry['age'] = n.text
def generic_visit(self, n, vc):
pass
grammar = """\
entry = name sep? gender? (sep age)?
sep = ws "," ws
ws = " "*
name = ~"[A-z]*"
gender = "male" / "female"
age = ~"[0-9]*"
"""
text = """\
Bob
Kim,female,30
Joe,male
"""
for line in text.splitlines():
print( EntryParser(grammar, line).entry )
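# Expected output, one dict per input line (key order may vary by Python
# version):
#   {'name': 'Bob'}
#   {'name': 'Kim', 'gender': 'female', 'age': '30'}
#   {'name': 'Joe', 'gender': 'male'}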
|
codeyash/plugins
|
PyPlugins/PhpParser/py/try.py
|
Python
|
apache-2.0
| 796
|
[
"VisIt"
] |
adad29f92eddad38ac909cb3fb8d886d32b9e035b11a51971e00d3c2cbf36706
|
"""Mayavi/traits GUI for converting data from KIT systems"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import numpy as np
from scipy.linalg import inv
from threading import Thread
from ..externals.six.moves import queue
from ..io.meas_info import _read_dig_points, _make_dig_points
from ..utils import logger
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import confirm, error, FileDialog, OK, YES, information
from traits.api import (HasTraits, HasPrivateTraits, cached_property,
Instance, Property, Bool, Button, Enum, File,
Float, Int, List, Str, Array, DelegatesTo)
from traitsui.api import (View, Item, HGroup, VGroup, spring, TextEditor,
CheckListEditor, EnumEditor, Handler)
from traitsui.menu import NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except Exception:
from ..utils import trait_wraith
HasTraits = HasPrivateTraits = Handler = object
cached_property = MayaviScene = MlabSceneModel = Bool = Button = Float = \
DelegatesTo = Enum = File = Instance = Int = List = Property = \
Str = Array = spring = View = Item = HGroup = VGroup = EnumEditor = \
NoButtons = CheckListEditor = SceneEditor = TextEditor = trait_wraith
from ..io.kit.kit import RawKIT, KIT
from ..transforms import (apply_trans, als_ras_trans, als_ras_trans_mm,
get_ras_to_neuromag_trans, Transform)
from ..coreg import _decimate_points, fit_matched_points
from ._marker_gui import CombineMarkersPanel, CombineMarkersModel
from ._help import read_tooltips
from ._viewer import (HeadViewController, headview_item, PointObject,
_testing_mode)
use_editor = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
backend_is_wx = False # is there a way to determine this?
if backend_is_wx:
# wx backend allows labels for wildcards
hsp_points_wildcard = ['Head Shape Points (*.txt)|*.txt']
hsp_fid_wildcard = ['Head Shape Fiducials (*.txt)|*.txt']
kit_con_wildcard = ['Continuous KIT Files (*.sqd;*.con)|*.sqd;*.con']
else:
hsp_points_wildcard = ['*.txt']
hsp_fid_wildcard = ['*.txt']
kit_con_wildcard = ['*.sqd;*.con']
tooltips = read_tooltips('kit2fiff')
class Kit2FiffModel(HasPrivateTraits):
"""Data Model for Kit2Fiff conversion
- Markers are transformed into RAS coordinate system (as are the sensor
coordinates).
- Head shape digitizer data is transformed into neuromag-like space.
"""
# Input Traits
markers = Instance(CombineMarkersModel, ())
sqd_file = File(exists=True, filter=kit_con_wildcard)
hsp_file = File(exists=True, filter=hsp_points_wildcard, desc="Digitizer "
"head shape")
fid_file = File(exists=True, filter=hsp_fid_wildcard, desc="Digitizer "
"fiducials")
stim_coding = Enum(">", "<", "channel")
stim_chs = Str("")
stim_chs_array = Property(depends_on='stim_chs')
stim_chs_ok = Property(depends_on='stim_chs_array')
stim_chs_comment = Property(depends_on='stim_chs_array')
stim_slope = Enum("-", "+")
stim_threshold = Float(1.)
# Marker Points
use_mrk = List(list(range(5)), desc="Which marker points to use for the "
"device head coregistration.")
# Derived Traits
mrk = Property(depends_on='markers.mrk3.points')
# Polhemus Fiducials
elp_raw = Property(depends_on=['fid_file'])
hsp_raw = Property(depends_on=['hsp_file'])
polhemus_neuromag_trans = Property(depends_on=['elp_raw'])
# Polhemus data (in neuromag space)
elp = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
fid = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
hsp = Property(depends_on=['hsp_raw', 'polhemus_neuromag_trans'])
# trans
dev_head_trans = Property(depends_on=['elp', 'mrk', 'use_mrk'])
head_dev_trans = Property(depends_on=['dev_head_trans'])
# info
sqd_fname = Property(Str, depends_on='sqd_file')
hsp_fname = Property(Str, depends_on='hsp_file')
fid_fname = Property(Str, depends_on='fid_file')
can_save = Property(Bool, depends_on=['stim_chs_ok', 'sqd_file', 'fid',
'elp', 'hsp', 'dev_head_trans'])
@cached_property
def _get_can_save(self):
"Only allow saving when either all or no head shape elements are set."
if not self.stim_chs_ok or not self.sqd_file:
return False
has_all_hsp = (np.any(self.dev_head_trans) and np.any(self.hsp) and
np.any(self.elp) and np.any(self.fid))
if has_all_hsp:
return True
has_any_hsp = self.hsp_file or self.fid_file or np.any(self.mrk)
return not has_any_hsp
@cached_property
def _get_dev_head_trans(self):
if (self.mrk is None) or not np.any(self.fid):
return np.eye(4)
src_pts = self.mrk
dst_pts = self.elp
n_use = len(self.use_mrk)
if n_use < 3:
error(None, "Estimating the device head transform requires at "
"least 3 marker points. Please adjust the markers used.",
"Not Enough Marker Points")
return
elif n_use < 5:
src_pts = src_pts[self.use_mrk]
dst_pts = dst_pts[self.use_mrk]
trans = fit_matched_points(src_pts, dst_pts, out='trans')
return trans
@cached_property
def _get_elp(self):
if self.elp_raw is None:
return np.empty((0, 3))
pts = self.elp_raw[3:8]
pts = apply_trans(self.polhemus_neuromag_trans, pts)
return pts
@cached_property
def _get_elp_raw(self):
if not self.fid_file:
return
try:
pts = _read_dig_points(self.fid_file)
if len(pts) < 8:
raise ValueError("File contains %i points, need 8" % len(pts))
except Exception as err:
error(None, str(err), "Error Reading Fiducials")
self.reset_traits(['fid_file'])
raise
else:
return pts
@cached_property
def _get_fid(self):
if self.elp_raw is None:
return np.empty((0, 3))
pts = self.elp_raw[:3]
pts = apply_trans(self.polhemus_neuromag_trans, pts)
return pts
@cached_property
def _get_fid_fname(self):
if self.fid_file:
return os.path.basename(self.fid_file)
else:
return '-'
@cached_property
def _get_head_dev_trans(self):
return inv(self.dev_head_trans)
@cached_property
def _get_hsp(self):
if (self.hsp_raw is None) or not np.any(self.polhemus_neuromag_trans):
return np.empty((0, 3))
else:
pts = apply_trans(self.polhemus_neuromag_trans, self.hsp_raw)
return pts
@cached_property
def _get_hsp_fname(self):
if self.hsp_file:
return os.path.basename(self.hsp_file)
else:
return '-'
@cached_property
def _get_hsp_raw(self):
fname = self.hsp_file
if not fname:
return
try:
pts = _read_dig_points(fname)
n_pts = len(pts)
if n_pts > KIT.DIG_POINTS:
msg = ("The selected head shape contains {n_in} points, "
"which is more than the recommended maximum ({n_rec}). "
"The file will be automatically downsampled, which "
"might take a while. A better way to downsample is "
"using FastScan.")
msg = msg.format(n_in=n_pts, n_rec=KIT.DIG_POINTS)
information(None, msg, "Too Many Head Shape Points")
pts = _decimate_points(pts, 5)
except Exception as err:
error(None, str(err), "Error Reading Head Shape")
self.reset_traits(['hsp_file'])
raise
else:
return pts
@cached_property
def _get_mrk(self):
return apply_trans(als_ras_trans, self.markers.mrk3.points)
@cached_property
def _get_polhemus_neuromag_trans(self):
if self.elp_raw is None:
return
pts = apply_trans(als_ras_trans_mm, self.elp_raw[:3])
nasion, lpa, rpa = pts
trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
trans = np.dot(trans, als_ras_trans_mm)
return trans
@cached_property
def _get_sqd_fname(self):
if self.sqd_file:
return os.path.basename(self.sqd_file)
else:
return '-'
@cached_property
def _get_stim_chs_array(self):
if not self.stim_chs.strip():
return True
try:
out = eval("r_[%s]" % self.stim_chs, vars(np))
if out.dtype.kind != 'i':
raise TypeError("Need array of int")
except:
return None
else:
return out
@cached_property
def _get_stim_chs_comment(self):
if self.stim_chs_array is None:
return "Invalid!"
elif self.stim_chs_array is True:
return "Ok: Default channels"
else:
return "Ok: %i channels" % len(self.stim_chs_array)
@cached_property
def _get_stim_chs_ok(self):
return self.stim_chs_array is not None
def clear_all(self):
"""Clear all specified input parameters"""
self.markers.clear = True
self.reset_traits(['sqd_file', 'hsp_file', 'fid_file', 'use_mrk'])
def get_event_info(self):
"""
Return a string with the number of events found for each trigger value
"""
if len(self.events) == 0:
return "No events found."
count = ["Events found:"]
events = np.array(self.events)
for i in np.unique(events):
n = np.sum(events == i)
count.append('%3i: %i' % (i, n))
return os.linesep.join(count)
def get_raw(self, preload=False):
"""Create a raw object based on the current model settings
"""
if not self.can_save:
raise ValueError("Not all necessary parameters are set")
# stim channels and coding
if self.stim_chs_array is True:
if self.stim_coding == 'channel':
stim_code = 'channel'
raise NotImplementedError("Finding default event channels")
else:
stim = self.stim_coding
stim_code = 'binary'
else:
stim = self.stim_chs_array
if self.stim_coding == 'channel':
stim_code = 'channel'
elif self.stim_coding == '<':
stim_code = 'binary'
elif self.stim_coding == '>':
                # '>' (little-endian) coding: reverse the channel order before
                # passing the list to RawKIT
                stim = stim[::-1]
stim_code = 'binary'
else:
raise RuntimeError("stim_coding=%r" % self.stim_coding)
logger.info("Creating raw with stim=%r, slope=%r, stim_code=%r, "
"stimthresh=%r", stim, self.stim_slope, stim_code,
self.stim_threshold)
raw = RawKIT(self.sqd_file, preload=preload, stim=stim,
slope=self.stim_slope, stim_code=stim_code,
stimthresh=self.stim_threshold)
if np.any(self.fid):
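            # fid rows are nasion, LPA and RPA (in that order); elp and hsp
            # provide the HPI coil and head shape points for the digitization.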
raw.info['dig'] = _make_dig_points(self.fid[0], self.fid[1],
self.fid[2], self.elp,
self.hsp)
raw.info['dev_head_t'] = Transform('meg', 'head',
self.dev_head_trans)
        return raw


class Kit2FiffFrameHandler(Handler):
"""Handler that checks for unfinished processes before closing its window
"""
def close(self, info, is_ok):
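        # Returning False vetoes the close request while saves are still queued.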
if info.object.kit2fiff_panel.queue.unfinished_tasks:
msg = ("Can not close the window while saving is still in "
"progress. Please wait until all files are processed.")
title = "Saving Still in Progress"
information(None, msg, title)
return False
else:
            return True


class Kit2FiffPanel(HasPrivateTraits):
"""Control panel for kit2fiff conversion"""
model = Instance(Kit2FiffModel)
# model copies for view
use_mrk = DelegatesTo('model')
sqd_file = DelegatesTo('model')
hsp_file = DelegatesTo('model')
fid_file = DelegatesTo('model')
stim_coding = DelegatesTo('model')
stim_chs = DelegatesTo('model')
stim_chs_ok = DelegatesTo('model')
stim_chs_comment = DelegatesTo('model')
stim_slope = DelegatesTo('model')
stim_threshold = DelegatesTo('model')
# info
can_save = DelegatesTo('model')
sqd_fname = DelegatesTo('model')
hsp_fname = DelegatesTo('model')
fid_fname = DelegatesTo('model')
# Source Files
reset_dig = Button
# Visualization
scene = Instance(MlabSceneModel)
fid_obj = Instance(PointObject)
elp_obj = Instance(PointObject)
hsp_obj = Instance(PointObject)
# Output
save_as = Button(label='Save FIFF...')
clear_all = Button(label='Clear All')
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_len_str = Property(Str, depends_on=['queue_len'])
error = Str('')
view = View(
VGroup(VGroup(Item('sqd_file', label="Data",
tooltip=tooltips['sqd_file']),
Item('sqd_fname', show_label=False, style='readonly'),
Item('hsp_file', label='Dig Head Shape'),
Item('hsp_fname', show_label=False, style='readonly'),
Item('fid_file', label='Dig Points'),
Item('fid_fname', show_label=False, style='readonly'),
Item('reset_dig', label='Clear Digitizer Files',
show_label=False),
Item('use_mrk', editor=use_editor, style='custom'),
label="Sources", show_border=True),
VGroup(Item('stim_slope', label="Event Onset", style='custom',
tooltip=tooltips['stim_slope'],
editor=EnumEditor(
values={'+': '2:Peak (0 to 5 V)',
'-': '1:Trough (5 to 0 V)'},
cols=2)),
Item('stim_coding', label="Value Coding", style='custom',
editor=EnumEditor(values={'>': '1:little-endian',
'<': '2:big-endian',
'channel': '3:Channel#'},
cols=3),
tooltip=tooltips["stim_coding"]),
Item('stim_chs', label='Channels', style='custom',
tooltip=tooltips["stim_chs"],
editor=TextEditor(evaluate_name='stim_chs_ok',
auto_set=True)),
Item('stim_chs_comment', label='>', style='readonly'),
Item('stim_threshold', label='Threshold',
tooltip=tooltips['stim_threshold']),
label='Events', show_border=True),
HGroup(Item('save_as', enabled_when='can_save'), spring,
'clear_all', show_labels=False),
Item('queue_feedback', show_label=False, style='readonly'),
Item('queue_current', show_label=False, style='readonly'),
Item('queue_len_str', show_label=False, style='readonly')
)
)
def __init__(self, *args, **kwargs):
super(Kit2FiffPanel, self).__init__(*args, **kwargs)
# setup save worker
def worker():
while True:
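                # Block until _save_as_fired queues a (raw, fname) pair, then
                # write the file in the background so the GUI stays responsive.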
raw, fname = self.queue.get()
basename = os.path.basename(fname)
self.queue_len -= 1
self.queue_current = 'Processing: %s' % basename
# task
try:
raw.save(fname, overwrite=True)
except Exception as err:
self.error = str(err)
res = "Error saving: %s"
else:
res = "Saved: %s"
# finalize
self.queue_current = ''
self.queue_feedback = res % basename
self.queue.task_done()
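        # Run the worker as a daemon thread so pending saves never block
        # interpreter exit.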
t = Thread(target=worker)
t.daemon = True
t.start()
# setup mayavi visualization
m = self.model
self.fid_obj = PointObject(scene=self.scene, color=(25, 225, 25),
point_scale=5e-3)
self.elp_obj = PointObject(scene=self.scene, color=(50, 50, 220),
point_scale=1e-2, opacity=.2)
self.hsp_obj = PointObject(scene=self.scene, color=(200, 200, 200),
point_scale=2e-3)
if not _testing_mode():
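            # One-way sync: the plotted point clouds follow the model's digitizer
            # data, and their transform follows the head-to-device transform.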
for name, obj in zip(['fid', 'elp', 'hsp'],
[self.fid_obj, self.elp_obj, self.hsp_obj]):
m.sync_trait(name, obj, 'points', mutual=False)
m.sync_trait('head_dev_trans', obj, 'trans', mutual=False)
self.scene.camera.parallel_scale = 0.15
self.scene.mlab.view(0, 0, .15)
def _clear_all_fired(self):
self.model.clear_all()
@cached_property
def _get_queue_len_str(self):
if self.queue_len:
return "Queue length: %i" % self.queue_len
else:
return ''
def _reset_dig_fired(self):
self.reset_traits(['hsp_file', 'fid_file'])
def _save_as_fired(self):
# create raw
try:
raw = self.model.get_raw()
except Exception as err:
error(None, str(err), "Error Creating KIT Raw")
raise
# find default path
stem, _ = os.path.splitext(self.sqd_file)
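        # Follow the MNE naming convention of ending raw files in '-raw.fif'.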
if not stem.endswith('raw'):
stem += '-raw'
default_path = stem + '.fif'
# save as dialog
dlg = FileDialog(action="save as",
wildcard="fiff raw file (*.fif)|*.fif",
default_path=default_path)
dlg.open()
if dlg.return_code != OK:
return
fname = dlg.path
if not fname.endswith('.fif'):
fname += '.fif'
if os.path.exists(fname):
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % fname, "Overwrite File?")
if answer != YES:
return
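        # Queue the save; the worker thread created in __init__ performs the
        # actual write so the GUI is not blocked.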
self.queue.put((raw, fname))
        self.queue_len += 1


class Kit2FiffFrame(HasTraits):
"""GUI for interpolating between two KIT marker files"""
model = Instance(Kit2FiffModel, ())
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
marker_panel = Instance(CombineMarkersPanel)
kit2fiff_panel = Instance(Kit2FiffPanel)
view = View(HGroup(VGroup(Item('marker_panel', style='custom'),
show_labels=False),
VGroup(Item('scene',
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical', show_label=False),
VGroup(headview_item, show_labels=False),
),
VGroup(Item('kit2fiff_panel', style='custom'),
show_labels=False),
show_labels=False,
),
handler=Kit2FiffFrameHandler(),
height=700, resizable=True, buttons=NoButtons)
def _headview_default(self):
return HeadViewController(scene=self.scene, scale=160, system='RAS')
def _kit2fiff_panel_default(self):
return Kit2FiffPanel(scene=self.scene, model=self.model)
def _marker_panel_default(self):
return CombineMarkersPanel(scene=self.scene, model=self.model.markers,
trans=als_ras_trans)
| cmoutard/mne-python | mne/gui/_kit2fiff_gui.py | Python | bsd-3-clause | 20,684 | ["Mayavi"] | a4c9972176a064bcebae34defe614042db55f525f4dc0d961c4e0b3e4aa0a1ea |