text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
########################################################################
# File : JobCleaningAgent.py
# Author : A.T.
########################################################################
""" The Job Cleaning Agent controls removing jobs from the WMS in the end of their life cycle.
This agent will take care of removing user jobs, while production jobs should be removed through the
:mod:`~DIRAC.TransformationSystem.Agent.TransformationCleaningAgent`.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN JobCleaningAgent
:end-before: ##END
:dedent: 2
:caption: JobCleaningAgent options
"""
from __future__ import absolute_import
__RCSID__ = "$Id$"
import time
import os
from DIRAC import S_OK, gLogger
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import JobMonitoringClient
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
import DIRAC.Core.Utilities.Time as Time
class JobCleaningAgent(AgentModule):
  """
  The Job Cleaning Agent controls removing jobs from the WMS at the end of
  their life cycle: it removes user jobs only, while production job types
  (``self.prodTypes``) are excluded and left to the
  TransformationCleaningAgent.

  The specific agents must provide the following methods:
    * initialize() for initial settings
    * beginExecution()
    * execute() - the main method called in the agent cycle
    * endExecution()
    * finalize() - the graceful exit of the method, this one is usually used for the agent restart
  """

  def __init__(self, *args, **kwargs):
    """ c'tor
    """
    AgentModule.__init__(self, *args, **kwargs)

    # clients
    # FIXME: shouldn't we avoid using the DBs directly, and instead go through the service?
    self.jobDB = None
    self.taskQueueDB = None
    self.jobLoggingDB = None

    self.maxJobsAtOnce = 100      # upper bound of jobs treated per selection
    self.jobByJob = False         # remove jobs one by one instead of in bulk
    self.throttlingPeriod = 0.    # seconds to sleep between individual removals
    self.prodTypes = []           # job types never cleaned by this agent
    self.removeStatusDelay = {}   # status -> number of days to keep such jobs

  #############################################################################
  def initialize(self):
    """ Sets defaults
    """
    self.am_setOption("PollingTime", 120)
    self.jobDB = JobDB()
    self.taskQueueDB = TaskQueueDB()
    self.jobLoggingDB = JobLoggingDB()
    # self.sandboxDB = SandboxDB( 'SandboxDB' )
    agentTSTypes = self.am_getOption('ProductionTypes', [])
    if agentTSTypes:
      self.prodTypes = agentTSTypes
    else:
      self.prodTypes = Operations().getValue(
          'Transformations/DataProcessing', ['MCSimulation', 'Merge'])
    gLogger.info("Will exclude the following Production types from cleaning %s" % (
        ', '.join(self.prodTypes)))
    self.maxJobsAtOnce = self.am_getOption('MaxJobsAtOnce', 500)
    self.jobByJob = self.am_getOption('JobByJob', False)
    self.throttlingPeriod = self.am_getOption('ThrottlingPeriod', 0.)
    # A negative delay disables removal for that status (see execute())
    self.removeStatusDelay['Done'] = self.am_getOption('RemoveStatusDelay/Done', 7)
    self.removeStatusDelay['Killed'] = self.am_getOption('RemoveStatusDelay/Killed', 7)
    self.removeStatusDelay['Failed'] = self.am_getOption('RemoveStatusDelay/Failed', 7)
    self.removeStatusDelay['Any'] = self.am_getOption('RemoveStatusDelay/Any', -1)
    return S_OK()

  def _getAllowedJobTypes(self):
    """ Get valid jobTypes
    """
    result = self.jobDB.getDistinctJobAttributes('JobType')
    if not result['OK']:
      return result
    cleanJobTypes = [jobType for jobType in result['Value']
                     if jobType not in self.prodTypes]
    self.log.notice("JobTypes to clean %s" % cleanJobTypes)
    return S_OK(cleanJobTypes)

  #############################################################################
  def execute(self):
    """ Remove jobs in various status
    """
    # Delete jobs in "Deleted" state
    result = self.removeJobsByStatus({'Status': 'Deleted'})
    if not result['OK']:
      return result
    # Get all the Job types that can be cleaned
    result = self._getAllowedJobTypes()
    if not result['OK']:
      return result
    # No jobs in the system subject to removal
    if not result['Value']:
      return S_OK()
    baseCond = {'JobType': result['Value']}
    # Remove jobs with final status
    for status, delay in self.removeStatusDelay.items():
      if delay < 0:
        # Negative delay means don't delete anything...
        continue
      condDict = dict(baseCond)
      if status != 'Any':
        condDict['Status'] = status
      delTime = str(Time.dateTime() - delay * Time.day)
      result = self.removeJobsByStatus(condDict, delTime)
      if not result['OK']:
        gLogger.warn('Failed to remove jobs in status %s' % status)
    return S_OK()

  def removeJobsByStatus(self, condDict, delay=False):
    """ Remove jobs matching the given conditions.

        :param dict condDict: selection conditions passed to JobDB.selectJobs
        :param delay: optional cut-off time (string); only jobs older than
                      this are selected
        :return: S_OK() / S_ERROR()
    """
    if delay:
      gLogger.verbose("Removing jobs with %s and older than %s day(s)" % (condDict, delay))
      result = self.jobDB.selectJobs(condDict, older=delay, limit=self.maxJobsAtOnce)
    else:
      gLogger.verbose("Removing jobs with %s " % condDict)
      result = self.jobDB.selectJobs(condDict, limit=self.maxJobsAtOnce)
    if not result['OK']:
      return result

    jobList = result['Value']
    # Defensive trim in case selectJobs returned more than requested
    if len(jobList) > self.maxJobsAtOnce:
      jobList = jobList[:self.maxJobsAtOnce]
    if not jobList:
      return S_OK()

    self.log.notice("Deleting %s jobs for %s" % (len(jobList), condDict))

    count = 0
    error_count = 0

    # Release the sandboxes still assigned to the selected jobs
    result = SandboxStoreClient(useCertificates=True).unassignJobs(jobList)
    if not result['OK']:
      gLogger.error("Cannot unassign jobs to sandboxes", result['Message'])
      return result

    result = self.deleteJobOversizedSandbox(jobList)
    if not result['OK']:
      gLogger.error(
          "Cannot schedule removal of oversized sandboxes", result['Message'])
      return result

    # Keep jobs whose oversized sandbox could not be scheduled for removal,
    # so they are retried on a later cycle
    for job in result['Value']['Failed']:
      jobList.remove(job)

    # TODO: we should not remove a job if it still has requests in the RequestManager.
    # But this logic should go in the client or in the service, and right now no service expose jobDB.removeJobFromDB
    if self.jobByJob:
      for jobID in jobList:
        resultJobDB = self.jobDB.removeJobFromDB(jobID)
        resultTQ = self.taskQueueDB.deleteJob(jobID)
        resultLogDB = self.jobLoggingDB.deleteJob(jobID)
        errorFlag = False
        # BUGFIX: report the message of the failing call itself, not the
        # message of the previous (sandbox) result object
        if not resultJobDB['OK']:
          gLogger.warn('Failed to remove job %d from JobDB' % jobID, resultJobDB['Message'])
          errorFlag = True
        if not resultTQ['OK']:
          gLogger.warn('Failed to remove job %d from TaskQueueDB' % jobID, resultTQ['Message'])
          errorFlag = True
        if not resultLogDB['OK']:
          gLogger.warn('Failed to remove job %d from JobLoggingDB' % jobID, resultLogDB['Message'])
          errorFlag = True
        if errorFlag:
          error_count += 1
        else:
          count += 1
        if self.throttlingPeriod:
          time.sleep(self.throttlingPeriod)
    else:
      result = self.jobDB.removeJobFromDB(jobList)
      if not result['OK']:
        gLogger.error('Failed to delete %d jobs from JobDB' % len(jobList))
      else:
        gLogger.info('Deleted %d jobs from JobDB' % len(jobList))

      # The TaskQueueDB has no bulk removal: go job by job
      for jobID in jobList:
        resultTQ = self.taskQueueDB.deleteJob(jobID)
        if not resultTQ['OK']:
          gLogger.warn('Failed to remove job %d from TaskQueueDB' % jobID, resultTQ['Message'])
          error_count += 1
        else:
          count += 1

      result = self.jobLoggingDB.deleteJob(jobList)
      if not result['OK']:
        gLogger.error('Failed to delete %d jobs from JobLoggingDB' % len(jobList))
      else:
        gLogger.info('Deleted %d jobs from JobLoggingDB' % len(jobList))

    if count > 0 or error_count > 0:
      gLogger.info('Deleted %d jobs from JobDB, %d errors' % (count, error_count))
    return S_OK()

  def deleteJobOversizedSandbox(self, jobIDList):
    """ Delete the job oversized sandbox files from storage elements.

        :param list jobIDList: jobs to treat
        :return: S_OK({'Successful': {jobID: lfn}, 'Failed': {jobID: lfn}})
    """
    failed = {}
    successful = {}

    result = JobMonitoringClient().getJobParameters(jobIDList, 'OutputSandboxLFN')
    if not result['OK']:
      return result
    osLFNList = result['Value']
    if not osLFNList:
      return S_OK({'Successful': successful, 'Failed': failed})

    # Schedule removal of the LFNs now
    # NOTE: iteritems() keeps this module Python 2 only, consistent with the
    # rest of the file
    for jobID, outputSandboxLFNdict in osLFNList.iteritems():
      lfn = outputSandboxLFNdict['OutputSandboxLFN']
      result = self.jobDB.getJobAttributes(jobID, ['OwnerDN', 'OwnerGroup'])
      if not result['OK'] or not result['Value']:
        # Cannot establish the owner credentials: keep the job for a later pass
        failed[jobID] = lfn
        continue
      ownerDN = result['Value']['OwnerDN']
      ownerGroup = result['Value']['OwnerGroup']
      result = self.__setRemovalRequest(lfn, ownerDN, ownerGroup)
      if not result['OK']:
        failed[jobID] = lfn
      else:
        successful[jobID] = lfn

    return S_OK({'Successful': successful, 'Failed': failed})

  def __setRemovalRequest(self, lfn, ownerDN, ownerGroup):
    """ Set removal request with the given credentials
    """
    oRequest = Request()
    oRequest.OwnerDN = ownerDN
    oRequest.OwnerGroup = ownerGroup
    oRequest.RequestName = os.path.basename(lfn).strip() + '_removal_request.xml'
    oRequest.SourceComponent = 'JobCleaningAgent'

    removeFile = Operation()
    removeFile.Type = 'RemoveFile'

    removedFile = File()
    removedFile.LFN = lfn

    removeFile.addFile(removedFile)
    oRequest.addOperation(removeFile)
    return ReqClient().putRequest(oRequest)
| petricm/DIRAC | WorkloadManagementSystem/Agent/JobCleaningAgent.py | Python | gpl-3.0 | 10,062 | [
"DIRAC"
] | 266cff17220f0f4c1013ae6c7810f319fab71da7a942fffb5f971019f202a5a3 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkUnstructuredGridAlgorithm(SimpleVTKClassModuleBase):
    """DeVIDE wrapper module around VTK's vtkUnstructuredGridAlgorithm.

    Auto-generated (see the createDeVIDEModuleFromVTKObject note above):
    it only configures the generic SimpleVTKClassModuleBase with a single
    vtkUnstructuredGrid input and a single vtkUnstructuredGrid output.
    """
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkUnstructuredGridAlgorithm(), 'Processing.',
            ('vtkUnstructuredGrid',), ('vtkUnstructuredGrid',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkUnstructuredGridAlgorithm.py | Python | bsd-3-clause | 527 | [
"VTK"
] | ab51b8672d06d4c68a3751c71bbcdedc5c9a25db148c6dbb55f5dc9ab5037bf9 |
import sys, os

# Prefer Setuptools (needed for the console_scripts entry point); fall back
# to plain distutils when it is not installed.
try:
    from setuptools import setup
    kw = {'entry_points':
          """[console_scripts]\nvirtualenv = virtualenv:main\n""",
          'zip_safe': False}
except ImportError:
    from distutils.core import setup
    if sys.platform == 'win32':
        print('Note: without Setuptools installed you will have to use "python -m virtualenv ENV"')
        kw = {}
    else:
        kw = {'scripts': ['scripts/virtualenv']}

here = os.path.dirname(os.path.abspath(__file__))

## Get long_description from index.txt:
# NOTE: plain open()/close() (no `with`) is kept deliberately — the
# classifiers below advertise Python 2.4 support.
f = open(os.path.join(here, 'docs', 'index.txt'))
long_description = f.read().strip()
# Only the text after the 'split here' marker is used as the description.
long_description = long_description.split('split here', 1)[1]
f.close()
f = open(os.path.join(here, 'docs', 'news.txt'))
long_description += "\n\n" + f.read()
f.close()

setup(name='virtualenv',
      # If you change the version here, change it in virtualenv.py and
      # docs/conf.py as well
      version="1.7",
      description="Virtual Python Environment builder",
      long_description=long_description,
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.4',
          'Programming Language :: Python :: 2.5',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.1',
          'Programming Language :: Python :: 3.2',
      ],
      keywords='setuptools deployment installation distutils',
      author='Ian Bicking',
      author_email='ianb@colorstudy.com',
      maintainer='Jannis Leidel, Carl Meyer and Brian Rosner',
      maintainer_email='python-virtualenv@groups.google.com',
      url='http://www.virtualenv.org',
      license='MIT',
      py_modules=['virtualenv'],
      packages=['virtualenv_support'],
      # Ship the bundled eggs matching the running interpreter version
      package_data={'virtualenv_support': ['*-py%s.egg' % sys.version[:3], '*.tar.gz']},
      test_suite='nose.collector',
      tests_require=['nose', 'Mock'],
      **kw
      )
| nanonyme/trivial-buildpack-python | virtualenv/setup.py | Python | mit | 2,155 | [
"Brian"
] | 151708cef87f3dcf8b006e81787b58d48b2fe13c3e58301097b650fa9e1d1693 |
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
from os import path
import sys

# True on POSIX platforms; used to decide the close_fds setting below.
ON_POSIX = 'posix' in sys.builtin_module_names

try:
    from subprocess import Popen
except ImportError:
    # Very old Pythons without the subprocess module: use os.popen3 directly.
    from os import popen3
else:
    def popen3(cmd):
        """os.popen3 work-alike built on subprocess.Popen.

        Runs *cmd* through the shell and returns the child's
        (stdin, stdout, stderr) file objects.
        """
        from subprocess import PIPE
        child = Popen(cmd, shell=True, close_fds=ON_POSIX,
                      stdin=PIPE, stdout=PIPE, stderr=PIPE)
        return child.stdin, child.stdout, child.stderr
def write_svnversion(svnversion, dir):
    # Write the given svn revision string into <dir>/svnversion.py so the
    # package can report the revision it was installed from at run time.
    # NOTE(review): the parameter `dir` shadows the builtin — kept for
    # backward compatibility with existing callers.
    svnversionfile = path.join(dir, 'svnversion.py')
    f = open(svnversionfile,'w')
    f.write('svnversion = "%s"\n' % svnversion)
    f.close()
    # Python 2 print statement — this module is Python 2 only
    print 'svnversion = ' +svnversion+' written to '+svnversionfile
    # assert svn:ignore property if the installation is under svn control
    # because svnversion.py has to be ignored by svn!
    cmd = popen3('svn propset svn:ignore svnversion.py '+dir)[1]
    output = cmd.read()
    cmd.close()
def get_svnversion_from_svn(dir):
    """Return the `svnversion` output for *dir*, or None for exported trees."""
    # try to get the last svn version number from svnversion
    pipe = popen3('svnversion -n ' + dir)[1]  # assert we are in the project dir
    version = pipe.read().strip()
    pipe.close()
    if version.startswith('exported'):
        # we build from exported source (e.g. rpmbuild)
        version = None
    return version
# Record the current working-copy revision of the 'ase' package, if any.
svnversion = get_svnversion_from_svn(dir='ase')
if svnversion:
    write_svnversion(svnversion, dir='ase')
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/svnversion_io.py | Python | gpl-2.0 | 1,429 | [
"ASE"
] | 057bd09ed36d2e7af29ffd1757275ff3af9ff555e8a77b846ba3695340c73373 |
"""
This module is used to create an appropriate object which can be used to insert records to the Monitoring system.
It always try to insert the records directly. In case of failure the monitoring client is used...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
def getDBOrClient(DB, serverName):
  """ Tries to instantiate the DB object and returns it if we manage to connect to the DB,
      otherwise returns a Client of the server
  """
  from DIRAC import gLogger
  from DIRAC.Core.DISET.RPCClient import RPCClient

  try:
    database = DB()
    connected = bool(database._connected)
  except Exception:
    # Any failure (including DB being None or unreachable) falls back to RPC
    connected = False
  if connected:
    return database
  gLogger.info('Can not connect to DB will use %s' % serverName)
  return RPCClient(serverName)
def getMonitoringDB():
  """ Return a direct MonitoringDB instance when its module can be imported
      and the DB is reachable, otherwise an RPC client for the
      Monitoring/Monitoring service (resolution done by getDBOrClient).
  """
  serverName = 'Monitoring/Monitoring'
  MonitoringDB = None
  try:
    # Local import: the DB module (or its dependencies) may be absent on
    # client-only installations
    from DIRAC.MonitoringSystem.DB.MonitoringDB import MonitoringDB
  except Exception:
    # MonitoringDB stays None; getDBOrClient then fails to instantiate it
    # and falls back to the RPC client
    pass
  return getDBOrClient(MonitoringDB, serverName)
# Module-level singleton used by Monitoring clients (created at import time).
monitoringDB = getMonitoringDB()
| yujikato/DIRAC | src/DIRAC/MonitoringSystem/Client/ServerUtils.py | Python | gpl-3.0 | 1,086 | [
"DIRAC"
] | 2597cdb3d29eaecedce7b9fbc1407a4916ad222c4d028b40575e322876a58bf4 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
plainLogger = logging.getLogger(__name__)
from container.utils.visibility import getLogger
logger = getLogger(__name__)
import base64
import datetime
import functools
import time
import inspect
import json
import os
import re
import shutil
import sys
import tarfile
from ruamel.yaml.comments import CommentedMap
from six import reraise, iteritems, string_types, PY3
if PY3:
from functools import reduce
try:
import httplib as StatusCodes
except ImportError:
from http import HTTPStatus as StatusCodes
import container
from container import host_only, conductor_only
from container.engine import BaseEngine
from container import utils, exceptions
from container.utils import (logmux, text, ordereddict_to_list, roles_to_install, modules_to_install,
ansible_config_exists, create_file)
from .secrets import DockerSecretsMixin
try:
import docker
from docker import errors as docker_errors
from docker.utils.ports import build_port_bindings
from docker.errors import DockerException
from docker.api.container import ContainerApiMixin
from docker.models.containers import RUN_HOST_CONFIG_KWARGS
from docker.constants import DEFAULT_TIMEOUT_SECONDS
except ImportError:
raise ImportError(
u'You must install Ansible Container with Docker(tm) support. '
u'Try:\npip install ansible-container[docker]==%s' % (
container.__version__
))
# Locations of the conductor template/static files shipped with this engine
TEMPLATES_PATH = os.path.normpath(
    os.path.join(
        os.path.dirname(__file__),
        'templates'))

FILES_PATH = os.path.normpath(
    os.path.join(
        os.path.dirname(__file__),
        'files'))

DOCKER_VERSION = '17.04.0-ce'

# Candidate locations of Docker registry credentials, in priority order
# (DOCKER_CONFIG env var first, then the modern and legacy default files)
DOCKER_DEFAULT_CONFIG_PATH = os.path.join(os.environ.get('HOME', ''), '.docker', 'config.json')
DOCKER_CONFIG_FILEPATH_CASCADE = [
    os.environ.get('DOCKER_CONFIG', ''),
    DOCKER_DEFAULT_CONFIG_PATH,
    os.path.join(os.environ.get('HOME', ''), '.dockercfg')
]

# Strips a leading http:// or https:// from registry URLs
REMOVE_HTTP = re.compile('^https?://')

# A map of distros and their aliases that we build pre-baked builders for
PREBAKED_DISTROS = {
    'centos:7': ['centos:latest', 'centos:centos7'],
    'fedora:27': ['fedora:latest'],
    'fedora:26': [],
    'fedora:25': [],
    'amazonlinux:2': ['amazonlinux:2'],
    'debian:jessie': ['debian:8', 'debian:latest', 'debian:jessie-slim'],
    'debian:stretch': ['debian:9', 'debian:stretch-slim'],
    'debian:wheezy': ['debian:7', 'debian:wheezy-slim'],
    'ubuntu:precise': ['ubuntu:12.04'],
    'ubuntu:trusty': ['ubuntu:14.04'],
    'ubuntu:xenial': ['ubuntu:16.04'],
    'ubuntu:zesty': ['ubuntu:17.04'],
    'alpine:3.5': ['alpine:latest'],
    'alpine:3.4': []
}
def log_runs(fn):
    """Decorator: log every call to the wrapped method (class, args, kwargs)
    at debug level before delegating to it."""
    @functools.wraps(fn)
    def __wrapped__(self, *args, **kwargs):
        logger.debug(
            u'Call: %s.%s' % (type(self).__name__, fn.__name__),
            # because log_runs is a decorator, we need to override the caller
            # line & function
            caller_func='%s.%s' % (type(self).__name__, fn.__name__),
            caller_line=inspect.getsourcelines(fn)[-1],
            args=args,
            kwargs=kwargs,
        )
        return fn(self, *args, **kwargs)
    return __wrapped__
def get_timeout():
    """Resolve the Docker client timeout (in seconds) from the environment.

    DOCKER_CLIENT_TIMEOUT takes precedence over COMPOSE_HTTP_TIMEOUT; when
    neither is set, docker-py's DEFAULT_TIMEOUT_SECONDS is used.  Raises an
    Exception when the selected variable is not an integer.
    """
    timeout = DEFAULT_TIMEOUT_SECONDS
    source = None
    for candidate in ('DOCKER_CLIENT_TIMEOUT', 'COMPOSE_HTTP_TIMEOUT'):
        if os.environ.get(candidate):
            timeout_value = os.environ.get(candidate)
            source = candidate
            break
    if source:
        try:
            timeout = int(timeout_value)
        except ValueError:
            raise Exception("Error: {0} set to '{1}'. Expected an integer.".format(source, timeout_value))
    logger.debug("Setting Docker client timeout to {0}".format(timeout))
    return timeout
class Engine(BaseEngine, DockerSecretsMixin):
    """Container engine implementation that drives a local Docker daemon."""

    # Capabilities of engine implementations
    CAP_BUILD_CONDUCTOR = True
    CAP_BUILD = True
    CAP_DEPLOY = True
    CAP_IMPORT = True
    CAP_INSTALL = True
    CAP_LOGIN = True
    CAP_PUSH = True
    CAP_RUN = True
    CAP_VERSION = True
    CAP_SIM_SECRETS = True

    # Service keys passed through verbatim to docker-compose style output
    COMPOSE_WHITELIST = (
        'links', 'depends_on', 'cap_add', 'cap_drop', 'command', 'devices',
        'dns', 'dns_opt', 'tmpfs', 'entrypoint', 'environment', 'expose',
        'external_links', 'extra_hosts', 'labels', 'links', 'logging', 'log_opt', 'networks',
        'network_mode', 'pids_limit', 'ports', 'security_opt', 'stop_grace_period',
        'stop_signal', 'sysctls', 'ulimits', 'userns_mode', 'volumes',
        'volume_driver', 'volumes_from', 'cpu_shares', 'cpu_quota', 'cpuset',
        'domainname', 'hostname', 'ipc', 'mac_address', 'mem_limit',
        'memswap_limit', 'mem_swappiness', 'mem_reservation', 'oom_score_adj',
        'privileged', 'read_only', 'restart', 'shm_size', 'stdin_open', 'tty',
        'user', 'working_dir'
    )

    display_name = u'Docker\u2122 daemon'

    # Lazily created docker-py client — see the `client` property
    _client = None

    # Image labels used to track what Ansible Container built
    FINGERPRINT_LABEL_KEY = 'com.ansible.container.fingerprint'
    ROLE_LABEL_KEY = 'com.ansible.container.role'
    LAYER_COMMENT = 'Built with Ansible Container (https://github.com/ansible/ansible-container)'
    @property
    def client(self):
        """Lazily created docker-py client configured from the environment.

        Maps low-level connection failures to the friendlier Ansible
        Container exceptions; any other DockerException is re-raised.
        """
        if not self._client:
            try:
                timeout = get_timeout()
                self._client = docker.from_env(version='auto', timeout=timeout)
            except DockerException as exc:
                if 'Connection refused' in str(exc):
                    raise exceptions.AnsibleContainerDockerConnectionRefused()
                elif 'Connection aborted' in str(exc):
                    raise exceptions.AnsibleContainerDockerConnectionAborted(u"%s" % str(exc))
                else:
                    raise
        return self._client
    @property
    def ansible_build_args(self):
        """Additional commandline arguments necessary for ansible-playbook runs during build"""
        # Build plays target containers through the docker connection plugin
        return '-c docker'

    @property
    def ansible_orchestrate_args(self):
        """Additional commandline arguments necessary for ansible-playbook runs during orchestrate"""
        # Orchestration plays execute locally on the conductor
        return '-c local'

    @property
    def default_registry_url(self):
        """URL of the registry used when none is configured (Docker Hub v1 index)."""
        return u'https://index.docker.io/v1/'

    @property
    def default_registry_name(self):
        """Human-readable name of the default registry."""
        return u'Docker Hub'
@property
def auth_config_path(self):
result = DOCKER_DEFAULT_CONFIG_PATH
for path in DOCKER_CONFIG_FILEPATH_CASCADE:
if path and os.path.exists(path):
result = os.path.normpath(os.path.expanduser(path))
break
return result
@property
def secrets_mount_path(self):
return os.path.join(os.sep, 'docker', 'secrets')
def container_name_for_service(self, service_name):
return u'%s_%s' % (self.project_name, service_name)
    def image_name_for_service(self, service_name):
        """Resolve the image name for a service (or service-container pair).

        The conductor and any service/container with roles map to
        '<project>-<name>'; role-less entries use their 'from' image
        verbatim.  Raises AnsibleContainerConfigException when nothing
        resolves (typically a missing 'from' attribute).
        """
        if service_name == 'conductor':
            return u'%s-%s' % (self.project_name.lower(), service_name.lower())
        result = None
        for name, service in iteritems(self.services):
            if service.get('containers'):
                # Multi-container service: match against '<service>-<container>'
                for c in service['containers']:
                    container_service_name = u"%s-%s" % (name, c['container_name'])
                    if container_service_name == service_name:
                        if c.get('roles'):
                            result = u'%s-%s' % (self.project_name.lower(), container_service_name.lower())
                        else:
                            # No roles: the container runs its base image unmodified
                            result = c.get('from')
                        break
            elif name == service_name:
                if service.get('roles'):
                    result = u'%s-%s' % (self.project_name.lower(), name.lower())
                else:
                    result = service.get('from')
            if result:
                break
        if result is None:
            raise exceptions.AnsibleContainerConfigException(
                u"Failed to resolve image for service {}. The service or container definition "
                u"is likely missing a 'from' attribute".format(service_name)
            )
        return result
    def run_kwargs_for_service(self, service_name):
        """Build the kwargs for docker-py `containers.run` from the service
        definition, dropping keys docker-py does not accept (or that only
        matter at build time)."""
        to_return = self.services[service_name].copy()
        # remove keys that docker-compose format doesn't accept, or that can't
        # be used during the build phase
        # NOTE(review): inspect.getargspec is removed in Python 3.11 — kept
        # here for the Python 2 compatibility this codebase targets.
        container_args = inspect.getargspec(ContainerApiMixin.create_container)[0] + RUN_HOST_CONFIG_KWARGS
        remove_keys = list(set(to_return.keys()) - set(container_args)) + ['links']
        logger.debug("Removing keys", keys=remove_keys)
        for key in list(remove_keys):
            try:
                to_return.pop(key)
            except KeyError:
                pass
        if to_return.get('ports'):
            # convert ports from a list to a dict that docker-py likes
            new_ports = build_port_bindings(to_return.get('ports'))
            to_return['ports'] = new_ports
        return to_return
@host_only
def print_version_info(self):
print(json.dumps(self.client.info(), indent=2))
print(json.dumps(self.client.version(), indent=2))
    @log_runs
    @conductor_only
    def run_container(self, image_id, service_name, **kwargs):
        """Run a particular container. The kwargs argument contains individual
        parameter overrides from the service definition.

        Returns the started container's id; its output is streamed into the
        plain logger in the background.
        """
        run_kwargs = self.run_kwargs_for_service(service_name)
        # NOTE(review): `relax=True` looks like the ruamel CommentedMap
        # update() extension — assumes run_kwargs is a CommentedMap, not a
        # plain dict; confirm against run_kwargs_for_service's return type.
        run_kwargs.update(kwargs, relax=True)
        logger.debug('Running container in docker', image=image_id, params=run_kwargs)
        container_obj = self.client.containers.run(
            image=image_id,
            detach=True,
            **run_kwargs
        )
        # Stream container stdout/stderr into our logger from a background thread
        log_iter = container_obj.logs(stdout=True, stderr=True, stream=True)
        mux = logmux.LogMultiplexer()
        mux.add_iterator(log_iter, plainLogger)
        return container_obj.id
    @log_runs
    @host_only
    def run_conductor(self, command, config, base_path, params, engine_name=None, volumes=None):
        """Launch the conductor container to execute *command*.

        Mounts the project source, secrets, vault files, roles and the
        Docker socket/certs into the conductor, serializes config/params
        onto its command line as base64 JSON, and returns the started
        container's id.  Raises AnsibleContainerConductorException when the
        conductor image is missing or a conductor is already running.
        """
        image_id = self.get_latest_image_id_for_service('conductor')
        if image_id is None:
            raise exceptions.AnsibleContainerConductorException(
                u"Conductor container can't be found. Run "
                u"`ansible-container build` first")
        conductor_settings = config.get('settings', {}).get('conductor', {})
        if not volumes:
            volumes = {}

        def _add_volume(vol):
            # Parse "src[:dest[:mode]]" into a docker-py volumes entry;
            # src is normalized and env/home expanded
            volume_parts = vol.split(':')
            volume_parts[0] = os.path.normpath(os.path.abspath(os.path.expanduser(os.path.expandvars(volume_parts[0]))))
            volumes[volume_parts[0]] = {
                'bind': volume_parts[1] if len(volume_parts) > 1 else volume_parts[0],
                'mode': volume_parts[2] if len(volume_parts) > 2 else 'rw'
            }

        if params.get('with_volumes'):
            for volume in params.get('with_volumes'):
                _add_volume(volume)
        if conductor_settings.get('volumes'):
            for volume in conductor_settings['volumes']:
                _add_volume(volume)
        # Simulated secrets live in a named volume mounted into the conductor
        if command != 'destroy' and self.CAP_SIM_SECRETS:
            self.create_secret_volume()
            volumes[self.secrets_volume_name] = {
                'bind': self.secrets_mount_path,
                'mode': 'rw'
            }
        # Vault password file: mount read-only and rewrite the param to the
        # absolute path visible inside the conductor
        pswd_file = params.get('vault_password_file') or config.get('settings', {}).get('vault_password_file')
        if pswd_file:
            pswd_file_path = os.path.normpath(os.path.abspath(os.path.expanduser(pswd_file)))
            if not os.path.exists(pswd_file_path):
                logger.warning(u'Vault file %s specified but does not exist. Ignoring it.',
                               pswd_file_path)
            else:
                volumes[pswd_file_path] = {
                    'bind': pswd_file_path,
                    'mode': 'ro'
                }
                params['vault_password_file'] = pswd_file_path
        vaults = params.get('vault_files') or config.get('settings', {}).get('vault_files')
        if vaults:
            vault_paths = [os.path.normpath(os.path.abspath(os.path.expanduser(v))) for v in vaults]
            for vault_path in vault_paths:
                if not os.path.exists(vault_path):
                    logger.warning(u'Vault file %s specified but does not exist. Ignoring it.',
                                   vault_path)
                    continue
                volumes[vault_path] = {
                    'bind': vault_path,
                    'mode': 'ro'
                }
            params['vault_files'] = vault_paths
        # Source tree is read-only except for `install`, which writes roles
        permissions = 'ro' if command != 'install' else 'rw'
        if params.get('src_mount_path'):
            src_path = params['src_mount_path']
        else:
            src_path = base_path
        volumes[src_path] = {'bind': '/_src', 'mode': permissions}
        if params.get('deployment_output_path'):
            deployment_path = params['deployment_output_path']
            if not os.path.isdir(deployment_path):
                os.mkdir(deployment_path, 0o755)
            volumes[deployment_path] = {'bind': deployment_path, 'mode': 'rw'}
        roles_path = None
        if params.get('roles_path'):
            roles_path = params['roles_path']
        elif conductor_settings.get('roles_path'):
            roles_path = conductor_settings['roles_path']
        expanded_roles_path = []
        if roles_path:
            for role_path in roles_path:
                role_path = os.path.normpath(os.path.abspath(os.path.expanduser(role_path)))
                expanded_roles_path.append(role_path)
                volumes[role_path] = {'bind': role_path, 'mode': 'ro'}
        environ = {}
        # Pass the host's Docker connection settings through (TCP + TLS certs,
        # or the local Unix socket) so the conductor drives the same daemon
        if os.environ.get('DOCKER_HOST'):
            environ['DOCKER_HOST'] = os.environ['DOCKER_HOST']
            if os.environ.get('DOCKER_CERT_PATH'):
                environ['DOCKER_CERT_PATH'] = '/etc/docker'
                volumes[os.environ['DOCKER_CERT_PATH']] = {'bind': '/etc/docker',
                                                           'mode': 'ro'}
            if os.environ.get('DOCKER_TLS_VERIFY'):
                environ['DOCKER_TLS_VERIFY'] = os.environ['DOCKER_TLS_VERIFY']
        else:
            environ['DOCKER_HOST'] = 'unix:///var/run/docker.sock'
            volumes['/var/run/docker.sock'] = {'bind': '/var/run/docker.sock',
                                               'mode': 'rw'}

        def _add_var_list(vars):
            # Split KEY=VALUE pairs (the value may itself contain '=')
            for var in vars:
                key, value = var.split('=', 1)
                environ[key] = value

        if params.get('with_variables'):
            _add_var_list(params['with_variables'])
        if conductor_settings.get('environment'):
            if isinstance(conductor_settings['environment'], dict):
                environ.update(conductor_settings['environment'])
            if isinstance(conductor_settings['environment'], list):
                _add_var_list(conductor_settings['environment'])
        if roles_path:
            environ['ANSIBLE_ROLES_PATH'] = "%s:/src/roles:/etc/ansible/roles" % (':').join(expanded_roles_path)
        else:
            environ['ANSIBLE_ROLES_PATH'] = '/src/roles:/etc/ansible/roles'
        if params.get('devel'):
            # Development mode: bind-mount the live ansible-container source
            conductor_path = os.path.dirname(container.__file__)
            logger.debug(u"Binding Ansible Container code at %s into conductor "
                         u"container", conductor_path)
            volumes[conductor_path] = {'bind': '/_ansible/container', 'mode': 'rw'}
        if command in ('login', 'push', 'build'):
            # Registry auth config must exist and be writable in the conductor
            config_path = params.get('config_path') or self.auth_config_path
            create_file(config_path, '{}')
            volumes[config_path] = {'bind': config_path,
                                    'mode': 'rw'}
        if not engine_name:
            engine_name = __name__.rsplit('.', 2)[-2]
        # Config and params travel to the conductor as base64-encoded JSON
        serialized_params = base64.b64encode(json.dumps(params).encode("utf-8")).decode()
        serialized_config = base64.b64encode(json.dumps(ordereddict_to_list(config)).encode("utf-8")).decode()
        run_kwargs = dict(
            name=self.container_name_for_service('conductor'),
            command=['conductor',
                     command,
                     '--project-name', self.project_name,
                     '--engine', engine_name,
                     '--params', serialized_params,
                     '--config', serialized_config,
                     '--encoding', 'b64json'],
            detach=True,
            user='root',
            volumes=volumes,
            environment=environ,
            working_dir='/src',
            cap_add=['SYS_ADMIN']
        )
        # Anytime a playbook is executed, /src is bind mounted to a tmpdir, and that seems to
        # require privileged=True
        run_kwargs['privileged'] = True
        # Support optional volume driver for mounting named volumes to the Conductor
        if params.get('volume_driver'):
            run_kwargs['volume_driver'] = params['volume_driver']
        logger.debug('Docker run:', image=image_id, params=run_kwargs)
        try:
            container_obj = self.client.containers.run(
                image_id,
                **run_kwargs
            )
        except docker_errors.APIError as exc:
            # A name conflict means a previous conductor wasn't cleaned up
            if exc.response.status_code == StatusCodes.CONFLICT:
                raise exceptions.AnsibleContainerConductorException(
                    u"Can't start conductor container, another conductor for "
                    u"this project already exists or wasn't cleaned up.")
            reraise(*sys.exc_info())
        else:
            # Stream conductor output into our logger from a background thread
            log_iter = container_obj.logs(stdout=True, stderr=True, stream=True)
            mux = logmux.LogMultiplexer()
            mux.add_iterator(log_iter, plainLogger)
            return container_obj.id
    def await_conductor_command(self, command, config, base_path, params, save_container=False):
        """Run *command* in the conductor, block until it exits, then clean up.

        Unless save_container is set, the conductor container is removed;
        a non-zero exit code raises AnsibleContainerConductorException.
        On success, ansible-playbook residue is pruned from the deployment
        output directory for run/destroy/stop/restart.
        """
        conductor_id = self.run_conductor(command, config, base_path, params)
        try:
            # Poll until the conductor container stops
            while self.service_is_running('conductor'):
                time.sleep(0.1)
        finally:
            exit_code = self.service_exit_code('conductor')
            msg = 'Preserving as requested.' if save_container else 'Cleaning up.'
            logger.info('Conductor terminated. {}'.format(msg), save_container=save_container,
                        conductor_id=conductor_id, command_rc=exit_code)
            if not save_container:
                self.delete_container(conductor_id, remove_volumes=True)
            if exit_code:
                raise exceptions.AnsibleContainerConductorException(
                    u'Conductor exited with status %s' % exit_code
                )
            elif command in ('run', 'destroy', 'stop', 'restart') and params.get('deployment_output_path') \
                    and not self.debug:
                # Remove any ansible-playbook residue
                output_path = params['deployment_output_path']
                for path in ('files', 'templates'):
                    shutil.rmtree(os.path.join(output_path, path), ignore_errors=True)
                if not self.devel:
                    for filename in ('playbook.retry', 'playbook.yml', 'hosts'):
                        if os.path.exists(os.path.join(output_path, filename)):
                            os.remove(os.path.join(output_path, filename))
def service_is_running(self, service, container_id=None):
try:
running_container = self.client.containers.get(
container_id or self.container_name_for_service(service))
return running_container.status == 'running' and running_container.id
except docker_errors.NotFound:
return False
def service_exit_code(self, service, container_id=None):
try:
container_info = self.client.api.inspect_container(
container_id or self.container_name_for_service(service))
return container_info['State']['ExitCode']
except docker_errors.APIError:
return None
    def start_container(self, container_id):
        """(Re)start an existing container, stream its logs in the
        background, and return its id (implicitly None when not found)."""
        try:
            to_start = self.client.containers.get(container_id)
        except docker_errors.APIError:
            logger.debug(u"Could not find container %s to start", container_id,
                         id=container_id)
        else:
            to_start.start()
            log_iter = to_start.logs(stdout=True, stderr=True, stream=True)
            mux = logmux.LogMultiplexer()
            mux.add_iterator(log_iter, plainLogger)
            return to_start.id
def stop_container(self, container_id, forcefully=False):
try:
to_stop = self.client.containers.get(container_id)
except docker_errors.APIError:
logger.debug(u"Could not find container %s to stop", container_id,
id=container_id, force=forcefully)
pass
else:
if forcefully:
to_stop.kill()
else:
to_stop.stop(timeout=60)
    def restart_all_containers(self):
        """Not implemented for the Docker engine."""
        raise NotImplementedError()
def inspect_container(self, container_id):
try:
return self.client.api.inspect_container(container_id)
except docker_errors.APIError:
return None
def delete_container(self, container_id, remove_volumes=False):
try:
to_delete = self.client.containers.get(container_id)
except docker_errors.APIError:
pass
else:
to_delete.remove(v=remove_volumes)
def get_image_id_for_container_id(self, container_id):
try:
container_info = self.client.containers.get(container_id)
except docker_errors.NotFound:
logger.debug("Could not find container for %s", container_id,
all_containers=self.client.containers.list())
return None
else:
return container_info.image.id
def get_container_id_by_name(self, name):
    """Resolve a container name to its id; None when no such container exists."""
    try:
        found = self.client.containers.get(name)
    except docker_errors.NotFound:
        logger.debug("Could not find container for %s", name,
                     all_containers=[
                         c.name for c in self.client.containers.list(all=True)])
        return None
    return found.id
def get_intermediate_containers_for_service(self, service_name):
    """Yield names of leftover build containers for *service_name*.

    Intermediate containers share the service's container-name prefix but
    are not the canonical service container itself.
    """
    prefix = self.container_name_for_service(service_name)
    for candidate in self.client.containers.list(all=True):
        is_intermediate = (candidate.name.startswith(prefix)
                           and candidate.name != prefix)
        if is_intermediate:
            yield candidate.name
def get_image_id_by_fingerprint(self, fingerprint):
    """Return the id of the first image labeled with *fingerprint*, or None."""
    label_filter = dict(label='%s=%s' % (self.FINGERPRINT_LABEL_KEY,
                                         fingerprint))
    matches = self.client.images.list(all=True, filters=label_filter)
    if not matches:
        return None
    return matches[0].id
def get_fingerprint_for_image_id(self, image_id):
    """Return the build fingerprint stored in the image's labels, if any."""
    return self.get_image_labels(image_id).get(self.FINGERPRINT_LABEL_KEY)
def get_image_id_by_tag(self, tag):
    """Return the id of the image known by *tag*, or None when absent."""
    try:
        return self.client.images.get(tag).id
    except docker_errors.ImageNotFound:
        return None
def get_image_labels(self, image_id):
    """Return the label dict of an image; an empty dict when the image is missing."""
    try:
        found = self.client.images.get(image_id)
    except docker_errors.ImageNotFound:
        return {}
    return found.attrs['Config']['Labels']
def get_latest_image_id_for_service(self, service_name):
    """Return the id of the newest image for *service_name*, or None."""
    latest = self.get_latest_image_for_service(service_name)
    return latest.id if latest is not None else None
def get_latest_image_for_service(self, service_name):
    """Return the most recent image object for *service_name*.

    Prefers the ':latest' tag; otherwise falls back to the lexically
    greatest tag among images sharing the service's image name (tags are
    timestamp-suffixed, so lexical order tracks build order).  Returns
    None when no image exists at all.
    """
    try:
        image = self.client.images.get(
            '%s:latest' % self.image_name_for_service(service_name))
    except docker_errors.ImageNotFound:
        # No ':latest' tag -- look for other tags with the same image name.
        images = self.client.images.list(name=self.image_name_for_service(service_name))
        logger.debug(
            u"Could not find the latest image for service, "
            u"searching for other tags with same image name",
            image_name=self.image_name_for_service(service_name),
            service=service_name)
        if not images:
            return None

        def tag_sort(i):
            # Sort key: the first tag belonging to this service's image name.
            return [t for t in i.tags if t.startswith(self.image_name_for_service(service_name))][0]
        images = sorted(images, key=tag_sort)
        logger.debug('Found images for service',
                     service=service_name, images=images)
        return images[-1]
    else:
        return image
def containers_built_for_services(self, services):
    """Ensure every service in *services* has a built image.

    :raises exceptions.AnsibleContainerMissingImage: when any image is absent
    """
    # Verify all images are built
    for service_name in services:
        logger.info(u'Verifying service image', service=service_name)
        if self.get_latest_image_id_for_service(service_name) is None:
            raise exceptions.AnsibleContainerMissingImage(
                u"Missing image for service '{}'. Run 'ansible-container build' to (re)create it."
                .format(service_name)
            )
def get_build_stamp_for_image(self, image_id):
    """Return the timestamp build stamp encoded in one of the image's tags.

    The build stamp is the tag suffix of the first tag that is not
    ':latest'.  Returns None when the image has no tags or only a
    ':latest' tag -- previously that case raised an unintended IndexError
    because the filtered tag list was indexed without a guard, defeating
    the ``build_stamp = None`` default.

    :raises exceptions.AnsibleContainerConductorException: if the image
        does not exist
    """
    try:
        image = self.client.images.get(image_id)
    except docker_errors.ImageNotFound:
        raise exceptions.AnsibleContainerConductorException(
            "Unable to find image {}".format(image_id)
        )
    build_stamp = None
    if image and image.tags:
        # First non-':latest' tag carries the build timestamp suffix.
        stamped_tag = next(
            (tag for tag in image.tags if not tag.endswith(':latest')), None)
        if stamped_tag is not None:
            build_stamp = stamped_tag.split(':')[-1]
    return build_stamp
@conductor_only
def pull_image_by_tag(self, image):
    """Pull *image* (optionally 'repo:tag'; the tag defaults to 'latest').

    :raises exceptions.AnsibleContainerException: when the pull fails
    :return: the pulled image object as returned by the Docker SDK
    """
    if ':' in image:
        repo, tag = image.rsplit(':', 1)
    else:
        repo, tag = image, 'latest'
    logger.debug("Pulling image {}:{}".format(repo, tag))
    try:
        return self.client.images.pull(repo, tag=tag)
    except docker_errors.APIError as exc:
        raise exceptions.AnsibleContainerException("Failed to pull {}: {}".format(image, str(exc)))
@log_runs
@conductor_only
def flatten_container(self,
                      container_id,
                      service_name,
                      metadata):
    """Export a container's filesystem and re-import it as a single-layer image.

    The new image is tagged with a UTC timestamp version and also tagged
    ':latest' for the service.  Returns the image id string reported by
    the import operation.
    """
    image_name = self.image_name_for_service(service_name)
    # The UTC timestamp doubles as the image version tag.
    image_version = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
    # NOTE(review): image_config is computed but never used below -- confirm intent.
    image_config = utils.metadata_to_image_config(metadata)
    to_squash = self.client.containers.get(container_id)
    raw_image = to_squash.export()
    logger.debug("Exported service container as tarball", container=image_name)
    out = self.client.api.import_image_from_data(
        raw_image.read(),
        repository=image_name,
        tag=image_version
    )
    logger.debug("Committed flattened image", out=out)
    # The import API answers with a JSON blob whose 'status' field holds the id.
    image_id = json.loads(out)['status']
    self.tag_image_as_latest(service_name, image_id.split(':')[-1])
    return image_id
@log_runs
@conductor_only
def commit_role_as_layer(self,
                         container_id,
                         service_name,
                         fingerprint,
                         role_name,
                         metadata,
                         with_name=False):
    """Commit the container produced by applying a role as a new image layer.

    The committed image is labeled with the role *fingerprint* and
    *role_name* so later builds can detect and reuse the layer.  Returns
    the new image id.

    :param with_name: when True, also tag the layer with the service's
        image name and a timestamp version
    """
    metadata = metadata.copy()  # avoid mutating the caller's dict
    to_commit = self.client.containers.get(container_id)
    image_name = self.image_name_for_service(service_name)
    image_version = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
    image_changes = []
    volume_specs = metadata.pop('volumes', [])
    for volume_spec in volume_specs:
        # 'src:dest' mappings whose source is not an absolute/$-variable
        # host path contribute their container-side mount point.
        if ':' in volume_spec and not volume_spec.startswith(('/', '$')):
            mount_point = volume_spec.split(':', 1)[-1]
        elif ':' not in volume_spec:
            # A bare path is itself the mount point.
            mount_point = volume_spec
        else:
            # Host-absolute bind mounts do not become image VOLUMEs.
            continue
        image_changes.append(u'VOLUME %s' % (mount_point,))
    image_config = utils.metadata_to_image_config(metadata)
    image_config.setdefault('Labels', {})[self.FINGERPRINT_LABEL_KEY] = fingerprint
    image_config['Labels'][self.ROLE_LABEL_KEY] = role_name
    commit_data = dict(
        repository=image_name if with_name else None,
        tag=image_version if with_name else None,
        message=self.LAYER_COMMENT,
        conf=image_config,
        changes=u'\n'.join(image_changes)
    )
    logger.debug('Committing new layer', params=commit_data)
    return to_commit.commit(**commit_data).id
def tag_image_as_latest(self, service_name, image_id):
    """Apply the ':latest' tag for *service_name* to the given image."""
    target = self.client.images.get(image_id)
    target.tag(self.image_name_for_service(service_name), 'latest')
@conductor_only
def _get_top_level_secrets(self):
    """
    Convert the top-level 'secrets' directive to the Docker format
    :return: secrets dict
    """
    converted = dict()
    if not self.secrets:
        return converted
    for secret, definition in iteritems(self.secrets):
        if isinstance(definition, dict):
            # One external secret entry per engine-specific key.
            for key in definition:
                converted['{}_{}'.format(secret, key)] = dict(external=True)
        elif isinstance(definition, string_types):
            converted[secret] = dict(external=True)
    return converted
@conductor_only
def generate_orchestration_playbook(self, url=None, namespace=None, vault_files=None, **kwargs):
    """
    Generate an Ansible playbook to orchestrate services.
    :param url: registry URL where images will be pulled from
    :param namespace: registry namespace
    :return: playbook dict

    One docker_service task is emitted per desired state, each tagged so
    ``--tags start|restart|stop|destroy`` selects exactly one behavior.
    """
    states = ['start', 'restart', 'stop', 'destroy']
    service_def = {}
    for service_name, service in iteritems(self.services):
        service_definition = {}
        if service.get('roles'):
            if url and namespace:
                # Reference previously pushed image
                service_definition[u'image'] = '{}/{}/{}'.format(re.sub(r'/$', '', url), namespace,
                                                                 self.image_name_for_service(service_name))
            else:
                # Check that the image was built
                image = self.get_latest_image_for_service(service_name)
                if image is None:
                    raise exceptions.AnsibleContainerConductorException(
                        u"No image found for service {}, make sure you've run `ansible-container "
                        u"build`".format(service_name)
                    )
                service_definition[u'image'] = image.tags[0]
        else:
            try:
                # Check if the image is already local
                image = self.client.images.get(service['from'])
                image_from = image.tags[0]
            except docker.errors.ImageNotFound:
                # NOTE(review): sibling methods use `docker_errors`; confirm
                # a `docker` module import exists at file top.
                image_from = service['from']
                logger.warning(u"Image {} for service {} not found. "
                               u"An attempt will be made to pull it.".format(service['from'], service_name))
            service_definition[u'image'] = image_from
        # Copy whitelisted compose keys through verbatim.
        for extra in self.COMPOSE_WHITELIST:
            if extra in service:
                service_definition[extra] = service[extra]
        if 'secrets' in service:
            service_secrets = []
            for secret, secret_engines in iteritems(service[u'secrets']):
                if 'docker' in secret_engines:
                    service_secrets += secret_engines[u'docker']
            if service_secrets:
                service_definition[u'secrets'] = service_secrets
                if self.CAP_SIM_SECRETS:
                    # Simulate external secrets using a Docker volume
                    if not 'volumes' in service_definition:
                        service_definition['volumes'] = []
                    service_definition['volumes'].append("{}:/run/secrets:ro".format(self.secrets_volume_name))
        logger.debug(u'Adding new service to definition',
                     service=service_name, definition=service_definition)
        service_def[service_name] = service_definition
    tasks = []
    top_level_secrets = self._get_top_level_secrets()
    if self.CAP_SIM_SECRETS and top_level_secrets:
        # Let compose know that we're using a named volume to simulate external secrets
        if not isinstance(self.volumes, dict):
            self.volumes = dict()
        self.volumes[self.secrets_volume_name] = dict(external=True)
    for desired_state in states:
        task_params = {
            u'project_name': self.project_name,
            u'definition': {
                # Compose file format 3.1 is required for secrets support.
                u'version': u'3.1' if top_level_secrets else u'2',
                u'services': service_def,
            }
        }
        if self.secrets:
            task_params[u'definition'][u'secrets'] = top_level_secrets
        if self.volumes:
            task_params[u'definition'][u'volumes'] = dict(self.volumes)
        if desired_state in {'restart', 'start', 'stop'}:
            task_params[u'state'] = u'present'
            if desired_state == 'restart':
                task_params[u'restarted'] = True
            if desired_state == 'stop':
                task_params[u'stopped'] = True
        elif desired_state == 'destroy':
            task_params[u'state'] = u'absent'
            task_params[u'remove_volumes'] = u'yes'
        tasks.append({u'docker_service': task_params, u'tags': [desired_state]})
    playbook = []
    if self.secrets and self.CAP_SIM_SECRETS:
        # Extra play creates the volume standing in for external secrets.
        playbook.append(self.generate_secrets_play(vault_files=vault_files))
    playbook.append(CommentedMap([
        (u'name', 'Deploy {}'.format(self.project_name)),
        (u'hosts', u'localhost'),
        (u'gather_facts', False)
    ]))
    if vault_files:
        playbook[len(playbook) - 1][u'vars_files'] = [os.path.normpath(os.path.abspath(v)) for v in vault_files]
    playbook[len(playbook) - 1][u'tasks'] = tasks
    # 'destroy' additionally removes every image built for the project,
    # including the conductor's own image.
    for service in list(self.services.keys()) + ['conductor']:
        image_name = self.image_name_for_service(service)
        for image in self.client.images.list(name=image_name):
            logger.debug('Found image for service', tags=image.tags, id=image.short_id)
            for tag in image.tags:
                logger.debug('Adding task to destroy image', tag=tag)
                playbook[len(playbook) - 1][u'tasks'].append({
                    u'docker_image': {
                        u'name': tag,
                        u'state': u'absent',
                        u'force': u'yes'
                    },
                    u'tags': u'destroy'
                })
    if self.secrets and self.CAP_SIM_SECRETS:
        playbook.append(self.generate_remove_volume_play())
    logger.debug(u'Created playbook to run project', playbook=playbook)
    return playbook
@conductor_only
def push(self, image_id, service_name, tag=None, namespace=None, url=None, username=None, password=None,
         repository_prefix=None, **kwargs):
    """
    Push an image to a remote registry.

    The tag defaults to the image's build stamp.  The repository name is
    '<prefix>-<service>', '<project>-<service>' when no prefix is given,
    or bare '<service>' for an explicit empty prefix.
    """
    auth_config = {
        'username': username,
        'password': password
    }
    build_stamp = self.get_build_stamp_for_image(image_id)
    tag = tag or build_stamp
    if repository_prefix:
        image_name = "{}-{}".format(repository_prefix, service_name)
    elif repository_prefix is None:
        image_name = "{}-{}".format(self.project_name, service_name)
    elif repository_prefix == '':
        image_name = service_name
    repository = "{}/{}".format(namespace, image_name)
    if url != self.default_registry_url:
        # Non-default registries are addressed host-first, without a scheme.
        url = REMOVE_HTTP.sub('', url)
        repository = "%s/%s" % (url.rstrip('/'), repository)
    logger.info('Tagging %s' % repository)
    self.client.api.tag(image_id, repository, tag=tag)
    logger.info('Pushing %s:%s...' % (repository, tag))
    stream = self.client.api.push(repository, tag=tag, stream=True, auth_config=auth_config)
    last_status = None
    for data in stream:
        # Each chunk may hold several newline-separated JSON records.
        data = data.splitlines()
        for line in data:
            line = json.loads(line)
            if type(line) is dict and 'error' in line:
                plainLogger.error(line['error'])
                raise exceptions.AnsibleContainerException(
                    "Failed to push image. {}".format(line['error'])
                )
            elif type(line) is dict and 'status' in line:
                # Suppress consecutive duplicate status lines.
                if line['status'] != last_status:
                    plainLogger.info(line['status'])
                last_status = line['status']
            else:
                plainLogger.debug(line)
@staticmethod
def _prepare_prebake_manifest(base_path, base_image, temp_dir, tarball):
    """Populate *tarball* with the build context for a pre-baked conductor image.

    Renders the Dockerfile, atomic help page and license templates into
    *temp_dir*, then adds them plus the ansible-container source tree and
    its packaging files to the context tarball.
    """
    utils.jinja_render_to_temp(TEMPLATES_PATH,
                               'conductor-src-dockerfile.j2', temp_dir,
                               'Dockerfile',
                               conductor_base=base_image,
                               docker_version=DOCKER_VERSION)
    tarball.add(os.path.join(temp_dir, 'Dockerfile'),
                arcname='Dockerfile')
    utils.jinja_render_to_temp(TEMPLATES_PATH,
                               'atomic-help.j2', temp_dir,
                               'help.1',
                               ansible_container_version=container.__version__)
    tarball.add(os.path.join(temp_dir, 'help.1'),
                arcname='help.1')
    utils.jinja_render_to_temp(TEMPLATES_PATH,
                               'license.j2', temp_dir,
                               'LICENSE')
    tarball.add(os.path.join(temp_dir, 'LICENSE'),
                arcname='LICENSE')
    container_dir = os.path.dirname(container.__file__)
    tarball.add(container_dir, arcname='container-src')
    package_dir = os.path.dirname(container_dir)
    # For an editable install, the setup.py and requirements.* will be
    # available in the package_dir. Otherwise, our custom sdist (see
    # setup.py) would have moved them to FILES_PATH
    setup_py_dir = (package_dir
                    if os.path.exists(os.path.join(package_dir, 'setup.py'))
                    else FILES_PATH)
    req_txt_dir = (package_dir
                   if os.path.exists(
                       os.path.join(package_dir, 'conductor-requirements.txt'))
                   else FILES_PATH)
    req_yml_dir = (package_dir
                   if os.path.exists(
                       os.path.join(package_dir, 'conductor-requirements.yml'))
                   else FILES_PATH)
    tarball.add(os.path.join(setup_py_dir, 'setup.py'),
                arcname='container-src/conductor-build/setup.py')
    tarball.add(os.path.join(req_txt_dir, 'conductor-requirements.txt'),
                arcname='container-src/conductor-build/conductor'
                        '-requirements.txt')
    tarball.add(os.path.join(req_yml_dir, 'conductor-requirements.yml'),
                arcname='container-src/conductor-build/conductor-requirements.yml')
def _prepare_conductor_manifest(self, base_path, base_image, temp_dir, tarball):
    """Populate *tarball* with the build context for the project-local conductor image.

    Adds the project's optional requirement/config files, resolves the
    matching pre-baked conductor base image when one exists, and renders
    the local conductor Dockerfile with the commands needed to install
    project requirements.
    """
    source_dir = os.path.normpath(base_path)
    for filename in ['ansible.cfg', 'ansible-requirements.txt',
                     'requirements.yml']:
        file_path = os.path.join(source_dir, filename)
        # BUGFIX: previously this tested os.path.exists(filename), which
        # checked the process CWD instead of the project directory (see the
        # correct pattern in build_conductor_image).
        if os.path.exists(file_path):
            tarball.add(file_path,
                        arcname=os.path.join('build-src', filename))
    # Make an empty file just to make sure the build-src dir has something
    open(os.path.join(temp_dir, '.touch'), 'w')
    tarball.add(os.path.join(temp_dir, '.touch'),
                arcname='build-src/.touch')
    # A base image matching a pre-baked distro key or any of its aliases is
    # collapsed onto the canonical distro key.
    prebaked = base_image in reduce(lambda x, y: x + [y[0]] + y[1],
                                    PREBAKED_DISTROS.items(), [])
    if prebaked:
        base_image = [k for k, v in PREBAKED_DISTROS.items()
                      if base_image in [k] + v][0]
        conductor_base = 'container-conductor-%s:%s' % (
            base_image.replace(':', '-'),
            container.__version__
        )
        if not self.get_image_id_by_tag(conductor_base):
            # Not available locally -- fall back to the published image.
            conductor_base = 'ansible/%s' % conductor_base
    else:
        conductor_base = 'container-conductor-%s:%s' % (
            base_image.replace(':', '-'),
            container.__version__
        )
    run_commands = []
    if modules_to_install(base_path):
        run_commands.append('pip install --no-cache-dir -r /_ansible/build/ansible-requirements.txt')
    if roles_to_install(base_path):
        run_commands.append('ansible-galaxy install -p /etc/ansible/roles -r /_ansible/build/requirements.yml')
    if ansible_config_exists(base_path):
        run_commands.append('cp /_ansible/build/ansible.cfg /etc/ansible/ansible.cfg')
    separator = ' && \\\r\n'
    install_requirements = separator.join(run_commands)
    utils.jinja_render_to_temp(TEMPLATES_PATH,
                               'conductor-local-dockerfile.j2', temp_dir,
                               'Dockerfile',
                               install_requirements=install_requirements,
                               original_base=base_image,
                               conductor_base=conductor_base,
                               docker_version=DOCKER_VERSION)
    tarball.add(os.path.join(temp_dir, 'Dockerfile'),
                arcname='Dockerfile')
@log_runs
@host_only
def build_conductor_image(self, base_path, base_image, prebaking=False, cache=True, environment=None):
    """Build the conductor image by streaming a tar build context to Docker.

    :param base_path: project directory
    :param base_image: distro base image reference (e.g. 'centos:7')
    :param prebaking: build the generic pre-baked conductor instead of the
        project-local one
    :param cache: when False, disable the Docker build cache
    :param environment: extra environment entries rendered into the Dockerfile
    :return: the id of the built image
    """
    if environment is None:
        environment = []
    with utils.make_temp_dir() as temp_dir:
        logger.info('Building Docker Engine context...')
        tarball_path = os.path.join(temp_dir, 'context.tar')
        tarball_file = open(tarball_path, 'wb')
        tarball = tarfile.TarFile(fileobj=tarball_file,
                                  mode='w')
        source_dir = os.path.normpath(base_path)
        # Optional per-project requirement/config files.
        for filename in ['ansible.cfg', 'ansible-requirements.txt',
                         'requirements.yml']:
            file_path = os.path.join(source_dir, filename)
            if os.path.exists(file_path):
                tarball.add(file_path,
                            arcname=os.path.join('build-src', filename))
        # Make an empty file just to make sure the build-src dir has something
        open(os.path.join(temp_dir, '.touch'), 'w')
        tarball.add(os.path.join(temp_dir, '.touch'), arcname='build-src/.touch')
        tarball.add(os.path.join(FILES_PATH, 'get-pip.py'),
                    arcname='contrib/get-pip.py')
        container_dir = os.path.dirname(container.__file__)
        tarball.add(container_dir, arcname='container-src')
        package_dir = os.path.dirname(container_dir)
        # For an editable install, the setup.py and requirements.* will be
        # available in the package_dir. Otherwise, our custom sdist (see
        # setup.py) would have moved them to FILES_PATH
        setup_py_dir = (package_dir
                        if os.path.exists(os.path.join(package_dir, 'setup.py'))
                        else FILES_PATH)
        req_txt_dir = (package_dir
                       if os.path.exists(os.path.join(package_dir, 'conductor-requirements.txt'))
                       else FILES_PATH)
        req_yml_dir = (package_dir
                       if os.path.exists(os.path.join(package_dir, 'conductor-requirements.yml'))
                       else FILES_PATH)
        tarball.add(os.path.join(setup_py_dir, 'setup.py'),
                    arcname='container-src/conductor-build/setup.py')
        tarball.add(os.path.join(req_txt_dir, 'conductor-requirements.txt'),
                    arcname='container-src/conductor-build/conductor-requirements.txt')
        tarball.add(os.path.join(req_yml_dir, 'conductor-requirements.yml'),
                    arcname='container-src/conductor-build/conductor-requirements.yml')
        utils.jinja_render_to_temp(TEMPLATES_PATH,
                                   'conductor-src-dockerfile.j2', temp_dir,
                                   'Dockerfile',
                                   conductor_base=base_image,
                                   docker_version=DOCKER_VERSION,
                                   environment=environment)
        tarball.add(os.path.join(temp_dir, 'Dockerfile'),
                    arcname='Dockerfile')
        #for context_file in ['builder.sh', 'ansible-container-inventory.py',
        #                     'ansible.cfg', 'wait_on_host.py', 'ac_galaxy.py']:
        #    tarball.add(os.path.join(TEMPLATES_PATH, context_file),
        #                arcname=context_file)
        if prebaking:
            # Ensure the distro base is present locally before building.
            self.client.images.pull(*base_image.split(':', 1))
            self._prepare_prebake_manifest(base_path, base_image, temp_dir,
                                           tarball)
            tag = 'container-conductor-%s:%s' % (base_image.replace(':', '-'),
                                                 container.__version__)
        else:
            self._prepare_conductor_manifest(base_path, base_image, temp_dir,
                                             tarball)
            tag = self.image_name_for_service('conductor')
        logger.debug('Context manifest:')
        for tarinfo_obj in tarball.getmembers():
            logger.debug('tarball item: %s (%s bytes)', tarinfo_obj.name,
                         tarinfo_obj.size, file=tarinfo_obj.name,
                         bytes=tarinfo_obj.size, terse=True)
        tarball.close()
        tarball_file.close()
        tarball_file = open(tarball_path, 'rb')
        logger.info('Starting Docker build of Ansible Container Conductor image (please be patient)...')
        # FIXME: Error out properly if build of conductor fails.
        if self.debug:
            # Verbose path: stream and decode the build output line by line.
            for line in self.client.api.build(fileobj=tarball_file,
                                              custom_context=True,
                                              tag=tag,
                                              rm=True,
                                              decode=True,
                                              nocache=not cache):
                try:
                    if line.get('status') == 'Downloading':
                        # skip over lines that give spammy byte-by-byte
                        # progress of downloads
                        continue
                    elif 'errorDetail' in line:
                        raise exceptions.AnsibleContainerException(
                            "Error building conductor image: {0}".format(line['errorDetail']['message']))
                except ValueError:
                    pass
                except exceptions.AnsibleContainerException:
                    raise
                # this bypasses the fancy colorized logger for things that
                # are just STDOUT of a process
                plainLogger.debug(text.to_text(line.get('stream', json.dumps(line))).rstrip())
            return self.get_image_id_by_tag(tag)
        else:
            image = self.client.images.build(fileobj=tarball_file,
                                             custom_context=True,
                                             tag=tag,
                                             rm=True,
                                             nocache=not cache)
            return image.id
def get_runtime_volume_id(self, mount_point):
    """Return the name of the conductor volume mounted at *mount_point*.

    :raises ValueError: when the conductor container is missing or no
        single mount matches *mount_point*
    """
    try:
        container_data = self.client.api.inspect_container(
            self.container_name_for_service('conductor')
        )
    except docker_errors.APIError:
        raise ValueError('Conductor container not found.')
    matching = [mount for mount in container_data['Mounts']
                if mount['Destination'] == mount_point]
    if len(matching) != 1:
        raise ValueError('Runtime volume %s not found on Conductor' % mount_point)
    return matching[0]['Name']
@host_only
def import_project(self, base_path, import_from, bundle_files=False, force=False, **kwargs):
    """Convert an existing Dockerfile-based project into an ansible-container project."""
    from .importer import DockerfileImport
    importer = DockerfileImport(base_path,
                                self.project_name,
                                import_from,
                                bundle_files,
                                force)
    importer.run()
@conductor_only
def login(self, username, password, email, url, config_path):
    """
    If username and password are provided, authenticate with the registry.
    Otherwise, check the config file for existing authentication data.

    :return: (username, password) valid for *url*
    :raises exceptions.AnsibleContainerConductorException: on registry
        errors or when no credentials can be found

    The previous redundant ``except Exception: raise`` clause was removed;
    unexpected exceptions propagate unchanged either way.
    """
    if username and password:
        try:
            self.client.login(username=username, password=password, email=email,
                              registry=url, reauth=True)
        except docker_errors.APIError as exc:
            raise exceptions.AnsibleContainerConductorException(
                u"Error logging into registry: {}".format(exc)
            )
        # Persist the now-verified credentials for later use.
        self._update_config_file(username, password, email, url, config_path)
    username, password = self._get_registry_auth(url, config_path)
    if not username:
        raise exceptions.AnsibleContainerConductorException(
            u'Please provide login credentials for registry {}.'.format(url))
    return username, password
@staticmethod
@conductor_only
def _update_config_file(username, password, email, url, config_path):
    """Update the config file with the authorization."""
    try:
        # read the existing config
        config = json.load(open(config_path, "r"))
    except ValueError:
        # Missing/empty/corrupt config: start from an empty structure.
        config = dict()
    if not config.get('auths'):
        config['auths'] = dict()
    if not config['auths'].get(url):
        config['auths'][url] = dict()
    # Docker stores credentials as base64('user:pass').
    # NOTE(review): the b':' concatenation assumes Python 2 str credentials;
    # under Python 3 mixing str and bytes here would raise -- confirm.
    encoded_credentials = dict(
        auth=base64.b64encode(username + b':' + password),
        email=email
    )
    config['auths'][url] = encoded_credentials
    try:
        json.dump(config, open(config_path, "w"), indent=5, sort_keys=True)
    except Exception as exc:
        raise exceptions.AnsibleContainerConductorException(
            u"Failed to write registry config to {0} - {1}".format(config_path, exc)
        )
@staticmethod
@conductor_only
def _get_registry_auth(registry_url, config_path):
    """
    Retrieve from the config file the current authentication for a given URL, and
    return the username, password
    """
    try:
        docker_config = json.load(open(config_path))
    except ValueError:
        # The configuration file is empty
        return None, None
    auths = docker_config.get('auths')
    if not auths:
        return None, None
    auth_key = auths.get(registry_url, {}).get('auth', None)
    if not auth_key:
        return None, None
    # Stored as base64('user:pass'); split only on the first colon.
    username, password = base64.b64decode(auth_key).split(':', 1)
    return username, password
@conductor_only
def pre_deployment_setup(self, project_name, services, **kwargs):
    """Hook for engine-specific setup before deployment; a no-op for this engine."""
    pass
| chouseknecht/ansible-container | container/docker/engine.py | Python | lgpl-3.0 | 53,581 | [
"Galaxy"
] | 008b70cfee43f387fbfb0aefd55d52f7713a4ddfe7e6af3cfd28870f5a54384f |
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import ctypes, ctypes.wintypes as types, _winreg as winreg, struct, datetime
import winerror, win32con
# Binding to C library {{{
advapi32 = ctypes.windll.advapi32
HKEY = types.HKEY
PHKEY = ctypes.POINTER(HKEY)
DWORD = types.DWORD
BYTE = types.BYTE
LONG = types.LONG
ULONG = types.ULONG
LPDWORD = ctypes.POINTER(DWORD)
LPBYTE = ctypes.POINTER(BYTE)
LPCWSTR = types.LPCWSTR
LPWSTR = types.LPWSTR
LPCVOID = types.LPCVOID
HKEY_CURRENT_USER = HKCU = HKEY(ULONG(winreg.HKEY_CURRENT_USER).value)
HKEY_CLASSES_ROOT = HKCR = HKEY(ULONG(winreg.HKEY_CLASSES_ROOT).value)
HKEY_LOCAL_MACHINE = HKLM = HKEY(ULONG(winreg.HKEY_LOCAL_MACHINE).value)
KEY_READ = winreg.KEY_READ
KEY_ALL_ACCESS = winreg.KEY_ALL_ACCESS
RRF_RT_ANY = 0x0000ffff
RRF_NOEXPAND = 0x10000000
RRF_ZEROONFAILURE = 0x20000000
class FILETIME(ctypes.Structure):
    # Win32 FILETIME: a 64-bit count of 100-nanosecond intervals since
    # 1601-01-01 (UTC), split into two 32-bit halves.
    _fields_ = [("dwLowDateTime", DWORD), ("dwHighDateTime", DWORD)]
def default_errcheck(result, func, args):
    # Default ctypes errcheck: registry APIs return ERROR_SUCCESS (0) on
    # success and a Win32 error code otherwise.
    if result != getattr(winerror, 'ERROR_SUCCESS', 0):  # On shutdown winerror becomes None
        raise ctypes.WinError(result)
    return args
null = object()  # sentinel distinguishing "no default supplied" from None


class a(object):
    # Describes one argument of a wrapped Win32 function for cwrap().
    # ctypes paramflags use direction 1 for input and 2 for output params;
    # output params become return values of the bound function.
    def __init__(self, name, typ, default=null, in_arg=True):
        self.typ = typ
        if default is null:
            self.spec = ((1 if in_arg else 2), name)
        else:
            self.spec = ((1 if in_arg else 2), name, default)
def cwrap(name, restype, *args, **kw):
params = (restype,) + tuple(x.typ for x in args)
paramflags = tuple(x.spec for x in args)
func = ctypes.WINFUNCTYPE(*params)((name, kw.get('lib', advapi32)), paramflags)
func.errcheck = kw.get('errcheck', default_errcheck)
return func
# Core key lifecycle APIs.  Output parameters (in_arg=False) are returned
# by the bound function rather than passed in.
RegOpenKey = cwrap(
    'RegOpenKeyExW', LONG, a('key', HKEY), a('sub_key', LPCWSTR), a('options', DWORD, 0), a('access', ULONG, KEY_READ), a('result', PHKEY, in_arg=False))

# Returns (handle, disposition); disposition reports created-vs-opened.
RegCreateKey = cwrap(
    'RegCreateKeyExW', LONG, a('key', HKEY), a('sub_key', LPCWSTR, ''), a('reserved', DWORD, 0), a('cls', LPWSTR, None), a('options', DWORD, 0),
    a('access', ULONG, KEY_ALL_ACCESS), a('security', ctypes.c_void_p, 0), a('result', PHKEY, in_arg=False), a('disposition', LPDWORD, in_arg=False))

RegCloseKey = cwrap('RegCloseKey', LONG, a('key', HKEY))
def enum_value_errcheck(result, func, args):
    # errcheck for enumeration APIs: ERROR_MORE_DATA becomes ValueError so
    # callers can grow their buffer and retry; ERROR_NO_MORE_ITEMS becomes
    # StopIteration to terminate enumeration loops.
    if result == winerror.ERROR_SUCCESS:
        return args
    if result == winerror.ERROR_MORE_DATA:
        raise ValueError('buffer too small')
    if result == winerror.ERROR_NO_MORE_ITEMS:
        raise StopIteration()
    raise ctypes.WinError(result)
# Enumerates one value of a key per call, selected by index.
RegEnumValue = cwrap(
    'RegEnumValueW', LONG, a('key', HKEY), a('index', DWORD), a('value_name', LPWSTR), a('value_name_size', LPDWORD), a('reserved', LPDWORD),
    a('value_type', LPDWORD), a('data', LPBYTE), a('data_size', LPDWORD), errcheck=enum_value_errcheck)
def last_error_errcheck(result, func, args):
    # errcheck for APIs that signal failure by returning 0 and setting the
    # thread's last-error code (which ctypes.WinError() reads).
    if result == 0:
        raise ctypes.WinError()
    return args
ExpandEnvironmentStrings = cwrap(
    'ExpandEnvironmentStringsW', DWORD, a('src', LPCWSTR), a('dest', LPWSTR), a('size', DWORD), errcheck=last_error_errcheck, lib=ctypes.windll.kernel32)


def expand_environment_strings(src):
    # Expand %VAR% references in src.  32K characters is the maximum size
    # of an environment variable value on Windows.
    buf = ctypes.create_unicode_buffer(32 * 1024)
    ExpandEnvironmentStrings(src, buf, len(buf))
    return buf.value
def convert_to_registry_data(value, has_expansions=False):
    """Convert a Python value into a (buffer, registry_type, size_in_bytes) triple.

    Strings become REG_SZ (REG_EXPAND_SZ when *has_expansions*), sequences
    become REG_MULTI_SZ, integers become REG_DWORD with a REG_QWORD
    fallback for values too large for 32 bits, and None maps to REG_NONE.

    :raises ValueError: for unsupported value types
    """
    if value is None:
        return None, winreg.REG_NONE, 0
    if isinstance(value, (type(''), bytes)):
        buf = ctypes.create_unicode_buffer(value)
        # Sizes are in bytes; UTF-16 code units are 2 bytes each.
        return buf, (winreg.REG_EXPAND_SZ if has_expansions else winreg.REG_SZ), len(buf) * 2
    if isinstance(value, (list, tuple)):
        # REG_MULTI_SZ is NUL-joined with a double-NUL terminator.
        buf = ctypes.create_unicode_buffer('\0'.join(map(type(''), value)) + '\0\0')
        return buf, winreg.REG_MULTI_SZ, len(buf) * 2
    if isinstance(value, (int, long)):
        try:
            raw, dtype = struct.pack(str('L'), value), winreg.REG_DWORD
        except struct.error:
            # BUGFIX: previously only `raw` was assigned here (and to a
            # 2-tuple at that), leaving dtype unset and breaking the
            # QWORD fallback for values that overflow 32 bits.
            raw, dtype = struct.pack(str('Q'), value), win32con.REG_QWORD
        buf = ctypes.create_string_buffer(raw)
        return buf, dtype, len(buf)
    raise ValueError('Unknown data type: %r' % value)
def convert_registry_data(raw, size, dtype):
    # Inverse of convert_to_registry_data: turn a raw registry buffer of
    # `size` bytes with registry type `dtype` back into a Python value.
    if dtype == winreg.REG_NONE:
        return None
    if dtype == winreg.REG_BINARY:
        return ctypes.string_at(raw, size)
    if dtype in (winreg.REG_SZ, winreg.REG_EXPAND_SZ, winreg.REG_MULTI_SZ):
        # size is in bytes; UTF-16 code units are 2 bytes each.  Trailing
        # NULs (terminators) are stripped.
        ans = ctypes.wstring_at(raw, size // 2).rstrip('\0')
        if dtype == winreg.REG_MULTI_SZ:
            ans = tuple(ans.split('\0'))
        elif dtype == winreg.REG_EXPAND_SZ:
            ans = expand_environment_strings(ans)
        return ans
    if dtype == winreg.REG_DWORD:
        if size == 0:
            return 0
        return ctypes.cast(raw, LPDWORD).contents.value
    if dtype == win32con.REG_QWORD:
        if size == 0:
            return 0
        return ctypes.cast(raw, ctypes.POINTER(types.QWORD)).contents.value
    raise ValueError('Unsupported data type: %r' % dtype)
# RegSetKeyValueW only exists on Windows Vista and newer; failure to bind
# it means the host OS is too old to run this code.
try:
    RegSetKeyValue = cwrap(
        'RegSetKeyValueW', LONG, a('key', HKEY), a('sub_key', LPCWSTR, None), a('name', LPCWSTR, None),
        a('dtype', DWORD, winreg.REG_SZ), a('data', LPCVOID, None), a('size', DWORD))
except Exception:
    raise RuntimeError('calibre requires Windows Vista or newer to run, the last version of calibre'
                       ' that could run on Windows XP is version 1.48, available from: http://download.calibre-ebook.com/')
def delete_value_errcheck(result, func, args):
    # errcheck for deletion APIs: deleting something that does not exist
    # is deliberately not an error.
    if result == winerror.ERROR_FILE_NOT_FOUND:
        return args
    if result != 0:
        raise ctypes.WinError(result)
    return args
RegDeleteKeyValue = cwrap(
    'RegDeleteKeyValueW', LONG, a('key', HKEY), a('sub_key', LPCWSTR, None), a('name', LPCWSTR, None), errcheck=delete_value_errcheck)

RegDeleteTree = cwrap(
    'RegDeleteTreeW', LONG, a('key', HKEY), a('sub_key', LPCWSTR, None), errcheck=delete_value_errcheck)

# Enumerates sub-key names; the FILETIME output parameter reports the
# sub-key's last-write time and is returned by the bound function.
RegEnumKeyEx = cwrap(
    'RegEnumKeyExW', LONG, a('key', HKEY), a('index', DWORD), a('name', LPWSTR), a('name_size', LPDWORD), a('reserved', LPDWORD, None),
    a('cls', LPWSTR, None), a('cls_size', LPDWORD, None), a('last_write_time', ctypes.POINTER(FILETIME), in_arg=False),
    errcheck=enum_value_errcheck)
def get_value_errcheck(result, func, args):
    # errcheck for value-reading APIs: buffer growth is signalled with
    # ValueError, a missing value with KeyError.
    if result == winerror.ERROR_SUCCESS:
        return args
    if result == winerror.ERROR_MORE_DATA:
        raise ValueError('buffer too small')
    if result == winerror.ERROR_FILE_NOT_FOUND:
        raise KeyError('No such value found')
    raise ctypes.WinError(result)
RegGetValue = cwrap(
    'RegGetValueW', LONG, a('key', HKEY), a('sub_key', LPCWSTR, None), a('value_name', LPCWSTR, None), a('flags', DWORD, RRF_RT_ANY),
    a('data_type', LPDWORD, 0), a('data', ctypes.c_void_p, 0), a('size', LPDWORD, 0), errcheck=get_value_errcheck
)

# Loads a language-specific (MUI) string value, e.g. "@shell32.dll,-21770".
RegLoadMUIString = cwrap(
    'RegLoadMUIStringW', LONG, a('key', HKEY), a('value_name', LPCWSTR, None), a('data', LPWSTR, None), a('buf_size', DWORD, 0),
    a('size', LPDWORD, 0), a('flags', DWORD, 0), a('directory', LPCWSTR, None), errcheck=get_value_errcheck
)
def filetime_to_datettime(ft):
    # A FILETIME counts 100-nanosecond ticks since the Windows epoch
    # (1601-01-01 UTC); combine the two 32-bit halves and convert the
    # tick count to microseconds.
    ticks = (ft.dwHighDateTime << 32) | ft.dwLowDateTime
    windows_epoch = datetime.datetime(1601, 1, 1, 0, 0, 0)
    return windows_epoch + datetime.timedelta(microseconds=ticks / 10)
# }}}
class Key(object):
def __init__(self, create_at=None, open_at=None, root=HKEY_CURRENT_USER, open_mode=KEY_READ):
root = getattr(root, 'hkey', root)
self.was_created = False
if create_at is not None:
self.hkey, self.was_created = RegCreateKey(root, create_at)
elif open_at is not None:
self.hkey = RegOpenKey(root, open_at, 0, open_mode)
else:
self.hkey = HKEY()
def get(self, value_name=None, default=None, sub_key=None):
data_buf = ctypes.create_string_buffer(1024)
len_data_buf = DWORD(len(data_buf))
data_type = DWORD(0)
while True:
len_data_buf.value = len(data_buf)
try:
RegGetValue(self.hkey, sub_key, value_name, RRF_RT_ANY | RRF_NOEXPAND | RRF_ZEROONFAILURE,
ctypes.byref(data_type), data_buf, len_data_buf)
break
except ValueError:
data_buf = ctypes.create_string_buffer(2 * len(data_buf))
except KeyError:
return default
return convert_registry_data(data_buf, len_data_buf.value, data_type.value)
def get_mui_string(self, value_name=None, default=None, directory=None, fallback=True):
data_buf = ctypes.create_unicode_buffer(1024)
len_data_buf = DWORD(len(data_buf))
size = DWORD(0)
while True:
len_data_buf.value = len(data_buf)
try:
RegLoadMUIString(self.hkey, value_name, data_buf, 2 * len(data_buf), ctypes.byref(size), 0, directory)
break
except ValueError:
data_buf = ctypes.create_unicode_buffer(max(2 * len(data_buf), size.value // 2))
except KeyError:
return default
except WindowsError as err:
if fallback and err.errno == winerror.ERROR_BAD_COMMAND:
return self.get(value_name=value_name, default=default)
raise
return data_buf.value
def iterkeynames(self, get_last_write_times=False):
' Iterate over the names of all keys in this key '
name_buf = ctypes.create_unicode_buffer(1024)
lname_buf = DWORD(len(name_buf))
i = 0
while True:
lname_buf.value = len(name_buf)
try:
file_time = RegEnumKeyEx(self.hkey, i, name_buf, ctypes.byref(lname_buf))
except ValueError:
raise RuntimeError('Enumerating keys failed with buffer too small, which should never happen')
if get_last_write_times:
yield name_buf.value[:lname_buf.value], filetime_to_datettime(file_time)
else:
yield name_buf.value[:lname_buf.value]
i += 1
def delete_value(self, name=None, sub_key=None):
' Delete the named value from this key. If name is None the default value is deleted. If name does not exist, not error is reported. '
RegDeleteKeyValue(self.hkey, sub_key, name)
def delete_tree(self, sub_key=None):
''' Delete this all children of this key. Note that a key is not
actually deleted till the last handle to it is closed. Also if you
specify a sub_key, then the sub-key is deleted completely. If sub_key
does not exist, no error is reported.'''
RegDeleteTree(self.hkey, sub_key)
def set(self, name=None, value=None, sub_key=None, has_expansions=False):
''' Set a value for this key (with optional sub-key). If name is None,
the Default value is set. value can be an integer, a string or a list
of strings. If you want to use expansions, set has_expansions=True. '''
value, dtype, size = convert_to_registry_data(value, has_expansions=has_expansions)
RegSetKeyValue(self.hkey, sub_key, name, dtype, value, size)
    def set_default_value(self, sub_key=None, value=None, has_expansions=False):
        ''' Set the Default (unnamed) value for this key or the optional
        sub_key. Convenience wrapper around set() with name=None. '''
        self.set(sub_key=sub_key, value=value, has_expansions=has_expansions)
def sub_key(self, path, allow_create=True, open_mode=KEY_READ):
' Create (or open) a sub-key at the specified relative path. When opening instead of creating, use open_mode '
if allow_create:
return Key(create_at=path, root=self.hkey)
return Key(open_at=path, root=self.hkey)
    def itervalues(self, get_data=False, sub_key=None):
        '''Iterate over all values in this key (or optionally the specified
        sub-key). If get_data is True also return the data for every value,
        otherwise, just the name. Iteration ends when the wrapped RegEnumValue
        call signals no-more-items (presumably via its errcheck raising past
        this generator -- TODO confirm). Silently yields nothing if sub_key
        cannot be opened.'''
        key = self.hkey
        if sub_key is not None:
            try:
                key = RegOpenKey(key, sub_key)
            except WindowsError:
                # sub_key does not exist (or access denied): behave as empty.
                return
        try:
            # 16383 chars is the documented maximum registry value-name
            # length, +2 for safety/NUL.
            name_buf = ctypes.create_unicode_buffer(16385)
            lname_buf = DWORD(len(name_buf))
            if get_data:
                data_buf = (BYTE * 1024)()
                ldata_buf = DWORD(len(data_buf))
                vtype = DWORD(0)
            i = 0
            while True:
                # Reset buffer sizes each round: the API overwrites them with
                # the actual lengths on every call.
                lname_buf.value = len(name_buf)
                if get_data:
                    ldata_buf.value = len(data_buf)
                    try:
                        RegEnumValue(
                            key, i, name_buf, ctypes.byref(lname_buf), None, ctypes.byref(vtype), data_buf, ctypes.byref(ldata_buf))
                    except ValueError:
                        # Data buffer too small: ldata_buf now holds the
                        # required byte count; grow and retry the same index
                        # (i is deliberately not incremented).
                        data_buf = (BYTE * ldata_buf.value)()
                        continue
                    data = convert_registry_data(data_buf, ldata_buf.value, vtype.value)
                    yield name_buf.value[:lname_buf.value], data
                else:
                    RegEnumValue(
                        key, i, name_buf, ctypes.byref(lname_buf), None, None, None, None)
                    yield name_buf.value[:lname_buf.value]
                i += 1
        finally:
            # Only close handles we opened ourselves; self.hkey belongs to
            # the caller/instance.
            if sub_key is not None:
                RegCloseKey(key)
    def __enter__(self):
        # Context-manager support: `with Key(...) as k:` closes on exit.
        return self
    def __exit__(self, *args):
        # Always close the handle; exceptions are not suppressed.
        self.close()
    def __nonzero__(self):
        # Truthy while we hold an open handle. NOTE(review): this is the
        # Python 2 protocol; Python 3 would need __bool__ as well.
        return bool(self.hkey)
    def close(self):
        ''' Close the underlying registry handle, if any. Safe to call
        multiple times and safe during interpreter shutdown. '''
        if not getattr(self, 'hkey', None):
            return
        if RegCloseKey is None or HKEY is None:
            return  # globals become None during exit
        RegCloseKey(self.hkey)
        # Replace with a fresh null handle so repeated close() is a no-op.
        self.hkey = HKEY()
    def __del__(self):
        # Best-effort cleanup if the caller forgot to close().
        self.close()
if __name__ == '__main__':
    # Manual smoke test: exercises open/create, value set/get (including
    # unicode and expandable values), key enumeration and tree deletion
    # against the live Windows registry under Software\calibre\winregtest.
    from pprint import pprint
    k = Key(open_at=r'Software\RegisteredApplications', root=HKLM)
    pprint(tuple(k.itervalues(get_data=True)))
    k = Key(r'Software\calibre\winregtest')
    k.set('Moose.Cat.1')
    k.set('unicode test', 'fällen粗楷体简a\U0001f471')
    k.set('none test')
    k.set_default_value(r'other\key', '%PATH%', has_expansions=True)
    pprint(tuple(k.itervalues(get_data=True)))
    pprint(k.get('unicode test'))
    k.set_default_value(r'delete\me\please', 'xxx')
    pprint(tuple(k.iterkeynames(get_last_write_times=True)))
    k.delete_tree('delete')
    pprint(tuple(k.iterkeynames(get_last_write_times=True)))
| songfj/calibre | src/calibre/utils/winreg/lib.py | Python | gpl-3.0 | 14,668 | [
"MOOSE"
] | f741d8dd7316718d97ce99516324fbf6494896c8040209929d64ef10e37501bf |
# Orca
#
# Copyright 2004-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
# Version-control keyword metadata for this script-settings module.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2008 Sun Microsystems Inc."
__license__ = "LGPL"
# If True, it tells Orca to automatically perform a SayAll operation
# when a message is first loaded.
#
sayAllOnLoad = False
# Whether we should grab focus on a focusable ancestor when setting
# the caret position. See bug 608149.
#
grabFocusOnAncestor = True
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/Thunderbird/script_settings.py | Python | gpl-3.0 | 1,210 | [
"ORCA"
] | 86de4716a9ebdabfb6e43da164156cd0355c49c743f2c5fcbe96ffb98bc629ae |
from sfepy.base.base import *
import sfepy.base.ioutils as io
from sfepy.fem import ProblemDefinition
from sfepy.base.conf import get_standard_keywords
##
# c: 03.07.2007, r: 27.02.2008
def save_only( conf, save_names, problem = None ):
    """Save information available prior to setting equations and
    solving them.

    Parameters:
        conf : problem configuration (provides variables, ebcs, epbcs, ts,
            funmod as needed).
        save_names : Struct with optional attributes `regions`,
            `field_meshes`, `region_field_meshes`, `ebc`; any non-None
            attribute triggers the corresponding save.
        problem : an existing ProblemDefinition, or None to build one from
            `conf` (without variables).
    """
    if problem is None:
        problem = ProblemDefinition.from_conf( conf, init_variables = False )
    if save_names.regions is not None:
        problem.save_regions( save_names.regions )
    if save_names.field_meshes is not None:
        problem.save_field_meshes( save_names.field_meshes )
    if save_names.region_field_meshes is not None:
        problem.save_region_field_meshes( save_names.region_field_meshes )
    if save_names.ebc is not None:
        if not hasattr( problem, 'variables' ):
            problem.set_variables( conf.variables )
        try:
            ts = TimeStepper.from_conf( conf.ts )
            ts.set_step( 0 )
        except Exception:
            # Stationary problems have no time stepper configured; proceed
            # without one. (Was a bare except; narrowed so Ctrl-C still works.)
            ts = None
        try:
            problem.variables.equation_mapping( conf.ebcs, conf.epbcs,
                                                problem.domain.regions, ts,
                                                conf.funmod )
        except Exception as e:
            output( 'cannot make equation mapping!' )
            output( 'reason: %s' % e )
        else:
            problem.save_ebc( save_names.ebc )
##
# 20.03.2007, c
# 30.03.2007
# 28.05.2007
# 03.07.2007
# 18.07.2007
# 02.10.2007
# 03.10.2007
def solve_stationary( conf, data = None, save_names = None, nls_status = None ):
    """Set up and solve a stationary problem from the given configuration.

    Returns the (problem, state, data) triple; `data` is the (possibly
    empty) term-dependent data dict passed in by the caller.
    """
    data = {} if data is None else data
    problem = ProblemDefinition.from_conf( conf )
    problem.time_update( None )
    if save_names is not None:
        # Dump auxiliary information (regions, meshes, EBCs) before solving.
        save_only( conf, save_names, problem = problem )
    state = problem.solve( nls_status = nls_status )
    return problem, state, data
def prepare_save_data( ts, conf ):
    """Compute which time steps should be saved.

    Reads `conf.options.save_steps` (-1 or missing means "save every
    step") and spreads that many save points evenly over the time
    stepper's range.

    Returns (suffix, is_save): the file-name suffix template of `ts` and a
    sorted, unique int32 array of step indices to save at.
    """
    try:
        save_steps = conf.options.save_steps
    except AttributeError:
        # Narrowed from a bare except: only "option not present" is expected.
        save_steps = -1
    if save_steps == -1:
        save_steps = ts.n_step
    is_save = nm.linspace( 0, ts.n_step - 1, save_steps ).astype( nm.int32 )
    # nm.unique replaces the long-removed nm.unique1d; it both deduplicates
    # and sorts the step indices.
    is_save = nm.unique( is_save )
    return ts.suffix, is_save
def time_step_function( ts, state0, problem, data ):
    """Advance the problem by one time step; used as the step function of
    the time solver (see solve_evolutionary_op).

    On step 0 it performs all the one-off initialization (EBC application,
    cache/residual warm-up, matrix pre-assembly for linear problems, solver
    setup) and returns the initial state; on later steps it just solves for
    the new state. Raises ValueError if the initial residual or tangent
    matrix evaluation fails.
    """
    problem.time_update( ts )
    if ts.step == 0:
        problem.apply_ebc( state0 )
        state = state0.copy()
        problem.init_time( ts )
        if problem.equations.caches:
            # Initialize caches by evaluating the residual once.
            ev = problem.get_evaluator( ts = ts, **data )
            vec_r, ret = ev.eval_residual( state )
            if ret == 0: # OK.
                err = nla.norm( vec_r )
                output( 'initial residual: %e' % err )
            else:
                output( 'initial residual evaluation failed, giving up...' )
                raise ValueError
        else:
            # Just initialize data of state variables.
            problem.variables.data_from_state( state )
        if problem.is_linear():
            # Assemble linear system matrix once, reused for all
            # time steps.
            ev = problem.get_evaluator( ts = ts, mtx = problem.mtx_a, **data )
            mtx_a, ret = ev.eval_tangent_matrix( state )
            if ret != 0:
                output( 'matrix evaluation failed, giving up...' )
                raise ValueError
        else:
            mtx_a = None
        # Initialize solvers (and possibly presolve the matrix).
        problem.init_solvers( ts = ts, mtx = mtx_a, **data )
        # Initialize variables with history.
        problem.init_variables( state0 )
    else:
        state = problem.solve( state0 = state0, ts = ts, **data )
    return state
def solve_evolutionary_op( problem,
                           save_results = True, return_history = False,
                           post_process_hook = None, step_hook = None ):
    """Time-step through an evolutionary problem, optionally saving selected
    steps, and return the final (state, data) pair.

    TODO return_history -- the parameter is currently unused.

    NOTE(review): solve_direct calls this as
    solve_evolutionary_op(problem, options, ...), so `save_results` actually
    receives the (truthy) options object there -- confirm intent.
    """
    data = {}
    time_solver = problem.get_time_solver( step_fun = time_step_function,
                                           step_args = (problem, data) )
    # Which steps to save, and the file-name suffix template.
    suffix, is_save = prepare_save_data( time_solver.ts,
                                         problem.conf )
    state0 = problem.create_state_vector()
    problem.setup_ic()
    problem.apply_ic( state0 )
    # ii indexes the next entry of is_save still waiting to be matched.
    ii = 0
    for ts, state in time_solver( state0 ):
        if step_hook is not None:
            step_hook( problem, ts, state )
        if save_results and (is_save[ii] == ts.step):
            filename = problem.get_output_name( suffix = suffix % ts.step )
            problem.save_state( filename, state,
                                post_process_hook = post_process_hook )
            ii += 1
        problem.advance( ts )
    return state, data
def solve_stationary_op( problem, save_results = True, ts = None,
                         post_process_hook = None ):
    """Solve a stationary problem and optionally save the resulting state.

    Returns the (state, data) pair expected by solve_direct.
    """
    term_data = {}
    problem.time_update( ts )
    state = problem.solve()
    if save_results:
        out_name = problem.get_output_name()
        problem.save_state( out_name, state,
                            post_process_hook = post_process_hook )
    return state, term_data
def solve_direct( conf, options, problem = None, step_hook = None,
                  post_process_hook = None ):
    """Generic (simple) problem solver.

    Builds (or reuses) a ProblemDefinition, dumps any requested auxiliary
    data (EBCs, regions, meshes), then dispatches to the evolutionary or
    stationary solver depending on whether `conf.options` defines `ts`.

    Returns (problem, state, data), or (None, None, None) when
    options.solve_not is set.
    """
    if problem is None:
        problem = ProblemDefinition.from_conf( conf )
    if options.output_filename_trunk:
        ofn_trunk = options.output_filename_trunk
        problem.ofn_trunk = ofn_trunk
    if options.output_format:
        problem.output_format = options.output_format
    ofn_trunk = problem.ofn_trunk
    save_names = Struct( ebc = None, regions = None, field_meshes = None,
                         region_field_meshes = None )
    if options.save_ebc:
        save_names.ebc = ofn_trunk + '_ebc.vtk'
    if options.save_regions:
        save_names.regions = ofn_trunk + '_region'
    if options.save_field_meshes:
        save_names.field_meshes = ofn_trunk + '_field'
    if options.save_region_field_meshes:
        save_names.region_field_meshes = ofn_trunk + '_region_field'
    # Only call save_only() if at least one save target was requested.
    # (.items() instead of Py2-only .iteritems(); works on 2.6+ and 3.)
    is_extra_save = any( val is not None
                         for val in save_names.to_dict().values() )
    if is_extra_save:
        save_only( conf, save_names )
    if options.solve_not:
        return None, None, None
    if hasattr( conf.options, 'ts' ):
        ##
        # Time-dependent problem.
        out = solve_evolutionary_op( problem, options,
                                     post_process_hook = post_process_hook,
                                     step_hook = step_hook )
    else:
        ##
        # Stationary problem.
        out = solve_stationary_op( problem, options,
                                   post_process_hook = post_process_hook )
    state, data = out
    return problem, state, data
| certik/sfepy | sfepy/solvers/generic.py | Python | bsd-3-clause | 6,822 | [
"VTK"
] | 83b5bf4188c0c0b5175f279da7188e4e7391813bf6b18b2909f60b071ce5018b |
from datetime import datetime, timedelta
import logging
from django.core.urlresolvers import reverse
from django.http import Http404
from django.utils import html
from django.utils.safestring import mark_safe
import pytz
from corehq import Domain
from corehq.apps import reports
from corehq.apps.app_manager.models import get_app, Form, RemoteApp
from corehq.apps.app_manager.util import ParentCasePropertyBuilder
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
from corehq.apps.domain.middleware import CCHQPRBACMiddleware
from corehq.apps.reports.display import xmlns_to_name
from couchdbkit.ext.django.schema import *
from corehq.apps.reports.exportfilters import form_matches_users, is_commconnect_form, default_form_filter, \
default_case_filter
from corehq.apps.users.models import WebUser, CommCareUser, CouchUser
from couchexport.models import SavedExportSchema, GroupExportConfiguration, FakeSavedExportSchema
from couchexport.transforms import couch_to_excel_datetime, identity
from couchexport.util import SerializableFunction
import couchforms
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.couch.database import get_db
from dimagi.utils.decorators.memoized import memoized
from django.conf import settings
from django.core.validators import validate_email
from corehq.apps.reports.dispatcher import (ProjectReportDispatcher,
CustomProjectReportDispatcher)
from corehq.apps.adm.dispatcher import ADMSectionDispatcher
import json
import calendar
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from dimagi.utils.logging import notify_exception
from dimagi.utils.web import get_url_base
from django_prbac.exceptions import PermissionDenied
class HQUserType(object):
    """Enumeration of the user categories a report can filter on, plus
    helpers that build lists of HQUserToggle objects describing which
    categories are shown/selected in the report filter UI."""
    REGISTERED = 0
    DEMO_USER = 1
    ADMIN = 2
    UNKNOWN = 3
    COMMTRACK = 4
    # Display names, indexed by the constants above.
    human_readable = [settings.COMMCARE_USER_TERM,
                      ugettext_noop("demo_user"),
                      ugettext_noop("admin"),
                      ugettext_noop("Unknown Users"),
                      ugettext_noop("CommTrack")]
    # Which categories are toggled on by default.
    toggle_defaults = (True, False, False, False, False)
    count = len(human_readable)
    # Which categories appear in the filter at all, by default.
    included_defaults = (True, True, True, True, False)
    @classmethod
    def use_defaults(cls):
        # Default filter set: everything but COMMTRACK shown, only
        # REGISTERED toggled on.
        return cls._get_manual_filterset(cls.included_defaults, cls.toggle_defaults)
    @classmethod
    def all_but_users(cls):
        # Same visibility as the defaults, but with every category except
        # REGISTERED toggled on.
        no_users = [True] * cls.count
        no_users[cls.REGISTERED] = False
        return cls._get_manual_filterset(cls.included_defaults, no_users)
    @classmethod
    def commtrack_defaults(cls):
        # this is just a convenience method for clarity on commtrack projects
        return cls.all()
    @classmethod
    def all(cls):
        # Every category visible, default toggle states applied.
        defaults = (True,) * cls.count
        return cls._get_manual_filterset(defaults, cls.toggle_defaults)
    @classmethod
    def _get_manual_filterset(cls, included, defaults):
        """
        manually construct a filter set. included and defaults should both be
        arrays of booleans mapping to values in human_readable and whether they should be
        included and defaulted, respectively.
        """
        return [HQUserToggle(i, defaults[i]) for i in range(len(cls.human_readable)) if included[i]]
    @classmethod
    def use_filter(cls, ufilter):
        # ufilter is an iterable of stringified type indices; a toggle is on
        # iff its index appears in ufilter.
        return [HQUserToggle(i, unicode(i) in ufilter) for i in range(len(cls.human_readable))]
class HQToggle(object):
    """Small value object pairing a toggle's type id with its display name
    and on/off state."""
    type = None
    show = False
    name = None

    def __init__(self, type, show, name):
        self.type = type
        self.show = show
        self.name = name

    def __repr__(self):
        # e.g. HQToggle[3:True:users]
        return "{0}[{1}:{2}:{3}]".format(
            self.__class__.__name__, self.type, self.show, self.name)
class HQUserToggle(HQToggle):
    """An HQToggle whose display name is looked up (and translated) from
    HQUserType.human_readable by the given type index."""
    def __init__(self, type, show):
        name = _(HQUserType.human_readable[type])
        super(HQUserToggle, self).__init__(type, show, name)
class TempCommCareUser(CommCareUser):
    """An in-memory, never-persisted CommCareUser used by reports to stand
    in for special usernames (demo user, admin, unknown submitters). The
    filter_flag records which HQUserType category the username mapped to."""
    filter_flag = IntegerProperty()
    def __init__(self, domain, username, uuid):
        # Classify the username against the special human_readable names;
        # anything unrecognized is flagged UNKNOWN.
        if username == HQUserType.human_readable[HQUserType.DEMO_USER]:
            filter_flag = HQUserType.DEMO_USER
        elif username == HQUserType.human_readable[HQUserType.ADMIN]:
            filter_flag = HQUserType.ADMIN
        else:
            filter_flag = HQUserType.UNKNOWN
        super(TempCommCareUser, self).__init__(
            domain=domain,
            username=username,
            _id=uuid,
            date_joined=datetime.utcnow(),
            is_active=False,
            user_data={},
            first_name='',
            last_name='',
            filter_flag=filter_flag
        )
    def save(self, **params):
        # Deliberately unsaveable: this object exists only for display.
        raise NotImplementedError
    @property
    def userID(self):
        return self._id
    @property
    def username_in_report(self):
        # HTML-formatted username for report cells; escaped before markup
        # is added.
        if self.filter_flag == HQUserType.UNKNOWN:
            final = mark_safe('%s <strong>[unregistered]</strong>' % html.escape(self.username))
        elif self.filter_flag == HQUserType.DEMO_USER:
            final = mark_safe('<strong>%s</strong>' % html.escape(self.username))
        else:
            final = mark_safe('<strong>%s</strong> (%s)' % tuple(map(html.escape, [self.username, self.user_id])))
        return final
    @property
    def raw_username(self):
        return self.username
    class Meta:
        app_label = 'reports'
# Allowed values for ReportConfig.date_range; interpreted in
# ReportConfig.get_date_range (and duplicated in reports/config.js).
DATE_RANGE_CHOICES = ['last7', 'last30', 'lastn', 'lastmonth', 'since', 'range']
class ReportConfig(CachedCouchDocumentMixin, Document):
    """A saved report configuration: which report, which filters, which
    date range, owned by one user within one domain. Scheduled report
    emails (ReportNotification) reference these by id."""
    # Extra computed properties included by to_complete_json().
    _extra_json_properties = ['url', 'report_name', 'date_description']
    domain = StringProperty()
    # the prefix of the report dispatcher class for this report, used to
    # get route name for url reversing, and report names
    report_type = StringProperty()
    report_slug = StringProperty()
    subreport_slug = StringProperty(default=None)
    name = StringProperty()
    description = StringProperty()
    owner_id = StringProperty()
    filters = DictProperty()
    date_range = StringProperty(choices=DATE_RANGE_CHOICES)
    days = IntegerProperty(default=None)
    start_date = DateProperty(default=None)
    end_date = DateProperty(default=None)
    def delete(self, *args, **kwargs):
        """Delete this config and detach it from any scheduled report
        notifications; a notification left with no configs is deleted too."""
        notifications = self.view('reportconfig/notifications_by_config',
            reduce=False, include_docs=True, key=self._id).all()
        for n in notifications:
            n.config_ids.remove(self._id)
            if n.config_ids:
                n.save()
            else:
                n.delete()
        return super(ReportConfig, self).delete(*args, **kwargs)
    @classmethod
    def by_domain_and_owner(cls, domain, owner_id, report_slug=None,
                            stale=True, **kwargs):
        """Fetch the configs of one owner in a domain, optionally restricted
        to a single report slug."""
        if stale:
            #kwargs['stale'] = settings.COUCH_STALE_QUERY
            pass
        if report_slug is not None:
            key = ["name slug", domain, owner_id, report_slug]
        else:
            key = ["name", domain, owner_id]
        db = cls.get_db()
        result = cache_core.cached_view(db, "reportconfig/configs_by_domain", reduce=False,
            include_docs=True, startkey=key, endkey=key + [{}], wrapper=cls.wrap, **kwargs)
        return result
    @classmethod
    def default(cls):
        # Renamed the bound parameter from `self` to `cls`: this is a
        # classmethod, so the first argument is the class. Behavior unchanged.
        return {
            'name': '',
            'description': '',
            #'date_range': 'last7',
            'days': None,
            'start_date': None,
            'end_date': None,
            'filters': {}
        }
    def to_complete_json(self):
        """Serialize including the computed display properties."""
        json = super(ReportConfig, self).to_json()
        for key in self._extra_json_properties:
            json[key] = getattr(self, key)
        return json
    @property
    @memoized
    def _dispatcher(self):
        # Map report_type back to the dispatcher that can render this report.
        dispatchers = [ProjectReportDispatcher,
                       CustomProjectReportDispatcher,
                       ADMSectionDispatcher]
        for dispatcher in dispatchers:
            if dispatcher.prefix == self.report_type:
                return dispatcher()
        raise Exception("Unknown dispatcher: %s" % self.report_type)
    def get_date_range(self):
        """Resolve date_range/days/start_date/end_date into concrete
        {'startdate': ..., 'enddate': ...} ISO strings (or {} when no range
        applies). Duplicated in reports.config.js"""
        date_range = self.date_range
        # allow old report email notifications to represent themselves as a
        # report config by leaving the default date range up to the report
        # dispatcher
        if not date_range:
            return {}
        import datetime
        from dateutil.relativedelta import relativedelta
        today = datetime.date.today()
        if date_range == 'since':
            start_date = self.start_date
            end_date = today
        elif date_range == 'range':
            start_date = self.start_date
            end_date = self.end_date
        elif date_range == 'lastmonth':
            end_date = today
            start_date = today - relativedelta(months=1) + timedelta(days=1) # add one day to handle inclusiveness
        else:
            end_date = today
            if date_range == 'last7':
                days = 7
            elif date_range == 'last30':
                days = 30
            elif date_range == 'lastn':
                days = self.days
            else:
                raise Exception("Invalid date range")
            start_date = today - datetime.timedelta(days=days)
        if start_date is None or end_date is None:
            # this is due to bad validation. see: http://manage.dimagi.com/default.asp?110906
            logging.error('scheduled report %s is in a bad state (no startdate or enddate)' % self._id)
            return {}
        return {'startdate': start_date.isoformat(),
                'enddate': end_date.isoformat()}
    @property
    @memoized
    def query_string(self):
        # Filters plus resolved date range, url-encoded; the sentinel id
        # 'dummy' (see ReportNotification.configs) is never emitted.
        from urllib import urlencode
        params = self.filters.copy()
        if self._id != 'dummy':
            params['config_id'] = self._id
        params.update(self.get_date_range())
        return urlencode(params, True)
    @property
    @memoized
    def view_kwargs(self):
        kwargs = {'domain': self.domain,
                  'report_slug': self.report_slug}
        if self.subreport_slug:
            kwargs['subreport_slug'] = self.subreport_slug
        return kwargs
    @property
    @memoized
    def url(self):
        # Best-effort: any failure (e.g. unknown dispatcher or slug) yields
        # a harmless placeholder link.
        try:
            from django.core.urlresolvers import reverse
            return reverse(self._dispatcher.name(), kwargs=self.view_kwargs) \
                   + '?' + self.query_string
        except Exception:
            return "#"
    @property
    @memoized
    def report(self):
        """
        Returns None if no report is found for that report slug, which happens
        when a report is no longer available.  All callers should handle this
        case.
        """
        return self._dispatcher.get_report(self.domain, self.report_slug)
    @property
    def report_name(self):
        try:
            if self.report is None:
                return _("Deleted Report")
            else:
                return _(self.report.name)
        except Exception:
            return _("Unsupported Report")
    @property
    def full_name(self):
        if self.name:
            return "%s (%s)" % (self.name, self.report_name)
        else:
            return self.report_name
    @property
    def date_description(self):
        # Human-readable summary of the configured date range.
        if self.date_range == 'lastmonth':
            return "Last Month"
        elif self.days and not self.start_date:
            day = 'day' if self.days == 1 else 'days'
            return "Last %d %s" % (self.days, day)
        elif self.end_date:
            return "From %s to %s" % (self.start_date, self.end_date)
        elif self.start_date:
            return "Since %s" % self.start_date
        else:
            return ''
    @property
    @memoized
    def owner(self):
        try:
            return WebUser.get_by_user_id(self.owner_id)
        except CouchUser.AccountTypeError:
            return CommCareUser.get_by_user_id(self.owner_id)
    def get_report_content(self, attach_excel=False):
        """
        Get the report's HTML content as rendered by the static view format.
        Returns an (html, excel_file_or_None) pair; on failure the html is a
        user-facing error message.
        """
        try:
            if self.report is None:
                return _("The report used to create this scheduled report is no"
                         " longer available on CommCare HQ. Please delete this"
                         " scheduled report and create a new one using an available"
                         " report."), None
        except Exception:
            pass
        # Build a synthetic request impersonating the config's owner.
        from django.http import HttpRequest, QueryDict
        request = HttpRequest()
        request.couch_user = self.owner
        request.user = self.owner.get_django_user()
        request.domain = self.domain
        request.couch_user.current_domain = self.domain
        request.GET = QueryDict(self.query_string + '&filterSet=true')
        # Make sure the request gets processed by PRBAC Middleware
        CCHQPRBACMiddleware.apply_prbac(request)
        try:
            response = self._dispatcher.dispatch(request, render_as='email',
                                                 **self.view_kwargs)
            if attach_excel is True:
                file_obj = self._dispatcher.dispatch(request, render_as='excel',
                                                     **self.view_kwargs)
            else:
                file_obj = None
            return json.loads(response.content)['report'], file_obj
        except PermissionDenied:
            return _("We are sorry, but your saved report '%(config_name)s' "
                     "is no longer accessible because your subscription does "
                     "not allow Custom Reporting. Please talk to your Project "
                     "Administrator about enabling Custom Reports. If you "
                     "want CommCare HQ to stop sending this message, please "
                     "visit %(saved_reports_url)s to remove this "
                     "Emailed Report.") % {
                'config_name': self.name,
                'saved_reports_url': "%s%s" % (
                    get_url_base(), reverse(
                        'saved_reports', args=[request.domain])),
            }, None
        except Exception:
            # Dropped the unused `as e` binding; the exception detail is
            # captured by notify_exception's own handling.
            notify_exception(None, "Error generating report")
            return _("An error occurred while generating this report."), None
class UnsupportedScheduledReportError(Exception):
pass
class ReportNotification(CachedCouchDocumentMixin, Document):
    """A scheduled report email: who owns it, who receives it, which
    ReportConfigs it renders, and on what daily/weekly/monthly schedule."""
    domain = StringProperty()
    owner_id = StringProperty()
    recipient_emails = StringListProperty()
    config_ids = StringListProperty()
    send_to_owner = BooleanProperty()
    attach_excel = BooleanProperty()
    hour = IntegerProperty(default=8)
    minute = IntegerProperty(default=0)
    day = IntegerProperty(default=1)
    interval = StringProperty(choices=["daily", "weekly", "monthly"])
    @property
    def is_editable(self):
        # Legacy documents carry a report_slug attribute instead of
        # config_ids; those cannot be edited in the current UI.
        try:
            self.report_slug
            return False
        except AttributeError:
            return True
    @classmethod
    def by_domain_and_owner(cls, domain, owner_id, stale=True, **kwargs):
        """Fetch all notifications belonging to one owner in a domain."""
        if stale:
            kwargs['stale'] = settings.COUCH_STALE_QUERY
        key = [domain, owner_id]
        db = cls.get_db()
        result = cache_core.cached_view(db, "reportconfig/user_notifications", reduce=False,
            include_docs=True, startkey=key, endkey=key + [{}],
            wrapper=cls.wrap, **kwargs)
        return result
    @property
    def all_recipient_emails(self):
        # Owner's address (when send_to_owner and valid) plus the explicit
        # recipient list.
        # handle old documents
        if not self.owner_id:
            return [self.owner.get_email()]
        emails = []
        if self.send_to_owner:
            if self.owner.is_web_user():
                emails.append(self.owner.username)
            else:
                email = self.owner.get_email()
                try:
                    validate_email(email)
                    emails.append(email)
                except Exception:
                    # Invalid/missing email on a mobile worker: skip silently.
                    pass
        emails.extend(self.recipient_emails)
        return emails
    @property
    @memoized
    def owner(self):
        id = self.owner_id
        try:
            return WebUser.get_by_user_id(id)
        except CouchUser.AccountTypeError:
            return CommCareUser.get_by_user_id(id)
    @property
    @memoized
    def configs(self):
        """
        Access the notification's associated configs as a list, transparently
        returning an appropriate dummy for old notifications which have
        `report_slug` instead of `config_ids`.
        """
        if self.config_ids:
            configs = ReportConfig.view('_all_docs', keys=self.config_ids,
                include_docs=True).all()
            configs = [c for c in configs if not hasattr(c, 'deleted')]
        elif self.report_slug == 'admin_domains':
            raise UnsupportedScheduledReportError("admin_domains is no longer "
                "supported as a schedulable report for the time being")
        else:
            # create a new ReportConfig object, useful for its methods and
            # calculated properties, but don't save it
            class ReadonlyReportConfig(ReportConfig):
                def save(self, *args, **kwargs):
                    pass
            config = ReadonlyReportConfig()
            object.__setattr__(config, '_id', 'dummy')
            config.report_type = ProjectReportDispatcher.prefix
            config.report_slug = self.report_slug
            config.domain = self.domain
            config.owner_id = self.owner_id
            configs = [config]
        return configs
    @property
    def day_name(self):
        # Human-readable schedule description for the UI.
        if self.interval == 'weekly':
            return calendar.day_name[self.day]
        return {
            "daily": _("Every day"),
            "monthly": _("Day %s of every month" % self.day),
        }[self.interval]
    @classmethod
    def day_choices(cls):
        """Tuples for day of week number and human-readable day of week"""
        return tuple([(val, calendar.day_name[val]) for val in range(7)])
    @classmethod
    def hour_choices(cls):
        """Tuples for hour number and human-readable hour"""
        return tuple([(val, "%s:00" % val) for val in range(24)])
    def send(self):
        """Render the attached reports and email them to all recipients."""
        # Local imports avoid circular imports with the views module.
        from dimagi.utils.django.email import send_HTML_email
        from corehq.apps.reports.views import get_scheduled_report_response
        # Scenario: user has been removed from the domain that they
        # have scheduled reports for. Delete this scheduled report
        if not self.owner.is_member_of(self.domain):
            self.delete()
            return
        if self.all_recipient_emails:
            title = "Scheduled report from CommCare HQ"
            if hasattr(self, "attach_excel"):
                attach_excel = self.attach_excel
            else:
                attach_excel = False
            body, excel_files = get_scheduled_report_response(self.owner, self.domain, self._id, attach_excel=attach_excel)
            for email in self.all_recipient_emails:
                send_HTML_email(title, email, body.content, email_from=settings.DEFAULT_FROM_EMAIL, file_attachments=excel_files)
class AppNotFound(Exception):
    """Raised when an export references an application id that can no
    longer be resolved (see FormExportSchema.app)."""
class HQExportSchema(SavedExportSchema):
    """Base class for HQ saved exports: adds a domain tag and optional
    Excel-friendly date transformation."""
    doc_type = 'SavedExportSchema'
    domain = StringProperty()
    transform_dates = BooleanProperty(default=True)
    @property
    def global_transform_function(self):
        # When enabled, convert couch datetimes to Excel-compatible values.
        if self.transform_dates:
            return couch_to_excel_datetime
        else:
            return identity
    @classmethod
    def wrap(cls, data):
        # Old documents predate transform_dates: default them to off so
        # their output does not change.
        if 'transform_dates' not in data:
            data['transform_dates'] = False
        self = super(HQExportSchema, cls).wrap(data)
        if not self.domain:
            # Legacy documents stored the domain only as index[0].
            self.domain = self.index[0]
        return self
class FormExportSchema(HQExportSchema):
    """Saved export of form submissions; index is (domain, xmlns)."""
    doc_type = 'SavedExportSchema'
    app_id = StringProperty()
    include_errors = BooleanProperty(default=False)
    @property
    @memoized
    def app(self):
        """The latest version of the associated app, None if no app_id, or
        raises AppNotFound when the id no longer resolves."""
        if self.app_id:
            try:
                return get_app(self.domain, self.app_id, latest=True)
            except Http404:
                logging.error('App %s in domain %s not found for export %s' % (
                    self.app_id,
                    self.domain,
                    self.get_id
                ))
                raise AppNotFound()
        else:
            return None
    @classmethod
    def wrap(cls, data):
        self = super(FormExportSchema, cls).wrap(data)
        if self.filter_function == 'couchforms.filters.instances':
            # grandfather in old custom exports
            self.include_errors = False
            self.filter_function = None
        return self
    @property
    def filter(self):
        """Build the SerializableFunction used to decide which forms belong
        in this export (domain users or commconnect forms, optional app
        restriction, optional exclusion of error docs)."""
        user_ids = set(CouchUser.ids_by_domain(self.domain))
        user_ids.update(CouchUser.ids_by_domain(self.domain, is_active=False))
        def _top_level_filter(form):
            # careful, closures used
            return form_matches_users(form, user_ids) or is_commconnect_form(form)
        f = SerializableFunction(_top_level_filter)
        if self.app_id is not None:
            f.add(reports.util.app_export_filter, app_id=self.app_id)
        if not self.include_errors:
            f.add(couchforms.filters.instances)
        actual = SerializableFunction(default_form_filter, filter=f)
        return actual
    @property
    def domain(self):
        # Overrides the inherited StringProperty: for form exports the
        # domain is always index[0].
        return self.index[0]
    @property
    def xmlns(self):
        return self.index[1]
    @property
    def formname(self):
        return xmlns_to_name(self.domain, self.xmlns, app_id=self.app_id)
    @property
    @memoized
    def question_order(self):
        """Dotted question indices in app-definition order, used to order
        export columns; [] when no app is available."""
        try:
            if not self.app:
                return []
        except AppNotFound:
            if settings.DEBUG:
                return []
            raise
        else:
            questions = self.app.get_questions(self.xmlns)
        order = []
        for question in questions:
            if not question['value']: # question probably belongs to a broken form
                continue
            index_parts = question['value'].split('/')
            assert index_parts[0] == ''
            index_parts[1] = 'form'
            index = '.'.join(index_parts[1:])
            order.append(index)
        return order
    def get_default_order(self):
        return {'#': self.question_order}
    def uses_cases(self):
        # True when the backing (non-remote) app form has any case actions.
        if not self.app or isinstance(self.app, RemoteApp):
            return False
        form = self.app.get_form_by_xmlns(self.xmlns)
        if form and isinstance(form, Form):
            return bool(form.active_actions())
        return False
class FormDeidExportSchema(FormExportSchema):
    """Form export variant used for de-identified exports; the transform is
    intentionally a no-op placeholder here."""
    @property
    def transform(self):
        return SerializableFunction()
    @classmethod
    def get_case(cls, doc, case_id):
        # Deliberately returns None: case lookup is not supported for
        # de-identified exports.
        pass
class CaseExportSchema(HQExportSchema):
    """Saved export of cases; index is (domain, case_type)."""
    doc_type = 'SavedExportSchema'
    @property
    def filter(self):
        return SerializableFunction(default_case_filter)
    @property
    def domain(self):
        # Overrides the inherited StringProperty: domain is index[0].
        return self.index[0]
    @property
    def domain_obj(self):
        return Domain.get_by_name(self.domain)
    @property
    def case_type(self):
        return self.index[1]
    @property
    def applications(self):
        return self.domain_obj.full_applications(include_builds=False)
    @property
    def case_properties(self):
        """Union of the case properties for this case type across all of the
        domain's applications (including parent-case contributions)."""
        props = set([])
        for app in self.applications:
            builder = ParentCasePropertyBuilder(app, ("name",))
            props |= set(builder.get_properties(self.case_type))
        return props
class FakeFormExportSchema(FakeSavedExportSchema):
    """Ad-hoc (unsaved) form export: strips internal bookkeeping tables and
    renames the main '#' table to 'Forms'."""
    def remap_tables(self, tables):
        # kill the weird confusing stuff, and rename the main table to something sane
        tables = _apply_removal(tables, ('#|#export_tag|#', '#|location_|#', '#|history|#'))
        return _apply_mapping(tables, {
            '#': 'Forms',
        })
def _apply_mapping(export_tables, mapping_dict):
def _clean(tabledata):
def _clean_tablename(tablename):
return mapping_dict.get(tablename, tablename)
return (_clean_tablename(tabledata[0]), tabledata[1])
return map(_clean, export_tables)
def _apply_removal(export_tables, removal_list):
return [tabledata for tabledata in export_tables if not tabledata[0] in removal_list]
class HQGroupExportConfiguration(CachedCouchDocumentMixin, GroupExportConfiguration):
    """
    HQ's version of a group export, tagged with a domain
    """
    domain = StringProperty()
    def get_custom_exports(self):
        """Yield this group's custom exports, rewrapped into the HQ form/case
        schema classes when the export type is recognized."""
        def _rewrap(export):
            # custom wrap if relevant
            try:
                return {
                    'form': FormExportSchema,
                    'case': CaseExportSchema,
                }[export.type].wrap(export._doc)
            except KeyError:
                return export
        for custom in list(self.custom_export_ids):
            custom_export = self._get_custom(custom)
            if custom_export:
                yield _rewrap(custom_export)
    def exports_of_type(self, type):
        # Filter all_exports down to one schema type ('form' or 'case').
        return self._saved_exports_from_configs([
            config for config, schema in self.all_exports if schema.type == type
        ])
    @property
    @memoized
    def form_exports(self):
        return self.exports_of_type('form')
    @property
    @memoized
    def case_exports(self):
        return self.exports_of_type('case')
    @classmethod
    def by_domain(cls, domain):
        return cache_core.cached_view(cls.get_db(), "groupexport/by_domain",
            key=domain,
            reduce=False,
            include_docs=True,
            wrapper=cls.wrap,
        )
    @classmethod
    def get_for_domain(cls, domain):
        """
        For when we only expect there to be one of these per domain,
        which right now is always. Returns an unsaved empty config when the
        domain has none yet.
        """
        groups = cls.by_domain(domain)
        if groups:
            if len(groups) > 1:
                logging.error("Domain %s has more than one group export config! This is weird." % domain)
            return groups[0]
        return HQGroupExportConfiguration(domain=domain)
    @classmethod
    def add_custom_export(cls, domain, export_id):
        """Attach export_id to the domain's group config (idempotent)."""
        group = cls.get_for_domain(domain)
        if export_id not in group.custom_export_ids:
            group.custom_export_ids.append(export_id)
            group.save()
        return group
    @classmethod
    def remove_custom_export(cls, domain, export_id):
        """Detach every occurrence of export_id; saves only when changed."""
        group = cls.get_for_domain(domain)
        updated = False
        while export_id in group.custom_export_ids:
            group.custom_export_ids.remove(export_id)
            updated = True
        if updated:
            group.save()
        return group
class CaseActivityReportCache(Document):
    """Cached per-domain case-activity report data.

    Stores, keyed by case type and then by "N_days" window, per-owner counts
    of active / inactive / closed cases and of recently-touched cases
    ("landmarks").  Data is pulled from the ``reports/case_activity`` couch
    view.

    NOTE(review): assumes the couch views ``reports/case_activity`` and
    ``reports/case_activity_cache`` emit keys shaped
    ``[prefix, domain, (case_type,) date, user_id]`` -- confirm against the
    design documents.
    """
    domain = StringProperty()
    timezone = StringProperty()
    last_updated = DateTimeProperty()
    active_cases = DictProperty()
    closed_cases = DictProperty()
    inactive_cases = DictProperty()
    landmark_data = DictProperty()
    _couch_view = "reports/case_activity"
    # dict key used for the "all case types" aggregate (case_type is None)
    _default_case_key = "__DEFAULT__"
    _case_list = None
    @property
    def case_list(self):
        """All case types in the domain, preceded by ``None`` ("all types")."""
        if not self._case_list:
            key = ["type", self.domain]
            data = get_db().view(self._couch_view,
                group=True,
                group_level=3,
                startkey=key,
                endkey=key+[{}]
            ).all()
            self._case_list = [None] + [item.get('key',[])[-1] for item in data]
        return self._case_list
    _now = None
    @property
    def now(self):
        """End of the current day in the report's timezone (memoized)."""
        if not self._now:
            self._now = datetime.now(tz=pytz.timezone(str(self.timezone)))
            self._now = self._now.replace(hour=23, minute=59, second=59, microsecond=999999)
        return self._now
    _milestone = None
    @property
    def milestone(self):
        """121 days before :attr:`now` (memoized)."""
        if not self._milestone:
            # Fixed: previously read ``self._now`` directly, which is None
            # until the ``now`` property has been accessed, making this raise
            # a TypeError when ``milestone`` was evaluated first.
            self._milestone = self.now - timedelta(days=121)
        return self._milestone
    def _get_user_id_counts(self, data):
        """Fold view rows into ``{user_id: total_count}``.

        The user id is assumed to be the last element of each row's key.
        """
        result = dict()
        for item in data:
            count = item.get('value', 0)
            user_id = item.get('key',[])[-1]
            if user_id:
                if user_id not in result:
                    result[user_id] = count
                else:
                    result[user_id] += count
        return result
    def _generate_landmark(self, landmark, case_type=None):
        """
        Generates a dict with counts per owner_id of the # cases modified or closed in
        the last <landmark> days.
        """
        # NOTE(review): when case_type is None the key prefix is the empty
        # string -- presumably matching the view's "all types" rows; verify.
        prefix = "" if case_type is None else "type"
        key = [prefix, self.domain]
        if case_type is not None:
            key.append(case_type)
        past = self.now - timedelta(days=landmark+1)
        data = get_db().view(self._couch_view,
            group=True,
            startkey=key+[past.isoformat()],
            endkey=key+[self.now.isoformat(), {}]
        ).all()
        return self._get_user_id_counts(data)
    def _generate_status_key(self, case_type, status="open"):
        """Build the view key for status queries, e.g. ``["status type", domain, status, case_type]``."""
        prefix = ["status"]
        key = [self.domain, status]
        if case_type is not None:
            prefix.append("type")
            key.append(case_type)
        return [" ".join(prefix)] + key
    def _generate_case_status(self, milestone=120, case_type=None, active=True, status="open"):
        """
        inactive: Generates a dict with counts per owner_id of the number of cases that are open,
        but haven't been modified in the last 120 days.
        active: Generates a dict with counts per owner_id of the number of cases that are open
        and have been modified in the last 120 days.
        """
        key = self._generate_status_key(case_type, status)
        # shift the boundary by one microsecond so active/inactive windows
        # partition the timeline without double-counting the cutoff instant
        milestone = self.now - timedelta(days=milestone+1) + (timedelta(microseconds=1) if active else timedelta(seconds=0))
        data = get_db().view(self._couch_view,
            group=True,
            startkey=key+([milestone.isoformat()] if active else []),
            endkey=key+([self.now.isoformat()] if active else [milestone.isoformat()])
        ).all()
        return self._get_user_id_counts(data)
    def case_key(self, case_type):
        """Dict key for *case_type*, mapping ``None`` to the default key."""
        return case_type if case_type is not None else self._default_case_key
    def day_key(self, days):
        """Dict key for a window of *days* days, e.g. ``"30_days"``."""
        return "%s_days" % days
    def update_landmarks(self, landmarks=None):
        """Refresh :attr:`landmark_data` for each case type and landmark window."""
        landmarks = landmarks if landmarks else [30, 60, 90]
        for case_type in self.case_list:
            case_key = self.case_key(case_type)
            if case_key not in self.landmark_data:
                self.landmark_data[case_key] = dict()
            for landmark in landmarks:
                self.landmark_data[case_key][self.day_key(landmark)] = self._generate_landmark(landmark, case_type)
    def update_status(self, milestone=120):
        """Refresh active/inactive/closed counts for each case type."""
        for case_type in self.case_list:
            case_key = self.case_key(case_type)
            if case_key not in self.active_cases:
                self.active_cases[case_key] = dict()
            if case_key not in self.inactive_cases:
                self.inactive_cases[case_key] = dict()
            if case_key not in self.closed_cases:
                self.closed_cases[case_key] = dict()
            self.active_cases[case_key][self.day_key(milestone)] = self._generate_case_status(milestone, case_type)
            self.closed_cases[case_key][self.day_key(milestone)] = self._generate_case_status(milestone,
                case_type, status="closed")
            self.inactive_cases[case_key][self.day_key(milestone)] = self._generate_case_status(milestone,
                case_type, active=False)
    @classmethod
    def get_by_domain(cls, domain, include_docs=True):
        """Query existing cache documents for *domain* (a string key)."""
        return cls.view('reports/case_activity_cache',
            reduce=False,
            include_docs=include_docs,
            key=domain
        )
    @classmethod
    def build_report(cls, domain):
        """Create or refresh the cache document for a Domain object and save it."""
        report = cls.get_by_domain(domain.name).first()
        if not report:
            report = cls(domain=str(domain.name))
            report.timezone = domain.default_timezone
        report.update_landmarks()
        report.update_status()
        report.last_updated = datetime.utcnow()
        report.save()
        return report
| SEL-Columbia/commcare-hq | corehq/apps/reports/models.py | Python | bsd-3-clause | 32,904 | [
"VisIt"
] | efff56c3a09af3ce3008638028ecd85d9a8c9b0b1d8f45d905a7a72fcf9359c1 |
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2020 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from .version import __version__
# NOTE when you do this PLY adds a module name self. to all tokens
# not sure if this is an unexpected feature or not. It can be worked around
# but for now direct import seems the safest
from . import lex
from . import yacc
import os
import math
try:
from importlib import reload # Python 3
except ImportError:
pass
class MyInfixLexer:
    """
    This parser has been cranked to handle Python infix, numpy and MathML2.0 prefix expressions
    """

    debug = 0
    LexOK = True
    # characters the lexer could not tokenize (reset to [] per instance)
    LexErrors = None
    __pwcntr__ = 0

    # MathML/libSBML/numpy function names mapped to the Python callable
    # emitted in the output expression; ``None`` marks unsupported functions.
    MathmlToNumpy_funcs = {
        'pow': 'pow',
        'root': 'pow',
        'abs': 'abs',
        'exp': 'math.exp',
        'ln': 'math.log',
        'log': 'math.log10',
        'floor': 'numpy.floor',
        'ceiling': 'numpy.ceil',
        'factorial': None,
        'sin': 'numpy.sin',
        'cos': 'numpy.cos',
        'tan': 'numpy.tan',
        'sec': None,
        'csc': None,
        'cot': None,
        'sinh': 'numpy.sinh',
        'cosh': 'numpy.cosh',
        'tanh': 'numpy.tanh',
        'sech': None,
        'csch': None,
        'coth': None,
        'arcsin': 'numpy.arcsin',
        'arccos': 'numpy.arccos',
        'arctan': 'numpy.arctan',
        'arcsec': None,
        'arccsc': None,
        'arccot': None,
        'arcsinh': 'numpy.arcsinh',
        'arccosh': 'numpy.arccosh',
        'arctanh': 'numpy.arctanh',
        'arcsech': None,
        'arccsch': None,
        'arccoth': None,
        'eq': 'operator.eq',
        'neq': 'operator.ne',
        'gt': 'operator.gt',
        'geq': 'operator.ge',
        'lt': 'operator.lt',
        'leq': 'operator.le',
        'ceil': 'numpy.ceil',
        'sqrt': 'math.sqrt',  # libsbml aliases
        'equal': 'operator.eq',
        'not_equal': 'operator.ne',  # numpy2numpy aliases
        'greater': 'operator.gt',
        'greater_equal': 'operator.ge',  # numpy2numpy aliases
        'less': 'operator.lt',
        'less_equal': 'operator.le',  # numpy2numpy aliases
        'ne': 'operator.ne',
        'ge': 'operator.ge',
        'le': 'operator.le',  # operator2operator
        'piecewise': 'self._piecewise_',
        '_piecewise_': 'self._piecewise_',
        'not': 'operator.not_',
        'not_': 'operator.not_',
    }
    # MathML constant symbols mapped to their Python/numpy spelling.
    # NOTE(review): 'numpy.NaN'/'numpy.Infinity' were removed in NumPy 2.0
    # ('numpy.nan'/'numpy.inf' are the stable spellings) -- confirm the
    # numpy version the generated code targets.
    MathmlToNumpy_symb = {
        'notanumber': 'numpy.NaN',
        'pi': 'numpy.pi',
        'infinity': 'numpy.Infinity',
        'exponentiale': 'numpy.e',
        'true': 'True',
        'false': 'False',
        'True': 'True',
        'False': 'False',
    }
    SymbolReplacements = None
    FunctionReplacements = None
    # MathML boolean operators that map directly onto Python infix keywords
    MathmlToInfix = {
        'and': 'and',
        'or': 'or',
        'true': 'True',
        'false': 'False',
        'xor': 'xor',
    }
    precedence = (
        ('left', 'ANDOR'),
        ('left', 'EQUISYMB'),
        ('left', 'PLUS', 'MINUS'),
        ('left', 'TIMES', 'DIVIDE'),
        ('left', 'POWER'),
        ('right', 'UMINUS'),
    )
    # List of token names
    tokens = (
        'REAL',
        'INT',
        'PLUS',
        'MINUS',
        'TIMES',
        'DIVIDE',
        'POWER',
        'LPAREN',
        'RPAREN',
        'NOTEQUALS',
        'NAME',
        'ANDOR',
        'COMMA',
        'EQUISYMB',
        'PIECEWISE',
        'DELAY',
    )

    def __init__(self):
        self.LexErrors = []
        # Regex fragments are raw strings: '\d', '\.', '\*' etc. are invalid
        # escape sequences in ordinary string literals (SyntaxWarning on
        # current CPython, scheduled to become a SyntaxError).  The string
        # values are unchanged.
        self.Int = r'\d+'  # Integer
        self.Dec = self.Int + r'\.' + self.Int  # Decimal
        self.Exp = r'([E|e][\+|\-]?)' + self.Int  # Exponent
        self.Real = (
            self.Dec + '(' + self.Exp + ')?' + '|' + self.Int + self.Exp
        )  # Real - dec w/o optional exp or int with exp
        # Simple tokens
        self.t_REAL = self.Real
        self.t_INT = self.Int
        self.t_PLUS = r'\+'
        self.t_MINUS = r'-'
        self.t_TIMES = r'\*'
        self.t_DIVIDE = r'/'
        self.t_POWER = r'\*\*'
        self.t_LPAREN = r'\('
        self.t_RPAREN = r'\)'
        self.t_COMMA = r','
        self.t_NOTEQUALS = r'!='

    def t_NAME(self, t):
        r'numpy\.[\w]*|math\.[\w]*|operator\.[\w]*|random\.[\w]*|self\.[\w]*|[a-zA-Z_][\w]*'
        # names are defined as anything starting with a letter OR numpy. math. or operator.
        # (the docstring above IS the token regex -- PLY reads it; do not edit)
        t.type = 'NAME'
        # allow self. to be in names but always remove! dodgy testing stage
        t.value = t.value.replace('self.', '')
        if t.value == 'and':
            t.type = 'ANDOR'
            t.value = ' %s ' % t.value
        elif t.value == 'or':
            t.type = 'ANDOR'
            t.value = ' %s ' % t.value
        elif t.value == 'xor':
            t.type = 'ANDOR'
            t.value = ' %s ' % t.value
        elif t.value == 'piecewise':
            t.type = 'PIECEWISE'
            t.value = ' %s ' % t.value
        elif t.value == 'delay':
            t.type = 'DELAY'
            t.value = ' %s ' % t.value
        return t

    def t_EQUISYMB(self, t):
        r'>=|<=|!=|==|>|<'
        t.type = 'EQUISYMB'
        t.value = ' {} '.format(t.value)
        ## 'EQUISYMB', t.value
        return t

    # Define a rule so we can track line numbers
    def t_newline(self, t):
        r'\n+'
        t.lexer.lineno += len(t.value)

    # A string containing ignored characters (spaces and tabs)
    t_ignore = ' \t'

    # Error handling rule
    def t_error(self, t):
        print("Illegal character '{}'".format(t.value[0]))
        self.LexErrors.append(t.value[0])
        self.LexOK = False
        t.lexer.skip(1)

    # Build the lexer
    def buildlexer(self, **kwargs):
        """Instantiate the PLY lexer; PLY writes its tables to a temp dir."""
        # try and find a temporary workspace
        if 'TMP' in os.environ:
            tempDir = os.environ['TMP']
        elif 'TEMP' in os.environ:
            tempDir = os.environ['TEMP']
        else:
            tempDir = os.getcwd()
        os.chdir(tempDir)
        self.lexer = lex.lex(object=self, **kwargs)

    # Test it output
    def testlexer(self, data):
        """Tokenize *data* and print each token (debug helper)."""
        self.lexer.input(data)
        while 1:
            tok = self.lexer.token()
            if not tok:
                break
            print(tok)
class MyInfixParser(MyInfixLexer):
    """PLY yacc grammar that rewrites infix/MathML expressions into Python.

    Collects symbol names in :attr:`names`, user functions in
    :attr:`functions`, and extracted ``piecewise`` definitions in
    :attr:`piecewises`.  The ``p_*`` method docstrings ARE the grammar rules
    (PLY reads them) and must not be edited.
    """
    ParseOK = True
    SymbolErrors = None
    ModelUsesNumpyFuncs = 0
    names = None
    functions = None
    output = None
    input = None
    name_prefix = '<pre>'
    name_suffix = '<suf>'
    _runCount = 0
    _runCountmax = 20
    __pwcntr__ = 0
    piecewises = None
    DelayRemoved = False

    def __init__(self):
        MyInfixLexer.__init__(self)
        self.ParseErrors = []
        self.names = []
        self.functions = []
        self.SymbolErrors = []
        self.piecewises = {}

    def setNameStr(self, prefix, suffix):
        """Set the strings wrapped around every emitted symbol name."""
        self.name_prefix = str(prefix)
        self.name_suffix = str(suffix)

    def p_error(self, t):
        try:
            self.ParseErrors.append(t)
        except Exception:
            print('p_error generated a parsing error')
        tok = yacc.token()
        return tok

    def p_infix(self, t):
        '''Expression : Expression PLUS Expression
                      | Expression MINUS Expression
                      | Expression TIMES Expression
                      | Expression DIVIDE Expression
                      | Expression EQUISYMB Expression
                      | Expression ANDOR Expression
                      | Power
                      | Number
                      | Func
                      | Equivalence
                      | Piecewise
                      | NotEquals
                      | Delay'''
        # |UMINUS : add if the
        # alternative for p_uminus is used
        if len(t.slice) == 4:
            t[0] = t[1] + t[2] + t[3]
        else:
            t[0] = t[1]

    def p_notequals(self, t):
        '''NotEquals : NOTEQUALS'''
        t[0] = t[1]

    def p_power(self, t):
        '''Power : Expression POWER Expression'''
        ## t[0] = 'numpy.power('+ t[1] + ',' + t[3] + ')' #changed to make it DeriVar compatible
        t[0] = 'pow(' + t[1] + ',' + t[3] + ')'

    def p_number(self, t):
        '''Number : REAL
                  | INT
                  | NAME'''
        try:
            t[0] = str(float(t[1]))
        except ValueError:
            if t[1].strip() in self.MathmlToNumpy_symb:
                if self.MathmlToNumpy_symb[t[1]] is None:
                    self.SymbolErrors.append(t[1])
                    print('\nSymbol \"{}\" not yet supported by PySCeS.'.format(t[1]))
                    t[0] = 'unknown_symbol_' + t[1]
                else:
                    t[0] = self.MathmlToNumpy_symb[t[1]]
                    self.ModelUsesNumpyFuncs = 1
            elif (
                t[1].replace('numpy.', '').replace('math.', '').replace('operator.', '')
                in self.MathmlToNumpy_symb
            ):
                t[0] = t[1]
            else:
                if (
                    self.SymbolReplacements is not None
                    and t[1].strip() in self.SymbolReplacements
                ):
                    # replace symb --> prefix.replacement.suffix
                    if self.SymbolReplacements[t[1]] not in self.names:
                        self.names.append(self.SymbolReplacements[t[1]])
                    t[0] = (
                        self.name_prefix
                        + self.SymbolReplacements[t[1]]
                        + self.name_suffix
                    )
                elif (
                    self.FunctionReplacements is not None
                    and t[1].strip() in self.FunctionReplacements
                ):
                    # replace symb --> (replacement)
                    t[0] = '({})'.format(self.FunctionReplacements[t[1]])
                else:
                    if t[1] not in self.names:
                        self.names.append(t[1])
                    t[0] = self.name_prefix + t[1] + self.name_suffix

    def p_uminus(self, t):
        '''Expression : MINUS Expression %prec UMINUS'''
        # Alternative '''UMINUS : MINUS Expression'''
        t[0] = t[1] + t[2]

    def p_equivalence(self, t):
        '''Equivalence : ANDOR LPAREN Expression COMMA Expression RPAREN
                       | ANDOR LPAREN Expression COMMA Expression COMMA Expression RPAREN
                       | ANDOR LPAREN Expression COMMA Expression COMMA Expression COMMA Expression RPAREN
                       | ANDOR LPAREN Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression RPAREN
                       | ANDOR LPAREN Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression RPAREN
                       | ANDOR LPAREN Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression RPAREN
                       | ANDOR LPAREN Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression RPAREN
                       | ANDOR LPAREN Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression RPAREN
                       | ANDOR LPAREN Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression RPAREN
                       | ANDOR LPAREN Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression RPAREN
                       | ANDOR LPAREN Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression COMMA Expression RPAREN
        '''
        # this is an almighty hack but i cant see any other way to do it right now ... ALL SUGGESTIONS WELCOME
        # changes and(a,b, .....) or(a,b, .....) to (a and b and ...) (a or b or ...)
        # and not(b) into self._not_(b)
        ## print 'equivalence', len(t), t[:]
        t[1] = t[1].strip()
        if t[1] in self.MathmlToInfix:
            t[0] = t[2]
            for tt in range(3, len(t)):
                if t[tt] == ',':
                    if t[1] != 'xor':
                        t[0] += ' {} '.format(t[1])
                    else:
                        # xor is emitted as inequality of the boolean operands
                        t[0] += ' {} '.format('!=')
                else:
                    t[0] += '{}'.format(t[tt])

    def p_piecewise(self, t):
        '''Piecewise : PIECEWISE LPAREN ArgListSemiCol RPAREN'''
        # Each piecewise is extracted into self.piecewises under a generated
        # __pwN__ name; the expression output only references that name.
        t[1] = t[1].strip()
        t[0] = self.MathmlToNumpy_funcs[t[1]] + t[2] + t[3] + t[4]
        pw = t[3].split(';')
        for p in range(len(pw)):
            pw[p] = pw[p].strip()
        name = '__pw{}__'.format(self.__pwcntr__)
        if len(pw) == 3:
            self.__pwcntr__ += 1
            self.piecewises.update({name: {0: [pw[1], pw[0]], 'other': pw[2]}})
        else:
            self.__pwcntr__ += 1
            self.piecewises.update({name: {}})
            # odd number of pieces means a trailing "otherwise" value
            if math.modf(len(pw) / 2.0)[0] != 0.0:
                self.piecewises[name].update({'other': pw.pop(-1)})
            else:
                self.piecewises[name].update({'other': None})
            for p in range(0, len(pw), 2):
                self.piecewises[name].update({p: [pw[p + 1], pw[p]]})
        t[0] = self.name_prefix + name + self.name_suffix

    def p_delay(self, t):
        '''Delay : DELAY LPAREN Expression COMMA Expression RPAREN'''
        # for now we just remove the delay on the expression
        # updated 201528 now requires a function delayFunc(var, delay) to deal with it
        self.DelayRemoved = True
        t[0] = '__delayFunc__({}, {})'.format(t[3], t[5])

    def p_function(self, t):
        '''Func : LPAREN ArgList RPAREN
                | NAME LPAREN ArgList RPAREN
                | NAME LPAREN RPAREN
        '''
        # this is to match NAME() which as far as I know is unique to object __calls__
        # as well as differentiate between bracketed functions and expressions:
        # func( S1 ) and ( S/S05 )
        if len(t) == 4:
            if t[1] == '(':
                t[0] = t[1] + t[2] + t[3]
            else:
                t[0] = self.name_prefix + t[1] + t[2] + t[3]
        # convert root(degree,<expr>) to pow(<expr>, 1/degree)
        elif t[1].strip() == 'root':
            t[1] = self.MathmlToNumpy_funcs[t[1]]
            t[3] = '{}, {}'.format(
                t[3][t[3].index(',') + 1 :], 1.0 / float(t[3][: t[3].index(',')]),
            )
            t[0] = t[1] + t[2] + t[3] + t[4]
        elif t[1].strip() in self.MathmlToNumpy_funcs:
            if self.MathmlToNumpy_funcs[t[1]] is None:
                self.SymbolErrors.append(t[1])
                print('\nFunction \"{}\" not supported by PySCeS'.format(t[1]))
                t[0] = 'unknown_function_' + t[1] + t[2] + t[3] + t[4]
            else:
                try:
                    t[0] = self.MathmlToNumpy_funcs[t[1]] + t[2] + t[3] + t[4]
                except Exception as EX:
                    print('Function Parse error 1 (please report!)\n', EX)
            self.ModelUsesNumpyFuncs = True
        else:
            # t[0] = t[1] + t[2] + t[3]
            # or a numpy fucntion
            if t[1][:6] == 'numpy.' or t[1][:5] == 'math.' or t[1][:9] == 'operator.':
                t[0] = t[1] + t[2] + t[3] + t[4]
            else:
                # assume some arbitrary function definition
                t[0] = self.name_prefix + t[1] + t[2] + t[3] + t[4]
                # add to list of functions
                if t[1] not in self.functions:
                    self.functions.append(t[1])

    # adapted from Andrew Dalke's GardenSnake
    # http://www.dalkescientific.com/writings/diary/GardenSnake.py
    # function arguments f(x,y,z)
    def p_arglist(self, t):
        '''ArgList : Expression
                   | ArgList COMMA Expression'''
        try:
            if len(t) == 2:
                t[0] = t[1]
            elif len(t) == 4:
                t[0] = t[1] + t[2] + t[3]
        except Exception as EX:
            print('Function ArgList error (please report!)\n', EX)

    # expression list f(g(x,y); g(a,b))
    def p_arglist_semicol(self, t):
        '''ArgListSemiCol : Expression
                          | ArgListSemiCol COMMA Expression'''
        try:
            if len(t) == 2:
                t[0] = t[1]
            elif len(t) == 4:
                t[0] = t[1] + '; ' + t[3]
        except Exception as EX:
            print('Function ArgList error (please report!)\n', EX)

    def buildparser(self, **kwargs):
        """Build the yacc parser from this object's grammar rules."""
        self.parser = yacc.yacc(module=self, **kwargs)

    def parse(self, data):
        """Parse *data*, resetting all per-run state, and store the result in
        :attr:`output`.  Raises AssertionError on lexer/parser failure."""
        self.ParseErrors = []
        self.LexErrors = []
        self.SymbolErrors = []
        self.names = []
        self.functions = []
        self.input = data
        self.ParseOK = True
        self.LexOK = True
        self.piecewises = {}
        self.DelayRemoved = False
        self.output = self.parser.parse(data)
        ## assert len(self.SymbolErrors) == 0, '\nUndefined symbols:\n%s' % self.SymbolErrors
        ## if len(self.SymbolErrors) != 0:
        ## print '\nUndefined symbols:\n%s' % self.SymbolErrors
        assert self.LexOK, '\nLexer Failure:\n{}'.format(self.LexErrors)
        assert self.ParseOK, '\nParser Failure:\n{}'.format(self.ParseErrors)
        self._runCount += 1
        self.SymbolReplacements = None
        self.FunctionReplacements = None
        if self._runCount > self._runCountmax:
            # Fixed: was ``self._runCount == 0`` -- a no-op comparison, so the
            # counter never reset and lex/yacc were reloaded on EVERY parse
            # once the maximum had been exceeded.
            self._runCount = 0
            # we're back !!!
            reload(lex)
            reload(yacc)
| bgoli/pysces | pysces/core2/InfixParser.py | Python | bsd-3-clause | 18,506 | [
"PySCeS"
] | 22dc76a4e755186afa81bd2ad729b2e97aa40aaa4fc922e54f913c617024fba0 |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska
This class holds the LatinHyperCube algorithm based on McKay et al. (1979):
McKay, M. D., Beckman, R. J. and Conover, W. J.: Comparison of Three Methods for Selecting Values of Input Variables in the Analysis of Output from a Computer Code, Technometrics, 21(2), 239–245, doi:10.1080/00401706.1979.10489755, 1979.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
from .. import database
import numpy as np
import random
import time
class lhs(_algorithm):
    '''
    Implements the LatinHyperCube algorithm.

    Input
    ----------
    spot_setup: class
        model: function
            Should be callable with a parameter combination of the parameter-function
            and return an list of simulation results (as long as evaluation list)
        parameter: function
            When called, it should return a random parameter combination. Which can
            be e.g. uniform or Gaussian
        objectivefunction: function
            Should return the objectivefunction for a given list of a model simulation and
            observation.
        evaluation: function
            Should return the true values as return by the model.

    dbname: str
        * Name of the database where parameter, objectivefunction value and simulation results will be saved.

    dbformat: str
        * ram: fast suited for short sampling time. no file will be created and results are saved in an array.
        * csv: A csv file will be created, which you can import afterwards.

    parallel: str
        * seq: Sequentiel sampling (default): Normal iterations on one core of your cpu.
        * mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).

    save_sim: boolean
        *True:  Simulation results will be saved
        *False: Simulationt results will not be saved
    '''

    def __init__(self, spot_setup, dbname=None, dbformat=None, parallel='seq', save_sim=True):
        _algorithm.__init__(self, spot_setup, dbname=dbname, dbformat=dbformat,
                            parallel=parallel, save_sim=save_sim, dbinit=False)

    def sample(self, repetitions):
        """
        Samples from the LatinHypercube algorithm.

        Input
        ----------
        repetitions: int
            Maximum number of runs.
        """
        print('Creating LatinHyperCube Matrix')
        # Get the names of the parameters to analyse
        names = self.parameter()['name']
        # Define the jump size between the parameter
        segment = 1 / float(repetitions)
        # Get the minimum and maximum value for each parameter from the distribution
        parmin, parmax = self.parameter()['minbound'], self.parameter()['maxbound']
        # Create a Matrix to store the parameter sets
        Matrix = np.empty((repetitions, len(parmin)))
        # Create the LatinHypercube Matrix as in McKay et al. (1979):
        # one random point per equal-probability segment, per parameter ...
        for i in range(int(repetitions)):
            segmentMin = i * segment
            pointInSegment = segmentMin + (random.random() * segment)
            parset = pointInSegment * (parmax - parmin) + parmin
            Matrix[i] = parset
        # ... then decouple the parameters by shuffling each column
        for i in range(len(names)):
            random.shuffle(Matrix[:, i])
        print('Start sampling')
        starttime = time.time()
        intervaltime = starttime
        # initialised here so the duration report below cannot hit an
        # unbound local if the sampling loop never runs
        acttime = starttime
        # A generator that produces the parameters.
        # Fixed: previously ``xrange(int(repetitions)-1)`` -- ``xrange`` does
        # not exist on Python 3, and the ``-1`` silently dropped the last of
        # the ``repetitions`` Latin-Hypercube rows built above.
        param_generator = ((rep, list(Matrix[rep])) for rep in range(int(repetitions)))
        firstcall = True
        for rep, randompar, simulations in self.repeat(param_generator):
            # Calculate the objective function
            like = self.objectivefunction(evaluation=self.evaluation, simulation=simulations)
            if firstcall:
                # lazily initialise the database with the first result's shape
                parnames = self.parameter()['name']
                self.initialize_database(randompar, parnames, simulations, like)
                firstcall = False
            self.status(rep, like, randompar)
            # Save everything in the database
            self.datawriter.save(like, randompar, simulations=simulations)
            # Progress bar
            acttime = time.time()
            # Refresh progressbar every second
            if acttime - intervaltime >= 2:
                text = '%i of %i (best like=%g)' % (rep, repetitions, self.status.objectivefunction)
                print(text)
                intervaltime = time.time()
        self.repeat.terminate()
        try:
            self.datawriter.finalize()
        except AttributeError:  # Happens if no database was assigned
            pass
        print('End of sampling')
        text = '%i of %i (best like=%g)' % (self.status.rep, repetitions, self.status.objectivefunction)
        print(text)
        print('Best parameter set')
        print(self.status.params)
        text = 'Duration:' + str(round((acttime - starttime), 2)) + ' s'
        print(text)
| gitporst/spotpy | spotpy/algorithms/lhs.py | Python | mit | 5,271 | [
"Gaussian"
] | b5bfc23c1df2c71eee3be339030bbeb605ff5fb8b1474141f217df8f07fad4f9 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageWrapPad(SimpleVTKClassModuleBase):
    """Thin DeVIDE module wrapper around ``vtk.vtkImageWrapPad``."""

    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self,
            module_manager,
            vtk.vtkImageWrapPad(),
            'Processing.',
            ('vtkImageData',),
            ('vtkImageData',),
            replaceDoc=True,
            inputFunctions=None,
            outputFunctions=None,
        )
| nagyistoce/devide | modules/vtk_basic/vtkImageWrapPad.py | Python | bsd-3-clause | 487 | [
"VTK"
] | 8b3e7e56dea9eb7af11bddbabb62ce61a512314c4b7a134d5e99474ce96e12ac |
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
# check checkpointing for Regression with IRLSM.
def testGLMCheckpointGaussianLambdaSearch():
    """Check GLM checkpoint restart (gaussian family, IRLSM, lambda search)
    for both cold_start=False and cold_start=True."""
    print("Checking checkpoint for regression with lambda search ....")
    h2o_data = h2o.import_file(
        path=pyunit_utils.locate("smalldata/glm_test/gaussian_20cols_10000Rows.csv"))
    enum_columns = ["C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "C10"]
    for cname in enum_columns:
        # NOTE(review): this assignment is a no-op; given the name
        # ``enum_columns`` it was presumably meant to be
        # ``h2o_data[cname].asfactor()`` to make these columns categorical --
        # confirm before changing behavior.
        h2o_data[cname] = h2o_data[cname]
    myY = "C21"            # response column
    myX = list(range(20))  # predictor column indices C1..C20
    print("Setting cold_start to false")
    buildModelCheckpointing(h2o_data, myX, myY, "gaussian", "irlsm", False)
    print("Setting cold_start to true")
    buildModelCheckpointing(h2o_data, myX, myY, "gaussian", "irlsm", True)
def buildModelCheckpointing(training_frame, x_indices, y_index, family, solver, cold_start):
    """Train a short GLM, resume it from a checkpoint, and assert that the
    resumed coefficients match one uninterrupted training run."""
    train, valid = training_frame.split_frame(ratios=[0.9], seed=12345)
    # short run: only 3 iterations, to be continued from a checkpoint
    short_model = H2OGeneralizedLinearEstimator(
        family=family, max_iterations=3, solver=solver, lambda_search=True,
        cold_start=cold_start)
    short_model.train(training_frame=train, x=x_indices, y=y_index, validation_frame=valid)
    # resume training from the short model's checkpoint
    resumed = H2OGeneralizedLinearEstimator(
        family=family, checkpoint=short_model.model_id, solver=solver,
        lambda_search=True, cold_start=cold_start)
    resumed.train(training_frame=train, x=x_indices, y=y_index, validation_frame=valid)
    # reference: one uninterrupted run with the same settings
    reference = H2OGeneralizedLinearEstimator(
        family=family, solver=solver, lambda_search=True, cold_start=cold_start)
    reference.train(training_frame=train, x=x_indices, y=y_index, validation_frame=valid)
    pyunit_utils.assertEqualCoeffDicts(resumed.coef(), reference.coef(), tol=1e-6)
if __name__ == "__main__":
    # executed directly: run inside the standalone pyunit harness
    pyunit_utils.standalone_test(testGLMCheckpointGaussianLambdaSearch)
else:
    # imported by the test runner: execute immediately
    testGLMCheckpointGaussianLambdaSearch()
| h2oai/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_7890_glm_checkpoint_IRLSM_gaussian_lambda_search.py | Python | apache-2.0 | 2,128 | [
"Gaussian"
] | 7c10cf96ceda1481448eafebacbb5a04c39bdd34d04446b2223353456c285476 |
""" A set of common tools to be used in pilot commands
"""
__RCSID__ = '$Id$'
import sys
import time
import os
import pickle
import getopt
import imp
import urllib2
import signal
def printVersion(log):
  """Log how the pilot was invoked and which version is running.

  Best effort: the command-line arguments are also pickled into a
  ``<script>.run`` file next to the script for later inspection; failure
  to write that file is ignored.
  """
  log.info("Running " + " ".join(sys.argv))
  try:
    with open(sys.argv[0] + ".run", "w") as dumpFile:
      pickle.dump(sys.argv[1:], dumpFile)
  except OSError:
    pass
  log.info("Version " + __RCSID__)
def pythonPathCheck():
  """Remove every PYTHONPATH directory from sys.path (deduplicated once per
  occurrence) so the pilot runs against its own installation only.

  Any failure is printed with diagnostic context and re-raised.
  """
  try:
    os.umask(18)  # 022: files created from here on are group/world readable
    pythonpath = os.getenv('PYTHONPATH', '').split(':')
    print 'Directories in PYTHONPATH:', pythonpath
    for p in pythonpath:
      if p == '':
        continue
      try:
        if os.path.normpath(p) in sys.path:
          # In case a given directory is twice in PYTHONPATH it has to be removed only once
          sys.path.remove(os.path.normpath(p))
      except Exception as x:
        print x
        print "[EXCEPTION-info] Failing path:", p, os.path.normpath(p)
        print "[EXCEPTION-info] sys.path:", sys.path
        raise x
  except Exception as x:
    print x
    print "[EXCEPTION-info] sys.executable:", sys.executable
    print "[EXCEPTION-info] sys.version:", sys.version
    print "[EXCEPTION-info] os.uname():", os.uname()
    raise x
def alarmTimeoutHandler(*args):
  # SIGALRM handler installed by retrieveUrlTimeout(); the literal message
  # 'Timeout' is what the caller's except-clause compares against.
  raise Exception('Timeout')
def retrieveUrlTimeout(url, fileName, log, timeout=0):
  """
  Retrieve remote url to local file, with timeout wrapper.

  :param url: URL to download
  :param fileName: if set, data is written to ``fileName + '-local'`` and
                   True is returned on success; if empty, the downloaded
                   data itself is returned
  :param log: logger object (used for error reporting)
  :param timeout: seconds before a SIGALRM aborts the transfer (0 = none)
  :return: True / downloaded data on success, False on known failures
  """
  urlData = ''
  if timeout:
    signal.signal(signal.SIGALRM, alarmTimeoutHandler)
    # set timeout alarm (with 5 s of slack on top of the requested timeout)
    signal.alarm(timeout + 5)
  try:
    remoteFD = urllib2.urlopen(url)
    expectedBytes = 0
    # Sometimes repositories do not return Content-Length parameter
    try:
      expectedBytes = long(remoteFD.info()['Content-Length'])
    except Exception as x:
      expectedBytes = 0
    data = remoteFD.read()
    if fileName:
      with open(fileName + '-local', "wb") as localFD:
        localFD.write(data)
    else:
      urlData += data
    remoteFD.close()
    # size check is only meaningful when the server reported a length
    if len(data) != expectedBytes and expectedBytes > 0:
      log.error('URL retrieve: expected size does not match the received one')
      return False
    if timeout:
      signal.alarm(0)
    if fileName:
      return True
    else:
      return urlData
  except urllib2.HTTPError as x:
    # NOTE(review): for HTTP errors other than 404 this falls through and
    # implicitly returns None with the alarm still armed -- confirm intended.
    if x.code == 404:
      log.error("URL retrieve: %s does not exist" % url)
      if timeout:
        signal.alarm(0)
      return False
  except urllib2.URLError:
    log.error('Timeout after %s seconds on transfer request for "%s"' % (str(timeout), url))
    return False
  except Exception as x:
    # the alarm handler raises Exception('Timeout'); this string comparison
    # against the exception object is how it is detected here
    if x == 'Timeout':
      log.error('Timeout after %s seconds on transfer request for "%s"' % (str(timeout), url))
    if timeout:
      signal.alarm(0)
    raise x
class ObjectLoader(object):
  """ Simplified class for loading objects from a DIRAC installation.

      Example:

        ol = ObjectLoader()
        object, modulePath = ol.loadObject( 'pilot', 'LaunchAgent' )
  """

  def __init__(self, baseModules, log):
    """ init

    :param baseModules: list of root module names to search, in order
    :param log: logger object
    """
    self.__rootModules = baseModules
    self.log = log

  def loadModule(self, modName, hideExceptions=False):
    """ Auto search which root module has to be used

    Returns (module, parentPath) or (None, None).
    """
    for rootModule in self.__rootModules:
      impName = modName
      if rootModule:
        impName = "%s.%s" % (rootModule, impName)
      self.log.debug("Trying to load %s" % impName)
      module, parentPath = self.__recurseImport(impName, hideExceptions=hideExceptions)
      # Error. Something cannot be imported. Return error
      if module is None:
        return None, None
      # Huge success!
      else:
        return module, parentPath
      # NOTE(review): both branches return on the first iteration, so only the
      # first root module is ever tried -- confirm whether the loop should
      # instead continue on failure.
    # Return nothing found
    return None, None

  def __recurseImport(self, modName, parentModule=None, hideExceptions=False):
    """ Internal function to load modules

    Recursively imports dotted names one component at a time using ``imp``.
    """
    if isinstance(modName, basestring):
      modName = modName.split('.')
    try:
      if parentModule:
        impData = imp.find_module(modName[0], parentModule.__path__)
      else:
        impData = imp.find_module(modName[0])
      impModule = imp.load_module(modName[0], *impData)
      if impData[0]:
        impData[0].close()
    except ImportError as excp:
      # a plain "module not found" for this component is not an error worth logging
      if str(excp).find("No module named %s" % modName[0]) == 0:
        return None, None
      errMsg = "Can't load %s in %s" % (".".join(modName), parentModule.__path__[0])
      if not hideExceptions:
        self.log.exception(errMsg)
      return None, None
    if len(modName) == 1:
      # NOTE(review): parentModule is None when a single-component name is
      # passed in directly, which would raise AttributeError here -- callers
      # appear to always pass dotted names; confirm.
      return impModule, parentModule.__path__[0]
    return self.__recurseImport(modName[1:], impModule,
                                hideExceptions=hideExceptions)

  def loadObject(self, package, moduleName, command):
    """ Load an object from inside a module

    Returns (object, path-to-module) or (None, None).
    """
    loadModuleName = '%s.%s' % (package, moduleName)
    module, parentPath = self.loadModule(loadModuleName)
    if module is None:
      return None, None
    try:
      commandObj = getattr(module, command)
      return commandObj, os.path.join(parentPath, moduleName)
    except AttributeError as e:
      self.log.error('Exception: %s' % str(e))
      return None, None
def getCommand(params, commandName, log):
  """ Get an instantiated command object for execution.
      Commands are looked in the following modules in the order:

       1. <CommandExtension>Commands
       2. pilotCommands
       3. <Extension>.WorkloadManagementSystem.PilotAgent.<CommandExtension>Commands
       4. <Extension>.WorkloadManagementSystem.PilotAgent.pilotCommands
       5. DIRAC.WorkloadManagementSystem.PilotAgent.<CommandExtension>Commands
       6. DIRAC.WorkloadManagementSystem.PilotAgent.pilotCommands

      Note that commands in 3.-6. can only be used of the the DIRAC installation
      has been done. DIRAC extensions are taken from -e ( --extraPackages ) option
      of the pilot script.

      :return: (command instance, module name or path) or (None, None)
  """
  extensions = params.commandExtensions
  modules = [m + 'Commands' for m in extensions + ['pilot']]
  commandObject = None

  # Look for commands in the modules in the current directory first
  for module in modules:
    try:
      impData = imp.find_module(module)
      commandModule = imp.load_module(module, *impData)
      commandObject = getattr(commandModule, commandName)
    except Exception as _e:
      # best effort: missing local module/attribute just falls through
      pass
    if commandObject:
      return commandObject(params), module

  if params.diracInstalled:
    diracExtensions = []
    for ext in params.extensions:
      # normalize extension names to the <Name>DIRAC convention
      if not ext.endswith('DIRAC'):
        diracExtensions.append(ext + 'DIRAC')
      else:
        diracExtensions.append(ext)
    diracExtensions += ['DIRAC']
    ol = ObjectLoader(diracExtensions, log)
    for module in modules:
      commandObject, modulePath = ol.loadObject('WorkloadManagementSystem.PilotAgent',
                                                module,
                                                commandName)
      if commandObject:
        return commandObject(params), modulePath

  # No command could be instantiated
  return None, None
class Logger(object):
  """ Basic logger object, for use inside the pilot. Just using print.
  """

  def __init__(self, name='Pilot', debugFlag=False, pilotOutput='pilot.out'):
    """
    :param name: tag shown in each log line
    :param debugFlag: when True, debug() messages are emitted
    :param pilotOutput: file every message is also appended to
    """
    self.debugFlag = debugFlag
    self.name = name
    self.out = pilotOutput

  def __outputMessage(self, msg, level, header):
    # NOTE(review): when self.out is empty/None the message is dropped
    # entirely (not even printed) -- confirm this is intended.
    if self.out:
      with open(self.out, 'a') as outputFile:
        # multi-line messages get one timestamped header per line
        for _line in msg.split("\n"):
          if header:
            outLine = "%s UTC %s [%s] %s" % (time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()),
                                             level,
                                             self.name,
                                             _line)
            print outLine
            if self.out:
              outputFile.write(outLine + '\n')
          else:
            print _line
            outputFile.write(_line + '\n')

    sys.stdout.flush()

  def setDebug(self):
    # enable emission of debug() messages
    self.debugFlag = True

  def debug(self, msg, header=True):
    if self.debugFlag:
      self.__outputMessage(msg, "DEBUG", header)

  def error(self, msg, header=True):
    self.__outputMessage(msg, "ERROR", header)

  def warn(self, msg, header=True):
    self.__outputMessage(msg, "WARN", header)

  def info(self, msg, header=True):
    self.__outputMessage(msg, "INFO", header)
class CommandBase(object):
  """ CommandBase is the base class for every command in the pilot commands toolbox
  """

  def __init__(self, pilotParams, dummy=''):
    """ c'tor

        Defines the logger and the pilot parameters.

    :param pilotParams: PilotParams instance shared by all commands
    :param dummy: unused, kept for signature compatibility
    """
    self.pp = pilotParams
    self.log = Logger(self.__class__.__name__)
    self.debugFlag = False
    # honour the pilot's -d/--debug command-line flag
    for o, _ in self.pp.optList:
      if o == '-d' or o == '--debug':
        self.log.setDebug()
        self.debugFlag = True
    self.log.debug("\n\n Initialized command %s" % self.__class__)

  def executeAndGetOutput(self, cmd, environDict=None):
    """ Execute a command on the worker node and get the output

    :return: tuple (returnCode, stdout data) on success
    """
    self.log.info("Executing command %s" % cmd)
    try:
      # spawn new processes, connect to their input/output/error pipes, and obtain their return codes.
      import subprocess
      _p = subprocess.Popen("%s" % cmd, shell=True, env=environDict, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, close_fds=False)

      # standard output
      outData = _p.stdout.read().strip()
      # NOTE(review): outData is a string, so this loop iterates character by
      # character and echoes a newline after each one -- presumably a
      # line-by-line echo was intended; confirm before changing.
      for line in outData:
        sys.stdout.write(line)
        sys.stdout.write('\n')

      for line in _p.stderr:
        sys.stdout.write(line)
        sys.stdout.write('\n')

      # return code
      returnCode = _p.wait()
      self.log.debug("Return code of %s: %d" % (cmd, returnCode))

      return (returnCode, outData)
    except ImportError:
      # NOTE(review): returns None implicitly here, which callers unpacking a
      # tuple would trip over -- confirm intended.
      self.log.error("Error importing subprocess")

  def exitWithError(self, errorCode):
    """ Wrapper around sys.exit()

    Dumps the process tree before terminating with *errorCode*.
    """
    self.log.info("List of child processes of current PID:")
    retCode, _outData = self.executeAndGetOutput("ps --forest -o pid,%%cpu,%%mem,tty,stat,time,cmd -g %d" % os.getpid())
    if retCode:
      self.log.error("Failed to issue ps [ERROR %d] " % retCode)
    sys.exit(errorCode)
class PilotParams(object):
""" Class that holds the structure with all the parameters to be used across all the commands
"""
MAX_CYCLES = 10
def __init__(self):
""" c'tor
param names and defaults are defined here
"""
self.rootPath = os.getcwd()
self.originalRootPath = os.getcwd()
self.pilotRootPath = os.getcwd()
self.workingDir = os.getcwd()
self.optList = {}
self.keepPythonPath = False
self.debugFlag = False
self.local = False
self.commandExtensions = []
self.commands = ['GetPilotVersion', 'CheckWorkerNode', 'InstallDIRAC', 'ConfigureBasics', 'CheckCECapabilities',
'CheckWNCapabilities', 'ConfigureSite', 'ConfigureArchitecture', 'ConfigureCPURequirements',
'LaunchAgent']
self.extensions = []
self.tags = []
self.reqtags = []
self.site = ""
self.setup = ""
self.configServer = ""
self.installation = ""
self.ceName = ""
self.ceType = ''
self.queueName = ""
self.platform = ""
# in case users want to specify the max number of processors requested, per pilot
self.maxNumberOfProcessors = 0
self.minDiskSpace = 2560 # MB
self.pythonVersion = '27'
self.userGroup = ""
self.userDN = ""
self.maxCycles = self.MAX_CYCLES
self.flavour = 'DIRAC'
self.gridVersion = ''
self.pilotReference = ''
self.releaseVersion = ''
self.releaseProject = ''
self.gateway = ""
self.useServerCertificate = False
self.pilotScriptName = ''
self.genericOption = ''
# DIRAC client installation environment
self.diracInstalled = False
self.diracExtensions = []
# Some commands can define environment necessary to execute subsequent commands
self.installEnv = os.environ
# If DIRAC is preinstalled this file will receive the updates of the local configuration
self.localConfigFile = ''
self.executeCmd = False
self.configureScript = 'dirac-configure'
self.architectureScript = 'dirac-platform'
self.certsLocation = '%s/etc/grid-security' % self.workingDir
self.pilotCFGFile = 'pilot.json'
self.pilotCFGFileLocation = 'http://diracproject.web.cern.ch/diracproject/configs/'
# Parameters that can be determined at runtime only
self.queueParameters = {} # from CE description
self.jobCPUReq = 900 # HS06s, here just a random value
# Pilot command options
self.cmdOpts = (('b', 'build', 'Force local compilation'),
('d', 'debug', 'Set debug flag'),
('e:', 'extraPackages=', 'Extra packages to install (comma separated)'),
('E:', 'commandExtensions=', 'Python module with extra commands'),
('X:', 'commands=', 'Pilot commands to execute commands'),
('g:', 'grid=', 'lcg tools package version'),
('h', 'help', 'Show this help'),
('i:', 'python=', 'Use python<26|27> interpreter'),
('k', 'keepPP', 'Do not clear PYTHONPATH on start'),
('l:', 'project=', 'Project to install'),
('p:', 'platform=', 'Use <platform> instead of local one'),
('m:', 'maxNumberOfProcessors=',
'specify a max number of processors to use'),
('u:', 'url=', 'Use <url> to download tarballs'),
('r:', 'release=', 'DIRAC release to install'),
('n:', 'name=', 'Set <Site> as Site Name'),
('D:', 'disk=', 'Require at least <space> MB available'),
('M:', 'MaxCycles=', 'Maximum Number of JobAgent cycles to run'),
('N:', 'Name=', 'CE Name'),
('Q:', 'Queue=', 'Queue name'),
('y:', 'CEType=', 'CE Type (normally InProcess)'),
('S:', 'setup=', 'DIRAC Setup to use'),
('C:', 'configurationServer=', 'Configuration servers to use'),
('G:', 'Group=', 'DIRAC Group to use'),
('O:', 'OwnerDN', 'Pilot OwnerDN (for private pilots)'),
('U', 'Upload', 'Upload compiled distribution (if built)'),
('V:', 'installation=', 'Installation configuration file'),
('W:', 'gateway=', 'Configure <gateway> as DIRAC Gateway during installation'),
('s:', 'section=', 'Set base section for relative parsed options'),
('o:', 'option=', 'Option=value to add'),
('c', 'cert', 'Use server certificate instead of proxy'),
('C:', 'certLocation=', 'Specify server certificate location'),
('L:', 'pilotCFGLocation=', 'Specify pilot CFG location'),
('F:', 'pilotCFGFile=', 'Specify pilot CFG file'),
('R:', 'reference=', 'Use this pilot reference'),
('x:', 'execute=', 'Execute instead of JobAgent'),
('t:', 'tag=', 'extra tags for resource description'),
('', 'requiredTag=', 'extra required tags for resource description')
)
self.__initOptions()
def __initOptions(self):
""" Parses and interpret options on the command line
"""
self.optList, __args__ = getopt.getopt(sys.argv[1:],
"".join([opt[0] for opt in self.cmdOpts]),
[opt[1] for opt in self.cmdOpts])
for o, v in self.optList:
if o == '-E' or o == '--commandExtensions':
self.commandExtensions = v.split(',')
elif o == '-X' or o == '--commands':
self.commands = v.split(',')
elif o == '-e' or o == '--extraPackages':
self.extensions = v.split(',')
elif o == '-n' or o == '--name':
self.site = v
elif o == '-N' or o == '--Name':
self.ceName = v
elif o == '-y' or o == '--CEType':
self.ceType = v
elif o == '-Q' or o == '--Queue':
self.queueName = v
elif o == '-R' or o == '--reference':
self.pilotReference = v
elif o == '-k' or o == '--keepPP':
self.keepPythonPath = True
elif o == '-d' or o == '--debug':
self.debugFlag = True
elif o in ('-S', '--setup'):
self.setup = v
elif o in ('-C', '--configurationServer'):
self.configServer = v
elif o in ('-G', '--Group'):
self.userGroup = v
elif o in ('-x', '--execute'):
self.executeCmd = v
elif o in ('-O', '--OwnerDN'):
self.userDN = v
elif o in ('-V', '--installation'):
self.installation = v
elif o == '-p' or o == '--platform':
self.platform = v
elif o == '-m' or o == '--maxNumberOfProcessors':
self.maxNumberOfProcessors = int(v)
elif o == '-D' or o == '--disk':
try:
self.minDiskSpace = int(v)
except ValueError:
pass
elif o == '-r' or o == '--release':
self.releaseVersion = v.split(',', 1)[0]
elif o in ('-l', '--project'):
self.releaseProject = v
elif o in ('-W', '--gateway'):
self.gateway = v
elif o == '-c' or o == '--cert':
self.useServerCertificate = True
elif o == '-C' or o == '--certLocation':
self.certsLocation = v
elif o == '-L' or o == '--pilotCFGLocation':
self.pilotCFGFileLocation = v
elif o == '-F' or o == '--pilotCFGFile':
self.pilotCFGFile = v
elif o == '-M' or o == '--MaxCycles':
try:
self.maxCycles = min(self.MAX_CYCLES, int(v))
except ValueError:
pass
elif o in ('-o', '--option'):
self.genericOption = v
elif o in ('-t', '--tag'):
self.tags.append(v)
elif o == '--requiredTag':
self.reqtags.append(v)
| chaen/DIRAC | WorkloadManagementSystem/PilotAgent/pilotTools.py | Python | gpl-3.0 | 18,093 | [
"DIRAC"
] | 9264a279e2e2e67d87b6064feeaa296877134ed7dd02c91fb75a5b7713a0909b |
"""Read and write ASE2's netCDF trajectory files."""
from ase.io.pupynere import NetCDFFile
from ase.atoms import Atoms
from ase.calculators.singlepoint import SinglePointCalculator
def read_netcdf(filename, index=-1):
nc = NetCDFFile(filename)
dims = nc.dimensions
vars = nc.variables
positions = vars['CartesianPositions']
numbers = vars['AtomicNumbers'][:]
pbc = vars['BoundaryConditions'][:]
cell = vars['UnitCell']
tags = vars['Tags'][:]
if not tags.any():
tags = None
magmoms = vars['MagneticMoments'][:]
if not magmoms.any():
magmoms = None
nimages = positions.shape[0]
attach_calculator = False
if 'PotentialEnergy' in vars:
energy = vars['PotentialEnergy']
attach_calculator = True
else:
energy = nimages * [None]
if 'CartesianForces' in vars:
forces = vars['CartesianForces']
attach_calculator = True
else:
forces = nimages * [None]
if 'Stress' in vars:
stress = vars['Stress']
attach_calculator = True
else:
stress = nimages * [None]
if isinstance(index, int):
indices = [index]
else:
indices = range(nimages)[index]
images = []
for i in indices:
atoms = Atoms(positions=positions[i],
numbers=numbers,
cell=cell[i],
pbc=pbc,
tags=tags, magmoms=magmoms)
if attach_calculator:
calc = SinglePointCalculator(atoms,
energy=energy[i],
forces=forces[i],
stress=stress[i]) # Fixme magmoms
atoms.set_calculator(calc)
images.append(atoms)
if isinstance(index, int):
return images[0]
else:
return images
class LOA:
def __init__(self, images):
self.set_atoms(images[0])
def __len__(self):
return len(self.atoms)
def set_atoms(self, atoms):
self.atoms = atoms
def GetPotentialEnergy(self):
return self.atoms.get_potential_energy()
def GetCartesianForces(self):
return self.atoms.get_forces()
def GetUnitCell(self):
return self.atoms.get_cell()
def GetAtomicNumbers(self):
return self.atoms.get_atomic_numbers()
def GetCartesianPositions(self):
return self.atoms.get_positions()
def GetBoundaryConditions(self):
return self.atoms.get_pbc()
def write_netcdf(filename, images):
from ASE.Trajectories.NetCDFTrajectory import NetCDFTrajectory
if not isinstance(images, (list, tuple)):
images = [images]
loa = LOA(images)
traj = NetCDFTrajectory(filename, loa)
for atoms in images:
loa.set_atoms(atoms)
traj.Update()
traj.Close()
| grhawk/ASE | tools/ase/io/netcdf.py | Python | gpl-2.0 | 2,930 | [
"ASE",
"NetCDF"
] | 286fa9fdbe9f5678bca65e9397f0b66f8a5059f68c5501c9759945b071b9b9b0 |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for inference_graph_exporter."""
from lingvo import model_registry
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import inference_graph_exporter
from lingvo.core import inference_graph_pb2
from lingvo.core import predictor
from lingvo.core import py_utils
from lingvo.core import test_utils
class DummyLegacyModel(base_model.BaseTask):
def Inference(self):
if py_utils.use_tpu():
raise NotImplementedError('TPU is not supported.')
with tf.name_scope('inference'):
feed1 = tf.placeholder(name='feed1_node', dtype=tf.float32, shape=[1])
fetch1 = tf.identity(feed1, name='fetch1_node')
return {
'default': (
py_utils.NestedMap({
'fetch1': fetch1,
'fetch_op': fetch1.op, # Tests that ops are supported.
}),
py_utils.NestedMap({
'feed1': feed1,
})),
'unused': (py_utils.NestedMap({}), py_utils.NestedMap({})),
}
@model_registry.RegisterSingleTaskModel
class DummyLegacyModelParams(base_model_params.SingleTaskModelParams):
@classmethod
def Test(cls):
p = base_input_generator.BaseSequenceInputGenerator.Params()
p.name = 'input'
return p
@classmethod
def Task(cls):
p = DummyLegacyModel.Params()
p.name = 'testing'
return p
class DummyModel(base_model.BaseTask):
def Inference(self):
with tf.name_scope('inference'):
feed1 = tf.placeholder(name='feed1_node', dtype=tf.float32, shape=[1])
fetch1 = tf.identity(feed1, name='fetch1_node')
inference_graph = inference_graph_pb2.InferenceGraph()
subgraph = inference_graph.subgraphs['default']
subgraph.feeds['feed1'] = feed1.name
subgraph.fetches['fetch1'] = fetch1.name
# Tests that ops are supported.
subgraph.fetches['fetch_op'] = fetch1.op.name
return inference_graph
@model_registry.RegisterSingleTaskModel
class DummyModelParams(base_model_params.SingleTaskModelParams):
@classmethod
def Test(cls):
p = base_input_generator.BaseSequenceInputGenerator.Params()
p.name = 'input'
return p
@classmethod
def Task(cls):
p = DummyModel.Params()
p.name = 'testing'
return p
class InferenceGraphExporterTest(test_utils.TestCase):
def testExportModelParamsWithSubgraphDict(self):
params = model_registry.GetParams('test.DummyLegacyModelParams', 'Test')
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(
params, subgraph_filter=['default'])
# Should populate subgraphs.
self.assertIn('default', inference_graph.subgraphs)
self.assertNotIn('unused', inference_graph.subgraphs)
subgraph = inference_graph.subgraphs['default']
self.assertIn('feed1', subgraph.feeds)
self.assertIn('fetch1', subgraph.fetches)
self.assertEqual(subgraph.feeds['feed1'], 'inference/feed1_node:0')
self.assertEqual(subgraph.fetches['fetch1'], 'inference/fetch1_node:0')
self.assertEqual(subgraph.fetches['fetch_op'], 'inference/fetch1_node')
def testSubgraphFilterNotValid(self):
params = model_registry.GetParams('test.DummyLegacyModelParams', 'Test')
with self.assertRaises(ValueError):
_ = inference_graph_exporter.InferenceGraphExporter.Export(
params, subgraph_filter=['not-present'])
def testExportModelParamsWithInferenceGraph(self):
params = model_registry.GetParams('test.DummyModelParams', 'Test')
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(
params)
# Should populate subgraphs.
self.assertIn('default', inference_graph.subgraphs)
subgraph = inference_graph.subgraphs['default']
self.assertIn('feed1', subgraph.feeds)
self.assertIn('fetch1', subgraph.fetches)
self.assertEqual(subgraph.feeds['feed1'], 'inference/feed1_node:0')
self.assertEqual(subgraph.fetches['fetch1'], 'inference/fetch1_node:0')
self.assertEqual(subgraph.fetches['fetch_op'], 'inference/fetch1_node')
def testExportModelDoesNotAffectFlagsOnException(self):
initial_flags = {k: tf.flags.FLAGS[k].value for k in tf.flags.FLAGS}
params = model_registry.GetParams('test.DummyLegacyModelParams', 'Test')
with self.assertRaises(NotImplementedError):
inference_graph_exporter.InferenceGraphExporter.Export(
params,
device_options=inference_graph_exporter.InferenceDeviceOptions(
device='tpu',
retain_device_placement=False,
var_options=None,
gen_init_op=True,
dtype_override=None,
fprop_dtype_override=None))
self.assertDictEqual(initial_flags,
{k: tf.flags.FLAGS[k].value for k in tf.flags.FLAGS})
class NoConstGuaranteeScopeTest(test_utils.TestCase):
def testNoConsting(self):
with inference_graph_exporter.ConstGuaranteeScope():
wp = py_utils.WeightParams(
shape=[1],
init=py_utils.WeightInit.Constant(0.0),
dtype=tf.float32,
collections=['v'])
v = py_utils.CreateVariable('v', wp)
self.assertEqual(tf.Tensor, type(v))
with inference_graph_exporter.NoConstGuaranteeScope():
with tf.variable_scope('', reuse=True):
v = py_utils.CreateVariable('v', wp)
self.assertIsInstance(v, tf.Variable)
class LinearModel(base_model.BaseTask):
"""A basic linear model."""
@classmethod
def Params(cls):
p = super().Params()
p.name = 'linear_model'
return p
def _CreateLayerVariables(self):
super()._CreateLayerVariables()
p = self.params
w = py_utils.WeightParams(
shape=[3],
init=py_utils.WeightInit.Gaussian(scale=1.0, seed=123456),
dtype=p.dtype)
b = py_utils.WeightParams(
shape=[],
init=py_utils.WeightInit.Gaussian(scale=1.0, seed=234567),
dtype=p.dtype)
self.CreateVariable('w', w)
self.CreateVariable('b', b)
def Inference(self):
"""Computes y = w^T x + b. Returns y and x, as outputs and inputs."""
# Add a dummy file def to the collection
filename = tf.convert_to_tensor(
'dummy.txt', tf.dtypes.string, name='asset_filepath')
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS,
filename)
with tf.name_scope('inference'):
x = tf.placeholder(dtype=tf.float32, name='input')
r = tf.random.stateless_uniform([3],
seed=py_utils.GenerateStepSeedPair(
self.params))
y = tf.reduce_sum((self.vars.w + r) * x) + self.vars.b
return {'default': ({'output': y}, {'input': x})}
class LinearModelTpu(LinearModel):
"""A basic linear model that runs inference on the TPU."""
def Inference(self):
"""Computes y = w^T x + b. Returns y and x, as outputs and inputs."""
with tf.name_scope('inference'):
x = tf.placeholder(dtype=tf.bfloat16, name='input')
def InferenceFn(x):
return tf.reduce_sum(self.vars.w * x) + self.vars.b
y = tf.tpu.rewrite(InferenceFn, [x])
return {'tpu': ({'output': y[0]}, {'input': x})}
@model_registry.RegisterSingleTaskModel
class LinearModelParams(base_model_params.SingleTaskModelParams):
@classmethod
def Test(cls):
p = base_input_generator.BaseSequenceInputGenerator.Params()
p.name = 'input'
return p
@classmethod
def Task(cls):
p = LinearModel.Params()
p.name = 'testing'
return p
@model_registry.RegisterSingleTaskModel
class LinearModelTpuParams(base_model_params.SingleTaskModelParams):
@classmethod
def Test(cls):
p = base_input_generator.BaseSequenceInputGenerator.Params()
p.name = 'input'
return p
@classmethod
def Task(cls):
p = LinearModelTpu.Params()
p.name = 'testing'
return p
@model_registry.RegisterSingleTaskModel
class LinearModelTpuParamsWithEma(base_model_params.SingleTaskModelParams):
@classmethod
def Test(cls):
p = base_input_generator.BaseSequenceInputGenerator.Params()
p.name = 'input'
return p
@classmethod
def Task(cls):
p = LinearModelTpu.Params()
p.name = 'testing'
p.train.ema_decay = 0.99
return p
class InferenceGraphExporterLinearModelTest(test_utils.TestCase):
def testExport(self):
"""Test basic export."""
params = model_registry.GetParams('test.LinearModelParams', 'Test')
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(
params, subgraph_filter=['default'], export_graph_collections=True)
self.assertIn('default', inference_graph.subgraphs)
self.assertEqual(1, len(inference_graph.asset_file_def))
# Check the GLOBAL_VARIABLES graph collection which is needed for
# eager to lift variables from a GraphDef.
self.assertIn('variables', inference_graph.collection_def)
def testExportFreezeDefault(self):
"""Test exporting frozen graph."""
params = model_registry.GetParams('test.LinearModelParams', 'Test')
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(
params, freeze_defaults=True, subgraph_filter=['default'])
self.assertIn('default', inference_graph.subgraphs)
# Test graphs are well-formed and importable.
with tf.Graph().as_default():
tf.import_graph_def(inference_graph.graph_def)
def testTpuBfloat16OverrideExport(self):
"""Test that we can export with tf.bfloat16 dtype."""
params = model_registry.GetParams('test.LinearModelTpuParams', 'Test')
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(
params,
subgraph_filter=['tpu'],
device_options=inference_graph_exporter.InferenceDeviceOptions(
device='tpu',
retain_device_placement=True,
var_options='ON_DEVICE',
gen_init_op=True,
dtype_override=tf.bfloat16,
fprop_dtype_override=None))
self.assertIn('tpu', inference_graph.subgraphs)
def testTpuBfloat16OverrideExportWithEma(self):
"""Test that we can export with tf.bfloat16 dtype."""
params = model_registry.GetParams('test.LinearModelTpuParamsWithEma',
'Test')
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(
params,
subgraph_filter=['tpu'],
device_options=inference_graph_exporter.InferenceDeviceOptions(
device='tpu',
retain_device_placement=True,
var_options='ON_DEVICE',
gen_init_op=True,
dtype_override=tf.bfloat16,
fprop_dtype_override=None))
self.assertIn('tpu', inference_graph.subgraphs)
def testExportWithRandomSeeds(self):
"""Test the effect of setting random seeds on export."""
params = model_registry.GetParams('test.LinearModelParams', 'Test')
# Default -- use random_seed = None.
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(
params, subgraph_filter=['default'])
pred = predictor.Predictor(inference_graph)
[no_op_seed_1] = pred.Run(['output'], input=3)
[no_op_seed_2] = pred.Run(['output'], input=3)
self.assertNotEqual(no_op_seed_1, no_op_seed_2)
pred = predictor.Predictor(inference_graph)
[no_op_seed_3] = pred.Run(['output'], input=3)
self.assertNotEqual(no_op_seed_1, no_op_seed_3)
# Use a fixed random_seed.
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(
params, subgraph_filter=['default'], random_seed=1234)
pred = predictor.Predictor(inference_graph)
[fixed_op_seed_1] = pred.Run(['output'], input=3)
[fixed_op_seed_2] = pred.Run(['output'], input=3)
self.assertEqual(fixed_op_seed_1, fixed_op_seed_2)
pred = predictor.Predictor(inference_graph)
[fixed_op_seed_3] = pred.Run(['output'], input=3)
self.assertEqual(fixed_op_seed_1, fixed_op_seed_3)
# A different seed gives different results.
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(
params, subgraph_filter=['default'], random_seed=1235)
pred = predictor.Predictor(inference_graph)
[fixed_op_seed_4] = pred.Run(['output'], input=3)
self.assertNotEqual(fixed_op_seed_1, fixed_op_seed_4)
class GetOutputNamesTest(test_utils.TestCase):
def _TestGraph(self):
params = model_registry.GetParams('test.LinearModelParams', 'Test')
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(
params)
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(inference_graph.graph_def, name='')
return graph, inference_graph
def testDefault(self):
graph, inference_graph = self._TestGraph()
output_op_names = inference_graph_exporter.GetOutputOpNames(
graph, inference_graph)
self.assertEqual(output_op_names, [
# pyformat: disable
'inference/add_2',
'inference/input',
'testing/b/var',
'testing/b/var/Initializer/random_normal',
'testing/b/var/Initializer/random_normal/RandomStandardNormal',
'testing/b/var/Initializer/random_normal/mean',
'testing/b/var/Initializer/random_normal/mul',
'testing/b/var/Initializer/random_normal/shape',
'testing/b/var/Initializer/random_normal/stddev',
'testing/w/var',
'testing/w/var/Initializer/random_normal',
'testing/w/var/Initializer/random_normal/RandomStandardNormal',
'testing/w/var/Initializer/random_normal/mean',
'testing/w/var/Initializer/random_normal/mul',
'testing/w/var/Initializer/random_normal/shape',
'testing/w/var/Initializer/random_normal/stddev',
# pyformat: enable
])
def testNoPreserveColocationNodes(self):
graph, inference_graph = self._TestGraph()
output_op_names = inference_graph_exporter.GetOutputOpNames(
graph, inference_graph, preserve_colocation_nodes=False)
self.assertEqual(output_op_names, [
# pyformat: disable
'inference/add_2',
'inference/input',
# pyformat: enable
])
def testPreserveSaverRestoreNodes(self):
graph, inference_graph = self._TestGraph()
output_op_names = inference_graph_exporter.GetOutputOpNames(
graph,
inference_graph,
preserve_colocation_nodes=False,
preserve_saver_restore_nodes=True)
self.assertEqual(output_op_names, [
# pyformat: disable
'inference/add_2',
'inference/input',
'save/Const',
'save/restore_all',
# pyformat: enable
])
def testPreserveExtraOps(self):
graph, inference_graph = self._TestGraph()
output_op_names = inference_graph_exporter.GetOutputOpNames(
graph,
inference_graph,
preserve_colocation_nodes=False,
preserve_extra_ops=[
'init_all_tables', 'init_all_variables', 'tpu_init_op'
])
self.assertEqual(output_op_names, [
# pyformat: disable
'inference/add_2',
'inference/input',
'init_all_tables',
'init_all_variables',
# pyformat: enable
])
def testPreserveSaverNodesAndExtraOps(self):
graph, inference_graph = self._TestGraph()
output_op_names = inference_graph_exporter.GetOutputOpNames(
graph,
inference_graph,
preserve_colocation_nodes=False,
preserve_saver_restore_nodes=True,
preserve_extra_ops=[
'init_all_tables', 'init_all_variables', 'tpu_init_op'
])
self.assertEqual(output_op_names, [
# pyformat: disable
'inference/add_2',
'inference/input',
'init_all_tables',
'init_all_variables',
'save/Const',
'save/restore_all',
# pyformat: enable
])
if __name__ == '__main__':
tf.test.main()
| tensorflow/lingvo | lingvo/core/inference_graph_exporter_test.py | Python | apache-2.0 | 16,688 | [
"Gaussian"
] | b449737446b4dd774bc3418b6180cf953a1e38a826cdd232e735cdc502bfca96 |
# -*- coding: utf-8 -*-
"""Warnings for old TSV conversion module."""
import warnings
from ..triples import to_edgelist
from ..triples import to_triples_file as to_tsv
__all__ = [
"to_tsv",
"to_edgelist",
]
warnings.warn(
"""Use pybel.io.triples module instead. Changes in PyBEL v0.15.0:
- pybel.to_tsv renamed to pybel.to_triples_file
Will be removed in PyBEL v0.16.*
""",
DeprecationWarning,
)
| pybel/pybel | src/pybel/io/tsv/__init__.py | Python | mit | 423 | [
"Pybel"
] | e9a3565a4c2b43aa24782e2f253898d4f89a836a5e39bb6c14828848220d469d |
from setuptools import setup
from argparse2tool import __version__
with open("README.rst") as fh:
readme = fh.read()
setup(
name="argparse2tool",
version=__version__,
description="Instrument for forming Galaxy XML and CWL tool descriptions from argparse arguments",
author="Helena Rasche, Anton Khodak",
author_email="hxr@hx42.org",
long_description=readme,
long_description_content_type="text/x-rst",
install_requires=["galaxyxml==0.4.6", "jinja2"],
url="https://github.com/hexylena/argparse2tool",
packages=[
"argparse2tool",
"argparse2tool.dropins.argparse",
"argparse2tool.dropins.click",
"argparse2tool.cmdline2gxml",
"argparse2tool.cmdline2cwl",
],
entry_points={
"console_scripts": ["argparse2tool = argparse2tool.check_path:main"]
},
classifiers=[
"Development Status :: 4 - Beta",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Environment :: Console",
"License :: OSI Approved :: Apache Software License",
],
include_package_data=True,
data_files=[("", ["LICENSE.TXT"])],
)
| erasche/argparse2tool | setup.py | Python | apache-2.0 | 1,174 | [
"Galaxy"
] | 1bb1dc108f26ffc5de47a790357dd9dd36ee69ed1bce993771d19e6694dd86f7 |
"""
StdoutBackend wrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import logging
import sys
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.ColoredBaseFormatter import ColoredBaseFormatter
class StdoutBackend(AbstractBackend):
"""
StdoutBackend is used to create an abstraction of the handler and the formatter concepts from logging.
Here, we gather a StreamHandler object and a BaseFormatter.
- StreamHandler is from the standard logging library: it is used to write log messages in a desired stream
so it needs a name: here it is stdout.
- ColorBaseFormatter is a custom Formatter object, created for DIRAC in order to get the appropriate display
with color.
You can find it in FrameworkSystem/private/standardLogging/Formatter
"""
def __init__(self, backendParams=None):
super(StdoutBackend, self).__init__(logging.StreamHandler, ColoredBaseFormatter, backendParams)
def _setHandlerParameters(self, backendParams=None):
"""
Get the handler parameters from the backendParams.
The keys of handlerParams should correspond to the parameter names of the associated handler.
The method should be overridden in every backend that needs handler parameters.
The method should be called before creating the handler object.
:param dict parameters: parameters of the backend. ex: {'FileName': file.log}
"""
self._handlerParams["stream"] = sys.stdout
| ic-hep/DIRAC | src/DIRAC/Resources/LogBackends/StdoutBackend.py | Python | gpl-3.0 | 1,650 | [
"DIRAC"
] | 15b8841040b06e0e5926ddc8fb1a886bcefd249f368c2592dca933dd535468c1 |
#!/usr/bin/python2.7
"""
Returns a bed-like translation of a CDS in which each record corresponds to
a single site in the CDS and includes additional fields for site degenaracy,
position ind CDS, and amino acid encoded.
usage: %prog nibdir genefile [options]
-o, --outfile=o: output file
-f, --format=f: format bed (default), or gtf|gff
-a, --allpositions: 1st, 2nd and 3rd positions are evaluated for degeneracy given the sequence at the other two positions. Many 1d sites in 1st codon positions become 2d sites when considered this way.
-n, --include_name: include the 'name' or 'id' field from the source file on every line of output
"""
import re
import sys
import os
import string
from bx.seq import nib
from bx.bitset import *
from bx.bitset_builders import *
from bx.bitset_utils import *
from bx.gene_reader import *
from bx.cookbook import doc_optparse
GENETIC_CODE = """
TTT (Phe/F)Phenylalanine
TTC (Phe/F)Phenylalanine
TTA (Leu/L)Leucine
TTG (Leu/L)Leucine, Start
TCT (Ser/S)Serine
TCC (Ser/S)Serine
TCA (Ser/S)Serine
TCG (Ser/S)Serine
TAT (Tyr/Y)Tyrosine
TAC (Tyr/Y)Tyrosine
TAA Ochre (Stop)
TAG Amber (Stop)
TGT (Cys/C)Cysteine
TGC (Cys/C)Cysteine
TGA Opal (Stop)
TGG (Trp/W)Tryptophan
CTT (Leu/L)Leucine
CTC (Leu/L)Leucine
CTA (Leu/L)Leucine
CTG (Leu/L)Leucine, Start
CCT (Pro/P)Proline
CCC (Pro/P)Proline
CCA (Pro/P)Proline
CCG (Pro/P)Proline
CAT (His/H)Histidine
CAC (His/H)Histidine
CAA (Gln/Q)Glutamine
CAG (Gln/Q)Glutamine
CGT (Arg/R)Arginine
CGC (Arg/R)Arginine
CGA (Arg/R)Arginine
CGG (Arg/R)Arginine
ATT (Ile/I)Isoleucine, Start2
ATC (Ile/I)Isoleucine
ATA (Ile/I)Isoleucine
ATG (Met/M)Methionine, Start1
ACT (Thr/T)Threonine
ACC (Thr/T)Threonine
ACA (Thr/T)Threonine
ACG (Thr/T)Threonine
AAT (Asn/N)Asparagine
AAC (Asn/N)Asparagine
AAA (Lys/K)Lysine
AAG (Lys/K)Lysine
AGT (Ser/S)Serine
AGC (Ser/S)Serine
AGA (Arg/R)Arginine
AGG (Arg/R)Arginine
GTT (Val/V)Valine
GTC (Val/V)Valine
GTA (Val/V)Valine
GTG (Val/V)Valine, Start2
GCT (Ala/A)Alanine
GCC (Ala/A)Alanine
GCA (Ala/A)Alanine
GCG (Ala/A)Alanine
GAT (Asp/D)Aspartic acid
GAC (Asp/D)Aspartic acid
GAA (Glu/E)Glutamic acid
GAG (Glu/E)Glutamic acid
GGT (Gly/G)Glycine
GGC (Gly/G)Glycine
GGA (Gly/G)Glycine
GGG (Gly/G)Glycine
"""
def translate( codon, genetic_code):
c1,c2,c3 = codon
return genetic_code[c1][c2][c3]
""" parse the doc string to hash the genetic code"""
GEN_CODE = {}
for line in GENETIC_CODE.split('\n'):
if line.strip() == '': continue
f = re.split('\s|\(|\)|\/',line)
codon = f[0]
c1,c2,c3 = codon
aminoacid = f[3]
if c1 not in GEN_CODE: GEN_CODE[c1] = {}
if c2 not in GEN_CODE[c1]: GEN_CODE[c1][c2] = {}
GEN_CODE[c1][c2][c3] = aminoacid
def getnib( nibdir ):
seqs = {}
for nibf in os.listdir( nibdir ):
if not nibf.endswith('.nib'): continue
chr = nibf.replace('.nib','')
file = os.path.join( nibdir, nibf )
seqs[chr] = nib.NibFile( open(file) )
return seqs
REVMAP = string.maketrans("ACGTacgt","TGCAtgca")
def revComp(seq):
return seq[::-1].translate(REVMAP)
def Comp(seq):
return seq.translate(REVMAP)
def codon_degeneracy( codon, position=3 ):
aa = translate( codon, GEN_CODE )
if position==1:
degeneracy1 = [GEN_CODE[ k ][ codon[1] ][ codon[2] ] for k in all].count(aa)
elif position==2:
degeneracy2 = [GEN_CODE[ codon[0] ][ k ][ codon[2] ] for k in all].count(aa)
elif position==3:
degeneracy = GEN_CODE[ codon[0] ][ codon[1] ].values().count(aa)
return degeneracy
def main():
    """Command-line driver: report per-position codon degeneracy for CDSs.

    Expects two positional arguments (a directory of ``.nib`` genome files
    and a BED/GTF file of coding exons) plus options parsed from the module
    docstring by ``doc_optparse``.  For every codon of every CDS it prints a
    BED-like line: chrom, start, end, base, degeneracy ("4d" etc.), amino
    acid and, when requested, the feature name.
    """
    options, args = doc_optparse.parse( __doc__ )
    try:
        if options.outfile:
            out = open( options.outfile, "w")
        else:
            out = sys.stdout
        if options.format:
            format = options.format
        else:
            format = 'bed'
        allpositions = bool( options.allpositions )
        include_name = bool( options.include_name )
        nibdir = args[0]
        bedfile = args[1]
    except:
        doc_optparse.exit()
    nibs = getnib(nibdir)
    for chrom, strand, cds_exons, name in CDSReader( open(bedfile), format=format):
        cds_seq = ''
        # genome_seq_index maps the position in CDS to position on the genome
        genome_seq_index = []
        for (c_start, c_end) in cds_exons:
            cds_seq += nibs[chrom].get( c_start, c_end-c_start )
            for i in range(c_start,c_end):
                genome_seq_index.append(i)
        cds_seq = cds_seq.upper()
        if strand == '+':
            frsts = range( 0, len(cds_seq), 3)
            offsign = 1
        else:
            # minus strand: complement (NOT reverse-complement) in place and
            # walk each codon from its third base using negative offsets, so
            # genome coordinates in genome_seq_index stay forward
            cds_seq = Comp( cds_seq )
            frsts = range( 2, len(cds_seq), 3)
            offsign = -1
        offone = 1 * offsign
        offtwo = 2 * offsign
        all = ['A','C','G','T']
        for first_pos in frsts:
            c1 = first_pos
            c2 = first_pos + offone
            c3 = first_pos + offtwo
            try:
                # guard against a trailing partial codon
                assert c3 < len(cds_seq)
            except AssertionError:
                print >>sys.stderr, "out of sequence at %d for %s, %d" % (c3, chrom, genome_seq_index[ first_pos ])
                continue
            codon = cds_seq[c1], cds_seq[c2], cds_seq[c3]
            aa = translate( codon, GEN_CODE )
            # third-position degeneracy: how many third bases conserve aa
            degeneracy3 = str(GEN_CODE[ codon[0] ][ codon[1] ].values().count(aa)) + "d"
            if not include_name: name_text = ''
            else:
                name_text = name.replace(' ','_')
            if allpositions:
                try:
                    degeneracy1 = str([GEN_CODE[ k ][ codon[1] ][ codon[2] ] for k in all].count(aa)) + "d"
                    degeneracy2 = str([GEN_CODE[ codon[0] ][ k ][ codon[2] ] for k in all].count(aa)) + "d"
                except TypeError, s:
                    print >>sys.stderr, GEN_CODE.values()
                    raise TypeError, s
                if strand == '+':
                    print >>out, chrom, genome_seq_index[c1], genome_seq_index[c1] + 1, cds_seq[c1], degeneracy1, aa, name_text
                    print >>out, chrom, genome_seq_index[c2], genome_seq_index[c2] + 1, cds_seq[c2], degeneracy2, aa, name_text
                    print >>out, chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text
                else:
                    # minus strand: emit in ascending genome order (c3 is lowest)
                    print >>out, chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text
                    print >>out, chrom, genome_seq_index[c2], genome_seq_index[c2] + 1, cds_seq[c2], degeneracy2, aa, name_text
                    print >>out, chrom, genome_seq_index[c1], genome_seq_index[c1] + 1, cds_seq[c1], degeneracy1, aa, name_text
            else:
                # first/second positions get a fixed "1d" placeholder; only the
                # third position carries a computed degeneracy
                if strand == '+':
                    for b in c1,c2:
                        print >>out, chrom, genome_seq_index[b], genome_seq_index[b] + 1, cds_seq[b], "1d", aa, name_text
                    print >>out, chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text
                else:
                    print >>out, chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text
                    for b in c2,c1:
                        print >>out, chrom, genome_seq_index[b], genome_seq_index[b] + 1, cds_seq[b], "1d", aa, name_text
    out.close()
# Standard script entry point.
if __name__ == '__main__':
    main()
#format = sys.argv[1]
#file = sys.argv[2]
#for chr, strand, cds_exons in CDSReader( open(file), format=format):
# s_points = [ "%d,%d" % (a[0],a[1]) for a in cds_exons ]
# print chr, strand, len(cds_exons), "\t".join(s_points)
| poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/EGG-INFO/scripts/gene_fourfold_sites.py | Python | apache-2.0 | 7,678 | [
"Amber"
] | 8f76f17ebefbd7816a5ce2603d72b6bdab4c3ac848d568413881cd9fec0978de |
from pyrates.frontend import CircuitTemplate
from pyrates.utility.grid_search import grid_search
from pyrates.utility.visualization import plot_connectivity
import matplotlib.pyplot as plt
import numpy as np
# Fit a gamma-kernel (delay d, spread s) approximation of delayed synaptic
# feedback against a target spinal-cord circuit simulation, then plot the
# fitting error over the (d, s) grid and the best-fitting trace.
# parameter definition
dt = 1e-3    # integration step (s)
dts = 1e-2   # sampling step for stored output (s)
cutoff = 100.0   # initial transient discarded from analysis
T = 200.0 + cutoff
start = int((0 + cutoff)/dt)
dur = int(5/(0.6*dt))
steps = int(T/dt)
inp = np.zeros((steps, 1))
# rectangular input pulse of amplitude 0.6 after the cutoff period
inp[start:start+dur] = 0.6
# target: delayed biexponential response of the alpha or renshaw neuron
path = "../config/spinal_cord/sc"
neuron = 'alpha'
target_var = 'I_ampa'
model = CircuitTemplate.from_yaml(path).apply().compile(backend='numpy', step_size=dt, solver='euler')
r1 = model.run(simulation_time=T, sampling_step_size=dts, inputs={'m1/m1_dummy/m_in': inp},
               outputs={neuron: f'{neuron}/{neuron}_op/{target_var}'})
model.clear()
r1.plot()
plt.show()
# approximation: gamma-distributed feedback
source = 'm1'
param_grid = {'d': np.asarray([1.5, 2.0, 2.5]),
              's': np.asarray([0.4, 0.6, 0.8, 1.0])}
param_map = {'d': {'vars': ['delay'], 'edges': [(source, neuron)]},
             's': {'vars': ['spread'], 'edges': [(source, neuron)]}}
r2, r_map = grid_search(path, param_grid, param_map, step_size=dt, simulation_time=T,
                        sampling_step_size=dts, permute_grid=True,
                        init_kwargs={'backend': 'numpy', 'step_size': dt, 'solver': 'scipy'},
                        outputs={neuron: f'{neuron}/{neuron}_op/{target_var}'},
                        inputs={'m1/m1_dummy/m_in': inp})
# calculate difference between target and approximation
n = len(param_grid['d'])
m = len(param_grid['s'])
alpha = 0.95  # controls trade-off between accuracy and complexity of gamma-kernel convolution. alpha = 1.0 for max accuracy.
error = np.zeros((n, m))
indices = [['_'for j in range(m)] for i in range(n)]
for idx in r_map.index:
    # map this grid-search result back onto its (d, s) cell
    idx_r = np.argmin(np.abs(param_grid['d'] - r_map.at[idx, 'd']))
    idx_c = np.argmin(np.abs(param_grid['s'] - r_map.at[idx, 's']))
    r = r2.loc[:, (neuron, idx)]
    diff = r - r1.loc[:, neuron]
    d, s = r_map.loc[idx, 'd'], r_map.loc[idx, 's']
    # order of the gamma kernel implied by delay/spread (see print below)
    order = (d/s)**2
    # weighted sum of L2 error and kernel order penalty
    error[idx_r, idx_c] = alpha*np.sqrt(diff.T @ diff).iloc[0, 0] + (1-alpha)*order
    print(f"delay = {d}, spread = {s}, order = {order}, rate = {order/d}")
    indices[idx_r.squeeze()][idx_c.squeeze()] = idx
# display error
fig, ax = plt.subplots()
ax = plot_connectivity(error, xticklabels=param_grid['s'], yticklabels=param_grid['d'], ax=ax)
ax.set_xlabel('s')
ax.set_ylabel('d')
plt.tight_layout()
# display winner together with target
fig2, ax2 = plt.subplots()
winner = np.argmin(error)  # flat index of the lowest-error (d, s) cell
idx = np.asarray(indices).flatten()[winner]
ax2.plot(r1.loc[:, neuron])
ax2.plot(r2.loc[:, (neuron, idx)])
ax2.set_title(f"delay = {r_map.loc[idx, 'd']}, spread = {r_map.loc[idx, 's']}")
plt.tight_layout()
plt.show()
| Richert/BrainNetworks | CMC/analysis/spinal_cord_delay_fitting.py | Python | apache-2.0 | 2,866 | [
"NEURON"
] | f1bd7df12234daeb4cfd1d0e46f437c2351e2d32284b7e67ff7aaf900f2f3f90 |
from collections import OrderedDict
import os
import itertools
import re
import numpy as np
from copy import deepcopy
from collections import Iterable
from scipy.spatial.distance import cdist
from chargetools import constants, grids
from chargetools.exceptions import InputError
from chargetools.utils.utils import int_if_close, atomic_number_to_symbol, symbol_to_atomic_number
class Atom(object):
    """
    A container for basic properties for an atom.
    """
    def __init__(self, label, atomic_number, charge, position=None):
        """
        :param label: 1-based atom index within its molecule; coerced to int.
        :param atomic_number: element number; coerced to int and also used to
            derive the element symbol.
        :param charge: charge carried by this atom (partial or formal).
        :param position: optional iterable of coordinates; stored as a float
            numpy array, or ``None`` when not supplied.
        """
        self.label = int(label)
        self.atomic_number = int(atomic_number)
        self.symbol = atomic_number_to_symbol(atomic_number)
        self.charge = charge
        if isinstance(position, Iterable):
            self.position = np.array(list(map(float, position)))
        else:
            self.position = None
    def __repr__(self):
        # e.g. <N (neutral)>, <O (0.5-)>, <Na (1+)>
        if self.charge > 0:
            charge_str = "{0}+".format(self.charge)
        elif self.charge < 0:
            charge_str = "{0}-".format(abs(self.charge))
        else:
            charge_str = "neutral"
        return "<{0} ({1})>".format(self.symbol, charge_str)
    def __eq__(self, other):
        # Atoms compare equal by label; ints compare against the label.
        # NOTE(review): any other type falls through and returns None (falsy);
        # returning NotImplemented would be the conventional choice.
        if isinstance(other, Atom):
            return other.label == self.label
        elif isinstance(other, int):
            return self.label == other
    def descriptor_compare(self, descriptor):
        """Match against a flexible descriptor: element symbol (``str``),
        label (``int``) or another ``Atom`` (label equality).
        Returns ``None`` (falsy) for unsupported descriptor types."""
        if isinstance(descriptor, str):
            return self.symbol == descriptor
        elif isinstance(descriptor, int):
            return self.label == descriptor
        elif isinstance(descriptor, Atom):
            return self == descriptor
    @classmethod
    def copy(cls, atom):
        """Construct a deep copy of an ``Atom`` object.
        :type atom: :class:`charges.molecule.Atom`
        :param atom: An instance of ``Atom`` to be copied.
        :rtype: :class:`charges.molecule.Atom`
        :return: A deep copy of the input ``Atom``.
        """
        return cls(atom.label, atom.atomic_number, atom.charge,
                   deepcopy(atom.position))
    @classmethod
    def from_ac_line(cls, ac_line_string):
        """
        Construct an ``Atom`` object from a line of ``.ac`` file generated by the AnteChamber tool.
        :type ac_line_string: str
        :param ac_line_string: A single, unmodified line from a AnteChamber format file,
            which starts with the word `ATOM`.
        :return: The ``Atom`` object representation filled with information extracted from input.
        """
        segments = ac_line_string.split()
        label, atom_str = segments[1:3]
        # columns 5-7 are x, y, z; column 8 is the partial charge
        position = np.array(segments[5:8])
        charge = int_if_close(float(segments[8]))
        # Atom description in the format of Symbol + Label, e.g. N1, C2, etc.
        # Extract atom symbol by regex
        symbol = re.findall(r'[A-Z][a-z]?', atom_str)[0]
        return cls(label, symbol_to_atomic_number(symbol), charge, position=position)
class Bond(object):
    """
    A container for basic properties for a bond. Refers to instances of the :class:`Atom` object.
    """
    def __init__(self, *bonding_atoms, bond_order=1):
        """
        :param bonding_atoms: the ``Atom`` objects joined by this bond.
        :param bond_order: bond order (1 = single, 2 = double, ...).
        """
        self.bonding_atoms = bonding_atoms
        self.bond_order = bond_order
    @classmethod
    def copy(cls, bond, all_atoms):
        """Copy *bond*, re-pointing its atoms at the label-matching ``Atom``
        instances from *all_atoms* (used when a molecule is deep-copied)."""
        def find_atom_by_label(label, atoms):
            # linear search; None when the label is absent
            for _ in atoms:
                if label == _.label:
                    return _
            return None
        return cls(*[find_atom_by_label(atom.label, all_atoms) for atom in bond.bonding_atoms],
                   bond_order=bond.bond_order)
    @classmethod
    def from_ac_line(cls, ac_line_string, all_atoms):
        """
        Construct a ``Bond`` object from a line of ``.ac`` file generated by the AnteChamber tool.
        :type ac_line_string: str
        :param ac_line_string: A single, unmodified line from a AnteChamber format file,
            which starts with the word `BOND`.
        :type all_atoms: [Atom, ...]
        :param all_atoms: A list of :class:charges.molecule.Atom objects.
            Order of the atoms must confer to the order labelled by the AnteChamber file format.
        :return: The ``Bond`` object representation filled with information extracted from input.
        """
        segments = ac_line_string.split()
        # columns 2-3 are the two 1-based atom labels, column 4 the bond order
        bonding_atom_labels = segments[2:4]
        bond_order = int(segments[4])
        return cls(*[all_atoms[int(label) - 1] for label in bonding_atom_labels],
                   bond_order=bond_order)
    def contains(self, atom, label_only=False):
        """
        Checks if an atom is contained in this bond.
        :param atom: Atom to be checked.
        :param label_only: If `True`, perform a label comparison only.
            If `False`, check if the atom objects are same instances.
        :return: Whether the atom argument is one of the bonding atoms.
        """
        for bonding_atom in self.bonding_atoms:
            if not label_only and bonding_atom == atom:
                return True
            if label_only and atom.label == bonding_atom.label:
                return True
        return False
class Molecule(object):
    """A molecule: a list of ``Atom`` objects, optional ``Bond`` topology,
    an optional name and a total charge."""
    def __init__(self, atoms, bonds=None, name=None, charge=0):
        self.atoms = atoms
        self.bonds = bonds
        self.name = name
        self.charge = charge
    def __repr__(self):
        # e.g. <Molecule: water, 3 atoms>
        d = {
            'name': self.name,
            'no_atoms': len(self.atoms),
        }
        if len(self.atoms) > 1:
            d['no_atoms'] = "{0} atoms".format(d['no_atoms'])
        else:
            d['no_atoms'] = "1 atom"
        return "<Molecule: {name}, {no_atoms}>".format(**d)
    def __len__(self):
        # length of a molecule == number of atoms
        return len(self.atoms)
    @classmethod
    def copy(cls, molecule):
        """Deep-copy *molecule*: atoms are copied and bonds are re-pointed at
        the copied atoms."""
        atoms = [Atom.copy(atom) for atom in molecule.atoms]
        if molecule.bonds:
            bonds = [Bond.copy(bond, atoms) for bond in molecule.bonds]
        else:
            bonds = None
        return cls(atoms, bonds, molecule.name, molecule.charge)
    @classmethod
    def from_ac_file(cls, ac_file_name, **kwargs):
        """Parse an AnteChamber ``.ac`` file into a ``Molecule`` (total
        charge from the first line, then ATOM and BOND records)."""
        with open(ac_file_name, 'r') as f:
            lines = f.readlines()
        atom_lines, bond_lines = [], []
        for line in lines:
            if 'ATOM' in line:
                atom_lines.append(line)
            elif 'BOND' in line:
                bond_lines.append(line)
        # Read charge from first line
        # prefer integer if within 0.01 of a whole number
        charge = int_if_close(
            float(lines.pop(0).split()[1])
        )
        # Read atoms
        atoms = []
        for atom_line in atom_lines:
            atoms.append(Atom.from_ac_line(atom_line))
        # Read bonds
        bonds = []
        for bond_line in bond_lines:
            bonds.append(Bond.from_ac_line(bond_line, atoms))
        return cls(atoms, bonds=bonds, charge=charge, **kwargs)
    @classmethod
    def from_cube_header(cls, header_lines, *args, **kwargs):
        """Build a ``Molecule`` from the atom lines of a Gaussian cube file
        header: each line is atomic number, nuclear charge, then x y z.
        Atom charge is stored as (nuclear charge - atomic number)."""
        atoms = []
        for index, line in enumerate(header_lines):
            segments = line.split()
            atomic_number = int(segments[0])
            atoms.append(Atom(index+1, atomic_number,
                              charge=int_if_close(float(segments[1]) - atomic_number),
                              position=np.array(list(map(float, segments[2:]))))
                         )
        return cls(atoms, *args, **kwargs)
    def select_label(self, *labels):
        """Select atoms by 1-based label.  A single label returns the atom
        itself.
        NOTE(review): with multiple labels this returns ``zip([...])`` over a
        single list, i.e. an iterator of 1-tuples -- possibly intended to be
        a plain list; confirm against callers."""
        try:
            if len(labels) == 1:
                return self.atoms[labels[0]-1]
            else:
                return zip([self.atoms[label-1] for label in labels])
        except IndexError:
            raise InputError('Label number argument is larger than the number of atoms contained in this molecule.')
    def select_descriptor(self, *descriptors):
        """Return every atom matching any of the descriptors (symbol, label
        or ``Atom``; see ``Atom.descriptor_compare``), in descriptor order."""
        res = []
        for descriptor in descriptors:
            for atom in self.atoms:
                if atom.descriptor_compare(descriptor):
                    res.append(atom)
        return res
    def list_of_atom_property(self, property_name):
        """
        Outputs a list of atom properties within field ``property_name`.
        For example, if ``property_name = 'atomic_number'``,
        this function will output a list of atomic numbers ordered by their labels.
        :param property_name: Key of the property of interest.
            Valid properties are: ``atomic_number``, ``label``, ``symbol``, ``position``, ``charge``.
        :return: List of properties.
        """
        return [vars(atom)[property_name] for atom in self.atoms]
    def if_bonded(self, atom, descriptor, min_bond_order=0.):
        """
        Check if argument atoms are bonded within the same bond.
        :param atom: can be either:
            *. a string of atom symbols, in which case all atoms with that atomic symbol count as being included.
            *. an integer of the atom's label.
            *. an atom object, in which case its label will be compared rather than an identity comparison.
        :param min_bond_order: Minimum bond order that counts as a chemical `bond`.
            NOTE(review): this parameter is accepted but never checked below.
        :return: If all argument atoms are bonded in the same bond.
        """
        for i, bond in enumerate(self.bonds):
            has_atom, has_descriptor = False, False
            for bonding_atom in bond.bonding_atoms:
                if bonding_atom.descriptor_compare(atom):
                    has_atom = True
                elif bonding_atom.descriptor_compare(descriptor):
                    has_descriptor = True
            if has_atom and has_descriptor:
                return True
        return False
    def select_bonded(self, atom, min_bond_order=0.):
        """
        Select all atoms bonded to the input atom.
        :param min_bond_order: Minimum bond order with which an atom is bonded, for the atom to be included.
        :param atom: Atom descriptors, can be either:
            *. a string of atom symbols, in which case all atoms with that atomic symbol count as being included.
            *. an integer of the atom's label.
            *. an atom object, in which case its label will be compared rather than an identity comparison.
        :return: A list of all bonded atoms.
        """
        bonded_atoms = []
        for bond in self.bonds:
            if bond.contains(atom):
                bonded_atoms += [bonding_atom for bonding_atom in bond.bonding_atoms
                                 if bonding_atom is not atom and bond.bond_order > min_bond_order]
        return bonded_atoms
    def number_connections(self, atom_a, atom_b, min_bond_order=0.):
        """
        Get the number of bonds connecting atom A and atom B.
        :param atom_a: Atom A, must be an :class:`entities.Atom` object.
        :param atom_b: Atom B, must be an :class:`entities.Atom` object.
        :param min_bond_order: Minimum bond order for a connection to be counted.
        :return: Number of chemical bonds between two input atoms.
        """
        def iter_atoms(_a, _b, n, min_bond_order_, stop):
            # NOTE(review): the loop below returns on the FIRST neighbour, so
            # only a single path through the bond graph is ever explored;
            # branched molecules may report inf for connected atoms.
            if _a in self.select_bonded(_b, min_bond_order_):
                return n
            else:
                _n = n + 1
                if _n > stop:
                    return float("inf")
                for bonded in self.select_bonded(_b):
                    return iter_atoms(_a, bonded, _n, min_bond_order_, stop)
        return iter_atoms(atom_a, atom_b, 1, min_bond_order, len(self.bonds))
class MoleculeWithCharge(Molecule):
    """A ``Molecule`` whose atoms carry fitted charges, together with
    metadata about how those charges were derived (charge method, ESP
    sampling scheme, restraint/averaging/equivalencing flags)."""
    # Recognised ESP sampling schemes, mapped to the identifiers that may
    # appear in file names or Gaussian route lines.  Ordered so that more
    # specific names (e.g. MK-UFF) are matched before their substrings (MK).
    all_sampling_schemes = OrderedDict()
    all_sampling_schemes['MK-UFF'] = ["(full, mkuff)", "mkuff", "mk-uff", "mk_uff", ]
    all_sampling_schemes['CHelpG'] = ["(full, chelpg)", "chelpg"]
    all_sampling_schemes['MK'] = ["(full, mk)", "mk", "merz", "kollman"]
    all_sampling_schemes['CHelp'] = ["(full, chelp)", "chelp"]
    # Recognised charge derivation methods and their file-name identifiers.
    all_charge_methods = {
        'NBO': ['nbo', 'natural'],
        'Mulliken': ['mulliken', 'mülliken'],
        "ESP": ['esp', 'potential', 'electrostatic'] + list(
            itertools.chain.from_iterable(all_sampling_schemes.values())
        ),
    }
    def __init__(self, charge_file_name, atoms,
                 charge_method=None,
                 sampling_scheme=None,
                 is_restrained=None, is_averaged=None, is_equivalenced=None, is_compromised=None,
                 *args, **kwargs):
        """
        :param charge_file_name: file the charges were read from; also used to
            guess the charge method/flags, which explicit keyword arguments
            then override.
        :param atoms: list of ``Atom`` objects carrying the charges.
        """
        self.charge_file_name = charge_file_name
        self.guess_charge_method(charge_file_name)
        # explicit keyword arguments override anything guessed from the name
        if charge_method is not None:
            self.charge_method = charge_method
        if sampling_scheme is not None:
            self.sampling_scheme = sampling_scheme
        if is_restrained is not None:
            self.is_restrained = is_restrained
        if is_averaged is not None:
            self.is_averaged = is_averaged
        if is_equivalenced is not None:
            self.is_equivalenced = is_equivalenced
        if is_compromised is not None:
            self.is_compromised = is_compromised
        super(MoleculeWithCharge, self).__init__(atoms, *args, **kwargs)
    def guess_charge_method(self, file_name):
        """Infer charge method, sampling scheme and processing flags from the
        charge file name, setting the corresponding attributes in place."""
        file_name = os.path.splitext(file_name)[0].lower()
        self.is_averaged = "average" in file_name
        self.is_restrained = "resp" in file_name or "restrain" in file_name
        self.is_equivalenced = "equivalence" in file_name or self.is_restrained
        self.is_compromised = "compromise" in file_name
        found = False
        for sampling_scheme_name, sampling_scheme_identifiers in self.all_sampling_schemes.items():
            for sampling_scheme_identifier in sampling_scheme_identifiers:
                if sampling_scheme_identifier.lower() in file_name:
                    self.sampling_scheme = sampling_scheme_name
                    found = True
                    break
            if found:
                break
        else:
            self.sampling_scheme = None
        found = False
        for charge_method, charge_method_identifiers in self.all_charge_methods.items():
            for charge_method_identifier in charge_method_identifiers:
                if charge_method_identifier.lower() in file_name:
                    self.charge_method = charge_method
                    found = True
                    break
            if found:
                break
        else:
            self.charge_method = None
    @classmethod
    def from_plaintext_list(cls, file_name_full, base_molecule, *args, **kwargs):
        """Read whitespace-separated charges (one value per atom, in label
        order) and attach them to a copy of *base_molecule*."""
        with open(file_name_full, 'r') as f:
            line = f.read()
        if len(line.split()) != len(base_molecule):
            raise InputError('The list-formatted charge file must have the same number of list as the base molecule. '
                             'The base molecule (.ac file?) may point to a different molecule than this charges list.')
        molecule = Molecule.copy(base_molecule)
        charge = 0
        for atom, charge_str in zip(molecule.atoms, line.split()):
            atom.charge = int_if_close(float(charge_str))
            charge += float(charge_str)
        return cls(file_name_full, molecule.atoms, bonds=molecule.bonds,
                   charge=int_if_close(charge), name=molecule.name, *args, **kwargs)
    @classmethod
    def from_gaussian_log(cls, file_name_full, base_molecule, *args, **kwargs):
        """Read ESP charges from a Gaussian log file, attaching them to a
        copy of *base_molecule*.  Raises ``InputError`` when the log has no
        ESP charge block or no recognisable pop= sampling scheme keyword."""
        with open(file_name_full, 'r') as f:
            lines = f.readlines()
        # Locate starting point of ESP charges text block
        try:
            start_index = lines.index(" ESP charges:\n") + 2
        except ValueError:
            raise InputError('Cannot find charge information within this Gaussian log file.')
        # Parse the input keywords to find the sampling scheme
        # (route section is expected within this line window)
        sampling_scheme = False
        for line in lines[58:158]:
            if sampling_scheme:
                break
            for segment in line.split():
                if "pop=" in segment:
                    # look if keyword contains default set of keywords
                    for name, identifiers in cls.all_sampling_schemes.items():
                        if sampling_scheme:
                            break
                        for identifier in identifiers:
                            if identifier.lower() in segment.lower():
                                sampling_scheme = name
                if sampling_scheme:
                    break
        if not sampling_scheme:
            raise InputError('Cannot find sampling scheme information within this Gaussian log file.')
        molecule = Molecule.copy(base_molecule)
        for atom, line in zip(molecule.atoms, lines[start_index:]):
            if "Sum of ESP charges" in line:
                total_charge_str = line.split()[-1]
                break
            if len(line.split()) >= 3:
                charge_str = line.split()[-1]
                atom.charge = int_if_close(float(charge_str))
        # fall back to neutral when the summary line was never reached
        try:
            total_charge_str
        except NameError:
            total_charge_str = '0'
        return cls(file_name_full, molecule.atoms, bonds=molecule.bonds,
                   charge=int_if_close(float(total_charge_str)),
                   is_averaged=False, is_compromised=False, is_equivalenced=False, is_restrained=False)
    @classmethod
    def from_file(cls, file_name_full, base_molecule, *args, **kwargs):
        """Dispatch to the parser matching the file extension (.txt, .log)."""
        file_name, extension = os.path.splitext(file_name_full)
        parsers = {
            '.txt': cls.from_plaintext_list,
            '.log': cls.from_gaussian_log,
        }
        parser_function = parsers.get(extension.lower())
        if parser_function is not None:
            return parser_function(file_name_full, base_molecule, *args, **kwargs)
        raise InputError('Extension not supported by any of the parser functions.')
    @classmethod
    def mulliken_from_gaussian_log(cls, file_name_full, base_molecule):
        # TODO: not yet implemented
        pass
    def charge_on(self, atom_label):
        """
        Output charge borne by a labelled atom.

        Fix: this was previously decorated with ``@property`` while taking a
        required ``atom_label`` argument, which made every attribute access
        raise TypeError; it is now a plain method.

        :param atom_label: 1-based label of the atom of interest.
        :return: the charge of the matching atom.
        :raises ValueError: when no atom carries the label.
        """
        for atom in self.atoms:
            if atom.label == atom_label:
                return atom.charge
        raise ValueError('Atom with this label has not been found.')
    def reproduce_cube(self, template_cube, **kwargs):
        """
        Based on the respective charges of atoms within this molecule,
        reproduce the 3-dimensional electrostatic potential as a :class:`charges.cube.Cube` object.
        :param template_cube: The reproduced volume will have the same points density and size to this template cube.
        :param kwargs: Extra keyword arguments to pass to the :func:`scipy.spatial.distance.cdist` function,
            which calculates the distances.
            By default, the Euclidean distances are used.
        :return: Reproduced potential stored within a new :class:`charges.cube.Cube` object.
        """
        atomic_charges = np.array(self.list_of_atom_property('charge'))
        positions = np.array(template_cube.molecule.list_of_atom_property('position'))
        # Calculate the distances
        distances = cdist(template_cube.flat_coordinates, positions, **kwargs)
        # Calculate per-atom potential, then sum
        potentials = (np.array(atomic_charges) / distances).sum(axis=1)
        return grids.Cube.assign_new_values_to(template_cube, potentials.reshape(template_cube.n_voxels))
    def error_cube(self, potential, **kwargs):
        """
        Based on the respective charges of atoms within this molecule,
        calculate the 3D electrostatic potential subtracted by the actual potential values.
        This is a wrapper function for the :method:`entities.MoleculeWithCharge.reproduceCube` method.
        :param potential: A :class:`grids.Cube` object containing the actual potential.
        :param kwargs: Extra keyword arguments to be passed to the distance transform method,
            from which distances to atoms are evaluated.
        :return: Cube object containing errors of the electrostatic potential.
        """
        return grids.Cube.assign_new_values_to(potential,
                                               self.reproduce_cube(potential, **kwargs) - potential,
                                               )
| jincheng95/charges-tools | chargetools/entities.py | Python | mit | 20,327 | [
"Gaussian"
] | fe0a6905440ede1ac6cd1fb8041721289668b46df31b6fd6bb58a12cb158679b |
"""BWA (https://github.com/lh3/bwa)
"""
import os
import signal
import subprocess
from bcbio.pipeline import config_utils
from bcbio import bam, utils
from bcbio.distributed import objectstore
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.ngsalign import alignprep, novoalign, postalign, rtg
from bcbio.provenance import do
from bcbio.rnaseq import gtf
from bcbio.variation import sentieon
import bcbio.pipeline.datadict as dd
from bcbio.bam import fastq
from bcbio.log import logger
galaxy_location_file = "bwa_index.loc"
def align_bam(in_bam, ref_file, names, align_dir, data):
    """Perform direct alignment of an input BAM file with BWA using pipes.

    This avoids disk IO by piping between processes:
    - samtools sort of input BAM to queryname
    - bedtools conversion to interleaved FASTQ
    - bwa-mem alignment
    - samtools conversion to BAM
    - samtools sort to coordinate

    Fixes: the separate-HLA branch previously (a) re-invoked
    ``_align_mem_hla`` with names (``fastq_file``, ``pair_file``,
    ``rg_info``) that do not exist in this function -- a guaranteed
    NameError after the piped HLA alignment had already been produced --
    and (b) built the HLA file name by prefixing the full path
    ("HLA-/dir/x.bam"); it now mirrors ``align_pipe`` and prefixes the
    basename only.

    :param in_bam: existing BAM file to re-align.
    :param ref_file: bwa-indexed reference genome.
    :param names: read-group naming dict (``lane`` and ``sample`` used here).
    :param align_dir: output directory for the sorted BAM.
    :param data: sample data dict; ``work_bam`` (and ``hla_bam`` when a
        separate HLA alignment is needed) are set on it.
    :return: the updated ``data`` dictionary.
    """
    config = data["config"]
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
    samtools = config_utils.get_program("samtools", config)
    bedtools = config_utils.get_program("bedtools", config)
    resources = config_utils.get_resources("samtools", config)
    num_cores = config["algorithm"].get("num_cores", 1)
    # adjust memory for samtools since used for input and output
    max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
                                         3, "decrease").upper()
    if not utils.file_exists(out_file):
        with tx_tmpdir(data) as work_dir:
            with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file):
                # UMI-collapsed inputs get their HLA alignment separately, so
                # the main alignment skips bwakit HLA post-processing
                if not hla_on(data) or needs_separate_hla(data):
                    bwa_cmd = _get_bwa_mem_cmd(data, out_file, ref_file, "-", with_hla=False)
                else:
                    bwa_cmd = _get_bwa_mem_cmd(data, out_file, ref_file, "-", with_hla=True)
                tx_out_prefix = os.path.splitext(tx_out_file)[0]
                prefix1 = "%s-in1" % tx_out_prefix
                cmd = ("unset JAVA_HOME && "
                       "{samtools} sort -n -l 1 -@ {num_cores} -m {max_mem} {in_bam} -T {prefix1} "
                       "| {bedtools} bamtofastq -i /dev/stdin -fq /dev/stdout -fq2 /dev/stdout "
                       "| {bwa_cmd} | ")
                cmd = cmd.format(**locals()) + tobam_cl
                do.run(cmd, "bwa mem alignment from BAM: %s" % names["sample"], None,
                       [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)])
    data["work_bam"] = out_file
    # bwakit corrupts non-HLA alignments in UMI collapsed BAMs, so run the
    # HLA-enabled alignment as a second, separate pass in that case
    hla_file = os.path.join(os.path.dirname(out_file), "HLA-" + os.path.basename(out_file))
    if needs_separate_hla(data) and not utils.file_exists(hla_file):
        with tx_tmpdir(data) as work_dir:
            with postalign.tobam_cl(data, hla_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file):
                bwa_cmd = _get_bwa_mem_cmd(data, hla_file, ref_file, "-", with_hla=True)
                tx_out_prefix = os.path.splitext(tx_out_file)[0]
                prefix1 = "%s-in1" % tx_out_prefix
                cmd = ("unset JAVA_HOME && "
                       "{samtools} sort -n -l 1 -@ {num_cores} -m {max_mem} {in_bam} -T {prefix1} "
                       "| {bedtools} bamtofastq -i /dev/stdin -fq /dev/stdout -fq2 /dev/stdout "
                       "| {bwa_cmd} | ")
                cmd = cmd.format(**locals()) + tobam_cl
                do.run(cmd, "bwa mem alignment from BAM: %s" % names["sample"], None,
                       [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)])
        data["hla_bam"] = hla_file
    return data
def _get_bwa_mem_cmd(data, out_file, ref_file, fastq1, fastq2="", with_hla=False):
    """Perform piped bwa mem mapping potentially with alternative alleles in GRCh38 + HLA typing.

    Returns the shell command string (it is not executed here); the command
    reads FASTQ from ``fastq1``/``fastq2`` (or interleaved stdin when
    ``fastq1`` is "-") and writes SAM to stdout.

    Commands for HLA post-processing:
       base=TEST
       run-HLA $base.hla > $base.hla.top
       cat $base.hla.HLA*.gt | grep ^GT | cut -f2- > $base.hla.all
       rm -f $base.hla.HLA*gt
       rm -f $base.hla.HLA*gz
    """
    alt_file = ref_file + ".alt"
    if with_hla:
        # pipe through bwakit's bwa-postalt.js for ALT-aware mapping and to
        # emit per-HLA-gene fastqs next to the output BAM
        bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem")))
        hla_base = os.path.join(utils.safe_makedir(os.path.join(os.path.dirname(out_file), "hla")),
                                os.path.basename(out_file) + ".hla")
        alt_cmd = (" | {bwakit_dir}/k8 {bwakit_dir}/bwa-postalt.js -p {hla_base} {alt_file}")
    else:
        alt_cmd = ""
    if dd.get_aligner(data) == "sentieon-bwa":
        bwa_exe = "sentieon-bwa"
        exports = sentieon.license_export(data)
    else:
        bwa_exe = "bwa"
        exports = ""
    bwa = config_utils.get_program(bwa_exe, data["config"])
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    bwa_resources = config_utils.get_resources("bwa", data["config"])
    bwa_params = (" ".join([str(x) for x in bwa_resources.get("options", [])])
                  if "options" in bwa_resources else "")
    rg_info = novoalign.get_rg_info(data["rgnames"])
    # For UMI runs, pass along consensus tags
    c_tags = "-C" if "umi_bam" in data else ""
    # interleaved input when no second fastq is given
    pairing = "-p" if not fastq2 else ""
    # Restrict seed occurances to 1/2 of default, manage memory usage for centromere repeats in hg38
    # https://sourceforge.net/p/bio-bwa/mailman/message/31514937/
    # http://ehc.ac/p/bio-bwa/mailman/message/32268544/
    mem_usage = "-c 250"
    # NOTE: .format(**locals()) below relies on the local names defined above
    bwa_cmd = ("{exports}{bwa} mem {pairing} {c_tags} {mem_usage} -M -t {num_cores} {bwa_params} -R '{rg_info}' "
               "-v 1 {ref_file} {fastq1} {fastq2} ")
    return (bwa_cmd + alt_cmd).format(**locals())
def is_precollapsed_bam(data):
    """True when UMIs are tracked by read name but no consensus BAM exists."""
    collapsed_by_name = dd.get_umi_type(data) == "fastq_name"
    return collapsed_by_name and not has_umi(data)
def hla_on(data):
    """HLA typing is active: the reference has HLA contigs and a caller is set.

    Mirrors ``has_hla(data) and dd.get_hlacaller(data)``: returns False when
    the reference lacks HLA contigs, otherwise the configured caller value.
    """
    if not has_hla(data):
        return False
    return dd.get_hlacaller(data)
def has_umi(data):
    """Whether a consensus UMI BAM key has been attached to this sample."""
    return any(key == "umi_bam" for key in data)
def has_hla(data):
    """Whether the configured reference genome contains any HLA contigs."""
    from bcbio.heterogeneity import chromhacks
    hla_chroms = chromhacks.get_hla_chroms(dd.get_ref_file(data))
    return len(hla_chroms) != 0
def fastq_size_output(fastq_file, tocheck):
    """Yield (count, read_length) string pairs sampled from a fastq file.

    Samples ``tocheck`` reads (seqtk with a fixed seed, so deterministic)
    from the first two million records of ``fastq_file`` and tallies the
    distinct read lengths via awk/sort/uniq.  ``fastq_file`` may live in an
    object store; plain and gzipped fastq are both handled.

    :raises IOError: when the shell pipeline produces no output.
    """
    # 8M fastq lines == 2M reads
    head_count = 8000000
    fastq_file = objectstore.cl_input(fastq_file)
    gzip_cmd = "zcat {fastq_file}" if fastq_file.endswith(".gz") else "cat {fastq_file}"
    cmd = (utils.local_path_export() + gzip_cmd + " | head -n {head_count} | "
           "seqtk sample -s42 - {tocheck} | "
           "awk '{{if(NR%4==2) print length($1)}}' | sort | uniq -c")
    def fix_signal():
        """Avoid spurious 'cat: write error: Broken pipe' message due to head command.
        Work around from:
        https://bitbucket.org/brodie/cram/issues/16/broken-pipe-when-heading-certain-output
        """
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    count_out = subprocess.check_output(cmd.format(**locals()), shell=True,
                                        executable="/bin/bash", preexec_fn=fix_signal).decode()
    if not count_out.strip():
        raise IOError("Failed to check fastq file sizes with: %s" % cmd.format(**locals()))
    # uniq -c output: "<count> <length>" per line
    for count, size in (l.strip().split() for l in count_out.strip().split("\n")):
        yield count, size
def _can_use_mem(fastq_file, data, read_min_size=None):
    """bwa-mem handles longer (> 70bp) reads with improved piping.

    Randomly samples 5000 reads from the first two million and declines the
    piped bwa-mem approach when more than 75% of the sampled reads are below
    70bp.  A previously calculated minimum read size (from rtg SDF output)
    short-circuits the sampling.
    """
    min_size = 70
    if read_min_size and read_min_size >= min_size:
        return True
    thresh = 0.75
    tocheck = 5000
    shorter = sum(int(count)
                  for count, size in fastq_size_output(fastq_file, tocheck)
                  if int(size) < min_size)
    return (float(shorter) / float(tocheck)) <= thresh
def align_pipe(fastq_file, pair_file, ref_file, names, align_dir, data):
    """Perform piped alignment of fastq input files, generating sorted output BAM.

    Chooses between bwa aln (short reads), plain bwa mem, and bwa mem with
    bwakit HLA post-processing, then records the result in
    ``data["work_bam"]`` (plus ``data["hla_bam"]`` when UMI handling forces
    a separate HLA alignment pass).
    """
    pair_file = pair_file if pair_file else ""
    # back compatible -- older files were named with lane information, use sample name now
    if names["lane"] != dd.get_sample_name(data):
        out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
    else:
        out_file = None
    if not out_file or not utils.file_exists(out_file):
        # "-cumi" marks output produced from a consensus UMI BAM
        umi_ext = "-cumi" if "umi_bam" in data else ""
        out_file = os.path.join(align_dir, "{0}-sort{1}.bam".format(dd.get_sample_name(data), umi_ext))
    qual_format = data["config"]["algorithm"].get("quality_format", "").lower()
    min_size = None
    if data.get("align_split") or fastq_file.endswith(".sdf"):
        if fastq_file.endswith(".sdf"):
            min_size = rtg.min_read_size(fastq_file)
        final_file = out_file
        out_file, data = alignprep.setup_combine(final_file, data)
        fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
    else:
        final_file = None
        # convert legacy Illumina (phred64) quality scores on the fly
        if qual_format == "illumina":
            fastq_file = alignprep.fastq_convert_pipe_cl(fastq_file, data)
            if pair_file:
                pair_file = alignprep.fastq_convert_pipe_cl(pair_file, data)
    rg_info = novoalign.get_rg_info(names)
    if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
        # If we cannot do piping, use older bwa aln approach
        if ("bwa-mem" not in dd.get_tools_on(data) and
              ("bwa-mem" in dd.get_tools_off(data) or not _can_use_mem(fastq_file, data, min_size))):
            out_file = _align_backtrack(fastq_file, pair_file, ref_file, out_file,
                                        names, rg_info, data)
        else:
            # pre-collapsed/UMI inputs skip inline HLA handling (see below)
            if is_precollapsed_bam(data) or not hla_on(data) or needs_separate_hla(data):
                out_file = _align_mem(fastq_file, pair_file, ref_file, out_file,
                                      names, rg_info, data)
            else:
                out_file = _align_mem_hla(fastq_file, pair_file, ref_file, out_file,
                                          names, rg_info, data)
    data["work_bam"] = out_file
    # bwakit will corrupt the non-HLA alignments in a UMI collapsed BAM file
    # (see https://github.com/bcbio/bcbio-nextgen/issues/3069)
    if needs_separate_hla(data):
        hla_file = os.path.join(os.path.dirname(out_file), "HLA-" + os.path.basename(out_file))
        hla_file = _align_mem_hla(fastq_file, pair_file, ref_file, hla_file, names, rg_info, data)
        data["hla_bam"] = hla_file
    return data
def _align_mem(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
    """Perform bwa-mem alignment on supported read lengths.

    Pipes bwa mem (without HLA post-processing) into the standard BAM
    conversion/sorting command and returns *out_file*.
    NOTE(review): ``rg_info`` is accepted but unused; the read group is
    rebuilt inside ``_get_bwa_mem_cmd``.
    """
    with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
        cmd = ("unset JAVA_HOME && "
               "%s | %s" % (_get_bwa_mem_cmd(data, out_file, ref_file, fastq_file, pair_file, with_hla=False), tobam_cl))
        do.run(cmd, "bwa mem alignment from fastq: %s" % names["sample"], None,
               [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, fastq_file)])
    return out_file
def _align_mem_hla(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
    """Run bwa-mem with bwakit HLA typing enabled, streaming the output into a BAM.

    Returns the path to the aligned BAM file.
    """
    is_paired = pair_file != ""
    with postalign.tobam_cl(data, out_file, is_paired) as (tobam_cl, tx_out_file):
        bwa_cmd = _get_bwa_mem_cmd(data, out_file, ref_file, fastq_file, pair_file, with_hla=True)
        cmd = "unset JAVA_HOME && {align} | {tobam}".format(align=bwa_cmd, tobam=tobam_cl)
        # sanity checks on the transactional output before it is moved in place
        checks = [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, fastq_file)]
        do.run(cmd, "bwa mem alignment from fastq: %s" % names["sample"], None, checks)
    return out_file
def needs_separate_hla(data):
    """
    bwakit will corrupt the non-HLA alignments in a UMI collapsed BAM file
    (see https://github.com/bcbio/bcbio-nextgen/issues/3069)

    True when HLA typing must run as its own pass: HLA calling is on AND the
    sample uses UMIs. Mirrors `hla_on(data) and has_umi(data)`, including its
    short-circuit value semantics.
    """
    result = hla_on(data)
    if result:
        result = has_umi(data)
    return result
def _align_backtrack(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
    """Perform a BWA alignment using 'aln' backtrack algorithm.

    Used for short reads or when bwa-mem is unavailable/disabled. Produces
    intermediate .sai files, then combines them with 'bwa sampe' (paired) or
    'bwa samse' (single-end) into the final BAM. Returns the BAM path.
    """
    bwa = config_utils.get_program("bwa", data["config"])
    config = data["config"]
    # intermediate suffix-array alignments; sai2 only exists for paired input
    sai1_file = "%s_1.sai" % os.path.splitext(out_file)[0]
    sai2_file = "%s_2.sai" % os.path.splitext(out_file)[0] if pair_file else ""
    if not utils.file_exists(sai1_file):
        with file_transaction(data, sai1_file) as tx_sai1_file:
            _run_bwa_align(fastq_file, ref_file, tx_sai1_file, config)
    if sai2_file and not utils.file_exists(sai2_file):
        with file_transaction(data, sai2_file) as tx_sai2_file:
            _run_bwa_align(pair_file, ref_file, tx_sai2_file, config)
    with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
        align_type = "sampe" if sai2_file else "samse"
        # NOTE: the template below is filled from locals(), so the local
        # variable names above must match the {placeholders} exactly.
        cmd = ("unset JAVA_HOME && {bwa} {align_type} -r '{rg_info}' {ref_file} {sai1_file} {sai2_file} "
               "{fastq_file} {pair_file} | ")
        cmd = cmd.format(**locals()) + tobam_cl
        do.run(cmd, "bwa %s" % align_type, data)
    return out_file
def _bwa_args_from_config(config):
num_cores = config["algorithm"].get("num_cores", 1)
core_flags = ["-t", str(num_cores)] if num_cores > 1 else []
return core_flags
def _run_bwa_align(fastq_file, ref_file, out_file, config):
    """Generate a 'bwa aln' suffix-array (.sai) file for one fastq input."""
    cl = [config_utils.get_program("bwa", config), "aln",
          "-n 2", "-k 2"]
    cl.extend(_bwa_args_from_config(config))
    cl.extend([ref_file, fastq_file])
    # bwa aln writes to stdout, so redirect into the target .sai file
    cmd = " ".join(cl) + " > " + out_file
    do.run(cmd, "bwa aln: {f}".format(f=os.path.basename(fastq_file)), None)
def index_transcriptome(gtf_file, ref_file, data):
    """
    use a GTF file and a reference FASTA file to index the transcriptome

    Extracts transcript sequences from the annotation, then builds a bwa
    index over them; returns the transcript FASTA path.
    """
    transcript_fasta = gtf.gtf_to_fasta(gtf_file, ref_file)
    return build_bwa_index(transcript_fasta, data)
def build_bwa_index(fasta_file, data):
    """Run 'bwa index' on fasta_file and return the FASTA path."""
    bwa = config_utils.get_program("bwa", data["config"])
    do.run("%s index %s" % (bwa, fasta_file),
           "Creating transcriptome index of %s with bwa." % fasta_file)
    return fasta_file
def align_transcriptome(fastq_file, pair_file, ref_file, data):
    """
    bwa mem with settings for aligning to the transcriptome for eXpress/RSEM/etc

    Aligns against a bwa index built from the GTF-derived transcript FASTA,
    name-sorts the result and records it via dd.set_transcriptome_bam.
    Re-uses an existing transcriptome BAM if present. Returns the updated
    data dictionary.
    """
    work_bam = dd.get_work_bam(data)
    base, ext = os.path.splitext(work_bam)
    out_file = base + ".transcriptome" + ext
    if utils.file_exists(out_file):
        data = dd.set_transcriptome_bam(data, out_file)
        return data
    # bwa mem needs phred+33 quality, so convert if it is Illumina
    if dd.get_quality_format(data).lower() == "illumina":
        # Pass the file names as lazy logging arguments; previously the two
        # %s placeholders had no arguments, which broke the log message.
        logger.info("bwa mem does not support the phred+64 quality format, "
                    "converting %s and %s to phred+33.", fastq_file, pair_file)
        fastq_file = fastq.groom(fastq_file, data, in_qual="fastq-illumina")
        if pair_file:
            pair_file = fastq.groom(pair_file, data, in_qual="fastq-illumina")
    bwa = config_utils.get_program("bwa", data["config"])
    gtf_file = dd.get_gtf_file(data)
    gtf_fasta = index_transcriptome(gtf_file, ref_file, data)
    args = " ".join(_bwa_args_from_config(data["config"]))
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    # NOTE: the command template is filled from locals() below, so the local
    # variable names above must match the {placeholders}. '-a' reports all
    # alignments, which downstream transcript quantification requires.
    cmd = ("{bwa} mem {args} -a -t {num_cores} {gtf_fasta} {fastq_file} "
           "{pair_file} ")
    with file_transaction(data, out_file) as tx_out_file:
        message = "Aligning %s and %s to the transcriptome." % (fastq_file, pair_file)
        cmd += "| " + postalign.sam_to_sortbam_cl(data, tx_out_file, name_sort=True)
        do.run(cmd.format(**locals()), message)
    data = dd.set_transcriptome_bam(data, out_file)
    return data
def filter_multimappers(align_file, data):
    """
    Filtering a BWA alignment file for uniquely mapped reads, from here:
    https://bioinformatics.stackexchange.com/questions/508/obtaining-uniquely-mapped-reads-from-bwa-mem-alignment

    Keeps reads that are mapped, carry no XA/SA alternative-hit tags and are
    not supplementary alignments; paired input additionally requires properly
    paired reads. Returns the filtered, indexed BAM path (cached if present).
    """
    config = dd.get_config(data)
    # sambamba needs -S for SAM input; BAM input takes no extra flag
    type_flag = "" if bam.is_bam(align_file) else "S"
    base, ext = os.path.splitext(align_file)
    out_file = base + ".unique" + ext
    # restrict to callable/variant regions when the sample defines them
    bed_file = dd.get_variant_regions(data) or dd.get_sample_callable(data)
    bed_cmd = '-L {0}'.format(bed_file) if bed_file else " "
    if utils.file_exists(out_file):
        return out_file
    base_filter = '-F "not unmapped {paired_filter} and [XA] == null and [SA] == null and not supplementary " '
    if bam.is_paired(align_file):
        paired_filter = "and paired and proper_pair"
    else:
        paired_filter = ""
    filter_string = base_filter.format(paired_filter=paired_filter)
    sambamba = config_utils.get_program("sambamba", config)
    num_cores = dd.get_num_cores(data)
    # NOTE: the command template is filled from locals() below, so the local
    # variable names above must match the {placeholders} exactly.
    with file_transaction(out_file) as tx_out_file:
        cmd = ('{sambamba} view -h{type_flag} '
               '--nthreads {num_cores} '
               '-f bam {bed_cmd} '
               '{filter_string} '
               '{align_file} '
               '> {tx_out_file}')
        message = "Removing multimapped reads from %s." % align_file
        do.run(cmd.format(**locals()), message)
    bam.index(out_file, config)
    return out_file
| lbeltrame/bcbio-nextgen | bcbio/ngsalign/bwa.py | Python | mit | 17,449 | [
"BWA"
] | 8bef8e0648eedf78715ae544d3c7baaa675a5d5781f05658c1539383a54806f8 |
#Ver. 0.1.0
#Authors: Dylan Wise & Zach Almon
import urllib.request
import re
import os
import platform
import sys
import string
import html
platformType = platform.system()
#Check All .re Expressions DONE
#Fix Chapter Names DONE
#Make Sure Titles are Good. DONE
#BUG: Manga doesnt have Chapters but is listed by volume only instead:
# (http://bato.to/comic/_/comics/tenshi-no-kiss-r3789) As Example
#BUG: Because some chapter names are Weird (IE. 15.5 or 15v3 or 20 (v2) or Non-Numbers)
# You wont be able to START or END at those chapters. Entered chapters HAVE To be ints
# This is to check that Start < End and also to size the lists according to the Start and/or End
#BUG: Because there can be chapters that arnt numbers There ARE Bugs with poping off the chapters with custom starts and ends because
# each item is converted to floats. This is similar yet different then the above bug and WILL Cause some breaking in the program
def main():
    """Interactive downloader for manga hosted on Bato.to.

    Prompts for a series URL, discovers its English chapters, optionally
    limits the chapter range to a start and/or end chapter, and saves every
    page image under ./Batoto/<title>/Chapter <n>/. Chapters whose pages are
    already fully downloaded are skipped. Loops until the user quits.
    """
    success = False
    currentDirectory = os.getcwd()
    if platformType == 'Windows':
        MASTERdirectoryName = currentDirectory + "\\Batoto"
    else:
        MASTERdirectoryName = currentDirectory + "/Batoto"
    try:
        os.makedirs(MASTERdirectoryName)
    except OSError:
        if not os.path.isdir(MASTERdirectoryName):
            raise
    #MASTERdirectoryName is the Variable that will keep the program downloading
    #Different Manga to the same Batoto Folder
    os.chdir(MASTERdirectoryName)
    #NO SEARCH FEATURE SORRY!
    print('Currently MangaMine2 only supports Bato.to.')
    print()
    print('The URL you are to input below should be the top level page of the')
    print('manga you wish to download')
    print('Ex: http://bato.to/comic/_/comics/seto-no-hanayome-r385 ')
    while success == False:
        # One manga is processed per outer-loop iteration.
        downloadManga = True
        type_one_manga = False
        type_two_manga = False
        print()
        print('Please enter the url of the manga you wish to download (or q to quit): ')
        urlRequest = input('')
        print('\n')
        if urlRequest == 'q':
            return
        try:
            urllibHTML = urllib.request.urlopen(urlRequest).read()
        except:
            print()
            print('Invalid URL!')
            downloadManga = False
        if downloadManga == True:
            # Series title is scraped from the page <title> element.
            Manga_Title = re.findall(r'<title>+(.*?)- Scanlations', str(urllibHTML))
            if len(Manga_Title) == 0:
                print("Title not found. URL or HTML Error.")
                return
            Manga_Title_string = Manga_Title[0]
            Manga_Title_string = Manga_Title_string[:-1]
            Manga_Title_string = re.sub(r'\\x\w{2}', r' ', Manga_Title_string)
            #Python 3.4 Converts '&' Type things to their string equivalent.
            Manga_Title_string = html.unescape(Manga_Title_string)
            #Get rid of Non-Functioning characters for Filenames
            directorySafeName = Manga_Title_string
            directorySafeName = directorySafeName.replace("/", " over ")
            directorySafeName = directorySafeName.replace(":", "")
            directorySafeName = directorySafeName.replace("?", "")
            directorySafeName = directorySafeName.replace("+", " plus ")
            directorySafeName = directorySafeName.replace("\"","'")
            directorySafeName = directorySafeName.replace("%", " Percent ")
            directorySafeName = directorySafeName.replace("<", "")
            directorySafeName = directorySafeName.replace(">", "")
            Manga_Title_string = directorySafeName
            #For any other language on Bato.to change lang_English to whatever matches the language you desire.
            #Then this file *SHOULD* work with your language
            allENGLISHChaps = re.findall(r'lang_English+(.*?)\ title="+', str(urllibHTML))
            if len(allENGLISHChaps) == 0:
                print("Manga has no English Chapters or there was an error reading the HTML!")
            else:
                # The list is in page order (newest first), so the last entry
                # is the first chapter of the series.
                First_chapter_string = allENGLISHChaps[-1]
                First_chapter_address = re.findall(r'href=\"+(.*?)\"', First_chapter_string)
                First_chapter_address_string = First_chapter_address[0]
                try:
                    First_chapter_html = urllib.request.urlopen(First_chapter_address_string).read()
                except:
                    print()
                    print('Trouble Opening Webpage!')
                    downloadManga = False
                if downloadManga == True:
                    #Find which type of manga this manga is. Whether all pages of the chapter are on one page or multiple pages.
                    type_one_padding_right = re.search("<div style=\"text-align:center;\">", str(First_chapter_html))
                    type_two_comic_page = re.search("comic_page", str(First_chapter_html))
                    #THERE IS A THIRD TYPE!?!?! http://bato.to/comic/_/comics/gaussian-blur-r8172
                    #Type one is All images on One Page
                    if type_one_padding_right != None:
                        type_one_manga = True
                    #Type two is All images on seperate pages
                    elif type_two_comic_page != None:
                        type_two_manga = True
                    else:
                        print("There was an error with the Manga Type!")
                        return
                    #This will get the chapter links from the Select options on the chapters first page
                    #There are 2 select options (one at top and one at bottom
                    #They are same so its arbutrary which you pick. I Will be selecting [0]
                    get_Chapters = re.findall(r'250px;">+(.*?)</select>', str(First_chapter_html))
                    chapter_master_string = get_Chapters[0]
                    #Get all chapter links. Last thing in list is an unneeded "selected" string. Pop that off.
                    list_of_Chapter_Links = re.findall(r'\"+(.*?)\"', chapter_master_string)
                    #In this list there may be a "selected". It may or may not be at the end. The loop solves it.
                    #I am 95% sure there will only ever be 1 "selected" per list.
                    #list_of_Chapter_Links.pop(-1)
                    for i in range(len(list_of_Chapter_Links)):
                        if list_of_Chapter_Links[i] == "selected":
                            list_of_Chapter_Links.pop(i)
                            break
                    #Get Numbers of the chapters. Will be "Matched" up to the list_of_Chapter_Links.
                    list_of_Chapter_Numbers_raw = re.findall(r'Ch\.+(.*?)<', chapter_master_string)
                    list_of_chapter_names_refined = []
                    # EXCEPTION HERE (http://bato.to/comic/_/comics/tenshi-no-kiss-r3789)
                    # GOES BY VOLUME NOT CHAPTER. BY THIS METHOD IT WONT WORK
                    #Some chapters may be like "230: Title of Chapter" Some may be "145"
                    for i in range(len(list_of_Chapter_Numbers_raw)):
                        temp_list = re.findall('^(.*?):', list_of_Chapter_Numbers_raw[i])
                        if len(temp_list) == 0:
                            list_of_chapter_names_refined.append(list_of_Chapter_Numbers_raw[i])
                        elif len(temp_list) == 1:
                            list_of_chapter_names_refined.append(temp_list[0])
                        else:
                            print("Manga Chapter Name Error!")
                            return
                    # list_of_Chapter_Links Has Links -Has Duplicates at this point
                    # list_of_chapter_names_refined Has Names -Has Duplicates at this point
                    # De-duplicate (multiple scanlation groups can release the
                    # same chapter) keeping the first occurrence of each name.
                    list_of_Chapter_Links_Final = []
                    list_of_Chapter_Numbers_Final = []
                    for i in range(len(list_of_chapter_names_refined)):
                        if list_of_chapter_names_refined[i] in list_of_Chapter_Numbers_Final:
                            pass
                        else:
                            list_of_Chapter_Numbers_Final.append(list_of_chapter_names_refined[i])
                            list_of_Chapter_Links_Final.append(list_of_Chapter_Links[i])
                    list_of_Chapter_Links_Final.reverse()
                    list_of_Chapter_Numbers_Final.reverse()
                    fullDownload = False
                    chapter_found = False
                    custom_start = False
                    custom_end = False
                    chapter_to_start_from = 0
                    chapter_to_end_at = 0
                    while 1:
                        print('Do you wish to download the entire manga?[y/n], or [q] to quit.')
                        continueChoiceFullDownload = input('')
                        print('\n')
                        if continueChoiceFullDownload == 'y':
                            fullDownload = True
                            break
                        elif continueChoiceFullDownload == 'n':
                            while 1:
                                print('Do you wish to start download from a certain chapter?[y/n], or [q] to quit.')
                                print('By Choosing no the entire manga will download')
                                continueChoiceCustomChap = input('')
                                print('\n')
                                try:
                                    if continueChoiceCustomChap == 'y':
                                        print('Please enter the chapter you wish to start from')
                                        chapNum = input('')
                                        print('\n')
                                        for i in range(len(list_of_Chapter_Numbers_Final)):
                                            if chapNum == list_of_Chapter_Numbers_Final[i]:
                                                chapter_found = True
                                                custom_start = True
                                                chapter_to_start_from = int(list_of_Chapter_Numbers_Final[i])
                                                break
                                        if chapter_found == False:
                                            print('Invalid chapter number! Maybe the chapter is missing?')
                                            print()
                                        else:
                                            print('Chapter Found!')
                                            print('\n')
                                            #May use chapter_found again for the end point
                                            chapter_found = False
                                            break
                                    elif continueChoiceCustomChap == 'n':
                                        fullDownload = True
                                        break
                                    elif continueChoiceCustomChap == 'q':
                                        return
                                    else:
                                        print('Invalid Option!')
                                        print()
                                except ValueError:
                                    print('Invalid chapter number!')
                                    print('Numbers must be whole numbers. You cannot start at a half chapter')
                                    print('\t Or at a non-numerical chapter.')
                                    print('Please enter a Real Number!')
                                    print('\n')
                            if fullDownload == False:
                                while 1:
                                    print('Do you wish to end the download at a certain chapter?[y/n], or [q] to quit.')
                                    print('By Choosing no the entire manga will download from the start location')
                                    continueChoiceCustomChap = input('')
                                    print('\n')
                                    if continueChoiceCustomChap == 'y':
                                        print('Please enter the chapter you wish to end at')
                                        chapNum = input('')
                                        print('\n')
                                        #Check if number entered is actually a number or not.
                                        try:
                                            temporary_int = int(chapNum)
                                            #END CHAPTER MUST BE BIGGER OR EQUAL TO THAN START CHAPTER
                                            if temporary_int < chapter_to_start_from:
                                                print('Sorry, Number must be greater than the Start chapter, which is:', chapter_to_start_from)
                                                print('Invalid Option!')
                                                print()
                                            else:
                                                for i in range(len(list_of_Chapter_Numbers_Final)):
                                                    if chapNum == list_of_Chapter_Numbers_Final[i]:
                                                        chapter_found = True
                                                        custom_end = True
                                                        chapter_to_end_at = chapNum
                                                        break
                                                if chapter_found == False:
                                                    print('Invalid chapter number! Maybe the chapter is missing?')
                                                    print()
                                                else:
                                                    print('Chapter Found!')
                                                    print('\n')
                                                    break
                                        except ValueError:
                                            print('Invalid chapter number!')
                                            print('Numbers must be whole numbers. You cannot end at a half chapter')
                                            print('\t or a non-numerical chapter.')
                                            print('Please enter a Real Number!')
                                            print('\n')
                                    elif continueChoiceCustomChap == 'n':
                                        break
                                    elif continueChoiceCustomChap == 'q':
                                        return
                                    else:
                                        print('Invalid Option!')
                                        print()
                            #At the end of the elif choice == no
                            break
                        elif continueChoiceFullDownload == 'q':
                            return
                        else:
                            print('Invalid Option!')
                    #For Reference:
                    #If fullDownload = True
                    #The user wants to download From chapter 1 to the end (Whatever is available)
                    #If custom_start = True Than fullDownload == False
                    #The user wants to download from The start chapter which was Found and stored in chapter_to_start_from
                    #Does not Need custom_end to be True. If it isnt then it will download until the end of manga
                    #If custom_end = True Than custom_start == True AND fullDownload == False
                    #The user wants to download from The start chapter which was Found and stored in chapter_to_start_from
                    #The user also wants to download until an end chapter which was Found and stored in chapter_to_end_at
                    currentDirectory = MASTERdirectoryName
                    if platformType == 'Windows':
                        manga_directory_name = currentDirectory + "\\" + Manga_Title_string
                    else:
                        manga_directory_name = currentDirectory + "/" + Manga_Title_string
                    try:
                        os.makedirs(manga_directory_name)
                    except OSError:
                        if not os.path.isdir(manga_directory_name):
                            raise
                    os.chdir(manga_directory_name)
                    first_page_of_first_chapter = False
                    #This if, elif, and elif are to set which chapters are to be downloaded.
                    if fullDownload == True:
                        first_page_of_first_chapter = True
                    #If you only have a start location, pop off chapter numbers/links until you hit that chapter
                    elif custom_start == True and custom_end == False:
                        for i in range(len(list_of_Chapter_Numbers_Final)):
                            try:
                                float_value = float(list_of_Chapter_Numbers_Final[0])
                                float_value_start = float(chapter_to_start_from)
                                if float_value < float_value_start:
                                    list_of_Chapter_Links_Final.pop(0)
                                    list_of_Chapter_Numbers_Final.pop(0)
                                else:
                                    break
                            except ValueError:
                                list_of_Chapter_Links_Final.pop(0)
                                list_of_Chapter_Numbers_Final.pop(0)
                    #Do same As before But will need to pop off end as well
                    #I found it easier to reverse then do down the list in decending order
                    #And pop off from begining until the end chapter is reached.
                    #Then reverse again.
                    elif custom_start == True and custom_end == True:
                        for i in range(len(list_of_Chapter_Numbers_Final)):
                            try:
                                float_value = float(list_of_Chapter_Numbers_Final[0])
                                float_value_start = float(chapter_to_start_from)
                                if float_value < float_value_start:
                                    list_of_Chapter_Links_Final.pop(0)
                                    list_of_Chapter_Numbers_Final.pop(0)
                                else:
                                    break
                            except ValueError:
                                list_of_Chapter_Links_Final.pop(0)
                                list_of_Chapter_Numbers_Final.pop(0)
                        list_of_Chapter_Numbers_Final.reverse()
                        list_of_Chapter_Links_Final.reverse()
                        for i in range(len(list_of_Chapter_Numbers_Final)):
                            try:
                                float_value = float(list_of_Chapter_Numbers_Final[0])
                                float_value_end = float(chapter_to_end_at)
                                if float_value > float_value_end:
                                    list_of_Chapter_Links_Final.pop(0)
                                    list_of_Chapter_Numbers_Final.pop(0)
                                else:
                                    break
                            except ValueError:
                                list_of_Chapter_Links_Final.pop(0)
                                list_of_Chapter_Numbers_Final.pop(0)
                        list_of_Chapter_Numbers_Final.reverse()
                        list_of_Chapter_Links_Final.reverse()
                    #Main Loop for Downloading Images.
                    if fullDownload == True or custom_start == True:
                        for i in range(len(list_of_Chapter_Numbers_Final)):
                            first_page_of_each_chapter = True
                            chapter_number = list_of_Chapter_Numbers_Final[i]
                            chapter_link = list_of_Chapter_Links_Final[i]
                            if platformType == 'Windows':
                                chapDirectoryName = manga_directory_name + "\\Chapter " + chapter_number
                            else:
                                chapDirectoryName = manga_directory_name + "/Chapter " + chapter_number
                            try:
                                os.makedirs(chapDirectoryName)
                            except OSError:
                                if not os.path.isdir(chapDirectoryName):
                                    raise
                            os.chdir(chapDirectoryName)
                            print("Downloading Chapter", chapter_number)
                            #I ALREADY HAVE THE FIRST PAGE OF FIRST CHAPTER
                            #Check Then move on makes it a bit quicker but not by much.
                            if first_page_of_first_chapter == True:
                                #This variable is set to False then ONLY set to true during Full Downloads
                                #when the first chapter is Gurenteed to be downloaded
                                first_page_of_first_chapter = False
                                urllibHTML = First_chapter_html
                            else:
                                urllibHTML = urllib.request.urlopen(list_of_Chapter_Links_Final[i]).read()
                            if type_one_manga == True:
                                get_images = re.findall(r'text-align:center;">+(.*?)</div><div', str(urllibHTML))
                                get_images_master_string = get_images[0]
                                image_file_name_list = re.findall(r"<img src=\\'(.*?)\\'", str(get_images_master_string))
                                Amount_of_pages = len(image_file_name_list)
                                for j in range(len(image_file_name_list)):
                                    if first_page_of_each_chapter == True:
                                        first_page_of_each_chapter = False
                                        numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
                                        if numOfFileInCWD == Amount_of_pages:
                                            break
                                    image_file_name = image_file_name_list[j]
                                    image_file_extension_list = re.findall(r'(\.\D[^\.]+)', image_file_name)
                                    image_file_extension = image_file_extension_list[-1]
                                    imageName = "Page " + str(j+1) + image_file_extension
                                    print("Downloading Page %d" % (j+1), end="", flush=True)
                                    print("\r", end="", flush=True)
                                    fileExists = os.path.isfile(imageName)
                                    #If file does not already exist, opens a file, writes image binary data to it and closes
                                    if fileExists == False:
                                        rawImage = urllib.request.urlopen(image_file_name).read()
                                        fout = open(imageName, 'wb')
                                        fout.write(rawImage)
                                        fout.close()
                            elif type_two_manga == True:
                                #Get the pages between "<id..." and "</se..."
                                get_Pages = re.findall(r'id="page_select" onchange="window.location=this.value;">+(.*?)</select></li>', str(urllibHTML))
                                #There will be Two found
                                Pages_master_string = get_Pages[0]
                                #Get all page links. Second thing in list is an unneeded "selected" string. Loop to get rid
                                list_of_page_Links = re.findall(r'\"+(.*?)\"', Pages_master_string)
                                list_of_page_links_final = []
                                #Loop to rid of the "Selected" part of list
                                for j in range(len(list_of_page_Links)):
                                    if list_of_page_Links[j] != "selected":
                                        list_of_page_links_final.append(list_of_page_Links[j])
                                Amount_of_pages = len(list_of_page_links_final)
                                for j in range(len(list_of_page_links_final)):
                                    try:
                                        print("Downloading Page %d" % (j+1), end="", flush=True)
                                        print("\r", end="", flush=True)
                                        #Check for First page. Checks to see if anything is already downloaded
                                        if first_page_of_each_chapter == True:
                                            first_page_of_each_chapter = False
                                            numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
                                            if numOfFileInCWD == Amount_of_pages:
                                                break
                                            #At this point There will be something you need to download.
                                            #Since we already have the HTML for the first page of EACH Chapter
                                            #We dont need to waste time to read that again, set it here.
                                            page_urllibHTML = urllibHTML
                                        else:
                                            page_urllibHTML = urllib.request.urlopen(list_of_page_links_final[j]).read()
                                        #Get Image URL
                                        image_file_name_list = re.findall(r'comic_page" style="max-width: 100%;" src="(.*?)"', str(page_urllibHTML))
                                        image_file_name = image_file_name_list[0]
                                        #CHECK EXTENSION. Bato.to Could use .png or .jpg or .jpeg
                                        image_file_extension_list = re.findall(r'(\.\D[^\.]+)', image_file_name)
                                        image_file_extension = image_file_extension_list[-1]
                                        imageName = "Page " + str(j+1) + image_file_extension
                                        fileExists = os.path.isfile(imageName)
                                        #If file does not already exist, opens a file, writes image binary data to it and closes
                                        if fileExists == False:
                                            rawImage = urllib.request.urlopen(image_file_name).read()
                                            fout = open(imageName, 'wb')
                                            fout.write(rawImage)
                                            fout.close()
                                    except:
                                        print("Invalid URL Error, or Connection Timeout!")
                                        return
                            else:
                                print("Manga Type Error!")
                                return
        # Offer another download; answering 'n' ends the program.
        while 1:
            print('\n')
            print('Do you wish to download another manga?[y/n]')
            continueChoice = input('')
            if continueChoice == 'y':
                break
            elif continueChoice == 'n':
                success = True
                break
            else:
                print('Invalid Option!')
main()
| DylanTheWise/MangaMine | MangaMine (Bato).py | Python | gpl-2.0 | 29,387 | [
"Gaussian"
] | 6c4b9028489f8b8c62f37ba2cb472010575f700d1d60f2373723dc93ee699308 |
#!/usr/bin/python
# script extract a particular genome region
# version 3 - 4-12-2012 to simplify the previous 27-12-2011 version with "usine a gaz" class
# Usage bowtie_window_extraction.py <bowtie input> <geneID> <Upstream_coordinate> <Downstream_coordinate> <output> <minsize> <maxsize> <output_format>
import sys, subprocess
from smRtools import antipara
# Positional command line arguments (see usage line above).
# geneID == "item" is the sentinel meaning "no location filtering".
geneID = sys.argv[2]
Upstream_coordinate = int(sys.argv[3])
Downstream_coordinate = int(sys.argv[4])
minsize = int(sys.argv[6])
maxsize = int(sys.argv[7])
F = open (sys.argv[1], "r") # F is the bowtie output taken as input
OUT = open (sys.argv[5], "w")
output_format = sys.argv[8]
def selectorformat(format, file_handler):
  # Return a printing closure for the requested output format:
  # "bowtie" writes a tab-separated bowtie-like line; "fasta" writes
  # header + sequence, reverse-complementing minus-strand reads via antipara.
  if format == "bowtie":
    print "bowtie format"
    def printformat(header, polarity, geneID, coordinate, sequence, file_handler):
      print >> file_handler, "%s\t%s\t%s\t%s\t%s" % (header, polarity, geneID, coordinate, sequence)
  elif format == "fasta":
    print "fasta format"
    def printformat(header, polarity, geneID, coordinate, sequence, file_handler):
      if polarity == "-": sequence = antipara(sequence)
      print >> file_handler, ">%s\n%s" % (header, sequence)
  return printformat
#printing format selection
theprint = selectorformat(output_format, OUT)
def selectorfilter(option):
  # Return a filtering closure that decides whether an alignment is kept
  # and, if kept, prints it through the previously selected 'theprint'.
  # Options: "by_location", "by_size", "by_location&size".
  if option == "by_location":
    print "choosing by_location"
    def filter(header, polarity, running_geneID, running_coor, sequence, file_handler):
      if geneID != running_geneID:
        return
      if polarity == "-":
        original_coor = running_coor
        # for minus-strand reads, test the coordinate of the read's far end
        running_coor = running_coor + len(sequence) -1
      else: original_coor = running_coor
      if Upstream_coordinate <=running_coor <= Downstream_coordinate :
        theprint (header, polarity, running_geneID, original_coor, sequence, file_handler)
  if option == "by_size":
    print "choosing by_size"
    def filter(header, polarity, running_geneID, running_coor, sequence, file_handler):
      running_size = len(sequence)
      if minsize <= running_size <= maxsize:
        theprint (header, polarity, running_geneID, running_coor, sequence, file_handler)
  if option == "by_location&size":
    print "choosing by_location&size"
    def filter(header, polarity, running_geneID, running_coor, sequence, file_handler):
      if geneID != running_geneID:
        return
      if polarity == "-":
        original_coor = running_coor
        running_coor = running_coor + len(sequence) -1
      else:
        original_coor = running_coor
      if not(Upstream_coordinate <= running_coor <= Downstream_coordinate):
        return
      running_size = len (sequence)
      if not(minsize <= running_size <= maxsize):
        return
      theprint (header, polarity, running_geneID, original_coor, sequence, file_handler)
  return filter
### filter selection
# geneID == "item" means no location filter was requested; a non-zero
# maxsize additionally enables size filtering.
if geneID!="item" and maxsize:
  thefilter=selectorfilter("by_location&size")
elif geneID!="item":
  thefilter=selectorfilter("by_location")
else:
  thefilter=selectorfilter("by_size")
### filtering
# Each bowtie line: read name, strand, reference name, 0-based offset, sequence.
for line in F:
  fields = line.split()
  header = fields[0]
  polarity = fields[1]
  running_geneID = fields[2]
  coordinate = int(fields[3])+1 # to shift to 1-based coordinates of genome browser and humans ! (bowtie is 0-based)
  sequence = fields[4]
  thefilter(header, polarity, running_geneID, coordinate, sequence, OUT)
F.close()
OUT.close()
| JuPeg/tools-artbio | unstable/local_tools/bowtie_window_extraction.py | Python | mit | 3,382 | [
"Bowtie"
] | 7d9a04763a3988c7cb9db4eac286e3ea5000378b3e92f43e36de92d58c0b0635 |
"""
This is a very simple service performance test. It calls the service with a message. The service
return the same message.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import time
from DIRAC.Core.Base.Client import Client
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
cl = Client(url="Framework/SystemAdministrator")
class Transaction(object):
    """One timed echo call against the Framework/SystemAdministrator service."""

    def __init__(self):
        # Timing results collected here are read by the load-test harness.
        self.custom_timers = {}

    def run(self):
        """Call the service once and record the elapsed wall-clock time."""
        started = time.time()
        result = cl.echo("simple test")
        if not result["OK"]:
            print("ERROR", result["Message"])
        elapsed = time.time() - started
        # both timers intentionally record the same round-trip duration
        self.custom_timers["Service_ResponseTime"] = elapsed
        self.custom_timers["Service_Echo"] = elapsed
if __name__ == "__main__":
    # Run a single timed transaction standalone (outside the test harness).
    trans = Transaction()
    trans.run()
    print(trans.custom_timers)
| ic-hep/DIRAC | tests/Performance/Service/test_scripts/service.py | Python | gpl-3.0 | 946 | [
"DIRAC"
] | 2ddcdfec8dbf1ac956acfc609fd7fc335274ce3661914cbcad1a8aeca43d7180 |
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from floatdelegate import FloatDelegate
class CurrencyDelegate(FloatDelegate):
    """Custom delegate for currency values.

    Currently identical in behavior to :class:`FloatDelegate`; kept as a
    distinct class so currency-specific rendering (symbol, rounding) can be
    added later without affecting plain float fields.
    """
    pass
| kurtraschke/camelot | camelot/view/controls/delegates/currencydelegate.py | Python | gpl-2.0 | 1,203 | [
"VisIt"
] | 562f2b582b8d5f373258dfbe5e2f507f09820709b1995254234a8c780f9ce9bc |
#!/usr/bin/env python2.7
# coding=utf-8
""" _ + _ . .
|_||!_' Y
| ||._! |
Simple GR1 synthesizer from
[AIGER-like](http://fmv.jku.at/aiger/) GR1 format.
The format is described [here](https://github.org/5nizza/spec-framework).
Gmail me: ayrat.khalimov
"""
import argparse
from logging import Logger
import pycudd
from pycudd import DdManager
from pycudd import DdNode
import aiger_swig.aiger_wrap as aiglib
from aiger_swig.aiger_wrap import *
from ansistrm import setup_logging
# Process exit codes reported by the synthesizer.
EXIT_STATUS_REALIZABLE = 10
EXIT_STATUS_UNREALIZABLE = 20
# Module-level state, populated during setup (parse_into_spec etc.).
spec = None   # type: aiger
cudd = None   # type: DdManager
logger = None   # type: Logger
# To cache equal Sub-BDDs
cache_dict = dict()
# Transition function of the circuit: latch literal -> BDD of its next value
transition_function = dict()
# To translate the indexing between AIGER literals and CUDD variable indices
aiger_by_cudd = dict()
cudd_by_aiger = dict()
def is_negated(l):
    """True when the AIGER literal carries the negation bit (odd literal)."""
    return bool(l & 1)
def strip_lit(l):
    """Return the literal with its negation (lowest) bit cleared."""
    return (l >> 1) << 1
def iterate_latches():
    """Yield each latch symbol of the global AIGER spec, in index order."""
    idx = 0
    while idx < int(spec.num_latches):
        yield get_aiger_symbol(spec.latches, idx)
        idx += 1
def parse_into_spec(aiger_file_name):
    """Read an AIGER file into the global ``spec`` and validate its property format."""
    logger.info('parsing..')
    global spec
    #: :type: aiger
    spec = aiger_init()
    err = aiger_open_and_read_from_file(spec, aiger_file_name)
    assert not err, err
    # assert the formats
    # either the legacy single-output format or the bad/justice/fairness
    # sections must be present, but never both at once
    assert (spec.num_outputs == 1) ^ (spec.num_bad or spec.num_justice or spec.num_fairness), 'mix of two formats'
    assert spec.num_outputs + spec.num_justice + spec.num_bad >= 1, 'no properties'
    assert spec.num_justice <= 1, 'not supported'
    assert spec.num_fairness <= 1, 'not supported'
def compose_indexing_translation():
    """Fill the AIGER<->CUDD index translation tables.

    Inputs take CUDD indices [0, num_inputs), latches follow at
    [num_inputs, num_inputs+num_latches). The remaining CUDD indices
    (used for primed copies) map back to AIGER literal 0.
    """
    for idx in range(spec.num_inputs):
        lit = get_aiger_symbol(spec.inputs, idx).lit
        cudd_by_aiger[lit] = idx
        aiger_by_cudd[idx] = lit
    for idx in range(spec.num_latches):
        lit = get_aiger_symbol(spec.latches, idx).lit
        cudd_idx = idx + spec.num_inputs
        cudd_by_aiger[lit] = cudd_idx
        aiger_by_cudd[cudd_idx] = lit
    total = spec.num_inputs + spec.num_latches
    for cudd_idx in range(total, 2 * total + 1):
        aiger_by_cudd[cudd_idx] = 0
def get_substitution():
    """Build the DdArray used by VectorCompose: latch vars map to their
    transition functions, input vars map to themselves."""
    n_vars = spec.num_inputs + spec.num_latches
    substitution = pycudd.DdArray(n_vars)
    for idx in range(n_vars):
        if aiger_is_latch(spec, aiger_by_cudd[idx]):
            substitution.Push(transition_function[aiger_by_cudd[idx]])
        else:
            substitution.Push(cudd.ReadVars(idx))
    return substitution
def compose_transition_vector():
    """Fill the global transition_function: latch literal -> BDD of its next value."""
    logger.info('compose_transition_vector...')
    transition_function.update((latch.lit, get_bdd_for_sign_lit(latch.next))
                               for latch in iterate_latches())
    return transition_function
def get_bdd_for_sign_lit(lit):
    """Translate a (possibly negated) AIGER literal into a BDD.

    Inputs/latches map through cudd_by_aiger; AND gates are built
    recursively; literal 0 is the constant False.
    """
    stripped = strip_lit(lit)
    # classify before the zero check, matching the original evaluation order
    inp, latch, gate = get_lit_type(stripped)
    if stripped == 0:
        bdd = cudd.Zero()
    elif inp or latch:
        bdd = cudd.IthVar(cudd_by_aiger[stripped])
    else:  # AND gate: conjunction of both operand BDDs
        bdd = get_bdd_for_sign_lit(int(gate.rhs0)) & get_bdd_for_sign_lit(int(gate.rhs1))
    return ~bdd if is_negated(lit) else bdd
def get_lit_type(stripped_lit):
    """Classify a stripped literal: returns (input, latch, and_gate) handles,
    each None/falsy when the literal is not of that kind."""
    return (aiger_is_input(spec, stripped_lit),
            aiger_is_latch(spec, stripped_lit),
            aiger_is_and(spec, stripped_lit))
# FIXME: near-duplicate of `get_bdd_for_sign_lit` — consider merging them.
def get_bdd_for_value(lit):  # lit is variable index with sign
    """
    Translate a (possibly negated) AIGER literal into a BDD over CUDD vars.

    Inputs and latches are mapped through ``cudd_by_aiger`` (built by
    compose_indexing_translation); AND gates are built recursively; only
    AIGER inputs and latches have CUDD equivalents.

    NOTE(review): the original docstring claimed primed variables live at
    "CUDD index + 1" — that matches get_primed_variable_as_bdd but not the
    cudd_by_aiger translation used here; confirm which indexing scheme is
    the current one.
    """
    stripped_lit = strip_lit(lit)
    if stripped_lit == 0:
        res = cudd.Zero()
    else:
        input_, latch_, and_ = get_lit_type(stripped_lit)
        if input_ or latch_:
            res = cudd.IthVar(cudd_by_aiger[stripped_lit])
        elif and_:  # of type 'aiger_and'
            arg1 = get_bdd_for_value(int(and_.rhs0))
            arg2 = get_bdd_for_value(int(and_.rhs1))
            res = arg1 & arg2
        else:
            assert 0, 'should be impossible: if it is output then it is still either latch or and'
    if is_negated(lit):
        res = ~res
    return res
def get_unprimed_variable_as_bdd(lit):
    """BDD of the unprimed CUDD variable: its index is the stripped literal."""
    return cudd.IthVar(strip_lit(lit))
def get_primed_variable_as_bdd(lit):
    """BDD of the primed copy of a variable.

    Odd CUDD indices are never used as names of latches/inputs, so
    ``stripped_lit + 1`` is free to denote the primed version.
    """
    return cudd.IthVar(strip_lit(lit) + 1)
def get_cube(variables):
    """Conjoin a list of variable BDDs into a single cube.

    An empty list yields the constant One (the neutral element of AND).
    """
    cube = cudd.One()
    for var_bdd in variables:
        cube &= var_bdd
    return cube
def _get_bdd_vars(filter_func):
    """BDDs of all AIGER inputs whose (whitespace-stripped) name passes
    ``filter_func``."""
    selected = []
    for idx in range(spec.num_inputs):
        symbol = get_aiger_symbol(spec.inputs, idx)
        if filter_func(symbol.name.strip()):
            selected.append(get_bdd_for_sign_lit(symbol.lit))
    return selected
def get_controllable_vars_bdds():
    """BDDs of inputs whose names start with 'controllable'."""
    def is_controllable(name):
        return name.startswith('controllable')
    return _get_bdd_vars(is_controllable)
def get_uncontrollable_vars_bdds():
    """BDDs of inputs whose names do NOT start with 'controllable'."""
    def is_uncontrollable(name):
        return not name.startswith('controllable')
    return _get_bdd_vars(is_uncontrollable)
def get_all_latches_as_bdds():
    """BDDs of the current-state variables of all latches."""
    return [get_bdd_for_value(latch.lit) for latch in iterate_latches()]
def sys_predecessor(dst_bdd, env_bdd, sys_bdd):
    """
    Calculate controllable predecessor of dst
    ∀i ∃o: env(t,i,o) -> [ sys(t,i,o) & ∃t' tau(t,i,o,t') & dst(t') ]
    :return: BDD representation of the predecessor states
    """
    # Use VectorCompose instead of prime variables
    # (substituting each latch by its transition function computes
    #  ∃t': tau(t,i,o,t') & dst(t') in a single operation, so no primed
    #  copies need to be quantified away)
    dst_prime = dst_bdd.VectorCompose(get_substitution())
    E_tn_tau_and_dst = dst_prime
    sys_and_E_tn_tau_and_dst = sys_bdd & E_tn_tau_and_dst
    # the implication env -> X is encoded as ~env | X
    env_impl_sys_and_tau = ~env_bdd | sys_and_E_tn_tau_and_dst
    # ∃o: env -> (sys & ∃t' tau(t,i,t',o)):
    out_vars_cube = get_cube(get_controllable_vars_bdds())
    E_o_implication = env_impl_sys_and_tau.ExistAbstract(out_vars_cube)
    # ∀i ∃o: inv -> ..
    inp_vars_cube = get_cube(get_uncontrollable_vars_bdds())
    A_i_E_o_implication = E_o_implication.UnivAbstract(inp_vars_cube)
    return A_i_E_o_implication
def calc_win_region(env_bdd, sys_bdd,
                    fair_bdd, just_bdd):
    """
    The mu-calculus formula for 1-Streett is

    gfp.Z lfp.Y gfp.X [ just & Cpre(Z') | Cpre(Y') | !fair & Cpre(X') ]

    Recall that the mu-calculus formula for Buechi is

    gfp.Z lfp.Y [ just & Cpre(Z') | Cpre(Y') ]

    What 1-Streett formula does is:
    internal lfp.Y computes:
      0. Y0 = 0
      1. Y1 = just, lassos that fall out into just
      2. Y2 = just, lassos that fall out into just | Cpre(Y1)
      3. Y3 = just, lassos that fall out into just | Cpre(Y2)
      ...
    One invariant is:
    from Y[r] sys either reaches just via path that visits <r fair states,
    or loops in !fair forever except possibly for <r moments where it visits a fair state.

    The external gfp.Z is decreasing, and is somewhat similar to Buechi win set computation:
    it gradually removes states from which we cannot visit just once, or twice, thrice...
    and it accounts for the possibility to end in !fair lassos.

    See also: [notes/1-streett-pair-mu-calculus.jpg](gfp.Y calculation)

    :return: Z:bdd, Y:list(increasing)
    """
    logger.info('calc_win_region..')
    # controllable predecessor with env/sys fixed
    Cpre = lambda dst: sys_predecessor(dst, env_bdd, sys_bdd)
    Z = cudd.One()
    prevZ = None
    while Z != prevZ:  # outer gfp: iterate from One down to the fixpoint
        Ys = list()
        Y = cudd.Zero()
        prevY = None
        while Y != prevY:  # middle lfp: Ys collects the increasing chain
            Ys.append(Y)
            Xs = list()
            X = cudd.One()
            prevX = None
            while X != prevX:  # inner gfp
                Xs.append(X)
                prevX = X
                X = just_bdd & Cpre(Z) | Cpre(Y) | ~fair_bdd & Cpre(prevX)
            prevY = Y
            Y = X  # the inner fixpoint becomes the next Y
        prevZ = Z
        Z = Y  # the middle fixpoint becomes the next Z
    return Z, Ys
def get_nondet_strategy(Z_bdd, Ys,
                        env_bdd, sys_bdd,
                        fair_bdd, just_bdd):
    """
    The strategy extraction is:

    - rho1 = just & sys(t,i,o) & Z(t')

    - rho2 = OR{r>1}: Y[r]&~Y[r-1] & sys(t,i,o) & Y[r-1](t')
      (Y[0] = 0,
       Y[1] = lfp.X [just & Cpre(Z') | !fair & Cpre(X')],
       and we take care of this in rho1 or in rho3)

    - rho3 = OR{r}: Y[r]&~Y[r-1] & !fair & sys(t,i,o) & Y[r](t')
      Note: we cannot go higher >r, because this does not guarantee: GFfair -> GFjust
      (we could have GF fair and never visit just)
      (recall that Y[r] may contain fair states)

    - strategy = env(t,i,o) & Z(t) -> ∃t': trans(t,i,o,t') & rho1(t,i,o,t')|rho2(t,i,o,t')|rho3(t,i,o,t')

    :return: non deterministic strategy bdd
    :note: The strategy is not-deterministic -- determinization step is done later.
    """
    logger.info('get_nondet_strategy..')
    # TODO: optimize for special cases: safety, buechi (hangs on the huffman example)
    assert_increasing(Ys)
    # onion(i) is the i-th "ring": states first reached at iteration i of lfp.Y
    onion = lambda i: Ys[i] & ~Ys[i-1] if (i > 0) else Ys[i]
    # VectorCompose substitutes latches by their transition functions,
    # i.e. evaluates the target set in the primed (next) state
    rho1 = just_bdd & sys_bdd & Z_bdd.VectorCompose(get_substitution())
    rho2 = cudd.Zero()
    for r in range(2, len(Ys)):
        rho2 |= onion(r) & sys_bdd & Ys[r-1].VectorCompose(get_substitution())
    rho3 = cudd.Zero()
    for r in range(0, len(Ys)):
        rho3 |= onion(r) & ~fair_bdd & sys_bdd & Ys[r].VectorCompose(get_substitution())
    # implication (env & Z) -> (rho1|rho2|rho3) written with negations
    strategy = ~env_bdd | ~Z_bdd | (rho1 | rho2 | rho3)
    return strategy
def compose_init_state_bdd():
    """BDD of the initial state: every latch holds zero."""
    logger.info('compose_init_state_bdd..')
    init_bdd = cudd.One()
    for latch in iterate_latches():
        init_bdd &= ~get_bdd_for_sign_lit(latch.lit)
    return init_bdd
def extract_output_funcs(non_det_strategy_bdd):
    """
    From a given non-deterministic strategy (the set of triples `(x,i,o)`),
    for each output variable `o`, calculate the set of pairs `(x,i)` where `o` will hold.
    There are different ways -- here we use cofactor-based approach.

    :return: dictionary `controllable_variable_bdd -> func_bdd`
    """
    logger.info('extract_output_funcs..')
    output_models = dict()
    controls = set(get_controllable_vars_bdds())
    # build list with all variables
    all_vars = get_uncontrollable_vars_bdds()
    all_vars.extend(get_all_latches_as_bdds())
    for c in get_controllable_vars_bdds():
        logger.info('getting output function for ' +
                    aiger_is_input(spec, aiger_by_cudd[c.NodeReadIndex()]).name)
        # quantify away the other outputs: any choice for them is fine
        others = controls.difference({c})
        if others:
            others_cube = get_cube(others)
            c_arena = non_det_strategy_bdd.ExistAbstract(others_cube)  # type: DdNode
        else:
            c_arena = non_det_strategy_bdd
        # c_arena.PrintMinterm()
        can_be_true = c_arena.Cofactor(c)  # states (x,i) in which c can be true
        can_be_false = c_arena.Cofactor(~c)
        # We need to intersect with can_be_true to narrow the search.
        # Negation can cause including states from !W
        must_be_true = (~can_be_false) & can_be_true  # type: DdNode
        must_be_false = (~can_be_true) & can_be_false  # type: DdNode
        # Optimization 'variable elimination': drop a variable if doing so
        # keeps the must-true and must-false sets disjoint
        for v in all_vars:
            must_be_true_prime = must_be_true.ExistAbstract(v)
            must_be_false_prime = must_be_false.ExistAbstract(v)
            # (must_be_false_prime & must_be_true_prime) should be UNSAT
            if (must_be_false_prime & must_be_true_prime) == cudd.Zero():
                must_be_true = must_be_true_prime
                must_be_false = must_be_false_prime
        care_set = (must_be_true | must_be_false)
        # We use 'restrict' operation, but we could also just do:
        #   c_model = care_set -> must_be_true
        # but this is (presumably) less efficient (in time? in size?).
        # (intuitively, because we always set c_model to 1 if !care_set,
        #  but we could set it to 0)
        #
        # The result of restrict operation satisfies:
        #   on c_care_set: c_must_be_true <-> must_be_true.Restrict(c_care_set)
        c_model = must_be_true.Restrict(care_set)
        output_models[c.NodeReadIndex()] = c_model
        # substitute the found function into the strategy so later outputs
        # are consistent with the choice made for this one
        non_det_strategy_bdd = non_det_strategy_bdd.Compose(c_model, c.NodeReadIndex())
    return output_models
def get_inv_err_f_j_bdds():
    """Build the invariant, error, fairness and justice BDDs from the spec.

    At most one justice and one fairness property are supported; absent
    properties default to One (J/F), all constraints are conjoined into the
    invariant, all bad states disjoined into the error set.

    :return: tuple (inv_bdd, err_bdd, f_bdd, j_bdd)
    """
    assert spec.num_justice <= 1
    assert spec.num_fairness <= 1
    j_bdd = get_bdd_for_value(aiglib.get_justice_lit(spec, 0, 0)) \
        if spec.num_justice == 1 \
        else cudd.One()
    f_bdd = get_bdd_for_value(aiglib.get_aiger_symbol(spec.fairness, 0).lit) \
        if spec.num_fairness == 1 \
        else cudd.One()
    inv_bdd = cudd.One()
    if spec.num_constraints > 0:
        for i in range(spec.num_constraints):
            bdd = get_bdd_for_value(aiglib.get_aiger_symbol(spec.constraints, i).lit)
            inv_bdd = inv_bdd & bdd
    err_bdd = cudd.Zero()
    if spec.num_bad > 0:
        for i in range(spec.num_bad):
            bdd = get_bdd_for_value(aiglib.get_aiger_symbol(spec.bad, i).lit)
            err_bdd = err_bdd | bdd
    elif spec.num_outputs == 1:
        # legacy AIGER format: the single output denotes the bad states
        err_bdd = get_bdd_for_value(aiglib.get_aiger_symbol(spec.outputs, 0).lit)
    return inv_bdd, err_bdd, f_bdd, j_bdd
def assert_increasing(attractors):  # TODOopt: debug only
    """Sanity check: assert the attractor BDDs are strictly increasing.

    Dumps the offending pair (Python 2 print statements) before failing.
    """
    previous = attractors[0]
    for a in attractors[1:]:
        # strict increase: previous is a proper subset of a
        if not (previous.Leq(a) and previous != a):
            logger.error('attractors are not strictly increasing')
            print 'a:'
            a.PrintMinterm()
            print 'previous:'
            previous.PrintMinterm()
            print 'len(attractors):', str(len(attractors))
            assert 0
        previous = a
def assert_liveness_is_Moore(bdd, sig):
    """Assert that the liveness signal `bdd` depends on latches only.

    NOTE(review): the check abstracts all input variables out of the negated
    support of `bdd` and requires the result to differ from One — confirm
    this is the intended Moore-ness test before changing it.
    """
    all_inputs_bdds = get_uncontrollable_vars_bdds() + get_controllable_vars_bdds()
    assert (~(bdd.Support())).ExistAbstract(get_cube(all_inputs_bdds)) != cudd.One(), \
        'Mealy-like %s signals are not supported' % sig
def synthesize(realiz_check):
    """ Calculate winning region and extract output functions.

    :param realiz_check: if truthy, stop after the realizability verdict
                         (no strategy extraction)
    :return: - if realizable: <True, dictionary: controllable_variable_bdd -> func_bdd>
             - if not: <False, None>
    """
    logger.info('synthesize..')
    # build indexing translation: cudd <-> aiger
    compose_indexing_translation()
    init_bdd = compose_init_state_bdd()  # type: DdNode
    compose_transition_vector()
    env_bdd, err_bdd, f_bdd, j_bdd = get_inv_err_f_j_bdds()
    # ensure that depends on latches only: (\exists i1..in: a&b&c&i1==False) is not True  # TODO: lift to justice(t,i,o)
    assert_liveness_is_Moore(j_bdd, 'J')
    assert_liveness_is_Moore(f_bdd, 'F')
    # the system must avoid the error states, hence ~err_bdd as sys objective
    Z, Ys = calc_win_region(env_bdd, ~err_bdd,
                            f_bdd, j_bdd)
    # realizable iff the initial state lies inside the winning region
    if not (init_bdd <= Z):
        return False, None
    if realiz_check:
        return True, None
    non_det_strategy = get_nondet_strategy(Z, Ys,
                                           env_bdd, ~err_bdd,
                                           f_bdd, j_bdd)
    func_by_var = extract_output_funcs(non_det_strategy)
    return True, func_by_var
def negated(lit):
    """Flip the negation bit of an AIGER literal (the LSB encodes the sign)."""
    return lit ^ 0b1
def next_lit():
    """ :return: the first literal not yet used by the spec
        (literals are 2*var, so the next free one is 2*(maxvar+1)) """
    return 2 * (int(spec.maxvar) + 1)
def get_optimized_and_lit(a_lit, b_lit):
    """AND of two AIGER literals with constant folding.

    The constant 0 absorbs, 1 is neutral; for two real literals a fresh
    AND gate is appended to the spec and its literal returned.
    """
    if 0 in (a_lit, b_lit):
        return 0
    if a_lit == 1:
        return b_lit
    if b_lit == 1:
        return a_lit
    # both operands are real literals: introduce a new AND gate
    new_lit = next_lit()
    aiger_add_and(spec, new_lit, a_lit, b_lit)
    return new_lit
def walk(a_bdd):
    """
    Walk given BDD node (recursively).
    If given input BDD requires intermediate AND gates for its representation, the function adds them.
    Literal representing given input BDD is `not` added to the spec.

    :returns: literal representing input BDD
    :warning: variables in cudd nodes may be complemented, check with: ``node.IsComplement()``
    """
    # caching: literals are memoized per *regular* (non-complemented) node,
    # so a complemented reference returns the negated cached literal
    cached_lit = cache_dict.get(a_bdd.Regular(), None)
    if cached_lit is not None:
        return negated(cached_lit) if a_bdd.IsComplement()\
            else cached_lit
    # end caching
    #: :type: DdNode
    a_bdd = a_bdd
    if a_bdd.IsConstant():
        res = int(a_bdd == cudd.One())  # in aiger 0/1 = False/True
        return res
    # get an index of variable,
    # all variables used in BDDs are also present in AIGER
    a_lit = aiger_by_cudd[a_bdd.NodeReadIndex()]
    t_bdd = a_bdd.T()  # type: DdNode
    e_bdd = a_bdd.E()  # type: DdNode
    t_lit = walk(t_bdd)
    e_lit = walk(e_bdd)
    # ite(a_bdd, then_bdd, else_bdd)
    # = a*then + !a*else
    # = !(!(a*then) * !(!a*else))
    # -> in general case we need 3 more ANDs
    a_t_lit = get_optimized_and_lit(a_lit, t_lit)
    na_e_lit = get_optimized_and_lit(negated(a_lit), e_lit)
    n_a_t_lit = negated(a_t_lit)
    n_na_e_lit = negated(na_e_lit)
    ite_lit = get_optimized_and_lit(n_a_t_lit, n_na_e_lit)
    res = negated(ite_lit)
    # caching: store the literal of the regular node, then complement on
    # the way out if this reference was complemented
    cache_dict[a_bdd.Regular()] = res
    if a_bdd.IsComplement():
        res = negated(res)
    return res
def model_to_aiger(c_bdd,  # type: DdNode
                   func_bdd,  # type: DdNode
                   introduce_output,
                   output_name):
    """ Update aiger spec with a definition of `c_bdd` """
    control_lit = aiger_by_cudd[c_bdd.NodeReadIndex()]
    func_lit = walk(func_bdd)
    # a AND a == a: the input is redefined as a trivial AND of the function literal
    aiger_redefine_input_as_and(spec, control_lit, func_lit, func_lit)
    if introduce_output:
        aiger_add_output(spec, control_lit, output_name)
def init_cudd():
    """Create the global CUDD manager and enable dynamic reordering (SIFT)."""
    global cudd
    cudd = pycudd.DdManager()
    cudd.SetDefault()
    # Reordering heuristics are numbered 0..21 (CUDD_REORDER_SAME, NONE,
    # RANDOM, RANDOM_PIVOT, SIFT, SIFT_CONVERGE, SYMM_SIFT, SYMM_SIFT_CONV,
    # WINDOW2..WINDOW4_CONV, GROUP_SIFT, GROUP_SIFT_CONV, ANNEALING, GENETIC,
    # LINEAR, LINEAR_CONVERGE, LAZY_SIFT, EXACT); 4 selects SIFT.
    # NOTE: not all reordering operations 'work' (i got errors with LINEAR)
    cudd.AutodynEnable(4)
    # cudd.AutodynDisable()
    # cudd.EnableReorderingReporting()
def main(aiger_file_name, out_file_name, output_full_circuit, realiz_check):
    """ Open AIGER file, synthesize the circuit and write the result to output file.

    :param aiger_file_name: input specification (AIGER format)
    :param out_file_name: output path, or falsy to print to stdout
    :param output_full_circuit: also expose the controllable signals as outputs
    :param realiz_check: only check realizability, produce no circuit
    :returns: boolean value 'is realizable?'
    """
    init_cudd()
    parse_into_spec(aiger_file_name)
    realizable, func_by_var = synthesize(realiz_check)
    if realiz_check:
        return realizable
    if realizable:
        for (c_bdd, func_bdd) in func_by_var.items():
            # c_bdd here is the CUDD *index* of the controllable variable
            c_var_name = aiger_is_input(spec, aiger_by_cudd[c_bdd])\
                .name
            model_to_aiger(cudd.ReadVars(c_bdd), func_bdd, output_full_circuit, c_var_name)
        # some model checkers do not like unordered variable names (when e.g. latch is > add)
        # aiger_reencode(spec)
        if out_file_name:
            aiger_open_and_write_to_file(spec, out_file_name)
        else:
            res, string = aiger_write_to_string(spec, aiger_ascii_mode, 2147483648)
            # NOTE(review): out_file_name is falsy on this branch, so the
            # `or out_file_name is None` part weakens the check — verify intent
            assert res != 0 or out_file_name is None, 'writing failure'
            print(string)  # print independently of logger level setup
        return True
    return False
if __name__ == '__main__':
    # command-line entry point: parse arguments, run synthesis, report result
    parser = argparse.ArgumentParser(description='Simple synthesizer from AIGER GR1 format')
    parser.add_argument('aiger', metavar='aiger', type=str,
                        help='input specification in AIGER format')
    parser.add_argument('--out', '-o', metavar='out', type=str, required=False, default=None,
                        help='output file in AIGER format (if realizable)')
    parser.add_argument('--full', action='store_true', default=False,
                        help='produce a full circuit that has outputs other than error bit')
    parser.add_argument('--realizability', '-r', action='store_true', default=False,
                        help='Check Realizability only (do not produce circuits)')
    parser.add_argument('--quiet', '-q', action='store_true', default=False,
                        help='Do not print anything but the model (if realizable)')
    args = parser.parse_args()
    # -1 silences the logger; the model itself is printed unconditionally
    logger = setup_logging(-1 if args.quiet else 0)
    is_realizable = main(args.aiger, args.out, args.full, args.realizability)
    logger.info(['unrealizable', 'realizable'][is_realizable])
    # exit code communicates the verdict to callers (e.g. SYNTCOMP harness)
    exit([EXIT_STATUS_UNREALIZABLE, EXIT_STATUS_REALIZABLE][is_realizable])
| 5nizza/aisy | aisy.py | Python | mit | 21,602 | [
"VisIt"
] | 8158785254ab4a208eea93530d07d363e74da0453e7b2ac8e88d301a011dcc43 |
"""Window manager which controls any pop up windows from MPF. Used to display
game information, status, tests, keyboard-to-switch mapping, on screen DMD,
etc."""
# window.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
try:
import pygame
import pygame.locals
except ImportError:
pass
import version
from mpf.system.timing import Timing
from mpf.media_controller.core.display import MPFDisplay
class WindowManager(MPFDisplay):
    """Parent class for the Pygame-based on screen Window Manager in MPF.

    There is only one Window Manager per machine. It's used for lots of things,
    including displaying information about the game, an on-screen DMD, and for
    capturing key events which are translated to switches.
    """

    def __init__(self, machine):
        # move some of this to parent class
        # read the 'window' section of the machine config (empty dict if absent)
        if 'window' in machine.config:
            self.config = machine.config['window']
        else:
            self.config = dict()
        self.depth = 24  # color depth in bits
        self.palette = None
        super(WindowManager, self).__init__(machine, self.config)
        self.name = 'window'
        self.log = logging.getLogger("Window")
        self.log.debug("Loading the Window Manager")
        # NOTE(review): the config was already read above; this second read
        # re-fetches it after the superclass __init__ ran — confirm whether
        # it is intentional before removing the duplication
        if 'window' in self.machine.config:
            self.config = self.machine.config['window']
        else:
            self.config = dict()
        self.slides = dict()
        self.current_slide = None
        # fill in defaults for any window options missing from the config
        if 'title' not in self.config:
            self.config['title'] = 'Mission Pinball Framework v' + version.__version__
        if 'resizable' not in self.config:
            self.config['resizable'] = True
        if 'fullscreen' not in self.config:
            self.config['fullscreen'] = False
        if 'frame' not in self.config:
            self.config['frame'] = True
        if 'quit_on_close' not in self.config:
            self.config['quit_on_close'] = True
        if 'background_image' not in self.config:
            self.config['background_image'] = None
        if 'fps' not in self.config or self.config['fps'] == 'auto':
            self.config['fps'] = Timing.HZ
        self._setup_window()
        # window elements are loaded late, once the rest of MPF is up
        self.machine.events.add_handler('init_phase_5',
                                        self._load_window_elements)
        # Block all Pygame events from being reported. We'll selectively enable
        # them one-by-one as event handlers are registered.
        pygame.event.set_allowed(None)

    def _initialize(self):
        # hook for MPFDisplay initialization; element loading happens via the
        # 'init_phase_5' event handler registered in __init__
        super(WindowManager, self)._initialize()
        #self._load_window_elements()

    def _load_window_elements(self):
        # Loads the window elements from the config
        if 'elements' not in self.config:
            return
        self.machine.display.slidebuilder.build_slide(
            settings=self.config['elements'],
            display='window',
            slide_name='default',
            priority=0)
        #for element in self.config['elements']:
        #    this_element = self.config['elements'][element]
        #    if 'type' in this_element:
        #        this_element['element_type'] = this_element.pop('type')
        #
        #    # create a new element
        #    self.current_slide.add_element(
        #        dmd_object=self.machine.display.hw_module, **this_element)

    def _setup_window(self):
        # Sets up the Pygame window based on the settings in the config file.
        flags = 0
        if self.config['resizable']:
            flags = flags | pygame.locals.RESIZABLE
        if not self.config['frame']:
            flags = flags | pygame.locals.NOFRAME
        if self.config['fullscreen']:
            flags = flags | pygame.locals.FULLSCREEN
        # Create the actual Pygame window
        self.window = pygame.display.set_mode((self.width,
                                               self.height),
                                              flags)
        # Set the caption
        pygame.display.set_caption(self.config['title'])

    def update(self):
        """Updates the display. Called from a timer based on this display's fps
        settings.
        """
        super(WindowManager, self).update()
        # Update the display: blit the current slide and flip the buffers
        self.window.blit(self.current_slide.surface, (0, 0))
        pygame.display.flip()
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| jabdoa2/mpf | mpf/media_controller/core/window.py | Python | mit | 5,616 | [
"Brian"
] | 6467a86fa7e9f4e1080f35800def87cea7fdad12d8d37a8f5a03f97b2b7e9085 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkQuadricDecimation(SimpleVTKClassModuleBase):
    # Auto-generated DeVIDE wrapper (see the header comment): exposes
    # vtk.vtkQuadricDecimation as a processing module with one vtkPolyData
    # input and one vtkPolyData output.
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkQuadricDecimation(), 'Processing.',
            ('vtkPolyData',), ('vtkPolyData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkQuadricDecimation.py | Python | bsd-3-clause | 495 | [
"VTK"
] | 86bcc13a38173be75f9b0dbd08b1a2ef9979ecbf970312d305838394dab5bda3 |
# Copyright (C) 2014 Sereina Riniker
#
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Torsion Fingerprints (Deviation) (TFD)
According to a paper from Schulz-Gasch et al., JCIM, 52, 1499-1512 (2012).
"""
from rdkit import rdBase
from rdkit import RDConfig
from rdkit import Geometry
from rdkit import Chem
from rdkit.Chem import rdchem
from rdkit.Chem import rdMolDescriptors
import math, os
def _doMatch(inv, atoms):
""" Helper function to check if all atoms in the list are the same
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: boolean
"""
match = True
for i in range(len(atoms) - 1):
for j in range(i + 1, len(atoms)):
if (inv[atoms[i].GetIdx()] != inv[atoms[j].GetIdx()]):
match = False
return match
return match
def _doNotMatch(inv, atoms):
""" Helper function to check if all atoms in the list are NOT the same
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: boolean
"""
match = True
for i in range(len(atoms) - 1):
for j in range(i + 1, len(atoms)):
if (inv[atoms[i].GetIdx()] == inv[atoms[j].GetIdx()]):
match = False
return match
return match
def _doMatchExcept1(inv, atoms):
""" Helper function to check if two atoms in the list are the same,
and one not
Note: Works only for three atoms
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: atom that is different
"""
if len(atoms) != 3:
raise ValueError("Number of atoms must be three")
a1 = atoms[0].GetIdx()
a2 = atoms[1].GetIdx()
a3 = atoms[2].GetIdx()
if (inv[a1] == inv[a2] and inv[a1] != inv[a3] and inv[a2] != inv[a3]):
return atoms[2]
elif (inv[a1] != inv[a2] and inv[a1] == inv[a3] and inv[a2] != inv[a3]):
return atoms[1]
elif (inv[a1] != inv[a2] and inv[a1] != inv[a3] and inv[a2] == inv[a3]):
return atoms[0]
return None
def _getAtomInvariantsWithRadius(mol, radius):
    """ Helper function to calculate the atom invariants for each atom
        with a given radius

        Arguments:
        - mol: the molecule of interest
        - radius: the radius for the Morgan fingerprint

        Return: list of atom invariants
    """
    invariants = []
    for aid in range(mol.GetNumAtoms()):
        bit_info = {}
        # fingerprint rooted at this atom only; bit_info records the radius
        # of the environment that produced each bit
        rdMolDescriptors.GetMorganFingerprint(mol, radius, fromAtoms=[aid],
                                              bitInfo=bit_info)
        for bit, envs in bit_info.items():
            if envs[0][1] == radius:
                invariants.append(bit)
    return invariants
def _getHeavyAtomNeighbors(atom1, aid2=-1):
""" Helper function to calculate the number of heavy atom neighbors.
Arguments:
- atom1: the atom of interest
- aid2: atom index that should be excluded from neighbors (default: none)
Return: a list of heavy atom neighbors of the given atom
"""
if aid2 < 0:
return [n for n in atom1.GetNeighbors() if n.GetSymbol() != 'H']
else:
return [n for n in atom1.GetNeighbors() if (n.GetSymbol() != 'H' and n.GetIdx() != aid2)]
def _getIndexforTorsion(neighbors, inv):
    """ Helper function to calculate the index of the reference atom for
        a given atom

        Arguments:
        - neighbors: list of the neighbors of the atom
        - inv: atom invariants

        Return: list of atom indices as reference for torsion

        :raises ValueError: for three neighbors that are neither all equal,
                            all different, nor two-equal-one-different
    """
    if len(neighbors) == 1:  # atom has only one neighbor
        return [neighbors[0]]
    elif _doMatch(inv, neighbors):  # atom has all symmetric neighbors
        return neighbors
    elif _doNotMatch(inv, neighbors):  # atom has all different neighbors
        # sort by atom inv and simply use the first neighbor
        neighbors = sorted(neighbors, key=lambda x: inv[x.GetIdx()])
        return [neighbors[0]]
    elif len(neighbors) == 3:
        at = _doMatchExcept1(inv, neighbors)  # two neighbors the same, one different
        if at is None:
            raise ValueError("Atom neighbors are either all the same or all different")
        return [at]
    else:  # weird case
        # sort by atom inv and simply use the first neighbor
        neighbors = sorted(neighbors, key=lambda x: inv[x.GetIdx()])
        return [neighbors[0]]
def _getBondsForTorsions(mol, ignoreColinearBonds):
    """ Determine the bonds (or pair of atoms treated like a bond) for which
        torsions should be calculated.

        Arguments:
        - mol: the molecule of interest
        - ignoreColinearBonds: if True (default), single bonds adjacent to
                               triple bonds are ignored
                               if False, alternative not-covalently bound
                               atoms are used to define the torsion

        Return: list of tuples (begin_idx, end_idx, begin_neighbors, end_neighbors)
    """
    # flag the atoms that cannot be part of the centre atoms of a torsion
    # patterns: triple bonds and allenes
    patts = [Chem.MolFromSmarts(x) for x in ['*#*', '[$([C](=*)=*)]']]
    atomFlags = [0] * mol.GetNumAtoms()
    for p in patts:
        if mol.HasSubstructMatch(p):
            matches = mol.GetSubstructMatches(p)
            for match in matches:
                for a in match:
                    atomFlags[a] = 1
    bonds = []
    doneBonds = [0] * mol.GetNumBonds()
    for b in mol.GetBonds():
        # ring bonds are handled separately by the ring-torsion code
        if b.IsInRing():
            continue
        a1 = b.GetBeginAtomIdx()
        a2 = b.GetEndAtomIdx()
        nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a2)
        nb2 = _getHeavyAtomNeighbors(b.GetEndAtom(), a1)
        if not doneBonds[b.GetIdx()] and (nb1 and nb2):  # no terminal bonds
            doneBonds[b.GetIdx()] = 1
            # check if atoms cannot be middle atoms
            if atomFlags[a1] or atomFlags[a2]:
                if not ignoreColinearBonds:  # search for alternative not-covalently bound atoms
                    # walk outwards along each colinear chain until a usable
                    # (non-flagged or branching) atom is found
                    while len(nb1) == 1 and atomFlags[a1]:
                        a1old = a1
                        a1 = nb1[0].GetIdx()
                        b = mol.GetBondBetweenAtoms(a1old, a1)
                        if b.GetEndAtom().GetIdx() == a1old:
                            nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a1old)
                        else:
                            nb1 = _getHeavyAtomNeighbors(b.GetEndAtom(), a1old)
                        doneBonds[b.GetIdx()] = 1
                    while len(nb2) == 1 and atomFlags[a2]:
                        doneBonds[b.GetIdx()] = 1
                        a2old = a2
                        a2 = nb2[0].GetIdx()
                        b = mol.GetBondBetweenAtoms(a2old, a2)
                        if b.GetBeginAtom().GetIdx() == a2old:
                            nb2 = _getHeavyAtomNeighbors(b.GetEndAtom(), a2old)
                        else:
                            nb2 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a2old)
                        doneBonds[b.GetIdx()] = 1
                    if nb1 and nb2:
                        bonds.append((a1, a2, nb1, nb2))
                # else: colinear bond is skipped entirely
            else:
                bonds.append((a1, a2, nb1, nb2))
    return bonds
def CalculateTorsionLists(mol, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
    """ Calculate a list of torsions for a given molecule. For each torsion
        the four atom indices are determined and stored in a set.

        Arguments:
        - mol: the molecule of interest
        - maxDev: maximal deviation used for normalization
                  'equal': all torsions are normalized using 180.0 (default)
                  'spec': each torsion is normalized using its specific
                          maximal deviation as given in the paper
        - symmRadius: radius used for calculating the atom invariants
                      (default: 2)
        - ignoreColinearBonds: if True (default), single bonds adjacent to
                               triple bonds are ignored
                               if False, alternative not-covalently bound
                               atoms are used to define the torsion

        Return: two lists of torsions: non-ring and ring torsions
    """
    if maxDev not in ['equal', 'spec']:
        raise ValueError("maxDev must be either equal or spec")
    # get non-terminal, non-cyclic bonds
    bonds = _getBondsForTorsions(mol, ignoreColinearBonds)
    # get atom invariants
    if symmRadius > 0:
        inv = _getAtomInvariantsWithRadius(mol, symmRadius)
    else:
        inv = rdMolDescriptors.GetConnectivityInvariants(mol)
    # get the torsions
    tors_list = []  # to store the atom indices of the torsions
    for a1, a2, nb1, nb2 in bonds:
        # reference atoms on either side of the central bond
        d1 = _getIndexforTorsion(nb1, inv)
        d2 = _getIndexforTorsion(nb2, inv)
        if len(d1) == 1 and len(d2) == 1:  # case 1, 2, 4, 5, 7, 10, 16, 12, 17, 19
            tors_list.append(([(d1[0].GetIdx(), a1, a2, d2[0].GetIdx())], 180.0))
        elif len(d1) == 1:  # case 3, 6, 8, 13, 20
            if len(nb2) == 2:  # two neighbors
                tors_list.append(([(d1[0].GetIdx(), a1, a2, nb.GetIdx()) for nb in d2], 90.0))
            else:  # three neighbors
                tors_list.append(([(d1[0].GetIdx(), a1, a2, nb.GetIdx()) for nb in d2], 60.0))
        elif len(d2) == 1:  # case 3, 6, 8, 13, 20
            if len(nb1) == 2:
                tors_list.append(([(nb.GetIdx(), a1, a2, d2[0].GetIdx()) for nb in d1], 90.0))
            else:  # three neighbors
                tors_list.append(([(nb.GetIdx(), a1, a2, d2[0].GetIdx()) for nb in d1], 60.0))
        else:  # both symmetric
            tmp = []
            for n1 in d1:
                for n2 in d2:
                    tmp.append((n1.GetIdx(), a1, a2, n2.GetIdx()))
            if len(nb1) == 2 and len(nb2) == 2:  # case 9
                tors_list.append((tmp, 90.0))
            elif len(nb1) == 3 and len(nb2) == 3:  # case 21
                tors_list.append((tmp, 60.0))
            else:  # case 15
                tors_list.append((tmp, 30.0))
    # maximal possible deviation for non-cyclic bonds
    if maxDev == 'equal':
        tors_list = [(t, 180.0) for t, d in tors_list]
    # rings
    rings = Chem.GetSymmSSSR(mol)
    tors_list_rings = []
    for r in rings:
        # get the torsions: consecutive atom quadruples around the ring
        tmp = []
        num = len(r)
        # ring-size-dependent maximal deviation (formula from the paper)
        maxdev = 180.0 * math.exp(-0.025 * (num - 14) * (num - 14))
        for i in range(len(r)):
            tmp.append((r[i], r[(i + 1) % num], r[(i + 2) % num], r[(i + 3) % num]))
        tors_list_rings.append((tmp, maxdev))
    return tors_list, tors_list_rings
def _getTorsionAtomPositions(atoms, conf):
""" Helper function to retrieve the coordinates of the four atoms
in a torsion
Arguments:
- atoms: list with the four atoms
- conf: conformation of the molecule
Return: Point3D objects of the four atoms
"""
if len(atoms) != 4:
raise ValueError("List must contain exactly four atoms")
p1 = conf.GetAtomPosition(atoms[0])
p2 = conf.GetAtomPosition(atoms[1])
p3 = conf.GetAtomPosition(atoms[2])
p4 = conf.GetAtomPosition(atoms[3])
return p1, p2, p3, p4
def CalculateTorsionAngles(mol, tors_list, tors_list_rings, confId=-1):
    """ Calculate the torsion angles for a list of non-ring and
        a list of ring torsions.

        Arguments:
        - mol: the molecule of interest
        - tors_list: list of non-ring torsions
        - tors_list_rings: list of ring torsions
        - confId: index of the conformation (default: first conformer)

        Return: list of torsion angles
    """
    torsions = []
    conf = mol.GetConformer(confId)
    for quartets, maxdev in tors_list:
        tors = []
        # loop over torsions and calculate angle
        for atoms in quartets:
            p1, p2, p3, p4 = _getTorsionAtomPositions(atoms, conf)
            # radians -> degrees
            tmpTors = (Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4) / math.pi) * 180.0
            if tmpTors < 0:
                tmpTors += 360.0  # angle between 0 and 360
            tors.append(tmpTors)
        torsions.append((tors, maxdev))
    # rings
    for quartets, maxdev in tors_list_rings:
        num = len(quartets)
        # loop over torsions and sum them up
        # (ring torsions are averaged over their absolute values)
        tors = 0
        for atoms in quartets:
            p1, p2, p3, p4 = _getTorsionAtomPositions(atoms, conf)
            tmpTors = abs((Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4) / math.pi) * 180.0)
            tors += tmpTors
        tors /= num
        torsions.append(([tors], maxdev))
    return torsions
def _findCentralBond(mol, distmat):
    """ Helper function to identify the atoms of the most central bond.

        Arguments:
        - mol: the molecule of interest
        - distmat: distance matrix of the molecule

        Return: atom indices of the two most central atoms (in order)
    """
    from numpy import std
    # get the most central atom = atom with the least STD of shortest distances
    stds = []
    for i in range(mol.GetNumAtoms()):
        # only consider non-terminal atoms
        if len(_getHeavyAtomNeighbors(mol.GetAtomWithIdx(i))) < 2:
            continue
        tmp = [d for d in distmat[i]]
        tmp.pop(i)  # exclude the zero self-distance
        stds.append((std(tmp), i))
    stds.sort()
    aid1 = stds[0][1]
    # find the second most central bond that is bonded to aid1
    # (walk down the sorted list until a bonded partner is found)
    i = 1
    while 1:
        if mol.GetBondBetweenAtoms(aid1, stds[i][1]) is None:
            i += 1
        else:
            aid2 = stds[i][1]
            break
    return aid1, aid2  # most central atom comes first
def _calculateBeta(mol, distmat, aid1):
    """ Helper function to calculate the beta for torsion weights
        according to the formula in the paper.
        w(dmax/2) = 0.1

        Arguments:
        - mol: the molecule of interest
        - distmat: distance matrix of the molecule
        - aid1: atom index of the most central atom

        Return: value of beta (float)
    """
    # get all non-terminal bonds (both ends must have >1 heavy neighbor)
    bonds = []
    for b in mol.GetBonds():
        nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom())
        nb2 = _getHeavyAtomNeighbors(b.GetEndAtom())
        # BUG FIX: the original tested len(nb2) twice, so bonds with a
        # terminal begin-atom were not filtered out
        if len(nb1) > 1 and len(nb2) > 1:
            bonds.append(b)
    # largest topological distance from aid1 to any such bond
    dmax = 0
    for b in bonds:
        bid1 = b.GetBeginAtom().GetIdx()
        bid2 = b.GetEndAtom().GetIdx()
        d = max([distmat[aid1][bid1], distmat[aid1][bid2]])
        if (d > dmax):
            dmax = d
    # solve w(dmax/2) = exp(-beta * (dmax/2)^2) = 0.1 for beta
    # NOTE(review): dmax == 0 (no non-terminal bond found) would raise
    # ZeroDivisionError — same as in the original; confirm callers prevent it
    dmax2 = dmax / 2.0
    beta = -math.log(0.1) / (dmax2 * dmax2)
    return beta
def CalculateTorsionWeights(mol, aid1=-1, aid2=-1, ignoreColinearBonds=True):
  """ Calculate the weights for the torsions in a molecule.
      By default, the highest weight is given to the bond
      connecting the two most central atoms.
      If desired, two alternate atoms can be specified (must
      be connected by a bond).

      Arguments:
      - mol: the molecule of interest
      - aid1: index of the first atom (default: most central)
      - aid2: index of the second atom (default: second most central)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: list of torsion weights (both non-ring and ring)

      Raises: ValueError if explicitly given aid1/aid2 are not bonded.
  """
  # get distance matrix (topological/bond distances between all atom pairs)
  distmat = Chem.GetDistanceMatrix(mol)
  if aid1 < 0 and aid2 < 0:
    # no atoms specified: use the two most central atoms
    aid1, aid2 = _findCentralBond(mol, distmat)
  else:
    # validate that the user-specified atoms are actually bonded
    b = mol.GetBondBetweenAtoms(aid1, aid2)
    if b is None:
      raise ValueError("Specified atoms must be connected by a bond.")
  # calculate beta according to the formula in the paper
  beta = _calculateBeta(mol, distmat, aid1)
  # get non-terminal, non-cyclic bonds
  bonds = _getBondsForTorsions(mol, ignoreColinearBonds)
  # get shortest paths and calculate weights
  weights = []
  for bid1, bid2, nb1, nb2 in bonds:
    if ((bid1, bid2) == (aid1, aid2)
        or (bid2, bid1) == (aid1, aid2)):  # if it's the most central bond itself
      d = 0
    else:
      # get shortest distance between the 4 atoms and add 1 to get bond distance
      d = min(distmat[aid1][bid1], distmat[aid1][bid2], distmat[aid2][bid1],
              distmat[aid2][bid2]) + 1
    # Gaussian decay with bond distance: weight 1.0 at the central bond
    w = math.exp(-beta * (d * d))
    weights.append(w)
  ## RINGS
  rings = mol.GetRingInfo()
  for r in rings.BondRings():
    # get shortest distances
    tmp = []
    num = len(r)
    for bidx in r:
      b = mol.GetBondWithIdx(bidx)
      bid1 = b.GetBeginAtomIdx()
      bid2 = b.GetEndAtomIdx()
      # get shortest distance between the 4 atoms and add 1 to get bond distance
      d = min(distmat[aid1][bid1], distmat[aid1][bid2], distmat[aid2][bid1],
              distmat[aid2][bid2]) + 1
      tmp.append(d)
    # calculate weights and append to list
    # Note: the description in the paper is not very clear, the following
    # formula was found to give the same weights as shown in Fig. 1
    # For a ring of size N: w = N/2 * exp(-beta*(sum(w of each bond in ring)/N)^2)
    w = sum(tmp) / float(num)
    w = math.exp(-beta * (w * w))
    weights.append(w * (num / 2.0))
  return weights
def CalculateTFD(torsions1, torsions2, weights=None):
  """ Calculate the torsion deviation fingerprint (TFD) given two lists of
      torsion angles.

      Arguments:
      - torsions1: torsion angles of conformation 1, as a list of
                   ([angles], maxdev) tuples
      - torsions2: torsion angles of conformation 2, same layout
      - weights:   list of torsion weights (default: None = unweighted)

      Return: TFD value (float)

      Raises: ValueError if the two torsion lists (or the weights list)
              differ in length.
  """
  if len(torsions1) != len(torsions2):
    raise ValueError("List of torsions angles must have the same size.")
  # per-torsion: smallest circular difference over all symmetry-equivalent
  # angle pairs, normalized by the torsion's maximal possible deviation
  deviations = []
  for (angles1, maxdev), (angles2, _) in zip(torsions1, torsions2):
    best = 180.0
    for a1 in angles1:
      for a2 in angles2:
        delta = abs(a1 - a2)
        # direction does not matter: wrap around 360 degrees
        if (360.0 - delta) < delta:
          delta = 360.0 - delta
        if delta < best:
          best = delta
    deviations.append(best / maxdev)
  # optional per-torsion weighting
  if weights is None:
    total = len(deviations)
  else:
    if len(weights) != len(torsions1):
      raise ValueError("List of torsions angles and weights must have the same size.")
    deviations = [dev * w for dev, w in zip(deviations, weights)]
    total = sum(weights)
  tfd = sum(deviations)
  if total != 0:  # avoid division by zero
    tfd /= total
  return tfd
def _getSameAtomOrder(mol1, mol2):
  """ Generate a new molecule with the atom order of mol1 and coordinates
      from mol2.

      Arguments:
      - mol1: first instance of the molecule of interest
      - mol2: second instance the molecule of interest

      Return: RDKit molecule (a copy; the inputs are not modified)
  """
  # map mol1's atom indices onto mol2's atoms via substructure match
  match = mol2.GetSubstructMatch(mol1)
  atomNums = tuple(range(mol1.GetNumAtoms()))
  if match != atomNums:  # atom orders are not the same!
    # build a copy of mol1 and re-populate its conformers with mol2's
    # coordinates, permuted into mol1's atom order
    mol3 = Chem.Mol(mol1)
    mol3.RemoveAllConformers()
    for conf2 in mol2.GetConformers():
      confId = conf2.GetId()
      conf = rdchem.Conformer(mol1.GetNumAtoms())
      conf.SetId(confId)  # preserve original conformer ids
      for i in range(mol1.GetNumAtoms()):
        conf.SetAtomPosition(i, mol2.GetConformer(confId).GetAtomPosition(match[i]))
      cid = mol3.AddConformer(conf)  # cid unused; kept for clarity
    return mol3
  else:
    # atom orders already agree: just return a copy of mol2
    return Chem.Mol(mol2)
# some wrapper functions
def GetTFDBetweenConformers(mol, confIds1, confIds2, useWeights=True, maxDev='equal', symmRadius=2,
                            ignoreColinearBonds=True):
  """ Wrapper to calculate the TFD between two list of conformers
      of a molecule

      Arguments:
      - mol:      the molecule of interest
      - confIds1: first list of conformer indices
      - confIds2: second list of conformer indices
      - useWeights: flag for using torsion weights in the TFD calculation
      - maxDev:   maximal deviation used for normalization
                  'equal': all torsions are normalized using 180.0 (default)
                  'spec':  each torsion is normalized using its specific
                           maximal deviation as given in the paper
      - symmRadius: radius used for calculating the atom invariants
                    (default: 2)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: list of TFD values (one per pair, confIds1-major order)
  """
  tl, tlr = CalculateTorsionLists(mol, maxDev=maxDev, symmRadius=symmRadius,
                                  ignoreColinearBonds=ignoreColinearBonds)
  anglesFirst = [CalculateTorsionAngles(mol, tl, tlr, confId=cid) for cid in confIds1]
  anglesSecond = [CalculateTorsionAngles(mol, tl, tlr, confId=cid) for cid in confIds2]
  # weights=None makes CalculateTFD fall back to its unweighted default,
  # so both branches collapse into one cross-product loop
  weights = None
  if useWeights:
    weights = CalculateTorsionWeights(mol, ignoreColinearBonds=ignoreColinearBonds)
  return [
      CalculateTFD(a1, a2, weights=weights) for a1 in anglesFirst for a2 in anglesSecond
  ]
def GetTFDBetweenMolecules(mol1, mol2, confId1=-1, confId2=-1, useWeights=True, maxDev='equal',
                           symmRadius=2, ignoreColinearBonds=True):
  """ Wrapper to calculate the TFD between two molecules.
      Important: The two molecules must be instances of the same molecule

      Arguments:
      - mol1:    first instance of the molecule of interest
      - mol2:    second instance the molecule of interest
      - confId1: conformer index for mol1 (default: first conformer)
      - confId2: conformer index for mol2 (default: first conformer)
      - useWeights: flag for using torsion weights in the TFD calculation
      - maxDev:  maximal deviation used for normalization
                 'equal': all torsions are normalized using 180.0 (default)
                 'spec':  each torsion is normalized using its specific
                          maximal deviation as given in the paper
      - symmRadius: radius used for calculating the atom invariants
                    (default: 2)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: TFD value

      Raises: ValueError if the two molecules are not the same (compared
              via canonical SMILES).
  """
  # canonical SMILES comparison guards against comparing different molecules
  if Chem.MolToSmiles(mol1) != Chem.MolToSmiles(mol2):
    raise ValueError("The two molecules must be instances of the same molecule!")
  # bring mol2's atoms into mol1's atom order so torsion lists line up
  mol2 = _getSameAtomOrder(mol1, mol2)
  tl, tlr = CalculateTorsionLists(mol1, maxDev=maxDev, symmRadius=symmRadius,
                                  ignoreColinearBonds=ignoreColinearBonds)
  angles1 = CalculateTorsionAngles(mol1, tl, tlr, confId=confId1)
  angles2 = CalculateTorsionAngles(mol2, tl, tlr, confId=confId2)
  # weights=None is CalculateTFD's unweighted default
  weights = CalculateTorsionWeights(
      mol1, ignoreColinearBonds=ignoreColinearBonds) if useWeights else None
  return CalculateTFD(angles1, angles2, weights=weights)
def GetTFDMatrix(mol, useWeights=True, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
  """ Wrapper to calculate the matrix of TFD values for the
      conformers of a molecule.

      Arguments:
      - mol: the molecule of interest
      - useWeights: flag for using torsion weights in the TFD calculation
      - maxDev: maximal deviation used for normalization
                'equal': all torsions are normalized using 180.0 (default)
                'spec':  each torsion is normalized using its specific
                         maximal deviation as given in the paper
      - symmRadius: radius used for calculating the atom invariants
                    (default: 2)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: matrix of TFD values
      Note that the returned matrix is symmetrical, i.e. it is the
      lower half of the matrix, e.g. for 5 conformers:
      matrix = [ a,
                 b, c,
                 d, e, f,
                 g, h, i, j]
  """
  tl, tlr = CalculateTorsionLists(mol, maxDev=maxDev, symmRadius=symmRadius,
                                  ignoreColinearBonds=ignoreColinearBonds)
  angles = [
      CalculateTorsionAngles(mol, tl, tlr, confId=conf.GetId()) for conf in mol.GetConformers()
  ]
  # weights=None is CalculateTFD's unweighted default, so a single loop
  # covers both the weighted and unweighted cases
  weights = CalculateTorsionWeights(
      mol, ignoreColinearBonds=ignoreColinearBonds) if useWeights else None
  tfdmat = []
  for row in range(mol.GetNumConformers()):
    for col in range(row):
      tfdmat.append(CalculateTFD(angles[row], angles[col], weights=weights))
  return tfdmat
| bp-kelley/rdkit | rdkit/Chem/TorsionFingerprints.py | Python | bsd-3-clause | 24,514 | [
"RDKit"
] | ab3bfbd8050301230994443e5cef7c28149aad0e5c2e0839f717a80ce39d4f0e |
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import struct
from mutagen._util import MutagenError
class error(MutagenError):
    """Base error raised by :mod:`mutagen.asf`; subclasses below refine it."""
class ASFError(error):
    """General ASF error (see the module-level ``error`` base class)."""
    pass
class ASFHeaderError(error):
    """ASF error relating to the file header (e.g. an invalid header)."""
    pass
def guid2bytes(s):
    """Converts a GUID to the serialized bytes representation.

    The textual form "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" becomes 16
    bytes: the first three fields little-endian, the rest big-endian.
    """
    assert isinstance(s, str)
    assert len(s) == 36

    time_low = int(s[:8], 16)
    time_mid = int(s[9:13], 16)
    time_hi = int(s[14:18], 16)
    clock_seq = int(s[19:23], 16)
    node = int(s[24:], 16)

    return (struct.pack("<IHH", time_low, time_mid, time_hi) +
            struct.pack(">H", clock_seq) +
            node.to_bytes(6, "big"))
def bytes2guid(s):
    """Converts a serialized GUID to a text GUID.

    Inverse of :func:`guid2bytes`: the first three fields are read
    little-endian, the remaining two big-endian.
    """
    assert isinstance(s, bytes)

    time_low, time_mid, time_hi = struct.unpack("<IHH", s[:8])
    clock_seq = int.from_bytes(s[8:10], "big")
    node = int.from_bytes(s[10:], "big")
    return "%08X-%04X-%04X-%04X-%012X" % (
        time_low, time_mid, time_hi, clock_seq, node)
# Names from http://windows.microsoft.com/en-za/windows7/c00d10d1-[0-9A-F]{1,4}
CODECS = {
0x0000: u"Unknown Wave Format",
0x0001: u"Microsoft PCM Format",
0x0002: u"Microsoft ADPCM Format",
0x0003: u"IEEE Float",
0x0004: u"Compaq Computer VSELP",
0x0005: u"IBM CVSD",
0x0006: u"Microsoft CCITT A-Law",
0x0007: u"Microsoft CCITT u-Law",
0x0008: u"Microsoft DTS",
0x0009: u"Microsoft DRM",
0x000A: u"Windows Media Audio 9 Voice",
0x000B: u"Windows Media Audio 10 Voice",
0x000C: u"OGG Vorbis",
0x000D: u"FLAC",
0x000E: u"MOT AMR",
0x000F: u"Nice Systems IMBE",
0x0010: u"OKI ADPCM",
0x0011: u"Intel IMA ADPCM",
0x0012: u"Videologic MediaSpace ADPCM",
0x0013: u"Sierra Semiconductor ADPCM",
0x0014: u"Antex Electronics G.723 ADPCM",
0x0015: u"DSP Solutions DIGISTD",
0x0016: u"DSP Solutions DIGIFIX",
0x0017: u"Dialogic OKI ADPCM",
0x0018: u"MediaVision ADPCM",
0x0019: u"Hewlett-Packard CU codec",
0x001A: u"Hewlett-Packard Dynamic Voice",
0x0020: u"Yamaha ADPCM",
0x0021: u"Speech Compression SONARC",
0x0022: u"DSP Group True Speech",
0x0023: u"Echo Speech EchoSC1",
0x0024: u"Ahead Inc. Audiofile AF36",
0x0025: u"Audio Processing Technology APTX",
0x0026: u"Ahead Inc. AudioFile AF10",
0x0027: u"Aculab Prosody 1612",
0x0028: u"Merging Technologies S.A. LRC",
0x0030: u"Dolby Labs AC2",
0x0031: u"Microsoft GSM 6.10",
0x0032: u"Microsoft MSNAudio",
0x0033: u"Antex Electronics ADPCME",
0x0034: u"Control Resources VQLPC",
0x0035: u"DSP Solutions Digireal",
0x0036: u"DSP Solutions DigiADPCM",
0x0037: u"Control Resources CR10",
0x0038: u"Natural MicroSystems VBXADPCM",
0x0039: u"Crystal Semiconductor IMA ADPCM",
0x003A: u"Echo Speech EchoSC3",
0x003B: u"Rockwell ADPCM",
0x003C: u"Rockwell DigiTalk",
0x003D: u"Xebec Multimedia Solutions",
0x0040: u"Antex Electronics G.721 ADPCM",
0x0041: u"Antex Electronics G.728 CELP",
0x0042: u"Intel G.723",
0x0043: u"Intel G.723.1",
0x0044: u"Intel G.729 Audio",
0x0045: u"Sharp G.726 Audio",
0x0050: u"Microsoft MPEG-1",
0x0052: u"InSoft RT24",
0x0053: u"InSoft PAC",
0x0055: u"MP3 - MPEG Layer III",
0x0059: u"Lucent G.723",
0x0060: u"Cirrus Logic",
0x0061: u"ESS Technology ESPCM",
0x0062: u"Voxware File-Mode",
0x0063: u"Canopus Atrac",
0x0064: u"APICOM G.726 ADPCM",
0x0065: u"APICOM G.722 ADPCM",
0x0066: u"Microsoft DSAT",
0x0067: u"Microsoft DSAT Display",
0x0069: u"Voxware Byte Aligned",
0x0070: u"Voxware AC8",
0x0071: u"Voxware AC10",
0x0072: u"Voxware AC16",
0x0073: u"Voxware AC20",
0x0074: u"Voxware RT24 MetaVoice",
0x0075: u"Voxware RT29 MetaSound",
0x0076: u"Voxware RT29HW",
0x0077: u"Voxware VR12",
0x0078: u"Voxware VR18",
0x0079: u"Voxware TQ40",
0x007A: u"Voxware SC3",
0x007B: u"Voxware SC3",
0x0080: u"Softsound",
0x0081: u"Voxware TQ60",
0x0082: u"Microsoft MSRT24",
0x0083: u"AT&T Labs G.729A",
0x0084: u"Motion Pixels MVI MV12",
0x0085: u"DataFusion Systems G.726",
0x0086: u"DataFusion Systems GSM610",
0x0088: u"Iterated Systems ISIAudio",
0x0089: u"Onlive",
0x008A: u"Multitude FT SX20",
0x008B: u"Infocom ITS ACM G.721",
0x008C: u"Convedia G.729",
0x008D: u"Congruency Audio",
0x0091: u"Siemens Business Communications SBC24",
0x0092: u"Sonic Foundry Dolby AC3 SPDIF",
0x0093: u"MediaSonic G.723",
0x0094: u"Aculab Prosody 8KBPS",
0x0097: u"ZyXEL ADPCM",
0x0098: u"Philips LPCBB",
0x0099: u"Studer Professional Audio AG Packed",
0x00A0: u"Malden Electronics PHONYTALK",
0x00A1: u"Racal Recorder GSM",
0x00A2: u"Racal Recorder G720.a",
0x00A3: u"Racal Recorder G723.1",
0x00A4: u"Racal Recorder Tetra ACELP",
0x00B0: u"NEC AAC",
0x00FF: u"CoreAAC Audio",
0x0100: u"Rhetorex ADPCM",
0x0101: u"BeCubed Software IRAT",
0x0111: u"Vivo G.723",
0x0112: u"Vivo Siren",
0x0120: u"Philips CELP",
0x0121: u"Philips Grundig",
0x0123: u"Digital G.723",
0x0125: u"Sanyo ADPCM",
0x0130: u"Sipro Lab Telecom ACELP.net",
0x0131: u"Sipro Lab Telecom ACELP.4800",
0x0132: u"Sipro Lab Telecom ACELP.8V3",
0x0133: u"Sipro Lab Telecom ACELP.G.729",
0x0134: u"Sipro Lab Telecom ACELP.G.729A",
0x0135: u"Sipro Lab Telecom ACELP.KELVIN",
0x0136: u"VoiceAge AMR",
0x0140: u"Dictaphone G.726 ADPCM",
0x0141: u"Dictaphone CELP68",
0x0142: u"Dictaphone CELP54",
0x0150: u"Qualcomm PUREVOICE",
0x0151: u"Qualcomm HALFRATE",
0x0155: u"Ring Zero Systems TUBGSM",
0x0160: u"Windows Media Audio Standard",
0x0161: u"Windows Media Audio 9 Standard",
0x0162: u"Windows Media Audio 9 Professional",
0x0163: u"Windows Media Audio 9 Lossless",
0x0164: u"Windows Media Audio Pro over SPDIF",
0x0170: u"Unisys NAP ADPCM",
0x0171: u"Unisys NAP ULAW",
0x0172: u"Unisys NAP ALAW",
0x0173: u"Unisys NAP 16K",
0x0174: u"Sycom ACM SYC008",
0x0175: u"Sycom ACM SYC701 G725",
0x0176: u"Sycom ACM SYC701 CELP54",
0x0177: u"Sycom ACM SYC701 CELP68",
0x0178: u"Knowledge Adventure ADPCM",
0x0180: u"Fraunhofer IIS MPEG-2 AAC",
0x0190: u"Digital Theater Systems DTS",
0x0200: u"Creative Labs ADPCM",
0x0202: u"Creative Labs FastSpeech8",
0x0203: u"Creative Labs FastSpeech10",
0x0210: u"UHER informatic GmbH ADPCM",
0x0215: u"Ulead DV Audio",
0x0216: u"Ulead DV Audio",
0x0220: u"Quarterdeck",
0x0230: u"I-link Worldwide ILINK VC",
0x0240: u"Aureal Semiconductor RAW SPORT",
0x0249: u"Generic Passthru",
0x0250: u"Interactive Products HSX",
0x0251: u"Interactive Products RPELP",
0x0260: u"Consistent Software CS2",
0x0270: u"Sony SCX",
0x0271: u"Sony SCY",
0x0272: u"Sony ATRAC3",
0x0273: u"Sony SPC",
0x0280: u"Telum Audio",
0x0281: u"Telum IA Audio",
0x0285: u"Norcom Voice Systems ADPCM",
0x0300: u"Fujitsu TOWNS SND",
0x0350: u"Micronas SC4 Speech",
0x0351: u"Micronas CELP833",
0x0400: u"Brooktree BTV Digital",
0x0401: u"Intel Music Coder",
0x0402: u"Intel Audio",
0x0450: u"QDesign Music",
0x0500: u"On2 AVC0 Audio",
0x0501: u"On2 AVC1 Audio",
0x0680: u"AT&T Labs VME VMPCM",
0x0681: u"AT&T Labs TPC",
0x08AE: u"ClearJump Lightwave Lossless",
0x1000: u"Olivetti GSM",
0x1001: u"Olivetti ADPCM",
0x1002: u"Olivetti CELP",
0x1003: u"Olivetti SBC",
0x1004: u"Olivetti OPR",
0x1100: u"Lernout & Hauspie",
0x1101: u"Lernout & Hauspie CELP",
0x1102: u"Lernout & Hauspie SBC8",
0x1103: u"Lernout & Hauspie SBC12",
0x1104: u"Lernout & Hauspie SBC16",
0x1400: u"Norris Communication",
0x1401: u"ISIAudio",
0x1500: u"AT&T Labs Soundspace Music Compression",
0x1600: u"Microsoft MPEG ADTS AAC",
0x1601: u"Microsoft MPEG RAW AAC",
0x1608: u"Nokia MPEG ADTS AAC",
0x1609: u"Nokia MPEG RAW AAC",
0x181C: u"VoxWare MetaVoice RT24",
0x1971: u"Sonic Foundry Lossless",
0x1979: u"Innings Telecom ADPCM",
0x1FC4: u"NTCSoft ALF2CD ACM",
0x2000: u"Dolby AC3",
0x2001: u"DTS",
0x4143: u"Divio AAC",
0x4201: u"Nokia Adaptive Multi-Rate",
0x4243: u"Divio G.726",
0x4261: u"ITU-T H.261",
0x4263: u"ITU-T H.263",
0x4264: u"ITU-T H.264",
0x674F: u"Ogg Vorbis Mode 1",
0x6750: u"Ogg Vorbis Mode 2",
0x6751: u"Ogg Vorbis Mode 3",
0x676F: u"Ogg Vorbis Mode 1+",
0x6770: u"Ogg Vorbis Mode 2+",
0x6771: u"Ogg Vorbis Mode 3+",
0x7000: u"3COM NBX Audio",
0x706D: u"FAAD AAC Audio",
0x77A1: u"True Audio Lossless Audio",
0x7A21: u"GSM-AMR CBR 3GPP Audio",
0x7A22: u"GSM-AMR VBR 3GPP Audio",
0xA100: u"Comverse Infosys G723.1",
0xA101: u"Comverse Infosys AVQSBC",
0xA102: u"Comverse Infosys SBC",
0xA103: u"Symbol Technologies G729a",
0xA104: u"VoiceAge AMR WB",
0xA105: u"Ingenient Technologies G.726",
0xA106: u"ISO/MPEG-4 Advanced Audio Coding (AAC)",
0xA107: u"Encore Software Ltd's G.726",
0xA108: u"ZOLL Medical Corporation ASAO",
0xA109: u"Speex Voice",
0xA10A: u"Vianix MASC Speech Compression",
0xA10B: u"Windows Media 9 Spectrum Analyzer Output",
0xA10C: u"Media Foundation Spectrum Analyzer Output",
0xA10D: u"GSM 6.10 (Full-Rate) Speech",
0xA10E: u"GSM 6.20 (Half-Rate) Speech",
0xA10F: u"GSM 6.60 (Enchanced Full-Rate) Speech",
0xA110: u"GSM 6.90 (Adaptive Multi-Rate) Speech",
0xA111: u"GSM Adaptive Multi-Rate WideBand Speech",
0xA112: u"Polycom G.722",
0xA113: u"Polycom G.728",
0xA114: u"Polycom G.729a",
0xA115: u"Polycom Siren",
0xA116: u"Global IP Sound ILBC",
0xA117: u"Radio Time Time Shifted Radio",
0xA118: u"Nice Systems ACA",
0xA119: u"Nice Systems ADPCM",
0xA11A: u"Vocord Group ITU-T G.721",
0xA11B: u"Vocord Group ITU-T G.726",
0xA11C: u"Vocord Group ITU-T G.722.1",
0xA11D: u"Vocord Group ITU-T G.728",
0xA11E: u"Vocord Group ITU-T G.729",
0xA11F: u"Vocord Group ITU-T G.729a",
0xA120: u"Vocord Group ITU-T G.723.1",
0xA121: u"Vocord Group LBC",
0xA122: u"Nice G.728",
0xA123: u"France Telecom G.729 ACM Audio",
0xA124: u"CODIAN Audio",
0xCC12: u"Intel YUV12 Codec",
0xCFCC: u"Digital Processing Systems Perception Motion JPEG",
0xD261: u"DEC H.261",
0xD263: u"DEC H.263",
0xFFFE: u"Extensible Wave Format",
0xFFFF: u"Unregistered",
}
| jetskijoe/headphones | lib/mutagen/asf/_util.py | Python | gpl-3.0 | 10,716 | [
"CRYSTAL"
] | 9bd2437e13ccd05417c55c410266327e175555ecd169a85c6b609adc62f0891d |
import unittest
import datetime
from kev import (Document,CharProperty,DateTimeProperty,
DateProperty,BooleanProperty,IntegerProperty,
FloatProperty)
from kev.exceptions import ValidationException, QueryError
from kev.query import combine_list, combine_dicts
from kev.testcase import kev_handler,KevTestCase
class TestDocument(Document):
    """Base fixture document used throughout this test suite.

    Declares one property of each supported type; stored in the
    's3redis' backend via the shared ``kev_handler``.
    """
    # required, unique, length-bounded string
    name = CharProperty(
        required=True,
        unique=True,
        min_length=5,
        max_length=20)
    # timestamp refreshed on every save (auto_now)
    last_updated = DateTimeProperty(auto_now=True)
    # date set once at creation (auto_now_add)
    date_created = DateProperty(auto_now_add=True)
    is_active = BooleanProperty(default_value=True)
    # integer constrained to the range [1, 20]
    no_subscriptions = IntegerProperty(
        default_value=1, min_value=1, max_value=20)
    gpa = FloatProperty()
    def __unicode__(self):
        return self.name
    class Meta:
        use_db = 's3redis'
        handler = kev_handler
class BaseTestDocumentSlug(TestDocument):
    """Extends TestDocument with two more unique props and an indexed one."""
    slug = CharProperty(required=True, unique=True)
    email = CharProperty(required=True, unique=True)
    # indexed but NOT unique: several docs may share a city
    city = CharProperty(required=True, index=True)
class S3TestDocumentSlug(BaseTestDocumentSlug):
    """Slug document bound to the plain S3 backend."""
    class Meta:
        use_db = 's3'
        handler = kev_handler
class S3RedisTestDocumentSlug(BaseTestDocumentSlug):
    """Slug document bound to the combined S3+Redis backend."""
    class Meta:
        use_db = 's3redis'
        handler = kev_handler
class RedisTestDocumentSlug(BaseTestDocumentSlug):
    """Slug document bound to the Redis backend."""
    class Meta:
        use_db = 'redis'
        handler = kev_handler
class DynamoTestDocumentSlug(BaseTestDocumentSlug):
    """Slug document bound to the DynamoDB backend."""
    class Meta:
        use_db = 'dynamodb'
        handler = kev_handler
class DocumentTestCase(KevTestCase):
    """Unit tests for document defaults, indexing and field validation."""

    def test_default_values(self):
        # defaults must be materialized both on the instance and in _doc
        obj = TestDocument(name='Fred')
        self.assertEqual(obj.is_active, True)
        self.assertEqual(obj._doc.get('is_active'), True)
        self.assertEqual(obj.date_created, datetime.date.today())
        self.assertEqual(obj._doc.get('date_created'), datetime.date.today())
        self.assertEqual(type(obj.last_updated), datetime.datetime)
        self.assertEqual(type(obj._doc.get('last_updated')), datetime.datetime)
        self.assertEqual(obj.no_subscriptions, 1)
        self.assertEqual(obj._doc.get('no_subscriptions'), 1)
        self.assertEqual(obj.gpa, None)

    def test_get_unique_props(self):
        obj = S3RedisTestDocumentSlug(name='Brian', slug='brian', email='brian@host.com',
                                      city='Greensboro', gpa=4.0)
        # BUGFIX: the old assertion compared ``a.sort() == b.sort()``;
        # list.sort() returns None, so it always passed.  Compare sorted
        # copies instead so the assertion actually checks something.
        self.assertEqual(sorted(obj.get_unique_props()),
                         sorted(['name', 'slug', 'email']))

    def test_set_indexed_prop(self):
        obj = S3RedisTestDocumentSlug(name='Brian', slug='brian', email='brian@host.com',
                                      city='Greensboro', gpa=4.0)
        # changing an indexed prop should queue the old index key for removal
        obj.name = 'Tariq'
        self.assertEqual(obj._index_change_list,
                         ['s3redis:s3redistestdocumentslug:indexes:name:brian'])

    def test_validate_valid(self):
        # a fully valid document saves without raising
        t1 = TestDocument(
            name='DNSly',
            is_active=False,
            no_subscriptions=2,
            gpa=3.5)
        t1.save()

    def test_validate_boolean(self):
        t2 = TestDocument(name='Google', is_active='Gone', gpa=4.0)
        with self.assertRaises(ValidationException) as vm:
            t2.save()
        self.assertEqual(str(vm.exception),
                         'is_active: This value should be True or False.')

    def test_validate_datetime(self):
        t2 = TestDocument(name='Google', gpa=4.0, last_updated='today')
        with self.assertRaises(ValidationException) as vm:
            t2.save()
        self.assertEqual(str(vm.exception),
                         'last_updated: This value should be a valid datetime object.')

    def test_validate_date(self):
        t2 = TestDocument(name='Google', gpa=4.0, date_created='today')
        with self.assertRaises(ValidationException) as vm:
            t2.save()
        self.assertEqual(str(vm.exception),
                         'date_created: This value should be a valid date object.')

    def test_validate_integer(self):
        t2 = TestDocument(name='Google', gpa=4.0, no_subscriptions='seven')
        with self.assertRaises(ValidationException) as vm:
            t2.save()
        self.assertEqual(str(vm.exception),
                         'no_subscriptions: This value should be an integer')

    def test_validate_float(self):
        t2 = TestDocument(name='Google', gpa='seven')
        with self.assertRaises(ValidationException) as vm:
            t2.save()
        self.assertEqual(str(vm.exception),
                         'gpa: This value should be a float.')

    def test_validate_unique(self):
        # saving a second document with the same unique 'name' must fail
        t1 = TestDocument(name='Google', gpa=4.0)
        t1.save()
        t2 = TestDocument(name='Google', gpa=4.0)
        with self.assertRaises(ValidationException) as vm:
            t2.save()
        self.assertEqual(str(vm.exception),
                         'There is already a name with the value of Google')
class S3RedisQueryTestCase(KevTestCase):
    """Query/filter tests against the S3+Redis backend.

    Subclasses swap ``doc_class`` to reuse the same tests against other
    backends (see RedisQueryTestCase and S3QueryTestCase below).
    """
    doc_class = S3RedisTestDocumentSlug

    # three fixture documents: two share city 'Durham', one is 'Charlotte'
    def setUp(self):
        self.t1 = self.doc_class(name='Goo and Sons', slug='goo-sons', gpa=3.2,
                                 email='goo@sons.com', city="Durham")
        self.t1.save()
        self.t2 = self.doc_class(
            name='Great Mountain',
            slug='great-mountain',
            gpa=3.2,
            email='great@mountain.com',
            city='Charlotte')
        self.t2.save()
        self.t3 = self.doc_class(
            name='Lakewoood YMCA',
            slug='lakewood-ymca',
            gpa=3.2,
            email='lakewood@ymca.com',
            city='Durham')
        self.t3.save()
    def test_non_unique_filter(self):
        # filter values appear to be case-insensitive here ('durham')
        qs = self.doc_class.objects().filter({'city': 'durham'})
        self.assertEqual(2, qs.count())
    def test_non_unique_wildcard_filter(self):
        # '*' wildcard inside the filter value
        qs = self.doc_class.objects().filter({'city': 'du*ham'})
        self.assertEqual(2, qs.count())
    def test_objects_get_single_indexed_prop(self):
        obj = self.doc_class.objects().get({'name': self.t1.name})
        self.assertEqual(obj.slug, self.t1.slug)
    def test_get(self):
        obj = self.doc_class.get(self.t1.id)
        self.assertEqual(obj._id, self.t1._id)
    def test_flush_db(self):
        # flush_db removes every stored document
        self.assertEqual(3, len(list(self.doc_class.all())))
        self.doc_class().flush_db()
        self.assertEqual(0, len(list(self.doc_class.all())))
    def test_delete(self):
        # deleting one of the two Durham docs leaves exactly one
        qs = self.doc_class.objects().filter({'city': 'durham'})
        self.assertEqual(2, qs.count())
        qs[0].delete()
        qs = self.doc_class.objects().filter({'city': 'durham'})
        self.assertEqual(1, qs.count())
    def test_wildcard_queryset_iter(self):
        qs = self.doc_class.objects().filter({'city': 'du*ham'})
        for i in qs:
            self.assertIsNotNone(i.id)
    def test_queryset_iter(self):
        qs = self.doc_class.objects().filter({'city': 'durham'})
        for i in qs:
            self.assertIsNotNone(i.id)
    def test_wildcard_queryset_chaining(self):
        # chained filters intersect: Goo* AND Du*ham -> only t1
        qs = self.doc_class.objects().filter(
            {'name': 'Goo*'}).filter({'city': 'Du*ham'})
        self.assertEqual(1, qs.count())
        self.assertEqual(self.t1.name, qs[0].name)
    def test_queryset_chaining(self):
        qs = self.doc_class.objects().filter(
            {'name': 'Goo and Sons'}).filter({'city': 'Durham'})
        self.assertEqual(1, qs.count())
        self.assertEqual(self.t1.name, qs[0].name)
    def test_objects_get_no_result(self):
        # get() must raise when nothing matches
        with self.assertRaises(QueryError) as vm:
            self.doc_class.objects().get({'username': 'affsdfadsf'})
        self.assertEqual(str(vm.exception),
                         'This query did not return a result.')
    def test_objects_wildcard_get_no_result(self):
        with self.assertRaises(QueryError) as vm:
            self.doc_class.objects().get({'username': 'affsd*adsf'})
        self.assertEqual(str(vm.exception),
                         'This query did not return a result.')
    def test_all(self):
        qs = self.doc_class.all()
        self.assertEqual(3, len(list(qs)))
    def test_objects_get_multiple_results(self):
        # get() must also raise when more than one document matches
        with self.assertRaises(QueryError) as vm:
            self.doc_class.objects().get({'city': 'durham'})
        self.assertEqual(str(vm.exception),
                         'This query should return exactly one result. Your query returned 2')
    def test_combine_list(self):
        # helper concatenates two lists
        a = [1, 2, 3]
        b = ['a', 'b', 'c']
        c = combine_list(a, b)
        self.assertEqual([1, 2, 3, 'a', 'b', 'c'], c)
    def test_combine_dicts(self):
        # helper merges dicts; values for shared keys become a list
        a = {'username': 'boywonder', 'doc_type': 'goo'}
        b = {'email': 'boywonder@superteam.com', 'doc_type': 'foo'}
        c = combine_dicts(a, b)
        self.assertEqual({'username': 'boywonder',
                          'email': 'boywonder@superteam.com',
                          'doc_type': ['goo', 'foo']}, c)
class RedisQueryTestCase(S3RedisQueryTestCase):
    """Re-runs the full query test suite against the pure Redis backend."""
    doc_class = RedisTestDocumentSlug
class S3QueryTestCase(S3RedisQueryTestCase):
    """Query tests against the plain S3 backend.

    The S3 backend cannot evaluate chained (multi-index) filters, so the
    chaining tests are overridden to assert that ``count()`` raises
    ``ValueError``, and the wildcard filter test is disabled.
    """
    doc_class = S3TestDocumentSlug

    def test_wildcard_queryset_chaining(self):
        qs = self.doc_class.objects().filter(
            {'name': 'Goo and Sons'}).filter({'city': 'Du*ham'})
        with self.assertRaises(ValueError):
            qs.count()

    def test_queryset_chaining(self):
        qs = self.doc_class.objects().filter(
            {'name': 'Goo and Sons'}).filter({'city': 'Durham'})
        with self.assertRaises(ValueError):
            qs.count()

    def test_non_unique_wildcard_filter(self):
        # Disable the inherited wildcard test for S3.
        # BUGFIX: this method was defined twice; the first definition
        # (which exercised wildcard filtering) was dead code shadowed by
        # this no-op override and has been removed.
        pass
class DynamoTestCase(KevTestCase):
    """Query tests for the DynamoDB backend.

    Standalone (not inheriting S3RedisQueryTestCase): filter values here
    use the exact case 'Durham', and a pagination test is added.
    """
    doc_class = DynamoTestDocumentSlug

    # three fixture documents: two in 'Durham', one in 'Charlotte'
    def setUp(self):
        self.t1 = self.doc_class(name='Goo and Sons', slug='goo-sons', gpa=3.2,
                                 email='goo@sons.com', city="Durham")
        self.t1.save()
        self.t2 = self.doc_class(name='Great Mountain', slug='great-mountain', gpa=3.2,
                                 email='great@mountain.com', city='Charlotte')
        self.t2.save()
        self.t3 = self.doc_class(name='Lakewoood YMCA', slug='lakewood-ymca', gpa=3.2,
                                 email='lakewood@ymca.com', city='Durham')
        self.t3.save()
    def test_get(self):
        obj = self.doc_class.get(self.t1.id)
        self.assertEqual(obj._id, self.t1._id)
    def test_flush_db(self):
        self.assertEqual(3, len(list(self.doc_class.all())))
        self.doc_class().flush_db()
        self.assertEqual(0, len(list(self.doc_class.all())))
    def test_delete(self):
        qs = self.doc_class.objects().filter({'city': 'Durham'})
        self.assertEqual(2, qs.count())
        qs[0].delete()
        qs = self.doc_class.objects().filter({'city': 'Durham'})
        self.assertEqual(1, qs.count())
    def test_all(self):
        qs = self.doc_class.all()
        self.assertEqual(3, len(list(qs)))
    def test_non_unique_filter(self):
        qs = self.doc_class.objects().filter({'city': 'Durham'})
        self.assertEqual(2, qs.count())
    def test_objects_get_single_indexed_prop(self):
        obj = self.doc_class.objects().get({'name': self.t1.name})
        self.assertEqual(obj.slug, self.t1.slug)
    def test_queryset_chaining(self):
        qs = self.doc_class.objects().filter(
            {'name': 'Goo and Sons'}).filter({'city': 'Durham'})
        self.assertEqual(1, qs.count())
        self.assertEqual(self.t1.name, qs[0].name)
    def test_more_than_hundred_objects(self):
        # exercise DynamoDB pagination: scans return at most ~100 items per
        # page, so 113 total documents require multiple pages
        for i in range(110):
            doc = self.doc_class(name='Object_{0}'.format(i), slug='object-{0}'.format(i), gpa=4.6,
                                 email='object_{0}@ymca.com'.format(i), city='Durham')
            doc.save()
        qs = self.doc_class.all()
        self.assertEqual(113, len(list(qs)))
        qs = self.doc_class.objects().filter({'city': 'Durham'})
        self.assertEqual(112, qs.count())
# allow running this test module directly: python documents.py
if __name__ == '__main__':
    unittest.main()
| armicron/kev | kev/tests/documents.py | Python | gpl-3.0 | 12,189 | [
"Brian"
] | da41b6efd54d6b3f1a608ce0ae7f2c12cc0947ceb0aa63e9a8d4862fab9d14bc |
from numpy.testing import TestCase, run_module_suite, assert_,\
assert_raises
from numpy import random
from numpy.compat import asbytes
import numpy as np
class TestMultinomial(TestCase):
    """Smoke tests for multinomial sampling and randint interval handling."""

    def test_basic(self):
        # must not raise with a simple two-outcome distribution
        random.multinomial(100, [0.2, 0.8])

    def test_zero_probability(self):
        # zero-probability outcomes must be accepted
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])

    def test_int_negative_interval(self):
        # randint(low, high) samples from the half-open interval [low, high)
        assert_( -5 <= random.randint(-5,-1) < -1)
        x = random.randint(-5,-1,5)
        assert_(np.all(-5 <= x))
        assert_(np.all(x < -1))
class TestSetState(TestCase):
    """Tests for saving/restoring RandomState via get_state()/set_state()."""

    def setUp(self):
        # snapshot the generator state right after seeding
        self.seed = 1234567890
        self.prng = random.RandomState(self.seed)
        self.state = self.prng.get_state()

    def test_basic(self):
        # restoring the state must replay the identical stream
        old = self.prng.tomaxint(16)
        self.prng.set_state(self.state)
        new = self.prng.tomaxint(16)
        assert_(np.all(old == new))

    def test_gaussian_reset(self):
        """ Make sure the cached every-other-Gaussian is reset.
        """
        old = self.prng.standard_normal(size=3)
        self.prng.set_state(self.state)
        new = self.prng.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_gaussian_reset_in_media_res(self):
        """ When the state is saved with a cached Gaussian, make sure the cached
        Gaussian is restored.
        """
        self.prng.standard_normal()
        state = self.prng.get_state()
        old = self.prng.standard_normal(size=3)
        self.prng.set_state(state)
        new = self.prng.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_backwards_compatibility(self):
        """ Make sure we can accept old state tuples that do not have the cached
        Gaussian value.
        """
        # old-format state: drop the trailing (has_gauss, cached_gauss) pair
        old_state = self.state[:-2]
        x1 = self.prng.standard_normal(size=16)
        self.prng.set_state(old_state)
        x2 = self.prng.standard_normal(size=16)
        self.prng.set_state(self.state)
        x3 = self.prng.standard_normal(size=16)
        assert_(np.all(x1 == x2))
        assert_(np.all(x1 == x3))

    def test_negative_binomial(self):
        """ Ensure that the negative binomial results take floating point
        arguments without truncation.
        """
        self.prng.negative_binomial(0.5, 0.5)
class TestRandomDist(TestCase):
""" Make sure the random distrobution return the correct value for a
given seed
"""
    def setUp(self):
        # fixed seed so every test reproduces exact reference values
        self.seed = 1234567890
    def test_rand(self):
        # uniform [0, 1) samples must match the seeded reference exactly
        np.random.seed(self.seed)
        actual = np.random.rand(3, 2)
        desired = np.array([[ 0.61879477158567997, 0.59162362775974664],
                            [ 0.88868358904449662, 0.89165480011560816],
                            [ 0.4575674820298663 , 0.7781880808593471 ]])
        np.testing.assert_array_almost_equal(actual, desired, decimal=15)
    def test_randn(self):
        # standard-normal samples must match the seeded reference exactly
        np.random.seed(self.seed)
        actual = np.random.randn(3, 2)
        desired = np.array([[ 1.34016345771863121, 1.73759122771936081],
                            [ 1.498988344300628 , -0.2286433324536169 ],
                            [ 2.031033998682787 , 2.17032494605655257]])
        np.testing.assert_array_almost_equal(actual, desired, decimal=15)
    def test_randint(self):
        # randint uses a half-open interval [low, high)
        np.random.seed(self.seed)
        actual = np.random.randint(-99, 99, size=(3,2))
        desired = np.array([[ 31, 3],
                            [-52, 41],
                            [-48, -66]])
        np.testing.assert_array_equal(actual, desired)
    def test_random_integers(self):
        # random_integers is inclusive of high; same reference stream as randint
        np.random.seed(self.seed)
        actual = np.random.random_integers(-99, 99, size=(3,2))
        desired = np.array([[ 31, 3],
                            [-52, 41],
                            [-48, -66]])
        np.testing.assert_array_equal(actual, desired)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[ 0.61879477158567997, 0.59162362775974664],
[ 0.88868358904449662, 0.89165480011560816],
[ 0.4575674820298663 , 0.7781880808593471 ]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
np.testing.assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
np.testing.assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1,3)
assert_raises(ValueError, sample, [[1,2],[3,4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1,2,3,4], 3,
p=[[0.25,0.25],[0.25,0.25]])
assert_raises(ValueError, sample, [1,2], 3, p=[0.4,0.4,0.2])
assert_raises(ValueError, sample, [1,2], 3, p=[1.1,-0.1])
assert_raises(ValueError, sample, [1,2], 3, p=[0.4,0.4])
assert_raises(ValueError, sample, [1,2,3], 4, replace=False)
assert_raises(ValueError, sample, [1,2,3], 2, replace=False,
p=[1,0,0])
def test_choice_return_shape(self):
p = [0.1,0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1,2], replace=True)))
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1,2], s, replace=True)))
# Check multi dimensional array
s = (2,3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_(np.random.choice(6, s, replace=True).shape, s)
assert_(np.random.choice(6, s, replace=False).shape, s)
assert_(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
np.testing.assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays, and multidimensional versions of both:
for conv in [lambda x: x,
np.asarray,
lambda x: [(i, i) for i in x],
lambda x: np.asarray([(i, i) for i in x])]:
np.random.seed(self.seed)
alist = conv([1,2,3,4,5,6,7,8,9,0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
np.testing.assert_array_equal(actual, desired)
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array([[ 1.45341850513746058e-02, 5.31297615662868145e-04],
[ 1.85366619058432324e-06, 4.19214516800110563e-03],
[ 1.58405155108498093e-04, 1.26252891949397652e-04]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
np.testing.assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[ 63.87858175501090585, 68.68407748911370447],
[ 65.77116116901505904, 47.09686762438974483],
[ 72.3828403199695174 , 74.18408615260374006]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[ 0.54539444573611562, 0.45460555426388438],
[ 0.62345816822039413, 0.37654183177960598]],
[[ 0.55206000085785778, 0.44793999914214233],
[ 0.58964023305154301, 0.41035976694845688]],
[[ 0.59266909280647828, 0.40733090719352177],
[ 0.56974431743975207, 0.43025568256024799]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[ 1.08342649775011624, 1.00607889924557314],
[ 2.46628830085216721, 2.49668106809923884],
[ 0.68717433461363442, 1.69175666993575979]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[ 1.21975394418575878, 1.75135759791559775],
[ 1.44803115017146489, 1.22108959480396262],
[ 1.02176975757740629, 1.34431827623300415]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[ 24.60509188649287182, 28.54993563207210627],
[ 26.13476110204064184, 12.56988482927716078],
[ 31.71863275789960568, 33.30143302795922011]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[ 8, 7],
[17, 17],
[ 5, 12]])
np.testing.assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc = .123456789, scale = 2.0, size = (3, 2))
desired = np.array([[ 0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278 , -1.47374816298446865],
[ 1.10651090478803416, -0.69535848626236174]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[ 9, 9]])
np.testing.assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 0.66599721112760157, 0.52829452552221945],
[ 3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 1.09232835305011444, 0.8648196662399954 ],
[ 4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[ 16.50698631688883822, 36.54846706092654784],
[ 22.67886599981281748, 0.71617561058995771],
[ 65.72798501792723869, 86.84341601437161273]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[ 2, 2],
[ 6, 17],
[ 3, 6]])
np.testing.assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
np.testing.assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean= (.123456789, 10)
cov = [[1,0],[1,0]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[ -1.47027513018564449, 10. ],
[ -1.65915081534845532, 10. ]],
[[ -2.29186329304599745, 10. ],
[ -1.77505606019580053, 10. ]],
[[ -0.54970369430044119, 10. ],
[ 0.29768848031692957, 10. ]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n = 100, p = .12345, size = (3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
np.testing.assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df = 5, nonc = 5, size = (3, 2))
desired = np.array([[ 23.91905354498517511, 13.35324692733826346],
[ 31.22452661329736401, 16.60047399466177254],
[ 5.03461598262724586, 17.94973089023519464]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum = 5, dfden = 2, nonc = 1,
size = (3, 2))
desired = np.array([[ 1.40598099674926669, 0.34207973179285761],
[ 3.57715069265772545, 7.92632662577829805],
[ 0.43741599463544162, 1.1774208752428319 ]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc = .123456789, scale = 2.0, size = (3, 2))
desired = np.array([[ 2.80378370443726244, 3.59863924443872163],
[ 3.121433477601256 , -0.33382987590723379],
[ 4.18552478636557357, 4.46410668111310471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a =.123456789, size = (3, 2))
desired = np.array([[ 2.46852460439034849e+03, 1.41286880810518346e+03],
[ 5.28287797029485181e+07, 6.57720981047328785e+07],
[ 1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam = .123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
np.testing.assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a =.123456789, size = (3, 2))
desired = np.array([[ 0.02048932883240791, 0.01424192241128213],
[ 0.38446073748535298, 0.39499689943484395],
[ 0.00177699707563439, 0.13115505880863756]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale = 10, size = (3, 2))
desired = np.array([[ 13.8882496494248393 , 13.383318339044731 ],
[ 20.95413364294492098, 21.08285015800712614],
[ 11.06066537006854311, 17.35468505778271009]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size = (3, 2))
desired = np.array([[ 0.77127660196445336, -6.55601161955910605],
[ 0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size = (3, 2))
desired = np.array([[ 0.96441739162374596, 0.89556604882105506],
[ 2.1953785836319808 , 2.22243285392490542],
[ 0.6116915921431676 , 1.50592546727413201]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape = 3, size = (3, 2))
desired = np.array([[ 5.50841531318455058, 6.62953470301903103],
[ 5.93988484943779227, 2.31044849402133989],
[ 7.54838614231317084, 8.012756093271868 ]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size = (3, 2))
desired = np.array([[ 1.34016345771863121, 1.73759122771936081],
[ 1.498988344300628 , -0.2286433324536169 ],
[ 2.031033998682787 , 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df = 10, size = (3, 2))
desired = np.array([[ 0.97140611862659965, -0.08830486548450577],
[ 1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left = 5.12, mode = 10.23, right = 20.34,
size = (3, 2))
desired = np.array([[ 12.68117178949215784, 12.4129206149193152 ],
[ 16.20131377335158263, 16.25692138747600524],
[ 11.20400690911820263, 14.4978144835829923 ]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low = 1.23, high=10.54, size = (3, 2))
desired = np.array([[ 6.99097932346268003, 6.73801597444323974],
[ 9.50364421400426274, 9.53130618907631089],
[ 5.48995325769805476, 8.47493103280052118]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu = 1.23, kappa = 1.54, size = (3, 2))
desired = np.array([[ 2.28567572673902042, 2.89163838442285037],
[ 0.38198375564286025, 2.57638023113890746],
[ 1.19153771588353052, 1.83509849681825354]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean = 1.23, scale = 1.54, size = (3, 2))
desired = np.array([[ 3.82935265715889983, 5.13125249184285526],
[ 0.35045403618358717, 1.50832396872003538],
[ 0.24124319895843183, 0.22031101461955038]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a = 1.23, size = (3, 2))
desired = np.array([[ 0.97097342648766727, 0.91422896443565516],
[ 1.89517770034962929, 1.91414357960479564],
[ 0.67057783752390987, 1.39494046635066793]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a = 1.23, size = (3, 2))
desired = np.array([[66, 29],
[ 1, 1],
[ 3, 13]])
np.testing.assert_array_equal(actual, desired)
# Allow running this test module directly from the command line.
# run_module_suite is presumably imported at module scope from numpy.testing
# (the import block is outside this chunk) -- TODO confirm.
if __name__ == "__main__":
    run_module_suite()
| lthurlow/Network-Grapher | proj/external/numpy-1.7.0/numpy/random/tests/test_random.py | Python | mit | 23,877 | [
"Gaussian"
] | b889a1cf44aa08b030c2d935421dba83092ccb11482c3b14ac3883184ce66b0f |
# -*- coding: utf-8 -*-
## Copyright (c) 2015-2022, Exa Analytics Development Team
## Distributed under the terms of the Apache License 2.0
"""
Tests for :mod:`~exatomic.interfaces.cube`
#############################################
"""
import numpy as np
from unittest import TestCase
from exatomic.base import resource, staticdir
from exatomic.interfaces.cube import Cube, uni_from_cubes
from sys import platform
class TestCube(TestCase):
    """Exercise cube-file parsing, universe conversion, rotation and writing."""

    def setUp(self):
        # One large cube and two small companion cubes, plus a universe
        # built from every cube in the static test directory matching 'lu'.
        self.lg = Cube(resource('mol-carbon-dz-1.cube'))
        self.sm1 = Cube(resource('adf-lu-35.cube'))
        self.sm2 = Cube(resource('adf-lu-36.cube'))
        self.uni = uni_from_cubes(staticdir() + '/cube/', ext='*lu*cube')

    def test_parse_atom(self):
        """Every cube file should yield exactly one atom record."""
        editors = (self.lg, self.sm1, self.sm2)
        for ed in editors:
            ed.parse_atom()
        for ed in editors:
            self.assertEqual(ed.atom.shape[0], 1)

    def test_parse_field(self):
        """Each cube carries a single field with the expected grid size."""
        editors = (self.lg, self.sm1, self.sm2)
        for ed in editors:
            ed.parse_field()
        for ed in editors:
            self.assertEqual(ed.field.shape[0], 1)
        for ed, npoints in zip(editors, (132651, 4913, 4913)):
            self.assertEqual(ed.field.field_values[0].shape[0], npoints)

    def test_to_universe(self):
        """Conversion to a universe exposes atom and field attributes."""
        universes = [ed.to_universe() for ed in (self.lg, self.sm1, self.sm2)]
        for uni in universes:
            for attr in ('atom', 'field'):
                self.assertTrue(hasattr(uni, attr))

    def test_uni_from_cubes_rotate_and_write(self):
        """Universe fields can be rotated and written back out as a cube."""
        self.assertEqual(self.uni.field.shape[0], 2)
        self.assertEqual(len(self.uni.field.field_values), 2)
        rotated = self.uni.field.rotate(0, 1, np.pi / 4)
        self.assertEqual(rotated.shape[0], 2)
        # Writing is skipped on Windows (platform string contains 'win').
        if "win" not in platform.casefold():
            written = Cube.from_universe(self.uni, 1)
            self.assertEqual(len(written), 874)
| exa-analytics/exatomic | exatomic/interfaces/tests/test_cube.py | Python | apache-2.0 | 2,188 | [
"ADF"
] | d06566dae053882c2e0a2bfd64cb1139d679c76e4b08fed1c52e56af07831f9d |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
This file provides internal functions to calculate pubchem fingerprints
If you have any questions, please feel free to contact us.
E-mail: biomed@csu.edu.cn
@File name: PubChemFingerprints
@author: Jie Dong and Zhijiang Yao
"""
# Third party modules
from rdkit import Chem, DataStructs
# these are SMARTS patterns corresponding to the PubChem fingerprints
# https://astro.temple.edu/~tua87106/list_fingerprints.pdf
# ftp://ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem_fingerprints.txt
smartsPatts = {
1: ("[H]", 3), # 1-115
2: ("[H]", 7),
3: ("[H]", 15),
4: ("[H]", 31),
5: ("[Li]", 0),
6: ("[Li]", 1),
7: ("[B]", 0),
8: ("[B]", 1),
9: ("[B]", 3),
10: ("[C]", 1),
11: ("[C]", 3),
12: ("[C]", 7),
13: ("[C]", 15),
14: ("[C]", 31),
15: ("[N]", 0),
16: ("[N]", 1),
17: ("[N]", 3),
18: ("[N]", 7),
19: ("[O]", 0),
20: ("[O]", 1),
21: ("[O]", 3),
22: ("[O]", 7),
23: ("[O]", 15),
24: ("[F]", 0),
25: ("[F]", 1),
26: ("[F]", 3),
27: ("[Na]", 0),
28: ("[Na]", 1),
29: ("[Si]", 0),
30: ("[Si]", 1),
31: ("[P]", 0),
32: ("[P]", 1),
33: ("[P]", 3),
34: ("[S]", 0),
35: ("[S]", 1),
36: ("[S]", 3),
37: ("[S]", 7),
38: ("[Cl]", 0),
39: ("[Cl]", 1),
40: ("[Cl]", 3),
41: ("[Cl]", 7),
42: ("[K]", 0),
43: ("[K]", 1),
44: ("[Br]", 0),
45: ("[Br]", 1),
46: ("[Br]", 3),
47: ("[I]", 0),
48: ("[I]", 1),
49: ("[I]", 3),
50: ("[Be]", 0),
51: ("[Mg]", 0),
52: ("[Al]", 0),
53: ("[Ca]", 0),
54: ("[Sc]", 0),
55: ("[Ti]", 0),
56: ("[V]", 0),
57: ("[Cr]", 0),
58: ("[Mn]", 0),
59: ("[Fe]", 0),
60: ("[CO]", 0),
61: ("[Ni]", 0),
62: ("[Cu]", 0),
63: ("[Zn]", 0),
64: ("[Ga]", 0),
65: ("[Ge]", 0),
66: ("[As]", 0),
67: ("[Se]", 0),
68: ("[Kr]", 0),
69: ("[Rb]", 0),
70: ("[Sr]", 0),
71: ("[Y]", 0),
72: ("[Zr]", 0),
73: ("[Nb]", 0),
74: ("[Mo]", 0),
75: ("[Ru]", 0),
76: ("[Rh]", 0),
77: ("[Pd]", 0),
78: ("[Ag]", 0),
79: ("[Cd]", 0),
80: ("[In]", 0),
81: ("[Sn]", 0),
82: ("[Sb]", 0),
83: ("[Te]", 0),
84: ("[Xe]", 0),
85: ("[Cs]", 0),
86: ("[Ba]", 0),
87: ("[Lu]", 0),
88: ("[Hf]", 0),
89: ("[Ta]", 0),
90: ("[W]", 0),
91: ("[Re]", 0),
92: ("[Os]", 0),
93: ("[Ir]", 0),
94: ("[Pt]", 0),
95: ("[Au]", 0),
96: ("[Hg]", 0),
97: ("[Tl]", 0),
98: ("[Pb]", 0),
99: ("[Bi]", 0),
100: ("[La]", 0),
101: ("[Ce]", 0),
102: ("[Pr]", 0),
103: ("[Nd]", 0),
104: ("[Pm]", 0),
105: ("[Sm]", 0),
106: ("[Eu]", 0),
107: ("[Gd]", 0),
108: ("[Tb]", 0),
109: ("[Dy]", 0),
110: ("[Ho]", 0),
111: ("[Er]", 0),
112: ("[Tm]", 0),
113: ("[Yb]", 0),
114: ("[Tc]", 0),
115: ("[U]", 0),
116: ("[Li&!H0]", 0), # 264-881
117: ("[Li]~[Li]", 0),
118: ("[Li]~[#5]", 0),
119: ("[Li]~[#6]", 0),
120: ("[Li]~[#8]", 0),
121: ("[Li]~[F]", 0),
122: ("[Li]~[#15]", 0),
123: ("[Li]~[#16]", 0),
124: ("[Li]~[Cl]", 0),
125: ("[#5&!H0]", 0),
126: ("[#5]~[#5]", 0),
127: ("[#5]~[#6]", 0),
128: ("[#5]~[#7]", 0),
129: ("[#5]~[#8]", 0),
130: ("[#5]~[F]", 0),
131: ("[#5]~[#14]", 0),
132: ("[#5]~[#15]", 0),
133: ("[#5]~[#16]", 0),
134: ("[#5]~[Cl]", 0),
135: ("[#5]~[Br]", 0),
136: ("[#6&!H0]", 0),
137: ("[#6]~[#6]", 0),
138: ("[#6]~[#7]", 0),
139: ("[#6]~[#8]", 0),
140: ("[#6]~[F]", 0),
141: ("[#6]~[Na]", 0),
142: ("[#6]~[Mg]", 0),
143: ("[#6]~[Al]", 0),
144: ("[#6]~[#14]", 0),
145: ("[#6]~[#15]", 0),
146: ("[#6]~[#16]", 0),
147: ("[#6]~[Cl]", 0),
148: ("[#6]~[#33]", 0),
149: ("[#6]~[#34]", 0),
150: ("[#6]~[Br]", 0),
151: ("[#6]~[I]", 0),
152: ("[#7&!H0]", 0),
153: ("[#7]~[#7]", 0),
154: ("[#7]~[#8]", 0),
155: ("[#7]~[F]", 0),
156: ("[#7]~[#14]", 0),
157: ("[#7]~[#15]", 0),
158: ("[#7]~[#16]", 0),
159: ("[#7]~[Cl]", 0),
160: ("[#7]~[Br]", 0),
161: ("[#8&!H0]", 0),
162: ("[#8]~[#8]", 0),
163: ("[#8]~[Mg]", 0),
164: ("[#8]~[Na]", 0),
165: ("[#8]~[Al]", 0),
166: ("[#8]~[#14]", 0),
167: ("[#8]~[#15]", 0),
168: ("[#8]~[K]", 0),
169: ("[F]~[#15]", 0),
170: ("[F]~[#16]", 0),
171: ("[Al&!H0]", 0),
172: ("[Al]~[Cl]", 0),
173: ("[#14&!H0]", 0),
174: ("[#14]~[#14]", 0),
175: ("[#14]~[Cl]", 0),
176: ("[#15&!H0]", 0),
177: ("[#15]~[#15]", 0),
178: ("[#33&!H0]", 0),
179: ("[#33]~[#33]", 0),
180: ("[#6](~Br)(~[#6])", 0),
181: ("[#6](~Br)(~[#6])(~[#6])", 0),
182: ("[#6&!H0]~[Br]", 0),
183: ("[#6](~[Br])(:[c])", 0),
184: ("[#6](~[Br])(:[n])", 0),
185: ("[#6](~[#6])(~[#6])", 0),
186: ("[#6](~[#6])(~[#6])(~[#6])", 0),
187: ("[#6](~[#6])(~[#6])(~[#6])(~[#6])", 0),
188: ("[#6H1](~[#6])(~[#6])(~[#6])", 0),
189: ("[#6](~[#6])(~[#6])(~[#6])(~[#7])", 0),
190: ("[#6](~[#6])(~[#6])(~[#6])(~[#8])", 0),
191: ("[#6H1](~[#6])(~[#6])(~[#7])", 0),
192: ("[#6H1](~[#6])(~[#6])(~[#8])", 0),
193: ("[#6](~[#6])(~[#6])(~[#7])", 0),
194: ("[#6](~[#6])(~[#6])(~[#8])", 0),
195: ("[#6](~[#6])(~[Cl])", 0),
196: ("[#6&!H0](~[#6])(~[Cl])", 0),
197: ("[#6H,#6H2,#6H3,#6H4]~[#6]", 0),
198: ("[#6&!H0](~[#6])(~[#7])", 0),
199: ("[#6&!H0](~[#6])(~[#8])", 0),
200: ("[#6H1](~[#6])(~[#8])(~[#8])", 0),
201: ("[#6&!H0](~[#6])(~[#15])", 0),
202: ("[#6&!H0](~[#6])(~[#16])", 0),
203: ("[#6](~[#6])(~[I])", 0),
204: ("[#6](~[#6])(~[#7])", 0),
205: ("[#6](~[#6])(~[#8])", 0),
206: ("[#6](~[#6])(~[#16])", 0),
207: ("[#6](~[#6])(~[#14])", 0),
208: ("[#6](~[#6])(:c)", 0),
209: ("[#6](~[#6])(:c)(:c)", 0),
210: ("[#6](~[#6])(:c)(:n)", 0),
211: ("[#6](~[#6])(:n)", 0),
212: ("[#6](~[#6])(:n)(:n)", 0),
213: ("[#6](~[Cl])(~[Cl])", 0),
214: ("[#6&!H0](~[Cl])", 0),
215: ("[#6](~[Cl])(:c)", 0),
216: ("[#6](~[F])(~[F])", 0),
217: ("[#6](~[F])(:c)", 0),
218: ("[#6&!H0](~[#7])", 0),
219: ("[#6&!H0](~[#8])", 0),
220: ("[#6&!H0](~[#8])(~[#8])", 0),
221: ("[#6&!H0](~[#16])", 0),
222: ("[#6&!H0](~[#14])", 0),
223: ("[#6&!H0]:c", 0),
224: ("[#6&!H0](:c)(:c)", 0),
225: ("[#6&!H0](:c)(:n)", 0),
226: ("[#6&!H0](:n)", 0),
227: ("[#6H3]", 0),
228: ("[#6](~[#7])(~[#7])", 0),
229: ("[#6](~[#7])(:c)", 0),
230: ("[#6](~[#7])(:c)(:c)", 0),
231: ("[#6](~[#7])(:c)(:n)", 0),
232: ("[#6](~[#7])(:n)", 0),
233: ("[#6](~[#8])(~[#8])", 0),
234: ("[#6](~[#8])(:c)", 0),
235: ("[#6](~[#8])(:c)(:c)", 0),
236: ("[#6](~[#16])(:c)", 0),
237: ("[#6](:c)(:c)", 0),
238: ("[#6](:c)(:c)(:c)", 0),
239: ("[#6](:c)(:c)(:n)", 0),
240: ("[#6](:c)(:n)", 0),
241: ("[#6](:c)(:n)(:n)", 0),
242: ("[#6](:n)(:n)", 0),
243: ("[#7](~[#6])(~[#6])", 0),
244: ("[#7](~[#6])(~[#6])(~[#6])", 0),
245: ("[#7&!H0](~[#6])(~[#6])", 0),
246: ("[#7&!H0](~[#6])", 0),
247: ("[#7&!H0](~[#6])(~[#7])", 0),
248: ("[#7](~[#6])(~[#8])", 0),
249: ("[#7](~[#6])(:c)", 0),
250: ("[#7](~[#6])(:c)(:c)", 0),
251: ("[#7&!H0](~[#7])", 0),
252: ("[#7&!H0](:c)", 0),
253: ("[#7&!H0](:c)(:c)", 0),
254: ("[#7](~[#8])(~[#8])", 0),
255: ("[#7](~[#8])(:o)", 0),
256: ("[#7](:c)(:c)", 0),
257: ("[#7](:c)(:c)(:c)", 0),
258: ("[#8](~[#6])(~[#6])", 0),
259: ("[#8&!H0](~[#6])", 0),
260: ("[#8](~[#6])(~[#15])", 0),
261: ("[#8&!H0](~[#16])", 0),
262: ("[#8](:c)(:c)", 0),
263: ("[#15](~[#6])(~[#6])", 0),
264: ("[#15](~[#8])(~[#8])", 0),
265: ("[#16](~[#6])(~[#6])", 0),
266: ("[#16&!H0](~[#6])", 0),
267: ("[#16](~[#6])(~[#8])", 0),
268: ("[#14](~[#6])(~[#6])", 0),
269: ("[#6]=,:[#6]", 0),
270: ("[#6]#[#6]", 0),
271: ("[#6]=,:[#7]", 0),
272: ("[#6]#[#7]", 0),
273: ("[#6]=,:[#8]", 0),
274: ("[#6]=,:[#16]", 0),
275: ("[#7]=,:[#7]", 0),
276: ("[#7]=,:[#8]", 0),
277: ("[#7]=,:[#15]", 0),
278: ("[#15]=,:[#8]", 0),
279: ("[#15]=,:[#15]", 0),
280: ("[#6](#[#6])(-,:[#6])", 0),
281: ("[#6&!H0](#[#6])", 0),
282: ("[#6](#[#7])(-,:[#6])", 0),
283: ("[#6](-,:[#6])(-,:[#6])(=,:[#6])", 0),
284: ("[#6](-,:[#6])(-,:[#6])(=,:[#7])", 0),
285: ("[#6](-,:[#6])(-,:[#6])(=,:[#8])", 0),
286: ("[#6](-,:[#6])([Cl])(=,:[#8])", 0),
287: ("[#6&!H0](-,:[#6])(=,:[#6])", 0),
288: ("[#6&!H0](-,:[#6])(=,:[#7])", 0),
289: ("[#6&!H0](-,:[#6])(=,:[#8])", 0),
290: ("[#6](-,:[#6])(-,:[#7])(=,:[#6])", 0),
291: ("[#6](-,:[#6])(-,:[#7])(=,:[#7])", 0),
292: ("[#6](-,:[#6])(-,:[#7])(=,:[#8])", 0),
293: ("[#6](-,:[#6])(-,:[#8])(=,:[#8])", 0),
294: ("[#6](-,:[#6])(=,:[#6])", 0),
295: ("[#6](-,:[#6])(=,:[#7])", 0),
296: ("[#6](-,:[#6])(=,:[#8])", 0),
297: ("[#6]([Cl])(=,:[#8])", 0),
298: ("[#6&!H0](-,:[#7])(=,:[#6])", 0),
299: ("[#6&!H0](=,:[#6])", 0),
300: ("[#6&!H0](=,:[#7])", 0),
301: ("[#6&!H0](=,:[#8])", 0),
302: ("[#6](-,:[#7])(=,:[#6])", 0),
303: ("[#6](-,:[#7])(=,:[#7])", 0),
304: ("[#6](-,:[#7])(=,:[#8])", 0),
305: ("[#6](-,:[#8])(=,:[#8])", 0),
306: ("[#7](-,:[#6])(=,:[#6])", 0),
307: ("[#7](-,:[#6])(=,:[#8])", 0),
308: ("[#7](-,:[#8])(=,:[#8])", 0),
309: ("[#15](-,:[#8])(=,:[#8])", 0),
310: ("[#16](-,:[#6])(=,:[#8])", 0),
311: ("[#16](-,:[#8])(=,:[#8])", 0),
312: ("[#16](=,:[#8])(=,:[#8])", 0),
313: ("[#6]-,:[#6]-,:[#6]#[#6]", 0),
314: ("[#8]-,:[#6]-,:[#6]=,:[#7]", 0),
315: ("[#8]-,:[#6]-,:[#6]=,:[#8]", 0),
316: ("[#7]:[#6]-,:[#16&!H0]", 0),
317: ("[#7]-,:[#6]-,:[#6]=,:[#6]", 0),
318: ("[#8]=,:[#16]-,:[#6]-,:[#6]", 0),
319: ("[#7]#[#6]-,:[#6]=,:[#6]", 0),
320: ("[#6]=,:[#7]-,:[#7]-,:[#6]", 0),
321: ("[#8]=,:[#16]-,:[#6]-,:[#7]", 0),
322: ("[#16]-,:[#16]-,:[#6]:[#6]", 0),
323: ("[#6]:[#6]-,:[#6]=,:[#6]", 0),
324: ("[#16]:[#6]:[#6]:[#6]", 0),
325: ("[#6]:[#7]:[#6]-,:[#6]", 0),
326: ("[#16]-,:[#6]:[#7]:[#6]", 0),
327: ("[#16]:[#6]:[#6]:[#7]", 0),
328: ("[#16]-,:[#6]=,:[#7]-,:[#6]", 0),
329: ("[#6]-,:[#8]-,:[#6]=,:[#6]", 0),
330: ("[#7]-,:[#7]-,:[#6]:[#6]", 0),
331: ("[#16]-,:[#6]=,:[#7&!H0]", 0),
332: ("[#16]-,:[#6]-,:[#16]-,:[#6]", 0),
333: ("[#6]:[#16]:[#6]-,:[#6]", 0),
334: ("[#8]-,:[#16]-,:[#6]:[#6]", 0),
335: ("[#6]:[#7]-,:[#6]:[#6]", 0),
336: ("[#7]-,:[#16]-,:[#6]:[#6]", 0),
337: ("[#7]-,:[#6]:[#7]:[#6]", 0),
338: ("[#7]:[#6]:[#6]:[#7]", 0),
339: ("[#7]-,:[#6]:[#7]:[#7]", 0),
340: ("[#7]-,:[#6]=,:[#7]-,:[#6]", 0),
341: ("[#7]-,:[#6]=,:[#7&!H0]", 0),
342: ("[#7]-,:[#6]-,:[#16]-,:[#6]", 0),
343: ("[#6]-,:[#6]-,:[#6]=,:[#6]", 0),
344: ("[#6]-,:[#7]:[#6&!H0]", 0),
345: ("[#7]-,:[#6]:[#8]:[#6]", 0),
346: ("[#8]=,:[#6]-,:[#6]:[#6]", 0),
347: ("[#8]=,:[#6]-,:[#6]:[#7]", 0),
348: ("[#6]-,:[#7]-,:[#6]:[#6]", 0),
349: ("[#7]:[#7]-,:[#6&!H0]", 0),
350: ("[#8]-,:[#6]:[#6]:[#7]", 0),
351: ("[#8]-,:[#6]=,:[#6]-,:[#6]", 0),
352: ("[#7]-,:[#6]:[#6]:[#7]", 0),
353: ("[#6]-,:[#16]-,:[#6]:[#6]", 0),
354: ("[Cl]-,:[#6]:[#6]-,:[#6]", 0),
355: ("[#7]-,:[#6]=,:[#6&!H0]", 0),
356: ("[Cl]-,:[#6]:[#6&!H0]", 0),
357: ("[#7]:[#6]:[#7]-,:[#6]", 0),
358: ("[Cl]-,:[#6]:[#6]-,:[#8]", 0),
359: ("[#6]-,:[#6]:[#7]:[#6]", 0),
360: ("[#6]-,:[#6]-,:[#16]-,:[#6]", 0),
361: ("[#16]=,:[#6]-,:[#7]-,:[#6]", 0),
362: ("[Br]-,:[#6]:[#6]-,:[#6]", 0),
363: ("[#7&!H0]-,:[#7&!H0]", 0),
364: ("[#16]=,:[#6]-,:[#7&!H0]", 0),
365: ("[#6]-,:[#33]-[#8&!H0]", 0),
366: ("[#16]:[#6]:[#6&!H0]", 0),
367: ("[#8]-,:[#7]-,:[#6]-,:[#6]", 0),
368: ("[#7]-,:[#7]-,:[#6]-,:[#6]", 0),
369: ("[#6H,#6H2,#6H3]=,:[#6H,#6H2,#6H3]", 0),
370: ("[#7]-,:[#7]-,:[#6]-,:[#7]", 0),
371: ("[#8]=,:[#6]-,:[#7]-,:[#7]", 0),
372: ("[#7]=,:[#6]-,:[#7]-,:[#6]", 0),
373: ("[#6]=,:[#6]-,:[#6]:[#6]", 0),
374: ("[#6]:[#7]-,:[#6&!H0]", 0),
375: ("[#6]-,:[#7]-,:[#7&!H0]", 0),
376: ("[#7]:[#6]:[#6]-,:[#6]", 0),
377: ("[#6]-,:[#6]=,:[#6]-,:[#6]", 0),
378: ("[#33]-,:[#6]:[#6&!H0]", 0),
379: ("[Cl]-,:[#6]:[#6]-,:[Cl]", 0),
380: ("[#6]:[#6]:[#7&!H0]", 0),
381: ("[#7&!H0]-,:[#6&!H0]", 0),
382: ("[Cl]-,:[#6]-,:[#6]-,:[Cl]", 0),
383: ("[#7]:[#6]-,:[#6]:[#6]", 0),
384: ("[#16]-,:[#6]:[#6]-,:[#6]", 0),
385: ("[#16]-,:[#6]:[#6&!H0]", 0),
386: ("[#16]-,:[#6]:[#6]-,:[#7]", 0),
387: ("[#16]-,:[#6]:[#6]-,:[#8]", 0),
388: ("[#8]=,:[#6]-,:[#6]-,:[#6]", 0),
389: ("[#8]=,:[#6]-,:[#6]-,:[#7]", 0),
390: ("[#8]=,:[#6]-,:[#6]-,:[#8]", 0),
391: ("[#7]=,:[#6]-,:[#6]-,:[#6]", 0),
392: ("[#7]=,:[#6]-,:[#6&!H0]", 0),
393: ("[#6]-,:[#7]-,:[#6&!H0]", 0),
394: ("[#8]-,:[#6]:[#6]-,:[#6]", 0),
395: ("[#8]-,:[#6]:[#6&!H0]", 0),
396: ("[#8]-,:[#6]:[#6]-,:[#7]", 0),
397: ("[#8]-,:[#6]:[#6]-,:[#8]", 0),
398: ("[#7]-,:[#6]:[#6]-,:[#6]", 0),
399: ("[#7]-,:[#6]:[#6&!H0]", 0),
400: ("[#7]-,:[#6]:[#6]-,:[#7]", 0),
401: ("[#8]-,:[#6]-,:[#6]:[#6]", 0),
402: ("[#7]-,:[#6]-,:[#6]:[#6]", 0),
403: ("[Cl]-,:[#6]-,:[#6]-,:[#6]", 0),
404: ("[Cl]-,:[#6]-,:[#6]-,:[#8]", 0),
405: ("[#6]:[#6]-,:[#6]:[#6]", 0),
406: ("[#8]=,:[#6]-,:[#6]=,:[#6]", 0),
407: ("[Br]-,:[#6]-,:[#6]-,:[#6]", 0),
408: ("[#7]=,:[#6]-,:[#6]=,:[#6]", 0),
409: ("[#6]=,:[#6]-,:[#6]-,:[#6]", 0),
410: ("[#7]:[#6]-,:[#8&!H0]", 0),
411: ("[#8]=,:[#7]-,:c:c", 0),
412: ("[#8]-,:[#6]-,:[#7&!H0]", 0),
413: ("[#7]-,:[#6]-,:[#7]-,:[#6]", 0),
414: ("[Cl]-,:[#6]-,:[#6]=,:[#8]", 0),
415: ("[Br]-,:[#6]-,:[#6]=,:[#8]", 0),
416: ("[#8]-,:[#6]-,:[#8]-,:[#6]", 0),
417: ("[#6]=,:[#6]-,:[#6]=,:[#6]", 0),
418: ("[#6]:[#6]-,:[#8]-,:[#6]", 0),
419: ("[#8]-,:[#6]-,:[#6]-,:[#7]", 0),
420: ("[#8]-,:[#6]-,:[#6]-,:[#8]", 0),
421: ("N#[#6]-,:[#6]-,:[#6]", 0),
422: ("[#7]-,:[#6]-,:[#6]-,:[#7]", 0),
423: ("[#6]:[#6]-,:[#6]-,:[#6]", 0),
424: ("[#6&!H0]-,:[#8&!H0]", 0),
425: ("n:c:n:c", 0),
426: ("[#8]-,:[#6]-,:[#6]=,:[#6]", 0),
427: ("[#8]-,:[#6]-,:[#6]:[#6]-,:[#6]", 0),
428: ("[#8]-,:[#6]-,:[#6]:[#6]-,:[#8]", 0),
429: ("[#7]=,:[#6]-,:[#6]:[#6&!H0]", 0),
430: ("c:c-,:[#7]-,:c:c", 0),
431: ("[#6]-,:[#6]:[#6]-,:c:c", 0),
432: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
433: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#7]", 0),
434: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#8]", 0),
435: ("[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
436: ("[Cl]-,:[#6]:[#6]-,:[#8]-,:[#6]", 0),
437: ("c:c-,:[#6]=,:[#6]-,:[#6]", 0),
438: ("[#6]-,:[#6]:[#6]-,:[#7]-,:[#6]", 0),
439: ("[#6]-,:[#16]-,:[#6]-,:[#6]-,:[#6]", 0),
440: ("[#7]-,:[#6]:[#6]-,:[#8&!H0]", 0),
441: ("[#8]=,:[#6]-,:[#6]-,:[#6]=,:[#8]", 0),
442: ("[#6]-,:[#6]:[#6]-,:[#8]-,:[#6]", 0),
443: ("[#6]-,:[#6]:[#6]-,:[#8&!H0]", 0),
444: ("[Cl]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
445: ("[#7]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
446: ("[#7]-,:[#6]-,:[#6]-,:[#6]-,:[#7]", 0),
447: ("[#6]-,:[#8]-,:[#6]-,:[#6]=,:[#6]", 0),
448: ("c:c-,:[#6]-,:[#6]-,:[#6]", 0),
449: ("[#7]=,:[#6]-,:[#7]-,:[#6]-,:[#6]", 0),
450: ("[#8]=,:[#6]-,:[#6]-,:c:c", 0),
451: ("[Cl]-,:[#6]:[#6]:[#6]-,:[#6]", 0),
452: ("[#6H,#6H2,#6H3]-,:[#6]=,:[#6H,#6H2,#6H3]", 0),
453: ("[#7]-,:[#6]:[#6]:[#6]-,:[#6]", 0),
454: ("[#7]-,:[#6]:[#6]:[#6]-,:[#7]", 0),
455: ("[#8]=,:[#6]-,:[#6]-,:[#7]-,:[#6]", 0),
456: ("[#6]-,:c:c:[#6]-,:[#6]", 0),
457: ("[#6]-,:[#8]-,:[#6]-,:[#6]:c", 0),
458: ("[#8]=,:[#6]-,:[#6]-,:[#8]-,:[#6]", 0),
459: ("[#8]-,:[#6]:[#6]-,:[#6]-,:[#6]", 0),
460: ("[#7]-,:[#6]-,:[#6]-,:[#6]:c", 0),
461: ("[#6]-,:[#6]-,:[#6]-,:[#6]:c", 0),
462: ("[Cl]-,:[#6]-,:[#6]-,:[#7]-,:[#6]", 0),
463: ("[#6]-,:[#8]-,:[#6]-,:[#8]-,:[#6]", 0),
464: ("[#7]-,:[#6]-,:[#6]-,:[#7]-,:[#6]", 0),
465: ("[#7]-,:[#6]-,:[#8]-,:[#6]-,:[#6]", 0),
466: ("[#6]-,:[#7]-,:[#6]-,:[#6]-,:[#6]", 0),
467: ("[#6]-,:[#6]-,:[#8]-,:[#6]-,:[#6]", 0),
468: ("[#7]-,:[#6]-,:[#6]-,:[#8]-,:[#6]", 0),
469: ("c:c:n:n:c", 0),
470: ("[#6]-,:[#6]-,:[#6]-,:[#8&!H0]", 0),
471: ("c:[#6]-,:[#6]-,:[#6]:c", 0),
472: ("[#8]-,:[#6]-,:[#6]=,:[#6]-,:[#6]", 0),
473: ("c:c-,:[#8]-,:[#6]-,:[#6]", 0),
474: ("[#7]-,:[#6]:c:c:n", 0),
475: ("[#8]=,:[#6]-,:[#8]-,:[#6]:c", 0),
476: ("[#8]=,:[#6]-,:[#6]:[#6]-,:[#6]", 0),
477: ("[#8]=,:[#6]-,:[#6]:[#6]-,:[#7]", 0),
478: ("[#8]=,:[#6]-,:[#6]:[#6]-,:[#8]", 0),
479: ("[#6]-,:[#8]-,:[#6]:[#6]-,:[#6]", 0),
480: ("[#8]=,:[#33]-,:[#6]:c:c", 0),
481: ("[#6]-,:[#7]-,:[#6]-,:[#6]:c", 0),
482: ("[#16]-,:[#6]:c:c-,:[#7]", 0),
483: ("[#8]-,:[#6]:[#6]-,:[#8]-,:[#6]", 0),
484: ("[#8]-,:[#6]:[#6]-,:[#8&!H0]", 0),
485: ("[#6]-,:[#6]-,:[#8]-,:[#6]:c", 0),
486: ("[#7]-,:[#6]-,:[#6]:[#6]-,:[#6]", 0),
487: ("[#6]-,:[#6]-,:[#6]:[#6]-,:[#6]", 0),
488: ("[#7]-,:[#7]-,:[#6]-,:[#7&!H0]", 0),
489: ("[#6]-,:[#7]-,:[#6]-,:[#7]-,:[#6]", 0),
490: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
491: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#7]", 0),
492: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#8]", 0),
493: ("[#6]=,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
494: ("[#8]-,:[#6]-,:[#6]-,:[#6]=,:[#6]", 0),
495: ("[#8]-,:[#6]-,:[#6]-,:[#6]=,:[#8]", 0),
496: ("[#6&!H0]-,:[#6]-,:[#7&!H0]", 0),
497: ("[#6]-,:[#6]=,:[#7]-,:[#7]-,:[#6]", 0),
498: ("[#8]=,:[#6]-,:[#7]-,:[#6]-,:[#6]", 0),
499: ("[#8]=,:[#6]-,:[#7]-,:[#6&!H0]", 0),
500: ("[#8]=,:[#6]-,:[#7]-,:[#6]-,:[#7]", 0),
501: ("[#8]=,:[#7]-,:[#6]:[#6]-,:[#7]", 0),
502: ("[#8]=,:[#7]-,:c:c-,:[#8]", 0),
503: ("[#8]=,:[#6]-,:[#7]-,:[#6]=,:[#8]", 0),
504: ("[#8]-,:[#6]:[#6]:[#6]-,:[#6]", 0),
505: ("[#8]-,:[#6]:[#6]:[#6]-,:[#7]", 0),
506: ("[#8]-,:[#6]:[#6]:[#6]-,:[#8]", 0),
507: ("[#7]-,:[#6]-,:[#7]-,:[#6]-,:[#6]", 0),
508: ("[#8]-,:[#6]-,:[#6]-,:[#6]:c", 0),
509: ("[#6]-,:[#6]-,:[#7]-,:[#6]-,:[#6]", 0),
510: ("[#6]-,:[#7]-,:[#6]:[#6]-,:[#6]", 0),
511: ("[#6]-,:[#6]-,:[#16]-,:[#6]-,:[#6]", 0),
512: ("[#8]-,:[#6]-,:[#6]-,:[#7]-,:[#6]", 0),
513: ("[#6]-,:[#6]=,:[#6]-,:[#6]-,:[#6]", 0),
514: ("[#8]-,:[#6]-,:[#8]-,:[#6]-,:[#6]", 0),
515: ("[#8]-,:[#6]-,:[#6]-,:[#8]-,:[#6]", 0),
516: ("[#8]-,:[#6]-,:[#6]-,:[#8&!H0]", 0),
517: ("[#6]-,:[#6]=,:[#6]-,:[#6]=,:[#6]", 0),
518: ("[#7]-,:[#6]:[#6]-,:[#6]-,:[#6]", 0),
519: ("[#6]=,:[#6]-,:[#6]-,:[#8]-,:[#6]", 0),
520: ("[#6]=,:[#6]-,:[#6]-,:[#8&!H0]", 0),
521: ("[#6]-,:[#6]:[#6]-,:[#6]-,:[#6]", 0),
522: ("[Cl]-,:[#6]:[#6]-,:[#6]=,:[#8]", 0),
523: ("[Br]-,:[#6]:c:c-,:[#6]", 0),
524: ("[#8]=,:[#6]-,:[#6]=,:[#6]-,:[#6]", 0),
525: ("[#8]=,:[#6]-,:[#6]=,:[#6&!H0]", 0),
526: ("[#8]=,:[#6]-,:[#6]=,:[#6]-,:[#7]", 0),
527: ("[#7]-,:[#6]-,:[#7]-,:[#6]:c", 0),
528: ("[Br]-,:[#6]-,:[#6]-,:[#6]:c", 0),
529: ("[#7]#[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
530: ("[#6]-,:[#6]=,:[#6]-,:[#6]:c", 0),
531: ("[#6]-,:[#6]-,:[#6]=,:[#6]-,:[#6]", 0),
532: ("[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
533: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
534: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#8]", 0),
535: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#7]", 0),
536: ("[#7]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
537: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
538: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#7]", 0),
539: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#8]", 0),
540: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]=,:[#8]", 0),
541: ("[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
542: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
543: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#8]", 0),
544: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#7]", 0),
545: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
546: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#8]", 0),
547: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]=,:[#8]", 0),
548: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#7]", 0),
549: ("[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
550: ("[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6](-,:[#6])-,:[#6]", 0),
551: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
552: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6](-,:[#6])-,:[#6]", 0),
553: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#8]-,:[#6]", 0),
554: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6](-,:[#8])-,:[#6]", 0),
555: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#7]-,:[#6]", 0),
556: ("[#8]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6](-,:[#7])-,:[#6]", 0),
557: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6]", 0),
558: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6](-,:[#8])-,:[#6]", 0),
559: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6](=,:[#8])-,:[#6]", 0),
560: ("[#8]=,:[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6](-,:[#7])-,:[#6]", 0),
561: ("[#6]-,:[#6](-,:[#6])-,:[#6]-,:[#6]", 0),
562: ("[#6]-,:[#6](-,:[#6])-,:[#6]-,:[#6]-,:[#6]", 0),
563: ("[#6]-,:[#6]-,:[#6](-,:[#6])-,:[#6]-,:[#6]", 0),
564: ("[#6]-,:[#6](-,:[#6])(-,:[#6])-,:[#6]-,:[#6]", 0),
565: ("[#6]-,:[#6](-,:[#6])-,:[#6](-,:[#6])-,:[#6]", 0),
566: ("[#6]c1ccc([#6])cc1", 0),
567: ("[#6]c1ccc([#8])cc1", 0),
568: ("[#6]c1ccc([#16])cc1", 0),
569: ("[#6]c1ccc([#7])cc1", 0),
570: ("[#6]c1ccc(Cl)cc1", 0),
571: ("[#6]c1ccc(Br)cc1", 0),
572: ("[#8]c1ccc([#8])cc1", 0),
573: ("[#8]c1ccc([#16])cc1", 0),
574: ("[#8]c1ccc([#7])cc1", 0),
575: ("[#8]c1ccc(Cl)cc1", 0),
576: ("[#8]c1ccc(Br)cc1", 0),
577: ("[#16]c1ccc([#16])cc1", 0),
578: ("[#16]c1ccc([#7])cc1", 0),
579: ("[#16]c1ccc(Cl)cc1", 0),
580: ("[#16]c1ccc(Br)cc1", 0),
581: ("[#7]c1ccc([#7])cc1", 0),
582: ("[#7]c1ccc(Cl)cc1", 0),
583: ("[#7]c1ccc(Br)cc1", 0),
584: ("Clc1ccc(Cl)cc1", 0),
585: ("Clc1ccc(Br)cc1", 0),
586: ("Brc1ccc(Br)cc1", 0),
587: ("[#6]c1cc([#6])ccc1", 0),
588: ("[#6]c1cc([#8])ccc1", 0),
589: ("[#6]c1cc([#16])ccc1", 0),
590: ("[#6]c1cc([#7])ccc1", 0),
591: ("[#6]c1cc(Cl)ccc1", 0),
592: ("[#6]c1cc(Br)ccc1", 0),
593: ("[#8]c1cc([#8])ccc1", 0),
594: ("[#8]c1cc([#16])ccc1", 0),
595: ("[#8]c1cc([#7])ccc1", 0),
596: ("[#8]c1cc(Cl)ccc1", 0),
597: ("[#8]c1cc(Br)ccc1", 0),
598: ("[#16]c1cc([#16])ccc1", 0),
599: ("[#16]c1cc([#7])ccc1", 0),
600: ("[#16]c1cc(Cl)ccc1", 0),
601: ("[#16]c1cc(Br)ccc1", 0),
602: ("[#7]c1cc([#7])ccc1", 0),
603: ("[#7]c1cc(Cl)ccc1", 0),
604: ("[#7]c1cc(Br)ccc1", 0),
605: ("Clc1cc(Cl)ccc1", 0),
606: ("Clc1cc(Br)ccc1", 0),
607: ("Brc1cc(Br)ccc1", 0),
608: ("[#6]c1c([#6])cccc1", 0),
609: ("[#6]c1c([#8])cccc1", 0),
610: ("[#6]c1c([#16])cccc1", 0),
611: ("[#6]c1c([#7])cccc1", 0),
612: ("[#6]c1c(Cl)cccc1", 0),
613: ("[#6]c1c(Br)cccc1", 0),
614: ("[#8]c1c([#8])cccc1", 0),
615: ("[#8]c1c([#16])cccc1", 0),
616: ("[#8]c1c([#7])cccc1", 0),
617: ("[#8]c1c(Cl)cccc1", 0),
618: ("[#8]c1c(Br)cccc1", 0),
619: ("[#16]c1c([#16])cccc1", 0),
620: ("[#16]c1c([#7])cccc1", 0),
621: ("[#16]c1c(Cl)cccc1", 0),
622: ("[#16]c1c(Br)cccc1", 0),
623: ("[#7]c1c([#7])cccc1", 0),
624: ("[#7]c1c(Cl)cccc1", 0),
625: ("[#7]c1c(Br)cccc1", 0),
626: ("Clc1c(Cl)cccc1", 0),
627: ("Clc1c(Br)cccc1", 0),
628: ("Brc1c(Br)cccc1", 0),
629: ("[#6][#6]1[#6][#6][#6]([#6])[#6][#6]1", 0),
630: ("[#6][#6]1[#6][#6][#6]([#8])[#6][#6]1", 0),
631: ("[#6][#6]1[#6][#6][#6]([#16])[#6][#6]1", 0),
632: ("[#6][#6]1[#6][#6][#6]([#7])[#6][#6]1", 0),
633: ("[#6][#6]1[#6][#6][#6](Cl)[#6][#6]1", 0),
634: ("[#6][#6]1[#6][#6][#6](Br)[#6][#6]1", 0),
635: ("[#8][#6]1[#6][#6][#6]([#8])[#6][#6]1", 0),
636: ("[#8][#6]1[#6][#6][#6]([#16])[#6][#6]1", 0),
637: ("[#8][#6]1[#6][#6][#6]([#7])[#6][#6]1", 0),
638: ("[#8][#6]1[#6][#6][#6](Cl)[#6][#6]1", 0),
639: ("[#8][#6]1[#6][#6][#6](Br)[#6][#6]1", 0),
640: ("[#16][#6]1[#6][#6][#6]([#16])[#6][#6]1", 0),
641: ("[#16][#6]1[#6][#6][#6]([#7])[#6][#6]1", 0),
642: ("[#16][#6]1[#6][#6][#6](Cl)[#6][#6]1", 0),
643: ("[#16][#6]1[#6][#6][#6](Br)[#6][#6]1", 0),
644: ("[#7][#6]1[#6][#6][#6]([#7])[#6][#6]1", 0),
645: ("[#7][#6]1[#6][#6][#6](Cl)[#6][#6]1", 0),
646: ("[#7][#6]1[#6][#6][#6](Br)[#6][#6]1", 0),
647: ("Cl[#6]1[#6][#6][#6](Cl)[#6][#6]1", 0),
648: ("Cl[#6]1[#6][#6][#6](Br)[#6][#6]1", 0),
649: ("Br[#6]1[#6][#6][#6](Br)[#6][#6]1", 0),
650: ("[#6][#6]1[#6][#6]([#6])[#6][#6][#6]1", 0),
651: ("[#6][#6]1[#6][#6]([#8])[#6][#6][#6]1", 0),
652: ("[#6][#6]1[#6][#6]([#16])[#6][#6][#6]1", 0),
653: ("[#6][#6]1[#6][#6]([#7])[#6][#6][#6]1", 0),
654: ("[#6][#6]1[#6][#6](Cl)[#6][#6][#6]1", 0),
655: ("[#6][#6]1[#6][#6](Br)[#6][#6][#6]1", 0),
656: ("[#8][#6]1[#6][#6]([#8])[#6][#6][#6]1", 0),
657: ("[#8][#6]1[#6][#6]([#16])[#6][#6][#6]1", 0),
658: ("[#8][#6]1[#6][#6]([#7])[#6][#6][#6]1", 0),
659: ("[#8][#6]1[#6][#6](Cl)[#6][#6][#6]1", 0),
660: ("[#8][#6]1[#6][#6](Br)[#6][#6][#6]1", 0),
661: ("[#16][#6]1[#6][#6]([#16])[#6][#6][#6]1", 0),
662: ("[#16][#6]1[#6][#6]([#7])[#6][#6][#6]1", 0),
663: ("[#16][#6]1[#6][#6](Cl)[#6][#6][#6]1", 0),
664: ("[#16][#6]1[#6][#6](Br)[#6][#6][#6]1", 0),
665: ("[#7][#6]1[#6][#6]([#7])[#6][#6][#6]1", 0),
666: ("[#7][#6]1[#6][#6](Cl)[#6][#6][#6]1", 0),
667: ("[#7][#6]1[#6][#6](Br)[#6][#6][#6]1", 0),
668: ("Cl[#6]1[#6][#6](Cl)[#6][#6][#6]1", 0),
669: ("Cl[#6]1[#6][#6](Br)[#6][#6][#6]1", 0),
670: ("Br[#6]1[#6][#6](Br)[#6][#6][#6]1", 0),
671: ("[#6][#6]1[#6]([#6])[#6][#6][#6][#6]1", 0),
672: ("[#6][#6]1[#6]([#8])[#6][#6][#6][#6]1", 0),
673: ("[#6][#6]1[#6]([#16])[#6][#6][#6][#6]1", 0),
674: ("[#6][#6]1[#6]([#7])[#6][#6][#6][#6]1", 0),
675: ("[#6][#6]1[#6](Cl)[#6][#6][#6][#6]1", 0),
676: ("[#6][#6]1[#6](Br)[#6][#6][#6][#6]1", 0),
677: ("[#8][#6]1[#6]([#8])[#6][#6][#6][#6]1", 0),
678: ("[#8][#6]1[#6]([#16])[#6][#6][#6][#6]1", 0),
679: ("[#8][#6]1[#6]([#7])[#6][#6][#6][#6]1", 0),
680: ("[#8][#6]1[#6](Cl)[#6][#6][#6][#6]1", 0),
681: ("[#8][#6]1[#6](Br)[#6][#6][#6][#6]1", 0),
682: ("[#16][#6]1[#6]([#16])[#6][#6][#6][#6]1", 0),
683: ("[#16][#6]1[#6]([#7])[#6][#6][#6][#6]1", 0),
684: ("[#16][#6]1[#6](Cl)[#6][#6][#6][#6]1", 0),
685: ("[#16][#6]1[#6](Br)[#6][#6][#6][#6]1", 0),
686: ("[#7][#6]1[#6]([#7])[#6][#6][#6][#6]1", 0),
687: ("[#7][#6]1[#6](Cl)[#6][#6][#6][#6]1", 0),
688: ("[#7][#6]1[#6](Br)[#6][#6][#6][#6]1", 0),
689: ("Cl[#6]1[#6](Cl)[#6][#6][#6][#6]1", 0),
690: ("Cl[#6]1[#6](Br)[#6][#6][#6][#6]1", 0),
691: ("Br[#6]1[#6](Br)[#6][#6][#6][#6]1", 0),
692: ("[#6][#6]1[#6][#6]([#6])[#6][#6]1", 0),
693: ("[#6][#6]1[#6][#6]([#8])[#6][#6]1", 0),
694: ("[#6][#6]1[#6][#6]([#16])[#6][#6]1", 0),
695: ("[#6][#6]1[#6][#6]([#7])[#6][#6]1", 0),
696: ("[#6][#6]1[#6][#6](Cl)[#6][#6]1", 0),
697: ("[#6][#6]1[#6][#6](Br)[#6][#6]1", 0),
698: ("[#8][#6]1[#6][#6]([#8])[#6][#6]1", 0),
699: ("[#8][#6]1[#6][#6]([#16])[#6][#6]1", 0),
700: ("[#8][#6]1[#6][#6]([#7])[#6][#6]1", 0),
701: ("[#8][#6]1[#6][#6](Cl)[#6][#6]1", 0),
702: ("[#8][#6]1[#6][#6](Br)[#6][#6]1", 0),
703: ("[#16][#6]1[#6][#6]([#16])[#6][#6]1", 0),
704: ("[#16][#6]1[#6][#6]([#7])[#6][#6]1", 0),
705: ("[#16][#6]1[#6][#6](Cl)[#6][#6]1", 0),
706: ("[#16][#6]1[#6][#6](Br)[#6][#6]1", 0),
707: ("[#7][#6]1[#6][#6]([#7])[#6][#6]1", 0),
708: ("[#7][#6]1[#6][#6](Cl)[#6][#6]1", 0),
709: ("[#7][#6]1[#6][#6](Br)[#6][#6]1", 0),
710: ("Cl[#6]1[#6][#6](Cl)[#6][#6]1", 0),
711: ("Cl[#6]1[#6][#6](Br)[#6][#6]1", 0),
712: ("Br[#6]1[#6][#6](Br)[#6][#6]1", 0),
713: ("[#6][#6]1[#6]([#6])[#6][#6][#6]1", 0),
714: ("[#6][#6]1[#6]([#8])[#6][#6][#6]1", 0),
715: ("[#6][#6]1[#6]([#16])[#6][#6][#6]1", 0),
716: ("[#6][#6]1[#6]([#7])[#6][#6][#6]1", 0),
717: ("[#6][#6]1[#6](Cl)[#6][#6][#6]1", 0),
718: ("[#6][#6]1[#6](Br)[#6][#6][#6]1", 0),
719: ("[#8][#6]1[#6]([#8])[#6][#6][#6]1", 0),
720: ("[#8][#6]1[#6]([#16])[#6][#6][#6]1", 0),
721: ("[#8][#6]1[#6]([#7])[#6][#6][#6]1", 0),
722: ("[#8][#6]1[#6](Cl)[#6][#6][#6]1", 0),
723: ("[#8][#6]1[#6](Br)[#6][#6][#6]1", 0),
724: ("[#16][#6]1[#6]([#16])[#6][#6][#6]1", 0),
725: ("[#16][#6]1[#6]([#7])[#6][#6][#6]1", 0),
726: ("[#16][#6]1[#6](Cl)[#6][#6][#6]1", 0),
727: ("[#16][#6]1[#6](Br)[#6][#6][#6]1", 0),
728: ("[#7][#6]1[#6]([#7])[#6][#6][#6]1", 0),
729: ("[#7][#6]1[#6](Cl)[#6][#6]1", 0),
730: ("[#7][#6]1[#6](Br)[#6][#6][#6]1", 0),
731: ("Cl[#6]1[#6](Cl)[#6][#6][#6]1", 0),
732: ("Cl[#6]1[#6](Br)[#6][#6][#6]1", 0),
733: ("Br[#6]1[#6](Br)[#6][#6][#6]1", 0),
}
# Lazily-built module-level cache of (compiled SMARTS, count) pairs;
# populated on first use by calcPubChemFingerPart1() via InitKeys().
PubchemKeys = None
def InitKeys(keyList, keyDict):
    """ *Internal Use Only*
    Compile the SMARTS pattern table into *keyList*; run once.

    Slot ``key - 1`` of *keyList* receives a ``(compiled_pattern, count)``
    pair for every parseable pattern; entries whose pattern is "?" are
    skipped, and parse failures are reported on stdout.
    """
    assert len(keyList) == len(keyDict.keys()), "length mismatch"
    for key, (patt, count) in keyDict.items():
        if patt == "?":
            continue
        sma = Chem.MolFromSmarts(patt)
        if sma:
            keyList[key - 1] = sma, count
        else:
            print("SMARTS parser error for key #%d: %s" % (key, patt))
def calcPubChemFingerPart1(mol, **kwargs):
    """ Calculate PubChem Fingerprints (1-115; 263-881)

    **Arguments**
      - mol: the molecule to be fingerprinted
      - ctor (keyword, optional): bit-vector constructor; defaults to
        DataStructs.SparseBitVect. Other keyword arguments are ignored.

    **Returns**
      a _DataStructs.SparseBitVect_ containing the fingerprint
      (bit 0 is left unused; keys occupy bits 1..N).
    """
    global PubchemKeys
    # Compile the SMARTS table once per process.
    if PubchemKeys is None:
        PubchemKeys = [(None, 0)] * len(smartsPatts)
        InitKeys(PubchemKeys, smartsPatts)
    ctor = kwargs.get("ctor", DataStructs.SparseBitVect)
    res = ctor(len(PubchemKeys) + 1)
    for pos, (patt, count) in enumerate(PubchemKeys, 1):
        if patt is None:
            continue
        if count == 0:
            # Presence/absence key: a single match suffices.
            res[pos] = mol.HasSubstructMatch(patt)
        elif len(mol.GetSubstructMatches(patt)) > count:
            # Count key: the bit fires only above the threshold.
            res[pos] = 1
    return res
def func_1(mol, bits):
    """ *Internal Use Only*
    Calculate PubChem Fingerprints (116-263): any-ring section.

    Tallies rings of each size (3-10 atoms) from the molecule's atom-ring
    info and switches on the corresponding threshold bits in *bits*.
    Returns (list of all ring sizes seen, updated bits).
    """
    counts = {size: 0 for size in range(3, 11)}
    ring_sizes = []
    for ring in mol.GetRingInfo().AtomRings():
        size = len(ring)
        ring_sizes.append(size)
        if size in counts:
            counts[size] += 1

    def switch_on(n, slots):
        # Light the first min(n, len(slots)) threshold bits.
        for pos in slots[:n]:
            bits[pos] = 1

    switch_on(counts[3], [0, 7])
    switch_on(counts[4], [14, 21])
    switch_on(counts[5], [28, 35, 42, 49, 56])
    switch_on(counts[6], [63, 70, 77, 84, 91])
    switch_on(counts[7], [98, 105])
    switch_on(counts[8], [112, 119])
    switch_on(counts[9], [126])
    switch_on(counts[10], [133])
    return ring_sizes, bits
def func_2(mol, bits):
    """ *Internal Use Only*
    saturated or aromatic carbon-only ring

    Counts bond rings that are either fully saturated (all single bonds)
    or fully aromatic with only carbon atoms, then switches on the
    size-threshold bits. Returns (ring sizes counted, updated bits).
    """
    counts = {size: 0 for size in range(3, 11)}
    ring_sizes = []

    def record(size):
        # Every qualifying ring is listed; only sizes 3-10 feed the bits.
        ring_sizes.append(size)
        if size in counts:
            counts[size] += 1

    for ring in mol.GetRingInfo().BondRings():
        names = [mol.GetBondWithIdx(i).GetBondType().name for i in ring]
        # saturated: every bond in the ring is a single bond
        if all(n == "SINGLE" for n in names):
            record(len(ring))
        # aromatic carbon-only: all bonds aromatic and both ends carbon
        if all(n == "AROMATIC" for n in names):
            carbon_only = all(
                mol.GetBondWithIdx(i).GetBeginAtom().GetAtomicNum() == 6
                and mol.GetBondWithIdx(i).GetEndAtom().GetAtomicNum() == 6
                for i in ring)
            if carbon_only:
                record(len(ring))

    def switch_on(n, slots):
        # Light the first min(n, len(slots)) threshold bits.
        for pos in slots[:n]:
            bits[pos] = 1

    switch_on(counts[3], [1, 8])
    switch_on(counts[4], [15, 22])
    switch_on(counts[5], [29, 36, 43, 50, 57])
    switch_on(counts[6], [64, 71, 78, 85, 92])
    switch_on(counts[7], [99, 106])
    switch_on(counts[8], [113, 120])
    switch_on(counts[9], [127])
    switch_on(counts[10], [134])
    return ring_sizes, bits
def func_3(mol, bits):
    """ *Internal Use Only*
    saturated or aromatic nitrogen-containing

    Counts bond rings that are fully saturated, or fully aromatic with at
    least one nitrogen atom, then switches on the size-threshold bits.
    Returns (ring sizes counted, updated bits).
    """
    counts = {size: 0 for size in range(3, 11)}
    ring_sizes = []

    def record(size):
        ring_sizes.append(size)
        if size in counts:
            counts[size] += 1

    for ring in mol.GetRingInfo().BondRings():
        names = [mol.GetBondWithIdx(i).GetBondType().name for i in ring]
        # saturated: every bond in the ring is a single bond
        if all(n == "SINGLE" for n in names):
            record(len(ring))
        # aromatic and containing at least one nitrogen
        if all(n == "AROMATIC" for n in names):
            has_nitrogen = any(
                mol.GetBondWithIdx(i).GetBeginAtom().GetAtomicNum() == 7
                or mol.GetBondWithIdx(i).GetEndAtom().GetAtomicNum() == 7
                for i in ring)
            if has_nitrogen:
                record(len(ring))

    def switch_on(n, slots):
        # Light the first min(n, len(slots)) threshold bits.
        for pos in slots[:n]:
            bits[pos] = 1

    switch_on(counts[3], [2, 9])
    switch_on(counts[4], [16, 23])
    switch_on(counts[5], [30, 37, 44, 51, 58])
    switch_on(counts[6], [65, 72, 79, 86, 93])
    switch_on(counts[7], [100, 107])
    switch_on(counts[8], [114, 121])
    switch_on(counts[9], [128])
    switch_on(counts[10], [135])
    return ring_sizes, bits
def func_4(mol, bits):
    """ *Internal Use Only*
    saturated or aromatic heteroatom-containing

    Counts bond rings that are fully saturated, or fully aromatic with at
    least one atom that is neither H nor C, then switches on the
    size-threshold bits. Returns (ring sizes counted, updated bits).
    """
    counts = {size: 0 for size in range(3, 11)}
    ring_sizes = []

    def record(size):
        ring_sizes.append(size)
        if size in counts:
            counts[size] += 1

    for ring in mol.GetRingInfo().BondRings():
        names = [mol.GetBondWithIdx(i).GetBondType().name for i in ring]
        # saturated: every bond in the ring is a single bond
        if all(n == "SINGLE" for n in names):
            record(len(ring))
        # aromatic and containing at least one heteroatom (not H/C)
        if all(n == "AROMATIC" for n in names):
            has_hetero = any(
                mol.GetBondWithIdx(i).GetBeginAtom().GetAtomicNum() not in [1, 6]
                or mol.GetBondWithIdx(i).GetEndAtom().GetAtomicNum() not in [1, 6]
                for i in ring)
            if has_hetero:
                record(len(ring))

    def switch_on(n, slots):
        # Light the first min(n, len(slots)) threshold bits.
        for pos in slots[:n]:
            bits[pos] = 1

    switch_on(counts[3], [3, 10])
    switch_on(counts[4], [17, 24])
    switch_on(counts[5], [31, 38, 45, 52, 59])
    switch_on(counts[6], [66, 73, 80, 87, 94])
    switch_on(counts[7], [101, 108])
    switch_on(counts[8], [115, 122])
    switch_on(counts[9], [129])
    switch_on(counts[10], [136])
    return ring_sizes, bits
def func_5(mol, bits):
    """ *Internal Use Only*
    unsaturated non-aromatic carbon-only

    Counts bond rings that have at least one non-single bond, no aromatic
    bond, and only carbon atoms, then switches on the size-threshold bits.
    Returns (ring sizes counted, updated bits).
    """
    counts = {size: 0 for size in range(3, 11)}
    ring_sizes = []
    for ring in mol.GetRingInfo().BondRings():
        names = [mol.GetBondWithIdx(i).GetBondType().name for i in ring]
        unsaturated = any(n != "SINGLE" for n in names)
        nonaromatic = all(n != "AROMATIC" for n in names)
        all_carbon = all(
            mol.GetBondWithIdx(i).GetBeginAtom().GetAtomicNum() == 6
            and mol.GetBondWithIdx(i).GetEndAtom().GetAtomicNum() == 6
            for i in ring)
        if unsaturated and nonaromatic and all_carbon:
            size = len(ring)
            ring_sizes.append(size)
            if size in counts:
                counts[size] += 1

    def switch_on(n, slots):
        # Light the first min(n, len(slots)) threshold bits.
        for pos in slots[:n]:
            bits[pos] = 1

    switch_on(counts[3], [4, 11])
    switch_on(counts[4], [18, 25])
    switch_on(counts[5], [32, 39, 46, 53, 60])
    switch_on(counts[6], [67, 74, 81, 88, 95])
    switch_on(counts[7], [102, 109])
    switch_on(counts[8], [116, 123])
    switch_on(counts[9], [130])
    switch_on(counts[10], [137])
    return ring_sizes, bits
def func_6(mol, bits):
    """ *Internal Use Only*
    unsaturated non-aromatic nitrogen-containing

    Counts bond rings that have at least one non-single bond, no aromatic
    bond, and at least one nitrogen atom, then switches on the
    size-threshold bits. Returns (ring sizes counted, updated bits).
    """
    counts = {size: 0 for size in range(3, 11)}
    ring_sizes = []
    for ring in mol.GetRingInfo().BondRings():
        names = [mol.GetBondWithIdx(i).GetBondType().name for i in ring]
        unsaturated = any(n != "SINGLE" for n in names)
        nonaromatic = all(n != "AROMATIC" for n in names)
        has_nitrogen = any(
            mol.GetBondWithIdx(i).GetBeginAtom().GetAtomicNum() == 7
            or mol.GetBondWithIdx(i).GetEndAtom().GetAtomicNum() == 7
            for i in ring)
        if unsaturated and nonaromatic and has_nitrogen:
            size = len(ring)
            ring_sizes.append(size)
            if size in counts:
                counts[size] += 1

    def switch_on(n, slots):
        # Light the first min(n, len(slots)) threshold bits.
        for pos in slots[:n]:
            bits[pos] = 1

    switch_on(counts[3], [5, 12])
    switch_on(counts[4], [19, 26])
    switch_on(counts[5], [33, 40, 47, 54, 61])
    switch_on(counts[6], [68, 75, 82, 89, 96])
    switch_on(counts[7], [103, 110])
    switch_on(counts[8], [117, 124])
    switch_on(counts[9], [131])
    switch_on(counts[10], [138])
    return ring_sizes, bits
def func_7(mol, bits):
    """ *Internal Use Only*
    unsaturated non-aromatic heteroatom-containing

    Counts bond rings that have at least one non-single bond, no aromatic
    bond, and at least one atom that is neither H nor C, then switches on
    the size-threshold bits. Returns (ring sizes counted, updated bits).
    """
    counts = {size: 0 for size in range(3, 11)}
    ring_sizes = []
    for ring in mol.GetRingInfo().BondRings():
        names = [mol.GetBondWithIdx(i).GetBondType().name for i in ring]
        unsaturated = any(n != "SINGLE" for n in names)
        nonaromatic = all(n != "AROMATIC" for n in names)
        has_hetero = any(
            mol.GetBondWithIdx(i).GetBeginAtom().GetAtomicNum() not in [1, 6]
            or mol.GetBondWithIdx(i).GetEndAtom().GetAtomicNum() not in [1, 6]
            for i in ring)
        if unsaturated and nonaromatic and has_hetero:
            size = len(ring)
            ring_sizes.append(size)
            if size in counts:
                counts[size] += 1

    def switch_on(n, slots):
        # Light the first min(n, len(slots)) threshold bits.
        for pos in slots[:n]:
            bits[pos] = 1

    switch_on(counts[3], [6, 13])
    switch_on(counts[4], [20, 27])
    switch_on(counts[5], [34, 41, 48, 55, 62])
    switch_on(counts[6], [69, 76, 83, 90, 97])
    switch_on(counts[7], [104, 111])
    switch_on(counts[8], [118, 125])
    switch_on(counts[9], [132])
    switch_on(counts[10], [139])
    return ring_sizes, bits
def func_8(mol, bits):
    """ *Internal Use Only*
    aromatic rings or hetero-aromatic rings

    Counts fully-aromatic bond rings and heteroatom-containing bond rings
    (NOTE: mirroring the original logic, the heteroatom tally covers
    *every* ring, aromatic or not), then switches on the corresponding
    bits. Returns the updated bit list.
    """
    n_aromatic = 0
    n_hetero = 0
    for ring in mol.GetRingInfo().BondRings():
        names = [mol.GetBondWithIdx(i).GetBondType().name for i in ring]
        if all(n == "AROMATIC" for n in names):
            n_aromatic += 1
        has_hetero = any(
            mol.GetBondWithIdx(i).GetBeginAtom().GetAtomicNum() not in [1, 6]
            or mol.GetBondWithIdx(i).GetEndAtom().GetAtomicNum() not in [1, 6]
            for i in ring)
        if has_hetero:
            n_hetero += 1
    # Aromatic-ring count bits: one per ring, up to four.
    for pos in [140, 142, 144, 146][:n_aromatic]:
        bits[pos] = 1
    # Hetero-aromatic bits only fire when both tallies agree (exact chain
    # preserved from the original: e.g. aromatic=3, hetero=2 sets nothing).
    if n_aromatic >= 4 and n_hetero >= 4:
        depth = 4
    elif n_aromatic == 3 and n_hetero == 3:
        depth = 3
    elif n_aromatic == 2 and n_hetero == 2:
        depth = 2
    elif n_aromatic == 1 and n_hetero == 1:
        depth = 1
    else:
        depth = 0
    for pos in [141, 143, 145, 147][:depth]:
        bits[pos] = 1
    return bits
def calcPubChemFingerPart2(mol):  # 116-263
    """ *Internal Use Only*
    Calculate PubChem Fingerprints (116-263): the ring-count section.

    Threads a 148-slot bit list through the eight ring-section helpers
    and returns the final list.
    """
    bits = [0] * 148
    # func_1..func_7 return (ring sizes, bits); only the bits are chained.
    for section in (func_1, func_2, func_3, func_4, func_5, func_6, func_7):
        bits = section(mol, bits)[1]
    # func_8 returns the bit list directly.
    return func_8(mol, bits)
def calcPubChemFingerAll(mol):
    """*Internal Use Only*
    Calculate the full 881-bit PubChem fingerprint as a plain 0/1 list.

    Layout: positions 0-114 come from part 1 (its bits 1-115),
    positions 115-262 from the ring-count section (part 2), and
    positions 263-880 from the remainder of part 1.
    """
    all_bits = [0] * 881
    part1 = calcPubChemFingerPart1(mol).ToBitString()
    # Bit 0 of part 1 is unused, hence the [1:116] window.
    for pos, ch in enumerate(part1[1:116]):
        if ch == "1":
            all_bits[pos] = 1
    for pos, ch in enumerate(part1[116:734]):
        if ch == "1":
            all_bits[pos + 115 + 148] = 1
    for pos, flag in enumerate(calcPubChemFingerPart2(mol)):
        if flag == 1:
            all_bits[pos + 115] = 1
    return all_bits
# ------------------------------------
if __name__ == "__main__":
    # Smoke test: fingerprint a small fused-ring molecule and print the bits.
    print("-" * 10 + "START" + "-" * 10)
    SMILES = "C1=NC2NC3=CNCC3=CC2CC1"
    mol = Chem.MolFromSmiles(SMILES)
    # Explicit hydrogens are added before fingerprinting.
    mol2 = Chem.AddHs(mol)
    result = calcPubChemFingerAll(mol2)
    print("Molecule: %s" % SMILES)
    print("-" * 25)
    print("Results: %s" % result)
    print("-" * 10 + "END" + "-" * 10)
| gadsbyfly/PyBioMed | PyBioMed/PyMolecule/PubChemFingerprints.py | Python | bsd-3-clause | 53,803 | [
"RDKit"
] | ff9e49d1fac82ec5b797f55a0aa681355bab3cdec473c93ca366bf43f453a818 |
import sys
import os
import numpy as np
import h5py
import multiprocessing
import cPickle
import ephem
import matplotlib.pyplot as plt
import types
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import train_test_split
from sklearn import metrics, linear_model, tree, ensemble
# NOTE: endless empehm warnings
# DeprecationWarning: PyOS_ascii_strtod and PyOS_ascii_atof are deprecated. Use PyOS_string_to_double instead.
# https://github.com/brandon-rhodes/pyephem/issues/18
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# Short forecast-variable codes -> long variable names.
# (Presumably the GEFS/NetCDF field names of the forecast data set this
# script reads -- confirm against the actual HDF5/NetCDF files.)
fMapper = {
    "apcp_sfc" : "Total_precipitation",
    "dlwrf_sfc" : "Downward_Long-Wave_Rad_Flux",
    "dswrf_sfc" : "Downward_Short-Wave_Rad_Flux",
    "pres_msl" : "Pressure",
    "pwat_eatm" : "Precipitable_water",
    "spfh_2m" : "Specific_humidity_height_above_ground",
    "tcdc_eatm" : "Total_cloud_cover",
    "tcolc_eatm" : "Total_Column-Integrated_Condensate",
    "tmax_2m" : "Maximum_temperature",
    "tmin_2m" : "Minimum_temperature",
    "tmp_2m" : "Temperature_height_above_ground",
    "tmp_sfc" : "Temperature_surface",
    "ulwrf_sfc" : "Upward_Long-Wave_Rad_Flux_surface",
    "ulwrf_tatm" : "Upward_Long-Wave_Rad_Flux",
    "uswrf_sfc" : "Upward_Short-Wave_Rad_Flux"
}
# Fixed ordering of the 15 forecast-variable codes above.
fKeys = ("apcp_sfc", "dlwrf_sfc", "dswrf_sfc", "pres_msl", "pwat_eatm",
         "spfh_2m", "tcdc_eatm", "tcolc_eatm", "tmax_2m", "tmin_2m",
         "tmp_2m", "tmp_sfc", "ulwrf_sfc", "ulwrf_tatm", "uswrf_sfc")
# Number of daily records in the training and prediction periods.
NPTSt = 5113 # Train
NPTSp = 1796 # Predict
# Minimal script for gaussian process estimation
class Mesonet(object):
    """One measurement site: station id, latitude/longitude, elevation.

    Holds the measured flux plus derived astronomical features
    (sun altitude, moon phase) for the train and predict periods.
    """

    # Class-level date axes shared by all stations (one entry per day).
    dtimet = np.recarray((NPTSt,), dtype={"names": ("time",),
                                          "formats": ("datetime64[D]",)})
    dtimep = np.recarray((NPTSp,), dtype={"names": ("time",),
                                          "formats": ("datetime64[D]",)})

    def __init__(self, stid, nlat, elon, elev):
        # Station metadata: id, latitude (deg), longitude (deg), elevation (m).
        self.stid = stid
        self.nlat = nlat
        self.elon = elon
        self.elev = elev
        # Measured data plus per-day astronomy features (train / predict).
        self.datat = np.recarray((NPTSt,), dtype={"names": ("flux", "sun_alt", "moon_phase"),
                                                  "formats": (np.int64, np.float64, np.float64)})
        self.datap = np.recarray((NPTSp,), dtype={"names": ("flux", "sun_alt", "moon_phase"),
                                                  "formats": (np.int64, np.float64, np.float64)})

    def setAstro(self, time, data):
        """Fill data["sun_alt"] and data["moon_phase"] for each date in *time*.

        *data* is one of the recarrays above and is modified in place.
        """
        sun = ephem.Sun()
        moon = ephem.Moon()
        obs = ephem.Observer()
        obs.lon = (self.elon * np.pi / 180)  # pyephem wants radians
        obs.lat = (self.nlat * np.pi / 180)  # pyephem wants radians
        obs.elevation = self.elev  # meters
        for i in range(len(time)):
            obs.date = str(time[i])
            sun.compute(obs)
            moon.compute(obs)
            # NOTE(review): transit_alt is the Sun's altitude at transit, not
            # at the stored timestamp -- confirm that is what is intended.
            data["sun_alt"][i] = float(180 / np.pi * sun.transit_alt)
            data["moon_phase"][i] = moon.moon_phase
def regress(args):
    """Fit a gradient-boosting regressor (LAD loss, 1000 trees) to one
    (features, flux) pair.

    *args* is a 2-tuple so this function can be handed to Pool.map.
    Returns the fitted estimator.
    """
    feature_matrix, target_flux = args
    booster = ensemble.GradientBoostingRegressor(loss="lad", n_estimators=1000)
    return booster.fit(feature_matrix, target_flux)
def regressLoop(features, flux, nsplit=10, seed=666):
alphas = np.logspace(-5, 1, 6, base=10)
models = []
for alpha in alphas:
models.append(linear_model.Ridge(normalize=True, fit_intercept=True, alpha=alpha))
models.append(linear_model.Lasso(normalize=True, fit_intercept=True, alpha=alpha))
models.append(linear_model.LassoLars(normalize=True, fit_intercept=True, alpha=alpha))
models.append(ensemble.RandomForestRegressor())
models.append(ensemble.ExtraTreesRegressor())
models.append(ensemble.AdaBoostRegressor())
models.append(ensemble.GradientBoostingRegressor(loss="lad", n_estimators=100)) # change to 1000 for better results
models.append(tree.DecisionTreeRegressor())
models.append(tree.ExtraTreeRegressor())
maeavg = []
for m in range(len(models)):
model = models[m]
maes = []
for i in range(nsplit):
feat_fit, feat_cv, flux_fit, flux_cv = train_test_split(features, flux, test_size=.20, random_state = i*seed)
try:
fit = model.fit(feat_fit, flux_fit)
preds = fit.predict(feat_cv)
mae = metrics.mean_absolute_error(flux_cv,preds)
#print "MAE (fold %d/%d): %f" % (i + 1, nsplit, mae)
maes.append(mae)
except:
continue
print " AVG MAE %d : %.1f +/- %.1f" % (m, np.mean(maes), np.std(maes))
maeavg.append(np.mean(maes))
idx = np.argsort(maeavg)
model = models[idx[0]]
print "BEST", maeavg[idx[0]], model
return model.fit(features, flux) # fit all data
if __name__ == "__main__":
suffix = sys.argv[1]
trainFile = "gp2_train_%s.pickle" % (suffix)
predFile = "gp2_pred_%s.pickle" % (suffix)
if suffix.find("logit") > -1:
buff = open(trainFile, "rb")
train, fmin, fmax = cPickle.load(buff)
buff.close()
buff = open(predFile, "rb")
pred, fmin, fmax = cPickle.load(buff)
buff.close()
else:
buff = open(trainFile, "rb")
train = cPickle.load(buff)
buff.close()
buff = open(predFile, "rb")
pred = cPickle.load(buff)
buff.close()
# QUESTION: do we logit the flux? Not sure, might screw up CV interpretation
#pool = multiprocessing.Pool(multiprocessing.cpu_count())
#pool.map(int, range(multiprocessing.cpu_count())) # Trick to "warm up" the Pool
# Need to load the positions and times of training data
sdata = np.loadtxt("../station_info.csv", delimiter=",", skiprows=1,
dtype = [("stid", np.str_, 4),
("nlat", np.float64),
("elon", np.float64),
("elev", np.float64)])
fields = np.loadtxt("../train.csv", skiprows=1, delimiter=",", dtype=np.int64)
dates = [np.datetime64(str(x)[:4]+"-"+str(x)[4:6]+"-"+str(x)[6:8]) for x in fields[:,0]]
Mesonet.dtimet = dates
mesonets = {}
for sidx in range(len(sdata)):
s = sdata[sidx]
station = Mesonet(s[0], s[1], s[2], s[3])
station.datat["flux"] = fields[:,sidx+1]
mesonets[s[0]] = station
# Dates of prediction data
fields = np.loadtxt("../sampleSubmission.csv", skiprows=1, delimiter=",", unpack=True).astype(np.int)
dates = [np.datetime64(str(x)[:4]+"-"+str(x)[4:6]+"-"+str(x)[6:8]) for x in fields[0]]
Mesonet.dtimep = dates
sdates = [np.str(x) for x in fields[0]]
# Do we do Astro terms?
useAstro = 0
if useAstro:
for mesonet in mesonets.values():
mesonet.setAstro(mesonet.dtimet, mesonet.datat)
mesonet.setAstro(mesonet.dtimep, mesonet.datap)
stride = 11 * 5
# Regress each Mesonet site on its own
for mKey in mesonets.keys():
# Look at each ensemble, one by one
pKey = 0 # which prediction, nelement * nhour
for eKey in range(11): # which element
for hKey in range(5): # which hour
print "%s %d" % (mKey, pKey)
featt = np.empty((NPTSt, len(fKeys) + 2 * useAstro))
for f in range(len(fKeys)):
fKey = fKeys[f]
featt[:,f] = train[mKey].pdata[pKey::stride][fKey]
if useAstro:
featt[:,len(fKeys)] = mesonets[mKey].datat["sun_alt"]
featt[:,len(fKeys)+1] = mesonets[mKey].datat["moon_phase"]
fluxt = mesonets[mKey].datat["flux"]
regressLoop(featt, fluxt)
pKey += 1
# Now average over all ensembles, select each hour
hstride = 5
for hKey in range(5): # which hour
print "%s %d" % (mKey, pKey)
featt = np.empty((NPTSt, len(fKeys) + 2 * useAstro))
for f in range(len(fKeys)):
fKey = fKeys[f]
featt[:,f] = np.ravel(np.mean(train[mKey].pdata[fKey].reshape((NPTSt, 11, 5)), axis=1))[hKey::hstride]
if useAstro:
featt[:,len(fKeys)] = mesonets[mKey].datat["sun_alt"]
featt[:,len(fKeys)+1] = mesonets[mKey].datat["moon_phase"]
fluxt = mesonets[mKey].datat["flux"]
regressLoop(featt, fluxt)
pKey += 1
# Now regress all sites at once
stride = 11 * 5
pKey = 0 # which prediction, nelement * nhour
for eKey in range(11): # which element
for hKey in range(5): # which hour
print "ALL %d" % (pKey)
featt = np.empty((NPTSt * len(mesonets.keys()), len(fKeys) + 2 * useAstro))
fluxt = np.empty((NPTSt * len(mesonets.keys())))
fIdx = 0
for mKey in mesonets.keys():
for f in range(len(fKeys)):
fKey = fKeys[f]
featt[fIdx*NPTSt:(fIdx*NPTSt + NPTSt),f] = train[mKey].pdata[pKey::stride][fKey]
#if useAstro:
# featt[:,len(fKeys)] = mesonets[mKey].datat["sun_alt"]
# featt[:,len(fKeys)+1] = mesonets[mKey].datat["moon_phase"]
fluxt[fIdx*NPTSt:(fIdx*NPTSt + NPTSt)] = mesonets[mKey].datat["flux"]
fIdx += 1
regressLoop(featt, fluxt)
pKey += 1
# Now average over all ensembles, select each hour
hstride = 5
for hKey in range(5): # which hour
print "ALL %d" % (pKey)
featt = np.empty((NPTSt * len(mesonets.keys()), len(fKeys) + 2 * useAstro))
fluxt = np.empty((NPTSt * len(mesonets.keys())))
fIdx = 0
for mKey in mesonets.keys():
for f in range(len(fKeys)):
fKey = fKeys[f]
featt[fIdx*NPTSt:(fIdx*NPTSt + NPTSt),f] = \
np.ravel(np.mean(train[mKey].pdata[fKey].reshape((NPTSt, 11, 5)), axis=1))[hKey::hstride]
#if useAstro:
# featt[:,len(fKeys)] = mesonets[mKey].datat["sun_alt"]
# featt[:,len(fKeys)+1] = mesonets[mKey].datat["moon_phase"]
fluxt[fIdx*NPTSt:(fIdx*NPTSt + NPTSt)] = mesonets[mKey].datat["flux"]
regressLoop(featt, fluxt)
pKey += 1
| acbecker/solar | regress_bk.py | Python | mit | 10,580 | [
"Gaussian"
] | 76ff629a88fd89e509781c01bb3484be48b6b7e7d2671c343881bc929c97949a |
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""some various utilities and helper classes, most of them used in the
main pylint class
"""
import sys
from warnings import warn
from os.path import dirname, basename, splitext, exists, isdir, join, normpath
from .logilab.common.interface import implements
from .logilab.common.modutils import modpath_from_file, get_module_files, \
file_from_modpath
from .logilab.common.textutils import normalize_text
from .logilab.common.configuration import rest_format_section
from .logilab.common.ureports import Section
from .logilab.astng import nodes, Module
from .checkers import EmptyReport
from .interfaces import IRawChecker
class UnknownMessage(Exception):
    """raised when an unregistered message id is encountered"""
# one-letter message category code -> long category name
MSG_TYPES = {
    'I' : 'info',
    'C' : 'convention',
    'R' : 'refactor',
    'W' : 'warning',
    'E' : 'error',
    'F' : 'fatal'
    }
# reverse mapping: long category name -> one-letter code
MSG_TYPES_LONG = dict([(v, k) for k, v in MSG_TYPES.iteritems()])

# exit-status bit set when at least one message of the category was emitted
MSG_TYPES_STATUS = {
    'I' : 0,
    'C' : 16,
    'R' : 8,
    'W' : 4,
    'E' : 2,
    'F' : 1
    }

# display order of categories in reports and documentation
_MSG_ORDER = 'EWRCIF'

# a message state may come from the configuration (global) or from a pragma
# inside the module currently being checked
MSG_STATE_SCOPE_CONFIG = 0
MSG_STATE_SCOPE_MODULE = 1

# The line/node distinction does not apply to fatal errors and reports.
_SCOPE_EXEMPT = 'FR'
class WarningScope(object):
    """Scope constants: whether a message is attached to a raw source line
    (raw checkers) or to an AST node (astng checkers)."""
    LINE = 'line-based-msg'
    NODE = 'node-based-msg'
def sort_msgs(msgids):
    """sort message identifiers according to their category first"""
    by_category = {}
    for msgid in msgids:
        by_category.setdefault(msgid[0], []).append(msgid)
    ordered = []
    for category in _MSG_ORDER:
        ordered.extend(sorted(by_category.get(category, [])))
    return ordered
def get_module_and_frameid(node):
    """return the module name and the frame id in the module"""
    frame = node.frame()
    module = ''
    scope_names = []
    while frame:
        if isinstance(frame, Module):
            module = frame.name
        else:
            scope_names.append(getattr(frame, 'name', '<lambda>'))
        try:
            frame = frame.parent.frame()
        except AttributeError:
            frame = None
    return module, '.'.join(reversed(scope_names))
def category_id(id):
    """Map *id* (one-letter code or long category name, any case) to its
    upper-case one-letter category code; return None if unknown."""
    normalized = id.upper()
    return normalized if normalized in MSG_TYPES else MSG_TYPES_LONG.get(normalized)
class Message:
    """Static description of a single pylint message type: template string,
    help text, symbolic name, owning checker and warning scope."""

    def __init__(self, checker, msgid, msg, descr, symbol, scope):
        # ids are exactly five characters: category letter + checker id (2)
        # + message number (2)
        assert len(msgid) == 5, 'Invalid message id %s' % msgid
        assert msgid[0] in MSG_TYPES, \
            'Bad message type %s in %r' % (msgid[0], msgid)
        self.checker = checker
        self.msgid = msgid
        self.msg = msg
        self.descr = descr
        self.symbol = symbol
        self.scope = scope
class MessagesHandlerMixIn:
    """a mix-in class containing all the messages related methods for the main
    lint class
    """
    def __init__(self):
        # dictionary of registered messages: msgid -> Message
        self._messages = {}
        # dictionary from string symbolic id to Message object.
        self._messages_by_symbol = {}
        # global (configuration-level) enabled/disabled state: msgid -> bool
        self._msgs_state = {}
        # per-module state: msgid -> {line: bool}
        self._module_msgs_state = {} # None
        self._raw_module_msgs_state = {}
        # category letter -> list of msgids in that category
        self._msgs_by_category = {}
        # bitfield of MSG_TYPES_STATUS values for the exit code
        self.msg_status = 0
        self._ignored_msgs = {}
        self._suppression_mapping = {}

    def register_messages(self, checker):
        """register a dictionary of messages

        Keys are message ids, values are a 2-uple with the message type and the
        message itself

        message ids should be a string of len 4, where the two first characters
        are the checker id and the two last the message id in this checker
        """
        msgs_dict = checker.msgs
        chkid = None
        for msgid, msg_tuple in msgs_dict.iteritems():
            # raw checkers attach messages to lines, astng checkers to nodes
            if implements(checker, IRawChecker):
                scope = WarningScope.LINE
            else:
                scope = WarningScope.NODE
            if len(msg_tuple) > 2:
                (msg, msgsymbol, msgdescr) = msg_tuple[:3]
                assert msgsymbol not in self._messages_by_symbol, \
                    'Message symbol %r is already defined' % msgsymbol
                # an optional 4th element may override the default scope
                if len(msg_tuple) > 3 and 'scope' in msg_tuple[3]:
                    scope = msg_tuple[3]['scope']
            else:
                # messages should have a symbol, but for backward compatibility
                # they may not.
                (msg, msgdescr) = msg_tuple
                warn("[pylint 0.26] description of message %s doesn't include "
                     "a symbolic name" % msgid, DeprecationWarning)
                msgsymbol = None
            # avoid duplicate / malformed ids
            assert msgid not in self._messages, \
                'Message id %r is already defined' % msgid
            assert chkid is None or chkid == msgid[1:3], \
                'Inconsistent checker part in message id %r' % msgid
            chkid = msgid[1:3]
            msg = Message(checker, msgid, msg, msgdescr, msgsymbol, scope)
            self._messages[msgid] = msg
            self._messages_by_symbol[msgsymbol] = msg
            self._msgs_by_category.setdefault(msgid[0], []).append(msgid)

    def get_message_help(self, msgid, checkerref=False):
        """return the help string for the given message id"""
        msg = self.check_message_id(msgid)
        desc = normalize_text(' '.join(msg.descr.split()), indent=' ')
        if checkerref:
            desc += ' This message belongs to the %s checker.' % \
                msg.checker.name
        title = msg.msg
        if msg.symbol:
            symbol_part = ' (%s)' % msg.symbol
        else:
            symbol_part = ''
        # a '%s' template has no usable title of its own
        if title != '%s':
            title = title.splitlines()[0]
            return ':%s%s: *%s*\n%s' % (msg.msgid, symbol_part, title, desc)
        return ':%s%s:\n%s' % (msg.msgid, symbol_part, desc)

    def disable(self, msgid, scope='package', line=None):
        """don't output message of the given id"""
        assert scope in ('package', 'module')
        # handle disable=all by disabling all categories
        if msgid == 'all':
            for msgid in MSG_TYPES:
                self.disable(msgid, scope, line)
            return
        # msgid is a category?
        catid = category_id(msgid)
        if catid is not None:
            for _msgid in self._msgs_by_category.get(catid):
                self.disable(_msgid, scope, line)
            return
        # msgid is a checker name?
        if msgid.lower() in self._checkers:
            for checker in self._checkers[msgid.lower()]:
                for _msgid in checker.msgs:
                    self.disable(_msgid, scope, line)
            return
        # msgid is report id?
        if msgid.lower().startswith('rp'):
            self.disable_report(msgid)
            return
        # msgid is a symbolic or numeric msgid.
        msg = self.check_message_id(msgid)
        if scope == 'module':
            assert line > 0
            try:
                self._module_msgs_state[msg.msgid][line] = False
            except KeyError:
                self._module_msgs_state[msg.msgid] = {line: False}
                # I0011 is "locally disabling"; don't report disabling it
                if msgid != 'I0011':
                    self.add_message('I0011', line=line, args=msg.msgid)
        else:
            msgs = self._msgs_state
            msgs[msg.msgid] = False
            # sync configuration object
            self.config.disable_msg = [mid for mid, val in msgs.iteritems()
                                       if not val]

    def enable(self, msgid, scope='package', line=None):
        """reenable message of the given id"""
        assert scope in ('package', 'module')
        catid = category_id(msgid)
        # msgid is a category?
        if catid is not None:
            for msgid in self._msgs_by_category.get(catid):
                self.enable(msgid, scope, line)
            return
        # msgid is a checker name?
        if msgid.lower() in self._checkers:
            for checker in self._checkers[msgid.lower()]:
                for msgid in checker.msgs:
                    self.enable(msgid, scope, line)
            return
        # msgid is report id?
        if msgid.lower().startswith('rp'):
            self.enable_report(msgid)
            return
        # msgid is a symbolic or numeric msgid.
        msg = self.check_message_id(msgid)
        if scope == 'module':
            assert line > 0
            try:
                self._module_msgs_state[msg.msgid][line] = True
            except KeyError:
                self._module_msgs_state[msg.msgid] = {line: True}
                self.add_message('I0012', line=line, args=msg.msgid)
        else:
            msgs = self._msgs_state
            msgs[msg.msgid] = True
            # sync configuration object
            self.config.enable = [mid for mid, val in msgs.iteritems() if val]

    def check_message_id(self, msgid):
        """returns the Message object for this message.

        msgid may be either a numeric or symbolic id.

        Raises UnknownMessage if the message id is not defined.
        """
        if msgid in self._messages_by_symbol:
            return self._messages_by_symbol[msgid]
        msgid = msgid.upper()
        try:
            return self._messages[msgid]
        except KeyError:
            raise UnknownMessage('No such message id %s' % msgid)

    def get_msg_display_string(self, msgid):
        """Generates a user-consumable representation of a message.

        Can be just the message ID or the ID and the symbol.
        """
        if self.config.symbols:
            symbol = self.check_message_id(msgid).symbol
            if symbol:
                msgid += '(%s)' % symbol
        return msgid

    def get_message_state_scope(self, msgid, line=None):
        """Returns the scope at which a message was enabled/disabled."""
        try:
            if line in self._module_msgs_state[msgid]:
                return MSG_STATE_SCOPE_MODULE
        except (KeyError, TypeError):
            # TypeError: line is None (unhashable lookups never happen here);
            # falls through to configuration scope
            return MSG_STATE_SCOPE_CONFIG

    def is_message_enabled(self, msgid, line=None):
        """return true if the message associated to the given message id is
        enabled

        msgid may be either a numeric or symbolic message id.
        """
        if msgid in self._messages_by_symbol:
            msgid = self._messages_by_symbol[msgid].msgid
        if line is None:
            return self._msgs_state.get(msgid, True)
        try:
            return self._module_msgs_state[msgid][line]
        except (KeyError, TypeError):
            # no module-level override for this msgid/line: use global state
            return self._msgs_state.get(msgid, True)

    def handle_ignored_message(self, state_scope, msgid, line, node, args):
        """Report an ignored message.

        state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG,
        depending on whether the message was disabled locally in the module,
        or globally. The other arguments are the same as for add_message.
        """
        if state_scope == MSG_STATE_SCOPE_MODULE:
            try:
                orig_line = self._suppression_mapping[(msgid, line)]
                self._ignored_msgs.setdefault((msgid, orig_line), set()).add(line)
            except KeyError:
                pass

    def add_message(self, msgid, line=None, node=None, args=None):
        """add the message corresponding to the given id.

        If provided, msg is expanded using args

        astng checkers should provide the node argument, raw checkers should
        provide the line argument.
        """
        msg_info = self._messages[msgid]
        # Fatal messages and reports are special, the node/scope distinction
        # does not apply to them.
        if msgid[0] not in _SCOPE_EXEMPT:
            if msg_info.scope == WarningScope.LINE:
                assert node is None and line is not None, (
                    'Message %s must only provide line, got line=%s, node=%s' % (msgid, line, node))
            elif msg_info.scope == WarningScope.NODE:
                # Node-based warnings may provide an override line.
                assert node is not None, 'Message %s must provide Node, got None'
        if line is None and node is not None:
            line = node.fromlineno
        if hasattr(node, 'col_offset'):
            col_offset = node.col_offset # XXX measured in bytes for utf-8, divide by two for chars?
        else:
            col_offset = None
        # should this message be displayed
        if not self.is_message_enabled(msgid, line):
            self.handle_ignored_message(
                self.get_message_state_scope(msgid, line), msgid, line, node, args)
            return
        # update stats
        msg_cat = MSG_TYPES[msgid[0]]
        self.msg_status |= MSG_TYPES_STATUS[msgid[0]]
        self.stats[msg_cat] += 1
        self.stats['by_module'][self.current_name][msg_cat] += 1
        try:
            self.stats['by_msg'][msgid] += 1
        except KeyError:
            self.stats['by_msg'][msgid] = 1
        # expand message ?
        msg = msg_info.msg
        if args:
            msg %= args
        # get module and object
        if node is None:
            module, obj = self.current_name, ''
            path = self.current_file
        else:
            module, obj = get_module_and_frameid(node)
            path = node.root().file
        # add the message
        self.reporter.add_message(msgid, (path, module, obj, line or 1, col_offset or 0), msg)

    def help_message(self, msgids):
        """display help messages for the given message identifiers"""
        for msgid in msgids:
            try:
                print self.get_message_help(msgid, True)
                print
            except UnknownMessage, ex:
                print ex
                print
                continue

    def print_full_documentation(self):
        """output a full documentation in ReST format"""
        by_checker = {}
        for checker in self.get_checkers():
            if checker.name == 'master':
                prefix = 'Main '
                print "Options"
                print '-------\n'
                if checker.options:
                    for section, options in checker.options_by_section():
                        if section is None:
                            title = 'General options'
                        else:
                            title = '%s options' % section.capitalize()
                        print title
                        print '~' * len(title)
                        rest_format_section(sys.stdout, None, options)
                        print
            else:
                # aggregate options/messages/reports of checkers sharing a name
                try:
                    by_checker[checker.name][0] += checker.options_and_values()
                    by_checker[checker.name][1].update(checker.msgs)
                    by_checker[checker.name][2] += checker.reports
                except KeyError:
                    by_checker[checker.name] = [list(checker.options_and_values()),
                                                dict(checker.msgs),
                                                list(checker.reports)]
        for checker, (options, msgs, reports) in by_checker.iteritems():
            prefix = ''
            title = '%s checker' % checker
            print title
            print '-' * len(title)
            print
            if options:
                title = 'Options'
                print title
                print '~' * len(title)
                rest_format_section(sys.stdout, None, options)
                print
            if msgs:
                title = ('%smessages' % prefix).capitalize()
                print title
                print '~' * len(title)
                for msgid in sort_msgs(msgs.iterkeys()):
                    print self.get_message_help(msgid, False)
                print
            if reports:
                title = ('%sreports' % prefix).capitalize()
                print title
                print '~' * len(title)
                for report in reports:
                    print ':%s: %s' % report[:2]
                print
            print

    def list_messages(self):
        """output full messages list documentation in ReST format"""
        msgids = []
        for checker in self.get_checkers():
            for msgid in checker.msgs.iterkeys():
                msgids.append(msgid)
        msgids.sort()
        for msgid in msgids:
            print self.get_message_help(msgid, False)
            print
class ReportsHandlerMixIn:
    """Mix-in providing the report registration, enabling/disabling and
    statistics helpers used by the main lint class.
    """
    def __init__(self):
        self._reports = {}
        self._reports_state = {}

    def register_report(self, reportid, r_title, r_cb, checker):
        """register a report

        reportid is the unique identifier for the report
        r_title the report's title
        r_cb the method to call to make the report
        checker is the checker defining the report
        """
        entry = (reportid.upper(), r_title, r_cb)
        self._reports.setdefault(checker, []).append(entry)

    def enable_report(self, reportid):
        """enable the report of the given id"""
        self._reports_state[reportid.upper()] = True

    def disable_report(self, reportid):
        """disable the report of the given id"""
        self._reports_state[reportid.upper()] = False

    def report_is_enabled(self, reportid):
        """return true if the report associated to the given identifier is
        enabled
        """
        return self._reports_state.get(reportid, True)

    def make_reports(self, stats, old_stats):
        """render registered reports"""
        sect = Section('Report',
                       '%s statements analysed.' % (self.stats['statement']))
        for checker, reports in self._reports.items():
            for reportid, r_title, r_cb in reports:
                if not self.report_is_enabled(reportid):
                    continue
                report_sect = Section(r_title)
                try:
                    r_cb(report_sect, stats, old_stats)
                except EmptyReport:
                    continue
                report_sect.report_id = reportid
                sect.append(report_sect)
        return sect

    def add_stats(self, **kwargs):
        """add some stats entries to the statistic dictionary
        raise an AssertionError if there is a key conflict
        """
        for key, value in kwargs.iteritems():
            if key.endswith('_'):
                key = key[:-1]
            assert key not in self.stats
            self.stats[key] = value
        return self.stats
def expand_modules(files_or_modules, black_list):
    """take a list of files/modules/packages and return the list of tuple
    (file, module name) which have to be actually checked

    Returns (result, errors): result is a list of dicts with 'path', 'name',
    'basepath' and 'basename' keys; errors is a list of dicts describing
    modules that could not be resolved ('key' is the pylint message id).
    """
    result = []
    errors = []
    for something in files_or_modules:
        if exists(something):
            # this is a file or a directory
            try:
                modname = '.'.join(modpath_from_file(something))
            except ImportError:
                # not importable as a module: fall back to the bare file name
                modname = splitext(basename(something))[0]
            if isdir(something):
                filepath = join(something, '__init__.py')
            else:
                filepath = something
        else:
            # suppose it's a module or package
            modname = something
            try:
                filepath = file_from_modpath(modname.split('.'))
                if filepath is None:
                    # module exists but has no associated source file
                    errors.append( {'key' : 'F0003', 'mod': modname} )
                    continue
            except (ImportError, SyntaxError), ex:
                # FIXME p3k : the SyntaxError is a Python bug and should be
                # removed as soon as possible http://bugs.python.org/issue10588
                errors.append( {'key': 'F0001', 'mod': modname, 'ex': ex} )
                continue
        filepath = normpath(filepath)
        result.append( {'path': filepath, 'name': modname,
                        'basepath': filepath, 'basename': modname} )
        # if a package was given, also schedule all its submodules
        if not (modname.endswith('.__init__') or modname == '__init__') \
                and '__init__.py' in filepath:
            for subfilepath in get_module_files(dirname(filepath), black_list):
                if filepath == subfilepath:
                    continue
                submodname = '.'.join(modpath_from_file(subfilepath))
                result.append( {'path': subfilepath, 'name': submodname,
                                'basepath': filepath, 'basename': modname} )
    return result, errors
class PyLintASTWalker(object):
    """Walk an astng tree, dispatching each node to the visit_*/leave_*
    callbacks collected from the registered checkers."""

    def __init__(self, linter):
        # callbacks per node types
        self.nbstatements = 1
        self.visit_events = {}
        self.leave_events = {}
        self.linter = linter

    def add_checker(self, checker):
        """walk to the checker's dir and collect visit and leave methods"""
        # XXX : should be possible to merge needed_checkers and add_checker
        vcids = set()
        lcids = set()
        visits = self.visit_events
        leaves = self.leave_events
        msgs = self.linter._msgs_state
        for member in dir(checker):
            # 'visit_' and 'leave_' are both 6 characters long, so member[6:]
            # yields the node-class id for either prefix.
            cid = member[6:]
            if cid == 'default':
                continue
            if member.startswith('visit_'):
                v_meth = getattr(checker, member)
                # don't use visit_methods with no activated message:
                if hasattr(v_meth, 'checks_msgs'):
                    if not any(msgs.get(m, True) for m in v_meth.checks_msgs):
                        continue
                visits.setdefault(cid, []).append(v_meth)
                vcids.add(cid)
            elif member.startswith('leave_'):
                l_meth = getattr(checker, member)
                # don't use leave_methods with no activated message:
                if hasattr(l_meth, 'checks_msgs'):
                    if not any(msgs.get(m, True) for m in l_meth.checks_msgs):
                        continue
                leaves.setdefault(cid, []).append(l_meth)
                lcids.add(cid)
        # a visit_default method is attached to every node class that has no
        # dedicated visit method on this checker
        visit_default = getattr(checker, 'visit_default', None)
        if visit_default:
            for cls in nodes.ALL_NODE_CLASSES:
                cid = cls.__name__.lower()
                if cid not in vcids:
                    visits.setdefault(cid, []).append(visit_default)
        # for now we have no "leave_default" method in Pylint

    def walk(self, astng):
        """call visit events of astng checkers for the given node, recurse on
        its children, then leave events.
        """
        cid = astng.__class__.__name__.lower()
        if astng.is_statement:
            self.nbstatements += 1
        # generate events for this node on each checker
        for cb in self.visit_events.get(cid, ()):
            cb(astng)
        # recurse on children
        for child in astng.get_children():
            self.walk(child)
        for cb in self.leave_events.get(cid, ()):
            cb(astng)
| yorvic/.vim | bundle/python-mode/pylibs/pylama/checkers/pylint/utils.py | Python | gpl-3.0 | 23,922 | [
"VisIt"
] | d7013a0628ceb5c390d38fa40b25a72255166a3b703a3da68ac00268a367feec |
from dulwich.repo import *
from dulwich.objects import *
import yaml
import os
import argparse
import shutil
import time
import datetime
from dateutil.tz import tzlocal
import uuid
from user import User
import hashlib
# Man is this ugly
from crypto.asymencdata import ASymEncData
from crypto.asymenc import ASymEnc
from crypto.asymkey import ASymKey
from crypto.encresult import EncResult
from crypto.symencdata import SymEncData
from crypto.symenckey import SymEncKey
from crypto.symencpasswordkey import SymEncPasswordKey
from crypto.symenc import SymEnc
class LedgerException(Exception):
    """Raised for any ledger-level failure: unknown user, failed signature
    verification, missing key, or a non-verifying repository."""
    pass
class Ledger:
def __init__(self, path, user = None):
self.path = path
self.repo = Repo(path)
self.current_user = user
self.dirty_files = []
self.actions = []
self.key = None
self.cached_users = None
self.cached_txs = None
def __enter__(self):
self.load_all_users()
if isinstance(self.current_user, str):
self.auth_user(self.current_user)
errs = self.errors()
if errs:
raise LedgerException(errs)
self.load_key()
return self
def __exit__(self, type, value, traceback):
if value is None:
return True
return False
@staticmethod
def init(path, user):
Repo.init(path, mkdir=True)
user.generate_key()
ledger = Ledger(path, user)
ledger.actions.append('Init')
ledger.create_master_key()
ledger.add_user(user)
@staticmethod
def str_to_sign(ctime, parent, digest, actions, user):
if isinstance(parent, list):
parent = ",".join(parent)
action_digest = hashlib.sha256(actions).hexdigest()
return "%d:%s:%s:%s:%s" % (ctime, parent, digest, action_digest, user)
def commit(self, branch, actions, data=None):
if self.current_user is None:
raise LedgerException('No User Logged in')
branch = "refs/heads/%s" % branch
parent = ''
if branch in self.repo.refs:
parent = [self.repo.refs[branch]]
digest = hashlib.sha256(data).hexdigest()
ctime = int(time.time())
if isinstance(actions, list):
actions = ". ".join(actions)
s2s = Ledger.str_to_sign(ctime = ctime,
parent = parent,
digest = digest,
actions = actions,
user = repr(self.current_user))
ase = ASymEnc(self.current_user.key)
sig = ase.sign(s2s)
if not ase.verify(s2s, sig):
raise Exception('Bah!')
msg = "Actions: %s\nSig: %s\n%s" % (actions, sig, data)
commit = Commit()
commit.author = commit.committer = repr(self.current_user)
tzo = int(tzlocal().utcoffset(datetime.datetime.now()).total_seconds())
commit.commit_timezone = commit.author_timezone = tzo
commit.commit_time = commit.author_time = ctime
commit.encoding = "UTF-8"
commit.message = msg
# SHA of an empty tree
# git hash-object -t tree /dev/null
commit.tree = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'
if parent:
commit.parents = parent
object_store = self.repo.object_store
object_store.add_object(commit)
self.repo.refs[branch] = commit.id
def load_key(self):
for key in self.keys():
key_key = key['key_key']
key_key = SymEncPasswordKey.from_dict(key_key)
key = key['key']
key = SymEncKey.from_dict(key_key, key)
self.key = key
break # There should only be a single key, or we'll just use the first one
def check_key(self):
if self.key is None:
self.load_key()
if self.key is None:
raise LedgerException("Key not loaded")
def auth_user(self, username):
if username not in self.cached_users:
raise LedgerException("User %s doesn't exist" % username)
user = self.cached_users[username]
user.decrypt_key()
self.current_user = user
def add_user(self, user):
user_key_yaml = yaml.dump(user.to_dict(), default_flow_style = False)
self.commit('users', "Create user %s" % user, data=user_key_yaml)
def create_master_key(self):
key_key = SymEncPasswordKey()
key = SymEncKey()
to_store = {
'key_key': key_key.to_dict(),
'key': key.to_dict(key_key)
}
key_yaml = yaml.dump(to_store, default_flow_style=False)
self.commit('key', "Generated Master Key", data=key_yaml)
self.key = key
def create_tx(self, from_account, to_account, description, amount):
self.check_key()
encor = SymEnc(self.key)
description = encor.encrypt(description)
amount = encor.encrypt(str(amount))
tx = {
'description': description.to_dict(),
'amount': amount.to_dict(),
'to_account': to_account,
'from_account': from_account,
}
tx_yaml = yaml.dump(tx, default_flow_style = False)
self.commit('txs',"Added Tx", data=tx_yaml)
def walk_branch(self, branch, verify = True):
branch = "refs/heads/%s" % branch
if branch not in self.repo.refs:
return
for tx in self.repo.get_walker(include=self.repo.refs[branch]):
a = tx.commit.message.split('\n', 2)
actions = a[0]
sig = a[1]
data = a[2]
actions = actions.split(':')[1].strip()
sig = sig.split(':')[1].strip()
s2s = Ledger.str_to_sign(ctime = tx.commit.commit_time,
parent = ','.join(tx.commit.parents),
digest = hashlib.sha256(data).hexdigest(),
actions = actions,
user = tx.commit.author)
if verify:
user = self.cached_users[tx.commit.author]
asc = ASymEnc(user.key)
if not asc.verify(s2s, sig):
raise LedgerException("Commit %s has a bad sig" % tx.commit.id)
if data is None and -1 != tx.commit.message.find('Merge'):
continue
yield data, tx.commit
def keys(self): # There should only ever be 1, but...
for data, commit in self.walk_branch('key'):
key = yaml.safe_load(data)
yield key
def txs(self):
self.check_key()
if self.cached_txs is not None:
for tx in self.cached_txs:
yield tx
else:
self.cached_txs = []
encor = SymEnc(self.key)
for data, commit in self.walk_branch('txs'):
tx = yaml.safe_load(data)
tx['who'] = commit.author
tx['when'] = commit.commit_time
tx['amount'] = int(encor.decrypt(EncResult.from_dict(tx['amount'])))
tx['description'] = encor.decrypt(EncResult.from_dict(tx['description']))
self.cached_txs.append(tx)
yield tx
def users(self, verify = True):
for data, commit in self.walk_branch('users', verify=verify):
data = yaml.safe_load(data)
user = User.from_dict_auth(data, decrypt = False)
yield user
def load_all_users(self):
users = {}
if self.cached_users is not None:
return None
for user in self.users(verify=False):
users[repr(user)] = user
users[str(user)] = user
self.cached_users = users
def balances(self):
accts = {}
self.check_key()
for tx in self.txs():
from_account = tx['from_account']
to_account = tx['to_account']
amount = tx['amount']
if from_account not in accts:
accts[from_account] = 0
if to_account not in accts:
accts[to_account] = 0
accts[from_account] -= amount
accts[to_account] += amount
return accts
def verify(self):
return self.errors() is None
def errors(self):
try:
for tx in self.txs(): pass
for tx in self.users(): pass
for tx in self.keys(): pass
except LedgerException as e:
return str(e)
return None
def txs_for_account(self, account):
    """Yield only the transactions that credit or debit *account*."""
    for tx in self.txs():
        if account in (tx['to_account'], tx['from_account']):
            yield tx
| jimktrains/freebooks | ledger.py | Python | gpl-3.0 | 8,782 | [
"ASE"
] | 0b246762607b4cdf5456749da0c2bcd2f176c825899ee32587d1d08a06e251f6 |
# AddHeaderForEdgeR/AddHeaderForEdgeR.py - a self annotated version of rgToolFactory.py generated by running rgToolFactory.py
# to make a new Galaxy tool called AddHeaderForEdgeR
# User mika.yoshimura@riken.jp at 30/01/2015 16:38:14
# rgToolFactory.py
# see https://bitbucket.org/fubar/galaxytoolfactory/wiki/Home
#
# copyright ross lazarus (ross stop lazarus at gmail stop com) May 2012
#
# all rights reserved
# Licensed under the LGPL
# suggestions for improvement and bug fixes welcome at https://bitbucket.org/fubar/galaxytoolfactory/wiki/Home
#
# August 2014
# merged John Chilton's citation addition and ideas from Marius van den Beek to enable arbitrary
# data types for input and output - thanks!
#
# march 2014
# had to remove dependencies because cross toolshed dependencies are not possible - can't pre-specify a toolshed url for graphicsmagick and ghostscript
# grrrrr - night before a demo
# added dependencies to a tool_dependencies.xml if html page generated so generated tool is properly portable
#
# added ghostscript and graphicsmagick as dependencies
# fixed a wierd problem where gs was trying to use the new_files_path from universe (database/tmp) as ./database/tmp
# errors ensued
#
# august 2013
# found a problem with GS if $TMP or $TEMP missing - now inject /tmp and warn
#
# july 2013
# added ability to combine images and individual log files into html output
# just make sure there's a log file foo.log and it will be output
# together with all images named like "foo_*.pdf
# otherwise old format for html
#
# January 2013
# problem pointed out by Carlos Borroto
# added escaping for <>$ - thought I did that ages ago...
#
# August 11 2012
# changed to use shell=False and cl as a sequence
# This is a Galaxy tool factory for simple scripts in python, R or whatever ails ye.
# It also serves as the wrapper for the new tool.
#
# you paste and run your script
# Only works for simple scripts that read one input from the history.
# Optionally can write one new history dataset,
# and optionally collect any number of outputs into links on an autogenerated HTML page.
# DO NOT install on a public or important site - please.
# installed generated tools are fine if the script is safe.
# They just run normally and their user cannot do anything unusually insecure
# but please, practice safe toolshed.
# Read the fucking code before you install any tool
# especially this one
# After you get the script working on some test data, you can
# optionally generate a toolshed compatible gzip file
# containing your script safely wrapped as an ordinary Galaxy script in your local toolshed for
# safe and largely automated installation in a production Galaxy.
# If you opt for an HTML output, you get all the script outputs arranged
# as a single Html history item - all output files are linked, thumbnails for all the pdfs.
# Ugly but really inexpensive.
#
# Patches appreciated please.
#
#
# long route to June 2012 product
# Behold the awesome power of Galaxy and the toolshed with the tool factory to bind them
# derived from an integrated script model
# called rgBaseScriptWrapper.py
# Note to the unwary:
# This tool allows arbitrary scripting on your Galaxy as the Galaxy user
# There is nothing stopping a malicious user doing whatever they choose
# Extremely dangerous!!
# Totally insecure. So, trusted users only
#
# preferred model is a developer using their throw away workstation instance - ie a private site.
# no real risk. The universe_wsgi.ini admin_users string is checked - only admin users are permitted to run this tool.
#
import sys
import shutil
import subprocess
import os
import time
import tempfile
import optparse
import tarfile
import re
import shutil
import math
progname = os.path.split(sys.argv[0])[1]  # basename of this script, shown in generated HTML
myversion = 'V001.1 March 2014'  # wrapper version string
verbose = False
debug = False
toolFactoryURL = 'https://bitbucket.org/fubar/galaxytoolfactory'  # attribution link used in help text
# if we do html we need these dependencies specified in a tool_dependencies.xml file and referred to in the generated
# tool xml
# skeleton written into the tool tarball; %s receives the readme/help text
toolhtmldepskel = """<?xml version="1.0"?>
<tool_dependency>
<package name="ghostscript" version="9.10">
<repository name="package_ghostscript_9_10" owner="devteam" prior_installation_required="True" />
</package>
<package name="graphicsmagick" version="1.3.18">
<repository name="package_graphicsmagick_1_3" owner="iuc" prior_installation_required="True" />
</package>
<readme>
%s
</readme>
</tool_dependency>
"""
# <requirements> fragment injected into the generated tool XML when HTML output is enabled
protorequirements = """<requirements>
<requirement type="package" version="9.10">ghostscript</requirement>
<requirement type="package" version="1.3.18">graphicsmagick</requirement>
</requirements>"""
def timenow():
    """Return the current local time formatted as dd/mm/YYYY HH:MM:SS."""
    stamp = time.localtime(time.time())
    return time.strftime('%d/%m/%Y %H:%M:%S', stamp)
# Characters that must be entity-escaped before the user's script text is
# embedded in the generated tool XML / Mako-parsed help sections.  The file
# header and the comment near the configfile assembly both state that
# <, > and $ must be escaped; mapping them to themselves would make this
# table a no-op, so the standard XML entities are used here.
html_escape_table = {
    "&": "&amp;",
    ">": "&gt;",
    "<": "&lt;",
    "$": "\$"
}

def html_escape(text):
    """Return *text* with &, >, < replaced by entities and $ backslash-escaped.

    Characters absent from the table pass through unchanged.
    """
    return "".join(html_escape_table.get(c, c) for c in text)
def cmd_exists(cmd):
    """Return True when the shell can resolve *cmd* (binary or builtin).

    NOTE(review): *cmd* is interpolated into a shell command line, so this
    must only be called with trusted, internal names ('gm', 'convert', 'gs').
    """
    status = subprocess.call("type " + cmd, shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return status == 0
def parse_citations(citations_text):
    """Split **ENTRY**-delimited citation text into (type, payload) tuples.

    Entries beginning with 'doi' yield ('doi', rest-of-entry); every other
    entry is assumed to carry a 'bibtex' prefix, which is stripped likewise.
    """
    chunks = [chunk for chunk in citations_text.split("**ENTRY**") if chunk.strip()]
    parsed = []
    for chunk in chunks:
        if chunk.startswith("doi"):
            parsed.append(("doi", chunk[len("doi"):].strip()))
        else:
            parsed.append(("bibtex", chunk[len("bibtex"):].strip()))
    return parsed
class ScriptRunner:
    """class is a wrapper for an arbitrary script

    Materialises the user's script to a temp file, runs it with the chosen
    interpreter, and can optionally render an HTML report of all outputs
    and/or package the whole thing as a toolshed-ready tool tarball.
    NOTE(review): this file is Python 2 only (<> operator, print >>, file()).
    """
    def __init__(self,opts=None,treatbashSpecial=True):
        """
        cleanup inputs, setup some outputs
        """
        # probe for the external helpers used to compress/thumbnail PDFs
        self.useGM = cmd_exists('gm')
        self.useIM = cmd_exists('convert')
        self.useGS = cmd_exists('gs')
        self.temp_warned = False # we want only one warning if $TMP not set
        self.treatbashSpecial = treatbashSpecial
        if opts.output_dir: # simplify for the tool tarball
            os.chdir(opts.output_dir)
        self.thumbformat = 'png'
        self.opts = opts
        self.toolname = re.sub('[^a-zA-Z0-9_]+', '', opts.tool_name) # a sanitizer now does this but..
        self.toolid = self.toolname
        self.myname = sys.argv[0] # get our name because we write ourselves out as a tool later
        self.pyfile = self.myname # crude but efficient - the cruft won't hurt much
        self.xmlfile = '%s.xml' % self.toolname
        s = open(self.opts.script_path,'r').readlines()
        s = [x.rstrip() for x in s] # remove pesky dos line endings if needed
        self.script = '\n'.join(s)
        # write the script to a named temp file so bash (and the tarball) can use it
        fhandle,self.sfile = tempfile.mkstemp(prefix=self.toolname,suffix=".%s" % (opts.interpreter))
        tscript = open(self.sfile,'w') # use self.sfile as script source for Popen
        tscript.write(self.script)
        tscript.close()
        self.indentedScript = '\n'.join([' %s' % html_escape(x) for x in s]) # for restructured text in help
        self.escapedScript = '\n'.join([html_escape(x) for x in s])
        self.elog = os.path.join(self.opts.output_dir,"%s_error.log" % self.toolname)
        if opts.output_dir: # may not want these complexities
            self.tlog = os.path.join(self.opts.output_dir,"%s_runner.log" % self.toolname)
            art = '%s.%s' % (self.toolname,opts.interpreter)
            artpath = os.path.join(self.opts.output_dir,art) # need full path
            artifact = open(artpath,'w') # use self.sfile as script source for Popen
            artifact.write(self.script)
            artifact.close()
        # assemble the command line handed to subprocess:
        # interpreter, script source ('-' means stdin), then the i/o paths
        self.cl = []
        self.html = []
        a = self.cl.append
        a(opts.interpreter)
        if self.treatbashSpecial and opts.interpreter in ['bash','sh']:
            a(self.sfile)
        else:
            a('-') # stdin
        a(opts.input_tab)
        a(opts.input_int)
        a(opts.output_tab)
        self.outputFormat = self.opts.output_format
        self.inputFormats = self.opts.input_formats
        # canonical names for the functional-test fixtures copied into test-data/
        self.test1Input = '%s_test1_input.xls' % self.toolname
        self.test1Output = '%s_test1_output.xls' % self.toolname
        self.test1HTML = '%s_test1_output.html' % self.toolname
    def makeXML(self):
        """
        Create a Galaxy xml tool wrapper for the new script as a string to write out
        fixme - use templating or something less fugly than this example of what we produce
        <tool id="reverse" name="reverse" version="0.01">
        <description>a tabular file</description>
        <command interpreter="python">
        reverse.py --script_path "$runMe" --interpreter "python"
        --tool_name "reverse" --input_tab "$input1" --output_tab "$tab_file"
        </command>
        <inputs>
        <param name="input1" type="data" format="tabular" label="Select a suitable input file from your history"/><param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="reverse"/>
        </inputs>
        <outputs>
        <data format="tabular" name="tab_file" label="${job_name}"/>
        </outputs>
        <help>
        **What it Does**
        Reverse the columns in a tabular file
        </help>
        <configfiles>
        <configfile name="runMe">
        # reverse order of columns in a tabular file
        import sys
        inp = sys.argv[1]
        outp = sys.argv[2]
        i = open(inp,'r')
        o = open(outp,'w')
        for row in i:
        rs = row.rstrip().split('\t')
        rs.reverse()
        o.write('\t'.join(rs))
        o.write('\n')
        i.close()
        o.close()
        </configfile>
        </configfiles>
        </tool>
        """
        # master template for the generated tool XML; %(...)s slots are filled from xdict below
        newXML="""<tool id="%(toolid)s" name="%(toolname)s" version="%(tool_version)s">
%(tooldesc)s
%(requirements)s
<command interpreter="python">
%(command)s
</command>
<inputs>
%(inputs)s
</inputs>
<outputs>
%(outputs)s
</outputs>
<configfiles>
<configfile name="runMe">
%(script)s
</configfile>
</configfiles>
%(tooltests)s
<help>
%(help)s
</help>
<citations>
%(citations)s
<citation type="doi">10.1093/bioinformatics/bts573</citation>
</citations>
</tool>""" # needs a dict with toolname, toolid, interpreter, scriptname, command, inputs as a multi line string ready to write, outputs ditto, help ditto
        newCommand="""
%(toolname)s.py --script_path "$runMe" --interpreter "%(interpreter)s"
--tool_name "%(toolname)s" %(command_inputs)s %(command_outputs)s """
        # may NOT be an input or htmlout - appended later
        # three alternative <tests> sections depending on which outputs the tool produces
        tooltestsTabOnly = """
<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="%(outputFormat)s"/>
</test>
</tests>
"""
        tooltestsHTMLOnly = """
<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="5"/>
</test>
</tests>
"""
        tooltestsBoth = """<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="%(outputFormat)s" />
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="10"/>
</test>
</tests>
"""
        # build the substitution dict for the templates above
        xdict = {}
        xdict['outputFormat'] = self.outputFormat
        xdict['inputFormats'] = self.inputFormats
        xdict['requirements'] = ''
        if self.opts.make_HTML:
            if self.opts.include_dependencies == "yes":
                xdict['requirements'] = protorequirements
        xdict['tool_version'] = self.opts.tool_version
        xdict['test1Input'] = self.test1Input
        xdict['test1HTML'] = self.test1HTML
        xdict['test1Output'] = self.test1Output
        # pick the matching <tests> section for the output combination
        if self.opts.make_HTML and self.opts.output_tab <> 'None':
            xdict['tooltests'] = tooltestsBoth % xdict
        elif self.opts.make_HTML:
            xdict['tooltests'] = tooltestsHTMLOnly % xdict
        else:
            xdict['tooltests'] = tooltestsTabOnly % xdict
        xdict['script'] = self.escapedScript
        # configfile is least painful way to embed script to avoid external dependencies
        # but requires escaping of <, > and $ to avoid Mako parsing
        if self.opts.help_text:
            helptext = open(self.opts.help_text,'r').readlines()
            helptext = [html_escape(x) for x in helptext] # must html escape here too - thanks to Marius van den Beek
            xdict['help'] = ''.join([x for x in helptext])
        else:
            xdict['help'] = 'Please ask the tool author (%s) for help as none was supplied at tool generation\n' % (self.opts.user_email)
        if self.opts.citations:
            citationstext = open(self.opts.citations,'r').read()
            citation_tuples = parse_citations(citationstext)
            citations_xml = ""
            for citation_type, citation_content in citation_tuples:
                citation_xml = """<citation type="%s">%s</citation>""" % (citation_type, html_escape(citation_content))
                citations_xml += citation_xml
            xdict['citations'] = citations_xml
        else:
            xdict['citations'] = ""
        # append the script listing and attribution to the help text
        coda = ['**Script**','Pressing execute will run the following code over your input file and generate some outputs in your history::']
        coda.append('\n')
        coda.append(self.indentedScript)
        coda.append('\n**Attribution**\nThis Galaxy tool was created by %s at %s\nusing the Galaxy Tool Factory.\n' % (self.opts.user_email,timenow()))
        coda.append('See %s for details of that project' % (toolFactoryURL))
        coda.append('Please cite: Creating re-usable tools from scripts: The Galaxy Tool Factory. Ross Lazarus; Antony Kaspi; Mark Ziemann; The Galaxy Team. ')
        coda.append('Bioinformatics 2012; doi: 10.1093/bioinformatics/bts573\n')
        xdict['help'] = '%s\n%s' % (xdict['help'],'\n'.join(coda))
        if self.opts.tool_desc:
            xdict['tooldesc'] = '<description>%s</description>' % self.opts.tool_desc
        else:
            xdict['tooldesc'] = ''
        xdict['command_outputs'] = ''
        xdict['outputs'] = ''
        if self.opts.input_tab <> 'None':
            xdict['command_inputs'] = '--input_tab "$input1" ' # the space may matter a lot if we append something
            xdict['inputs'] = '<param name="input1" type="data" format="%s" label="Select a suitable input file from your history"/> \n' % self.inputFormats
        else:
            xdict['command_inputs'] = '' # assume no input - eg a random data generator
            xdict['inputs'] = ''
        xdict['inputs'] += '<param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="%s"/> \n' % self.toolname
        xdict['toolname'] = self.toolname
        xdict['toolid'] = self.toolid
        xdict['interpreter'] = self.opts.interpreter
        xdict['scriptname'] = self.sfile
        if self.opts.make_HTML:
            xdict['command_outputs'] += ' --output_dir "$html_file.files_path" --output_html "$html_file" --make_HTML "yes"'
            xdict['outputs'] += ' <data format="html" name="html_file" label="${job_name}.html"/>\n'
        else:
            xdict['command_outputs'] += ' --output_dir "./"'
        if self.opts.output_tab <> 'None':
            xdict['command_outputs'] += ' --output_tab "$tab_file"'
            xdict['outputs'] += ' <data format="%s" name="tab_file" label="${job_name}"/>\n' % self.outputFormat
        xdict['command'] = newCommand % xdict
        # render and write the final tool XML next to the script
        xmls = newXML % xdict
        xf = open(self.xmlfile,'w')
        xf.write(xmls)
        xf.write('\n')
        xf.close()
        # ready for the tarball
    def makeTooltar(self):
        """
        a tool is a gz tarball with eg
        /toolname/tool.xml /toolname/tool.py /toolname/test-data/test1_in.foo ...
        """
        retval = self.run() # run first so the outputs exist to become test-data fixtures
        if retval:
            print >> sys.stderr,'## Run failed. Cannot build yet. Please fix and retry'
            sys.exit(1)
        tdir = self.toolname
        os.mkdir(tdir)
        self.makeXML()
        if self.opts.make_HTML:
            if self.opts.help_text:
                hlp = open(self.opts.help_text,'r').read()
            else:
                hlp = 'Please ask the tool author for help as none was supplied at tool generation\n'
            if self.opts.include_dependencies:
                # ship a tool_dependencies.xml so ghostscript/graphicsmagick get installed with the tool
                tooldepcontent = toolhtmldepskel % hlp
                depf = open(os.path.join(tdir,'tool_dependencies.xml'),'w')
                depf.write(tooldepcontent)
                depf.write('\n')
                depf.close()
        if self.opts.input_tab <> 'None': # no reproducible test otherwise? TODO: maybe..
            # copy this run's inputs/outputs in as the functional test fixtures
            testdir = os.path.join(tdir,'test-data')
            os.mkdir(testdir) # make tests directory
            shutil.copyfile(self.opts.input_tab,os.path.join(testdir,self.test1Input))
            if self.opts.output_tab <> 'None':
                shutil.copyfile(self.opts.output_tab,os.path.join(testdir,self.test1Output))
            if self.opts.make_HTML:
                shutil.copyfile(self.opts.output_html,os.path.join(testdir,self.test1HTML))
            if self.opts.output_dir:
                shutil.copyfile(self.tlog,os.path.join(testdir,'test1_out.log'))
        # this wrapper rewrites itself (with a provenance header) as the new tool's script
        outpif = '%s.py' % self.toolname # new name
        outpiname = os.path.join(tdir,outpif) # path for the tool tarball
        pyin = os.path.basename(self.pyfile) # our name - we rewrite ourselves (TM)
        notes = ['# %s - a self annotated version of %s generated by running %s\n' % (outpiname,pyin,pyin),]
        notes.append('# to make a new Galaxy tool called %s\n' % self.toolname)
        notes.append('# User %s at %s\n' % (self.opts.user_email,timenow()))
        pi = open(self.pyfile,'r').readlines() # our code becomes new tool wrapper (!) - first Galaxy worm
        notes += pi
        outpi = open(outpiname,'w')
        outpi.write(''.join(notes))
        outpi.write('\n')
        outpi.close()
        stname = os.path.join(tdir,self.sfile)
        if not os.path.exists(stname):
            shutil.copyfile(self.sfile, stname)
        xtname = os.path.join(tdir,self.xmlfile)
        if not os.path.exists(xtname):
            shutil.copyfile(self.xmlfile,xtname)
        # gzip the whole tool directory and hand it back via --new_tool
        tarpath = "%s.gz" % self.toolname
        tar = tarfile.open(tarpath, "w:gz")
        tar.add(tdir,arcname=self.toolname)
        tar.close()
        shutil.copyfile(tarpath,self.opts.new_tool)
        shutil.rmtree(tdir)
        ## TODO: replace with optional direct upload to local toolshed?
        return retval
    def compressPDF(self,inpdf=None,thumbformat='png'):
        """need absolute path to pdf
        note that GS gets confoozled if no $TMP or $TEMP
        so we set it
        """
        # NOTE(review): self.myName is never defined (the attribute is self.myname);
        # formatting this message would raise AttributeError if the assert ever fails
        assert os.path.isfile(inpdf), "## Input %s supplied to %s compressPDF not found" % (inpdf,self.myName)
        hlog = os.path.join(self.opts.output_dir,"compress_%s.txt" % os.path.basename(inpdf))
        sto = open(hlog,'a')
        our_env = os.environ.copy()
        our_tmp = our_env.get('TMP',None)
        if not our_tmp:
            our_tmp = our_env.get('TEMP',None)
        if not (our_tmp and os.path.exists(our_tmp)):
            # no usable temp dir - manufacture one under output_dir for ghostscript
            newtmp = os.path.join(self.opts.output_dir,'tmp')
            try:
                os.mkdir(newtmp)
            except:
                sto.write('## WARNING - cannot make %s - it may exist or permissions need fixing\n' % newtmp)
            our_env['TEMP'] = newtmp
            if not self.temp_warned:
                sto.write('## WARNING - no $TMP or $TEMP!!! Please fix - using %s temporarily\n' % newtmp)
                self.temp_warned = True
        # recompress via ghostscript; only replace the original on success
        outpdf = '%s_compressed' % inpdf
        cl = ["gs", "-sDEVICE=pdfwrite", "-dNOPAUSE", "-dUseCIEColor", "-dBATCH","-dPDFSETTINGS=/printer", "-sOutputFile=%s" % outpdf,inpdf]
        x = subprocess.Popen(cl,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
        retval1 = x.wait()
        sto.close()
        if retval1 == 0:
            os.unlink(inpdf)
            shutil.move(outpdf,inpdf)
            os.unlink(hlog)
        # then render a thumbnail with graphicsmagick (preferred) or imagemagick
        hlog = os.path.join(self.opts.output_dir,"thumbnail_%s.txt" % os.path.basename(inpdf))
        sto = open(hlog,'w')
        outpng = '%s.%s' % (os.path.splitext(inpdf)[0],thumbformat)
        if self.useGM:
            cl2 = ['gm', 'convert', inpdf, outpng]
        else: # assume imagemagick
            cl2 = ['convert', inpdf, outpng]
        x = subprocess.Popen(cl2,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
        retval2 = x.wait()
        sto.close()
        if retval2 == 0:
            os.unlink(hlog)
        retval = retval1 or retval2
        return retval
    def getfSize(self,fpath,outpath):
        """
        format a nice file size string
        """
        size = ''
        fp = os.path.join(outpath,fpath)
        if os.path.isfile(fp):
            size = '0 B'
            n = float(os.path.getsize(fp))
            if n > 2**20:
                size = '%1.1f MB' % (n/2**20)
            elif n > 2**10:
                size = '%1.1f KB' % (n/2**10)
            elif n > 0:
                size = '%d B' % (int(n))
        return size
    def makeHtml(self):
        """ Create an HTML file content to list all the artifacts found in the output_dir
        """
        galhtmlprefix = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="toolFormBody">
"""
        # NOTE(review): galhtmlattr is defined but never referenced below
        galhtmlattr = """<hr/><div class="infomessage">This tool (%s) was generated by the <a href="https://bitbucket.org/fubar/galaxytoolfactory/overview">Galaxy Tool Factory</a></div><br/>"""
        galhtmlpostfix = """</div></body></html>\n"""
        flist = os.listdir(self.opts.output_dir)
        flist = [x for x in flist if x <> 'Rplots.pdf']
        flist.sort()
        html = []
        html.append(galhtmlprefix % progname)
        html.append('<div class="infomessage">Galaxy Tool "%s" run at %s</div><br/>' % (self.toolname,timenow()))
        fhtml = []
        if len(flist) > 0:
            # log file names determine the report sections; the runner log is always last
            logfiles = [x for x in flist if x.lower().endswith('.log')] # log file names determine sections
            logfiles.sort()
            logfiles = [x for x in logfiles if os.path.abspath(x) <> os.path.abspath(self.tlog)]
            logfiles.append(os.path.abspath(self.tlog)) # make it the last one
            pdflist = []
            npdf = len([x for x in flist if os.path.splitext(x)[-1].lower() == '.pdf'])
            # first pass: compress/thumbnail every pdf and build the file listing rows
            for rownum,fname in enumerate(flist):
                dname,e = os.path.splitext(fname)
                sfsize = self.getfSize(fname,self.opts.output_dir)
                if e.lower() == '.pdf' : # compress and make a thumbnail
                    thumb = '%s.%s' % (dname,self.thumbformat)
                    pdff = os.path.join(self.opts.output_dir,fname)
                    retval = self.compressPDF(inpdf=pdff,thumbformat=self.thumbformat)
                    if retval == 0:
                        pdflist.append((fname,thumb))
                    else:
                        pdflist.append((fname,fname))
                if (rownum+1) % 2 == 0:
                    fhtml.append('<tr class="odd_row"><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname,fname,sfsize))
                else:
                    fhtml.append('<tr><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname,fname,sfsize))
            # second pass: one report section per log file, each with its matching pdfs
            for logfname in logfiles: # expect at least tlog - if more
                if os.path.abspath(logfname) == os.path.abspath(self.tlog): # handled later
                    sectionname = 'All tool run'
                    if (len(logfiles) > 1):
                        sectionname = 'Other'
                    ourpdfs = pdflist
                else:
                    realname = os.path.basename(logfname)
                    sectionname = os.path.splitext(realname)[0].split('_')[0] # break in case _ added to log
                    ourpdfs = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] == sectionname]
                    pdflist = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] <> sectionname] # remove
                nacross = 1
                npdf = len(ourpdfs)
                if npdf > 0:
                    # lay thumbnails out in a roughly square grid
                    nacross = math.sqrt(npdf) ## int(round(math.log(npdf,2)))
                    if int(nacross)**2 != npdf:
                        nacross += 1
                    nacross = int(nacross)
                    width = min(400,int(1200/nacross))
                    html.append('<div class="toolFormTitle">%s images and outputs</div>' % sectionname)
                    html.append('(Click on a thumbnail image to download the corresponding original PDF image)<br/>')
                    ntogo = nacross # counter for table row padding with empty cells
                    html.append('<div><table class="simple" cellpadding="2" cellspacing="2">\n<tr>')
                    for i,paths in enumerate(ourpdfs):
                        fname,thumb = paths
                        s= """<td><a href="%s"><img src="%s" title="Click to download a PDF of %s" hspace="5" width="%d"
alt="Image called %s"/></a></td>\n""" % (fname,thumb,fname,width,fname)
                        if ((i+1) % nacross == 0):
                            s += '</tr>\n'
                            ntogo = 0
                            if i < (npdf - 1): # more to come
                                s += '<tr>'
                                ntogo = nacross
                        else:
                            ntogo -= 1
                        html.append(s)
                    if html[-1].strip().endswith('</tr>'):
                        html.append('</table></div>\n')
                    else:
                        if ntogo > 0: # pad
                            html.append('<td> </td>'*ntogo)
                        html.append('</tr></table></div>\n')
                # append the log text itself for this section
                logt = open(logfname,'r').readlines()
                logtext = [x for x in logt if x.strip() > '']
                html.append('<div class="toolFormTitle">%s log output</div>' % sectionname)
                if len(logtext) > 1:
                    html.append('\n<pre>\n')
                    html += logtext
                    html.append('\n</pre>\n')
                else:
                    html.append('%s is empty<br/>' % logfname)
            if len(fhtml) > 0:
                fhtml.insert(0,'<div><table class="colored" cellpadding="3" cellspacing="3"><tr><th>Output File Name (click to view)</th><th>Size</th></tr>\n')
                fhtml.append('</table></div><br/>')
                html.append('<div class="toolFormTitle">All output files available for downloading</div>\n')
                html += fhtml # add all non-pdf files to the end of the display
        else:
            html.append('<div class="warningmessagelarge">### Error - %s returned no files - please confirm that parameters are sane</div>' % self.opts.interpreter)
        html.append(galhtmlpostfix)
        htmlf = file(self.opts.output_html,'w')
        htmlf.write('\n'.join(html))
        htmlf.write('\n')
        htmlf.close()
        self.html = html
    def run(self):
        """
        scripts must be small enough not to fill the pipe!
        """
        if self.treatbashSpecial and self.opts.interpreter in ['bash','sh']:
            retval = self.runBash()
        else:
            if self.opts.output_dir:
                ste = open(self.elog,'w')
                sto = open(self.tlog,'w')
                sto.write('## Toolfactory generated command line = %s\n' % ' '.join(self.cl))
                sto.flush()
                #p = subprocess.Popen(self.cl,shell=False,stdout=sto,stderr=ste,stdin=subprocess.PIPE,cwd=self.opts.output_dir)
                p = subprocess.Popen(self.cl,shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE,stdin=subprocess.PIPE,cwd=self.opts.output_dir)
            else:
                p = subprocess.Popen(self.cl,shell=False,stdin=subprocess.PIPE)
            # feed the script on stdin (the '-' argument built in __init__)
            p.stdin.write(self.script)
            stdout_data, stderr_data = p.communicate()
            p.stdin.close()
            retval = p.returncode
            #retval = p.wait()
            if self.opts.output_dir:
                sto.close()
                ste.close()
                err = stderr_data
                #err = open(self.elog,'r').readlines()
            # NOTE(review): when output_dir is falsy, err is never bound and the
            # test below would raise NameError on a nonzero exit - confirm intended
            print >> sys.stdout,stdout_data
            if retval <> 0 and err: # problem
                print >> sys.stderr,err
        if self.opts.make_HTML:
            self.makeHtml()
        return retval
    def runBash(self):
        """
        cannot use - for bash so use self.sfile
        """
        if self.opts.output_dir:
            s = '## Toolfactory generated command line = %s\n' % ' '.join(self.cl)
            sto = open(self.tlog,'w')
            sto.write(s)
            sto.flush()
            p = subprocess.Popen(self.cl,shell=False,stdout=sto,stderr=sto,cwd=self.opts.output_dir)
        else:
            p = subprocess.Popen(self.cl,shell=False)
        retval = p.wait()
        if self.opts.output_dir:
            sto.close()
        if self.opts.make_HTML:
            self.makeHtml()
        return retval
def main():
    """Entry point: parse the wrapper options, run the user's script and
    optionally package it as a toolshed tarball; exit nonzero on failure."""
    u = """
This is a Galaxy wrapper. It expects to be called by a special purpose tool.xml as:
<command interpreter="python">rgBaseScriptWrapper.py --script_path "$scriptPath" --tool_name "foo" --interpreter "Rscript"
</command>
"""
    op = optparse.OptionParser()
    # (flag, default) pairs - every option the generated tool.xml may pass
    option_defaults = [
        ('--script_path', None),
        ('--tool_name', None),
        ('--interpreter', None),
        ('--output_dir', './'),
        ('--output_html', None),
        ('--input_tab', "None"),
        ('--input_int', "None"),
        ('--input_formats', "tabular,text"),
        ('--output_tab', "None"),
        ('--output_format', "tabular"),
        ('--user_email', 'Unknown'),
        ('--bad_user', None),
        ('--make_Tool', None),
        ('--make_HTML', None),
        ('--help_text', None),
        ('--citations', None),
        ('--tool_desc', None),
        ('--new_tool', None),
        ('--tool_version', None),
        ('--include_dependencies', None),
    ]
    for flag, default in option_defaults:
        op.add_option(flag, default=default)
    opts, args = op.parse_args()
    # hard requirements before anything is executed
    assert not opts.bad_user,'UNAUTHORISED: %s is NOT authorized to use this tool until Galaxy admin adds %s to admin_users in universe_wsgi.ini' % (opts.bad_user,opts.bad_user)
    assert opts.tool_name,'## Tool Factory expects a tool name - eg --tool_name=DESeq'
    assert opts.interpreter,'## Tool Factory wrapper expects an interpreter - eg --interpreter=Rscript'
    assert os.path.isfile(opts.script_path),'## Tool Factory wrapper expects a script path - eg --script_path=foo.R'
    if opts.output_dir:
        try:
            os.makedirs(opts.output_dir)
        except:
            pass  # best-effort: the directory may already exist
    runner = ScriptRunner(opts)
    if opts.make_Tool:
        retcode = runner.makeTooltar()
    else:
        retcode = runner.run()
    os.unlink(runner.sfile)  # remove the temp copy of the script
    if retcode:
        sys.exit(retcode) # indicate failure to job runner
if __name__ == "__main__":
    # direct execution entry point (the generated tool re-invokes this file)
    main()
| myoshimura080822/tools_of_rnaseq_on_docker_galaxy | AddGroupIdForDEGAnalysis/AddGroupIdForDEGAnalysis.py | Python | mit | 32,429 | [
"Galaxy"
] | 0ef8420231c2897b8d9f237c854de8738dddb4d82108a824d98cca59dc92d999 |
import pytest
from ethereum import tester
from ethereum import utils
from ethereum._solidity import get_solidity
# contract sources are read from the working directory at import time
solidity_currency = open('currency.sol').read()
serpent_currency = open('currency.se').read()
@pytest.mark.skipif(get_solidity() is None, reason="'solc' compiler not available")
def test_currency_apis():
    """Exercise the sub-currency contract ABI in both Serpent and Solidity builds."""
    s = tester.state()
    c1 = s.abi_contract(serpent_currency, sender=tester.k0)
    c2 = s.abi_contract(solidity_currency, language='solidity', sender=tester.k0)
    o = []
    # NOTE(review): the listener closes over the *names* c and o, so at event
    # time it uses whichever contract/list the loop below has currently bound
    s.block.log_listeners.append(lambda x: o.append(c._translator.listen(x)))
    for c in (c1, c2):
        o = []
        assert c.coinBalanceOf(tester.a0) == 1000000
        assert c.sendCoin(1000, tester.a2, sender=tester.k0) is True
        assert c.sendCoin(999001, tester.a2, sender=tester.k0) is False  # overdraft refused
        assert c.sendCoinFrom(tester.a2, 500, tester.a3, sender=tester.k0) is False
        # one-shot allowance: works once for up to 500, then is consumed
        c.approveOnce(tester.a0, 500, sender=tester.k2)
        assert c.sendCoinFrom(tester.a2, 400, tester.a3, sender=tester.k0) is True
        assert c.sendCoinFrom(tester.a2, 400, tester.a3, sender=tester.k0) is False
        assert c.sendCoinFrom(tester.a2, 100, tester.a3, sender=tester.k0) is True
        assert c.sendCoinFrom(tester.a2, 100, tester.a3, sender=tester.k0) is False
        # standing approval works until explicitly disapproved
        c.approve(tester.a0, sender=tester.k2)
        assert c.sendCoinFrom(tester.a2, 100, tester.a3, sender=tester.k0) is True
        c.disapprove(tester.a0, sender=tester.k2)
        assert c.sendCoinFrom(tester.a2, 100, tester.a3, sender=tester.k0) is False
        assert c.coinBalance(sender=tester.k0) == 999000
        assert c.coinBalanceOf(tester.a2) == 400
        assert c.coinBalanceOf(tester.a3) == 600
        # every successful transfer must have emitted a CoinSent event
        assert o == [{"_event_type": b"CoinSent", "from": utils.encode_hex(tester.a0),
                      "value": 1000, "to": utils.encode_hex(tester.a2)},
                     {"_event_type": b"CoinSent", "from": utils.encode_hex(tester.a2),
                      "value": 400, "to": utils.encode_hex(tester.a3)},
                     {"_event_type": b"CoinSent", "from": utils.encode_hex(tester.a2),
                      "value": 100, "to": utils.encode_hex(tester.a3)},
                     {"_event_type": b"CoinSent", "from": utils.encode_hex(tester.a2),
                      "value": 100, "to": utils.encode_hex(tester.a3)}]
# name-registry contract sources
serpent_namereg = open('namereg.se').read()
solidity_namereg = open('namereg.sol').read()
@pytest.mark.skipif(get_solidity() is None, reason="'solc' compiler not available")
def test_registrar_apis():
    """Exercise the name-registry contract ABI in both Serpent and Solidity builds."""
    s = tester.state()
    c1 = s.abi_contract(serpent_namereg, sender=tester.k0)
    c2 = s.abi_contract(solidity_namereg, language='solidity', sender=tester.k0)
    o = []
    # NOTE(review): as in test_currency_apis, the listener late-binds c and o
    s.block.log_listeners.append(lambda x: o.append(c._translator.listen(x)))
    for c in (c1, c2):
        o = []
        assert c.reserve('moose', sender=tester.k0) is True
        assert c.reserve('moose', sender=tester.k0) is False  # already taken
        assert c.owner('moose') == utils.encode_hex(tester.a0)
        # only the owner's writes stick; k1's attempts must be ignored
        c.setAddr('moose', tester.a5)
        c.setAddr('moose', tester.a6, sender=tester.k1)
        assert c.addr('moose') == utils.encode_hex(tester.a5)
        # ownership transfers once; the second transfer comes from a non-owner
        c.transfer('moose', tester.a1, sender=tester.k0)
        c.transfer('moose', tester.a2, sender=tester.k0)
        assert c.owner('moose') == utils.encode_hex(tester.a1)
        c.setContent('moose', 'antlers', sender=tester.k0)
        c.setContent('moose', 'reindeer', sender=tester.k1)
        assert c.content('moose')[:8] == 'reindeer'
        c.setSubRegistrar('moose', tester.a7, sender=tester.k1)
        c.setSubRegistrar('moose', tester.a8, sender=tester.k2)
        assert c.subRegistrar('moose') == utils.encode_hex(tester.a7)
        # five successful state changes, each logging one Changed event
        assert o == [{"_event_type": b"Changed", "name": b'moose', "__hash_name": utils.sha3(b'moose')}] * 5
# order-book exchange contract sources
solidity_exchange = open('exchange.sol').read()
serpent_exchange = open('exchange.se').read()
@pytest.mark.skipif(get_solidity() is None, reason="'solc' compiler not available")
def test_exchange_apis():
    """Exercise the exchange contract: place/claim an order between an
    'offered' currency (oc) and a 'wanted' currency (wc), checking the
    one-shot withdrawal approvals and the final balances and Traded event."""
    s = tester.state()
    oc1 = s.abi_contract(serpent_currency, sender=tester.k0)
    oc2 = s.abi_contract(solidity_currency, language='solidity', sender=tester.k0)
    wc1 = s.abi_contract(serpent_currency, sender=tester.k1)
    wc2 = s.abi_contract(solidity_currency, language='solidity', sender=tester.k1)
    e1 = s.abi_contract(serpent_exchange, sender=tester.k0)
    e2 = s.abi_contract(solidity_exchange, language='solidity', sender=tester.k0)
    o = []
    s.block.log_listeners.append(lambda x: o.append(e1._translator.listen(x)))
    # Test serpent-solidity, solidity-serpent interop
    # NOTE(review): the [1:] slice runs only the (oc2, wc2, e1) pairing -
    # looks like a debugging leftover; confirm the first combo is meant to be skipped
    for (oc, wc, e) in ((oc1, wc1, e2), (oc2, wc2, e1))[1:]:
        o = []
        assert oc.coinBalanceOf(tester.a0) == 1000000
        assert oc.coinBalanceOf(tester.a1) == 0
        assert wc.coinBalanceOf(tester.a0) == 0
        assert wc.coinBalanceOf(tester.a1) == 1000000
        # Offer fails because not approved to withdraw
        assert e.placeOrder(oc.address, 1000, wc.address, 5000, sender=tester.k0) == 0
        # Approve to withdraw
        oc.approveOnce(e.address, 1000, sender=tester.k0)
        # Offer succeeds
        oid = e.placeOrder(oc.address, 1000, wc.address, 5000, sender=tester.k0)
        assert oid > 0
        # Offer fails because withdrawal approval was one-time
        assert e.placeOrder(oc.address, 1000, wc.address, 5000, sender=tester.k0) == 0
        # Claim fails because not approved to withdraw
        assert e.claimOrder(oid, sender=tester.k1) is False
        # Approve to withdraw
        wc.approveOnce(e.address, 5000, sender=tester.k1)
        # Claim succeeds
        assert e.claimOrder(oid, sender=tester.k1) is True
        # Check balances
        assert oc.coinBalanceOf(tester.a0) == 999000
        assert oc.coinBalanceOf(tester.a1) == 1000
        assert wc.coinBalanceOf(tester.a0) == 5000
        assert wc.coinBalanceOf(tester.a1) == 995000
        # the exchange must have logged the matching Traded event
        assert {"_event_type": b"Traded",
                "currencyPair": oc.address[:16] + wc.address[:16],
                "seller": utils.encode_hex(tester.a0), "offerValue": 1000,
                "buyer": utils.encode_hex(tester.a1), "wantValue": 5000} in o
# datafeed contract sources
serpent_datafeed = open('datafeed.se').read()
solidity_datafeed = open('datafeed.sol').read()
@pytest.mark.skipif(get_solidity() is None, reason="'solc' compiler not available")
def test_datafeeds():
    """Serpent and Solidity datafeeds both honour owner-only writes."""
    state = tester.state()
    feeds = [
        state.abi_contract(serpent_datafeed, sender=tester.k0),
        state.abi_contract(solidity_datafeed, language='solidity', sender=tester.k0),
    ]
    for feed in feeds:
        # only the creator (k0) may set values; k1's write must be ignored
        feed.set('moose', 110, sender=tester.k0)
        feed.set('moose', 125, sender=tester.k1)
        assert feed.get('moose') == 110
# Fee-charging datafeed contract sources, read once at import time.
# Context managers close the handles promptly instead of leaking them.
with open('fee_charging_datafeed.se') as _src:
    serpent_ether_charging_datafeed = _src.read()
with open('fee_charging_datafeed.sol') as _src:
    solidity_ether_charging_datafeed = _src.read()
@pytest.mark.skipif(get_solidity() is None, reason="'solc' compiler not available")
def test_ether_charging_datafeeds():
    """Fee-charging datafeeds gate reads on payment of the owner-set fee."""
    state = tester.state()
    feeds = [
        state.abi_contract(serpent_ether_charging_datafeed, sender=tester.k0),
        state.abi_contract(solidity_ether_charging_datafeed,
                           language='solidity', sender=tester.k0),
    ]
    for feed in feeds:
        # owner-only writes, as in the plain datafeed test
        feed.set('moose', 110, sender=tester.k0)
        feed.set('moose', 125, sender=tester.k1)
        assert feed.get('moose') == 110
        # only the owner (k0) may change the fee
        feed.setFee(70, sender=tester.k0)
        feed.setFee(110, sender=tester.k1)
        assert feed.getFee() == 70
        # reads without at least the 70-wei fee come back empty
        assert feed.get('moose') == 0
        assert feed.get('moose', value=69) == 0
        assert feed.get('moose', value=70) == 110
| LianaHus/dapp-bin | standardized_contract_apis/test.py | Python | mit | 7,672 | [
"MOOSE"
] | 9d6c8f81ca2e63f9b61d437f8c5e858c613afd6535f3718f9a92049c80fc1804 |
#!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Access control helper.
The functions in this module can be used to check access control
related requirements. When the specified required conditions are not
met, an exception is raised. This exception contains a views that
either prompts for authentication, or informs the user that they
do not meet the required criteria.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Todd Larsen" <tlarsen@google.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
'"Pawel Solyga" <pawel.solyga@gmail.com>',
]
from django.utils.translation import ugettext
from soc.logic import dicts
from soc.logic.helper import timeline as timeline_helper
from soc.logic.models.club_admin import logic as club_admin_logic
from soc.logic.models.club_member import logic as club_member_logic
from soc.logic.models.document import logic as document_logic
from soc.logic.models.host import logic as host_logic
from soc.logic.models.mentor import logic as mentor_logic
from soc.logic.models.org_admin import logic as org_admin_logic
from soc.logic.models.org_app_record import logic as org_app_record_logic
from soc.logic.models.organization import logic as org_logic
from soc.logic.models.program import logic as program_logic
from soc.logic.models.request import logic as request_logic
from soc.logic.models.role import logic as role_logic
from soc.logic.models.site import logic as site_logic
from soc.logic.models.sponsor import logic as sponsor_logic
from soc.logic.models.student import logic as student_logic
from soc.logic.models.timeline import logic as timeline_logic
from soc.logic.models.user import logic as user_logic
from soc.modules import callback
from soc.views.helper import redirects
from soc.views import out_of_band
from soc.modules.ghop.logic.models.mentor import logic as ghop_mentor_logic
from soc.modules.ghop.logic.models.organization import logic as ghop_org_logic
from soc.modules.ghop.logic.models.org_admin import logic as \
ghop_org_admin_logic
from soc.modules.ghop.logic.models.program import logic as ghop_program_logic
from soc.modules.ghop.logic.models.student import logic as ghop_student_logic
from soc.modules.gsoc.logic.models.mentor import logic as gsoc_mentor_logic
from soc.modules.gsoc.logic.models.organization import logic as gsoc_org_logic
from soc.modules.gsoc.logic.models.org_admin import logic as \
gsoc_org_admin_logic
from soc.modules.gsoc.logic.models.program import logic as gsoc_program_logic
from soc.modules.gsoc.logic.models.student import logic as gsoc_student_logic
# User-facing access-denial messages. All are lazily translated via ugettext;
# names ending in _FMT are %-interpolated with context before display.
DEF_NO_USER_LOGIN_MSG = ugettext(
    'Please create <a href="/user/create_profile">User Profile</a>'
    ' in order to view this page.')
DEF_AGREE_TO_TOS_MSG_FMT = ugettext(
    'You must agree to the <a href="%(tos_link)s">site-wide Terms of'
    ' Service</a> in your <a href="/user/edit_profile">User Profile</a>'
    ' in order to view this page.')
# the doubled %% around sign_out/sign_in leaves those placeholders intact
# after the first interpolation, so a later pass can fill in the links
DEF_DEV_LOGOUT_LOGIN_MSG_FMT = ugettext(
    'Please <a href="%%(sign_out)s">sign out</a>'
    ' and <a href="%%(sign_in)s">sign in</a>'
    ' again as %(role)s to view this page.')
DEF_NEED_MEMBERSHIP_MSG_FMT = ugettext(
    'You need to be in the %(status)s group to %(action)s'
    ' documents in the %(prefix)s prefix.')
DEF_NEED_ROLE_MSG = ugettext(
    'You do not have the required role.')
DEF_NOT_YOUR_ENTITY_MSG = ugettext(
    'This entity does not belong to you.')
DEF_ENTITY_DOES_NOT_HAVE_STATUS = ugettext(
    'There is no entity with the required status.')
DEF_NO_ACTIVE_ENTITY_MSG = ugettext(
    'There is no such active entity.')
DEF_NO_ACTIVE_GROUP_MSG = ugettext(
    'There is no such active group.')
DEF_NO_ACTIVE_ROLE_MSG = ugettext(
    'There is no such active role.')
DEF_ALREADY_PARTICIPATING_MSG = ugettext(
    'You cannot become a Student because you are already participating '
    'in this program.')
DEF_ALREADY_STUDENT_ROLE_MSG = ugettext(
    'You cannot become a Mentor or Organization Admin because you already are '
    'a Student in this program.')
DEF_NO_ACTIVE_PROGRAM_MSG = ugettext(
    'There is no such active program.')
DEF_NO_REQUEST_MSG = ugettext(
    'There is no accepted request that would allow you to visit this page. '
    'Perhaps you already accepted this request?')
DEF_NO_APPLICATION_MSG = ugettext(
    'There is no application that would allow you to visit this page.')
DEF_NEED_PICK_ARGS_MSG = ugettext(
    'The "continue" and "field" args are not both present.')
DEF_REVIEW_COMPLETED_MSG = ugettext('This Application can not be reviewed '
    'anymore because it has been completed already.')
DEF_REQUEST_COMPLETED_MSG = ugettext(
    'This request cannot be accepted (it is either completed or denied).')
DEF_REQUEST_NOT_ACCEPTED_MSG = ugettext(
    'This request has not been accepted by the group (it can also be completed already).')
DEF_SCOPE_INACTIVE_MSG = ugettext(
    'The scope for this request is not active.')
DEF_NO_LIST_ACCESS_MSG = ugettext('You do not have the required rights to '
    'list documents for this scope and prefix.')
DEF_PAGE_DENIED_MSG = ugettext(
    'Access to this page has been restricted.')
DEF_PREFIX_NOT_IN_ARGS_MSG = ugettext(
    'A required GET url argument ("prefix") was not specified.')
DEF_PAGE_INACTIVE_MSG = ugettext(
    'This page is inactive at this time.')
DEF_LOGOUT_MSG_FMT = ugettext(
    'Please <a href="%(sign_out)s">sign out</a> in order to view this page.')
DEF_GROUP_NOT_FOUND_MSG = ugettext(
    'The requested Group can not be found.')
DEF_NO_VALID_RECORD_ID = ugettext('No valid numeric record ID given.')
DEF_NOT_YOUR_RECORD = ugettext(
    'This is not your Survey Record. If you feel you should have access to '
    'this page please notify the administrators.')
DEF_USER_ACCOUNT_INVALID_MSG_FMT = ugettext(
    'The <b><i>%(email)s</i></b> account cannot be used with this site, for'
    ' one or more of the following reasons:'
    '<ul>'
    ' <li>the account is invalid</li>'
    ' <li>the account is already attached to a User profile and cannot be'
    ' used to create another one</li>'
    ' <li>the account is a former account that cannot be used again</li>'
    '</ul>')
class Error(Exception):
  """Root of the exception hierarchy raised by this module."""
class InvalidArgumentError(Error):
  """Signals that a method received an argument it cannot accept.

  For example, an argument that is None where a non-False value is required.
  """
def allowSidebar(fun):
  """Decorator that lets sidebar-originated requests through unconditionally.
  """
  from functools import wraps
  @wraps(fun)
  def wrapper(self, django_args, *args, **kwargs):
    """Skips the wrapped check entirely while the sidebar is rendering."""
    if not django_args.get('SIDEBAR_CALLING'):
      return fun(self, django_args, *args, **kwargs)
    return None
  return wrapper
def denySidebar(fun):
  """Decorator that rejects sidebar-originated requests outright.
  """
  from functools import wraps
  @wraps(fun)
  def wrapper(self, django_args, *args, **kwargs):
    """Raises while the sidebar is rendering, otherwise runs the check."""
    if not django_args.get('SIDEBAR_CALLING'):
      return fun(self, django_args, *args, **kwargs)
    raise out_of_band.Error("Sidebar Calling")
  return wrapper
def allowIfCheckPasses(checker_name):
  """Returns a decorator granting access whenever the named checker passes.
  """
  from functools import wraps
  def _decorate(fun):
    """Builds the guarded variant of fun."""
    @wraps(fun)
    def _guarded(self, django_args=None, *args, **kwargs):
      """Tries checker_name first; falls back to the original check."""
      try:
        # a passing privileged check grants access outright
        return self.doCheck(checker_name, django_args, [])
      except out_of_band.Error:
        # privileged check failed, so run the original check instead
        return fun(self, django_args, *args, **kwargs)
    return _guarded
  return _decorate
# pylint: disable-msg=C0103
# Grants access outright when the current user passes checkIsDeveloper;
# otherwise the decorated checker runs as usual.
allowDeveloper = allowIfCheckPasses('checkIsDeveloper')
class Checker(object):
  """Aggregates and runs the access-control checks configured for a view.

  The __setitem__() and __getitem__() methods are overloaded to DTRT
  when adding new access rights, and retrieving them, so use these
  rather than modifying rights directly if so desired.
  """
  # Maps a symbolic role name to the checker that verifies membership in
  # that role: either the literal 'allow', a checker method name, or a
  # (checker name, extra args) tuple as accepted by normalizeChecker.
  MEMBERSHIP = {
      'anyone': 'allow',
      'club_admin': ('checkHasRoleForScope', club_admin_logic),
      'club_member': ('checkHasRoleForScope', club_member_logic),
      'host': ('checkHasDocumentAccess', [host_logic, 'sponsor']),
      'org_admin': ('checkHasDocumentAccess', [org_admin_logic, 'org']),
      'org_mentor': ('checkHasDocumentAccess', [mentor_logic, 'org']),
      'org_student': ('checkHasDocumentAccess', [student_logic, 'org']),
      'ghop_org_admin': ('checkHasDocumentAccess', [ghop_org_admin_logic, 'org']),
      'ghop_org_mentor': ('checkHasDocumentAccess', [ghop_mentor_logic, 'org']),
      'ghop_org_student': ('checkHasDocumentAccess', [ghop_student_logic, 'org']),
      'gsoc_org_admin': ('checkHasDocumentAccess', [gsoc_org_admin_logic, 'org']),
      'gsoc_org_mentor': ('checkHasDocumentAccess', [gsoc_mentor_logic, 'org']),
      'gsoc_org_student': ('checkHasDocumentAccess', [gsoc_student_logic, 'org']),
      'user': 'checkIsUser',
      'user_self': ('checkIsUserSelf', 'scope_path'),
      }
  #: the depths of various scopes to other scopes
  # the 0 entries are not used, and are for clarity purposes only
  SCOPE_DEPTH = {
      'site': None,
      'sponsor': (sponsor_logic, {'sponsor': 0}),
      'program': (program_logic, {'sponsor': 1, 'program': 0}),
      'ghop_program': (
          ghop_program_logic, {'sponsor': 1, 'ghop_program': 0}),
      'gsoc_program': (
          gsoc_program_logic, {'sponsor': 1, 'gsoc_program': 0}),
      'org': (org_logic, {'sponsor': 2, 'program': 1, 'org': 0}),
      'ghop_org': (
          ghop_org_logic, {'sponsor': 2, 'ghop_program': 1, 'ghop_org': 0}),
      'gsoc_org': (
          gsoc_org_logic, {'sponsor': 2, 'gsoc_program': 1, 'gsoc_org': 0}),
      }
def __init__(self, params):
  """Adopts base.rights as rights if base is set.

  Args:
    params: dict that may carry a base Checker under its 'rights' key
  """
  base = params.get('rights') if params else None
  if base:
    self.rights = base.rights
  else:
    self.rights = {}
  self.id = None
  self.user = None
def normalizeChecker(self, checker):
  """Normalizes the checker to a pre-defined format.

  The result is always a 2-tuple: the checker name, and the list of
  arguments to pass to the checker in addition to django_args (empty
  when the checker was given as a bare name).
  """
  if not isinstance(checker, tuple):
    # bare checker name, no extra arguments
    return (checker, [])
  name, arg = checker
  extra = arg if isinstance(arg, list) else [arg]
  return (name, extra)
def __setitem__(self, key, value):
  """Stores value under key unless a truthy value is already present."""
  if not self.rights.get(key):
    self.rights[key] = value
def __getitem__(self, key):
  """Returns the normalized checker list registered for key ([] if none)."""
  checkers = self.rights.get(key, [])
  return [self.normalizeChecker(checker) for checker in checkers]
def key(self, checker_name):
  """Builds the per-user request-cache key for checker_name."""
  cache_key = "checker.%s.%s" % (self.id, checker_name)
  return cache_key
def put(self, checker_name, value):
  """Records value as checker_name's outcome in the request cache."""
  callback.getCore().setRequestValue(self.key(checker_name), value)
def get(self, checker_name):
  """Fetches checker_name's outcome from the request cache."""
  return callback.getCore().getRequestValue(self.key(checker_name))
def doCheck(self, checker_name, django_args, args):
  """Looks up the named checker method on self and invokes it."""
  getattr(self, checker_name)(django_args, *args)
def doCachedCheck(self, checker_name, django_args, args):
  """Retrieves from cache or runs the specified checker.

  Outcomes are cached per request: True on success, or the raised
  out_of_band.Error instance on failure, which is re-raised on later hits.
  """
  cached = self.get(checker_name)
  if cached is None:
    # no cached outcome yet: run the checker and remember the result
    try:
      self.doCheck(checker_name, django_args, args)
      self.put(checker_name, True)
      return
    except out_of_band.Error, exception:
      # cache the failure so repeated checks raise without re-running
      self.put(checker_name, exception)
      raise
  if cached is True:
    return
  # re-raise the cached exception
  raise cached
def check(self, use_cache, checker_name, django_args, args):
  """Runs the checker, consulting the per-request cache when use_cache."""
  runner = self.doCachedCheck if use_cache else self.doCheck
  runner(checker_name, django_args, args)
def setCurrentUser(self, id, user):
  """Stores the logged-in account and its User entity for later checks.

  Args:
    id: the logged-in account object
    user: the User entity belonging to that account (may be None)
  """
  self.id = id
  self.user = user
def checkAccess(self, access_type, django_args):
  """Runs all the defined checks for the specified type.

  Args:
    access_type: the type of request (such as 'list' or 'edit')
    django_args: a dictionary with django's arguments

  Rights usage:
    The rights dictionary is used to check if the current user is allowed
    to view the page specified. The functions defined in this dictionary
    are always called with the provided django_args dictionary as argument. On any
    request, regardless of what type, the functions in the 'any_access' value
    are called. If the specified type is not in the rights dictionary, all
    the functions in the 'unspecified' value are called. When the specified
    type _is_ in the rights dictionary, all the functions in that access_type's
    value are called.
  """
  # results are only cached while the sidebar is rendering, where the same
  # checks repeat many times within a single request
  use_cache = django_args.get('SIDEBAR_CALLING')
  # Call each access checker
  for checker_name, args in self['any_access']:
    self.check(use_cache, checker_name, django_args, args)
  if access_type not in self.rights:
    # No checks defined, so do the 'generic' checks and bail out
    for checker_name, args in self['unspecified']:
      self.check(use_cache, checker_name, django_args, args)
    return
  for checker_name, args in self[access_type]:
    self.check(use_cache, checker_name, django_args, args)
def hasMembership(self, roles, django_args):
  """Checks whether the user has access to any of the specified roles.

  Makes use of self.MEMBERSHIP, which defines checkers specific to
  document access, as such this method should only be used when checking
  document access.

  Args:
    roles: a list of roles to check
    django_args: the django args that should be passed to doCheck

  Returns:
    True iff the user is a Developer or passes at least one role's check.
  """
  try:
    # we need to check manually, as we must return True!
    self.checkIsDeveloper(django_args)
    return True
  except out_of_band.Error:
    pass
  for role in roles:
    try:
      checker_name, args = self.normalizeChecker(self.MEMBERSHIP[role])
      self.doCheck(checker_name, django_args, args)
      # the check passed, we can stop now
      return True
    except out_of_band.Error:
      continue
  return False
@allowDeveloper
def checkMembership(self, action, prefix, status, django_args):
  """Checks whether the user has access to the specified status.

  Args:
    action: the action that was performed (e.g., 'read')
    prefix: the prefix, determines what access set is used
    status: the access status (e.g., 'public')
    django_args: the django args to pass on to the checkers

  Raises:
    AccessViolation when the user belongs to none of the roles the
    prefix's rights checker maps to the given status.
  """
  # the prefix-specific rights checker knows which roles may perform
  # this action at this status level
  checker = callback.getCore().getRightsChecker(prefix)
  roles = checker.getMembership(status)
  message_fmt = DEF_NEED_MEMBERSHIP_MSG_FMT % {
      'action': action,
      'prefix': prefix,
      'status': status,
      }
  # try to see if they belong to any of the roles, if not, raise an
  # access violation for the specified action, prefix and status.
  if not self.hasMembership(roles, django_args):
    raise out_of_band.AccessViolation(message_fmt)
def checkHasAny(self, django_args, checks):
  """Checks if any of the checks passes.

  If none of the specified checks passes, the exception that the first of the
  checks raised is reraised.

  Args:
    django_args: a dictionary with django's arguments
    checks: iterable of (checker_name, args) pairs, tried in order
  """
  first = None
  for checker_name, args in checks:
    try:
      self.doCheck(checker_name, django_args, args)
      # one check passed, all is well
      return
    except out_of_band.Error, exception:
      # store the first exception
      first = first if first else exception
  # none passed, re-raise the first exception
  # pylint: disable-msg=W0706
  raise first
def allow(self, django_args):
  """Access check that always passes; never raises.

  Args:
    django_args: a dictionary with django's arguments (unused)
  """
  pass
def deny(self, django_args=None):
  """Always raises an alternate HTTP response.

  Args:
    django_args: a dictionary with django's arguments; may be None

  Raises:
    always raises AccessViolationResponse if called
  """
  # Guard against the None default: the previous code unconditionally
  # called django_args.get(), which raised AttributeError instead of the
  # intended AccessViolation whenever deny() was invoked with no args.
  context = django_args.get('context', {}) if django_args else {}
  context['title'] = 'Access denied'
  raise out_of_band.AccessViolation(DEF_PAGE_DENIED_MSG, context=context)
def checkIsLoggedIn(self, django_args=None):
  """Raises an alternate HTTP response if Google Account is not logged in.

  Args:
    django_args: a dictionary with django's arguments, not used

  Raises:
    AccessViolationResponse:
      * if no Google Account is even logged in
  """
  if not self.id:
    raise out_of_band.LoginRequest()
def checkNotLoggedIn(self, django_args=None):
  """Raises an alternate HTTP response if a Google Account is logged in.

  Args:
    django_args: a dictionary with django's arguments, not used

  Raises:
    AccessViolationResponse:
      * if a Google Account is currently logged in
  """
  if self.id:
    raise out_of_band.LoginRequest(message_fmt=DEF_LOGOUT_MSG_FMT)
def checkIsUser(self, django_args=None):
  """Raises an alternate HTTP response if Google Account has no User entity.

  Args:
    django_args: a dictionary with django's arguments, not used

  Raises:
    AccessViolationResponse:
      * if no User exists for the logged-in Google Account, or
      * if no Google Account is logged in at all
      * if User has not agreed to the site-wide ToS, if one exists
  """
  self.checkIsLoggedIn()
  if not self.user:
    raise out_of_band.LoginRequest(message_fmt=DEF_NO_USER_LOGIN_MSG)
  if user_logic.agreesToSiteToS(self.user):
    return
  # Would not reach this point if a site-wide ToS did not exist, since
  # agreesToSiteToS() call above always returns True if no ToS is in effect.
  login_msg_fmt = DEF_AGREE_TO_TOS_MSG_FMT % {
      'tos_link': redirects.getToSRedirect(site_logic.getSingleton())}
  raise out_of_band.LoginRequest(message_fmt=login_msg_fmt)
@allowDeveloper
def checkIsHost(self, django_args=None):
  """Checks whether the current user has an active Host role.

  Args:
    django_args: the keyword args from django, not used
  """
  return self.checkHasRole(django_args or {}, host_logic)
@allowDeveloper
def checkIsUserSelf(self, django_args, field_name):
  """Checks whether the specified user is the logged in user.

  Args:
    django_args: the keyword args from django; only field_name is read
    field_name: the key in django_args holding the link_id to compare
  """
  self.checkIsUser()
  if field_name not in django_args:
    self.deny()
  if self.user.link_id != django_args[field_name]:
    raise out_of_band.AccessViolation(DEF_NOT_YOUR_ENTITY_MSG)
def checkIsUnusedAccount(self, django_args=None):
  """Raises an alternate HTTP response if Google Account has a User entity.

  Args:
    django_args: a dictionary with django's arguments, not used

  Raises:
    AccessViolationResponse:
      * if a User exists for the logged-in Google Account, or
      * if a User has this Google Account in their formerAccounts list
  """
  self.checkIsLoggedIn()
  fields = {'account': self.id}
  user_entity = user_logic.getForFields(fields, unique=True)
  if not user_entity and not user_logic.isFormerAccount(self.id):
    # this account has not been used yet
    return
  message_fmt = DEF_USER_ACCOUNT_INVALID_MSG_FMT % {
      'email' : self.id.email()
      }
  raise out_of_band.LoginRequest(message_fmt=message_fmt)
def checkHasUserEntity(self, django_args=None):
  """Raises an alternate HTTP response if Google Account has no User entity.

  Args:
    django_args: a dictionary with django's arguments, not used

  Raises:
    AccessViolationResponse:
      * if no User exists for the logged-in Google Account, or
      * if no Google Account is logged in at all
  """
  self.checkIsLoggedIn()
  if not self.user:
    raise out_of_band.LoginRequest(message_fmt=DEF_NO_USER_LOGIN_MSG)
def checkIsDeveloper(self, django_args=None):
  """Raises an alternate HTTP response if Google Account is not a Developer.

  Args:
    django_args: a dictionary with django's arguments, not used

  Raises:
    AccessViolationResponse:
      * if User is not a Developer, or
      * if no User exists for the logged-in Google Account, or
      * if no Google Account is logged in at all
  """
  self.checkIsUser()
  if not user_logic.isDeveloper(account=self.id, user=self.user):
    login_message_fmt = DEF_DEV_LOGOUT_LOGIN_MSG_FMT % {
        'role': 'a Site Developer ',
        }
    raise out_of_band.LoginRequest(message_fmt=login_message_fmt)
@allowDeveloper
@denySidebar
def _checkHasStatus(self, django_args, logic, fields, status='active'):
  """Raises an alternate HTTP response if the entity does not have the
  specified status.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to look up the entity
    fields: the name of the fields that should be copied verbatim
            from the django_args as filter
    status: string or list of strings specifying possible status

  Returns:
    the matching entity, when one is found

  Raises:
    AccessViolationResponse:
      * if no entity is found
      * if the entity status is not active
  """
  self.checkIsUser()
  # keep only the requested filter fields, then constrain on status
  fields = dicts.filter(django_args, fields)
  fields['status'] = status
  entity = logic.getForFields(fields, unique=True)
  if entity:
    return entity
  raise out_of_band.AccessViolation(
      message_fmt=DEF_ENTITY_DOES_NOT_HAVE_STATUS)
def checkGroupIsActiveForScopeAndLinkId(self, django_args, logic):
  """Checks that the specified group is active.

  Only groups whose link_id AND scope_path both match the values from
  django_args are considered.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to look up the entity
  """
  return self._checkHasStatus(django_args, logic, ['scope_path', 'link_id'])
def checkGroupIsActiveForLinkId(self, django_args, logic):
  """Checks that the specified group is active.

  Only groups whose link_id matches the value from django_args are
  considered.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to look up the entity
  """
  fields = ['link_id']
  return self._checkHasStatus(django_args, logic, fields)
def checkHasRole(self, django_args, logic, status='active'):
  """Checks that the user has the specified role and status.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to look up the entity
    status: the status or list of status that the role may have
  """
  # filter on the current user without touching the caller's dict
  args = django_args.copy()
  args['user'] = self.user
  return self._checkHasStatus(args, logic, ['user'], status=status)
def _checkHasRoleFor(self, django_args, logic, field_name, status='active'):
  """Checks that the user has the specified role and status.

  Only roles where the field named by field_name matches the
  corresponding value from django_args are considered.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to look up the entity
    field_name: name of the field to filter on besides the user
    status: the status the role can have.
  """
  args = django_args.copy()
  args['user'] = self.user
  return self._checkHasStatus(args, logic, [field_name, 'user'],
                              status=status)
def checkHasRoleForKeyFieldsAsScope(self, django_args, logic, status='active'):
  """Checks that the user has the specified role and status.

  The role's scope_path must equal the key name built from the
  scope_path and link_id found in django_args.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to look up the entity
    status: string or list of strings indicating the status of the role
  """
  key_name = "%(scope_path)s/%(link_id)s" % django_args
  return self._checkHasRoleFor({'scope_path': key_name}, logic,
                               'scope_path', status=status)
def checkHasRoleForScope(self, django_args, logic, status='active'):
  """Checks that the user has the specified role and status.

  Only roles where the scope_path matches the scope_path from the
  django_args are considered.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to look up the entity
    status: string or list of strings indicating the status of the role
  """
  return self._checkHasRoleFor(
      django_args, logic, 'scope_path', status=status)
def checkHasRoleForLinkId(self, django_args, logic, status='active'):
  """Checks that the user has the specified role and status.

  Only roles where the link_id matches the link_id from the
  django_args are considered.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to look up the entity
    status: string or list of strings indicating the status of the role
  """
  return self._checkHasRoleFor(
      django_args, logic, 'link_id', status=status)
def checkHasRoleForLinkIdAsScope(self, django_args, logic, status='active'):
  """Checks that the user has the specified role and status.

  Only roles where the scope_path matches the link_id from the
  django_args are considered.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to look up the entity
    status: string or list of strings indicating the status of the role
  """
  args = django_args.copy()
  args['scope_path'] = args['link_id']
  return self._checkHasRoleFor(args, logic, 'scope_path', status=status)
def checkHasDocumentAccess(self, django_args, logic, target_scope):
  """Checks that the user has access to the specified document scope.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to look up the entity
    target_scope: name of the scope level (a key in the prefix's
        SCOPE_DEPTH table) at which the role must exist
  """
  prefix = django_args['prefix']
  if self.SCOPE_DEPTH.get(prefix):
    scope_logic, depths = self.SCOPE_DEPTH[prefix]
  else:
    # prefix has no scope table: fall back to a plain role check
    return self.checkHasRole(django_args, logic)
  depth = depths.get(target_scope, 0)
  # nothing to do
  if not (scope_logic and depth):
    return self.checkHasRoleForScope(django_args, logic)
  # we don't want to modify the original django args
  django_args = django_args.copy()
  entity = scope_logic.getFromKeyName(django_args['scope_path'])
  # cannot have access to the specified scope if it is invalid
  if not entity:
    raise out_of_band.AccessViolation(message_fmt=DEF_NO_ACTIVE_ENTITY_MSG)
  # walk up the scope to where we need to be
  for _ in range(depth):
    entity = entity.scope
  django_args['scope_path'] = entity.key().id_or_name()
  self.checkHasRoleForScope(django_args, logic)
def checkSeeded(self, django_args, checker_name, *args):
  """Merges any 'seed' contents into django_args, then runs the checker."""
  seed = django_args.get('seed', {})
  django_args.update(seed)
  self.doCheck(checker_name, django_args, args)
def checkCanMakeRequestToGroup(self, django_args, group_logic):
  """Raises an alternate HTTP response if the specified group is not in an
  active status.

  Args:
    django_args: a dictionary with django's arguments
    group_logic: Logic instance for the group which the request is for
  """
  self.checkIsUser(django_args)
  group = group_logic.getFromKeyName(django_args['scope_path'])
  if not group:
    raise out_of_band.Error(DEF_GROUP_NOT_FOUND_MSG, status=404)
  if group.status != 'active':
    # tell the user that this group is not active
    raise out_of_band.AccessViolation(message_fmt=DEF_NO_ACTIVE_GROUP_MSG)
def checkIsMyRequestWithStatus(self, django_args, statuses):
  """Checks whether the user is allowed to visit the page regarding Request.

  Args:
    django_args: a dictionary with django's arguments
    statuses: the statuses in which the Request may be to allow access
  """
  self.checkIsUser(django_args)
  request_id = int(django_args['id'])
  request_entity = request_logic.getFromIDOr404(request_id)
  if request_entity.user.key() != self.user.key():
    # this is not the current user's request
    raise out_of_band.AccessViolation(message_fmt=DEF_NOT_YOUR_ENTITY_MSG)
  if request_entity.status not in statuses:
    raise out_of_band.AccessViolation(
        message_fmt=DEF_REQUEST_NOT_ACCEPTED_MSG)
  if request_entity.group.status not in ['new', 'active']:
    raise out_of_band.AccessViolation(message_fmt=DEF_SCOPE_INACTIVE_MSG)
def checkCanProcessRequest(self, django_args, role_logics):
  """Raises an alternate HTTP response if the specified request does not
  exist, is already completed or rejected, or its group is not active;
  the current user must also hold one of the given roles for that group.

  Args:
    django_args: a dictionary with django's arguments
    role_logics: list with Logic instances for roles who can process
                 requests for the group the request is for.
  """
  self.checkIsUser(django_args)
  request_id = int(django_args['id'])
  request_entity = request_logic.getFromIDOr404(request_id)
  if request_entity.status in ['completed', 'rejected']:
    raise out_of_band.AccessViolation(message_fmt=DEF_REQUEST_COMPLETED_MSG)
  if request_entity.group.status != 'active':
    raise out_of_band.AccessViolation(message_fmt=DEF_SCOPE_INACTIVE_MSG)
  role_fields = {'user': self.user,
                 'scope': request_entity.group,
                 'status': 'active'}
  for role_logic in role_logics:
    if role_logic.getForFields(role_fields, unique=True):
      # the user holds at least one qualifying active role
      return
  # the current user does not have the necessary role
  raise out_of_band.AccessViolation(message_fmt=DEF_NEED_ROLE_MSG)
@allowDeveloper
@denySidebar
def checkIsHostForProgram(self, django_args, logic=program_logic):
  """Checks if the user is a host for the specified program.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic used to look up for program entity
  """
  program = logic.getFromKeyFields(django_args)
  if (not program) or (program.status == 'invalid'):
    raise out_of_band.AccessViolation(message_fmt=DEF_NO_ACTIVE_PROGRAM_MSG)
  # check the Host role against the program's own scope
  self.checkHasRoleForScope({'scope_path': program.scope_path}, host_logic)
@allowDeveloper
@denySidebar
def checkIsHostForProgramInScope(self, django_args, logic=program_logic):
  """Checks if the user is a host for the program named by scope_path.

  Args:
    django_args: a dictionary with django's arguments
    logic: Program Logic instance
  """
  scope_path = django_args.get('scope_path')
  if not scope_path:
    raise out_of_band.AccessViolation(message_fmt=DEF_PAGE_DENIED_MSG)
  program = logic.getFromKeyName(scope_path)
  if (not program) or (program.status == 'invalid'):
    raise out_of_band.AccessViolation(message_fmt=DEF_NO_ACTIVE_PROGRAM_MSG)
  # check the Host role against the program's own scope
  self.checkHasRoleForScope({'scope_path': program.scope_path}, host_logic)
@allowDeveloper
@denySidebar
def checkIsActivePeriod(self, django_args, period_name, key_name_arg,
                        program_logic):
  """Checks if the given period is active for the given program.

  Args:
    django_args: a dictionary with django's arguments
    period_name: the name of the period which is checked
    key_name_arg: the entry in django_args that specifies the given program
      keyname. If none is given the key_name is constructed from django_args
      itself.
    program_logic: Program Logic instance

  Raises:
    AccessViolationResponse:
      * if no active Program is found
      * if the period is not active
  """
  self._checkTimelineCondition(django_args, period_name, key_name_arg,
                               program_logic, timeline_helper.isActivePeriod)
@allowDeveloper
@denySidebar
def checkIsAfterEvent(self, django_args, event_name, key_name_arg,
                      program_logic):
  """Checks if the given event has taken place for the given program.

  Args:
    django_args: a dictionary with django's arguments
    event_name: the name of the event which is checked
    key_name_arg: the entry in django_args that specifies the given program
      keyname. If none is given the key_name is constructed from django_args
      itself.
    program_logic: Program Logic instance

  Raises:
    AccessViolationResponse:
      * if no active Program is found
      * if the event has not taken place yet
  """
  self._checkTimelineCondition(django_args, event_name, key_name_arg,
                               program_logic, timeline_helper.isAfterEvent)
@allowDeveloper
@denySidebar
def checkIsBeforeEvent(self, django_args, event_name, key_name_arg,
                       program_logic):
  """Checks if the given event has not taken place for the given program.

  Args:
    django_args: a dictionary with django's arguments
    event_name: the name of the event which is checked
    key_name_arg: the entry in django_args that specifies the given program
      keyname. If none is given the key_name is constructed from
      django_args itself.
    program_logic: Program Logic instance

  Raises:
    AccessViolationResponse:
      * if no active Program is found
      * if the event has already taken place
  """
  # thin wrapper: the shared timeline helper performs the actual check
  self._checkTimelineCondition(django_args, event_name, key_name_arg,
                               program_logic, timeline_helper.isBeforeEvent)
def _checkTimelineCondition(self, django_args, event_name, key_name_arg,
                            program_logic, timeline_fun):
  """Checks if the given event fulfills a certain timeline condition.

  Shared implementation behind checkIsActivePeriod, checkIsAfterEvent and
  checkIsBeforeEvent.

  Args:
    django_args: a dictionary with django's arguments
    event_name: the name of the event which is checked
    key_name_arg: the entry in django_args that specifies the given program
      keyname. If none is given the key_name is constructed from
      django_args itself.
    program_logic: Program Logic instance
    timeline_fun: function checking for the main condition; called with
      the program's timeline entity and event_name

  Raises:
    AccessViolationResponse:
      * if no Program (or no valid one) is found
      * if timeline_fun evaluates to False for the event
  """
  if key_name_arg and key_name_arg in django_args:
    key_name = django_args[key_name_arg]
  else:
    # no explicit key name given: derive it from the request arguments
    key_name = program_logic.retrieveKeyNameFromPath(
        program_logic.getKeyNameFromFields(django_args))

  program_entity = program_logic.getFromKeyName(key_name)

  if not program_entity or (
      program_entity.status in ['invalid']):
    raise out_of_band.AccessViolation(message_fmt=DEF_SCOPE_INACTIVE_MSG)

  if timeline_fun(program_entity.timeline, event_name):
    # the timeline condition is fulfilled, access granted
    return

  raise out_of_band.AccessViolation(message_fmt=DEF_PAGE_INACTIVE_MSG)
@allowSidebar
def checkCanReviewOrgAppRecord(self, django_args, org_app_logic):
  """Checks if the request to review an Organization Application Record is
  valid.

  Args:
    django_args: a dictionary with django's arguments
    org_app_logic: A logic instance for the Organization Application

  Raises AccessViolation if:
    - No valid numeric id parameter is found in the GET dictionary
    - The key of the survey of the record does not match the record in the
      django_args.
    - The status of the OrgApplicationRecord is completed
  """

  get_dict = django_args['GET']
  id = get_dict.get('id', None)  # NOTE: shadows the builtin id()

  if not(id and id.isdigit()):
    raise out_of_band.AccessViolation(
        message_fmt=DEF_NO_VALID_RECORD_ID)

  id = int(id)

  record_logic = org_app_logic.getRecordLogic()
  record = record_logic.getFromIDOr404(id)

  # the record must belong to the application named in the URL args
  expected_application = org_app_logic.getFromKeyFieldsOr404(django_args)
  found_application = record.survey

  if expected_application.key() != found_application.key():
    raise out_of_band.AccessViolation(message_fmt=DEF_NO_VALID_RECORD_ID)

  # a completed review may not be reviewed again
  if record.status == 'completed':
    raise out_of_band.AccessViolation(message_fmt=DEF_REVIEW_COMPLETED_MSG)

  return
@allowDeveloper
def checkCanViewOrgAppRecord(self, django_args, org_app_logic):
  """Checks if the current user is allowed to view the OrgAppSurveyRecord.

  The ID of the OrgAppSurveyRecord is present in the GET dict.

  Args:
    django_args: a dictionary with django's arguments
    org_app_logic: OrgAppSurveyLogic instance

  Returns:
    The OrgAppSurveyRecord entity when access is granted.

  Raises:
    AccessViolation: if no valid numeric id is present, or the current
      user is neither the record's main nor backup admin.
  """

  self.checkIsUser(django_args)

  get_dict = django_args['GET']
  id = get_dict.get('id', None)  # NOTE: shadows the builtin id()

  if not(id and id.isdigit()):
    raise out_of_band.AccessViolation(
        message_fmt=DEF_NO_VALID_RECORD_ID)

  id = int(id)

  record_logic = org_app_logic.getRecordLogic()
  org_app_record = record_logic.getFromIDOr404(id)

  # only the admins named on the record may view it
  admin_keys = [org_app_record.main_admin.key(), org_app_record.backup_admin.key()]
  if not self.user.key() in admin_keys:
    raise out_of_band.AccessViolation(
        message_fmt=DEF_NOT_YOUR_ENTITY_MSG)

  return org_app_record
@allowDeveloper
def checkOrgAppRecordIfPresent(self, django_args, org_app_logic):
  """Checks if the current user can see the OrgAppRecord iff present in GET.

  When the GET dictionary carries an 'id' parameter the full record-view
  check is performed and its result is returned; otherwise nothing more
  is checked.

  Args:
    django_args: a dictionary with django's arguments
    org_app_logic: OrgAppSurvey Logic instance
  """
  self.checkIsUser(django_args)

  record_id = django_args['GET'].get('id', None)

  if not record_id:
    # no id present so return
    return

  # id present so check whether the user can see it
  return self.checkCanViewOrgAppRecord(django_args, org_app_logic)
@allowDeveloper
def checkIsOrgAppAccepted(self, django_args, org_app_logic):
  """Checks if the current user is an owner of the OrgApplication
  and if the OrgApplication is accepted.

  Args:
    django_args: a dictionary with django's arguments
    org_app_logic: OrgAppSurvey Logic instance

  Raises:
    AccessViolation: if the user may not view the record, or if the
      record's status is not 'accepted'.
  """
  self.checkIsUser(django_args)

  # ownership check; also returns the record entity on success
  org_app_record = self.checkCanViewOrgAppRecord(django_args, org_app_logic)

  if org_app_record.status != 'accepted':
    raise out_of_band.AccessViolation(message_fmt=DEF_NO_APPLICATION_MSG)
def checkIsNotParticipatingInProgramInScope(self, django_args, program_logic,
                                            student_logic, org_admin_logic,
                                            mentor_logic):
  """Checks if the current user has no roles for the given
  program in django_args.

  Args:
    django_args: a dictionary with django's arguments
    program_logic: Program Logic instance
    student_logic: Student Logic instance
    org_admin_logic: Org Admin Logic instance
    mentor_logic: Mentor Logic instance

  Raises:
    AccessViolationResponse: if the current user has a student, mentor or
                             org admin role for the given program.
  """

  if not django_args.get('scope_path'):
    raise out_of_band.AccessViolation(message_fmt=DEF_PAGE_DENIED_MSG)

  program_entity = program_logic.getFromKeyNameOr404(
      django_args['scope_path'])
  user_entity = user_logic.getForCurrentAccount()

  # student roles store the program in their 'scope' attribute
  filter = {'user': user_entity,
            'scope': program_entity,
            'status': 'active'}

  # check if the current user is already a student for this program
  student_role = student_logic.getForFields(filter, unique=True)

  if student_role:
    raise out_of_band.AccessViolation(
        message_fmt=DEF_ALREADY_PARTICIPATING_MSG)

  # fill the role_list with all the mentor and org admin roles for this user
  # role_list = []

  # mentor and org admin roles store the program in 'program' instead
  filter = {'user': user_entity,
            'program': program_entity,
            'status': 'active'}

  mentor_role = mentor_logic.getForFields(filter, unique=True)
  if mentor_role:
    # the current user has a role for the given program
    raise out_of_band.AccessViolation(
        message_fmt=DEF_ALREADY_PARTICIPATING_MSG)

  org_admin_role = org_admin_logic.getForFields(filter, unique=True)
  if org_admin_role:
    # the current user has a role for the given program
    raise out_of_band.AccessViolation(
        message_fmt=DEF_ALREADY_PARTICIPATING_MSG)

  # no roles found, access granted
  return
def checkIsNotStudentForProgramInScope(self, django_args, program_logic,
                                       student_logic):
  """Checks if the current user is not a student for the given
  program in django_args.

  Args:
    django_args: a dictionary with django's arguments
    program_logic: Program Logic instance
    student_logic: Student Logic instance

  Raises:
    AccessViolationResponse: if the current user has a student
                             role for the given program.
  """

  # when seeding a new entity the program key name lives in the seed dict
  if django_args.get('seed'):
    key_name = django_args['seed']['scope_path']
  else:
    key_name = django_args['scope_path']

  program_entity = program_logic.getFromKeyNameOr404(key_name)
  user_entity = user_logic.getForCurrentAccount()

  filter = {'user': user_entity,
            'scope': program_entity,
            'status': 'active'}

  # check if the current user is already a student for this program
  student_role = student_logic.getForFields(filter, unique=True)

  if student_role:
    raise out_of_band.AccessViolation(
        message_fmt=DEF_ALREADY_STUDENT_ROLE_MSG)

  return
def checkIsNotStudentForProgramOfOrg(self, django_args, org_logic, student_logic):
  """Checks if the current user has no active Student role for the program
  that the organization in the scope_path is participating in.

  Args:
    django_args: a dictionary with django's arguments
    org_logic: Organization logic instance
    student_logic: Student logic instance

  Raises:
    AccessViolationResponse: if the current user is a student for the
                             program the organization is in.
  """

  if not django_args.get('scope_path'):
    raise out_of_band.AccessViolation(message_fmt=DEF_PAGE_DENIED_MSG)

  self.checkIsUser(django_args)

  org_entity = org_logic.getFromKeyNameOr404(django_args['scope_path'])
  user_entity = self.user

  # the organization's own scope is used as the program to check against
  filter = {'scope': org_entity.scope,
            'user': user_entity,
            'status': 'active'}

  student_role = student_logic.getForFields(filter=filter, unique=True)

  if student_role:
    raise out_of_band.AccessViolation(
        message_fmt=DEF_ALREADY_STUDENT_ROLE_MSG)

  return
def checkIsNotStudentForProgramOfOrgInRequest(self, django_args, org_logic,
                                              student_logic):
  """Checks if the current user has no active Student role for the program
  that the organization in the request is participating in.

  Args:
    django_args: a dictionary with django's arguments
    org_logic: Organization logic instance
    student_logic: Student logic instance

  Raises:
    AccessViolationResponse: if the current user is a student for the
                             program the organization is in.
  """

  request_entity = request_logic.getFromIDOr404(int(django_args['id']))

  # rewrite scope_path to the requested group's key name and reuse the
  # organization-scope variant of this check
  django_args['scope_path'] = request_entity.group.key().id_or_name()

  return self.checkIsNotStudentForProgramOfOrg(django_args, org_logic, student_logic)
@allowDeveloper
def checkIsMyEntity(self, django_args, logic,
                    field_name='user', user=False):
  """Checks whether the entity belongs to the user.

  Args:
    django_args: a dictionary with django's arguments
    logic: the logic that should be used to fetch the entity
    field_name: the name of the field the entity uses to store it's owner
    user: true iff the entity stores the user's reference, false iff
      it stores the key name

  Raises:
    AccessViolation: if no entity owned by the current user matches the
      given link_id (and scope_path, when present).
  """

  self.checkIsUser(django_args)

  fields = {
      'link_id': django_args['link_id'],
      # the owner is matched either by entity reference or by key name,
      # depending on how the target model stores it
      field_name: self.user if user else self.user.key().id_or_name()
      }

  if 'scope_path' in django_args:
    fields['scope_path'] = django_args['scope_path']

  entity = logic.getForFields(fields)

  if entity:
    return

  raise out_of_band.AccessViolation(message_fmt=DEF_NOT_YOUR_ENTITY_MSG)
@allowDeveloper
def checkIsMyActiveRole(self, django_args, role_logic):
  """Checks whether the current user has the active role given by django_args.

  Args:
    django_args: a dictionary with django's arguments
    role_logic: the logic that should be used to fetch the role

  Raises:
    AccessViolation: if the role does not belong to the current user or
      if its status is not 'active'.
  """

  self.checkIsUser(django_args)

  entity = role_logic.getFromKeyFieldsOr404(django_args)

  # both the stored user reference and the link_id must match
  if entity.user.key() != self.user.key() or (
      entity.link_id != self.user.link_id):
    raise out_of_band.AccessViolation(message_fmt=DEF_NOT_YOUR_ENTITY_MSG)

  if entity.status != 'active':
    raise out_of_band.AccessViolation(message_fmt=DEF_NO_ACTIVE_ROLE_MSG)

  # this role belongs to the current user and is active
  return
@allowDeveloper
@denySidebar
def checkIsAllowedToManageRole(self, django_args, logic_for_role,
                               manage_role_logic):
  """Returns an alternate HTTP response if the user is not allowed to manage
  the role given in args.

  Args:
    django_args: a dictionary with django's arguments
    logic_for_role: determines the logic for the role in args.
    manage_role_logic: determines the logic for the role which is allowed
      to manage this role.

  Raises:
    AccessViolationResponse: if the required authorization is not met

  Returns:
    None if the given role is active and belongs to the current user.
    None if the current User has an active role (from manage_role_logic)
         that belongs to the same scope as the role that needs to be
         managed
  """

  try:
    # check if it is the user's own role
    self.checkHasRoleForScope(django_args, logic_for_role)
    self.checkIsMyEntity(django_args, logic_for_role, 'user', True)
    return
  except out_of_band.Error:
    # not the user's own role; fall through to the manager check
    pass

  # apparently it's not the user's role so check
  # if managing this role is allowed

  fields = {
      'link_id': django_args['link_id'],
      'scope_path': django_args['scope_path'],
      }

  role_entity = logic_for_role.getFromKeyFieldsOr404(fields)

  # only active roles may be managed
  if role_entity.status != 'active':
    raise out_of_band.AccessViolation(message_fmt=DEF_NO_ACTIVE_ROLE_MSG)

  # the manager must hold an active managing role in the same scope
  fields = {
      'link_id': self.user.link_id,
      'scope_path': django_args['scope_path'],
      'status': 'active'
      }

  manage_entity = manage_role_logic.getForFields(fields, unique=True)

  if not manage_entity:
    raise out_of_band.AccessViolation(message_fmt=DEF_NOT_YOUR_ENTITY_MSG)

  return
@allowSidebar
@allowDeveloper
def checkIsSurveyReadable(self, django_args, survey_logic,
                          key_name_field=None):
  """Checks whether a survey is readable.

  Args:
    django_args: a dictionary with django's arguments
    survey_logic: logic used to resolve the survey entity
    key_name_field: optional entry in django_args that holds the survey's
      key name; when absent the survey is resolved from the key fields
  """
  if key_name_field:
    survey_entity = survey_logic.getFromKeyNameOr404(
        django_args[key_name_field])
  else:
    survey_entity = survey_logic.getFromKeyFieldsOr404(django_args)

  # membership in the survey's read_access group grants access
  self.checkMembership('read', survey_entity.prefix,
                       survey_entity.read_access, django_args)
@denySidebar
@allowDeveloper
def checkIsMySurveyRecord(self, django_args, survey_logic, id_field):
  """Checks if the SurveyRecord given in the GET arguments as id_field is
  from the current user.

  Args:
    django_args: a dictionary with django's arguments
    survey_logic: Survey Logic which contains the needed Record logic
    id_field: name of the field in the GET dictionary that contains the
      Record ID.

  Raises:
    AccesViolation if:
      - There is no valid numeric record ID present in the GET dict
      - There is no SurveyRecord with the found ID
      - The SurveyRecord has not been taken by the current user
  """

  self.checkIsUser(django_args)

  user_entity = self.user

  get_dict = django_args['GET']
  record_id = get_dict.get(id_field)

  if not record_id or not record_id.isdigit():
    raise out_of_band.AccessViolation(
        message_fmt=DEF_NO_VALID_RECORD_ID)
  else:
    record_id = int(record_id)

  record_logic = survey_logic.getRecordLogic()
  record_entity = record_logic.getFromIDOr404(record_id)

  # ownership is established by comparing the datastore keys
  if record_entity.user.key() != user_entity.key():
    raise out_of_band.AccessViolation(
        message_fmt=DEF_NOT_YOUR_RECORD)
@denySidebar
@allowDeveloper
def checkIsSurveyWritable(self, django_args, survey_logic,
                          key_name_field=None):
  """Checks whether a survey is writable.

  Args:
    django_args: a dictionary with django's arguments
    survey_logic: logic used to resolve the survey entity
    key_name_field: optional entry in django_args that holds the survey's
      key name; when absent the survey is resolved from the key fields
  """
  if key_name_field:
    survey_entity = survey_logic.getFromKeyNameOr404(
        django_args[key_name_field])
  else:
    survey_entity = survey_logic.getFromKeyFieldsOr404(django_args)

  # membership in the survey's write_access group grants access
  self.checkMembership('write', survey_entity.prefix,
                       survey_entity.write_access, django_args)
@denySidebar
@allowDeveloper
def checkIsSurveyTakeable(self, django_args, survey_logic, check_time=True):
  """Checks if the survey specified in django_args can be taken.

  Uses survey.taking_access to map that string onto a check. Also checks
  for survey start and end.

  If the prefix is 'program', the scope of the survey is the program and
  the taking_access attribute means:
    mentor: user is mentor for the program
    org_admin: user is org_admin for the program
    student: user is student for the program
    user: valid user on the website

  Args:
    django_args: a dictionary with django's arguments
    survey_logic: SurveyLogic instance (or subclass)
    check_time: iff True checks if the current date is between the survey
      start and end date.

  Raises:
    AccessViolation: if the survey is outside its taking period, or the
      user lacks the role required by taking_access.
  """
  # TODO: Make this work with other prefixes perhaps by adding
  # checkmembership on 'take'.
  if django_args['prefix'] == 'gsoc_program':
    org_admin_logic = gsoc_org_admin_logic
    mentor_logic = gsoc_mentor_logic
    student_logic = gsoc_student_logic
  elif django_args['prefix'] == 'ghop_program':
    org_admin_logic = ghop_org_admin_logic
    mentor_logic = ghop_mentor_logic
    student_logic = ghop_student_logic
  else:
    # TODO: update when generic surveys are allowed
    return self.deny(django_args)

  # get the survey from django_args
  survey = survey_logic.getFromKeyFieldsOr404(django_args)

  # check if the survey can be taken now
  if check_time and not timeline_helper.isActivePeriod(survey, 'survey'):
    raise out_of_band.AccessViolation(message_fmt=DEF_PAGE_INACTIVE_MSG)

  # retrieve the role that is allowed to take this survey
  role = survey.taking_access

  if role == 'user':
    # check if the current user is registered
    return self.checkIsUser(django_args)

  # copy so the injected 'program'/'scope' keys do not leak to the caller
  django_args = django_args.copy()

  # get the survey scope
  survey_scope = survey_logic.getScope(survey)

  if role == 'mentor':
    # check if the current user is a mentor for the program in survey.scope
    django_args['program'] = survey_scope
    # program is the 'program' attribute for mentors and org_admins
    return self._checkHasRoleFor(django_args, mentor_logic, 'program')

  if role == 'org_admin':
    # check if the current user is an org admin for the program
    django_args['program'] = survey_scope
    # program is the 'program' attribute for mentors and org_admins
    return self._checkHasRoleFor(django_args, org_admin_logic,
                                 'program')

  if role == 'org':
    # check if the current user is an org admin or mentor for the program
    django_args['program'] = survey_scope

    try:
      # program is the 'program' attribute for mentors and org_admins
      return self._checkHasRoleFor(django_args, org_admin_logic,
                                   'program')
    except out_of_band.Error:
      # BUGFIX: was a bare "except:", which also swallowed real errors
      # (and SystemExit/KeyboardInterrupt); only access-check failures
      # should fall through to the mentor check
      pass

    # try to check if the current user is a mentor instead
    return self._checkHasRoleFor(django_args, mentor_logic, 'program')

  if role == 'student':
    # check if the current user is a student for the program in survey.scope
    django_args['scope'] = survey_scope
    # program is the 'scope' attribute for students
    return self.checkHasRoleForScope(django_args, student_logic)

  # unknown role
  self.deny(django_args)
@allowSidebar
@allowDeveloper
def checkIsDocumentReadable(self, django_args, key_name_field=None):
  """Checks whether a document is readable by the current user.

  Args:
    django_args: a dictionary with django's arguments
    key_name_field: optional entry in django_args that holds the
      document's key name; when absent the document is resolved from the
      key fields
  """
  if key_name_field:
    document_entity = document_logic.getFromKeyNameOr404(
        django_args[key_name_field])
  else:
    document_entity = document_logic.getFromKeyFieldsOr404(django_args)

  # membership in the document's read_access group grants access
  self.checkMembership('read', document_entity.prefix,
                       document_entity.read_access, django_args)
@denySidebar
@allowDeveloper
def checkIsDocumentWritable(self, django_args, key_name_field=None):
  """Checks whether a document is writable by the current user.

  Args:
    django_args: a dictionary with django's arguments
    key_name_field: optional entry in django_args that holds the
      document's key name; when absent the document is resolved from the
      key fields
  """
  if key_name_field:
    document_entity = document_logic.getFromKeyNameOr404(
        django_args[key_name_field])
  else:
    document_entity = document_logic.getFromKeyFieldsOr404(django_args)

  # membership in the document's write_access group grants access
  self.checkMembership('write', document_entity.prefix,
                       document_entity.write_access, django_args)
@denySidebar
@allowDeveloper
def checkDocumentList(self, django_args):
  """Checks whether the user is allowed to list documents.

  Args:
    django_args: a dictionary with django's arguments; must contain a
      'filter' dict which in turn carries the document 'prefix'

  Raises:
    AccessViolation: if the user holds none of the memberships that the
      prefix's rights checker requires for listing.
  """

  filter = django_args['filter']
  prefix = filter['prefix']

  # ask the core for the rights checker registered for this prefix
  checker = callback.getCore().getRightsChecker(prefix)
  roles = checker.getMembership('list')

  if not self.hasMembership(roles, filter):
    raise out_of_band.AccessViolation(message_fmt=DEF_NO_LIST_ACCESS_MSG)
@denySidebar
@allowDeveloper
def checkDocumentPick(self, django_args):
  """Checks whether the user has access to the specified pick url.

  Will update the 'read_access' field of django_args['GET'] with the
  membership levels the current user satisfies for the given prefix
  (or ['deny'] when there are none), so the picker only offers readable
  documents.

  Args:
    django_args: a dictionary with django's arguments

  Raises:
    AccessViolation: if no 'prefix' is present in the GET arguments.
  """

  get_args = django_args['GET']
  # make mutable in order to inject the proper read_access filter
  mutable = get_args._mutable
  get_args._mutable = True

  if 'prefix' not in get_args:
    raise out_of_band.AccessViolation(message_fmt=DEF_PREFIX_NOT_IN_ARGS_MSG)

  prefix = get_args['prefix']
  django_args['prefix'] = prefix
  django_args['scope_path'] = get_args['scope_path']

  checker = callback.getCore().getRightsChecker(prefix)
  memberships = checker.getMemberships()

  # collect every membership level the current user satisfies
  roles = []

  for key, value in memberships.iteritems():
    if self.hasMembership(value, django_args):
      roles.append(key)

  if not roles:
    roles = ['deny']

  get_args.setlist('read_access', roles)
  # restore the QueryDict's original mutability flag
  get_args._mutable = mutable
def checkCanEditTimeline(self, django_args, program_logic):
  """Checks whether this program's timeline may be edited.

  The timeline entity is looked up first (so a bogus key 404s early),
  then the actual access decision is delegated to checkIsHostForProgram.

  Args:
    django_args: a dictionary with django's arguments
    program_logic: Program Logic instance
  """

  time_line_keyname = program_logic.timeline_logic.getKeyNameFromFields(
      django_args)
  # NOTE: timeline_entity itself is unused; the lookup only validates
  # that the timeline exists
  timeline_entity = program_logic.timeline_logic.getFromKeyNameOr404(
      time_line_keyname)

  fields = program_logic.getKeyFieldsFromFields(django_args)
  self.checkIsHostForProgram(fields, logic=program_logic)
| MatthewWilkes/mw4068-packaging | src/melange/src/soc/views/helper/access.py | Python | apache-2.0 | 58,088 | [
"VisIt"
] | 1b2864d460fa5a4f3b1946b2fb13c024fbcfbbdb8b392663d4f87b58abd4f6ba |
import time
import os.path
import tensorflow as tf
import numpy as np
import networkDef as nn
import sys
from fiberfileIO import *
# Command-line configuration (TensorFlow 1.x flags API).
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_hidden', 1024, 'Number of hidden layers.')
flags.DEFINE_string('data_dir', '',
                    'Directory with the training data.')
flags.DEFINE_string('output_dir', '/root/work/Multiclass/Results/',
                    'Output directory')
flags.DEFINE_string('checkpoint_dir', '',
                    """Directory where to write model checkpoints.""")
# NOTE(review): the help string below looks copy-pasted from
# checkpoint_dir; summary_dir actually receives TensorBoard summaries.
flags.DEFINE_string('summary_dir', '/root/work/SaveTest/',
                    """Directory where to write model checkpoints.""")
flags.DEFINE_boolean('conv', False,
                     """Whether use conv version or no Conv.""")
flags.DEFINE_boolean('multiclass', False,
                     """Whether Multiclassification or Biclassification.""")
flags.DEFINE_string('fiber_name', "Fiber",
                    """In biclassification permit to know the name of the fiber to extract (By default: fiber_name = 'Fiber' which gives Fiber_extracted.vtk)""")

# Names of the 54 classes; index 0 is the reject class and is never
# written out by classification().
name_labels = [ '0', 'Arc_L_FT', 'Arc_L_FrontoParietal', 'Arc_L_TemporoParietal', 'Arc_R_FT', 'Arc_R_FrontoParietal', 'Arc_R_TemporoParietal', 'CGC_L', 'CGC_R',
                'CGH_L', 'CGH_R', 'CorpusCallosum_Genu', 'CorpusCallosum_Motor', 'CorpusCallosum_Parietal', 'CorpusCallosum_PreMotor', 'CorpusCallosum_Rostrum',
                'CorpusCallosum_Splenium', 'CorpusCallosum_Tapetum', 'CorticoFugal-Left_Motor', 'CorticoFugal-Left_Parietal', 'CorticoFugal-Left_PreFrontal',
                'CorticoFugal-Left_PreMotor', 'CorticoFugal-Right_Motor', 'CorticoFugal-Right_Parietal', 'CorticoFugal-Right_PreFrontal', 'CorticoFugal-Right_PreMotor',
                'CorticoRecticular-Left', 'CorticoRecticular-Right', 'CorticoSpinal-Left', 'CorticoSpinal-Right', 'CorticoThalamic_L_PreFrontal', 'CorticoThalamic_L_SUPERIOR',
                'CorticoThalamic_Left_Motor', 'CorticoThalamic_Left_Parietal', 'CorticoThalamic_Left_PreMotor', 'CorticoThalamic_R_PreFrontal', 'CorticoThalamic_R_SUPERIOR',
                'CorticoThalamic_Right_Motor', 'CorticoThalamic_Right_Parietal', 'CorticoThalamic_Right_PreMotor', 'Fornix_L', 'Fornix_R', 'IFOF_L', 'IFOF_R', 'ILF_L', 'ILF_R',
                'OpticRadiation_Left', 'OpticRadiation_Right', 'Optic_Tract_L', 'Optic_Tract_R', 'SLF_II_L', 'SLF_II_R', 'UNC_L', 'UNC_R']

# Per-class acceptance thresholds, aligned with name_labels.  Currently
# unused: the threshold check in run_classification is commented out.
threshold_labels = [1, 1, 0.9995, 0.99999, 1, 0.945, 0.995, 0.99999, 0.9999, 0.58995, 0.58995, 0.99995, 0.9999, 0.99995, 1, 0.9999, 1, 1, 0.35, 0.79995,
                    1, 0.9999, 0.59995, 0.89995, 0.6, 0.97495, 0.475, 0.55, 0.575, 0.875, 1, 0.8, 0.45, 0.925, 0.35, 1, 0.4, 0.35, 0.945, 0.4, 0.97495,
                    0.9999, 0.99995, 0.99995, 0.99995, 1, 0.9999, 0.99995, 0.25, 0.99995, 1, 1, 1, 1 ]
def fibername_split(fibername):
    """Split a fiber identifier of the form '<bundle name>:<fiber index>'.

    Args:
        fibername: string in the format
            [name of the fiber bundle]:[index of the fiber]

    Returns:
        (name, index) tuple, where index is an int.

    Raises:
        Exception: if fibername is not made of exactly two ':'-separated
            parts.
    """
    parts = fibername.split(":")
    if len(parts) != 2:
        raise Exception("Non valid format for the file %s. "
                        "Impossible to extract name and index of the fiber" % fibername)
    bundle_name = parts[0]
    fiber_index = int(parts[1])
    return bundle_name, fiber_index
def reformat_prediction(predictions, num_classes):
    """Group per-class fiber predictions by their source bundle file.

    Args:
        predictions: list of length num_classes; predictions[c] is a list
            of fiber identifiers of the form 'bundle:index' predicted as
            class c.
        num_classes: total number of classes.

    Returns:
        dict mapping each bundle file name to a list of num_classes
        vtkIdTypeArray objects, where entry c holds the point ids of the
        fibers of that bundle predicted as class c.
    """
    dict_pred = {}
    for num_class in range(num_classes):
        for fibername in predictions[num_class]:
            key, value = fibername_split(fibername)
            if key not in dict_pred:
                # BUGFIX: the previous implementation pre-built ONE list of
                # vtkIdTypeArray and assigned that same list object to every
                # key, so all bundles aliased the same arrays and every
                # bundle accumulated every fiber id.  Build a fresh list of
                # arrays per bundle instead.
                dict_pred[key] = [vtk.vtkIdTypeArray() for _ in range(num_classes)]
            dict_pred[key][num_class].InsertNextValue(value)
    return dict_pred
def classification(dict, output_dir, num_classes, multiclass, fiber_name):
    """Write the positive fibers of each predicted class to a .vtk file.

    Args:
        dict: mapping bundle-file name -> list of num_classes
            vtkIdTypeArray with the predicted fiber ids per class (as
            produced by reformat_prediction).
            NOTE: the parameter name shadows the builtin dict().
        output_dir: directory where the extracted bundles are written
            (created if missing).
        num_classes: total number of classes, including the reject class 0.
        multiclass: True to name outputs after the anatomical bundle in
            name_labels, False to use fiber_name.
        fiber_name: output base name used in the biclassification case.

    Side effects:
        Writes one '<name>_extracted.vtk' file per class, except for
        class 0 which is never written.
    """
    # input: predictions - prediction of the data to classify, size [num_fibers]x[num_labels]
    # input: name_labels - containing the name of all the labels (classes)
    # input: test_names - containing the name and index of each fibers
    # output: No output but at the end of this function, we write the positives fibers in one vtk file for each class
    # Except the class 0

    # Create the output directory if necessary
    if not os.path.exists(os.path.dirname(output_dir)):
        os.makedirs(output_dir)

    # one vtkAppendPolyData per non-reject class (the last slot is unused)
    append_list = np.ndarray(shape=num_classes, dtype=np.object)
    for i in xrange(num_classes-1):
        append_list[i] = vtk.vtkAppendPolyData()

    bundle_fiber = vtk.vtkPolyData()
    for fiber in dict.keys():
        bundle_fiber = read_vtk_data(fiber)
        for num_class in xrange(num_classes-1):
            # VTK >= 6 renamed AddInput to AddInputData
            if vtk.VTK_MAJOR_VERSION > 5:
                append_list[num_class].AddInputData(extract_fiber(bundle_fiber, dict[fiber][num_class+1]))
            else:
                append_list[num_class].AddInput(extract_fiber(bundle_fiber, dict[fiber][num_class+1]))

    for num_class in xrange(num_classes-1):
        append_list[num_class].Update()
        if multiclass:
            write_vtk_data(append_list[num_class].GetOutput(), output_dir+'/'+name_labels[num_class+1]+'_extracted.vtk')
        else:
            write_vtk_data(append_list[num_class].GetOutput(), output_dir+'/'+fiber_name+'_extracted.vtk')
    print ""
# run_classification(num_hidden, data_dir, output_dir, checkpoint_dir, summary_dir, conv, multiclass)
def run_classification(data_dir, output_dir, checkpoint_dir, summary_dir, num_hidden=1024, fiber_name="Fiber", conv=False, multiclass=False):
    """Classify all test fibers with a trained network and extract bundles.

    Builds the (optionally convolutional) inference graph, restores the
    most recent checkpoint from checkpoint_dir, runs every fiber of the
    'test' split in data_dir through the network, then groups the
    per-fiber predictions by bundle and writes the extracted fibers via
    classification().

    Args:
        data_dir: directory with the input records read by nn.inputs.
        output_dir: directory for the extracted .vtk bundles.
        checkpoint_dir: directory containing the trained checkpoints.
        summary_dir: directory for TensorBoard summaries.
        num_hidden: hidden layer size passed to the network.
        fiber_name: output base name in the biclassification case.
        multiclass: True for 54-class classification, False for 2-class.
        conv: True to use the convolutional network variant.
    """
    # Run evaluation on the input data set
    if multiclass:
        num_classes = 54
    else:
        num_classes = 2
    start = time.time()
    with tf.Graph().as_default() as g:
        # Build a Graph that computes the logits predictions from the
        # inference model. We'll use a prior graph built by the training

        # Non Conv Version
        if not conv:
            fibers, labels = nn.inputs(data_dir, 'test', batch_size=1, num_epochs=1, conv=False)
            logits = nn.inference(fibers, num_hidden, num_classes, is_training=False)
        # Conv Version
        else:
            fibers, labels = nn.inputs(data_dir, 'test', batch_size=1,
                                       num_epochs=1, conv=True)
            logits = nn.inference_conv(fibers, 2, 34, 50, num_hidden, num_classes, is_training=False)

        # top-1 prediction (probability and class index) per fiber
        logits = tf.nn.softmax(logits)
        predict_value, predict_class = tf.nn.top_k(logits, k=1)

        # setup the initialization of variables
        local_init = tf.local_variables_initializer()

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(summary_dir, g)

        # create the saver and session
        saver = tf.train.Saver()
        sess = tf.Session()

        # init the local variables
        sess.run(local_init)

        while True:
            # one (initially empty) list of fiber names per class
            prediction = []
            for i in range(num_classes):
                prediction.append([])

            # read in the most recent checkpointed graph and weights
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # global step is encoded in the checkpoint file name
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            else:
                print('No checkpoint file found in %s' % checkpoint_dir)
                return

            # start up the threads
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                step = 0
                while not coord.should_stop():
                    # run a single iteration of evaluation
                    # predictions = sess.run([top_k_op])
                    val, pred, name = sess.run([predict_value, predict_class, labels])
                    pred_lab = pred[0][0]
                    pred_val = val[0][0]
                    prediction[pred_lab].append(name[0])
                    # if multiclass and pred_val >= threshold_labels[pred_lab]:
                    #   prediction[pred_lab].append(name[0])
                    # elif not multiclass:
                    #   prediction[pred_lab].append(name[0])
                    step += 1
            except tf.errors.OutOfRangeError:
                # input queue exhausted: every test fiber has been seen
                summary = tf.Summary()
                summary.ParseFromString(sess.run(summary_op))
                # summary.value.add(tag='Precision @ 1', simple_value=precision)
                summary_writer.add_summary(summary, global_step)
            finally:
                coord.request_stop()

            # shutdown gracefully
            coord.join(threads)
            # if run_once:
            break
        sess.close()

    # print "prediction\n", prediction
    pred_dictionnary = reformat_prediction(prediction, num_classes)
    classification(pred_dictionnary, output_dir, num_classes, multiclass, fiber_name)
    end = time.time()
def main(_):
    """tf.app.run entry point: forward FLAGS to run_classification and
    report the elapsed wall-clock time."""
    start = time.time()
    run_classification(FLAGS.data_dir, FLAGS.output_dir, FLAGS.checkpoint_dir, FLAGS.summary_dir, FLAGS.num_hidden, FLAGS.fiber_name, FLAGS.conv, FLAGS.multiclass)
    end = time.time()
    # YELLOW/NC and convert_time come from the fiberfileIO star-import
    print YELLOW, "Classification Process took %dh%02dm%02ds" % (convert_time(end - start)), NC


if __name__ == '__main__':
    tf.app.run()
| PrinceNgattaiLam/Trafic | TraficLib/runClassification.py | Python | apache-2.0 | 9,752 | [
"VTK"
] | e23aabd00d2e021a62dd0788b4f82c7ff19e278066ec10a7bf799724cd1f89f3 |
""" Module that holds the ReportGeneratorHandler class
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN ReportGenerator
:end-before: ##END
:dedent: 2
:caption: ReportGenerator options
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import datetime
from DIRAC import S_OK, S_ERROR, rootPath, gConfig, gLogger
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.Core.Utilities import Time
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.AccountingSystem.DB.MultiAccountingDB import MultiAccountingDB
from DIRAC.Core.Utilities.Plotting import gDataCache
from DIRAC.AccountingSystem.private.MainReporter import MainReporter
from DIRAC.AccountingSystem.private.DBUtils import DBUtils
from DIRAC.AccountingSystem.private.Policies import gPoliciesList
from DIRAC.Core.Utilities.Plotting.Plots import generateErrorMessagePlot
from DIRAC.Core.Utilities.Plotting.FileCoding import extractRequestFromFileId
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.DISET.RequestHandler import RequestHandler
class ReportGeneratorHandler(RequestHandler):
"""DIRAC service class to retrieve information from the AccountingDB"""
__acDB = None
__reportRequestDict = {
"typeName": str,
"reportName": str,
"startTime": (datetime.datetime, datetime.date),
"endTime": (datetime.datetime, datetime.date),
"condDict": dict,
"grouping": str,
"extraArgs": dict,
}
@classmethod
def initializeHandler(cls, serviceInfo):
    """One-time service initialisation.

    Opens the read-only MultiAccountingDB, resolves and prepares the
    directory used to cache generated plot images (probing that it is
    writable), and registers the gMonitor activities.

    :param serviceInfo: service information dictionary provided by DISET
    :return: S_OK, or S_ERROR if the data location is not writable
    """
    multiPath = PathFinder.getDatabaseSection("Accounting/MultiDB")
    cls.__acDB = MultiAccountingDB(multiPath, readOnly=True)
    # Get data location
    reportSection = serviceInfo["serviceSectionPath"]
    dataPath = gConfig.getValue("%s/DataLocation" % reportSection, "data/accountingGraphs")
    dataPath = dataPath.strip()
    if "/" != dataPath[0]:
        # relative path: anchor it at the instance path (or DIRAC rootPath)
        dataPath = os.path.realpath("%s/%s" % (gConfig.getValue("/LocalSite/InstancePath", rootPath), dataPath))
    gLogger.info("Data will be written into %s" % dataPath)
    mkDir(dataPath)
    try:
        # probe write access by creating and removing a scratch file
        testFile = "%s/acc.jarl.test" % dataPath
        with open(testFile, "w"):
            pass
        os.unlink(testFile)
    except IOError:
        gLogger.fatal("Can't write to %s" % dataPath)
        return S_ERROR("Data location is not writable")
    gDataCache.setGraphsLocation(dataPath)
    gMonitor.registerActivity("plotsDrawn", "Drawn plot images", "Accounting reports", "plots", gMonitor.OP_SUM)
    gMonitor.registerActivity(
        "reportsRequested", "Generated reports", "Accounting reports", "reports", gMonitor.OP_SUM
    )
    return S_OK()
def __checkPlotRequest(self, reportRequest):
# If extraArgs is not there add it
if "extraArgs" not in reportRequest:
reportRequest["extraArgs"] = {}
if not isinstance(reportRequest["extraArgs"], self.__reportRequestDict["extraArgs"]):
return S_ERROR("Extra args has to be of type %s" % self.__reportRequestDict["extraArgs"])
reportRequestExtra = reportRequest["extraArgs"]
# Check sliding plots
if "lastSeconds" in reportRequestExtra:
try:
lastSeconds = int(reportRequestExtra["lastSeconds"])
except ValueError:
gLogger.error("lastSeconds key must be a number")
return S_ERROR("Value Error")
if lastSeconds < 3600:
return S_ERROR("lastSeconds must be more than 3600")
now = Time.dateTime()
reportRequest["endTime"] = now
reportRequest["startTime"] = now - datetime.timedelta(seconds=lastSeconds)
else:
# if enddate is not there, just set it to now
if not reportRequest.get("endTime", False):
reportRequest["endTime"] = Time.dateTime()
# Check keys
for key in self.__reportRequestDict:
if key not in reportRequest:
return S_ERROR("Missing mandatory field %s in plot reques" % key)
if not isinstance(reportRequest[key], self.__reportRequestDict[key]):
return S_ERROR(
"Type mismatch for field %s (%s), required one of %s"
% (key, str(type(reportRequest[key])), str(self.__reportRequestDict[key]))
)
if key in ("startTime", "endTime"):
reportRequest[key] = int(Time.toEpoch(reportRequest[key]))
return S_OK(reportRequest)
types_generatePlot = [dict]
def export_generatePlot(self, reportRequest):
"""
Plot a accounting
Arguments:
- viewName : Name of view (easy!)
- startTime
- endTime
- argsDict : Arguments to the view.
- grouping
- extraArgs
"""
retVal = self.__checkPlotRequest(reportRequest)
if not retVal["OK"]:
return retVal
reporter = MainReporter(self.__acDB, self.serviceInfoDict["clientSetup"])
gMonitor.addMark("plotsDrawn")
reportRequest["generatePlot"] = True
return reporter.generate(reportRequest, self.getRemoteCredentials())
types_getReport = [dict]
def export_getReport(self, reportRequest):
"""
Plot a accounting
Arguments:
- viewName : Name of view (easy!)
- startTime
- endTime
- argsDict : Arguments to the view.
- grouping
- extraArgs
"""
retVal = self.__checkPlotRequest(reportRequest)
if not retVal["OK"]:
return retVal
reporter = MainReporter(self.__acDB, self.serviceInfoDict["clientSetup"])
gMonitor.addMark("reportsRequested")
reportRequest["generatePlot"] = False
return reporter.generate(reportRequest, self.getRemoteCredentials())
types_listReports = [str]
def export_listReports(self, typeName):
"""
List all available plots
Arguments:
- none
"""
reporter = MainReporter(self.__acDB, self.serviceInfoDict["clientSetup"])
return reporter.list(typeName)
types_listUniqueKeyValues = [str]
def export_listUniqueKeyValues(self, typeName):
"""
List all values for all keys in a type
Arguments:
- none
"""
dbUtils = DBUtils(self.__acDB, self.serviceInfoDict["clientSetup"])
credDict = self.getRemoteCredentials()
if typeName in gPoliciesList:
policyFilter = gPoliciesList[typeName]
filterCond = policyFilter.getListingConditions(credDict)
else:
policyFilter = gPoliciesList["Null"]
filterCond = {}
retVal = dbUtils.getKeyValues(typeName, filterCond)
if not policyFilter or not retVal["OK"]:
return retVal
return policyFilter.filterListingValues(credDict, retVal["Value"])
def __generatePlotFromFileId(self, fileId):
result = extractRequestFromFileId(fileId)
if not result["OK"]:
return result
plotRequest = result["Value"]
gLogger.info("Generating the plots..")
result = self.export_generatePlot(plotRequest)
if not result["OK"]:
gLogger.error("Error while generating the plots", result["Message"])
return result
fileToReturn = "plot"
if "extraArgs" in plotRequest:
extraArgs = plotRequest["extraArgs"]
if "thumbnail" in extraArgs and extraArgs["thumbnail"]:
fileToReturn = "thumbnail"
gLogger.info("Returning %s file: %s " % (fileToReturn, result["Value"][fileToReturn]))
return S_OK(result["Value"][fileToReturn])
def __sendErrorAsImg(self, msgText, fileHelper):
retVal = generateErrorMessagePlot(msgText)
retVal = fileHelper.sendData(retVal["Value"])
if not retVal["OK"]:
return retVal
fileHelper.sendEOF()
return S_OK()
def transfer_toClient(self, fileId, token, fileHelper):
"""
Get graphs data
"""
# First check if we've got to generate the plot
if len(fileId) > 5 and fileId[1] == ":":
gLogger.info("Seems the file request is a plot generation request!")
# Seems a request for a plot!
try:
result = self.__generatePlotFromFileId(fileId)
except Exception as e:
gLogger.exception("Exception while generating plot")
result = S_ERROR("Error while generating plot: %s" % str(e))
if not result["OK"]:
self.__sendErrorAsImg(result["Message"], fileHelper)
fileHelper.sendEOF()
return result
fileId = result["Value"]
retVal = gDataCache.getPlotData(fileId)
if not retVal["OK"]:
self.__sendErrorAsImg(retVal["Message"], fileHelper)
return retVal
retVal = fileHelper.sendData(retVal["Value"])
if not retVal["OK"]:
return retVal
fileHelper.sendEOF()
return S_OK()
| ic-hep/DIRAC | src/DIRAC/AccountingSystem/Service/ReportGeneratorHandler.py | Python | gpl-3.0 | 9,358 | [
"DIRAC"
] | 919db86b7bda5190c52df8cced924cea6df462fea0611470144b68ffd9b9a951 |
"""Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the strings methods
return list_public_methods(self) + \
['string.' + method for method in list_public_methods(self.string)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
                    raise ValueError('bad method')
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
import xmlrpclib
from xmlrpclib import Fault
import SocketServer
import BaseHTTPServer
import sys
import os
import traceback
import re
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Resolves a dotted attribute name to an object.  Raises
    an AttributeError if any attribute in the chain starts with a '_'.

    If the optional allow_dotted_names argument is false, dots are not
    supported and this function operates similar to getattr(obj, attr).
    """
    names = attr.split('.') if allow_dotted_names else [attr]
    for name in names:
        # Refuse to traverse into private attributes.
        if name.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
            )
        obj = getattr(obj, name)
    return obj
def list_public_methods(obj):
    """Returns a list of attribute strings, found in the specified
    object, which represent callable attributes"""
    public = []
    for name in dir(obj):
        if name.startswith('_'):
            continue
        if hasattr(getattr(obj, name), '__call__'):
            public.append(name)
    return public
def remove_duplicates(lst):
    """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]

    Returns a copy of a list without duplicates. Every list
    item must be hashable and the order of the items in the
    resulting list is not defined.
    """
    # A set gives the same O(n) de-duplication as the original dict of
    # dummy 1s; callers rely only on the unordered-list contract.
    return list(set(lst))
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.

    This class is used to register XML-RPC method handlers
    and then to dispatch them. This class doesn't need to be
    instanced directly when used by SimpleXMLRPCServer but it
    can be instanced when used by the MultiPathXMLRPCServer.

    NOTE: this module is Python 2 code (``except Fault, fault`` syntax).
    """

    def __init__(self, allow_none=False, encoding=None):
        # funcs maps XML-RPC method names to registered callables;
        # instance (if set) handles names not found in funcs.
        self.funcs = {}
        self.instance = None
        self.allow_none = allow_none
        self.encoding = encoding

    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.

        Only one instance can be installed at a time.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.

        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.

        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.

            *** SECURITY WARNING: ***

            Enabling the allow_dotted_names options allows intruders
            to access your module's global variables and may allow
            intruders to execute arbitrary code on your machine.  Only
            use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names

    def register_function(self, function, name = None):
        """Registers a function to respond to XML-RPC requests.

        The optional name argument can be used to set a Unicode name
        for the function.
        """
        # Registered functions take precedence over the instance.
        if name is None:
            name = function.__name__
        self.funcs[name] = function

    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.

        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods' : self.system_listMethods,
                           'system.methodSignature' : self.system_methodSignature,
                           'system.methodHelp' : self.system_methodHelp})

    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.

        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall' : self.system_multicall})

    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        """Dispatches an XML-RPC method from marshalled (XML) data.

        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the preferred means
        of changing method dispatch behavior.

        Always returns a marshalled response string — errors are encoded
        as XML-RPC Faults rather than raised.
        """
        try:
            params, method = xmlrpclib.loads(data)

            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = xmlrpclib.dumps(response, methodresponse=1,
                                       allow_none=self.allow_none, encoding=self.encoding)
        except Fault, fault:
            # A Fault raised by the handler is sent back verbatim.
            response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
                                       encoding=self.encoding)
        except:
            # report exception back to server
            exc_type, exc_value, exc_tb = sys.exc_info()
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none,
                )

        return response

    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']

        Returns a list of the methods supported by the server."""

        methods = self.funcs.keys()
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods = remove_duplicates(
                        methods + self.instance._listMethods()
                    )
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods = remove_duplicates(
                        methods + list_public_methods(self.instance)
                    )
        methods.sort()
        return methods

    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]

        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.

        This server does NOT support system.methodSignature."""

        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html

        return 'signatures not supported'

    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"

        Returns a string containing documentation for the specified method."""

        method = None
        if method_name in self.funcs:
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass

        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            import pydoc
            return pydoc.getdoc(method)

    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]

        Allows the caller to package multiple XML-RPC calls into a single
        request.

        See http://www.xmlrpc.com/discuss/msgReader$1208
        """

        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']

            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                # Success is wrapped in a one-element list, failure in a
                # fault-struct dict, per the multicall convention.
                results.append([self._dispatch(method_name, params)])
            except Fault, fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                exc_type, exc_value, exc_tb = sys.exc_info()
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (exc_type, exc_value)}
                    )
        return results

    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.

        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.

        Methods beginning with an '_' are considered private and will
        not be called.
        """

        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                            )
                    except AttributeError:
                        pass

        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """

    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')

    #if not None, encode responses larger than this, if possible
    encode_threshold = 1400 #a common MTU

    #Override form StreamRequestHandler: full buffering of output
    #and no Nagle.
    wbufsize = -1
    disable_nagle_algorithm = True

    # a re to match a gzip Accept-Encoding
    aepattern = re.compile(r"""
                            \s* ([^\s;]+) \s*            #content-coding
                            (;\s* q \s*=\s* ([0-9\.]+))? #q
                            """, re.VERBOSE | re.IGNORECASE)

    def accept_encodings(self):
        # Parse the Accept-Encoding header into {coding: q-value};
        # a coding without an explicit q defaults to 1.0.
        r = {}
        ae = self.headers.get("Accept-Encoding", "")
        for e in ae.split(","):
            match = self.aepattern.match(e)
            if match:
                v = match.group(3)
                v = float(v) if v else 1.0
                r[match.group(1)] = v
        return r

    def is_rpc_path_valid(self):
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True

    def do_POST(self):
        """Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """

        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                size_remaining -= len(L[-1])
            data = ''.join(L)

            data = self.decode_request_content(data)
            if data is None:
                return #response has been sent

            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                    data, getattr(self, '_dispatch', None), self.path
                )
        except Exception, e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)

            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                self.send_header("X-traceback", traceback.format_exc())

            self.send_header("Content-length", "0")
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            if self.encode_threshold is not None:
                if len(response) > self.encode_threshold:
                    # gzip the body only if the client advertised support
                    # (non-zero q for "gzip" in Accept-Encoding).
                    q = self.accept_encodings().get("gzip", 0)
                    if q:
                        try:
                            response = xmlrpclib.gzip_encode(response)
                            self.send_header("Content-Encoding", "gzip")
                        except NotImplementedError:
                            pass
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

    def decode_request_content(self, data):
        #support gzip encoding of request
        # Returns the decoded body, or None after having already sent an
        # error response (caller must then return immediately).
        encoding = self.headers.get("content-encoding", "identity").lower()
        if encoding == "identity":
            return data
        if encoding == "gzip":
            try:
                return xmlrpclib.gzip_decode(data)
            except NotImplementedError:
                self.send_response(501, "encoding %r not supported" % encoding)
            except ValueError:
                self.send_response(400, "error decoding gzip content")
        else:
            self.send_response(501, "encoding %r not supported" % encoding)
        self.send_header("Content-length", "0")
        self.end_headers()

    def report_404 (self):
            # Report a 404 error
        self.send_response(404)
        response = 'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)

    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""
        # Logging is controlled by the owning server's logRequests flag.
        if self.server.logRequests:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.

    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inhereted
    from SimpleXMLRPCDispatcher to change this behavior.
    """

    allow_reuse_address = True

    # Warning: this is for debugging purposes only! Never set this to True in
    # production code, as will be sending out sensitive information (exception
    # and stack trace details) when exceptions are raised inside
    # SimpleXMLRPCRequestHandler.do_POST
    _send_traceback_header = False

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):
        # logRequests is consulted by the request handler's log_request().
        self.logRequests = logRequests

        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        SocketServer.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)

        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
    """Multipath XML-RPC Server
    This specialization of SimpleXMLRPCServer allows the user to create
    multiple Dispatcher instances and assign them to different
    HTTP request paths.  This makes it possible to run two or more
    'virtual XML-RPC servers' at the same port.
    Make sure that the requestHandler accepts the paths in question.
    """
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):

        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
                                    encoding, bind_and_activate)
        # Maps an HTTP request path to the dispatcher serving it.
        self.dispatchers = {}
        self.allow_none = allow_none
        self.encoding = encoding

    def add_dispatcher(self, path, dispatcher):
        self.dispatchers[path] = dispatcher
        return dispatcher

    def get_dispatcher(self, path):
        return self.dispatchers[path]

    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        # Route the request to the dispatcher registered for this path;
        # an unknown path raises KeyError, caught by the bare except below.
        try:
            response = self.dispatchers[path]._marshaled_dispatch(
               data, dispatch_method, path)
        except:
            # report low level exception back to server
            # (each dispatcher should have handled their own
            # exceptions)
            exc_type, exc_value = sys.exc_info()[:2]
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none)
        return response
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""

    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)

    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""

        # Python 2 print statements: emit CGI headers then the body.
        response = self._marshaled_dispatch(request_text)

        print 'Content-Type: text/xml'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def handle_get(self):
        """Handle a single HTTP GET request.

        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """

        code = 400
        message, explain = \
                 BaseHTTPServer.BaseHTTPRequestHandler.responses[code]

        response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        print 'Status: %d %s' % (code, message)
        print 'Content-Type: %s' % BaseHTTPServer.DEFAULT_ERROR_CONTENT_TYPE
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def handle_request(self, request_text = None):
        """Handle a single XML-RPC request passed through a CGI post method.

        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """

        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (TypeError, ValueError):
                # Missing or malformed CONTENT_LENGTH: read to EOF.
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)

            self.handle_xmlrpc(request_text)
if __name__ == '__main__':
    # Demo: serve pow() and an 'add' lambda on localhost:8000 forever
    # (Python 2 print statement).
    print 'Running XML-RPC server on port 8000'
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x,y: x+y, 'add')
    server.serve_forever()
| antb/TPT----My-old-mod | src/python/stdlib/SimpleXMLRPCServer.py | Python | gpl-2.0 | 25,681 | [
"Brian"
] | 30648bdf3db31b43b87f8c4b7ad723b49aef2364c841b6309cf77b0321e69301 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Extended PDB topology parser
============================
.. versionadded:: 0.8
This topology parser uses a PDB file to build a minimum internal structure
representation (list of atoms). The only difference from
:mod:`~MDAnalysis.topology.PDBParser` is that this parser reads a
non-standard PDB-like format in which residue numbers can be five digits
instead of four.
The topology reader reads a PDB file line by line and ignores atom numbers but
reads residue numbers up to 99,999 correctly. If you have systems containing at
least 100,000 residues then you need to use a different file format that can
handle such residue numbers.
.. SeeAlso::
* :mod:`MDAnalysis.topology.PDBParser`
* :class:`MDAnalysis.coordinates.PDB.ExtendedPDBReader`
* :class:`MDAnalysis.core.universe.Universe`
Classes
-------
.. autoclass:: ExtendedPDBParser
:members:
:inherited-members:
"""
from __future__ import absolute_import
from . import PDBParser
class ExtendedPDBParser(PDBParser.PDBParser):
    """Parser that handles non-standard "extended" PDB file.

    Extended PDB files (MDAnalysis format specifier *XPDB*) may contain residue
    sequence numbers up to 99,999 by utilizing the insertion character field of
    the PDB standard.

    Creates a Topology with the following Attributes (if present):
     - serials
     - names
     - altLocs
     - chainids
     - tempfactors
     - occupancies
     - resids
     - resnames
     - segids

    .. SeeAlso:: :class:`MDAnalysis.coordinates.PDB.ExtendedPDBReader`

    .. versionadded:: 0.8
    """
    # Format specifier: selects this parser when a Universe is built with
    # format/topology_format 'XPDB'; all parsing logic is inherited from
    # PDBParser.PDBParser.
    format = 'XPDB'
| kain88-de/mdanalysis | package/MDAnalysis/topology/ExtendedPDBParser.py | Python | gpl-2.0 | 2,608 | [
"MDAnalysis"
] | 6af94f32e7c9ddb25a9c25426a0f23b412e48ba40c9f3e91c043aa8f58d9cab5 |
#!/usr/bin/env python
# file parse.py: parsers for map file, distance matrix file, env file
__author__ = "Rob Knight"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Rob Knight", "Daniel McDonald", "Greg Caporaso",
"Justin Kuczynski", "Cathy Lozupone", "Jens Reeder",
"Antonio Gonzalez Pena", "Jai Ram Rideout", "Will Van Treuren",
"Yoshiki Vazquez-Baeza", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from string import strip
from collections import defaultdict
import os
from os.path import expandvars
import re
from types import GeneratorType
from numpy import concatenate, repeat, zeros, nan, asarray
from numpy.random import permutation
from skbio.stats.ordination import OrdinationResults
from skbio.parse.record_finder import LabeledRecordFinder
from cogent.parse.tree import DndParser
from skbio.parse.sequences import parse_fastq
from skbio.parse.sequences.fasta import FastaFinder
from skbio.sequence import DNA
from skbio.io.util import open_file
from cogent.core.tree import PhyloNode
def is_casava_v180_or_later(header_line):
    """True if this file is generated by Illumina software post-casava 1.8

    CASAVA >= 1.8 read identifiers have ten colon-separated fields, the
    eighth of which is the is-filtered flag ('Y' or 'N').

    :param header_line: a fastq header line (must start with '@')
    :raises AssertionError: if header_line does not start with '@'
    """
    assert header_line.startswith('@'),\
        "Bad fastq file passed as input. Header line must start with '@'."
    fields = header_line.split(':')

    # Require the filter field to be exactly 'Y' or 'N'; the previous
    # substring test (fields[7] in 'YN') was also True for an empty field.
    return len(fields) == 10 and fields[7] in ('Y', 'N')
def MinimalSamParser(data):
    """Yield the tab-separated fields of each alignment line in data.

    Blank lines and '@' header lines are skipped.
    """
    for raw_line in data:
        record = raw_line.strip()
        if record and not record.startswith('@'):
            yield record.split('\t')
class QiimeParseError(Exception):
    """Base exception for malformed or unreadable QIIME input files."""
    pass
class IlluminaParseError(QiimeParseError):
    """Raised for malformed Illumina-format input files."""
    pass
def parse_newick(lines, constructor=PhyloNode):
    """Return PhyloNode from newick file handle stripping quotes from tip names

    This function wraps cogent.parse.tree.DndParser stripping
    matched leading/trailing single quotes from tip names, and returning
    a PhyloNode object by default (alternate constructor can be passed
    with constructor=).

    Stripping of quotes is essential for many applications in Qiime, as
    the tip names are frequently matched to OTU ids, and if the tip name
    is read in with leading/trailing quotes, node.Name won't match to the
    corresponding OTU identifier. Disaster follows.
    """
    # unescape_name=True is what makes DndParser drop the quotes.
    return DndParser(lines, constructor=constructor, unescape_name=True)
def parse_mapping_file(lines, strip_quotes=True, suppress_stripping=False):
    """Parser for map file that relates samples to metadata.

    Format: header line with fields
            optionally other comment lines starting with #
            tab-delimited fields

    :param lines: filepath, or an iterable of lines
    :param strip_quotes: remove double quotes from every field
    :param suppress_stripping: do not strip surrounding whitespace
    :return: (mapping_data, header, comments) — list of row field-lists,
        list of column names, and the non-header '#' lines
    :raises QiimeParseError: on an inaccessible filepath, a missing
        header line, or an empty mapping table
    """
    if hasattr(lines, "upper"):
        # Try opening if a string was passed ('r' instead of the removed
        # Python-3.11 'U' mode; universal newlines are the text default).
        try:
            lines = open(lines, 'r')
        except IOError:
            raise QiimeParseError("A string was passed that doesn't refer "
                                  "to an accessible filepath.")

    if strip_quotes:
        if suppress_stripping:
            # remove quotes but not spaces
            strip_f = lambda x: x.replace('"', '')
        else:
            # remove quotes and spaces
            strip_f = lambda x: x.replace('"', '').strip()
    else:
        if suppress_stripping:
            # don't remove quotes or spaces
            strip_f = lambda x: x
        else:
            # remove spaces but not quotes
            strip_f = lambda x: x.strip()

    # Create lists to store the results
    mapping_data = []
    header = []
    comments = []

    # Begin iterating over lines
    for line in lines:
        line = strip_f(line)
        if not line or (suppress_stripping and not line.strip()):
            # skip blank lines when not stripping lines
            continue

        if line.startswith('#'):
            # First '#' line is the header; subsequent ones are comments.
            line = line[1:]
            if not header:
                header = line.strip().split('\t')
            else:
                comments.append(line)
        else:
            # Will add empty string to empty fields.
            # List comprehension instead of bare map(): the original map()
            # result has no len()/extend() under Python 3.
            tmp_line = [strip_f(field) for field in line.split('\t')]
            if len(tmp_line) < len(header):
                tmp_line.extend([''] * (len(header) - len(tmp_line)))
            mapping_data.append(tmp_line)
    if not header:
        raise QiimeParseError("No header line was found in mapping file.")
    if not mapping_data:
        raise QiimeParseError("No data found in mapping file.")

    return mapping_data, header, comments
def parse_mapping_file_to_dict(*args, **kwargs):
    """Parse a mapping file and return its contents as a 2-deep dict.

    Accepts exactly the same arguments as parse_mapping_file. The first
    field of each row is assumed to be the sample id, e.g.
    {'sample1': {'age': '3', 'sex': 'male'}, 'sample2': ...}.

    Returns (mapping_dict, comment_lines).
    """
    data, header, comments = parse_mapping_file(*args, **kwargs)
    as_dict = mapping_file_to_dict(data, header)
    return as_dict, comments
def mapping_file_to_dict(mapping_data, header):
    """Convert mapping rows (list of lists) into a 2-deep dict.

    Outer keys are sample ids (column 0); each inner dict maps the
    remaining header names to that row's values.
    """
    map_dict = {}
    for row in mapping_data:
        sample_id = row[0]
        # skip header[0] (the sample-id column itself)
        map_dict[sample_id] = {header[j]: row[j]
                               for j in range(1, len(header))}
    return map_dict
def parse_prefs_file(prefs_string):
    """Returns prefs dict evaluated from prefs_string.

    prefs_string: read buffer from prefs file or string containing a
        Python dict literal; it is handed directly to eval().

    Raises QiimeParseError when the evaluated object cannot be converted
    to a dict.

    SECURITY NOTE(review): eval() executes arbitrary code from the prefs
    file -- only use on trusted input. ast.literal_eval would be the safe
    alternative, but may reject prefs files the current code accepts.
    """
    try:
        prefs = dict(eval(prefs_string))
    except TypeError:
        raise QiimeParseError(
            "Invalid prefs file. Prefs file must contain a valid prefs dictionary.")
    return prefs
def group_by_field(table, name):
    """Group row headers (first column) by the value of column *name*.

    table: list of rows; table[0] is the header row.
    Returns {field_state: [row_headers]}; raises ValueError when *name*
    is not among the headers.
    """
    headers = table[0]
    if name not in headers:
        raise ValueError("Couldn't find name %s in headers: %s" %
                         (name, headers))
    col = headers.index(name)
    grouped = defaultdict(list)
    for record in table[1:]:
        grouped[record[col]].append(record[0])
    return grouped
def group_by_fields(table, names):
    """Returns dict of (field_states): [row_headers] from table.

    Use to extract info from table based on combinations of fields.

    table: list of rows; table[0] is the header row.
    names: column names whose value-tuples become the dict keys.
    """
    header = table[0]
    # Materialize the indices as a list: the original used map(), whose
    # Python 3 iterator would be exhausted after the first row, silently
    # keying every later row under an empty tuple. Identical on Python 2.
    col_indices = [header.index(name) for name in names]
    result = defaultdict(list)
    for row in table[1:]:
        states = tuple(row[i] for i in col_indices)
        result[states].append(row[0])
    return result
def parse_distmat(lines):
    """Parser for distance matrix file (e.g. UniFrac dist matrix).

    The examples I have of this file are just sample x sample tab-delimited
    text, so easiest way to handle is just to convert into a numpy array
    plus a list of field names.

    lines: iterable of lines; the header row must begin with a tab.
    Returns (header, distances) -- the sample names and a numpy array of
    floats, one row per non-header line.
    """
    header = None
    result = []
    for line in lines:
        if line[0] == '\t':  # is header
            # NOTE(review): map/strip are Python 2 idioms (string.strip and a
            # list-returning map); under Python 3 this yields a lazy map object.
            header = map(strip, line.split('\t')[1:])
        else:
            result.append(map(float, line.split('\t')[1:]))
    return header, asarray(result)
def parse_matrix(lines):
    """Parser for a matrix file Tab delimited. skips first lines if led
    by '#', assumes column headers line starts with a tab

    Returns (col_headers, row_headers, data) where data is a numpy float
    array with one row per non-header, non-comment line.
    """
    col_headers = None
    result = []
    row_headers = []
    for line in lines:
        if line[0] == '#':
            continue
        if line[0] == '\t':  # is header
            # Python 2 idiom: map(strip, ...) returns a list of names here.
            col_headers = map(strip, line.split('\t')[1:])
        else:
            entries = line.split('\t')
            result.append(map(float, entries[1:]))
            row_headers.append(entries[0])
    return col_headers, row_headers, asarray(result)
def parse_distmat_to_dict(table):
    """Parse a distance matrix (as lines) into a 2-deep dict of sample ids.

    table: the matrix file as an iterable of lines. The column and row
    headers must match (asserted).
    """
    col_headers, row_headers, data = parse_matrix(table)
    assert(col_headers == row_headers)
    result = defaultdict(dict)
    for i, sample_x in enumerate(col_headers):
        for j, sample_y in enumerate(row_headers):
            result[sample_x][sample_y] = data[i][j]
    return result
def parse_bootstrap_support(lines):
    """Parse bootstrap/jackknife support values from tab-delimited text.

    Non-comment lines are whitespace-split: the first token is the node
    name, the second its support value. Returns {node: float}.
    """
    support = {}
    for raw in lines:
        if raw[0] == '#':
            continue
        tokens = raw.strip().split()
        support[tokens[0]] = float(tokens[1])
    return support
def parse_rarefaction_data(lines):
    """Parse a rarefaction data file into a dict of plot components.

    Recognized line prefixes:
      '#'      -> text appended to data['headers']
      'xaxis'  -> list of floats stored in data['xaxis']
      '>>'     -> series name appended to data['options']
      'series' -> float list stored under the most recent option
      'error'  -> float list stored under the most recent option
      'color'  -> color string stored under the most recent option
    """
    data = {}
    data['headers'] = []
    data['options'] = []
    data['xaxis'] = []
    data['series'] = {}
    data['error'] = {}
    data['color'] = {}
    for l in lines:
        if l.startswith('#'):
            data['headers'].append(l.strip('#').strip())
            continue
        if l.startswith('xaxis'):
            data['xaxis'] = [float(v) for v in l[6:].strip().split('\t')]
            continue
        if l.startswith('>>'):
            data['options'].append(l.strip('>').strip())
            continue
        if l.startswith('series'):
            data['series'][data['options'][len(data['options']) - 1]] = \
                [float(v) for v in l[7:].strip().split('\t')]
            continue
        if l.startswith('error'):
            data['error'][data['options'][len(data['options']) - 1]] = \
                [float(v) for v in l[6:].strip().split('\t')]
        if l.startswith('color'):
            data['color'][data['options'][len(data['options']) - 1]] = \
                str(l[6:].strip())
            # Python 2 print statement: warns (to stdout) when a color
            # entry is empty for the current option.
            if(len(str(l[6:].strip())) < 1):
                print data['options'][len(data['options']) - 1]
    return data
def parse_rarefaction_record(line):
    """Return (rarefaction_fn, [data]) for one tab-delimited record.

    line: '<filename>\t<v1>\t<v2>...'; values that fail float() become
    nan so malformed cells don't abort the whole parse.
    """
    def float_or_nan(v):
        try:
            return float(v)
        except ValueError:
            return nan
    entries = line.split('\t')
    # Return a real list: on Python 2 map() already did, but on Python 3
    # it would hand callers a one-shot iterator instead.
    return entries[0], [float_or_nan(v) for v in entries[1:]]
def parse_rarefaction(lines):
    """Function for parsing rarefaction files specifically for use in
    make_rarefaction_plots.py

    Returns (col_headers, comments, rarefaction_fns, rarefaction_data):
    the tab-split header row, the '#' comment lines, and the per-record
    filenames and numeric rows (see parse_rarefaction_record).
    """
    col_headers = []
    comments = []
    rarefaction_data = []
    rarefaction_fns = []
    for line in lines:
        if line[0] == '#':
            # is comment
            comments.append(line)
        elif line[0] == '\t':
            # is header
            # Python 2 idiom: map(strip, ...) returns a list here.
            col_headers = map(strip, line.split('\t'))
        else:
            # is rarefaction record
            rarefaction_fn, data = parse_rarefaction_record(line)
            rarefaction_fns.append(rarefaction_fn)
            rarefaction_data.append(data)
    return col_headers, comments, rarefaction_fns, rarefaction_data
def parse_coords(lines):
    """Parse skbio's ordination results file into coords, labels, eigvals,
    pct_explained.

    Returns:
    - list of sample labels in order
    - array of coords (rows = samples, cols = axes in descending order)
    - list of eigenvalues
    - list of percent variance explained

    For the file format check skbio.stats.ordination.OrdinationResults.read;
    the heavy lifting is delegated entirely to skbio's own parser.
    """
    results = OrdinationResults.read(lines)
    return (results.site_ids, results.site, results.eigvals,
            results.proportion_explained)
def parse_rarefaction_fname(name_string):
    """Split a rarefaction filename into (base, seqs_per_sample, iters, ext).

    Expects names shaped like '<base>_<seqs>_<iteration>.<ext>'; the two
    trailing underscore-separated fields are returned as ints.
    """
    root, ext = os.path.splitext(name_string)
    parts = root.split("_")
    base_name = "_".join(parts[:-2])
    return base_name, int(parts[-2]), int(parts[-1]), ext
def parse_taxonomy(infile):
    """Parse a taxonomy assignment file into {otu_id: [taxon, ...]}.

    Typical lines look like:
        3 SAM1_32 \t Root;Bacteria;Fi... \t 0.9
    i.e. sequence identifier, ';'-separated taxonomy assignment, and a
    quality score; the BLAST assigner adds a best-hit id as a fourth
    field. Only the first whitespace token of field 1 is used as the
    OTU id; everything past the taxonomy field (confidence, hit id) is
    ignored. The taxonomy string is split via taxa_split.
    """
    assignments = {}
    for raw in infile:
        if not raw or raw.startswith('#'):
            continue
        fields = raw.rstrip("\n").split('\t')
        otu_id = fields[0].split(' ')[0]
        assignments[otu_id] = taxa_split(fields[1])
    return assignments
parse_observation_metadata = parse_taxonomy
def taxa_split(taxa_string):
    """Split a ';'-delimited taxonomy string into whitespace-trimmed taxa."""
    return [taxon.strip() for taxon in taxa_string.split(';')]
def parse_taxonomy_to_otu_metadata(
        lines, labels=['taxonomy', 'score'], process_fs=[taxa_split, float]):
    """ Return a dict mapping otu identifier to dict of otu metadata

    lines: file handle or list of lines - format should be:
        otu_id <tab> metadata entry 1 <tab> metadata entry 2 <tab> ...
    labels: list of labels for metadata entries, used as keys in the
        internal dicts. Each internal dict will have only as many entries
        as there are labels (extra metadata entries in the input file
        will be ignored).
    process_fs: functions applied to each metadata entry -- extra
        process functions beyond len(labels) are ignored; too few raise
        ValueError.
    """
    result = {}
    for line in lines:
        line = line.strip()
        fields = line.split('\t')
        id_ = fields[0].split()[0]
        result[id_] = {}
        for i, field in enumerate(fields[1:]):
            try:
                label = labels[i]
            except IndexError:
                # more metadata columns than labels: ignore the extras
                continue
            # Look the function up BEFORE calling it: the original wrapped
            # the call itself in `except IndexError`, so an IndexError
            # raised inside a process function was misreported as
            # "too few process functions".
            try:
                process_f = process_fs[i]
            except IndexError:
                raise ValueError(
                    "Too few process functions provided (n=%d)." %
                    len(process_fs))
            result[id_][label] = process_f(field)
    return result
def process_otu_table_sample_ids(sample_id_fields):
    """Split an OTU-table sample-ID line into (sample_ids, has_metadata).

    A trailing 'Consensus Lineage' / 'OTU Metadata' / 'taxonomy' column
    (compared case- and space-insensitively) marks a metadata column; it
    is excluded from the returned sample ids. An empty field list raises
    ValueError.
    """
    if not sample_id_fields:
        raise ValueError('Error parsing sample ID line in OTU table. Fields are %s'
                         % ' '.join(sample_id_fields))
    # Normalize the last header so case and embedded spaces don't matter.
    normalized = sample_id_fields[-1].strip().replace(' ', '').lower()
    has_metadata = normalized in ('consensuslineage', 'otumetadata', 'taxonomy')
    if has_metadata:
        sample_ids = sample_id_fields[:-1]
    else:
        sample_ids = sample_id_fields
    return sample_ids, has_metadata
def parse_classic_otu_table(lines, count_map_f=int, remove_empty_rows=False):
    """parses a classic otu table (sample ID x OTU ID map)

    lines: iterable of table lines.
    count_map_f: dtype/cast applied to counts (falls back to float when
        a row contains values that fail the cast).
    remove_empty_rows: skip OTU rows whose counts are all zero.

    Returns tuple: sample_ids, otu_ids, matrix of OTUs(rows) x samples(cols),
    and lineages from infile.
    """
    otu_table = []
    otu_ids = []
    metadata = []
    sample_ids = []
    # iterate over lines in the OTU table -- keep track of line number
    # to support legacy (Qiime 1.2.0 and earlier) OTU tables
    for i, line in enumerate(lines):
        line = line.strip()
        if line:
            if (i == 1 or i == 0) and line.startswith('#OTU ID') and not sample_ids:
                # we've got a legacy OTU table
                try:
                    sample_ids, has_metadata = process_otu_table_sample_ids(
                        line.strip().split('\t')[1:])
                except ValueError:
                    raise ValueError("Error parsing sample IDs in OTU table. Appears to be a" +
                                     " legacy OTU table. Sample ID line:\n %s" % line)
            elif not line.startswith('#'):
                if not sample_ids:
                    # current line is the first non-space, non-comment line
                    # in OTU table, so contains the sample IDs
                    try:
                        sample_ids, has_metadata = process_otu_table_sample_ids(
                            line.strip().split('\t')[1:])
                    except ValueError:
                        raise ValueError("Error parsing sample IDs in OTU table." +
                                         " Sample ID line:\n %s" % line)
                else:
                    # current line is OTU line in OTU table
                    fields = line.split('\t')
                    if has_metadata:
                        # if there is OTU metadata the last column gets appended
                        # to the metadata list
                        # added in a try/except to handle OTU tables containing
                        # floating numbers
                        try:
                            valid_fields = asarray(
                                fields[1:-1],
                                dtype=count_map_f)
                        except ValueError:
                            valid_fields = asarray(fields[1:-1], dtype=float)
                        # validate that there are no empty rows
                        if remove_empty_rows and (valid_fields >= 0).all() and \
                                sum(valid_fields) == 0.0:
                            continue
                        # Python 2 idiom: map(strip, ...) returns a list here.
                        metadata.append(map(strip, fields[-1].split(';')))
                    else:
                        # otherwise all columns are appended to otu_table
                        # added in a try/except to handle OTU tables containing
                        # floating numbers
                        try:
                            valid_fields = asarray(
                                fields[1:],
                                dtype=count_map_f)
                        except ValueError:
                            valid_fields = asarray(fields[1:], dtype=float)
                        # validate that there are no empty rows
                        if remove_empty_rows and (valid_fields >= 0.0).all() and \
                                sum(valid_fields) == 0.0:
                            continue
                        otu_table.append(valid_fields)
                    # grab the OTU ID
                    otu_id = fields[0].strip()
                    otu_ids.append(otu_id)
    return sample_ids, otu_ids, asarray(otu_table), metadata
parse_otu_table = parse_classic_otu_table
def parse_taxa_summary_table(lines):
    """Parse a taxa summary table; returns (sample_ids, taxa_ids, data).

    Thin wrapper around parse_classic_otu_table with float counts; the
    metadata component of its result is discarded.
    """
    sample_ids, taxa_ids, data, _ = parse_classic_otu_table(
        lines, count_map_f=float)
    return sample_ids, taxa_ids, data
def make_envs_dict(abund_mtx, sample_names, taxon_names):
    """Build an envs dict suitable for unifrac from an abundance matrix.

    abund_mtx: samples (rows) x taxa (columns) numpy 2d array.
    sample_names: list, length = number of rows.
    taxon_names: list, length = number of columns.

    Zero counts are dropped so each taxon's inner dict stays sparse.
    """
    n_samples, n_taxa = abund_mtx.shape
    if (n_samples, n_taxa) != (len(sample_names), len(taxon_names)):
        raise ValueError(
            "Shape of matrix %s doesn't match # samples and # taxa (%s and %s)" %
            (abund_mtx.shape, n_samples, n_taxa))
    samples = asarray(sample_names)
    envs_dict = {}
    for col, taxon_name in enumerate(taxon_names):
        counts = abund_mtx[:, col]
        nonzero = counts.nonzero()  # removes zero values to reduce memory
        envs_dict[taxon_name] = dict(zip(samples[nonzero], counts[nonzero]))
    return envs_dict
def fields_to_dict(lines, delim='\t', strip_f=lambda s: s.strip()):
    """makes a dict where first field is key, rest are vals.

    lines: iterable of delimited strings.
    delim: field delimiter (default tab).
    strip_f: applied to each field when truthy; pass None to skip
        stripping. The default is equivalent to the old Python 2
        string.strip but also works on Python 3, where that name and a
        subscriptable map() no longer exist.
    Lines whose first field is empty are skipped.
    """
    result = {}
    for line in lines:
        # skip empty lines
        if strip_f:
            fields = [strip_f(f) for f in line.split(delim)]
        else:
            fields = line.split(delim)
        if not fields[0]:  # empty string in first field implies problem
            continue
        result[fields[0]] = fields[1:]
    return result
def parse_qiime_parameters(lines):
    """Parse a qiime parameters file into a 2-level dict.

    Keys: script id -> parameter id -> value string. 'False'/'None'
    values (any case) omit the option entirely; 'True' stores None (a
    flag with no argument). Missing keys look up as {} (defaultdict).
    """
    result = defaultdict(dict)
    for raw in lines:
        entry = raw.strip()
        if not entry or entry.startswith('#'):
            continue
        hash_idx = entry.find('#')
        # A pound sign only starts an inline comment if it is preceded
        # by whitespace.
        if hash_idx > 0 and entry[hash_idx - 1].isspace():
            entry = entry[:hash_idx].rstrip()
        pieces = entry.split(None, 1)
        script_id, parameter_id = pieces[0].split(':')
        if len(pieces) < 2:
            # key with no value at all: skip
            continue
        value = pieces[1]
        upper = value.upper()
        if upper in ('FALSE', 'NONE'):
            continue
        if upper == 'TRUE':
            value = None
        result[script_id][parameter_id] = value
    return result
def parse_qiime_config_file(qiime_config_file):
    """Parse qiime_config lines into {param_id: value}.

    Blank lines and '#' comments are skipped. Values are the remaining
    whitespace-joined tokens with environment variables expanded; a
    parameter with no value maps to None.
    """
    settings = {}
    for raw in qiime_config_file:
        entry = raw.strip()
        # ignore blank lines or lines beginning with '#'
        if not entry or entry.startswith('#'):
            continue
        tokens = entry.split()
        settings[tokens[0]] = expandvars(' '.join(tokens[1:])) or None
    return settings
def parse_qiime_config_files(qiime_config_files):
    """Parse an ordered list of qiime_config files.

    The order must run least important to most important: values from
    later files overwrite those from earlier ones. Files that raise
    IOError are silently skipped. Unknown keys look up as None
    (defaultdict).
    """
    def _return_none():
        return None
    merged = defaultdict(_return_none)
    for config_file in qiime_config_files:
        try:
            merged.update(parse_qiime_config_file(config_file))
        except IOError:
            # unreadable config file: best-effort, keep going
            pass
    return merged
def parse_tmp_to_final_filepath_map_file(lines):
    """Parses poller maps of tmp -> final file names.

    Each line is whitespace-delimited; every field but the last is a
    temporary input file, the last is the final output file, e.g.:
        tmpA1.txt tmpA2.txt tmpA3.txt A.txt
    Returns (list of input-file lists, list of output filepaths).
    """
    infiles_lists = []
    out_filepaths = []
    for line in lines:
        parts = line.split()
        out_filepaths.append(parts.pop())
        infiles_lists.append(parts)
    return infiles_lists, out_filepaths
def parse_metadata_state_descriptions(state_string):
    """From string in format 'col1:good1,good2;col2:good1' return dict.

    Splits on ';' into column clauses, then on the FIRST ':' only (so
    category names may themselves contain colons), then on ',' for the
    allowed values. Returns {column_name: set(values)}; all pieces are
    whitespace-stripped.
    """
    result = {}
    state_string = state_string.strip()
    if state_string:
        # Comprehensions instead of map(strip, ...): identical on
        # Python 2 and also valid on Python 3 (string.strip is gone there).
        for clause in [c.strip() for c in state_string.split(';')]:
            # split on the first colon to account for category names with
            # colons
            colname, vals = [p.strip() for p in clause.split(':', 1)]
            result[colname] = set(v.strip() for v in vals.split(','))
    return result
def parse_illumina_line(l, barcode_length, rev_comp_barcode,
                        barcode_in_sequence=False):
    """Parses a single line of Illumina data.

    l: one ':'-delimited Illumina read line.
    barcode_length: number of barcode bases.
    rev_comp_barcode: reverse-complement the barcode (via the imported
        DNA sequence class).
    barcode_in_sequence: when True the barcode is the read-sequence
        prefix (and is trimmed from sequence and quality); otherwise it
        comes from the '#' subfield of the Y-position field.

    Returns a dict of the parsed read fields.
    """
    fields = l.strip().split(':')
    y_position_subfields = fields[4].split('#')
    y_position = int(y_position_subfields[0])
    sequence = fields[5]
    qual_string = fields[6]
    if barcode_in_sequence:
        # barcode leads the read: strip it from both seq and qual
        barcode = sequence[:barcode_length]
        sequence = sequence[barcode_length:]
        qual_string = qual_string[barcode_length:]
    else:
        barcode = y_position_subfields[1][:barcode_length]
    if rev_comp_barcode:
        barcode = str(DNA(barcode).rc())
    result = {
        'Full description': ':'.join(fields[:5]),
        'Machine Name': fields[0],
        'Channel Number': int(fields[1]),
        'Tile Number': int(fields[2]),
        'X Position': int(fields[3]),
        'Y Position': y_position,
        'Barcode': barcode,
        'Full Y Position Field': fields[4],
        'Sequence': sequence,
        'Quality Score': qual_string}
    return result
def parse_qual_score(infile, value_cast_f=int):
    """Load per-sequence quality scores into a dict of {seq_id: scores}."""
    return dict(MinimalQualParser(infile, value_cast_f))
def MinimalQualParser(infile, value_cast_f=int, full_header=False):
    """Yield (seq_id, scores_array) pairs from a qual file.

    infile: fasta-like qual records, grouped by FastaFinder.
    value_cast_f: dtype/cast applied to each score (default int).
    full_header: when False only the first whitespace token of the
        record header is used as the id.

    Raises QiimeParseError when a record's scores fail the cast.
    """
    for rec in FastaFinder(infile):
        curr_id = rec[0][1:]
        # scores may continue over several lines; join them first
        curr_qual = ' '.join(rec[1:])
        try:
            parts = asarray(curr_qual.split(), dtype=value_cast_f)
        except ValueError:
            raise QiimeParseError(
                "Invalid qual file. Check the format of the qual files.")
        if full_header:
            curr_pid = curr_id
        else:
            curr_pid = curr_id.split()[0]
        yield (curr_pid, parts)
def parse_qual_scores(qual_files):
    """Load qual scores from many files into one {id: qual_scores} dict.

    No filtering is performed at this step; ids repeated across files
    are overwritten by later files.
    """
    qual_mappings = {}
    for qual_file in qual_files:
        qual_mappings.update(parse_qual_score(qual_file))
    return qual_mappings
def parse_trflp(lines):
    """Load a trflp file and returns a header and data lists.

    Returns (sample_ids, otu_ids, data) with data transposed to
    bins x samples. Non-alphanumeric id characters are replaced, rows
    summing to zero are dropped, and rows are padded/extended so every
    row has the same number of bins.
    """
    sample_ids = []
    otu_ids = []
    data = []
    non_alphanum_mask = re.compile('[^\w|^\t]')
    # not sure why the above regex doesn't cover the following regex...
    dash_space_mask = re.compile('[_ -]')
    for i, line in enumerate(lines):
        elements = line.strip('\n').split('\t')
        # special handling for the first line only
        if i == 0:
            # validating if the file has a header
            if elements[0] == '':
                for otu_id in elements[1:]:
                    otu_ids.append(non_alphanum_mask.sub('_', otu_id))
                continue
            else:
                # no header: synthesize 'Bin NNN' ids for the first row's columns
                for j, otu_id in enumerate(elements[1:]):
                    otu_ids.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))
        # handling of all other lines
        current_row = []
        # converting each value in the row to int
        for count in elements[1:]:
            try:
                current_row.append(int(round(float(count), 0)))
            except ValueError:
                current_row.append(0)
        # if the sum of all the values is equial to 0 ignore line
        if sum(current_row) == 0:
            continue
        # adding sample header to list
        sample_ids.append(non_alphanum_mask.sub('.',
                                                dash_space_mask.sub('.', elements[0])))
        # validating the size of the headers to add missing columns
        # this is only valid when there is no header
        if len(current_row) > len(otu_ids):
            # modify header data
            extra_cols = []
            for j in range(len(otu_ids), len(current_row)):
                extra_cols.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))
            # modify data
            for j in range(len(data)):
                data[j].extend([0] * (len(current_row) - len(otu_ids)))
            otu_ids.extend(extra_cols)
        elif len(current_row) < len(otu_ids):
            # modify data
            current_row.extend([0] * (len(otu_ids) - len(current_row)))
        data.append(current_row)
    return sample_ids, otu_ids, asarray(data).transpose()
def parse_denoiser_mapping(denoiser_map):
    """Read a denoiser mapping file into {denoised_id: [ids...]}.

    Each line is '<denoised_id>:\t<orig_id>\t...'. The denoised id
    itself (with the trailing ':' removed) is prepended to its list of
    original ids. Duplicate denoised ids raise ValueError.
    """
    mapping = {}
    for raw in denoiser_map:
        fields = raw.strip().split('\t')
        key = fields[0].rstrip(':')
        if key in mapping:
            # just a healthy dose of paranoia
            raise ValueError("Duplicated identifiers in denoiser mapping file: "
                             "are you sure you merged the correct files?")
        mapping[key] = [key] + fields[1:]
    return mapping
def parse_otu_map(otu_map_f, otu_ids_to_exclude=None, delim='_'):
    """Parse an OTU map into a sparse dict {(otu_idx, sample_idx): count}.

    Much more memory efficient than fields_to_dict, and the result dict
    is in the format expected by table_factory for creating OtuTable
    objects. Sample ids are derived from each sequence id by splitting
    on `delim` and taking the first piece.

    Returns (sparse counts, sample_ids, otu_ids); indices in the count
    keys are positions within those two id lists.
    """
    if otu_ids_to_exclude is None:
        otu_ids_to_exclude = {}
    counts = defaultdict(int)
    sample_ids = []
    sample_id_idx = {}
    otu_ids = []
    for line in otu_map_f:
        fields = line.strip().split('\t')
        otu_id = fields[0]
        if otu_id in otu_ids_to_exclude:
            continue
        row = len(otu_ids)  # index this otu will occupy once appended
        for seq_id in fields[1:]:
            sample_id = seq_id.split(delim)[0]
            if sample_id not in sample_id_idx:
                sample_id_idx[sample_id] = len(sample_ids)
                sample_ids.append(sample_id)
            # {(row,col):val}
            counts[(row, sample_id_idx[sample_id])] += 1
        otu_ids.append(otu_id)
    return counts, sample_ids, otu_ids
def parse_sample_id_map(sample_id_map_f):
    """Parses the lines of a sample ID map file into a dictionary.

    Each non-whitespace line holds two tab-separated sample IDs:
    original then new. Original IDs must be unique (they become the
    keys), and at most two original IDs may map to the same new ID.

    Returns {original_id: new_id}; violations raise ValueError.
    """
    mapping = {}
    new_id_counts = defaultdict(int)
    for raw in sample_id_map_f:
        # Only try to parse lines that aren't just whitespace.
        entry = raw.strip()
        if not entry:
            continue
        samp_id, mapped_id = entry.split('\t')
        if samp_id in mapping:
            raise ValueError("The first column of the sample ID map must "
                             "contain unique sample IDs ('%s' is "
                             "repeated). The second column, however, may "
                             "contain repeats." % samp_id)
        if new_id_counts[mapped_id] >= 2:
            raise ValueError("Only two original sample IDs may map to the "
                             "same new sample ID. The new sample ID '%s' "
                             "has more than two sample IDs mapping to it."
                             % mapped_id)
        mapping[samp_id] = mapped_id
        new_id_counts[mapped_id] += 1
    return mapping
def parse_items(fp):
    """Parse items from a file where each item is in a different line.

    Parameters
    ----------
    fp : str/bytes/unicode string or file-like
        Filepath or file-like object to parse.

    Returns
    -------
    list
        Items parsed from the file; an empty file yields [].
    """
    with open_file(fp, 'U') as f:
        contents = f.read()
    parsed = contents.strip('\n').split('\n')
    if parsed == ['']:
        return []
    return parsed
| antgonza/qiime | qiime/parse.py | Python | gpl-2.0 | 32,670 | [
"BLAST"
] | ae5fd9871c10bf5c5d33ac725b71a9753c0c01d92b546ad2792496bdeadd2128 |
#!/afs/bx.psu.edu/project/pythons/py2.7-linux-x86_64-ucs4/bin/python2.7
"""
Returns a bed-like translation of a CDS in which each record corresponds to
a single site in the CDS and includes additional fields for site degenaracy,
position ind CDS, and amino acid encoded.
usage: %prog nibdir genefile [options]
-o, --outfile=o: output file
-f, --format=f: format bed (default), or gtf|gff
-a, --allpositions: 1st, 2nd and 3rd positions are evaluated for degeneracy given the sequence at the other two positions. Many 1d sites in 1st codon positions become 2d sites when considered this way.
-n, --include_name: include the 'name' or 'id' field from the source file on every line of output
"""
import re
import sys
import os
import string
from bx.seq import nib
from bx.bitset import *
from bx.bitset_builders import *
from bx.bitset_utils import *
from bx.gene_reader import *
from bx.cookbook import doc_optparse
GENETIC_CODE = """
TTT (Phe/F)Phenylalanine
TTC (Phe/F)Phenylalanine
TTA (Leu/L)Leucine
TTG (Leu/L)Leucine, Start
TCT (Ser/S)Serine
TCC (Ser/S)Serine
TCA (Ser/S)Serine
TCG (Ser/S)Serine
TAT (Tyr/Y)Tyrosine
TAC (Tyr/Y)Tyrosine
TAA Ochre (Stop)
TAG Amber (Stop)
TGT (Cys/C)Cysteine
TGC (Cys/C)Cysteine
TGA Opal (Stop)
TGG (Trp/W)Tryptophan
CTT (Leu/L)Leucine
CTC (Leu/L)Leucine
CTA (Leu/L)Leucine
CTG (Leu/L)Leucine, Start
CCT (Pro/P)Proline
CCC (Pro/P)Proline
CCA (Pro/P)Proline
CCG (Pro/P)Proline
CAT (His/H)Histidine
CAC (His/H)Histidine
CAA (Gln/Q)Glutamine
CAG (Gln/Q)Glutamine
CGT (Arg/R)Arginine
CGC (Arg/R)Arginine
CGA (Arg/R)Arginine
CGG (Arg/R)Arginine
ATT (Ile/I)Isoleucine, Start2
ATC (Ile/I)Isoleucine
ATA (Ile/I)Isoleucine
ATG (Met/M)Methionine, Start1
ACT (Thr/T)Threonine
ACC (Thr/T)Threonine
ACA (Thr/T)Threonine
ACG (Thr/T)Threonine
AAT (Asn/N)Asparagine
AAC (Asn/N)Asparagine
AAA (Lys/K)Lysine
AAG (Lys/K)Lysine
AGT (Ser/S)Serine
AGC (Ser/S)Serine
AGA (Arg/R)Arginine
AGG (Arg/R)Arginine
GTT (Val/V)Valine
GTC (Val/V)Valine
GTA (Val/V)Valine
GTG (Val/V)Valine, Start2
GCT (Ala/A)Alanine
GCC (Ala/A)Alanine
GCA (Ala/A)Alanine
GCG (Ala/A)Alanine
GAT (Asp/D)Aspartic acid
GAC (Asp/D)Aspartic acid
GAA (Glu/E)Glutamic acid
GAG (Glu/E)Glutamic acid
GGT (Gly/G)Glycine
GGC (Gly/G)Glycine
GGA (Gly/G)Glycine
GGG (Gly/G)Glycine
"""
def translate(codon, genetic_code):
    """Look up the amino acid for a 3-base codon in a nested code table."""
    first, second, third = codon
    return genetic_code[first][second][third]
""" parse the doc string to hash the genetic code"""
GEN_CODE = {}
for line in GENETIC_CODE.split('\n'):
if line.strip() == '': continue
f = re.split('\s|\(|\)|\/',line)
codon = f[0]
c1,c2,c3 = codon
aminoacid = f[3]
if c1 not in GEN_CODE: GEN_CODE[c1] = {}
if c2 not in GEN_CODE[c1]: GEN_CODE[c1][c2] = {}
GEN_CODE[c1][c2][c3] = aminoacid
def getnib(nibdir):
    """Open every *.nib file under nibdir.

    Returns {chromosome_name: bx.seq.nib.NibFile}, where the chromosome
    name is the filename with '.nib' removed.
    """
    seqs = {}
    for entry in os.listdir(nibdir):
        if entry.endswith('.nib'):
            chrom = entry.replace('.nib', '')
            path = os.path.join(nibdir, entry)
            seqs[chrom] = nib.NibFile(open(path))
    return seqs
# Base-complement translation table (Python 2 string.maketrans).
REVMAP = string.maketrans("ACGTacgt","TGCAtgca")
def revComp(seq):
    """Return the reverse complement of seq (per-base case preserved)."""
    return seq[::-1].translate(REVMAP)
def Comp(seq):
    """Return the (non-reversed) complement of seq."""
    return seq.translate(REVMAP)
def codon_degeneracy( codon, position=3 ):
    """Return the degeneracy count of one codon position.

    codon: 3-character base string.
    position: 1-based codon position to evaluate (default 3).

    Counts how many of the four bases at `position` encode the same
    amino acid as `codon`, holding the other two positions fixed.

    Bug fix: the original stored positions 1/2 in unused variables
    (degeneracy1/degeneracy2) and then returned an undefined name
    (UnboundLocalError), and iterated over the *builtin* `all` instead
    of a base list (TypeError).
    """
    aa = translate( codon, GEN_CODE )
    bases = ['A', 'C', 'G', 'T']
    if position == 1:
        degeneracy = [GEN_CODE[ b ][ codon[1] ][ codon[2] ] for b in bases].count(aa)
    elif position == 2:
        degeneracy = [GEN_CODE[ codon[0] ][ b ][ codon[2] ] for b in bases].count(aa)
    elif position == 3:
        # list(...) keeps this correct on Python 3 dict views as well
        degeneracy = list(GEN_CODE[ codon[0] ][ codon[1] ].values()).count(aa)
    else:
        raise ValueError("position must be 1, 2 or 3, got %r" % (position,))
    return degeneracy
def main():
    """Drive the annotation: for every CDS, print one line per site with
    chrom, start, end, base, degeneracy class, amino acid and (optionally)
    the record name."""
    options, args = doc_optparse.parse( __doc__ )
    try:
        if options.outfile:
            out = open( options.outfile, "w")
        else:
            out = sys.stdout
        if options.format:
            format = options.format
        else:
            format = 'bed'
        allpositions = bool( options.allpositions )
        include_name = bool( options.include_name )
        nibdir = args[0]
        bedfile = args[1]
    except:
        doc_optparse.exit()
    nibs = getnib(nibdir)
    for chrom, strand, cds_exons, name in CDSReader( open(bedfile), format=format):
        cds_seq = ''
        # genome_seq_index maps the position in CDS to position on the genome
        genome_seq_index = []
        for (c_start, c_end) in cds_exons:
            cds_seq += nibs[chrom].get( c_start, c_end-c_start )
            for i in range(c_start,c_end):
                genome_seq_index.append(i)
        cds_seq = cds_seq.upper()
        if strand == '+':
            frsts = range( 0, len(cds_seq), 3)
            offsign = 1
        else:
            # minus strand: complement the sequence and walk codons backwards
            cds_seq = Comp( cds_seq )
            frsts = range( 2, len(cds_seq), 3)
            offsign = -1
        offone = 1 * offsign
        offtwo = 2 * offsign
        all = ['A','C','G','T']
        for first_pos in frsts:
            c1 = first_pos
            c2 = first_pos + offone
            c3 = first_pos + offtwo
            try:
                assert c3 < len(cds_seq)
            except AssertionError:
                # incomplete trailing codon: warn and skip
                print >>sys.stderr, "out of sequence at %d for %s, %d" % (c3, chrom, genome_seq_index[ first_pos ])
                continue
            codon = cds_seq[c1], cds_seq[c2], cds_seq[c3]
            aa = translate( codon, GEN_CODE )
            # degeneracy of the 3rd position given the first two bases
            degeneracy3 = str(GEN_CODE[ codon[0] ][ codon[1] ].values().count(aa)) + "d"
            if not include_name: name_text = ''
            else:
                name_text = name.replace(' ','_')
            if allpositions:
                # also evaluate 1st/2nd positions given the other two bases
                try:
                    degeneracy1 = str([GEN_CODE[ k ][ codon[1] ][ codon[2] ] for k in all].count(aa)) + "d"
                    degeneracy2 = str([GEN_CODE[ codon[0] ][ k ][ codon[2] ] for k in all].count(aa)) + "d"
                except TypeError, s:
                    print >>sys.stderr, GEN_CODE.values()
                    raise TypeError, s
                if strand == '+':
                    print >>out, chrom, genome_seq_index[c1], genome_seq_index[c1] + 1, cds_seq[c1], degeneracy1, aa, name_text
                    print >>out, chrom, genome_seq_index[c2], genome_seq_index[c2] + 1, cds_seq[c2], degeneracy2, aa, name_text
                    print >>out, chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text
                else:
                    print >>out, chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text
                    print >>out, chrom, genome_seq_index[c2], genome_seq_index[c2] + 1, cds_seq[c2], degeneracy2, aa, name_text
                    print >>out, chrom, genome_seq_index[c1], genome_seq_index[c1] + 1, cds_seq[c1], degeneracy1, aa, name_text
            else:
                # default mode: positions 1 and 2 are reported as "1d"
                if strand == '+':
                    for b in c1,c2:
                        print >>out, chrom, genome_seq_index[b], genome_seq_index[b] + 1, cds_seq[b], "1d", aa, name_text
                    print >>out, chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text
                else:
                    print >>out, chrom, genome_seq_index[c3], genome_seq_index[c3] + 1, cds_seq[c3], degeneracy3, aa, name_text
                    for b in c2,c1:
                        print >>out, chrom, genome_seq_index[b], genome_seq_index[b] + 1, cds_seq[b], "1d", aa, name_text
    out.close()
if __name__ == '__main__':
main()
#format = sys.argv[1]
#file = sys.argv[2]
#for chr, strand, cds_exons in CDSReader( open(file), format=format):
# s_points = [ "%d,%d" % (a[0],a[1]) for a in cds_exons ]
# print chr, strand, len(cds_exons), "\t".join(s_points)
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/bx_python-0.7.2-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/gene_fourfold_sites.py | Python | gpl-3.0 | 7,729 | [
"Amber"
] | 43ed4b1f8cb029767bd710dc7dd48f43ddb7df6f9deec1075f6eadc7752c6307 |
import os,md5,sys
from PyQt4.QtGui import *
from DisasmWin import *
import sqlite3
import json
import pefile
sys.path.append("distorm-read-only/build/lib/")
#sys.path.append("distorm-read-only/build/lib")
#from pydasm import *
BASE_ADDR = 0x0;
import distorm3
import mmap
from utils import *
import re
labels = {}
tovisit = []
disassembly = {}
# INITIALISE SQLITE DATABASE ----------------
# One database per analyzed binary: <target>.db; tables are dropped and
# recreated so every run starts from a clean schema.
dbcon = sqlite3.connect(sys.argv[1] + ".db")
dbcon.row_factory = sqlite3.Row
dbcur = dbcon.cursor()
# Label type constants stored in labels.labelType.
LABEL_FUNC = 0
LABEL_LABEL = 1
LABEL_DATA = 2
LABEL_IMPORT = 3
dbcur.execute("DROP TABLE IF EXISTS disasm")
dbcur.execute("CREATE TABLE disasm (offset int, mnemonic text, ops text, size int, formatted text, comment text, meta text, primary key(offset))")
dbcur.execute("DROP TABLE IF EXISTS labels")
dbcur.execute("CREATE TABLE labels (offset int, labelName text, labelType int, meta text, primary key(offset))")
dbcur.execute("DROP TABLE IF EXISTS xrefs")
dbcur.execute("CREATE TABLE xrefs (fromOffset int, toOffset int, primary key(fromOffset))")
dbcur.execute("DROP TABLE IF EXISTS segments")
dbcur.execute("CREATE TABLE segments (id integer primary key, name text, fileOffset int, fileSize int, virtOffset int, virtSize int, read bool, write bool, execute bool)")
#dbcur.execute("DROP TABLE IF EXISTS imports")
#dbcur.execute("CREATE TABLE imports (id integer primary key, name text, addr int)")
# END INITIALISE SQLITE DATABASE ----------------
# OPEN AND PARSE PE FILE ----------------
pe = pefile.PE(sys.argv[1], fast_load=True)
imgBase = pe.OPTIONAL_HEADER.ImageBase
entryPt = pe.OPTIONAL_HEADER.AddressOfEntryPoint + imgBase
print ("Reading sections")
# Record every PE section in the segments table (r/w/x flags left at 0).
for section in pe.sections:
    dbcur.execute("INSERT INTO segments (name, fileOffset, fileSize, virtOffset, virtSize, read, write, execute) VALUES (?, ?, ?, ?, ?, 0, 0, 0)", (section.Name, section.PointerToRawData, section.SizeOfRawData, imgBase + section.VirtualAddress, section.Misc_VirtualSize))
#    print section
#    print (section.Name, hex(imgBase + section.VirtualAddress),
#           hex(section.Misc_VirtualSize), section.SizeOfRawData )
pe.parse_data_directories()
print ("Reading imports")
# Store each import thunk address as an import-type "dll!name" label.
for entry in pe.DIRECTORY_ENTRY_IMPORT:
    # print entry.dll
    for imp in entry.imports:
        dbcur.execute("INSERT INTO labels (offset, labelName, labelType) VALUES (?, ?, ?)", (imp.address, "{}!{}".format(entry.dll.lower(), imp.name), LABEL_IMPORT))
    # print "{}!{} {:x}".format(entry.dll, imp.name, imp.address)
dbcon.commit()
# END OPEN AND PARSE PE FILE ----------------
# HELPER FUNCS ------------
def memoryOffsetToFileOffset(off):
    """Translate a virtual-memory offset into a raw-file offset.

    Scans the segments table for a segment whose mapped range contains
    `off`; returns the matching file offset, or None when no segment
    covers the address.
    """
    dbcur.execute("SELECT * FROM segments")
    # fetchall(): the original fetchmany() with no argument returns only
    # cursor.arraysize rows (default 1), so every segment after the
    # first was silently ignored.
    for r in dbcur.fetchall():
        if off >= r['virtOffset'] and off <= r['virtOffset'] + r['fileSize']:
            return off - int(r['virtOffset']) + int(r['fileOffset'])
    return None
def memoryOffsetToSegmentName(off):
    """Return the name of the segment containing virtual offset `off`.

    Returns '' when no segment matches (the empty string keeps callers
    that use the result in string context working).
    """
    dbcur.execute("SELECT * FROM segments")
    # fetchall(): fetchmany() with no argument yields only
    # cursor.arraysize rows (default 1), skipping later segments.
    for r in dbcur.fetchall():
        if off >= r['virtOffset'] and off <= r['virtOffset'] + r['fileSize']:
            return r['name']
    # The original also had an unreachable `return None` after this
    # point; the not-found result remains the empty string.
    return ""
def fileOffsetToMemoryOffset(off):
    """Map a raw file offset back to its virtual (memory) offset.

    Returns None when ``off`` is not covered by any recorded segment.
    """
    dbcur.execute("SELECT * FROM segments")
    # BUG FIX: fetchmany() only returned the first segment row; fetchall()
    # checks every segment.
    for r in dbcur.fetchall():
        if off >= r['fileOffset'] and off <= r['fileOffset'] + r['fileSize']:
            return off - r['fileOffset'] + r['virtOffset']
    return None
# Replace offsets with label names/import names
# Parse full instruction string
re_off = re.compile("0x[0-9a-f]+")


def replaceLabels(inst):
    """Substitute each known hex offset in ``inst`` with its label name."""
    news = inst
    # BUG FIX: replace longer literals first -- substituting "0x10" before
    # "0x100" would corrupt the longer occurrence inside the same string.
    for o in sorted(re_off.findall(inst), key=len, reverse=True):
        dbcur.execute("SELECT labelName FROM labels WHERE offset=?", (int(o, 16),))
        res = dbcur.fetchall()
        if len(res) > 0:
            news = news.replace(o, res[0]['labelName'])
    return news
# END HELPER FUNCS ------------

# Memory map file
fd = file(os.sys.argv[1], 'r+b')
data = mmap.mmap(fd.fileno(), 0)
fileLen = len(data)
# add entry point for beginning
labels[entryPt] = {'type':'sub', 'name':'_start', 'xrefs':[], 'calls_out':[], 'end':0}
tovisit.append(entryPt)

# first pass - Do Disassembly
# Recursive-descent disassembly: starting at the entry point, decode
# instructions linearly and queue every static call/jump target found,
# recording instructions, labels and xrefs as we go.
print "PASS 1"
offset = 0
while True:
    if len(tovisit) == 0: # any more labels to visit?
        break
    offset = tovisit.pop()
    terminalInstruction = False
    # while not finished this label
    while((not disassembly.has_key(offset)) and (not terminalInstruction) and memoryOffsetToFileOffset(offset) != None):
        if offset in tovisit:
            tovisit.remove(offset)
        # decode instructions
        inst = distorm3.Decode(offset, data[memoryOffsetToFileOffset(offset):], distorm3.Decode32Bits)[0]
        #print offset,inst
        ins = inst['mnemonic']
        ops = inst['ops']
        dbcur.execute("INSERT INTO disasm (offset, mnemonic, ops, size, meta) VALUES (?, ?, ?, ?, ?)", (offset, ins, ops, inst['size'], json.dumps(inst)))
        # Is control flow instruction with static destination?
        # (operand contains a literal 0x... and is not a memory dereference)
        if (ins.startswith("call") or ins[0] == 'j' or ins.startswith("loop")) and ops.find("0x")!=-1 and ops.find("[")==-1:
            newoff = int(ops[ops.find("0x")+2:],16)
            # if not already on tovisit list and not disassembled then add to todo list
            if (not newoff in tovisit) and (not disassembly.has_key(newoff)):
                tovisit.append(newoff)
            # add label for called/jmped addr
            if labels.has_key(newoff):
                if not offset in labels[newoff]['xrefs']:
                    labels[newoff]['xrefs'].append(offset)
                    dbcur.execute("INSERT INTO xrefs (fromOffset, toOffset) VALUES(?, ?)", (offset, newoff))
            else:
                labels[newoff] = {}
                labels[newoff]['calls_out'] = []
                labels[newoff]['xrefs'] = [ offset ]
                dbcur.execute("INSERT INTO xrefs (fromOffset, toOffset) VALUES(?, ?)", (offset, newoff))
                # call targets become subroutines ('sub'); jump targets
                # become plain labels ('lbl')
                labels[newoff]['type'] = ('sub' if ins.startswith('call') else 'lbl')
                dbcur.execute("INSERT INTO labels (offset, labelName, labelType, meta) VALUES (?, ?, ?, ?)", (newoff, getLblName(labels, newoff), LABEL_FUNC if ins.startswith('call') else LABEL_LABEL, json.dumps(labels[newoff])))
        disassembly[offset] = inst
        if(ins.startswith("ret") or ins.startswith("jmp")):
            # unconditional transfer: stop the linear sweep of this region
            terminalInstruction = True;
        else:
            offset += inst['size'];
        if(disassembly.has_key(offset)): #next instruction already visited - so stop
            break
print "PASS 2"
# second pass - does function related passing
# For each subroutine label, linearly re-walk its instructions to
# (a) track ESP/EBP with a crude stack model, (b) collect static call
# targets into calls_out, and (c) find the RET that ends the function.
inProcLabel = False
offset = BASE_ADDR
for off in labels:
    #while offset < fileOffsetToMemoryOffset(fileLen):
    # check for function start
    if labels[off]['type'] != "sub":
        continue
    print off
    offset = off
    # stack model: register deltas relative to function entry
    regs = {'ESP': 0, 'EBP': 0 }
    stack = { }
    while True:
        if disassembly.has_key(offset):
            inst = disassembly[offset]
            ins = inst['mnemonic']
            # try to keep track of stack
            instd = distorm3.Decompose(offset, data[memoryOffsetToFileOffset(offset):], distorm3.Decode32Bits)[0]
            if ins == "push":
                regs['ESP'] -= 4
            elif ins == "pop":
                regs['ESP'] += 4
            elif ins == "leave":
                # leave == mov esp, ebp ; pop ebp
                regs['ESP'] = regs['EBP']
                regs['ESP'] += 4
            elif ins == "enter":
                print "IMPLEMENT ENTER STACK TRACE***************************"
                # regs['ESP'] -= 4
                # regs['ESP'] -= instd.operands[0].value
                print instd
            else:
                # generic reg, <imm|reg> arithmetic: update tracked registers
                numOps = len(instd.operands)
                if numOps == 2 and instd.operands[0].type == distorm3.OPERAND_REGISTER:
                    val = None
                    reg = instd.operands[0].name
                    # try to resolve value e.g. esp/ebp
                    if instd.operands[1].type == distorm3.OPERAND_IMMEDIATE:
                        val = instd.operands[1].value
                    elif instd.operands[1].type == distorm3.OPERAND_REGISTER and regs.has_key(instd.operands[1].name):
                        val = regs[instd.operands[1].name]
                    if val and regs.has_key(reg):
                        if ins == "sub":
                            regs[reg] -= val
                        if ins == "add":
                            regs[reg] += val
                        if ins == "mov":
                            regs[reg] = val
            if(ins.startswith("call") and inst['ops'].find("0x") != -1 and inst['ops'].find("[") == -1): # collate calls out of function
                labels[off]['calls_out'].append(int(inst['ops'][inst['ops'].find("0x")+2:],16))
                dbcur.execute("UPDATE labels SET meta=? WHERE offset=?", (json.dumps(labels[off]), off))
            if(ins.startswith("ret")): #detect function termination
                print "END: ", regs
                # ESP not back at 0 means the simple stack model went wrong
                if regs['ESP'] != 0:
                    print "***warn stack trace may have failed"
                if not labels[off].has_key('end'):
                    labels[off]['end'] = offset + inst['size']
                    dbcur.execute("UPDATE labels SET meta=? WHERE offset=?", (json.dumps(labels[off]), off))
                break
            offset += inst['size']
        else:
            break

#pass 3 - print
print "PASS 3: Display"
def disassemblyText(disassembly, labels, start, end):
    """Render the disassembly between virtual offsets start/end as HTML.

    Bytes with no decoded instruction are emitted as raw "db" lines.
    Returns the HTML string (wrapped in a <pre> element).
    """
    # FIX: accumulator renamed from "str" -- it shadowed the builtin, which
    # would break any later use of str() inside this function.
    html = '<pre style="font-family: monospace; font-size: 14px;">'
    inProcLabel = False
    offset = start
    while offset < end:
        if labels.has_key(offset):
            if labels[offset]['type'] == "sub":
                html += "\n\n---STARTPROC---\n"
                inProcLabel = offset
            html += "<span style='color:blue'>%s_%x</span>:\n" % (labels[offset]['type'], offset)
        if disassembly.has_key(offset):
            inst = disassembly[offset]
            ins = replaceLabels(inst['instr'])
            segnm = memoryOffsetToSegmentName(offset)
            html += "<span style='color:red'>%s.%08x</span>:\t%s\n" % (segnm, offset, ins)
            if(ins.startswith("ret") and inProcLabel): #detect function termination
                html += "<span style='color:blue'>---ENDPROC--- ; sub_%x ; length = %d bytes ; %d calls out</span>\n\n" % (inProcLabel, labels[inProcLabel]['end'] - inProcLabel, len(labels[inProcLabel]['calls_out']))
                inProcLabel = False
            offset += inst['size']
        else:
            # No instruction here: show the raw byte (plus the character
            # itself when printable).
            fileOffset = memoryOffsetToFileOffset(offset)
            html += "<span style='color:red'>%08x</span>:\tdb 0x%02x %s\n" % (offset, ord(data[fileOffset]), data[fileOffset] if isPrintable(data[fileOffset]) else '')
            offset += 1
    html += "</pre>"
    return html
# Render a sample window of the disassembly and print summary stats.
# NOTE(review): the 0x401000-0x402000 window assumes the default image base
# of a 32-bit PE -- confirm for other binaries.
dtext = disassemblyText(disassembly, labels, 0x401000, 0x402000)
print dtext
print "TOTaL LBLS", len(labels)
#for lbladdr in labels:
#    lbl = labels[lbladdr]
#    if lbl['type'] != 'sub':
#        continue
#    if lbl.has_key('end'):
#        instructions = data[memoryOffsetToFileOffset(lbladdr):memoryOffsetToFileOffset(lbl['end'])]
#        print "%x %d %d %d %s" % (lbladdr, lbl['end'] - lbladdr, len(lbl['calls_out']), len(lbl['xrefs']), md5.new(instructions).hexdigest())

# generate graphviz callgraph
def graphFuncs(labels):
    """Build a Graphviz DOT digraph of subroutine -> callee edges."""
    # FIX 1: accumulator renamed from "str" (shadowed the builtin).
    # FIX 2: node names are now double-quoted -- import labels such as
    # "kernel32.dll!CreateFileA" contain '.' and '!' and are not legal bare
    # Graphviz identifiers, so the unquoted output was invalid DOT.
    dot = 'digraph {\n'
    for lbladdr in labels:
        lbl = labels[lbladdr]
        if lbl['type'] != 'sub':
            continue
        lblname = getLblName(labels, lbladdr)
        # set() de-duplicates repeated calls to the same target
        for xref in set(lbl['calls_out']):
            xrefnm = getLblName(labels, xref)
            dot += '"{}" -> "{}"\n'.format(lblname, xrefnm)
    dot += "}"
    return dot
print graphFuncs(labels)
app = QApplication(sys.argv)
ex = DisasmWin(dtext, labels)
ex.show()
sys.exit(app.exec_())
f = open("callgraph.dot", "w+")
f.write(graphFuncs(labels))
f.close()
dbcon.commit()
dbcon.close()
| drgowen/GarethDisasm-Python | garethdisasm.py | Python | gpl-3.0 | 11,800 | [
"VisIt"
] | 57d8d48d02a26c862880668cdebc99856903e08c7553b71eeb110f491e3670e6 |
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Generic USB Mass storage device driver. This is not a complete stand alone
driver. It is intended to be subclassed with the relevant parts implemented
for a particular device.
'''
import os, time, json, shutil
from itertools import cycle
from calibre.constants import numeric_version
from calibre import prints, isbytestring, fsync
from calibre.constants import filesystem_encoding, DEBUG
from calibre.devices.usbms.cli import CLI
from calibre.devices.usbms.device import Device
from calibre.devices.usbms.books import BookList, Book
from calibre.ebooks.metadata.book.json_codec import JsonCodec
# Timestamp of the first debug_print() call; later calls print the number of
# seconds elapsed since then.
BASE_TIME = None


def debug_print(*args):
    # Emit a debug line prefixed with the relative timestamp; output is
    # produced only when calibre runs in debug mode (DEBUG is truthy).
    global BASE_TIME
    if BASE_TIME is None:
        BASE_TIME = time.time()
    if DEBUG:
        prints('DEBUG: %6.1f'%(time.time()-BASE_TIME), *args)
def safe_walk(top, topdown=True, onerror=None, followlinks=False):
    ' A replacement for os.walk that does not die when it encounters undecodeable filenames in a linux filesystem'
    islink, join, isdir = os.path.islink, os.path.join, os.path.isdir

    # We may lack read permission for `top`; mirror os.walk and hand the
    # error to `onerror` (if supplied) instead of blowing up, so a thousand
    # readable directories are not lost to one unreadable one.
    try:
        entries = os.listdir(top)
    except os.error as err:
        if onerror is not None:
            onerror(err)
        return

    subdirs, regular = [], []
    for entry in entries:
        if isinstance(entry, bytes):
            # Byte-string names appear on linux filesystems with broken
            # encodings; skip anything we cannot decode.
            try:
                entry = entry.decode(filesystem_encoding)
            except UnicodeDecodeError:
                debug_print('Skipping undecodeable file: %r' % entry)
                continue
        (subdirs if isdir(join(top, entry)) else regular).append(entry)

    if topdown:
        yield top, subdirs, regular
    for entry in subdirs:
        child = join(top, entry)
        if followlinks or not islink(child):
            for item in safe_walk(child, topdown, onerror, followlinks):
                yield item
    if not topdown:
        yield top, subdirs, regular
# CLI must come before Device as it implements the CLI functions that
# are inherited from the device interface in Device.
class USBMS(CLI, Device):

    '''
    The base class for all USBMS devices. Implements the logic for
    sending/getting/updating metadata/caching metadata/etc.
    '''

    description = _('Communicate with an e-book reader.')
    author = 'John Schember'
    supported_platforms = ['windows', 'osx', 'linux']

    # Store type instances of BookList and Book. We must do this because
    # a) we need to override these classes in some device drivers, and
    # b) the classmethods seem only to see real attributes declared in the
    #    class, not attributes stored in the class
    booklist_class = BookList
    book_class = Book

    # Formats the driver accepts; subclasses fill these in.
    FORMATS = []
    CAN_SET_METADATA = []
    # On-device filenames for the metadata cache and the drive-info record.
    METADATA_CACHE = 'metadata.calibre'
    DRIVEINFO = 'driveinfo.calibre'
    # When True, book scanning starts at the filesystem root instead of the
    # configured e-book directory.
    SCAN_FROM_ROOT = False
def _update_driveinfo_record(self, dinfo, prefix, location_code, name=None):
from calibre.utils.date import now, isoformat
import uuid
if not isinstance(dinfo, dict):
dinfo = {}
if dinfo.get('device_store_uuid', None) is None:
dinfo['device_store_uuid'] = unicode(uuid.uuid4())
if dinfo.get('device_name', None) is None:
dinfo['device_name'] = self.get_gui_name()
if name is not None:
dinfo['device_name'] = name
dinfo['location_code'] = location_code
dinfo['last_library_uuid'] = getattr(self, 'current_library_uuid', None)
dinfo['calibre_version'] = '.'.join([unicode(i) for i in numeric_version])
dinfo['date_last_connected'] = isoformat(now())
dinfo['prefix'] = prefix.replace('\\', '/')
return dinfo
    def _update_driveinfo_file(self, prefix, location_code, name=None):
        """Read, refresh and rewrite the driveinfo.calibre file under prefix.

        Returns the refreshed driveinfo dict.
        """
        from calibre.utils.config import from_json, to_json
        if os.path.exists(os.path.join(prefix, self.DRIVEINFO)):
            with lopen(os.path.join(prefix, self.DRIVEINFO), 'rb') as f:
                try:
                    driveinfo = json.loads(f.read(), object_hook=from_json)
                except:
                    # Corrupt cache: None makes _update_driveinfo_record
                    # rebuild the record from scratch.
                    driveinfo = None
            driveinfo = self._update_driveinfo_record(driveinfo, prefix,
                    location_code, name)
            with lopen(os.path.join(prefix, self.DRIVEINFO), 'wb') as f:
                f.write(json.dumps(driveinfo, default=to_json))
                fsync(f)
        else:
            # First connection: create the record and the file.
            driveinfo = self._update_driveinfo_record({}, prefix, location_code, name)
            with lopen(os.path.join(prefix, self.DRIVEINFO), 'wb') as f:
                f.write(json.dumps(driveinfo, default=to_json))
                fsync(f)
        return driveinfo
    def get_device_information(self, end_session=True):
        """Refresh driveinfo for every connected store.

        Returns a (gui_name, '', '', '', driveinfo) tuple; driveinfo maps
        'main'/'A'/'B' to the per-store records.
        """
        self.report_progress(1.0, _('Get device information...'))
        self.driveinfo = {}
        if self._main_prefix is not None:
            try:
                self.driveinfo['main'] = self._update_driveinfo_file(self._main_prefix, 'main')
            except (IOError, OSError) as e:
                # Main memory unreadable: surface a user-actionable message.
                raise IOError(_('Failed to access files in the main memory of'
                        ' your device. You should contact the device'
                        ' manufacturer for support. Common fixes are:'
                        ' try a different USB cable/USB port on your computer.'
                        ' If you device has a "Reset to factory defaults" type'
                        ' of setting somewhere, use it. Underlying error: %s')
                        % e)

        try:
            if self._card_a_prefix is not None:
                self.driveinfo['A'] = self._update_driveinfo_file(self._card_a_prefix, 'A')
            if self._card_b_prefix is not None:
                self.driveinfo['B'] = self._update_driveinfo_file(self._card_b_prefix, 'B')
        except (IOError, OSError) as e:
            raise IOError(_('Failed to access files on the SD card in your'
                ' device. This can happen for many reasons. The SD card may be'
                ' corrupted, it may be too large for your device, it may be'
                ' write-protected, etc. Try a different SD card, or reformat'
                ' your SD card using the FAT32 filesystem. Also make sure'
                ' there are not too many files in the root of your SD card.'
                ' Underlying error: %s') % e)
        return (self.get_gui_name(), '', '', '', self.driveinfo)
def set_driveinfo_name(self, location_code, name):
if location_code == 'main':
self._update_driveinfo_file(self._main_prefix, location_code, name)
elif location_code == 'A':
self._update_driveinfo_file(self._card_a_prefix, location_code, name)
elif location_code == 'B':
self._update_driveinfo_file(self._card_b_prefix, location_code, name)
def formats_to_scan_for(self):
return set(self.settings().format_map) | set(self.FORMATS)
    def is_allowed_book_file(self, filename, path, prefix):
        # Hook for subclasses to exclude files during the device scan; the
        # base implementation accepts everything.
        return True
    def books(self, oncard=None, end_session=True):
        """Build the BookList for main memory or a card.

        Reconciles the on-device metadata cache with the files actually
        present, then re-syncs the cache when they disagree.
        """
        from calibre.ebooks.metadata.meta import path_to_ext

        debug_print('USBMS: Fetching list of books from device. Device=',
                    self.__class__.__name__,
                    'oncard=', oncard)

        dummy_bl = self.booklist_class(None, None, None)

        # Requested card absent (or unknown card id): return an empty list.
        if oncard == 'carda' and not self._card_a_prefix:
            self.report_progress(1.0, _('Getting list of books on device...'))
            return dummy_bl
        elif oncard == 'cardb' and not self._card_b_prefix:
            self.report_progress(1.0, _('Getting list of books on device...'))
            return dummy_bl
        elif oncard and oncard != 'carda' and oncard != 'cardb':
            self.report_progress(1.0, _('Getting list of books on device...'))
            return dummy_bl

        prefix = self._card_a_prefix if oncard == 'carda' else \
                 self._card_b_prefix if oncard == 'cardb' \
                 else self._main_prefix

        ebook_dirs = self.get_carda_ebook_dir() if oncard == 'carda' else \
            self.EBOOK_DIR_CARD_B if oncard == 'cardb' else \
            self.get_main_ebook_dir()

        debug_print('USBMS: dirs are:', prefix, ebook_dirs)

        # get the metadata cache
        bl = self.booklist_class(oncard, prefix, self.settings)
        need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)

        # make a dict cache of paths so the lookup in the loop below is faster.
        bl_cache = {}
        for idx, b in enumerate(bl):
            bl_cache[b.lpath] = idx

        all_formats = self.formats_to_scan_for()

        def update_booklist(filename, path, prefix):
            # Merge one on-disk file into bl; returns True when bl changed.
            changed = False
            if path_to_ext(filename) in all_formats and self.is_allowed_book_file(filename, path, prefix):
                try:
                    # lpath is the path relative to the store prefix, with
                    # forward slashes, as stored in the metadata cache.
                    lpath = os.path.join(path, filename).partition(self.normalize_path(prefix))[2]
                    if lpath.startswith(os.sep):
                        lpath = lpath[len(os.sep):]
                    lpath = lpath.replace('\\', '/')
                    idx = bl_cache.get(lpath, None)
                    if idx is not None:
                        # Known book: mark as seen (None) and refresh its
                        # metadata if the file changed.
                        bl_cache[lpath] = None
                        if self.update_metadata_item(bl[idx]):
                            # print 'update_metadata_item returned true'
                            changed = True
                    else:
                        if bl.add_book(self.book_from_path(prefix, lpath),
                                       replace_metadata=False):
                            changed = True
                except: # Probably a filename encoding error
                    import traceback
                    traceback.print_exc()
            return changed

        if isinstance(ebook_dirs, basestring):
            ebook_dirs = [ebook_dirs]

        for ebook_dir in ebook_dirs:
            ebook_dir = self.path_to_unicode(ebook_dir)
            if self.SCAN_FROM_ROOT:
                ebook_dir = self.normalize_path(prefix)
            else:
                ebook_dir = self.normalize_path(
                        os.path.join(prefix, *(ebook_dir.split('/')))
                        if ebook_dir else prefix)
            debug_print('USBMS: scan from root', self.SCAN_FROM_ROOT, ebook_dir)
            if not os.path.exists(ebook_dir):
                continue
            # Get all books in the ebook_dir directory
            if self.SUPPORTS_SUB_DIRS or self.SUPPORTS_SUB_DIRS_FOR_SCAN:
                # build a list of files to check, so we can accurately report progress
                flist = []
                for path, dirs, files in safe_walk(ebook_dir):
                    for filename in files:
                        if filename != self.METADATA_CACHE:
                            flist.append({'filename': self.path_to_unicode(filename),
                                          'path': self.path_to_unicode(path)})
                for i, f in enumerate(flist):
                    self.report_progress(i/float(len(flist)), _('Getting list of books on device...'))
                    changed = update_booklist(f['filename'], f['path'], prefix)
                    if changed:
                        need_sync = True
            else:
                paths = os.listdir(ebook_dir)
                for i, filename in enumerate(paths):
                    self.report_progress((i+1) / float(len(paths)), _('Getting list of books on device...'))
                    changed = update_booklist(self.path_to_unicode(filename), ebook_dir, prefix)
                    if changed:
                        need_sync = True

        # Remove books that are no longer in the filesystem. Cache contains
        # indices into the booklist if book not in filesystem, None otherwise
        # Do the operation in reverse order so indices remain valid
        for idx in sorted(bl_cache.itervalues(), reverse=True):
            if idx is not None:
                need_sync = True
                del bl[idx]

        debug_print('USBMS: count found in cache: %d, count of files in metadata: %d, need_sync: %s' %
                    (len(bl_cache), len(bl), need_sync))
        if need_sync: # self.count_found_in_bl != len(bl) or need_sync:
            # Write the reconciled list back to the store it came from.
            if oncard == 'cardb':
                self.sync_booklists((None, None, bl))
            elif oncard == 'carda':
                self.sync_booklists((None, bl, None))
            else:
                self.sync_booklists((bl, None, None))

        self.report_progress(1.0, _('Getting list of books on device...'))
        debug_print('USBMS: Finished fetching list of books from device. oncard=', oncard)
        return bl
    def upload_books(self, files, names, on_card=None, end_session=True,
                     metadata=None):
        """Copy books (and, best-effort, their covers) onto the device.

        Returns (filepath, on_card) pairs for add_books_to_metadata().
        """
        debug_print('USBMS: uploading %d books'%(len(files)))
        path = self._sanity_check(on_card, files)
        paths = []
        names = iter(names)
        metadata = iter(metadata)
        for i, infile in enumerate(files):
            mdata, fname = metadata.next(), names.next()
            filepath = self.normalize_path(self.create_upload_path(path, mdata, fname))
            if not hasattr(infile, 'read'):
                # A filesystem path rather than a file-like object.
                infile = self.normalize_path(infile)
            filepath = self.put_file(infile, filepath, replace_file=True)
            paths.append(filepath)
            try:
                self.upload_cover(os.path.dirname(filepath),
                                  os.path.splitext(os.path.basename(filepath))[0],
                                  mdata, filepath)
            except: # Failure to upload cover is not catastrophic
                import traceback
                traceback.print_exc()
            self.report_progress((i+1) / float(len(files)), _('Transferring books to device...'))

        self.report_progress(1.0, _('Transferring books to device...'))
        debug_print('USBMS: finished uploading %d books'%(len(files)))
        return zip(paths, cycle([on_card]))
    def upload_cover(self, path, filename, metadata, filepath):
        '''
        Upload book cover to the device. Default implementation does nothing.

        :param path: The full path to the directory where the associated book is located.
        :param filename: The name of the book file without the extension.
        :param metadata: metadata belonging to the book. Use metadata.thumbnail
                         for cover
        :param filepath: The full path to the e-book file

        '''
        # Hook: drivers with cover support override this.
        pass
    def add_books_to_metadata(self, locations, metadata, booklists):
        """Register freshly uploaded books in the in-memory booklists.

        ``locations`` is the (filepath, on_card) list from upload_books();
        ``booklists`` is the (main, carda, cardb) tuple.
        """
        debug_print('USBMS: adding metadata for %d books'%(len(metadata)))

        metadata = iter(metadata)
        for i, location in enumerate(locations):
            self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...'))
            info = metadata.next()
            # Index into booklists for this location's store.
            blist = 2 if location[1] == 'cardb' else 1 if location[1] == 'carda' else 0

            # Extract the correct prefix from the pathname. To do this correctly,
            # we must ensure that both the prefix and the path are normalized
            # so that the comparison will work. Book's __init__ will fix up
            # lpath, so we don't need to worry about that here.

            path = self.normalize_path(location[0])
            if self._main_prefix:
                prefix = self._main_prefix if \
                    path.startswith(self.normalize_path(self._main_prefix)) else None
            if not prefix and self._card_a_prefix:
                prefix = self._card_a_prefix if \
                    path.startswith(self.normalize_path(self._card_a_prefix)) else None
            if not prefix and self._card_b_prefix:
                prefix = self._card_b_prefix if \
                    path.startswith(self.normalize_path(self._card_b_prefix)) else None
            if prefix is None:
                prints('in add_books_to_metadata. Prefix is None!', path,
                       self._main_prefix)
                continue
            lpath = path.partition(prefix)[2]
            if lpath.startswith('/') or lpath.startswith('\\'):
                lpath = lpath[1:]
            book = self.book_class(prefix, lpath, other=info)
            if book.size is None:
                book.size = os.stat(self.normalize_path(path)).st_size
            b = booklists[blist].add_book(book, replace_metadata=True)
            if b:
                # Flag for sync_booklists: this entry is newly added.
                b._new_book = True
        self.report_progress(1.0, _('Adding books to device metadata listing...'))
        debug_print('USBMS: finished adding metadata')
    def delete_single_book(self, path):
        # Remove just the e-book file itself; sidecar files are handled
        # separately by delete_extra_book_files().
        os.unlink(path)
def delete_extra_book_files(self, path):
filepath = os.path.splitext(path)[0]
for ext in self.DELETE_EXTS:
for x in (filepath, path):
x += ext
if os.path.exists(x):
if os.path.isdir(x):
shutil.rmtree(x, ignore_errors=True)
else:
os.unlink(x)
if self.SUPPORTS_SUB_DIRS:
try:
os.removedirs(os.path.dirname(path))
except:
pass
    def delete_books(self, paths, end_session=True):
        """Delete the given books (plus sidecar files) from the device."""
        debug_print('USBMS: deleting %d books'%(len(paths)))
        for i, path in enumerate(paths):
            self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
            path = self.normalize_path(path)
            if os.path.exists(path):
                # Delete the ebook
                self.delete_single_book(path)
                self.delete_extra_book_files(path)

        self.report_progress(1.0, _('Removing books from device...'))
        debug_print('USBMS: finished deleting %d books'%(len(paths)))
def remove_books_from_metadata(self, paths, booklists):
debug_print('USBMS: removing metadata for %d books'%(len(paths)))
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
for bl in booklists:
for book in bl:
if path.endswith(book.path):
bl.remove_book(book)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
debug_print('USBMS: finished removing metadata for %d books'%(len(paths)))
    # If you override this method and you use book._new_book, then you must
    # complete the processing before you call this method. The flag is cleared
    # at the end just before the return
    def sync_booklists(self, booklists, end_session=True):
        """Serialize each booklist back to its store's metadata cache file."""
        debug_print('USBMS: starting sync_booklists')
        json_codec = JsonCodec()

        if not os.path.exists(self.normalize_path(self._main_prefix)):
            os.makedirs(self.normalize_path(self._main_prefix))

        def write_prefix(prefix, listid):
            # Write booklists[listid] to METADATA_CACHE under prefix, but
            # only when the store exists and the entry is a real booklist.
            if (prefix is not None and len(booklists) > listid and
                    isinstance(booklists[listid], self.booklist_class)):
                if not os.path.exists(prefix):
                    os.makedirs(self.normalize_path(prefix))
                with lopen(self.normalize_path(os.path.join(prefix, self.METADATA_CACHE)), 'wb') as f:
                    json_codec.encode_to_file(f, booklists[listid])
                    fsync(f)
        write_prefix(self._main_prefix, 0)
        write_prefix(self._card_a_prefix, 1)
        write_prefix(self._card_b_prefix, 2)

        # Clear the _new_book indication, as we are supposed to be done with
        # adding books at this point
        for blist in booklists:
            if blist is not None:
                for book in blist:
                    book._new_book = False

        self.report_progress(1.0, _('Sending metadata to device...'))
        debug_print('USBMS: finished sync_booklists')
    @classmethod
    def build_template_regexp(cls):
        # Compile the filename -> metadata regexp from the driver's save
        # template (delegated to the shared helper in devices.utils).
        from calibre.devices.utils import build_template_regexp
        return build_template_regexp(cls.save_template())
@classmethod
def path_to_unicode(cls, path):
if isbytestring(path):
path = path.decode(filesystem_encoding)
return path
@classmethod
def normalize_path(cls, path):
'Return path with platform native path separators'
if path is None:
return None
if os.sep == '\\':
path = path.replace('/', '\\')
else:
path = path.replace('\\', '/')
return cls.path_to_unicode(path)
@classmethod
def parse_metadata_cache(cls, bl, prefix, name):
json_codec = JsonCodec()
need_sync = False
cache_file = cls.normalize_path(os.path.join(prefix, name))
if os.access(cache_file, os.R_OK):
try:
with lopen(cache_file, 'rb') as f:
json_codec.decode_from_file(f, bl, cls.book_class, prefix)
except:
import traceback
traceback.print_exc()
bl = []
need_sync = True
else:
need_sync = True
return need_sync
    @classmethod
    def update_metadata_item(cls, book):
        # Refresh a cached book whose file changed on disk, using the file
        # size as the change detector.  Returns True when it was updated.
        changed = False
        size = os.stat(cls.normalize_path(book.path)).st_size
        if size != book.size:
            changed = True
            mi = cls.metadata_from_path(book.path)
            book.smart_update(mi)
            book.size = size
        return changed
    @classmethod
    def metadata_from_path(cls, path):
        # Convenience wrapper: read metadata for a single format file.
        return cls.metadata_from_formats([path])
    @classmethod
    def metadata_from_formats(cls, fmts):
        # Read metadata from the given format files inside the
        # quick_metadata context, falling back to the filename template.
        from calibre.ebooks.metadata.meta import metadata_from_formats
        from calibre.customize.ui import quick_metadata
        with quick_metadata:
            return metadata_from_formats(fmts, force_read_metadata=True,
                                         pattern=cls.build_template_regexp())
    @classmethod
    def book_from_path(cls, prefix, lpath):
        """Construct a Book object for the file at prefix/lpath."""
        from calibre.ebooks.metadata.book.base import Metadata

        if cls.settings().read_metadata or cls.MUST_READ_METADATA:
            # Full path: read metadata from inside the file.
            mi = cls.metadata_from_path(cls.normalize_path(os.path.join(prefix, lpath)))
        else:
            # Cheap path: derive metadata from the filename alone.
            from calibre.ebooks.metadata.meta import metadata_from_filename
            mi = metadata_from_filename(cls.normalize_path(os.path.basename(lpath)),
                                        cls.build_template_regexp())

        if mi is None:
            # Last resort: title from the filename, unknown author.
            mi = Metadata(os.path.splitext(os.path.basename(lpath))[0],
                          [_('Unknown')])
        size = os.stat(cls.normalize_path(os.path.join(prefix, lpath))).st_size
        book = cls.book_class(prefix, lpath, other=mi, size=size)
        return book
| jelly/calibre | src/calibre/devices/usbms/driver.py | Python | gpl-3.0 | 23,538 | [
"VisIt"
] | c7882b1f5566e35830f40e6b49c5625b607c8f4b4e67587ad5b450e6428c766f |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Ferret(Package):
    """Ferret is an interactive computer visualization and analysis environment
       designed to meet the needs of oceanographers and meteorologists
       analyzing large and complex gridded data sets."""
    homepage = "http://ferret.pmel.noaa.gov/Ferret/home"
    url = "ftp://ftp.pmel.noaa.gov/ferret/pub/source/fer_source.v696.tar.gz"

    version('6.96', '51722027c864369f41bab5751dfff8cc')

    # Serial (non-MPI) HDF5/netCDF stack, per the variant constraints below.
    depends_on("hdf5~mpi~fortran")
    depends_on("netcdf~mpi")
    depends_on("netcdf-fortran")
    depends_on("readline")
    depends_on("zlib")
def url_for_version(self, version):
return "ftp://ftp.pmel.noaa.gov/ferret/pub/source/fer_source.v{0}.tar.gz".format(
version.joined)
def patch(self):
hdf5_prefix = self.spec['hdf5'].prefix
netcdff_prefix = self.spec['netcdf-fortran'].prefix
readline_prefix = self.spec['readline'].prefix
libz_prefix = self.spec['zlib'].prefix
filter_file(r'^BUILDTYPE.+',
'BUILDTYPE = x86_64-linux',
'FERRET/site_specific.mk')
filter_file(r'^INSTALL_FER_DIR.+',
'INSTALL_FER_DIR = %s' % self.spec.prefix,
'FERRET/site_specific.mk')
filter_file(r'^HDF5_DIR.+',
'HDF5_DIR = %s' % hdf5_prefix,
'FERRET/site_specific.mk')
filter_file(r'^NETCDF4_DIR.+',
'NETCDF4_DIR = %s' % netcdff_prefix,
'FERRET/site_specific.mk')
filter_file(r'^READLINE_DIR.+',
'READLINE_DIR = %s' % readline_prefix,
'FERRET/site_specific.mk')
filter_file(r'^LIBZ_DIR.+',
'LIBZ_DIR = %s' % libz_prefix,
'FERRET/site_specific.mk')
filter_file(r'^JAVA_HOME.+',
' ',
'FERRET/site_specific.mk')
filter_file(r'-lm',
'-lgfortran -lm',
'FERRET/platform_specific.mk.x86_64-linux')
    def install(self, spec, prefix):
        """Build and install Ferret against the Spack-provided libraries."""
        hdf5_prefix = spec['hdf5'].prefix
        netcdff_prefix = spec['netcdf-fortran'].prefix
        netcdf_prefix = spec['netcdf'].prefix
        libz_prefix = spec['zlib'].prefix

        # Ferret's makefiles look for <prefix>/lib64 directories and expect
        # the C netCDF library inside the netcdf-fortran prefix; satisfy
        # both expectations with symlinks.
        ln = which('ln')
        ln('-sf',
           hdf5_prefix + '/lib',
           hdf5_prefix + '/lib64')
        ln('-sf',
           netcdff_prefix + '/lib',
           netcdff_prefix + '/lib64')
        ln('-sf',
           netcdf_prefix + '/lib/libnetcdf.a',
           netcdff_prefix + '/lib/libnetcdf.a')
        ln('-sf',
           netcdf_prefix + '/lib/libnetcdf.la',
           netcdff_prefix + '/lib/libnetcdf.la')
        ln('-sf',
           libz_prefix + '/lib',
           libz_prefix + '/lib64')

        # gfortran-built objects need libquadmath at link time.
        if 'LDFLAGS' in env and env['LDFLAGS']:
            env['LDFLAGS'] += ' ' + '-lquadmath'
        else:
            env['LDFLAGS'] = '-lquadmath'

        with working_dir('FERRET', create=False):
            os.environ['LD_X11'] = '-L/usr/lib/X11 -lX11'
            os.environ['HOSTTYPE'] = 'x86_64-linux'
            # Ferret's makefiles are not parallel-safe.
            make(parallel=False)
            make("install")
| EmreAtes/spack | var/spack/repos/builtin/packages/ferret/package.py | Python | lgpl-2.1 | 4,431 | [
"NetCDF"
] | 9cce4c6acddd1f8beb7be6073279b475b6b42c50845c9e0149c136596bd36c38 |
from __future__ import print_function
from string import Template
import neuroml
from .channel import Channel
from .biology import BiologyType
from .dataObject import DatatypeProperty, ObjectProperty, This
from .cell_common import CELL_RDF_TYPE
__all__ = ["Cell"]
# XXX: Should we specify somewhere whether we have NetworkX or something else?
ns = {'ns1': 'http://www.neuroml.org/schema/neuroml2/'}
segment_query = Template("""
SELECT ?seg_id ?seg_name ?x ?y ?z ?d ?par_id ?x_prox ?y_prox ?z_prox ?d_prox
WHERE {
?p ns1:id '$morph_name' .
?p ns1:segment ?segment .
?segment ns1:distal ?loop
; ns1:id ?seg_id
; ns1:name ?seg_name .
OPTIONAL {
?segment ns1:proximal ?loop_prox .
?loop_prox ns1:x ?x_prox
; ns1:y ?y_prox
; ns1:z ?z_prox
; ns1:diameter ?d_prox .
}
OPTIONAL {?segment ns1:parent ?par . ?par ns1:segment ?par_id }.
?loop ns1:x ?x
; ns1:y ?y
; ns1:z ?z
; ns1:diameter ?d .
}
""")
segment_group_query = Template("""
SELECT ?gid ?member ?include
WHERE {
?p ns1:id '$morph_name' .
?p ns1:segmentGroup ?seg_group .
?seg_group ns1:id ?gid .
OPTIONAL {
?seg_group ns1:include ?inc .
?inc ns1:segmentGroup ?include .
}
OPTIONAL {
?seg_group ns1:member ?inc .
?inc ns1:segment ?member .
}
}
""")
def _dict_merge(d1, d2):
    """Return a new dict combining d1 and d2, with d2's entries winning.

    Neither input dict is modified.
    """
    from itertools import chain
    # BUG FIX: the merged dict was constructed but never returned, so this
    # helper always yielded None.
    return dict(chain(d1.items(), d2.items()))
class Cell(BiologyType):
    """
    A biological cell.

    All cells with the same name are considered to be the same object.

    Parameters
    -----------
    name : str
        The name of the cell
    lineageName : str
        The lineageName of the cell
    """

    class_context = BiologyType.class_context

    rdf_type = CELL_RDF_TYPE

    # Property declarations below use the DatatypeProperty/ObjectProperty
    # factories imported from .dataObject.
    divisionVolume = DatatypeProperty()
    ''' The volume of the cell at division

    Example::

        >>> v = Quantity("600","(um)^3")
        >>> c = Cell(lineageName="AB plapaaaap")
        >>> c.divisionVolume(v)
    '''

    name = DatatypeProperty()
    ''' The 'adult' name of the cell typically used by biologists when discussing C. elegans '''

    wormbaseID = DatatypeProperty()

    description = DatatypeProperty()
    ''' A description of the cell '''

    channel = ObjectProperty(value_type=Channel,
                             multiple=True,
                             inverse_of=(Channel, 'appearsIn'))

    lineageName = DatatypeProperty()
    ''' The lineageName of the cell

    Example::

        >>> c = Cell(name="ADAL")
        >>> c.lineageName() # Returns ["AB plapaaaapp"]
    '''

    synonym = DatatypeProperty(multiple=True)

    daughterOf = ObjectProperty(value_type=This,
                                inverse_of=(This, 'parentOf'))

    parentOf = ObjectProperty(value_type=This, multiple=True)
    def __init__(self, name=None, lineageName=None, **kwargs):
        super(Cell, self).__init__(**kwargs)
        # Both identifiers are optional; only set the properties the caller
        # actually provided.
        if name:
            self.name(name)
        if lineageName:
            self.lineageName(lineageName)
def _morphology(self):
"""Return the morphology of the cell. Currently this is restricted to
`Neuron <#neuron>`_ objects.
"""
morph_name = "morphology_" + str(self.name())
# Query for segments
query = segment_query.substitute(morph_name=morph_name)
qres = self.rdf.query(query, initNs=ns)
morph = neuroml.Morphology(id=morph_name)
for r in qres:
par = False
if r['par_id']:
par = neuroml.SegmentParent(segments=str(r['par_id']))
s = neuroml.Segment(name=str(r['seg_name']),
id=str(r['seg_id']), parent=par)
else:
s = neuroml.Segment(name=str(r['seg_name']),
id=str(r['seg_id']))
if r['x_prox']:
loop_prox = neuroml.Point3DWithDiam(*(r[x] for x
in ['x_prox',
'y_prox',
'z_prox',
'd_prox']))
s.proximal = loop_prox
loop = neuroml.Point3DWithDiam(*(r[x] for x in ['x',
'y',
'z',
'd']))
s.distal = loop
morph.segments.append(s)
# Query for segment groups
query = segment_group_query.substitute(morph_name=morph_name)
qres = self.rdf.query(query, initNs=ns)
for r in qres:
s = neuroml.SegmentGroup(id=r['gid'])
if r['member']:
m = neuroml.Member()
m.segments = str(r['member'])
s.members.append(m)
elif r['include']:
i = neuroml.Include()
i.segment_groups = str(r['include'])
s.includes.append(i)
morph.segment_groups.append(s)
return morph
def blast(self):
"""
Return the blast name.
Example::
>>> c = Cell(name="ADAL")
>>> c.blast() # Returns "AB"
Note that this isn't a Property. It returns the blast extracted from
the ''first'' lineageName saved.
"""
import re
try:
ln = self.lineageName()
x = re.split("[. ]", ln)
return x[0]
except Exception:
return ""
def __str__(self):
if self.name.has_defined_value():
return str(self.name.defined_values[0].idl)
else:
return super(Cell, self).__str__()
def defined_augment(self):
return self.name.has_defined_value()
def identifier_augment(self, *args, **kwargs):
return self.make_identifier_direct(str(self.name.defined_values[0].identifier))
__yarom_mapped_classes__ = (Cell,)
| gsarma/PyOpenWorm | PyOpenWorm/cell.py | Python | mit | 6,111 | [
"BLAST",
"NEURON"
] | 364fb03e0ea6a8bbbd79d0b7f8c90965f18c0b9d4a3964ed0050970c87d45a1f |
__author__ = 'esteinig'
"""
seqD3
Version 0.1
Multiple Sequence Comparison Tool for interactive visualization in D3
Idea with reference to the BLAST Ring Image Generator (BRIG) by
Alikhan et al. (2012): http://sourceforge.net/projects/brig/
Manual and tutorials for seqD3: https://github.com/esteinig/seqD3.
Eike J. Steinig
Tong Lab, Menzies School of Health Research
eike.steinig@menzies.edu.au
"""
import os
import csv
import json
import numpy
import statistics
from Bio import SeqIO
from Bio import Entrez
from subprocess import call
from html.parser import HTMLParser
from urllib.error import HTTPError
from sklearn.preprocessing import MinMaxScaler
############# Data Module #############
class SeqData:
    """Data module to download, hold and annotate sequences with Prokka and BioPython."""

    def __init__(self):
        self.names = []
        self.colors = []
        self.fasta = []          # FASTA file names
        self.genbank = []        # GenBank file names
        self.fasta_seqs = []     # parsed SeqRecords from FASTA files
        self.genbank_seqs = []   # parsed SeqRecords from GenBank files

    def getGenbank(self, accessions, outdir=os.getcwd(), email="", db='nuccore', rettype="gb", retmode="text"):
        """Fetch records for *accessions* from NCBI Entrez and return the
        open handles (one per accession fetched so far).

        Bug fix: the original fetched into a local variable and discarded
        it, so the method had no observable effect.
        NOTE(review): *outdir* is currently unused -- kept for interface
        compatibility; confirm whether records should be written there.
        """
        Entrez.email = email
        handles = []
        try:
            for accession in accessions:
                handles.append(Entrez.efetch(db=db, rettype=rettype, retmode=retmode, id=accession))
        except HTTPError:
            print("Accession not found.")
        except IOError:
            print("Problem connecting to NCBI.")
        return handles

    def readFiles(self, files, mode='gb'):
        """Parse *files* with Bio.SeqIO and store both file names and records.

        Bug fix: the default mode 'gb' (SeqIO's alias for GenBank) never
        matched the literal comparison against 'genbank', so records parsed
        with the default mode were silently discarded.
        """
        seqs = [SeqIO.read(open(file, "r"), mode) for file in files]
        if mode in ("gb", "genbank"):
            self.genbank += [file for file in files]
            self.genbank_seqs += seqs
        elif mode == "fasta":
            self.fasta += [file for file in files]
            self.fasta_seqs += seqs

    def reset(self):
        """Drop all stored file names and parsed records."""
        self.fasta_seqs = []
        self.genbank_seqs = []
        self.fasta = []
        self.genbank = []

    def annotateFiles(self, prokka, files=None, path=None):
        """Annotate sequences with Prokka, then read the resulting .gbk files.

        Bug fix: result file names are now joined with their result
        directory; the original passed bare names from os.listdir, which
        only resolve when the result directory is the working directory.
        """
        prokka.runProkka(files=files, path=path)
        for p in prokka.result_paths:
            gbk_files = [os.path.join(p, f) for f in os.listdir(p) if f.endswith('.gbk')]
            self.readFiles(gbk_files)
        self.readFiles(prokka.files, mode='fasta')
class Prokka:
    """Thin wrapper around the Prokka annotation pipeline.

    Collects command-line settings as attributes, then runs the external
    'prokka' binary once per input file via runProkka(); the directory of
    each run's results is remembered in self.result_paths.
    """

    def __init__(self):
        """Set the default Prokka settings."""
        self.files = []
        self.path = ''
        self.prefix = ''
        self.locustag = 'PROKKA'
        self.outdir = os.getcwd()
        self.kingdom = 'Bacteria'
        self.genus = ''
        self.species = ''
        self.strain = ''
        self.force = True
        self.addgenes = True
        self.compliant = True
        self.usegenus = True
        self.cpus = 4
        self.evalue = '1e-06'
        self.result_paths = []

    def setOptions(self, outdir=os.getcwd(), force=False, addgenes=False, compliant=False, locustag='PROKKA', cpus=4,
                   evalue='1e-06', kingdom='Bacteria', genus='', species='', strain='', usegenus=False):
        """Override the default Prokka command-line settings."""
        self.outdir = outdir
        self.locustag = locustag
        self.kingdom = kingdom
        self.genus = genus
        self.species = species
        self.strain = strain
        self.force = force
        self.addgenes = addgenes
        self.compliant = compliant
        self.usegenus = usegenus
        self.cpus = str(cpus)  # subprocess arguments must be strings
        self.evalue = evalue

    def runProkka(self, files=None, path=None):
        """Run Prokka once per input file.

        Either *files* (a list of file names) or *path* (a directory whose
        regular files are all annotation inputs) must be given.
        """
        if files is None and path is None:
            raise ValueError('You must provide either a list of files or ' +
                             'the path to a directory containing only the files for annotation with Prokka.')
        if path is None:
            path = ''
            self.files = files
        else:
            self.files = [entry for entry in os.listdir(path)
                          if os.path.isfile(os.path.join(path, entry))]
        for infile in self.files:
            stem = infile.split('.')[0]
            print(stem)
            result_path = os.path.join(self.outdir, stem)
            cmd = ['prokka', '--prefix', stem, '--outdir', result_path,
                   '--locustag', self.locustag, '--cpus', self.cpus,
                   '--kingdom', self.kingdom, '--genus', self.genus,
                   '--species', self.species, '--strain', self.strain,
                   '--evalue', self.evalue]
            # Boolean switches are appended only when enabled.
            for flag, enabled in (('--force', self.force),
                                  ('--addgenes', self.addgenes),
                                  ('--compliant', self.compliant),
                                  ('--usegenus', self.usegenus)):
                if enabled:
                    cmd.append(flag)
            cmd.append(os.path.join(path, infile))
            call(cmd)
            self.result_paths.append(result_path)
######### Style Handlers #######
# brigD3
# seqD3
############# Generators #############
class RingGenerator:
    """
    Class: Ring Generator
    Initiate with list of rings and set options for visualization. The generator transforms the ring data into
    a list of dictionaries for writing as JSON. Options can be set via setOptions. The main access is the seqD3 method,
    which initiates the visualization class seqD3 and sets its parameters in the string of the script.
    The method then writes HTML file to working directory.
    Attributes:
    self.rings: list, ring objects
    self.project: str, name of output files
    self.radius: int, radius of the ring center
    self.gap: int, gap between the rings
    self.data: dict, rings and chords (keys) data for seqD3
    """

    def __init__(self, rings):
        """Initialize generator with list of rings."""
        self.rings = rings
        self.project = 'seqD3'
        self.radius = 300   # inner radius of the innermost ring
        self.gap = 10       # radial gap between consecutive rings
        self.data = {'rings': [], 'chords': []}
        self.options = {}   # placeholder -> value map substituted into the JS script
        # D3 transition snippets, spliced verbatim into the script when
        # animation is enabled (see setOptions).
        self.ring_animation = """.transition()
.delay(function(d, i){ return i * 1; })
.duration(5000)
.style("opacity", function(d){ return d.opacity; })
"""
        self.chord_animation = """.transition()
.delay(5000)
.duration(5000)
.style("opacity", function(d){ return d.opacity; })
"""
        self.text_animation = ".transition().duration(5000).delay(2000)"

    def getData(self):
        """Transform data from rings to data appropriate for D3"""
        # All rings must describe sequences of the same total length.
        lengths = set([ring.length for ring in self.rings])
        if len(lengths) > 1:
            raise ValueError('Ring lengths are not the same.')
        self.options['main_length'] = lengths.pop()
        radius = self.radius
        for ring in self.rings:
            print(radius)
            for seg in ring.data:
                # Convert each segment's relative height into absolute
                # inner/outer radii for the D3 arc generator.
                height = seg.pop('height')
                seg['inner'] = float(radius)
                seg['outer'] = float(radius + height)
                self.data['rings'].append(seg)
            radius = radius + ring.height + self.gap
            # Only SequenceRings carry chord (ribbon) data.
            if isinstance(ring, SequenceRing):
                if ring.chords is not None:
                    self.data['chords'] += ring.chord_data

    def setOptions(self, radius=300, gap=5, project='Test1', title='SCCmec-IV', title_x=40, title_y=40,
                   width=1700, height=1000, animated=True):
        """Set options for circle generator and visualization with D3."""
        self.radius = radius
        self.gap = gap
        self.project = project
        self.options['main_title'] = title
        self.options['main_width'] = width
        self.options['main_height'] = height
        self.options['title_width'] = title_x
        self.options['title_height'] = title_y
        # Animated: splice the transition snippets in and start elements at
        # opacity 0. Static: no transitions, elements drawn at final opacity.
        if animated:
            r_ani = self.ring_animation
            c_ani = self.chord_animation
            t_ani = self.text_animation
            s_op = 0
        else:
            r_ani = ''
            c_ani = ''
            t_ani = ''
            s_op = 'function(d, i){ return d.opacity; }'
        self.options['ring_animation'] = r_ani
        self.options['chord_animation'] = c_ani
        self.options['text_animation'] = t_ani
        self.options['start_opacity'] = s_op

    def seqD3(self):
        """Write HTML file for seqD3 to working directory."""
        self.getData()
        print('\nWriting visualization ', self.project + '.html', 'to working directory ...\n')
        viz = seqD3(self.options, self.data)
        viz.setScript()
        viz.writeHTML(self.project)
# Add manual chord placement.
class ChordGenerator:
    """Generate combinations of BLAST comparisons between FASTA files and
    package each comparison as a ChordSet.

    Modes:
      'neighbor' -- each sequence is compared against the next one, the last
                    wrapping around to the first.
      'ordered'  -- an explicit {source_file: [target_files]} mapping.
    """

    def __init__(self):
        self.colors = []          # chord color per query sequence (optional)
        self.mode = 'neighbor'
        self.min_identity = 90    # minimum % identity for a chord
        self.min_length = 2000    # minimum alignment length for a chord
        self.chords = []          # accumulated ChordSet objects
        # Bug fix: initialized here so attribute access cannot precede
        # getChords (the original only assigned it inside getChords).
        self.fastas = []

    def setOptions(self, colors=None, mode='neighbor', min_identity=70, min_length=1000):
        """Set comparison mode, per-sequence chord colors and BLAST filters."""
        self.mode = mode
        self.colors = colors
        self.min_identity = min_identity
        self.min_length = min_length

    def _color(self, idx):
        """Chord color for sequence *idx*.

        Bug fix: falls back to ChordSet's default 'gray' when no color list
        was supplied; the original crashed with a TypeError on the
        setOptions default of colors=None.
        """
        if self.colors:
            return self.colors[idx]
        return 'gray'

    def _makeChordSet(self, db, query, src_idx, db_idx, color):
        """BLAST *query* against *db* (FASTA file names) and wrap the result
        in a configured ChordSet."""
        db_name = db.split('.')[0]
        query_name = query.split('.')[0]
        blaster = Blaster(db, [query])
        blaster.db = db_name
        blaster.runBLAST()
        chords = ChordSet()
        chords.color = color
        chords.source = src_idx
        chords.target = db_idx
        chords.src_name = query_name
        chords.tar_name = db_name
        chords.min_identity = self.min_identity
        chords.min_length = self.min_length
        chords.readComparison(query_name + 'vs' + db_name)
        return chords

    def getChords(self, fastas=None, chord_order=None):
        """Run the comparisons and return the list of ChordSets."""
        self.fastas = fastas
        if self.mode == 'neighbor':
            # Neighbor comparisons of .fasta sequences with BLAST; the last
            # sequence wraps around to the first.
            for i in range(len(self.fastas)):
                db = self.fastas[i]
                if i != len(self.fastas) - 1:
                    query = self.fastas[i + 1]
                    db_idx = i + 1
                    src_idx = i + 2
                else:
                    query = self.fastas[0]
                    db_idx = i + 1
                    src_idx = 1
                self.chords.append(self._makeChordSet(db, query, src_idx, db_idx, self._color(i)))
            if len(fastas) == 2:
                # With two sequences the two neighbor comparisons are mirror
                # images of each other; keep only the first.
                self.chords = [self.chords[0]]
            return self.chords
        elif self.mode == "ordered":
            if chord_order is None:
                # Dictionary Keys: single file to be connected; Dictionary Values: List of files to connect to
                raise ValueError("In ordered mode, please provide a dictionary of chord orders using the given files.")
            for source, targets in chord_order.items():
                for target in targets:
                    db_idx = self.fastas.index(source) + 1
                    src_idx = self.fastas.index(target) + 1
                    self.chords.append(self._makeChordSet(source, target, src_idx, db_idx,
                                                          self._color(self.fastas.index(target))))
            return self.chords
############# Ring Superclass, Ring Subclasses and Readers #############
class Ring:
    """
    Super Class Ring:
    A ring object reads and transforms data into the data shape accessed by the RingGenerator and JS. The standard
    Ring has base attributes colour, name, height and the tooltip style that can be set by the user via setOptions.
    The standard Ring reads a comma-delimited file via readRing containing one header row and one row per segment
    with the columns:
    Segment Name, Start, End, Color, Height, Opacity, Tooltip Text, Tooltip HTML
    It writes data in the same format via writeRing. Ring objects can also be merged via mergeRings, which adds
    multiple rings' data (list of ring objects) to the current ring object. Attributes of the ring object which
    called the method are retained.
    Subclasses of the ring object have additional attributes pertaining to their function, as well as different
    readers for data files.
    Attributes:
    self.name: str, name to be shown in tooltips
    self.color: str, color of ring, hex or name
    self.height: int, height of ring
    self.tooltip: obj, tooltip object to set header and text colors
    """

    def __init__(self):
        """Initialize ring object with default display options and empty data."""
        self.color = 'black'
        self.name = ''
        self.height = 20
        self.opacity = 0.8
        self.tooltip = Tooltip()
        self.positions = []   # [start, end] per segment
        self.popups = []      # tooltip HTML per segment
        self.colors = []
        self.heights = []
        self.names = []
        self.opacities = []
        self.data = []        # per-segment dicts consumed by RingGenerator

    def mergeRings(self, rings):
        """Merge the current ring with a list of ring objects. Add data of ring objects to current ring."""
        for ring in rings:
            self.data += ring.data

    def setOptions(self, name='Ring', color='black', height=20, opacity=0.8, tooltip=None):
        """Set basic attributes for ring object."""
        self.name = name
        self.color = color
        self.height = height
        self.opacity = opacity
        if tooltip is None:
            self.tooltip = Tooltip()
        else:
            self.tooltip = tooltip

    def getRing(self):
        """Assemble self.data from the parallel per-segment lists; return it."""
        n = len(self.positions)
        print('Generating Ring:', self.name)
        for i in range(n):
            data_dict = {}
            data_dict['start'] = self.positions[i][0]
            data_dict['end'] = self.positions[i][1]
            data_dict['color'] = self.colors[i]
            data_dict['text'] = self.popups[i]
            data_dict['height'] = self.heights[i]
            data_dict['name'] = self.names[i]
            data_dict['opacity'] = self.opacities[i]
            self.data.append(data_dict)
        return self.data

    def writeRing(self, file):
        """Write raw ring data to comma-delimited file."""
        with open(file, 'w') as outfile:
            w = csv.writer(outfile, delimiter=',')
            header = [['Segment Name', 'Start', 'End', 'Color', 'Height', 'Opacity', 'Tooltip Text', 'Tooltip HTML']]
            w.writerows(header)
            d = [[segment['name'], segment['start'], segment['end'], segment['color'], segment['height'],
                  segment['opacity'], strip_tags(segment['text']), segment['text']] for segment in self.data]
            w.writerows(d)

    def readRing(self, file):
        """Read raw ring data from a comma-delimited file (writeRing format)."""
        self._clear()
        with open(file, 'r') as infile:
            reader = csv.reader(infile)
            header = []
            for row in reader:
                if header:
                    self.names.append(row[0])
                    self.heights.append(float(row[4]))
                    self.colors.append(row[3])
                    self.positions.append([float(row[1]), float(row[2])])
                    self.popups.append(row[7])
                    # Bug fix: convert to float; the original stored the raw
                    # CSV string here, inconsistent with the 'opacity' value
                    # in self.data below and with every other reader.
                    self.opacities.append(float(row[5]))
                    data = {}
                    data['name'] = row[0]
                    data['start'] = float(row[1])
                    data['end'] = float(row[2])
                    data['color'] = row[3]
                    data['height'] = float(row[4])
                    data['opacity'] = float(row[5])
                    data['text'] = row[7]
                    self.data.append(data)
                else:
                    # First row is the header; remember it so subsequent
                    # rows are treated as data.
                    header = row

    def _clear(self):
        """Clear all ring data."""
        self.heights = []
        self.colors = []
        self.positions = []
        self.popups = []
        self.opacities = []
        self.names = []
        self.data = []
class AnnotationRing(Ring):
    """Sub-class Annotation Ring: ring segments built from genome
    annotations (GenBank), SNP tables, or manually supplied segment dicts."""

    def __init__(self):
        Ring.__init__(self)
        self.feature_types = ['CDS']  # GenBank feature types to include
        self.extract = {'gene': 'Gene: ', 'product': 'Product: '}  # qualifier -> tooltip label
        self.snp_length = 100  # drawn width (bp) of each SNP marker
        # Segment colors for the three SNP classes; self.color is the fallback.
        self.intergenic = 'yellow'
        self.synonymous = 'orange'
        self.non_synonymous = 'red'
        self.tooltip = Tooltip()
        self.length = 0  # total sequence length, set by the readers

    def readSNP(self, file, n=1):
        """Read SNP data from comma_delimited file (without header): SNP ID, Location, Type, Notes, Ref + N x SNPs"""
        self._clear()
        n += 4  # Index 4 is Reference, n = 0: Reference, n = 1: first Sample
        with open(file, 'r') as infile:
            reader = csv.reader(infile)
            for row in reader:
                # Keep the SNP when the chosen sample differs from the
                # reference (or when the reference column itself was asked for).
                if row[n] != row[4] or n == 4:
                    self.positions.append([int(row[1])-self.snp_length//2, int(row[1])+self.snp_length//2])
                    if row[2] == 'intergenic':
                        self.colors.append(self.intergenic)
                    elif row[2] == 'synonymous':
                        self.colors.append(self.synonymous)
                    elif row[2] == 'non-synonymous':
                        self.colors.append(self.non_synonymous)
                    else:
                        self.colors.append(self.color)
                    self.heights.append(self.height)
                    self.popups.append(self.tooltip.getPopup([('Sequence: ', self.name), ('SNP: ', row[0]),
                                                              ('Location: ', row[1]), ('Type: ', row[2]),
                                                              ('Note: ', row[3])]))
                    self.opacities.append(self.opacity)
                    self.names.append('')
        self.getRing()

    def readGenbank(self, file, strict=False):
        """Read genbank annotation file and extract relevant features and qualifiers."""
        seq = SeqIO.read(open(file, "r"), "genbank")
        self.length = len(seq)
        features = [feature for feature in seq.features if feature.type in self.feature_types]
        # NOTE(review): with strict=True, any feature missing one of the
        # self.extract qualifiers is dropped entirely -- confirm intended.
        if strict:
            clean = []
            for feature in features:
                check = True
                for q in self.extract.keys():
                    if q not in feature.qualifiers:
                        check = False
                if check:
                    clean.append(feature)
            features = clean
        for feature in features:
            self.positions.append([int(feature.location.start), int(feature.location.end)])
            self.colors.append(self.color)
            self.heights.append(self.height)
            self.names.append('')
            self.opacities.append(self.opacity)
            # Build the tooltip text: extracted qualifiers, prefixed with
            # the feature location and the sequence name.
            qualifier_texts = []
            for qualifier in self.extract.keys():
                if qualifier in feature.qualifiers:
                    text_tuple = (self.extract[qualifier], ''.join(feature.qualifiers[qualifier]))
                    qualifier_texts.append(text_tuple)
            qualifier_texts.insert(0, ('Location: ', str(feature.location.start) + '-' + str(feature.location.end)))
            qualifier_texts.insert(0, ('Sequence: ', self.name))
            popup = self.tooltip.getPopup(qualifier_texts)
            self.popups.append(popup)
        self.getRing()

    def readSegments(self, segs, length):
        """Manual sequence generation for annotation.

        *segs* is a list of dicts with keys start, end, color, text, name
        and opacity; *length* is the total sequence length.
        """
        self.length = length
        for seq in segs:
            self.positions.append([int(seq['start']), int(seq['end'])])
            self.colors.append(seq['color'])
            self.popups.append(seq['text'])
            self.heights.append(self.height)
            self.names.append(seq['name'])
            self.opacities.append(seq['opacity'])
        self.getRing()
class BlastRing(Ring):
    """Sub-class Blast Ring, for depicting BLAST comparisons against a reference DB"""

    def __init__(self):
        """Initialize super-class ring and attributes for Blast Ring."""
        Ring.__init__(self)
        self.min_identity = 70    # minimum % identity for a hit to be drawn
        self.min_length = 100     # minimum alignment length (bp) to be drawn
        self.values = []          # % identity of each retained hit

    def setOptions(self, name='Ring', color='black', height=20, opacity=0.8, tooltip=None, min_identity=0.7,
                   min_length=100):
        """Set display options plus BLAST filters. *min_identity* is a
        fraction (e.g. 0.7) converted to a percentage internally.

        Bug fix: forward the display options; the original called
        super().setOptions() with no arguments, silently resetting
        name/color/height/opacity to their defaults.
        """
        self.min_identity = min_identity * 100
        self.min_length = min_length
        super(BlastRing, self).setOptions(name=name, color=color, height=height,
                                          opacity=opacity, tooltip=tooltip)

    def readComparison(self, file):
        """Reads comparison files from BLAST output (--outfmt 6)"""
        self._clear()
        with open(file, 'r') as infile:
            reader = csv.reader(infile, delimiter='\t')
            for row in reader:
                # Columns 8/9 are subject start/end; sorting normalizes strand.
                positions = sorted([int(row[8]), int(row[9])])
                if positions[1] - positions[0] >= self.min_length and float(row[2]) >= self.min_identity:
                    self.positions.append(positions)
                    self.values.append(float(row[2]))
        self.colors = [self.color for v in self.values]
        self.heights = [self.height for v in self.values]
        self.opacities = [self.opacity for v in self.values]
        self.names = ['' for v in self.values]
        texts = [[('Sequence: ', self.name), ('BLAST Identity: ', str(v) + '%')] for v in self.values]
        self.popups = [self.tooltip.getPopup(text) for text in texts]
        self.getRing()
class CoverageRing(Ring):
    """Subclass Coverage Ring for depicting coverage matrix across genomes (single or average)."""

    def __init__(self):
        """Initialize super-class ring and attributes for Coverage Ring."""
        Ring.__init__(self)
        self.threshold = 0.96     # coverage below this gets the warning color
        self.below = '#E41B17'    # color for below-threshold segments

    def setOptions(self, name='Ring', color='black', height=20, opacity=0.8, tooltip=None, threshold=0.9,
                   threshold_color='#E41B17'):
        """Set display options plus the coverage threshold and its color.

        Bug fix: forward the display options; the original called
        super().setOptions() with no arguments, silently resetting
        name/color/height/opacity to their defaults.
        """
        self.threshold = threshold
        self.below = threshold_color
        super(CoverageRing, self).setOptions(name=name, color=color, height=height,
                                             opacity=opacity, tooltip=tooltip)

    def readCoverage(self, file, sep='\t', mean=True, rescale=False, n=5):
        """Read coverage matrix from file (with header):
        Segment ID, Start Position, End Position, Value Sample1, Value Sample2...

        mean=True averages all sample columns; otherwise column *n* is used.
        rescale=True min-max scales the coverage values before drawing.
        """
        self._clear()
        with open(file, 'r') as infile:
            reader = csv.reader(infile, delimiter=sep)
            values = []
            header = []
            texts = []
            for row in reader:
                if header:
                    start = int(row[1])
                    end = int(row[2])
                    if mean:
                        value = statistics.mean([float(v) for v in row[3:]])
                        cov = 'Mean Coverage: '
                    else:
                        value = float(row[n])
                        cov = 'Coverage: '
                    values.append(value)
                    self.positions.append((start, end))
                    # Flag below-threshold segments with the warning color.
                    color = self.below if value < self.threshold else self.color
                    self.colors.append(color)
                    texts.append([('Sequence: ', self.name), ('Location: ', str(start) + ' - ' + str(end)),
                                  (cov, format(value, ".2f"))])
                    self.opacities.append(self.opacity)
                    self.names.append('')
                else:
                    header = row
        if rescale:
            # Bug fixes: MinMaxScaler requires a 2-D array (the original
            # passed a 1-D array, which raises), and the result of
            # values.tolist() was discarded instead of assigned.
            scaled = MinMaxScaler().fit_transform(numpy.array(values).reshape(-1, 1))
            values = scaled.ravel().tolist()
        self.heights = [value * self.height for value in values]
        self.popups = [self.tooltip.getPopup(text) for text in texts]
        self.getRing()
class SequenceRing(Ring):
    """Sub-class Sequence Ring: concatenates several segment rings into one
    circle (with gaps between sequences) and attaches BLAST chords that
    connect homologous regions between the sequences."""

    def __init__(self, segments, chords=None):
        """*segments*: list of Ring objects, one per sequence.
        *chords*: optional list of ChordSet objects connecting them."""
        Ring.__init__(self)
        self.segments = segments
        self.chords = chords
        self.length = 0          # total length of the combined circle
        self.radius = 300        # radius at which chords attach
        self.gap = 5000          # bp-equivalent gap inserted between sequences
        self.seq_starts = []     # start offset of each sequence in the circle
        self.chord_data = []     # chord dicts consumed by RingGenerator

    def setOptions(self, name='Ring', color=None, height=20, opacity=0.8, tooltip=None, chord_radius=300, gap=5000):
        """Set display options; chord_radius and gap are specific to this subclass.

        Bug fix: forward the display options; the original called
        super().setOptions() with no arguments, silently resetting
        name/height/opacity to their defaults.
        """
        self.radius = chord_radius
        self.gap = gap
        super(SequenceRing, self).setOptions(name=name,
                                             color='black' if color is None else color,
                                             height=height, opacity=opacity, tooltip=tooltip)

    def generateSequence(self):
        """Build the combined ring and its chords."""
        self.combineSegments()
        self.addChords()

    def combineSegments(self):
        """Concatenate the segment rings, offsetting positions so that each
        sequence starts after the previous one plus the gap."""
        start = 0
        for seg in self.segments:
            self.positions += [[p + start for p in pos] for pos in seg.positions]
            self.heights += [self.height for h in seg.heights]
            self.popups += seg.popups
            self.colors += seg.colors
            self.names += seg.names
            self.opacities += seg.opacities
            self.seq_starts.append(start)
            start += seg.length + self.gap
        self.length = start
        self.getRing()

    def addChords(self):
        """Translate each ChordSet's positions into the combined coordinate
        system and store dicts in the shape the D3 chord layout expects."""
        if self.chords is None:
            print('No chords added to Sequence Ring.')
        else:
            for chord in self.chords:
                for i in range(len(chord.source_positions)):
                    src_pos = sorted([int(chord.source_positions[i][0]),
                                      int(chord.source_positions[i][1])])
                    target_pos = sorted([int(chord.target_positions[i][0]),
                                         int(chord.target_positions[i][1])])
                    # Offset each end by the start of the sequence it belongs
                    # to (chord.source/target are 1-based sequence indices).
                    data = {"source": {"start": src_pos[0] + self.seq_starts[chord.source - 1],
                                       "end": src_pos[1] + self.seq_starts[chord.source - 1],
                                       "radius": self.radius},
                            "target": {"start": target_pos[0] + self.seq_starts[chord.target - 1],
                                       "end": target_pos[1] + self.seq_starts[chord.target - 1],
                                       "radius": self.radius},
                            "options": {"color": chord.colors[i],
                                        "text": chord.texts[i]},
                            "opacity": chord.opacities[i]}
                    self.chord_data.append(data)
############# Chord Class and Reader #############
class ChordSet:
    """Holds the data for one set of ribbons (chords) drawn between two sequences."""

    def __init__(self):
        """Start with empty chord lists and default filter/display settings."""
        self.source_positions = []
        self.target_positions = []
        self.colors = []
        self.opacities = []
        self.texts = []
        self.tooltip = Tooltip()
        self.opacity = 0.3
        self.color = 'gray'
        self.source = 2  # Query Sequence
        self.target = 1  # DB
        self.src_name = 'Seq 1'
        self.tar_name = 'Seq 2'
        self.min_identity = 70
        self.min_length = 1000

    def readComparison(self, file):
        """Parse a BLAST tabular output file (-outfmt 6) into chord data.

        Hits are kept when at least one aligned region reaches min_length
        and the identity reaches min_identity.
        """
        with open(file, 'r') as infile:
            for row in csv.reader(infile, delimiter='\t'):
                src_pos = sorted([int(row[6]), int(row[7])])
                tar_pos = sorted([int(row[8]), int(row[9])])
                long_enough = (src_pos[1] - src_pos[0] >= self.min_length
                               or tar_pos[1] - tar_pos[0] >= self.min_length)
                if not long_enough or float(row[2]) < self.min_identity:
                    continue
                self.source_positions.append(src_pos)
                self.target_positions.append(tar_pos)
                self.colors.append(self.color)
                self.opacities.append(self.opacity)
                text = [('Comparison: ', self.src_name + ' vs. ' + self.tar_name), ('BLAST Identity: ', str(row[2]) + '%'),
                        ('Location ' + self.src_name + ': ', ' - '.join(str(s) for s in sorted(src_pos))),
                        ('Location ' + self.tar_name + ': ', ' - '.join(str(s) for s in sorted(tar_pos)))]
                self.texts.append(self.tooltip.getPopup(text))
############# BLAST #############
class Blaster:
    """
    Convenience wrapper around the BLAST+ command line tools (must be on $PATH).

    Construct with the reference genome file (.fasta) and a list of query
    sequence files (.fasta). Calling runBLAST() builds the reference database
    and compares every query against it; the names of the tabular output
    files accumulate in self.results and can be iterated over to create
    Blast Rings.
    """

    def __init__(self, reference, queries):
        """Store the reference and query file names plus default BLAST settings."""
        self.reference = reference
        self.queries = queries
        self.db = 'ReferenceDB'   # name of the database built from the reference
        self.type = 'nucl'        # database type: 'nucl' or 'prot'
        self.mode = 'blastn'      # comparison mode: 'blastn' or 'blastp'
        self.results = []         # names of the output comparison files

    def _getDB(self):
        """Build the reference BLAST database with makeblastdb."""
        db_cmd = ['makeblastdb', '-in', os.path.join(os.getcwd(), self.reference),
                  '-dbtype', self.type, '-out', self.db]
        call(db_cmd)
        print('\n')

    def runBLAST(self):
        """Compare every query file against the reference database."""
        self._getDB()
        ref_stem = self.reference.split('.')[0]
        for query_file in self.queries:
            query_stem = query_file.split('.')[0]
            print('Blasting', query_file, 'against Reference DB ...')
            out_name = query_stem + 'vs' + ref_stem
            blast_cmd = [self.mode, '-query', os.path.join(os.getcwd(), query_file),
                         '-db', self.db, '-outfmt', '6', '-out',
                         os.path.join(os.getcwd(), out_name)]
            call(blast_cmd)
            self.results.append(out_name)
            print('\n')
############# D3 Visualization #############
class seqD3:
    """Helper class Visualization, holds script for JS D3. Methods to write replace options from Ring Generator in
    script and write the HTML. Initialize with options dict and data from Ring Generator. """

    def __init__(self, options, data):
        # options: placeholder -> value map built by RingGenerator.setOptions
        # and getData (main_width, ring_animation, start_opacity, ...).
        # data: {'rings': [...], 'chords': [...]} segment/chord dicts.
        self.options = options
        self.data = data
        # Static HTML head: CSS for arc labels, tooltips and the title, plus
        # the D3 v3 script tag.
        self.head = """
<!DOCTYPE html>
<meta charset="UTF-8">
<html lang="en">
<style>
.arcText {
fill: #6B6B6B;
font-size: 11px;
font-family: 'Courgette', sans-serif;
}
div.tooltip {
position: absolute;
text-align: left;
max-width: 300px;
padding: 11px;
font-size: 11px;
font-family: 'Courgette', sans-serif;
background: #FEFCFF;
border-radius: 11px;
pointer-events: none;
}
.title {
fill: #565051;
font-size: 14px;
font-family: 'Courgette', sans-serif;
}
</style>
<head>
<title></title>
</head>
<body>
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
"""
        # Opening tag for the embedded ring JSON (filled by writeHTML).
        self.data1 = """
<script type="application/json" id="rings">
"""
        # Closes the ring JSON block and opens the chord JSON block.
        self.data2 = """
</script>
<script type="application/json" id="chords">
"""
        # The D3 drawing script. Placeholder tokens (main_width,
        # main_height, main_length, start_opacity, ring_animation,
        # chord_animation, text_animation, title_width, title_height,
        # main_title) are substituted by setScript().
        self.script = """
</script>
<script>
var width = main_width
var height = main_height
var length = main_length
var pi = Math.PI
var rings = JSON.parse(document.getElementById('rings').innerHTML);
var chords = JSON.parse(document.getElementById('chords').innerHTML);
// Main Body with Zoom
var chart = d3.select("body")
.append("svg")
.attr("id", "chart")
.attr("width", width)
.attr("height", height)
.call(d3.behavior.zoom().on("zoom", function () {
chart.attr("transform", "translate(" + d3.event.translate + ")" + " scale(" + d3.event.scale + ")")
}))
.append("g");
// Tooltip CSS
var div = d3.select("body").append("div")
.attr("class", "tooltip")
.style("opacity", 0);
// Circle Scaler
var degreeScale = d3.scale.linear()
.domain([0, length])
.range([0,360]);
// Rings
var ringShell = chart.append("g")
.attr("transform", "translate(" + width / 2 + "," + height / 2 + ")");
var seqArc = d3.svg.arc(rings)
.innerRadius(function(d, i){return d.inner;})
.outerRadius(function(d, i){return d.outer;})
.startAngle(function(d, i){return degreeScale(d.start) * (pi/180);})
.endAngle(function(d, i){return degreeScale(d.end) * (pi/180);}) ;
ringShell.selectAll("path")
.data(rings)
.enter()
.append("path")
.style("fill", function(d, i){ return d.color; })
.style("opacity", start_opacity)
.attr("id", function(d,i) { return "seqArc_"+i; })
.attr("d", seqArc)
.attr('pointer-events', 'none')
.on("mouseover", function(d){
div.transition()
.duration(200)
.style("opacity", .9);
div .html(d.text)
.style("left", (d3.event.pageX + 20) + "px")
.style("top", (d3.event.pageY + 10) + "px");
})
.on('mouseout', function(d) {
div.transition()
.duration(200)
.style("opacity", 0)
})
ring_animation
.transition().attr('pointer-events', 'visible');
ringShell.selectAll(".arcText")
.data(rings)
.enter().append("text")
.attr("dy", -13)
.style("opacity", 1)
.attr("class", "arcText")
.append("textPath")
.attr("startOffset", '10%')
.attr("xlink:href",function(d,i){return "#seqArc_"+i;})
.transition().delay(11000)
.text(function(d){return d.name; });
// Chord Elements
var cordShell = chart.append("g")
.attr("class", "chords")
.attr("transform", "translate(" + width / 2 + "," + height / 2 + ")");
var chord = d3.svg.chord(chords)
.radius(function(d, i) {return d.radius;})
.startAngle(function(d, i){return degreeScale(d.start) * (pi/180); })
.endAngle(function(d, i){return degreeScale(d.end) * (pi/180);});
cordShell.selectAll("path")
.data(chords)
.enter()
.append("path")
.attr("class", "chord")
.style("fill", function(d) { return d.options.color; })
.style("opacity", start_opacity)
.attr("d", chord)
.attr('pointer-events', 'none')
.on("mouseover", function(){
d3.select(this).transition().duration(300).style("opacity", 0.8);
})
.on("mouseout", function(){
d3.select(this).transition().duration(300).style("opacity", 0.3);
})
.on("mousedown", function(d){
div.transition()
.duration(200)
.style("opacity", .9);
div .html(d.options.text)
.style("left", (d3.event.pageX + 20) + "px")
.style("top", (d3.event.pageY + 10) + "px");
})
.on('mouseup', function(d) {
div.transition()
.duration(200)
.style("opacity", 0)
})
chord_animation
.transition().attr('pointer-events', 'visible');
// Title
titleShell = chart.append("g")
.attr("transform", "translate(" + (width / 100) * title_width + "," + (height / 100) * title_height + ")");
titleShell.append("text")
.style("opacity", 0)
.style("text-anchor", "middle")
.style("font-size", "300%")
.style("font-weight", "bold")
.style("font-family", "times")
.attr("class", "title")
.text("main_title")
text_animation
.style("opacity", 1);
</script>
</body>
</html>
"""

    def setScript(self):
        """Replace placeholder values in script with given options."""
        # NOTE(review): plain str.replace on the placeholder names -- this
        # relies on placeholders not being substrings of one another or of
        # genuine JS identifiers; confirm when adding new placeholders.
        for placeholder, value in self.options.items():
            self.script = self.script.replace(str(placeholder), str(value))

    def writeHTML(self, project):
        """Write script to HTML."""
        # Write (truncate) the head first, then append the two embedded JSON
        # data blocks and the substituted D3 script.
        with open(project + '.html', 'w') as outfile:
            outfile.write(self.head)
        with open(project + '.html', 'a') as outfile:
            outfile.write(self.data1)
            json.dump(self.data['rings'], outfile, indent=4, sort_keys=True)
            outfile.write(self.data2)
            json.dump(self.data['chords'], outfile, indent=4, sort_keys=True)
            outfile.write(self.script)
############# Helper Classes #############
class Tooltip:
    """Configures the colors used when rendering D3 tooltip HTML.

    (Under construction: will eventually hold more customization options.)
    """

    def __init__(self):
        """Default to the same dark gray for headers and body text."""
        self.text_color = '#565051'
        self.head_color = '#565051'

    def getPopup(self, text):
        """Render *text*, a sequence of (header, value) string pairs such as
        ('Genome: ', 'DAR4145'), as an HTML string for the tooltip body.

        Returns '-' when *text* is empty.
        """
        if not text:
            return '-'
        parts = []
        for header, value in text:
            parts.append(
                '<strong><span style="color:{0}">{1}</span></strong>'
                '<span style="color:{2}">{3} </span><br>'.format(
                    self.head_color, header, self.text_color, value))
        return ''.join(parts)
class MLStripper(HTMLParser):
    """HTMLParser subclass that keeps only the text content of a document.

    Feed it markup with feed(); retrieve the accumulated plain text with
    get_data(). Used to derive the plain-text tooltip column when writing
    ring data to CSV.
    """

    def __init__(self):
        super().__init__()
        self.reset()
        self.strict = False
        self.convert_charrefs = True
        self.fed = []  # collected text fragments, in document order

    def handle_data(self, chunk):
        # Tags are ignored by omission; only text nodes are collected.
        self.fed.append(chunk)

    def get_data(self):
        """Return the concatenated text seen so far."""
        return ''.join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed, keeping only the text content."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
| esteinig/seqD3 | seqD3.py | Python | gpl-2.0 | 42,067 | [
"BLAST",
"Biopython"
] | d2244033b13d47397ad93ff21caedbb32582381c02c382a29f2f310c69187625 |
"""
Tools for analysing the relationship between tidal stresses and tectonics on
icy satellites.
Written by U{Zane Selvans <http://zaneselvans.org>}
(C{U{zane.selvans@colorado.edu <mailto:zane.selvans@colorado.edu>}}) as part of
his Ph.D. dissertation research.
C{satstress} is released under GNU General Public License (GPL) version 3. For
the full text of the license, see: U{http://www.gnu.org/}
The project is hosted at Google Code: U{http://code.google.com/p/satstress}
1 Installation
==============
Hopefully getting C{satstress} to work on your system is a relatively painless
process; however, the software does assume you have basic experience with the
Unix shell and programming within a Unix environment (though it should work on
Windows too). In particular, this installation information assumes you already
have and are able to use:
- compilers for both C and Fortran. Development has been done on Mac OS X
(10.5) using the GNU compilers C{gcc} and C{g77}, so those should
definitely work. On other systems, with other compilers, your mileage may
vary.
- the C{make} utility, which manages dependencies between files.
1.1 Other Required and Recommended Software
-------------------------------------------
To get the L{satstress} package working, you'll need to install some other
(free) software first:
- B{Python 2.5} or later (U{http://www.python.org}). If you're running a
recent install of Linux, or Apple's Leopard operating system (OS X 10.5.x),
you already have this. Python is also available for Microsoft Windows, and
just about any other platform you can think of.
- B{SciPy} (U{http://www.scipy.org}), a collection of scientific libraries
that extend the capabilities of the Python language.
In addition, if you want to use L{gridcalc}, you'll need:
- B{netCDF} (U{http://www.unidata.ucar.edu/software/netcdf/}), a library of
routines for storing, retrieving, and annotating regularly gridded
multi-dimensional datasets. Developed by U{Unidata
<http://www.unidata.ucar.edu>}
- B{netcdf4-python} (U{http://code.google.com/p/netcdf4-python/}), a Python
interface to the netCDF library.
If you want to actually view L{gridcalc} output, you'll need a netCDF file
viewing program. Many commercial software packages can read netCDF files, such
as ESRI ArcGIS and Matlab. A simple and free reader for OS X is U{Panoply
<http://www.giss.nasa.gov/tools/panoply/>}, from NASA. If you want to really
be able to interact with the outputs from this model, you should install and
get familiar with:
- B{Matplotlib/Pylab} (U{http://matplotlib.sourceforge.net/}), a Matlab-like
interactive plotting and analysis package, which uses Python as its
"shell".
1.2 Building and Installing satstress
-------------------------------------
Once you have the required software prerequisites installed, uncompress and
unarchive the satstress distribution::
tar -xvzf satstress-X.Y.Z.tar.gz
then go into the distribution directory created::
cd satstress-X.Y.Z
To build and test the package, run::
make test
If the test cases pass, go ahead and install with::
make install
And you'll be able to write your own Python programs using the C{satstress}
library.
If you're not using the GNU Fortran 77 compiler C{g77}, you'll need to edit the
C{Makefile} for the Love number code::
satstress/love/john_wahr/Makefile
and tell it what Fortran compiler it ought to be using.
If you have any trouble getting C{satstress} working, feel free to post to the
satstress discussion board: U{http://groups.google.com/group/satstress}
2 Design Overview
=================
A few notes on the general architecture of the C{satstress} package.
2.1 Who is the Audience?
------------------------
In writing this software and documentation, my hope is that an undergraduate
research assistant who has been hired for the summer, and who has at least
some experience with programming (though not necessarily in Python), should
be able to understand how the system works, and make fruitful use of it. So
if it seems like things are sometimes over-explained or over-commented,
that's why.
2.2 A Toolkit, not a Program
----------------------------
The C{satstress} package is not itself a stand-alone program (or not much of
one anyway). Instead it is a set of tools with which you can build programs
that need to know about the stresses on the surface of a satellite, and how
they compare to tectonic features, so you can do your own hypothesizing and
testing.
2.3 Object Oriented
-------------------
The package attempts to make use of U{object oriented programming
<http://en.wikipedia.org/wiki/Object-oriented_programming>} (OOP) in order to
maximize the re-usability and extensibility of the code. Many scientists are
more familiar with the U{imperative programming style
<http://en.wikipedia.org/wiki/Imperative_programming>} of languages like
Fortran and C, but as more data analysis and hypothesis testing takes place
inside computers, and as many scientists become highly specialized and
knowledgeable software engineers (even if they don't want to admit it), the
advantages of OOP become significant. If the object orientation of this
module seems odd at first glance, don't despair, it's worth learning.
2.4 Written in Python
---------------------
U{Python <http://www.python.org>} is a general purpose, high-level scripting
language. It is an interpreted language (as opposed to compiled languages
like Fortran or C) and so Python code is very portable, meaning it is usable
on a wide variety of computing platforms without any alteration. It is
relatively easy to learn and easy to read, and it has a very active
development community. It also has a large base of friendly, helpful
scientific users and an enormous selection of pre-existing libraries designed
for scientific applications. For those tasks which are particularly
computationally intensive, Python allows you to extend the language with code
written in C and Fortran. Python is also U{Free Software
<http://www.gnu.org/philosophy/free-sw.html>}. If you are a scientist and
you write code, Python is a great choice.
2.5 Open Source
---------------
Because science today is intimately intertwined with computation, it is
important for researchers to share the code that their scientific results are
based on. No matter how elegant and accurate your derivation is, if your
implementation of the model in code is wrong, your results will be flawed.
As our models and hypotheses become more complex, our code becomes vital
primary source material, and it needs to be open to peer review. Opening our
source:
- allows bugs to be found and fixed more quickly
- facilitates collaboration and interoperability
- reduces duplicated effort
- enhances institutional memory
- encourages better software design and documentation
Of course, it also means that other people can use our code to write their
own scientific papers, but I{that is the fundamental nature of science}. We
are all "standing on the shoulders of giants". Nobody re-derives quantum
mechanics when they just want to do a little spectroscopy. Why should we all
be re-writing each other's code I{ad nauseam}? Opening scientific source code
will ultimately increase everyone's productivity. Additionally, a great deal
of science is funded by the public, and our code is a major product of that
funding. It is unethical to make it proprietary.
"""
# Public submodules re-exported by "from satstress import *".
__all__ = ["satstress", "lineament", "nsrhist", "stressplot", "gsn"]

# Package metadata consumed by the build/packaging machinery and epydoc.
__author__ = "Zane Selvans"
__contact__ = "zane.selvans@colorado.edu"
__maintainer__ = "Zane Selvans"
__maintainer_email__ = "zane.selvans@colorado.edu"
__license__ = "http://www.gnu.org/licenses/gpl.html"
__docformat__ = 'epytext en'  # docstrings use epytext markup
__version__ = '0.2.0'
__projecturl__ = 'http://code.google.com/p/satstress'
__downloadurl__ = 'http://code.google.com/p/satstress/downloads/list'
__description__ = 'Tools for modeling tidal stresses and tectonics on icy satellites.'
__long_description__ = """
satstress is a collection of objects and scripts which are useful for modeling
tidal stresses on icy satellites, and for comparing those stresses to mapped
tectonic features. It includes a Love number code which treats the satellite
as a Maxwell viscoelastic material. The tidal stresses currently modeled are
the non-synchronous rotation of a decoupled icy shell (NSR) and the radial and
librational tides that result from an eccentric orbit (Diurnal), as described
in Wahr et al. (2008).
"""
__pythonrequiredversion__ = "2.5"

import datetime
# NOTE: computed at import time, so these reflect the moment the package was
# loaded rather than the release date.
__date__ = datetime.datetime.utcnow().ctime()
__copyright__ = "2007-%d %s" % (datetime.datetime.utcnow().year, __author__)
| zaneselvans/satstress | satstress/__init__.py | Python | gpl-3.0 | 8,848 | [
"NetCDF"
] | 60c4d9e8dbd062f1899c0bbdc5faaaefe59f918df3954ee8d8fda866f28e89a7 |
from cached_property import cached_property
from .cigar import (
cigartuples_to_cigarstring,
cigar_to_tuple,
cigartuples_to_named_cigartuples
)
from collections import namedtuple
class BaseTag(object):
    """Generate class template for tags."""

    def __new__(cls, header):
        """Return a Tag class that knows how to format a tag."""
        # NOTE: this returns a dynamically constructed namedtuple subclass
        # (not a BaseTag instance).  The subclass closes over ``header`` so
        # that ``__str__`` and ``reference_name`` can translate ``tid`` into
        # a reference name via the SAM-style header ('SQ' -> record -> 'SN').
        return type('NamedTagTuple', (namedtuple('tag', 'tid reference_start cigar is_reverse mapq query_alignment_start query_alignment_end'),),
                    {'__str__': lambda self: self.tag_str_template % (self.header['SQ'][self.tid]['SN'],
                                                                      self.reference_start,
                                                                      self.query_alignment_start,
                                                                      self.query_alignment_end,
                                                                      cigartuples_to_cigarstring(self.cigar),
                                                                      'AS' if self.is_reverse else 'S',
                                                                      self.mapq),
                     'reference_name': lambda self: self.header['SQ'][self.tid]['SN'],
                     'header': header,
                     'tag_str_template': "R:%s,POS:%d,QSTART:%d,QEND:%d,CIGAR:%s,S:%s,MQ:%d"})
def make_tag(template, r):
    """Return namedtuple of read attributes.

    ``template`` is the callable produced by :class:`BaseTag`; ``r`` is a
    pysam-style aligned segment.
    """
    attributes = {
        'tid': r.tid,
        'reference_start': r.reference_start,
        'cigar': r.cigar,
        'is_reverse': r.is_reverse,
        'mapq': r.mapping_quality,
        'query_alignment_start': r.query_alignment_start,
        'query_alignment_end': r.query_alignment_end,
    }
    return template(**attributes)
class Tag(object):
    """Collect tag attributes and conversion."""

    def __init__(self,
                 reference_start,
                 cigar,
                 is_reverse,
                 mapq=None,
                 query_alignment_start=None,
                 query_alignment_end=None,
                 tid=None,
                 reference_name=None,
                 header=None):
        """
        Return new Tag instance from kwds.

        Note that the cigar is always wrt the reference alignment.
        When comparing Tag object by their cigar, one of the cigar needs to be inverted if the
        Tag objects are not in the same orientation.
        """
        self.reference_start = reference_start
        self._cigar = cigar  # raw cigar (string or tuples); normalized lazily by ``cigar``
        self.is_reverse = is_reverse
        self.mapq = mapq
        self.query_alignment_start = query_alignment_start
        self.query_alignment_end = query_alignment_end
        self.tid = tid
        self._reference_name = reference_name  # explicit name wins over header lookup
        self.header = header

    @cached_property
    def reference_name(self):
        """Return reference name for this instance."""
        # Prefer an explicitly supplied name; otherwise fall back to looking
        # up ``tid`` in the header.  Any lookup failure yields None instead of
        # raising (e.g. when no header was provided).
        if self._reference_name:
            return self._reference_name
        else:
            try:
                return self.header.references[self.tid]
            except Exception:
                return None

    @cached_property
    def cigar(self):
        """
        Lazily convert cigarstring to tuple if it doesn't exist.

        >>> Tag(reference_start=0, cigar='20M30S', is_reverse='True', mapq=60, query_alignment_start=0, query_alignment_end=20, tid=5).cigar
        [CIGAR(operation=0, length=20), CIGAR(operation=4, length=30)]
        """
        # Accept either a cigar string or a list of tuples; always return
        # named CIGAR tuples.  The result replaces ``_cigar`` so conversion
        # happens at most once.
        if isinstance(self._cigar, str):
            self._cigar = cigar_to_tuple(self._cigar)
        self._cigar = cigartuples_to_named_cigartuples(self._cigar)
        return self._cigar

    @staticmethod
    def from_read(r, header=None):
        """
        Return Tag instance from pysam.AlignedSegment Instance.

        >>> from tests.helpers import MockAlignedSegment as AlignedSegment
        >>> t = Tag.from_read(AlignedSegment(cigar='20M30S'))
        >>> isinstance(t, Tag)
        True
        >>> t.reference_name is None
        True
        """
        return Tag(tid=r.tid,
                   reference_start=r.reference_start,
                   cigar=r.cigar,
                   is_reverse=r.is_reverse,
                   mapq=r.mapping_quality,
                   query_alignment_start=r.query_alignment_start,
                   query_alignment_end=r.query_alignment_end,
                   header=header)

    @staticmethod
    def from_tag_str(tag_str):
        """
        Return Tag Instance from tag string.

        >>> t = Tag.from_tag_str('R:FBti0019061_rover_Gypsy,POS:7435,QSTART:0,QEND:34,CIGAR:34M91S,S:S,MQ:60')
        >>> isinstance(t, Tag)
        True
        >>> t.cigar == [(0, 34), (4, 91)]
        True
        >>> t = Tag.from_tag_str('R:FBti0019061_rover_Gypsy,POS:7435,QSTART:0,QEND:34,CIGAR:34M91S,S:AS,MQ:60')
        >>> t.is_reverse
        True
        """
        # Map the short keys of the serialized form to constructor kwargs.
        tag_to_attr = {'R': 'reference_name',
                       'POS': 'reference_start',
                       'QSTART': 'query_alignment_start',
                       'QEND': 'query_alignment_end',
                       'CIGAR': 'cigar',
                       'S': 'is_reverse',
                       'MQ': 'mapq'}
        integers = ['reference_start', 'query_alignment_start', 'query_alignment_end', 'mapq']
        tag_d = {tag_to_attr[k]: v for k, v in dict(item.split(':') for item in tag_str.split(',')).items()}
        # 'S' marks the sense (forward) strand; anything else ('AS') is antisense.
        if tag_d['is_reverse'] == 'S':
            tag_d['is_reverse'] = False
        else:
            tag_d['is_reverse'] = True
        for integer in integers:
            tag_d[integer] = int(tag_d.get(integer, 0))
        return Tag(**tag_d)

    def to_dict(self):
        """
        Serialize self into dictionary.

        >>> t = Tag.from_tag_str('R:FBti0019061_rover_Gypsy,POS:7435,QSTART:0,QEND:34,CIGAR:34M91S,S:S,MQ:60')
        >>> t.to_dict()['is_reverse']
        False
        """
        return {'reference_start': self.reference_start,
                'cigar': self.cigar,
                'mapq': self.mapq,
                'query_alignment_start': self.query_alignment_start,
                'query_alignment_end': self.query_alignment_end,
                'is_reverse': self.is_reverse,
                'tid': self.tid}  # Improve this by passing tid or reference name

    def to_string(self, header=None):
        """
        Serialize to tag string.

        >>> t = Tag.from_tag_str('R:FBti0019061_rover_Gypsy,POS:7435,QSTART:0,QEND:34,CIGAR:34M91S,S:S,MQ:60')
        >>> t.to_string() == 'R:FBti0019061_rover_Gypsy,POS:7435,QSTART:0,QEND:34,CIGAR:34M91S,S:S,MQ:60'
        True

        :param header: optional SAM-style header used to resolve ``tid`` when
            no reference name is stored on the instance
        :return: the comma-separated tag string understood by ``from_tag_str``
        """
        header = header or self.header
        return "R:%s,POS:%d,QSTART:%d,QEND:%d,CIGAR:%s,S:%s,MQ:%d" % (self.reference_name or header['SQ'][self.tid]['SN'],
                                                                      self.reference_start,
                                                                      self.query_alignment_start,
                                                                      self.query_alignment_end,
                                                                      cigartuples_to_cigarstring(self.cigar),
                                                                      'AS' if self.is_reverse else 'S',
                                                                      self.mapq)
| bardin-lab/readtagger | readtagger/tags.py | Python | mit | 7,468 | [
"pysam"
] | a09595349f9bfdd0f1818df377ee8995070416786553bd6652d12d29bf54f4e0 |
"""
#python -c "import cyth, doctest; print(doctest.testmod(cyth.cyth_benchmarks))"
"""
from __future__ import absolute_import, division, print_function
import utool
import doctest
import ast
import astor
from cyth import cyth_helpers
def get_bench_text_fmt():
    """
    Return the quasiquote template for an autogenerated benchmark module.

    The returned text still contains ``{...}`` placeholders (``{timestamp}``,
    ``{py_modname}``, ``{codes}``, ``{all_benchmarks}``) which are filled in
    by :func:`make_bench_text` via ``utool.quasiquote``; literal braces are
    escaped as ``{{``/``}}``.

    BUGFIX: in the generated ``run_doctest`` the python-side result was
    previously computed with ``eval(cyth_call, ...)`` as well, so the
    python/cython consistency check compared the cython result with itself
    and could never fail.  It now evaluates ``pyth_call`` for the python
    namespace.
    """
    bench_text_fmt_ = r'''
    " Autogenerated by cyth on {timestamp} "
    #!/usr/bin/env python
    from __future__ import absolute_import, division, print_function
    import timeit
    import textwrap
    import warnings
    import utool
    import six
    warnings.simplefilter('ignore', SyntaxWarning)
    warnings.simplefilter('ignore', RuntimeWarning)
    print, print_, printDBG, rrr, profile = utool.inject(__name__, '[{py_modname}.bench]')
    def run_doctest(pyth_call, cyth_call, setup_script):
        setup_globals_py, setup_locals_py = {{}}, {{}}
        setup_globals_cy, setup_locals_cy = {{}}, {{}}
        six.exec_(setup_script, setup_globals_py, setup_locals_py)
        six.exec_(setup_script, setup_globals_cy, setup_locals_cy)
        pyth_result = eval(pyth_call, setup_globals_py, setup_locals_py)
        cyth_result = eval(cyth_call, setup_globals_cy, setup_locals_cy)
        if repr(pyth_result) == repr(cyth_result):
            #print('%r and %r have the same result.' % (pyth_call, cyth_call))
            print('PASS: output is equivalent')
            #%r and %r have the same result.' % (pyth_call, cyth_call))
        else:
            print('<FAILED>')
            print('INCONSISTENCY: %r has different output than %r' % (pyth_call, cyth_call))
            print('___________')
            print('pyth_result')
            print('-----------')
            print(repr(pyth_result))
            print('=========== (end pyth_result)')
            print('___________')
            print('cyth_result')
            print('-----------')
            print(repr(cyth_result))
            print('=========== (end cyth_result)')
            print('</FAILED>')
    {codes}
    def run_all_benchmarks(iterations):
        print('\n\n')
        print('=======================================')
        print('[cyth] Run benchmarks for: {py_modname}')
        with utool.Indenter(' * '):
            results = []
            {all_benchmarks}
            #sorted_results = sorted(results)
            #sorted_lines = [tup[2] for tup in sorted_results]
            #print('\n'.join(utool.flatten(sorted_lines)))
            return results
    if __name__ == '__main__':
        iterations = utool.get_argval(('--iterations', '-n'), type_=int, default=100)
        run_all_benchmarks(iterations)
    '''
    bench_text_fmt = utool.unindent(bench_text_fmt_).strip('\n')
    return bench_text_fmt
def make_bench_text(benchmark_codes, benchmark_names, py_modname):
    """Assemble the full source text of a benchmark module.

    ``benchmark_codes`` are the per-function benchmark definitions and
    ``benchmark_names`` the corresponding function names.
    """
    # TODO: let each function individually specify number
    codes = '\n\n\n'.join(benchmark_codes)  # NOQA
    # NOTE(review): utool.quasiquote appears to interpolate *local variable
    # names* into the template (hence the NOQA-suppressed "unused" locals
    # below) -- the names codes/all_benchmarks/timestamp/py_modname must not
    # be renamed; confirm against utool's documentation.
    list_ = [utool.quasiquote('results.extend({benchfunc}(iterations))')
             for benchfunc in benchmark_names]
    all_benchmarks = utool.indent('\n'.join(list_), ' ' * 8).strip()  # NOQA
    timestamp = utool.get_timestamp()  # NOQA
    bench_text_fmt = get_bench_text_fmt()
    bench_text = utool.quasiquote(bench_text_fmt)
    return bench_text
def parse_benchmarks(funcname, docstring, py_modname):
    """Build a ``run_benchmark_<funcname>`` function definition (source text)
    that times the python vs cython versions of ``funcname`` using the
    doctests found in ``docstring``.

    Returns:
        tuple: ``(benchmark_name, bench_code)`` where both are strings.
    """
    test_tuples_, setup_script_ = make_benchmarks(funcname, docstring, py_modname)
    if len(test_tuples_) == 0:
        test_tuples = '[]'
    else:
        test_tuples = '[\n' + (' ' * 8) + '\n '.join(list(map(str, test_tuples_))) + '\n ]'  # NOQA
    setup_script = utool.indent(setup_script_).strip()  # NOQA
    # NOTE(review): quasiquote interpolates local names (benchmark_name,
    # test_tuples, setup_script, funcname) into the template below, so these
    # local variables must keep exactly these names.
    benchmark_name = utool.quasiquote('run_benchmark_{funcname}')
    #test_tuples, setup_script = make_benchmarks('''{funcname}''', '''{docstring}''')
    # http://en.wikipedia.org/wiki/Relative_change_and_difference
    # NOTE: the generated code targets Python 2 -- it uses tuple-unpacking
    # lambda parameters, which are a syntax error in Python 3.
    bench_code_fmt_ = r'''
    def {benchmark_name}(iterations):
        test_tuples = {test_tuples}
        setup_script = textwrap.dedent("""
        {setup_script}
        """)
        time_line = lambda line: timeit.timeit(stmt=line, setup=setup_script, number=iterations)
        time_pair = lambda (x, y): (time_line(x), time_line(y))
        def print_timing_info(tup):
            from math import log
            test_lines = []
            def test_print(str):
                if not utool.QUIET:
                    print(str)
                test_lines.append(str)
            test_print('\n---------------')
            test_print('[bench] timing {benchmark_name} for %d iterations' % (iterations))
            test_print('[bench] tests:')
            test_print(' ' + str(tup))
            (pyth_time, cyth_time) = time_pair(tup)
            test_print("[bench.python] {funcname} time=%f seconds" % (pyth_time))
            test_print("[bench.cython] {funcname} time=%f seconds" % (cyth_time))
            time_delta = cyth_time - pyth_time
            #pcnt_change_wrt_pyth = (time_delta / pyth_time) * 100
            #pcnt_change_wrt_cyth = (time_delta / cyth_time) * 100
            pyth_per_cyth = (pyth_time / cyth_time) * 100
            inv_cyth_per_pyth = 1 / (cyth_time / pyth_time) * 100
            nepers = log(cyth_time / pyth_time)
            if time_delta < 0:
                test_print('[bench.result] cython was %.1f%% of the speed of python' % (inv_cyth_per_pyth,))
                #test_print('[bench.result] cython was %.1fx faster' % (-pcnt_change_wrt_pyth,))
                test_print('[bench.result] cython was %.1f nepers faster' % (-nepers,))
                test_print('[bench.result] cython was faster by %f seconds' % -time_delta)
            else:
                test_print('[bench.result] cython was %.1f%% of the speed of python' % (pyth_per_cyth,))
                #test_print('[bench.result] cython was %.1fx slower' % (pcnt_change_wrt_pyth,))
                test_print('[bench.result] cython was %.1f nepers slower' % (nepers,))
                test_print('[bench.result] python was faster by %f seconds' % time_delta)
            pyth_call, cyth_call = tup
            run_doctest(pyth_call, cyth_call, setup_script)
            return (pyth_time, cyth_time, test_lines)
        test_results = list(map(print_timing_info, test_tuples))
        # results are lists of (time1, time2, strlist)
        return test_results'''
    bench_code_fmt = utool.unindent(bench_code_fmt_).strip('\n')
    bench_code = utool.quasiquote(bench_code_fmt)
    return (benchmark_name, bench_code)
def replace_funcalls(source, funcname, replacement):
    """
    Return ``source`` with every call to ``funcname`` renamed to ``replacement``.

    >>> from cyth_script import * # NOQA
    >>> replace_funcalls('foo(5)', 'foo', 'bar')
    'bar(5)'
    >>> replace_funcalls('foo(5)', 'bar', 'baz')
    'foo(5)'
    """
    # FIXME: !!!
    # http://docs.cython.org/src/userguide/wrapping_CPlusPlus.html#nested-class-declarations
    # C++ allows nested class declaration. Class declarations can also be nested in Cython:
    # Note that the nested class is declared with a cppclass but without a cdef.
    class FunctioncallReplacer(ast.NodeTransformer):
        def visit_Call(self, node):
            # BUGFIX: descend into the call's children first so nested calls
            # such as foo(foo(5)) are rewritten too.  NodeTransformer does
            # NOT visit children automatically when a visit_* method is
            # defined; without generic_visit() only the outermost call was
            # renamed.
            self.generic_visit(node)
            if isinstance(node.func, ast.Name) and node.func.id == funcname:
                node.func.id = replacement
            return node

    generator = astor.codegen.SourceGenerator(' ' * 4)
    generator.visit(FunctioncallReplacer().visit(ast.parse(source)))
    return ''.join(generator.result)
def parse_doctest_examples(source):
    """Extract only the doctest ``Example`` objects found in ``source``.

    ``DocTestParser.parse`` yields interleaved prose strings and Example
    instances; the prose is discarded here.
    """
    parsed_items = doctest.DocTestParser().parse(source)
    return [item for item in parsed_items if isinstance(item, doctest.Example)]
def get_benchline(src, funcname):
    """Return the source of the call to ``funcname`` if ``src`` is a single
    statement that calls it (either bare or as the value of an assignment);
    otherwise return None.
    """
    try:
        module_node = ast.parse(src)
    except SyntaxError as e:
        print("Syntax error \"%s\" in source fragment %r of function %r." % (e, src, funcname))
        raise e
    assert isinstance(module_node, ast.Module), type(module_node)
    statements = module_node.body
    if len(statements) != 1:
        return None
    only_stmt = statements[0]
    if not isinstance(only_stmt, (ast.Expr, ast.Assign)):
        return None
    value = only_stmt.value
    if isinstance(value, ast.Call) and isinstance(value.func, ast.Name) \
            and value.func.id == funcname:
        return cyth_helpers.ast_to_sourcecode(value)
    return None
def make_benchmarks(funcname, docstring, py_modname):
    r"""
    Build the (python_call, cython_call) test tuples and the setup script for
    benchmarking ``funcname`` from its docstring's doctests.

    >>> from cyth.cyth_script import * # NOQA
    >>> funcname = 'replace_funcalls'
    >>> docstring = replace_funcalls.func_doc
    >>> py_modname = 'cyth.cyth_script'
    >>> benchmark_list = list(make_benchmarks(funcname, docstring, py_modname))
    >>> print(benchmark_list)
    [[("replace_funcalls('foo(5)', 'foo', 'bar')", "_replace_funcalls_cyth('foo(5)', 'foo', 'bar')"), ("replace_funcalls('foo(5)', 'bar', 'baz')", "_replace_funcalls_cyth('foo(5)', 'bar', 'baz')")], "from cyth.cyth_script import _replace_funcalls_cyth\nfrom cyth_script import * # NOQA\nreplace_funcalls('foo(5)', 'foo', 'bar')\nreplace_funcalls('foo(5)', 'bar', 'baz')\n"]
    #>>> output = [((x.source, x.want), y.source, y.want) for x, y in benchmark_list]
    #>>> print(utool.hashstr(repr(output)))
    """
    examples = parse_doctest_examples(docstring)
    cyth_funcname = cyth_helpers.get_cyth_name(funcname)
    pyth_calls = []
    cyth_calls = []
    setup_fragments = []
    for example in examples:
        # Every doctest line contributes to the setup script; lines that call
        # funcname additionally become timed benchmark statements.
        setup_fragments.append(example.source)
        benchline = get_benchline(example.source, funcname)
        if benchline is not None:
            pyth_calls.append(benchline)
            cyth_calls.append(replace_funcalls(benchline, funcname, cyth_funcname))
    test_tuples = list(zip(pyth_calls, cyth_calls))
    setup_script = utool.unindent(''.join(setup_fragments))
    setup_script = 'from %s import %s\n' % (py_modname, cyth_funcname,) + setup_script
    return test_tuples, setup_script
def build_runbench_shell_text(cy_bench_list):
    """Return the text of a bash script that runs every cyth benchmark,
    forwarding any command-line arguments (``$*``) to each one.
    """
    script_lines = ['#!/bin/bash']
    for bench in cy_bench_list:
        script_lines.append('python ' + bench + ' $*')
    return '\n'.join(script_lines)
def build_runbench_pyth_text(cy_bench_list):
    """Return the source of a python driver script that imports every
    generated benchmark module, runs it, and reports the aggregated,
    sorted timing results.
    """
    # write script to run all cyth benchmarks
    runbench_pytext_fmt_ = r'''
    #!/usr/bin/env python
    " Autogenerated by cyth on {timestamp} "
    from __future__ import absolute_import, division, print_function
    import utool
    {bench_import_text}
    SORTBY = utool.get_argval('--sortby', str, 'python')
    if __name__ == '__main__':
        all_results = []
        iterations = utool.get_argval(('--iterations', '-n'), type_=int, default=100)
        # Run the benchmarks
        {bench_runline_text}
        # Sort by chosen field
        sortable_fields = ['python', 'cython']
        sortx = sortable_fields.index(SORTBY)
        sorted_allresults = sorted(all_results, key=lambda tup: tup[sortx])
        sorted_lines = [tup[2] for tup in sorted_allresults]
        # Report sorted results
        print('\n\n')
        print('==================================')
        print('Aggregating all benchmarks results')
        print('==================================')
        print('\n')
        print('sorting by %s' % sortable_fields[sortx])
        print('\n'.join(utool.flatten(sorted_lines)))
    '''
    runbench_pytext_fmt = utool.unindent(runbench_pytext_fmt_).strip('\n')
    from os.path import relpath, splitext
    import os

    def bench_fpath_to_modname(bench):
        # Turn a benchmark file path into a dotted module name relative to
        # the current working directory.
        bench_upath = utool.unixpath(bench)
        bench_relpath = relpath(bench_upath, os.getcwd())
        bench_relname, _ = splitext(bench_relpath)
        bench_modname = bench_relname.replace('\\', '/').replace('/', '.')
        return bench_modname
    bench_modnames = list(map(bench_fpath_to_modname, cy_bench_list))
    bench_imports = ['import ' + bench_modname for bench_modname in bench_modnames]
    runline_fmt = 'all_results.extend({bench_modname}.run_all_benchmarks(iterations))'
    bench_runlines = [runline_fmt.format(bench_modname=bench_modname)
                      for bench_modname in bench_modnames]
    bench_import_text = '\n'.join(bench_imports)
    bench_runline_text = '\n '.join(bench_runlines)
    timestamp = utool.get_timestamp()  # NOQA
    runbench_pytext = runbench_pytext_fmt.format(timestamp=timestamp,
                                                 bench_runline_text=bench_runline_text,
                                                 bench_import_text=bench_import_text)
    return runbench_pytext
| aweinstock314/cyth | cyth/cyth_benchmarks.py | Python | apache-2.0 | 12,648 | [
"VisIt"
] | b164c927e64c4785b219468318f71109c6bd0175896e5c2df067774ab0ec970b |
"""
Test support for the HANDLE_OWNERS_NOT_AVAILABLE group flag, and calling
GetHandleOwners on MUC members.
By default, MUC channels should have the flag set. The flag should be unset
when presence is received that includes the MUC JID's owner JID.
"""
import dbus
from gabbletest import make_result_iq, exec_test, make_muc_presence
from servicetest import (
call_async, EventPattern, assertEquals, assertFlagsSet, assertFlagsUnset,
wrap_channel,
)
import constants as cs
def test(q, bus, conn, stream):
    """
    Join a MUC, feed it a mixture of anonymous and non-anonymous member
    presence, then verify that HANDLE_OWNERS_NOT_AVAILABLE is cleared and
    that handle owners are reported correctly (0 for anonymous members).
    """
    self_handle = conn.GetSelfHandle()
    room_handle = conn.RequestHandles(cs.HT_ROOM, ['chat@conf.localhost'])[0]
    call_async(q, conn, 'RequestChannel', cs.CHANNEL_TYPE_TEXT, cs.HT_ROOM,
               room_handle, True)
    gfc, _, _, _ = q.expect_many(
        # Initial group flags
        EventPattern('dbus-signal', signal='GroupFlagsChanged',
                     predicate=lambda e: e.args[0] != 0),
        EventPattern('dbus-signal', signal='MembersChanged',
                     args=[u'', [], [], [], [2], 0, 0]),
        # Removing CAN_ADD
        EventPattern('dbus-signal', signal='GroupFlagsChanged',
                     args=[0, cs.GF_CAN_ADD], predicate=lambda e: e.args[0] == 0),
        EventPattern('stream-presence', to='chat@conf.localhost/test'))
    assert gfc.args[1] == 0
    # Send presence for anonymous other member of room.
    stream.send(make_muc_presence('owner', 'moderator', 'chat@conf.localhost', 'bob'))
    # Send presence for anonymous other member of room (2)
    stream.send(make_muc_presence('owner', 'moderator', 'chat@conf.localhost', 'brian'))
    # Send presence for nonymous other member of room.
    stream.send(make_muc_presence('none', 'participant', 'chat@conf.localhost',
                                  'che', 'che@foo.com'))
    # Send presence for nonymous other member of room (2)
    stream.send(make_muc_presence('none', 'participant', 'chat@conf.localhost',
                                  'chris', 'chris@foo.com'))
    # Send presence for own membership of room.
    stream.send(make_muc_presence('none', 'participant', 'chat@conf.localhost', 'test'))
    # Since we received MUC presence that contains an owner JID, the
    # OWNERS_NOT_AVAILABLE flag should be removed.
    event = q.expect('dbus-signal', signal='GroupFlagsChanged',
                     args=[0, cs.GF_HANDLE_OWNERS_NOT_AVAILABLE])
    event = q.expect('dbus-signal', signal='HandleOwnersChanged')
    owners = event.args[0]
    event = q.expect('dbus-signal', signal='MembersChanged')
    added = event.args[1]
    [test, bob, brian, che, che_owner, chris, chris_owner] = \
        conn.RequestHandles(cs.HT_CONTACT,
                            ['chat@conf.localhost/test', 'chat@conf.localhost/bob',
                             'chat@conf.localhost/brian', 'chat@conf.localhost/che',
                             'che@foo.com', 'chat@conf.localhost/chris', 'chris@foo.com',
                             ])
    expected_members = sorted([test, bob, brian, che, chris])
    # Anonymous members (bob, brian) have owner handle 0.
    expected_owners = {test: self_handle,
                       bob: 0,
                       brian: 0,
                       che: che_owner,
                       chris: chris_owner}
    assertEquals(expected_members, sorted(added))
    assertEquals(expected_owners, owners)
    event = q.expect('dbus-return', method='RequestChannel')
    chan = wrap_channel(bus.get_object(conn.bus_name, event.value[0]), 'Text')
    # Exercise GetHandleOwners
    assertEquals([che_owner, chris_owner],
                 chan.Group.GetHandleOwners([che, chris]))
    # Exercise D-Bus properties
    all = chan.Properties.GetAll(cs.CHANNEL_IFACE_GROUP)
    assert all[u'LocalPendingMembers'] == [], all
    assert sorted(all[u'Members']) == expected_members, all
    assert all[u'RemotePendingMembers'] == [], all
    assert all[u'SelfHandle'] == test, all
    assert all[u'HandleOwners'] == expected_owners, all
    flags = all[u'GroupFlags']
    assertFlagsSet(cs.GF_PROPERTIES | cs.GF_CHANNEL_SPECIFIC_HANDLES, flags)
    assertFlagsUnset(cs.GF_HANDLE_OWNERS_NOT_AVAILABLE, flags)
if __name__ == '__main__':
    # Run this scenario under the Telepathy test harness when invoked directly.
    exec_test(test)
| jku/telepathy-gabble | tests/twisted/muc/test-muc-ownership.py | Python | lgpl-2.1 | 4,034 | [
"Brian"
] | 4d7a88013dd33b970cd03d8ca55df483dcaa7cd4df6513563a6912d2dfe8dde9 |
#!/usr/bin/env python
# Module metadata.
__description__ =\
"""
Parses the output from calc-rmsd.tcl and makes it pretty and R-readable.
"""
__author__ = "Michael J. Harms"
__date__ = "101122"
__usage__ = "calc-rsmd.py vmd_output_file"

import sys

# Column names of the R-readable output table produced by main().
HEADER = ["frame","ca_rmsd","lig_rmsd"]
class ParseVmdError(Exception):
    """Generic exception raised for errors within this module."""
    pass
def parseVMDLine(line, fmt_string="%10.3f"):
    """
    Parse a line of output from VMD.

    The first '|'-delimited field after the marker is the integer frame
    number; all remaining fields are floats formatted with ``fmt_string``.
    """
    fields = line.split("|")
    values = [int(fields[1])] + [float(field) for field in fields[2:]]
    fmt = "%10i" + fmt_string * (len(values) - 1) + "\n"
    return fmt % tuple(values)
def main(argv=None):
    """
    Main function.  Parse command line, read file, return output string.
    """
    if argv is None:
        argv = sys.argv[1:]
    if not argv:
        # No input file given: report usage via the module exception.
        raise ParseVmdError(__usage__)
    input_file = argv[0]
    with open(input_file, 'r') as handle:
        raw_lines = handle.readlines()
    # Only lines flagged by calc-rmsd.tcl with "->" carry data.
    data_lines = [line for line in raw_lines if line.startswith("->")]
    parsed = [parseVMDLine(line, "%10.3f") for line in data_lines]
    # Prefix every record with a running row number, then prepend the header.
    numbered = ["%10i%s" % (row, entry) for row, entry in enumerate(parsed)]
    header_row = "%10s%10s%10s%10s\n" % tuple([" "] + HEADER)
    return header_row + "".join(numbered)
if __name__ == "__main__":
    # NOTE: Python 2 print statement -- this script predates Python 3.
    print main()
| harmsm/md-analysis-tools | global-rmsd/global-rmsd.py | Python | unlicense | 1,599 | [
"VMD"
] | 3cff9226b4b435a82d0370c944a03c1781fc0f6d9e071bd4ccdafdc35f7e214f |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is the entry point for the command-line interface (CLI) application.
.. note::
To learn more about Click visit the `project website <http://click.pocoo.org/5/>`_. There is
also a very helpful `tutorial video <https://www.youtube.com/watch?v=kNke39OZ2k0>`_.
"""
from .model import Country
from .sql import CooperSqlGenerator, SqlOutput
import click
import os
import sys
from pathlib import Path
from typing import List
class Info(object):
    """
    Shared state object passed between CLI sub-commands.
    """

    def __init__(self):  # Note that this object must have an empty constructor.
        pass
# Decorator that injects a shared ``Info`` instance into CLI callbacks,
# creating one on first use (``ensure=True``).
pass_info = click.make_pass_decorator(Info, ensure=True) \
    #: a decorator for functions that pass 'Info' objects
@click.group()
@pass_info
def cli(_: Info):
    """
    This is a sample Click command-line application.
    """
    # Root command group; sub-commands attach themselves via @cli.command().
    pass
@cli.command()  # This is a sub-command of 'cli'.
@click.option('--country', type=str, metavar='<country>',
              help="the two-letter (alpha-2-code) that identifies the country")
@click.option('--outdir', type=click.Path(), default='dist', metavar='<outdir>')
@click.option('--combined/--separate', default=False,
              help='Combine all the SQL in a single file?')
@click.option('--sequence', type=int, default='0', metavar='<startat>',
              help="the initial sequence number for output file names")
@click.argument('datafile', type=click.Path(exists=True), required=True)
@pass_info
def sqlize(_: Info,
           country: str,
           outdir: os.PathLike,
           combined: bool,
           sequence: int,
           datafile: os.PathLike):
    """
    Generate SQL scripts for a country's data file.

    :param country: two-letter (alpha-2) country code
    :param outdir: directory that receives the generated .sql files
    :param combined: write one combined file instead of one file per output
    :param sequence: starting number used to prefix output file names
    :param datafile: path to the input data file
    """
    # Make sure the output directory is available.
    try:
        os.makedirs(outdir, exist_ok=True)
    except OSError:  # pragma: no cover
        click.echo(click.style(
            'Cannot create or access {outdir}.'.format(outdir=outdir),
            fg='red')
        )
        # We can't go on!
        sys.exit(1)
    # Load all the data.
    _country = Country(alpha_2_code=country)
    click.echo(click.style('Loading {}'.format(datafile), fg='magenta'))
    try:
        _country.load(datafile)
    except Exception:  # pragma: no cover
        click.echo(click.style(
            'An error occurred while attempting to load {}'.format(datafile),
            fg='red')
        )
        # There's no going on.
        sys.exit(1)
    # We'll need a generator for this. (In the future we may need to specify which generator
    # from the command line, but at present, there's only the one.)
    sql_generator = CooperSqlGenerator()
    # Create a list that will contain the unmodified output
    sqlouts_raw: List[SqlOutput] = list(sql_generator.all(_country))
    # If we haven't been asked to put all the output into one honking big file...
    if not combined:
        # Get all the outputs and put 'em in a list.
        sqlouts_final = [
            SqlOutput(
                # BUGFIX: the template previously hard-coded "(unknown)" and
                # silently ignored the ``filename`` argument passed to
                # format(); include the generator's filename so the output
                # files are identifiable.
                filename='{n}-create-{country}-{filename}'.format(
                    n="%04d" % (sequence + i,),
                    country=country,
                    filename=sqlouts_raw[i].filename),
                sql=sqlouts_raw[i].sql
            )
            for i in range(0, len(sqlouts_raw))
        ]
    else:
        # Otherwise, get all the outputs and join them into one large string.
        all_sql = '\n\n'.join([sqlo.sql for sqlo in sqlouts_raw])
        # We'll now produce a list (but it just contains the one item).
        sqlouts_final = [
            SqlOutput(
                filename='{n}-create-{country}-all'.format(
                    country=country,
                    n="%04d" % (sequence,)),
                sql=all_sql)
        ]
    # Write all the files.
    for sqlo in sqlouts_final:
        # Get the filename where the output will go.
        outfile_path = os.path.join(outdir, sqlo.filename)
        click.echo(click.style("Writing {}".format(sqlo.filename), fg='magenta'))
        try:
            with open(outfile_path, 'w') as outfile:
                outfile.write(sqlo.sql)
        except IOError:  # pragma: no cover
            # What happened?
            click.echo(click.style(
                "An error occurred while writing to {}.".format(outfile_path),
                fg='red')
            )
            # That's that.
            sys.exit(1)
    # It looks like everything worked out.
    click.echo(click.style(
        'Created {count} files in {outdir}'.format(
            count=len(sqlouts_final),
            outdir=os.path.abspath(os.path.expanduser(outdir))
        ), fg='green'))
| mndarren/Code-Lib | Python_lib/cliff/cli.py | Python | apache-2.0 | 4,674 | [
"VisIt"
] | f364d77041aee19170f502e230b570c55d7405b1c4528b2c26899b34228e1264 |
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper as iw
import sys
import ast
class importlib_wrapper(ut.TestCase):
    """Unit tests for the importlib_wrapper sample-runner helpers.

    Each test feeds a small Python source string to one of the code
    rewriting helpers in ``importlib_wrapper`` (imported as ``iw``) and
    checks the rewritten source, or the data collected by its AST
    visitors, against an exact expected value.
    """

    def test_substitute_variable_values(self):
        """Assignments are replaced in-place, keeping the original value."""
        str_inp = "n_steps=5000\nnsteps == 5\n"
        str_exp = "n_steps = 10; _n_steps__original=5000\nnsteps == 5\n"
        str_out = iw.substitute_variable_values(str_inp, n_steps=10)
        self.assertEqual(str_out, str_exp)
        str_out = iw.substitute_variable_values(str_inp, n_steps='10',
                                                strings_as_is=True)
        self.assertEqual(str_out, str_exp)
        str_inp = "N=5000\nnsteps == 5\n"
        str_exp = "N = 10\nnsteps == 5\n"
        str_out = iw.substitute_variable_values(str_inp, N=10, keep_original=0)
        self.assertEqual(str_out, str_exp)
        # test exceptions
        str_inp = "n_steps=5000\nnsteps == 5\n"
        self.assertRaises(AssertionError, iw.substitute_variable_values,
                          str_inp, other_var=10)
        str_inp = "other_var == 5\n"
        self.assertRaises(AssertionError, iw.substitute_variable_values,
                          str_inp, other_var=10)
        str_inp = "var, other_var = 5, 6\n"
        self.assertRaises(AssertionError, iw.substitute_variable_values,
                          str_inp, var=10)

    def test_set_cmd(self):
        """A sys.argv assignment is injected before the argparse import."""
        original_sys_argv = list(sys.argv)
        sys.argv = [0, "test"]
        # test substitutions
        str_inp = "import sys\nimport argparse"
        str_exp = "import sys;sys.argv = ['a.py', '1', '2'];" + str_inp
        str_out, sys_argv = iw.set_cmd(str_inp, "a.py", (1, 2))
        self.assertEqual(str_out, str_exp)
        self.assertEqual(sys_argv, [0, "test"])
        str_inp = "import argparse"
        str_exp = "import sys;sys.argv = ['a.py', '1', '2'];" + str_inp
        str_out, sys_argv = iw.set_cmd(str_inp, "a.py", ["1", 2])
        self.assertEqual(str_out, str_exp)
        self.assertEqual(sys_argv, [0, "test"])
        # test exceptions
        str_inp = "import re"
        self.assertRaises(AssertionError, iw.set_cmd, str_inp, "a.py", (1, 2))
        # restore sys.argv
        sys.argv = original_sys_argv

    def test_disable_matplotlib_gui(self):
        """The Agg backend is forced and GUI/magic calls are commented out."""
        str_inp = "if 1:\n\timport matplotlib as mp\nmp.use('PS')\n"
        str_exp = ("if 1:\n\timport matplotlib as _mpl;_mpl.use('Agg');"
                   "import matplotlib as mp\n#mp.use('PS')\n")
        str_out = iw.disable_matplotlib_gui(str_inp)
        self.assertEqual(str_out, str_exp)
        str_inp = "if 1:\n import matplotlib.pyplot as plt\nplt.ion()\n"
        str_exp = ("if 1:\n import matplotlib as _mpl;_mpl.use('Agg');"
                   "import matplotlib.pyplot as plt\n#plt.ion()\n")
        str_out = iw.disable_matplotlib_gui(str_inp)
        self.assertEqual(str_out, str_exp)
        str_inp = "if 1:\n\tget_ipython(\n).run_line_magic('matplotlib', 'x')\n"
        str_exp = "if 1:\n#\tget_ipython(\n#).run_line_magic('matplotlib', 'x')\n"
        str_out = iw.disable_matplotlib_gui(str_inp)
        self.assertEqual(str_out, str_exp)

    def test_set_random_seeds(self):
        """np.random.seed() calls are neutralized but stay syntactically valid."""
        # NumPy seed
        str_np = "import numpy as np\n"
        str_lambda = "(lambda *args, **kwargs: None)"
        str_inp = str_np + "np.random.seed(seed=42)"
        str_exp = str_np + "np.random.seed;_random_seed_np = " + \
            str_lambda + "(seed=42)"
        str_out = iw.set_random_seeds(str_inp)
        self.assertEqual(str_out, str_exp)
        str_np = "import numpy.random as npr\n"
        str_inp = str_np + "npr.seed(42)"
        str_exp = str_np + "npr.seed;_random_seed_np = " + str_lambda + "(42)"
        str_out = iw.set_random_seeds(str_inp)
        self.assertEqual(str_out, str_exp)

    def test_mock_es_visualization(self):
        """Visualization imports are wrapped in try/except with a MagicMock.

        NOTE(review): the indentation inside the ``expected`` strings was
        reconstructed (the archived copy lost leading whitespace) -- it
        must match the code generated by ``iw.mock_es_visualization``.
        """
        statement = "import espressomd.visualization"
        expected = """
try:
    import espressomd.visualization
    if hasattr(espressomd.visualization.mayaviLive, 'deferred_ImportError') or \\
            hasattr(espressomd.visualization.openGLLive, 'deferred_ImportError'):
        raise ImportError()
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    espressomd.visualization = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "import espressomd.visualization as test"
        expected = """
try:
    import espressomd.visualization as test
    if hasattr(test.mayaviLive, 'deferred_ImportError') or \\
            hasattr(test.openGLLive, 'deferred_ImportError'):
        raise ImportError()
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    test = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "import espressomd.visualization, espressomd.visualization as test"
        expected = """
try:
    import espressomd.visualization
    if hasattr(espressomd.visualization.mayaviLive, 'deferred_ImportError') or \\
            hasattr(espressomd.visualization.openGLLive, 'deferred_ImportError'):
        raise ImportError()
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    espressomd.visualization = MagicMock()
try:
    import espressomd.visualization as test
    if hasattr(test.mayaviLive, 'deferred_ImportError') or \\
            hasattr(test.openGLLive, 'deferred_ImportError'):
        raise ImportError()
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    test = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "from espressomd import visualization"
        expected = """
try:
    from espressomd import visualization
    if hasattr(visualization.mayaviLive, 'deferred_ImportError') or \\
            hasattr(visualization.openGLLive, 'deferred_ImportError'):
        raise ImportError()
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    visualization = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "from espressomd import visualization as test"
        expected = """
try:
    from espressomd import visualization as test
    if hasattr(test.mayaviLive, 'deferred_ImportError') or \\
            hasattr(test.openGLLive, 'deferred_ImportError'):
        raise ImportError()
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    test = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "from espressomd import visualization_mayavi"
        expected = """
try:
    from espressomd import visualization_mayavi
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    visualization_mayavi = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "from espressomd import visualization_mayavi as test"
        expected = """
try:
    from espressomd import visualization_mayavi as test
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    test = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "from espressomd.visualization_mayavi import mayaviLive"
        expected = """
try:
    from espressomd.visualization_mayavi import mayaviLive
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    mayaviLive = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "from espressomd.visualization_mayavi import mayaviLive as test"
        expected = """
try:
    from espressomd.visualization_mayavi import mayaviLive as test
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    test = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "from espressomd.visualization_mayavi import a as b, mayaviLive"
        expected = """
try:
    from espressomd.visualization_mayavi import a as b
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    b = MagicMock()
try:
    from espressomd.visualization_mayavi import mayaviLive
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    mayaviLive = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "from espressomd.visualization import openGLLive"
        expected = """
try:
    from espressomd.visualization import openGLLive
    if hasattr(openGLLive, 'deferred_ImportError'):
        raise openGLLive.deferred_ImportError
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    openGLLive = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        statement = "from espressomd.visualization import openGLLive as test"
        expected = """
try:
    from espressomd.visualization import openGLLive as test
    if hasattr(test, 'deferred_ImportError'):
        raise test.deferred_ImportError
except ImportError:
    from unittest.mock import MagicMock
    import espressomd
    test = MagicMock()
"""
        self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
        # test exceptions
        statements_without_namespace = [
            "from espressomd.visualization import *",
            "from espressomd.visualization_opengl import *",
            "from espressomd.visualization_mayavi import *"
        ]
        for s in statements_without_namespace:
            self.assertRaises(ValueError, iw.mock_es_visualization, s)

    def test_matplotlib_pyplot_visitor(self):
        """The AST visitor collects matplotlib aliases and GUI-related lines."""
        import_stmt = [
            'import matplotlib',
            'import matplotlib as mpl',
            'import matplotlib.pyplot',
            'import matplotlib.pyplot as plt',
            'import matplotlib.pyplot.figure as fig',
            'import ast, matplotlib',
            'import ast, matplotlib as mpl',
            'import ast, matplotlib.pyplot',
            'import ast, matplotlib.pyplot as plt',
            'import ast, matplotlib.pyplot.figure as fig',
            'from matplotlib import pyplot',
            'from matplotlib import pyplot as plt',
            'from matplotlib.pyplot import figure',
            'matplotlib.pyplot.ion()',
            'matplotlib.pyplot.ioff()',
            'plt.ion()',
            'mpl.use("PS")',
            'matplotlib.use("Agg")',
            'get_ipython().run_line_magic("matplotlib", "notebook")',
        ]
        tree = ast.parse('\n'.join(import_stmt))
        v = iw.GetMatplotlibPyplot()
        v.visit(tree)
        # find first line where matplotlib is imported
        self.assertEqual(v.matplotlib_first, 1)
        # find all aliases for matplotlib
        expected_mpl_aliases = ['matplotlib', 'mpl', 'matplotlib', 'mpl']
        self.assertEqual(v.matplotlib_aliases, expected_mpl_aliases)
        # find all aliases for matplotlib.pyplot
        expected_plt_aliases = [
            'matplotlib.pyplot', 'mpl.pyplot', 'matplotlib.pyplot', 'plt',
            'matplotlib.pyplot', 'mpl.pyplot', 'matplotlib.pyplot', 'plt',
            'pyplot', 'plt',
        ]
        self.assertEqual(v.pyplot_aliases, expected_plt_aliases)
        expected_plt_paths = {('matplotlib', 'pyplot'), ('mpl', 'pyplot'),
                              ('plt',), ('pyplot',)}
        self.assertEqual(v.pyplot_paths, expected_plt_paths)
        # find lines interactive mode, backend setup and magic functions
        self.assertEqual(v.pyplot_interactive_linenos, [14, 16])
        self.assertEqual(v.matplotlib_backend_linenos, [17, 18])
        self.assertEqual(v.ipython_magic_linenos, [19])

    def test_prng_seed_espressomd_system_visitor(self):
        """The AST visitor tracks System aliases/instances and numpy seeds."""
        import_stmt = [
            'sys0 = espressomd.System() # nothing: espressomd not imported',
            'import espressomd as es1',
            'import espressomd.system as es2',
            'import espressomd.System as s1, espressomd.system.System as s2',
            'from espressomd import System as s3, electrostatics',
            'from espressomd.system import System as s4',
            'sys1 = es1.System()',
            'sys2 = es1.system.System()',
            'sys3 = es2.System()',
            'sys4 = s1()',
            'sys5 = s2()',
            'sys6 = s3()',
            'sys7 = s4()',
            'import numpy as np',
            'import numpy.random as npr1',
            'from numpy import random as npr2',
            'np.random.seed(1)',
            'npr1.seed(1)',
            'npr2.seed(1)',
        ]
        tree = ast.parse('\n'.join(import_stmt))
        v = iw.GetPrngSeedEspressomdSystem()
        v.visit(tree)
        # find all aliases for espressomd.system.System
        expected_es_sys_aliases = {'es1.System', 'es1.system.System',
                                   'es2.System', 's1', 's2', 's3', 's4'}
        self.assertEqual(v.es_system_aliases, expected_es_sys_aliases)
        # find all variables of type espressomd.system.System
        expected_es_sys_objs = set('sys' + str(i) for i in range(1, 8))
        self.assertEqual(v.variable_system_aliases, expected_es_sys_objs)
        # find all seeds setup
        self.assertEqual(v.numpy_seeds, [17, 18, 19])
        # test exceptions
        str_es_sys_list = [
            'import espressomd.System',
            'import espressomd.system.System',
            'from espressomd import System',
            'from espressomd.system import System',
        ]
        exception_stmt = [
            's, var = System(), 5',
            'class A:\n\ts = System()',
            'def A():\n\ts = System()',
        ]
        # every combination of import form, alias and usage must be rejected
        for str_es_sys in str_es_sys_list:
            for str_stmt in exception_stmt:
                for alias in ['', ' as EsSystem']:
                    str_import = str_es_sys + alias + '\n'
                    alias = str_import.split()[-1]
                    code = str_import + str_stmt.replace('System', alias)
                    v = iw.GetPrngSeedEspressomdSystem()
                    tree = ast.parse(code)
                    err_msg = v.__class__.__name__ + \
                        ' should fail on ' + repr(code)
                    with self.assertRaises(AssertionError, msg=err_msg):
                        v.visit(tree)

    def test_delimit_statements(self):
        """Logical statements are mapped start-line -> end-line."""
        lines = [
            'a = 1 # NEWLINE becomes NL after a comment',
            'print("""',
            '',
            '""")',
            '',
            'b = 1 +\\',
            '3 + (',
            '4)',
            'if True:',
            ' c = 1',
        ]
        source_code = '\n'.join(lines)
        linenos_exp = {1: 1, 2: 4, 6: 8, 9: 9, 10: 10}
        linenos_out = iw.delimit_statements(source_code)
        self.assertEqual(linenos_out, linenos_exp)
# Standard unittest entry point when this file is executed directly.
if __name__ == "__main__":
    ut.main()
| KaiSzuttor/espresso | testsuite/scripts/test_importlib_wrapper.py | Python | gpl-3.0 | 15,753 | [
"ESPResSo",
"VisIt"
] | bf7167c306fffc28140a6005db7468b2630de0ef434b7a885c035523ed61ac15 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###########################################################################
# Given the output of the vsearch-pipe after the BLAST this will create
# a SLRUM file to parse the BLAST data so as to create a single entry for
# each sequence. Based upon the BLAST data it will try and classify the
# sequence to species or genus or family. If it can't it will classify
# the sequence as Various.
#
# Command line arguments:
#
# --blastinput <blast input file i.e. output of vsearch-pipe>
# --otuinput <OTU input file "results" file>
# --outsummary <Output summary CSV file>
#
###########################################################################
#
# Imports
import os
import re
import glob
import csv
from collections import defaultdict
import argparse
# Regular expression to allow us to "mark" results that come from
# our created UK barcodes
# NOTE(review): uk_re is defined but not referenced anywhere in this
# script -- presumably used by a related tool or left over; confirm.
uk_re = re.compile('NMW|NBGW|RBGE')
#
# Makes a directory if does not already exist
def mkdir(dname):
    """Create directory *dname* (including parents) if it does not exist.

    Uses exist_ok instead of the previous os.path.exists() guard, which
    had a check-then-create race when several jobs run concurrently.
    """
    os.makedirs(dname, exist_ok=True)
#
# Given a directory this returns a list of ext files
def ls(dir, ext):
    """Return the files in *dir* that carry the extension *ext*."""
    pattern = '%s/*.%s' % (dir, ext)
    return glob.glob(pattern)
#
# Returns the path to the CSV results file
def mkcsv(dir, file):
    """Map *file*'s basename onto a '.csv' results path inside *dir*."""
    stem = os.path.splitext(os.path.basename(file))[0]
    return '%s/%s.csv' % (dir, stem)
#
# Returns if this record is an NCBI record or a FPUK record
def is_ncbi(rec):
    """Report whether record column 5 is an NCBI-style id.

    NCBI descriptions carry more than two '|'-separated fields; the
    project's own FPUK barcode records carry fewer.
    """
    return len(rec[5].rsplit('|')) > 2
#
# Gets the list of species and bit scores
def get_species(matched):
    """Render each BLAST hit as 'family genus species (bits; PID; QC)'.

    Each hit row supplies: [2] percent identity, [3] bit score,
    [4] pipe-delimited description (accession|genus|species|family),
    [6] query coverage.
    """
    rendered = []
    for hit in matched:
        fam_gen_sp = hit[4].split('|')
        rendered.append('%s %s %s (%s; PID:%s%%; QC:%s%%)' % (
            fam_gen_sp[3], fam_gen_sp[1], fam_gen_sp[2],
            hit[3].strip(), hit[2].strip(), hit[6].strip()))
    return rendered
#
# Extracts the species from the matched list
def extract_species(matched):
    """Reduce BLAST rows to unique [family, genus, species] triples.

    Column 4 of each row is pipe-delimited as
    accession|genus|species|family; rows without a family field are kept
    with an empty family string (and reported to stdout). Duplicates are
    dropped while preserving first-seen order, so the output no longer
    depends on string hashing as the previous list(set(...)) round-trip
    did (that made the order vary between runs).
    """
    labels = []
    for rows in matched:
        parts = rows[4].split('|')
        genus = parts[1]
        species = parts[2]
        if len(parts) < 4:
            print('No family data for this line?', rows)
            family = ''
        else:
            family = parts[3]
        labels.append('%s %s %s' % (family, genus, species))
    # dict.fromkeys() de-duplicates deterministically (insertion order kept)
    species_list = []
    for spp in dict.fromkeys(labels):
        tmp = spp.split(' ')
        species_list.append([tmp[0], tmp[1], tmp[2]])
    return species_list
#
# Look through the genus in the list and if one of them
# has a grater influence of 60% then pick that one
def genus_percentage(spp):
    """Classify a list of [family, genus, species] triples that disagree.

    Returns '<genus> %' when a single genus accounts for at least 60% of
    the triples, the shared family name when every triple belongs to one
    family, and 'Various' otherwise.
    """
    total = len(spp)
    # Tally genus occurrences. defaultdict(int) replaces the previous
    # defaultdict(list) that was only ever used as an integer counter
    # (it worked solely because of a manual membership check).
    genus_counts = defaultdict(int)
    for triple in spp:
        genus_counts[triple[1]] += 1
    # Insertion order is preserved, so ties resolve to the first-seen
    # genus exactly as before.
    for gen, count in genus_counts.items():
        if (float(count) / float(total)) * 100.0 >= 60:
            return '%s %%' % gen
    families = [triple[0] for triple in spp]
    if len(set(families)) == 1:
        return families[0]
    return 'Various'
#
# Returns the match type for the species it can be:
# Zero
# Species
# Genus
# Various
def get_match_type(species):
    """Summarise a triple list as '----', 'genus species', the genus,
    or whatever genus_percentage() decides for mixed-genus lists."""
    if not species:
        return '----'
    if len(species) == 1:
        only = species[0]
        return '%s %s' % (only[1], only[2])
    first_genus = species[0][1]
    if all(item[1] == first_genus for item in species):
        return first_genus
    return genus_percentage(species)
#
# Outputs the header
def header(fd):
    """Write the summary CSV column headings to *fd*."""
    columns = ('SID', 'Number-Of-Sequences', 'Score', 'Match', 'Top Species')
    fd.write(','.join(columns) + '\n')
#
# Output a row in the file
def row(fd, sid, numberof, score, type, species, blast_results, otu):
    """Write one summary CSV line for a query sequence.

    fd            -- open file object for the summary CSV
    sid           -- query sequence identifier
    numberof      -- number of raw reads merged into this sequence
    score         -- top bit score
    type          -- classification string (species, genus, 'Various', ...)
    species       -- list of [family, genus, species] best-score triples
    blast_results -- all BLAST hit rows for this query (full evidence)
    otu           -- {member-id: size} mapping of the cluster's reads
    """
    try:
        # ID
        fd.write('%s,' % sid)
        # Number of sequences for this sequence
        fd.write('%d,' % int(numberof))
        # Top score
        fd.write('%f,' % float(score))
        # Type of match ... species, genus, various
        fd.write('%s' % type)
    except (TypeError, ValueError):
        # The original code crashed here via an undefined name ('bang').
        # Report the offending record and re-raise the real conversion
        # error so the traceback stays meaningful.
        print(sid)
        raise
    # Top bit score taxa matches (family and genus of each tied best hit)
    for item in species:
        fd.write(',%s %s' % (item[0], item[1]))
    # We also output all the BLAST results on the same line with their bit
    # score, so anything that looks wrong can be checked against the full
    # evidence.
    top_10 = get_species(blast_results)
    fd.write(',')
    for spp in top_10:
        fd.write(',%s' % spp)
    # And now we output which cluster members this sequence is made up of.
    fd.write(',')
    for ids in otu:
        fd.write(',%s,%d' % (ids, otu[ids]))
    fd.write('\n')
#
# Process one set of blast results for each ID
def process_set(fd, blast_results, otu):
    """Reduce one query's BLAST hits to a single summary row.

    blast_results maps a query id to its hit rows; in practice the caller
    hands over a dict with a single key. The hits are ranked by bit score
    and every hit tied with the best score is kept for classification.
    """
    top_bit_score = 0.0
    top_set = []
    # Work out the top bit score set
    for blast_id in blast_results:
        # Rank numerically: the previous string sort put e.g. '90' ahead
        # of '100' and could truncate the top-score set too early.
        blast_results[blast_id].sort(key=lambda entry: float(entry[3]),
                                     reverse=True)
        # Go through the entries and pick out all the top bit scored ones
        for entry in blast_results[blast_id]:
            if float(entry[3]) >= float(top_bit_score):
                top_set.append(entry)
                top_bit_score = entry[3]
            else:
                break
    # Extract the species and the matched type
    # i.e. does the top match to a specific species, genus or various?
    species_list = extract_species(top_set)
    matched = get_match_type(species_list)
    # Merged reads carry their multiplicity in the id tag as '...size=N'.
    # NOTE(review): blast_id deliberately keeps its value from the loop
    # above -- correct only while blast_results has exactly one key.
    numberof = 1
    if "size" in blast_id:
        parts = blast_id.split('=')
        if parts[-1].isdigit():
            numberof = parts[-1]
    # Output a row in the summary file for this ID set
    row(fd, blast_id, numberof, top_bit_score, matched, species_list,
        blast_results[blast_id], otu)
#
# Reads the OTU file and creates a dictonary to assosiated clusters including
# the number of sequences that make them up.
def parse_otu(fname):
    """Read a vsearch cluster (.uc) file into {centroid: {member: size}}.

    'S' rows start a cluster with its centroid sequence, 'H' rows attach
    a member to its centroid, and 'C' summary rows (like anything else)
    are ignored. Sizes come from the ';size=N' suffix of each label.
    """
    print('Parsing:', fname)
    clusters = {}
    with open(fname, 'r') as otu:
        for line in otu:
            fields = line.split()
            record_type = fields[0]
            if record_type == 'S':
                # New cluster: the centroid is its own first member.
                name, size = fields[8].split(';')[:2]
                clusters[name] = {name: int(size.split('=')[1])}
            elif record_type == 'H':
                # Member row: attach it to its centroid's cluster.
                member, member_size = fields[8].split(';')[:2]
                centroid = fields[9].split(';')[0]
                clusters[centroid][member] = int(member_size.split('=')[1])
    print('Finished Parsing:', fname)
    return clusters
#
# Read the OTU file, then the BLAST file and process the summary
def blast_summary(blast_fname, otu_fname, out_fname):
    """Stream the BLAST CSV and write one summary row per query sequence.

    blast_fname -- CSV from the vsearch/BLAST pipeline; all hits for one
                   query are assumed to sit on consecutive rows.
    otu_fname   -- vsearch cluster (.uc) file naming each cluster's members.
    out_fname   -- destination CSV summary file.
    """
    # Read the OTU file in to memory
    otu_dict = parse_otu(otu_fname)
    # Process the BLAST results file
    print('Processing: %s' % blast_fname)
    with open(blast_fname, 'r') as in_fd:
        with open(out_fname, 'w') as out_fd:
            records = csv.reader(in_fd,delimiter=",")
            header(out_fd)
            working_set = defaultdict(list)
            last_id = ""
            # Go through each line in the file
            for row in records:
                # Check to see if we have got to the end of a set for
                # a particular ID. There will be a set of results per ID
                if len(working_set) > 0 and last_id != row[0]:
                    # Process this ID's set.
                    # NOTE(review): assumes IDs embed '=...;' (e.g.
                    # 'id=seq1;size=5'); an ID without '=' would raise
                    # IndexError here -- confirm the upstream format.
                    ids = last_id.split('=')[1].split(';')[0]
                    process_set(out_fd, working_set, otu_dict[ids])
                    working_set.clear()
                # Grab the data from the line
                sid = row[0]
                sid_description = row[1]
                percent_score = row[2]
                bit_score = row[11]
                query_cover = row[13]
                desc = row[14]
                # Some rows carry an extra description column; fold its
                # second pipe-field into the description.
                if len(row) > 15:
                    desc = row[14] + '|' + row[15].split('|')[1]
                description = row[1] + '|' + desc
                # Save off the data into the set
                last_id = sid
                working_set[row[0]].append([sid,1,percent_score,bit_score,description,sid_description,query_cover])
            # There might be one left to process
            if len(working_set) > 0:
                ids = last_id.split('=')[1].split(';')[0]
                process_set(out_fd, working_set, otu_dict[ids])
                working_set.clear()
#
# Main: parse the command line and produce the summary.
print('')
print('Running')
# Grab the input parameters
parser = argparse.ArgumentParser(
    description="""Reads the CSV BLAST output to create a summary output""")
parser.add_argument(
    '--blastinput',
    dest='blast_input',
    nargs=1,
    required=True,
    help='BLAST input file as output by vsearch pipeline')
parser.add_argument(
    '--otuinput',
    dest='otu_input',
    nargs=1,
    required=True,
    help='OTU input file as generated by the vsearch cluster')
parser.add_argument(
    '--outsummary',
    dest='out_summary',
    nargs=1,
    required=True,
    help='Output summary CSV file')
args = parser.parse_args()
# Create the summaries for each BLAST result file.
# (nargs=1 stores each value as a one-element list, hence the [0] indexing)
blast_summary(args.blast_input[0], args.otu_input[0], args.out_summary[0])
print('End')
| colford/nbgw-plant-illumina-pipeline | bin/vsearch-blast-summary.py | Python | gpl-3.0 | 9,724 | [
"BLAST"
] | d7ee1878da92d41a42a7a7523dd31ff37893c93a4e843e9b08fa2419a6da75b3 |
"""
A serial module for accessing data from a vehicles OBD-II port
For more documentation, visit:
https://github.com/brendanwhitfield/python-OBD/wiki
"""
########################################################################
# #
# python-OBD: A python OBD-II serial module derived from pyobd #
# #
# Copyright 2004 Donour Sizemore (donour@uchicago.edu) #
# Copyright 2009 Secons Ltd. (www.obdtester.com) #
# Copyright 2009 Peter J. Creath #
# Copyright 2015 Brendan Whitfield (bcw7044@rit.edu) #
# #
########################################################################
# #
# __init__.py #
# #
# This file is part of python-OBD (a derivative of pyOBD) #
# #
# python-OBD is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 2 of the License, or #
# (at your option) any later version. #
# #
# python-OBD is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with python-OBD. If not, see <http://www.gnu.org/licenses/>. #
# #
########################################################################
from .__version__ import __version__
from .obd import OBD
from .OBDCommand import OBDCommand
from .commands import commands
from .utils import scanSerial, Unit
from .debug import debug
from .async import Async
| cloud-rocket/python-OBD | obd/__init__.py | Python | gpl-2.0 | 2,488 | [
"VisIt"
] | 67d668f787e34793d85cc76554c9253a9801afac3764bd66aa6db9f244c0f80c |
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes corresponding to phyloXML elements.
See Also
--------
Official specification:
http://phyloxml.org/
Journal article:
Han and Zmasek (2009), doi:10.1186/1471-2105-10-356
"""
__docformat__ = "restructuredtext en"
import re
import warnings
from Bio import Alphabet
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.SeqRecord import SeqRecord
from Bio import BiopythonWarning
from Bio.Phylo import BaseTree
# TODO - Remove this hack for Python 2.4
try:
    any
except NameError:
    # Pre-2.5 interpreters lack the builtin; provide an equivalent.
    def any(iterable):
        """Return True if any element of *iterable* is truthy."""
        result = False
        for element in iterable:
            if element:
                result = True
                break
        return result
# Issued via warnings.warn (not raised) so parsing can continue past
# spec violations.
class PhyloXMLWarning(BiopythonWarning):
    """Warning for non-compliance with the phyloXML specification."""
    pass
def _check_str(text, testfunc):
"""Check a string using testfunc, and warn if there's no match."""
if text is not None and not testfunc(text):
warnings.warn("String %s doesn't match the given regexp" % text,
PhyloXMLWarning, stacklevel=2)
# Core elements
# Marker base type: adds no behaviour of its own, but lets isinstance()
# distinguish phyloXML elements from other Bio.Phylo tree elements.
class PhyloElement(BaseTree.TreeElement):
    """Base class for all PhyloXML objects."""
class Phyloxml(PhyloElement):
    """Root node of the PhyloXML document.

    Contains an arbitrary number of Phylogeny elements, possibly followed by
    elements from other namespaces.

    :Parameters:
        attributes
            (XML namespace definitions)
        phylogenies
            list of phylogenetic trees
        other
            list of arbitrary non-phyloXML elements, if any
    """
    def __init__(self, attributes, phylogenies=None, other=None):
        self.attributes = attributes
        self.phylogenies = phylogenies or []
        self.other = other or []

    def __getitem__(self, index):
        """Get a phylogeny by index or name."""
        # Integer and slice indexing delegate straight to the list.
        if isinstance(index, int) or isinstance(index, slice):
            return self.phylogenies[index]
        # NOTE(review): basestring exists only on Python 2 -- under
        # Python 3 a string index reaches this line and raises NameError;
        # confirm the supported interpreter before reuse.
        if not isinstance(index, basestring):
            raise KeyError("can't use %s as an index" % type(index))
        # Fall back to a linear search by tree name.
        for tree in self.phylogenies:
            if tree.name == index:
                return tree
        else:
            raise KeyError("no phylogeny found with name " + repr(index))

    def __iter__(self):
        """Iterate through the phylogenetic trees in this object."""
        return iter(self.phylogenies)

    def __len__(self):
        """Number of phylogenetic trees in this object."""
        return len(self.phylogenies)

    def __str__(self):
        """Render as 'Phyloxml([tree1,\\ntree2,...])' for debugging."""
        return '%s([%s])' % (self.__class__.__name__,
                             ',\n'.join(map(str, self.phylogenies)))
class Other(PhyloElement):
    """Container for non-phyloXML elements in the tree.

    Usually an Other object will have either a 'value' or a non-empty list
    of 'children', but not both. This is a convention, not enforced here.

    :Parameters:
        tag : string
            local tag for the XML node
        namespace : string
            XML namespace for the node -- should not be the default phyloXML
            namespace.
        attributes : dict of strings
            attributes on the XML node
        value : string
            text contained directly within this XML node
        children : list
            child nodes, if any (also `Other` instances)
    """
    def __init__(self, tag, namespace=None, attributes=None, value=None,
                 children=None):
        self.tag = tag
        self.namespace = namespace
        self.attributes = attributes
        self.value = value
        self.children = children or []

    def __iter__(self):
        """Iterate through the children of this object (if any)."""
        for child in self.children:
            yield child
class Phylogeny(PhyloElement, BaseTree.Tree):
    """A phylogenetic tree.

    :Parameters:
        root : Clade
            the root node/clade of this tree
        rooted : bool
            True if this tree is rooted
        rerootable : bool
            True if this tree is rerootable
        branch_length_unit : string
            unit for branch_length values on clades
        name : string
            identifier for this tree, not required to be unique
        id : Id
            unique identifier for this tree
        description : string
            plain-text description
        date : Date
            date for the root node of this tree
        confidences : list
            Confidence objects for this tree
        clade_relations : list
            CladeRelation objects
        sequence_relations : list
            SequenceRelation objects
        properties : list
            Property objects
        other : list
            non-phyloXML elements (type `Other`)
    """
    def __init__(self, root=None, rooted=True,
                 rerootable=None, branch_length_unit=None, type=None,
                 # Child nodes
                 name=None, id=None, description=None, date=None,
                 # Collections
                 confidences=None, clade_relations=None, sequence_relations=None,
                 properties=None, other=None,
                 ):
        # 'rooted' must be a real bool (phyloXML requires the attribute).
        assert isinstance(rooted, bool)
        self.root = root
        self.rooted = rooted
        self.rerootable = rerootable
        self.branch_length_unit = branch_length_unit
        self.type = type
        self.name = name
        self.id = id
        self.description = description
        self.date = date
        # 'x or []' gives each instance its own fresh list when the
        # argument is omitted (avoids shared mutable defaults).
        self.confidences = confidences or []
        self.clade_relations = clade_relations or []
        self.sequence_relations = sequence_relations or []
        self.properties = properties or []
        self.other = other or []

    @classmethod
    def from_tree(cls, tree, **kwargs):
        """Create a new Phylogeny given a Tree (from Newick/Nexus or BaseTree).

        Keyword arguments are the usual `Phylogeny` constructor parameters.
        """
        # Py2-era 'cond and X or Y' conditional: wrap tree.id in an Id
        # when present, else None (relies on Id(...) being truthy).
        phy = cls(
            root=Clade.from_clade(tree.root),
            rooted=tree.rooted,
            name=tree.name,
            id=(tree.id is not None) and Id(str(tree.id)) or None)
        phy.__dict__.update(kwargs)
        return phy

    @classmethod
    def from_clade(cls, clade, **kwargs):
        """Create a new Phylogeny given a Newick or BaseTree Clade object.

        Keyword arguments are the usual `PhyloXML.Clade` constructor parameters.
        """
        return Clade.from_clade(clade).to_phylogeny(**kwargs)

    def as_phyloxml(self):
        """Return this tree, a PhyloXML-compatible Phylogeny object.

        Overrides the `BaseTree` method.
        """
        return self

    def to_phyloxml_container(self, **kwargs):
        """Create a new Phyloxml object containing just this phylogeny."""
        return Phyloxml(kwargs, phylogenies=[self])

    def to_alignment(self):
        """Construct an alignment from the aligned sequences in this tree."""
        def is_aligned_seq(elem):
            # Predicate for the tree search: Sequence elements whose
            # molecular sequence is flagged as aligned.
            if isinstance(elem, Sequence) and elem.mol_seq.is_aligned:
                return True
            return False
        seqs = self._filter_search(is_aligned_seq, 'preorder', True)
        try:
            # NOTE(review): iterator .next() is Python 2 syntax; under
            # Python 3 this needs next(seqs) -- confirm target version.
            first_seq = seqs.next()
        except StopIteration:
            # No aligned sequences were found --> empty MSA
            return MultipleSeqAlignment([])
        msa = MultipleSeqAlignment([first_seq.to_seqrecord()],
                                   first_seq.get_alphabet())
        msa.extend(seq.to_seqrecord() for seq in seqs)
        return msa

    # Singular property for plural attribute
    def _get_confidence(self):
        """Equivalent to self.confidences[0] if there is only 1 value.

        See also: `Clade.confidence`, `Clade.taxonomy`
        """
        if len(self.confidences) == 0:
            return None
        if len(self.confidences) > 1:
            raise AttributeError("more than 1 confidence value available; "
                                 "use Phylogeny.confidences")
        return self.confidences[0]

    def _set_confidence(self, value):
        """Set the single confidence value (number or Confidence object)."""
        if value is None:
            # Special case: mirror the behavior of _get_confidence
            self.confidences = []
            return
        # Plain numbers are wrapped in a Confidence element.
        if isinstance(value, float) or isinstance(value, int):
            value = Confidence(value)
        elif not isinstance(value, Confidence):
            raise ValueError("value must be a number or Confidence instance")
        if len(self.confidences) == 0:
            self.confidences.append(value)
        elif len(self.confidences) == 1:
            self.confidences[0] = value
        else:
            raise ValueError("multiple confidence values already exist; "
                             "use Phylogeny.confidences instead")

    def _del_confidence(self):
        """Clear all confidence values."""
        self.confidences = []

    confidence = property(_get_confidence, _set_confidence, _del_confidence)
class Clade(PhyloElement, BaseTree.Clade):
"""Describes a branch of the current phylogenetic tree.
Used recursively, describes the topology of a phylogenetic tree.
Both ``color`` and ``width`` elements should be interpreted by client code
as applying to the whole clade, including all descendents, unless
overwritten in-sub clades. This module doesn't automatically assign these
attributes to sub-clades to achieve this cascade -- and neither should you.
:Parameters:
branch_length
parent branch length of this clade
id_source
link other elements to a clade (on the xml-level)
name : string
short label for this clade
confidences : list of Confidence objects
used to indicate the support for a clade/parent branch.
width : float
branch width for this clade (including branch from parent)
color : BranchColor
color used for graphical display of this clade
node_id
unique identifier for the root node of this clade
taxonomies : list
Taxonomy objects
sequences : list
Sequence objects
events : Events
describe such events as gene-duplications at the root node/parent
branch of this clade
binary_characters : BinaryCharacters
binary characters
distributions : list of Distribution objects
distribution(s) of this clade
date : Date
a date for the root node of this clade
references : list
Reference objects
properties : list
Property objects
clades : list Clade objects
Sub-clades
other : list of Other objects
non-phyloXML objects
"""
def __init__(self,
# Attributes
branch_length=None, id_source=None,
# Child nodes
name=None, width=None, color=None, node_id=None, events=None,
binary_characters=None, date=None,
# Collections
confidences=None, taxonomies=None, sequences=None,
distributions=None, references=None, properties=None, clades=None,
other=None,
):
self.branch_length = branch_length
self.id_source = id_source
self.name = name
self.width = width
self.color = color
self.node_id = node_id
self.events = events
self.binary_characters = binary_characters
self.date = date
self.confidences = confidences or []
self.taxonomies = taxonomies or []
self.sequences = sequences or []
self.distributions = distributions or []
self.references = references or []
self.properties = properties or []
self.clades = clades or []
self.other = other or []
    @classmethod
    def from_clade(cls, clade, **kwargs):
        """Create a new PhyloXML Clade from a Newick or BaseTree Clade object.

        Keyword arguments are the usual PhyloXML Clade constructor parameters.
        """
        new_clade = cls(branch_length=clade.branch_length,
                        name=clade.name)
        # Recursively convert sub-clades (iterating a clade yields its
        # immediate children).
        new_clade.clades = [cls.from_clade(c) for c in clade]
        # Goes through the 'confidence' property setter, which wraps plain
        # numbers in a Confidence object (or clears the list for None).
        new_clade.confidence = clade.confidence
        # Apply keyword overrides directly; __dict__.update bypasses
        # property setters.
        new_clade.__dict__.update(kwargs)
        return new_clade
    def to_phylogeny(self, **kwargs):
        """Create a new phylogeny containing just this clade.

        The clade's date is copied onto the phylogeny; any keyword arguments
        are set as attributes on the new Phylogeny object.
        """
        phy = Phylogeny(root=self, date=self.date)
        phy.__dict__.update(kwargs)
        return phy
# Shortcuts for list attributes that are usually only 1 item
# NB: Duplicated from Phylogeny class
def _get_confidence(self):
if len(self.confidences) == 0:
return None
if len(self.confidences) > 1:
raise AttributeError("more than 1 confidence value available; "
"use Clade.confidences")
return self.confidences[0]
def _set_confidence(self, value):
if value is None:
# Special case: mirror the behavior of _get_confidence
self.confidences = []
return
if isinstance(value, float) or isinstance(value, int):
value = Confidence(value)
elif not isinstance(value, Confidence):
raise ValueError("value must be a number or Confidence instance")
if len(self.confidences) == 0:
self.confidences.append(value)
elif len(self.confidences) == 1:
self.confidences[0] = value
else:
raise ValueError("multiple confidence values already exist; "
"use Phylogeny.confidences instead")
def _del_confidence(self):
self.confidences = []
confidence = property(_get_confidence, _set_confidence, _del_confidence)
def _get_taxonomy(self):
if len(self.taxonomies) == 0:
return None
if len(self.taxonomies) > 1:
raise AttributeError("more than 1 taxonomy value available; "
"use Clade.taxonomies")
return self.taxonomies[0]
def _set_taxonomy(self, value):
if not isinstance(value, Taxonomy):
raise ValueError("assigned value must be a Taxonomy instance")
if len(self.taxonomies) == 0:
self.taxonomies.append(value)
elif len(self.taxonomies) == 1:
self.taxonomies[0] = value
else:
raise ValueError("multiple taxonomy values already exist; "
"use Phylogeny.taxonomies instead")
taxonomy = property(_get_taxonomy, _set_taxonomy)
    # Syntax sugar for setting the branch color
    def _get_color(self):
        """Return the stored BranchColor, or None (property getter)."""
        return self._color

    def _set_color(self, arg):
        """Normalize any accepted input form to a BranchColor (or None).

        Accepts: None, a BranchColor instance, a known color name, an
        HTML-style '#RRGGBB' string, or an iterable of three RGB values.
        """
        if arg is None or isinstance(arg, BranchColor):
            self._color = arg
        elif isinstance(arg, basestring):  # Python 2 string check
            if arg in BranchColor.color_names:
                # Known color name
                self._color = BranchColor.from_name(arg)
            elif arg.startswith('#') and len(arg) == 7:
                # HTML-style hex string
                self._color = BranchColor.from_hex(arg)
            else:
                raise ValueError("invalid color string %s" % arg)
        elif hasattr(arg, '__iter__') and len(arg) == 3:
            # RGB triplet
            self._color = BranchColor(*arg)
        else:
            raise ValueError("invalid color value %s" % arg)

    color = property(_get_color, _set_color, doc="Branch color.")
# PhyloXML-specific complex types
class Accession(PhyloElement):
    """The local part of a sequence identifier.

    Example: in ``UniProtKB:P17304`` the ``value`` attribute is 'P17304'
    and the ``source`` attribute is 'UniProtKB'.
    """

    def __init__(self, value, source):
        """Store the identifier value and the database it comes from."""
        self.source = source
        self.value = value

    def __str__(self):
        """Render the identifier as 'source:value'."""
        return '%s:%s' % (self.source, self.value)
class Annotation(PhyloElement):
    """The annotation of a molecular sequence.

    Annotating via the optional 'ref' attribute is recommended.

    :Parameters:
        ref : string
            reference string, e.g. 'GO:0008270',
            'KEGG:Tetrachloroethene degradation', 'EC:1.1.1.1'
        source : string
            plain-text source for this annotation
        evidence : str
            free-text description of the evidence (e.g. 'experimental')
        desc : string
            free text description
        confidence : Confidence
            type and value of support for this annotation
        properties : list
            typed and referenced annotations from external resources
        uri : Uri
            link
    """
    re_ref = re.compile(r'[a-zA-Z0-9_]+:[a-zA-Z0-9_\.\-\s]+')

    def __init__(self,
                 # Attributes
                 ref=None, source=None, evidence=None, type=None,
                 # Child nodes
                 desc=None, confidence=None, uri=None,
                 # Collection
                 properties=None):
        """Validate 'ref' against the 'DB:identifier' pattern and store values."""
        _check_str(ref, self.re_ref.match)
        self.ref = ref
        self.type = type
        self.evidence = evidence
        self.source = source
        self.uri = uri
        self.confidence = confidence
        self.desc = desc
        # Always a list; a fresh one per instance when not supplied
        self.properties = properties or []
class BinaryCharacters(PhyloElement):
    """Binary characters at the root of a clade.

    Records the names and/or counts of binary characters that are present,
    gained, and lost at the root of the clade.
    """

    def __init__(self,
                 # Attributes
                 type=None, gained_count=None, lost_count=None, present_count=None,
                 absent_count=None,
                 # Child nodes (flattened into collections)
                 gained=None, lost=None, present=None, absent=None):
        """Store counts as given and name collections as (possibly empty) lists."""
        self.type = type
        self.gained_count = gained_count
        self.lost_count = lost_count
        self.present_count = present_count
        self.absent_count = absent_count
        # Each collection defaults to its own fresh empty list
        self.gained = gained or []
        self.lost = lost or []
        self.present = present or []
        self.absent = absent or []
class BranchColor(PhyloElement):
    """Indicates the color of a clade when rendered graphically.

    The color should be interpreted by client code (e.g. visualization
    programs) as applying to the whole clade, unless overwritten by the
    color(s) of sub-clades.

    Color values must be integers from 0 to 255.
    """

    # Map of recognized color names to RGB triplets; accepted by from_name
    # and by the Clade.color property setter.
    color_names = {
        'red': (255, 0, 0),
        'r': (255, 0, 0),
        'yellow': (255, 255, 0),
        'y': (255, 255, 0),
        'green': (0, 128, 0),
        'g': (0, 128, 0),
        'cyan': (0, 255, 255),
        'c': (0, 255, 255),
        'blue': (0, 0, 255),
        'b': (0, 0, 255),
        'magenta': (255, 0, 255),
        'm': (255, 0, 255),
        'black': (0, 0, 0),
        'k': (0, 0, 0),
        'white': (255, 255, 255),
        'w': (255, 255, 255),
        # Names standardized in HTML/CSS spec
        # http://w3schools.com/html/html_colornames.asp
        'maroon': (128, 0, 0),
        'olive': (128, 128, 0),
        'lime': (0, 255, 0),
        'aqua': (0, 255, 255),
        'teal': (0, 128, 128),
        'navy': (0, 0, 128),
        'fuchsia': (255, 0, 255),
        'purple': (128, 0, 128),
        'silver': (192, 192, 192),
        'gray': (128, 128, 128),
        # More definitions from matplotlib/gcolor2
        'grey': (128, 128, 128),
        'pink': (255, 192, 203),
        'salmon': (250, 128, 114),
        'orange': (255, 165, 0),
        'gold': (255, 215, 0),
        'tan': (210, 180, 140),
        'brown': (165, 42, 42),
    }

    def __init__(self, red, green, blue):
        """Store the three channel values, validating each is an int in 0-255."""
        for color in (red, green, blue):
            assert (isinstance(color, int) and
                    0 <= color <= 255
                    ), "Color values must be integers between 0 and 255."
        self.red = red
        self.green = green
        self.blue = blue

    @classmethod
    def from_hex(cls, hexstr):
        """Construct a BranchColor object from a hexadecimal string.

        The string format is the same style used in HTML and CSS, such as
        '#FF8000' for an RGB value of (255, 128, 0).
        """
        assert (isinstance(hexstr, basestring) and
                hexstr.startswith('#') and
                len(hexstr) == 7
                ), "need a 24-bit hexadecimal string, e.g. #000000"
        # Parse each 2-character channel directly as base 16
        RGB = hexstr[1:3], hexstr[3:5], hexstr[5:]
        return cls(*[int(cc, 16) for cc in RGB])

    @classmethod
    def from_name(cls, colorname):
        """Construct a BranchColor object by the color's name."""
        return cls(*cls.color_names[colorname])

    def to_hex(self):
        """Return a 24-bit hexadecimal RGB representation of this color.

        The returned string is suitable for use in HTML/CSS, as a color
        parameter in matplotlib, and perhaps other situations.

        Example:

            >>> bc = BranchColor(12, 200, 100)
            >>> bc.to_hex()
            '#0cc864'
        """
        # Each channel as a zero-padded lowercase hex byte; equivalent to the
        # old shift-and-zfill arithmetic but self-explanatory.
        return '#%02x%02x%02x' % (self.red, self.green, self.blue)

    def to_rgb(self):
        """Return a tuple of RGB values (0 to 255) representing this color.

        Example:

            >>> bc = BranchColor(255, 165, 0)
            >>> bc.to_rgb()
            (255, 165, 0)
        """
        return (self.red, self.green, self.blue)

    def __repr__(self):
        """Preserve the standard RGB order when representing this object."""
        return (u'%s(red=%d, green=%d, blue=%d)'
                % (self.__class__.__name__, self.red, self.green, self.blue))

    def __str__(self):
        """Show the color's RGB values."""
        return "(%d, %d, %d)" % (self.red, self.green, self.blue)
class CladeRelation(PhyloElement):
    """A typed relationship between two clades.

    For example, this can describe multiple parents of a clade.

    :Parameters:
        type : str
            kind of relationship
        id_ref_0 : str
            identifier of the first clade
        id_ref_1 : str
            identifier of the second clade
        distance : str
            distance between the two clades
        confidence : Confidence
            support for this relation
    """

    def __init__(self, type, id_ref_0, id_ref_1,
                 distance=None, confidence=None):
        """Store the relation type, the two clade references, and options."""
        self.type = type
        self.id_ref_0 = id_ref_0
        self.id_ref_1 = id_ref_1
        self.distance = distance
        self.confidence = confidence
class Confidence(PhyloElement):
    """A general purpose confidence element.

    For example, this can express the bootstrap support value of a clade
    (in which case the `type` attribute is 'bootstrap').

    :Parameters:
        value : float
            confidence value
        type : string
            label for the type of confidence, e.g. 'bootstrap'
    """

    def __init__(self, value, type='unknown'):
        """Store the numeric value and its type label."""
        self.type = type
        self.value = value

    def __float__(self):
        """Coerce to float via the stored value."""
        return float(self.value)

    def __int__(self):
        """Coerce to int via the stored value."""
        return int(self.value)
class Date(PhyloElement):
    """A date associated with a clade/node.

    Its value can be numerical, via the 'value' element, and/or free text via
    the 'desc' element (e.g. 'Silurian'). When a numerical value is used, the
    'unit' attribute is recommended.

    :Parameters:
        unit : string
            type of numerical value (e.g. 'mya' for 'million years ago')
        value : float
            the date value
        desc : string
            plain-text description of the date
        minimum : float
            lower bound on the date value
        maximum : float
            upper bound on the date value
    """

    def __init__(self, value=None, unit=None, desc=None,
                 minimum=None, maximum=None):
        """Store the date value, bounds, unit and description."""
        self.minimum = minimum
        self.maximum = maximum
        self.desc = desc
        self.unit = unit
        self.value = value

    def __str__(self):
        """Show the human-readable date, falling back to the class name."""
        if self.unit and self.value is not None:
            # Numeric date with a unit, e.g. '100 mya'
            return '%s %s' % (self.value, self.unit)
        if self.desc is not None:
            return self.desc
        return self.__class__.__name__
class Distribution(PhyloElement):
    """Geographic distribution of the items of a clade (species, sequences).

    Intended for phylogeographic applications.

    :Parameters:
        desc : string
            free-text description of the location
        points : list of `Point` objects
            coordinates (similar to the 'Point' element in Google's KML format)
        polygons : list of `Polygon` objects
            coordinate sets defining geographic regions
    """

    def __init__(self, desc=None, points=None, polygons=None):
        """Store the description and the coordinate collections as lists."""
        self.desc = desc
        self.points = points if points else []
        self.polygons = polygons if polygons else []
class DomainArchitecture(PhyloElement):
    """Domain architecture of a protein.

    :Parameters:
        length : int
            total length of the protein sequence
        domains : list of ProteinDomain objects
            the domains within this protein
    """

    def __init__(self, length=None, domains=None):
        """Store the protein length and its domain list.

        NOTE: unlike the collection attributes elsewhere in this module,
        'domains' is stored exactly as given (it may remain None) -- kept
        that way to preserve the original behavior.
        """
        self.length = length
        self.domains = domains
class Events(PhyloElement):
    """Events at the root node of a clade (e.g. one gene duplication).

    All attributes are set to None by default, but this object can also be
    treated as a dictionary, in which case None values are treated as missing
    keys and deleting a key resets that attribute's value back to None.
    """
    ok_type = set(('transfer', 'fusion', 'speciation_or_duplication', 'other',
                   'mixed', 'unassigned'))

    def __init__(self, type=None, duplications=None, speciations=None,
                 losses=None, confidence=None):
        """Validate the event type and store all counters/values."""
        _check_str(type, self.ok_type.__contains__)
        self.type = type
        self.duplications = duplications
        self.speciations = speciations
        self.losses = losses
        self.confidence = confidence

    # Fixed: use .items()/.values() instead of the Python-2-only
    # .iteritems()/.itervalues() -- identical results on Python 2, and the
    # methods keep working on Python 3.
    def items(self):
        """Return (key, value) pairs for all attributes that are set."""
        return [(k, v) for k, v in self.__dict__.items() if v is not None]

    def keys(self):
        """Return the names of all attributes that are set."""
        return [k for k, v in self.__dict__.items() if v is not None]

    def values(self):
        """Return the values of all attributes that are set."""
        return [v for v in self.__dict__.values() if v is not None]

    def __len__(self):
        """Number of attributes that are currently set (non-None)."""
        return len(self.values())

    def __getitem__(self, key):
        if not hasattr(self, key):
            raise KeyError(key)
        val = getattr(self, key)
        if val is None:
            # None-valued attributes behave like missing dictionary keys
            raise KeyError("%s has not been set in this object" % repr(key))
        return val

    def __setitem__(self, key, val):
        setattr(self, key, val)

    def __delitem__(self, key):
        # Deleting a key resets the attribute rather than removing it
        setattr(self, key, None)

    def __iter__(self):
        return iter(self.keys())

    def __contains__(self, key):
        return (hasattr(self, key) and getattr(self, key) is not None)
class Id(PhyloElement):
    """A general-purpose identifier element.

    Allows indicating the provider (or authority) of an identifier,
    e.g. NCBI, along with the value itself.
    """

    def __init__(self, value, provider=None):
        """Store the identifier value and its optional provider."""
        self.provider = provider
        self.value = value

    def __str__(self):
        """Render as 'provider:value', or just the value without a provider."""
        if self.provider is None:
            return self.value
        return '%s:%s' % (self.provider, self.value)
class MolSeq(PhyloElement):
    """Store a molecular sequence.

    :Parameters:
        value : string
            the sequence itself
        is_aligned : bool
            True if this sequence is aligned with the others (usually meaning
            all aligned seqs are the same length and gaps may be present)
    """
    # Letters, plus the gap/ambiguity symbols '.', '-', '?', '*' and '_'
    re_value = re.compile(r'[a-zA-Z\.\-\?\*_]+')

    def __init__(self, value, is_aligned=None):
        """Validate the sequence characters and store the values."""
        _check_str(value, self.re_value.match)
        self.is_aligned = is_aligned
        self.value = value

    def __str__(self):
        """The raw sequence string."""
        return self.value
class Point(PhyloElement):
    """Geographic coordinates of a point, with an optional altitude.

    Used by the 'Distribution' element.

    :Parameters:
        geodetic_datum : string, required
            the geodetic datum (also called 'map datum'); Google's KML, for
            example, uses 'WGS84'
        lat : numeric
            latitude
        long : numeric
            longitude (parameter name kept from the phyloXML spec)
        alt : numeric
            altitude
        alt_unit : string
            unit for the altitude (e.g. 'meter')
    """

    def __init__(self, geodetic_datum, lat, long, alt=None, alt_unit=None):
        """Store the datum, coordinates and optional altitude information."""
        self.geodetic_datum = geodetic_datum
        self.alt_unit = alt_unit
        self.alt = alt
        self.lat = lat
        self.long = long
class Polygon(PhyloElement):
    """A polygon defined by a list of 'Points' (used by element 'Distribution').

    :param points: list of 3 or more points representing vertices.
    """

    def __init__(self, points=None):
        """Store the vertex list (a fresh empty list when not supplied)."""
        self.points = points if points else []

    def __str__(self):
        """Class name plus one vertex per line."""
        vertices = ',\n'.join(str(point) for point in self.points)
        return '%s([%s])' % (self.__class__.__name__, vertices)
class Property(PhyloElement):
    """A typed and referenced property from an external resource.

    Can be attached to `Phylogeny`, `Clade`, and `Annotation` objects.

    :Parameters:
        value : string
            the value of the property
        ref : string
            reference to an external resource, e.g. "NOAA:depth"
        applies_to : string
            the item the property applies to (e.g. 'node' for the parent
            node of a clade, 'parent_branch' for the parent branch of a
            clade, or just 'clade')
        datatype : string
            the type of the property; limited to xsd-datatypes
            (e.g. 'xsd:string', 'xsd:boolean', 'xsd:integer', 'xsd:decimal',
            'xsd:float', 'xsd:double', 'xsd:date', 'xsd:anyURI')
        unit : string (optional)
            the unit of the property, e.g. "METRIC:m"
        id_ref : Id (optional)
            attaches the property to one specific element (on the xml level)
    """
    re_ref = re.compile(r'[a-zA-Z0-9_]+:[a-zA-Z0-9_\.\-\s]+')
    ok_applies_to = set(('phylogeny', 'clade', 'node', 'annotation',
                         'parent_branch', 'other'))
    ok_datatype = set(('xsd:string', 'xsd:boolean', 'xsd:decimal', 'xsd:float',
                       'xsd:double', 'xsd:duration', 'xsd:dateTime', 'xsd:time', 'xsd:date',
                       'xsd:gYearMonth', 'xsd:gYear', 'xsd:gMonthDay', 'xsd:gDay',
                       'xsd:gMonth', 'xsd:hexBinary', 'xsd:base64Binary', 'xsd:anyURI',
                       'xsd:normalizedString', 'xsd:token', 'xsd:integer',
                       'xsd:nonPositiveInteger', 'xsd:negativeInteger', 'xsd:long', 'xsd:int',
                       'xsd:short', 'xsd:byte', 'xsd:nonNegativeInteger', 'xsd:unsignedLong',
                       'xsd:unsignedInt', 'xsd:unsignedShort', 'xsd:unsignedByte',
                       'xsd:positiveInteger'))

    def __init__(self, value, ref, applies_to, datatype,
                 unit=None, id_ref=None):
        """Validate the restricted string arguments, then store everything.

        Validation order is preserved from the original implementation so
        the same argument fails first when several are invalid.
        """
        _check_str(ref, self.re_ref.match)
        _check_str(applies_to, self.ok_applies_to.__contains__)
        _check_str(datatype, self.ok_datatype.__contains__)
        _check_str(unit, self.re_ref.match)
        self.value = value
        self.ref = ref
        self.applies_to = applies_to
        self.datatype = datatype
        self.unit = unit
        self.id_ref = id_ref
class ProteinDomain(PhyloElement):
    """Represents an individual domain in a domain architecture.

    The locations use 0-based indexing, as most Python objects including
    SeqFeature do, rather than the usual biological convention starting at 1.
    This means the start and end attributes can be used directly as slice
    indexes on Seq objects.

    :Parameters:
        value : string
            name/identifier of the domain (stored as the SeqFeature id on
            round-trip conversion)
        start : non-negative integer
            start of the domain on the sequence, using 0-based indexing
        end : non-negative integer
            end of the domain on the sequence
        confidence : float
            can be used to store e.g. E-values
        id : string
            unique identifier/name
    """

    def __init__(self, value, start, end, confidence=None, id=None):
        self.value = value
        self.start = start
        self.end = end
        self.confidence = confidence
        self.id = id

    @classmethod
    def from_seqfeature(cls, feat):
        """Create a ProteinDomain from a SeqFeature.

        Fixed to construct via ``cls`` (instead of the hard-coded class name)
        so subclasses build instances of themselves.
        """
        return cls(feat.id,
                   feat.location.nofuzzy_start,
                   feat.location.nofuzzy_end,
                   confidence=feat.qualifiers.get('confidence'))

    def to_seqfeature(self):
        """Convert back to a SeqFeature with a 'confidence' qualifier."""
        feat = SeqFeature(location=FeatureLocation(self.start, self.end),
                          id=self.value)
        # NOTE(review): 'confidence' is always set in __init__, so this test
        # is always true and a None confidence is attached as-is; kept
        # unchanged to preserve the round-trip behavior.
        if hasattr(self, 'confidence'):
            feat.qualifiers['confidence'] = self.confidence
        return feat
class Reference(PhyloElement):
    """Literature reference for a clade.

    NB: Whenever possible, use the ``doi`` attribute instead of the free-text
    ``desc`` element.
    """
    # A DOI looks like 'prefix/suffix'
    re_doi = re.compile(r'[a-zA-Z0-9_\.]+/[a-zA-Z0-9_\.]+')

    def __init__(self, doi=None, desc=None):
        """Validate the DOI format (when given) and store both fields."""
        _check_str(doi, self.re_doi.match)
        self.desc = desc
        self.doi = doi
class Sequence(PhyloElement):
    """A molecular sequence (Protein, DNA, RNA) associated with a node.

    One intended use for ``id_ref`` is to link a sequence to a taxonomy (via the
    taxonomy's ``id_source``) in case of multiple sequences and taxonomies per
    node.

    :Parameters:
        type : {'dna', 'rna', 'protein'}
            type of molecule this sequence represents
        id_ref : string
            reference to another resource
        id_source : string
            source for the reference
        symbol : string
            short symbol of the sequence, e.g. 'ACTM' (max. 10 chars)
        accession : Accession
            accession code for this sequence.
        name : string
            full name of the sequence, e.g. 'muscle Actin'
        location
            location of a sequence on a genome/chromosome.
        mol_seq : MolSeq
            the molecular sequence itself
        uri : Uri
            link
        annotations : list of Annotation objects
            annotations on this sequence
        domain_architecture : DomainArchitecture
            protein domains on this sequence
        other : list of Other objects
            non-phyloXML elements
    """
    alphabets = {'dna': Alphabet.generic_dna,
                 'rna': Alphabet.generic_rna,
                 'protein': Alphabet.generic_protein}
    # A sequence symbol is 1-10 non-whitespace characters
    re_symbol = re.compile(r'\S{1,10}')

    def __init__(self,
                 # Attributes
                 type=None, id_ref=None, id_source=None,
                 # Child nodes
                 symbol=None, accession=None, name=None, location=None,
                 mol_seq=None, uri=None, domain_architecture=None,
                 # Collections
                 annotations=None, other=None,
                 ):
        _check_str(type, self.alphabets.__contains__)
        _check_str(symbol, self.re_symbol.match)
        self.type = type
        self.id_ref = id_ref
        self.id_source = id_source
        self.symbol = symbol
        self.accession = accession
        self.name = name
        self.location = location
        self.mol_seq = mol_seq
        self.uri = uri
        self.domain_architecture = domain_architecture
        self.annotations = annotations or []
        self.other = other or []

    @classmethod
    def from_seqrecord(cls, record, is_aligned=None):
        """Create a new PhyloXML Sequence from a SeqRecord object."""
        if is_aligned is None:
            # Fixed: compare against None by identity, not equality
            is_aligned = isinstance(record.seq.alphabet, Alphabet.Gapped)
        params = {
            'accession': Accession(record.id, ''),
            'symbol': record.name,
            'name': record.description,
            'mol_seq': MolSeq(str(record.seq), is_aligned),
        }
        if isinstance(record.seq.alphabet, Alphabet.DNAAlphabet):
            params['type'] = 'dna'
        elif isinstance(record.seq.alphabet, Alphabet.RNAAlphabet):
            params['type'] = 'rna'
        elif isinstance(record.seq.alphabet, Alphabet.ProteinAlphabet):
            params['type'] = 'protein'
        # Unpack record.annotations
        for key in ('id_ref', 'id_source', 'location'):
            if key in record.annotations:
                params[key] = record.annotations[key]
        if isinstance(record.annotations.get('uri'), dict):
            params['uri'] = Uri(**record.annotations['uri'])
        # Build a Sequence.annotation object
        if record.annotations.get('annotations'):
            params['annotations'] = []
            for annot in record.annotations['annotations']:
                ann_args = {}
                for key in ('ref', 'source', 'evidence', 'type', 'desc'):
                    if key in annot:
                        ann_args[key] = annot[key]
                if isinstance(annot.get('confidence'), list):
                    ann_args['confidence'] = Confidence(
                        *annot['confidence'])
                if isinstance(annot.get('properties'), list):
                    ann_args['properties'] = [Property(**prop)
                                              for prop in annot['properties']
                                              if isinstance(prop, dict)]
                params['annotations'].append(Annotation(**ann_args))
        # Unpack record.features
        if record.features:
            params['domain_architecture'] = DomainArchitecture(
                length=len(record.seq),
                domains=[ProteinDomain.from_seqfeature(feat)
                         for feat in record.features])
        # Fixed: construct via cls so subclasses build themselves
        return cls(**params)

    def to_seqrecord(self):
        """Create a SeqRecord object from this Sequence instance.

        The seqrecord.annotations dictionary is packed like so::

            { # Sequence attributes with no SeqRecord equivalent:
              'id_ref':     self.id_ref,
              'id_source':  self.id_source,
              'location':   self.location,
              'uri':        { 'value': self.uri.value,
                              'desc': self.uri.desc,
                              'type': self.uri.type },
              # Sequence.annotations attribute (list of Annotations)
              'annotations': [{ 'ref':      ann.ref,
                                'source':   ann.source,
                                'evidence': ann.evidence,
                                'type':     ann.type,
                                'confidence': [ ann.confidence.value,
                                                ann.confidence.type ],
                                'properties': [{ 'value':      prop.value,
                                                 'ref':        prop.ref,
                                                 'applies_to': prop.applies_to,
                                                 'datatype':   prop.datatype,
                                                 'unit':       prop.unit,
                                                 'id_ref':     prop.id_ref }
                                               for prop in ann.properties],
                              } for ann in self.annotations],
            }
        """
        def clean_dict(dct):
            """Remove None-valued items from a dictionary."""
            # Fixed: .items() instead of Python-2-only .iteritems()
            return dict((key, val) for key, val in dct.items()
                        if val is not None)
        seqrec = SeqRecord(Seq(self.mol_seq.value, self.get_alphabet()),
                           **clean_dict({
                               'id': str(self.accession),
                               'name': self.symbol,
                               'description': self.name,
                               # 'dbxrefs': None,
                           }))
        if self.domain_architecture:
            seqrec.features = [dom.to_seqfeature()
                               for dom in self.domain_architecture.domains]
        # Sequence attributes with no SeqRecord equivalent
        seqrec.annotations = clean_dict({
            'id_ref': self.id_ref,
            'id_source': self.id_source,
            'location': self.location,
            'uri': self.uri and clean_dict({
                'value': self.uri.value,
                'desc': self.uri.desc,
                'type': self.uri.type,
            }),
            'annotations': self.annotations and [
                clean_dict({
                    'ref': ann.ref,
                    'source': ann.source,
                    'evidence': ann.evidence,
                    'type': ann.type,
                    'confidence': ann.confidence and [
                        ann.confidence.value,
                        ann.confidence.type],
                    'properties': [clean_dict({
                        'value': prop.value,
                        'ref': prop.ref,
                        'applies_to': prop.applies_to,
                        'datatype': prop.datatype,
                        'unit': prop.unit,
                        'id_ref': prop.id_ref})
                        for prop in ann.properties],
                }) for ann in self.annotations],
        })
        return seqrec

    def get_alphabet(self):
        """Return the Biopython Alphabet matching this sequence's type.

        An aligned sequence gets a Gapped version of the base alphabet.
        """
        alph = self.alphabets.get(self.type, Alphabet.generic_alphabet)
        if self.mol_seq and self.mol_seq.is_aligned:
            return Alphabet.Gapped(alph)
        return alph
class SequenceRelation(PhyloElement):
    """A typed relationship between two sequences.

    For example, this can describe an orthology (in which case the 'type'
    attribute is 'orthology').

    :Parameters:
        id_ref_0 : Id
            first sequence reference identifier
        id_ref_1 : Id
            second sequence reference identifier
        distance : float
            distance between the two sequences
        type : restricted string
            describe the type of relationship
        confidence : Confidence
            confidence value for this relation
    """
    ok_type = set(('orthology', 'one_to_one_orthology', 'super_orthology',
                   'paralogy', 'ultra_paralogy', 'xenology', 'unknown', 'other'))

    def __init__(self, type, id_ref_0, id_ref_1,
                 distance=None, confidence=None):
        """Validate the relation type against ok_type, then store all fields."""
        _check_str(type, self.ok_type.__contains__)
        self.type = type
        self.id_ref_0 = id_ref_0
        self.id_ref_1 = id_ref_1
        self.distance = distance
        self.confidence = confidence
class Taxonomy(PhyloElement):
    """Taxonomic information for a clade.

    :Parameters:
        id_source : Id
            link other elements to a taxonomy (on the XML level)
        id : Id
            unique identifier of a taxon, e.g. Id('6500',
            provider='ncbi_taxonomy') for the California sea hare
        code : restricted string
            store UniProt/Swiss-Prot style organism codes, e.g. 'APLCA' for the
            California sea hare 'Aplysia californica'
        scientific_name : string
            the standard scientific name for this organism, e.g. 'Aplysia
            californica' for the California sea hare
        authority : string
            keep the authority, such as 'J. G. Cooper, 1863', associated with
            the 'scientific_name'
        common_names : list of strings
            common names for this organism
        synonyms : list of strings
            synonyms for this taxon
        rank : restricted string
            taxonomic rank
        uri : Uri
            link
        other : list of Other objects
            non-phyloXML elements
    """
    # Organism codes are 2-10 word characters
    re_code = re.compile(r'[a-zA-Z0-9_]{2,10}')
    ok_rank = set(('domain', 'kingdom', 'subkingdom', 'branch', 'infrakingdom',
                   'superphylum', 'phylum', 'subphylum', 'infraphylum', 'microphylum',
                   'superdivision', 'division', 'subdivision', 'infradivision',
                   'superclass', 'class', 'subclass', 'infraclass', 'superlegion',
                   'legion', 'sublegion', 'infralegion', 'supercohort', 'cohort',
                   'subcohort', 'infracohort', 'superorder', 'order', 'suborder',
                   'superfamily', 'family', 'subfamily', 'supertribe', 'tribe', 'subtribe',
                   'infratribe', 'genus', 'subgenus', 'superspecies', 'species',
                   'subspecies', 'variety', 'subvariety', 'form', 'subform', 'cultivar',
                   'unknown', 'other'))

    def __init__(self,
                 # Attributes
                 id_source=None,
                 # Child nodes
                 id=None, code=None, scientific_name=None, authority=None,
                 rank=None, uri=None,
                 # Collections
                 common_names=None, synonyms=None, other=None,
                 ):
        """Validate the restricted strings, then store all fields."""
        _check_str(code, self.re_code.match)
        _check_str(rank, self.ok_rank.__contains__)
        self.id_source = id_source
        self.id = id
        self.code = code
        self.scientific_name = scientific_name
        self.authority = authority
        self.rank = rank
        self.uri = uri
        # Collections always become lists, one fresh list per instance
        self.common_names = common_names if common_names else []
        self.synonyms = synonyms if synonyms else []
        self.other = other if other else []

    def __str__(self):
        """Show the most informative identifier that is available.

        Preference order: code, scientific name, rank, id, class name.
        """
        for label in (self.code, self.scientific_name, self.rank):
            if label is not None:
                return label
        if self.id is not None:
            return str(self.id)
        return self.__class__.__name__
class Uri(PhyloElement):
    """A uniform resource identifier.

    In general, this is expected to be a URL (for example, to link to an image
    on a website, in which case the ``type`` attribute might be 'image' and
    ``desc`` might be 'image of a California sea hare').
    """

    def __init__(self, value, desc=None, type=None):
        """Store the URI string plus its optional description and type."""
        self.type = type
        self.desc = desc
        self.value = value

    def __str__(self):
        """The URI string itself, or the repr when the value is empty."""
        return self.value if self.value else repr(self)
| LyonsLab/coge | bin/last_wrapper/Bio/Phylo/PhyloXML.py | Python | bsd-2-clause | 47,107 | [
"Biopython"
] | b3cf7de19fac886cf705403bb5f175338d7ab46649e641131ba92b5b05177f90 |
# Copyright 2009 by Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the multiple alignment program PRANK.
"""
from __future__ import print_function
__docformat__ = "restructuredtext en" # Don't just use plain text in epydoc API pages!
from Bio.Application import _Option, _Switch, AbstractCommandline
class PrankCommandline(AbstractCommandline):
    """Command line wrapper for the multiple alignment program PRANK.

    http://www.ebi.ac.uk/goldman-srv/prank/prank/

    Example:
    --------

    To align a FASTA file (unaligned.fasta) with the output in aligned
    FASTA format with the output filename starting with "aligned" (you
    can't pick the filename explicitly), no tree output and no XML output,
    use:

    >>> from Bio.Align.Applications import PrankCommandline
    >>> prank_cline = PrankCommandline(d="unaligned.fasta",
    ...                                o="aligned", # prefix only!
    ...                                f=8, # FASTA output
    ...                                notree=True, noxml=True)
    >>> print(prank_cline)
    prank -d=unaligned.fasta -o=aligned -f=8 -noxml -notree

    You would typically run the command line with prank_cline() or via
    the Python subprocess module, as described in the Biopython tutorial.

    Citations:
    ----------

    Loytynoja, A. and Goldman, N. 2005. An algorithm for progressive
    multiple alignment of sequences with insertions. Proceedings of
    the National Academy of Sciences, 102: 10557--10562.

    Loytynoja, A. and Goldman, N. 2008. Phylogeny-aware gap placement
    prevents errors in sequence alignment and evolutionary analysis.
    Science, 320: 1632.

    Last checked against version: 081202
    """

    def __init__(self, cmd="prank", **kwargs):
        """Define the PRANK parameter set, then delegate to the base class."""
        OUTPUT_FORMAT_VALUES = list(range(1, 18))
        self.parameters = [
            # ################# input/output parameters: ##################
            # -d=sequence_file
            _Option(["-d", "d"],
                    "Input filename",
                    filename=True,
                    is_required=True),
            # -t=tree_file [default: no tree, generate approximate NJ tree]
            _Option(["-t", "t"], "Input guide tree filename",
                    filename=True),
            # -tree="tree_string" [tree in newick format; in double quotes]
            _Option(["-tree", "tree"],
                    "Input guide tree as Newick string"),
            # -m=model_file [default: HKY2/WAG]
            _Option(["-m", "m"],
                    "User-defined alignment model filename. Default: "
                    "HKY2/WAG"),
            # -o=output_file [default: 'output']
            _Option(["-o", "o"],
                    "Output filenames prefix. Default: 'output'\n "
                    "Will write: output.?.fas (depending on requested "
                    "format), output.?.xml and output.?.dnd",
                    filename=True),
            # -f=output_format [default: 8]
            _Option(["-f", "f"],
                    "Output alignment format. Default: 8 FASTA\n"
                    "Option are:\n"
                    "1. IG/Stanford	8. Pearson/Fasta\n"
                    "2. GenBank/GB 	11. Phylip3.2\n"
                    "3. NBRF       	12. Phylip\n"
                    "4. EMBL       	14. PIR/CODATA\n"
                    "6. DNAStrider 	15. MSF\n"
                    "7. Fitch      	17. PAUP/NEXUS",
                    checker_function=lambda x: x in OUTPUT_FORMAT_VALUES),
            _Switch(["-noxml", "noxml"],
                    "Do not output XML files "
                    "(PRANK versions earlier than v.120626)"),
            _Switch(["-notree", "notree"],
                    "Do not output dnd tree files "
                    "(PRANK versions earlier than v.120626)"),
            _Switch(["-showxml", "showxml"],
                    "Output XML files (PRANK v.120626 and later)"),
            _Switch(["-showtree", "showtree"],
                    "Output dnd tree files (PRANK v.120626 and later)"),
            _Switch(["-shortnames", "shortnames"],
                    "Truncate names at first space"),
            _Switch(["-quiet", "quiet"],
                    "Reduce verbosity"),
            # ###################### model parameters: ######################
            # +F [force insertions to be always skipped]
            # -F [equivalent]
            _Switch(["-F", "+F", "F"],
                    "Force insertions to be always skipped: same as +F"),
            # -dots [show insertion gaps as dots]
            _Switch(["-dots", "dots"],
                    "Show insertion gaps as dots"),
            # -gaprate=# [gap opening rate; default: dna 0.025 / prot 0.0025]
            _Option(["-gaprate", "gaprate"],
                    "Gap opening rate. Default: dna 0.025 prot 0.0025",
                    checker_function=lambda x: isinstance(x, float)),
            # -gapext=# [gap extension probability; default: dna 0.5 / prot 0.5]
            _Option(["-gapext", "gapext"],
                    "Gap extension probability. Default: dna 0.5 "
                    "/ prot 0.5",
                    checker_function=lambda x: isinstance(x, float)),
            # -dnafreqs=#,#,#,# [ACGT; default: empirical]
            _Option(["-dnafreqs", "dnafreqs"],
                    "DNA frequencies - 'A,C,G,T'. eg '25,25,25,25' as a quote "
                    "surrounded string value. Default: empirical",
                    # Fixed: validate against str. On Python 2 'bytes' is an
                    # alias of 'str', so this is behaviorally identical there;
                    # on Python 3 the old bytes check wrongly rejected the
                    # quoted string the documentation tells users to pass.
                    checker_function=lambda x: isinstance(x, str)),
            # -kappa=# [ts/tv rate ratio; default:2]
            _Option(["-kappa", "kappa"],
                    "Transition/transversion ratio. Default: 2",
                    checker_function=lambda x: isinstance(x, int)),
            # -rho=# [pur/pyr rate ratio; default:1]
            _Option(["-rho", "rho"],
                    "Purine/pyrimidine ratio. Default: 1",
                    checker_function=lambda x: isinstance(x, int)),
            # -codon [for DNA: use empirical codon model]
            # Assuming this is an input file as in -m
            _Option(["-codon", "codon"],
                    "Codon model filename. Default: empirical codon model"),
            # -termgap [penalise terminal gaps normally]
            _Switch(["-termgap", "termgap"],
                    "Penalise terminal gaps normally"),
            # ############### other parameters: ################################
            # -nopost [do not compute posterior support; default: compute]
            _Switch(["-nopost", "nopost"],
                    "Do not compute posterior support. Default: compute"),
            # -pwdist=# [expected pairwise distance for computing guidetree;
            # default: dna 0.25 / prot 0.5]
            _Option(["-pwdist", "pwdist"],
                    "Expected pairwise distance for computing guidetree. "
                    "Default: dna 0.25 / prot 0.5",
                    checker_function=lambda x: isinstance(x, float)),
            _Switch(["-once", "once"],
                    "Run only once. Default: twice if no guidetree given"),
            _Switch(["-twice", "twice"],
                    "Always run twice"),
            _Switch(["-skipins", "skipins"],
                    "Skip insertions in posterior support"),
            _Switch(["-uselogs", "uselogs"],
                    "Slower but should work for a greater number of sequences"),
            _Switch(["-writeanc", "writeanc"],
                    "Output ancestral sequences"),
            _Switch(["-printnodes", "printnodes"],
                    "Output each node; mostly for debugging"),
            # -matresize=# [matrix resizing multiplier]
            # Doesnt specify type but Float and Int work
            _Option(["-matresize", "matresize"],
                    "Matrix resizing multiplier",
                    checker_function=lambda x: isinstance(x, float) or
                    isinstance(x, int)),
            # -matinitsize=# [matrix initial size multiplier]
            # Doesnt specify type but Float and Int work
            _Option(["-matinitsize", "matinitsize"],
                    "Matrix initial size multiplier",
                    checker_function=lambda x: isinstance(x, float) or
                    isinstance(x, int)),
            _Switch(["-longseq", "longseq"],
                    "Save space in pairwise alignments"),
            _Switch(["-pwgenomic", "pwgenomic"],
                    "Do pairwise alignment, no guidetree"),
            # -pwgenomicdist=# [distance for pairwise alignment; default: 0.3]
            _Option(["-pwgenomicdist", "pwgenomicdist"],
                    "Distance for pairwise alignment. Default: 0.3",
                    checker_function=lambda x: isinstance(x, float)),
            # -scalebranches=# [scale branch lengths; default: dna 1 / prot 2]
            _Option(["-scalebranches", "scalebranches"],
                    "Scale branch lengths. Default: dna 1 / prot 2",
                    checker_function=lambda x: isinstance(x, int)),
            # -fixedbranches=# [use fixed branch lengths]
            # Assume looking for a float
            _Option(["-fixedbranches", "fixedbranches"],
                    "Use fixed branch lengths of input value",
                    checker_function=lambda x: isinstance(x, float)),
            # -maxbranches=# [set maximum branch length]
            # Assume looking for a float
            _Option(["-maxbranches", "maxbranches"],
                    "Use maximum branch lengths of input value",
                    checker_function=lambda x: isinstance(x, float)),
            # -realbranches [disable branch length truncation]
            _Switch(["-realbranches", "realbranches"],
                    "Disable branch length truncation"),
            _Switch(["-translate", "translate"],
                    "Translate to protein"),
            _Switch(["-mttranslate", "mttranslate"],
                    "Translate to protein using mt table"),
            # ##################### other: ####################
            _Switch(["-convert", "convert"],
                    "Convert input alignment to new format. Do "
                    "not perform alignment")
        ]
        AbstractCommandline.__init__(self, cmd, **kwargs)
def _test():
"""Run the module's doctests (PRIVATE)."""
print("Running modules doctests...")
import doctest
doctest.testmod()
print("Done")
if __name__ == "__main__":
    _test()  # run the module doctests when executed as a script
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Align/Applications/_Prank.py | Python | gpl-2.0 | 10,752 | [
"Biopython"
] | f9b0cc772b4d4696c0c0ebf10610a8162929227c8ac0308120b681110e1b378b |
#!/usr/bin/python3
__author__ = 'johannes'
import numpy as np
import functools
class SRM:
    """SRM_0 (Spike Response Model, zero-th order).

    Each neuron's membrane potential is the sum of a refractory kernel
    (``eta``) applied to the time since the neuron's own last spike and a
    current kernel (``eps``) applied to every incoming spike, weighted by
    the connection matrix.

    Neurons can have different threshold, t_current, t_membrane and
    eta_reset values: set those parameters to 1-D numpy arrays of equal
    shape.
    """

    def __init__(self, neurons, threshold, t_current, t_membrane, eta_reset, simulation_window_size=100, verbose=False):
        """
        :param neurons: Number of neurons
        :param threshold: Spiking threshold
        :param t_current: Current-time-constant (:math:`t_s`)
        :type t_current: Float or Numpy Float Array
        :param t_membrane: Membrane-time-constant (t_m)
        :param eta_reset: Reset constant
        :param simulation_window_size: Only look at the n last spikes
        :param verbose: Print verbose output to the console
        :return: ``None``
        :raises ValueError: on inconsistent parameter types or shapes
        """
        # Check user input
        try:
            neurons = int(neurons)
        except (TypeError, ValueError):
            raise ValueError("Variable neurons should be int or convertible to int")

        # threshold, t_current, t_membrane, and eta_reset are all vectors.
        # np.array() turns plain scalars into 0-d arrays, so the shape
        # comparison below works for both scalar and per-neuron parameters.
        threshold = np.array(threshold)
        t_current = np.array(t_current)
        t_membrane = np.array(t_membrane)
        eta_reset = np.array(eta_reset)

        if not(threshold.shape == t_current.shape == t_membrane.shape == eta_reset.shape):
            raise ValueError("Vector of threshhold, t_current, t_membrane, and eta_reset must be same size")

        try:
            simulation_window_size = int(simulation_window_size)
        except (TypeError, ValueError):
            raise ValueError("Variable simulation_window_size should be int or convertible to int")

        self.neurons = neurons
        self.threshold = threshold
        self.t_current = t_current
        self.t_membrane = t_membrane
        self.eta_reset = eta_reset
        self.simulation_window_size = simulation_window_size
        self.verbose = verbose

        # Per-simulation memo: last simulated step, time of each neuron's
        # last spike (initialised far in the past so eta() underflows to 0)
        # and the previous membrane potential (needed to detect that the
        # threshold is crossed from below).
        self.cache = {}
        self.cache['last_t'] = -1
        self.cache['last_spike'] = np.ones(self.neurons, dtype=float) * -1000000
        self.cache['last_potential'] = np.zeros(self.neurons, dtype=float)

    def eta(self, s):
        r"""
        Evaluate the Eta (refractory/reset) function:

        .. math:: \eta (s) = - \eta_{reset} * \exp(\frac{- s}{\tau_m})
           :label: eta

        :param s: Time s
        :return: Function eta(s) at time s
        :return type: Float or Vector of Floats
        """
        return - self.eta_reset*np.exp(-s/self.t_membrane)

    # NOTE(review): lru_cache on an instance method keeps `self` alive inside
    # the cache (flake8-bugbear B019). Kept for backward compatibility; model
    # objects are long-lived here, so the leak is bounded.
    @functools.lru_cache()
    def eps(self, s):
        r"""
        Evaluate the Epsilon (post-synaptic current) function:

        .. math:: \epsilon (s) = \frac{1}{1 - \frac{\tau_c}{\tau_m}} (\exp(\frac{-s}{\tau_m}) - \exp(\frac{-s}{\tau_c}))
           :label: epsilon

        Returns a single Float Value if the time constants (current, membrane) are the same for each neuron.
        Returns a Float Vector with eps(s) for each neuron, if the time constants are different for each neuron.

        :param s: Time s
        :return: Function eps(s) at time s
        :rtype: Float or Vector of Floats
        """
        return (1/(1-self.t_current/self.t_membrane))*(np.exp(-s/self.t_membrane) - np.exp(-s/self.t_current))

    @functools.lru_cache()
    def eps_matrix(self, k, size):
        """
        Return the epsilon helper matrix.

        :Example:

        >>> eps_matrix(3, 5)
        [[eps_0(3), eps_0(2), eps_0(1), eps_0(0), eps_0(0)],
         [eps_1(3), eps_1(2), eps_1(1), eps_1(0), eps_1(0)]]

        Where `eps_0(3)` means the epsilon function of neuron 0 at time 3.

        :param k: Leftmost epsilon time
        :param size: Width of the return matrix
        :return: Epsilon helper matrix
        :return type: Numpy Float Array, dimensions: (neurons x size)
        """
        matrix = np.zeros((self.neurons, size), dtype=float)
        # Columns beyond k stay zero: they correspond to times outside the
        # simulated window.
        for i in range(k):
            matrix[:, i] = self.eps(k-i)
        return matrix

    def check_spikes(self, spiketrain, weights, t, additional_term=None):
        """
        Simulate one time step at time t. Changes the spiketrain in place at time t!
        Return the total membrane potential of all neurons.

        :param spiketrain: Spiketrain (Time indexing begins with 0)
        :param weights: Weights
        :param t: Evaluation time
        :param additional_term: Additional potential that gets added before we check for spikes (For example for extern voltage)
        :return: total membrane potential of all neurons at time step t (vector), spikes at time t
        :raises ValueError: on malformed input
        """
        # Check correct user input.
        # BUGFIX: `additional_term != None` is evaluated element-wise by numpy
        # and raises "truth value is ambiguous" for arrays with more than one
        # element -- identity tests (`is not None`) must be used instead.
        if not isinstance(spiketrain, np.ndarray):
            raise ValueError("Spiketrain should be a numpy array")

        if not isinstance(weights, np.ndarray):
            raise ValueError("Weights should be a numpy matrix")

        if additional_term is not None and not isinstance(additional_term, np.ndarray):
            raise ValueError("Additional_term should be a numpy array")

        try:
            t = int(t)
        except (TypeError, ValueError):
            raise ValueError("Variable t should be int or convertible to int")

        if t < 0:
            raise ValueError("Time to be simulated is too small")

        if t >= spiketrain.shape[1]:
            raise ValueError("Spiketrain too short (0ms -- %dms) for simulating time %d" % (spiketrain.shape[1]-1, t))

        if weights.shape[0] != self.neurons or self.neurons != weights.shape[1]:
            raise ValueError("Weigths should be a quadratic matrix, with one row and one column for each neuron")

        if spiketrain.shape[0] != self.neurons:
            raise ValueError("Spikes should be a matrix, with one row for each neuron")

        if additional_term is not None and additional_term.shape[0] != self.neurons:
            raise ValueError("Additional_term should be a vector with one element for each neuron")

        # BUGFIX: the original tested `len(additional_term) == 2` which checks
        # the number of neurons, not the number of dimensions, and raised an
        # IndexError for 1-D input in two-neuron networks. The intent is to
        # reject 2-D arrays that are not column vectors.
        if additional_term is not None and additional_term.ndim == 2 and additional_term.shape[1] != 1:
            raise ValueError("Additional_term should be a vector with one element for each neuron")

        # Work on a windowed view (only the last simulation_window_size steps)
        spiketrain_window = spiketrain[:, max(0, t+1-self.simulation_window_size):t+1]

        # Retrieve necessary simulation data from cache if possible
        if self.cache['last_t'] == -1 or self.cache['last_t'] == t - 1:
            last_spike = self.cache['last_spike']
            last_potential = self.cache['last_potential']
        else:
            last_spike = t - np.argmax(spiketrain_window[:, ::-1], axis=1)
            # TODO find a way to calculate last_potential (recursive call to check_spikes is not a good option)
            last_potential = np.zeros(self.neurons)

        neurons, timesteps = spiketrain_window.shape

        epsilon_matrix = self.eps_matrix(min(self.simulation_window_size, t), timesteps)

        # Calculate current: weighted incoming spikes convolved with eps kernel
        incoming_spikes = np.dot(weights.T, spiketrain_window)
        incoming_potential = np.sum(incoming_spikes * epsilon_matrix, axis=1)
        total_potential = self.eta(np.ones(neurons)*t - last_spike) + incoming_potential
        # Calculate current end

        # Add additional term (user-defined, e.g. external input voltage)
        if additional_term is not None:
            total_potential += additional_term

        # Any new spikes? Only spike if potential hits the threshold from below.
        neurons_high_current = np.where((total_potential > self.threshold) & (last_potential < self.threshold))
        spiketrain[neurons_high_current, t] = True

        # Update cache (last_spike, last_potential and last_t)
        spiking_neurons = np.where(spiketrain[:, t])
        self.cache['last_spike'][spiking_neurons] = t
        self.cache['last_potential'] = total_potential
        self.cache['last_t'] = t

        if self.verbose:
            print("SRM Time step", t)
            print("Incoming current", incoming_potential)
            print("Total potential", total_potential)
            print("Last spike", last_spike)
            print("")

        return total_potential
class SRM_X(SRM):
    """SRM model variant that additionally supports axonal delays."""

    def __init__(self, neurons, threshold, t_current, t_membrane, eta_reset, ax_delay, simulation_window_size=100, verbose=False):
        """
        Like the SRM model, but additionally it supports axonal delays.

        :param neurons: Number of neurons
        :param threshold: Spiking threshold
        :param t_current: Current-time-constant (:math:`t_s`)
        :type t_current: Float or Numpy Float Array
        :param t_membrane: Membrane-time-constant (t_m)
        :param eta_reset: Reset constant
        :param ax_delay: Axonal delays
        :param simulation_window_size: Only look at the n last spikes
        :param verbose: Print verbose output to the console
        :return: ``None``
        """
        # Check user input
        # TODO
        super().__init__(neurons, threshold, t_current, t_membrane, eta_reset,
                         simulation_window_size=simulation_window_size, verbose=verbose)
        self.ax_delay = ax_delay

    def eps(self, s):
        r"""
        Evaluate the Epsilon function with an axonal delay :math:`\tau_d`.

        .. math:: \epsilon (s) = \frac{1}{1 - \frac{\tau_c}{\tau_m}} (\exp(\frac{-(s-\tau_d)}{\tau_m}) - \exp(\frac{-(s - \tau_d)}{\tau_c}))
           :label: epsilon_axdelay

        Returns a single Float Value if the time constants (current, membrane) are the same for each neuron.
        Returns a Float Vector with eps(s) for each neuron, if the time constants are different for each neuron.

        :param s: Time s
        :return: Function eps(s) at time s
        :rtype: Float or Vector of Floats
        """
        delayed = s - self.ax_delay
        scale = 1 / (1 - self.t_current / self.t_membrane)
        kernel = scale * (np.exp(-delayed / self.t_membrane) - np.exp(-delayed / self.t_current))
        # Times before the delayed spike arrives contribute nothing.
        kernel[np.where(kernel < 0)] = 0
        return kernel
if __name__ == "__main__":
    # Demonstration: run a tiny 3-neuron SRM network on a fixed spiketrain
    # and print the (in-place updated) spiketrain.
    demo_models = [SRM(neurons=3, threshold=1, t_current=0.3, t_membrane=20, eta_reset=5, verbose=True)]

    for model in demo_models:
        print("-" * 10)

        if not isinstance(model, SRM):
            continue
        print('Demonstration of the SRM Model')

        spikes = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
                           [1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
        weights = np.array([[0, 0, 1], [0, 0, 1], [0, 0, 0]])

        for step in range(spikes.shape[1]):
            model.check_spikes(spikes, weights, step)

        print("Spiketrain:")
        print(spikes)
"NEURON"
] | a8fc128434061f7f9723b87cd5900790b71d82dd23d47ce31ff2d4d8c9642a6b |
from nose.tools import *
from nose.plugins.attrib import attr
import json
import shutil
import os
import tempfile
import logging
import copy
import stat
from mock import patch
from optparse import OptionParser
from biomaj.bank import Bank
from biomaj.session import Session
from biomaj.workflow import Workflow, UpdateWorkflow
from biomaj.utils import Utils
from biomaj.download.ftp import FTPDownload
from biomaj.download.direct import DirectFTPDownload, DirectHttpDownload
from biomaj.download.http import HTTPDownload
from biomaj.download.localcopy import LocalDownload
from biomaj.download.downloadthreads import DownloadThread
from biomaj.config import BiomajConfig
from biomaj.process.processfactory import PostProcessFactory,PreProcessFactory,RemoveProcessFactory
from biomaj.user import BmajUser
from biomaj.bmajindex import BmajIndex
from ldap3.core.exceptions import LDAPBindError
import unittest
class UtilsForTest():
    '''
    Copy properties files to a temp directory and update properties to
    use a temp directory
    '''

    def __init__(self):
        '''
        Setup the temp dirs and files.
        '''
        self.global_properties = None
        self.bank_properties = None

        self.test_dir = tempfile.mkdtemp('biomaj')

        # One sub-directory per BioMAJ storage area, exposed as
        # self.conf_dir, self.data_dir, self.log_dir, ...
        for area in ('conf', 'data', 'log', 'process', 'lock', 'cache'):
            area_dir = os.path.join(self.test_dir, area)
            if not os.path.exists(area_dir):
                os.makedirs(area_dir)
            setattr(self, area + '_dir', area_dir)

        if self.global_properties is None:
            self.__copy_global_properties()

        if self.bank_properties is None:
            self.__copy_test_bank_properties()

    def clean(self):
        '''
        Deletes temp directory
        '''
        shutil.rmtree(self.test_dir)

    def __copy_test_bank_properties(self):
        # Copy the per-bank fixture files once per instance.
        if self.bank_properties is not None:
            return
        self.bank_properties = ['alu', 'local', 'testhttp']
        curdir = os.path.dirname(os.path.realpath(__file__))
        shutil.copyfile(os.path.join(curdir, 'alu.properties'),
                        os.path.join(self.conf_dir, 'alu.properties'))

        # Process scripts must be executable for the workflow to run them.
        self.bank_process = ['test.sh']
        procdir = os.path.join(curdir, 'bank/process')
        for proc in self.bank_process:
            dest = os.path.join(self.process_dir, proc)
            shutil.copyfile(os.path.join(procdir, proc), dest)
            os.chmod(dest, stat.S_IRWXU)

        # Manage local bank test, use bank test subdir as remote
        properties = ['multi.properties', 'computederror.properties', 'error.properties', 'local.properties', 'localprocess.properties', 'testhttp.properties', 'computed.properties', 'computed2.properties', 'sub1.properties', 'sub2.properties']
        for prop in properties:
            fout = open(os.path.join(self.conf_dir, prop), 'w')
            with open(os.path.join(curdir, prop), 'r') as fin:
                for line in fin:
                    if line.startswith('remote.dir'):
                        fout.write("remote.dir=" + os.path.join(curdir, 'bank') + "\n")
                    elif line.startswith('remote.files'):
                        fout.write(line.replace('/tmp', os.path.join(curdir, 'bank')))
                    else:
                        fout.write(line)
            fout.close()

    def __copy_global_properties(self):
        if self.global_properties is not None:
            return
        self.global_properties = os.path.join(self.conf_dir, 'global.properties')
        curdir = os.path.dirname(os.path.realpath(__file__))
        global_template = os.path.join(curdir, 'global.properties')
        # Rewrite every *.dir property so the test runs inside the sandbox.
        overrides = (('conf.dir', self.conf_dir),
                     ('log.dir', self.log_dir),
                     ('data.dir', self.data_dir),
                     ('process.dir', self.process_dir),
                     ('lock.dir', self.lock_dir))
        fout = open(self.global_properties, 'w')
        with open(global_template, 'r') as fin:
            for line in fin:
                for key, value in overrides:
                    if line.startswith(key):
                        fout.write(key + "=" + value + "\n")
                        break
                else:
                    fout.write(line)
        fout.close()
class TestBiomajUtils(unittest.TestCase):
    """Unit tests for the Utils helper functions."""

    def setUp(self):
        self.utils = UtilsForTest()

    def tearDown(self):
        self.utils.clean()

    def test_mimes(self):
        # A FASTA fixture must be detected as application/fasta.
        fasta_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'bank/test2.fasta')
        (mime, encoding) = Utils.detect_format(fasta_file)
        self.assertTrue('application/fasta' == mime)

    @attr('compress')
    def test_uncompress(self):
        # Copy a gzipped fixture into the sandbox, then unpack it in place.
        archive = {'root': os.path.dirname(os.path.realpath(__file__)),
                   'name': 'bank/test.fasta.gz'}
        dest_dir = self.utils.data_dir
        Utils.copy_files([archive], dest_dir)
        Utils.uncompress(os.path.join(dest_dir, archive['name']))
        self.assertTrue(os.path.exists(dest_dir + '/bank/test.fasta'))

    def test_copy_with_regexp(self):
        src_dir = os.path.dirname(os.path.realpath(__file__))
        dest_dir = self.utils.data_dir
        Utils.copy_files_with_regexp(src_dir, dest_dir, ['.*\.py'])
        self.assertTrue(os.path.exists(dest_dir + '/biomaj_tests.py'))

    def test_copy(self):
        src_dir = os.path.dirname(os.path.realpath(__file__))
        files_to_copy = [{'root': src_dir, 'name': 'biomaj_tests.py'}]
        dest_dir = self.utils.data_dir
        Utils.copy_files(files_to_copy, dest_dir)
        self.assertTrue(os.path.exists(dest_dir + '/biomaj_tests.py'))
class TestBiomajLocalDownload(unittest.TestCase):
    '''
    Test Local downloader
    '''

    def setUp(self):
        self.utils = UtilsForTest()
        self.curdir = os.path.dirname(os.path.realpath(__file__))
        self.examples = os.path.join(self.curdir, 'bank') + '/'
        BiomajConfig.load_config(self.utils.global_properties, allow_user_config=False)
        # Removed: an obsolete commented-out block (a bare triple-quoted
        # string that copied local.properties into /tmp/biomaj/config). It was
        # a no-op expression statement, i.e. dead code.

    def tearDown(self):
        self.utils.clean()

    def test_local_list(self):
        '''Listing the example bank directory returns more than one file.'''
        locald = LocalDownload(self.examples)
        (file_list, dir_list) = locald.list()
        locald.close()
        self.assertTrue(len(file_list) > 1)

    def test_local_download(self):
        '''Only the files matching the pattern are selected for download.'''
        locald = LocalDownload(self.examples)
        (file_list, dir_list) = locald.list()
        locald.match([r'^test.*\.gz$'], file_list, dir_list)
        locald.download(self.utils.data_dir)
        locald.close()
        self.assertTrue(len(locald.files_to_download) == 1)

    def test_local_download_in_subdir(self):
        '''Patterns may address files inside sub-directories.'''
        locald = LocalDownload(self.curdir+'/')
        (file_list, dir_list) = locald.list()
        locald.match([r'^/bank/test.*\.gz$'], file_list, dir_list)
        locald.download(self.utils.data_dir)
        locald.close()
        self.assertTrue(len(locald.files_to_download) == 1)

    def test_parallel_local_download(self):
        '''Two download threads each handle their own file list.'''
        locald = LocalDownload(self.examples)
        (file_list, dir_list) = locald.list()
        locald.match([r'^test'], file_list, dir_list)
        list1 = [locald.files_to_download[0]]
        list2 = locald.files_to_download[1:]
        locald.close()
        locald1 = LocalDownload(self.examples)
        locald1.files_to_download = list1
        locald2 = LocalDownload(self.examples)
        locald2.files_to_download = list2
        t1 = DownloadThread(locald1, self.utils.data_dir)
        t2 = DownloadThread(locald2, self.utils.data_dir)
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        self.assertTrue(len(t1.downloader.files_to_download) == 1)
        self.assertTrue(os.path.exists(self.utils.data_dir + '/' + list1[0]['name']))
        self.assertTrue(len(t2.downloader.files_to_download) == 2)
        self.assertTrue(os.path.exists(self.utils.data_dir + '/' + list2[0]['name']))
        self.assertTrue(os.path.exists(self.utils.data_dir + '/' + list2[1]['name']))
@attr('network')
@attr('http')
class TestBiomajHTTPDownload(unittest.TestCase):
    '''
    Test HTTP downloader
    '''
    # NOTE(review): requires network access to ftp2.fr.debian.org
    # (skipped unless the nose 'network'/'http' attributes are selected).

    def setUp(self):
        self.utils = UtilsForTest()
        BiomajConfig.load_config(self.utils.global_properties, allow_user_config=False)
        self.config = BiomajConfig('testhttp')

    def tearDown(self):
        self.utils.clean()

    def test_http_list(self):
        # List a remote HTTP directory; the fixture config is expected to
        # match exactly one entry.
        httpd = HTTPDownload('http', 'ftp2.fr.debian.org', '/debian/dists/', self.config)
        (file_list, dir_list) = httpd.list()
        httpd.close()
        self.assertTrue(len(file_list) == 1)

    def test_http_download(self):
        httpd = HTTPDownload('http', 'ftp2.fr.debian.org', '/debian/dists/', self.config)
        (file_list, dir_list) = httpd.list()
        httpd.match([r'^README$'], file_list, dir_list)
        httpd.download(self.utils.data_dir)
        httpd.close()
        self.assertTrue(len(httpd.files_to_download) == 1)

    def test_http_download_in_subdir(self):
        # Patterns may address files inside sub-directories.
        httpd = HTTPDownload('http', 'ftp2.fr.debian.org', '/debian/', self.config)
        (file_list, dir_list) = httpd.list()
        httpd.match([r'^dists/README$'], file_list, dir_list)
        httpd.download(self.utils.data_dir)
        httpd.close()
        self.assertTrue(len(httpd.files_to_download) == 1)
@attr('directftp')
@attr('network')
class TestBiomajDirectFTPDownload(unittest.TestCase):
    '''
    Test DirectFTP downloader
    '''
    # NOTE(review): requires network access to ftp.ncbi.nih.gov.

    def setUp(self):
        self.utils = UtilsForTest()

    def tearDown(self):
        self.utils.clean()

    def test_ftp_list(self):
        # DirectFTP takes an explicit file list instead of scanning a dir.
        file_list = ['/blast/db/FASTA/alu.n.gz.md5']
        ftpd = DirectFTPDownload('ftp', 'ftp.ncbi.nih.gov', '', file_list)
        (file_list, dir_list) = ftpd.list()
        ftpd.close()
        self.assertTrue(len(file_list) == 1)

    def test_download(self):
        file_list = ['/blast/db/FASTA/alu.n.gz.md5']
        ftpd = DirectFTPDownload('ftp', 'ftp.ncbi.nih.gov', '', file_list)
        (file_list, dir_list) = ftpd.list()
        ftpd.download(self.utils.data_dir, False)
        ftpd.close()
        self.assertTrue(os.path.exists(os.path.join(self.utils.data_dir,'alu.n.gz.md5')))
@attr('directhttp')
@attr('network')
class TestBiomajDirectHTTPDownload(unittest.TestCase):
    '''
    Test DirectHttp downloader
    '''
    # NOTE(review): requires network access to ftp2.fr.debian.org and
    # httpbin.org.

    def setUp(self):
        self.utils = UtilsForTest()

    def tearDown(self):
        self.utils.clean()

    def test_http_list(self):
        file_list = ['/debian/README.html']
        ftpd = DirectHttpDownload('http', 'ftp2.fr.debian.org', '', file_list)
        # Remember the placeholder date so we can check list() refreshed it.
        fday = ftpd.files_to_download[0]['day']
        fmonth = ftpd.files_to_download[0]['month']
        fyear = ftpd.files_to_download[0]['year']
        (file_list, dir_list) = ftpd.list()
        ftpd.close()
        self.assertTrue(len(file_list) == 1)
        self.assertTrue(file_list[0]['size']!=0)
        self.assertFalse(fyear == ftpd.files_to_download[0]['year'] and fmonth == ftpd.files_to_download[0]['month'] and fday == ftpd.files_to_download[0]['day'])

    def test_download(self):
        file_list = ['/debian/README.html']
        ftpd = DirectHttpDownload('http', 'ftp2.fr.debian.org', '', file_list)
        (file_list, dir_list) = ftpd.list()
        ftpd.download(self.utils.data_dir, False)
        ftpd.close()
        self.assertTrue(os.path.exists(os.path.join(self.utils.data_dir,'README.html')))

    def test_download_get_params_save_as(self):
        # GET query parameters are forwarded; response saved under save_as.
        file_list = ['/get']
        ftpd = DirectHttpDownload('http', 'httpbin.org', '', file_list)
        ftpd.param = { 'key1': 'value1', 'key2': 'value2'}
        ftpd.save_as = 'test.json'
        (file_list, dir_list) = ftpd.list()
        ftpd.download(self.utils.data_dir, False)
        ftpd.close()
        self.assertTrue(os.path.exists(os.path.join(self.utils.data_dir,'test.json')))
        with open(os.path.join(self.utils.data_dir,'test.json'), 'r') as content_file:
            content = content_file.read()
            my_json = json.loads(content)
            self.assertTrue(my_json['args']['key1'] == 'value1')

    def test_download_save_as(self):
        file_list = ['/debian/README.html']
        ftpd = DirectHttpDownload('http', 'ftp2.fr.debian.org', '', file_list)
        ftpd.save_as = 'test.html'
        (file_list, dir_list) = ftpd.list()
        ftpd.download(self.utils.data_dir, False)
        ftpd.close()
        self.assertTrue(os.path.exists(os.path.join(self.utils.data_dir,'test.html')))

    def test_download_post_params(self):
        # POST form parameters are forwarded; httpbin echoes them back.
        #file_list = ['/debian/README.html']
        file_list = ['/post']
        ftpd = DirectHttpDownload('http', 'httpbin.org', '', file_list)
        #ftpd = DirectHttpDownload('http', 'ftp2.fr.debian.org', '', file_list)
        ftpd.param = { 'key1': 'value1', 'key2': 'value2'}
        ftpd.save_as = 'test.json'
        ftpd.method = 'POST'
        (file_list, dir_list) = ftpd.list()
        ftpd.download(self.utils.data_dir, False)
        ftpd.close()
        self.assertTrue(os.path.exists(os.path.join(self.utils.data_dir,'test.json')))
        with open(os.path.join(self.utils.data_dir,'test.json'), 'r') as content_file:
            content = content_file.read()
            my_json = json.loads(content)
            self.assertTrue(my_json['form']['key1'] == 'value1')
@attr('ftp')
@attr('network')
class TestBiomajFTPDownload(unittest.TestCase):
    '''
    Test FTP downloader
    '''
    # NOTE(review): requires network access to ftp.ncbi.nih.gov.

    def setUp(self):
        self.utils = UtilsForTest()

    def tearDown(self):
        self.utils.clean()

    def test_ftp_list(self):
        ftpd = FTPDownload('ftp', 'ftp.ncbi.nih.gov', '/blast/db/FASTA/')
        (file_list, dir_list) = ftpd.list()
        ftpd.close()
        self.assertTrue(len(file_list) > 1)

    def test_download(self):
        ftpd = FTPDownload('ftp', 'ftp.ncbi.nih.gov', '/blast/db/FASTA/')
        (file_list, dir_list) = ftpd.list()
        ftpd.match([r'^alu.*\.gz$'], file_list, dir_list)
        ftpd.download(self.utils.data_dir)
        ftpd.close()
        self.assertTrue(len(ftpd.files_to_download) == 2)

    def test_download_in_subdir(self):
        ftpd = FTPDownload('ftp', 'ftp.ncbi.nih.gov', '/blast/')
        (file_list, dir_list) = ftpd.list()
        ftpd.match([r'^db/FASTA/alu.*\.gz$'], file_list, dir_list)
        ftpd.download(self.utils.data_dir)
        ftpd.close()
        self.assertTrue(len(ftpd.files_to_download) == 2)

    def test_download_or_copy(self):
        # Files already available locally with same size are copied, not
        # re-downloaded; /test2 and /test/test1 (size mismatch) remain to
        # download.
        ftpd = FTPDownload('ftp', 'ftp.ncbi.nih.gov', '/blast/')
        ftpd.files_to_download = [
          {'name':'/test1', 'year': '2013', 'month': '11', 'day': '10', 'size': 10},
          {'name':'/test2', 'year': '2013', 'month': '11', 'day': '10', 'size': 10},
          {'name':'/test/test1', 'year': '2013', 'month': '11', 'day': '10', 'size': 10},
          {'name':'/test/test11', 'year': '2013', 'month': '11', 'day': '10', 'size': 10}
        ]
        available_files = [
          {'name':'/test1', 'year': '2013', 'month': '11', 'day': '10', 'size': 10},
          {'name':'/test12', 'year': '2013', 'month': '11', 'day': '10', 'size': 10},
          {'name':'/test3', 'year': '2013', 'month': '11', 'day': '10', 'size': 10},
          {'name':'/test/test1', 'year': '2013', 'month': '11', 'day': '10', 'size': 20},
          {'name':'/test/test11', 'year': '2013', 'month': '11', 'day': '10', 'size': 10}
        ]
        ftpd.download_or_copy(available_files, '/biomaj', False)
        ftpd.close()
        self.assertTrue(len(ftpd.files_to_download)==2)
        self.assertTrue(len(ftpd.files_to_copy)==2)

    def test_get_more_recent_file(self):
        files = [
          {'name':'/test1', 'year': '2013', 'month': '11', 'day': '10', 'size': 10},
          {'name':'/test2', 'year': '2013', 'month': '11', 'day': '12', 'size': 10},
          {'name':'/test/test1', 'year': '1988', 'month': '11', 'day': '10', 'size': 10},
          {'name':'/test/test11', 'year': '2013', 'month': '9', 'day': '23', 'size': 10}
        ]
        release = Utils.get_more_recent_file(files)
        self.assertTrue(release['year']=='2013')
        self.assertTrue(release['month']=='11')
        self.assertTrue(release['day']=='12')
class TestBiomajSetup(unittest.TestCase):
    # Tests for bank/session setup around the 'alu' fixture bank.
    # NOTE(review): relies on a reachable MongoDB instance (Bank storage)
    # configured by the global.properties fixture -- confirm in CI setup.

    def setUp(self):
        self.utils = UtilsForTest()
        curdir = os.path.dirname(os.path.realpath(__file__))
        BiomajConfig.load_config(self.utils.global_properties, allow_user_config=False)

        # Delete all banks
        b = Bank('alu')
        b.banks.remove({})

        self.config = BiomajConfig('alu')
        data_dir = self.config.get('data.dir')
        lock_file = os.path.join(data_dir,'alu.lock')
        # Remove a stale lock left over from a previous failed run.
        if os.path.exists(lock_file):
            os.remove(lock_file)

    def tearDown(self):
        data_dir = self.config.get('data.dir')
        lock_file = os.path.join(data_dir,'alu.lock')
        if os.path.exists(lock_file):
            os.remove(lock_file)
        self.utils.clean()

    def test_new_bank(self):
        '''
        Checks bank init
        '''
        b = Bank('alu')

    def test_new_session(self):
        '''
        Checks an empty session is created
        '''
        b = Bank('alu')
        b.load_session(UpdateWorkflow.FLOW)
        for key in b.session._session['status'].keys():
            self.assertFalse(b.session.get_status(key))

    def test_session_reload_notover(self):
        '''
        Checks a session is used if present
        '''
        b = Bank('alu')
        for i in range(1,5):
            s = Session('alu', self.config, UpdateWorkflow.FLOW)
            s._session['status'][Workflow.FLOW_INIT] = True
            b.session = s
            b.save_session()

        b = Bank('alu')
        b.load_session(UpdateWorkflow.FLOW)
        self.assertTrue(b.session.get_status(Workflow.FLOW_INIT))

    def test_clean_old_sessions(self):
        '''
        Checks a session is used if present
        '''
        b = Bank('local')
        for i in range(1,5):
            s = Session('alu', self.config, UpdateWorkflow.FLOW)
            s._session['status'][Workflow.FLOW_INIT] = True
            b.session = s
            b.save_session()
        b2 = Bank('local')
        b2.update()
        b2.clean_old_sessions()
        self.assertTrue(len(b2.bank['sessions']) == 1)

    def test_session_reload_over(self):
        '''
        Checks a session if is not over
        '''
        b = Bank('alu')
        for i in range(1,5):
            s = Session('alu', self.config, UpdateWorkflow.FLOW)
            s._session['status'][Workflow.FLOW_INIT] = True
            s._session['status'][Workflow.FLOW_OVER] = True
            b.session = s
            b.save_session()

        b = Bank('alu')
        b.load_session(UpdateWorkflow.FLOW)
        # A finished session must not be resumed: a fresh one is created.
        self.assertFalse(b.session.get_status(Workflow.FLOW_INIT))

    def test_bank_list(self):
        b1 = Bank('alu')
        b2 = Bank('local')
        banks = Bank.list()
        self.assertTrue(len(banks) == 2)

    @attr('network')
    def test_get_release(self):
        '''
        Get release
        '''
        b = Bank('alu')
        b.load_session(UpdateWorkflow.FLOW)
        res = b.update()
        self.assertTrue(b.session.get('update'))
        self.assertTrue(res)
        self.assertTrue(b.session._session['release'] is not None)

    def test_remove_session(self):
        b = Bank('alu')
        for i in range(1,5):
            s = Session('alu', self.config, UpdateWorkflow.FLOW)
            s._session['status'][Workflow.FLOW_INIT] = True
            b.session = s
            b.save_session()
        self.assertTrue(len(b.bank['sessions'])==4)
        b.remove_session(b.session.get('id'))
        self.assertTrue(len(b.bank['sessions'])==3)

    @attr('process')
    def test_postprocesses_setup(self):
        b = Bank('localprocess')
        pfactory = PostProcessFactory(b)
        pfactory.run(True)
        self.assertTrue(len(pfactory.threads_tasks[0])==2)
        self.assertTrue(len(pfactory.threads_tasks[1])==1)

    @attr('process')
    def test_postprocesses_exec_again(self):
        '''
        Execute once, set a status to false, check that False processes are executed
        '''
        b = Bank('localprocess')
        pfactory = PostProcessFactory(b)
        pfactory.run()
        self.assertTrue(pfactory.blocks['BLOCK1']['META0']['PROC0'])
        self.assertTrue(pfactory.blocks['BLOCK2']['META1']['PROC1'])
        self.assertTrue(pfactory.blocks['BLOCK2']['META1']['PROC2'])
        blocks = copy.deepcopy(pfactory.blocks)
        blocks['BLOCK2']['META1']['PROC2'] = False
        pfactory2 = PostProcessFactory(b, blocks)
        pfactory2.run()
        self.assertTrue(pfactory2.blocks['BLOCK2']['META1']['PROC2'])

    @attr('process')
    def test_preprocesses(self):
        b = Bank('localprocess')
        pfactory = PreProcessFactory(b)
        pfactory.run()
        self.assertTrue(pfactory.meta_status['META0']['PROC0'])

    @attr('process')
    def test_removeprocesses(self):
        b = Bank('localprocess')
        pfactory = RemoveProcessFactory(b)
        pfactory.run()
        self.assertTrue(pfactory.meta_status['META0']['PROC0'])

    def test_dependencies_list(self):
        b = Bank('computed')
        deps = b.get_dependencies()
        self.assertTrue(len(deps)==2)
class TestBiomajFunctional(unittest.TestCase):
    # End-to-end workflow tests against the 'local' fixture bank.
    # NOTE(review): these tests mutate shared bank state (MongoDB + the temp
    # data dir); within each test the statement order matters.

    def setUp(self):
        self.utils = UtilsForTest()
        curdir = os.path.dirname(os.path.realpath(__file__))
        BiomajConfig.load_config(self.utils.global_properties, allow_user_config=False)

        # Delete all banks
        b = Bank('local')
        b.banks.remove({})

        self.config = BiomajConfig('local')
        data_dir = self.config.get('data.dir')
        lock_file = os.path.join(data_dir,'local.lock')
        # Remove a stale lock left over from a previous failed run.
        if os.path.exists(lock_file):
            os.remove(lock_file)

    def tearDown(self):
        data_dir = self.config.get('data.dir')
        lock_file = os.path.join(data_dir,'local.lock')
        if os.path.exists(lock_file):
            os.remove(lock_file)
        self.utils.clean()

    def test_extract_release_from_file_name(self):
        # Release taken from the capture group of release.file.
        b = Bank('local')
        b.load_session(UpdateWorkflow.FLOW)
        b.session.config.set('release.file', 'test_(\d+)\.txt')
        b.session.config.set('release.regexp', '')
        w = UpdateWorkflow(b)
        w.wf_release()
        self.assertTrue(b.session.get('release') == '100')

    def test_extract_release_from_file_content(self):
        # Release taken from a regexp applied to the file's contents.
        b = Bank('local')
        b.load_session(UpdateWorkflow.FLOW)
        b.session.config.set('release.file', 'test_100\.txt')
        b.session.config.set('release.regexp', 'Release\s*(\d+)')
        w = UpdateWorkflow(b)
        w.wf_release()
        self.assertTrue(b.session.get('release') == '103')

    def test_publish(self):
        '''
        Update a bank, then publish it
        '''
        b = Bank('local')
        b.update()
        current_link = os.path.join(b.config.get('data.dir'),
                                    b.config.get('dir.version'),
                                    'current')
        self.assertFalse(os.path.exists(current_link))
        self.assertTrue(b.bank['current'] is None)
        b.publish()
        self.assertTrue(os.path.exists(current_link))
        self.assertTrue(b.bank['current'] == b.session._session['id'])

    # Should test this on local downloader, changing 1 file to force update,
    # else we would get same bank and there would be no update
    def test_no_update(self):
        '''
        Try updating twice, at second time, bank should not be updated
        '''
        b = Bank('local')
        b.update()
        self.assertTrue(b.session.get('update'))
        b.update()
        self.assertFalse(b.session.get('update'))
        self.assertFalse(b.session.get_status(Workflow.FLOW_POSTPROCESS))

    def test_fromscratch_update(self):
        '''
        Try updating twice, at second time, bank should be updated (force with fromscratc)
        '''
        b = Bank('local')
        b.update()
        self.assertTrue(b.session.get('update'))
        sess = b.session.get('release')
        b.options.fromscratch = True
        b.update()
        self.assertTrue(b.session.get('update'))
        # A forced re-update of the same release gets a "__1" suffix.
        self.assertEqual(b.session.get('release'), sess+'__1')

    def test_fromscratch_update_with_release(self):
        '''
        Try updating twice, at second time, bank should be updated (force with fromscratch)

        Use case with release defined in release file
        '''
        b = Bank('local')
        b.load_session(UpdateWorkflow.FLOW)
        b.session.config.set('release.file', 'test_(\d+)\.txt')
        b.session.config.set('release.regexp', '')
        w = UpdateWorkflow(b)
        w.wf_release()
        self.assertTrue(b.session.get('release') == '100')
        os.makedirs(b.session.get_full_release_directory())
        w = UpdateWorkflow(b)
        # Reset release
        b.session.set('release', None)
        w.options.fromscratch = True
        w.wf_release()
        self.assertTrue(b.session.get('release') == '100__1')

    def test_mix_stop_from_task(self):
        '''
        Get a first release, then fromscratch --stop-after, then restart from-task
        '''
        b = Bank('local')
        b.update()
        rel = b.session.get('release')
        b2 = Bank('local')
        b2.options.stop_after = 'download'
        b2.options.fromscratch = True
        res = b2.update()
        self.assertTrue(b2.session.get('release') == rel+'__1')
        b3 = Bank('local')
        res = b3.update()
        self.assertTrue(b3.session.get('release') == rel+'__1')
        self.assertTrue(res)

    def test_mix_stop_from_task2(self):
        '''
        Get a first release, then fromscratch --stop-after, then restart from-task
        '''
        b = Bank('local')
        b.update()
        rel = b.session.get('release')
        b2 = Bank('local')
        b2.options.stop_after = 'download'
        b2.options.fromscratch = True
        res = b2.update()
        self.assertTrue(b2.session.get('release') == rel+'__1')
        b3 = Bank('local')
        res = b3.update()
        # NOTE(review): from_task is set after update() here (and on b2, not
        # b3) -- looks like it has no effect; confirm intent.
        b2.options.from_task = 'download'
        self.assertTrue(b3.session.get('release') == rel+'__1')
        self.assertTrue(res)

    def test_mix_stop_from_task3(self):
        '''
        Get a first release, then fromscratch --stop-after, then restart from-task
        '''
        b = Bank('local')
        b.update()
        rel = b.session.get('release')
        b2 = Bank('local')
        b2.options.stop_after = 'download'
        b2.options.fromscratch = True
        res = b2.update()
        self.assertTrue(b2.session.get('release') == rel+'__1')
        b3 = Bank('local')
        res = b3.update()
        b2.options.from_task = 'postprocess'
        self.assertTrue(b3.session.get('release') == rel+'__1')
        self.assertTrue(res)

    def test_mix_stop_from_task4(self):
        '''
        Get a first release, then fromscratch --stop-after, then restart from-task
        '''
        b = Bank('local')
        b.update()
        rel = b.session.get('release')
        b2 = Bank('local')
        b2.options.stop_before = 'download'
        b2.options.fromscratch = True
        res = b2.update()
        b3 = Bank('local')
        b3.options.from_task = 'postprocess'
        res = b3.update()
        # Restarting at postprocess without a completed download must fail.
        self.assertFalse(res)

    def test_delete_old_dirs(self):
        '''
        Try updating 3 times, oldest dir should be removed
        '''
        b = Bank('local')
        b.removeAll(True)
        b = Bank('local')
        b.update()
        self.assertTrue(b.session.get('update'))
        b.options.fromscratch = True
        b.update()
        self.assertTrue(b.session.get('update'))
        self.assertTrue(len(b.bank['production']) == 2)
        b.update()
        self.assertTrue(b.session.get('update'))
        # one new dir, but olders must be deleted
        self.assertTrue(len(b.bank['production']) == 2)

    def test_delete_old_dirs_with_freeze(self):
        '''
        Try updating 3 times, oldest dir should be removed but not freezed releases
        '''
        b = Bank('local')
        b.removeAll(True)
        b = Bank('local')
        b.update()
        b.freeze(b.session.get('release'))
        self.assertTrue(b.session.get('update'))
        b.options.fromscratch = True
        b.update()
        b.freeze(b.session.get('release'))
        self.assertTrue(b.session.get('update'))
        self.assertTrue(len(b.bank['production']) == 2)
        b.update()
        self.assertTrue(b.session.get('update'))
        # one new dir, but olders must be deleted
        self.assertTrue(len(b.bank['production']) == 3)

    def test_removeAll(self):
        b = Bank('local')
        b.update()
        b.removeAll()
        self.assertFalse(os.path.exists(b.get_data_dir()))
bdb = b.banks.find_one({'name': b.name})
self.assertTrue(bdb is None)
def test_remove(self):
'''
test removal of a production dir
'''
b = Bank('local')
b.update()
self.assertTrue(os.path.exists(b.session.get_full_release_directory()))
self.assertTrue(len(b.bank['production'])==1)
b.remove(b.session.get('release'))
self.assertFalse(os.path.exists(b.session.get_full_release_directory()))
b = Bank('local')
self.assertTrue(len(b.bank['production'])==0)
def test_update_stop_after(self):
b = Bank('local')
b.options.stop_after = 'download'
b.update()
self.assertTrue(b.session.get_status('download'))
self.assertFalse(b.session.get_status('postprocess'))
def test_update_stop_before(self):
b = Bank('local')
b.options.stop_before = 'postprocess'
b.update()
self.assertTrue(b.session.get_status('download'))
self.assertFalse(b.session.get_status('postprocess'))
def test_reupdate_from_task(self):
b = Bank('local')
b.options.stop_after = 'download'
b.update()
self.assertFalse(b.session.get_status('postprocess'))
b2 = Bank('local')
b2.options.from_task = 'postprocess'
b2.options.release = b.session.get('release')
b2.update()
self.assertTrue(b2.session.get_status('postprocess'))
self.assertEqual(b.session.get_full_release_directory(), b2.session.get_full_release_directory())
def test_reupdate_from_task_error(self):
b = Bank('local')
b.options.stop_after = 'check'
b.update()
self.assertFalse(b.session.get_status('postprocess'))
b2 = Bank('local')
b2.options.from_task = 'postprocess'
b2.options.release = b.session.get('release')
res = b2.update()
self.assertFalse(res)
def test_reupdate_from_task_wrong_release(self):
b = Bank('local')
b.options.stop_after = 'download'
b.update()
self.assertFalse(b.session.get_status('postprocess'))
b2 = Bank('local')
b2.options.from_task = 'postprocess'
b2.options.release = 'wrongrelease'
res = b2.update()
self.assertFalse(res)
@attr('process')
def test_postprocesses_restart_from_proc(self):
b = Bank('localprocess')
b.update()
proc1file = os.path.join(b.session.get_full_release_directory(),'proc1.txt')
proc2file = os.path.join(b.session.get_full_release_directory(),'proc2.txt')
self.assertTrue(os.path.exists(proc1file))
self.assertTrue(os.path.exists(proc2file))
os.remove(proc1file)
os.remove(proc2file)
# Restart from postprocess, reexecute all processes
b2 = Bank('localprocess')
b2.options.from_task = 'postprocess'
b2.options.release = b.session.get('release')
b2.update()
self.assertTrue(os.path.exists(proc1file))
self.assertTrue(os.path.exists(proc2file))
os.remove(proc1file)
os.remove(proc2file)
# Restart from postprocess, but at process PROC2 and following
b3 = Bank('localprocess')
b3.options.from_task = 'postprocess'
b3.options.process = 'PROC2'
b3.options.release = b.session.get('release')
b3.update()
#self.assertFalse(os.path.exists(proc1file))
self.assertTrue(os.path.exists(proc2file))
def test_computed(self):
b = Bank('computed')
res = b.update(True)
self.assertTrue(res)
self.assertTrue(os.path.exists(b.session.get_full_release_directory()+'/sub1/flat/test_100.txt'))
def test_computed_ref_release(self):
b = Bank('computed2')
res = b.update(True)
b2 = Bank('sub1')
b2release = b2.bank['production'][len(b2.bank['production'])-1]['release']
brelease = b.bank['production'][len(b.bank['production'])-1]['release']
self.assertTrue(res)
self.assertTrue(brelease == b2release)
def test_computederror(self):
b = Bank('computederror')
res = b.update(True)
self.assertFalse(res)
self.assertTrue(b.session._session['depends']['sub2'])
self.assertFalse(b.session._session['depends']['error'])
@attr('network')
def test_multi(self):
b = Bank('multi')
res = b.update()
with open(os.path.join(b.session.get_full_release_directory(),'flat/test1.json'), 'r') as content_file:
content = content_file.read()
my_json = json.loads(content)
self.assertTrue(my_json['args']['key1'] == 'value1')
with open(os.path.join(b.session.get_full_release_directory(),'flat/test2.json'), 'r') as content_file:
content = content_file.read()
my_json = json.loads(content)
self.assertTrue(my_json['form']['key1'] == 'value1')
def test_freeze(self):
b = Bank('local')
b.update()
rel = b.session.get('release')
b.freeze(rel)
prod = b.get_production(rel)
self.assertTrue(prod['freeze'] == True)
res = b.remove(rel)
self.assertTrue(res == False)
b.unfreeze(rel)
prod = b.get_production(rel)
self.assertTrue(prod['freeze'] == False)
res = b.remove(rel)
self.assertTrue(res == True)
def test_stats(self):
b = Bank('local')
b.update()
rel = b.session.get('release')
stats = Bank.get_banks_disk_usage()
self.assertTrue(stats[0]['size']>0)
for release in stats[0]['releases']:
if release['name'] == rel:
self.assertTrue(release['size']>0)
@attr('process')
def test_processes_meta_data(self):
b = Bank('localprocess')
b.update()
formats = b.session.get('formats')
self.assertTrue(len(formats['blast'])==2)
self.assertTrue(len(formats['test'][0]['files'])==3)
@attr('process')
def test_search(self):
b = Bank('localprocess')
b.update()
search_res = Bank.search(['blast'],[])
self.assertTrue(len(search_res)==1)
search_res = Bank.search([],['nucleic'])
self.assertTrue(len(search_res)==1)
search_res = Bank.search(['blast'],['nucleic'])
self.assertTrue(len(search_res)==1)
search_res = Bank.search(['blast'],['proteic'])
self.assertTrue(len(search_res)==0)
def test_owner(self):
'''
test ACL with owner
'''
b = Bank('local')
res = b.update()
self.assertTrue(res)
b.set_owner('sample')
b2 = Bank('local')
try:
res = b2.update()
self.fail('not owner, should not be allowed')
except Exception as e:
pass
@attr('elastic')
class TestElastic(unittest.TestCase):
    """Indexing and search through the ElasticSearch backend."""

    def setUp(self):
        self.utils = UtilsForTest()
        curdir = os.path.dirname(os.path.realpath(__file__))
        BiomajConfig.load_config(self.utils.global_properties, allow_user_config=False)
        # Start from an empty bank collection.
        Bank('local').banks.remove({})
        self.config = BiomajConfig('local')
        lock_file = os.path.join(self.config.get('data.dir'), 'local.lock')
        if os.path.exists(lock_file):
            os.remove(lock_file)

    def tearDown(self):
        lock_file = os.path.join(self.config.get('data.dir'), 'local.lock')
        if os.path.exists(lock_file):
            os.remove(lock_file)
        self.utils.clean()
        BmajIndex.delete_all_bank('test')

    def test_index(self):
        """Index one production record, then search it back by bank name."""
        fasta_format = [{
            'files': ['fasta/chr1.fa', 'fasta/chr2.fa'],
            'types': ['nucleic'],
            'tags': {'organism': 'hg19'},
        }]
        blast_format = [{
            'files': ['blast/chr1/chr1db'],
            'types': ['nucleic'],
            'tags': {'chr': 'chr1', 'organism': 'hg19'},
        }]
        prod = {
            'data_dir': '/tmp/test/data',
            'formats': {'fasta': fasta_format, 'blast': blast_format},
            'freeze': False,
            'session': 1416229253.930908,
            'prod_dir': 'alu-2003-11-26',
            'release': '2003-11-26',
            'types': ['nucleic'],
        }
        BmajIndex.add('test', prod, True)
        query = {'query': {'match': {'bank': 'test'}}}
        res = BmajIndex.search(query)
        # One document per indexed format.
        self.assertTrue(len(res) == 2)
class MockLdapConn(object):
    """Minimal stand-in for an ldap3 server/connection used by the user tests.

    Only the attributes and methods touched by the user-management code are
    provided. Fixed: the ``STRATEGY_SYNC`` class attribute was declared twice.
    """
    # The single user known to the fake directory, and its mail attribute.
    ldap_user = 'biomajldap'
    ldap_user_email = 'bldap@no-reply.org'

    # ldap3-style constants; callers only need them to exist.
    STRATEGY_SYNC = 0
    AUTH_SIMPLE = 0
    STRATEGY_ASYNC_THREADED = 0
    SEARCH_SCOPE_WHOLE_SUBTREE = 0
    GET_ALL_INFO = 0

    @staticmethod
    def Server(ldap_host, port, get_info):
        """Mimic ldap3.Server(); no real server is ever contacted."""
        return None

    @staticmethod
    def Connection(ldap_server, auto_bind=True, read_only=True, client_strategy=0, user=None, password=None, authentication=0,check_names=True):
        """Mimic ldap3.Connection().

        The magic password 'notest' simulates a bind failure (returns None);
        any other credentials yield a usable mock connection.
        """
        if user is not None and password is not None:
            if password == 'notest':
                #raise ldap3.core.exceptions.LDAPBindError('no bind')
                return None
        return MockLdapConn(ldap_server)

    def __init__(self, url=None):
        # Nothing to set up; the class attributes carry the fake directory data.
        pass

    def search(self, base_dn, filter, scope, attributes=[]):
        """Return a single fake entry when the known LDAP user is in the filter.

        Also records the entry on ``self.response`` (ldap3-style); raises for
        any other search to emulate a user that does not exist.
        """
        if MockLdapConn.ldap_user in filter:
            self.response = [{'dn': MockLdapConn.ldap_user, 'attributes': {'mail': [MockLdapConn.ldap_user_email]}}]
            return [(MockLdapConn.ldap_user, {'mail': [MockLdapConn.ldap_user_email]})]
        else:
            raise Exception('no match')

    def unbind(self):
        """Mimic ldap3 unbind(); nothing to release."""
        pass
@attr('user')
class TestUser(unittest.TestCase):
    """User management, with the LDAP layer mocked out."""

    def setUp(self):
        self.utils = UtilsForTest()
        self.curdir = os.path.dirname(os.path.realpath(__file__))
        BiomajConfig.load_config(self.utils.global_properties, allow_user_config=False)

    def tearDown(self):
        self.utils.clean()

    @patch('ldap3.Connection')
    def test_get_user(self, initialize_mock):
        """An unknown, non-LDAP user resolves to None."""
        initialize_mock.return_value = MockLdapConn.Connection(None, None, None, None)
        user = BmajUser('biomaj')
        self.assertTrue(user.user is None)
        user.remove()

    @patch('ldap3.Connection')
    def test_create_user(self, initialize_mock):
        """Creating a user stores its email."""
        initialize_mock.return_value = MockLdapConn.Connection(None, None, None, None)
        user = BmajUser('biomaj')
        user.create('test', 'test@no-reply.org')
        self.assertTrue(user.user['email'] == 'test@no-reply.org')
        user.remove()

    @patch('ldap3.Connection')
    def test_check_password(self, initialize_mock):
        """The stored password must verify."""
        initialize_mock.return_value = MockLdapConn.Connection(None, None, None, None)
        user = BmajUser('biomaj')
        user.create('test', 'test@no-reply.org')
        self.assertTrue(user.check_password('test'))
        user.remove()

    @patch('ldap3.Connection')
    def test_ldap_user(self, initialize_mock):
        """A user known to LDAP gets a local record flagged is_ldap."""
        initialize_mock.return_value = MockLdapConn.Connection(None, None, None, None)
        user = BmajUser('biomajldap')
        self.assertTrue(user.user['is_ldap'] == True)
        self.assertTrue(user.user['_id'] is not None)
        self.assertTrue(user.check_password('test'))
        user.remove()
| markiskander/biomaj | tests/biomaj_tests.py | Python | agpl-3.0 | 38,862 | [
"BLAST"
] | 481af5c41b23113442d2456f992c6e576122a4d3d665c32ffd75d8740e09d645 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*
# Name: isodump3.py
# Purpose: Module to list and extract iso files.
# Authors: LiQiong Lee (written exclusively for multibootusb)
# Licence: This file is a part of multibootusb package. You can redistribute it or modify
# under the terms of GNU General Public License version 3
# Credit : I am grateful to LiQiong Lee. He not only wrote this module for multibootusb, but also extended the same
# to python3 within short time after request.
""" ISO9660fs
Dump raw meta data of iso9660 file system.
Extract directories and files.
"""
##
## Extract directory or file from iso.
## Support RRIP.
##
# Author : joni <joni.kartore.lee@gmail.com>
# version : 1.0
import sys
import struct
import os
import re
import stat
from ctypes import *
from . import config
from . import gen
# Default ISO9660 logical block size; overwritten with the value read from
# the primary volume descriptor once the image is opened.
BLOCK_SIZE = 2048
# POSIX file-type bits as stored in Rock Ridge PX entries (octal, high nibble
# of st_mode). Used to decide how to recreate special files on extraction.
S_IFSOCKET = 0o140000
S_IFLINK = 0o120000
S_IFREG = 0o100000
S_IFBLK = 0o060000
S_IFCHR = 0o020000
S_IFDIR = 0o040000
S_IFIFO = 0o010000
# Return codes used by the extraction methods.
E_SUCCESS = 0
E_FAILURE = -1
E_DEVICEFILE = -2 # can't write device file
class PrimaryVolume(Structure):
    """Fields decoded from an ISO9660 primary volume descriptor."""
    def __init__(self):
        # Identification strings (raw bytes from the descriptor).
        self.sysIdentifier = ""
        self.volIdentifier = ""
        # Volume geometry: total size, sequence number and logical block size.
        self.volSize = self.volSeq = self.blockSize = 0
        # L-type path table: size, location and file-structure version.
        self.ptSize = self.ptLRd = self.fsVer = 0
        # Root directory record: extent location and total data length.
        self.rootLoc = self.rootTotal = 0
class Rrip(Structure):
    """Rock Ridge (RRIP) attributes collected for one directory record."""
    def __init__(self):
        self.offset = -1        # SUSP skip offset; stays -1 while RRIP is undetected
        self.altname = ""       # alternate (POSIX) file name from NM entries
        self.devH = self.devL = 0   # device numbers from a PN entry
        self.fMode = 0          # POSIX mode bits from a PX entry
class DirRecord(Structure):
    """One ISO9660 directory record, plus optional Rock Ridge data."""
    def __init__(self):
        # Record and extended-attribute lengths.
        self.lenDr = self.lenEattr = 0
        # Extent location (block) and data length (bytes).
        self.locExtent = self.lenData = 0
        # Recording date and time fields.
        self.dtYear = self.dtMonth = 0
        self.dtHour = self.dtMinute = self.dtSecond = 0
        self.dtOffset = 0
        # File flags, unit size and interleave gap.
        self.fFlag = self.fUnitSize = self.gapSize = 0
        # Volume sequence number and identifier length.
        self.volSeqNr = self.lenFi = 0
        self.fIdentifier = ""   # file identifier (possibly replaced by RRIP NM name)
        self.sysUseStar = 0     # offset where the system-use (SUSP) area starts
        self.suspBuf = ""       # raw system-use bytes (kept for the root record)
        self.rrip = None        # parsed Rrip node, when RRIP is present
class PathTabelItem(Structure):
    """One record of the ISO9660 path table (L type).

    Fixed: the extent-location attribute was initialised as ``locExtenti``
    (typo) while every reader and writer in this module uses ``locExtent``,
    so the default attribute never matched the one actually used.
    """
    def __init__(self):
        self.lenDi = 0        # length of the directory identifier
        self.lenEattr = 0     # extended attribute record length
        self.locExtent = 0    # block number of the extent (was misspelled 'locExtenti')
        self.pdirNr = 0       # parent directory number
        self.fIdentifier = "" # directory identifier
class ISO9660:
    """
    This class can dump iso9660 file system meta data and extract files.
    Support:
        RRIP extension.
    """
    def __init__(self, isofile):
        """Open 'isofile' and scan its volume descriptor set.

        Descriptors start at sector 16; type 1 is the primary volume
        descriptor, type 255 terminates the set. On any parsing error,
        priVol and rootDir are left as None.
        """
        try:
            f = open(isofile, 'rb')
        except(IOError):
            sys.stderr.write("can't open {0}".format(isofile))
            sys.exit(-1)
        self.isoFile = f
        self.priVol = None
        self.rootDir = None
        self.rripOffset = -1
        desc_nr = 0
        while True:
            desc_nr = desc_nr + 1
            try:
                self.isoFile.seek(BLOCK_SIZE*(15+desc_nr))
                volume_dsc = self.isoFile.read(BLOCK_SIZE)
                flag = struct.unpack('B',volume_dsc[0:1])[0]
                if flag == 1:
                    self.__readPrimaryVolume__(volume_dsc)
                    continue
                if flag == 255:
                    break
            except Exception as e:
                gen.log("Got exception when init iso file:", sys.exc_info()[0])
                self.priVol = None
                self.rootDir = None
                break

    def __del__(self):
        """Close the underlying iso file handle."""
        self.isoFile.close()

    def __readPrimaryVolume__(self, volume_dsc):
        """ Dump primary volume descriptor """
        global BLOCK_SIZE
        priVol = PrimaryVolume()
        # Fixed byte offsets below follow the ISO9660/ECMA-119 layout.
        priVol.sysIdentifier = volume_dsc[8:40]
        priVol.volIdentifier = volume_dsc[40:72]
        priVol.volSize = struct.unpack('<L',volume_dsc[80:84])[0]
        priVol.volSeq = struct.unpack('<H',volume_dsc[124:126])[0]
        priVol.blockSize = struct.unpack('<H',volume_dsc[128:130])[0]
        priVol.ptSize = struct.unpack('<L',volume_dsc[132:136])[0]
        priVol.ptLRd = struct.unpack('<L',volume_dsc[140:144])[0]
        priVol.fsVer = struct.unpack('B', volume_dsc[881:882])[0]
        # Offset 156..189 holds the root directory record.
        dirRec = self.readDirrecord(volume_dsc[156:190])
        priVol.rootLoc = dirRec.locExtent
        priVol.rootTotal = dirRec.lenData
        # Adopt the block size declared by the image itself.
        BLOCK_SIZE = priVol.blockSize
        # Check RRIP
        #gen.log("loc extent(%d)"%(dirRec.locExtent))
        self.priVol = priVol # readDirItems will use self.priVol
        root_dir = self.readDirItems(dirRec.locExtent, priVol.rootTotal)[0]
        rripNode = self.__rripLoop__(root_dir.suspBuf, root_dir.lenDr-root_dir.sysUseStar)
        if rripNode.offset != -1:
            self.rripOffset = rripNode.offset
            #gen.log("RRIP: rrip_offset %d"%(self.rripOffset))
        else:
            gen.log("This ISO doesn't support RRIP")
        self.rootDir = root_dir

    # Rrip extension
    def __rripLoop__(self, desc_buf, len_buf):
        """Parse SUSP/RRIP entries from a directory record's system-use area.

        Follows CE (continuation) entries into other blocks and returns a
        Rrip object carrying any SP skip offset, NM alternate name, PN
        device numbers and PX mode bits encountered.
        """
        if self.rripOffset > 0:
            entry_buf = desc_buf[self.rripOffset:]
            gen.log("__rripLoop__ offset:%d"%(self.rripOffset))
        else:
            entry_buf = desc_buf
        rr = Rrip()
        while True:
            # Continuation-entry bookkeeping for the current buffer.
            ce_blk = 0
            ce_len = 0
            ce_off = 0
            head = 0
            len_entry = 0
            while True:
                #gen.log(("\n%d, %d\n")%(len_buf, head))
                head += len_entry
                if len_buf - head < 4: # less than one entry
                    break
                entry_buf = entry_buf[len_entry:]
                # Each SUSP entry: 2 signature bytes, length, version.
                sig1 = struct.unpack("B", entry_buf[0:1])[0]
                sig2 = struct.unpack("B", entry_buf[1:2])[0]
                len_entry = struct.unpack("B", entry_buf[2:3])[0]
                ver = struct.unpack("B", entry_buf[3:4])[0]
                #if len_entry == 0:
                #    gen.log "Got a entry in __rripLoop__ (%c,%c) of SUSP with length:(%d),version:(%d)-->"%(sig1,sig2,len_entry, ver),
                if len_entry == 0:
                    break;
                if sig1 == ord('S') and sig2 == ord('P'):
                    # SP: SUSP presence indicator; check bytes 0xBE 0xEF.
                    ck1 = struct.unpack("B", entry_buf[4:5])[0]
                    ck2 = struct.unpack("B", entry_buf[5:6])[0]
                    skip = struct.unpack("B", entry_buf[6:7])[0]
                    #gen.log "-->(0x%x==0xBE,0x%x==EF,%d)" %(ck1, ck2, skip)
                    if ck1 == 0xBE and ck2 == 0xEF:
                        rr.offset = skip
                    continue
                if sig1 == ord('C') and sig2 == ord('E'):
                    # CE: continuation area (block, offset, length).
                    ce_blk = struct.unpack("<L", entry_buf[4:8])[0]
                    ce_off = struct.unpack("<L", entry_buf[12:16])[0]
                    ce_len = struct.unpack("<L", entry_buf[20:24])[0]
                    #gen.log "-->(%d,%d,%d)" %(ce_blk, ce_off, ce_len)
                    continue
                if sig1 == ord('N') and sig2 == ord('M'):
                    # NM: alternate (POSIX) name, possibly split over entries.
                    flag = struct.unpack("B", entry_buf[4:5])[0]
                    #gen.log "-->(flag:(0x%x), name:(%s))" %(flag, entry_buf[5:len_entry])
                    if flag == 0x02: # FLAG_CURRENT
                        rr.altname += "."
                    elif flag == 0x04: # FLAG_PARENT
                        rr.altname += ".."
                    elif flag == 0x01 or flag ==0: # 1:FLAG_CONTINUE
                        rr.altname += entry_buf[5:len_entry].decode()
                    continue
                if sig1 == ord('P') and sig2 == ord('N'):
                    # PN: device numbers for block/character special files.
                    rr.devH = struct.unpack("<L", entry_buf[4:8])[0]
                    rr.devL = struct.unpack("<L", entry_buf[12:16])[0]
                    continue
                if sig1 == ord('E') and sig2 == ord('R'):
                    # ER: extensions reference; parsed but unused.
                    len_id = struct.unpack("B", entry_buf[4:5])[0]
                    len_des = struct.unpack("B", entry_buf[5:6])[0]
                    len_src = struct.unpack("B", entry_buf[6:7])[0]
                    ext_ver = struct.unpack("B", entry_buf[7:8])[0]
                    continue
                if sig1 == ord('P') and sig2 == ord('X'):
                    # PX: POSIX attributes; only the mode bits are kept.
                    rr.fMode = struct.unpack("<L", entry_buf[4:8])[0]
                    s_link = struct.unpack("<L", entry_buf[12:16])[0]
                    uid = struct.unpack("<L", entry_buf[20:24])[0]
                    gid = struct.unpack("<L", entry_buf[28:32])[0]
                    continue
                if sig1 == ord('S') and sig2 == ord('T'):
                    # ST: terminator of the system-use area.
                    return rr
                #gen.log "\n"
            # while (True) end #
            if ce_len > 0:
                # Follow the continuation area recorded by the last CE entry.
                #gen.log " Read CE block, (%d, %d, %d)"%(ce_blk, ce_len, ce_off)
                self.isoFile.seek(ce_blk*BLOCK_SIZE + ce_off)
                entry_buf = self.isoFile.read(ce_len)
                len_buf = ce_len
            else:
                break
        # while (True) end #
        return rr

    def checkISOBootable(self):
        """ Struct of a classical generic MBR.
        0x0000 Bootstrap Code area
        -----------------------------------------
        0x01BE
         ..     Partition table
        0x01EE
        ------------------------------------------
        0x01FE 55h
                   Boot signature
        0x01FF AAh
        """
        # A 0x55AA signature at offset 0x1FE marks a bootable (hybrid) image.
        self.isoFile.seek(0x01FE)
        h = self.isoFile.read(2)
        s1 = struct.unpack('B', h[0:1])[0]
        s2 = struct.unpack('B', h[1:2])[0]
        #gen.log "-->(0x%x,0x%x)" %(s1, s2)
        if (s1 == 0x55) and (s2 == 0xAA):
            result = True # "Bootable"
        else:
            result = False # "Not bootable"
        return result

    def searchDir(self, path):
        """Walk the directory tree and return the DirRecord matching 'path'.

        Returns the root record for '/', or None when a component is missing
        or no primary volume was parsed.
        """
        # /root/abc/ - ['', 'root', 'abc', '']
        # /root/abc - ['', 'root', 'abc']
        # / - ['', '']
        dircomps = path.split('/')
        if dircomps[-1] == '':
            dircomps.pop()
        if dircomps == []:
            return
        if self.priVol == None:
            return
        if len(dircomps) == 1:
            return self.rootDir
        pdir_loc = self.priVol.rootLoc
        pdir_len = self.priVol.rootTotal
        i_dircomp = 1
        while True:
            found = False
            dirs = self.readDirItems(pdir_loc, pdir_len)
            for item in dirs:
                if item.fIdentifier == dircomps[i_dircomp]:
                    pdir_loc = item.locExtent
                    pdir_len = item.lenData
                    found = True
                    #gen.log "found (%s)"%(dircomps[i_dircomp])
                    break
            if found: # advance to the next path component
                if i_dircomp < len(dircomps)-1:
                    i_dircomp = i_dircomp + 1
                else:
                    return item
            else:
                gen.log("can't find " + dircomps[i_dircomp])
                return None

    def readDirrecord(self, desc_buf):
        """ Dump file dirctory record
        Return a directory record reading from a Directory Descriptors.
        """
        dirRec = DirRecord()
        try:
            dirRec.lenDr = struct.unpack("B", desc_buf[0:1])[0]
            if dirRec.lenDr == 0:
                return None
        except:
            return None
        dirRec.lenEattr = struct.unpack("B", desc_buf[1:2])[0]
        dirRec.locExtent = struct.unpack("<L", desc_buf[2:6])[0]
        dirRec.lenData = struct.unpack("<L", desc_buf[10:14])[0]
        dirRec.fFlag = struct.unpack("B", desc_buf[25:26])[0]
        dirRec.fUnitSize = struct.unpack("B", desc_buf[26:27])[0]
        dirRec.gapSize = struct.unpack("B", desc_buf[27:28])[0]
        dirRec.volSeqNr = struct.unpack("<H", desc_buf[28:30])[0]
        dirRec.lenFi = struct.unpack("B", desc_buf[32:33])[0]
        dirRec.fIdentifier = ""
        if dirRec.lenFi == 1:
            # A one-byte identifier of 0/1 denotes "." / "..".
            dirRec.fIdentifier = struct.unpack("B", desc_buf[33:34])[0]
            if dirRec.fIdentifier == 0:
                dirRec.fIdentifier = "."
            elif dirRec.fIdentifier == 1:
                dirRec.fIdentifier = ".."
        else:
            dirRec.fIdentifier = desc_buf[33:33+dirRec.lenFi].decode()
            # Strip the ISO9660 version suffix (";1").
            idx = dirRec.fIdentifier.rfind(";")
            if idx != -1:
                dirRec.fIdentifier = dirRec.fIdentifier[0:idx]
        dirRec.suspBuf = ""
        # System-use area starts after the identifier (padded to even length).
        dirRec.sysUseStar = 34 + dirRec.lenFi -1
        if dirRec.lenFi % 2 == 0:
            dirRec.sysUseStar += 1
        # Extension Attribute
        if dirRec.lenDr > dirRec.sysUseStar+4:
            if dirRec.locExtent == self.priVol.rootLoc:
                dirRec.suspBuf = desc_buf[dirRec.sysUseStar:dirRec.lenDr]
            suspBuf = desc_buf[dirRec.sysUseStar:dirRec.lenDr]
            if self.rripOffset != -1:
                rripNode = self.__rripLoop__(suspBuf, dirRec.lenDr-dirRec.sysUseStar)
                dirRec.rrip = rripNode
                if rripNode != None:
                    if rripNode.altname != "":
                        # Prefer the Rock Ridge long name over the 8.3 one.
                        dirRec.fIdentifier = rripNode.altname
                        dirRec.lenFi = len(rripNode.altname)
                        #gen.log "rrip_altname: %s"%(dirRec.fIdentifier)
                # if rripNode end #
            # if self.rripOffset != -1 end #
        # if dirRec.lenDr > .. end #
        return dirRec

    def readDirItems(self, block_nr=None, total=None):
        """ Read file dirctory records
        Read dirctory records from 'block_nr' with a length of 'total'.
        Return a list containing directory records(DirRecord).
        """
        dirs = []
        total_blk = (total+BLOCK_SIZE-1)//BLOCK_SIZE
        i_blk = 0
        while i_blk < total_blk:
            self.isoFile.seek((block_nr+i_blk)*BLOCK_SIZE)
            desc_buf = self.isoFile.read(BLOCK_SIZE)
            i_blk = i_blk + 1
            while True:
                dirItem = self.readDirrecord(desc_buf)
                if dirItem == None:
                    break
                dirs.append(dirItem)
                if desc_buf.__len__() > dirItem.lenDr:
                    desc_buf = desc_buf[dirItem.lenDr:]
                else:
                    break
        return dirs

    def readPathtableL(self):
        """ Read path table of L type """
        if self.priVol == None:
            return
        block_nr = self.priVol.ptLRd
        total = self.priVol.ptSize
        path_table = []
        self.isoFile.seek(block_nr*BLOCK_SIZE)
        ptbuf = self.isoFile.read((BLOCK_SIZE * ((total+BLOCK_SIZE-1)//BLOCK_SIZE)))
        i = 0
        r_size = 0
        while True :
            i = i+1
            t = PathTabelItem()
            t.lenDi = struct.unpack('B', ptbuf[0:1])[0]
            t.lenEattr = struct.unpack('B', ptbuf[1:2])[0]
            t.locExtent = struct.unpack('<L', ptbuf[2:6])[0]
            t.pdirNr = struct.unpack('<H', ptbuf[6:8])[0]
            t.fIdentifier = ptbuf[8:8+t.lenDi].decode()
            path_table.append(t)
            # Records are padded to an even length.
            if t.lenDi % 2 :
                len_pd = 1
            else:
                len_pd = 0
            r_size += 9+t.lenDi-1+len_pd
            if r_size >= total:
                break
            ptbuf = ptbuf[9+t.lenDi-1+len_pd:]
        # while True
        return path_table

    # @path -- path within iso file system.
    # @output -- what local path you want write to.
    # @pattern -- regular expression.
    # @r -- recursion flag, write the whole sub-directories or not.
    # @all_type -- which file type should be written.
    #              False: Write regular type files only.
    #              True: Write all types files (regular, device file, link, socket, etc)
    def writeDir(self, path, output, pattern="", r=True, all_type=False):
        """ Extract a directory
        Return 0 means success otherwise failure.
        """
        d = self.searchDir(path)
        if d != None:
            if output.endswith("/"):
                output = output[0:-1]
            # Try to make target directory.
            if not os.path.exists(output):
                try:
                    os.makedirs(output)
                except(OSError):
                    sys.stderr.write("can't make dirs({0})\n".format(output))
                    return E_FAILURE
            pp = None
            if pattern != "":
                p = r'{0}'.format(pattern)
                pp = re.compile(p)
            #gen.log "writeDir: flag(%x)"%(d.fFlag)
            if d.fFlag & 0x02 == 0x02:
                # Check if a clean directory.
                #try:
                #    if len(os.listdir(output)) > 0:
                #        sys.stderr.write("The target directory is not empty\n")
                #        return E_FAILURE
                #except(OSError):
                #    sys.stderr.write("can't access dirs({0})\n".format(p))
                #    return E_FAILURE
                self.writeDir_r(output, d, pp, r, all_type)
                return E_SUCCESS
            else:
                # 'path' names a single file, not a directory.
                return self.writeFile(d, output+path, all_type)
        else:
            return E_FAILURE

    def writeDir_r(self, det_dir, dire, pp, r, all_type):
        """Recursively extract 'dire' into 'det_dir', filtering names with 'pp'."""
        #gen.log "writeDir_r:(%s)"%(det_dir)
        dirs = self.readDirItems(dire.locExtent, dire.lenData)
        for d in dirs:
            if not d.fIdentifier in [".", ".."]:
                if (pp != None) and (pp.search(d.fIdentifier) == None):
                    match = False
                else:
                    match = True
                #gen.log "mathing %s, %s, (%x)"%(match, d.fIdentifier, d.fFlag)
                p = det_dir + "/" + d.fIdentifier
                if d.fFlag & 0x02 == 0x02:
                    if not os.path.exists(p):
                        os.makedirs(p, 0o777)
                    if r:
                        if match:
                            self.writeDir_r(p, d, None, r, all_type) # Don't need to match subdirectory.
                        else:
                            self.writeDir_r(p, d, pp, r, all_type)
                elif match:
                    self.writeFile(d, p, all_type)
            # if not d.fIdentifier end #
        # for d in dirs end #

    def writeFile(self, dirRec, detFile, all_type):
        """ Write a file to detFile
        Return 0 means success otherwise failure.
        """
        # NOTE(review): 'file_out' is never referenced in this method; this
        # global declaration looks vestigial.
        global file_out
        if detFile == "" or dirRec == None:
            sys.stderr.write("can't write file\n")
            return E_FAILURE
        #gen.log "write file (%s)"%(detFile)
        config.status_text = detFile
        dirname = os.path.dirname(detFile)
        if not os.path.exists(dirname):
            try:
                os.makedirs(dirname, 0o777)
            except(OSError):
                sys.stderr.write("can't makedirs\n")
                return E_FAILURE
        if all_type == True:
            # device file
            if dirRec.rrip != None and (dirRec.rrip.devH != 0 or dirRec.rrip.devL != 0):
                #fFlag == 0
                high = dirRec.rrip.devH
                low = dirRec.rrip.devL
                if high == 0:
                    device = os.makedev(os.major(low), os.minor(low))
                else:
                    device = os.makedev(high, os.minor(low))
                try:
                    mode = dirRec.rrip.fMode & 0o770000
                    if mode == S_IFCHR:
                        os.mknod(detFile, 0o777|stat.S_IFCHR, device)
                    elif mode == S_IFBLK:
                        os.mknod(detFile, 0o777|stat.S_IFBLK, device)
                except(OSError):
                    sys.stderr.write("can't mknode, maybe no permission\n")
                    return E_DEVICEFILE
                return E_SUCCESS
        # Regular file: copy 'lenData' bytes from the record's extent.
        loc = dirRec.locExtent
        length = dirRec.lenData
        self.isoFile.seek(BLOCK_SIZE * loc)
        #gen.log "file length(%d)"%(length)
        r_size = BLOCK_SIZE*1024*50 #100M cache
        try:
            f_output = open(detFile, 'wb', r_size)
        except(IOError):
            sys.stderr.write("can't open{0} for write\n".format(detFile))
            return E_FAILURE
        while True:
            if length == 0:
                break
            elif length <= r_size:
                r_size = length
                length = 0
            else:
                length = length - r_size
            buf = self.isoFile.read(r_size)
            f_output.write(buf)
            f_output.flush()
        # while True end.
        f_output.close()
        return E_SUCCESS

    def readDir(self, dir_path, r=True):
        """List paths under 'dir_path'; recurse into subdirectories when r is True."""
        file_list = []
        d = self.searchDir(dir_path)
        if d != None:
            if (d.fFlag & 0x02) == 0x02:
                #gen.log "readDir (%x, %x)"%(d.locExtent, d.lenData)
                if dir_path.endswith("/"):
                    dir_path = dir_path[0:-1]
                self.readDir_r(file_list, dir_path, d, r)
            # if (d.fFlag & 0x02) == 0x02: #
        # if d != None:
        return file_list

    def readDir_r(self, file_list, dir_path, dire, r):
        """Accumulate child paths of directory record 'dire' into 'file_list'."""
        if (dire.fFlag & 0x02) != 0x02:
            return
        dirs = self.readDirItems(dire.locExtent, dire.lenData)
        for d in dirs:
            if not d.fIdentifier in [".", ".."]:
                p = dir_path + "/" + d.fIdentifier
                file_list.append(p)
                if r:
                    self.readDir_r(file_list, p, d, r)
            # if not d.fIdentifier #
        # for d in dirs: #

    def checkIntegrity(self):
        """Cheap integrity check.

        Verifies the image is at least as large as the end offset of the
        last file found under the deepest path-table entries.
        """
        if self.priVol == None: # no primary volume
            return False
        if self.priVol.ptSize == 0: # empty ?
            return True
        path_table = self.readPathtableL()
        if path_table == []: # pathtable record is broken.
            return False
        # find last file item to check
        for dr in reversed(path_table):
            #gen.log dr.fIdentifier
            dirs = self.readDirItems(dr.locExtent, BLOCK_SIZE)
            if len(dirs) > 2:
                dot = dirs[0]
                dirs2 = self.readDirItems(dot.locExtent, dot.lenData) # get the whole items.
                for dr2 in reversed(dirs2): # search last file item.
                    if dr2.fFlag == 0:
                        #gen.log "get last file(%s)"%(dr2.fIdentifier)
                        try:
                            #self.isoFile.seek(BLOCK_SIZE * dr2.locExtent+dr2.lenData)
                            lastfile_end = BLOCK_SIZE * dr2.locExtent + dr2.lenData
                            self.isoFile.seek(0, os.SEEK_END)
                            iso_end = self.isoFile.tell()
                            #gen.log("%d-->%d")%(lastfile_end, iso_end)
                            if iso_end >= lastfile_end:
                                return True
                            else:
                                return False
                        except(IOError):
                            #gen.log "exception when seek. iso is broken"
                            return False
            elif len(dirs) < 2: # Dir record is broken. At least, should have two entries.
                return False
        return True
###########################################################################
def dump_dir_record(dirs):
    """Log every directory record in 'dirs'; does nothing when dirs is None."""
    gen.log("Dump file/directory record")
    gen.log("===========================", end="\n")
    if dirs is None:
        return
    for f in dirs:
        gen.log("length of directory record:(0x%x), length of extend attribute:(%d), \
location of record:(%d)BLOCK->(0x%x), data length(%d) size of file unit:(%d), \
interleave gap size:(%d), file flag:(0x%x),name length:(%d) identify:(%s)\n" \
        %(f.lenDr, f.lenEattr, f.locExtent, f.locExtent*BLOCK_SIZE,f.lenData, \
          f.fUnitSize, f.gapSize, f.fFlag, f.lenFi, f.fIdentifier))
def dump_pathtable_L(path_table):
    """ Dump path table of L type

    Fixed: the root-directory branch referenced an undefined name
    ('is_root', a NameError) and compared the decoded string identifier
    against the integers 0/1, which can never match. The root entry is
    now detected by its raw one-byte identifier and logged with its index.
    """
    gen.log("Dump path table")
    gen.log("================", end="\n")
    #path_table = readPathtableL()
    i = 0
    for t in path_table:
        i = i + 1
        # The root record's identifier is a single 0x00 or 0x01 byte.
        if t.lenDi == 1 and t.fIdentifier in ("\x00", "\x01"):
            gen.log("is a root directory(%d)" %(i))
        gen.log("%d->length of identify:(%d), length of extend attribute:(%d), \
local:(%d)->(0x%x), parent dir number:(%d), identify:(%s)\n" \
        %(i, t.lenDi, t.lenEattr, t.locExtent, t.locExtent*BLOCK_SIZE, t.pdirNr, t.fIdentifier))
def dump_primary_volume(privol=None):
    """Pretty-print the fields of a parsed primary volume descriptor."""
    if privol is None:
        gen.log("Can't dump, maybe iso is broken")
        return
    gen.log("===== Dump primary volume descriptor ==")
    gen.log("System Identifier:(%s)" % (privol.sysIdentifier.decode()))
    gen.log("Volume Identifier:(%s)" % privol.volIdentifier.decode())
    gen.log("Volume Space size:(0x%x)BLOCKS(2kB)" % privol.volSize)
    gen.log("Volume sequence number:(%d)" % (privol.volSeq))
    gen.log("logic block size:(0x%x)" % (privol.blockSize))
    gen.log("Volume path talbe L's BLOCK number is :(0x%x-->0x%x), size(%d)" % (privol.ptLRd, privol.ptLRd*BLOCK_SIZE, privol.ptSize))
#    gen.log "Abstract File Identifier: (%s)" %(volume_dsc[739:776])
#    gen.log "Bibliographic File Identifier: (%s)" %(volume_dsc[776:813])
    gen.log("pathtable locate (%d)" % (privol.ptLRd))
    gen.log("File Structure Version:(%d)" % (privol.fsVer))
    gen.log("Root directory is at (%d)block, have(0x%x)bytes" % (privol.rootLoc, privol.rootTotal))
    #  dump_dir_record(None, 23, 1)
def dump_boot_record(volume_dsc):
    """ Dump boot record

    Fixed for Python 3: indexing a bytes object (volume_dsc[6]) yields an
    int, so struct.unpack('B', ...) raised TypeError; a one-byte slice is
    unpacked instead and the scalar extracted from the returned tuple.
    """
    gen.log("===== Dump boot record ==")
    std_identifier = volume_dsc[1:6]
    gen.log("Standard Identifier:(%s)" %std_identifier)
    vol_ver = struct.unpack('B', volume_dsc[6:7])[0]
    gen.log("Volume descriptor version:(%d)" %vol_ver)
    bootsys_identifier = volume_dsc[7:39]
    gen.log("boot system identifier(%s)" %bootsys_identifier)
    boot_identifier = volume_dsc[39:71]
    gen.log("boot identifier(%s)" %boot_identifier)
def usage():
    """Print command-line help and exit with status -1 (never returns)."""
    # Typos fixed in the help text: directory / specify / matching.
    gen.log("""
Usage: isodump dump-what [options] iso-file
[dump-what]
-----------
boot - Dump boot record.
primary-volume - Dump primary volume.
pathtable - Dump path table.
dir-record [block number] [length] - Dump a raw data of a Directory Record
iso:/dir [-r] [-o output] [-p pattern] - Dump a directory or file to [output]
    -r recursively visit directory.
    -p specify a Regular expression pattern for re.search(pattern,).

isodump xx.iso - Dump the root directory
isodump pathtable xx.iso - Dump the path table record.
isodump iso:/ -r xx.iso
    -- Dump the root directory of xx.iso recursively.
isodump iso:/ -r -o /tmp/iso xx.iso
    -- Extract the iso to /tmp/iso/.
isodump iso:/boot -o /tmp/iso/boot xx.iso
    -- Extract the /boot directory of xx.iso to /tmp/iso/boot.
isodump iso:/boot/grup.cfg -o /tmp/grub.cfg xx.iso
    -- Extract the file "grup.cfg" to "/tmp/grub.cfg"
isodump iso:/boot -r -o /tmp/iso -p "*.cfg" xx.iso
    -- Extract any files or directories under /boot matching "*.cfg" to /tmp/iso/.
""")
    sys.exit(-1)
if __name__ == '__main__':
    argv = sys.argv
    # Need at least a sub-command and an iso file.
    if len(argv) < 3:
        usage()
    # The iso file is always the last argument.
    iso9660fs = ISO9660(argv[-1])
    integrity = iso9660fs.checkIntegrity()
    if integrity == False:
        gen.log("iso file is broken")
        sys.exit(-1)
    dump_what = argv[1]
    if dump_what == "primary-volume":
        dump_primary_volume(iso9660fs.priVol)
    elif dump_what == "pathtable":
        path_table = iso9660fs.readPathtableL()
        dump_pathtable_L(path_table)
    # NOTE(review): this second chain starts with a fresh 'if'; behaviour is
    # unchanged (the commands are mutually exclusive) but 'elif' would read
    # better.
    if dump_what == "dir-record":
        # Expects: isodump dir-record <block> <length> <iso>
        if len(argv) == 5:
            gen.log("dump dir-record (%s, %s)"%(argv[2], argv[3]))
            dirs = iso9660fs.readDirItems(int(argv[2]), int(argv[3]))
            dump_dir_record(dirs)
        else:
            usage()
    elif dump_what.startswith("iso:"):
        # Extraction/list mode.  Parse the -r/-o/-p flags; 'o' and 'p' mark
        # that the NEXT argument is the output path / search pattern.
        o_path = ""
        r = False      # recurse into directories
        o = False      # next arg is the output path
        p = False      # next arg is the pattern
        pattern = ""
        for arg in argv[2:-1]:
            if arg == "-r":
                r = True
                o = False
                p = False
            elif arg == "-o":
                o = True
                p = False
            elif arg == "-p":
                o = False
                p = True
            elif o == True:
                o_path = arg
                o = False
            elif p == True:
                pattern = arg
                p = False
        # Path inside the image, after the 'iso:' prefix.
        isodir = dump_what[4:]
        if o_path == "":
            # No output path: just list the directory contents.
            gen.log("dump_dir(%s)"%(isodir))
            filelist = iso9660fs.readDir(isodir, r)
            if filelist == []:
                gen.log("can't read any file from (%s)"%(isodir))
            else:
                for f in filelist:
                    gen.log(f)
        else:
            # Output path given: extract matching entries to the filesystem.
            gen.log("writeDir(%s)->(%s) with pattern(%s)"%(isodir, o_path, pattern))
            sys.exit(iso9660fs.writeDir(isodir, o_path, pattern, r, True))
| alindt/multibootusb | scripts/isodump3.py | Python | gpl-2.0 | 29,132 | [
"VisIt"
] | 1d2329d75dbcbbdff4dfc4edc74c509e477c82566701cf7f8b5c6f4d3db72c08 |
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008-2011 Kees Bakker
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Import from Pro-Gen"
from __future__ import print_function, unicode_literals
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
import re
import os
import struct
import sys
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger('.ImportProGen')
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.utils.id import create_id
from gramps.gui.utils import ProgressMeter
from gramps.gen.lib import (Attribute, AttributeType, ChildRef, Date, Event,
EventRef, EventType, Family, FamilyRelType, Name,
NameType, Note, NoteType, Person, Place, Source,
Surname, Citation, Location, NameOriginType)
from gramps.gen.db import DbTxn
class ProgenError(Exception):
    """Exception used to report Pro-Gen data errors."""

    def __init__(self, value=""):
        # Keep the message on the instance; __str__ echoes it back.
        super(ProgenError, self).__init__()
        self.value = value

    def __str__(self):
        return self.value
def _importData(database, filename, user):
    """Import *filename* into *database*, reporting failures through *user*.

    All expected errors (unreadable file, malformed Pro-Gen data) are
    routed to user.notify_error instead of propagating.
    """
    try:
        parser = ProgenParser(database, filename)
    except IOError as err:
        user.notify_error(_("%s could not be opened") % filename, str(err))
        return
    try:
        parser.parse_progen_file()
    except ProgenError as err:
        user.notify_error(_("Pro-Gen data error"), str(err))
    except IOError as err:
        user.notify_error(_("%s could not be opened") % filename, str(err))
def _find_from_handle(progen_id, table):
    """
    Map a Pro-Gen ID to a database handle, creating one on first use.

    *table* caches the mapping: an existing entry is returned as-is,
    otherwise a fresh handle is generated, stored, and returned.
    """
    handle = table.get(progen_id)
    if not handle:
        handle = create_id()
        table[progen_id] = handle
    return handle
def _read_mem(bname):
    '''
    Read the .MEM memo file that accompanies a Pro-Gen database *bname*.

    Each record is 32 bytes: a 4 byte little-endian reference to the next
    record followed by 28 bytes of text.  The records form chains that stop
    when a reference is 0 or smaller.
    There are two special sequences in the text:
       <ESC> <CR>   hard return
       <ESC> <^Z>   end of the memo field
    Returns a list of [next_record_number, raw_text] pairs.
    '''
    if os.path.exists(bname + '.MEM'):
        fname = bname + '.MEM'
    else:
        fname = bname + '.mem'
    log.debug("The current system is %s-endian" % sys.byteorder)
    # The input file comes from [what was originally] a DOS machine so will
    # be little-endian, regardless of the 'native' byte order of the host
    # system
    recfmt = "<i28s"
    reclen = struct.calcsize(str(recfmt))
    mems = []
    # BUG FIX: use a context manager so the file is always closed
    # (the original leaked the file handle).
    with open(fname, "rb") as f:
        while True:
            buf = f.read(reclen)
            if not buf:
                break
            (recno, text) = struct.unpack(recfmt, buf)
            mems.append([recno, text])
    return mems
def _read_recs(table, bname):
    """Read all fixed-size records from a .PER or .REL file.

    *table* supplies the file extension (fileext), the struct format
    (recfmt) and a printable name ('name1'); *bname* is the database path
    without extension.  Returns a list of unpacked record tuples.
    """
    if os.path.exists(bname + table.fileext):
        fname = bname + table.fileext
    else:
        fname = bname + table.fileext.lower()
    recfmt = table.recfmt
    log.info("# %s - recfmt = %s" % (table['name1'], recfmt))
    reclen = struct.calcsize(str(recfmt))
    log.info("# %s - reclen = %d" % (table['name1'], reclen))
    recs = []
    # BUG FIX: context manager closes the file even on error
    # (the original leaked the file handle).
    with open(fname, "rb") as f:
        while True:
            buf = f.read(reclen)
            if not buf:
                break
            recs.append(struct.unpack(recfmt, buf))
    log.info("# length %s.recs[] = %d" % (table['name1'], len(recs)))
    return recs
def _get_defname(fname):
    """
    Get the name of the PG30 DEF file by looking at the user DEF file
    <fname> (expected to be somewhere in the PG30 tree) and return a tuple
    (full path to the DEF file or None, relative DEF name).

    Contents of <fname> is something like:
        => \\0
        => C:\\PG30\\NL\\PG30-1.DEF

    We strip the drive letter, convert the rest to a native pathname, and
    walk up from <fname>'s directory until that pathname exists (also
    trying a lower-cased variant at each level).

    Raises ProgenError when <fname> does not look like a Pro-Gen file.
    """
    # BUG FIX: close the file promptly (the original left the handle open).
    with open(fname) as fh:
        lines = fh.readlines()
    if not lines[0].startswith(r'\0') or len(lines) < 2:
        # BUG FIX: an unreachable "return None, '?'" used to follow this
        # raise; it has been removed.
        raise ProgenError(_("Not a Pro-Gen file"))
    defname = lines[1].strip()
    # Strip drive, if any
    defname = re.sub(r'^\w:', '', defname)
    defname = defname.replace('\\', os.sep)
    # Strip leading slash, if any.
    if defname.startswith(os.sep):
        defname = defname[1:]
    # Using the directory of <fname>, go to the parent directory until
    # the DEF is found.
    dir_, f = os.path.split(os.path.abspath(fname))
    while dir_ and dir_ != os.sep:
        newdefname = os.path.join(dir_, defname)
        if os.path.exists(newdefname):
            return newdefname, defname
        newdefname = newdefname.lower()
        if os.path.exists(newdefname):
            return newdefname, defname
        # One level up
        dir_, f = os.path.split(dir_)
    return None, defname
# ESC-^Z terminates a memo field; everything after it is garbage.
esc_ctrlz_pat = re.compile(r'\033\032.*')

def _get_mem_text(mems, i):
    """Return the cleaned-up text of memo chain *i*.

    Pro-Gen numbers memo records starting at 1; i <= 0 means "no memo".
    Continuation records are followed recursively via the stored record
    number.
    """
    if i <= 0:
        return ''
    next_rec, raw = mems[i - 1]
    text = raw.decode('cp850')
    if next_rec != 0:
        # Append the continuation record(s) of the chain.
        text += _get_mem_text(mems, next_rec)
    # ESC-^M is a hard return; ESC-^Z ends the memo.
    text = text.replace('\033\r', '\n')
    text = esc_ctrlz_pat.sub('', text)
    # Drop stray NUL bytes and surrounding whitespace.
    return text.replace('\0', '').strip()
# Dutch/English month-name abbreviations mapped to month numbers.
# Also consumed by the textual-month date pattern in ProgenParser.
month_values = {
    'jan': 1,
    'feb': 2, 'febr': 2,
    'maa': 3, 'mar': 3, 'march': 3, 'mrt': 3, 'maart': 3,
    'apr': 4, 'april': 4,
    'mei': 5, 'may': 5,
    'jun': 6, 'juni': 6,
    'jul': 7, 'juli': 7,
    'aug': 8,
    'sep': 9, 'sept': 9,
    'ok': 10, 'okt': 10, 'oct': 10,
    'nov': 11,
    'dec': 12,
}

def _cnv_month_to_int(m):
    """Return the month number for abbreviation *m*, or 0 when unknown."""
    return month_values.get(m, 0)
# Split "van", "de" prefixes
_surname_prefixes = [
"'t ",
'den ',
'der ',
'de ',
'het ',
'in den ',
'ten ',
'ter ',
'te ',
'van den ',
'van der ',
'van de ',
'van ',
]
def _split_surname(surname):
for p in _surname_prefixes:
if surname.startswith(p):
return p.strip(), surname[len(p):].strip()
return '', surname
class PG30_Def_Table_Field:
    """One field definition from a table section of the DEF file.

    *value* is the comma-separated definition line, e.g.
        'Voornaam, 47, 64, 4, 2, 15, "", ""'
    where item 0 is the field name, item 1 the field type and item 3 the
    field size in bytes.
    """

    def __init__(self, name, value):
        self.fieldname = name
        self.fields = [part.strip() for part in value.split(',')]
        self.name = self.fields[0]
        self.type_ = int(self.fields[1])
        self.size = int(self.fields[3])

    def __repr__(self):
        return '%s -> %s' % (self.fieldname, ', '.join(self.fields))
class PG30_Def_Table:
    'This class represents a table in the DEF file.'

    def __init__(self, name, lines):
        # name  -- section name from the DEF file (e.g. 'Table_1')
        # lines -- raw lines of that section (without the [header] line)
        self.name = name
        self.parms = {}
        self.recfmt = None
        # Example line:
        #f02=Persoon gewijzigd ,32,10,10, 1,68,"","INDI CHAN DATE"
        # NB: under re.VERBOSE the literal spaces in this pattern are
        # insignificant, so it effectively matches '(\w+)=(.*)'.
        line_pat = re.compile(r'(\w+) = (.*)', re.VERBOSE)
        for l in lines:
            #print(l)
            m = line_pat.match(l)
            if m:
                # TODO. Catch duplicates?
                self.parms[m.group(1)] = m.group(2)
        self.fileext = self.parms.get('fileext', None)
        #self.name1 = self.parms.get('name1', None)
        # If there is a n_fields entry then this is a table that
        # has details about the record format of another file (PER or REL).
        if 'n_fields' in self.parms:
            self.get_fields()
            self.recfmt = self.get_recfmt()
            self.nam2fld = {}   # field name -> PG30_Def_Table_Field
            self.nam2idx = {}   # field name -> index into an unpacked record
            self.recflds = [] # list of fields that use up space in a record
            j = 0
            for i, f in enumerate(self.flds):
                #print("# field %s" % f)
                nam = f.name
                self.nam2fld[nam] = f
                if f.size != 0:
                    self.nam2idx[nam] = j
                    #print("# %s <= %d" % (f.fieldname, j))
                    self.recflds.append(f)
                    j = j + 1

    def __getitem__(self, i):
        # Dictionary-style access to the raw section parameters;
        # returns None for unknown keys.
        return self.parms.get(i, None)

    def get_recfmt(self):
        'Get the record format for struct.unpack'
        # Example field:
        # ['Voornaam', '47', '64', '4', '2', '15', '""', '""']
        # item 0
        # item 1 is a number indicating the fieldtype
        # item 2
        # item 3 is the size of the field
        # ...
        flds = self.flds
        # The input file comes from [what was originally] a DOS machine so will
        # be little-endian, regardless of the 'native' byte order of the host
        # system
        fmt = '<'
        for f in flds:
            fldtyp = f.type_
            if fldtyp == 2 or fldtyp == 3 or fldtyp == 22 or fldtyp == 23:
                # record-number types (see field_ix_is_record_number)
                fmt += 'i'
            elif fldtyp == 31:
                # contributes no bytes to the record
                pass
            elif fldtyp == 32 or fldtyp == 44 or fldtyp == 45:
                # fixed-size text field of f.size bytes
                fmt += '%ds' % f.size
            elif fldtyp == 41:
                fmt += 'h'
            elif fldtyp == 42 or fldtyp == 43 or fldtyp == 46 or fldtyp == 47:
                # 46/47 are memo references (see field_is_mem_type)
                fmt += 'i'
            else:
                pass # ???? Do we want to know?
        return fmt

    def get_fields(self):
        # Build self.flds from the numbered f01..fNN entries.
        # For example from pg30-1.def
        #n_fields=58
        #f01=Persoon record ,31, 6, 0, 1,17,"","INDI RFN"
        #f02=Persoon gewijzigd ,32,10,10, 1,68,"","INDI CHAN DATE"
        #f03=Voornaam ,47,64, 4, 2,15,"",""
        n_fields = int(self.parms['n_fields'])
        flds = []
        for i in range(n_fields):
            fld_name = 'f%02d' % (i+1)
            fld = self.parms.get(fld_name, None)
            flds.append(PG30_Def_Table_Field(fld_name, fld))
        self.flds = flds

    def get_record_field_index(self, fldname):
        'Return the index number in the record tuple, based on the name.'
        if not fldname in self.nam2idx:
            raise ProgenError(_("Field '%(fldname)s' not found") % locals())
        return self.nam2idx[fldname]

    def convert_record_to_list(self, rec, mems):
        # Convert one unpacked record tuple into a list of text values:
        # record numbers become decimal strings, memo references are
        # resolved through *mems*, everything else is cp850-decoded text.
        flds = []
        for i in range(len(rec)):
            if self.field_ix_is_record_number(i):
                flds.append("%d" % rec[i])
            elif self.field_ix_is_mem_type(i):
                flds.append(_get_mem_text(mems, rec[i]))
            else:
                # Not a record number, not a mem number. It must be just text.
                fld = rec[i].strip()
                # Convert to unicode
                fld = fld.decode('cp850')
                flds.append(fld)
        #print(', '.join(flds))
        return flds

    def get_field_names(self):
        # Names of the fields that actually occupy bytes in a record.
        ret = []
        for f in self.flds:
            if f.size != 0:
                ret.append(f.name)
        return ret

    def field_is_mem_type(self, fldname):
        # True when the named field is a memo reference (types 46/47);
        # None when the name is unknown.
        if not fldname in self.nam2fld:
            return None
        typ = self.nam2fld[fldname].type_
        if typ == 46 or typ == 47:
            return True
        return False

    # TODO. Integrate this into field_is_mem_type()
    def field_ix_is_mem_type(self, ix):
        # Index-based variant of field_is_mem_type().
        typ = self.recflds[ix].type_
        if typ == 46 or typ == 47:
            return True
        return False

    def field_ix_is_record_number(self, ix):
        # True when the ix-th record field holds a record number
        # (types 2, 3, 22, 23).
        typ = self.recflds[ix].type_
        if typ == 2 or typ == 3 or typ == 22 or typ == 23:
            return True
        return False

    def diag(self):
        # Short, human-readable diagnostic summary of this table.
        txt = self.name + '\n'
        if 'n_fields' in self.parms:
            txt += 'n_fields = %s\n' % self.parms['n_fields']
            # Just grab a field
            f = self.flds[1]
            txt += '"%s"\n' % f
            txt += 'recfmt = %s (length=%d)' % (self.recfmt,
                                                struct.calcsize(str(self.recfmt)))
        return txt
class PG30_Def:
    '''
    Utility class to read PG30-1.DEF and to get certain information
    from it.

    The contents of the DEF file is separated in sections that start
    with [<section name>]. For example:
        [general]
        dateformat=DD-MM-YYYY
        pointerlength=4
        tables=2
    '''
    def __init__(self, fname):
        # Resolve the real DEF file via the user DEF file.
        fname, deffname = _get_defname(fname)
        if not fname:
            raise ProgenError(_("Cannot find DEF file: %(deffname)s") % locals())
        # This can throw a IOError
        import io
        lines = None
        with io.open(fname, buffering=1,
                     encoding='cp437', errors='strict') as f:
            lines = f.readlines()
        lines = [l.strip() for l in lines]
        # BUG FIX: keep the content as text instead of encoding to UTF-8
        # bytes.  re.split()/re.sub() below use str patterns, which raise
        # TypeError on a bytes subject under Python 3; plain str works on
        # both Python 2 (unicode_literals is in effect) and Python 3.
        content = '\n'.join(lines)
        # Split into sections: each section starts with a '[' at the
        # beginning of a line.
        parts = re.split(r'\n(?=\[)', content)
        self.parts = {}
        self.tables = {}
        for p in parts:
            lines = p.splitlines()
            # Get section name
            k = re.sub(r'\[(.*)\]', r'\1', lines[0])
            # Store section contents in a hashtable using that section name
            self.parts[k] = lines[1:]
            self.tables[k] = PG30_Def_Table(k, self.parts[k])
        # Some sections are special: Table_1 and Table_2

    def __getitem__(self, i):
        # Section lookup by name; None for unknown sections.
        return self.tables.get(i, None)

    # TODO. Maybe rename to __repr__
    def diag(self):
        """Return a diagnostic dump of all tables."""
        return '\n\n'.join([self.tables[t].diag() for t in self.tables])
class ProgenParser(object):
def __init__(self, dbase, file_):
self.bname, ext = os.path.splitext(file_)
if ext.lower() in ('.per', '.rel', '.mem'):
file_ = self.bname + '.def'
self.db = dbase
self.fname = file_
self.gid2id = {} # Maps person id
self.fid2id = {} # Maps family id
self.fm2fam = {}
self.pkeys = {} # Caching place handles
self.skeys = {} # Caching source handles
    def parse_progen_file(self):
        """Parse the DEF/MEM/PER/REL files and load everything into the db.

        Work happens inside one batch transaction with database signals
        disabled for speed; a ProgressMeter reports progress to the UI.
        """
        self.def_ = PG30_Def(self.fname)
        #print self.def_.diag()
        self.progress = ProgressMeter(_("Import from Pro-Gen"), '')
        # Memo texts plus the person (Table_1) and relation (Table_2) records.
        self.mems = _read_mem(self.bname)
        self.pers = _read_recs(self.def_['Table_1'], self.bname)
        self.rels = _read_recs(self.def_['Table_2'], self.bname)
        with DbTxn(_("Pro-Gen import"), self.db, batch=True) as self.trans:
            self.db.disable_signals()
            self.create_persons()
            self.create_families()
            self.add_children()
            # NOTE(review): indentation was lost in this copy; signals are
            # re-enabled here, inside the transaction — confirm against the
            # upstream source whether these two calls belong after the
            # 'with' block instead.
            self.db.enable_signals()
            self.db.request_rebuild()
        self.progress.close()
def __find_person_handle(self, progen_id):
"""
Return the database handle associated with the person's Pro-Gen ID
"""
return _find_from_handle(progen_id, self.gid2id)
def __find_family_handle(self, progen_id):
"""
Return the database handle associated with the family's Pro-Gen ID
"""
return _find_from_handle(progen_id, self.fid2id)
    def __find_or_create_person(self, progen_id):
        """
        Finds or creates a person based on the Pro-Gen ID. If the ID is
        already used (is in the db), we return the item in the db. Otherwise,
        we create a new person, assign the handle and GRAMPS ID.
        """
        person = Person()
        intid = self.gid2id.get(progen_id)
        if self.db.has_person_handle(intid):
            # Known handle: rehydrate the existing person from raw db data.
            person.unserialize(self.db.get_raw_person_data(intid))
        else:
            # New person: derive a Gramps ID from the Pro-Gen number, but
            # fall back to the next free ID when it is already taken.
            gramps_id = self.db.id2user_format("I%d" % progen_id)
            if self.db.id_trans.get(gramps_id):
                gramps_id = self.db.find_next_person_gramps_id()
            intid = _find_from_handle(progen_id, self.gid2id)
            person.set_handle(intid)
            person.set_gramps_id(gramps_id)
        return person
    def __find_or_create_family(self, progen_id):
        """
        Finds or creates a family based on the Pro-Gen ID. If the ID is
        already used (is in the db), we return the item in the db. Otherwise,
        we create a new family, assign the handle and GRAMPS ID.
        """
        family = Family()
        intid = self.fid2id.get(progen_id)
        if self.db.has_family_handle(intid):
            # Known handle: rehydrate the existing family from raw db data.
            family.unserialize(self.db.get_raw_family_data(intid))
        else:
            # New family: derive a Gramps ID from the Pro-Gen number, but
            # fall back to the next free ID when it is already taken.
            gramps_id = self.db.fid2user_format("F%d" % progen_id)
            if self.db.id_trans.get(gramps_id):
                gramps_id = self.db.find_next_family_gramps_id()
            intid = _find_from_handle(progen_id, self.fid2id)
            family.set_handle(intid)
            family.set_gramps_id(gramps_id)
        return family
    def __get_or_create_place(self, place_name):
        """Return a Place for *place_name*, creating and committing it on
        first use; None for an empty name.  Handles are cached in
        self.pkeys so the same name always yields the same Place."""
        if not place_name:
            return None
        place = None
        if place_name in self.pkeys:
            place = self.db.get_place_from_handle(self.pkeys[place_name])
        else:
            # Create a new Place
            place = Place()
            place.set_title(place_name)
            self.db.add_place(place, self.trans)
            self.db.commit_place(place, self.trans)
            self.pkeys[place_name] = place.get_handle()
        return place
def __get_or_create_citation(self, source_name, aktenr=None,
source_text=None):
if not source_name:
return None
# Aktenr is something very special and it belongs with the source_name
if aktenr:
source_name = "%(source_name)s, aktenr: %(aktenr)s" % locals()
if source_name in self.skeys:
source = self.db.get_source_from_handle(self.skeys[source_name])
else:
# Create a new Source
source = Source()
source.set_title(source_name)
self.db.add_source(source, self.trans)
self.db.commit_source(source, self.trans)
self.skeys[source_name] = source.get_handle()
citation = Citation()
citation.set_reference_handle(source.get_handle())
if aktenr:
sattr = SrcAttribute()
sattr.set_type("REFN")
sattr.set_value(aktenr)
citation.add_attribute(sattr)
if source_text:
note = Note()
note_type = NoteType()
note_type.set((NoteType.CUSTOM, "Brontekst"))
note.set_type(note_type)
note.set(source_text)
self.db.add_note(note, self.trans)
citation.add_note(note.handle)
self.db.add_citation(citation, self.trans)
self.db.commit_citation(citation, self.trans)
return citation
    def __create_event_and_ref(self, type_, desc=None, date=None, place=None,
                               citation=None, note_text=None, time=None):
        """Create and commit an Event of *type_* and return (event, event_ref).

        desc/date/place/citation are attached when given; *time* becomes a
        TIME attribute and *note_text* a custom "Info" note on the event.
        """
        event = Event()
        event.set_type(EventType(type_))
        if desc:
            event.set_description(desc)
        if date:
            event.set_date_object(date)
        if place:
            event.set_place_handle(place.get_handle())
        if citation:
            event.add_citation(citation.handle)
        if time:
            # Pro-Gen stores a free-text time of day; keep it as an attribute.
            attr = Attribute()
            attr.set_type(AttributeType.TIME)
            attr.set_value(time)
            event.add_attribute(attr)
        if note_text:
            note = Note()
            note_type = NoteType()
            note_type.set((NoteType.CUSTOM, "Info"))
            note.set_type(note_type)
            note.set(note_text)
            self.db.add_note(note, self.trans)
            event.add_note(note.handle)
        self.db.add_event(event, self.trans)
        self.db.commit_event(event, self.trans)
        event_ref = EventRef()
        event_ref.set_reference_handle(event.get_handle())
        return event, event_ref
    # Date patterns tried in order by __create_date_from_text.  Note that
    # under re.VERBOSE the literal spaces in the patterns are insignificant.
    __date_pat1 = re.compile(r'(?P<day>\d{1,2}) (-|=) (?P<month>\d{1,2}) (-|=) (?P<year>\d{2,4})', re.VERBOSE)
    __date_pat2 = re.compile(r'(?P<month>\d{1,2}) (-|=) (?P<year>\d{4})', re.VERBOSE)
    __date_pat3 = re.compile(r'(?P<year>\d{3,4})', re.VERBOSE)
    __date_pat4 = re.compile(r'(v|vóór|voor|na|circa|ca|rond|±) (\.|\s)* (?P<year>\d{3,4})', re.VERBOSE)
    __date_pat5 = re.compile(r'(oo|OO) (-|=) (oo|OO) (-|=) (?P<year>\d{2,4})', re.VERBOSE)
    # Textual month names come from the module-level month_values table.
    __date_pat6 = re.compile(r'(?P<month>(%s)) (\.|\s)* (?P<year>\d{3,4})' % '|'.join(list(month_values.keys())), re.VERBOSE | re.IGNORECASE)

    def __create_date_from_text(self, txt, diag_msg=None):
        '''
        Pro-Gen has a text field for the date. It can be anything. Mostly it will be dd-mm-yyyy,
        but we have seen:
        yyyy
        mm-yyyy
        voor yyyy
        dd=mm-yyyy (typo I guess)
        00-00-yyyy
        oo-oo-yyyy
        dd-mm-00 (does this mean we do not know about the year?)

        This function tries to parse the text and create a proper Gramps Date() object.
        If all else fails we create a MOD_TEXTONLY Date() object.
        '''
        if not txt or txt == 'onbekend' or txt == '??':
            # Empty or explicitly-unknown date.
            return None
        date = Date()
        # dd-mm-yyyy
        m = self.__date_pat1.match(txt)
        if m:
            day = int(m.group('day'))
            month = int(m.group('month'))
            year = int(m.group('year'))
            if day and month and year:
                date.set_yr_mon_day(year, month, day)
            else:
                # Some component is zero: treat the date as approximate.
                date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN, (day, month, year, None))
            return date
        # mm-yyyy
        m = self.__date_pat2.match(txt)
        if m:
            month = int(m.group('month'))
            year = int(m.group('year'))
            date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN, (0, month, year, None))
            return date
        # yyy or yyyy
        m = self.__date_pat3.match(txt)
        if m:
            year = int(m.group('year'))
            date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN, (0, 0, year, None))
            return date
        # voor|na|... yyyy  ("before"/"after"/"about" qualifiers, in Dutch)
        m = self.__date_pat4.match(txt)
        if m:
            year = int(m.group('year'))
            if m.group(1) == 'voor' or m.group(1) == 'v' or m.group(1) == 'vóór':
                date.set(Date.QUAL_NONE, Date.MOD_BEFORE, Date.CAL_GREGORIAN, (0, 0, year, None))
            elif m.group(1) == 'na':
                date.set(Date.QUAL_NONE, Date.MOD_AFTER, Date.CAL_GREGORIAN, (0, 0, year, None))
            else:
                date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN, (0, 0, year, None))
            return date
        # oo-oo-yyyy  (unknown day and month)
        m = self.__date_pat5.match(txt)
        if m:
            year = int(m.group('year'))
            date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN, (0, 0, year, None))
            return date
        # mmm yyyy (textual month)
        m = self.__date_pat6.match(txt)
        if m:
            year = int(m.group('year'))
            month = _cnv_month_to_int(m.group('month'))
            date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN, (0, month, year, None))
            return date
        # Nothing matched: log it and keep the raw text as a text-only date.
        log.warning(_("date did not match: '%(text)s' (%(msg)s)") % {
            'text' : txt.encode('utf-8'), 'msg' : diag_msg or '' } )
        # Hmmm. Just use the plain text.
        date.set_as_text(txt)
        return date
    def create_persons(self):
        """Create a Gramps Person, with names, attributes, notes, events
        (birth/baptism/death/burial/cremation/residence) and citations, for
        every usable record in the Pro-Gen PER table.

        Records whose father or mother reference is negative are skipped
        entirely (NOTE(review): inferred from the >= 0 guard below —
        presumably these are deleted records; confirm against Pro-Gen docs).
        """
        table = self.def_['Table_1']
        # Records in the PER file using PG30-1.DEF contain the following fields:
        # (Note. We want this to be computed just once.
        #log.info(table.get_field_names())
        # I'm sure I can find a better way to do this through the local() dict)
        first_name_ix = table.get_record_field_index('Voornaam')
        surname_ix = table.get_record_field_index('Achternaam')
        gender_ix = table.get_record_field_index('Geslacht')
        patron_ix = table.get_record_field_index('Patroniem')
        call_name_ix = table.get_record_field_index('Roepnaam')
        alias_ix = table.get_record_field_index('Alias')
        per_code_ix = table.get_record_field_index('Persoon code')
        title1_ix = table.get_record_field_index('Titel1')
        title2_ix = table.get_record_field_index('Titel2')
        title3_ix = table.get_record_field_index('Titel3')
        father_ix = table.get_record_field_index('Vader')
        mother_ix = table.get_record_field_index('Moeder')
        occu_ix = table.get_record_field_index('Beroep')
        per_klad_ix = table.get_record_field_index('Persoon klad')
        per_info_ix = table.get_record_field_index('Persoon info')
        # Address ("Adres") fields
        addr_date_ix = table.get_record_field_index('Adres datum')
        addr_street_ix = table.get_record_field_index('Adres straat')
        addr_postal_ix = table.get_record_field_index('Adres postcode')
        addr_place_ix = table.get_record_field_index('Adres plaats')
        addr_country_ix = table.get_record_field_index('Adres land')
        addr_telno_ix = table.get_record_field_index('Adres telefoon')
        addr_info_ix = table.get_record_field_index('Adres info')
        # Birth ("Geboorte") fields
        birth_date_ix = table.get_record_field_index('Geboorte datum')
        birth_place_ix = table.get_record_field_index('Geboorte plaats')
        birth_time_ix = table.get_record_field_index('Geboorte tijd')
        birth_source_ix = table.get_record_field_index('Geboorte bron')
        birth_aktenr_ix = table.get_record_field_index('Geboorte akte')
        birth_source_text_ix = table.get_record_field_index('Geboorte brontekst')
        birth_info_ix = table.get_record_field_index('Geboorte info')
        # Baptism ("Doop") fields
        bapt_date_ix = table.get_record_field_index('Doop datum')
        bapt_place_ix = table.get_record_field_index('Doop plaats')
        bapt_reli_ix = table.get_record_field_index('Gezindte')
        bapt_witn_ix = table.get_record_field_index('Doop getuigen')
        bapt_source_ix = table.get_record_field_index('Doop bron')
        bapt_aktenr_ix = table.get_record_field_index('Doop akte')
        bapt_source_text_ix = table.get_record_field_index('Doop brontekst')
        bapt_info_ix = table.get_record_field_index('Doop info')
        # Death ("Overlijden") fields
        death_date_ix = table.get_record_field_index('Overlijden datum')
        death_place_ix = table.get_record_field_index('Overlijden plaats')
        death_time_ix = table.get_record_field_index('Overlijden tijd')
        death_source_ix = table.get_record_field_index('Overlijden bron')
        death_aktenr_ix = table.get_record_field_index('Overlijden akte')
        death_source_text_ix = table.get_record_field_index('Overlijden brontekst')
        death_info_ix = table.get_record_field_index('Overlijden info')
        # Cremation ("Crematie") fields
        crem_date_ix = table.get_record_field_index('Crematie datum')
        crem_place_ix = table.get_record_field_index('Crematie plaats')
        crem_source_ix = table.get_record_field_index('Crematie bron')
        crem_aktenr_ix = table.get_record_field_index('Crematie akte')
        crem_source_text_ix = table.get_record_field_index('Crematie brontekst')
        crem_info_ix = table.get_record_field_index('Crematie info')
        # Burial ("Begrafenis") fields
        bur_date_ix = table.get_record_field_index('Begrafenis datum')
        bur_place_ix = table.get_record_field_index('Begrafenis plaats')
        bur_source_ix = table.get_record_field_index('Begrafenis bron')
        bur_aktenr_ix = table.get_record_field_index('Begrafenis akte')
        bur_source_text_ix = table.get_record_field_index('Begrafenis brontekst')
        bur_info_ix = table.get_record_field_index('Begrafenis info')
        # The records are numbered 1..N
        self.progress.set_pass(_('Importing individuals'), len(self.pers))
        for i, rec in enumerate(self.pers):
            pers_id = i + 1
            log.debug(("Person id %d " % pers_id) + " ".join(("%s" % r) for r in rec))
            father = rec[father_ix]
            mother = rec[mother_ix]
            if father >= 0 and mother >= 0:
                # Resolve memo references / decode text fields once.
                recflds = table.convert_record_to_list(rec, self.mems)
                gender = recflds[gender_ix]
                if gender == 'M':
                    gender = Person.MALE
                elif gender == 'V':
                    gender = Person.FEMALE
                else:
                    gender = Person.UNKNOWN
                person = self.__find_or_create_person(pers_id)
                first_name = recflds[first_name_ix]
                surname_prefix, surname = _split_surname(recflds[surname_ix])
                patronym = recflds[patron_ix]       # INDI _PATR
                alias = recflds[alias_ix]           # INDI NAME _ALIA/INDI NAME COMM
                title1 = recflds[title1_ix]         # INDI TITL
                title2 = recflds[title2_ix]         # INDI _TITL2
                title3 = recflds[title3_ix]         # INDI _TITL3
                # Used only in log messages for unparseable dates.
                diag_msg = "%s: %s %s" % (person.gramps_id, first_name.encode('utf-8'), surname.encode('utf-8'))
                # process the name/given name
                name = Name()
                name.set_type(NameType.BIRTH)
                name.set_first_name(first_name)
                if recflds[call_name_ix]:
                    name.set_call_name(recflds[call_name_ix])
                title = [_f for _f in [title1, title2, title3] if _f]
                if title:
                    name.set_title(", ".join(title))
                # process the normal surname
                sname = Surname()
                sname.set_surname(surname)
                if surname_prefix:
                    sname.set_prefix(surname_prefix)
                name.add_surname(sname)
                # process the Patronymic
                if patronym:
                    pname = Surname()
                    pname.set_surname(patronym)
                    pname.set_origintype(NameOriginType.PATRONYMIC)
                    name.add_surname(pname)
                person.set_primary_name(name)
                person.set_gender(gender)
                per_code = recflds[per_code_ix]     # INDI REFN
                if per_code:
                    attr = Attribute()
                    attr.set_type((AttributeType.CUSTOM, "REFN"))
                    attr.set_value(per_code)
                    person.add_attribute(attr)
                per_klad = recflds[per_klad_ix]     # INDI _COMM/INDI COMM
                per_info = recflds[per_info_ix]     # INDI NOTE
                note_txt = [_f for _f in [per_info, per_klad] if _f]
                if note_txt:
                    note = Note()
                    note.set('\n'.join(note_txt))
                    note.set_type(NoteType.PERSON)
                    self.db.add_note(note, self.trans)
                    person.add_note(note.handle)
                # Alias. Two possibilities: extra Name, or Attribute
                if alias:
                    aname = alias.split()
                    if len(aname) == 1:
                        # Single word: treat as a nickname attribute.
                        attr = Attribute()
                        attr.set_type(AttributeType.NICKNAME)
                        attr.set_value(alias)
                        person.add_attribute(attr)
                    else:
                        # ???? Don't know if this is OK.
                        # Multiple words: last word is the surname, the rest
                        # the given name, stored as an AKA name.
                        name = Name()
                        sname = Surname()
                        sname.set_surname(aname[-1].strip())
                        name.add_surname(sname)
                        name.set_first_name(' '.join(aname[0:-1]))
                        name.set_type(NameType.AKA)
                        person.add_alternate_name(name)
                if recflds[occu_ix]:
                    event, event_ref = self.__create_event_and_ref(EventType.OCCUPATION, recflds[occu_ix])
                    person.add_event_ref(event_ref)
                # Birth
                date = self.__create_date_from_text(recflds[birth_date_ix], diag_msg)
                place = self.__get_or_create_place(recflds[birth_place_ix])
                time = recflds[birth_time_ix]
                if time:
                    time_text = "tijd: " + time
                else:
                    time_text = None
                source_title = recflds[birth_source_ix]
                source_refn = recflds[birth_aktenr_ix]
                source_text = recflds[birth_source_text_ix]
                citation = self.__get_or_create_citation(source_title,
                                                         source_refn,
                                                         source_text)
                info = recflds[birth_info_ix]
                if date or place or info or citation:
                    desc = [_f for _f in [info, time_text, source_text] if _f]
                    desc = desc and '; '.join(desc) or None
                    event, birth_ref = self.__create_event_and_ref(EventType.BIRTH, desc, date, place, citation, info, time)
                    person.set_birth_ref(birth_ref)
                # Baptism
                date = self.__create_date_from_text(recflds[bapt_date_ix], diag_msg)
                place = self.__get_or_create_place(recflds[bapt_place_ix])
                reli = recflds[bapt_reli_ix]
                witness = recflds[bapt_witn_ix]
                source_title = recflds[bapt_source_ix]
                source_refn = recflds[bapt_aktenr_ix]
                source_text = recflds[bapt_source_text_ix]
                citation = self.__get_or_create_citation(source_title,
                                                         source_refn,
                                                         source_text)
                info = recflds[bapt_info_ix]
                if date or place or info or citation or reli or witness:
                    desc = [_f for _f in [reli, info, source_text] if _f]
                    desc = desc and '; '.join(desc) or None
                    event, bapt_ref = self.__create_event_and_ref(EventType.BAPTISM, desc, date, place, citation, info)
                    person.add_event_ref(bapt_ref)
                    if witness:
                        attr = Attribute()
                        attr.set_type(AttributeType.WITNESS)
                        attr.set_value(witness)
                        event.add_attribute(attr)
                # Death
                date = self.__create_date_from_text(recflds[death_date_ix], diag_msg)
                place = self.__get_or_create_place(recflds[death_place_ix])
                time = recflds[death_time_ix]
                if time:
                    time = "tijd: " + time
                source_title = recflds[death_source_ix]
                source_refn = recflds[death_aktenr_ix]
                source_text = recflds[death_source_text_ix]
                info = recflds[death_info_ix]
                citation = self.__get_or_create_citation(source_title,
                                                         source_refn,
                                                         source_text)
                if date or place or info or citation:
                    desc = [_f for _f in [info, time, source_text] if _f]
                    desc = desc and '; '.join(desc) or None
                    event, death_ref = self.__create_event_and_ref(EventType.DEATH, desc, date, place, citation, info, time)
                    person.set_death_ref(death_ref)
                # Burial
                date = self.__create_date_from_text(recflds[bur_date_ix], diag_msg)
                place = self.__get_or_create_place(recflds[bur_place_ix])
                source_title = recflds[bur_source_ix]
                source_refn = recflds[bur_aktenr_ix]
                source_text = recflds[bur_source_text_ix]
                citation = self.__get_or_create_citation(source_title,
                                                         source_refn,
                                                         source_text)
                info = recflds[bur_info_ix]
                if date or place or info or citation:
                    desc = [_f for _f in [info, source_text] if _f]
                    desc = desc and '; '.join(desc) or None
                    event, burial_ref = self.__create_event_and_ref(EventType.BURIAL, desc, date, place, citation, info)
                    person.add_event_ref(burial_ref)
                # Cremation
                date = self.__create_date_from_text(recflds[crem_date_ix], diag_msg)
                place = self.__get_or_create_place(recflds[crem_place_ix])
                source_title = recflds[crem_source_ix]
                source_refn = recflds[crem_aktenr_ix]
                source_text = recflds[crem_source_text_ix]
                citation = self.__get_or_create_citation(source_title,
                                                         source_refn,
                                                         source_text)
                info = recflds[crem_info_ix]
                if date or place or info or citation:
                    # TODO. Check that not both burial and cremation took place.
                    desc = [_f for _f in [info, source_text] if _f]
                    desc = desc and '; '.join(desc) or None
                    event, cremation_ref = self.__create_event_and_ref(EventType.CREMATION, desc, date, place, citation)
                    person.add_event_ref(cremation_ref)
                # TODO. Address
                date = self.__create_date_from_text(recflds[addr_date_ix], diag_msg)
                street = recflds[addr_street_ix]
                postal = recflds[addr_postal_ix]
                place = self.__get_or_create_place(recflds[addr_place_ix])
                country = recflds[addr_country_ix]
                telno = recflds[addr_telno_ix]
                info = recflds[addr_info_ix]        # INDI RESI NOTE/INDI ADDR
                if place:
                    # Attach the address details to the place and record a
                    # residence event for the person.
                    loc = Location()
                    loc.set_street(street)
                    loc.set_postal_code(postal)
                    loc.set_country(country)
                    loc.set_phone(telno)
                    place.set_main_location(loc)
                    self.db.commit_place(place, self.trans)
                    desc = info or None
                    event, resi_ref = self.__create_event_and_ref(EventType.RESIDENCE, desc, date, place)
                    if info:
                        note = Note()
                        note.set(info)
                        note.set_type(NoteType.EVENT)
                        self.db.add_note(note, self.trans)
                        event.add_note(note.handle)
                        # Re-commit: the event gained a note after its
                        # initial commit inside __create_event_and_ref.
                        self.db.commit_event(event, self.trans)
                    person.add_event_ref(resi_ref)
                self.db.commit_person(person, self.trans)
            self.progress.step()
def create_families(self):
table = self.def_['Table_2']
# Records in the REL file using PG30-1.DEF contain the following fields:
# (Note. We want this to be computed just once.
#log.info(table.get_field_names())
man_ix = table.get_record_field_index('Man')
vrouw_ix = table.get_record_field_index('Vrouw')
rel_code_ix = table.get_record_field_index('Relatie code')
rel_klad_ix = table.get_record_field_index('Relatie klad')
rel_info_ix = table.get_record_field_index('Relatie info')
civu_date_ix = table.get_record_field_index('Samenwonen datum')
civu_place_ix = table.get_record_field_index('Samenwonen plaats')
civu_source_ix = table.get_record_field_index('Samenwonen bron')
civu_aktenr_ix = table.get_record_field_index('Samenwonen akte')
civu_source_text_ix = table.get_record_field_index('Samenwonen brontekst')
civu_info_ix = table.get_record_field_index('Samenwonen info')
marl_date_ix = table.get_record_field_index('Ondertrouw datum')
marl_place_ix = table.get_record_field_index('Ondertrouw plaats')
marl_witn_ix = table.get_record_field_index('Ondertrouw getuigen')
marl_source_ix = table.get_record_field_index('Ondertrouw bron')
marl_aktenr_ix = table.get_record_field_index('Ondertrouw akte')
marl_source_text_ix = table.get_record_field_index('Ondertrouw brontekst')
marl_info_ix = table.get_record_field_index('Ondertrouw info')
mar_date_ix = table.get_record_field_index('Wettelijk datum')
mar_place_ix = table.get_record_field_index('Wettelijk plaats')
mar_witn_ix = table.get_record_field_index('Wettelijk getuigen')
mar_source_ix = table.get_record_field_index('Wettelijk bron')
mar_aktenr_ix = table.get_record_field_index('Wettelijk akte')
mar_source_text_ix = table.get_record_field_index('Wettelijk brontekst')
mar_info_ix = table.get_record_field_index('Wettelijk info')
marc_date_ix = table.get_record_field_index('Kerkelijk datum')
marc_place_ix = table.get_record_field_index('Kerkelijk plaats')
marc_reli_ix = table.get_record_field_index('Kerk')
marc_witn_ix = table.get_record_field_index('Kerkelijk getuigen')
marc_source_ix = table.get_record_field_index('Kerkelijk bron')
marc_aktenr_ix = table.get_record_field_index('Kerkelijk akte')
marc_source_text_ix = table.get_record_field_index('Kerkelijk brontekst')
marc_info_ix = table.get_record_field_index('Kerkelijk info')
div_date_ix = table.get_record_field_index('Scheiding datum')
div_place_ix = table.get_record_field_index('Scheiding plaats')
div_source_ix = table.get_record_field_index('Scheiding bron')
div_aktenr_ix = table.get_record_field_index('Scheiding akte')
div_source_text_ix = table.get_record_field_index('Scheiding brontekst')
div_info_ix = table.get_record_field_index('Scheiding info')
# The records are numbered 1..N
self.progress.set_pass(_('Importing families'), len(self.rels))
for i, rec in enumerate(self.rels):
fam_id = i + 1
husband = rec[man_ix]
wife = rec[vrouw_ix]
if husband > 0 or wife > 0:
recflds = table.convert_record_to_list(rec, self.mems)
self.highest_fam_id = fam_id
fam = self.__find_or_create_family(fam_id)
husband_handle = None
if husband > 0:
husband_handle = self.__find_person_handle(husband)
fam.set_father_handle(husband_handle)
husband_person = self.db.get_person_from_handle(husband_handle)
husband_person.add_family_handle(fam.get_handle())
self.db.commit_person(husband_person, self.trans)
wife_handle = None
if wife > 0:
wife_handle = self.__find_person_handle(wife)
fam.set_mother_handle(wife_handle)
wife_person = self.db.get_person_from_handle(wife_handle)
wife_person.add_family_handle(fam.get_handle())
self.db.commit_person(wife_person, self.trans)
diag_msg = "%s: %s %s" % (fam.gramps_id,
husband_person.gramps_id if husband_handle else "",
wife_person.gramps_id if wife_handle else "")
self.fm2fam[husband_handle, wife_handle] = fam
rel_code = recflds[rel_code_ix]
rel_klad = recflds[rel_klad_ix]
rel_info = recflds[rel_info_ix]
note_txt = [_f for _f in [rel_info, rel_klad] if _f]
if note_txt:
note = Note()
note.set('\n'.join(note_txt))
note.set_type(NoteType.FAMILY)
self.db.add_note(note, self.trans)
fam.add_note(note.handle)
if rel_code:
attr = Attribute()
attr.set_type((AttributeType.CUSTOM, "REFN"))
attr.set_value(rel_code)
fam.add_attribute(attr)
# Wettelijk => Marriage
date = self.__create_date_from_text(recflds[mar_date_ix], diag_msg)
place = self.__get_or_create_place(recflds[mar_place_ix])
witness = recflds[mar_witn_ix]
citation = self.__get_or_create_citation(recflds[mar_source_ix], recflds[mar_aktenr_ix])
source_title = recflds[mar_source_ix]
source_refn = recflds[mar_aktenr_ix]
source_text = recflds[mar_source_text_ix]
citation = self.__get_or_create_citation(source_title,
source_refn,
source_text)
info = recflds[mar_info_ix]
if date or place or info or citation:
desc = [_f for _f in [info, source_text] if _f]
desc = desc and '; '.join(desc) or None
event, mar_ref = self.__create_event_and_ref(EventType.MARRIAGE, desc, date, place, citation, info)
fam.add_event_ref(mar_ref)
if witness:
attr = Attribute()
attr.set_type(AttributeType.WITNESS)
attr.set_value(witness)
event.add_attribute(attr)
self.db.commit_event(event, self.trans)
# Type of relation
fam.set_relationship(FamilyRelType(FamilyRelType.MARRIED))
# Kerkelijk => Marriage
date = self.__create_date_from_text(recflds[marc_date_ix], diag_msg)
place = self.__get_or_create_place(recflds[marc_place_ix])
reli = recflds[marc_reli_ix]
witness = recflds[marc_witn_ix]
citation = self.__get_or_create_citation(recflds[marc_source_ix], recflds[marc_aktenr_ix])
source_title = recflds[marc_source_ix]
source_refn = recflds[marc_aktenr_ix]
source_text = recflds[marc_source_text_ix]
citation = self.__get_or_create_citation(source_title,
source_refn,
source_text)
info = recflds[marc_info_ix]
if date or place or info or citation:
desc = [_f for _f in [reli, info, source_text] if _f]
desc.insert(0, 'Kerkelijk huwelijk')
desc = desc and '; '.join(desc) or None
event, marc_ref = self.__create_event_and_ref(EventType.MARRIAGE, desc, date, place, citation, info)
fam.add_event_ref(marc_ref)
if witness:
attr = Attribute()
attr.set_type(AttributeType.WITNESS)
attr.set_value(witness)
event.add_attribute(attr)
self.db.commit_event(event, self.trans)
# Type of relation
fam.set_relationship(FamilyRelType(FamilyRelType.MARRIED))
# Ondertrouw => Marriage License
date = self.__create_date_from_text(recflds[marl_date_ix], diag_msg)
place = self.__get_or_create_place(recflds[marl_place_ix])
witness = recflds[marl_witn_ix]
citation = self.__get_or_create_citation(recflds[marl_source_ix], recflds[marl_aktenr_ix])
source_title = recflds[marl_source_ix]
source_refn = recflds[marl_aktenr_ix]
source_text = recflds[marl_source_text_ix]
citation = self.__get_or_create_citation(source_title,
source_refn,
source_text)
info = recflds[marl_info_ix]
if date or place or info or citation:
desc = [_f for _f in [info, source_text] if _f]
desc.insert(0, 'Ondertrouw')
desc = desc and '; '.join(desc) or None
event, marl_ref = self.__create_event_and_ref(EventType.MARR_LIC, desc, date, place, citation, info)
fam.add_event_ref(marl_ref)
if witness:
attr = Attribute()
attr.set_type(AttributeType.WITNESS)
attr.set_value(witness)
event.add_attribute(attr)
self.db.commit_event(event, self.trans)
# Samenwonen => Civil Union
date = self.__create_date_from_text(recflds[civu_date_ix], diag_msg)
place = self.__get_or_create_place(recflds[civu_place_ix])
citation = self.__get_or_create_citation(recflds[civu_source_ix], recflds[civu_aktenr_ix])
source_title = recflds[civu_source_ix]
source_refn = recflds[civu_aktenr_ix]
source_text = recflds[civu_source_text_ix]
citation = self.__get_or_create_citation(source_title,
source_refn,
source_text)
info = recflds[civu_info_ix]
if date or place or info or citation:
desc = [_f for _f in [info, source_text] if _f]
desc.insert(0, 'Samenwonen')
desc = desc and '; '.join(desc) or None
event, civu_ref = self.__create_event_and_ref(EventType.UNKNOWN, desc, date, place, citation, info)
fam.add_event_ref(civu_ref)
# Type of relation
fam.set_relationship(FamilyRelType(FamilyRelType.CIVIL_UNION))
# Scheiding => Divorce
date = self.__create_date_from_text(recflds[div_date_ix], diag_msg)
place = self.__get_or_create_place(recflds[div_place_ix])
citation = self.__get_or_create_citation(recflds[div_source_ix], recflds[div_aktenr_ix])
source_title = recflds[div_source_ix]
source_refn = recflds[div_aktenr_ix]
source_text = recflds[div_source_text_ix]
citation = self.__get_or_create_citation(source_title,
source_refn,
source_text)
info = recflds[div_info_ix]
if date or place or info or citation:
desc = [_f for _f in [info, source_text] if _f]
desc = desc and '; '.join(desc) or None
event, div_ref = self.__create_event_and_ref(EventType.DIVORCE, desc, date, place, citation, info)
fam.add_event_ref(div_ref)
self.db.commit_family(fam, self.trans)
self.progress.step()
    def add_children(self):
        """Link every imported person to his/her parents' family.

        Walks the person (PER) records a second time; for each person with
        a father or mother reference the family is looked up in self.fm2fam
        (filled by create_families) or created when it was absent from the
        REL file, and the person is attached as a child of that family.
        """
        # Once more to record the father and mother
        table = self.def_['Table_1']
        father_ix = table.get_record_field_index('Vader')
        mother_ix = table.get_record_field_index('Moeder')
        # The records are numbered 1..N
        self.progress.set_pass(_('Adding children'), len(self.pers))
        for i, rec in enumerate(self.pers):
            pers_id = i + 1
            father = rec[father_ix]
            mother = rec[mother_ix]
            if father > 0 or mother > 0:
                # Find the family with this father and mother
                person_handle = self.__find_person_handle(pers_id)
                father_handle = father > 0 and self.__find_person_handle(father) or None
                mother_handle = mother > 0 and self.__find_person_handle(mother) or None
                # A dangling parent reference is only warned about; the
                # child is then not attached to any family.
                if father > 0 and not father_handle:
                    log.warning(_("cannot find father for I%(person)s (father=%(id)d)") % {
                        'person' : pers_id, 'id' : father } )
                elif mother > 0 and not mother_handle:
                    log.warning(_("cannot find mother for I%(person)s (mother=%(mother)d)") % {
                        'person' : pers_id, 'mother' : mother } )
                else:
                    fam = self.fm2fam.get((father_handle, mother_handle), None)
                    if not fam:
                        # Family not present in REL. Create a new one.
                        self.highest_fam_id = self.highest_fam_id + 1
                        fam_id = self.highest_fam_id
                        fam = self.__find_or_create_family(fam_id)
                        if father_handle:
                            fam.set_father_handle(father_handle)
                            father_person = self.db.get_person_from_handle(father_handle)
                            father_person.add_family_handle(fam.get_handle())
                            self.db.commit_person(father_person, self.trans)
                        if mother_handle:
                            fam.set_mother_handle(mother_handle)
                            mother_person = self.db.get_person_from_handle(mother_handle)
                            mother_person.add_family_handle(fam.get_handle())
                            self.db.commit_person(mother_person, self.trans)
                    if fam:
                        childref = ChildRef()
                        childref.set_reference_handle(person_handle)
                        fam.add_child_ref(childref)
                        self.db.commit_family(fam, self.trans)
                        person = self.db.get_person_from_handle(person_handle)
                        person.add_parent_family_handle(fam.get_handle())
                        self.db.commit_person(person, self.trans)
            self.progress.step()
| pmghalvorsen/gramps_branch | gramps/plugins/importer/importprogen.py | Python | gpl-2.0 | 55,893 | [
"Brian"
] | 7da37b305073aad3f8135b68c5bea80b15b0834c3d49e69de34fb0b3073cb2d7 |
#!/usr/bin/env python3
import argparse
import os
import csv
import datetime
import getpass
from selenium import webdriver
from scraper import Page
# All scraped csv files are written below <directory of this file>/data
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
# Command line: a required facebook user plus an optional browser choice.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-b', '--browser',
    choices=['firefox', 'phantom'],
    default='firefox',
    help='choose browser'
)
parser.add_argument('user', help='your facebook user')
def get_friends(username, password, browser):
    """
    Log in to facebook with the given credentials and scrape the
    friends page.

    :param username: str (facebook username)
    :param password: str (password used to login to fb)
    :param browser: WebDriver
    :return: list
        each entry contains name and info split by '\n',
        e.g. ['John Doe\n3 mutual friends', ...]
    """
    page = Page(browser)
    page.visit()
    page.login(username, password)
    page.got_to_friends_page()
    friend_list = page.get_all_friends()
    # Shut the browser down before handing the result back.
    page.driver.quit()
    return friend_list
def write_to_csv(username, friends):
    """
    Writes data to csv file with format:
    NO;NAME;INFO

    The file lands under DATA_DIR/<local part of username>/ and is named
    after the current timestamp (seconds precision).

    :param username: str (facebook username, possibly an email address)
    :param friends: iterable of str, each 'Name\nInfo' or just 'Name'
    :return: str (path of the csv file that was written)
    """
    data_dir = os.path.join(DATA_DIR, username.split('@')[0])
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    csv_file = os.path.join(
        data_dir,
        datetime.datetime.now().isoformat().split('.')[0] + '.csv'
    )
    with open(csv_file, 'w') as out:
        fieldnames = ['NO', 'NAME', 'INFO']
        writer = csv.DictWriter(out, fieldnames=fieldnames, delimiter=';')
        writer.writeheader()
        # enumerate replaces the manual row counter; the loop variable no
        # longer shadows the open file handle (the original reused 'f' for
        # both the file object and each split friend entry).
        for row_no, friend in enumerate(sorted(friends), start=1):
            parts = friend.split('\n')
            writer.writerow(
                {
                    'NO': row_no, 'NAME': parts[0],
                    'INFO': parts[1] if len(parts) == 2 else 'N/A'
                })
    return csv_file
if __name__ == '__main__':
    args = parser.parse_args()
    username = args.user
    # Prompt for the password without echoing it to the terminal.
    password = getpass.getpass()
    # 'phantom' selects the headless PhantomJS driver; default is Firefox.
    if args.browser == 'phantom':
        browser = webdriver.PhantomJS()
    else:
        browser = webdriver.Firefox()
    friends = get_friends(username, password, browser)
    csv_file = write_to_csv(username, friends)
    print("Record saved in %s" % csv_file)
| skamsie/facebook-friends-tracker | scrutinize.py | Python | mit | 2,264 | [
"VisIt"
] | cff068235f59ce37203e393da2bd8b9fec633369917c5a2bca10054e5af5ba58 |
import os
import sys
import glob
import shutil
import subprocess
def cmd(c):
    """Run *c* through the shell; abort (AssertionError naming the
    command) unless it exits with status 0."""
    status = os.system(c)
    assert status == 0, c
def fail(subject, email=None, filename='/dev/null', mailer='mail'):
    """Send a notification mail (optionally with attachments) and exit.

    subject  -- subject line of the mail
    email    -- recipient address; when None no mail is sent at all
    filename -- whitespace-separated list of files to attach;
                '/dev/null' means "no attachments, empty body"
    mailer   -- 'mailx' (new mailx, ~v12), 'mail' (old mailx, ~v8) or
                'mutt'; selects how attachments are passed on the command
                line

    Always raises SystemExit so the calling script terminates here.
    """
    assert mailer in ['mailx', 'mail', 'mutt']
    import os
    if email is not None:
        if filename == '/dev/null':
            # NOTE(review): this no-attachment branch always invokes 'mail'
            # and ignores the mailer argument -- confirm that is intended.
            assert os.system('mail -s "%s" %s < %s' %
                             (subject, email, filename)) == 0
        else: # attachments
            filenames = filename.split()
            if mailer == 'mailx': # new mailx (12?)
                attach = ''
                for f in filenames:
                    attach += ' -a %s ' % f
                # send with empty body
                assert os.system('echo | mail %s -s "%s" %s' %
                                 (attach, subject, email)) == 0
            elif mailer == 'mail': # old mailx (8?)
                # Old mailx has no attachment flag: uuencode each file into
                # the mail body instead, renaming its extension to '.log'.
                attach = '('
                for f in filenames:
                    ext = os.path.splitext(f)[-1]
                    if ext:
                        flog = os.path.basename(f).replace(ext, '.log')
                    else:
                        flog = f
                    attach += 'uuencode %s %s&&' % (f, flog)
                # remove final &&
                attach = attach[:-2]
                attach += ')'
                assert os.system('%s | mail -s "%s" %s' %
                                 (attach, subject, email)) == 0
            else: # mutt
                attach = ''
                for f in filenames:
                    attach += ' -a %s ' % f
                # send with empty body
                assert os.system('mutt %s -s "%s" %s < /dev/null' %
                                 (attach, subject, email)) == 0
    raise SystemExit
# --- command line: optional --dir <path> selects the working directory ---
if '--dir' in sys.argv:
    i = sys.argv.index('--dir')
    dir = os.path.abspath(sys.argv[i+1])
else:
    dir = 'agts'
# optional --email <address> enables the success/failure notification mail
if '--email' in sys.argv:
    i = sys.argv.index('--email')
    email = sys.argv[i+1]
else:
    email = None
# NOTE(review): the directory must pre-exist (assert) and is then wiped and
# recreated empty -- confirm a pre-existing directory is a real precondition.
assert os.path.isdir(dir)
gpawdir = os.path.join(dir, 'gpaw')
# remove the old run directory
if os.path.isdir(dir):
    shutil.rmtree(dir)
os.mkdir(dir)
os.chdir(dir)
cmd('svn checkout https://svn.fysik.dtu.dk/projects/gpaw/trunk gpaw')
# a version of gpaw is needed for imports from within this script!
cmd("\
cd " + gpawdir + "&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
python setup.py build_ext 2>&1 > build_ext.log")
# import gpaw from where it was installed
sys.path.insert(0, gpawdir)
# Build GPAW remotely on the thul (xeon) and fjorm (opteron) front-ends.
cmd("echo '\
cd '" + gpawdir + "'&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
module load openmpi/1.3.3-1.el5.fys.open64.4.2.3 && \
module load hdf5/1.8.6-5.el5.fys.open64.4.2.3.openmpi.1.3.3 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-xeon-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext 2>&1 > thul.log' | ssh thul bash")
cmd("echo '\
cd '" + gpawdir + "'&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-opteron-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext 2>&1 > fjorm.log' | ssh fjorm bash")
# Fetch the latest PAW setups next to the checkout.
cmd("""wget --no-check-certificate --quiet \
http://wiki.fysik.dtu.dk/gpaw-files/gpaw-setups-latest.tar.gz && \
tar xzf gpaw-setups-latest.tar.gz && \
rm gpaw-setups-latest.tar.gz && \
mv gpaw-setups-[0-9]* gpaw/gpaw-setups""")
cmd('svn export https://svn.fysik.dtu.dk/projects/ase/trunk ase')
# ase needed
sys.path.insert(0, '%s/ase' % dir)
from gpaw.test.big.agts import AGTSQueue
from gpaw.test.big.niflheim import NiflheimCluster
# Collect the AGTS jobs and run the whole queue on the cluster.
queue = AGTSQueue()
queue.collect()
cluster = NiflheimCluster(asepath=os.path.join(dir, 'ase'),
                          setuppath=os.path.join(gpawdir, 'gpaw-setups'))
# Example below is confusing: job.script must NOT be the *.agts.py script,
# but the actual python script to be run!
# testsuite.agts.py does both: see gpaw/test/big/miscellaneous/testsuite.agts.py
#queue.jobs = [job for job in queue.jobs if job.script == 'testsuite.agts.py']
nfailed = queue.run(cluster)
gfiles = os.path.join(dir, 'gpaw-files')
if not os.path.isdir(gfiles):
    os.mkdir(gfiles)
queue.copy_created_files(gfiles)
# make files readable by go
files = glob.glob(gfiles + '/*')
for f in files:
    os.chmod(f, 0644)  # Python 2 octal literal (rw-r--r--)
from gpaw.version import version
subject = 'AGTS GPAW %s: ' % str(version)
# Send mail:
sfile = os.path.join(dir, 'status.log')
attach = sfile
if not nfailed:
    subject += ' succeeded'
    fail(subject, email, attach, mailer='mutt')
else:
    subject += ' failed'
    # attach failed tests error files
    ft = [l.split()[0] for l in open(sfile).readlines() if 'FAILED' in l]
    for t in ft:
        ef = glob.glob(os.path.join(dir, t) + '.e*')
        for f in ef:
            attach += ' ' + f
    fail(subject, email, attach, mailer='mutt')
if 0:
    # Analysis:
    import matplotlib
    matplotlib.use('Agg')
    from gpaw.test.big.analysis import analyse
    user = os.environ['USER']
    analyse(queue,
            '../analysis/analyse.pickle', # file keeping history
            '../analysis', # Where to dump figures
            rev=niflheim.revision,
            #mailto='gpaw-developers@listserv.fysik.dtu.dk',
            mailserver='servfys.fysik.dtu.dk',
            attachment='status.log')
| robwarm/gpaw-symm | tools/niflheim-agts.py | Python | gpl-3.0 | 5,426 | [
"ASE",
"GPAW"
] | 2cb540b0e4b3aba10705d24149ec7dccb632b244f97d0515391ea0a2c97c1a6a |
#!/usr/bin/env python2
import numpy,re,sys
def pyscf2QP(cell,mf, kpts=[], int_threshold = 1E-15):
    """Dump a restricted PySCF mean-field calculation to Quantum Package
    input files ('param', 'e_nuc', 'kinetic_mo', 'nuclear_mo', 'bielec_mo').

    cell          -- the PySCF molecule/cell object
    mf            -- the converged mean-field object (possibly PBC)
    kpts          -- explicit k-point list; mandatory (incl. gamma) for PBC
    int_threshold -- two-electron integrals below this value are not written

    NOTE(review): the mutable default 'kpts=[]' is shared between calls; it
    is only read here so it is harmless, but 'kpts=None' would be safer.
    """
    # The integral will be not printed in they are bellow that
    PBC=False
    ComputeMode= re.split('[. ]', str(mf))
    print 'ComputeMode=',ComputeMode
    # Reject unrestricted calculations and detect PBC from the class name.
    for n in ComputeMode:
        if n in ("UHF","KUHF","UKS"):
            sys.exit('Unrestricted calculation unsupported in Quantum Package')
        if n == "pbc":
            PBC=True
    if PBC and len(kpts) == 0:
        sys.exit("ERROR (read!): You need to specify explicit the list of K-point (including gamma)")
    print 'Performing PBC?:',PBC
    if PBC:
        from pyscf.pbc import ao2mo
        from pyscf.pbc import tools
    else:
        from pyscf import ao2mo
    natom = len(cell.atom_coords())
    print 'n_atom', natom
    print 'num_elec', cell.nelectron
    print 'nucl_num', len(cell.atom_coords())
    print ''
    mo_coeff = mf.mo_coeff # List of mo_coeff for each k-point
    if not PBC:
        nmo = mo_coeff.shape[1]
    else:
        nmo = mo_coeff[0].shape[1]
    # Wrote all the parameter need to creat a dummy EZFIO folder who will containt the integral after.
    # More an implentation detail than a real thing
    with open('param','w') as f:
        f.write(' '.join(map(str,(cell.nelectron, nmo, natom))))
    #                     _
    # |\ |      _ |  _  _. ._   |_) _ ._  | _ o  _ ._
    # | \| |_| (_ | (/_ (_| |   | \ (/_ |_) |_| | _> | (_) | |
    #                                   |
    print 'mf, cell', mf.energy_nuc(), cell.energy_nuc()
    # Madelung correction of the nuclear repulsion in the PBC case;
    # zero for a molecule.
    shift = tools.pbc.madelung(cell, numpy.zeros(3))*cell.nelectron * -.5 if PBC else 0
    e_nuc = cell.energy_nuc() + shift
    print 'nucl_repul', e_nuc
    with open('e_nuc','w') as f:
        f.write(str(e_nuc))
    from itertools import product
    #  ___
    #   |  ._ _|_  _   _  ._ _. |  _   |\/|  _  ._   _
    #  _|_ | | |_ (/_ (_| | (_| | _>   |  | (_) | | (_)
    #               _|
    # 'nuclear' is written as an all-zero dummy matrix; only the kinetic
    # part carries the one-electron core Hamiltonian here.
    if PBC:
        h_ao = ('kinetic', mf.get_hcore(kpts=kpts) ) # Give only one k point ?
        dummy_ao = ('nuclear', numpy.zeros( (len(kpts),nmo,nmo), dtype=numpy.float ))
    else:
        h_ao = ('kinetic', mf.get_hcore() )
        dummy_ao = ('nuclear', numpy.zeros( (nmo,nmo), dtype=numpy.float ))
    def gen_mono_MO(mo_coeff,l_int,shift=0):
        # AO -> MO transformation of a one-electron matrix; yields 1-based
        # (i, j, value) triples, offset by 'shift' for the k-block.
        # 2Id transfortion Transformation. For now we handle only one or zero K point.
        print 'l_int.shape=',l_int.shape
        l_int_mo = reduce(numpy.dot, (mo_coeff.T, l_int, mo_coeff)) #This formula is only right for one kpt.
        print 'l_int_mo=',l_int_mo
        for i,j in product(range(nmo), repeat=2):
            int_ = l_int_mo[i,j]
            yield (i+1+shift,j+1+shift, int_)
    # Print
    for name, ao in (h_ao,dummy_ao):
        with open('%s_mo' % name,'w') as f:
            print '%s_mo' % name
            if not PBC:
                for mono in gen_mono_MO(mo_coeff,ao):
                    f.write('%s %s %s\n'% mono)
            else:
                for i,(m,a) in enumerate(zip(mo_coeff,ao)):
                    for mono in gen_mono_MO(m,a,i):
                        f.write('%s %s %s\n'% mono)
    #  ___                                  _
    #   |  ._ _|_  _   _  ._ _. |  _   |_) o
    #  _|_ | | |_ (/_ (_| | (_| | _>   |_) |
    #               _|
    #
    def ao2mo_amazing(mo_coeff):
        # Full (non-compact) AO->MO two-electron transformation,
        # reshaped to an (nmo, nmo, nmo, nmo) array.
        if PBC:
            eri_4d= mf.with_df.ao2mo(mo_coeff,compact=False)
        else:
            eri_4d= ao2mo.kernel(cell,mo_coeff,compact=False)
        return eri_4d.reshape((nmo,)*4)
    def write_amazing(eri_4d, shift=0):
        # Writes to the enclosing file handle 'f' (opened below).
        # HANDLE 8 FOLD by Scemama way. Maybe we can use compact=True
        for l in range(nmo):
            for k in range(nmo):
                for j in range(l,nmo):
                    for i in range(max(j,k),nmo):
                        v = eri_4d[i,k,j,l]
                        if abs(v) > int_threshold:
                            f.write('%s %s %s %s %s\n' % (i+1+shift,j+1+shift,k+1+shift,l+1+shift,v))
    # NOTE(review): the eri_4d computed in the next five lines appears to be
    # unused -- the loop below recomputes the integrals via ao2mo_amazing().
    if PBC:
        eri_4d= mf.with_df.ao2mo(mo_coeff[0],compact=False)
    else: #Molecular
        eri_4d= ao2mo.kernel(cell,mo_coeff,compact=False)
    eri_4d = eri_4d.reshape((nmo,)*4)
    f = open('bielec_mo','w')
    # NOTE(review): in the molecular case mo_coeff is a 2-D array, so this
    # enumerate iterates over its rows -- presumably only the PBC case
    # (list of per-k-point blocks) is intended here; verify.
    for i,mc in enumerate(mo_coeff):
        eri = ao2mo_amazing(mc)
        write_amazing(eri, nmo*i)
"PySCF"
] | 8979760b24548690a50e310b500aeab5955515dd2e8bd6cd35aaa696d3181e88 |
"""
Test Levels
"""
__RCSID__ = "$Id$"
import unittest
from DIRAC.FrameworkSystem.private.standardLogging.test.TestLoggingBase import Test_Logging, gLogger
class Test_Levels(Test_Logging):
  """
  Test get and set levels.

  Works on the gLogger singleton and on self.log, a sub-logger provided by
  the Test_Logging base class; self.buffer collects the emitted records.
  The assertion order matters: the tests share gLogger's global state.
  """

  def test_00shown(self):
    """
    Test the validity of the shown method
    """
    gLogger.setLevel('warn')
    # debug is below the 'warn' threshold: nothing is emitted
    gLogger.debug('message')
    self.assertEqual(self.buffer.getvalue(), "")
    self.assertEqual(gLogger.shown('debug'), False)
    self.buffer.truncate(0)
    gLogger.warn('message')
    self.assertIn("", self.buffer.getvalue())
    self.assertEqual(gLogger.shown('warn'), True)
    self.buffer.truncate(0)

  def test_01setLevelGetLevel(self):
    """
    Set gLogger level to error and get it
    """
    gLogger.setLevel('error')
    self.assertEqual(gLogger.getLevel(), 'ERROR')

  def test_02setLevelCreateLog(self):
    """
    Set gLogger level to error and try to create debug and error logs
    """
    gLogger.setLevel('error')
    # only error and the levels above it are shown
    self.assertEqual(gLogger.shown('debug'), False)
    self.assertEqual(gLogger.shown('verbose'), False)
    self.assertEqual(gLogger.shown('info'), False)
    self.assertEqual(gLogger.shown('warn'), False)
    self.assertEqual(gLogger.shown('notice'), False)
    self.assertEqual(gLogger.shown('error'), True)
    self.assertEqual(gLogger.shown('always'), True)
    self.assertEqual(gLogger.shown('fatal'), True)

  def test_03setLevelGetSubLogLevel(self):
    """
    Set gLogger level to error and get its sublogger level
    """
    # the sub-logger inherits the parent's level
    gLogger.setLevel('error')
    self.assertEqual(self.log.getLevel(), 'ERROR')

  def test_04setLevelCreateLogSubLog(self):
    """
    Set gLogger level to error and try to create debug and error logs and sublogs
    """
    gLogger.setLevel('error')
    gLogger.debug("message")
    self.log.debug("message")
    self.assertEqual(gLogger.shown('debug'), False)
    self.assertEqual(self.log.shown('debug'), False)
    gLogger.verbose('message')
    self.log.verbose('message')
    self.assertEqual(gLogger.shown('verbose'), False)
    self.assertEqual(self.log.shown('verbose'), False)
    gLogger.info('message')
    self.log.info('message')
    self.assertEqual(gLogger.shown('info'), False)
    self.assertEqual(self.log.shown('info'), False)
    gLogger.warn('message')
    self.log.warn('message')
    self.assertEqual(gLogger.shown('warn'), False)
    self.assertEqual(self.log.shown('warn'), False)
    gLogger.notice('message')
    self.log.notice('message')
    self.assertEqual(gLogger.shown('notice'), False)
    self.assertEqual(self.log.shown('notice'), False)
    gLogger.error('message')
    self.log.error('message')
    self.assertEqual(gLogger.shown('error'), True)
    self.assertEqual(self.log.shown('error'), True)
    gLogger.always('message')
    self.log.always('message')
    self.assertEqual(gLogger.shown('always'), True)
    self.assertEqual(self.log.shown('always'), True)
    gLogger.fatal('message')
    self.log.fatal('message')
    self.assertEqual(gLogger.shown('fatal'), True)
    self.assertEqual(self.log.shown('fatal'), True)

  def test_05setLevelSubLevelCreateLogSubLog(self):
    """
    Set gLogger level to error and log level to debug, and try to create debug and error logs and sublogs
    """
    # the log methods return True when the record is emitted
    gLogger.setLevel('error')
    self.log.setLevel('debug')
    self.assertEqual(gLogger.debug("message"), False)
    self.assertEqual(self.log.debug("message"), True)
    self.assertEqual(gLogger.verbose('message'), False)
    self.assertEqual(self.log.verbose('message'), True)
    self.assertEqual(gLogger.info('message'), False)
    self.assertEqual(self.log.info('message'), True)
    self.assertEqual(gLogger.warn('message'), False)
    self.assertEqual(self.log.warn('message'), True)
    self.assertEqual(gLogger.notice('message'), False)
    self.assertEqual(self.log.notice('message'), True)
    self.assertEqual(gLogger.error('message'), True)
    self.assertEqual(self.log.error('message'), True)
    self.assertEqual(gLogger.always('message'), True)
    self.assertEqual(self.log.always('message'), True)
    self.assertEqual(gLogger.fatal('message'), True)
    self.assertEqual(self.log.fatal('message'), True)

  def test_06setLevelSubLevelCreateLogSubLog2(self):
    """
    Set gLogger level to debug and log level to error, and try to create debug and error logs and sublogs
    """
    gLogger.setLevel('debug')
    self.log.setLevel('error')
    self.assertEqual(gLogger.debug("message"), True)
    self.assertEqual(self.log.debug("message"), False)
    self.assertEqual(gLogger.verbose('message'), True)
    self.assertEqual(self.log.verbose('message'), False)
    self.assertEqual(gLogger.info('message'), True)
    self.assertEqual(self.log.info('message'), False)
    self.assertEqual(gLogger.warn('message'), True)
    self.assertEqual(self.log.warn('message'), False)
    self.assertEqual(gLogger.notice('message'), True)
    self.assertEqual(self.log.notice('message'), False)
    self.assertEqual(gLogger.error('message'), True)
    self.assertEqual(self.log.error('message'), True)
    self.assertEqual(gLogger.always('message'), True)
    self.assertEqual(self.log.always('message'), True)
    self.assertEqual(gLogger.fatal('message'), True)
    self.assertEqual(self.log.fatal('message'), True)

  def test_07getAllLevels(self):
    """
    Get all possible levels
    """
    self.assertEqual(gLogger.getAllPossibleLevels(), ['INFO', 'WARN',
                                                      'NOTICE', 'VERBOSE', 'ERROR', 'DEBUG', 'ALWAYS', 'FATAL'])
    self.assertEqual(self.log.getAllPossibleLevels(), ['INFO', 'WARN',
                                                       'NOTICE', 'VERBOSE', 'ERROR', 'DEBUG', 'ALWAYS', 'FATAL'])

  def test_08modifySubLevelAndGetSubSubLevel(self):
    """
    Modify the sub logger level, then the gLogger level and get the subsublogger level
    """
    # levels propagate down the logger hierarchy until a sub-logger sets
    # its own level explicitly
    gLogger.setLevel('debug')
    sublogger = self.log.getSubLogger("sublog")
    self.assertEqual(gLogger.getLevel(), "DEBUG")
    self.assertEqual(self.log.getLevel(), "DEBUG")
    self.assertEqual(sublogger.getLevel(), "DEBUG")
    gLogger.setLevel('error')
    self.assertEqual(gLogger.getLevel(), "ERROR")
    self.assertEqual(self.log.getLevel(), "ERROR")
    self.assertEqual(sublogger.getLevel(), "ERROR")
    self.log.setLevel('notice')
    self.assertEqual(gLogger.getLevel(), "ERROR")
    self.assertEqual(self.log.getLevel(), "NOTICE")
    self.assertEqual(sublogger.getLevel(), "NOTICE")
    gLogger.setLevel('verbose')
    self.assertEqual(gLogger.getLevel(), "VERBOSE")
    self.assertEqual(self.log.getLevel(), "NOTICE")
    self.assertEqual(sublogger.getLevel(), "NOTICE")
if __name__ == '__main__':
  # Run the whole test case with verbose console output.
  suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test_Levels)
  testResult = unittest.TextTestRunner(verbosity=2).run(suite)
| andresailer/DIRAC | FrameworkSystem/private/standardLogging/test/Test_Levels.py | Python | gpl-3.0 | 6,874 | [
"DIRAC"
] | 5df9c368b58bc09e4a29f2c4834d199f205ca91e9ee54e5c589baf50e5b12aa0 |
"""
Support for running a tool in Galaxy via an internal job management system
"""
from abc import ABCMeta
from abc import abstractmethod
import time
import copy
import datetime
import galaxy
import logging
import os
import pwd
import random
import re
import shutil
import subprocess
import sys
import traceback
from galaxy import model, util
from galaxy.datatypes import metadata
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.jobs.actions.post import ActionBox
from galaxy.jobs.mapper import JobRunnerMapper
from galaxy.jobs.runners import BaseJobRunner, JobState
from galaxy.util.bunch import Bunch
from galaxy.util.expressions import ExpressionContext
from galaxy.util.json import loads
from galaxy.util import unicodify
from .output_checker import check_output
from .datasets import TaskPathRewriter
from .datasets import OutputsToWorkingDirectoryPathRewriter
from .datasets import NullDatasetPathRewriter
from .datasets import DatasetPath
log = logging.getLogger( __name__ )

# Module-level aliases of the truncation limits defined in galaxy.util.
DATABASE_MAX_STRING_SIZE = util.DATABASE_MAX_STRING_SIZE
DATABASE_MAX_STRING_SIZE_PRETTY = util.DATABASE_MAX_STRING_SIZE_PRETTY

# This file, if created in the job's working directory, will be used for
# setting advanced metadata properties on the job and its associated outputs.
# This interface is currently experimental, is only used by the upload tool,
# and should eventually become API'd
TOOL_PROVIDED_JOB_METADATA_FILE = 'galaxy.json'
class JobDestination( Bunch ):
    """
    Provides details about where a job runs
    """
    def __init__(self, **kwds):
        # Seed every known key with its default before applying kwds.
        for key, default in (('id', None),
                             ('url', None),
                             ('tags', None),
                             ('runner', None),
                             ('legacy', False),
                             ('converted', False)):
            self[key] = default
        self['env'] = []
        self['resubmit'] = []
        # dict is appropriate (rather than a bunch) since keys may not be valid as attributes
        self['params'] = dict()
        # Reuse the destination persisted on an existing job, when supplied.
        if 'from_job' in kwds and kwds['from_job'].destination_id is not None:
            self['id'] = kwds['from_job'].destination_id
            self['params'] = kwds['from_job'].destination_params
        super(JobDestination, self).__init__(**kwds)
        # Normalize a comma-separated tag string into a list of tags.
        if self.tags is not None:
            self['tags'] = [ tag.strip() for tag in self.tags.split(',') ]
class JobToolConfiguration( Bunch ):
    """
    Provides details on what handler and destination a tool should use

    A JobToolConfiguration will have the required attribute 'id' and optional
    attributes 'handler', 'destination', and 'params'
    """
    def __init__(self, **kwds):
        # The optional attributes default to "unset"; kwds may override them.
        for key in ('handler', 'destination'):
            self[key] = None
        self['params'] = dict()
        super(JobToolConfiguration, self).__init__(**kwds)

    def get_resource_group( self ):
        # Returns the configured resource group, or None when not set.
        return self.get( "resources", None )
class JobConfiguration( object ):
"""A parser and interface to advanced job management features.
These features are configured in the job configuration, by default, ``job_conf.xml``
"""
DEFAULT_NWORKERS = 4
    def __init__(self, app):
        """Parse the job configuration XML.

        :param app: the Galaxy application object; supplies the
            configuration (``app.config.job_config_file``) and is kept
            for later use by the parsing helpers.
        """
        self.app = app
        self.runner_plugins = []          # parsed <plugin type="runner"> definitions
        self.dynamic_params = None        # params of the special 'dynamic' runner plugin
        self.handlers = {}                # handler id or tag -> handler id container
        self.handler_runner_plugins = {}  # handler id -> list of runner plugin ids
        self.default_handler_id = None
        self.destinations = {}
        self.destination_tags = {}
        self.default_destination_id = None
        self.tools = {}
        self.resource_groups = {}
        self.default_resource_group = None
        self.resource_parameters = {}
        self.limits = Bunch()
        self.__parse_resource_parameters()
        # Initialize the config
        try:
            tree = util.parse_xml(self.app.config.job_config_file)
            self.__parse_job_conf_xml(tree)
        except IOError:
            # Fall back to the pre-job_conf.xml style configuration kept in
            # the main Galaxy config file.
            log.warning( 'Job configuration "%s" does not exist, using legacy job configuration from Galaxy config file "%s" instead' % ( self.app.config.job_config_file, self.app.config.config_file ) )
            self.__parse_job_conf_legacy()
    def __parse_job_conf_xml(self, tree):
        """Loads the new-style job configuration from options in the job config file (by default, job_conf.xml).

        :param tree: Object representing the root ``<job_conf>`` object in the job config file.
        :type tree: ``xml.etree.ElementTree.Element``
        """
        root = tree.getroot()
        log.debug('Loading job configuration from %s' % self.app.config.job_config_file)

        # Parse job plugins
        plugins = root.find('plugins')
        if plugins is not None:
            for plugin in self.__findall_with_required(plugins, 'plugin', ('id', 'type', 'load')):
                if plugin.get('type') == 'runner':
                    # Worker count falls back from the plugin to the <plugins>
                    # tag to the class default.
                    workers = plugin.get('workers', plugins.get('workers', JobConfiguration.DEFAULT_NWORKERS))
                    runner_kwds = self.__get_params(plugin)
                    runner_info = dict(id=plugin.get('id'),
                                       load=plugin.get('load'),
                                       workers=int(workers),
                                       kwds=runner_kwds)
                    self.runner_plugins.append(runner_info)
                else:
                    log.error('Unknown plugin type: %s' % plugin.get('type'))
            # The 'dynamic' runner is special: its params are stored separately.
            for plugin in self.__findall_with_required(plugins, 'plugin', ('id', 'type')):
                if plugin.get('id') == 'dynamic' and plugin.get('type') == 'runner':
                    self.dynamic_params = self.__get_params(plugin)
        # Load tasks if configured
        if self.app.config.use_tasked_jobs:
            self.runner_plugins.append(dict(id='tasks', load='tasks', workers=self.app.config.local_task_queue_workers))

        # Parse handlers
        handlers = root.find('handlers')
        if handlers is not None:
            for handler in self.__findall_with_required(handlers, 'handler'):
                id = handler.get('id')
                if id in self.handlers:
                    log.error("Handler '%s' overlaps handler with the same name, ignoring" % id)
                else:
                    log.debug("Read definition for handler '%s'" % id)
                    # Real ids map to tuples; tags map to lists (see is_id()/is_tag()).
                    self.handlers[id] = (id,)
                    for plugin in handler.findall('plugin'):
                        if id not in self.handler_runner_plugins:
                            self.handler_runner_plugins[id] = []
                        self.handler_runner_plugins[id].append( plugin.get('id') )
                    if handler.get('tags', None) is not None:
                        for tag in [ x.strip() for x in handler.get('tags').split(',') ]:
                            if tag in self.handlers:
                                self.handlers[tag].append(id)
                            else:
                                self.handlers[tag] = [id]
        # Determine the default handler(s)
        self.default_handler_id = self.__get_default(handlers, self.handlers.keys())

        # Parse destinations
        destinations = root.find('destinations')
        job_metrics = self.app.job_metrics
        for destination in self.__findall_with_required(destinations, 'destination', ('id', 'runner')):
            id = destination.get('id')
            destination_metrics = destination.get( "metrics", None )
            if destination_metrics:
                # The 'metrics' attribute is either a boolean-ish string
                # (disable instrumentation) or a path to a metrics conf file.
                if not util.asbool( destination_metrics ):
                    # disable
                    job_metrics.set_destination_instrumenter( id, None )
                else:
                    metrics_conf_path = self.app.config.resolve_path( destination_metrics )
                    job_metrics.set_destination_conf_file( id, metrics_conf_path )
            else:
                # No attribute: metrics may instead be configured with a nested
                # <job_metrics> element.
                metrics_elements = self.__findall_with_required( destination, 'job_metrics', () )
                if metrics_elements:
                    job_metrics.set_destination_conf_element( id, metrics_elements[ 0 ] )
            job_destination = JobDestination(**dict(destination.items()))
            job_destination['params'] = self.__get_params(destination)
            job_destination['env'] = self.__get_envs(destination)
            job_destination['resubmit'] = self.__get_resubmits(destination)
            # As with handlers, ids map to tuples and tags map to lists.
            self.destinations[id] = (job_destination,)
            if job_destination.tags is not None:
                for tag in job_destination.tags:
                    if tag not in self.destinations:
                        self.destinations[tag] = []
                    self.destinations[tag].append(job_destination)
        # Determine the default destination
        self.default_destination_id = self.__get_default(destinations, self.destinations.keys())

        # Parse resources...
        resources = root.find('resources')
        if resources is not None:
            self.default_resource_group = resources.get( "default", None )
            for group in self.__findall_with_required(resources, 'group'):
                id = group.get('id')
                # Field names may be given in a 'fields' attribute or as the
                # element's text content.
                fields_str = group.get('fields', None) or group.text or ''
                fields = [ f for f in fields_str.split(",") if f ]
                self.resource_groups[ id ] = fields

        # Parse tool mappings
        tools = root.find('tools')
        if tools is not None:
            for tool in self.__findall_with_required(tools, 'tool'):
                # There can be multiple definitions with identical ids, but different params
                id = tool.get('id').lower().rstrip('/')
                if id not in self.tools:
                    self.tools[id] = list()
                self.tools[id].append(JobToolConfiguration(**dict(tool.items())))
                self.tools[id][-1]['params'] = self.__get_params(tool)

        # Coercion functions for limit values, keyed by limit type (default: str).
        types = dict(registered_user_concurrent_jobs=int,
                     anonymous_user_concurrent_jobs=int,
                     walltime=str,
                     output_size=util.size_to_bytes)

        self.limits = Bunch(registered_user_concurrent_jobs=None,
                            anonymous_user_concurrent_jobs=None,
                            walltime=None,
                            walltime_delta=None,
                            output_size=None,
                            destination_user_concurrent_jobs={},
                            destination_total_concurrent_jobs={})

        # Parse job limits
        limits = root.find('limits')
        if limits is not None:
            for limit in self.__findall_with_required(limits, 'limit', ('type',)):
                type = limit.get('type')
                # concurrent_jobs renamed to destination_user_concurrent_jobs in job_conf.xml
                if type in ( 'destination_user_concurrent_jobs', 'concurrent_jobs', 'destination_total_concurrent_jobs' ):
                    id = limit.get('tag', None) or limit.get('id')
                    if type == 'destination_total_concurrent_jobs':
                        self.limits.destination_total_concurrent_jobs[id] = int(limit.text)
                    else:
                        self.limits.destination_user_concurrent_jobs[id] = int(limit.text)
                elif limit.text:
                    self.limits.__dict__[type] = types.get(type, str)(limit.text)

        if self.limits.walltime is not None:
            # Convert the hh:mm:ss walltime string to a timedelta for comparisons.
            h, m, s = [ int( v ) for v in self.limits.walltime.split( ':' ) ]
            self.limits.walltime_delta = datetime.timedelta( 0, s, 0, 0, m, h )

        log.debug('Done loading job configuration')
    def __parse_job_conf_legacy(self):
        """Loads the old-style job configuration from options in the galaxy config file (by default, config/galaxy.ini).
        """
        log.debug('Loading job configuration from %s' % self.app.config.config_file)

        # Always load local and lwr
        self.runner_plugins = [dict(id='local', load='local', workers=self.app.config.local_job_queue_workers), dict(id='lwr', load='lwr', workers=self.app.config.cluster_job_queue_workers)]
        # Load tasks if configured
        if self.app.config.use_tasked_jobs:
            self.runner_plugins.append(dict(id='tasks', load='tasks', workers=self.app.config.local_task_queue_workers))
        for runner in self.app.config.start_job_runners:
            self.runner_plugins.append(dict(id=runner, load=runner, workers=self.app.config.cluster_job_queue_workers))

        # Set the handlers
        for id in self.app.config.job_handlers:
            self.handlers[id] = (id,)

        # 'default_job_handlers' acts as a tag (list) over the configured defaults.
        self.handlers['default_job_handlers'] = self.app.config.default_job_handlers
        self.default_handler_id = 'default_job_handlers'

        # Set tool handler configs
        for id, tool_handlers in self.app.config.tool_handlers.items():
            self.tools[id] = list()
            for handler_config in tool_handlers:
                # rename the 'name' key to 'handler'
                handler_config['handler'] = handler_config.pop('name')
                self.tools[id].append(JobToolConfiguration(**handler_config))

        # Set tool runner configs
        for id, tool_runners in self.app.config.tool_runners.items():
            # Might have been created in the handler parsing above
            if id not in self.tools:
                self.tools[id] = list()
            for runner_config in tool_runners:
                url = runner_config['url']
                if url not in self.destinations:
                    # Create a new "legacy" JobDestination - it will have its URL converted to a destination params once the appropriate plugin has loaded
                    self.destinations[url] = (JobDestination(id=url, runner=url.split(':', 1)[0], url=url, legacy=True, converted=False),)
                for tool_conf in self.tools[id]:
                    if tool_conf.params == runner_config.get('params', {}):
                        # Reuse the matching config created from the handlers
                        # section and point it at this URL.
                        tool_conf['destination'] = url
                        break
                else:
                    # There was not an existing config (from the handlers section) with the same params
                    # rename the 'url' key to 'destination'
                    runner_config['destination'] = runner_config.pop('url')
                    self.tools[id].append(JobToolConfiguration(**runner_config))

        # The default destination is also a legacy destination needing URL conversion.
        self.destinations[self.app.config.default_cluster_job_runner] = (JobDestination(id=self.app.config.default_cluster_job_runner, runner=self.app.config.default_cluster_job_runner.split(':', 1)[0], url=self.app.config.default_cluster_job_runner, legacy=True, converted=False),)
        self.default_destination_id = self.app.config.default_cluster_job_runner

        # Set the job limits
        self.limits = Bunch(registered_user_concurrent_jobs=self.app.config.registered_user_job_limit,
                            anonymous_user_concurrent_jobs=self.app.config.anonymous_user_job_limit,
                            walltime=self.app.config.job_walltime,
                            walltime_delta=self.app.config.job_walltime_delta,
                            output_size=self.app.config.output_size_limit,
                            destination_user_concurrent_jobs={},
                            destination_total_concurrent_jobs={})

        log.debug('Done loading job configuration')
def get_tool_resource_parameters( self, tool_id ):
""" Given a tool id, return XML elements describing parameters to
insert into job resources.
:tool id: A tool ID (a string)
:returns: List of parameter elements.
"""
fields = []
if not tool_id:
return fields
# TODO: Only works with exact matches, should handle different kinds of ids
# the way destination lookup does.
resource_group = None
if tool_id in self.tools:
resource_group = self.tools[ tool_id ][ 0 ].get_resource_group()
resource_group = resource_group or self.default_resource_group
if resource_group and resource_group in self.resource_groups:
fields_names = self.resource_groups[ resource_group ]
fields = [ self.resource_parameters[ n ] for n in fields_names ]
return fields
def __parse_resource_parameters( self ):
if not os.path.exists( self.app.config.job_resource_params_file ):
return
resource_definitions = util.parse_xml( self.app.config.job_resource_params_file )
resource_definitions_root = resource_definitions.getroot()
# TODO: Also handling conditionals would be awesome!
for parameter_elem in resource_definitions_root.findall( "param" ):
name = parameter_elem.get( "name" )
# Considered prepending __job_resource_param__ here and then
# stripping it off when making it available to dynamic job
# destination. Not needed because resource parameters are wrapped
# in a conditional.
## expanded_name = "__job_resource_param__%s" % name
## parameter_elem.set( "name", expanded_name )
self.resource_parameters[ name ] = parameter_elem
def __get_default(self, parent, names):
"""Returns the default attribute set in a parent tag like <handlers> or <destinations>, or return the ID of the child, if there is no explicit default and only one child.
:param parent: Object representing a tag that may or may not have a 'default' attribute.
:type parent: ``xml.etree.ElementTree.Element``
:param names: The list of destination or handler IDs or tags that were loaded.
:type names: list of str
:returns: str -- id or tag representing the default.
"""
rval = parent.get('default')
if rval is not None:
# If the parent element has a 'default' attribute, use the id or tag in that attribute
if rval not in names:
raise Exception("<%s> default attribute '%s' does not match a defined id or tag in a child element" % (parent.tag, rval))
log.debug("<%s> default set to child with id or tag '%s'" % (parent.tag, rval))
elif len(names) == 1:
log.info("Setting <%s> default to child with id '%s'" % (parent.tag, names[0]))
rval = names[0]
else:
raise Exception("No <%s> default specified, please specify a valid id or tag with the 'default' attribute" % parent.tag)
return rval
def __findall_with_required(self, parent, match, attribs=None):
"""Like ``xml.etree.ElementTree.Element.findall()``, except only returns children that have the specified attribs.
:param parent: Parent element in which to find.
:type parent: ``xml.etree.ElementTree.Element``
:param match: Name of child elements to find.
:type match: str
:param attribs: List of required attributes in children elements.
:type attribs: list of str
:returns: list of ``xml.etree.ElementTree.Element``
"""
rval = []
if attribs is None:
attribs = ('id',)
for elem in parent.findall(match):
for attrib in attribs:
if attrib not in elem.attrib:
log.warning("required '%s' attribute is missing from <%s> element" % (attrib, match))
break
else:
rval.append(elem)
return rval
def __get_params(self, parent):
"""Parses any child <param> tags in to a dictionary suitable for persistence.
:param parent: Parent element in which to find child <param> tags.
:type parent: ``xml.etree.ElementTree.Element``
:returns: dict
"""
rval = {}
for param in parent.findall('param'):
rval[param.get('id')] = param.text
return rval
def __get_envs(self, parent):
"""Parses any child <env> tags in to a dictionary suitable for persistence.
:param parent: Parent element in which to find child <env> tags.
:type parent: ``xml.etree.ElementTree.Element``
:returns: dict
"""
rval = []
for param in parent.findall('env'):
rval.append( dict(
name=param.get('id'),
file=param.get('file'),
execute=param.get('exec'),
value=param.text,
raw=util.asbool(param.get('raw', 'false'))
) )
return rval
def __get_resubmits(self, parent):
"""Parses any child <resubmit> tags in to a dictionary suitable for persistence.
:param parent: Parent element in which to find child <resubmit> tags.
:type parent: ``xml.etree.ElementTree.Element``
:returns: dict
"""
rval = []
for resubmit in parent.findall('resubmit'):
rval.append( dict(
condition=resubmit.get('condition'),
destination=resubmit.get('destination'),
handler=resubmit.get('handler')
) )
return rval
@property
def default_job_tool_configuration(self):
"""The default JobToolConfiguration, used if a tool does not have an explicit defintion in the configuration. It consists of a reference to the default handler and default destination.
:returns: JobToolConfiguration -- a representation of a <tool> element that uses the default handler and destination
"""
return JobToolConfiguration(id='default', handler=self.default_handler_id, destination=self.default_destination_id)
# Called upon instantiation of a Tool object
def get_job_tool_configurations(self, ids):
"""Get all configured JobToolConfigurations for a tool ID, or, if given a list of IDs, the JobToolConfigurations for the first id in ``ids`` matching a tool definition.
.. note::
You should not mix tool shed tool IDs, versionless tool shed IDs, and tool config tool IDs that refer to the same tool.
:param ids: Tool ID or IDs to fetch the JobToolConfiguration of.
:type ids: list or str.
:returns: list -- JobToolConfiguration Bunches representing <tool> elements matching the specified ID(s).
Example tool ID strings include:
* Full tool shed id: ``toolshed.example.org/repos/nate/filter_tool_repo/filter_tool/1.0.0``
* Tool shed id less version: ``toolshed.example.org/repos/nate/filter_tool_repo/filter_tool``
* Tool config tool id: ``filter_tool``
"""
rval = []
# listify if ids is a single (string) id
ids = util.listify(ids)
for id in ids:
if id in self.tools:
# If a tool has definitions that include job params but not a
# definition for jobs without params, include the default
# config
for job_tool_configuration in self.tools[id]:
if not job_tool_configuration.params:
break
else:
rval.append(self.default_job_tool_configuration)
rval.extend(self.tools[id])
break
else:
rval.append(self.default_job_tool_configuration)
return rval
def __get_single_item(self, collection):
"""Given a collection of handlers or destinations, return one item from the collection at random.
"""
# Done like this to avoid random under the assumption it's faster to avoid it
if len(collection) == 1:
return collection[0]
else:
return random.choice(collection)
# This is called by Tool.get_job_handler()
def get_handler(self, id_or_tag):
"""Given a handler ID or tag, return the provided ID or an ID matching the provided tag
:param id_or_tag: A handler ID or tag.
:type id_or_tag: str
:returns: str -- A valid job handler ID.
"""
if id_or_tag is None:
id_or_tag = self.default_handler_id
return self.__get_single_item(self.handlers[id_or_tag])
def get_destination(self, id_or_tag):
"""Given a destination ID or tag, return the JobDestination matching the provided ID or tag
:param id_or_tag: A destination ID or tag.
:type id_or_tag: str
:returns: JobDestination -- A valid destination
Destinations are deepcopied as they are expected to be passed in to job
runners, which will modify them for persisting params set at runtime.
"""
if id_or_tag is None:
id_or_tag = self.default_destination_id
return copy.deepcopy(self.__get_single_item(self.destinations[id_or_tag]))
def get_destinations(self, id_or_tag):
"""Given a destination ID or tag, return all JobDestinations matching the provided ID or tag
:param id_or_tag: A destination ID or tag.
:type id_or_tag: str
:returns: list or tuple of JobDestinations
Destinations are not deepcopied, so they should not be passed to
anything which might modify them.
"""
return self.destinations.get(id_or_tag, None)
    def get_job_runner_plugins(self, handler_id):
        """Load all configured job runner plugins

        :param handler_id: id of the handler loading plugins; a handler may
            restrict which runner plugins it loads via <plugin> child tags.
        :returns: dict mapping runner plugin id to an instantiated runner
        """
        rval = {}
        if handler_id in self.handler_runner_plugins:
            # This handler explicitly lists the runner plugins it uses.
            plugins_to_load = [ rp for rp in self.runner_plugins if rp['id'] in self.handler_runner_plugins[handler_id] ]
            log.info( "Handler '%s' will load specified runner plugins: %s", handler_id, ', '.join( [ rp['id'] for rp in plugins_to_load ] ) )
        else:
            plugins_to_load = self.runner_plugins
            log.info( "Handler '%s' will load all configured runner plugins", handler_id )
        for runner in plugins_to_load:
            class_names = []
            module = None
            id = runner['id']
            load = runner['load']
            if ':' in load:
                # Name to load was specified as '<module>:<class>'
                module_name, class_name = load.rsplit(':', 1)
                class_names = [ class_name ]
                module = __import__( module_name )
            else:
                # Name to load was specified as '<module>'
                if '.' not in load:
                    # For legacy reasons, try from galaxy.jobs.runners first if there's no '.' in the name
                    module_name = 'galaxy.jobs.runners.' + load
                    try:
                        module = __import__( module_name )
                    except ImportError:
                        # No such module, we'll retry without prepending galaxy.jobs.runners.
                        # All other exceptions (e.g. something wrong with the module code) will raise
                        pass
                if module is None:
                    # If the name included a '.' or loading from the static runners path failed, try the original name
                    module = __import__( load )
                    module_name = load
            if module is None:
                # Module couldn't be loaded, error should have already been displayed
                continue
            # __import__ returns the top-level package; walk down attribute
            # by attribute to the actual submodule.
            for comp in module_name.split( "." )[1:]:
                module = getattr( module, comp )
            if not class_names:
                # If there's not a ':', we check <module>.__all__ for class names
                try:
                    assert module.__all__
                    class_names = module.__all__
                except AssertionError:
                    log.error( 'Runner "%s" does not contain a list of exported classes in __all__' % load )
                    continue
            for class_name in class_names:
                runner_class = getattr( module, class_name )
                try:
                    assert issubclass(runner_class, BaseJobRunner)
                except TypeError:
                    log.warning("A non-class name was found in __all__, ignoring: %s" % id)
                    continue
                except AssertionError:
                    log.warning("Job runner classes must be subclassed from BaseJobRunner, %s has bases: %s" % (id, runner_class.__bases__))
                    continue
                try:
                    rval[id] = runner_class( self.app, runner[ 'workers' ], **runner.get( 'kwds', {} ) )
                except TypeError:
                    # Older-style runners take only the app; fall back for compatibility.
                    log.exception( "Job runner '%s:%s' has not been converted to a new-style runner or encountered TypeError on load" % ( module_name, class_name ) )
                    rval[id] = runner_class( self.app )
                log.debug( "Loaded job runner '%s:%s' as '%s'" % ( module_name, class_name, id ) )
        return rval
def is_id(self, collection):
"""Given a collection of handlers or destinations, indicate whether the collection represents a tag or a real ID
:param collection: A representation of a destination or handler
:type collection: tuple or list
:returns: bool
"""
return type(collection) == tuple
def is_tag(self, collection):
"""Given a collection of handlers or destinations, indicate whether the collection represents a tag or a real ID
:param collection: A representation of a destination or handler
:type collection: tuple or list
:returns: bool
"""
return type(collection) == list
def is_handler(self, server_name):
"""Given a server name, indicate whether the server is a job handler
:param server_name: The name to check
:type server_name: str
:return: bool
"""
for collection in self.handlers.values():
if server_name in collection:
return True
return False
def convert_legacy_destinations(self, job_runners):
"""Converts legacy (from a URL) destinations to contain the appropriate runner params defined in the URL.
:param job_runners: All loaded job runner plugins.
:type job_runners: list of job runner plugins
"""
for id, destination in [ ( id, destinations[0] ) for id, destinations in self.destinations.items() if self.is_id(destinations) ]:
# Only need to deal with real destinations, not members of tags
if destination.legacy and not destination.converted:
if destination.runner in job_runners:
destination.params = job_runners[destination.runner].url_to_destination(destination.url).params
destination.converted = True
if destination.params:
log.debug("Legacy destination with id '%s', url '%s' converted, got params:" % (id, destination.url))
for k, v in destination.params.items():
log.debug(" %s: %s" % (k, v))
else:
log.debug("Legacy destination with id '%s', url '%s' converted, got params:" % (id, destination.url))
else:
log.warning("Legacy destination with id '%s' could not be converted: Unknown runner plugin: %s" % (id, destination.runner))
class JobWrapper( object ):
"""
Wraps a 'model.Job' with convenience methods for running processes and
state management.
"""
    def __init__( self, job, queue, use_persisted_destination=False ):
        """Wrap ``job`` for state management and execution.

        :param job: the model.Job to wrap.
        :param queue: the job queue; supplies the app, toolbox, and dispatcher.
        :param use_persisted_destination: when True, prime the runner mapper
            with the destination previously persisted on the job instead of
            re-running destination mapping.
        """
        self.job_id = job.id
        self.session_id = job.session_id
        self.user_id = job.user_id
        self.tool = queue.app.toolbox.tools_by_id.get( job.tool_id, None )
        self.queue = queue
        self.app = queue.app
        self.sa_session = self.app.model.context
        self.extra_filenames = []
        self.command_line = None
        # Tool versioning variables
        self.write_version_cmd = None
        self.version_string = ""
        self.galaxy_lib_dir = None
        # With job outputs in the working directory, we need the working
        # directory to be set before prepare is run, or else premature deletion
        # and job recovery fail.
        # Create the working dir if necessary
        try:
            self.app.object_store.create(job, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
            self.working_directory = self.app.object_store.get_filename(job, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
            log.debug('(%s) Working directory for job is: %s' % (self.job_id, self.working_directory))
        except ObjectInvalid:
            raise Exception('Unable to create job working directory, job failure')
        self.dataset_path_rewriter = self._job_dataset_path_rewriter( self.working_directory )
        self.output_paths = None
        self.output_hdas_and_paths = None
        self.tool_provided_job_metadata = None
        # Wrapper holding the info required to restore and clean up from files used for setting metadata externally
        self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job )
        self.job_runner_mapper = JobRunnerMapper( self, queue.dispatcher.url_to_destination, self.app.job_config )
        self.params = None
        if job.params:
            # Job params are persisted as JSON; restore them to a dict.
            self.params = loads( job.params )
        if use_persisted_destination:
            self.job_runner_mapper.cached_job_destination = JobDestination( from_job=job )
        self.__user_system_pwent = None
        self.__galaxy_system_pwent = None
def _job_dataset_path_rewriter( self, working_directory ):
if self.app.config.outputs_to_working_directory:
dataset_path_rewriter = OutputsToWorkingDirectoryPathRewriter( working_directory )
else:
dataset_path_rewriter = NullDatasetPathRewriter( )
return dataset_path_rewriter
def can_split( self ):
# Should the job handler split this job up?
return self.app.config.use_tasked_jobs and self.tool.parallelism
def get_job_runner_url( self ):
log.warning('(%s) Job runner URLs are deprecated, use destinations instead.' % self.job_id)
return self.job_destination.url
def get_parallelism(self):
return self.tool.parallelism
# legacy naming
get_job_runner = get_job_runner_url
@property
def job_destination(self):
"""Return the JobDestination that this job will use to run. This will
either be a configured destination, a randomly selected destination if
the configured destination was a tag, or a dynamically generated
destination from the dynamic runner.
Calling this method for the first time causes the dynamic runner to do
its calculation, if any.
:returns: ``JobDestination``
"""
return self.job_runner_mapper.get_job_destination(self.params)
def get_job( self ):
return self.sa_session.query( model.Job ).get( self.job_id )
def get_id_tag(self):
# For compatability with drmaa, which uses job_id right now, and TaskWrapper
return self.get_job().get_id_tag()
def get_param_dict( self ):
"""
Restore the dictionary of parameters from the database.
"""
job = self.get_job()
param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )
param_dict = self.tool.params_from_strings( param_dict, self.app )
return param_dict
def get_version_string_path( self ):
return os.path.abspath(os.path.join(self.app.config.new_file_path, "GALAXY_VERSION_STRING_%s" % self.job_id))
    def prepare( self, compute_environment=None ):
        """
        Prepare the job to run by creating the working directory and the
        config files.

        :param compute_environment: optional ComputeEnvironment; defaults to a
            SharedComputeEnvironment for this job.
        :returns: list of extra config filenames written for the job.
        """
        self.sa_session.expunge_all()  # this prevents the metadata reverting that has been seen in conjunction with the PBS job runner
        if not os.path.exists( self.working_directory ):
            os.mkdir( self.working_directory )
        job = self._load_job()

        def get_special( ):
            # Export-history and genome-index jobs need special input handling.
            special = self.sa_session.query( model.JobExportHistoryArchive ).filter_by( job=job ).first()
            if not special:
                special = self.sa_session.query( model.GenomeIndexToolData ).filter_by( job=job ).first()
            return special

        tool_evaluator = self._get_tool_evaluator( job )
        compute_environment = compute_environment or self.default_compute_environment( job )
        tool_evaluator.set_compute_environment( compute_environment, get_special=get_special )

        self.sa_session.flush()

        self.command_line, self.extra_filenames = tool_evaluator.build()
        # FIXME: for now, tools get Galaxy's lib dir in their path
        if self.command_line and self.command_line.startswith( 'python' ):
            self.galaxy_lib_dir = os.path.abspath( "lib" )  # cwd = galaxy root
        # Shell fragment to inject dependencies
        self.dependency_shell_commands = self.tool.build_dependency_shell_commands()
        # We need command_line persisted to the db in order for Galaxy to re-queue the job
        # if the server was stopped and restarted before the job finished
        job.command_line = self.command_line
        self.sa_session.add( job )
        self.sa_session.flush()
        # Return list of all extra files
        self.param_dict = tool_evaluator.param_dict
        version_string_cmd = self.tool.version_string_cmd
        if version_string_cmd:
            # Capture the tool's version string (stdout+stderr) to a file.
            self.write_version_cmd = "%s > %s 2>&1" % ( version_string_cmd, compute_environment.version_path() )
        else:
            self.write_version_cmd = None
        return self.extra_filenames
def default_compute_environment( self, job=None ):
if not job:
job = self.get_job()
return SharedComputeEnvironment( self, job )
def _load_job( self ):
# Load job from database and verify it has user or session.
# Restore parameters from the database
job = self.get_job()
if job.user is None and job.galaxy_session is None:
raise Exception( 'Job %s has no user and no session.' % job.id )
return job
def _get_tool_evaluator( self, job ):
# Hacky way to avoid cirular import for now.
# Placing ToolEvaluator in either jobs or tools
# result in ciruclar dependency.
from galaxy.tools.evaluation import ToolEvaluator
tool_evaluator = ToolEvaluator(
app=self.app,
job=job,
tool=self.tool,
local_working_directory=self.working_directory,
)
return tool_evaluator
    def fail( self, message, exception=False, stdout="", stderr="", exit_code=None ):
        """
        Indicate job failure by setting state and message on all output
        datasets.

        :param message: error message recorded on the job and its outputs.
        :param exception: True when called from an exception handler; the
            traceback is saved and the tool may supply a better message.
        :param stdout: tool stdout to persist (truncated to the DB limit).
        :param stderr: tool stderr to persist (truncated to the DB limit).
        :param exit_code: tool exit code, or None if unknown.
        """
        job = self.get_job()
        self.sa_session.refresh( job )
        # if the job was deleted, don't fail it
        if not job.state == job.states.DELETED:
            # Check if the failure is due to an exception
            if exception:
                # Save the traceback immediately in case we generate another
                # below
                job.traceback = traceback.format_exc()
                # Get the exception and let the tool attempt to generate
                # a better message
                etype, evalue, tb = sys.exc_info()
                m = self.tool.handle_job_failure_exception( evalue )
                if m:
                    message = m
            if self.app.config.outputs_to_working_directory:
                # Outputs were written to the working directory; move whatever
                # was produced back to the real dataset paths.
                for dataset_path in self.get_output_fnames():
                    try:
                        shutil.move( dataset_path.false_path, dataset_path.real_path )
                        log.debug( "fail(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
                    except ( IOError, OSError ), e:
                        log.error( "fail(): Missing output file in working directory: %s" % e )
            for dataset_assoc in job.output_datasets + job.output_library_datasets:
                dataset = dataset_assoc.dataset
                self.sa_session.refresh( dataset )
                dataset.state = dataset.states.ERROR
                dataset.blurb = 'tool error'
                dataset.info = message
                dataset.set_size()
                dataset.dataset.set_total_size()
                dataset.mark_unhidden()
                if dataset.ext == 'auto':
                    dataset.extension = 'data'
                # Update (non-library) job output datasets through the object store
                if dataset not in job.output_library_datasets:
                    self.app.object_store.update_from_file(dataset.dataset, create=True)
                # Pause any dependent jobs (and those jobs' outputs)
                for dep_job_assoc in dataset.dependent_jobs:
                    self.pause( dep_job_assoc.job, "Execution of this dataset's job is paused because its input datasets are in an error state." )
                self.sa_session.add( dataset )
                self.sa_session.flush()
            job.state = job.states.ERROR
            job.command_line = self.command_line
            job.info = message
            # TODO: Put setting the stdout, stderr, and exit code in one place
            # (not duplicated with the finish method).
            if ( len( stdout ) > DATABASE_MAX_STRING_SIZE ):
                stdout = util.shrink_string_by_size( stdout, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
                log.info( "stdout for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
            job.stdout = stdout
            if ( len( stderr ) > DATABASE_MAX_STRING_SIZE ):
                stderr = util.shrink_string_by_size( stderr, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
                log.info( "stderr for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
            job.stderr = stderr
            # Let the exit code be Null if one is not provided:
            if ( exit_code != None ):
                job.exit_code = exit_code
            self.sa_session.add( job )
            self.sa_session.flush()
        # Perform email action even on failure.
        for pja in [pjaa.post_job_action for pjaa in job.post_job_actions if pjaa.post_job_action.action_type == "EmailAction"]:
            ActionBox.execute(self.app, self.sa_session, pja, job)
        # If the job was deleted, call tool specific fail actions (used for e.g. external metadata) and clean up
        if self.tool:
            self.tool.job_failed( self, message, exception )
        delete_files = self.app.config.cleanup_job == 'always' or (self.app.config.cleanup_job == 'onsuccess' and job.state == job.states.DELETED)
        self.cleanup( delete_files=delete_files )
def pause( self, job=None, message=None ):
if job is None:
job = self.get_job()
if message is None:
message = "Execution of this dataset's job is paused"
if job.state == job.states.NEW:
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset_assoc.dataset.dataset.state = dataset_assoc.dataset.dataset.states.PAUSED
dataset_assoc.dataset.info = message
self.sa_session.add( dataset_assoc.dataset )
job.state = job.states.PAUSED
self.sa_session.add( job )
def mark_as_resubmitted( self ):
job = self.get_job()
self.sa_session.refresh( job )
for dataset in [ dataset_assoc.dataset for dataset_assoc in job.output_datasets + job.output_library_datasets ]:
dataset._state = model.Dataset.states.RESUBMITTED
self.sa_session.add( dataset )
job.state = model.Job.states.RESUBMITTED
self.sa_session.add( job )
self.sa_session.flush()
def change_state( self, state, info=False ):
job = self.get_job()
self.sa_session.refresh( job )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset = dataset_assoc.dataset
self.sa_session.refresh( dataset )
dataset.state = state
if info:
dataset.info = info
self.sa_session.add( dataset )
self.sa_session.flush()
if info:
job.info = info
job.state = state
self.sa_session.add( job )
self.sa_session.flush()
def get_state( self ):
job = self.get_job()
self.sa_session.refresh( job )
return job.state
    def set_runner( self, runner_url, external_id ):
        """Deprecated shim kept for old runners; delegates to set_job_destination().

        Note the runner_url argument is ignored -- the wrapper's current
        job_destination is persisted instead.
        """
        log.warning('set_runner() is deprecated, use set_job_destination()')
        self.set_job_destination(self.job_destination, external_id)
def set_job_destination( self, job_destination, external_id=None ):
"""
Persist job destination params in the database for recovery.
self.job_destination is not used because a runner may choose to rewrite
parts of the destination (e.g. the params).
"""
job = self.get_job()
self.sa_session.refresh(job)
log.debug('(%s) Persisting job destination (destination id: %s)' % (job.id, job_destination.id))
job.destination_id = job_destination.id
job.destination_params = job_destination.params
job.job_runner_name = job_destination.runner
job.job_runner_external_id = external_id
self.sa_session.add(job)
self.sa_session.flush()
    def finish( self, stdout, stderr, tool_exit_code=None, remote_working_directory=None ):
        """
        Called to indicate that the associated command has been run. Updates
        the output datasets based on stderr and stdout from the command, and
        the contents of the output files.

        :param stdout: standard output captured from the tool command
        :param stderr: standard error captured from the tool command
        :param tool_exit_code: the tool process exit code, or None if unknown
        :param remote_working_directory: when set, metadata paths under this
            directory are rewritten to the local working directory
        """
        stdout = unicodify( stdout )
        stderr = unicodify( stderr )
        # default post job setup
        self.sa_session.expunge_all()
        job = self.get_job()
        # TODO: After failing here, consider returning from the function.
        try:
            self.reclaim_ownership()
        except:
            log.exception( '(%s) Failed to change ownership of %s, failing' % ( job.id, self.working_directory ) )
            return self.fail( job.info, stdout=stdout, stderr=stderr, exit_code=tool_exit_code )
        # if the job was deleted, don't finish it
        if job.state == job.states.DELETED or job.state == job.states.ERROR:
            # SM: Note that, at this point, the exit code must be saved in case
            # there was an error. Errors caught here could mean that the job
            # was deleted by an administrator (based on old comments), but it
            # could also mean that a job was broken up into tasks and one of
            # the tasks failed. So include the stderr, stdout, and exit code:
            return self.fail( job.info, stderr=stderr, stdout=stdout, exit_code=tool_exit_code )
        # Check the tool's stdout, stderr, and exit code for errors, but only
        # if the job has not already been marked as having an error.
        # The job's stdout and stderr will be set accordingly.
        # We set final_job_state to use for dataset management, but *don't* set
        # job.state until after dataset collection to prevent history issues
        if ( self.check_tool_output( stdout, stderr, tool_exit_code, job ) ):
            final_job_state = job.states.OK
        else:
            final_job_state = job.states.ERROR
        # Collect the tool version string written by the version command, if any.
        if self.write_version_cmd:
            version_filename = self.get_version_string_path()
            if os.path.exists(version_filename):
                self.version_string = open(version_filename).read()
                os.unlink(version_filename)
        # When outputs were written to the working directory, move them to
        # their final locations (skipped for link-only library uploads).
        if self.app.config.outputs_to_working_directory and not self.__link_file_check():
            for dataset_path in self.get_output_fnames():
                try:
                    shutil.move( dataset_path.false_path, dataset_path.real_path )
                    log.debug( "finish(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
                except ( IOError, OSError ):
                    # this can happen if Galaxy is restarted during the job's
                    # finish method - the false_path file has already moved,
                    # and when the job is recovered, it won't be found.
                    if os.path.exists( dataset_path.real_path ) and os.stat( dataset_path.real_path ).st_size > 0:
                        log.warning( "finish(): %s not found, but %s is not empty, so it will be used instead" % ( dataset_path.false_path, dataset_path.real_path ) )
                    else:
                        # Prior to fail we need to set job.state
                        job.state = final_job_state
                        return self.fail( "Job %s's output dataset(s) could not be read" % job.id )
        # Collect metadata and finalize per-dataset state for every output.
        job_context = ExpressionContext( dict( stdout=job.stdout, stderr=job.stderr ) )
        for dataset_assoc in job.output_datasets + job.output_library_datasets:
            context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
            #should this also be checking library associations? - can a library item be added from a history before the job has ended? - lets not allow this to occur
            for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations:  # need to update all associated output hdas, i.e. history was shared with job running
                trynum = 0
                while trynum < self.app.config.retry_job_output_collection:
                    try:
                        # Attempt to short circuit NFS attribute caching
                        os.stat( dataset.dataset.file_name )
                        os.chown( dataset.dataset.file_name, os.getuid(), -1 )
                        trynum = self.app.config.retry_job_output_collection
                    except ( OSError, ObjectNotFound ), e:
                        trynum += 1
                        log.warning( 'Error accessing %s, will retry: %s', dataset.dataset.file_name, e )
                        time.sleep( 2 )
                if getattr( dataset, "hidden_beneath_collection_instance", None ):
                    dataset.visible = False
                dataset.blurb = 'done'
                dataset.peek = 'no peek'
                dataset.info = (dataset.info or '')
                if context['stdout'].strip():
                    #Ensure white space between entries
                    dataset.info = dataset.info.rstrip() + "\n" + context['stdout'].strip()
                if context['stderr'].strip():
                    #Ensure white space between entries
                    dataset.info = dataset.info.rstrip() + "\n" + context['stderr'].strip()
                dataset.tool_version = self.version_string
                dataset.set_size()
                if 'uuid' in context:
                    dataset.dataset.uuid = context['uuid']
                # Update (non-library) job output datasets through the object store
                if dataset not in job.output_library_datasets:
                    self.app.object_store.update_from_file(dataset.dataset, create=True)
                if job.states.ERROR == final_job_state:
                    dataset.blurb = "error"
                    dataset.mark_unhidden()
                elif dataset.has_data():
                    # If the tool was expected to set the extension, attempt to retrieve it
                    if dataset.ext == 'auto':
                        dataset.extension = context.get( 'ext', 'data' )
                        dataset.init_meta( copy_from=dataset )
                    #if a dataset was copied, it won't appear in our dictionary:
                    #either use the metadata from originating output dataset, or call set_meta on the copies
                    #it would be quicker to just copy the metadata from the originating output dataset,
                    #but somewhat trickier (need to recurse up the copied_from tree), for now we'll call set_meta()
                    if ( not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and self.app.config.retry_metadata_internally ):
                        dataset.datatype.set_meta( dataset, overwrite=False )  # call datatype.set_meta directly for the initial set_meta call during dataset creation
                    elif not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and job.states.ERROR != final_job_state:
                        dataset._state = model.Dataset.states.FAILED_METADATA
                    else:
                        #load metadata from file
                        #we need to no longer allow metadata to be edited while the job is still running,
                        #since if it is edited, the metadata changed on the running output will no longer match
                        #the metadata that was stored to disk for use via the external process,
                        #and the changes made by the user will be lost, without warning or notice
                        output_filename = self.external_output_metadata.get_output_filenames_by_dataset( dataset, self.sa_session ).filename_out

                        def path_rewriter( path ):
                            # Map paths written on the remote host back to local
                            # working-directory paths before loading metadata.
                            if not remote_working_directory or not path:
                                return path
                            normalized_remote_working_directory = os.path.normpath( remote_working_directory )
                            normalized_path = os.path.normpath( path )
                            if normalized_path.startswith( normalized_remote_working_directory ):
                                return normalized_path.replace( normalized_remote_working_directory, self.working_directory, 1 )
                            return path

                        dataset.metadata.from_JSON_dict( output_filename, path_rewriter=path_rewriter )
                    try:
                        assert context.get( 'line_count', None ) is not None
                        if ( not dataset.datatype.composite_type and dataset.dataset.is_multi_byte() ) or self.tool.is_multi_byte:
                            dataset.set_peek( line_count=context['line_count'], is_multi_byte=True )
                        else:
                            dataset.set_peek( line_count=context['line_count'] )
                    except:
                        if ( not dataset.datatype.composite_type and dataset.dataset.is_multi_byte() ) or self.tool.is_multi_byte:
                            dataset.set_peek( is_multi_byte=True )
                        else:
                            dataset.set_peek()
                    try:
                        # set the name if provided by the tool
                        dataset.name = context['name']
                    except:
                        pass
                else:
                    dataset.blurb = "empty"
                    if dataset.ext == 'auto':
                        dataset.extension = 'txt'
                self.sa_session.add( dataset )
            if job.states.ERROR == final_job_state:
                log.debug( "setting dataset state to ERROR" )
                # TODO: This is where the state is being set to error. Change it!
                dataset_assoc.dataset.dataset.state = model.Dataset.states.ERROR
                # Pause any dependent jobs (and those jobs' outputs)
                for dep_job_assoc in dataset_assoc.dataset.dependent_jobs:
                    self.pause( dep_job_assoc.job, "Execution of this dataset's job is paused because its input datasets are in an error state." )
            else:
                dataset_assoc.dataset.dataset.state = model.Dataset.states.OK
            # If any of the rest of the finish method below raises an
            # exception, the fail method will run and set the datasets to
            # ERROR.  The user will never see that the datasets are in error if
            # they were flushed as OK here, since upon doing so, the history
            # panel stops checking for updates.  So allow the
            # self.sa_session.flush() at the bottom of this method set
            # the state instead.
        for pja in job.post_job_actions:
            ActionBox.execute(self.app, self.sa_session, pja.post_job_action, job)
        # Flush all the dataset and job changes above.  Dataset state changes
        # will now be seen by the user.
        self.sa_session.flush()
        # Save stdout and stderr
        if len( job.stdout ) > DATABASE_MAX_STRING_SIZE:
            log.info( "stdout for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
        job.stdout = util.shrink_string_by_size( job.stdout, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
        if len( job.stderr ) > DATABASE_MAX_STRING_SIZE:
            log.info( "stderr for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
        job.stderr = util.shrink_string_by_size( job.stderr, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
        # The exit code will be null if there is no exit code to be set.
        # This is so that we don't assign an exit code, such as 0, that
        # is either incorrect or has the wrong semantics.
        if None != tool_exit_code:
            job.exit_code = tool_exit_code
        # custom post process setup
        inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
        out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
        inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] )
        out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
        input_ext = 'data'
        for _, data in inp_data.items():
            # For loop odd, but sort simulating behavior in galaxy.tools.actions
            if not data:
                continue
            input_ext = data.ext
        param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )  # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows
        param_dict = self.tool.params_from_strings( param_dict, self.app )
        # Check for and move associated_files
        self.tool.collect_associated_files(out_data, self.working_directory)
        # Create generated output children and primary datasets and add to param_dict
        collected_datasets = {
            'children': self.tool.collect_child_datasets(out_data, self.working_directory),
            'primary': self.tool.collect_primary_datasets(out_data, self.working_directory, input_ext)
        }
        param_dict.update({'__collected_datasets__': collected_datasets})
        # Certain tools require tasks to be completed after job execution
        # ( this used to be performed in the "exec_after_process" hook, but hooks are deprecated ).
        self.tool.exec_after_process( self.queue.app, inp_data, out_data, param_dict, job=job )
        # Call 'exec_after_process' hook
        self.tool.call_hook( 'exec_after_process', self.queue.app, inp_data=inp_data,
                             out_data=out_data, param_dict=param_dict,
                             tool=self.tool, stdout=job.stdout, stderr=job.stderr )
        job.command_line = self.command_line
        bytes = 0
        # Once datasets are collected, set the total dataset size (includes extra files)
        for dataset_assoc in job.output_datasets:
            dataset_assoc.dataset.dataset.set_total_size()
            bytes += dataset_assoc.dataset.dataset.get_total_size()
        if job.user:
            job.user.total_disk_usage += bytes
        # fix permissions
        for path in [ dp.real_path for dp in self.get_mutable_output_fnames() ]:
            util.umask_fix_perms( path, self.app.config.umask, 0666, self.app.config.gid )
        # Finally set the job state.  This should only happen *after* all
        # dataset creation, and will allow us to eliminate force_history_refresh.
        job.state = final_job_state
        if not job.tasks:
            # If job was composed of tasks, don't attempt to recollect statisitcs
            self._collect_metrics( job )
        self.sa_session.flush()
        log.debug( 'job %d ended' % self.job_id )
        delete_files = self.app.config.cleanup_job == 'always' or ( job.state == job.states.OK and self.app.config.cleanup_job == 'onsuccess' )
        self.cleanup( delete_files=delete_files )
    def check_tool_output( self, stdout, stderr, tool_exit_code, job ):
        """Return True when the tool run should be considered successful,
        based on its stdout, stderr, and exit code; delegates to check_output().
        """
        return check_output( self.tool, stdout, stderr, tool_exit_code, job )
def cleanup( self, delete_files=True ):
# At least one of these tool cleanup actions (job import), is needed
# for thetool to work properly, that is why one might want to run
# cleanup but not delete files.
try:
if delete_files:
for fname in self.extra_filenames:
os.remove( fname )
self.external_output_metadata.cleanup_external_metadata( self.sa_session )
galaxy.tools.imp_exp.JobExportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session )
galaxy.tools.imp_exp.JobImportHistoryArchiveWrapper( self.app, self.job_id ).cleanup_after_job()
if delete_files:
self.app.object_store.delete(self.get_job(), base_dir='job_work', entire_dir=True, dir_only=True, extra_dir=str(self.job_id))
except:
log.exception( "Unable to cleanup job %d" % self.job_id )
def _collect_metrics( self, has_metrics ):
job = has_metrics.get_job()
per_plugin_properties = self.app.job_metrics.collect_properties( job.destination_id, self.job_id, self.working_directory )
if per_plugin_properties:
log.info( "Collecting job metrics for %s" % has_metrics )
for plugin, properties in per_plugin_properties.iteritems():
for metric_name, metric_value in properties.iteritems():
if metric_value is not None:
has_metrics.add_metric( plugin, metric_name, metric_value )
def get_output_sizes( self ):
sizes = []
output_paths = self.get_output_fnames()
for outfile in [ str( o ) for o in output_paths ]:
if os.path.exists( outfile ):
sizes.append( ( outfile, os.stat( outfile ).st_size ) )
else:
sizes.append( ( outfile, 0 ) )
return sizes
def check_limits(self, runtime=None):
if self.app.job_config.limits.output_size > 0:
for outfile, size in self.get_output_sizes():
if size > self.app.job_config.limits.output_size:
log.warning( '(%s) Job output size %s has exceeded the global output size limit', self.get_id_tag(), os.path.basename( outfile ) )
return JobState.runner_states.OUTPUT_SIZE_LIMIT, 'Job output file grew too large (greater than %s), please try different inputs or parameters' % util.nice_size( self.app.job_config.limits.output_size )
if self.app.job_config.limits.walltime_delta is not None and runtime is not None:
if runtime > self.app.job_config.limits.walltime_delta:
log.warning( '(%s) Job runtime %s has exceeded the global walltime, it will be terminated', self.get_id_tag(), runtime )
return JobState.runner_states.GLOBAL_WALLTIME_REACHED, 'Job ran longer than the maximum allowed execution time (runtime: %s, limit: %s), please try different inputs or parameters' % ( str(runtime).split('.')[0], self.app.job_config.limits.walltime )
return None
def has_limits( self ):
has_output_limit = self.app.job_config.limits.output_size > 0
has_walltime_limit = self.app.job_config.limits.walltime_delta is not None
return has_output_limit or has_walltime_limit
    def get_command_line( self ):
        """Return the command line built for this job."""
        return self.command_line
    def get_session_id( self ):
        """Return this wrapper's session id."""
        return self.session_id
def get_env_setup_clause( self ):
if self.app.config.environment_setup_file is None:
return ''
return '[ -f "%s" ] && . %s' % ( self.app.config.environment_setup_file, self.app.config.environment_setup_file )
def get_input_dataset_fnames( self, ds ):
filenames = []
filenames = [ ds.file_name ]
#we will need to stage in metadata file names also
#TODO: would be better to only stage in metadata files that are actually needed (found in command line, referenced in config files, etc.)
for key, value in ds.metadata.items():
if isinstance( value, model.MetadataFile ):
filenames.append( value.file_name )
return filenames
def get_input_fnames( self ):
job = self.get_job()
filenames = []
for da in job.input_datasets + job.input_library_datasets: # da is JobToInputDatasetAssociation object
if da.dataset:
filenames.extend(self.get_input_dataset_fnames(da.dataset))
return filenames
def get_input_paths( self, job=None ):
if job is None:
job = self.get_job()
paths = []
for da in job.input_datasets + job.input_library_datasets: # da is JobToInputDatasetAssociation object
if da.dataset:
filenames = self.get_input_dataset_fnames(da.dataset)
for real_path in filenames:
false_path = self.dataset_path_rewriter.rewrite_dataset_path( da.dataset, 'input' )
paths.append( DatasetPath( da.id, real_path=real_path, false_path=false_path, mutable=False ) )
return paths
def get_output_fnames( self ):
if self.output_paths is None:
self.compute_outputs()
return self.output_paths
def get_mutable_output_fnames( self ):
if self.output_paths is None:
self.compute_outputs()
return filter( lambda dsp: dsp.mutable, self.output_paths )
def get_output_hdas_and_fnames( self ):
if self.output_hdas_and_paths is None:
self.compute_outputs()
return self.output_hdas_and_paths
def compute_outputs( self ) :
dataset_path_rewriter = self.dataset_path_rewriter
job = self.get_job()
# Job output datasets are combination of history, library, and jeha datasets.
special = self.sa_session.query( model.JobExportHistoryArchive ).filter_by( job=job ).first()
false_path = None
results = []
for da in job.output_datasets + job.output_library_datasets:
da_false_path = dataset_path_rewriter.rewrite_dataset_path( da.dataset, 'output' )
mutable = da.dataset.dataset.external_filename is None
dataset_path = DatasetPath( da.dataset.dataset.id, da.dataset.file_name, false_path=da_false_path, mutable=mutable )
results.append( ( da.name, da.dataset, dataset_path ) )
self.output_paths = [t[2] for t in results]
self.output_hdas_and_paths = dict([(t[0], t[1:]) for t in results])
if special:
false_path = dataset_path_rewriter.rewrite_dataset_path( special.dataset, 'output' )
dsp = DatasetPath( special.dataset.id, special.dataset.file_name, false_path )
self.output_paths.append( dsp )
return self.output_paths
def get_output_file_id( self, file ):
if self.output_paths is None:
self.get_output_fnames()
for dp in self.output_paths:
if self.app.config.outputs_to_working_directory and os.path.basename( dp.false_path ) == file:
return dp.dataset_id
elif os.path.basename( dp.real_path ) == file:
return dp.dataset_id
return None
def get_tool_provided_job_metadata( self ):
if self.tool_provided_job_metadata is not None:
return self.tool_provided_job_metadata
# Look for JSONified job metadata
self.tool_provided_job_metadata = []
meta_file = os.path.join( self.working_directory, TOOL_PROVIDED_JOB_METADATA_FILE )
if os.path.exists( meta_file ):
for line in open( meta_file, 'r' ):
try:
line = loads( line )
assert 'type' in line
except:
log.exception( '(%s) Got JSON data from tool, but data is improperly formatted or no "type" key in data' % self.job_id )
log.debug( 'Offending data was: %s' % line )
continue
# Set the dataset id if it's a dataset entry and isn't set.
# This isn't insecure. We loop the job's output datasets in
# the finish method, so if a tool writes out metadata for a
# dataset id that it doesn't own, it'll just be ignored.
if line['type'] == 'dataset' and 'dataset_id' not in line:
try:
line['dataset_id'] = self.get_output_file_id( line['dataset'] )
except KeyError:
log.warning( '(%s) Tool provided job dataset-specific metadata without specifying a dataset' % self.job_id )
continue
self.tool_provided_job_metadata.append( line )
return self.tool_provided_job_metadata
def get_dataset_finish_context( self, job_context, dataset ):
for meta in self.get_tool_provided_job_metadata():
if meta['type'] == 'dataset' and meta['dataset_id'] == dataset.id:
return ExpressionContext( meta, job_context )
return job_context
    def setup_external_metadata( self, exec_dir=None, tmp_dir=None, dataset_files_path=None, config_root=None, config_file=None, datatypes_config=None, set_extension=True, **kwds ):
        """
        Prepare external (out-of-process) metadata setting for all of this
        job's output datasets.  Unset path/config arguments default to this
        app's configuration.  When *set_extension* is True, output datasets
        whose extension is still 'auto' first get one from the tool-provided
        finish context.
        """
        # extension could still be 'auto' if this is the upload tool.
        job = self.get_job()
        if set_extension:
            for output_dataset_assoc in job.output_datasets:
                if output_dataset_assoc.dataset.ext == 'auto':
                    context = self.get_dataset_finish_context( dict(), output_dataset_assoc.dataset.dataset )
                    output_dataset_assoc.dataset.extension = context.get( 'ext', 'data' )
            self.sa_session.flush()
        if tmp_dir is None:
            #this dir should should relative to the exec_dir
            tmp_dir = self.app.config.new_file_path
        if dataset_files_path is None:
            dataset_files_path = self.app.model.Dataset.file_path
        if config_root is None:
            config_root = self.app.config.root
        if config_file is None:
            config_file = self.app.config.config_file
        if datatypes_config is None:
            datatypes_config = self.app.datatypes_registry.integrated_datatypes_configs
        return self.external_output_metadata.setup_external_metadata( [ output_dataset_assoc.dataset for output_dataset_assoc in job.output_datasets + job.output_library_datasets ],
                                                                      self.sa_session,
                                                                      exec_dir=exec_dir,
                                                                      tmp_dir=tmp_dir,
                                                                      dataset_files_path=dataset_files_path,
                                                                      config_root=config_root,
                                                                      config_file=config_file,
                                                                      datatypes_config=datatypes_config,
                                                                      job_metadata=os.path.join( self.working_directory, TOOL_PROVIDED_JOB_METADATA_FILE ),
                                                                      **kwds )
@property
def user( self ):
job = self.get_job()
if job.user is not None:
return job.user.email
elif job.galaxy_session is not None and job.galaxy_session.user is not None:
return job.galaxy_session.user.email
elif job.history is not None and job.history.user is not None:
return job.history.user.email
elif job.galaxy_session is not None:
return 'anonymous@' + job.galaxy_session.remote_addr.split()[-1]
else:
return 'anonymous@unknown'
def __link_file_check( self ):
""" outputs_to_working_directory breaks library uploads where data is
linked. This method is a hack that solves that problem, but is
specific to the upload tool and relies on an injected job param. This
method should be removed ASAP and replaced with some properly generic
and stateful way of determining link-only datasets. -nate
"""
job = self.get_job()
param_dict = job.get_param_values( self.app )
return self.tool.id == 'upload1' and param_dict.get( 'link_data_only', None ) == 'link_to_files'
def _change_ownership( self, username, gid ):
job = self.get_job()
# FIXME: hardcoded path
cmd = [ '/usr/bin/sudo', '-E', self.app.config.external_chown_script, self.working_directory, username, str( gid ) ]
log.debug( '(%s) Changing ownership of working directory with: %s' % ( job.id, ' '.join( cmd ) ) )
p = subprocess.Popen( cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
# TODO: log stdout/stderr
stdout, stderr = p.communicate()
assert p.returncode == 0
def change_ownership_for_run( self ):
job = self.get_job()
if self.app.config.external_chown_script and job.user is not None:
try:
self._change_ownership( self.user_system_pwent[0], str( self.user_system_pwent[3] ) )
except:
log.exception( '(%s) Failed to change ownership of %s, making world-writable instead' % ( job.id, self.working_directory ) )
os.chmod( self.working_directory, 0777 )
def reclaim_ownership( self ):
job = self.get_job()
if self.app.config.external_chown_script and job.user is not None:
self._change_ownership( self.galaxy_system_pwent[0], str( self.galaxy_system_pwent[3] ) )
@property
def user_system_pwent( self ):
if self.__user_system_pwent is None:
job = self.get_job()
try:
self.__user_system_pwent = pwd.getpwnam( job.user.email.split('@')[0] )
except:
pass
return self.__user_system_pwent
@property
def galaxy_system_pwent( self ):
if self.__galaxy_system_pwent is None:
self.__galaxy_system_pwent = pwd.getpwuid(os.getuid())
return self.__galaxy_system_pwent
    def get_output_destination( self, output_path ):
        """
        Destination for outputs marked as from_work_dir. This is the normal case,
        just copy these files directly to the ultimate destination.
        """
        return output_path
@property
def requires_setting_metadata( self ):
if self.tool:
return self.tool.requires_setting_metadata
return False
class TaskWrapper(JobWrapper):
    """
    Extension of JobWrapper intended for running tasks.
    Should be refactored into a generalized executable unit wrapper parent, then jobs and tasks.
    """
    # Abstract this to be more useful for running tasks that *don't* necessarily compose a job.
    def __init__(self, task, queue):
        super(TaskWrapper, self).__init__(task.job, queue)
        # id of the task this wrapper drives (job id comes from the parent class).
        self.task_id = task.id
        working_directory = task.working_directory
        self.working_directory = working_directory
        # Paths are rewritten relative to the task's own working directory.
        job_dataset_path_rewriter = self._job_dataset_path_rewriter( self.working_directory )
        self.dataset_path_rewriter = TaskPathRewriter( working_directory, job_dataset_path_rewriter )
        if task.prepare_input_files_cmd is not None:
            self.prepare_input_files_cmds = [ task.prepare_input_files_cmd ]
        else:
            self.prepare_input_files_cmds = None
        self.status = task.states.NEW
    def can_split( self ):
        # Should the job handler split this job up? TaskWrapper should
        # always return False as the job has already been split.
        return False
    def get_job( self ):
        """Return the parent Job for this task, or None when no job id is set."""
        if self.job_id:
            return self.sa_session.query( model.Job ).get( self.job_id )
        else:
            return None
    def get_task( self ):
        """Return the Task model object this wrapper is running."""
        return self.sa_session.query(model.Task).get(self.task_id)
    def get_id_tag(self):
        # For compatibility with drmaa job runner and TaskWrapper, instead of using job_id directly
        return self.get_task().get_id_tag()
    def get_param_dict( self ):
        """
        Restore the dictionary of parameters from the database.
        """
        job = self.sa_session.query( model.Job ).get( self.job_id )
        param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )
        param_dict = self.tool.params_from_strings( param_dict, self.app )
        return param_dict
    def prepare( self, compute_environment=None ):
        """
        Prepare the job to run by creating the working directory and the
        config files.
        """
        # Restore parameters from the database
        job = self._load_job()
        task = self.get_task()
        # DBTODO New method for generating command line for a task?
        tool_evaluator = self._get_tool_evaluator( job )
        compute_environment = compute_environment or self.default_compute_environment( job )
        tool_evaluator.set_compute_environment( compute_environment )
        self.sa_session.flush()
        self.command_line, self.extra_filenames = tool_evaluator.build()
        # FIXME: for now, tools get Galaxy's lib dir in their path
        if self.command_line and self.command_line.startswith( 'python' ):
            self.galaxy_lib_dir = os.path.abspath( "lib" )  # cwd = galaxy root
        # Shell fragment to inject dependencies
        self.dependency_shell_commands = self.tool.build_dependency_shell_commands()
        # We need command_line persisted to the db in order for Galaxy to re-queue the job
        # if the server was stopped and restarted before the job finished
        task.command_line = self.command_line
        self.sa_session.add( task )
        self.sa_session.flush()
        self.param_dict = tool_evaluator.param_dict
        self.status = 'prepared'
        return self.extra_filenames
    def fail( self, message, exception=False ):
        """Mark this task's local status as error; does not touch the parent job."""
        log.error("TaskWrapper Failure %s" % message)
        self.status = 'error'
        # How do we want to handle task failure?  Fail the job and let it clean up?
    def change_state( self, state, info=False ):
        """Set the task's state (and optionally its info), flushing to the database."""
        task = self.get_task()
        self.sa_session.refresh( task )
        if info:
            task.info = info
        task.state = state
        self.sa_session.add( task )
        self.sa_session.flush()
    def get_state( self ):
        """Return the task's current state, refreshed from the database."""
        task = self.get_task()
        self.sa_session.refresh( task )
        return task.state
    def get_exit_code( self ):
        """Return the task's recorded exit code, refreshed from the database."""
        task = self.get_task()
        self.sa_session.refresh( task )
        return task.exit_code
    def set_runner( self, runner_url, external_id ):
        """Persist the task runner name and external id for recovery."""
        task = self.get_task()
        self.sa_session.refresh( task )
        task.task_runner_name = runner_url
        task.task_runner_external_id = external_id
        # DBTODO Check task job_runner_stuff
        self.sa_session.add( task )
        self.sa_session.flush()
    def finish( self, stdout, stderr, tool_exit_code=None ):
        # DBTODO integrate previous finish logic.
        # Simple finish for tasks.  Just set the flag OK.
        """
        Called to indicate that the associated command has been run. Updates
        the output datasets based on stderr and stdout from the command, and
        the contents of the output files.
        """
        stdout = unicodify( stdout )
        stderr = unicodify( stderr )
        # This may have ended too soon
        log.debug( 'task %s for job %d ended; exit code: %d'
                   % (self.task_id, self.job_id,
                      tool_exit_code if tool_exit_code != None else -256 ) )
        # default post job setup_external_metadata
        self.sa_session.expunge_all()
        task = self.get_task()
        # if the job was deleted, don't finish it
        if task.state == task.states.DELETED:
            # Job was deleted by an administrator
            delete_files = self.app.config.cleanup_job in ( 'always', 'onsuccess' )
            self.cleanup( delete_files=delete_files )
            return
        elif task.state == task.states.ERROR:
            self.fail( task.info )
            return
        # Check what the tool returned. If the stdout or stderr matched
        # regular expressions that indicate errors, then set an error.
        # The same goes if the tool's exit code was in a given range.
        if ( self.check_tool_output( stdout, stderr, tool_exit_code, task ) ):
            task.state = task.states.OK
        else:
            task.state = task.states.ERROR
        # Save stdout and stderr
        if len( stdout ) > DATABASE_MAX_STRING_SIZE:
            log.error( "stdout for task %d is greater than %s, only a portion will be logged to database" % ( task.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
        task.stdout = util.shrink_string_by_size( stdout, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
        if len( stderr ) > DATABASE_MAX_STRING_SIZE:
            log.error( "stderr for task %d is greater than %s, only a portion will be logged to database" % ( task.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
        self._collect_metrics( task )
        task.stderr = util.shrink_string_by_size( stderr, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
        task.exit_code = tool_exit_code
        task.command_line = self.command_line
        self.sa_session.flush()
    def cleanup( self ):
        # There is no task cleanup.  The job cleans up for all tasks.
        pass
    def get_command_line( self ):
        """Return the command line built for this task."""
        return self.command_line
    def get_session_id( self ):
        """Return this wrapper's session id."""
        return self.session_id
    def get_output_file_id( self, file ):
        # There is no permanent output file for tasks.
        return None
    def get_tool_provided_job_metadata( self ):
        # DBTODO Handle this as applicable for tasks.
        return None
    def get_dataset_finish_context( self, job_context, dataset ):
        # Handled at the parent job level.  Do nothing here.
        pass
    def setup_external_metadata( self, exec_dir=None, tmp_dir=None, dataset_files_path=None, config_root=None, config_file=None, datatypes_config=None, set_extension=True, **kwds ):
        # There is no metadata setting for tasks.  This is handled after the merge, at the job level.
        return ""
    def get_output_destination( self, output_path ):
        """
        Destination for outputs marked as from_work_dir. These must be copied with
        the same basenme as the path for the ultimate output destination. This is
        required in the task case so they can be merged.
        """
        return os.path.join( self.working_directory, os.path.basename( output_path ) )
class ComputeEnvironment( object ):
    """ Definition of the job as it will be run on the (potentially) remote
    compute server.

    Abstract interface (via ABCMeta): concrete environments must implement
    every method below.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def output_paths( self ):
        """ Output DatasetPaths defined by job. """

    @abstractmethod
    def input_paths( self ):
        """ Input DatasetPaths defined by job. """

    @abstractmethod
    def working_directory( self ):
        """ Job working directory (potentially remote) """

    @abstractmethod
    def config_directory( self ):
        """ Directory containing config files (potentially remote) """

    @abstractmethod
    def sep( self ):
        """ os.path.sep for the platform this job will execute in.
        """

    @abstractmethod
    def new_file_path( self ):
        """ Location to dump new files for this job on remote server. """

    @abstractmethod
    def version_path( self ):
        """ Location of the version file for the underlying tool. """

    @abstractmethod
    def unstructured_path_rewriter( self ):
        """ Return a function that takes in a value, determines if it is path
        to be rewritten (will be passed non-path values as well - onus is on
        this function to determine both if its input is a path and if it should
        be rewritten.)
        """
class SimpleComputeEnvironment( object ):
    """Partial ComputeEnvironment with defaults suitable for local execution."""

    def config_directory( self ):
        # Config files live alongside the job, in its working directory.
        return self.working_directory( )

    def sep( self ):
        return os.path.sep

    def unstructured_path_rewriter( self ):
        # No rewriting needed locally - hand every value back untouched.
        def _identity( value ):
            return value
        return _identity
class SharedComputeEnvironment( SimpleComputeEnvironment ):
    """Default ComputeEnvironment handed to the ToolEvaluator by job and
    task wrappers; valid whenever Galaxy and the compute resource share
    all relevant file systems, so local paths are usable everywhere.
    """

    def __init__( self, job_wrapper, job ):
        self.app = job_wrapper.app
        self.job_wrapper = job_wrapper
        self.job = job

    def working_directory( self ):
        # Same directory the wrapper itself works in.
        return self.job_wrapper.working_directory

    def output_paths( self ):
        return self.job_wrapper.get_output_fnames()

    def input_paths( self ):
        return self.job_wrapper.get_input_paths( self.job )

    def new_file_path( self ):
        # Shared file system: the app-configured path is valid as-is.
        return os.path.abspath( self.app.config.new_file_path )

    def version_path( self ):
        return self.job_wrapper.get_version_string_path()
class NoopQueue( object ):
    """Stand-in implementing the JobQueue / JobStopQueue interface whose
    operations all do nothing.
    """

    def put( self, *args, **kwargs ):
        pass

    def put_stop( self, *args ):
        pass

    def shutdown( self ):
        pass
class ParallelismInfo(object):
    """Describes how (if at all) a tool run should be split into multiple
    parallel instances over the same set of inputs.
    """
    def __init__(self, tag):
        self.method = tag.get('method')
        # ``tag`` may be a plain dict or an XML element exposing ``attrib``.
        if isinstance(tag, dict):
            items = tag.iteritems()
        else:
            items = tag.attrib.items()
        # keep every attribute except the splitting method itself
        self.attributes = dict((key, value) for key, value in items if key != 'method')
        if not self.attributes:
            # legacy basic mode - fall back to compatible defaults
            self.attributes['split_size'] = 20
            self.attributes['split_mode'] = 'number_of_parts'
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/jobs/__init__.py | Python | gpl-3.0 | 87,953 | [
"Galaxy"
] | d8d1021ff6bd006310fb1ed158478494e52846d88d7898fcffa8dda2eeafb744 |
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Canonical
#
# Authors:
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import datetime
import gzip
import logging
import operator
import os
import random
import json
import struct
import shutil
import subprocess
import time
import threading
from bsddb import db as bdb
from gi.repository import GObject
from gi.repository import Gio
# py3 compat
try:
import cPickle as pickle
pickle # pyflakes
except ImportError:
import pickle
# py3 compat
try:
from io import StringIO
StringIO # pyflakes
from urllib.parse import quote_plus
quote_plus # pyflakes
except ImportError:
from StringIO import StringIO
from urllib import quote_plus
from softwarecenter.backend.piston.rnrclient import RatingsAndReviewsAPI
from softwarecenter.backend.piston.rnrclient_pristine import ReviewDetails
from softwarecenter.db.categories import CategoriesParser
from softwarecenter.db.database import Application, StoreDatabase
import softwarecenter.distro
from softwarecenter.utils import (upstream_version_compare,
uri_to_filename,
get_language,
save_person_to_config,
get_person_from_config,
calc_dr,
wilson_score,
utf8,
)
from softwarecenter.paths import (SOFTWARE_CENTER_CACHE_DIR,
APP_INSTALL_PATH,
XAPIAN_BASE_PATH,
RNRApps,
PistonHelpers,
)
#from softwarecenter.enums import *
from softwarecenter.netstatus import network_state_is_connected
from spawn_helper import SpawnHelper
# Module-level logger for the review loader machinery.
LOG = logging.getLogger(__name__)
class ReviewStats(object):
    """Aggregate rating information for a single application."""

    def __init__(self, app):
        self.app = app
        # nothing seen yet: unknown average, zero votes
        self.ratings_average = None
        self.ratings_total = 0
        # count of 1..5 star votes
        self.rating_spread = [0,0,0,0,0]
        # default dampened rating used until real data arrives
        self.dampened_rating = 3.00

    def __repr__(self):
        return ("<ReviewStats '%s' ratings_average='%s' ratings_total='%s'"
                " rating_spread='%s' dampened_rating='%s'>" %
                (self.app, self.ratings_average, self.ratings_total,
                 self.rating_spread, self.dampened_rating))
class UsefulnessCache(object):
    """Cache of the user's usefulness votes (review_id -> bool)."""

    # class-level cache shared by all instances
    USEFULNESS_CACHE = {}

    def __init__(self, try_server=False):
        """Load cached votes, optionally refreshing them from the server.

        :param try_server: if True, spawn the helper that fetches the
            current votes for the configured user from the ratings server.
        """
        self.rnrclient = RatingsAndReviewsAPI()
        fname = "usefulness.p"
        self.USEFULNESS_CACHE_FILE = os.path.join(SOFTWARE_CENTER_CACHE_DIR,
                                                  fname)
        self._retrieve_votes_from_cache()
        # Only try to get votes from the server if required, otherwise
        # just use the cache
        if try_server:
            self._retrieve_votes_from_server()

    def _retrieve_votes_from_cache(self):
        """Populate USEFULNESS_CACHE from the on-disk pickle, if present."""
        if os.path.exists(self.USEFULNESS_CACHE_FILE):
            try:
                # fix: open in binary mode and close the handle; pickles
                # are binary data and the file object was leaked before
                with open(self.USEFULNESS_CACHE_FILE, "rb") as cache_file:
                    self.USEFULNESS_CACHE = pickle.load(cache_file)
            except Exception:
                # fix: narrowed from a bare "except:" so SystemExit and
                # KeyboardInterrupt are not swallowed; keep the corrupt
                # file around for debugging
                LOG.exception("usefulness cache load fallback failure")
                os.rename(self.USEFULNESS_CACHE_FILE,
                          self.USEFULNESS_CACHE_FILE + ".fail")
        return

    def _retrieve_votes_from_server(self):
        """Spawn the helper that downloads this user's votes."""
        LOG.debug("_retrieve_votes_from_server started")
        user = get_person_from_config()
        if not user:
            LOG.warn("Could not get usefulness from server, no username in config file")
            return False
        # run the command and add watcher
        cmd = [os.path.join(
                softwarecenter.paths.datadir, PistonHelpers.GET_USEFUL_VOTES),
               "--username", user,
               ]
        spawn_helper = SpawnHelper()
        spawn_helper.connect("data-available", self._on_usefulness_data)
        spawn_helper.run(cmd)

    def _on_usefulness_data(self, spawn_helper, results):
        '''called if usefulness retrieved from server'''
        LOG.debug("_usefulness_loaded started")
        self.USEFULNESS_CACHE.clear()
        for result in results:
            self.USEFULNESS_CACHE[str(result['review_id'])] = result['useful']
        if not self.save_usefulness_cache_file():
            LOG.warn("Read usefulness results from server but failed to write to cache")

    def save_usefulness_cache_file(self):
        """Write the vote dict out to the cache file; True on success."""
        cachedir = SOFTWARE_CENTER_CACHE_DIR
        try:
            if not os.path.exists(cachedir):
                os.makedirs(cachedir)
            # fix: binary mode + context manager (was text mode, leaked fd)
            with open(self.USEFULNESS_CACHE_FILE, "wb") as cache_file:
                pickle.dump(self.USEFULNESS_CACHE, cache_file)
            return True
        except Exception:
            # fix: narrowed from a bare "except:"; callers treat a False
            # return as "could not save", so no exception should escape
            return False

    def add_usefulness_vote(self, review_id, useful):
        """Record a vote in the dict, then try to persist it.

        Returns True when the cache file was written successfully.
        """
        self.USEFULNESS_CACHE[str(review_id)] = useful
        if self.save_usefulness_cache_file():
            return True
        return False

    def check_for_usefulness(self, review_id):
        """Return the recorded True/False vote, or None if never voted."""
        return self.USEFULNESS_CACHE.get(str(review_id))
class Review(object):
    """A single user review of an application."""

    def __init__(self, app):
        # a softwarecenter.db.database.Application object
        self.app = app
        self.app_name = app.appname
        self.package_name = app.pkgname
        # fields filled in once the actual review data is known
        self.id = None
        self.language = None
        self.summary = ""
        self.review_text = ""
        self.package_version = None
        self.date_created = None
        self.rating = None
        self.reviewer_username = None
        self.reviewer_displayname = None
        self.version = ""
        self.usefulness_total = 0
        self.usefulness_favorable = 0
        # set when submitting usefulness for this review failed
        self.usefulness_submit_error = False
        self.delete_error = False
        self.modify_error = False

    def __repr__(self):
        return "[Review id=%s review_text='%s' reviewer_username='%s']" % (
            self.id, self.review_text, self.reviewer_username)

    def __cmp__(self, other):
        # highest version number wins first
        version_cmp = upstream_version_compare(self.version, other.version)
        if version_cmp != 0:
            return version_cmp
        # then the review with the better wilson score
        usefulness_cmp = cmp(wilson_score(self.usefulness_favorable,
                                          self.usefulness_total),
                             wilson_score(other.usefulness_favorable,
                                          other.usefulness_total))
        if usefulness_cmp != 0:
            return usefulness_cmp
        # finally fall back to the creation date
        own_created = datetime.datetime.strptime(self.date_created, '%Y-%m-%d %H:%M:%S')
        other_created = datetime.datetime.strptime(other.date_created, '%Y-%m-%d %H:%M:%S')
        return cmp(own_created, other_created)

    @classmethod
    def from_piston_mini_client(cls, other):
        """Convert an rnrclient review into our richer Review object
        (ours carries additional state the rnrclient object lacks).
        """
        app = Application("", other.package_name)
        review = cls(app)
        for attr, value in vars(other).items():
            if not attr.startswith("_"):
                setattr(review, attr, value)
        return review

    @classmethod
    def from_json(cls, other):
        """Build a Review from a parsed json dictionary."""
        review = cls(Application("", other["package_name"]))
        for key, value in other.items():
            setattr(review, key, value)
        return review
class ReviewLoader(object):
    """Base loader that returns a review object list.

    Keeps a class-level ReviewStats cache (pickled to disk and additionally
    dumped as a bsddb for unity) and provides the spawn-helper plumbing for
    submitting, modifying, deleting and voting on reviews.
    """
    # cache the ReviewStats: Application -> ReviewStats
    REVIEW_STATS_CACHE = {}
    # True when the on-disk cache predates the histogram ("rating_spread") field
    _cache_version_old = False

    def __init__(self, cache, db, distro=None):
        # cache: package cache, db: xapian store database,
        # distro: optional distro object (auto-detected when None)
        self.cache = cache
        self.db = db
        self.distro = distro
        if not self.distro:
            self.distro = softwarecenter.distro.get_distro()
        # name the stats cache after the reviews server so caches from
        # different servers do not clash
        fname = "%s_%s" % (uri_to_filename(self.distro.REVIEWS_SERVER),
                           "review-stats-pkgnames.p")
        self.REVIEW_STATS_CACHE_FILE = os.path.join(SOFTWARE_CENTER_CACHE_DIR,
                                                    fname)
        # the unity bsddb is versioned with the bdb on-disk format version
        self.REVIEW_STATS_BSDDB_FILE = "%s__%s.%s.db" % (
            self.REVIEW_STATS_CACHE_FILE,
            bdb.DB_VERSION_MAJOR,
            bdb.DB_VERSION_MINOR)
        self.language = get_language()
        if os.path.exists(self.REVIEW_STATS_CACHE_FILE):
            try:
                self.REVIEW_STATS_CACHE = pickle.load(open(self.REVIEW_STATS_CACHE_FILE))
                self._cache_version_old = self._missing_histogram_in_cache()
            except:
                # corrupt cache: keep the broken file around for debugging
                LOG.exception("review stats cache load failure")
                os.rename(self.REVIEW_STATS_CACHE_FILE, self.REVIEW_STATS_CACHE_FILE+".fail")

    def _missing_histogram_in_cache(self):
        '''iterate through review stats to see if it has been fully reloaded
        with new histogram data from server update'''
        for app in self.REVIEW_STATS_CACHE.values():
            result = getattr(app, 'rating_spread', False)
            if not result:
                return True
        return False

    def get_reviews(self, application, callback, page=1, language=None):
        """run callback f(app, review_list)
        with list of review objects for the given
        db.database.Application object
        """
        # base implementation: no reviews available
        return []

    def update_review_stats(self, translated_application, stats):
        # key the cache on the untranslated application wrapper
        application = Application("", translated_application.pkgname)
        self.REVIEW_STATS_CACHE[application] = stats

    def get_review_stats(self, translated_application):
        """return a ReviewStats (number of reviews, rating)
        for a given application. this *must* be super-fast
        as it is called a lot during tree view display
        """
        # check cache
        try:
            application = Application("", translated_application.pkgname)
            if application in self.REVIEW_STATS_CACHE:
                return self.REVIEW_STATS_CACHE[application]
        except ValueError:
            pass
        return None

    def refresh_review_stats(self, callback):
        """ get the review statists and call callback when its there """
        pass

    def save_review_stats_cache_file(self, nonblocking=True):
        """ save review stats cache file in xdg cache dir """
        cachedir = SOFTWARE_CENTER_CACHE_DIR
        if not os.path.exists(cachedir):
            os.makedirs(cachedir)
        # write out the stats
        if nonblocking:
            # NOTE(review): Thread.run() executes the target in the
            # *calling* thread (t.start() would be needed for a real
            # background write), so this branch is effectively blocking --
            # confirm intent before changing, callers may rely on the
            # write having completed.
            t = threading.Thread(target=self._save_review_stats_cache_blocking)
            t.run()
        else:
            self._save_review_stats_cache_blocking()

    def _save_review_stats_cache_blocking(self):
        # dump out for software-center in simple pickle
        self._dump_pickle_for_sc()
        # dump out in c-friendly dbm format for unity
        try:
            outfile = self.REVIEW_STATS_BSDDB_FILE
            outdir = self.REVIEW_STATS_BSDDB_FILE + ".dbenv/"
            self._dump_bsddbm_for_unity(outfile, outdir)
        except bdb.DBError as e:
            # see bug #858437, db corruption seems to be rather common
            # on ecryptfs
            LOG.warn("error creating bsddb: '%s' (corrupted?)" % e)
            try:
                # nuke the environment and retry once
                shutil.rmtree(outdir)
                self._dump_bsddbm_for_unity(outfile, outdir)
            except:
                LOG.exception("trying to repair DB failed")

    def _dump_pickle_for_sc(self):
        """ write out the full REVIEWS_STATS_CACHE as a pickle """
        pickle.dump(self.REVIEW_STATS_CACHE,
                    open(self.REVIEW_STATS_CACHE_FILE, "w"))

    def _dump_bsddbm_for_unity(self, outfile, outdir):
        """ write out the subset that unity needs of the REVIEW_STATS_CACHE
        as a C friendly (using struct) bsddb
        """
        env = bdb.DBEnv()
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        env.open (outdir,
                  bdb.DB_CREATE | bdb.DB_INIT_CDB | bdb.DB_INIT_MPOOL |
                  bdb.DB_NOMMAP, # be gentle on e.g. nfs mounts
                  0600)
        db = bdb.DB (env)
        db.open (outfile,
                 dbtype=bdb.DB_HASH,
                 mode=0600,
                 flags=bdb.DB_CREATE)
        for (app, stats) in self.REVIEW_STATS_CACHE.iteritems():
            # pkgname is ascii by policy, so its fine to use str() here
            # NOTE(review): ratings_average/dampened_rating can be floats
            # while the format is 'iii' (three ints); this relies on py2
            # struct's implicit float->int conversion -- verify
            db[str(app.pkgname)] = struct.pack('iii',
                                               stats.ratings_average or 0,
                                               stats.ratings_total,
                                               stats.dampened_rating)
        db.close ()
        env.close ()

    def get_top_rated_apps(self, quantity=12, category=None):
        """Returns a list of the packages with the highest 'rating' based on
        the dampened rating calculated from the ReviewStats rating spread.
        Also optionally takes a category (string) to filter by"""
        cache = self.REVIEW_STATS_CACHE
        if category:
            applist = self._get_apps_for_category(category)
            cache = self._filter_cache_with_applist(cache, applist)
        #create a list of tuples with (Application,dampened_rating)
        dr_list = []
        for item in cache.items():
            if hasattr(item[1],'dampened_rating'):
                dr_list.append((item[0], item[1].dampened_rating))
            else:
                # older cache entries carry no dampened rating: use default
                dr_list.append((item[0], 3.00))
        #sorted the list descending by dampened rating
        sorted_dr_list = sorted(dr_list, key=operator.itemgetter(1),
                                reverse=True)
        #return the quantity requested or as much as we can
        if quantity < len(sorted_dr_list):
            return_qty = quantity
        else:
            return_qty = len(sorted_dr_list)
        top_rated = []
        for i in range (0,return_qty):
            top_rated.append(sorted_dr_list[i][0])
        return top_rated

    def _filter_cache_with_applist(self, cache, applist):
        """Take the review cache and filter it to only include the apps that
        also appear in the applist passed in"""
        filtered_cache = {}
        for key in cache.keys():
            if key.pkgname in applist:
                filtered_cache[key] = cache[key]
        return filtered_cache

    def _get_query_for_category(self, category):
        # map an untranslated category name to its xapian query,
        # or False when the category is unknown
        cat_parser = CategoriesParser(self.db)
        categories = cat_parser.parse_applications_menu(APP_INSTALL_PATH)
        for c in categories:
            if category == c.untranslated_name:
                query = c.query
                return query
        return False

    def _get_apps_for_category(self, category):
        # resolve a category name to the list of pkgnames it contains
        query = self._get_query_for_category(category)
        if not query:
            LOG.warn("_get_apps_for_category: received invalid category")
            return []
        pathname = os.path.join(XAPIAN_BASE_PATH, "xapian")
        db = StoreDatabase(pathname, self.cache)
        db.open()
        docs = db.get_docs_from_query(query)
        #from the db docs, return a list of pkgnames
        applist = []
        for doc in docs:
            applist.append(db.get_pkgname(doc))
        return applist

    # writing new reviews spawns external helper
    # FIXME: instead of the callback we should add proper gobject signals
    def spawn_write_new_review_ui(self, translated_app, version, iconname,
                                  origin, parent_xid, datadir, callback):
        """ this spawns the UI for writing a new review and
        adds it automatically to the reviews DB """
        app = translated_app.get_untranslated_app(self.db)
        cmd = [os.path.join(datadir, RNRApps.SUBMIT_REVIEW),
               "--pkgname", app.pkgname,
               "--iconname", iconname,
               "--parent-xid", "%s" % parent_xid,
               "--version", version,
               "--origin", origin,
               "--datadir", datadir,
               ]
        if app.appname:
            # needs to be (utf8 encoded) str, otherwise call fails
            cmd += ["--appname", utf8(app.appname)]
        spawn_helper = SpawnHelper(format="json")
        spawn_helper.connect(
            "data-available", self._on_submit_review_data, app, callback)
        spawn_helper.run(cmd)

    def _on_submit_review_data(self, spawn_helper, review_json, app, callback):
        """ called when submit_review finished, when the review was send
        successfully the callback is triggered with the new reviews
        """
        LOG.debug("_on_submit_review_data")
        # read stdout from submit_review
        review = ReviewDetails.from_dict(review_json)
        # FIXME: ideally this would be stored in ubuntu-sso-client
        #        but it dosn't so we store it here
        save_person_to_config(review.reviewer_username)
        # NOTE(review): self._reviews is created by subclasses (e.g. the
        # spawning loader); this base class does not define it -- verify
        if not app in self._reviews:
            self._reviews[app] = []
        self._reviews[app].insert(0, Review.from_piston_mini_client(review))
        callback(app, self._reviews[app])

    def spawn_report_abuse_ui(self, review_id, parent_xid, datadir, callback):
        """ this spawns the UI for reporting a review as inappropriate
        and adds the review-id to the internal hide list. once the
        operation is complete it will call callback with the updated
        review list
        """
        cmd = [os.path.join(datadir, RNRApps.REPORT_REVIEW),
               "--review-id", review_id,
               "--parent-xid", "%s" % parent_xid,
               "--datadir", datadir,
               ]
        spawn_helper = SpawnHelper("json")
        spawn_helper.connect("exited",
                             self._on_report_abuse_finished,
                             review_id, callback)
        spawn_helper.run(cmd)

    def _on_report_abuse_finished(self, spawn_helper, exitcode, review_id, callback):
        """ called when report_abuse finished """
        LOG.debug("hide id %s " % review_id)
        if exitcode == 0:
            for (app, reviews) in self._reviews.items():
                for review in reviews:
                    if str(review.id) == str(review_id):
                        # remove the one we don't want to see anymore
                        self._reviews[app].remove(review)
                        callback(app, self._reviews[app], None, 'remove', review)
                        break

    def spawn_submit_usefulness_ui(self, review_id, is_useful, parent_xid, datadir, callback):
        # spawn the helper that submits a usefulness (useful yes/no) vote
        cmd = [os.path.join(datadir, RNRApps.SUBMIT_USEFULNESS),
               "--review-id", "%s" % review_id,
               "--is-useful", "%s" % int(is_useful),
               "--parent-xid", "%s" % parent_xid,
               "--datadir", datadir,
               ]
        spawn_helper = SpawnHelper(format="none")
        spawn_helper.connect("exited",
                             self._on_submit_usefulness_finished,
                             review_id, is_useful, callback)
        spawn_helper.connect("error",
                             self._on_submit_usefulness_error,
                             review_id, callback)
        spawn_helper.run(cmd)

    def _on_submit_usefulness_finished(self, spawn_helper, res, review_id, is_useful, callback):
        """ called when report_usefulness finished """
        # "Created", "Updated", "Not modified" -
        # once lp:~mvo/rnr-server/submit-usefulness-result-strings makes it
        response = spawn_helper._stdout
        if response == '"Not modified"':
            # server rejected the vote: route through the error path
            self._on_submit_usefulness_error(spawn_helper, response, review_id, callback)
            return
        LOG.debug("usefulness id %s " % review_id)
        useful_votes = UsefulnessCache()
        useful_votes.add_usefulness_vote(review_id, is_useful)
        for (app, reviews) in self._reviews.items():
            for review in reviews:
                if str(review.id) == str(review_id):
                    # update usefulness, older servers do not send
                    # usefulness_{total,favorable} so we use getattr
                    review.usefulness_total = getattr(review, "usefulness_total", 0) + 1
                    if is_useful:
                        review.usefulness_favorable = getattr(review, "usefulness_favorable", 0) + 1
                    callback(app, self._reviews[app], useful_votes, 'replace', review)
                    break

    def _on_submit_usefulness_error(self, spawn_helper, error_str, review_id, callback):
        # mark the affected review so the UI can show the failure
        LOG.warn("submit usefulness id=%s failed with error: %s" %
                 (review_id, error_str))
        for (app, reviews) in self._reviews.items():
            for review in reviews:
                if str(review.id) == str(review_id):
                    review.usefulness_submit_error = True
                    callback(app, self._reviews[app], None, 'replace', review)
                    break

    def spawn_delete_review_ui(self, review_id, parent_xid, datadir, callback):
        # spawn the helper that deletes the user's own review
        cmd = [os.path.join(datadir, RNRApps.DELETE_REVIEW),
               "--review-id", "%s" % review_id,
               "--parent-xid", "%s" % parent_xid,
               "--datadir", datadir,
               ]
        spawn_helper = SpawnHelper(format="none")
        spawn_helper.connect("exited",
                             self._on_delete_review_finished,
                             review_id, callback)
        spawn_helper.connect("error", self._on_delete_review_error,
                             review_id, callback)
        spawn_helper.run(cmd)

    def _on_delete_review_finished(self, spawn_helper, res, review_id, callback):
        """ called when delete_review finished"""
        LOG.debug("delete id %s " % review_id)
        for (app, reviews) in self._reviews.items():
            for review in reviews:
                if str(review.id) == str(review_id):
                    # remove the one we don't want to see anymore
                    self._reviews[app].remove(review)
                    callback(app, self._reviews[app], None, 'remove', review)
                    break

    def _on_delete_review_error(self, spawn_helper, error_str, review_id, callback):
        """called if delete review errors"""
        LOG.warn("delete review id=%s failed with error: %s" % (review_id, error_str))
        for (app, reviews) in self._reviews.items():
            for review in reviews:
                if str(review.id) == str(review_id):
                    review.delete_error = True
                    callback(app, self._reviews[app], action='replace',
                             single_review=review)
                    break

    def spawn_modify_review_ui(self, parent_xid, iconname, datadir, review_id, callback):
        """ this spawns the UI for modifying an existing review and
        replaces it in the reviews DB """
        cmd = [os.path.join(datadir, RNRApps.MODIFY_REVIEW),
               "--parent-xid", "%s" % parent_xid,
               "--iconname", iconname,
               "--datadir", "%s" % datadir,
               "--review-id", "%s" % review_id,
               ]
        spawn_helper = SpawnHelper(format="json")
        spawn_helper.connect("data-available",
                             self._on_modify_review_finished,
                             review_id, callback)
        spawn_helper.connect("error", self._on_modify_review_error,
                             review_id, callback)
        spawn_helper.run(cmd)

    def _on_modify_review_finished(self, spawn_helper, review_json, review_id, callback):
        """called when modify_review finished"""
        LOG.debug("_on_modify_review_finished")
        #review_json = spawn_helper._stdout
        mod_review = ReviewDetails.from_dict(review_json)
        for (app, reviews) in self._reviews.items():
            for review in reviews:
                if str(review.id) == str(review_id):
                    # swap the old review for the freshly modified one
                    self._reviews[app].remove(review)
                    new_review = Review.from_piston_mini_client(mod_review)
                    self._reviews[app].insert(0, new_review)
                    callback(app, self._reviews[app], action='replace',
                             single_review=new_review)
                    break

    def _on_modify_review_error(self, spawn_helper, error_str, review_id, callback):
        """called if modify review errors"""
        LOG.debug("modify review id=%s failed with error: %s" % (review_id, error_str))
        for (app, reviews) in self._reviews.items():
            for review in reviews:
                if str(review.id) == str(review_id):
                    review.modify_error = True
                    callback(app, self._reviews[app], action='replace',
                             single_review=review)
                    break
# this code had several incarnations:
# - python threads, slow and full of latency (GIL)
# - python multiprocessing, crashed when accessibility was turned on,
#   does not work in the guest session (#743020)
# - GObject.spawn_async() looks good so far (using the SpawnHelper code)
class ReviewLoaderSpawningRNRClient(ReviewLoader):
    """ loader that spawns a helper process to call rnrclient and
        uses a glib watcher that collects the data when it is ready
    """

    def __init__(self, cache, db, distro=None):
        super(ReviewLoaderSpawningRNRClient, self).__init__(cache, db, distro)
        # fix: the cachedir/rnrclient setup was accidentally written twice
        # (both assignments were duplicated back to back)
        cachedir = os.path.join(SOFTWARE_CENTER_CACHE_DIR, "rnrclient")
        self.rnrclient = RatingsAndReviewsAPI(cachedir=cachedir)
        # app -> [Review, ...] for everything fetched so far
        self._reviews = {}

    def _update_rnrclient_offline_state(self):
        # this needs the lp:~mvo/piston-mini-client/offline-mode branch
        self.rnrclient._offline_mode = not network_state_is_connected()

    # reviews
    def get_reviews(self, translated_app, callback, page=1, language=None):
        """ public api, triggers fetching a review and calls callback
            when its ready
        """
        # its fine to use the translated appname here, we only submit the
        # pkgname to the server
        app = translated_app
        self._update_rnrclient_offline_state()
        if language is None:
            language = self.language
        # gather args for the helper
        try:
            origin = self.cache.get_origin(app.pkgname)
        except Exception:
            # fix: narrowed from a bare "except:". this can happen if e.g.
            # the app has multiple origins; it will be handled below
            origin = None
        # special case for not-enabled PPAs
        if not origin and self.db:
            details = app.get_details(self.db)
            ppa = details.ppaname
            if ppa:
                origin = "lp-ppa-%s" % ppa.replace("/", "-")
        # if there is no origin, there is nothing to do
        if not origin:
            callback(app, [])
            return
        distroseries = self.distro.get_codename()
        # run the command and add watcher
        cmd = [os.path.join(softwarecenter.paths.datadir, PistonHelpers.GET_REVIEWS),
               "--language", language,
               "--origin", origin,
               "--distroseries", distroseries,
               "--pkgname", str(app.pkgname), # ensure its str, not unicode
               "--page", str(page),
               ]
        spawn_helper = SpawnHelper()
        spawn_helper.connect(
            "data-available", self._on_reviews_helper_data, app, callback)
        spawn_helper.run(cmd)

    def _on_reviews_helper_data(self, spawn_helper, piston_reviews, app, callback):
        """ convert the helper's piston reviews into Review objects and
            hand them to the callback """
        reviews = []
        for r in piston_reviews:
            reviews.append(Review.from_piston_mini_client(r))
        # add to our dicts and run callback
        self._reviews[app] = reviews
        callback(app, self._reviews[app])
        return False

    # stats
    def refresh_review_stats(self, callback):
        """ public api, refresh the available statistics """
        try:
            mtime = os.path.getmtime(self.REVIEW_STATS_CACHE_FILE)
            # only ask the server for stats changed since the cache file
            # was written, plus one day of slack
            days_delta = int((time.time() - mtime) // (24*60*60))
            days_delta += 1
        except OSError:
            # no cache file yet: fetch everything
            days_delta = 0
        LOG.debug("refresh with days_delta: %s" % days_delta)
        #origin = "any"
        #distroseries = self.distro.get_codename()
        cmd = [os.path.join(
                softwarecenter.paths.datadir, PistonHelpers.GET_REVIEW_STATS),
               # FIXME: the server currently has bug (#757695) so we
               #        can not turn this on just yet and need to use
               #        the old "catch-all" review-stats for now
               #"--origin", origin,
               #"--distroseries", distroseries,
               ]
        if days_delta:
            cmd += ["--days-delta", str(days_delta)]
        spawn_helper = SpawnHelper()
        spawn_helper.connect("data-available", self._on_review_stats_data, callback)
        spawn_helper.run(cmd)

    def _on_review_stats_data(self, spawn_helper, piston_review_stats, callback):
        """ process stdout from the helper """
        review_stats = self.REVIEW_STATS_CACHE
        # if the server supplies histogram data but our on-disk cache
        # predates it, drop the cache and re-fetch everything once
        if self._cache_version_old and self._server_has_histogram(piston_review_stats):
            self.REVIEW_STATS_CACHE = {}
            self.save_review_stats_cache_file()
            self.refresh_review_stats(callback)
            return
        # convert to the format that s-c uses
        for r in piston_review_stats:
            s = ReviewStats(Application("", r.package_name))
            s.ratings_average = float(r.ratings_average)
            s.ratings_total = float(r.ratings_total)
            if r.histogram:
                s.rating_spread = json.loads(r.histogram)
            else:
                s.rating_spread = [0,0,0,0,0]
            s.dampened_rating = calc_dr(s.rating_spread)
            review_stats[s.app] = s
        self.REVIEW_STATS_CACHE = review_stats
        callback(review_stats)
        self.save_review_stats_cache_file()

    def _server_has_histogram(self, piston_review_stats):
        '''check response from server to see if histogram is supported'''
        supported = getattr(piston_review_stats[0], "histogram", False)
        if not supported:
            return False
        return True
class ReviewLoaderJsonAsync(ReviewLoader):
    """ get json (or gzip compressed json) via async gio downloads """

    def _gio_review_download_complete_callback(self, source, result):
        # finish the async review download and hand Review objects on
        app = source.get_data("app")
        callback = source.get_data("callback")
        try:
            (success, json_str, etag) = source.load_contents_finish(result)
        except GObject.GError:
            # ignore read errors, most likely transient
            return callback(app, [])
        # check for gzip header
        if json_str.startswith("\37\213"):
            gz = gzip.GzipFile(fileobj=StringIO(json_str))
            json_str = gz.read()
        reviews_json = json.loads(json_str)
        reviews = []
        for review_json in reviews_json:
            review = Review.from_json(review_json)
            reviews.append(review)
        # run callback
        callback(app, reviews)

    def get_reviews(self, app, callback, page=1, language=None):
        """ get a specific review and call callback when its available"""
        # FIXME: get this from the app details
        origin = self.cache.get_origin(app.pkgname)
        distroseries = self.distro.get_codename()
        if app.appname:
            appname = ";" + app.appname
        else:
            appname = ""
        url = self.distro.REVIEWS_URL % {'pkgname': app.pkgname,
                                         'appname': quote_plus(appname.encode("utf-8")),
                                         'language': self.language,
                                         'origin': origin,
                                         'distroseries': distroseries,
                                         'version': 'any',
                                         }
        LOG.debug("looking for review at '%s'" % url)
        f = Gio.File.new_for_uri(url)
        f.set_data("app", app)
        f.set_data("callback", callback)
        f.load_contents_async(self._gio_review_download_complete_callback)

    # review stats code
    def _gio_review_stats_download_finished_callback(self, source, result):
        callback = source.get_data("callback")
        try:
            # NOTE(review): the review callback above unpacks
            # (success, data, etag) from load_contents_finish while this
            # one unpacks (data, length, etag); only one signature can
            # match the installed GLib -- verify against the GI version
            (json_str, length, etag) = source.load_contents_finish(result)
        except GObject.GError:
            # ignore read errors, most likely transient
            return
        # check for gzip header
        if json_str.startswith("\37\213"):
            gz = gzip.GzipFile(fileobj=StringIO(json_str))
            json_str = gz.read()
        review_stats_json = json.loads(json_str)
        review_stats = {}
        for review_stat_json in review_stats_json:
            #appname = review_stat_json["app_name"]
            pkgname = review_stat_json["package_name"]
            app = Application('', pkgname)
            stats = ReviewStats(app)
            stats.ratings_total = int(review_stat_json["ratings_total"])
            stats.ratings_average = float(review_stat_json["ratings_average"])
            review_stats[app] = stats
        # update review_stats dict
        self.REVIEW_STATS_CACHE = review_stats
        self.save_review_stats_cache_file()
        # run callback
        callback(review_stats)

    def refresh_review_stats(self, callback):
        """ get the review statists and call callback when its there """
        # fix: Gio.File cannot be constructed with a positional URI via
        # GObject introspection; use the same new_for_uri constructor
        # that get_reviews() already uses
        f = Gio.File.new_for_uri(self.distro.REVIEW_STATS_URL)
        f.set_data("callback", callback)
        f.load_contents_async(self._gio_review_stats_download_finished_callback)
class ReviewLoaderFake(ReviewLoader):
    """Review loader that fabricates random reviews locally (no network)."""

    USERS = ["Joe Doll", "John Foo", "Cat Lala", "Foo Grumpf", "Bar Tender", "Baz Lightyear"]
    SUMMARIES = ["Cool", "Medium", "Bad", "Too difficult"]
    IPSUM = "no ipsum\n\nstill no ipsum"

    def __init__(self, cache, db):
        # deliberately skips ReviewLoader.__init__ - no disk cache is used
        self._review_stats_cache = {}
        self._reviews_cache = {}

    def _random_person(self):
        return random.choice(self.USERS)

    def _random_text(self):
        # LOREM is supplied by subclasses
        return random.choice(self.LOREM.split("\n\n"))

    def _random_summary(self):
        return random.choice(self.SUMMARIES)

    def get_reviews(self, application, callback, page=1, language=None):
        # make sure stats exist so we know how many reviews to invent
        if application not in self._review_stats_cache:
            self.get_review_stats(application)
        stats = self._review_stats_cache[application]
        if application not in self._reviews_cache:
            fabricated = []
            for _ in range(0, stats.ratings_total):
                review = Review(application)
                review.id = random.randint(1,50000)
                # FIXME: instead of random, try to match the avg_rating
                review.rating = random.randint(1,5)
                review.summary = self._random_summary()
                review.date_created = time.strftime("%Y-%m-%d %H:%M:%S")
                review.reviewer_username = self._random_person()
                review.review_text = self._random_text().replace("\n","")
                review.usefulness_total = random.randint(1, 20)
                review.usefulness_favorable = random.randint(1, 20)
                fabricated.append(review)
            self._reviews_cache[application] = fabricated
        callback(application, self._reviews_cache[application])

    def get_review_stats(self, application):
        # invent (and memoize) random stats per application
        if application not in self._review_stats_cache:
            stat = ReviewStats(application)
            stat.ratings_average = random.randint(1,5)
            stat.ratings_total = random.randint(1,20)
            self._review_stats_cache[application] = stat
        return self._review_stats_cache[application]

    def refresh_review_stats(self, callback):
        # nothing to refresh - report an empty stats list
        callback([])
class ReviewLoaderFortune(ReviewLoaderFake):
    """Fake loader whose review texts come from the local ``fortune`` program."""

    def __init__(self, cache, db):
        ReviewLoaderFake.__init__(self, cache, db)
        # collect ten fortunes, each preceded by a blank-line separator
        fortunes = []
        for _ in range(10):
            out = subprocess.Popen(["fortune"], stdout=subprocess.PIPE).communicate()[0]
            fortunes.append("\n\n%s" % out)
        self.LOREM = "".join(fortunes)
class ReviewLoaderTechspeak(ReviewLoaderFake):
    """ a test review loader that does not do any network io
    and returns random review texts

    The texts are paragraphs of tongue-in-cheek tech buzzword babble.
    """
    LOREM=u"""This package is using cloud based technology that will
make it suitable in a distributed environment where soup and xml-rpc
are used. The backend is written in C++ but the frontend code will
utilize dynamic languages lika LUA to provide a execution environment
based on JIT technology.
The software in this packages has a wonderful GUI, its based on OpenGL
but can alternative use DirectX (on plattforms were it is
available). Dynamic shading utilizes all GPU cores and out-of-order
thread scheduling is used to visualize the data optimally on multi
core systems.
The database support in tthis application is bleding edge. Not only
classical SQL techniques are supported but also object-relational
models and advanced ORM technology that will do auto-lookups based on
dynamic join/select optimizations to leverage sharded or multihosted
databases to their peak performance.
The Enterprise computer system is controlled by three primary main
processing cores cross linked with a redundant melacortz ramistat and
fourteen kiloquad interface modules. The core elements are based on
FTL nanoprocessor units arranged into twenty-five bilateral
kelilactirals with twenty of those units being slaved to the central
heisenfram terminal. . . . Now this is the isopalavial interface which
controls the main firomactal drive unit. . . . The ramistat kiloquad
capacity is a function of the square root of the intermix ratio times
the sum of the plasma injector quotient.
The iApp is using the new touch UI that feels more natural then
tranditional window based offerings. It supports a Job button that
will yell at you when pressed and a iAmCool mode where the logo of
your new device blinks so that you attract maximum attention.
This app is a lifestyle choice.
It sets you apart from those who are content with bland UI designed
around 1990's paradigms. This app represents you as a dynamic trend
setter with taste. The carefully controlled user interface is
perfectly tailored to the needs of a new age individual, and extreme
care has been taken to ensure that all buttons are large enough for even the
most robust digits.
Designed with the web 2.0 and touch screen portable technologies in
mind this app is the ultimate in media experience. With this
lifestyle application you extend your social media and search reach.
Exciting innovations in display and video reinvigorates the user
experience, offering beautifully rendered advertisements straight to
your finger tips. This has limitless possibilities and will permeate
every facet of your life. Believe the hype."""
class ReviewLoaderIpsum(ReviewLoaderFake):
    """ a test review loader that does not do any network io
    and returns random lorem ipsum review texts
    """
    #This text is under public domain
    #Lorem ipsum
    #Cicero
    # NOTE: must stay a plain text constant; ReviewLoaderFake._random_text()
    # splits it into paragraphs on blank lines
    LOREM=u"""lorem ipsum "dolor" äöü sit amet consetetur sadipscing elitr sed diam nonumy
eirmod tempor invidunt ut labore et dolore magna aliquyam erat sed diam
voluptua at vero eos et accusam et justo duo dolores et ea rebum stet clita
kasd gubergren no sea takimata sanctus est lorem ipsum dolor sit amet lorem
ipsum dolor sit amet consetetur sadipscing elitr sed diam nonumy eirmod
tempor invidunt ut labore et dolore magna aliquyam erat sed diam voluptua at
vero eos et accusam et justo duo dolores et ea rebum stet clita kasd
gubergren no sea takimata sanctus est lorem ipsum dolor sit amet lorem ipsum
dolor sit amet consetetur sadipscing elitr sed diam nonumy eirmod tempor
invidunt ut labore et dolore magna aliquyam erat sed diam voluptua at vero
eos et accusam et justo duo dolores et ea rebum stet clita kasd gubergren no
sea takimata sanctus est lorem ipsum dolor sit amet
duis autem vel eum iriure dolor in hendrerit in vulputate velit esse
molestie consequat vel illum dolore eu feugiat nulla facilisis at vero eros
et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril
delenit augue duis dolore te feugait nulla facilisi lorem ipsum dolor sit
amet consectetuer adipiscing elit sed diam nonummy nibh euismod tincidunt ut
laoreet dolore magna aliquam erat volutpat
ut wisi enim ad minim veniam quis nostrud exerci tation ullamcorper suscipit
lobortis nisl ut aliquip ex ea commodo consequat duis autem vel eum iriure
dolor in hendrerit in vulputate velit esse molestie consequat vel illum
dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio
dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te
feugait nulla facilisi
nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet
doming id quod mazim placerat facer possim assum lorem ipsum dolor sit amet
consectetuer adipiscing elit sed diam nonummy nibh euismod tincidunt ut
laoreet dolore magna aliquam erat volutpat ut wisi enim ad minim veniam quis
nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea
commodo consequat
duis autem vel eum iriure dolor in hendrerit in vulputate velit esse
molestie consequat vel illum dolore eu feugiat nulla facilisis
at vero eos et accusam et justo duo dolores et ea rebum stet clita kasd
gubergren no sea takimata sanctus est lorem ipsum dolor sit amet lorem ipsum
dolor sit amet consetetur sadipscing elitr sed diam nonumy eirmod tempor
invidunt ut labore et dolore magna aliquyam erat sed diam voluptua at vero
eos et accusam et justo duo dolores et ea rebum stet clita kasd gubergren no
sea takimata sanctus est lorem ipsum dolor sit amet lorem ipsum dolor sit
amet consetetur sadipscing elitr at accusam aliquyam diam diam dolore
dolores duo eirmod eos erat et nonumy sed tempor et et invidunt justo labore
stet clita ea et gubergren kasd magna no rebum sanctus sea sed takimata ut
vero voluptua est lorem ipsum dolor sit amet lorem ipsum dolor sit amet
consetetur sadipscing elitr sed diam nonumy eirmod tempor invidunt ut labore
et dolore magna aliquyam erat
consetetur sadipscing elitr sed diam nonumy eirmod tempor invidunt ut labore
et dolore magna aliquyam erat sed diam voluptua at vero eos et accusam et
justo duo dolores et ea rebum stet clita kasd gubergren no sea takimata
sanctus est lorem ipsum dolor sit amet lorem ipsum dolor sit amet consetetur
sadipscing elitr sed diam nonumy eirmod tempor invidunt ut labore et dolore
magna aliquyam erat sed diam voluptua at vero eos et accusam et justo duo
dolores et ea rebum stet clita kasd gubergren no sea takimata sanctus est
lorem ipsum dolor sit amet lorem ipsum dolor sit amet consetetur sadipscing
elitr sed diam nonumy eirmod tempor invidunt ut labore et dolore magna
aliquyam erat sed diam voluptua at vero eos et accusam et justo duo dolores
et ea rebum stet clita kasd gubergren no sea takimata sanctus est lorem
ipsum dolor sit amet"""
# module-level singleton holding the active reviews loader
review_loader = None

def get_review_loader(cache, db=None):
    """
    factory that returns a reviews loader singleton

    Environment variables select one of the fake/test loaders; otherwise
    the real rnrclient-spawning loader is used.  The chosen loader is
    created once and reused on subsequent calls.
    """
    global review_loader
    if not review_loader:
        # test-loader overrides, checked in priority order
        overrides = [
            ("SOFTWARE_CENTER_IPSUM_REVIEWS", ReviewLoaderIpsum),
            ("SOFTWARE_CENTER_FORTUNE_REVIEWS", ReviewLoaderFortune),
            ("SOFTWARE_CENTER_TECHSPEAK_REVIEWS", ReviewLoaderTechspeak),
            ("SOFTWARE_CENTER_GIO_REVIEWS", ReviewLoaderJsonAsync),
        ]
        for envvar, loader_class in overrides:
            if envvar in os.environ:
                review_loader = loader_class(cache, db)
                break
        else:
            # default: real review data via the spawned rnrclient helper
            review_loader = ReviewLoaderSpawningRNRClient(cache, db)
    return review_loader
if __name__ == "__main__":
    # Manual smoke test: exercise the RNR client loader directly, then the
    # loader chosen by get_review_loader(), against a sample application.
    def callback(app, reviews):
        # called with the fetched review list
        print "app callback:"
        print app, reviews
    def stats_callback(stats):
        # called with the refreshed review statistics
        print "stats callback:"
        print stats
    # cache
    from softwarecenter.db.pkginfo import get_pkg_info
    cache = get_pkg_info()
    cache.open()
    db = StoreDatabase(XAPIAN_BASE_PATH+"/xapian", cache)
    db.open()
    # rnrclient loader
    app = Application("ACE", "unace")
    #app = Application("", "2vcard")
    loader = ReviewLoaderSpawningRNRClient(cache, db)
    print loader.refresh_review_stats(stats_callback)
    print loader.get_reviews(app, callback)
    print "\n\n"
    print "default loader, press ctrl-c for next loader"
    # async results arrive via the GLib main loop
    context = GObject.main_context_default()
    main = GObject.MainLoop(context)
    main.run()
    # default loader
    app = Application("","2vcard")
    loader = get_review_loader(cache, db)
    loader.refresh_review_stats(stats_callback)
    loader.get_reviews(app, callback)
    main.run()
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/software-center/softwarecenter/backend/reviews.py | Python | gpl-3.0 | 46,555 | [
"exciting"
] | 5c0f7fbdb9e37dabd072831535aea81d66314db9db032c41f570531421c31987 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views import defaults as default_views
from django.views.generic import TemplateView
from filebrowser.sites import site
from tejas.pages import views
# URL routing: static template pages, slug-based detail views, admin and
# file-management tools, and auth; media files are served from MEDIA_ROOT.
urlpatterns = [
    # static template pages
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    url(r'^learnings/$', TemplateView.as_view(template_name='pages/learnings.html'), name="learnings"),
    url(r'^projects/$', TemplateView.as_view(template_name='pages/projects.html'), name="projects"),
    url(r'^writings/$', TemplateView.as_view(template_name='pages/writings.html'), name="writings"),
    # slug-based detail pages rendered by the pages app
    url(r'^learnings/(?P<slug>[\w-]+)/$', views.learnings, name='learnings_detail'),
    url(r'^projects/(?P<slug>[\w-]+)/$', views.projects, name='projects_detail'),
    url(r'^writings/(?P<slug>[\w-]+)/$', views.writings, name='writings_detail'),
    # Django Admin, use {% url 'admin:index' %}
    url(r'^files/', include('filemanager.urls', namespace='filemanager')),
    url(r'^admin/filebrowser/', include(site.urls)),
    url(r'^grappelli/', include('grappelli.urls')),
    url(settings.ADMIN_URL, include(admin.site.urls)),
    # User management
    url(r'^users/', include("tejas.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
        # fixed typo in the sample exception message ("Permissin" -> "Permission")
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permission Denied")}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
        url(r'^500/$', default_views.server_error),
    ]
| TejasM/Personal | config/urls.py | Python | bsd-3-clause | 2,482 | [
"VisIt"
] | 6c8ff3c3a79a2b080e5d33bc855a5cdacff916e64f08906523a3507e8e17ac9f |
#!/usr/bin/python
import csv
import glob
from numpy.distutils import cpuinfo
from optparse import OptionParser
import os
import re
from subprocess import Popen, call, check_call
import subprocess
import sys
import time
parser = OptionParser( usage = "%prog [otions] number" )
parser.add_option( "--spec-config", help = "SPEC config file" )
parser.add_option( "--parsec", action = "store_true", help = "run parsec" )
parser.add_option(
"--sleep-step", metavar = "sec", default = 10,
help = "step size (sec) for different sleep \"benchmarks\""
)
parser.add_option( "--test", action = "store_true", help = "run test inputs" )
options, args = parser.parse_args()
if len( args ) < 1:
parser.print_help()
exit()
try:
n = int( args[ 0 ] )
except:
print "ERROR: number must be a number"
exit( 1 )
root = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
def sleep( bmark, dotest ):
    """Build a command factory for a fake "sleepN" benchmark.

    *bmark* has the form ``sleepN``; everything after the "sleep" prefix
    is the duration in seconds.  *dotest* is accepted for interface
    compatibility with the other factories but unused.  Returns a callable
    mapping a submit-command prefix to an argv list.
    """
    seconds = bmark[ 5: ]
    def build( submit ):
        # run under a shell so the submit prefix can be a full command line
        return [ '/bin/sh', '-xc', "%s sleep %s" % ( submit, seconds ) ]
    return build
def spec( bmark, dotest ):
    """Build a command factory that runs SPEC benchmark *bmark* via runspec.

    The "test" input set is used when *dotest* is true, "ref" otherwise.
    The returned callable embeds the submit prefix via runspec's
    SUBMIT_CMD define.  Reads the module-level *options* for the SPEC
    config file path.
    """
    workload = "test" if dotest else "ref"
    def build( submit ):
        cmd = [ 'runspec', '-a', 'run' ]
        cmd += [ '-c', options.spec_config ]
        cmd += [ '--iterations', '1', '--noreportable' ]
        cmd += [ '-i', workload ]
        cmd += [ '--define', "SUBMIT_CMD=" + submit ]
        cmd.append( bmark )
        return cmd
    return build
def parsec( bmark, dotest ):
    """Build a command factory that runs PARSEC benchmark *bmark*.

    Uses the "test" input set when *dotest* is true and the full "native"
    set otherwise; the submit prefix is passed via parsecmgmt's -s flag.
    """
    if dotest:
        workload = "test"
    else:
        workload = "native"
    def build( submit ):
        cmd = [ 'parsecmgmt', '-a', 'run' ]
        cmd += [ '-p', "parsec." + bmark ]
        cmd += [ '-i', workload ]
        cmd += [ '-s', submit ]
        return cmd
    return build
# Assemble the benchmark suite: each entry is (name, factory) where the
# factory builds the run command for that benchmark.
benchmarks = list()
# sleep "benchmarks" give idle-power baselines at increasing durations
for i in range( 2, 3 if options.test else 61, int( options.sleep_step ) ):
    benchmarks.append( ( "sleep" + str( i ), sleep ) )
if options.parsec:
    # PARSEC suite (commented entries are known-problematic)
    benchmarks += [
        ( "blackscholes", parsec ),
        ( "bodytrack", parsec ),
        ( "facesim", parsec ),
        # ( "canneal", parsec ),
        ( "ferret", parsec ),
        ( "fluidanimate", parsec ),
        ( "freqmine", parsec ),
        # ( "netstreamcluster", parsec ),
        # ( "streamcluster", parsec ),
        ( "raytrace", parsec ),
        ( "swaptions", parsec ),
        ( "vips", parsec ),
        ( "x264", parsec ),
    ]
if options.spec_config:
    # SPEC CPU2006 suite, enabled only when a config file was supplied
    benchmarks += [
        ( "400.perlbench", spec ),
        ( "401.bzip2", spec ),
        ( "403.gcc", spec ),
        ( "410.bwaves", spec ),
        ( "416.gamess", spec ),
        ( "429.mcf", spec ),
        ( "433.milc", spec ),
        ( "434.zeusmp", spec ),
        ( "435.gromacs", spec ),
        ( "436.cactusADM", spec ),
        ( "437.leslie3d", spec ),
        ( "444.namd", spec ),
        ( "445.gobmk", spec ),
        # ( "447.dealII", spec ),
        ( "450.soplex", spec ),
        ( "453.povray", spec ),
        ( "454.calculix", spec ),
        ( "456.hmmer", spec ),
        ( "458.sjeng", spec ),
        ( "459.GemsFDTD", spec ),
        ( "462.libquantum", spec ),
        ( "464.h264ref", spec ),
        ( "465.tonto", spec ),
        ( "470.lbm", spec ),
        ( "471.omnetpp", spec ),
        ( "473.astar", spec ),
        ( "481.wrf", spec ),
        ( "482.sphinx3", spec ),
        ( "483.xalancbmk", spec ),
        ( "998.specrand", spec ),
        ( "999.specrand", spec ),
    ]
def genDatfile( bmark, *keys ):
keys = map( str, keys )
oldfiles = os.path.join(
os.getcwd(), ".".join( [ bmark ] + keys + [ "*", "dat" ] )
)
for fname in glob.glob( oldfiles ):
print "deleting", fname
return oldfiles.replace( "*", "$workload" )
def findDatfiles( bmark, pattern ):
    """Locate the .dat files matching *pattern* (as built by genDatfile).

    Returns a list of (name, filename) pairs.  When the filename encodes
    a numeric workload id where "$workload" appears in the pattern, the
    benchmark name is suffixed with ".w<id>"; otherwise the plain *bmark*
    name is used.
    """
    # turn the literal pattern into a regex with the workload id captured
    workload_pat = re.escape( pattern ).replace( "\$workload", "(\d+)" )
    workload_pat = re.compile( workload_pat )
    datfiles = list()
    for fname in glob.glob( pattern.replace( "$workload", "*" ) ):
        m = workload_pat.match( fname )
        if m:
            datfiles.append( ( bmark + ".w" + m.group( 1 ), fname ) )
        else:
            datfiles.append( ( bmark, fname ) )
    return datfiles
# ordered list of every data column encountered so far; shared by the
# readers below and later used as the CSV header row
allkeys = list()
def readData( fname, *keys ):
    """Read the LAST row of CSV file *fname*, pull out *keys*, register any
    new keys in the global allkeys list, then delete the file.

    NOTE(review): assumes the file contains at least one data row — an
    empty file would leave 'line' unbound and raise NameError; confirm
    the .dat files always have data.
    """
    result = dict()
    with open( fname ) as fh:
        reader = csv.DictReader( fh )
        # drain the reader; 'line' ends up holding the final row
        for line in reader:
            pass
        for key in keys:
            if key in line:
                if not key in allkeys:
                    allkeys.append( key )
                result[ key ] = line[ key ]
    os.remove( fname )
    return result
def runWU( bmark, i, run, *counters ):
    """Run iteration *i* of *bmark* under the WattsUp power meter wrapper.

    *run* is a command factory; it receives the wattsup wrapper invocation
    as its submit prefix so power samples land in a fresh .dat file.
    Returns {benchmark-name: {counter: value}} for the requested counters.
    """
    datfile = genDatfile( bmark, "power", i )
    # let the machine settle before sampling power
    time.sleep( 5 )
    check_call( run( wattsup + " -o " + datfile + " ttyUSB0 --" ) )
    result = dict()
    for bmark, fname in findDatfiles( bmark, datfile ):
        result[ bmark ] = readData( fname, *counters )
    return result
def runPerf( bmark, i, run, *counters ):
    """Run iteration *i* of *bmark* under "perf stat" collecting *counters*.

    Parses perf's textual output: a line is recorded when its first field
    starts with a digit and its second field names a requested counter
    (thousands separators are stripped from the value).  Newly seen
    counters are appended to the global allkeys list.  Returns
    {benchmark-name: {counter: value}}.
    """
    datfile = genDatfile( bmark, "perf", i )
    cmd = "perf stat -o " + datfile + " -e " + ",".join( counters ) + " --"
    check_call( run( cmd ) )
    result = dict()
    for bmark, fname in findDatfiles( bmark, datfile ):
        result[ bmark ] = dict()
        with open( fname ) as fh:
            for line in fh:
                line = line.strip().split()
                if len( line ) >= 2 and \
                    line[ 0 ][ 0 ].isdigit() and \
                    line[ 1 ] in counters:
                    result[ bmark ][ line[ 1 ] ] = line[ 0 ].replace( ",", "" )
                    if not line[ 1 ] in allkeys:
                        allkeys.append( line[ 1 ] )
        os.remove( fname )
    return result
def merge( d1, d2 ):
    """Recursively fold mapping *d2* into *d1* in place.

    When the existing value in *d1* is exactly a dict, the corresponding
    entries are merged recursively; any other key is overwritten with the
    value from *d2*.
    """
    for key in d2:
        value = d2[ key ]
        # recurse only when d1 already holds a plain dict at this key
        nested = key in d1 and type( d1[ key ] ) is dict
        if nested:
            merge( d1[ key ], value )
        else:
            d1[ key ] = value
# Raw hardware event codes for floating-point operations differ by CPU
# vendor; pick the right set for this machine.
if cpuinfo.cpuinfo().is_AMD():
    flop_regs = [ "r533f00" ]
else:
    flop_regs = [ "r532010", "r538010" ]
# helper script that wraps a command with WattsUp power-meter sampling
wattsup = os.path.join( root, "wu.py" )
with open( "benchmarks.csv", 'w' ) as fh:
    writer = None
    for i in range( n ):
        for bmark, runcmd in benchmarks:
            run = runcmd( bmark, options.test )
            data = dict()
            # three separate runs per benchmark: power, FLOPs+instructions,
            # and cache counters (perf limits concurrent counters)
            merge( data, runWU( bmark, i, run, "time", "kwh", "watts" ) )
            merge( data, runPerf( bmark, i, run, "cycles", "instructions", *flop_regs ) )
            merge( data, runPerf( bmark, i, run, "cache-references", "cache-misses" ) )
            # emit the header once allkeys has been populated by the first run
            if writer is None:
                writer = csv.writer( fh )
                writer.writerow( [ "benchmark" ] + allkeys )
            for bmark in data:
                row = [ bmark ]
                for key in allkeys:
                    row.append( data[ bmark ][ key ] )
                writer.writerow( row )
            # flush so partial results survive a crashed/aborted run
            fh.flush()
| eschulte/goa | bin/collect_model_data.py | Python | gpl-3.0 | 6,660 | [
"GAMESS",
"Gromacs",
"NAMD"
] | 86c1f32cd00dcd7d304dea4d80d0761b4833a1c4398c22a087c25792cb847b8a |
# Orca
#
# Copyright 2014 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Script-customizable support for application spellcheckers."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2014 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import re
from orca import guilabels
from orca import messages
from orca import settings_manager
_settingsManager = settings_manager.getManager()
class SpellCheck:
    """Support for presenting an application's spellcheck dialog.

    Scripts subclass this and implement the _isCandidateWindow() and
    _find*() hooks so Orca can locate the error label, the "change to"
    entry, and the suggestions list, then present them via speech and
    braille.
    """
    def __init__(self, script, hasChangeToEntry=True):
        self._script = script
        self._hasChangeToEntry = hasChangeToEntry
        self._window = None
        self._errorWidget = None
        self._changeToEntry = None
        self._suggestionsList = None
        self._activated = False
        # (accessible object, caret offset) in the document being checked
        self._documentPosition = None, -1
        # preference widgets, created lazily in getAppPreferencesGUI()
        self.spellErrorCheckButton = None
        self.spellSuggestionCheckButton = None
        self.presentContextCheckButton = None
    def activate(self, window):
        """Try to bind this helper to *window*; return True on success.

        All required widgets must be found (via the subclass hooks) for
        activation to succeed.
        """
        if not self._isCandidateWindow(window):
            return False
        if self._hasChangeToEntry:
            self._changeToEntry = self._findChangeToEntry(window)
            if not self._changeToEntry:
                return False
        self._errorWidget = self._findErrorWidget(window)
        if not self._errorWidget:
            return False
        self._suggestionsList = self._findSuggestionsList(window)
        if not self._suggestionsList:
            return False
        self._window = window
        self._activated = True
        return True
    def deactivate(self):
        """Forget the spellcheck window and all cached widgets."""
        self._clearState()
    def getDocumentPosition(self):
        """Return the saved (accessible, offset) document position."""
        return self._documentPosition
    def setDocumentPosition(self, obj, offset):
        """Remember where in the document the spellcheck started."""
        self._documentPosition = obj, offset
    def getErrorWidget(self):
        """Return the widget displaying the current error, if found."""
        return self._errorWidget
    def getMisspelledWord(self):
        """Return the text shown in the error widget, or "" if unavailable."""
        if not self._errorWidget:
            return ""
        return self._script.utilities.displayedText(self._errorWidget)
    def getCompletionMessage(self):
        """Return the completion text shown in the error widget, or ""."""
        if not self._errorWidget:
            return ""
        return self._script.utilities.displayedText(self._errorWidget)
    def getChangeToEntry(self):
        """Return the "change to" entry widget, if found."""
        return self._changeToEntry
    def getSuggestionsList(self):
        """Return the suggestions list widget, if found."""
        return self._suggestionsList
    def isActive(self):
        """Return True while this helper is bound to a spellcheck window."""
        return self._activated
    def isCheckWindow(self, window):
        """Return True if *window* is (or can become) the spellcheck window."""
        if window and window == self._window:
            return True
        return self.activate(window)
    def isComplete(self):
        """Return True once the "change to" entry has become insensitive,
        which signals the spellcheck has finished."""
        try:
            state = self._changeToEntry.getState()
        except:
            return False
        return not state.contains(pyatspi.STATE_SENSITIVE)
    def isAutoFocusEvent(self, event):
        """Hook for subclasses; the base implementation never claims events."""
        return False
    def presentContext(self):
        """Speak the sentence (or line) of the document containing the error."""
        if not self.isActive():
            return False
        obj, offset = self._documentPosition
        if not (obj and offset >= 0):
            return False
        try:
            text = obj.queryText()
        except:
            return False
        # This should work, but some toolkits are broken.
        boundary = pyatspi.TEXT_BOUNDARY_SENTENCE_START
        string, start, end = text.getTextAtOffset(offset, boundary)
        if not string:
            # fall back to line granularity for broken implementations
            boundary = pyatspi.TEXT_BOUNDARY_LINE_START
            string, start, end = text.getTextAtOffset(offset, boundary)
        sentences = re.split(r'(?:\.|\!|\?)', string)
        word = self.getMisspelledWord()
        # if the word occurs exactly once, narrow to its sentence
        if string.count(word) == 1:
            match = list(filter(lambda x: x.count(word), sentences))
            string = match[0]
        if not string:
            return False
        self._script.speakMessage(messages.MISSPELLED_WORD_CONTEXT % string)
        return True
    def presentCompletionMessage(self):
        """Present the dialog's completion message once the check is done."""
        if not (self.isActive() and self.isComplete()):
            return False
        self._script.clearBraille()
        self._script.presentMessage(self.getCompletionMessage())
        return True
    def presentErrorDetails(self, detailed=False):
        """Present the mistake, its suggestion, and optionally its context."""
        if self.isComplete():
            return False
        if self.presentMistake(detailed):
            self.presentSuggestion(detailed)
            if detailed or _settingsManager.getSetting('spellcheckPresentContext'):
                self.presentContext()
            return True
        return False
    def presentMistake(self, detailed=False):
        """Speak (and optionally spell out) the misspelled word."""
        if not self.isActive():
            return False
        word = self.getMisspelledWord()
        if not word:
            return False
        self._script.speakMessage(messages.MISSPELLED_WORD % word)
        if detailed or _settingsManager.getSetting('spellcheckSpellError'):
            self._script.spellCurrentItem(word)
        return True
    def presentSuggestion(self, detailed=False):
        """Speak the suggested replacement from the "change to" entry."""
        if not self._hasChangeToEntry:
            return self.presentSuggestionListItem(detailed)
        if not self.isActive():
            return False
        entry = self._changeToEntry
        if not entry:
            return False
        label = self._script.utilities.displayedLabel(entry)
        string = self._script.utilities.substring(entry, 0, -1)
        self._script.speakMessage("%s %s" % (label, string))
        if detailed or _settingsManager.getSetting('spellcheckSpellSuggestion'):
            self._script.spellCurrentItem(string)
        return True
    def presentSuggestionListItem(self, detailed=False):
        """Speak the currently selected item of the suggestions list."""
        if not self.isActive():
            return False
        suggestions = self._suggestionsList
        if not suggestions:
            return False
        items = self._script.utilities.selectedChildren(suggestions)
        if not len(items) == 1:
            return False
        string = items[0].name
        self._script.speakMessage(string)
        if detailed or _settingsManager.getSetting('spellcheckSpellSuggestion'):
            self._script.spellCurrentItem(string)
        return True
    def _clearState(self):
        # reset everything set by activate()/setDocumentPosition()
        self._window = None
        self._errorWidget = None
        self._changeToEntry = None
        self._suggestionsList = None
        self._activated = False
        self._documentPosition = None, -1
    def _isCandidateWindow(self, window):
        """Subclass hook: is *window* this app's spellcheck dialog?"""
        return False
    def _findChangeToEntry(self, root):
        """Subclass hook: locate the "change to" entry under *root*."""
        return None
    def _findErrorWidget(self, root):
        """Subclass hook: locate the widget showing the misspelled word."""
        return None
    def _findSuggestionsList(self, root):
        """Subclass hook: locate the suggestions list under *root*."""
        return None
    def getAppPreferencesGUI(self):
        """Build and return the Gtk frame with spellcheck preferences."""
        from gi.repository import Gtk
        frame = Gtk.Frame()
        label = Gtk.Label(label="<b>%s</b>" % guilabels.SPELL_CHECK)
        label.set_use_markup(True)
        frame.set_label_widget(label)
        alignment = Gtk.Alignment.new(0.5, 0.5, 1, 1)
        alignment.set_padding(0, 0, 12, 0)
        frame.add(alignment)
        grid = Gtk.Grid()
        alignment.add(grid)
        label = guilabels.SPELL_CHECK_SPELL_ERROR
        value = _settingsManager.getSetting('spellcheckSpellError')
        self.spellErrorCheckButton = Gtk.CheckButton.new_with_mnemonic(label)
        self.spellErrorCheckButton.set_active(value)
        grid.attach(self.spellErrorCheckButton, 0, 0, 1, 1)
        label = guilabels.SPELL_CHECK_SPELL_SUGGESTION
        value = _settingsManager.getSetting('spellcheckSpellSuggestion')
        self.spellSuggestionCheckButton = Gtk.CheckButton.new_with_mnemonic(label)
        self.spellSuggestionCheckButton.set_active(value)
        grid.attach(self.spellSuggestionCheckButton, 0, 1, 1, 1)
        label = guilabels.SPELL_CHECK_PRESENT_CONTEXT
        value = _settingsManager.getSetting('spellcheckPresentContext')
        self.presentContextCheckButton = Gtk.CheckButton.new_with_mnemonic(label)
        self.presentContextCheckButton.set_active(value)
        grid.attach(self.presentContextCheckButton, 0, 2, 1, 1)
        return frame
    def setAppPreferences(self, prefs):
        """Persist the checkbox states to settings and write them to *prefs*."""
        prefix = "orca.settings"
        value = self.spellErrorCheckButton.get_active()
        _settingsManager.setSetting('spellcheckSpellError', value)
        prefs.writelines("\n")
        prefs.writelines("%s.spellcheckSpellError = %s\n" % (prefix, value))
        value = self.spellSuggestionCheckButton.get_active()
        _settingsManager.setSetting('spellcheckSpellSuggestion', value)
        prefs.writelines("\n")
        prefs.writelines("%s.spellcheckSpellSuggestion = %s\n" % (prefix, value))
        value = self.presentContextCheckButton.get_active()
        _settingsManager.setSetting('spellcheckPresentContext', value)
        prefs.writelines("\n")
        prefs.writelines("%s.spellcheckPresentContext = %s\n" % (prefix, value))
| h4ck3rm1k3/orca-sonar | src/orca/spellcheck.py | Python | lgpl-2.1 | 9,432 | [
"ORCA"
] | 6a6c92088b053aab5d751b43c77dbcd9f755e0a2ef4804dcad39f4844a1f1693 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# XCode Project Creator
#
import os, sys, re, shutil, codecs
from shutil import copyfile
from os.path import join, splitext, split, exists
from datetime import date
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
from tools import *
fileTargets = ['.c','.cpp','.h','.m','.mm','.pbxproj']
ignoreFiles = ['.gitignore', '.cvsignore','bridge.txt','libTitanium.a']
ignoreDirs = ['.git','.svn', 'CVS']
symbolicMap = ['Titanium','Appcelerator']
exclusions = ['TiCore']
class Projector(object):
    """Turns the Titanium iPhone SDK template into a standalone Xcode project.

    Copies Classes/lib/Resources/headers from the SDK into the project
    directory, rewriting Titanium/Appcelerator symbols to a namespace
    derived from the project name, and generates the .xcodeproj plus the
    project/module xcconfig files.
    """
    def make_self(self,s):
        """Derive a valid identifier from *s*: keep [0-9a-zA-Z_], map '-'
        to '_', drop everything else, and prefix 'k' if it begins with a
        digit."""
        r = re.compile('[0-9a-zA-Z_]')
        buf = ''
        for i in s:
            if i=='-':
                buf+='_'
                continue
            if r.match(i)!=None:
                buf+=i
        # if name starts with number, we simply append a k to it
        if re.match('^[0-9]+',buf):
            buf = 'k%s' % buf
        return buf
    def __init__(self,name,sdk_version,sdk_root,project_root,appid,deploy_target):
        self.sdk_version = sdk_version
        self.sdk_root = os.path.abspath(sdk_root)
        self.project_root = os.path.abspath(project_root)
        self.project_id = appid
        self.deploy_target = deploy_target
        self.name = name
        # identifier-safe namespace used for all symbol rewrites below
        self.namespace = self.make_self(name)
        self.namespace_upper = self.namespace.upper()+'_'
    def form_target_filename(self,fn):
        """Hook for renaming target files; currently the identity mapping."""
        return fn
    def process_file(self,source,target,cb=None):
        """Copy *source* to *target* rewriting Titanium symbols in content.

        Returns False when *source* matches an exclusion (the caller then
        copies the file verbatim); otherwise writes the transformed text
        and returns True.  *cb*, if given, may post-process the content
        string before it is written.
        """
        for exclude in exclusions:
            if source.find(exclude)>0:
                return False
        # first deal with the filename
        target_filename = self.form_target_filename(target)
        print "[DEBUG] processing %s => %s" % (source,target_filename)
        content = codecs.open(source,'r','utf-8','replace').read()
        # fixup special case
        content = content.replace('TitaniumViewController','%s$ViewController'%self.namespace)
        content = content.replace('TitaniumModule','%s$Module'%self.namespace)
        for symbol in symbolicMap:
            content = content.replace(symbol,self.namespace)
        # fixup titanium vars
        content = content.replace('titanium','_%s'%self.namespace.lower())
        # fixup double module replacement
        content = content.replace('%s%sModule' %(self.namespace,self.namespace),'%sModule'%self.namespace)
        content = content.replace('%s%s$Module' %(self.namespace,self.namespace),'%s$Module'%self.namespace)
        # fixup namespaces
        content = content.replace('org.appcelerator','org.%s'%self.namespace.lower())
        content = content.replace('com.appcelerator','com.%s'%self.namespace.lower())
        # fixup Copyright
        content = content.replace('* %s %s Mobile'%(self.namespace,self.namespace),'* Appcelerator Titanium Mobile')
        content = content.replace('* Copyright (c) 2009-2010 by %s, Inc.'%(self.namespace),'* Copyright (c) 2009-%s by Appcelerator, Inc.' % date.today().strftime('%Y'))
        content = content.replace("""* Please see the LICENSE included with this distribution for details.
 */""", """* Please see the LICENSE included with this distribution for details.
 *
 * WARNING: This is generated code. Modify at your own risk and without support.
 */""")
        if cb!=None:
            content = cb(content)
        target_file = codecs.open(target_filename,'w','utf-8','replace')
        target_file.write(content)
        target_file.close()
        # then deal with the contents
        return True
    def copy_module_resources(self, source, target):
        """Recursively copy *source* into *target*, transforming known
        source-file types via process_file() and copying the rest verbatim.
        VCS directories and files in the ignore lists are skipped."""
        if not os.path.exists(os.path.expanduser(target)):
            os.mkdir(os.path.expanduser(target))
        for root, dirs, files in os.walk(source, True, None, True):
            for name in ignoreDirs:
                if name in dirs:
                    dirs.remove(name)	# don't visit ignored directories
            for file_ in files:
                if file_ in ignoreFiles:
                    continue
                from_ = join(root, file_)
                to_ = os.path.expanduser(from_.replace(source, target, 1))
                to_directory = os.path.expanduser(split(to_)[0])
                if not exists(to_directory):
                    os.makedirs(to_directory)
                processed = False
                if splitext(file_)[-1] in fileTargets:
                    processed = self.process_file(from_,to_)
                if not processed:
                    if os.path.exists(to_):
                        os.remove(to_)
                    print "[DEBUG] copying: %s => %s" % (from_,to_)
                    copyfile(from_, to_)
    def process_xcode(self,content):
        """Rewrite the template project.pbxproj text: fix relative paths,
        rename products/targets to the project name, optionally pin the
        iOS deployment target, and hook up the builder.py build phases."""
        content = content.replace('../Classes','Classes')
        content = content.replace('../Resources','Resources')
        content = content.replace('../headers/%sCore'%self.namespace,'headers/TiCore')
        content = content.replace('../headers','headers')
        content = content.replace('../lib','lib')
        content = content.replace('Titanium.plist','Info.plist')
        content = content.replace('Titanium',self.namespace)
        content = content.replace('%s-KitchenSink' % self.namespace, self.name)
        content = content.replace('path = %s.app;' % self.namespace, 'path = "%s.app";'%self.name)
        content = content.replace('PRODUCT_NAME = %s'%self.namespace,'PRODUCT_NAME = "%s"'%self.name)
        content = content.replace('PRODUCT_NAME = %s-iPad'%self.namespace,'PRODUCT_NAME = "%s"'%self.name)
        content = content.replace('PRODUCT_NAME = "%s-iPad"'%self.namespace,'PRODUCT_NAME = "%s"'%self.name)
        content = content.replace('PRODUCT_NAME = %s-universal'%self.namespace,'PRODUCT_NAME = "%s"'%self.name)
        content = content.replace('PRODUCT_NAME = "%s-universal"'%self.namespace,'PRODUCT_NAME = "%s"'%self.name)
        content = content.replace('Resources-iPad','Resources')
        content = content.replace('%s.app'%self.namespace,'%s.app'%self.name)
        content = content.replace('path = %s_Prefix.pch;'%self.namespace,'path = "%s_Prefix.pch";'%self.name)
        content = content.replace('%s_Prefix.pch'%self.namespace,'%s_Prefix.pch'%self.name)
        content = content.replace('GCC_PREFIX_HEADER = %s_Prefix.pch;'%self.name,'GCC_PREFIX_HEADER = "%s_Prefix.pch";'%self.name)
        if self.deploy_target:
            content = re.sub(r'IPHONEOS_DEPLOYMENT_TARGET = [0-9.]+;','IPHONEOS_DEPLOYMENT_TARGET = %s;'%self.deploy_target, content)
        builder_py = os.path.abspath(os.path.join(self.sdk_root,"builder.py"))
        pre_compile_script = "\\\"%s\\\" xcode\\nexit $?" % (builder_py)
        content = fix_xcode_script(content,"Pre-Compile",pre_compile_script)
        content = fix_xcode_script(content,"Post-Compile","echo 'post-compile'")
        return content
    def create(self,in_dir,out_dir):
        """Generate the full project under *out_dir* from the SDK template
        in *in_dir*: resources, prefix header, Info.plist, .xcodeproj, and
        the project/module xcconfig files."""
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        for dir_ in ['Classes','lib','Resources','headers']:
            from_ = os.path.join(in_dir,dir_)
            to_ = os.path.join(out_dir,dir_)
            if not os.path.exists(to_): os.makedirs(to_)
            self.copy_module_resources(from_,to_)
        copyfile(os.path.join(in_dir,'iphone','Titanium_Prefix.pch'),os.path.join(out_dir,'%s_Prefix.pch'%self.name))
        copyfile(os.path.join(in_dir,'iphone','Titanium.plist'),os.path.join(out_dir,'Info.plist'))
        xcode_dir = os.path.join(out_dir,'%s.xcodeproj'%self.name)
        if not os.path.exists(xcode_dir):
            os.makedirs(xcode_dir)
        xcode_proj = os.path.join(xcode_dir,'project.pbxproj')
        src_xcode_proj = os.path.join(in_dir,'iphone','Titanium.xcodeproj','project.pbxproj')
        # we do special processing here
        c = open(src_xcode_proj).read()
        c = self.process_xcode(c)
        f = codecs.open(os.path.join(out_dir,'%s.xcodeproj'%self.name,'project.pbxproj'),'w',encoding='utf-8')
        f.write(c)
        f.close()
        xcconfig = os.path.join(out_dir,"project.xcconfig")
        xcconfig = open(xcconfig,'w')
        xcconfig.write("TI_VERSION=%s\n" % self.sdk_version)
        xcconfig.write("TI_SDK_DIR=%s\n" % self.sdk_root.replace(self.sdk_version,'$(TI_VERSION)'))
        xcconfig.write("TI_APPID=%s\n" % self.project_id)
        xcconfig.write("OTHER_LDFLAGS[sdk=iphoneos*]=$(inherited) -weak_framework iAd\n")
        xcconfig.write("OTHER_LDFLAGS[sdk=iphonesimulator*]=$(inherited) -weak_framework iAd\n")
        xcconfig.write("#include \"module\"\n")
        xcconfig.close()
        xcconfig = os.path.join(out_dir,"module.xcconfig")
        xcconfig = open(xcconfig,'w')
        xcconfig.write("// this is a generated file - DO NOT EDIT\n\n")
        xcconfig.close()
def usage(args):
    """Print a one-line usage summary and exit with an error status."""
    print "%s <name> <in> <out>" % (os.path.basename(args[0]))
    sys.exit(-1)
def dequote(s):
    """Strip one layer of surrounding double quotes from *s*, if present.

    BUG FIX: the previous version dropped the last character whenever the
    first character was a quote, corrupting unterminated inputs such as
    '"abc'; stripping now requires a matching closing quote.
    """
    if len(s) >= 2 and s.startswith('"') and s.endswith('"'):
        return s[1:-1]
    return s
def main(args):
    """Command-line entry point: build an Xcode project from the SDK.

    Expected argv: script name, project name, SDK version, SDK root
    directory, project output directory.
    """
    if len(args) < 4:
        usage(args)
    name = dequote(args[1])
    version = dequote(args[2])
    sdk_root = os.path.expanduser(dequote(args[3]))
    project_root = os.path.expanduser(dequote(args[4]))
    # test app id; no deploy-target override
    p = Projector(name,version,sdk_root,project_root,"com.appcelerator.test", None)
    p.create(sdk_root,project_root)
    sys.exit(0)

if __name__ == "__main__":
    #main(sys.argv)
    # NOTE(review): hard-coded invocation left in place of the real argv
    # entry point, presumably for local debugging — confirm before release
    main([sys.argv[0],"KitchenSink-iPad","1.3.0","/Library/Application Support/Titanium/mobilesdk/osx/1.3.0/iphone","/Users/jhaynie/tmp/one_three"])
| gianina-ingenuity/titanium-branch-deep-linking | testbed/x/mobilesdk/osx/5.5.1.GA/iphone/projector.py | Python | mit | 8,529 | [
"VisIt"
] | 15792612048a6d3c7808280083f64e66998392614a7546cac47806ac002674a4 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions that call upon those in modules
:py:mod:`proc`, :py:mod:`driver`, and :py:mod:`wrappers`.
Place in this file quickly defined procedures such as
- aliases for complex methods
- simple modifications to existing methods
"""
import os
import re
import warnings
from psi4.driver import driver_cbs
# Python procedures like these can be run directly from the input file or integrated
# with the energy(), etc. routines by means of lines like those at the end
# of this file.
def fake_file11(wfn, filename='fake_file11.dat', **kwargs):
    r"""Write molecule and gradient information from *wfn* to *filename*
    in the old file11 format.

    .. versionadded:: 0.6
       *wfn* parameter passed explicitly

    :returns: None

    :type filename: str
    :param filename: destination file name for file11 file
    :type wfn: :py:class:`~psi4.core.Wavefunction`
    :param wfn: set of molecule, gradient from which to generate file11

    :examples:
    >>> # [1] file11 for CISD calculation
    >>> G, wfn = gradient('cisd', return_wfn=True)
    >>> fake_file11(wfn, 'mycalc.11')
    """
    mol = wfn.molecule()
    mol.update_geometry()
    grad = wfn.gradient()
    natom = mol.natom()
    # shared row template: blank/symbol column plus three fixed-width floats
    row = '%6s %16.8f %16.8f %16.8f\n'
    with open(filename, 'w') as fh:
        fh.write('%d\n' % natom)
        # geometry rows first ...
        for iat in range(natom):
            fh.write(row % (mol.symbol(iat), mol.x(iat), mol.y(iat), mol.z(iat)))
        # ... then the gradient rows, with an empty symbol column
        for iat in range(natom):
            fh.write(row % ('', grad.get(iat, 0), grad.get(iat, 1), grad.get(iat, 2)))
def sherrill_gold_standard(func, label, **kwargs):
    r"""Compute the Sherrill-group 'Gold Standard' composite method via
    :py:func:`~psi4.driver.cbs`.

    aug-cc-pVQZ SCF reference, MP2 correlation extrapolated over
    aug-cc-pV[TQ]Z according to
    :py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2`, plus a
    CCSD(T)-MP2 delta correction at aug-cc-pVTZ.

    .. math:: E_{total}^{\text{Au\_std}} = E_{total,\; \text{SCF}}^{\text{aug-cc-pVQZ}} \; + E_{corl,\; \text{MP2}}^{\text{aug-cc-pV[TQ]Z}} \; + \delta_{\text{MP2}}^{\text{CCSD(T)}}\big\vert_{\text{aug-cc-pVTZ}}

    >>> # [1] single-point energy by this composite method
    >>> energy('sherrill_gold_standard')
    >>> # [2] finite-difference geometry optimization
    >>> optimize('sherrill_gold_standard')
    >>> # [3] overwrite some pre-defined options
    >>> optimize('sherrill_gold_standard', corl_basis='cc-pV[DT]Z', delta_basis='3-21g')
    """
    # Fill in defaults without clobbering caller-supplied choices.
    for key, value in (
            ('scf_basis', 'aug-cc-pVQZ'),
            ('scf_scheme', driver_cbs.xtpl_highest_1),
            ('corl_wfn', 'mp2'),
            ('corl_basis', 'aug-cc-pV[TQ]Z'),
            ('corl_scheme', driver_cbs.corl_xtpl_helgaker_2),
            ('delta_wfn', 'ccsd(t)'),
            ('delta_wfn_lesser', 'mp2'),
            ('delta_basis', 'aug-cc-pVTZ'),
            ('delta_scheme', driver_cbs.xtpl_highest_1)):
        kwargs.setdefault(key, value)
    # Give the composite a friendly label unless the caller already named it.
    if label == 'custom_function':
        label = 'Sherrill Group Gold Standard'
    return driver_cbs.cbs(func, label, **kwargs)
def allen_focal_point(func, label, **kwargs):
    r"""Compute a Wes Allen-style Focal Point Analysis (JCP 127 014306)
    via :py:func:`~psi4.driver.cbs`.

    SCF is extrapolated over cc-pV[Q56]Z with
    :py:func:`~psi4.driver.driver_cbs.scf_xtpl_helgaker_3`; the MP2, CCSD,
    and CCSD(T) correlation stages use two-point cc-pV[56]Z extrapolation
    (:py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2`); CCSDT and
    CCSDT(Q) enter as plain deltas at cc-pVTZ and cc-pVDZ. The higher-order
    coupled-cluster stages require :ref:`Kallay's MRCC code <sec:mrcc>`.

    .. math:: E_{total}^{\text{FPA}} = E_{total,\; \text{SCF}}^{\text{cc-pV[Q56]Z}} \; + E_{corl,\; \text{MP2}}^{\text{cc-pV[56]Z}} \; + \delta_{\text{MP2}}^{\text{CCSD}}\big\vert_{\text{cc-pV[56]Z}} \; + \delta_{\text{CCSD}}^{\text{CCSD(T)}}\big\vert_{\text{cc-pV[56]Z}} \; + \delta_{\text{CCSD(T)}}^{\text{CCSDT}}\big\vert_{\text{cc-pVTZ}} \; + \delta_{\text{CCSDT}}^{\text{CCSDT(Q)}}\big\vert_{\text{cc-pVDZ}}

    >>> # [1] single-point energy by this composite method
    >>> energy('allen_focal_point')
    >>> # [2] finite-difference geometry optimization embarrasingly parallel
    >>> optimize('allen_focal_point', mode='sow')
    """
    defaults = (
        # SCF reference
        ('scf_basis', 'cc-pV[Q56]Z'),
        ('scf_scheme', driver_cbs.scf_xtpl_helgaker_3),
        # delta: MP2 - SCF
        ('corl_wfn', 'mp2'),
        ('corl_basis', 'cc-pV[56]Z'),
        ('corl_scheme', driver_cbs.corl_xtpl_helgaker_2),
        # delta: CCSD - MP2
        ('delta_wfn', 'mrccsd'),
        ('delta_wfn_lesser', 'mp2'),
        ('delta_basis', 'cc-pV[56]Z'),
        ('delta_scheme', driver_cbs.corl_xtpl_helgaker_2),
        # delta: CCSD(T) - CCSD
        ('delta2_wfn', 'mrccsd(t)'),
        ('delta2_wfn_lesser', 'mrccsd'),
        ('delta2_basis', 'cc-pV[56]Z'),
        ('delta2_scheme', driver_cbs.corl_xtpl_helgaker_2),
        # delta: CCSDT - CCSD(T)
        ('delta3_wfn', 'mrccsdt'),
        ('delta3_wfn_lesser', 'mrccsd(t)'),
        ('delta3_basis', 'cc-pVTZ'),
        ('delta3_scheme', driver_cbs.xtpl_highest_1),
        # delta: CCSDT(Q) - CCSDT
        ('delta4_wfn', 'mrccsdt(q)'),
        ('delta4_wfn_lesser', 'mrccsdt'),
        ('delta4_basis', 'cc-pVDZ'),
        ('delta4_scheme', driver_cbs.xtpl_highest_1),
    )
    # Caller-supplied keywords take precedence over the defaults above.
    for key, value in defaults:
        kwargs.setdefault(key, value)
    if label == 'custom_function':
        label = 'Allen Focal Point'
    return driver_cbs.cbs(func, label, **kwargs)
| psi4/psi4 | psi4/driver/aliases.py | Python | lgpl-3.0 | 7,396 | [
"Psi4"
] | 4be739119ed3da23a197a8fe13737cafddd4c5e74c95e173cb7e16986b38d062 |
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal import pool
from lasagne import init
from lasagne import nonlinearities
from lasagne import layers
from lasagne.theano_extensions import padding
from lasagne.utils import as_tuple
floatX = theano.config.floatX
# Duplicate of conv1d in lasagne.theano_extensions.conv.
# It is copied just in case it is changed in the future
def conv1d_mc0(input, filters, input_shape=None, filter_shape=None,
               border_mode='valid', subsample=(1,)):
    """Perform a 1D convolution by lifting to conv2d with a dummy width-1 axis.

    Shapes go (b, c, i0) -> (b, c, 1, i0); the dummy axis is dropped from
    the result before returning.
    """
    def _lift(shape):
        # (b, c, i0) -> (b, c, 1, i0); None (unknown shape) propagates as-is
        if shape is None:
            return None
        return (shape[0], shape[1], 1, shape[2])

    conved = T.nnet.conv2d(
        input.dimshuffle(0, 1, 'x', 2),
        filters.dimshuffle(0, 1, 'x', 2),
        input_shape=_lift(input_shape),
        filter_shape=_lift(filter_shape),
        subsample=(1, subsample[0]),
        border_mode=border_mode)
    return conved[:, :, 0, :]  # drop the dummy height axis again
# modified from lasagne. Add 'strictsamex' for pad.
def conv_output_length(input_length, filter_size, stride, pad=0):
    """Return the output size along one axis of a convolution.

    Parameters
    ----------
    input_length : int or None
        Size of the input along this axis; ``None`` propagates to the output.
    filter_size : int
        Size of the filter along this axis.
    stride : int
        Stride of the convolution along this axis.
    pad : int, 'valid', 'full', 'same' or 'strictsamex' (default: 0)
        Implicit zero-padding. An integer pads symmetrically by that amount;
        ``'valid'`` means no padding; ``'full'`` pads by ``filter_size - 1``
        on both sides; ``'same'`` and ``'strictsamex'`` both preserve the
        input length (before striding).

    Returns
    -------
    int or None
        The computed output length.

    Raises
    ------
    ValueError
        If `pad` is none of the recognized values.
    """
    if input_length is None:
        return None
    if isinstance(pad, int):
        unstrided = input_length + 2 * pad - filter_size + 1
    elif pad == 'valid':
        unstrided = input_length - filter_size + 1
    elif pad == 'full':
        unstrided = input_length + filter_size - 1
    elif pad in ('same', 'strictsamex'):
        unstrided = input_length
    else:
        raise ValueError('Invalid pad: {0}'.format(pad))
    # integer-arithmetic equivalent of ceil(unstrided / stride)
    return (unstrided + stride - 1) // stride
# modified from lasagne
def pool_output_length(input_length, pool_size, stride, pad, ignore_border):
    """Return the output length of a pooling operator along one dimension.

    Parameters
    ----------
    input_length : int or None
        Length of the input along the pooling dimension.
    pool_size : int or None
        Length of the pooling region.
    stride : int
        Stride between successive pooling regions.
    pad : int or 'strictsame'
        Zero-padding added on each side of the input; the string
        ``'strictsame'`` forces the output length to equal the input length.
    ignore_border : bool
        If ``True``, partial pooling regions are ignored. Must be ``True``
        whenever ``pad != 0``.

    Returns
    -------
    int or None
        ``None`` if either `input_length` or `pool_size` is ``None``,
        otherwise the computed output length.
    """
    if input_length is None or pool_size is None:
        return None
    if pad == 'strictsame':
        # caller guarantees the output matches the input length exactly
        return input_length
    if ignore_border:
        # count only full pooling regions that fit in the padded input,
        # then ceil-divide by the stride
        padded = input_length + 2 * pad - pool_size + 1
        return (padded + stride - 1) // stride
    # partial regions are kept; only defined for unpadded input
    assert pad == 0
    if stride >= pool_size:
        return (input_length + stride - 1) // stride
    return max(0, (input_length - pool_size + stride - 1) // stride) + 1
# add 'strictsamex' method for pad
class Pool2DXLayer(layers.Layer):
    """
    2D pooling layer
    Performs 2D mean or max-pooling over the two trailing axes
    of a 4D input tensor.
    Parameters
    ----------
    incoming : a :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    pool_size : integer or iterable
        The length of the pooling region in each dimension. If an integer, it
        is promoted to a square pooling region. If an iterable, it should have
        two elements.
    stride : integer, iterable or ``None``
        The strides between sucessive pooling regions in each dimension.
        If ``None`` then ``stride = pool_size``.
    pad : integer or iterable
        Number of elements to be added on each side of the input
        in each dimension. Each value must be less than
        the corresponding stride.
    ignore_border : bool
        If ``True``, partial pooling regions will be ignored.
        Must be ``True`` if ``pad != (0, 0)``.
    mode : {'max', 'average_inc_pad', 'average_exc_pad'}
        Pooling mode: max-pooling or mean-pooling including/excluding zeros
        from partially padded pooling regions. Default is 'max'.
    **kwargs
        Any additional keyword arguments are passed to the :class:`Layer`
        superclass.
    See Also
    --------
    MaxPool2DLayer : Shortcut for max pooling layer.
    Notes
    -----
    The value used to pad the input is chosen to be less than
    the minimum of the input, so that the output of each pooling region
    always corresponds to some element in the unpadded input region.
    Using ``ignore_border=False`` prevents Theano from using cuDNN for the
    operation, so it will fall back to a slower implementation.
    """
    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
                 ignore_border=True, mode='max', **kwargs):
        super(Pool2DXLayer, self).__init__(incoming, **kwargs)
        self.pool_size = as_tuple(pool_size, 2)
        # stride defaults to the pool size (non-overlapping pooling)
        if stride is None:
            self.stride = self.pool_size
        else:
            self.stride = as_tuple(stride, 2)
        # 'strictsamex' is kept as a string marker; numeric pads are
        # normalized to 2-tuples
        if pad == 'strictsamex':
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 2)
        self.ignore_border = ignore_border
        self.mode = mode
    def get_output_shape_for(self, input_shape):
        """Compute the (batch, channels, rows, cols) output shape.

        For 'strictsamex' the row axis keeps the input length and only the
        column axis is pooled normally with zero padding.
        """
        output_shape = list(input_shape)  # copy / convert to mutable list
        if self.pad == 'strictsamex':
            output_shape[2] = pool_output_length(
                input_shape[2],
                pool_size=self.pool_size[0],
                stride=self.stride[0],
                pad='strictsame',
                ignore_border=self.ignore_border,
            )
            output_shape[3] = pool_output_length(
                input_shape[3],
                pool_size=self.pool_size[1],
                stride=self.stride[1],
                pad=0,
                ignore_border=self.ignore_border,
            )
        else:
            output_shape[2] = pool_output_length(
                input_shape[2],
                pool_size=self.pool_size[0],
                stride=self.stride[0],
                pad=self.pad[0],
                ignore_border=self.ignore_border,
            )
            output_shape[3] = pool_output_length(
                input_shape[3],
                pool_size=self.pool_size[1],
                stride=self.stride[1],
                pad=self.pad[1],
                ignore_border=self.ignore_border,
            )
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        """Apply the pooling op; 'strictsamex' pads the row axis on the
        left by ceil(pool_size/2) and crops the result back to the input
        length so the row dimension is preserved.
        """
        if self.pad == 'strictsamex':
            assert(self.stride[0] == 1)
            kk = self.pool_size[0]
            ll = int(np.ceil(kk/2.))
            # rr = kk-ll
            # pad = (ll, 0)
            pad = [(ll, 0)]
            length = input.shape[2]
            # NOTE(review): mutates layer state from within the forward
            # pass; pool_2d with padding requires ignore_border=True
            self.ignore_border = True
            input = padding.pad(input, pad, batch_ndim=2)
            pad = (0, 0)
        else:
            pad = self.pad
        pooled = pool.pool_2d(input,
                              ds=self.pool_size,
                              st=self.stride,
                              ignore_border=self.ignore_border,
                              padding=pad,
                              mode=self.mode,
                              )
        if self.pad == 'strictsamex':
            # crop back to the original (symbolic) row length
            pooled = pooled[:, :, :length or None, :]
        return pooled
# add 'strictsamex' method for pad
class MaxPool2DXLayer(Pool2DXLayer):
    """2D max-pooling layer.

    A thin convenience wrapper around :class:`Pool2DXLayer` with
    ``mode='max'`` fixed. Performs 2D max-pooling over the two trailing
    axes of a 4D input tensor; supports the extra ``pad='strictsamex'``
    mode of the parent class.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    pool_size : integer or iterable
        The length of the pooling region in each dimension. An integer is
        promoted to a square pooling region.
    stride : integer, iterable or ``None``
        The strides between successive pooling regions in each dimension.
        ``None`` means ``stride = pool_size``.
    pad : integer or iterable
        Zero-padding added on each side of the input in each dimension.
        Each value must be less than the corresponding stride.
    ignore_border : bool
        If ``True``, partial pooling regions will be ignored.
        Must be ``True`` if ``pad != (0, 0)``.
    **kwargs
        Any additional keyword arguments are passed to the :class:`Layer`
        superclass.

    Notes
    -----
    Using ``ignore_border=False`` prevents Theano from using cuDNN for the
    operation, so it will fall back to a slower implementation.
    """
    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
                 ignore_border=True, **kwargs):
        # delegate everything to the generic pooling layer, pinning the mode
        super(MaxPool2DXLayer, self).__init__(incoming,
                                              pool_size,
                                              stride=stride,
                                              pad=pad,
                                              ignore_border=ignore_border,
                                              mode='max',
                                              **kwargs)
# add 'strictsamex' method for pad
class Conv2DXLayer(layers.Layer):
    """
    lasagne.layers.Conv2DLayer(incoming, num_filters, filter_size,
    stride=(1, 1), pad=0, untie_biases=False,
    W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify,
    convolution=theano.tensor.nnet.conv2d, **kwargs)
    2D convolutional layer
    Performs a 2D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity.
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 4D tensor, with shape
        ``(batch_size, num_input_channels, input_rows, input_columns)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        An integer or a 2-element tuple specifying the size of the filters.
    stride : int or iterable of int
        An integer or a 2-element tuple specifying the stride of the
        convolution operation.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.
        A single integer results in symmetric zero-padding of the given size on
        all borders, a tuple of two integers allows different symmetric padding
        per dimension.
        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.
        ``'same'`` pads with half the filter size (rounded down) on both sides.
        When ``stride=1`` this results in an output size equal to the input
        size. Even filter size is not supported.
        ``'strictsamex'`` pads to the right of the third axis (x axis)
        to keep the same dim as input
        require stride=(1, 1)
        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).
        Note that ``'full'`` and ``'same'`` can be faster than equivalent
        integer values due to optimizations by Theano.
    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).
        If True, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be a
        3D tensor.
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a 4D tensor with shape
        ``(num_filters, num_input_channels, filter_rows, filter_columns)``.
        See :func:`lasagne.utils.create_param` for more information.
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, output_rows, output_columns)`` instead.
        See :func:`lasagne.utils.create_param` for more information.
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.
    convolution : callable
        The convolution implementation to use. Usually it should be fine to
        leave this at the default value.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.
    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    b : Theano shared variable or expression
        Variable or expression representing the biases.
    Notes
    -----
    Theano's underlying convolution (:func:`theano.tensor.nnet.conv.conv2d`)
    only supports ``pad=0`` and ``pad='full'``. This layer emulates other modes
    by cropping a full convolution or explicitly padding the input with zeros.
    """
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 convolution=T.nnet.conv2d, **kwargs):
        super(Conv2DXLayer, self).__init__(incoming, **kwargs)
        # None disables the nonlinearity (identity / linear layer)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, 2)
        self.stride = as_tuple(stride, 2)
        self.untie_biases = untie_biases
        self.convolution = convolution
        # validate string pad modes before normalizing
        if pad == 'same':
            if any(s % 2 == 0 for s in self.filter_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')
        if pad == 'strictsamex':
            if not (stride == 1 or stride == (1, 1)):
                raise NotImplementedError(
                    '`strictsamex` padding requires stride=(1, 1) or 1')
        # 'full'/'same'/'strictsamex' stay as string markers;
        # 'valid' and ints are normalized to 2-tuples
        if pad == 'valid':
            self.pad = (0, 0)
        elif pad in ('full', 'same', 'strictsamex'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 2, int)
        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            # untied biases get one bias per output position per channel
            if self.untie_biases:
                biases_shape = (num_filters, self.output_shape[2], self.
                                output_shape[3])
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)
    def get_W_shape(self):
        """Get the shape of the weight matrix `W`.
        Returns
        -------
        tuple of int
            The shape of the weight matrix.
        """
        num_input_channels = self.input_shape[1]
        return (self.num_filters, num_input_channels, self.filter_size[0],
                self.filter_size[1])
    def get_output_shape_for(self, input_shape):
        """Compute the (batch, filters, rows, cols) output shape.

        'strictsamex' preserves the row axis and treats the column axis
        as a valid convolution.
        """
        if self.pad == 'strictsamex':
            pad = ('strictsamex', 'valid')
        else:
            pad = self.pad if isinstance(self.pad, tuple) else (self.pad,) * 2
        output_rows = conv_output_length(input_shape[2],
                                         self.filter_size[0],
                                         self.stride[0],
                                         pad[0])
        output_columns = conv_output_length(input_shape[3],
                                            self.filter_size[1],
                                            self.stride[1],
                                            pad[1])
        return (input_shape[0], self.num_filters, output_rows, output_columns)
    def get_output_for(self, input, input_shape=None, **kwargs):
        """Build the symbolic convolution + bias + nonlinearity expression.

        String pad modes are emulated either by cropping a full convolution
        ('same' with unit stride) or by explicitly zero-padding the input
        and running a valid convolution.
        """
        # The optional input_shape argument is for when get_output_for is
        # called directly with a different shape than self.input_shape.
        if input_shape is None:
            input_shape = self.input_shape
        if self.stride == (1, 1) and self.pad == 'same':
            # simulate same convolution by cropping a full convolution
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      input_shape=input_shape,
                                      # image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode='full')
            crop_x = self.filter_size[0] // 2
            crop_y = self.filter_size[1] // 2
            conved = conved[:, :, crop_x:-crop_x or None,
                            crop_y:-crop_y or None]
        else:
            # no padding needed, or explicit padding of input needed
            if self.pad == 'full':
                border_mode = 'full'
                pad = [(0, 0), (0, 0)]
            elif self.pad == 'same':
                border_mode = 'valid'
                pad = [(self.filter_size[0] // 2,
                        self.filter_size[0] // 2),
                       (self.filter_size[1] // 2,
                        self.filter_size[1] // 2)]
            elif self.pad == 'strictsamex':
                # pad only the row axis, asymmetrically, so the output
                # keeps exactly the input row length
                border_mode = 'valid'
                kk = self.filter_size[0]-1
                rr = kk // 2
                ll = kk-rr
                pad = [(ll, rr),
                       (0, 0)]
            else:
                border_mode = 'valid'
                pad = [(self.pad[0], self.pad[0]), (self.pad[1], self.pad[1])]
            if pad != [(0, 0), (0, 0)]:
                input = padding.pad(input, pad, batch_ndim=2)
                # keep the static shape info in sync with the padded input
                input_shape = (input_shape[0], input_shape[1],
                               None if input_shape[2] is None else
                               input_shape[2] + pad[0][0] + pad[0][1],
                               None if input_shape[3] is None else
                               input_shape[3] + pad[1][0] + pad[1][1])
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      input_shape=input_shape,
                                      # image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode=border_mode)
        if self.b is None:
            activation = conved
        elif self.untie_biases:
            activation = conved + self.b.dimshuffle('x', 0, 1, 2)
        else:
            # broadcast the per-filter bias over batch and both spatial axes
            activation = conved + self.b.dimshuffle('x', 0, 'x', 'x')
        return self.nonlinearity(activation)
class GaussianScan1DLayer(layers.Layer):
    """ 1D Adaptive Gaussian filter
    Gaussian filters that scan through the third dimension
    It is implemented with convolution.
    Each element in the channel axis has its own standard deviation (\sigma)
    for Gaussian.
    Gaussian filter is adjusting its \sigma during training.
    Performs a 1D convolution on its input
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 3D tensor, with shape
        ``(batch_size, num_input_channels, input_length)``.
    filter_size : int or iterable of int
        An integer or a 1-element tuple specifying the size of the filters.
        This is the width of the filters that accomodate the Gaussian filters
    init_std : float
        The initial \sigma for the Gaussian filters
    stride : int or iterable of int
        An integer or a 1-element tuple specifying the stride of the
        convolution operation.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.
        An integer or a 1-element tuple results in symmetric zero-padding of
        the given size on both borders.
        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.
        ``'same'`` pads with half the filter size (rounded down) on both sides.
        When ``stride=1`` this results in an output size equal to the input
        size. Even filter size is not supported.
        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).
    W_logstd : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a 1D tensor with shape
        ``(num_input_channels, )``.
        Note:
        The std is provided in log-scale, log(std).
    convolution : callable
        The convolution implementation to use. The
        `lasagne.theano_extensions.conv` module provides some alternative
        implementations for 1D convolutions, because the Theano API only
        features a 2D convolution implementation. Usually it should be fine
        to leave this at the default value.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.
    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    """
    def __init__(self, incoming, filter_size,
                 init_std=5., W_logstd=None,
                 stride=1, pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0, **kwargs):
        super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
        # convolution = conv1d_gpucorrmm_mc0
        # convolution = conv.conv1d_mc0
        # convolution = T.nnet.conv2d
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution
        # if self.filter_size[0] % 2 == 0:
        #     raise NotImplementedError(
        #         'GaussianConv1dLayer requires odd filter size.')
        # string pad modes are kept as markers; ints become 1-tuples
        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)
        # the std parameter is learned in log-scale so it stays positive
        if W_logstd is None:
            init_std = np.asarray(init_std, dtype=floatX)
            W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        self.W_logstd = self.add_param(W_logstd,
                                       (self.num_input_channels,),
                                       name="W_logstd",
                                       regularizable=False)
        # W is a symbolic expression derived from W_logstd, not a parameter
        self.W = self.make_gaussian_filter()
    def get_W_shape(self):
        """Get the shape of the weight matrix `W`.
        Returns
        -------
        tuple of int
            The shape of the weight matrix.
        """
        return (self.num_input_channels, self.num_input_channels,
                self.filter_size[0])
    def get_output_shape_for(self, input_shape):
        """Compute the (batch, channels, length) output shape; channel
        count is unchanged because each channel is filtered independently.
        """
        if self.pad == 'strictsame':
            output_length = input_shape[2]
        else:
            pad = self.pad if isinstance(self.pad, tuple) else (self.pad,)
            output_length = conv_output_length(
                input_shape[2],
                self.filter_size[0], self.stride[0], pad[0])
        return (input_shape[0], self.num_input_channels, output_length)
    def make_gaussian_filter(self):
        """Build the symbolic (channels, channels, filter_size) Gaussian
        filter bank from W_logstd; off-diagonal channel pairs are masked
        to zero so each channel is smoothed only by its own Gaussian.
        """
        W_shape = self.get_W_shape()
        k = self.filter_size[0]
        # sample positions centered (approximately) on zero
        k_low = int(np.floor(-(k-1)/2))
        k_high = k_low+k
        W_std = T.exp(self.W_logstd)
        std_array = T.tile(
            W_std.dimshuffle('x', 0, 'x'),
            (self.num_input_channels, 1, k)
        )
        x = np.arange(k_low, k_high).reshape((1, 1, -1))
        x = T.tile(
            x, (self.num_input_channels, self.num_input_channels, 1)
        ).astype(floatX)
        # normalized Gaussian density: (1/(sqrt(2*pi)*std)) * exp(-x^2/(2*std^2))
        p1 = (1./(np.sqrt(2.*np.pi))).astype(floatX)
        p2 = np.asarray(2., dtype=floatX)
        gf = (p1/std_array)*T.exp(-x**2/(p2*(std_array**2)))
        # gf = gf.astype(theano.config.floatX)
        # diagonal mask: channel i only convolves with channel i
        mask = np.zeros(W_shape)
        rg = np.arange(self.num_input_channels)
        mask[rg, rg, :] = 1
        mask = mask.astype(floatX)
        gf = gf*mask
        return gf
    def get_output_for(self, input, input_shape=None, **kwargs):
        """Apply the Gaussian filter bank; pad handling mirrors the
        convolution layer ('same' via cropped full conv, 'strictsame'
        via asymmetric explicit padding).
        """
        # the optional input_shape argument is for when get_output_for is
        # called directly with a different shape than self.input_shape.
        if input_shape is None:
            input_shape = self.input_shape
        if self.stride == (1,) and self.pad == 'same':
            # simulate same convolution by cropping a full convolution
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      input_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode='full')
            crop = self.filter_size[0] // 2
            conved = conved[:, :, crop:-crop or None]
        else:
            # no padding needed, or explicit padding of input needed
            if self.pad == 'full':
                border_mode = 'full'
                pad = (0, 0)
            elif self.pad == 'same':
                border_mode = 'valid'
                pad = (self.filter_size[0] // 2,
                       (self.filter_size[0] - 1) // 2)
            elif self.pad == 'strictsame':
                # NOTE(review): mutates layer state (stride forced to 1)
                # from within the forward pass
                self.stride = (1,)
                border_mode = 'valid'
                kk = self.filter_size[0]-1
                rr = kk // 2
                ll = kk-rr
                pad = (ll, rr)
            else:
                border_mode = 'valid'
                pad = (self.pad[0], self.pad[0])
            if pad != (0, 0):
                input = padding.pad(input, [pad], batch_ndim=2)
                # keep the static shape info in sync with the padded input
                input_shape = (input_shape[0], input_shape[1],
                               None if input_shape[2] is None else
                               input_shape[2] + pad[0] + pad[1])
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      input_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode=border_mode)
        activation = conved
        return self.nonlinearity(activation)
class FixedGaussianScan1DLayer(GaussianScan1DLayer):
    """ 1D Fixed Gaussian filter
    Gaussian filter is not changing during the training
    Performs a 1D convolution on its input
    """
    def __init__(self, incoming, filter_size, init_std=5.,
                 stride=1, pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0, **kwargs):
        # NOTE(review): deliberately names GaussianScan1DLayer in super(),
        # which invokes the grandparent (layers.Layer) __init__ and skips
        # the parent __init__; the parent's setup is re-done below so that
        # W_logstd can be registered with trainable=False.
        super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution
        # string pad modes are kept as markers; ints become 1-tuples
        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)
        # std is stored in log-scale, as in the parent class
        init_std = np.asarray(init_std, dtype=floatX)
        W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        # trainable=False freezes the Gaussian width for the whole run
        self.W_logstd = self.add_param(W_logstd,
                                       (self.num_input_channels,),
                                       name="W_logstd",
                                       regularizable=False,
                                       trainable=False)
        self.W = self.make_gaussian_filter()
| ciaua/clip2frame | clip2frame/layers.py | Python | isc | 32,463 | [
"Gaussian"
] | e2344c0e4a7708d704e56ea1473b4bbfa65d1a1e814e20063c5bb0ac7ef481b2 |
#!/usr/bin/env python
#
# Data manager for reference data for the 'mothur_toolsuite' Galaxy tools
import json
import optparse
import os
import shutil
import sys
import tarfile
import tempfile
import urllib2
import zipfile
# When extracting files from archives, skip names that
# start with the following strings (hidden files, macOS metadata)
IGNORE_PATHS = ('.', '__MACOSX/', '__')

# Map file extensions to data table names
# (used by identify_type() to route each file into a data table)
MOTHUR_FILE_TYPES = {".map": "map",
                     ".fasta": "aligndb",
                     ".pat": "lookup",
                     ".tax": "taxonomy"}
# Reference data URLs
MOTHUR_REFERENCE_DATA = {
# Look up data
# http://www.mothur.org/wiki/Lookup_files
"lookup_titanium": {
"GS FLX Titanium": ["http://www.mothur.org/w/images/9/96/LookUp_Titanium.zip", ]
},
"lookup_gsflx": {
"GSFLX": ["http://www.mothur.org/w/images/8/84/LookUp_GSFLX.zip", ]
},
"lookup_gs20": {
"GS20": ["http://www.mothur.org/w/images/7/7b/LookUp_GS20.zip", ]
},
# RDP reference files
# http://www.mothur.org/wiki/RDP_reference_files
"RDP_v10": {
"16S rRNA RDP training set 10":
["http://www.mothur.org/w/images/b/b5/Trainset10_082014.rdp.tgz", ],
"16S rRNA PDS training set 10":
["http://www.mothur.org/w/images/2/24/Trainset10_082014.pds.tgz", ],
},
"RDP_v9": {
"16S rRNA RDP training set 9":
["http://www.mothur.org/w/images/7/72/Trainset9_032012.rdp.zip", ],
"16S rRNA PDS training set 9":
["http://www.mothur.org/w/images/5/59/Trainset9_032012.pds.zip", ],
},
"RDP_v7": {
"16S rRNA RDP training set 7":
["http://www.mothur.org/w/images/2/29/Trainset7_112011.rdp.zip", ],
"16S rRNA PDS training set 7":
["http://www.mothur.org/w/images/4/4a/Trainset7_112011.pds.zip", ],
"8S rRNA Fungi training set 7":
["http://www.mothur.org/w/images/3/36/FungiLSU_train_v7.zip", ],
},
"RDP_v6": {
"RDP training set 6":
["http://www.mothur.org/w/images/4/49/RDPTrainingSet.zip", ],
},
# Silva reference files
# http://www.mothur.org/wiki/Silva_reference_files
"silva_release_119": {
"SILVA release 119":
["http://www.mothur.org/w/images/2/27/Silva.nr_v119.tgz",
"http://www.mothur.org/w/images/5/56/Silva.seed_v119.tgz", ],
},
"silva_release_102": {
"SILVA release 102":
["http://www.mothur.org/w/images/9/98/Silva.bacteria.zip",
"http://www.mothur.org/w/images/3/3c/Silva.archaea.zip",
"http://www.mothur.org/w/images/1/1a/Silva.eukarya.zip", ],
},
"silva_gold_bacteria": {
"SILVA gold":
["http://www.mothur.org/w/images/f/f1/Silva.gold.bacteria.zip", ],
},
# Greengenes
# http://www.mothur.org/wiki/Greengenes-formatted_databases
"greengenes_August2013": {
"Greengenes August 2013":
["http://www.mothur.org/w/images/1/19/Gg_13_8_99.refalign.tgz",
"http://www.mothur.org/w/images/6/68/Gg_13_8_99.taxonomy.tgz", ],
},
"greengenes_May2013": {
"Greengenes May 2013":
["http://www.mothur.org/w/images/c/cd/Gg_13_5_99.refalign.tgz",
"http://www.mothur.org/w/images/9/9d/Gg_13_5_99.taxonomy.tgz", ],
},
"greengenes_old": {
"Greengenes pre-May 2013":
["http://www.mothur.org/w/images/7/72/Greengenes.alignment.zip",
"http://www.mothur.org/w/images/1/16/Greengenes.tax.tgz", ],
},
"greengenes_gold_alignment": {
"Greengenes gold alignment":
["http://www.mothur.org/w/images/2/21/Greengenes.gold.alignment.zip", ],
},
# Secondary structure maps
# http://www.mothur.org/wiki/Secondary_structure_map
"secondary_structure_maps_silva": {
"SILVA":
["http://www.mothur.org/w/images/6/6d/Silva_ss_map.zip", ],
},
"secondary_structure_maps_greengenes": {
"Greengenes":
["http://www.mothur.org/w/images/4/4b/Gg_ss_map.zip", ],
},
# Lane masks: not used here?
"lane_masks": {
"Greengenes-compatible":
["http://www.mothur.org/w/images/2/2a/Lane1241.gg.filter",
"http://www.mothur.org/w/images/a/a0/Lane1287.gg.filter",
"http://www.mothur.org/w/images/3/3d/Lane1349.gg.filter", ],
"SILVA-compatible":
["http://www.mothur.org/w/images/6/6d/Lane1349.silva.filter", ]
},
}
# Utility functions for interacting with Galaxy JSON
def read_input_json(jsonfile):
    """Parse the JSON file supplied by the data manager tool.

    Returns a tuple (param_dict, extra_files_path): 'param_dict' is an
    arbitrary dictionary of parameters input into the tool;
    'extra_files_path' is the directory where output files must be put
    for the receiving data manager to pick them up.

    NB the 'extra_files_path' directory doesn't exist initially; it is
    the job of the script to create it if necessary.
    """
    with open(jsonfile) as fh:
        params = json.load(fh)
    extra_files_path = params['output_data'][0]['extra_files_path']
    return (params['param_dict'], extra_files_path)
# Utility functions for creating data table dictionaries
#
# Example usage:
# >>> d = create_data_tables_dict()
# >>> add_data_table(d,'my_data')
# >>> add_data_table_entry(dict(dbkey='hg19',value='human'))
# >>> add_data_table_entry(dict(dbkey='mm9',value='mouse'))
# >>> print str(json.dumps(d))
def create_data_tables_dict():
    """Return a fresh dictionary for collecting data table information.

    Use together with 'add_data_table' and 'add_data_table_entry'; the
    resulting structure can be serialised to JSON and handed back to the
    data manager.
    """
    return {'data_tables': {}}
def add_data_table(d, table):
    """Register an (initially empty) data table named 'table' in 'd'."""
    d['data_tables'][table] = []
def add_data_table_entry(d, table, entry):
    """Append 'entry' to the data table 'table'.

    'entry' should be a dictionary keyed by the data table's column
    names. Raises an exception if the named data table doesn't exist.
    """
    tables = d['data_tables']
    if table not in tables:
        raise Exception("add_data_table_entry: no table '%s'" % table)
    tables[table].append(entry)
# Utility functions for downloading and unpacking archive files
def download_file(url, target=None, wd=None):
    """Download a file from a URL.

    Fetches a file from the specified URL. If 'target' is specified then
    the file is saved under that name, otherwise under the basename of
    the URL. If 'wd' is given it is used as the working directory where
    the file is saved on the local system.

    Returns the name that the file is saved with.
    """
    print("Downloading %s" % url)
    if not target:
        target = os.path.basename(url)
    if wd:
        target = os.path.join(wd, target)
    print("Saving to %s" % target)
    # Fix: close the output file deterministically instead of relying on
    # garbage collection of the anonymous file object.
    with open(target, 'wb') as fh:
        fh.write(urllib2.urlopen(url).read())
    return target
def unpack_zip_archive(filen, wd=None):
    """Extract files from a ZIP archive.

    Given a ZIP archive, extract the files it contains and return a list
    of the resulting file names and paths. 'wd' specifies the working
    directory to extract the files to (default: current directory).
    Once all files are extracted the ZIP archive is deleted.
    """
    if not zipfile.is_zipfile(filen):
        # Fix: the format string previously had no '% filen' argument
        print("%s: not ZIP formatted file" % filen)
        return [filen]
    file_list = []
    z = zipfile.ZipFile(filen)
    for name in z.namelist():
        # str.startswith accepts a tuple of prefixes (replaces reduce())
        if name.startswith(IGNORE_PATHS):
            print("Ignoring %s" % name)
            continue
        target = os.path.join(wd, name) if wd else name
        if name.endswith('/'):
            # Make directory
            print("Creating dir %s" % target)
            try:
                os.makedirs(target)
            except OSError:
                pass
        else:
            # Extract file; ensure its parent directory exists
            print("Extracting %s" % name)
            try:
                os.makedirs(os.path.dirname(target))
            except OSError:
                pass
            with open(target, 'wb') as fh:
                fh.write(z.read(name))
            file_list.append(target)
    z.close()
    print("Removing %s" % filen)
    os.remove(filen)
    return file_list
def unpack_tar_archive(filen, wd=None):
    """Extract files from a TAR archive.

    Given a TAR archive (optionally compressed with gzip or bz2),
    extract the files it contains and return a list of the resulting
    file names and paths. 'wd' specifies the working directory to
    extract the files to (default: current directory). Once all files
    are extracted the TAR archive is deleted.
    """
    if not tarfile.is_tarfile(filen):
        # Fix: the format string previously had no '% filen' argument
        print("%s: not TAR file" % filen)
        return [filen]
    file_list = []
    t = tarfile.open(filen)
    for name in t.getnames():
        # Check for unwanted files (str.startswith accepts a tuple)
        if name.startswith(IGNORE_PATHS):
            print("Ignoring %s" % name)
            continue
        # Extract file
        print("Extracting %s" % name)
        t.extract(name, wd)
        file_list.append(os.path.join(wd, name) if wd else name)
    t.close()
    print("Removing %s" % filen)
    os.remove(filen)
    return file_list
def unpack_archive(filen, wd=None):
    """Extract files from an archive.

    Wrapper function that calls the appropriate unpacking function
    depending on the archive type, and returns a list of the files that
    have been extracted. 'wd' specifies the working directory to extract
    the files to (default: current directory). Non-archive files are
    returned as-is in a single-element list.
    """
    print("Unpack %s" % filen)
    ext = os.path.splitext(filen)[1]
    print("Extension: %s" % ext)
    if ext == ".zip":
        return unpack_zip_archive(filen, wd=wd)
    elif ext in (".tgz", ".tar", ".gz", ".bz2"):
        # Generalized: also route .tar/.tar.gz/.tar.bz2 to the TAR
        # unpacker; unpack_tar_archive itself returns [filen] for
        # anything that is not actually a tarball.
        return unpack_tar_archive(filen, wd=wd)
    else:
        return [filen]
def fetch_files(urls, wd=None, files=None):
    """Download and unpack each URL in 'urls'.

    'wd' specifies the working directory to download/extract into
    (default: current directory). If 'files' is supplied, the extracted
    paths are appended to it and that same list is returned; otherwise a
    new list is returned.
    """
    collected = [] if files is None else files
    for url in urls:
        archive = download_file(url, wd=wd)
        collected.extend(unpack_archive(archive, wd=wd))
    return collected
# Utility functions specific to the Mothur reference data
def identify_type(filen):
    """Map a file's extension to its data table name (None if unknown)."""
    ext = os.path.splitext(filen)[1]
    return MOTHUR_FILE_TYPES.get(ext)
def get_name(filen):
    """Build a human-readable name from a file name.

    Strips the directory and extension, then turns '.' and '_'
    separators into spaces.
    """
    stem = os.path.splitext(os.path.basename(filen))[0]
    return stem.replace('.', ' ').replace('_', ' ')
def fetch_from_mothur_website(data_tables, target_dir, datasets):
    """Fetch reference data from the Mothur website.

    For each dataset in the list 'datasets', download (and if necessary
    unpack) the related files from the Mothur website, copy them to the
    data manager's target directory, and add references to the files to
    the appropriate data table.

    The 'data_tables' dictionary should have been created using the
    'create_data_tables_dict' and 'add_data_table' functions.

    Arguments:
      data_tables: a dictionary containing the data table info
      target_dir: directory to put the downloaded files
      datasets: a list of dataset names corresponding to keys in
        the MOTHUR_REFERENCE_DATA dictionary
    """
    # Make working dir
    wd = tempfile.mkdtemp(suffix=".mothur", dir=os.getcwd())
    print("Working dir %s" % wd)
    # Iterate over all requested reference data URLs
    for dataset in datasets:
        print("Handling dataset '%s'" % dataset)
        for name in MOTHUR_REFERENCE_DATA[dataset]:
            for f in fetch_files(MOTHUR_REFERENCE_DATA[dataset][name], wd=wd):
                type_ = identify_type(f)
                entry_name = "%s (%s)" % (os.path.splitext(os.path.basename(f))[0], name)
                print("%s\t\'%s'\t.../%s" % (type_, entry_name, os.path.basename(f)))
                # Files with unrecognised extensions are left behind in
                # the working dir and removed with it below
                if type_ is not None:
                    # Move to target dir
                    ref_data_file = os.path.basename(f)
                    f1 = os.path.join(target_dir, ref_data_file)
                    print("Moving %s to %s" % (f, f1))
                    os.rename(f, f1)
                    # Add entry to data table
                    table_name = "mothur_%s" % type_
                    add_data_table_entry(data_tables, table_name, dict(name=entry_name, value=ref_data_file))
    # Remove working dir
    print("Removing %s" % wd)
    shutil.rmtree(wd)
def files_from_filesystem_paths(paths):
    """Return list of file paths from arbitrary input paths.

    Given a list of filesystem paths, return a list of full paths
    corresponding to all files found recursively from under those paths.
    Paths that are neither files nor directories are reported and
    ignored.
    """
    # Collect files to add
    files = []
    for path in paths:
        path = os.path.abspath(path)
        print("Examining '%s'..." % path)
        if os.path.isfile(path):
            # Store full path for file
            files.append(path)
        elif os.path.isdir(path):
            # Descend into directory and collect the files
            for f in os.listdir(path):
                files.extend(files_from_filesystem_paths((os.path.join(path, f), )))
        else:
            print("Not a file or directory, ignored")
    return files
def import_from_server(data_tables, target_dir, paths, description, link_to_data=False):
    """Import reference data from filesystem paths.

    Creates references to the specified file(s) on the Galaxy server in
    the appropriate data table (determined from the file extension).

    The 'data_tables' dictionary should have been created using the
    'create_data_tables_dict' and 'add_data_table' functions.

    Arguments:
      data_tables: a dictionary containing the data table info
      target_dir: directory to put copy or link to the data file
      paths: list of file and/or directory paths to import
      description: text to associate with the files
      link_to_data: boolean, if False then copy the data file
        into Galaxy (default); if True then make a symlink to
        the data file
    """
    # Collect list of files based on input paths
    files = files_from_filesystem_paths(paths)
    # Handle each file individually
    for f in files:
        type_ = identify_type(f)
        if type_ is None:
            print("%s: unrecognised type, skipped" % f)
            continue
        ref_data_file = os.path.basename(f)
        target_file = os.path.join(target_dir, ref_data_file)
        entry_name = "%s" % os.path.splitext(ref_data_file)[0]
        if description:
            entry_name += " (%s)" % description
        print("%s\t\'%s'\t.../%s" % (type_, entry_name, ref_data_file))
        # Link to or copy the data
        if link_to_data:
            os.symlink(f, target_file)
        else:
            shutil.copyfile(f, target_file)
        # Add entry to data table
        table_name = "mothur_%s" % type_
        add_data_table_entry(data_tables, table_name, dict(name=entry_name, value=ref_data_file))
if __name__ == "__main__":
    print("Starting...")

    # Read command line
    parser = optparse.OptionParser()
    parser.add_option('--source', action='store', dest='data_source')
    parser.add_option('--datasets', action='store', dest='datasets', default='')
    # Fix: default must be a string - the 'filesystem_paths' branch calls
    # .replace()/.split() on this value, which fails for the old [] default
    parser.add_option('--paths', action='store', dest='paths', default='')
    parser.add_option('--description', action='store', dest='description', default='')
    parser.add_option('--link', action='store_true', dest='link_to_data')
    options, args = parser.parse_args()
    print("options: %s" % options)
    print("args   : %s" % args)

    # Check for JSON file
    if len(args) != 1:
        sys.stderr.write("Need to supply JSON file name")
        sys.exit(1)
    jsonfile = args[0]

    # Read the input JSON
    params, target_dir = read_input_json(jsonfile)

    # Make the target directory
    print("Making %s" % target_dir)
    os.mkdir(target_dir)

    # Set up data tables dictionary
    data_tables = create_data_tables_dict()
    add_data_table(data_tables, 'mothur_lookup')
    add_data_table(data_tables, 'mothur_aligndb')
    add_data_table(data_tables, 'mothur_map')
    add_data_table(data_tables, 'mothur_taxonomy')

    # Fetch data from specified data sources
    if options.data_source == 'mothur_website':
        datasets = options.datasets.split(',')
        fetch_from_mothur_website(data_tables, target_dir, datasets)
    elif options.data_source == 'filesystem_paths':
        # Check description text
        description = options.description.strip()
        # Get list of paths (need to remove any escapes for '\n' and '\r'
        # that might have been inserted by Galaxy)
        paths = options.paths.replace('__cn__', '\n').replace('__cr__', '\r').split()
        import_from_server(data_tables, target_dir, paths, description, link_to_data=options.link_to_data)

    # Write output JSON
    print("Outputting JSON")
    print(str(json.dumps(data_tables)))
    # Fix: open in text mode - json.dumps returns a str, not bytes
    open(jsonfile, 'w').write(json.dumps(data_tables))
    print("Done.")
| blankclemens/tools-iuc | data_managers/data_manager_mothur_toolsuite/data_manager/fetch_mothur_reference_data.py | Python | mit | 18,096 | [
"Galaxy"
] | 6ddc0232fb6f7dc9876a7163149707e8b1ab353d7f306b0ce567d6c462a5f421 |
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max-Planck-Institute of Neurobiology, Munich, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import copy
import re
from typing import Union, Tuple, List, Optional, Dict, Generator, Any, Iterator
import networkx as nx
from knossos_utils import knossosdataset
from scipy import spatial
from .rep_helper import subfold_from_ix, knossos_ml_from_svixs, SegmentationBase, get_unique_subfold_ixs
from .segmentation_helper import *
from ..handler.basics import get_filepaths_from_dir, safe_copy, \
write_txt2kzip
from ..handler.basics import load_pkl2obj, write_obj2pkl, kd_factory
from ..handler.config import DynConfig
from ..proc import meshes
from ..proc.meshes import mesh_area_calc
from ..backend.storage import VoxelStorageDyn
MeshType = Union[Tuple[np.ndarray, np.ndarray, np.ndarray], List[np.ndarray],
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]
class SegmentationObject(SegmentationBase):
"""
Represents individual supervoxels. Used for cell shape ('sv'), cell organelles,
e.g. mitochondria ('mi'), vesicle clouds ('vc') and synaptic junctions ('sj').
Examples:
Can be used to initialized single :class:`~SegmentationObject` object of
a specific type, is also returned by :func:`~SegmentationDataset.get_segmentation_object`::
from syconn.reps.segmentation import SegmentationObject, SegmentationDataset
cell_sv = SegmentationObject(obj_id=.., obj_type='sv', working_dir='..')
cell_sv.load_attr_dict() # populates `cell_sv.attr_dict`
cell_sd = SegmentationDataset(obj_type='sv', working_dir='..')
cell_sv_from_sd = cell_sd.get_segmentation_object(obj_id=cell_sv.id)
cell_sv_from_sd.load_attr_dict()
keys1 = set(cell_sv.attr_dict.keys())
keys2 = set(cell_sv_from_sd.attr_dict.keys())
print(keys1 == keys2)
Attributes:
attr_dict: Attribute dictionary which serves as a general-purpose container. Accessed via
the :class:`~syconn.backend.storage.AttributeDict` interface.
enable_locking: If True, enables file locking.
"""
def __init__(self, obj_id: int, obj_type: str = "sv",
             version: Optional[str] = None, working_dir: Optional[str] = None,
             rep_coord: Optional[np.ndarray] = None, size: Optional[int] = None,
             scaling: Optional[np.ndarray] = None, create: bool = False,
             voxel_caching: bool = True, mesh_caching: bool = False,
             view_caching: bool = False, config: DynConfig = None,
             n_folders_fs: int = None, enable_locking: bool = True,
             skeleton_caching: bool = True, mesh: Optional[MeshType] = None):
    """
    If `working_dir` is given and the directory contains a valid `config.yml` file,
    all other optional kwargs will be defined by the :class:`~syconn.handler.config.DynConfig`
    object available in :attr:`~syconn.global_params.config`.

    Args:
        obj_id: Unique supervoxel ID.
        obj_type: Type of the supervoxel, keys used currently are:

            * 'mi': Mitochondria
            * 'vc': Vesicle clouds
            * 'sj': Synaptic junction
            * 'syn_ssv': Synapses between two
              :class:`~syconn.reps.super_segmentation_object.SuperSegmentationObject`s.
            * 'syn': Synapse fragment between two
              :class:`~syconn.reps.segmentation.SegmentationObject`s.
            * 'cs': Contact site
        version: Version string identifier. if 'tmp' is used, no data will
            be saved to disk.
        working_dir: Path to folder which contains SegmentationDataset of type 'obj_type'.
        rep_coord: Representative coordinate.
        size: Number of voxels.
        scaling: Array defining the voxel size in nanometers (XYZ).
        create: If True, the folder to its storage location :py:attr:`~segobj_dir` will be
            created.
        voxel_caching: Enables caching for voxel data.
        mesh_caching: Enables caching for mesh data.
        view_caching: Enables caching for view data.
        skeleton_caching: Enables caching for skeleton data.
        config: :class:`~syconn.handler.config.DynConfig` object.
        n_folders_fs: Number of folders within the
            :class:`~syconn.reps.segmentation.SegmentationDataset`'s folder structure.
        enable_locking: If True, enables file locking.
        mesh: Mesh data as flat arrays: (indices, vertices, ) or (indices, vertices, normals)
    """
    self._id = int(obj_id)
    self._type = obj_type
    self._rep_coord = rep_coord
    self._size = size
    self._n_folders_fs = n_folders_fs

    # General-purpose attribute container (AttributeDict interface).
    self.attr_dict = {}
    self._bounding_box = None
    self._paths_to_voxels = None
    self.enable_locking = enable_locking

    # Cache flags and the lazily-populated caches they guard.
    self._voxel_caching = voxel_caching
    self._mesh_caching = mesh_caching
    self._mesh_bb = None
    self._view_caching = view_caching
    self._voxels = None
    self._voxel_list = None
    self._mesh = mesh
    self._config = config
    self._views = None
    self._skeleton = None
    self._skeleton_caching = skeleton_caching

    # 'temp' is an accepted alias for the non-persistent 'tmp' version.
    if version == 'temp':
        version = 'tmp'
    self._setup_working_dir(working_dir, config, version, scaling)
    if version is None:
        # Fall back to the version registered in the config for this type.
        try:
            self._version = self.config["versions"][self.type]
        except KeyError:
            raise Exception(f"Unclear version '{version}' during initialization of {self}.")
    else:
        self._version = version

    if create:
        os.makedirs(self.segobj_dir, exist_ok=True)
# IMMEDIATE PARAMETERS
def __hash__(self):
    # Hash on (id, type) - consistent with __eq__ below so objects can
    # safely be used in sets and as dict keys.
    return hash((self.id, self.type.__hash__()))

def __eq__(self, other):
    if not isinstance(other, self.__class__):
        return False
    return self.id == other.id and self.type == other.type

def __ne__(self, other):
    return not self.__eq__(other)

def __repr__(self):
    return (f'{type(self).__name__}(obj_id={self.id}, obj_type="{self.type}", '
            f'version="{self.version}", working_dir="{self.working_dir}")')

def __reduce__(self):
    """
    Support pickling of class instances.
    """
    # Re-create the object from constructor arguments; 'create' is passed
    # as False so unpickling never touches the file system.
    return self.__class__, (self._id, self._type, self._version, self._working_dir,
                            self._rep_coord, self._size, self._scaling, False,
                            self._voxel_caching, self._mesh_caching, self._view_caching,
                            self._config, self._n_folders_fs, self.enable_locking,
                            self._skeleton_caching, self._mesh)
@property
def type(self) -> str:
    """
    The `type` of the supervoxel.

    Examples:
        Keys which are currently used:

            * 'mi': Mitochondria.
            * 'vc': Vesicle clouds.
            * 'sj': Synaptic junction.
            * 'syn_ssv': Synapses between two
            * 'syn': Synapse fragment between two :class:`~SegmentationObject` objects.
            * 'cs': Contact site.

        Can be used to initialized single :class:`~SegmentationObject` object of
        a specific type or the corresponding dataset collection handled with the
        :class:`~SegmentationDataset` class::

            from syconn.reps.segmentation import SegmentationObject, SegmentationDataset
            cell_sv = SegmentationObject(obj_id=.., obj_type='sv', working_dir='..')
            cell_sv.load_attr_dict()  # populates `cell_sv.attr_dict`

            cell_sd = SegmentationDataset(obj_type='sv', working_dir='..')
            cell_sv_from_sd = cell_sd.get_segmentation_object(obj_id=cell_sv.id)
            cell_sv_from_sd.load_attr_dict()

            keys1 = set(cell_sv.attr_dict.keys())
            keys2 = set(cell_sv_from_sd.attr_dict.keys())
            print(keys1 == keys2)

    Returns:
        String identifier.
    """
    return self._type

@property
def n_folders_fs(self) -> int:
    """
    Number of folders used to store the data of :class:`~SegmentationObject`s. Defines
    the hierarchy of the folder structure organized by
    :class:`~SegmentationDataset`.

    Returns:
        The number of (leaf-) folders used for storing supervoxel data.
    """
    if self._n_folders_fs is None:
        # Infer the folder count from the storage directory names on disk.
        ps = glob.glob(
            "%s/%s*/" % (self.segds_dir, self.so_storage_path_base))
        if len(ps) == 0:
            raise Exception("No storage folder found at '{}' and no number of "
                            "subfolders specified (n_folders_fs))".format(self.segds_dir))
        bp = os.path.basename(ps[0].strip('/'))
        # Prefer an exact (suffix-less) match of the base name if present.
        for p in ps:
            bp = os.path.basename(p.strip('/'))
            if bp == self.so_storage_path_base:
                bp = os.path.basename(p.strip('/'))
                break
        if bp == self.so_storage_path_base:
            # Legacy layout without a suffix implies the default count.
            self._n_folders_fs = 100000
        else:
            # Folder name carries the count as a trailing integer suffix.
            self._n_folders_fs = int(re.findall(r'[\d]+', bp)[-1])

    return self._n_folders_fs

@property
def id(self) -> int:
    """
    Returns:
        Globally unique identifier of this object.
    """
    return self._id

@property
def version(self) -> str:
    """
    Version of the :class:`~SegmentationDataset` this object
    belongs to.

    Returns:
        String identifier of the object's version.
    """
    return str(self._version)

@property
def voxel_caching(self) -> bool:
    """If True, voxel data is cached after loading."""
    return self._voxel_caching

@property
def mesh_caching(self) -> bool:
    """If True, mesh data is cached."""
    return self._mesh_caching

@property
def skeleton_caching(self):
    """If True, skeleton data is cached."""
    return self._skeleton_caching

@property
def view_caching(self):
    """If True, view data is cached."""
    return self._view_caching
@property
def scaling(self):
    """
    Voxel size in nanometers (XYZ). Default is taken from the `config.yml` file and
    accessible via `self.config`.
    """
    if self._scaling is None:
        try:
            self._scaling = \
                np.array(self.config['scaling'],
                         dtype=np.float32)
        except Exception:
            # Narrowed from a bare 'except:' so that e.g. KeyboardInterrupt
            # and SystemExit are no longer swallowed. Fall back to isotropic
            # unit scaling when the config provides none.
            self._scaling = np.array([1, 1, 1])
    return self._scaling
@property
def dataset(self) -> 'SegmentationDataset':
    """
    Factory method for the `~syconn.reps.segmentation.SegmentationDataset` this object
    belongs to. Note: a new dataset instance is created on every access.
    """
    return SegmentationDataset(self.type, self.version, self._working_dir)

@property
def config(self) -> DynConfig:
    """
    Config. object which contains all dataset-specific parameters.
    Falls back to the global configuration when none was passed in.
    """
    if self._config is None:
        self._config = global_params.config
    return self._config
# PATHS
@property
def working_dir(self) -> str:
    """
    Working directory.
    """
    return self._working_dir

@property
def identifier(self) -> str:
    """
    Identifier used to create the folder name of the
    `~syconn.reps.segmentation.SegmentationDataset`, e.g. ``sv_0``.
    """
    return "%s_%s" % (self.type, self.version.lstrip("_"))

@property
def segds_dir(self) -> str:
    """
    Path to the `~syconn.reps.segmentation.SegmentationDataset` directory.
    """
    return "%s/%s/" % (self.working_dir, self.identifier)

@property
def so_storage_path_base(self) -> str:
    """
    Base folder name.

    Todo:
        * refactor.
    """
    return "so_storage"

@property
def so_storage_path(self) -> str:
    """
    Path to entry folder of the directory tree where all supervoxel data of
    the corresponding `~syconn.reps.segmentation.SegmentationDataset` is located.
    """
    # Legacy layouts use the bare base name; newer ones append the
    # number of leaf folders as a suffix.
    if self._n_folders_fs is None and os.path.exists("%s/%s/" % (
            self.segds_dir, self.so_storage_path_base)):
        return "%s/%s/" % (self.segds_dir, self.so_storage_path_base)
    elif self._n_folders_fs == 100000 and os.path.exists("%s/%s/" % (
            self.segds_dir, self.so_storage_path_base)):
        return "%s/%s/" % (self.segds_dir, self.so_storage_path_base)
    else:
        return "%s/%s_%d/" % (self.segds_dir, self.so_storage_path_base,
                              self.n_folders_fs)

@property
def segobj_dir(self) -> str:
    """
    Path to the folder where the data of this supervoxel is stored.
    """
    base_path = f"{self.so_storage_path}/" \
                f"{subfold_from_ix(self.id, self.n_folders_fs)}/"
    if os.path.exists(f"{base_path}/voxel.pkl"):
        return base_path
    else:
        # use old folder scheme with leading 0s, e.g. '09'
        return "%s/%s/" % (self.so_storage_path, subfold_from_ix(
            self.id, self.n_folders_fs, old_version=True))

@property
def mesh_path(self) -> str:
    """
    Path to the mesh storage.
    """
    return self.segobj_dir + "mesh.pkl"

@property
def skeleton_path(self) -> str:
    """
    Path to the skeleton storage.
    """
    return self.segobj_dir + "skeletons.pkl"

@property
def attr_dict_path(self) -> str:
    """
    Path to the attribute storage.
    """
    return self.segobj_dir + "attr_dict.pkl"

def view_path(self, woglia=True, index_views=False, view_key=None) -> str:
    """
    Path to the view storage.

    Args:
        woglia: If True (default), use the glia-removed view file.
        index_views: If True, use the index-view file.
        view_key: Custom view identifier; only allowed with the default
            ``woglia=True, index_views=False`` combination.

    Raises:
        ValueError: If `view_key` is combined with non-default settings.
    """
    if view_key is not None and not (woglia and not index_views):
        raise ValueError('view_path with custom view key is only allowed for default settings.')
    # TODO: change bool index_views and bool woglia to respective view_key identifier
    if view_key is not None:
        return self.segobj_dir + 'views_{}.pkl'.format(view_key)
    if index_views:
        return self.segobj_dir + "views_index.pkl"
    elif woglia:
        return self.segobj_dir + "views_woglia.pkl"

    return self.segobj_dir + "views.pkl"

@property
def locations_path(self) -> str:
    """
    Path to the rendering location storage.
    """
    return self.segobj_dir + "locations.pkl"

@property
def voxel_path(self) -> str:
    """
    Path to the voxel storage. See :class:`~syconn.backend.storage.VoxelStorageDyn`
    for details.
    """
    # file type is inferred by either VoxelStorageLazyLoading or VoxelStorageDyn
    return self.segobj_dir + "/voxel"
# PROPERTIES
@property
def cs_partner(self) -> Optional[List[int]]:
"""
Contact site specific attribute.
Returns:
None if object is not of type 'cs', else return the IDs to the two
supervoxels which are part of the contact site.
"""
# TODO: use `cs_id_to_partner_ids_vec` (single source of truth)
if self.type in ['cs', 'syn']:
partner = [self.id >> 32]
partner.append(self.id - (partner[0] << 32))
return partner
else:
return None
@property
def size(self) -> int:
    """
    Returns:
        Number of voxels.
    """
    # Resolve lazily: in-memory attr_dict first, then the on-disk
    # attribute storage, finally compute from the voxel data itself.
    if self._size is None and 'size' in self.attr_dict:
        self._size = self.attr_dict['size']
    elif self._size is None and self.attr_dict_exists:
        self._size = self.lookup_in_attribute_dict("size")

    if self._size is None:
        self.calculate_size()
    return self._size

@property
def shape(self) -> np.ndarray:
    """
    The XYZ extent of this SSV object in voxels.

    Returns:
        The shape/extent of this SSV object in voxels (XYZ).
    """
    return self.bounding_box[1] - self.bounding_box[0]

@property
def bounding_box(self) -> np.ndarray:
    # [lower, upper] voxel coordinates; resolved lazily like `size`.
    if self._bounding_box is None and 'bounding_box' in self.attr_dict:
        self._bounding_box = self.attr_dict['bounding_box']
    elif self._bounding_box is None and self.attr_dict_exists:
        self._bounding_box = self.lookup_in_attribute_dict('bounding_box')

    if self._bounding_box is None:
        self.calculate_bounding_box()
    return self._bounding_box

@property
def rep_coord(self) -> np.ndarray:
    """
    Representative coordinate of this SSV object. Will be the `rep_coord`
    of the first supervoxel in :py:attr:`~svs`.

    Returns:
        1D array of the coordinate (XYZ).
    """
    if self._rep_coord is None and 'rep_coord' in self.attr_dict:
        self._rep_coord = self.attr_dict['rep_coord']
    elif self._rep_coord is None and self.attr_dict_exists:
        self._rep_coord = self.lookup_in_attribute_dict("rep_coord")

    if self._rep_coord is None:
        self.calculate_rep_coord()
    return self._rep_coord

@property
def attr_dict_exists(self) -> bool:
    """
    Checks if an attribute dictionary file exists at :py:attr:`~attr_dict_path`.

    Returns:
        True if the attribute dictionary file exists and contains this
        object's ID. Always False for the non-persistent 'tmp' version.
    """
    if self.version == 'tmp':
        return False
    if not os.path.isfile(self.attr_dict_path):
        return False
    glob_attr_dc = AttributeDict(self.attr_dict_path,
                                 disable_locking=True)  # look-up only, PS 12Dec2018
    return self.id in glob_attr_dc
@property
def voxels_exist(self) -> bool:
    """
    Checks whether voxel data is stored for this object.

    Returns:
        True if the voxel storage contains this object's ID. Always
        False for the non-persistent 'tmp' version.
    """
    if self.version == 'tmp':
        return False
    if self.type in ['syn', 'syn_ssv']:
        voxel_dc = VoxelStorageLazyLoading(self.voxel_path)
        exists = self.id in voxel_dc
        voxel_dc.close()
    else:
        voxel_dc = VoxelStorageDyn(self.voxel_path, read_only=True,
                                   disable_locking=True)
        # Fix: the membership-test result was previously discarded,
        # leaving `exists` unbound (UnboundLocalError) on this branch.
        exists = self.id in voxel_dc
    return exists
@property
def voxels(self) -> np.ndarray:
"""
Voxels associated with this SSV object.
Returns:
3D binary array indicating voxel locations.
"""
if self._voxels is None:
return self.load_voxels()
else:
return self._voxels
@property
def voxel_list(self) -> np.ndarray:
"""
Voxels associated with this SSV object.
Returns:
2D array with sparse voxel coordinates.
"""
if self._voxel_list is None:
voxel_list = load_voxel_list(self)
if self.voxel_caching:
self._voxel_list = voxel_list
return voxel_list
else:
return self._voxel_list
@property
def mesh_exists(self) -> bool:
"""
Returns:
True if mesh exists.
"""
if self.version == 'tmp':
return False
mesh_dc = MeshStorage(self.mesh_path, disable_locking=True)
return self.id in mesh_dc
@property
def skeleton_exists(self) -> bool:
    """
    Returns:
        True if skeleton exists.
    """
    # Temporary versions are never persisted, so nothing can exist on disk.
    if self.version == 'tmp':
        return False
    # Read-only look-up; locking not needed.
    skeleton_dc = SkeletonStorage(self.skeleton_path, disable_locking=True)
    return self.id in skeleton_dc
@property
def mesh(self) -> MeshType:
    """
    Mesh of this object.
    Returns:
        Three flat arrays: indices, vertices, normals.
    """
    if self._mesh is not None:
        return self._mesh
    # Not cached yet: load from storage; keep in memory only when caching
    # is enabled for this object.
    loaded = load_mesh(self)
    if self.mesh_caching:
        self._mesh = loaded
    return loaded
@property
def skeleton(self) -> dict:
    """
    The skeleton representation of this supervoxel.
    Returns:
        Dict of at least three numpy arrays: "nodes", estimated node "diameters" and "edges".
    """
    if self._skeleton is not None:
        return self._skeleton
    # Not cached yet: load from storage; keep in memory only when caching
    # is enabled for this object.
    loaded = load_skeleton(self)
    if self.skeleton_caching:
        self._skeleton = loaded
    return loaded
@property
def mesh_bb(self) -> np.ndarray:
    """
    Bounding box of the object meshes (in nanometers). Approximately
    the same as scaled 'bounding_box'.
    """
    # Resolution order: in-memory attr_dict -> compute from mesh vertices.
    if self._mesh_bb is None and 'mesh_bb' in self.attr_dict:
        self._mesh_bb = self.attr_dict['mesh_bb']
    elif self._mesh_bb is None:
        if len(self.mesh[1]) == 0 or len(self.mesh[0]) == 0:
            # Empty mesh: fall back to the scaled voxel bounding box.
            self._mesh_bb = self.bounding_box * self.scaling
        else:
            # mesh[1] is a flat vertex array; reshape to (N, 3) for min/max.
            verts = self.mesh[1].reshape(-1, 3)
            self._mesh_bb = np.array([np.min(verts, axis=0), np.max(verts, axis=0)], dtype=np.float32)
    return self._mesh_bb
@property
def mesh_size(self) -> float:
    """
    Length of bounding box diagonal (BBD).
    Returns:
        Diagonal length of the mesh bounding box in nanometers.
    """
    bb_min, bb_max = self.mesh_bb[0], self.mesh_bb[1]
    return np.linalg.norm(bb_max - bb_min, ord=2)
@property
def mesh_area(self) -> float:
    """
    Returns:
        Mesh surface area in um^2
    Raises:
        ValueError: If the computed area is NaN or infinite.
    """
    # TODO: decide if caching should be possible
    # Prefer the precomputed value from the attribute storage; compute from
    # the mesh triangles only if it is missing.
    mesh_area = self.lookup_in_attribute_dict('mesh_area')
    if mesh_area is None:
        mesh_area = mesh_area_calc(self.mesh)
    if np.isnan(mesh_area) or np.isinf(mesh_area):
        raise ValueError('Invalid mesh area.')
    return mesh_area
@property
def sample_locations_exist(self) -> bool:
    """
    Returns:
        True if rendering locations have been stored at :py:attr:`~locations_path`.
    """
    # Temporary versions are never persisted.
    if self.version == 'tmp':
        return False
    # Read-only look-up; locking not needed.
    location_dc = CompressedStorage(self.locations_path,
                                    disable_locking=True)
    return self.id in location_dc
def views_exist(self, woglia: bool, index_views: bool = False,
                view_key: Optional[str] = None) -> bool:
    """
    True if rendering locations have been stored at :func:`~view_path`.
    Args:
        woglia: If True, looks for views without glia, i.e. after astrocyte separation.
        index_views: If True, refers to index views.
        view_key: Identifier of the requested views.
    Returns:
        True if a view entry for this object's ID exists in the storage.
    """
    # Temporary versions are never persisted.
    if self.version == 'tmp':
        return False
    # Read-only look-up; locking not needed.
    view_dc = CompressedStorage(self.view_path(woglia=woglia, index_views=index_views, view_key=view_key),
                                disable_locking=True)
    return self.id in view_dc
def views(self, woglia: bool, index_views: bool = False,
          view_key: Optional[str] = None) -> Union[np.ndarray, int]:
    """
    Getter method for the views of this supervoxel. Only valid for cell fragments, i.e.
    :py:attr:`~type` must be `sv`.
    Args:
        woglia: If True, looks for views without glia, i.e. after astrocyte separation.
        index_views: If True, refers to index views.
        view_key: Identifier of the requested views.
    Returns:
        The requested view array or `-1` if it does not exist.
    """
    assert self.type == "sv"
    # Guard clauses: cached value first, then existence check, then load.
    if self._views is not None:
        return self._views
    if not self.views_exist(woglia):
        return -1
    loaded = self.load_views(woglia=woglia, index_views=index_views,
                             view_key=view_key)
    if self.view_caching:
        self._views = loaded
    return loaded
def sample_locations(self, force=False, save=True, ds_factor=None):
    """
    Getter method for the rendering locations of this supervoxel. Only valid for cell
    fragments, i.e. :py:attr:`~type` must be `sv`.
    Args:
        force: Overwrite existing data.
        save: If True, saves the result at :py:attr:`~locations_path`. Uses
            :class:`~syconn.backend.storage.CompressedStorage`.
        ds_factor: Down sampling factor used to generate the rendering locations.
    Returns:
        Array of rendering locations (XYZ) with shape (N, 3) in nanometers!
    """
    assert self.type == "sv"
    if self.sample_locations_exist and not force:
        # Cached on disk: read-only look-up, locking not needed.
        return CompressedStorage(self.locations_path, disable_locking=True)[self.id]
    else:
        verts = self.mesh[1].reshape(-1, 3)
        if len(verts) == 0:  # only return scaled rep. coord as [1, 3] array
            return np.array([self.rep_coord, ], dtype=np.float32) * self.scaling
        if ds_factor is None:
            ds_factor = 2000  # default down sampling in nm
        if self.config.use_new_renderings_locs:
            coords = generate_rendering_locs(verts, ds_factor).astype(np.float32)
        else:
            coords = surface_samples(verts, [ds_factor] * 3, r=ds_factor / 2).astype(np.float32)
        if save:
            loc_dc = CompressedStorage(self.locations_path, read_only=False,
                                       disable_locking=not self.enable_locking)
            loc_dc[self.id] = coords.astype(np.float32)
            loc_dc.push()  # flush to disk
        return coords.astype(np.float32)
def load_voxels(self, voxel_dc: Optional[Union[VoxelStorageDyn, VoxelStorage]] = None) -> np.ndarray:
    """
    Loader method of :py:attr:`~voxels`.
    Args:
        voxel_dc: Pre-loaded dictionary which contains the voxel data of this object.
    Returns:
        3D array of the all voxels which belong to this supervoxel.
    """
    # syn_ssv do not have a segmentation KD; voxels are cached in their VoxelStorage
    if self.type in ['syn', 'syn_ssv']:
        vxs_list = self.voxel_list - self.bounding_box[0]
        # BUG FIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `bool` is the documented, behaviorally identical dtype.
        voxels = np.zeros(self.bounding_box[1] - self.bounding_box[0] + 1, dtype=bool)
        voxels[vxs_list[..., 0], vxs_list[..., 1], vxs_list[..., 2]] = True
    else:
        if voxel_dc is None:
            voxel_dc = VoxelStorageDyn(self.voxel_path, read_only=True, disable_locking=True)
        voxels = voxel_dc.get_voxel_data_cubed(self.id)[0]
    if self.voxel_caching:
        self._voxels = voxels
    return voxels
def load_voxels_downsampled(self, downsampling=(2, 2, 1)):
    # Thin wrapper around the module-level ``load_voxels_downsampled`` helper.
    return load_voxels_downsampled(self, ds=downsampling)
def load_voxel_list(self):
    """
    Loader method of :py:attr:`~voxel_list`. Delegates to the module-level
    ``load_voxel_list`` helper.
    Returns:
        Sparse, 2-dimensional array of voxel coordinates.
    """
    return load_voxel_list(self)
def load_voxel_list_downsampled(self, downsampling=(2, 2, 1)):
    # Thin wrapper around the module-level helper of the same name.
    return load_voxel_list_downsampled(self, downsampling=downsampling)
def load_voxel_list_downsampled_adapt(self, downsampling=(2, 2, 1)):
    # Thin wrapper around the module-level helper of the same name.
    return load_voxel_list_downsampled_adapt(self, downsampling=downsampling)
def load_skeleton(self, recompute: bool = False) -> dict:
    """
    Loader method of :py:attr:`~skeleton`. Delegates to the module-level
    ``load_skeleton`` helper.
    Args:
        recompute: Recompute the skeleton. Currently not implemented.
    Returns:
        Dict of flat arrays of indices, vertices, diameters and attributes.
    """
    return load_skeleton(self, recompute=recompute)
def save_skeleton(self, overwrite: bool = False):
    """
    Save method of :py:attr:`~skeleton`. Delegates to the module-level
    ``save_skeleton`` helper.
    Args:
        overwrite: Overwrite existing skeleton entry.
    Returns:
        The return value of the module-level ``save_skeleton`` helper
        (presumably None — TODO confirm against its implementation).
    """
    return save_skeleton(self, overwrite=overwrite)
def glia_pred(self, thresh: float, pred_key_appendix: str = "") -> int:
    """
    SV glia prediction (0: neuron, 1: glia). Only valid if :py:attr:`type` is `sv`.
    Args:
        thresh: Classification threshold.
        pred_key_appendix: Identifier for specific glia predictions. Only used
            during development.
    Returns:
        The glia prediction of this supervoxel.
    """
    assert self.type == "sv"
    if self.config.use_point_models:
        # Point-model path: threshold the probability directly.
        return int(self.glia_proba(pred_key_appendix) >= thresh)
    return glia_pred_so(self, thresh, pred_key_appendix)
def glia_proba(self, pred_key_appendix: str = "") -> float:
    """
    SV glia probability (0: neuron, 1: glia). Only valid if :py:attr:`type` is `sv`.
    Args:
        pred_key_appendix: Identifier for specific glia predictions. Only used
            during development.
    Returns:
        The glia probability of this supervoxel.
    """
    assert self.type == "sv"
    return glia_proba_so(self, pred_key_appendix)
def axoness_preds(self, pred_key_appendix: str = "") -> np.ndarray:
    """
    Axon prediction (0: dendrite, 1: axon, 2: soma) based on `img2scalar` CMN.
    Args:
        pred_key_appendix: Identifier for specific axon predictions. Only used
            during development.
    Returns:
        The axon prediction of this supervoxel at every :py:attr:`~sample_locations`.
    """
    probas = self.axoness_probas(pred_key_appendix)
    # Per-location class with the highest probability.
    return np.argmax(probas, axis=1)
def axoness_probas(self, pred_key_appendix: str = "") -> np.ndarray:
    """
    Axon probability (0: dendrite, 1: axon, 2: soma) based on `img2scalar` CMN.
    Probability underlying the attribute :py:attr:`axoness_preds`. Only valid if
    :py:attr:`type` is `sv`.
    Args:
        pred_key_appendix: Identifier for specific axon predictions. Only used during development.
    Returns:
        The axon probabilities of this supervoxel at every :py:attr:`~sample_locations`.
    Raises:
        ValueError: If the prediction key is missing from the attribute storage.
    """
    assert self.type == "sv"
    pred_key = "axoness_probas" + pred_key_appendix
    if pred_key not in self.attr_dict:
        # Key not in memory: re-load the attribute dict from disk once.
        self.load_attr_dict()
    if pred_key not in self.attr_dict:
        msg = (f"WARNING: Requested axoness {pred_key} for SV {self.id} is not available. Existing "
               f"keys: {self.attr_dict.keys()}")
        raise ValueError(msg)
    return self.attr_dict[pred_key]
# FUNCTIONS
def total_edge_length(self) -> Union[np.ndarray, float]:
    """
    Total edge length of the supervoxel :py:attr:`~skeleton` in nanometers.
    Returns:
        Sum of all edge lengths (L2 norm) in :py:attr:`~skeleton`.
    """
    # Fetch the skeleton once; repeatedly accessing the property could
    # trigger repeated disk loads when skeleton caching is disabled.
    skel = self.skeleton
    if skel is None:
        self.load_skeleton()
        skel = self.skeleton
    nodes = skel['nodes'].astype(np.float32)
    edges = np.asarray(skel['edges'])
    if len(edges) == 0:
        return 0.0
    # Vectorized replacement of the per-edge Python loop: scaled difference
    # vectors for all edges, then the sum of their L2 norms.
    diffs = (nodes[edges[:, 0]] - nodes[edges[:, 1]]) * self.scaling
    return np.sum(np.linalg.norm(diffs, axis=1))
def mesh_from_scratch(self, ds: Optional[Tuple[int, int, int]] = None,
                      **kwargs: dict) -> List[np.ndarray]:
    """
    Calculate the mesh based on :func:`~syconn.proc.meshes.get_object_mesh`.
    Args:
        ds: Downsampling of the object's voxel data.
        **kwargs: Key word arguments passed to :func:`~syconn.proc.meshes.triangulation`.
    Returns:
        Mesh arrays as returned by :func:`~syconn.proc.meshes.get_object_mesh`
        (presumably indices, vertices, normals — TODO confirm).
    """
    # BUG FIX: the list was misleadingly named `_supported_types` and the error
    # message claimed these were "Supported types", although the raise fires
    # exactly when `self.type` IS in the list. The raise condition is kept
    # unchanged; name and message now state the actual semantics.
    _unsupported_types = ['syn_ssv', 'syn', 'cs_ssv', 'cs']
    if self.type in _unsupported_types:
        raise ValueError(f'"mesh_from_scratch" does not support type "{self.type}". Unsupported types: '
                         f'{_unsupported_types}')
    if ds is None:
        ds = self.config['meshes']['downsampling'][self.type]
    return meshes.get_object_mesh(self, ds, mesher_kwargs=kwargs)
def _save_mesh(self, ind: np.ndarray, vert: np.ndarray,
               normals: np.ndarray):
    """
    Save given mesh at :py:attr:`~mesh_path`. Uses
    the :class:`~syconn.backend.storage.MeshStorage` interface.
    Args:
        ind: Flat index array.
        vert: Flat vertex array.
        normals: Flat normal array.
    """
    mesh_dc = MeshStorage(self.mesh_path, read_only=False,
                          disable_locking=not self.enable_locking)
    mesh_dc[self.id] = [ind, vert, normals]
    mesh_dc.push()  # flush to disk
def mesh2kzip(self, dest_path: str, ext_color: Optional[Union[
              Tuple[int, int, int, int], List, np.ndarray]] = None,
              ply_name: str = ""):
    """
    Write :py:attr:`~mesh` to k.zip.
    Args:
        dest_path: Path to the k.zip file which contains the :py:attr:`~mesh`.
        ext_color: If set to 0 no color will be written out. Use to adapt
            color in KNOSSOS.
        ply_name: Name of the ply file in the k.zip, must not
            end with `.ply`.
    Raises:
        TypeError: If no default color exists for :py:attr:`~type`.
    """
    mesh = self.mesh
    # Default RGBA color per object type (replaces the former if/elif chain).
    type2color = {
        "sv": (130, 130, 130, 160),
        "cs": (100, 200, 30, 255),
        "syn": (150, 50, 200, 255),
        "syn_ssv": (240, 50, 50, 255),
        "cs_ssv": (100, 200, 30, 255),
        "sj": (int(0.849 * 255), int(0.138 * 255), int(0.133 * 255), 255),
        "vc": (int(0.175 * 255), int(0.585 * 255), int(0.301 * 255), 255),
        "mi": (0, 153, 255, 255),
    }
    if self.type not in type2color:
        # BUG FIX: message previously read "bbject".
        raise TypeError("Color for object type '{}' does not exist."
                        "".format(self.type))
    color = np.array(type2color[self.type], dtype=np.uint8)
    if ext_color is not None:
        # BUG FIX: `ext_color == 0` raised "truth value is ambiguous" for the
        # ndarray inputs the signature allows; only scalar 0 disables color.
        if np.isscalar(ext_color) and ext_color == 0:
            color = None
        else:
            color = ext_color
    if ply_name == "":
        ply_name = str(self.id)
    meshes.write_mesh2kzip(dest_path, mesh[0], mesh[1], mesh[2], color,
                           ply_fname=ply_name + ".ply")
def mergelist2kzip(self, dest_path: str):
    """
    Writes the supervoxel agglomeration to a KNOSSOS compatible format.
    Args:
        dest_path: Path to k.zip file.
    """
    # Ensure rep_coord is available from the attribute storage.
    self.load_attr_dict()
    kml = knossos_ml_from_svixs([self.id], coords=[self.rep_coord])
    write_txt2kzip(dest_path, kml, "mergelist.txt")
def load_views(self, woglia: bool = True, raw_only: bool = False,
               ignore_missing: bool = False, index_views: bool = False,
               view_key: Optional[str] = None):
    """
    Loader method of :py:attr:`~views`.
    Args:
        woglia: If True, looks for views without glia, i.e. after astrocyte separation.
        index_views: If True, refers to index views.
        view_key: Identifier of the requested views.
        raw_only: If True, ignores cell organelles projections.
        ignore_missing: If True, will not throw ValueError if views do not exist.
    Returns:
        Views with requested properties.
    """
    view_p = self.view_path(woglia=woglia, index_views=index_views,
                            view_key=view_key)
    view_dc = CompressedStorage(view_p, disable_locking=not self.enable_locking)
    try:
        views = view_dc[self.id]
    except KeyError:
        if ignore_missing:
            log_reps.warning("Views of SV {} were missing. Skipping.".format(self.id))
            # Empty placeholder with the canonical view shape.
            views = np.zeros((0, 4, 2, 128, 256), dtype=np.uint8)
        else:
            # BUG FIX: was `raise KeyError(e)`, which wrapped the exception and
            # discarded the original traceback; a bare `raise` re-raises it intact.
            raise
    if raw_only:
        # First channel holds the raw projection; drop organelle channels.
        views = views[:, :1]
    return views
def save_views(self, views: np.ndarray, woglia: bool = True,
               cellobjects_only: bool = False, index_views: bool = False,
               view_key: Optional[str] = None,
               enable_locking: Optional[bool] = None):
    """
    Saves views according to its properties. If view_key is given it has
    to be a special type of view, e.g. spine predictions. If in this case
    any other kwarg is not set to default it will raise an error.
    Todo:
        * remove `cellobjects_only`.
    Args:
        woglia: If True, looks for views without glia, i.e. after astrocyte separation.
        index_views: If True, refers to index views.
        view_key: Identifier of the requested views.
        views: View array.
        cellobjects_only: Only render cell organelles (deprecated).
        enable_locking: Enable file locking.
    Raises:
        ValueError: If `view_key` is combined with non-default flags.
    """
    if not (woglia and not cellobjects_only and not index_views) and view_key is not None:
        raise ValueError('If views are saved to custom key, all other settings have to be defaults!')
    if enable_locking is None:
        enable_locking = self.enable_locking  # fall back to object default
    view_dc = CompressedStorage(self.view_path(woglia=woglia, index_views=index_views, view_key=view_key),
                                read_only=False, disable_locking=not enable_locking)
    if cellobjects_only:
        assert self.id in view_dc, "SV must already contain raw views " \
                                   "if adding views for cellobjects only."
        # Keep the existing raw channel (index 0) and append organelle channels.
        view_dc[self.id] = np.concatenate([view_dc[self.id][:, :1], views],
                                          axis=1)
    else:
        view_dc[self.id] = views
    view_dc.push()  # flush to disk
def load_attr_dict(self) -> int:
    """
    Loader method of :py:attr:`~attr_dict`.
    Returns:
        0 if successful, -1 if attribute dictionary storage does not exist.
    """
    try:
        glob_attr_dc = AttributeDict(self.attr_dict_path,
                                     disable_locking=True)  # disable locking, PS 07June2019
        self.attr_dict = glob_attr_dc[self.id]
    except (IOError, EOFError) as e:
        log_reps.critical("Could not load SSO attributes at {} due to "
                          "{}.".format(self.attr_dict_path, e))
        return -1
    # BUG FIX: the success path previously fell through and returned None,
    # contradicting the documented contract of returning 0.
    return 0
def save_attr_dict(self):
    """
    Saves :py:attr:`~attr_dict` to attr:`~attr_dict_path`. Already existing
    dictionary will be updated.
    """
    glob_attr_dc = AttributeDict(self.attr_dict_path, read_only=False,
                                 disable_locking=not self.enable_locking)
    if self.id in glob_attr_dc:
        # Merge: keys in self.attr_dict overwrite those already on disk.
        orig_dc = glob_attr_dc[self.id]
        orig_dc.update(self.attr_dict)
    else:
        orig_dc = self.attr_dict
    glob_attr_dc[self.id] = orig_dc
    glob_attr_dc.push()  # flush to disk
def save_attributes(self, attr_keys: List[str], attr_values: List[Any]):
    """
    Writes attributes to attribute storage. Ignores :py:attr:`~attr_dict`.
    Values have to be serializable and will be written via the
    :class:`~syconn.backend.storage.AttributeDict` interface.
    Args:
        attr_keys: List of attribute keys which will be written to
            :py:attr:`~attr_dict_path`.
        attr_values: List of attribute values which will be written to
            :py:attr:`~attr_dict_path`.
    """
    # NOTE(review): the `__len__` check wraps bare scalars into lists, but a
    # single `str` key also has `__len__` and would be zipped char-by-char —
    # callers are expected to pass lists. TODO confirm.
    if not hasattr(attr_keys, "__len__"):
        attr_keys = [attr_keys]
    if not hasattr(attr_values, "__len__"):
        attr_values = [attr_values]
    assert len(attr_keys) == len(attr_values), "Key-value lengths did not" \
                                               " agree while saving attri" \
                                               "butes of SSO %d." % self.id
    glob_attr_dc = AttributeDict(self.attr_dict_path, read_only=False,
                                 disable_locking=not self.enable_locking)
    for k, v in zip(attr_keys, attr_values):
        glob_attr_dc[self.id][k] = v
    glob_attr_dc.push()  # flush to disk
def load_attributes(self, attr_keys: List[str]) -> List[Any]:
    """
    Reads attributes from attribute storage. It will ignore self.attr_dict and
    will always pull it from the storage. Does not throw KeyError, but returns
    None for missing keys.
    Args:
        attr_keys: List of attribute keys which will be loaded from
            :py:attr:`~attr_dict_path`.
    Returns:
        Attribute values corresponding to `attr_keys`
    """
    glob_attr_dc = AttributeDict(self.attr_dict_path, read_only=True,
                                 disable_locking=not self.enable_locking)
    # Missing keys map to None instead of raising.
    return [glob_attr_dc[self.id][attr_k] if attr_k in glob_attr_dc[self.id]
            else None for attr_k in attr_keys]
def attr_exists(self, attr_key: str) -> bool:
    """
    Checks if `attr_key` exists in either :py:attr:`~attr_dict` or at
    :py:attr:`~attr_dict_path`.
    Args:
        attr_key: Attribute key to look for.
    Returns:
        True if attribute exists, False otherwise.
    """
    # Lazily populate the attribute dict from disk on first access.
    if not self.attr_dict:
        self.load_attr_dict()
    try:
        self.attr_dict[attr_key]
    except (KeyError, EOFError):
        return False
    return True
def lookup_in_attribute_dict(self, attr_key: str) -> Any:
    """
    Safe attribute look-up.
    Args:
        attr_key: Attribute key to look for.
    Returns:
        Value of `attr_key` in :py:attr:`~attr_dict` or None if it does not
        exist. If key does not exist in :py:attr:`~attr_dict`, tries to
        load from :py:attr:`~attr_dict_path`.
    """
    # Lazily populate the attribute dict from disk on first access.
    if not self.attr_dict:
        self.load_attr_dict()
    return self.attr_dict[attr_key] if self.attr_exists(attr_key) else None
def calculate_rep_coord(self, voxel_dc: Optional[Dict[int, np.ndarray]] = None):
    """
    Calculate/loads supervoxel representative coordinate into
    :py:attr:`~_rep_coord` (no return value).
    Args:
        voxel_dc: Pre-loaded dictionary which contains the voxel data of
            this object.
    Raises:
        ValueError: If `voxel_dc` is of an unknown storage class.
    """
    if voxel_dc is None:
        # Synapse objects use the lazy-loading storage; everything else the
        # dynamic voxel storage.
        if self.type in ['syn', 'syn_ssv']:
            voxel_dc = VoxelStorageLazyLoading(self.voxel_path)
        else:
            voxel_dc = VoxelStorageDyn(self.voxel_path, read_only=True,
                                       disable_locking=True)
    if self.id not in voxel_dc:
        # Sentinel bounding box marks "no voxel data".
        self._bounding_box = np.array([[-1, -1, -1], [-1, -1, -1]])
        log_reps.warning("No voxels found in VoxelDict!")
        return
    if isinstance(voxel_dc, VoxelStorageDyn):
        self._rep_coord = voxel_dc.object_repcoord(self.id)
        return
    elif isinstance(voxel_dc, VoxelStorageLazyLoading):
        self._rep_coord = voxel_dc[self.id][len(voxel_dc[self.id]) // 2]  # any rep coord
        return
    else:
        raise ValueError(f'Invalid voxel storage class: {type(voxel_dc)}')
def calculate_bounding_box(self, voxel_dc: Optional[Dict[int, np.ndarray]] = None):
    """
    Calculate supervoxel :py:attr:`~bounding_box`.
    Args:
        voxel_dc: Pre-loaded dictionary which contains the voxel data of this object.
    """
    if voxel_dc is None:
        if self.type in ['syn', 'syn_ssv']:
            voxel_dc = VoxelStorageLazyLoading(self.voxel_path)
        else:
            voxel_dc = VoxelStorageDyn(self.voxel_path, read_only=True,
                                       disable_locking=True)
    if not isinstance(voxel_dc, VoxelStorageDyn):
        # NOTE(review): relies on load_voxels (or code it triggers) to set
        # the bounding box as a side effect — TODO confirm.
        _ = self.load_voxels(voxel_dc=voxel_dc)
    else:
        # VoxelStorageDyn keeps per-cube bounding data; merge them.
        bbs = voxel_dc.get_boundingdata(self.id)
        bb = np.array([bbs[:, 0].min(axis=0), bbs[:, 1].max(axis=0)])
        self._bounding_box = bb
def calculate_size(self, voxel_dc: Optional[Union[VoxelStorageDyn, VoxelStorage]] = None):
    """
    Calculate supervoxel object :py:attr:`~size`.
    Args:
        voxel_dc: Pre-loaded dictionary which contains the voxel data of this object.
    """
    if voxel_dc is None:
        if self.type in ['syn', 'syn_ssv']:
            voxel_dc = VoxelStorageLazyLoading(self.voxel_path)
        else:
            voxel_dc = VoxelStorageDyn(self.voxel_path, read_only=True,
                                       disable_locking=True)
    if not isinstance(voxel_dc, VoxelStorageDyn):
        # NOTE(review): relies on load_voxels (or code it triggers) to set
        # the size as a side effect — TODO confirm.
        _ = self.load_voxels(voxel_dc=voxel_dc)
    else:
        size = voxel_dc.object_size(self.id)
        self._size = size
def save_kzip(self, path: str,
              kd: Optional[knossosdataset.KnossosDataset] = None,
              write_id: Optional[int] = None):
    """
    Write supervoxel segmentation to k.zip.
    Todo:
        * Broken, segmentation not rendered in K.
    Args:
        path: Destination path of the k.zip file.
        kd: Dataset to write into; loaded from the config's segmentation
            path if None.
        write_id: Supervoxel ID written into the volume; defaults to
            :py:attr:`~id`.
    Raises:
        ValueError: If the KnossosDataset could not be loaded.
    """
    if write_id is None:
        write_id = self.id
    if kd is None:
        try:
            kd = kd_factory(self.config.kd_seg_path)
        except Exception as e:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and discarded the cause.
            raise ValueError("KnossosDataset could not be loaded") from e
    # swapaxes(0, 2): voxel array is XYZ, KNOSSOS expects ZYX.
    kd.save_to_kzip(offset=self.bounding_box[0], data=self.voxels.astype(np.uint64).swapaxes(0, 2) * write_id,
                    kzip_path=path, data_mag=1, mags=[1])
def clear_cache(self):
    """
    Clears the following, cached data:
        * :py:attr:`~voxels`
        * :py:attr:`~voxel_list`
        * :py:attr:`~mesh`
        * :py:attr:`~views`
        * :py:attr:`~skeleton`
    """
    self._voxels = None
    self._voxel_list = None
    self._mesh = None
    self._views = None
    self._skeleton = None
# SKELETON
@property
def skeleton_dict_path(self) -> str:
    """
    Returns:
        Path to skeleton storage.
    """
    return f"{self.segobj_dir}/skeletons.pkl"
def copy2dir(self, dest_dir, safe=True):
    """
    Copies all files of this object's directory to `dest_dir` and merges the
    attribute dictionaries.
    Examples:
        To copy the content of this SV object (``sv_orig``) to the
        destination of another (e.g. yet not existing) SV (``sv_target``),
        call ``sv_orig.copy2dir(sv_target.segobj_dir)``. All files contained
        in the directory py:attr:`~segobj_dir` of ``sv_orig`` will be copied to
        ``sv_target.segobj_dir``.
    Args:
        dest_dir: Destination directory where all files contained in
            py:attr:`~segobj_dir` will be copied to.
        safe: If ``True``, will not overwrite existing data.
    """
    # get all files in home directory
    fps = get_filepaths_from_dir(self.segobj_dir, ending="")
    fnames = [os.path.split(fname)[1] for fname in fps]
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    for i in range(len(fps)):
        src_filename = fps[i]
        dest_filename = dest_dir + "/" + fnames[i]
        try:
            safe_copy(src_filename, dest_filename, safe=safe)
        except Exception as e:
            # Best-effort: a failed copy is logged and skipped, not fatal.
            log_reps.warning("{}. Skipped {}.".format(e, fnames[i]))
            pass
    # copy attr_dict values
    self.load_attr_dict()
    if os.path.isfile(dest_dir + "/attr_dict.pkl"):
        dest_attr_dc = load_pkl2obj(dest_dir + "/attr_dict.pkl")
    else:
        dest_attr_dc = {}
    # overwrite existing keys in the destination attribute dict
    dest_attr_dc.update(self.attr_dict)
    self.attr_dict = dest_attr_dc
    self.save_attr_dict()
def split_component(self, dist, new_sd, new_id):
    """
    Split this object into connected components and store them as new objects.
    NOT IMPLEMENTED — raises immediately; the code below the raise is dead
    and kept only as a draft.
    Todo:
        * refactor -> VoxelStorageDyn
    Args:
        dist: Maximum voxel distance for two voxels to be considered connected.
        new_sd: Target segmentation dataset for the new objects.
        new_id: First ID to assign to newly created objects.
    Raises:
        NotImplementedError: Always.
    """
    raise NotImplementedError('WORK IN PROGRESS')
    # --- dead code below: draft implementation, never executed ---
    kdtree = spatial.cKDTree(self.voxel_list)
    graph = nx.from_edgelist(kdtree.query_pairs(dist))
    ccs = list(nx.connected_components(graph))
    partner_ids = [self.id - ((self.id >> 32) << 32), self.id >> 32]
    if len(ccs) == 1:
        new_so_obj = new_sd.get_segmentation_object(new_id, create=True)
        new_id += 1
        new_so_obj.attr_dict["paths_to_voxels"] = self.paths_to_voxels
        new_so_obj.attr_dict["%s_partner_ids" % self.type] = partner_ids
        new_so_obj.save_attr_dict()
    else:
        for cc in ccs:
            new_so_obj = new_sd.get_segmentation_object(new_id, create=True)
            new_so_obj.attr_dict["%s_partner_ids" % self.type] = partner_ids
            new_so_obj.save_attr_dict()
            new_id += 1
            voxel_ids = np.array(list(cc), dtype=np.int32)
            this_voxel_list = self.voxel_list[voxel_ids]
            bb = [np.min(this_voxel_list, axis=0),
                  np.max(this_voxel_list, axis=0)]
            this_voxel_list -= bb[0]
            this_voxels = np.zeros(bb[1] - bb[0] + 1, dtype=np.bool)
            this_voxels[this_voxel_list[:, 0],
                        this_voxel_list[:, 1],
                        this_voxel_list[:, 2]] = True
            save_voxels(new_so_obj, this_voxels, bb[0])
class SegmentationDataset(SegmentationBase):
"""
This class represents a set of supervoxels.
Examples:
To initialize the :class:`~syconn.reps.segmentation.SegmentationDataset` for
cell supervoxels you need to call ``sd_cell = SegmentationDataset('sv')``.
This requires an initialized working directory, for this please refer to
:class:`~syconn.handler.config.DynConfig` or see::
$ python SyConn/scripts/example_runs/start.py
After successfully executing
:class:`~syconn.exec.exec_init.init_cell_subcell_sds`, *cell* supervoxel properties
can be loaded from numpy arrays via the following keys:
* 'id': ID array, identical to :py:attr:`~ids`.
* 'bounding_box': Bounding box of every SV.
* 'size': Number voxels of each SV.
* 'rep_coord': Representative coordinates for each SV.
* 'mesh_area': Surface area as computed from the object mesh triangles.
* 'mapping_sj_ids': Synaptic junction objects which overlap with the respective SVs.
* 'mapping_sj_ratios': Overlap ratio of the synaptic junctions.
* 'mapping_vc_ids': Vesicle cloud objects which overlap with the respective SVs.
* 'mapping_vc_ratios': Overlap ratio of the vesicle clouds.
* 'mapping_mi_ids': Mitochondria objects which overlap with the respective SVs.
* 'mapping_mi_ratios': Overlap ratio of the mitochondria.
If astrocyte separation is performed, the following attributes will be stored as numpy array as well:
* 'glia_probas': Glia probabilities as array of shape (N, 2; N: Rendering
locations, 2: 0-index=neuron, 1-index=glia).
The 'mapping' attributes are only computed for cell supervoxels and not for cellular
organelles (e.g. 'mi', 'vc', etc.; see
:py:attr:`~syconn.global_params.config['process_cell_organelles']`).
For the :class:`~syconn.reps.segmentation.SegmentationDataset` of type 'syn_ssv'
(which represent the actual synapses between two cell reconstructions), the following
properties are stored as numpy arrays:
* 'id': ID array, identical to
:py:attr:`~ids`.
* 'bounding_box': Bounding box of every SV.
* 'size': Number voxels of each SV.
* 'rep_coord': Representative coordinates of each SV.
* 'mesh_area': Surface area as computed from the object mesh triangles.
* 'mesh_bb': Bounding box of the object meshes (in nanometers). Approximately
the same as scaled 'bounding_box'.
* 'latent_morph': Latent morphology vector at each rendering location; predicted by
the tCMN.
* 'neuron_partners': IDs of the two
:class:`~syconn.reps.super_segmentation_object.SuperSegmentationObject`
forming the synapse. The ordering of the subsequent 'partner' attributes is
identical to 'neuron_partners', e.g. 'neuron_partners'=[3, 49] and
'partner_celltypes'=[0, 1] means that SSV with ID 3 is an excitatory axon
targeting the MSN SSV with ID 49.
* 'partner_celltypes': Celltypes of the two SSVs.
* 'partner_spiness': Spine predictions (0: neck, 1: head, 2: shaft, 3: other) of the
two sites.
* 'partner_axoness': Compartment predictions (0: dendrite, 1: axon, 2: soma,
3: en-passant bouton, 4: terminal bouton) of the two sites.
* 'syn_prob': Synapse probability as inferred by the RFC (see corresponding
section the documentation).
* 'asym_prop': Mean probability of the 'syn_ssv' object voxels for the asymmetric
type. See :func:`~syconn.extraction.cs_processing_steps._extract_synapse_type_thread` .
* 'sym_prop': Mean probability of the 'syn_ssv' object voxels for the symmetric
type. See :func:`~syconn.extraction.cs_processing_steps._extract_synapse_type_thread` .
* 'syn_type_sym_ratio': ``sym_prop / float(asym_prop + sym_prop)``.
See :func:`~syconn.extraction.cs_processing_steps._extract_synapse_type_thread` .
* 'syn_sign': Synaptic "sign" (-1: symmetric, +1: asymmetric). For threshold see
:py:attr:`~syconn.global_params.config['cell_objects']['sym_thresh']` .
* 'cs_ids': Contact site IDs associated with each 'syn_ssv' synapse.
"""
def __init__(self, obj_type: str, version: Optional[Union[str, int]] = None, working_dir: Optional[str] = None,
             scaling: Optional[Union[List, Tuple, np.ndarray]] = None,
             version_dict: Optional[Dict[str, str]] = None, create: bool = False,
             config: Optional[Union[str, DynConfig]] = None,
             n_folders_fs: Optional[int] = None, cache_properties: Optional[List[str]] = None):
    """
    Args:
        obj_type: Type of :class:`~syconn.reps.segmentation.SegmentationObject`, e.g.: 'vc', 'sj', 'mi', 'cs', 'sv'.
        version: Version of dataset to distinguish it from others of the same type.
        working_dir: Path to the working directory.
        scaling: Scaling of the raw data to nanometer
        version_dict: Dictionary which contains the versions of other dataset types which share
            the same working directory.
        create: Whether or not to create this dataset's directory.
        config: Config. object, see :class:`~syconn.handler.config.DynConfig`. Will be copied and then fixed by
            setting :py:attr:`~syconn.handler.config.DynConfig.fix_config` to True.
        n_folders_fs: Number of folders within the dataset's folder structure.
        cache_properties: Use numpy arrays to populate the specified object properties when initializing
            :py:class:`~syconn.reps.segmentation.SegmentationObject` via :py:func:`~get_segmentation_object`.
    """
    self._type = obj_type
    self._n_folders_fs = n_folders_fs
    self._sizes = None
    self._ids = None
    self._rep_coords = None
    self._config = config
    self._soid2ix = None
    self._property_cache = dict()
    self._version = None
    if cache_properties is None:
        cache_properties = tuple()
    if n_folders_fs is not None:
        if n_folders_fs not in [10 ** i for i in range(6)]:
            raise Exception("n_folders_fs must be in", [10 ** i for i in range(6)])
    if version == 'temp':
        version = 'tmp'  # normalize legacy alias
    self._setup_working_dir(working_dir, config, version, scaling)
    # BUG FIX: was `version is not 'tmp'` — identity comparison with a string
    # literal has undefined semantics (SyntaxWarning since Python 3.8) and can
    # silently evaluate True for an equal string; use value inequality.
    if version != 'tmp' and self._config is not None:
        self._config = copy.copy(self._config)
        self._config.fix_config = True
    if create and (version is None):
        version = 'new'
    if version is None and create is False:
        # No explicit version: look it up in the config.
        try:
            self._version = self.config["versions"][self.type]
        except KeyError:
            raise Exception(f"Unclear version '{version}' during initialization of {self}.")
    elif version == "new":
        # Auto-increment: scan sibling datasets of this type (up to 3-digit
        # versions) and use max existing version + 1.
        other_datasets = \
            glob.glob(self.working_dir + "/%s_[0-9]" % self.type) + \
            glob.glob(self.working_dir + "/%s_[0-9][0-9]" % self.type) + \
            glob.glob(self.working_dir + "/%s_[0-9][0-9][0-9]" % self.type)
        max_version = -1
        for other_dataset in other_datasets:
            other_version = \
                int(re.findall(r"[\d]+",
                               os.path.basename(other_dataset.strip('/')))[-1])
            if max_version < other_version:
                max_version = other_version
        self._version = max_version + 1
    else:
        self._version = version
    if version_dict is None:
        try:
            self.version_dict = self.config["versions"]
        except KeyError:
            raise Exception("No version dict specified in config")
    else:
        if isinstance(version_dict, dict):
            self.version_dict = version_dict
        elif isinstance(version_dict, str) and version_dict == "load":
            if self.version_dict_exists:
                self.load_version_dict()
            else:
                raise Exception("No version dict specified in config")
    if create:
        os.makedirs(self.path, exist_ok=True)
        os.makedirs(self.so_storage_path, exist_ok=True)
    self.enable_property_cache(cache_properties)
def __repr__(self):
    # Unambiguous representation mirroring the constructor arguments.
    return (f'{type(self).__name__}(obj_type="{self.type}", version="{self.version}", '
            f'working_dir="{self.working_dir}")')
@property
def type(self) -> str:
    """
    The type of :class:`~syconn.reps.segmentation.SegmentationObject`s
    contained in this :class:`~syconn.reps.segmentation.SegmentationDataset`.
    Returns:
        String identifier of the object type.
    """
    return self._type
@property
def n_folders_fs(self) -> int:
    """
    Returns:
        The number of folders in this :class:`~syconn.reps.segmentation.SegmentationDataset`
        directory tree.
    Raises:
        Exception: If no storage folder exists and `n_folders_fs` was not given.
    """
    if self._n_folders_fs is None:
        ps = glob.glob("%s/%s*/" % (self.path, self.so_storage_path_base))
        if len(ps) == 0:
            raise Exception("No storage folder found at '{}' and no number of "
                            "subfolders specified (n_folders_fs))".format(self.path))
        # Prefer a folder named exactly 'so_storage' (legacy layout, implies
        # 100000 folders); otherwise parse the numeric suffix of the last
        # inspected folder, e.g. 'so_storage_1000' -> 1000.
        bp = os.path.basename(ps[0].strip('/'))
        for p in ps:
            # CLEANUP: the original re-assigned the identical basename a second
            # time inside the if-branch before breaking; behavior unchanged.
            bp = os.path.basename(p.strip('/'))
            if bp == self.so_storage_path_base:
                break
        if bp == self.so_storage_path_base:
            self._n_folders_fs = 100000
        else:
            self._n_folders_fs = int(re.findall(r'[\d]+', bp)[-1])
    return self._n_folders_fs
@property
def working_dir(self) -> str:
    """
    Returns:
        The working directory of this :class:`~syconn.reps.segmentation.SegmentationDataset`.
    """
    return self._working_dir
@property
def version(self) -> str:
    """
    Returns:
        String identifier of the version (integers are stringified).
    """
    return str(self._version)
@property
def path(self) -> str:
    """
    Returns:
        The path to this :class:`~syconn.reps.segmentation.SegmentationDataset`.
    """
    return f"{self._working_dir}/{self.type}_{self.version}/"
@property
def exists(self) -> bool:
    """
    Checks whether :py:attr:`~path` exists.
    """
    return os.path.isdir(self.path)
@property
def path_sizes(self) -> str:
    """
    Path to the cache array of the object voxel sizes.
    Returns:
        Path to the numpy file (``<dataset path>/sizes.npy``).
    """
    return self.path + "/sizes.npy"
@property
def path_rep_coords(self) -> str:
    """
    Path to the cache array of the object representative coordinates.
    Returns:
        Path to the numpy file (``<dataset path>/rep_coords.npy``).
    """
    return self.path + "/rep_coords.npy"
@property
def path_ids(self) -> str:
"""
Path to the cache array of the object IDs.
Returns:
Path to the numpy file.
"""
return self.path + "/ids.npy"
@property
def version_dict_path(self) -> str:
"""
Path to the version dictionary pickle file.
Returns:
Path to the pickle file.
"""
return self.path + "/version_dict.pkl"
@property
def version_dict_exists(self) -> bool:
"""
Checks whether :py:attr:`~version_dict_path` exists.
"""
return os.path.exists(self.version_dict_path)
@property
def so_storage_path_base(self) -> str:
"""
Name of the base of the root folder (``'so_storage'``).
"""
return "so_storage"
@property
def so_storage_path(self) -> str:
"""
Path to the root folder.
"""
if self._n_folders_fs is None and os.path.exists("%s/so_storage/" % self.path):
return "%s/so_storage/" % self.path
elif self._n_folders_fs == 100000 and os.path.exists("%s/so_storage/" % self.path):
return "%s/so_storage/" % self.path
else:
return "%s/%s_%d/" % (self.path, self.so_storage_path_base,
self.n_folders_fs)
@property
def so_dir_paths(self) -> List[str]:
"""
Sorted paths to all supervoxel object directories in the directory tree
:py:attr:`~so_storage_path`.
"""
depth = int(np.log10(self.n_folders_fs) // 2 + np.log10(self.n_folders_fs) % 2)
p = "".join([self.so_storage_path] + ["/*" for _ in range(depth)])
return sorted(glob.glob(p))
    def iter_so_dir_paths(self) -> Iterator[str]:
        """
        Iterator over all possible `SegmentationObject` storage base directories.

        Notes:
            In contrast to :attr:`~so_dir_paths` this iterator may return paths to storages that
            do not exist, in the case that no object fell into its ID bucket.

        Returns:
            Path to ID storage base folder.
        """
        # Enumerate every possible storage bucket, not only those on disk.
        storage_location_ids = get_unique_subfold_ixs(self.n_folders_fs)
        for ix in storage_location_ids:
            yield self.so_storage_path + subfold_from_ix(ix, self.n_folders_fs)
@property
def config(self) -> DynConfig:
"""
The configuration object which contain all dataset-specific parameters.
See :class:`~syconn.handler.config.DynConfig`.
Returns:
The configuration object.
"""
if self._config is None:
self._config = global_params.config
return self._config
@property
def sizes(self) -> np.ndarray:
"""
Returns:
A size array of all supervoxel which are part of this dataset.
The ordering of the returned array will correspond to :py:attr:`~ids`.
"""
if self._sizes is None:
if os.path.exists(self.path_sizes):
self._sizes = np.load(self.path_sizes)
else:
msg = "sizes were not calculated... Please run dataset_analysis"
log_reps.error(msg)
raise ValueError(msg)
return self._sizes
@property
def rep_coords(self) -> np.ndarray:
"""
Returns:
Representative coordinates of all supervoxel which are part of this dataset.
The ordering of the returned array will correspond to :py:attr:`~ids`.
"""
if self._rep_coords is None:
if os.path.exists(self.path_rep_coords):
self._rep_coords = np.load(self.path_rep_coords)
else:
msg = "rep_coords were not calculated... Please run dataset_analysis"
log_reps.error(msg)
raise ValueError(msg)
return self._rep_coords
@property
def ids(self) -> np.ndarray:
"""
Returns:
All supervoxel IDs which are part of this dataset.
"""
if self._ids is None:
acquire_obj_ids(self)
return self._ids
@property
def scaling(self) -> np.ndarray:
"""
Returns:
Voxel size in nanometers (XYZ).
"""
if self._scaling is None:
self._scaling = np.array(self.config['scaling'], dtype=np.float32)
return self._scaling
@property
def sos(self) -> Generator[SegmentationObject, None, None]:
"""
Generator for all :class:`~syconn.reps.segmentation.SegmentationObject` objects
associated with this dataset.
Yields:
:class:`~syconn.reps.segmentation.SegmentationObject`
"""
ix = 0
tot_nb_sos = len(self.ids)
while ix < tot_nb_sos:
yield self.get_segmentation_object(self.ids[ix])
ix += 1
def load_numpy_data(self, prop_name, allow_nonexisting: bool = True) -> np.ndarray:
"""
Load cached array. The ordering of the returned array will correspond
to :py:attr:`~ids`.
Todo:
* remove 's' appendix in file names.
* remove 'celltype' replacement for 'celltype_cnn_e3' as soon as 'celltype_cnn_e3' was renamed package-wide
Args:
prop_name: Identifier of the requested cache array.
allow_nonexisting: If False, will fail for missing numpy files.
Returns:
numpy array of property `prop_name`.
"""
if prop_name == 'celltype':
prop_name = 'celltype_cnn_e3'
if os.path.exists(self.path + prop_name + "s.npy"):
return np.load(self.path + prop_name + "s.npy", allow_pickle=True)
else:
msg = f'Requested data cache "{prop_name}" did not exist in {self}.'
if not allow_nonexisting:
log_reps.error(msg)
raise FileNotFoundError(msg)
log_reps.warning(msg)
def get_segmentationdataset(self, obj_type: str) -> 'SegmentationDataset':
"""
Factory method for :class:`~syconn.reps.segmentation.SegmentationDataset` which are part of this dataset.
Args:
obj_type: Dataset of supervoxels with type `obj_type`.
Returns:
The requested :class:`~syconn.reps.segmentation.SegmentationDataset` object.
"""
if obj_type not in self.version_dict:
raise ValueError('Requested object type {} not part of version_dict '
'{}.'.format(obj_type, self.version_dict))
return SegmentationDataset(obj_type, version=self.version_dict[obj_type], working_dir=self.working_dir)
def get_segmentation_object(self, obj_id: Union[int, List[int]],
create: bool = False, **kwargs) -> Union[SegmentationObject, List[SegmentationObject]]:
"""
Factory method for :class:`~syconn.reps.segmentation.SegmentationObject` which are
part of this dataset.
Args:
obj_id: Supervoxel ID.
create: If True, creates the folder hierarchy down to the requested supervoxel.
Returns:
The requested :class:`~syconn.reps.segmentation.SegmentationObject` object.
"""
if np.isscalar(obj_id):
return self._get_segmentation_object(obj_id, create, **kwargs)
else:
res = []
for ix in obj_id:
obj = self._get_segmentation_object(ix, create, **kwargs)
res.append(obj)
return res
def _get_segmentation_object(self, obj_id: int, create: bool, **kwargs) -> SegmentationObject:
"""
Initialize :py:class:`~SegmentationObject`.
Args:
obj_id: Object ID.
create: Create folder structure. Default: False.
Returns:
Supervoxel object.
"""
kwargs_def = dict(obj_id=obj_id, obj_type=self.type, version=self.version, working_dir=self.working_dir,
scaling=self.scaling, create=create, n_folders_fs=self.n_folders_fs, config=self.config)
kwargs_def.update(kwargs)
so = SegmentationObject(**kwargs_def)
for k, v in self._property_cache.items():
so.attr_dict[k] = v[self.soid2ix[obj_id]]
return so
    def save_version_dict(self):
        """
        Save the version dictionary to the `.pkl` file
        (:py:attr:`~version_dict_path`).
        """
        write_obj2pkl(self.version_dict_path, self.version_dict)
def load_version_dict(self):
"""
Load the version dictionary from the `.pkl` file.
"""
try:
self.version_dict = load_pkl2obj(self.version_dict_path)
except Exception as e:
raise FileNotFoundError('Version dictionary of SegmentationDataset not found. {}'.format(str(e)))
    @property
    def soid2ix(self):
        """Mapping from supervoxel ID to its index within :py:attr:`~ids`
        (and therefore within the property cache arrays). Built lazily.
        """
        if self._soid2ix is None:
            self._soid2ix = {k: ix for ix, k in enumerate(self.ids)}
        return self._soid2ix
def enable_property_cache(self, property_keys: Iterable[str]):
"""
Add properties to cache.
Args:
property_keys: Property keys. Numpy cache arrays must exist.
"""
# look-up for so IDs to index in cache arrays
property_keys = list(property_keys) # copy
for k in self._property_cache:
if k in property_keys:
property_keys.remove(k)
if len(property_keys) == 0:
return
# init index array
_ = self.soid2ix
self._property_cache.update({k: self.load_numpy_data(k, allow_nonexisting=False) for k in property_keys})
def get_volume(self, source: str = 'total') -> float:
"""
Calculate the RAG volume.
Args:
source: Allowed sources: 'total' (all SVs contained in SegmentationDataset('sv')),
'neuron' (use glia-free RAG), 'glia' (use glia RAG).
Returns:
Volume in mm^3.
"""
self.enable_property_cache(['size'])
if source == 'neuron':
g = nx.read_edgelist(global_params.config.pruned_svgraph_path, nodetype=np.uint64)
svids = g.nodes()
elif source == 'glia':
g = nx.read_edgelist(global_params.config.working_dir + "/glia/astrocyte_svgraph.bz2", nodetype=np.uint64)
svids = g.nodes()
elif source == 'total':
svids = self.ids
else:
raise ValueError(f'Unknown source type "{source}".')
total_size = 0
for svid in svids:
total_size += self.get_segmentation_object(svid).size
total_size_cmm = np.prod(self.scaling) * total_size / 1e18
return total_size_cmm
| StructuralNeurobiologyLab/SyConn | syconn/reps/segmentation.py | Python | gpl-2.0 | 73,145 | [
"NEURON"
] | afa72548310be5c73c86d846f408a6d7ca4d92d72a763edfde359959bd2d9609 |
#!/usr/bin/env python
# Copyright (C) 2012 Tianyang Li
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
"""
get query ids for queries algined to db seqs with db ids
"""
import sys
import getopt
from HTSeq import SAM_Reader
def main(args):
    """Parse CLI options and print names of reads aligned to selected DB IDs.

    Expects ``-s <sam file>`` and ``-d <db id file>``.  Prints the query name
    of every alignment whose reference sequence ID occurs in the ID file.
    NOTE: Python 2 only (uses the ``print >>`` / ``print`` statement syntax).
    """
    sam, db_ids = None, None
    try:
        opts, args = getopt.getopt(args, 's:d:')
    except getopt.GetoptError as err:
        print >> sys.stderr, str(err)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-s':
            sam = arg
        if opt == '-d':
            db_ids = arg
    if sam == None or db_ids == None:
        print >> sys.stderr, "missing arguments"
        sys.exit(2)
    # Read the set of database sequence IDs, one per line.
    dids = set([])
    with open(db_ids, 'r') as fin:
        for line in fin:
            dids.add(line.strip())
    # Emit the query name of every read aligned to one of those IDs.
    for align in SAM_Reader(sam):
        if align.aligned:
            if align.iv.chrom in dids:
                print align.read.name
if __name__ == '__main__':
    # Strip the program name from argv before handing over to main().
    main(sys.argv[1:])
| tianyang-li/meta-transcriptome | trans-len/get_SAM_aligned_query_ids.py | Python | gpl-3.0 | 1,493 | [
"HTSeq"
] | 238e496c291e8a4aceb12587e876d687bffa937f3a958bdec37d88123d2533aa |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Vladimír Slávik 2010 - 2011
# Python 2.6
#
# for Simutrans
# http://www.simutrans.com
#
# code is public domain
#
#
# creates a timeline chart for given objects
#-----
from __future__ import print_function
#-----
paksize = 128 # default only
stop_year = 2050 # don't plot after this date
font_name = "Verdana"
font_size = 10 # unknown units!
font_aa = False # antialiasing
month_width = 1 # px
bin_width = 10 # years
bin_height = 5 # items
item_height = 20 # px
left_margin = 250 # px
right_margin = 80 # px
vertical_margin = 10 # px
bin_text_space = 12 # px
parameter_bar_max = 50 # px
background_color = (231,255,255)
grid_color = (0,0,0)
name_color = (128,128,128)
power_color = (0,0,255)
speed_color = (0,128,0)
data_color_normal = (192,192,192)
data_color_nointro = (255,192,192)
data_color_noretire = (168,228,168)
data_color_always = (255,255,255)
data_color_unfree = (255,192,192)
data_color_replaced = (150,200,255)
filter_goods = [] # if not empty [], only vehicles for these goods will be plotted
filter_goods_invert = False # inverts the goods logic fom "only these" to "all except these"
filter_ways = [] # same as goods
filter_powered = False # only with power>0
filter_front = False # only with Constraint[Prev]=none present
filter_authors = [] # same as goods
#-----
import os
import simutools
Data = []
canvas = None
font = None
start_year = 0
end_year = 10000
max_speed = 0
max_power = 0
year_width = month_width * 12
bin_height_px = bin_height * item_height + bin_text_space
check_authors = False
check_compat = False
compat = {}
#-----
def objCompare(A, B) :
	"""Python 2 sort comparator ordering objects by (intro date, retire date).

	Fix: the month handling was asymmetric -- A used ``intro_month`` as-is
	(default 0) while B used ``intro_month - 1`` (default 1), so two objects
	with identical dates compared as unequal, making the comparator
	inconsistent.  Both sides now use the same ``year * 12 + (month - 1)``
	key.
	"""
	# first should come object with earlier dates!
	a_intro = int(A.ask("intro_year", 0)) * 12 + int(A.ask("intro_month", 1)) - 1
	b_intro = int(B.ask("intro_year", 0)) * 12 + int(B.ask("intro_month", 1)) - 1
	if a_intro != b_intro :
		return cmp(a_intro, b_intro)
	else :
		a_retro = int(A.ask("retire_year", 2999)) * 12 + int(A.ask("retire_month", 1)) - 1
		b_retro = int(B.ask("retire_year", 2999)) * 12 + int(B.ask("retire_month", 1)) - 1
		return cmp(a_retro, b_retro)
#-----
def prepareCanvas() :
	"""Determine the plotted year range and parameter maxima, then create the
	pygame canvas with the year grid.  Python 2 only (``xrange``, int div).

	Sets the module globals start_year, end_year, max_speed, max_power and
	canvas.
	"""
	global start_year, end_year, canvas, max_speed, max_power
	start_year = 0
	i = 0
	while start_year == 0 :
		start_year = int(Data[i].ask("intro_year", "0"))
		i += 1
	# intro is primary sorting, so just skip the first items without intro date
	end_year_retire = 0
	end_year_intro = 0
	for item in Data :
		intro_year = int(item.ask("intro_year", 0))
		retire_year = int(item.ask("retire_year", 0))
		intro_month = int(item.ask("intro_month", 1)) - 1
		retire_month = int(item.ask("retire_month", 1)) - 1
		if (intro_year * 12 + intro_month) < (retire_year * 12 + retire_month) or (retire_year == 0):
			# check for valid timeline (disabling)
			end_year_retire = max(end_year_retire, retire_year)
			end_year_intro = max(end_year_intro, intro_year)
			max_power = max(max_power, (int(item.ask("power", 0)) * int(item.ask("gear", 100))) / 100)
			max_speed = max(max_speed, int(item.ask("speed", 0)))
	# all else is not sorted or has special cases and must be found in the whole dataset
	end_year = min(max(end_year_retire, end_year_intro) + 20, stop_year)
	width = left_margin + right_margin
	width += (end_year - start_year) * year_width # for data
	bin_count_vertical = len(Data) / bin_height
	if len(Data) % bin_height > 0 :
		bin_count_vertical += 1
	height = bin_count_vertical * bin_height_px # every "bin" includes year marks
	height += vertical_margin * 2
	canvas = pygame.Surface((width, height))
	canvas.fill(background_color)
	# Draw one horizontal separator per bin plus decade tick marks + labels.
	for i in xrange(bin_count_vertical) :
		y = vertical_margin + i * bin_height_px
		start = (left_margin, y)
		stop = (width - right_margin, y)
		pygame.draw.line(canvas, grid_color, start, stop, 1)
		for year in xrange(start_year, end_year, 1) :
			x = left_margin + (year - start_year) * year_width
			if year % 10 == 0 :
				start = (x, y)
				stop = (x, y + bin_height_px)
				pygame.draw.line(canvas, grid_color, start, stop, 1)
				surf = font.render(str(year), font_aa, grid_color, background_color)
				canvas.blit(surf, (x + 3, y + 1))
	return
#-----
def procObj(i) :
	"""Plot the i-th object of the sorted Data list onto the canvas: a colored
	availability bar with speed/power sub-bars, the object's image and its
	name/parameter labels.  Python 2 only (``has_key``, old raise syntax).
	"""
	obj = Data[i]
	objname = obj.ask("name")
	intro_year = int(obj.ask("intro_year", 0))
	intro_month = int(obj.ask("intro_month", 1)) - 1
	retire_year = int(obj.ask("retire_year", stop_year))
	retire_month = int(obj.ask("retire_month", 1)) - 1
	bin_number = i / bin_height # which bin is this in
	bin_position = i % bin_height # which item is this in that bin
	# bar color codes information about the vehicle:
	data_color = data_color_normal
	# start with default state (from-to)
	# check timeline state
	if (intro_year == 0) and (retire_year == 0) :
		data_color = data_color_always
		intro_year = start_year
		intro_month = 0
		retire_year = end_year
		retire_month = 12
	elif intro_year == 0 :
		data_color = data_color_nointro
		intro_year = start_year
		intro_month = 0
	elif retire_year == stop_year :
		data_color = data_color_noretire
		retire_year = end_year
		retire_month = 12
	else:
		pass
		# nothing to do oterwise
	# knowing if the vehicle is not free is still more important
	if check_authors :
		if not CheckAuthors(obj.ask("copyright", "", False)) :
			data_color = data_color_unfree
	# compat.tab checking overrides all since it is the most interesting info overall
	if check_compat :
		if compat.has_key(objname + "\n") :
			data_color = data_color_replaced
	is_disabled = (intro_year * 12 + intro_month) >= (retire_year * 12 + retire_month)
	# important to think about these cases - they can stretch timeline really loooong into future and increase image size -> memory...
	start_x = left_margin + ((intro_year - start_year) * 12 + intro_month) * month_width
	end_x = left_margin + ((retire_year - start_year) * 12 + retire_month) * month_width
	start_y = vertical_margin + bin_number * bin_height_px + bin_text_space + bin_position * item_height + 1
	end_y = start_y + item_height - 2
	if not is_disabled :
		canvas.fill(data_color, (start_x, start_y, end_x - start_x, end_y - start_y))
		# Speed (top) and power (bottom) bars scaled against dataset maxima.
		speed_length = int(obj.ask("speed", 0)) * parameter_bar_max / max_speed if max_speed > 0 else 0
		power_length = (int(obj.ask("power", 0)) * int(obj.ask("gear", 100))) * parameter_bar_max / 100 / max_power if max_power > 0 else 0
		# integer arithmetic - multiplication always first!
		start = (start_x, start_y)
		end = (start_x + speed_length, start_y)
		pygame.draw.line(canvas, speed_color, start, end, 2)
		start = (start_x, end_y - 2)
		end = (start_x + power_length, end_y - 2)
		pygame.draw.line(canvas, power_color, start, end, 2)
	else:
		pass
	# charting done, now picture and labels
	# Probe image parameters in order of preference (freight > empty > plain).
	picture_txt_ref = None
	if not picture_txt_ref :
		picture_txt_ref = obj.ask("freightimage[0][sw]")
	if not picture_txt_ref :
		picture_txt_ref = obj.ask("freightimage[0][ne]")
	if not picture_txt_ref :
		picture_txt_ref = obj.ask("freightimage[sw]")
	if not picture_txt_ref :
		picture_txt_ref = obj.ask("freightimage[ne]")
	if not picture_txt_ref :
		picture_txt_ref = obj.ask("emptyimage[sw]")
	if not picture_txt_ref :
		picture_txt_ref = obj.ask("image[sw]")
	if not picture_txt_ref :
		picture_txt_ref = obj.ask("image[ne]")
	if not picture_txt_ref :
		raise Exception, "no images for object %s!" % objname
	# attempt at extracting the "best" image
	picture_ref = simutools.SimutransImgParam(picture_txt_ref)
	picfilename = os.path.join(os.path.dirname(obj.srcfile), picture_ref.file + ".png")
	big_pic = pygame.image.load(picfilename)
	srccoords = pygame.Rect(picture_ref.coords[1] * paksize, picture_ref.coords[0] * paksize, paksize, paksize)
	targetcoords = (0, 0, paksize, paksize)
	small_pic = pygame.Surface((paksize, paksize))
	small_pic.blit(big_pic, targetcoords, srccoords)
	small_pic.set_colorkey((231,255,255))
	srccoords = small_pic.get_bounding_rect()
	final_pic = pygame.Surface((srccoords.width, srccoords.height))
	final_pic.fill((231,255,255))
	final_pic.blit(small_pic, (0,0), srccoords)
	final_pic.set_colorkey((231,255,255))
	del big_pic, small_pic
	# final_pic is now only wanted picture
	targetcoords = final_pic.get_rect()
	targetcoords.bottom = start_y + item_height
	targetcoords.right = left_margin - 2 if is_disabled else start_x - 2
	graphic_left = targetcoords.left
	canvas.blit(final_pic, targetcoords)
	del final_pic
	# picture is on the main canvas
	surf = font.render(objname, font_aa, name_color)
	targetcoords = surf.get_rect()
	targetcoords.top = start_y + 2
	targetcoords.right = graphic_left - 4
	canvas.blit(surf, targetcoords)
	# name written
	text = "%i km/h" % int(obj.ask("speed", 0))
	pwr = int(obj.ask("power", 0))
	if pwr > 0 : text += ", %i kW" % pwr
	surf = font.render(text, font_aa, name_color)
	if not is_disabled :
		canvas.blit(surf, (start_x + 4, start_y + 2))
	else :
		canvas.blit(surf, (left_margin + 4, start_y + 2))
	return
#-----
help_string = """
Script for generating Simutrans vehicle timeline reports as images. Runs in
directory where started, searches folders recursively for dat files and reads
vehicle definitions for plotting. These can be filtered by waytype and carried
goods.
Requires PyGame and module 'simutools'.
Usage: timeline-chart [options]
These are available options, in any order:
size=<number> Sets tile size for reading images.
a=<string> Adds a filter by author name.
g=<string> Adds a filter by goods type (freight).
w=<string> Adds a filter by waytype.
invert Flips goods filtering logic.
engine Include only vehicles with engine.
power Alias for engine.
front Include only vehicles that can be head of a convoy.
<unrecognized-string> Shortcut for g=<string>.
For filters, the following rules apply:
* Default is empty filter.
* If no entries are specified, filter is empty and not active.
Further (optional) capabilities are:
* Marking vehicles that have compatibility entries - needs compat.tab in
directory where started. Automatically enabled if the file is present.
* Marking vehicles according to allowed author list - needs simuauthors.py
loadable, refer to the module for further information. Automatically enabled
if the module can be loaded.
"""
def process_args() :
	"""Parse command line options from sys.argv into the module-level
	filter/configuration globals.

	Fix: iterate over ``sys.argv[1:]`` directly instead of letting the
	script name fall through to the goods filter and deleting
	``filter_goods[0]`` afterwards -- the old approach misbehaved whenever
	argv[0] happened to match one of the option prefixes (e.g. a path
	containing ``size=``).
	"""
	from sys import argv, exit
	for i in argv[1:] :
		if i == "--help" or i == "/?":
			print(help_string)
			exit(0)
		elif i == "invert" :
			global filter_goods_invert
			filter_goods_invert = True
		elif i == "power" or i == "engine":
			global filter_powered
			filter_powered = True
		elif i == "front" :
			global filter_front
			filter_front = True
		elif i[:5] == "size=" :
			global paksize
			paksize = int(i[5:])
		elif i[:2] == "w=" :
			filter_ways.append(i[2:])
		elif i[:2] == "g=" :
			filter_goods.append(i[2:])
		elif i[:2] == "a=" :
			filter_authors.append(i[2:])
		else :
			# Shortcut: bare arguments are treated as goods filters.
			filter_goods.append(i)
#-----
def test_authorChecks() :
	"""Enable author checking iff the optional simuauthors module loads.

	Fix: the original assigned ``check_authors`` (and imported
	``CheckAuthors``) into function locals only, so the module-level flag
	stayed False and procObj() would hit a NameError on CheckAuthors.
	Both names are now declared global.
	"""
	global check_authors, CheckAuthors
	try :
		from simuauthors import CheckAuthors
		check_authors = True
		print("author checking enabled.")
	except ImportError :
		check_authors = False
#-----
def test_compat() :
	"""Load compat.tab (name/replacement pairs on alternating lines) and
	enable compat checking when the file is readable.

	Fix: ``compat`` and ``check_compat`` were assigned as locals, so the
	parsed table never reached the module-level globals used by procObj().
	Also narrowed the bare ``except`` to ``Exception`` so KeyboardInterrupt
	and SystemExit are no longer swallowed.
	"""
	global compat, check_compat
	try :
		f = open("compat.tab", "r")
		lines = f.readlines()
		f.close()
		if (len(lines) % 2) :
			del lines[-1]
			# delete last odd, if present
		compat = dict(zip(lines[::2], lines[1::2]))
		# warning - entries still do include trailing \n chars!
		check_compat = True
		print("compat.tab checking enabled.")
	except Exception :
		check_compat = False
#-----
# Script entry: parse CLI filters, detect optional features, then plot.
process_args()
test_authorChecks()
test_compat()
try :
	import pygame
except ImportError :
	print("This script needs PyGame to work!")
	print("Visit http://www.pygame.org to get it.")
else :
	pygame.font.init()
	if font_name :
		font = pygame.font.SysFont(font_name, font_size)
	else :
		font = pygame.font.Font(font_name, font_size)
	print("loading files...")
	# Recursively load all dat files below the current directory into Data.
	simutools.walkFiles(os.getcwd(), simutools.loadFile, cbparam=Data)
	simutools.pruneList(Data)
	simutools.pruneObjs(Data, ["vehicle", "citycar"]) # remove citycars that share obj=type !
	if filter_goods :
		print("filtering by cargo types:", " ".join(filter_goods))
		simutools.pruneByParam(Data, "freight", filter_goods, filter_goods_invert)
	if filter_ways :
		print("filtering by way types:", " ".join(filter_ways))
		simutools.pruneByParam(Data, "waytype", filter_ways)
	if filter_authors :
		auth_str = " ".join(filter_authors).lower()
		print("filtering by author names:", auth_str)
		i = len(Data) - 1;
		while i >= 0 :
			author = Data[i].ask("copyright", "").lower()
			if not (author in auth_str) :
				del(Data[i]);
			i = i - 1;
	if filter_powered :
		print("filtering for powered vehicles...")
		simutools.pruneByParam(Data, "power", [0, None], True)
	if filter_front :
		print("filtering for front vehicles...")
		# Keep only vehicles with a "Constraint[Prev]=none" entry.
		i = len(Data) - 1;
		while i >= 0 :
			constraints = Data[i].ask_indexed("constraint")
			keep = False
			for c in constraints :
				if c[0].lower().startswith("[prev") and c[1].lower() == "none" :
					keep = True
					break
			if not keep :
				del(Data[i]);
			i = i - 1;
	print("filtered to", len(Data), "items.")
	Data.sort(objCompare)
	if len(Data) > 0 :
		print("plotting...")
		prepareCanvas()
		for i in xrange(len(Data)) :
			procObj(i)
		pygame.image.save(canvas, "timeline.png")
	else :
		print("no matches left!")
	print("finished.")
#-----
# EOF
"VisIt"
] | ea477654921c5a4ba5b48a779635433bff0417cc5e1c78cd6569b556e3cb4611 |
from math import pi, cos, sin, sqrt, acos
from ase.atom import Atom
from ase.atoms import Atoms
from ase.parallel import paropen
def read_I_info(fileobj, index=-1):
    """Read atoms from an I_info file.

    Args:
        fileobj: Path to the file or an already-open file-like object.
        index: Unused; kept for compatibility with the ase.io reader API.

    Returns:
        ase.atoms.Atoms with one Atom per data line.  Parsing stops at the
        first line whose first field is not double-quoted (or at EOF).
    """
    opened_here = False
    if isinstance(fileobj, str):
        fileobj = open(fileobj)
        opened_here = True
    try:
        lines = fileobj.readlines()
    finally:
        # Fix: only close handles we opened ourselves; the original leaked
        # the file object when given a path.
        if opened_here:
            fileobj.close()
    if lines:
        del lines[0]  # skip the header line
    finished = False
    s = Atoms()
    # Fix: guard against truncated files (missing terminator line) and
    # blank lines, which previously raised IndexError.
    while not finished and lines:
        w = lines.pop(0).split()
        if w and w[0].startswith('"'):
            s.append(Atom(w[0].replace('"', ''),
                          [float(w[3]), float(w[4]), float(w[5])]))
        else:
            finished = True
    return s
| freephys/python_ase | ase/io/cmdft.py | Python | gpl-3.0 | 585 | [
"ASE"
] | ceed440997ec050cdaf499324b8d828685780610f98af5019e41d1cd615cb15d |
"""Parsing functions for Binvox files.
https://www.patrickmin.com/binvox/binvox.html
Exporting meshes as binvox files requires binvox CL tool to be on your path.
"""
import os
import subprocess
import numpy as np
import collections
from distutils.spawn import find_executable
from .. import util
from ..base import Trimesh
# Absolute path to the `binvox` CLI tool, or None when it is not on PATH.
binvox_encoder = find_executable('binvox')

# Parsed binvox file: run-length-encoded voxel data plus grid metadata.
Binvox = collections.namedtuple(
    'Binvox', ['rle_data', 'shape', 'translate', 'scale'])
def parse_binvox_header(fp):
    """
    Read the header from a binvox file.

    Spec available:
    https://www.patrickmin.com/binvox/binvox.html

    Parameters
    ------------
    fp: file-object
      File like object with binvox file

    Returns
    ----------
    shape : tuple
      Shape of binvox according to binvox spec
    translate : tuple
      Translation
    scale : float
      Scale of voxels

    Raises
    ------------
    IOError
      If invalid binvox file.
    """
    first = fp.readline().strip()
    # The stream may be text or binary; pick literals of the matching type.
    if isinstance(first, bytes):
        magic, sep = b'#binvox', b' '
    else:
        magic, sep = '#binvox', ' '
    if not first.startswith(magic):
        raise IOError('Not a binvox file')
    shape = tuple(int(v) for v in fp.readline().strip().split(sep)[1:])
    translate = tuple(float(v) for v in fp.readline().strip().split(sep)[1:])
    scale = float(fp.readline().strip().split(sep)[1])
    fp.readline()  # consume the trailing 'data' line
    return shape, translate, scale
def parse_binvox(fp, writeable=False):
    """
    Read a binvox file, spec at
    https://www.patrickmin.com/binvox/binvox.html

    Parameters
    ------------
    fp: file-object
      File like object with binvox file

    Returns
    ----------
    binvox : namedtuple
      Containing data
    rle : numpy array
      Run length encoded data

    Raises
    ------------
    IOError
      If invalid binvox file
    """
    # Header first, then the remainder of the stream is RLE voxel data.
    shape, translate, scale = parse_binvox_header(fp)
    raw = fp.read()
    rle_data = np.frombuffer(raw, dtype=np.uint8)
    if writeable:
        # frombuffer returns a read-only view; copy to allow mutation.
        rle_data = rle_data.copy()
    return Binvox(rle_data, shape, translate, scale)
# Template for the ASCII header section of a .binvox file (including the
# terminating 'data' line); filled in by `binvox_header`.
_binvox_header = '''#binvox 1
dim {sx} {sy} {sz}
translate {tx} {ty} {tz}
scale {scale}
data
'''
def binvox_header(shape, translate, scale):
    """
    Get a binvox header string.

    Parameters
    --------
    shape: length 3 iterable of ints denoting shape of voxel grid.
    translate: length 3 iterable of floats denoting translation.
    scale: num length of entire voxel grid.

    Returns
    --------
    string including "data\n" line.
    """
    dx, dy, dz = (int(v) for v in shape)
    tx, ty, tz = translate
    # Template inlined from the module-level `_binvox_header` constant.
    return ('#binvox 1\n'
            'dim {} {} {}\n'
            'translate {} {} {}\n'
            'scale {}\n'
            'data\n').format(dx, dy, dz, tx, ty, tz, scale)
def binvox_bytes(rle_data, shape, translate=(0, 0, 0), scale=1):
    """Get a binary representation of binvox data.

    Parameters
    --------
    rle_data : numpy array
      Run-length encoded uint8 numpy array.
    shape : (3,) int
      Shape of voxel grid.
    translate : (3,) float
      Translation of voxels
    scale : float
      Length of entire voxel grid.

    Returns
    --------
    data : bytes
      Suitable for writing to binary file

    Raises
    --------
    ValueError
      If `rle_data` is not of dtype uint8.
    """
    if rle_data.dtype != np.uint8:
        raise ValueError(
            "rle_data.dtype must be np.uint8, got %s" % rle_data.dtype)

    header = binvox_header(shape, translate, scale).encode()
    # Fix: use tobytes() -- ndarray.tostring() was deprecated in NumPy 1.19
    # and removed in NumPy 2.0.
    return header + rle_data.tobytes()
def voxel_from_binvox(
        rle_data, shape, translate=None, scale=1.0, axis_order='xzy'):
    """
    Factory for building from data associated with binvox files.

    Parameters
    ---------
    rle_data : numpy
      Run-length-encoded of flat voxel
      values, or a `trimesh.rle.RunLengthEncoding` object.
      See `trimesh.rle` documentation for description of encoding
    shape : (3,) int
      Shape of voxel grid.
    translate : (3,) float
      Translation of voxels
    scale : float
      Length of entire voxel grid.
    axis_order : iterable
      With values in ('x', 'y', 'z', 0, 1, 2),
      where x => 0, y => 1, z => 2
      denoting the order of axes in the encoded data. binvox by
      default saves in xzy order, but using `xyz` (or (0, 1, 2)) will
      be faster in some circumstances.

    Returns
    ---------
    result : VoxelGrid
      Loaded voxels
    """
    # shape must be uniform else scale is ambiguous
    from ..voxel import encoding as enc
    from ..voxel.base import VoxelGrid
    from .. import transformations
    # Wrap raw arrays in a RunLengthEncoding; pass existing encodings through.
    if isinstance(rle_data, enc.RunLengthEncoding):
        encoding = rle_data
    else:
        encoding = enc.RunLengthEncoding(rle_data, dtype=bool)
    # translate = np.asanyarray(translate) * scale)
    # translate = [0, 0, 0]
    transform = transformations.scale_and_translate(
        scale=scale / (np.array(shape) - 1),
        translate=translate)
    if axis_order == 'xzy':
        # binvox stores voxels in x-z-y order; permute back to x-y-z.
        perm = (0, 2, 1)
        shape = tuple(shape[p] for p in perm)
        encoding = encoding.reshape(shape).transpose(perm)
    elif axis_order is None or axis_order == 'xyz':
        encoding = encoding.reshape(shape)
    else:
        # NOTE(review): the '%s' placeholder is never filled in here.
        raise ValueError(
            "Invalid axis_order '%s': must be None, 'xyz' or 'xzy'")
    assert(encoding.shape == shape)
    return VoxelGrid(encoding, transform)
def load_binvox(file_obj,
                resolver=None,
                axis_order='xzy',
                file_type=None):
    """
    Load a trimesh `VoxelGrid` instance from an open binvox file.

    Parameters
    -----------
    file_obj : file-like object
      Contains binvox data
    resolver : unused
    axis_order : str
      Order of axes in encoded data.
      Binvox default is 'xzy', but 'xyz' may be faster
      where this is not relevant.

    Returns
    ---------
    result : trimesh.voxel.VoxelGrid
      Loaded voxel data

    Raises
    ---------
    ValueError
      If `file_type` is neither None nor 'binvox'.
    """
    if file_type is not None and file_type != 'binvox':
        raise ValueError(
            'file_type must be None or binvox, got %s' % file_type)
    parsed = parse_binvox(file_obj, writeable=True)
    return voxel_from_binvox(
        rle_data=parsed.rle_data,
        shape=parsed.shape,
        translate=parsed.translate,
        scale=parsed.scale,
        axis_order=axis_order)
def export_binvox(voxel, axis_order='xzy'):
    """
    Export `trimesh.voxel.VoxelGrid` instance to bytes

    Parameters
    ------------
    voxel : `trimesh.voxel.VoxelGrid`
      Assumes axis ordering of `xyz` and encodes
      in binvox default `xzy` ordering.
    axis_order : str
      Elements in ('x', 'y', 'z', 0, 1, 2), the order
      of axes to encode data (standard is 'xzy' for binvox). `voxel`
      data is assumed to be in order 'xyz'.

    Returns
    -----------
    result : bytes
      Representation according to binvox spec
    """
    translate = voxel.translation
    scale = voxel.scale * ((np.array(voxel.shape) - 1))
    # Flip axes whose scale came out negative so the exported grid keeps a
    # positive scale; then require it to be (near-)uniform.
    neg_scale, = np.where(scale < 0)
    encoding = voxel.encoding.flip(neg_scale)
    scale = np.abs(scale)
    if not util.allclose(scale[0], scale[1:], 1e-6 * scale[0] + 1e-8):
        raise ValueError('Can only export binvox with uniform scale')
    scale = scale[0]
    if axis_order == 'xzy':
        # binvox files store voxels in x-z-y order.
        encoding = encoding.transpose((0, 2, 1))
    elif axis_order != 'xyz':
        raise ValueError('Invalid axis_order: must be one of ("xyz", "xzy")')
    rle_data = encoding.flat.run_length_data(dtype=np.uint8)
    return binvox_bytes(
        rle_data, shape=voxel.shape, translate=translate, scale=scale)
class Binvoxer(object):
    """
    Interface for binvox CL tool.

    This class is responsible purely for making calls to the CL tool. It
    makes no attempt to integrate with the rest of trimesh at all.

    Constructor args configure command line options.
    `Binvoxer.__call__` operates on the path to a model file.

    If using this interface in published works, please cite the references
    below.

    See CL tool website for further details.
    https://www.patrickmin.com/binvox/

    @article{nooruddin03,
        author = {Fakir S. Nooruddin and Greg Turk},
        title = {Simplification and Repair of Polygonal Models Using Volumetric
                 Techniques},
        journal = {IEEE Transactions on Visualization and Computer Graphics},
        volume = {9},
        number = {2},
        pages = {191--205},
        year = {2003}
    }

    @Misc{binvox,
        author = {Patrick Min},
        title = {binvox},
        howpublished = {{\tt http://www.patrickmin.com/binvox} or
                        {\tt https://www.google.com/search?q=binvox}},
        year = {2004 - 2019},
        note = {Accessed: yyyy-mm-dd}
    }
    """

    SUPPORTED_INPUT_TYPES = (
        'ug',
        'obj',
        'off',
        'dfx',
        'xgl',
        'pov',
        'brep',
        'ply',
        'jot',
    )

    SUPPORTED_OUTPUT_TYPES = (
        'binvox',
        'hips',
        'mira',
        'vtk',
        'raw',
        'schematic',
        'msh',
    )

    def __init__(
            self,
            dimension=32,
            file_type='binvox',
            z_buffer_carving=True,
            z_buffer_voting=True,
            dilated_carving=False,
            exact=False,
            bounding_box=None,
            remove_internal=False,
            center=False,
            rotate_x=0,
            rotate_z=0,
            wireframe=False,
            fit=False,
            block_id=None,
            use_material_block_id=False,
            use_offscreen_pbuffer=True,
            downsample_factor=None,
            downsample_threshold=None,
            verbose=False,
            binvox_path=binvox_encoder,
    ):
        """
        Configure the voxelizer.

        Parameters
        ------------
        dimension: voxel grid size (max 1024 when not using exact)
        file_type: str
            Output file type, supported types are:
              'binvox'
              'hips'
              'mira'
              'vtk'
              'raw'
              'schematic'
              'msh'
        z_buffer_carving : use z buffer based carving. At least one of
            `z_buffer_carving` and `z_buffer_voting` must be True.
        z_buffer_voting: use z-buffer based parity voting method.
        dilated_carving: stop carving 1 voxel before intersection.
        exact: any voxel with part of a triangle gets set. Does not use
            graphics card.
        bounding_box: 6-element float list/tuple of min, max values,
            (minx, miny, minz, maxx, maxy, maxz)
        remove_internal: remove internal voxels if True. Note there is some odd
            behaviour if boundary voxels are occupied.
        center: center model inside unit cube.
        rotate_x: number of 90 degree ccw rotations around x-axis before
            voxelizing.
        rotate_z: number of 90 degree cw rotations around z-axis before
            voxelizing.
        wireframe: also render the model in wireframe (helps with thin parts).
        fit: only write voxels in the voxel bounding box.
        block_id: when converting to schematic, use this as the block ID.
        use_material_block_id: when converting from obj to schematic, parse
            block ID from material spec "usemtl blockid_<id>" (ids 1-255 only).
        use_offscreen_pbuffer: use offscreen pbuffer instead of onscreen
            window.
        downsample_factor: downsample voxels by this factor in each dimension.
            Must be a power of 2 or None. If not None/1 and `core dumped`
            errors occur, try slightly adjusting dimensions.
        downsample_threshold: when downsampling, destination voxel is on if
            more than this number of voxels are on.
        verbose: if False, silences stdout/stderr from subprocess call.
        binvox_path: path to binvox executable. The default looks for an
            executable called `binvox` on your `PATH`.

        Raises
        ------------
        IOError: if no binvox executable is available.
        ValueError: on invalid option combinations.
        """
        # BUG FIX: check the argument actually passed in, not the
        # module-level `binvox_encoder` default, so an explicitly supplied
        # `binvox_path` works even when no binvox was found on PATH.
        if binvox_path is None:
            raise IOError(
                'No `binvox_path` provided, and no binvox executable found '
                'on PATH. \nPlease go to https://www.patrickmin.com/binvox/ and '
                'download the appropriate version.')

        # the graphics-card (non-exact) algorithm is limited to 1024
        if dimension > 1024 and not exact:
            raise ValueError(
                'Maximum dimension without exact is 1024, got %d' % dimension)
        if file_type not in Binvoxer.SUPPORTED_OUTPUT_TYPES:
            raise ValueError(
                'file_type %s not in set of supported output types %s' %
                (file_type, str(Binvoxer.SUPPORTED_OUTPUT_TYPES)))
        args = [binvox_path, '-d', str(dimension), '-t', file_type]
        if exact:
            args.append('-e')

        # z-buffer carving/voting: both enabled is the tool default,
        # so no flag is emitted in that case
        if z_buffer_carving:
            if z_buffer_voting:
                pass
            else:
                args.append('-c')
        elif z_buffer_voting:
            args.append('-v')
        else:
            raise ValueError(
                'At least one of `z_buffer_carving` or `z_buffer_voting` must '
                'be True')

        if dilated_carving:
            args.append('-dc')

        # Additional parameters
        if bounding_box is not None:
            if len(bounding_box) != 6:
                raise ValueError('bounding_box must have 6 elements')
            args.append('-bb')
            args.extend(str(b) for b in bounding_box)

        if remove_internal:
            args.append('-ri')
        if center:
            args.append('-cb')
        # each rotation flag applies one 90-degree rotation
        args.extend(('-rotx',) * rotate_x)
        args.extend(('-rotz',) * rotate_z)
        if wireframe:
            args.append('-aw')
        if fit:
            args.append('-fit')
        if block_id is not None:
            args.extend(('-bi', block_id))
        if use_material_block_id:
            args.append('-mb')
        if use_offscreen_pbuffer:
            args.append('-pb')
        if downsample_factor is not None:
            # '-down' halves the resolution each time it appears
            times = np.log2(downsample_factor)
            if int(times) != times:
                raise ValueError(
                    'downsample_factor must be a power of 2, got %d'
                    % downsample_factor)
            args.extend(('-down',) * int(times))
        if downsample_threshold is not None:
            args.extend(('-dmin', str(downsample_threshold)))

        # placeholder for the model path, overwritten on each __call__
        args.append('PATH')
        self._args = args
        self._file_type = file_type

        self.verbose = verbose

    @property
    def file_type(self):
        # output file type chosen at construction time
        return self._file_type

    def __call__(self, path, overwrite=False):
        """
        Create a voxel file in the same directory as model at `path`.

        Parameters
        ------------
        path: string path to model file. Supported types:
            'ug'
            'obj'
            'off'
            'dfx'
            'xgl'
            'pov'
            'brep'
            'ply'
            'jot' (polygons only)
        overwrite: if False and a file already exists at the output path
            (head.file_type), an IOError is raised. If True, the tool is
            run regardless and any existing file is replaced.

        Returns
        ------------
        string path to voxel file. File type given by file_type in constructor.
        """
        head, ext = os.path.splitext(path)
        ext = ext[1:].lower()
        if ext not in Binvoxer.SUPPORTED_INPUT_TYPES:
            raise ValueError(
                'file_type %s not in set of supported input types %s' %
                (ext, str(Binvoxer.SUPPORTED_INPUT_TYPES)))
        out_path = '%s.%s' % (head, self._file_type)
        if os.path.isfile(out_path) and not overwrite:
            raise IOError(
                'Attempted to voxelize object a %s, but there is already a '
                'file at output path %s' % (path, out_path))
        # swap the trailing 'PATH' placeholder for the real model path;
        # note this mutates shared state, so an instance is not re-entrant
        self._args[-1] = path

        # generalizes to python2 and python3
        # will capture terminal output into variable rather than printing
        verbosity = subprocess.check_output(self._args)
        # if requested print ourselves
        if self.verbose:
            print(verbosity)
        return out_path
def voxelize_mesh(mesh,
                  binvoxer=None,
                  export_type='off',
                  **binvoxer_kwargs):
    """
    Voxelize a Trimesh object via the binvox CL tool.

    The mesh is written to a temporary file of type `export_type`,
    which is then handed to `Binvoxer.__call__` (using either the
    supplied `binvoxer` or a new one built from `binvoxer_kwargs`).

    Parameters
    ------------
    mesh: Trimesh object to voxelize.
    binvoxer: optional Binvoxer instance.
    export_type: file type to export mesh as temporarily for Binvoxer to
        operate on.
    **binvoxer_kwargs: kwargs for creating a new Binvoxer instance. If
        binvoxer is provided, this must be empty.

    Returns
    ------------
    `VoxelGrid` object resulting.
    """
    if not isinstance(mesh, Trimesh):
        raise ValueError('mesh must be Trimesh instance, got %s' % str(mesh))
    if binvoxer is None:
        binvoxer = Binvoxer(**binvoxer_kwargs)
    elif binvoxer_kwargs:
        raise ValueError('Cannot provide binvoxer and binvoxer_kwargs')
    if binvoxer.file_type != 'binvox':
        raise ValueError(
            'Only "binvox" binvoxer `file_type` currently supported')
    with util.TemporaryDirectory() as tmp_dir:
        # export the mesh, voxelize it, then parse the result back in
        mesh_path = os.path.join(tmp_dir, 'model.%s' % export_type)
        with open(mesh_path, 'wb') as export_file:
            mesh.export(export_file, file_type=export_type)
        voxel_path = binvoxer(mesh_path)
        with open(voxel_path, 'rb') as voxel_file:
            return load_binvox(voxel_file)
# loader registry: maps file extension -> loader callable
_binvox_loaders = {'binvox': load_binvox}
| dajusc/trimesh | trimesh/exchange/binvox.py | Python | mit | 17,700 | [
"VTK"
] | dcfd2fed22458fa69deb9b1f2321cb8cf30fb7cfb031ee2b5f0f1016ec2e7715 |
from __future__ import print_function
from __future__ import absolute_import
import unittest
import csv
import PyOpenWorm
from PyOpenWorm.worm import Worm
from PyOpenWorm.cell import Cell
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.connection import Connection
from PyOpenWorm.context import Context
from PyOpenWorm.utils import normalize_cell_name
import rdflib as R
from six.moves import map
from six.moves import range
import pytest
@pytest.mark.inttest
class DataIntegrityTest(unittest.TestCase):
    """ Integration tests that read from the database and ensure that basic
    queries have expected answers, as a way to keep data quality high.

    """

    @classmethod
    def setUpClass(cls):
        # grab the list of the names of the 302 neurons
        # FIX: use a context manager so the CSV handle is always closed
        # (previously the file was opened and never closed)
        cls.neurons = []  # names of the 302 neurons, at class-level scope
        with open('OpenWormData/aux_data/neurons.csv', 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=';', quotechar='|')
            for row in reader:
                # `row` guards against completely blank lines,
                # `row[0]` only saves valid neuron names
                if row and len(row[0]) > 0:
                    cls.neurons.append(row[0])

    def setUp(self):
        PyOpenWorm.connect(configFile='tests/data_integrity_test.conf')
        self.g = PyOpenWorm.config("rdf.graph")
        self.context = Context()
        self.qctx = self.context.stored

    def tearDown(self):
        PyOpenWorm.disconnect()

    def test_correct_neuron_number(self):
        """
        This test verifies that the worm model has exactly 302 neurons.
        """
        # FIXME: Test execution is not properly isolated -- it fails if
        #        test_compare_to_xls fails. Other conditions may cause
        #        it to pass
        net = self.qctx(Worm)().get_neuron_network()
        self.assertEqual(302, net.neuron.count())

    def test_correct_muscle_number(self):
        """
        This test verifies that the worm model has exactly 158 muscles.
        95 body wall muscles, 37 Pharynx muscles, 26 other muscles
        See counts on row 3 here:
        https://docs.google.com/spreadsheets/d/1NDx9LRF_B2phR5w4HlEtxJzxx1ZIPT2gA0ZmNmozjos/edit#gid=1
        """
        self.assertEqual(158, self.qctx(Worm)().muscle.count())

    def test_INS_26_neuropeptide_neuron_list(self):
        """
        This test verifies that the set of neurons which contain the
        neuropeptide INS-26 is correct (the list is given below).
        """
        neuronlist = self.qctx(Neuron)()
        neuronlist.neuropeptide("INS-26")
        thlist = set(x.name() for x in neuronlist.load())
        self.assertEqual({'ASEL', 'ASER', 'ASIL', 'ASIR'}, thlist)

    def test_bentley_expr_data(self):
        """
        This verifies that the data in OpenWormData/aux_data/expression_data/Bentley_et_al_2016_expression.csv has
        been incorporated, by checking that one of the novel receptor expression patterns is in the worm.
        """
        va9 = self.qctx(Neuron)('VA9')
        self.assertIn('LGC-53', va9.receptors())

    def test_unique_neuron_node(self):
        """
        There should one and only one unique RDF node for every neuron. If
        more than one is present for a given cell name, then our data is
        inconsistent. If there is not at least one present, then we are
        missing neurons.
        """
        results = {}
        for n in self.neurons:
            # Create a SPARQL query per neuron that looks for all RDF nodes
            # that have text matching the name of the neuron
            qres = self.g.query(
                """
                SELECT distinct ?n WHERE
                {{
                    ?n <http://openworm.org/entities/Cell/name> {name}
                }} LIMIT 5
                """.format(name=R.Literal(n).n3()))
            results[n] = (len(qres.result), [x[0] for x in qres.result])

        # If there is not only one result back, then there is more than one RDF
        # node.
        more_than_one = [(x, results[x]) for x in results if results[x][0] > 1]
        less_than_one = [(x, results[x]) for x in results if results[x][0] < 1]
        self.assertEqual(
            0,
            len(more_than_one),
            "Some neurons have more than 1 node: " +
            "\n".join(str(x) for x in more_than_one))
        self.assertEqual(
            0,
            len(less_than_one),
            "Some neurons have no node: " +
            "\n".join(str(x) for x in less_than_one))

    def test_neurons_have_types(self):
        """
        Every Neuron should have a non-blank type
        """
        results = set()
        for n in self.neurons:
            s = '''SELECT ?v WHERE {{
                   ?k <http://openworm.org/entities/Cell/name> {name} .
                   ?k <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://openworm.org/entities/Neuron> .
                   ?k <http://openworm.org/entities/Neuron/type> ?v .
                   }}'''.format(name=R.Literal(n).n3())
            qres = self.g.query(s)
            for x in qres:
                v = x[0]
                # only count neurons whose type binding is a literal
                if isinstance(v, R.Literal):
                    results.add(n)

        self.assertEqual(len(results),
                         len(self.neurons),
                         "Some neurons are missing a type: {}".format(set(self.neurons) - results))

    def test_neuron_GJ_degree(self):
        """ Get the number of gap junctions from a networkx representation """
        # was 81 -- now retunring 44 -- are we sure this is correct?
        self.assertEqual(self.qctx(Neuron)(name='AVAL').GJ_degree(), 44)

    def test_neuron_Syn_degree(self):
        """ Get the number of chemical synapses from a networkx representation """
        # was 187 -- now returning 105 -- are we sure this is correct?
        self.assertEqual(self.qctx(Neuron)(name='AVAL').Syn_degree(), 105)

    @unittest.skip("have not yet defined asserts")
    def test_what_nodes_get_type_info(self):
        qres = self.g.query("""SELECT ?o ?p ?s WHERE {{
                            ?o <http://openworm.org/entities/SimpleProperty/value> "motor".
                            ?o ?p ?s # for that type ?o, get its value ?v
                            }} LIMIT 10
                            """)
        for row in qres.result:
            print(row)

    # TODO: Revise this test to pull from the herm_full_edgelist.csv instead of NeuronConnect.xls
    @unittest.skip("deprecated because spreadsheet is no longer supposed to match")
    def test_compare_to_xls(self):
        """ Compare the PyOpenWorm connections to the data in the spreadsheet """
        SAMPLE_CELL = 'AVAL'
        xls_conns = set()
        pow_conns = set()

        # QUERY TO GET ALL CONNECTIONS WHERE SAMPLE_CELL IS ON THE PRE SIDE
        qres = self.g.query("""SELECT ?post_name ?type (STR(?num) AS ?numval) WHERE {{
                            #############################################################
                            # Find connections that have the ?pre_name as our passed in value
                            #############################################################
                            ?pre_cell <http://openworm.org/entities/Cell/name> {name}.
                            ?conn <http://openworm.org/entities/Connection/pre_cell> ?pre_cell.
                            #############################################################
                            # Find all the cells that are on the post side of those
                            # connections and bind their names to ?post_name
                            #############################################################
                            ?conn <http://openworm.org/entities/Connection/post_cell> ?post_cell.
                            ?post_cell <http://openworm.org/entities/Cell/name> ?post_name.
                            ############################################################
                            # Go find the type of the connection and bind to ?type
                            #############################################################
                            ?conn <http://openworm.org/entities/Connection/syntype> ?type.
                            ############################################################
                            # Go find the number of the connection and bind to ?num
                            ############################################################
                            ?conn <http://openworm.org/entities/Connection/number> ?num.
                            ############################################################
                            # Filter out any ?pre_names or ?post_names that aren't literals
                            ############################################################
                            FILTER(isLiteral(?post_name))
                            }}""".format(name=R.Literal(SAMPLE_CELL).n3()))

        def ff(x):
            return str(x.value)

        for line in qres.result:
            t = list(map(ff, line))
            # Insert sample cell name into the result set after the fact
            t.insert(0, SAMPLE_CELL)
            pow_conns.add(tuple(t))

        # QUERY TO GET ALL CONNECTIONS WHERE SAMPLE_CELL IS ON THE *POST* SIDE
        # NOTE(review): this second query uses single braces with .format(),
        # which would raise at runtime if this test were un-skipped; the
        # braces likely need doubling like the query above -- confirm.
        qres = self.g.query("""SELECT ?pre_name ?type (STR(?num) AS ?numval) WHERE {
                            #############################################################
                            # Find connections that have the ?post_name as our passed in value
                            #############################################################
                            ?post_cell <http://openworm.org/entities/Cell/name> {name}.
                            ?conn <http://openworm.org/entities/Connection/post_cell> ?post_cell.
                            #############################################################
                            # Find all the cells that are on the pre side of those
                            # connections and bind their names to ?pre_name
                            #############################################################
                            ?conn <http://openworm.org/entities/Connection/pre_cell> ?pre_cell.
                            ?pre_cell <http://openworm.org/entities/Cell/name> ?pre_name.
                            ############################################################
                            # Go find the type of the connection and bind to ?type
                            #############################################################
                            ?conn <http://openworm.org/entities/Connection/syntype> ?type.
                            ############################################################
                            # Go find the number of the connection and bind to ?num
                            ############################################################
                            ?conn <http://openworm.org/entities/Connection/number> ?num.
                            ############################################################
                            # Filter out any ?pre_names or ?post_names that aren't literals
                            ############################################################
                            FILTER(isLiteral(?pre_name))}""".format(name=R.Literal(SAMPLE_CELL).n3()))

        for line in qres.result:
            t = list(map(ff, line))
            # Insert sample cell name into the result set after the fact
            t.insert(1, SAMPLE_CELL)
            pow_conns.add(tuple(t))

        # get connections from the sheet
        import re
        search_string = re.compile(r'\w+[0]+[1-9]+')
        replace_string = re.compile(r'[0]+')

        def normalize(name):
            # normalize neuron names to match those used at other points
            # see #137 for elaboration
            # if there are zeroes in the middle of a name, remove them
            if re.match(search_string, name):
                name = replace_string.sub('', name)
            return name

        import xlrd
        combining_dict = {}
        # 's' is the workbook sheet
        s = xlrd.open_workbook(
            'OpenWormData/aux_data/NeuronConnect.xls').sheets()[0]
        for row in range(1, s.nrows):
            if s.cell(row, 2).value in ('S', 'Sp', 'EJ') and \
                    SAMPLE_CELL in [s.cell(row, 0).value,
                                    s.cell(row, 1).value]:
                # we're not going to include 'receives' ('r', 'rp') since
                # they're just the inverse of 'sends' also omitting 'nmj'
                # for the time being (no model in db)
                pre = normalize(s.cell(row, 0).value)
                post = normalize(s.cell(row, 1).value)
                num = int(s.cell(row, 3).value)
                if s.cell(row, 2).value == 'EJ':
                    syntype = 'gapJunction'
                elif s.cell(row, 2).value in ('S', 'Sp'):
                    syntype = 'send'

                # add them to a dict to make sure sends ('s') and send-polys ('sp') are summed.
                # keying by connection pairs as a string (e.g. 'sdql,aval,send').
                # values are lists if the form [pre, post, number, syntype].
                string_key = '{},{},{}'.format(pre, post, syntype)
                if string_key in combining_dict.keys():
                    # if key already there, add to number
                    num += int(combining_dict[string_key][3])

                combining_dict[string_key] = (
                    str(pre),
                    str(post),
                    str(syntype),
                    str(int(num)))

        xls_conns = set(combining_dict.values())

        # assert that these two sorted lists are the same
        # using sorted lists because Set() removes multiples
        self.maxDiff = None
        self.assertEqual(sorted(pow_conns), sorted(xls_conns))

    def test_all_cells_have_wormbaseID(self):
        """ This test verifies that every cell has a Wormbase ID. """
        cells = set(self.qctx(Cell)().load())
        for cell in cells:
            self.assertNotEqual(cell.wormbaseID(), '')

    def test_all_neurons_have_wormbaseID(self):
        """ This test verifies that every neuron has a Wormbase ID. """
        net = self.qctx(Worm)().get_neuron_network()
        for neuron_object in net.neurons():
            self.assertNotEqual(neuron_object.wormbaseID(), '')

    def test_all_muscles_have_wormbaseID(self):
        """ This test verifies that every muscle has a Wormbase ID. """
        muscles = self.qctx(Worm)().muscles()
        for muscle_object in muscles:
            self.assertNotEqual(muscle_object.wormbaseID(), '')

    def test_all_neurons_are_cells(self):
        """ This test verifies that all Neuron objects are also Cell objects. """
        net = self.qctx(Worm)().get_neuron_network()
        for neuron_object in net.neurons():
            self.assertIsInstance(neuron_object, Cell)

    def test_all_muscles_are_cells(self):
        """ This test verifies that all Muscle objects are also Cell objects. """
        muscles = self.qctx(Worm)().muscles()
        for muscle_object in muscles:
            self.assertIsInstance(muscle_object, Cell)

    def test_correct_connections_number(self):
        """ This test verifies that there are exactly 7319 connections. """
        net = self.qctx(Worm)().get_neuron_network()
        # XXX: The synapses contain some cells that aren't neurons
        self.assertEqual(7319, net.synapses.count())

    @unittest.skip("Takes too long")
    def test_connection_content_matches(self):
        """ This test verifies that the content of each connection matches the
        content in the source.
        """
        # XXX: Needs updates to match the name translations in insert_worm.py
        ignored_cells = ['hyp', 'intestine']
        synapse_tuples = set()  # set of tuple representation of synapses
        csv_tuples = set()  # set of tuple representation of csv file
        synapses = self.qctx(Worm)().get_neuron_network().synapses()
        for synapse in synapses:
            print(synapse)
            if synapse.syntype() == 'send':
                syn_type = 'chemical'
            else:
                syn_type = 'electrical'
            pre = str(synapse.pre_cell().name())
            post = str(synapse.post_cell().name())
            syn_tuple = (pre,
                         post,
                         synapse.number(),
                         syn_type)
            synapse_tuples.add(syn_tuple)

        # read csv file row by row
        with open('OpenWormData/aux_data/herm_full_edgelist.csv', 'rb') as csvfile:
            edge_reader = csv.reader(csvfile)
            next(edge_reader)  # skip header row
            for row in edge_reader:
                source, target, weight, syn_type = map(str.strip, row)
                # ignore rows where source or target is 'hyp' or 'intestine'
                if source in ignored_cells or target in ignored_cells:
                    continue
                source = normalize_cell_name(source)
                target = normalize_cell_name(target)
                csv_tuple = (source, target, int(weight), syn_type)
                csv_tuples.add(csv_tuple)

        self.assertEqual(set(), csv_tuples - synapse_tuples)

    def test_number_neuron_to_neuron(self):
        """
        This test verifies that the worm model has exactly 5805 neuron to neuron
        connections.
        """
        synapse = self.qctx(Connection)()
        synapse.termination('neuron')
        self.qctx(Worm)().get_neuron_network().synapse(synapse)
        self.assertEqual(5805, synapse.count())

    def test_number_neuron_to_muscle(self):
        """
        This test verifies that the worm model has exactly 1111 neuron to muscle
        connections.
        """
        synapse = self.qctx(Connection)()
        synapse.termination('muscle')
        self.qctx(Worm)().get_neuron_network().synapse(synapse)
        self.assertEqual(1111, synapse.count())

    def test_correct_number_unique_neurons(self):
        """
        This test verifies that the worm model has exactly 300 unique neurons
        making connections.
        """
        synapse = self.qctx(Connection)()
        pre = self.qctx(Neuron)()
        synapse.pre_cell(pre)
        self.qctx(Worm)().get_neuron_network().synapse(synapse)
        self.assertEqual(300, pre.count())

    def test_unconnected_neurons(self):
        """
        This test verifies that there are exactly 2 unconnected neurons,
        i.e., CANL and CANR, in the new connectome.
        """
        # In previous tests, there is a check for exactly 302 neurons in total.
        # There is also a test for exactly 300 unique neurons making connections.
        # That means it should be enough to check that the set {CANL, CANR} and
        # the set of neurons making connections are disjoint.
        neuron = self.qctx(Neuron)()
        synapse = self.qctx(Connection)()
        synapse.pre_cell(neuron)
        self.qctx(Worm)().get_neuron_network().synapse(synapse)
        connected_neurons = set()
        unconnected_neurons = {'CANL', 'CANR'}
        for name in neuron.name.get():
            connected_neurons.add(name)
        self.assertTrue(connected_neurons.isdisjoint(unconnected_neurons))
| gsarma/PyOpenWorm | tests/DataIntegrityTest.py | Python | mit | 19,722 | [
"NEURON"
] | 750487f8761916f06f1fed532dcd993930a4761ad8fd0585acf47adaed1ba45f |
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from urlparse import urljoin
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404)
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED)
from student.forms import AccountCreationForm, PasswordResetFormNoActive
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.programs.utils import get_programs_for_dashboard
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
def csrf_token(context):
    """A csrf token that can be included in a form."""
    token = context.get('csrf_token', '')
    # Django supplies the literal string 'NOTPROVIDED' when no token exists.
    if token == 'NOTPROVIDED':
        return ''
    hidden_input = (u'<div style="display:none"><input type="hidden"'
                    ' name="csrfmiddlewaretoken" value="%s" /></div>')
    return hidden_input % token
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=None):
    """
    Render the edX main page.

    extra_context is used to allow immediate display of certain modal windows, eg signup,
    as used by external_auth.

    Arguments:
        request: the incoming HTTP request.
        extra_context (dict): extra values merged into the template context.
        user: the user to select courses for; defaults to an AnonymousUser.
    """
    # FIX: avoid a shared default instance evaluated once at import time;
    # use a None sentinel and build a fresh AnonymousUser per call instead.
    if user is None:
        user = AnonymousUser()
    if extra_context is None:
        extra_context = {}

    # The course selection work is done in courseware.courses.
    domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN')  # normally False
    # do explicit check, because domain=None is valid
    if domain is False:
        domain = request.META.get('HTTP_HOST')

    courses = get_courses(user, domain=domain)

    if microsite.get_value(
            "ENABLE_COURSE_SORTING_BY_START_DATE",
            settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
        courses = sort_by_start_date(courses)
    else:
        courses = sort_by_announcement(courses)

    context = {'courses': courses}
    context.update(extra_context)
    return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
    """
    Substitute a per-user identifier into *survey_link*.

    If {UNIQUE_ID} appears in the link it is replaced with a unique id for
    the user (currently sha1(user.username)); otherwise the link is
    returned unchanged.
    """
    unique_id = unique_id_for_user(user)
    return survey_link.format(UNIQUE_ID=unique_id)
def cert_info(user, course_overview, course_mode):
    """
    Get the certificate info needed to render the dashboard section for the given
    student and course.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)

    Returns:
        dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
            'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
            'show_download_url': bool
            'download_url': url, only present if show_download_url is True
            'show_disabled_download_button': bool -- true if state is 'generating'
            'show_survey_button': bool
            'survey_url': url, only if show_survey_button is True
            'grade': if status is not 'processing'
            'can_unenroll': if status allows for unenrollment
    """
    # nothing to show when the course can't certify (disabled / hidden)
    if not course_overview.may_certify():
        return {}
    cert_status = certificate_status_for_student(user, course_overview.id)
    return _cert_info(user, course_overview, cert_status, course_mode)
def reverification_info(statuses):
    """
    Returns reverification-related information for *all* of user's enrollments whose
    reverification status is in statuses.

    Args:
        statuses (list): a list of reverification statuses we want information for
            example: ["must_reverify", "denied"]

    Returns:
        dictionary of lists: dictionary with one key per status, e.g.
            dict["must_reverify"] = []
            dict["must_reverify"] = [some information]
    """
    reverifications = defaultdict(list)
    for status in statuses:
        # defaultdict access deliberately materializes the key even when
        # no entries exist for it
        entries = reverifications[status]
        # Sort the data by the reverification_end_date
        if entries:
            entries.sort(key=lambda info: info.date)
    return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
    """
    Given a user, return a filtered set of his or her course enrollments.

    Arguments:
        user (User): the user in question.
        org_to_include (str): for use in Microsites. If not None, ONLY courses
            of this org will be returned.
        orgs_to_exclude (list[str]): courses of these orgs will be excluded.

    Returns:
        generator[CourseEnrollment]: a sequence of enrollments to be displayed
        on the user's dashboard.
    """
    for enrollment in CourseEnrollment.enrollments_for_user(user):
        overview = enrollment.course_overview
        # A missing overview means the course is broken or gone: log and skip.
        if not overview:
            log.error(
                "User %s enrolled in broken or non-existent course %s",
                user.username,
                enrollment.course_id
            )
            continue

        org = overview.location.org
        # Inside a Microsite, drop anything not attributed (by ORG) to it.
        if org_to_include and org != org_to_include:
            continue
        # Also drop enrollments attributed (by ORG) to excluded Microsites.
        # NOTE(review): this check runs even when org_to_include matched,
        # unlike what the original docstring claimed -- behavior preserved.
        if org in orgs_to_exclude:
            continue

        yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode):  # pylint: disable=unused-argument
    """
    Implements the logic for cert_info -- split out for testing.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        cert_status (dict): certificate status row for this user/course, or
            None when no status information exists.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)

    Returns:
        dict: flags/values the dashboard template uses to render the
        certificate block (status, download/survey buttons, grade, LinkedIn
        URL, ...).  May be an empty dict when the course hides certificate
        info ('early_no_info') while the status is still hidden.
    """
    # simplify the status for the template using this lookup table
    template_state = {
        CertificateStatuses.generating: 'generating',
        CertificateStatuses.regenerating: 'generating',
        CertificateStatuses.downloadable: 'ready',
        CertificateStatuses.notpassing: 'notpassing',
        CertificateStatuses.restricted: 'restricted',
        CertificateStatuses.auditing: 'auditing',
        CertificateStatuses.audit_passing: 'auditing',
        CertificateStatuses.audit_notpassing: 'auditing',
    }

    default_status = 'processing'

    # Baseline answer when no (usable) certificate status exists.
    default_info = {'status': default_status,
                    'show_disabled_download_button': False,
                    'show_download_url': False,
                    'show_survey_button': False,
                    'can_unenroll': True
                    }

    if cert_status is None:
        return default_info

    # 'early_no_info' courses render nothing at all while the certificate is
    # in one of these intermediate/hidden states.
    is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')

    if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
        return {}

    status = template_state.get(cert_status['status'], default_status)

    status_dict = {
        'status': status,
        'show_download_url': status == 'ready',
        'show_disabled_download_button': status == 'generating',
        'mode': cert_status.get('mode', None),
        'linked_in_url': None,
        'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
    }

    # End-of-course survey button: only for terminal-ish states and only when
    # the course actually configures a survey URL.
    if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing') and
            course_overview.end_of_course_survey_url is not None):
        status_dict.update({
            'show_survey_button': True,
            'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
    else:
        status_dict['show_survey_button'] = False

    if status == 'ready':
        # showing the certificate web view button if certificate is ready state and feature flags are enabled.
        if has_html_certificates_enabled(course_overview.id, course_overview):
            if course_overview.has_any_active_web_certificate:
                status_dict.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
                })
            else:
                # don't show download certificate button if we don't have an active certificate for course
                status_dict['show_download_url'] = False
        elif 'download_url' not in cert_status:
            # Inconsistent data: status says downloadable but no URL recorded.
            log.warning(
                u"User %s has a downloadable cert for %s, but no download url",
                user.username,
                course_overview.id
            )
            return default_info
        else:
            status_dict['download_url'] = cert_status['download_url']

        # If enabled, show the LinkedIn "add to profile" button
        # Clicking this button sends the user to LinkedIn where they
        # can add the certificate information to their profile.
        linkedin_config = LinkedInAddToProfileConfiguration.current()

        # posting certificates to LinkedIn is not currently
        # supported in microsites/White Labels
        if linkedin_config.enabled and not microsite.is_request_in_microsite():
            status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
                course_overview.id,
                course_overview.display_name,
                cert_status.get('mode'),
                cert_status['download_url']
            )

    if status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing'):
        if 'grade' not in cert_status:
            # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
            # who need to be regraded (we weren't tracking 'notpassing' at first).
            # We can add a log.warning here once we think it shouldn't happen.
            return default_info
        else:
            status_dict['grade'] = cert_status['grade']

    return status_dict
@ensure_csrf_cookie
def signin_user(request):
    """
    Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.

    Renders the stand-alone login page, surfacing any pending third-party
    auth error message ("social-auth" tagged) from the messages framework.
    """
    external_auth_response = external_auth_login(request)
    if external_auth_response is not None:
        return external_auth_response

    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)

    third_party_auth_error = None
    for msg in messages.get_messages(request):
        # Fix: ``extra_tags`` may be None or empty for unrelated messages;
        # the previous ``msg.extra_tags.split()[0]`` raised AttributeError /
        # IndexError in that case.  Guard before indexing.
        tags = msg.extra_tags.split() if msg.extra_tags else []
        if tags and tags[0] == "social-auth":
            # msg may or may not be translated. Try translating [again] in case we are able to:
            third_party_auth_error = _(unicode(msg))  # pylint: disable=translation-of-non-string
            break

    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        # Bool injected into JS to submit form if we're inside a running third-
        # party auth pipeline; distinct from the actual instance of the running
        # pipeline, if any.
        'pipeline_running': 'true' if pipeline.running(request) else 'false',
        'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
        'platform_name': microsite.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'third_party_auth_error': third_party_auth_error
    }
    return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
    """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
    # Where should the user land after registration/login completes?
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)

    external_auth_response = external_auth_register(request)
    if external_auth_response is not None:
        return external_auth_response

    context = {
        'login_redirect_url': redirect_to,  # Appended to the query string of the header's "Sign In" button
        'email': '',
        'name': '',
        'running_pipeline': None,
        'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
        'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        'selected_provider': '',
        'username': '',
    }
    if extra_context is not None:
        context.update(extra_context)

    # Shibboleth-mapped users get the dedicated registration template.
    if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
        return render_to_response('register-shib.html', context)

    # With a third-party auth pipeline running, prepopulate the form with the
    # data the selected provider handed us.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        current_provider = provider.Registry.get_from_pipeline(running_pipeline)
        if current_provider is not None:
            overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
            overrides['running_pipeline'] = running_pipeline
            overrides['selected_provider'] = current_provider.name
            context.update(overrides)

    return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
    """
    Derive extra course-mode information from the modes and the user's
    current enrollment.

    Returns a dict with:
      - 'show_upsell' (bool): whether to show verified-upgrade messaging
      - 'days_for_upsell' (int or None): days until the verified mode
        expires; None when the mode has no expiration date
    """
    if modes is None:
        modes = CourseMode.modes_for_course_dict(course_id)

    mode_info = {'show_upsell': False, 'days_for_upsell': None}

    # Upsell only applies when a verified track exists and the user's current
    # mode is one that can still upgrade to it.
    if CourseMode.VERIFIED not in modes:
        return mode_info
    if enrollment.mode not in CourseMode.UPSELL_TO_VERIFIED_MODES:
        return mode_info

    mode_info['show_upsell'] = True
    expiration = modes['verified'].expiration_datetime
    if expiration:
        # How many days remain before the upgrade window closes.
        today = datetime.datetime.now(UTC).date()
        mode_info['days_for_upsell'] = (expiration.date() - today).days
    return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
    """
    Return True when the user's access to the course is blocked because a
    redeemed, invoice-backed registration code's invoice is no longer valid.

    Side effects on the first blocking code found: the user is opted out of
    course emails and the change is logged/tracked.
    """
    for redemption in redeemed_registration_codes:
        # registration codes may be generated via Bulk Purchase Scenario;
        # only invoice-generated codes can block, and only when the backing
        # invoice has been invalidated.
        invoice_item = redemption.invoice_item
        if not invoice_item or invoice_item.invoice.is_valid:
            continue

        # disabling email notifications for unpaid registration courses
        Optout.objects.get_or_create(user=request.user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            request.user.username,
            request.user.email,
            course_key
        )
        track.views.server_track(request, "change-email1-settings", {"receive_emails": "no", "course": course_key.to_deprecated_string()}, page='dashboard')
        return True

    return False
@login_required
@ensure_csrf_cookie
def dashboard(request):
    """
    Render the student dashboard.

    Collects, for every one of the user's (Microsite-filtered) enrollments:
    course modes, certificate status, verification status, credit status,
    email settings, refund eligibility, invoice-blocked state, prerequisite
    status and order history, then renders ``dashboard.html``.

    Arguments:
        request (HttpRequest): request from a logged-in user
            (enforced by ``@login_required``).

    Returns:
        HttpResponse: the rendered dashboard page.
    """
    user = request.user
    platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)

    # for microsites, we want to filter and only show enrollments for courses within
    # the microsites 'ORG'
    course_org_filter = microsite.get_value('course_org_filter')

    # Let's filter out any courses in an "org" that has been declared to be
    # in a Microsite
    org_filter_out_set = microsite.get_all_orgs()

    # remove our current Microsite from the "filter out" list, if applicable
    if course_org_filter:
        org_filter_out_set.remove(course_org_filter)

    # Build our (course, enrollment) list for the user, but ignore any courses that no
    # longer exist (because the course IDs have changed). Still, we don't delete those
    # enrollments, because it could have been a data push snafu.
    course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))

    # sort the enrollment pairs by the enrollment date
    course_enrollments.sort(key=lambda x: x.created, reverse=True)

    # Retrieve the course modes for each course
    enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
    __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
    course_modes_by_course = {
        course_id: {
            mode.slug: mode
            for mode in modes
        }
        for course_id, modes in unexpired_course_modes.iteritems()
    }

    # Check to see if the student has recently enrolled in a course.
    # If so, display a notification message confirming the enrollment.
    enrollment_message = _create_recent_enrollment_message(
        course_enrollments, course_modes_by_course
    )

    course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)

    # Activation reminder for accounts that never confirmed their email.
    message = ""
    if not user.is_active:
        message = render_to_string(
            'registration/activate_account_notice.html',
            {'email': user.email, 'platform_name': platform_name}
        )

    # Global staff can see what courses errored on their dashboard
    staff_access = False
    errored_courses = {}
    if has_access(user, 'staff', 'global'):
        # Show any courses that errored on load
        staff_access = True
        errored_courses = modulestore().get_errored_courses()

    # Courseware links only render for courses the user can actually load
    # and whose prerequisites allow viewing.
    show_courseware_links_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if has_access(request.user, 'load', enrollment.course_overview)
        and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
    )

    # Get any programs associated with courses being displayed.
    # This is passed along in the template context to allow rendering of
    # program-related information on the dashboard.
    course_programs = _get_course_programs(user, [enrollment.course_id for enrollment in course_enrollments])

    # Construct a dictionary of course mode information
    # used to render the course list.  We re-use the course modes dict
    # we loaded earlier to avoid hitting the database.
    course_mode_info = {
        enrollment.course_id: complete_course_mode_info(
            enrollment.course_id, enrollment,
            modes=course_modes_by_course[enrollment.course_id]
        )
        for enrollment in course_enrollments
    }

    # Determine the per-course verification status
    # This is a dictionary in which the keys are course locators
    # and the values are one of:
    #
    # VERIFY_STATUS_NEED_TO_VERIFY
    # VERIFY_STATUS_SUBMITTED
    # VERIFY_STATUS_APPROVED
    # VERIFY_STATUS_MISSED_DEADLINE
    #
    # Each of which correspond to a particular message to display
    # next to the course on the dashboard.
    #
    # If a course is not included in this dictionary,
    # there is no verification messaging to display.
    verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
    cert_statuses = {
        enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
        for enrollment in course_enrollments
    }

    # only show email settings for Mongo course and when bulk email is turned on
    show_email_settings_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments if (
            settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and
            modulestore().get_modulestore_type(enrollment.course_id) != ModuleStoreEnum.Type.xml and
            CourseAuthorization.instructor_email_enabled(enrollment.course_id)
        )
    )

    # Verification Attempts
    # Used to generate the "you must reverify for course x" banner
    verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)

    # Gets data for midcourse reverifications, if any are necessary or have failed
    statuses = ["approved", "denied", "pending", "must_reverify"]
    reverifications = reverification_info(statuses)

    show_refund_option_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.refundable()
    )

    # Courses blocked because a redeemed registration code's invoice went bad.
    block_courses = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if is_course_blocked(
            request,
            CourseRegistrationCode.objects.filter(
                course_id=enrollment.course_id,
                registrationcoderedemption__redeemed_by=request.user
            ),
            enrollment.course_id
        )
    )

    enrolled_courses_either_paid = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.is_paid_course()
    )

    # If there are *any* denied reverifications that have not been toggled off,
    # we'll display the banner
    denied_banner = any(item.display for item in reverifications["denied"])

    # Populate the Order History for the side-bar.
    order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)

    # get list of courses having pre-requisites yet to be completed
    courses_having_prerequisites = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.course_overview.pre_requisite_courses
    )
    courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)

    # Banner shown when the user was redirected here from a not-yet-live course.
    if 'notlive' in request.GET:
        redirect_message = _("The course you are looking for does not start until {date}.").format(
            date=request.GET['notlive']
        )
    else:
        redirect_message = ''

    context = {
        'enrollment_message': enrollment_message,
        'redirect_message': redirect_message,
        'course_enrollments': course_enrollments,
        'course_optouts': course_optouts,
        'message': message,
        'staff_access': staff_access,
        'errored_courses': errored_courses,
        'show_courseware_links_for': show_courseware_links_for,
        'all_course_modes': course_mode_info,
        'cert_statuses': cert_statuses,
        'credit_statuses': _credit_statuses(user, course_enrollments),
        'show_email_settings_for': show_email_settings_for,
        'reverifications': reverifications,
        'verification_status': verification_status,
        'verification_status_by_course': verify_status_by_course,
        'verification_msg': verification_msg,
        'show_refund_option_for': show_refund_option_for,
        'block_courses': block_courses,
        'denied_banner': denied_banner,
        'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
        'user': user,
        'logout_url': reverse(logout_user),
        'platform_name': platform_name,
        'enrolled_courses_either_paid': enrolled_courses_either_paid,
        'provider_states': [],
        'order_history_list': order_history_list,
        'courses_requirements_not_met': courses_requirements_not_met,
        'nav_hidden': True,
        'course_programs': course_programs,
    }

    return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes):  # pylint: disable=invalid-name
    """
    Build the "recently enrolled" notification HTML, if applicable.

    Args:
        course_enrollments (list[CourseEnrollment]): a list of course enrollments.
        course_modes (dict): Mapping of course ID's to course mode dictionaries.

    Returns:
        Rendered HTML string for the enrollment message, or None when the
        user has no recent enrollments.
    """
    recent_enrollments = _get_recently_enrolled_courses(course_enrollments)
    if not recent_enrollments:
        return None

    enrollment_messages = []
    for recent in recent_enrollments:
        overview = recent.course_overview
        enrollment_messages.append({
            "course_id": overview.id,
            "course_name": overview.display_name,
            "allow_donation": _allow_donation(course_modes, overview.id, recent),
        })

    return render_to_string(
        'enrollment/course_enrollment_message.html',
        {
            'course_enrollment_messages': enrollment_messages,
            'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        }
    )
def _get_recently_enrolled_courses(course_enrollments):
    """
    Filter a list of enrollments down to the recent, active ones.

    Args:
        course_enrollments (list[CourseEnrollment]): A list of course enrollments.

    Returns:
        list[CourseEnrollment]: enrollments created within the configured
        recent-enrollment window.
    """
    window_seconds = DashboardConfiguration.current().recent_enrollment_time_delta
    cutoff = datetime.datetime.now(UTC) - datetime.timedelta(seconds=window_seconds)
    recent = []
    for enrollment in course_enrollments:
        # Enrollments without a created date fail the > comparison and are
        # therefore excluded, matching the intended behavior.
        if enrollment.is_active and enrollment.created > cutoff:
            recent.append(enrollment)
    return recent
def _allow_donation(course_modes, course_id, enrollment):
    """
    Decide whether the dashboard should solicit donations for this course.

    Args:
        course_modes (dict): Mapping of course ID's to course mode dictionaries.
        course_id (str): The unique identifier for the course.
        enrollment (CourseEnrollment): The enrollment object in which the user is enrolled

    Returns:
        True when donations are enabled platform-wide and the user's current
        mode for this course is a free (min_price == 0) mode.
    """
    if not DonationConfiguration.current().enabled:
        return False
    modes_for_course = course_modes[course_id]
    if enrollment.mode not in modes_for_course:
        return False
    return modes_for_course[enrollment.mode].min_price == 0
def _update_email_opt_in(request, org):
    """Helper function used to hit the profile API if email opt-in is enabled."""
    opt_in_value = request.POST.get('email_opt_in')
    # Only act when the form actually posted the field.
    if opt_in_value is None:
        return
    preferences_api.update_email_opt_in(request.user, org, opt_in_value == 'true')
def _credit_statuses(user, course_enrollments):
    """
    Retrieve the status for credit courses.

    A credit course is a course for which a user can purchased
    college credit.  The current flow is:

    1. User becomes eligible for credit (submits verifications, passes the course, etc.)
    2. User purchases credit from a particular credit provider.
    3. User requests credit from the provider, usually creating an account on the provider's site.
    4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.

    The dashboard is responsible for communicating the user's state in this flow.

    Arguments:
        user (User): The currently logged-in user.
        course_enrollments (list[CourseEnrollment]): List of enrollments for the
            user.

    Returns: dict

    The returned dictionary has keys that are `CourseKey`s and values that
    are dictionaries with:

        * eligible (bool): True if the user is eligible for credit in this course.
        * deadline (datetime): The deadline for purchasing and requesting credit for this course.
        * purchased (bool): Whether the user has purchased credit for this course.
        * provider_name (string): The display name of the credit provider.
        * provider_status_url (string): A URL the user can visit to check on their credit request status.
        * request_status (string): Either "pending", "approved", or "rejected"
        * error (bool): If true, an unexpected error occurred when retrieving the credit status,
            so the user should contact the support team.

    Example:
        >>> _credit_statuses(user, course_enrollments)
        {
            CourseKey.from_string("edX/DemoX/Demo_Course"): {
                "course_key": "edX/DemoX/Demo_Course",
                "eligible": True,
                "deadline": 2015-11-23 00:00:00 UTC,
                "purchased": True,
                "provider_name": "Hogwarts",
                "provider_status_url": "http://example.com/status",
                "request_status": "pending",
                "error": False
            }
        }
    """
    # Imported here to avoid a circular import at module load time.
    from openedx.core.djangoapps.credit import api as credit_api

    # Feature flag off
    if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
        return {}

    # Latest credit-request status per course, as reported by providers.
    request_status_by_course = {
        request["course_key"]: request["status"]
        for request in credit_api.get_credit_requests_for_user(user.username)
    }

    # Enrollments in "credit" mode indicate a purchase took place.
    credit_enrollments = {
        enrollment.course_id: enrollment
        for enrollment in course_enrollments
        if enrollment.mode == "credit"
    }

    # When a user purchases credit in a course, the user's enrollment
    # mode is set to "credit" and an enrollment attribute is set
    # with the ID of the credit provider.  We retrieve *all* such attributes
    # here to minimize the number of database queries.
    purchased_credit_providers = {
        attribute.enrollment.course_id: attribute.value
        for attribute in CourseEnrollmentAttribute.objects.filter(
            namespace="credit",
            name="provider_id",
            enrollment__in=credit_enrollments.values()
        ).select_related("enrollment")
    }

    provider_info_by_id = {
        provider["id"]: provider
        for provider in credit_api.get_credit_providers()
    }

    statuses = {}
    for eligibility in credit_api.get_eligibilities_for_user(user.username):
        course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
        status = {
            "course_key": unicode(course_key),
            "eligible": True,
            "deadline": eligibility["deadline"],
            "purchased": course_key in credit_enrollments,
            "provider_name": None,
            "provider_status_url": None,
            "provider_id": None,
            "request_status": request_status_by_course.get(course_key),
            "error": False,
        }

        # If the user has purchased credit, then include information about the credit
        # provider from which the user purchased credit.
        # We retrieve the provider's ID from the an "enrollment attribute" set on the user's
        # enrollment when the user's order for credit is fulfilled by the E-Commerce service.
        if status["purchased"]:
            provider_id = purchased_credit_providers.get(course_key)
            if provider_id is None:
                status["error"] = True
                log.error(
                    u"Could not find credit provider associated with credit enrollment "
                    u"for user %s in course %s. The user will not be able to see his or her "
                    u"credit request status on the student dashboard. This attribute should "
                    u"have been set when the user purchased credit in the course.",
                    user.id, course_key
                )
            else:
                provider_info = provider_info_by_id.get(provider_id, {})
                status["provider_name"] = provider_info.get("display_name")
                status["provider_status_url"] = provider_info.get("status_url")
                status["provider_id"] = provider_id

        statuses[course_key] = status

    return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
    """
    Modify the enrollment status for the logged-in user.

    The request parameter must be a POST request (other methods return 405)
    that specifies course_id and enrollment_action parameters. If course_id or
    enrollment_action is not specified, if course_id is not valid, if
    enrollment_action is something other than "enroll" or "unenroll", if
    enrollment_action is "enroll" and enrollment is closed for the course, or
    if enrollment_action is "unenroll" and the user is not enrolled in the
    course, a 400 error will be returned. If the user is not logged in, 403
    will be returned; it is important that only this case return 403 so the
    front end can redirect the user to a registration or login page when this
    happens. This function should only be called from an AJAX request, so
    the error messages in the responses should never actually be user-visible.

    Args:
        request (`Request`): The Django request object

    Keyword Args:
        check_access (boolean): If True, we check that an accessible course actually
            exists for the given course_key before we enroll the student.
            The default is set to False to avoid breaking legacy code or
            code with non-standard flows (ex. beta tester invitations), but
            for any standard enrollment flow you probably want this to be True.

    Returns:
        Response
    """
    # Get the user
    user = request.user

    # Ensure the user is authenticated
    if not user.is_authenticated():
        return HttpResponseForbidden()

    # Ensure we received a course_id
    action = request.POST.get("enrollment_action")
    if 'course_id' not in request.POST:
        return HttpResponseBadRequest(_("Course id not specified"))

    try:
        course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
    except InvalidKeyError:
        log.warning(
            u"User %s tried to %s with invalid course id: %s",
            user.username,
            action,
            request.POST.get("course_id"),
        )
        return HttpResponseBadRequest(_("Invalid course id"))

    if action == "enroll":
        # Make sure the course exists
        # We don't do this check on unenroll, or a bad course id can't be unenrolled from
        if not modulestore().has_course(course_id):
            log.warning(
                u"User %s tried to enroll in non-existent course %s",
                user.username,
                course_id
            )
            return HttpResponseBadRequest(_("Course id is invalid"))

        # Record the user's email opt-in preference
        if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
            _update_email_opt_in(request, course_id.org)

        available_modes = CourseMode.modes_for_course_dict(course_id)

        # Check whether the user is blocked from enrolling in this course
        # This can occur if the user's IP is on a global blacklist
        # or if the user is enrolling in a country in which the course
        # is not available.
        redirect_url = embargo_api.redirect_if_blocked(
            course_id, user=user, ip_address=get_ip(request),
            url=request.path
        )
        if redirect_url:
            return HttpResponse(redirect_url)

        # Check that auto enrollment is allowed for this course
        # (= the course is NOT behind a paywall)
        if CourseMode.can_auto_enroll(course_id):
            # Enroll the user using the default mode (audit)
            # We're assuming that users of the course enrollment table
            # will NOT try to look up the course enrollment model
            # by its slug.  If they do, it's possible (based on the state of the database)
            # for no such model to exist, even though we've set the enrollment type
            # to "audit".
            try:
                enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
                if enroll_mode:
                    CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
            except Exception:  # pylint: disable=broad-except
                return HttpResponseBadRequest(_("Could not enroll"))

        # If we have more than one course mode or professional ed is enabled,
        # then send the user to the choose your track page.
        # (In the case of no-id-professional/professional ed, this will redirect to a page that
        # funnels users directly into the verification / payment flow)
        if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
            return HttpResponse(
                reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
            )

        # Otherwise, there is only one mode available (the default)
        return HttpResponse()
    elif action == "unenroll":
        enrollment = CourseEnrollment.get_enrollment(user, course_id)
        if not enrollment:
            return HttpResponseBadRequest(_("You are not enrolled in this course"))

        # Certificates in certain states block unenrollment.
        certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
        if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
            return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))

        CourseEnrollment.unenroll(user, course_id)
        return HttpResponse()
    else:
        return HttpResponseBadRequest(_("Enrollment action is invalid"))
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""):  # pylint: disable=too-many-statements,unused-argument
    """
    AJAX request to log in the user.

    Handles both first-party (email/password) and third-party (running
    social-auth pipeline) authentication, account lockout, forced password
    reset, Shibboleth redirection, and inactive-account reactivation.

    Returns a JsonResponse with a "success" flag (and, on success, a
    "redirect_url"); third-party link failures return a plain-text 403.
    """
    backend_name = None
    email = None
    password = None
    redirect_url = None
    response = None
    running_pipeline = None
    third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
    third_party_auth_successful = False
    # Explicit email/password in the POST takes precedence over a running pipeline.
    trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
    user = None
    platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)

    if third_party_auth_requested and not trumped_by_first_party_auth:
        # The user has already authenticated via third-party auth and has not
        # asked to do first party auth by supplying a username or password. We
        # now want to put them through the same logging and cookie calculation
        # logic as with first-party auth.
        running_pipeline = pipeline.get(request)
        username = running_pipeline['kwargs'].get('username')
        backend_name = running_pipeline['backend']
        third_party_uid = running_pipeline['kwargs']['uid']
        requested_provider = provider.Registry.get_from_pipeline(running_pipeline)

        try:
            user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
            third_party_auth_successful = True
        except User.DoesNotExist:
            AUDIT_LOG.warning(
                u'Login failed - user with username {username} has no social auth with backend_name {backend_name}'.format(
                    username=username, backend_name=backend_name))
            return HttpResponse(
                _("You've successfully logged into your {provider_name} account, but this account isn't linked with an {platform_name} account yet.").format(
                    platform_name=platform_name, provider_name=requested_provider.name
                )
                + "<br/><br/>" +
                _("Use your {platform_name} username and password to log into {platform_name} below, "
                  "and then link your {platform_name} account with {provider_name} from your dashboard.").format(
                    platform_name=platform_name, provider_name=requested_provider.name
                )
                + "<br/><br/>" +
                _("If you don't have an {platform_name} account yet, "
                  "click <strong>Register</strong> at the top of the page.").format(
                    platform_name=platform_name),
                content_type="text/plain",
                status=403
            )
    else:
        if 'email' not in request.POST or 'password' not in request.POST:
            return JsonResponse({
                "success": False,
                "value": _('There was an error receiving your login information. Please email us.'),  # TODO: User error message
            })  # TODO: this should be status code 400  # pylint: disable=fixme

        email = request.POST['email']
        password = request.POST['password']
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.warning(u"Login failed - Unknown user email")
            else:
                AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))

    # check if the user has a linked shibboleth account, if so, redirect the user to shib-login
    # This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
    # address into the Gmail login.
    if settings.FEATURES.get('AUTH_USE_SHIB') and user:
        try:
            eamap = ExternalAuthMap.objects.get(user=user)
            if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
                return JsonResponse({
                    "success": False,
                    "redirect": reverse('shib-login'),
                })  # TODO: this should be status code 301  # pylint: disable=fixme
        except ExternalAuthMap.DoesNotExist:
            # This is actually the common case, logging in user without external linked login
            AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)

    # see if account has been locked out due to excessive login failures
    user_found_by_email_lookup = user
    if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
        if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
            return JsonResponse({
                "success": False,
                "value": _('This account has been temporarily locked due to excessive login failures. Try again later.'),
            })  # TODO: this should be status code 429  # pylint: disable=fixme

    # see if the user must reset his/her password due to any policy settings
    if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
        return JsonResponse({
            "success": False,
            "value": _('Your password has expired due to password policy on this account. You must '
                       'reset your password before you can log in again. Please click the '
                       '"Forgot Password" link on this page to reset your password before logging in again.'),
        })  # TODO: this should be status code 403  # pylint: disable=fixme

    # if the user doesn't exist, we want to set the username to an invalid
    # username so that authentication is guaranteed to fail and we can take
    # advantage of the ratelimited backend
    username = user.username if user else ""

    if not third_party_auth_successful:
        try:
            user = authenticate(username=username, password=password, request=request)
        # this occurs when there are too many attempts from the same IP address
        except RateLimitException:
            return JsonResponse({
                "success": False,
                "value": _('Too many failed login attempts. Try again later.'),
            })  # TODO: this should be status code 429  # pylint: disable=fixme

    if user is None:
        # tick the failed login counters if the user exists in the database
        if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
            LoginFailures.increment_lockout_counter(user_found_by_email_lookup)

        # if we didn't find this username earlier, the account for this email
        # doesn't exist, and doesn't have a corresponding password
        if username != "":
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
                AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
            else:
                AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
        return JsonResponse({
            "success": False,
            "value": _('Email or password is incorrect.'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme

    # successful login, clear failed login attempts counters, if applicable
    if LoginFailures.is_feature_enabled():
        LoginFailures.clear_lockout_counter(user)

    # Track the user's sign in
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        analytics.identify(user.id, {
            'email': email,
            'username': username
        })

        analytics.track(
            user.id,
            "edx.bi.user.account.authenticated",
            {
                'category': "conversion",
                'label': request.POST.get('course_id'),
                'provider': None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    if user is not None and user.is_active:
        try:
            # We do not log here, because we have a handler registered
            # to perform logging on successful logins.
            login(request, user)
            if request.POST.get('remember') == 'true':
                # "Remember me": one-week session instead of browser-session expiry.
                request.session.set_expiry(604800)
                log.debug("Setting user session to never expire")
            else:
                request.session.set_expiry(0)
        except Exception as exc:  # pylint: disable=broad-except
            AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
            log.critical("Login failed - Could not create session. Is memcached running?")
            log.exception(exc)
            raise

        redirect_url = None  # The AJAX method calling should know the default destination upon success
        if third_party_auth_successful:
            redirect_url = pipeline.get_complete_url(backend_name)

        response = JsonResponse({
            "success": True,
            "redirect_url": redirect_url,
        })

        # Ensure that the external marketing site can
        # detect that the user is logged in.
        return set_logged_in_cookies(request, response, user)

    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
    else:
        AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))

    reactivation_email_for_user(user)
    not_activated_msg = _("This account has not been activated. We have sent another activation message. Please check your email for the activation instructions.")
    return JsonResponse({
        "success": False,
        "value": not_activated_msg,
    })  # TODO: this should be status code 400  # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
    """
    Authenticate the client using an OAuth access token by using the token to
    retrieve information from a third party and matching that information to an
    existing user.
    """
    warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
    backend = request.backend
    # Only OAuth1/OAuth2 backends are supported; anything else is a 404.
    if not isinstance(backend, (social_oauth.BaseOAuth1, social_oauth.BaseOAuth2)):
        raise Http404
    if "access_token" not in request.POST:
        return JsonResponse({"error": "invalid_request"}, status=400)
    # Tell third party auth pipeline that this is an API call
    request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
    user = None
    try:
        user = backend.do_auth(request.POST["access_token"])
    except (HTTPError, AuthException):
        pass
    # do_auth can return a non-User object if it fails
    if user and isinstance(user, User):
        login(request, user)
        return JsonResponse(status=204)
    # Ensure user does not re-enter the pipeline
    request.social_strategy.clean_partial_pipeline()
    return JsonResponse({"error": "invalid_token"}, status=401)
@ensure_csrf_cookie
def logout_user(request):
    """
    HTTP request to log out the user. Redirects to marketing page.
    Deletes both the CSRF and sessionid cookies so the marketing
    site can determine the logged in state of the user
    """
    # We do not log here, because we have a handler registered
    # to perform logging on successful logouts.
    logout(request)
    # CAS installs get routed through the CAS logout view; everyone else goes home.
    target = reverse('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'
    response = redirect(target)
    delete_logged_in_cookies(response)
    return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
    """
    Renders the view used to manage user standing. Also displays a table
    of user accounts that have been disabled and who disabled them.
    """
    if not request.user.is_staff:
        raise Http404
    disabled_standings = UserStanding.objects.filter(
        account_status=UserStanding.ACCOUNT_DISABLED
    )
    # One row per disabled account: the username and who changed the standing.
    rows = [
        [standing.user.username, standing.user.standing.all()[0].changed_by]
        for standing in disabled_standings
    ]
    context = {
        'headers': ['username', 'account_changed_by'],
        'rows': rows,
    }
    return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
    """
    Ajax call to change user standing. Endpoint of the form
    in manage_user_standing.html

    Expects POST parameters ``username`` and ``account_action`` (either
    'disable' or 'reenable').  Responds with a JSON body containing a
    human-readable ``message``; validation failures return status 400.
    Staff-only: non-staff callers get a 404.
    """
    if not request.user.is_staff:
        raise Http404
    username = request.POST.get('username')
    context = {}
    if username is None or username.strip() == '':
        context['message'] = _('Please enter a username')
        return JsonResponse(context, status=400)
    account_action = request.POST.get('account_action')
    if account_action is None:
        context['message'] = _('Please choose an option')
        return JsonResponse(context, status=400)
    username = username.strip()
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        context['message'] = _("User with username {} does not exist").format(username)
        return JsonResponse(context, status=400)
    else:
        # presumably named _success rather than _ to avoid shadowing the
        # gettext alias used throughout this function — keep it that way
        user_account, _success = UserStanding.objects.get_or_create(
            user=user, defaults={'changed_by': request.user},
        )
        if account_action == 'disable':
            user_account.account_status = UserStanding.ACCOUNT_DISABLED
            context['message'] = _("Successfully disabled {}'s account").format(username)
            log.info(u"%s disabled %s's account", request.user, username)
        elif account_action == 'reenable':
            user_account.account_status = UserStanding.ACCOUNT_ENABLED
            context['message'] = _("Successfully reenabled {}'s account").format(username)
            log.info(u"%s reenabled %s's account", request.user, username)
        else:
            # Unknown action: bail out before touching the record.
            context['message'] = _("Unexpected account status")
            return JsonResponse(context, status=400)
        user_account.changed_by = request.user
        user_account.standing_last_changed_at = datetime.datetime.now(UTC)
        user_account.save()
        return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
    """JSON call to change a profile setting: Right now, location"""
    # TODO (vshnayder): location is no longer used
    profile = UserProfile.objects.get(user=request.user)  # request.user.profile_cache
    if 'location' in request.POST:
        profile.location = request.POST['location']
    profile.save()
    return JsonResponse({"success": True, "location": profile.location})
class AccountValidationError(Exception):
    """Validation failure during account creation, tied to one form field."""
    def __init__(self, message, field):
        # Name of the form field the message applies to.
        self.field = field
        super(AccountValidationError, self).__init__(message)
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    handler that saves the user Signup Source
    when the user is created

    Post-save signal receiver for ``User``: on initial creation only (not
    on updates), records which microsite (if any) the signup came from.
    """
    # 'created' is True only on the first save of the User row.
    if 'created' in kwargs and kwargs['created']:
        site = microsite.get_value('SITE_NAME')
        # Only record a signup source when a microsite SITE_NAME is
        # configured; plain installs skip this entirely.
        if site:
            user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
            user_signup_source.save()
            log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form):
    """
    Given cleaned post variables, create the User and UserProfile objects, as well as the
    registration for this user.

    Arguments:
        form (AccountCreationForm): validated form carrying username, email,
            password and the profile fields listed below.

    Returns a tuple (User, UserProfile, Registration).

    Raises:
        ValidationError: if the form does not validate.
        AccountValidationError: if the username or email collides with an
            existing account.

    Note: this function is also used for creating test users.
    """
    if not form.is_valid():
        raise ValidationError(form.errors)
    user = User(
        username=form.cleaned_data["username"],
        email=form.cleaned_data["email"],
        is_active=False
    )
    user.set_password(form.cleaned_data["password"])
    registration = Registration()
    # TODO: Rearrange so that if part of the process fails, the whole process fails.
    # Right now, we can have e.g. no registration e-mail sent out and a zombie account
    try:
        with transaction.atomic():
            user.save()
    except IntegrityError:
        # Figure out the cause of the integrity error.
        # Use .exists() rather than len(queryset) > 0: it issues a cheap
        # SELECT ... LIMIT 1 instead of fetching every matching row.
        if User.objects.filter(username=user.username).exists():
            raise AccountValidationError(
                _("An account with the Public Username '{username}' already exists.").format(username=user.username),
                field="username"
            )
        elif User.objects.filter(email=user.email).exists():
            raise AccountValidationError(
                _("An account with the Email '{email}' already exists.").format(email=user.email),
                field="email"
            )
        else:
            raise
    # add this account creation to password history
    # NOTE, this will be a NOP unless the feature has been turned on in configuration
    password_history_entry = PasswordHistory()
    password_history_entry.create(user)
    registration.register(user)
    profile_fields = [
        "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
        "year_of_birth"
    ]
    profile = UserProfile(
        user=user,
        **{key: form.cleaned_data.get(key) for key in profile_fields}
    )
    # Extra (microsite-defined) profile fields are serialized into the meta blob.
    extended_profile = form.cleaned_extended_profile
    if extended_profile:
        profile.meta = json.dumps(extended_profile)
    try:
        profile.save()
    except Exception:  # pylint: disable=broad-except
        log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
        raise
    return (user, profile, registration)
def create_account_with_params(request, params):
    """
    Given a request and a dict of parameters (which may or may not have come
    from the request), create an account for the requesting user, including
    creating a comments service user object and sending an activation email.
    This also takes external/third-party auth into account, updates that as
    necessary, and authenticates the user for the request's session.

    Returns the newly created (and logged-in) User object.

    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.

    Issues with this code:
    * It is not transactional. If there is a failure part-way, an incomplete
      account will be created and left in the database.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * It is over 300 lines long (!) and includes disparate functionality, from
      registration e-mails to all sorts of other things. It should be broken
      up into semantically meaningful functions.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())
    # allow for microsites to define their own set of required/optional/hidden fields
    extra_fields = microsite.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    )
    # Boolean of whether a 3rd party auth provider and credentials were provided in
    # the API so the newly created account can link with the 3rd party account.
    #
    # Note: this is orthogonal to the 3rd party authentication pipeline that occurs
    # when the account is created via the browser and redirect URLs.
    should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
    if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
        params["password"] = pipeline.make_random_password()
    # if doing signup for an external authorization, then get email, password, name from the eamap
    # don't use the ones from the form, since the user could have hacked those
    # unless originally we didn't get a valid email or name from the external auth
    # TODO: We do not check whether these values meet all necessary criteria, such as email length
    do_external_auth = 'ExternalAuthMap' in request.session
    if do_external_auth:
        eamap = request.session['ExternalAuthMap']
        try:
            validate_email(eamap.external_email)
            params["email"] = eamap.external_email
        except ValidationError:
            # Invalid external email: keep whatever the form supplied.
            pass
        if eamap.external_name.strip() != '':
            params["name"] = eamap.external_name
        params["password"] = eamap.internal_password
        log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
    extended_profile_fields = microsite.get_value('extended_profile_fields', [])
    enforce_password_policy = (
        settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
        not do_external_auth
    )
    # Can't have terms of service for certain SHIB users, like at Stanford
    # NOTE: short-circuiting guarantees eamap is only touched when
    # do_external_auth is True (all earlier disjuncts were False).
    tos_required = (
        not settings.FEATURES.get("AUTH_USE_SHIB") or
        not settings.FEATURES.get("SHIB_DISABLE_TOS") or
        not do_external_auth or
        not eamap.external_domain.startswith(
            external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
        )
    )
    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        enforce_username_neq_password=True,
        enforce_password_policy=enforce_password_policy,
        tos_required=tos_required,
    )
    # Perform operations within a transaction that are critical to account creation
    with transaction.atomic():
        # first, create the account
        (user, profile, registration) = _do_create_account(form)
        # next, link the account with social auth, if provided via the API.
        # (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
        if should_link_with_social_auth:
            backend_name = params['provider']
            request.social_strategy = social_utils.load_strategy(request)
            redirect_uri = reverse('social:complete', args=(backend_name, ))
            request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
            social_access_token = params.get('access_token')
            if not social_access_token:
                raise ValidationError({
                    'access_token': [
                        _("An access_token is required when passing value ({}) for provider.").format(
                            params['provider']
                        )
                    ]
                })
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
            pipeline_user = None
            error_message = ""
            try:
                pipeline_user = request.backend.do_auth(social_access_token, user=user)
            except AuthAlreadyAssociated:
                error_message = _("The provided access_token is already associated with another user.")
            except (HTTPError, AuthException):
                error_message = _("The provided access_token is not valid.")
            if not pipeline_user or not isinstance(pipeline_user, User):
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline()
                raise ValidationError({'access_token': [error_message]})
    # Perform operations that are non-critical parts of account creation
    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:
            # Best-effort: notification setup failure must not block signup.
            log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
    dog_stats_api.increment("common.student.account_created")
    # If the user is registering via 3rd party auth, track which provider they use
    third_party_provider = None
    running_pipeline = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
    # Track the user's registration
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        identity_args = [
            user.id,  # pylint: disable=no-member
            {
                'email': user.email,
                'username': user.username,
                'name': profile.name,
                'age': profile.age,
                'education': profile.level_of_education_display,
                'address': profile.mailing_address,
                'gender': profile.gender_display,
                'country': unicode(profile.country),
            }
        ]
        if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
            identity_args.append({
                "MailChimp": {
                    "listId": settings.MAILCHIMP_NEW_USER_LIST_ID
                }
            })
        analytics.identify(*identity_args)
        analytics.track(
            user.id,
            "edx.bi.user.account.registered",
            {
                'category': 'conversion',
                'label': params.get('course_id'),
                'provider': third_party_provider.name if third_party_provider else None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )
    create_comments_service_user(user)
    # Don't send email if we are:
    #
    # 1. Doing load testing.
    # 2. Random user generation for other forms of testing.
    # 3. External auth bypassing activation.
    # 4. Have the platform configured to not require e-mail activation.
    # 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
    #
    # Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. we need to be careful about
    # changing settings on a running system to make sure no users are
    # left in an inconsistent state (or doing a migration if they are).
    send_email = (
        not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
        not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
        not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
        not (
            third_party_provider and third_party_provider.skip_email_verification and
            user.email == running_pipeline['kwargs'].get('details', {}).get('email')
        )
    )
    if send_email:
        context = {
            'name': profile.name,
            'key': registration.activation_key,
        }
        # composes activation email
        subject = render_to_string('emails/activation_email_subject.txt', context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/activation_email.txt', context)
        from_address = microsite.get_value(
            'email_from_address',
            settings.DEFAULT_FROM_EMAIL
        )
        try:
            if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
                # Divert all activation mail to a single test inbox.
                dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
                message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
                           '-' * 80 + '\n\n' + message)
                mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
            else:
                user.email_user(subject, message, from_address)
        except Exception:  # pylint: disable=broad-except
            log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
    else:
        # Activation email skipped entirely, so activate immediately.
        registration.activate()
    # Immediately after a user creates an account, we log them in. They are only
    # logged in until they close the browser. They can't log in again until they click
    # the activation link from the email.
    new_user = authenticate(username=user.username, password=params['password'])
    login(request, new_user)
    request.session.set_expiry(0)
    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
    if do_external_auth:
        eamap.user = new_user
        eamap.dtsignup = datetime.datetime.now(UTC)
        eamap.save()
        AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
        AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
        if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
            log.info('bypassing activation email')
            new_user.is_active = True
            new_user.save()
            AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
    return new_user
@csrf_exempt
def create_account(request, post_override=None):
    """
    JSON call to create new edX account.
    Used by form in signup_modal.html, which is included into navigation.html

    Deprecated in favor of RegistrationView.  On success, logs the user in
    (via create_account_with_params) and sets the logged-in cookies.
    """
    warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
    try:
        user = create_account_with_params(request, post_override or request.POST)
    except AccountValidationError as exc:
        return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
    except ValidationError as exc:
        # Only surface the first field error to the client.
        # NOTE(review): .iteritems() and exc.message are Python 2-only
        # constructs; this whole module is py2 — revisit on any py3 port.
        field, error_list = next(exc.message_dict.iteritems())
        return JsonResponse(
            {
                "success": False,
                "field": field,
                "value": error_list[0],
            },
            status=400
        )
    redirect_url = None  # The AJAX method calling should know the default destination upon success
    # Resume the third-party-auth pipeline if necessary.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
    response = JsonResponse({
        'success': True,
        'redirect_url': redirect_url,
    })
    # Ensure that the external marketing site can detect the logged-in state.
    set_logged_in_cookies(request, response, user)
    return response
def auto_auth(request):
    """
    Create or configure a user account, then log in as that user.

    Enabled only when
    settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.

    Accepts the following querystring parameters:
    * `username`, `email`, and `password` for the user account
    * `full_name` for the user profile (the user's full name; defaults to the username)
    * `staff`: Set to "true" to make the user global staff.
    * `superuser`: Set to "true" to make the user a superuser.
    * `course_id`: Enroll the student in the course with `course_id`
    * `enrollment_mode`: Enrollment mode to use (defaults to 'honor')
    * `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
    * `no_login`: Define this to create the user but not login
    * `redirect`: Set to "true" will redirect to course if course_id is defined, otherwise it will redirect to dashboard

    If username, email, or password are not provided, use
    randomly generated credentials.
    """
    # Generate a unique name to use if none provided
    unique_name = uuid.uuid4().hex[0:30]
    # Use the params from the request, otherwise use these defaults
    username = request.GET.get('username', unique_name)
    password = request.GET.get('password', unique_name)
    email = request.GET.get('email', unique_name + "@example.com")
    full_name = request.GET.get('full_name', username)
    is_staff = request.GET.get('staff', None)
    is_superuser = request.GET.get('superuser', None)
    course_id = request.GET.get('course_id', None)
    # mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
    enrollment_mode = request.GET.get('enrollment_mode', 'honor')
    course_key = None
    if course_id:
        course_key = CourseLocator.from_string(course_id)
    role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
    redirect_when_done = request.GET.get('redirect', '').lower() == 'true'
    login_when_done = 'no_login' not in request.GET
    form = AccountCreationForm(
        data={
            'username': username,
            'email': email,
            'password': password,
            'name': full_name,
        },
        tos_required=False
    )
    # Attempt to create the account.
    # If successful, this will return a tuple containing
    # the new user object.
    try:
        user, profile, reg = _do_create_account(form)
    except AccountValidationError:
        # Attempt to retrieve the existing user.
        # The account already exists: reset its credentials to the requested
        # ones instead of failing, so repeated calls are idempotent.
        user = User.objects.get(username=username)
        user.email = email
        user.set_password(password)
        user.save()
        profile = UserProfile.objects.get(user=user)
        reg = Registration.objects.get(user=user)
    # Set the user's global staff bit
    if is_staff is not None:
        user.is_staff = (is_staff == "true")
        user.save()
    if is_superuser is not None:
        user.is_superuser = (is_superuser == "true")
        user.save()
    # Activate the user
    reg.activate()
    reg.save()
    # ensure parental consent threshold is met
    # (birth year set one year beyond the consent age limit)
    year = datetime.date.today().year
    age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
    profile.year_of_birth = (year - age_limit) - 1
    profile.save()
    # Enroll the user in a course
    if course_key is not None:
        CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
    # Apply the roles
    for role_name in role_names:
        role = Role.objects.get(name=role_name, course_id=course_key)
        user.roles.add(role)
    # Log in as the user
    if login_when_done:
        user = authenticate(username=username, password=password)
        login(request, user)
    create_comments_service_user(user)
    # Provide the user with a valid CSRF token
    # then return a 200 response unless redirect is true
    if redirect_when_done:
        # Redirect to course info page if course_id is known
        if course_id:
            try:
                # redirect to course info page in LMS
                redirect_url = reverse(
                    'info',
                    kwargs={'course_id': course_id}
                )
            except NoReverseMatch:
                # redirect to course outline page in Studio
                redirect_url = reverse(
                    'course_handler',
                    kwargs={'course_key_string': course_id}
                )
        else:
            try:
                # redirect to dashboard for LMS
                redirect_url = reverse('dashboard')
            except NoReverseMatch:
                # redirect to home for Studio
                redirect_url = reverse('home')
        return redirect(redirect_url)
    elif request.META.get('HTTP_ACCEPT') == 'application/json':
        response = JsonResponse({
            'created_status': u"Logged in" if login_when_done else "Created",
            'username': username,
            'email': email,
            'password': password,
            'user_id': user.id,  # pylint: disable=no-member
            'anonymous_id': anonymous_id_for_user(user, None),
        })
    else:
        success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
            u"Logged in" if login_when_done else "Created",
            username, email, password, user.id  # pylint: disable=no-member
        )
        response = HttpResponse(success_msg)
    response.set_cookie('csrftoken', csrf(request)['csrf_token'])
    return response
@ensure_csrf_cookie
def activate_account(request, key):
    """When link in activation e-mail is clicked

    Looks up the Registration by its activation key, activates the account
    if it is not already active, and auto-enrolls the student in any pending
    CourseEnrollmentAllowed records flagged auto_enroll.
    """
    regs = Registration.objects.filter(activation_key=key)
    if len(regs) == 1:
        user_logged_in = request.user.is_authenticated()
        already_active = True
        if not regs[0].user.is_active:
            regs[0].activate()
            already_active = False
        # Enroll student in any pending courses he/she may have if auto_enroll flag is set
        student = User.objects.filter(id=regs[0].user_id)
        if student:
            ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
            for cea in ceas:
                if cea.auto_enroll:
                    enrollment = CourseEnrollment.enroll(student[0], cea.course_id)
                    manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student[0].email)
                    if manual_enrollment_audit is not None:
                        # get the enrolled by user and reason from the ManualEnrollmentAudit table.
                        # then create a new ManualEnrollmentAudit table entry for the same email
                        # different transition state.
                        ManualEnrollmentAudit.create_manual_enrollment_audit(
                            manual_enrollment_audit.enrolled_by, student[0].email, ALLOWEDTOENROLL_TO_ENROLLED,
                            manual_enrollment_audit.reason, enrollment
                        )
        resp = render_to_response(
            "registration/activation_complete.html",
            {
                'user_logged_in': user_logged_in,
                'already_active': already_active
            }
        )
        return resp
    if len(regs) == 0:
        return render_to_response(
            "registration/activation_invalid.html",
            {'csrf': csrf(request)['csrf_token']}
        )
    # More than one Registration matched the key: treat as a server error.
    return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
    """ Attempts to send a password reset e-mail.

    Rate-limited; always responds with success=True whether or not the form
    validated — presumably to avoid revealing which e-mail addresses have
    accounts (TODO confirm this is intentional before changing).
    """
    # Add some rate limiting here by re-using the RateLimitMixin as a helper class
    limiter = BadRequestRateLimiter()
    if limiter.is_rate_limit_exceeded(request):
        AUDIT_LOG.warning("Rate limit exceeded in password_reset")
        return HttpResponseForbidden()
    form = PasswordResetFormNoActive(request.POST)
    if form.is_valid():
        form.save(use_https=request.is_secure(),
                  from_email=microsite.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
                  request=request,
                  domain_override=request.get_host())
        # When password change is complete, a "edx.user.settings.changed" event will be emitted.
        # But because changing the password is multi-step, we also emit an event here so that we can
        # track where the request was initiated.
        tracker.emit(
            SETTING_CHANGE_INITIATED,
            {
                "setting": "password",
                "old": None,
                "new": None,
                "user_id": request.user.id,
            }
        )
    else:
        # bad user? tick the rate limiter counter
        AUDIT_LOG.info("Bad password_reset user passed in.")
        limiter.tick_bad_request_counter(request)
    return JsonResponse({
        'success': True,
        'value': render_to_string('registration/password_reset_done.html', {}),
    })
def password_reset_confirm_wrapper(
        request,
        uidb36=None,
        token=None,
):
    """ A wrapper around django.contrib.auth.views.password_reset_confirm.
    Needed because we want to set the user as active at this step.

    Also enforces the optional password-policy / password-reuse / reset-rate
    checks before delegating to Django's view, and records the new password
    in PasswordHistory when the hash actually changed.

    NOTE(review): if uidb36 is invalid, `user` (and `uid_int`) stay unbound
    and a POST will raise NameError below — pre-existing behavior, confirm
    before changing.
    """
    # cribbed from django.contrib.auth.views.password_reset_confirm
    try:
        uid_int = base36_to_int(uidb36)
        user = User.objects.get(id=uid_int)
        user.is_active = True
        user.save()
    except (ValueError, User.DoesNotExist):
        pass
    # tie in password strength enforcement as an optional level of
    # security protection
    err_msg = None
    if request.method == 'POST':
        password = request.POST['new_password1']
        if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
            try:
                validate_password_length(password)
                validate_password_complexity(password)
                validate_password_dictionary(password)
            # Fixed: use the modern `as` form instead of the legacy
            # `except ValidationError, err:` comma syntax (py2-only and
            # inconsistent with the rest of this module).
            except ValidationError as err:
                err_msg = _('Password: ') + '; '.join(err.messages)
        # also, check the password reuse policy
        if not PasswordHistory.is_allowable_password_reuse(user, password):
            if user.is_staff:
                num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
            else:
                num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
            err_msg = ungettext(
                "You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
                "You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
                num_distinct
            ).format(num=num_distinct)
        # also, check to see if passwords are getting reset too frequent
        if PasswordHistory.is_password_reset_too_soon(user):
            num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
            err_msg = ungettext(
                "You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
                "You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
                num_days
            ).format(num=num_days)
    if err_msg:
        # We have an password reset attempt which violates some security policy, use the
        # existing Django template to communicate this back to the user
        context = {
            'validlink': True,
            'form': None,
            'title': _('Password reset unsuccessful'),
            'err_msg': err_msg,
            'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        }
        return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
    else:
        # we also want to pass settings.PLATFORM_NAME in as extra_context
        extra_context = {"platform_name": microsite.get_value('platform_name', settings.PLATFORM_NAME)}
        # Support old password reset URLs that used base36 encoded user IDs.
        # https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
        try:
            uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
        except ValueError:
            uidb64 = '1'  # dummy invalid ID (incorrect padding for base64)
        if request.method == 'POST':
            # remember what the old password hash is before we call down
            old_password_hash = user.password
            result = password_reset_confirm(
                request, uidb64=uidb64, token=token, extra_context=extra_context
            )
            # get the updated user
            updated_user = User.objects.get(id=uid_int)
            # did the password hash change, if so record it in the PasswordHistory
            if updated_user.password != old_password_hash:
                entry = PasswordHistory()
                entry.create(updated_user)
            return result
        else:
            return password_reset_confirm(
                request, uidb64=uidb64, token=token, extra_context=extra_context
            )
def reactivation_email_for_user(user):
    """Re-send the activation e-mail for ``user``; returns a JsonResponse."""
    try:
        registration = Registration.objects.get(user=user)
    except Registration.DoesNotExist:
        return JsonResponse({
            "success": False,
            "error": _('No inactive user with this e-mail exists'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme
    email_context = {
        'name': user.profile.name,
        'key': registration.activation_key,
    }
    # Render subject and body; the subject must be collapsed onto one line.
    raw_subject = render_to_string('emails/activation_email_subject.txt', email_context)
    subject = ''.join(raw_subject.splitlines())
    message = render_to_string('emails/activation_email.txt', email_context)
    try:
        user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
        return JsonResponse({
            "success": False,
            "error": _('Unable to send reactivation email')
        })  # TODO: this should be status code 500  # pylint: disable=fixme
    return JsonResponse({"success": True})
def validate_new_email(user, new_email):
    """
    Given a new email for a user, does some basic verification of the new address If any issues are encountered
    with verification a ValueError will be thrown.

    Checks, in order: the address is syntactically valid, differs from the
    user's current address, and is not already taken by another account.
    """
    try:
        validate_email(new_email)
    except ValidationError:
        raise ValueError(_('Valid e-mail address required.'))
    if new_email == user.email:
        raise ValueError(_('Old email is the same as the new email.'))
    # .exists() replaces .count() != 0: it short-circuits with LIMIT 1
    # instead of counting every matching row.
    if User.objects.filter(email=new_email).exists():
        raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
    """
    Given a new email for a user, does some basic verification of the new address and sends an activation message
    to the new address. If any issues are encountered with verification or sending the message, a ValueError will
    be thrown.

    Arguments:
        user (User): the account requesting the change.
        new_email (str): the address to switch to (activated separately).
        activation_key (str): optional pre-generated key; a random uuid4 hex
            is used when omitted.
    """
    # Reuse the user's existing pending change record if one exists,
    # otherwise start a fresh one.
    pec_list = PendingEmailChange.objects.filter(user=user)
    if len(pec_list) == 0:
        pec = PendingEmailChange()
        pec.user = user
    else:
        pec = pec_list[0]
    # if activation_key is not passing as an argument, generate a random key
    if not activation_key:
        activation_key = uuid.uuid4().hex
    pec.new_email = new_email
    pec.activation_key = activation_key
    pec.save()
    context = {
        'key': pec.activation_key,
        'old_email': user.email,
        'new_email': pec.new_email
    }
    subject = render_to_string('emails/email_change_subject.txt', context)
    # E-mail subjects must not contain newlines.
    subject = ''.join(subject.splitlines())
    message = render_to_string('emails/email_change.txt', context)
    from_address = microsite.get_value(
        'email_from_address',
        settings.DEFAULT_FROM_EMAIL
    )
    try:
        mail.send_mail(subject, message, from_address, [pec.new_email])
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
        raise ValueError(_('Unable to send email activation link. Please try again later.'))
    # When the email address change is complete, a "edx.user.settings.changed" event will be emitted.
    # But because changing the email address is multi-step, we also emit an event here so that we can
    # track where the request was initiated.
    tracker.emit(
        SETTING_CHANGE_INITIATED,
        {
            "setting": "email",
            "old": context['old_email'],
            "new": context['new_email'],
            "user_id": user.id,
        }
    )
@ensure_csrf_cookie
def confirm_email_change(request, key):  # pylint: disable=unused-argument
    """
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update

    The whole flow runs inside one transaction; every failure path marks
    the transaction for rollback so no partial change is committed.
    Confirmation mail is sent to the old address *before* the switch and
    to the new address after it.
    """
    with transaction.atomic():
        try:
            pec = PendingEmailChange.objects.get(activation_key=key)
        except PendingEmailChange.DoesNotExist:
            response = render_to_response("invalid_email_key.html", {})
            transaction.set_rollback(True)
            return response
        user = pec.user
        address_context = {
            'old_email': user.email,
            'new_email': pec.new_email
        }
        # .exists() replaces len(queryset) != 0: no rows are fetched,
        # just a SELECT ... LIMIT 1.
        if User.objects.filter(email=pec.new_email).exists():
            response = render_to_response("email_exists.html", {})
            transaction.set_rollback(True)
            return response
        subject = render_to_string('emails/email_change_subject.txt', address_context)
        # E-mail subjects must not contain newlines.
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/confirm_email_change.txt', address_context)
        # Archive the old address (with a timestamp) in the profile meta blob.
        u_prof = UserProfile.objects.get(user=user)
        meta = u_prof.get_meta()
        if 'old_emails' not in meta:
            meta['old_emails'] = []
        meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
        u_prof.set_meta(meta)
        u_prof.save()
        # Send it to the old email...
        try:
            user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to old address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': user.email})
            transaction.set_rollback(True)
            return response
        user.email = pec.new_email
        user.save()
        pec.delete()
        # And send it to the new email...
        try:
            user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to new address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': pec.new_email})
            transaction.set_rollback(True)
            return response
        response = render_to_response("email_change_successful.html", address_context)
        return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
    """Modify logged-in user's setting for receiving emails from a course."""
    user = request.user
    course_id = request.POST.get("course_id")
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    # A truthy "receive_emails" value means opt in; absence/empty means opt out.
    receive_emails = request.POST.get("receive_emails")
    if receive_emails:
        # Opting in: remove any existing opt-out record for this course.
        optout_object = Optout.objects.filter(user=user, course_id=course_key)
        if optout_object:
            optout_object.delete()
        log.info(
            u"User %s (%s) opted in to receive emails from course %s",
            user.username,
            user.email,
            course_id
        )
        track.views.server_track(request, "change-email-settings", {"receive_emails": "yes", "course": course_id}, page='dashboard')
    else:
        # Opting out: create (or keep) an opt-out record for this course.
        Optout.objects.get_or_create(user=user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            user.username,
            user.email,
            course_id
        )
        track.views.server_track(request, "change-email-settings", {"receive_emails": "no", "course": course_id}, page='dashboard')
    return JsonResponse({"success": True})
def _get_course_programs(user, user_enrolled_courses):  # pylint: disable=invalid-name
    """Build a dictionary of program data required for display on the student dashboard.

    Given a user and an iterable of course keys, find all programs relevant to the
    user and return them in a dictionary keyed by course key.

    Arguments:
        user (User): The user to authenticate as when requesting programs.
        user_enrolled_courses (list): List of course keys representing the courses in which
            the given user has active enrollments.

    Returns:
        dict, containing programs keyed by course. Empty if programs cannot be retrieved.
    """
    course_programs = get_programs_for_dashboard(user, user_enrolled_courses)
    programs_data = {}
    # NOTE: viewitems() is a Python 2 dict method, so this module targets Python 2.
    for course_key, program in course_programs.viewitems():
        # Only active XSeries programs are shown on the dashboard.
        if program.get('status') == 'active' and program.get('category') == 'xseries':
            try:
                programs_data[course_key] = {
                    'course_count': len(program['course_codes']),
                    'display_name': program['name'],
                    'category': program.get('category'),
                    'program_marketing_url': urljoin(
                        settings.MKTG_URLS.get('ROOT'), 'xseries' + '/{}'
                    ).format(program['marketing_slug']),
                    'display_category': 'XSeries'
                }
            except KeyError:
                # Required keys missing: program payload is malformed, so skip
                # it rather than break dashboard rendering.
                log.warning('Program structure is invalid, skipping display: %r', program)
    return programs_data
| ZLLab-Mooc/edx-platform | common/djangoapps/student/views.py | Python | agpl-3.0 | 96,983 | [
"VisIt"
] | bf4a602fd3289a29db45a946abc597b2a7752123c8d164b198b28b28194ff712 |
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2020 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Signal and image processing functions."""
from __future__ import absolute_import, division, print_function
from builtins import range
import numpy as np
from sporco._util import renamed_function
from sporco.fft import is_complex_dtype, fftn, ifftn, rfftn, irfftn, fftconv
from sporco import array
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
@renamed_function(depname='complex_randn', depmod='sporco.util')
def complex_randn(*args):
    """Return a complex array of samples drawn from a standard normal
    distribution.

    Real and imaginary parts are drawn independently from N(0, 1).

    Parameters
    ----------
    d0, d1, ..., dn : int
      Dimensions of the random array

    Returns
    -------
    a : ndarray
      Random array of shape (d0, d1, ..., dn)
    """
    # Draw the real component first, then the imaginary one, preserving the
    # RNG consumption order.
    real_part = np.random.randn(*args)
    imag_part = np.random.randn(*args)
    return real_part + 1j * imag_part
@renamed_function(depname='spnoise', depmod='sporco.util')
def spnoise(s, frc, smn=0.0, smx=1.0):
    """Return image with salt & pepper noise imposed on it.

    In expectation, a fraction `frc` of the pixels is corrupted in total,
    split evenly between pepper (value `smn`) and salt (value `smx`).

    Parameters
    ----------
    s : ndarray
      Input image
    frc : float
      Desired fraction of pixels corrupted by noise
    smn : float, optional (default 0.0)
      Lower value for noise (pepper)
    smx : float, optional (default 1.0)
      Upper value for noise (salt)

    Returns
    -------
    sn : ndarray
      Noisy image
    """
    noisy = s.copy()
    # A single uniform draw in [-1, 1) per pixel selects pepper (low tail),
    # salt (high tail), or leaves the pixel unchanged.
    u = np.random.uniform(-1.0, 1.0, s.shape)
    pepper = u < frc - 1.0
    salt = u > 1.0 - frc
    noisy[pepper] = smn
    noisy[salt] = smx
    return noisy
@renamed_function(depname='rndmask', depmod='sporco.util')
def rndmask(shp, frc, dtype=None):
    r"""Return random mask image with values in :math:`\{0,1\}`.

    Parameters
    ----------
    shp : tuple
      Mask array shape
    frc : float
      Desired fraction of zero pixels
    dtype : data-type or None, optional (default None)
      Data type of mask array

    Returns
    -------
    msk : ndarray
      Mask image
    """
    # Draw uniform samples in [-1, 1): |sample| < frc (probability frc) maps
    # to 0, |sample| > frc maps to 1. A sample with |sample| exactly equal to
    # frc (a measure-zero event) would be left at its raw random value.
    msk = np.asarray(np.random.uniform(-1.0, 1.0, shp), dtype=dtype)
    msk[np.abs(msk) > frc] = 1.0
    msk[np.abs(msk) < frc] = 0.0
    return msk
@renamed_function(depname='rgb2gray', depmod='sporco.util')
def rgb2gray(rgb):
    """Convert an RGB image (or images) to grayscale.

    The conversion applies the ITU-R BT.601 luma weights
    (0.299, 0.587, 0.114) along the colour channel axis (axis 2).

    Parameters
    ----------
    rgb : ndarray
      RGB image as Nr x Nc x 3 or Nr x Nc x 3 x K array

    Returns
    -------
    gry : ndarray
      Grayscale image as Nr x Nc or Nr x Nc x K array
    """
    w = np.array([0.299, 0.587, 0.114], dtype=rgb.dtype)
    # Reshape the weight vector so it broadcasts against axis 2 of the input.
    # The previous formulation, w[np.newaxis, np.newaxis] (shape (1, 1, 3)),
    # broadcast against the *last* axis instead: correct for 3D input, but
    # for the documented Nr x Nc x 3 x K case it required K == 3 and then
    # weighted the wrong axis.
    w = w.reshape((1, 1, 3) + (1,) * (rgb.ndim - 3))
    return np.sum(w * rgb, axis=2)
@renamed_function(depname='grad', depmod='sporco.linalg')
def grad(x, axis, zero_pad=False):
    r"""Compute gradient of `x` along axis `axis`.

    The form of the gradient operator depends on parameter `zero_pad`.
    If it is False, the operator is of the form

    .. math::

      \left(\begin{array}{rrrrr}
      -1 & 1 & 0 & \ldots & 0\\
      0 & -1 & 1 & \ldots & 0\\
      \vdots & \vdots & \ddots & \ddots & \vdots\\
      0 & 0 & \ldots & -1 & 1\\
      0 & 0 & \dots & 0 & 0
      \end{array}\right) \;,

    mapping :math:`\mathbb{R}^N \rightarrow \mathbb{R}^N`. If parameter
    `zero_pad` is True, the operator is of the form

    .. math::

      \left(\begin{array}{rrrrr}
      1 & 0 & 0 & \ldots & 0\\
      -1 & 1 & 0 & \ldots & 0\\
      0 & -1 & 1 & \ldots & 0\\
      \vdots & \vdots & \ddots & \ddots & \vdots\\
      0 & 0 & \ldots & -1 & 1\\
      0 & 0 & \ldots & 0 & -1
      \end{array}\right) \;,

    mapping :math:`\mathbb{R}^N \rightarrow \mathbb{R}^{N + 1}`.

    Parameters
    ----------
    x : array_like
      Input array
    axis : int
      Axis on which gradient is to be computed
    zero_pad : boolean
      Flag selecting type of gradient

    Returns
    -------
    xg : ndarray
      Output array
    """
    if zero_pad:
        # Forward difference with an implicit zero sample prepended and
        # appended, so the output is one sample longer along `axis`.
        xg = np.diff(x, axis=axis, prepend=0, append=0)
    else:
        # np.roll(x, -1) - x is the circular forward difference; the final
        # (wrap-around) entry along `axis` is then zeroed to match the
        # operator above.
        slc = (slice(None),)*axis + (slice(-1, None),)
        xg = np.roll(x, -1, axis=axis) - x
        xg[slc] = 0.0
    return xg
@renamed_function(depname='gradT', depmod='sporco.linalg')
def gradT(x, axis, zero_pad=False):
    """Compute transpose of gradient of `x` along axis `axis`.

    See :func:`grad` for a description of the dependency of the gradient
    operator on parameter `zero_pad`.

    Parameters
    ----------
    x : array_like
      Input array
    axis : int
      Axis on which gradient transpose is to be computed
    zero_pad : boolean
      Flag selecting type of gradient

    Returns
    -------
    xg : ndarray
      Output array
    """
    if zero_pad:
        # Transpose of the zero-padded forward difference is the negated
        # plain difference, mapping back from N+1 to N samples along `axis`.
        xg = -np.diff(x, axis=axis)
    else:
        # Backward difference via a circular shift, with the wrap-around
        # entries corrected explicitly at both ends of `axis`.
        slc0 = (slice(None),) * axis
        xg = np.roll(x, 1, axis=axis) - x
        xg[slc0 + (slice(0, 1),)] = -x[slc0 + (slice(0, 1),)]
        xg[slc0 + (slice(-1, None),)] = x[slc0 + (slice(-2, -1),)]
    return xg
@renamed_function(depname='gradient_filters', depmod='sporco.linalg')
def gradient_filters(ndim, axes, axshp, dtype=None):
    r"""Construct a set of filters for computing gradients in the
    frequency domain.

    Parameters
    ----------
    ndim : integer
      Total number of dimensions in array in which gradients are to be
      computed
    axes : tuple of integers
      Axes on which gradients are to be computed
    axshp : tuple of integers
      Shape of axes on which gradients are to be computed
    dtype : dtype, optional (default np.float32)
      Data type of output arrays

    Returns
    -------
    Gf : ndarray
      Frequency domain gradient operators :math:`\hat{G}_i`
    GHGf : ndarray
      Sum of products :math:`\sum_i \hat{G}_i^H \hat{G}_i`
    """
    if dtype is None:
        dtype = np.float32
    # Spatial-domain impulse responses: one [1, -1] difference filter per
    # gradient axis, stacked along a trailing axis of length len(axes).
    g = np.zeros([2 if k in axes else 1 for k in range(ndim)] +
                 [len(axes),], dtype)
    for k in axes:
        g[(0,) * k + (slice(None),) + (0,) * (g.ndim - 2 - k) + (k,)] = \
            np.array([1, -1])
    # Use the full complex FFT only for complex dtypes; the real-input
    # transform suffices otherwise.
    if is_complex_dtype(dtype):
        Gf = fftn(g, axshp, axes=axes)
    else:
        Gf = rfftn(g, axshp, axes=axes)
    # Sum over all operators of |G_i|^2, which is real by construction.
    GHGf = np.sum(np.conj(Gf) * Gf, axis=-1).real
    return Gf, GHGf
@renamed_function(depname='tikhonov_filter', depmod='sporco.util')
def tikhonov_filter(s, lmbda, npd=16):
    r"""Lowpass filter based on Tikhonov regularization.

    Lowpass filter image(s) and return low and high frequency
    components, consisting of the lowpass filtered image and its
    difference with the input image. The lowpass filter is equivalent to
    Tikhonov regularization with `lmbda` as the regularization parameter
    and a discrete gradient as the operator in the regularization term,
    i.e. the lowpass component is the solution to

    .. math::
      \mathrm{argmin}_\mathbf{x} \; (1/2) \left\|\mathbf{x} - \mathbf{s}
      \right\|_2^2 + (\lambda / 2) \sum_i \| G_i \mathbf{x} \|_2^2 \;\;,

    where :math:`\mathbf{s}` is the input image, :math:`\lambda` is the
    regularization parameter, and :math:`G_i` is an operator that
    computes the discrete gradient along image axis :math:`i`. Once the
    lowpass component :math:`\mathbf{x}` has been computed, the highpass
    component is just :math:`\mathbf{s} - \mathbf{x}`.

    Parameters
    ----------
    s : array_like
      Input image or array of images.
    lmbda : float
      Regularization parameter controlling lowpass filtering.
    npd : int, optional (default=16)
      Number of samples to pad at image boundaries.

    Returns
    -------
    slp : array_like
      Lowpass image or array of images.
    shp : array_like
      Highpass image or array of images.
    """
    # Pick real or complex transforms according to the input type.
    if np.isrealobj(s):
        fft = rfftn
        ifft = irfftn
    else:
        fft = fftn
        ifft = ifftn
    # Frequency-domain row and column difference (gradient) operators,
    # computed on the padded image grid.
    grv = np.array([-1.0, 1.0]).reshape([2, 1])
    gcv = np.array([-1.0, 1.0]).reshape([1, 2])
    Gr = fft(grv, (s.shape[0] + 2*npd, s.shape[1] + 2*npd), (0, 1))
    Gc = fft(gcv, (s.shape[0] + 2*npd, s.shape[1] + 2*npd), (0, 1))
    # Transfer function of the Tikhonov-regularised solve: the lowpass
    # solution in the frequency domain is S / A.
    A = 1.0 + lmbda * (np.conj(Gr)*Gr + np.conj(Gc)*Gc).real
    if s.ndim > 2:
        # Broadcast the 2D transfer function over any additional axes.
        A = A[(slice(None),)*2 + (np.newaxis,)*(s.ndim-2)]
    # Symmetric padding reduces boundary artifacts from the implicit
    # circular convolution.
    sp = np.pad(s, ((npd, npd),)*2 + ((0, 0),)*(s.ndim-2), 'symmetric')
    spshp = sp.shape
    sp = fft(sp, axes=(0, 1))
    sp /= A
    sp = ifft(sp, s=spshp[0:2], axes=(0, 1))
    # Crop the padding to recover the lowpass component at the input size.
    slp = sp[npd:(sp.shape[0] - npd), npd:(sp.shape[1] - npd)]
    shp = s - slp
    return slp.astype(s.dtype), shp.astype(s.dtype)
@renamed_function(depname='gaussian', depmod='sporco.util')
def gaussian(shape, sd=1.0):
    """Sample a multivariate Gaussian pdf, normalised to have unit sum.

    The pdf is separable, so it is built as an outer product of 1D
    Gaussians sampled on [-3, 3] along each axis.

    Parameters
    ----------
    shape : tuple
      Shape of output array.
    sd : float, optional (default 1.0)
      Standard deviation of Gaussian pdf.

    Returns
    -------
    gc : ndarray
      Sampled Gaussian pdf.
    """
    # Accept a bare int as a 1D shape.
    if isinstance(shape, int):
        shape = (shape,)

    def _pdf1d(x):
        # 1D Gaussian pdf evaluated at x.
        return np.exp(-(x ** 2) / (2.0 * sd ** 2)) / \
            (np.sqrt(2.0 * np.pi) * sd)

    ndim = len(shape)
    gc = 1.0
    for axis, npts in enumerate(shape):
        # Reshape the axis samples so the running product broadcasts into
        # the full output shape.
        coords = np.linspace(-3.0, 3.0, npts).reshape(
            (1,) * axis + (npts,) + (1,) * (ndim - axis - 1))
        gc = gc * _pdf1d(coords)
    gc /= np.sum(gc)
    return gc
@renamed_function(depname='local_contrast_normalise', depmod='sporco.util')
def local_contrast_normalise(s, n=7, c=None):
    """Local contrast normalisation of an image.

    Perform local contrast normalisation :cite:`jarret-2009-what` of
    an image, consisting of subtraction of the local mean and division
    by the local norm. The original image can be reconstructed from the
    contrast normalised image as (`snrm` * `scn`) + `smn`.

    Parameters
    ----------
    s : array_like
      Input image or array of images.
    n : int, optional (default 7)
      The size of the local region used for normalisation is :math:`2n+1`.
    c : float, optional (default None)
      The smallest value that can be used in the divisive normalisation.
      If `None`, this value is set to the mean of the local region norms.

    Returns
    -------
    scn : ndarray
      Contrast normalised image(s)
    smn : ndarray
      Additive normalisation correction
    snrm : ndarray
      Multiplicative normalisation correction
    """
    # Construct region weighting filter
    N = 2 * n + 1
    g = gaussian((N, N), sd=1.0)
    # Compute required image padding
    pd = ((n, n),) * 2
    if s.ndim > 2:
        # Apply the same 2D filter independently along any extra axes
        # (e.g. colour channels); only the first two axes are padded.
        g = g[..., np.newaxis]
        pd += ((0, 0),)
    sp = np.pad(s, pd, mode='symmetric')
    # Compute local mean and subtract from image. np.roll recentres the
    # circular convolution so the filter is centred on each pixel.
    smn = np.roll(fftconv(g, sp), (-n, -n), axis=(0, 1))
    s1 = sp - smn
    # Compute local norm. The clip guards against small negative values
    # arising from floating point error in the FFT-domain convolution.
    snrm = np.roll(np.sqrt(np.clip(fftconv(g, s1**2), 0.0, np.inf)),
                   (-n, -n), axis=(0, 1))
    # Set c parameter if not specified
    if c is None:
        c = np.mean(snrm, axis=(0, 1), keepdims=True)
    # Divide mean-subtracted image by corrected local norm
    snrm = np.maximum(c, snrm)
    s2 = s1 / snrm
    # Return contrast normalised image and normalisation components,
    # cropped back to the unpadded input size.
    return s2[n:-n, n:-n], smn[n:-n, n:-n], snrm[n:-n, n:-n]
| bwohlberg/sporco | sporco/signal.py | Python | bsd-3-clause | 11,378 | [
"Gaussian"
] | 38d4c40f8db154a25158bd761c80226da6c1e2ccfacb58dbb4c13a94010fd11a |
from itertools import product
import os
import numpy as np
from openfermion.config import DATA_DIRECTORY
from openfermion.chem import MolecularData, make_reduced_hamiltonian
from openfermion.linalg import wedge
from openfermion.linalg.erpa import singlet_erpa, erpa_eom_hamiltonian
from openfermion.transforms.opconversions import get_fermion_operator
from openfermion.ops.representations import InteractionRDM
from openfermion.ops.operators import FermionOperator
from openfermion.transforms.opconversions import normal_ordered
from openfermion.transforms.repconversions import get_interaction_operator
from openfermion.utils.commutators import commutator
def test_h2_rpa():
    """Check the singlet ERPA spectrum of H2/STO-3G against a reference.

    Builds the Hartree-Fock 1- and 2-RDMs from the packaged molecular data
    file and compares the lowest positive excitation energy against the
    pyscf-RPA reference value.
    """
    filename = os.path.join(DATA_DIRECTORY, "H2_sto-3g_singlet_0.7414.hdf5")
    molecule = MolecularData(filename=filename)
    reduced_ham = make_reduced_hamiltonian(molecule.get_molecular_hamiltonian(),
                                           molecule.n_electrons)
    # Hartree-Fock 1-RDM: occupied spin-orbitals first, then virtuals.
    hf_opdm = np.diag([1] * molecule.n_electrons + [0] *
                      (molecule.n_qubits - molecule.n_electrons))
    # Corresponding idempotent 2-RDM via the Grassmann wedge product.
    hf_tpdm = 2 * wedge(hf_opdm, hf_opdm, (1, 1), (1, 1))
    pos_spectrum, xy_eigvects, basis = singlet_erpa(hf_tpdm,
                                                    reduced_ham.two_body_tensor)
    assert np.isclose(pos_spectrum, 0.92926444)  # pyscf-rpa value
    assert isinstance(xy_eigvects, np.ndarray)
    assert isinstance(basis, dict)
def test_erpa_eom_ham_h2():
    """Verify erpa_eom_hamiltonian matrix elements for H2/STO-3G.

    Each closed-form ERPA EOM matrix element is compared against the
    explicitly evaluated double-commutator expectation value
    <[q^ p, [H, r^ s]]> on the Hartree-Fock RDMs.
    """
    filename = os.path.join(DATA_DIRECTORY, "H2_sto-3g_singlet_0.7414.hdf5")
    molecule = MolecularData(filename=filename)
    reduced_ham = make_reduced_hamiltonian(molecule.get_molecular_hamiltonian(),
                                           molecule.n_electrons)
    rha_fermion = get_fermion_operator(reduced_ham)
    # erpa_eom_hamiltonian expects the two-body tensor with the last two
    # indices permuted.
    permuted_hijkl = np.einsum('ijlk', reduced_ham.two_body_tensor)
    opdm = np.diag([1] * molecule.n_electrons + [0] *
                   (molecule.n_qubits - molecule.n_electrons))
    tpdm = 2 * wedge(opdm, opdm, (1, 1), (1, 1))
    rdms = InteractionRDM(opdm, tpdm)
    dim = reduced_ham.one_body_tensor.shape[0] // 2
    full_basis = {}  # erpa basis. A, B basis in RPA language
    cnt = 0
    for p, q in product(range(dim), repeat=2):
        if p < q:
            full_basis[(p, q)] = cnt
            full_basis[(q, p)] = cnt + dim * (dim - 1) // 2
            cnt += 1
    for rkey in full_basis.keys():
        p, q = rkey
        for ckey in full_basis.keys():
            r, s = ckey
            for sigma, tau in product([0, 1], repeat=2):
                # Matrix element from the closed-form ERPA expression...
                test = erpa_eom_hamiltonian(permuted_hijkl, tpdm, 2 * q + sigma,
                                            2 * p + sigma, 2 * r + tau,
                                            2 * s + tau).real
                # ...and the same element evaluated directly as a nested
                # commutator expectation value on the RDMs.
                qp_op = FermionOperator(
                    ((2 * q + sigma, 1), (2 * p + sigma, 0)))
                rs_op = FermionOperator(((2 * r + tau, 1), (2 * s + tau, 0)))
                erpa_op = normal_ordered(
                    commutator(qp_op, commutator(rha_fermion, rs_op)))
                true = rdms.expectation(get_interaction_operator(erpa_op))
                assert np.isclose(true, test)
def test_erpa_eom_ham_lih():
    """Verify erpa_eom_hamiltonian matrix elements for LiH/STO-3G.

    Same double-commutator cross-check as test_erpa_eom_ham_h2, restricted
    to a truncated spatial-orbital basis to keep the runtime short.
    """
    filename = os.path.join(DATA_DIRECTORY, "H1-Li1_sto-3g_singlet_1.45.hdf5")
    molecule = MolecularData(filename=filename)
    reduced_ham = make_reduced_hamiltonian(molecule.get_molecular_hamiltonian(),
                                           molecule.n_electrons)
    rha_fermion = get_fermion_operator(reduced_ham)
    # erpa_eom_hamiltonian expects the two-body tensor with the last two
    # indices permuted.
    permuted_hijkl = np.einsum('ijlk', reduced_ham.two_body_tensor)
    opdm = np.diag([1] * molecule.n_electrons + [0] *
                   (molecule.n_qubits - molecule.n_electrons))
    tpdm = 2 * wedge(opdm, opdm, (1, 1), (1, 1))
    rdms = InteractionRDM(opdm, tpdm)
    dim = 3  # so we don't do the full basis. This would make the test long
    full_basis = {}  # erpa basis. A, B basis in RPA language
    cnt = 0
    # start from 1 to make test shorter
    for p, q in product(range(1, dim), repeat=2):
        if p < q:
            full_basis[(p, q)] = cnt
            full_basis[(q, p)] = cnt + dim * (dim - 1) // 2
            cnt += 1
    for rkey in full_basis.keys():
        p, q = rkey
        for ckey in full_basis.keys():
            r, s = ckey
            for sigma, tau in product([0, 1], repeat=2):
                # Closed-form ERPA matrix element...
                test = erpa_eom_hamiltonian(permuted_hijkl, tpdm, 2 * q + sigma,
                                            2 * p + sigma, 2 * r + tau,
                                            2 * s + tau).real
                # ...versus the explicit nested-commutator expectation value.
                qp_op = FermionOperator(
                    ((2 * q + sigma, 1), (2 * p + sigma, 0)))
                rs_op = FermionOperator(((2 * r + tau, 1), (2 * s + tau, 0)))
                erpa_op = normal_ordered(
                    commutator(qp_op, commutator(rha_fermion, rs_op)))
                true = rdms.expectation(get_interaction_operator(erpa_op))
                assert np.isclose(true, test)
| quantumlib/OpenFermion | src/openfermion/linalg/erpa_test.py | Python | apache-2.0 | 4,973 | [
"PySCF"
] | 6be074c09af6449204f5144cf9e3bcdfadd10dd583adb2f1a79dd544a696799c |
# -*- coding: utf-8 -*-
"""
Tests for user authorization password-related functionality.
"""
import json
import logging
import re
from datetime import datetime, timedelta
import ddt
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core import mail
from django.core.cache import cache
from django.test import TestCase
from django.test.client import RequestFactory
from django.urls import reverse
from freezegun import freeze_time
from mock import Mock, patch
from oauth2_provider.models import AccessToken as dot_access_token
from oauth2_provider.models import RefreshToken as dot_refresh_token
from pytz import UTC
from testfixtures import LogCapture
from openedx.core.djangoapps.oauth_dispatch.tests import factories as dot_factories
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangoapps.user_api.accounts.tests.test_api import CreateAccountMixin
from openedx.core.djangoapps.user_api.errors import UserAPIInternalError, UserNotFound
from openedx.core.djangoapps.user_authn.views.password_reset import request_password_change
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
LOGGER_NAME = 'audit'
User = get_user_model() # pylint:disable=invalid-name
class TestRequestPasswordChange(CreateAccountMixin, TestCase):
    """
    Tests for users who request a password change.
    """
    # Test fixture credentials; the non-ASCII password exercises Unicode handling.
    USERNAME = u'claire-underwood'
    PASSWORD = u'ṕáśśẃőŕd'
    EMAIL = u'claire+underwood@example.com'
    IS_SECURE = False

    @skip_unless_lms
    def test_request_password_change(self):
        """A reset request for an active account sends an email with a link."""
        # Create and activate an account
        self.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        self.assertEqual(len(mail.outbox), 1)
        request = RequestFactory().post('/password')
        request.user = Mock()
        request.site = SiteFactory()
        with patch('crum.get_current_request', return_value=request):
            # Request a password change
            request_password_change(self.EMAIL, self.IS_SECURE)
        # Verify that a new email message has been sent
        self.assertEqual(len(mail.outbox), 2)
        # Verify that the body of the message contains something that looks
        # like an activation link
        # NOTE(review): this inspects outbox[0] (the account-activation
        # message) rather than outbox[1] (the reset message just sent) --
        # presumably both contain a link, but confirm the intended index.
        email_body = mail.outbox[0].body
        result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
        self.assertIsNot(result, None)

    @skip_unless_lms
    def test_request_password_change_invalid_user(self):
        """A reset request for an unknown email raises and sends nothing."""
        with self.assertRaises(UserNotFound):
            request_password_change(self.EMAIL, self.IS_SECURE)
        # Verify that no email messages have been sent
        self.assertEqual(len(mail.outbox), 0)

    @skip_unless_lms
    def test_request_password_change_inactive_user(self):
        """An inactive (unactivated) account still receives the reset email."""
        # Create an account, but do not activate it
        self.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        self.assertEqual(len(mail.outbox), 1)
        request = RequestFactory().post('/password')
        request.user = Mock()
        request.site = SiteFactory()
        with patch('crum.get_current_request', return_value=request):
            request_password_change(self.EMAIL, self.IS_SECURE)
        # Verify that the password change email was still sent
        self.assertEqual(len(mail.outbox), 2)
@skip_unless_lms
@ddt.ddt
class TestPasswordChange(CreateAccountMixin, CacheIsolationTestCase):
    """ Tests for views that change the user's password. """
    # Test fixture credentials; the non-ASCII passwords exercise Unicode handling.
    USERNAME = u"heisenberg"
    ALTERNATE_USERNAME = u"walt"
    OLD_PASSWORD = u"ḅḷüëṡḳÿ"
    NEW_PASSWORD = u"B🄸🄶B🄻🅄🄴"
    OLD_EMAIL = u"walter@graymattertech.com"
    NEW_EMAIL = u"walt@savewalterwhite.com"
    INVALID_KEY = u"123abc"
    ENABLED_CACHES = ['default']

    def setUp(self):
        """Create and log in a test account; start with empty outbox and cache."""
        super(TestPasswordChange, self).setUp()
        self.create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertTrue(result)
        mail.outbox = []
        cache.clear()

    def test_password_change(self):
        """End-to-end reset: request, follow emailed link, set a new password."""
        # Request a password change while logged in, simulating
        # use of the password reset link from the account page
        response = self._change_password()
        self.assertEqual(response.status_code, 200)
        # Check that an email was sent
        self.assertEqual(len(mail.outbox), 1)
        # Retrieve the activation link from the email body
        email_body = mail.outbox[0].body
        result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
        self.assertIsNot(result, None)
        activation_link = result.group('url')
        # Visit the activation link
        response = self.client.get(activation_link)
        self.assertEqual(response.status_code, 200)
        # Submit a new password and follow the redirect to the success page
        response = self.client.post(
            activation_link,
            # These keys are from the form on the current password reset confirmation page.
            {'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Your password has been reset.")
        # Log the user out to clear session data
        self.client.logout()
        # Verify that the new password can be used to log in
        login_api_url = reverse('login_api')
        response = self.client.post(login_api_url, {'email': self.OLD_EMAIL, 'password': self.NEW_PASSWORD})
        assert response.status_code == 200
        response_dict = json.loads(response.content.decode('utf-8'))
        assert response_dict['success']
        # Try reusing the activation link to change the password again
        # Visit the activation link again.
        response = self.client.get(activation_link)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "This password reset link is invalid. It may have been used already.")
        self.client.logout()
        # Verify that the old password cannot be used to log in
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertFalse(result)
        # Verify that the new password continues to be valid
        response = self.client.post(login_api_url, {'email': self.OLD_EMAIL, 'password': self.NEW_PASSWORD})
        assert response.status_code == 200
        response_dict = json.loads(response.content.decode('utf-8'))
        assert response_dict['success']

    def test_password_change_failure(self):
        """An internal user-API error during the request should surface."""
        with patch(
            'openedx.core.djangoapps.user_authn.views.password_reset.request_password_change',
            side_effect=UserAPIInternalError,
        ):
            self._change_password()
            self.assertRaises(UserAPIInternalError)

    @patch.dict(settings.FEATURES, {'ENABLE_PASSWORD_RESET_FAILURE_EMAIL': True})
    def test_password_reset_failure_email(self):
        """Test that a password reset failure email notification is sent, when enabled."""
        # Log the user out
        self.client.logout()
        bad_email = 'doesnotexist@example.com'
        response = self._change_password(email=bad_email)
        self.assertEqual(response.status_code, 200)
        # Check that an email was sent
        self.assertEqual(len(mail.outbox), 1)
        # Verify that the body contains the failed password reset message
        # (checked in both the plain-text and HTML alternatives).
        sent_message = mail.outbox[0]
        text_body = sent_message.body
        html_body = sent_message.alternatives[0][0]
        for email_body in [text_body, html_body]:
            msg = u'However, there is currently no user account associated with your email address: {email}'.format(
                email=bad_email
            )
            assert u'reset for your user account at {}'.format(settings.PLATFORM_NAME) in email_body
            assert 'password_reset_confirm' not in email_body, 'The link should not be added if user was not found'
            assert msg in email_body

    @ddt.data(True, False)
    def test_password_change_logged_out(self, send_email):
        """A logged-out request succeeds only when an email is supplied."""
        # Log the user out
        self.client.logout()
        # Request a password change while logged out, simulating
        # use of the password reset link from the login page
        if send_email:
            response = self._change_password(email=self.OLD_EMAIL)
            self.assertEqual(response.status_code, 200)
        else:
            # Don't send an email in the POST data, simulating
            # its (potentially accidental) omission in the POST
            # data sent from the login page
            response = self._change_password()
            self.assertEqual(response.status_code, 400)

    def test_access_token_invalidation_logged_out(self):
        """OAuth2 tokens are destroyed by a logged-out reset request."""
        self.client.logout()
        user = User.objects.get(email=self.OLD_EMAIL)
        self._create_dot_tokens(user)
        response = self._change_password(email=self.OLD_EMAIL)
        self.assertEqual(response.status_code, 200)
        self._assert_access_token_destroyed(user)

    def test_access_token_invalidation_logged_in(self):
        """OAuth2 tokens are destroyed by a logged-in reset request."""
        user = User.objects.get(email=self.OLD_EMAIL)
        self._create_dot_tokens(user)
        response = self._change_password()
        self.assertEqual(response.status_code, 200)
        self._assert_access_token_destroyed(user)

    def test_password_change_inactive_user(self):
        """An inactive account still receives the reset email."""
        # Log out the user created during test setup
        self.client.logout()
        # Create a second user, but do not activate it
        self.create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
        mail.outbox = []
        # Send the view the email address tied to the inactive user
        response = self._change_password(email=self.NEW_EMAIL)
        # Expect that the activation email is still sent,
        # since the user may have lost the original activation email.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 1)

    def test_password_change_no_user(self):
        """An unknown email still returns 200 (no account enumeration) but is logged."""
        # Log out the user created during test setup
        self.client.logout()
        with LogCapture(LOGGER_NAME, level=logging.INFO) as logger:
            # Send the view an email address not tied to any user
            response = self._change_password(email=self.NEW_EMAIL)
            self.assertEqual(response.status_code, 200)
            expected_logs = (
                (LOGGER_NAME, 'INFO', 'Password reset initiated for user {}.'.format(self.NEW_EMAIL)),
                (LOGGER_NAME, 'INFO', 'Invalid password reset attempt')
            )
            logger.check(*expected_logs)

    def test_password_change_rate_limited(self):
        """
        Tests that password reset requests are rate limited as expected.
        """
        # Log out the user created during test setup, to prevent the view from
        # selecting the logged-in user's email address over the email provided
        # in the POST data
        self.client.logout()
        # First request succeeds; the immediate second one is rate limited.
        for status in [200, 403]:
            response = self._change_password(email=self.NEW_EMAIL)
            self.assertEqual(response.status_code, status)
        # now reset the time to 1 min from now in future and change the email and
        # verify that it will allow another request from same IP
        reset_time = datetime.now(UTC) + timedelta(seconds=61)
        with freeze_time(reset_time):
            response = self._change_password(email=self.OLD_EMAIL)
            self.assertEqual(response.status_code, 200)

    @ddt.data(
        ('post', 'password_change_request', []),
    )
    @ddt.unpack
    def test_require_http_method(self, correct_method, url_name, args):
        """Only the documented HTTP method is accepted; all others get a 405."""
        wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
        url = reverse(url_name, args=args)
        for method in wrong_methods:
            response = getattr(self.client, method)(url)
            self.assertEqual(response.status_code, 405)

    def _change_password(self, email=None):
        """Request to change the user's password. """
        data = {}
        if email:
            data['email'] = email
        return self.client.post(path=reverse('password_change_request'), data=data)

    def _create_dot_tokens(self, user=None):
        """Create dot access token for given user if user provided else for default user."""
        if not user:
            user = User.objects.get(email=self.OLD_EMAIL)
        application = dot_factories.ApplicationFactory(user=user)
        access_token = dot_factories.AccessTokenFactory(user=user, application=application)
        dot_factories.RefreshTokenFactory(user=user, application=application, access_token=access_token)

    def _assert_access_token_destroyed(self, user):
        """Assert all access tokens are destroyed."""
        self.assertFalse(dot_access_token.objects.filter(user=user).exists())
        self.assertFalse(dot_refresh_token.objects.filter(user=user).exists())
| msegado/edx-platform | openedx/core/djangoapps/user_authn/views/tests/test_password.py | Python | agpl-3.0 | 13,150 | [
"VisIt"
] | 9d16636869e50f69d12a70b325e28efa943816ee0db60f74da8964290d9eaed3 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class Eccodes(CMakePackage):
    """ecCodes is a package developed by ECMWF for processing meteorological
    data in GRIB (1/2), BUFR (3/4) and GTS header formats."""

    homepage = "https://software.ecmwf.int/wiki/display/ECC/ecCodes+Home"
    url = "https://software.ecmwf.int/wiki/download/attachments/45757960/eccodes-2.2.0-Source.tar.gz?api=v2"
    list_url = "https://software.ecmwf.int/wiki/display/ECC/Releases"

    version('2.5.0', '5a7e92c58418d855082fa573efd352aa')
    version('2.2.0', 'b27e6f0a3eea5b92dac37372e4c45a62')

    variant('netcdf', default=False,
            description='Enable GRIB to NetCDF conversion tool')
    variant('jp2k', default='openjpeg', values=('openjpeg', 'jasper', 'none'),
            description='Specify JPEG2000 decoding/encoding backend')
    variant('png', default=False,
            description='Enable PNG support for decoding/encoding')
    variant('aec', default=False,
            description='Enable Adaptive Entropy Coding for decoding/encoding')
    variant('pthreads', default=False,
            description='Enable POSIX threads')
    variant('openmp', default=False,
            description='Enable OpenMP threads')
    variant('memfs', default=False,
            description='Enable memory based access to definitions/samples')
    variant('python', default=False,
            description='Enable the Python interface')
    variant('fortran', default=True, description='Enable the Fortran support')
    variant('build_type', default='RelWithDebInfo',
            description='The build type to build',
            values=('Debug', 'Release', 'RelWithDebInfo', 'Production'))

    depends_on('netcdf', when='+netcdf')
    depends_on('openjpeg', when='jp2k=openjpeg')
    depends_on('jasper', when='jp2k=jasper')
    depends_on('libpng', when='+png')
    depends_on('libaec', when='+aec')
    depends_on('python@:2', when='+python')
    depends_on('py-numpy', when='+python', type=('build', 'run'))
    extends('python', when='+python')

    conflicts('+openmp', when='+pthreads',
              msg='Cannot enable both POSIX threads and OMP')

    # The following enforces linking against the specified JPEG2000 backend.
    patch('enable_only_openjpeg.patch', when='jp2k=openjpeg')
    patch('enable_only_jasper.patch', when='jp2k=jasper')

    def cmake_args(self):
        """Translate each boolean variant into its -DENABLE_<OPT>=ON/OFF flag.

        :return: list of CMake command-line definitions
        """
        variants = ['+netcdf', '+png', '+aec', '+pthreads',
                    '+openmp', '+memfs', '+python', '+fortran']
        options = ['NETCDF', 'PNG', 'AEC', 'ECCODES_THREADS',
                   'ECCODES_OMP_THREADS', 'MEMFS', 'PYTHON', 'FORTRAN']

        # Build a real list: the previous 'map(...)' form returns an
        # iterator under Python 3, which has no 'append' method and made
        # the calls below fail.
        args = ['-DENABLE_%s=%s' % (opt, 'ON' if var in self.spec else 'OFF')
                for var, opt in zip(variants, options)]

        # The JPEG2000 backend is a multi-valued variant, handled separately.
        if self.spec.variants['jp2k'].value == 'none':
            args.append('-DENABLE_JPG=OFF')
        else:
            args.append('-DENABLE_JPG=ON')

        return args
| skosukhin/spack | var/spack/repos/builtin/packages/eccodes/package.py | Python | lgpl-2.1 | 4,224 | [
"NetCDF"
] | 06c6a1a6dd8c88e6f509584d5824ab5a3f3ce6775ab05c283cc8e5e710f84b3c |
import numpy as np
from ase.utils import rotate, irotate
def test(xyz):
    """Round-trip check: rotate -> irotate -> rotate must reproduce the matrix."""
    forward = rotate(xyz)
    recovered = '%sx,%sy,%sz' % irotate(forward)
    backward = rotate(recovered)
    print(xyz)
    print(recovered)
    assert abs(forward - backward).max() < 1e-10
# Round-trip several Euler-angle strings, including negative angles and
# 90/180-degree corner cases where the decomposition can be ambiguous.
test('10z')
test('155x,43y,190z')
test('55x,90y,190z')
test('180x,-90y,45z')
test('-180y')
test('40z,50x')

from math import sqrt
from ase import Atoms, Atom

norm = np.linalg.norm
# Rotate an almost-y-aligned unit vector; after the rotation its length
# must still be 1 (rotations preserve norms), even when the off-axis
# component eps is tiny.
for eps in [1.e-6, 1.e-8]:
    struct = Atoms([Atom('H'),
                    Atom('H', [0, sqrt(1 - eps**2), eps])])
    struct.rotate(struct[1].position, 'y')
    assert abs(norm(struct[1].position) - 1) < 1.e-12
| suttond/MODOI | ase/test/rotate.py | Python | lgpl-3.0 | 644 | [
"ASE"
] | 2b782c9f8170e9af6552c04ca5a942999b1f1745ef097e99b8873800ba2eb235 |
# $HeadURL$
__RCSID__ = "$Id$"
import types
import os
import datetime
from DIRAC import S_OK, S_ERROR, rootPath, gConfig, gLogger, gMonitor
from DIRAC.AccountingSystem.DB.AccountingDB import AccountingDB
from DIRAC.AccountingSystem.private.Summaries import Summaries
from DIRAC.AccountingSystem.private.DataCache import gDataCache
from DIRAC.AccountingSystem.private.MainReporter import MainReporter
from DIRAC.AccountingSystem.private.DBUtils import DBUtils
from DIRAC.AccountingSystem.private.Policies import gPoliciesList
from DIRAC.AccountingSystem.private.Plots import generateErrorMessagePlot
from DIRAC.AccountingSystem.private.FileCoding import extractRequestFromFileId
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities import Time
gAccountingDB = False
def initializeReportGeneratorHandler( serviceInfo ):
  """Service initialization: open a read-only AccountingDB connection and
  prepare a writable directory where plot/report images will be cached.

  :param serviceInfo: standard DISET service information dictionary
  :return: S_OK() on success, S_ERROR() if the data location is not writable
  """
  global gAccountingDB
  gAccountingDB = AccountingDB( readOnly = True )
  # Get data location from the service configuration
  reportSection = PathFinder.getServiceSection( "Accounting/ReportGenerator" )
  dataPath = gConfig.getValue( "%s/DataLocation" % reportSection, "data/accountingGraphs" )
  dataPath = dataPath.strip()
  # 'startswith' (instead of indexing dataPath[0]) avoids an IndexError
  # when the configured value is an empty string
  if not dataPath.startswith( "/" ):
    # Relative path: anchor it at the instance (or root) installation path
    dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
  gLogger.info( "Data will be written into %s" % dataPath )
  try:
    os.makedirs( dataPath )
  except OSError:
    # Directory already exists, or cannot be created -- the write probe
    # below will catch the latter case explicitly
    pass
  try:
    # Probe that the directory is actually writable
    testFile = "%s/acc.jarl.test" % dataPath
    fd = open( testFile, "w" )  # 'open' instead of the py3-removed 'file' builtin
    fd.close()
    os.unlink( testFile )
  except IOError:
    gLogger.fatal( "Can't write to %s" % dataPath )
    return S_ERROR( "Data location is not writable" )
  gDataCache.setGraphsLocation( dataPath )
  gMonitor.registerActivity( "plotsDrawn", "Drawn plot images", "Accounting reports", "plots", gMonitor.OP_SUM )
  gMonitor.registerActivity( "reportsRequested", "Generated reports", "Accounting reports", "reports", gMonitor.OP_SUM )
  return S_OK()
class ReportGeneratorHandler( RequestHandler ):
  """DISET request handler that validates plot/report requests, generates
  accounting plots/reports through MainReporter and serves the resulting
  image files to clients."""

  # Expected structure of a plot/report request: field name -> accepted type(s).
  # startTime/endTime accept any of the date types known to Time.
  __reportRequestDict = { 'typeName' : types.StringType,
                          'reportName' : types.StringType,
                          'startTime' : Time._allDateTypes,
                          'endTime' : Time._allDateTypes,
                          'condDict' : types.DictType,
                          'grouping' : types.StringType,
                          'extraArgs' : types.DictType
                        }

  def __checkPlotRequest( self, reportRequest ):
    """Validate a plot/report request in place.

    Fills in defaults (empty extraArgs, endTime=now), expands sliding-window
    requests ('lastSeconds' in extraArgs) into explicit start/end times, type
    checks every mandatory field and converts the time fields to epoch ints.

    :param reportRequest: dict matching __reportRequestDict
    :return: S_OK( normalized request ) or S_ERROR( reason )
    """
    #If extraArgs is not there add it
    if 'extraArgs' not in reportRequest:
      reportRequest[ 'extraArgs' ] = {}
    if type( reportRequest[ 'extraArgs' ] ) != self.__reportRequestDict[ 'extraArgs' ]:
      return S_ERROR( "Extra args has to be of type %s" % self.__reportRequestDict[ 'extraArgs' ] )
    reportRequestExtra = reportRequest[ 'extraArgs' ]
    #Check sliding plots
    if 'lastSeconds' in reportRequestExtra:
      try:
        lastSeconds = long( reportRequestExtra[ 'lastSeconds' ] )
      except:
        return S_ERROR( "lastSeconds key must be a number" )
      if lastSeconds < 3600:
        return S_ERROR( "lastSeconds must be more than 3600" )
      # Sliding window: [now - lastSeconds, now]
      now = Time.dateTime()
      reportRequest[ 'endTime' ] = now
      reportRequest[ 'startTime' ] = now - datetime.timedelta( seconds = lastSeconds )
    else:
      #if enddate is not there, just set it to now
      if not reportRequest.get( 'endTime', False ):
        reportRequest[ 'endTime' ] = Time.dateTime()
    #Check keys
    for key in self.__reportRequestDict:
      if not key in reportRequest:
        return S_ERROR( 'Missing mandatory field %s in plot reques' % key )
      requestKeyType = type( reportRequest[ key ] )
      if key in ( 'startTime', 'endTime' ):
        # Time fields accept several date types and are normalized to epoch
        if requestKeyType not in self.__reportRequestDict[ key ]:
          return S_ERROR( "Type mismatch for field %s (%s), required one of %s" % ( key,
                                                                                   str( requestKeyType ),
                                                                                   str( self.__reportRequestDict[ key ] ) ) )
        reportRequest[ key ] = int( Time.toEpoch( reportRequest[ key ] ) )
      else:
        if requestKeyType != self.__reportRequestDict[ key ]:
          return S_ERROR( "Type mismatch for field %s (%s), required %s" % ( key,
                                                                            str( requestKeyType ),
                                                                            str( self.__reportRequestDict[ key ] ) ) )
    return S_OK( reportRequest )

  types_generatePlot = [ types.DictType ]
  def export_generatePlot( self, reportRequest ):
    """Generate an accounting plot image for the given request.

    Arguments:
      - viewName : Name of view (easy!)
      - startTime
      - endTime
      - argsDict : Arguments to the view.
      - grouping
      - extraArgs
    """
    retVal = self.__checkPlotRequest( reportRequest )
    if not retVal[ 'OK' ]:
      return retVal
    reporter = MainReporter( gAccountingDB, self.serviceInfoDict[ 'clientSetup' ] )
    gMonitor.addMark( "plotsDrawn" )
    reportRequest[ 'generatePlot' ] = True
    return reporter.generate( reportRequest, self.getRemoteCredentials() )

  types_getReport = [ types.DictType ]
  def export_getReport( self, reportRequest ):
    """Generate an accounting report (data only, no plot) for the given request.

    Arguments:
      - viewName : Name of view (easy!)
      - startTime
      - endTime
      - argsDict : Arguments to the view.
      - grouping
      - extraArgs
    """
    retVal = self.__checkPlotRequest( reportRequest )
    if not retVal[ 'OK' ]:
      return retVal
    reporter = MainReporter( gAccountingDB, self.serviceInfoDict[ 'clientSetup' ] )
    gMonitor.addMark( "reportsRequested" )
    reportRequest[ 'generatePlot' ] = False
    return reporter.generate( reportRequest, self.getRemoteCredentials() )

  types_listReports = [ types.StringType ]
  def export_listReports( self, typeName ):
    """List all reports available for the given accounting type.

    :param typeName: accounting type name
    """
    reporter = MainReporter( gAccountingDB, self.serviceInfoDict[ 'clientSetup' ] )
    return reporter.list( typeName )

  types_listUniqueKeyValues = [ types.StringType ]
  def export_listUniqueKeyValues( self, typeName ):
    """List all values for all keys of the given accounting type, filtered
    by the per-type access policy (if one is registered).

    :param typeName: accounting type name
    """
    dbUtils = DBUtils( gAccountingDB, self.serviceInfoDict[ 'clientSetup' ] )
    credDict = self.getRemoteCredentials()
    # Pick the policy registered for this type; fall back to the permissive
    # 'Null' policy with no listing conditions
    if typeName in gPoliciesList:
      policyFilter = gPoliciesList[ typeName ]
      filterCond = policyFilter.getListingConditions( credDict )
    else:
      policyFilter = gPoliciesList[ 'Null' ]
      filterCond = {}
    retVal = dbUtils.getKeyValues( typeName, filterCond )
    if not policyFilter or not retVal[ 'OK' ]:
      return retVal
    return policyFilter.filterListingValues( credDict, retVal[ 'Value' ] )

  def __generatePlotFromFileId( self, fileId ):
    """Decode a plot request encoded in a file id, generate the plot and
    return the name of the file to serve (full plot or thumbnail).

    :param fileId: encoded plot request (see FileCoding)
    :return: S_OK( file name ) or S_ERROR( reason )
    """
    result = extractRequestFromFileId( fileId )
    if not result[ 'OK' ]:
      return result
    plotRequest = result[ 'Value' ]
    gLogger.info( "Generating the plots.." )
    result = self.export_generatePlot( plotRequest )
    if not result[ 'OK' ]:
      gLogger.error( "Error while generating the plots", result[ 'Message' ] )
      return result
    fileToReturn = 'plot'
    # The request may ask for the thumbnail instead of the full image
    if 'extraArgs' in plotRequest:
      extraArgs = plotRequest[ 'extraArgs' ]
      if 'thumbnail' in extraArgs and extraArgs[ 'thumbnail' ]:
        fileToReturn = 'thumbnail'
    gLogger.info( "Returning %s file: %s " % ( fileToReturn, result[ 'Value' ][ fileToReturn ] ) )
    return S_OK( result[ 'Value' ][ fileToReturn ] )

  def __sendErrorAsImg( self, msgText, fileHelper ):
    """Render an error message as a plot image and stream it to the client,
    so that failures are visible in plot-displaying UIs.

    :param msgText: error text to render
    :param fileHelper: DISET file helper used to stream the image
    """
    retVal = generateErrorMessagePlot( msgText )
    retVal = fileHelper.sendData( retVal[ 'Value' ] )
    if not retVal[ 'OK' ]:
      return retVal
    fileHelper.sendEOF()
    return S_OK()

  def transfer_toClient( self, fileId, token, fileHelper ):
    """Stream plot data to the client; if fileId encodes a plot request,
    generate the plot first.

    :param fileId: plain file id or encoded plot-generation request
    :param token: transfer token (unused here)
    :param fileHelper: DISET file helper used to stream the data
    """
    #First check if we've got to generate the plot
    if len( fileId ) > 5 and fileId[1] == ':':
      gLogger.info( "Seems the file request is a plot generation request!" )
      #Seems a request for a plot!
      try:
        result = self.__generatePlotFromFileId( fileId )
      except Exception, e:
        gLogger.exception( "Exception while generating plot" )
        result = S_ERROR( "Error while generating plot: %s" % str( e ) )
      if not result[ 'OK' ]:
        # Report the failure as an image so the client UI can display it
        self.__sendErrorAsImg( result[ 'Message' ], fileHelper )
        fileHelper.sendEOF()
        return result
      fileId = result[ 'Value' ]
    retVal = gDataCache.getPlotData( fileId )
    if not retVal[ 'OK' ]:
      self.__sendErrorAsImg( retVal[ 'Message' ], fileHelper )
      return retVal
    retVal = fileHelper.sendData( retVal[ 'Value' ] )
    if not retVal[ 'OK' ]:
      return retVal
    fileHelper.sendEOF()
    return S_OK()
| avedaee/DIRAC | AccountingSystem/Service/ReportGeneratorHandler.py | Python | gpl-3.0 | 8,961 | [
"DIRAC"
] | a60f6c5dfe5f771b4cc32a8ab35b9db8bb12647b1d717fb35137accdec53ef15 |
# Copyright (C) 2018 Charlie Hoy, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides model classes that assume the noise is Gaussian and
allows for the likelihood to be marginalized over phase and/or time and/or
distance.
"""
import numpy
from scipy import special
from pycbc.waveform import NoWaveformError
from pycbc.filter.matchedfilter import matched_filter_core
from pycbc.distributions import read_distributions_from_config
from pycbc.waveform import generator
from .gaussian_noise import GaussianNoise
class MarginalizedPhaseGaussianNoise(GaussianNoise):
    r"""The likelihood is analytically marginalized over phase.

    This class can be used with signal models that can be written as:

    .. math::

        \tilde{h}(f; \Theta, \phi) = A(f; \Theta)e^{i\Psi(f; \Theta) + i \phi},

    where :math:`\phi` is an arbitrary phase constant. This phase constant
    can be analytically marginalized over with a uniform prior as follows:
    assuming the noise is stationary and Gaussian (see `GaussianNoise`
    for details), the posterior is:

    .. math::

        p(\Theta,\phi|d)
            &\propto p(\Theta)p(\phi)p(d|\Theta,\phi) \\
            &\propto p(\Theta)\frac{1}{2\pi}\exp\left[
                -\frac{1}{2}\sum_{i}^{N_D} \left<
                    h_i(\Theta,\phi) - d_i, h_i(\Theta,\phi) - d_i
                \right>\right].

    Here, the sum is over the number of detectors :math:`N_D`, :math:`d_i`
    and :math:`h_i` are the data and signal in detector :math:`i`,
    respectively, and we have assumed a uniform prior on :math:`phi \in [0,
    2\pi)`. With the form of the signal model given above, the inner product
    in the exponent can be written as:

    .. math::

        -\frac{1}{2}\left<h_i - d_i, h_i- d_i\right>
            &= \left<h_i, d_i\right> -
               \frac{1}{2}\left<h_i, h_i\right> -
               \frac{1}{2}\left<d_i, d_i\right> \\
            &= \Re\left\{O(h^0_i, d_i)e^{-i\phi}\right\} -
               \frac{1}{2}\left<h^0_i, h^0_i\right> -
               \frac{1}{2}\left<d_i, d_i\right>,

    where:

    .. math::

        h_i^0 &\equiv \tilde{h}_i(f; \Theta, \phi=0); \\
        O(h^0_i, d_i) &\equiv 4 \int_0^\infty
            \frac{\tilde{h}_i^*(f; \Theta,0)\tilde{d}_i(f)}{S_n(f)}\mathrm{d}f.

    Gathering all of the terms that are not dependent on :math:`\phi` together:

    .. math::

        \alpha(\Theta, d) \equiv \exp\left[-\frac{1}{2}\sum_i
            \left<h^0_i, h^0_i\right> + \left<d_i, d_i\right>\right],

    we can marginalize the posterior over :math:`\phi`:

    .. math::

        p(\Theta|d)
            &\propto p(\Theta)\alpha(\Theta,d)\frac{1}{2\pi}
                 \int_{0}^{2\pi}\exp\left[\Re \left\{
                     e^{-i\phi} \sum_i O(h^0_i, d_i)
                 \right\}\right]\mathrm{d}\phi \\
            &\propto p(\Theta)\alpha(\Theta, d)\frac{1}{2\pi}
                 \int_{0}^{2\pi}\exp\left[
                     x(\Theta,d)\cos(\phi) + y(\Theta, d)\sin(\phi)
                 \right]\mathrm{d}\phi.

    The integral in the last line is equal to :math:`2\pi I_0(\sqrt{x^2+y^2})`,
    where :math:`I_0` is the modified Bessel function of the first kind. Thus
    the marginalized log posterior is:

    .. math::

        \log p(\Theta|d) \propto \log p(\Theta) +
            I_0\left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
            \frac{1}{2}\sum_i\left[ \left<h^0_i, h^0_i\right> -
                                    \left<d_i, d_i\right> \right]
    """
    name = 'marginalized_phase'

    @property
    def _extra_stats(self):
        """Adds ``loglr`` and ``maxl_phase``, plus ``optimal_snrsq`` in each
        detector."""
        return ['loglr', 'maxl_phase'] + \
               ['{}_optimal_snrsq'.format(det) for det in self._data]

    def _loglr(self):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) =
                I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
                \frac{1}{2}\left<h^0_i, h^0_i\right>,

        at the current point in parameter space :math:`\Theta`.

        Returns
        -------
        float
            The value of the log likelihood ratio evaluated at the given point.
        """
        params = self.current_params
        try:
            wfs = self._waveform_generator.generate(**params)
        except NoWaveformError:
            return self._nowaveform_loglr()
        # hh accumulates sum_i <h_i, h_i>; hd accumulates the complex
        # sum_i O(h^0_i, d_i) over detectors
        hh = 0.
        hd = 0j
        for det, h in wfs.items():
            # the kmax of the waveforms may be different than internal kmax
            kmax = min(len(h), self._kmax[det])
            if self._kmin[det] >= kmax:
                # if the waveform terminates before the filtering low frequency
                # cutoff, then the loglr is just 0 for this detector
                hh_i = 0.
                hd_i = 0j
            else:
                # whiten the waveform
                h[self._kmin[det]:kmax] *= \
                    self._weight[det][self._kmin[det]:kmax]
                # calculate inner products
                hh_i = h[self._kmin[det]:kmax].inner(
                    h[self._kmin[det]:kmax]).real
                hd_i = self.data[det][self._kmin[det]:kmax].inner(
                    h[self._kmin[det]:kmax])
            # store
            setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh_i)
            hh += hh_i
            hd += hd_i
        hd = abs(hd)
        self._current_stats.maxl_phase = numpy.angle(hd)
        # log I0(hd) evaluated stably as log(i0e(hd)) + hd, since
        # i0e(x) = exp(-x) I0(x); avoids overflow for large hd
        return numpy.log(special.i0e(hd)) + hd - 0.5*hh
class MarginalizedGaussianNoise(GaussianNoise):
    r"""The likelihood is analytically marginalized over phase and/or time
    and/or distance.

    For the case of marginalizing over phase, the signal can be written as:

    .. math::

        \tilde{h}(f; \Theta, \phi) = A(f; \Theta)e^{i\Psi(f; \Theta) + i \phi},

    where :math:`\phi` is an arbitrary phase constant. This phase constant
    can be analytically marginalized over with a uniform prior as follows:
    assuming the noise is stationary and Gaussian (see `GaussianNoise`
    for details), the posterior is:

    .. math::

        p(\Theta,\phi|d)
            &\propto p(\Theta)p(\phi)p(d|\Theta,\phi) \\
            &\propto p(\Theta)\frac{1}{2\pi}\exp\left[
                -\frac{1}{2}\sum_{i}^{N_D} \left<
                    h_i(\Theta,\phi) - d_i, h_i(\Theta,\phi) - d_i
                \right>\right].

    Here, the sum is over the number of detectors :math:`N_D`, :math:`d_i`
    and :math:`h_i` are the data and signal in detector :math:`i`,
    respectively, and we have assumed a uniform prior on :math:`phi \in [0,
    2\pi)`. With the form of the signal model given above, the inner product
    in the exponent can be written as:

    .. math::

        -\frac{1}{2}\left<h_i - d_i, h_i- d_i\right>
            &= \left<h_i, d_i\right> -
               \frac{1}{2}\left<h_i, h_i\right> -
               \frac{1}{2}\left<d_i, d_i\right> \\
            &= \Re\left\{O(h^0_i, d_i)e^{-i\phi}\right\} -
               \frac{1}{2}\left<h^0_i, h^0_i\right> -
               \frac{1}{2}\left<d_i, d_i\right>,

    where:

    .. math::

        h_i^0 &\equiv \tilde{h}_i(f; \Theta, \phi=0); \\
        O(h^0_i, d_i) &\equiv 4 \int_0^\infty
            \frac{\tilde{h}_i^*(f; \Theta,0)\tilde{d}_i(f)}{S_n(f)}\mathrm{d}f.

    Gathering all of the terms that are not dependent on :math:`\phi` together:

    .. math::

        \alpha(\Theta, d) \equiv \exp\left[-\frac{1}{2}\sum_i
            \left<h^0_i, h^0_i\right> + \left<d_i, d_i\right>\right],

    we can marginalize the posterior over :math:`\phi`:

    .. math::

        p(\Theta|d)
            &\propto p(\Theta)\alpha(\Theta,d)\frac{1}{2\pi}
                 \int_{0}^{2\pi}\exp\left[\Re \left\{
                     e^{-i\phi} \sum_i O(h^0_i, d_i)
                 \right\}\right]\mathrm{d}\phi \\
            &\propto p(\Theta)\alpha(\Theta, d)\frac{1}{2\pi}
                 \int_{0}^{2\pi}\exp\left[
                     x(\Theta,d)\cos(\phi) + y(\Theta, d)\sin(\phi)
                 \right]\mathrm{d}\phi.

    The integral in the last line is equal to :math:`2\pi I_0(\sqrt{x^2+y^2})`,
    where :math:`I_0` is the modified Bessel function of the first kind. Thus
    the marginalized log posterior is:

    .. math::

        \log p(\Theta|d) \propto \log p(\Theta) +
            I_0\left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
            \frac{1}{2}\sum_i\left[ \left<h^0_i, h^0_i\right> -
                                    \left<d_i, d_i\right> \right]

    For the case of marginalizing over distance, the signal can be written as,

    .. math::

        \tilde{h}_{j} = \frac{1}{D} \tilde{h}_{j}^{0}

    The distance can be analytically marginalized over with a uniform prior as
    follows: assuming the noise is stationary and Gaussian (see
    `GaussianNoise` for details), the likelihood is:

    .. math::

        \log L = -\frac{1}{2}\left<d-h|d-h\right>

    We see that :math:`<h|h>` is inversely proportional to distance squared
    and :math:`<h|d>` is inversely proportional to distance. The log
    likelihood is therefore

    .. math::

        \log L = -\frac{1}{2}\left<d|d\right> - \frac{1}{2D^{2}}
                 \left<h|h\right> + \frac{1}{D}\left<h|d\right>

    Consequently, the likelihood marginalised over distance is simply

    .. math::

        \log L = \log\left(\int_{0}^{D}{L p(D) dD}\right)

    If we assume a flat prior

    .. math::

        \log L = \log\left(\int_{0}^{D}{\exp{\log L} dD}\right)

    For the case of marginalizing over time, the signal can be written as,

    .. math::

        \tilde{h}_{j} = \tilde{h}_{j}^{0} \exp\left(-2\pi ij\Delta ft\right)

    The time can be analytically marginalized over with a uniform prior as
    follows: assuming the noise is stationary and Gaussian (see
    `GaussianNoise` for details), the likelihood is:

    .. math::

        \log L = -\frac{1}{2}\left<d-h|d-h\right>

    We note that :math:`<h|h>` and :math:`<d|d>` are time independent while
    :math:`<d|h>` is dependent of time

    .. math::

        \left<d|h\right>(t) = 4\Delta f\sum_{j=0}^{N/2} \frac{\tilde{d}_{j}^{*}
                              \tilde{h}_{j}^{0}}{S_{j}} \exp\left(-2\pi ij
                              \Delta f t\right)

    For integer timesteps :math:`t=k\Delta t`

    .. math::

        \left<d|h\right>(k\Delta t) = 4\Delta f\sum_{j=0}^{N/2} \frac{
                                      \tilde{d}_{j}^{*}\tilde{h}_{j}^{0}}
                                      {S_{j}}\exp(-2\pi \frac{ijk}{N})

        \left<d|h\right>(k\Delta t) = 2\Delta f\sum_{j=0}^{N} \frac{
                                      \tilde{d}_{j}^{*}\tilde{h}_{j}^{0}}
                                      {S_{j}} \exp(-2\pi \frac{ijk}{N})

    Using a FFT, this expression can be evaluated efficiently for all
    :math:`k`

    .. math::

        \left<d|h\right>(k\Delta t) = 2\Delta f FFT_{k} (\frac{dh}{S})

    since :math:`\left<h|d\right> = \left<d|h\right>^{*}`,

    .. math::

        \left<d|h\right> + \left<h|d\right> = 4\Delta f FFT_{k} (\frac{dh}{S})

    and so the likelihood marginalised over time is simply

    .. math::

        \log{L} = \log\left(\int_{0}^{T} \exp(\log L)\, p(t)\, dt\right)

    where :math:`p(t)` is the prior. If we assume a flat prior then,

    .. math::

        \log{L} = \log\left(\int_{0}^{T} \exp(\log L)\, dt\right)

    Parameters
    ----------
    time_marginalization : bool, optional
        A Boolean operator which determines if the likelihood is marginalized
        over time
    phase_marginalization : bool, optional
        A Boolean operator which determines if the likelihood is marginalized
        over phase
    distance_marginalization : bool, optional
        A Boolean operator which determines if the likelihood is marginalized
        over distance
    marg_prior : list, optional
        An instance of pycbc.distributions which returns a list of prior
        distributions to be used when marginalizing the likelihood
    **kwargs :
        All other keyword arguments are passed to ``GaussianNoise``.
    """
    name = 'marginalized_gaussian_noise'

    def __init__(self, variable_params, data, waveform_generator,
                 f_lower, psds=None, f_upper=None, norm=None,
                 time_marginalization=False, distance_marginalization=False,
                 phase_marginalization=False, marg_prior=None, **kwargs):
        super(MarginalizedGaussianNoise, self).__init__(variable_params, data,
                                                        waveform_generator,
                                                        f_lower, psds, f_upper,
                                                        norm, **kwargs)
        self._margtime = time_marginalization
        self._margdist = distance_marginalization
        self._margphase = phase_marginalization
        # dictionary containing possible techniques to evaluate the log
        # likelihood ratio, keyed on (margdist, margphase, margtime)
        loglr_poss = {(1, 1, 1): self._margtimephasedist_loglr,
                      (1, 0, 1): self._margtimedist_loglr,
                      (0, 1, 1): self._margtimephase_loglr,
                      (1, 1, 0): self._margdistphase_loglr,
                      (1, 0, 0): self._margdist_loglr,
                      (0, 0, 1): self._margtime_loglr,
                      (0, 1, 0): self._margphase_loglr}
        # dictionary containing two techniques to calculate the matched
        # filter SNR depending on whether time has been marginalised over or
        # not.
        mfsnr_poss = {1: self._margtime_mfsnr, 0: self._mfsnr}
        self._args = (int(self._margdist), int(self._margphase),
                      int(self._margtime))
        if self._args == (0, 0, 0):
            raise AttributeError("This class requires that you marginalize "
                                 "over at least one parameter. You have not "
                                 "marginalized over any.")
        else:
            self._eval_loglr = loglr_poss[self._args]
            self._eval_mfsnr = mfsnr_poss[self._args[2]]
        if marg_prior is None:
            raise AttributeError("No priors are specified for the "
                                 "marginalization. This is needed to "
                                 "calculated the marginalized likelihood")
        else:
            self._marg_prior = dict(zip([i.params[0] for i in marg_prior],
                                        marg_prior))
            self._setup_prior()

    @property
    def _extra_stats(self):
        """Adds ``loglr``, ``optimal_snrsq`` and matched filter snrsq in each
        detector to the default stats."""
        return ['loglr'] + \
               ['{}_optimal_snrsq'.format(det) for det in self._data] + \
               ['{}_matchedfilter_snrsq'.format(det) for det in self._data]

    @classmethod
    def from_config(cls, cp, data=None, delta_f=None, delta_t=None,
                    gates=None, recalibration=None, **kwargs):
        """Initializes an instance of this class from the given config file.

        Parameters
        ----------
        cp : WorkflowConfigParser
            Config file parser to read.
        data : dict
            A dictionary of data, in which the keys are the detector names and
            the values are the data. This is not retrieved from the config
            file, and so must be provided.
        delta_f : float
            The frequency spacing of the data; needed for waveform generation.
        delta_t : float
            The time spacing of the data; needed for time-domain waveform
            generators.
        recalibration : dict of pycbc.calibration.Recalibrate, optional
            Dictionary of detectors -> recalibration class instances for
            recalibrating data.
        gates : dict of tuples, optional
            Dictionary of detectors -> tuples of specifying gate times. The
            sort of thing returned by `pycbc.gate.gates_from_cli`.
        \**kwargs :
            All additional keyword arguments are passed to the class. Any
            provided keyword will over ride what is in the config file.
        """
        prior_section = "marginalized_prior"
        args = cls._init_args_from_config(cp)
        marg_prior = read_distributions_from_config(cp, prior_section)
        if len(marg_prior) == 0:
            raise AttributeError("No priors are specified for the "
                                 "marginalization. Please specify this in a "
                                 "section in the config file with heading "
                                 "{}-variable".format(prior_section))
        params = [i.params[0] for i in marg_prior]
        marg_args = [k for k, v in args.items() if "_marginalization" in k]
        if len(marg_args) != len(params):
            raise ValueError("There is not a prior for each keyword argument")
        kwargs['marg_prior'] = marg_prior
        for i in params:
            kwargs[i+"_marginalization"] = True
        args.update(kwargs)
        variable_params = args['variable_params']
        args["data"] = data
        try:
            static_params = args['static_params']
        except KeyError:
            static_params = {}
        # set up waveform generator
        try:
            approximant = static_params['approximant']
        except KeyError:
            raise ValueError("no approximant provided in the static args")
        generator_function = generator.select_waveform_generator(approximant)
        waveform_generator = generator.FDomainDetFrameGenerator(
            generator_function, epoch=tuple(data.values())[0].start_time,
            variable_args=variable_params, detectors=data.keys(),
            delta_f=delta_f, delta_t=delta_t,
            recalib=recalibration, gates=gates,
            **static_params)
        args['waveform_generator'] = waveform_generator
        args["f_lower"] = static_params["f_lower"]
        return cls(**args)

    def _setup_prior(self):
        """Sets up the prior for time and/or distance and/or phase which is
        used for the likelihood marginalization.
        """
        if len(self._marg_prior) == 0:
            raise ValueError("A prior must be specified for the parameters "
                             "that you wish to marginalize the likelihood "
                             "over")
        marg_number = len([i for i in self._args if i != 0])
        if len(self._marg_prior) != marg_number:
            raise AttributeError("There is not a prior for each keyword "
                                 "argument")
        if self._margdist:
            bounds = self._marg_prior["distance"].bounds
            self._dist_array = numpy.linspace(bounds["distance"].min,
                                              bounds["distance"].max, 10**4)
            self._deltad = self._dist_array[1] - self._dist_array[0]
            self.dist_prior = numpy.array(
                [self._marg_prior["distance"].pdf(distance=i)
                 for i in self._dist_array])
        if self._margtime:
            bounds = self._marg_prior["time"].bounds
            # FIX: the upper end of the grid must be bounds["time"].max;
            # the previous code passed .min twice (copy-paste error), which
            # collapsed the time grid to a single point.
            self._time_array = numpy.linspace(bounds["time"].min,
                                              bounds["time"].max, 10**4)
            self.time_prior = numpy.array(
                [self._marg_prior["time"].pdf(time=i) for
                 i in self._time_array])
        if self._margphase:
            bounds = self._marg_prior["phase"].bounds
            self._phase_array = numpy.linspace(bounds["phase"].min,
                                               bounds["phase"].max, 10**4)
            self._deltap = self._phase_array[1] - self._phase_array[0]
            self.phase_prior = numpy.array(
                [self._marg_prior["phase"].pdf(phase=i) for
                 i in self._phase_array])

    @staticmethod
    def _margtime_mfsnr(template, data):
        """Returns a time series for the matched filter SNR assuming that the
        template and data have both been normalised and whitened.
        """
        snr = matched_filter_core(template, data, h_norm=1, psd=None)
        hd_i = snr[0].numpy().real
        return hd_i

    @staticmethod
    def _mfsnr(template, data):
        """Returns the matched filter SNR assuming that the template and data
        have both been normalised and whitened.
        """
        hd_i = data.inner(template)
        return hd_i

    @staticmethod
    def _log_i0(x):
        """Returns log(I0(x)) computed stably as log(i0e(x)) + x.

        Since i0e(x) = exp(-x) * I0(x), this avoids the overflow of
        ``special.i0`` for large arguments (x > ~700), matching the
        technique used in ``MarginalizedPhaseGaussianNoise``.
        """
        return numpy.log(special.i0e(x)) + x

    def _margtimephasedist_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over time, phase and
        distance.
        """
        logl = special.logsumexp(self._log_i0(mf_snr),
                                 b=self._deltat)
        logl_marg = logl/self._dist_array
        opt_snr_marg = opt_snr/self._dist_array**2
        return special.logsumexp(logl_marg - 0.5*opt_snr_marg,
                                 b=self._deltad*self.dist_prior)

    def _margtimedist_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over time and
        distance.
        """
        logl = special.logsumexp(mf_snr, b=self._deltat)
        logl_marg = logl/self._dist_array
        opt_snr_marg = opt_snr/self._dist_array**2
        return special.logsumexp(logl_marg - 0.5*opt_snr_marg,
                                 b=self._deltad*self.dist_prior)

    def _margtimephase_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over time and phase.
        """
        return special.logsumexp(self._log_i0(mf_snr),
                                 b=self._deltat) - 0.5*opt_snr

    def _margdistphase_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over distance and
        phase.
        """
        logl = self._log_i0(mf_snr)
        logl_marg = logl/self._dist_array
        opt_snr_marg = opt_snr/self._dist_array**2
        return special.logsumexp(logl_marg - 0.5*opt_snr_marg,
                                 b=self._deltad*self.dist_prior)

    def _margdist_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over distance.
        """
        mf_snr_marg = mf_snr/self._dist_array
        opt_snr_marg = opt_snr/self._dist_array**2
        return special.logsumexp(mf_snr_marg - 0.5*opt_snr_marg,
                                 b=self._deltad*self.dist_prior)

    def _margtime_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over time.
        """
        return special.logsumexp(mf_snr, b=self._deltat) - 0.5*opt_snr

    def _margphase_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over phase.
        """
        return self._log_i0(mf_snr) - 0.5*opt_snr

    def _loglr(self):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) = \sum_i
                \left<h_i(\Theta)|d_i\right> -
                \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,

        at the current parameter values :math:`\Theta`.

        Returns
        -------
        float
            The value of the log likelihood ratio evaluated at the given point.
        """
        params = self.current_params
        try:
            wfs = self._waveform_generator.generate(**params)
        except NoWaveformError:
            return self._nowaveform_loglr()
        # opt_snr accumulates sum_i <h_i, h_i>; mf_snr accumulates the
        # (possibly time-dependent) matched-filter inner products
        opt_snr = 0.
        mf_snr = 0j
        for det, h in wfs.items():
            # the kmax of the waveforms may be different than internal kmax
            kmax = min(len(h), self._kmax[det])
            # time step, used as the quadrature weight for time marginalization
            self._deltat = h.delta_t
            if self._kmin[det] >= kmax:
                # if the waveform terminates before the filtering low
                # frequency cutoff, then the loglr is just 0 for this
                # detector
                hh_i = 0.
                hd_i = 0j
            else:
                # whiten the waveform, then compute the inner products
                h[self._kmin[det]:kmax] *= \
                    self._weight[det][self._kmin[det]:kmax]
                hh_i = h[self._kmin[det]:kmax].inner(
                    h[self._kmin[det]:kmax]).real
                hd_i = self._eval_mfsnr(h[self._kmin[det]:kmax],
                                        self.data[det][self._kmin[det]:kmax])
            opt_snr += hh_i
            mf_snr += hd_i
            setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh_i)
            setattr(self._current_stats, '{}_matchedfilter_snrsq'.format(det),
                    hd_i)
        mf_snr = abs(mf_snr)
        loglr = self._eval_loglr(mf_snr, opt_snr)
        # also store the loglikelihood, to ensure it is populated in the
        # current stats even if loglikelihood is never called
        self._current_stats.loglikelihood = loglr + self.lognl
        return loglr
| sfairhur/pycbc | pycbc/inference/models/marginalized_gaussian_noise.py | Python | gpl-3.0 | 25,814 | [
"Gaussian"
] | 9ef597cdca4778b4d21559863cbfb1bee4abda6c1bb096ba973a5f1eaf2edc98 |
# -*- coding: utf-8 -*-
#
# This file is part of Harvesting Kit.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Harvesting Kit is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Harvesting Kit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Harvesting Kit; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
import re
import sys
import time
import requests
import xml.dom.minidom
import datetime
from bs4 import BeautifulSoup
from os import (listdir,
rename,
fdopen)
from os.path import (join,
exists,
walk)
from tempfile import (mkdtemp,
mkstemp)
from zipfile import ZipFile
from xml.dom.minidom import parse
try:
from invenio.errorlib import register_exception
except ImportError:
register_exception = lambda a, b: True
try:
from invenio.config import CFG_TMPSHAREDDIR, CFG_LOGDIR
except ImportError:
from distutils.sysconfig import get_python_lib
CFG_TMPSHAREDDIR = join(get_python_lib(),
"harvestingkit",
"tmp")
CFG_LOGDIR = join(get_python_lib(),
"harvestingkit",
"log")
from harvestingkit.utils import run_shell_command, create_logger
from harvestingkit.scoap3utils import (
MissingFFTError,
extract_package as scoap3utils_extract_package
)
from harvestingkit.contrast_out_utils import find_package_name
from harvestingkit.minidom_utils import (get_value_in_tag,
xml_to_text)
from harvestingkit.config import CFG_DTDS_PATH as CFG_SCOAP3DTDS_PATH
from harvestingkit.utils import (fix_journal_name,
format_arxiv_id,
add_nations_field,
fix_dashes)
from harvestingkit.bibrecord import (
record_add_field,
create_record,
record_xml_output,
)
CFG_ELSEVIER_ART501_PATH = join(CFG_SCOAP3DTDS_PATH, 'ja5_art501.zip')
CFG_ELSEVIER_ART510_PATH = join(CFG_SCOAP3DTDS_PATH, 'ja5_art510.zip')
CFG_ELSEVIER_ART520_PATH = join(CFG_SCOAP3DTDS_PATH, 'ja5_art520.zip')
CFG_ELSEVIER_ART540_PATH = join(CFG_SCOAP3DTDS_PATH, 'ja5_art540.zip')
CFG_ELSEVIER_SI510_PATH = join(CFG_SCOAP3DTDS_PATH, 'si510.zip')
CFG_ELSEVIER_SI520_PATH = join(CFG_SCOAP3DTDS_PATH, 'si520.zip')
CFG_ELSEVIER_SI540_PATH = join(CFG_SCOAP3DTDS_PATH, 'si540.zip')
CFG_ELSEVIER_JID_MAP = {'PLB': 'Physics letters B',
'NUPHB': 'Nuclear Physics B',
'CEMGE': 'Chemical Geology',
'SOLMAT': 'Solar Energy Materials & Solar Cells',
'APCATB': 'Applied Catalysis B: Environmental',
'NUMA': 'Journal of Nuclear Materials'}
class ElsevierPackage(object):
"""
This class is specialized in parsing an Elsevier package
and creating a SCOAP3-compatible bibupload containing the original
PDF, XML, and every possible metadata filled in.
:param package_name: the path to a tar.gz file to expand and parse
:type package_name: string
    :param path: the actual path of an already expanded package.
    :type path: string
    :param CONSYN: flag to determine which conversion should be used.
    :type CONSYN: bool
    :param journal_mappings: dictionary used to convert journal names;
                             key: the name in the xml source files,
                             value: the desired name.
    :type journal_mappings: dict
    :note: if neither C{package_name} nor C{path} is passed to the
           constructor, the Elsevier server will be harvested.
"""
def __init__(self, package_name=None, path=None,
run_locally=False, CONSYN=False,
journal_mappings={},
extract_nations=False,
no_harvest=False):
self.CONSYN = CONSYN
self.doi_package_name_mapping = []
try:
self.logger = create_logger(
"Elsevier",
filename=join(CFG_LOGDIR, 'scoap3_harvesting.log')
)
except IOError: # Could not access log file
# Use std.out for logging
self.logger = self
self.info = print
self.warning = print
self.error = print
self.debug = print
if self.CONSYN:
self.journal_mappings = journal_mappings
else:
if not no_harvest:
self.package_name = package_name
self.path = path
self.found_articles = []
self._found_issues = []
if run_locally:
from harvestingkit.contrast_out import ContrastOutConnector
self.conn = ContrastOutConnector(self.logger)
self.conn.run(run_locally)
else:
if not path and package_name:
self.logger.info("Got package: %s" % (package_name,))
self._extract_package()
elif not path and not package_name:
from harvestingkit.contrast_out import ContrastOutConnector
self.conn = ContrastOutConnector(self.logger)
self.conn.run()
self._crawl_elsevier_and_find_main_xml()
self._crawl_elsevier_and_find_issue_xml()
self._build_doi_mapping()
self.extract_nations = extract_nations
    def _extract_package(self):
        """
        Extract a package in a new temporary directory.
        """
        # The temporary directory lives on the shared area (CFG_TMPSHAREDDIR)
        # so the extracted files remain reachable by later processing steps.
        self.path = mkdtemp(prefix="scoap3_package_", dir=CFG_TMPSHAREDDIR)
        self.logger.debug("Extracting package: %s" % (self.package_name,))
        # The actual unpacking is delegated to the scoap3utils helper.
        scoap3utils_extract_package(self.package_name, self.path, self.logger)
def _crawl_elsevier_and_find_main_xml(self):
"""
A package contains several subdirectory corresponding to each article.
An article is actually identified by the existence of a main.pdf and
a main.xml in a given directory.
"""
self.found_articles = []
if not self.path and not self.package_name:
for doc in self.conn.found_articles:
dirname = doc['xml'].rstrip('/main.xml')
try:
self._normalize_article_dir_with_dtd(dirname)
self.found_articles.append(dirname)
except Exception as err:
register_exception()
print("ERROR: can't normalize %s: %s" % (dirname, err))
else:
def visit(dummy, dirname, names):
if "main.xml" in names and "main.pdf" in names:
try:
self._normalize_article_dir_with_dtd(dirname)
self.found_articles.append(dirname)
except Exception as err:
register_exception()
print("ERROR: can't normalize %s: %s" % (dirname, err))
walk(self.path, visit, None)
def _crawl_elsevier_and_find_issue_xml(self):
"""
Information about the current volume, issue, etc. is available
in a file called issue.xml that is available in a higher directory.
"""
self._found_issues = []
if not self.path and not self.package_name:
for issue in self.conn._get_issues():
dirname = issue.rstrip('/issue.xml')
try:
self._normalize_issue_dir_with_dtd(dirname)
self._found_issues.append(dirname)
except Exception as err:
register_exception()
print("ERROR: can't normalize %s: %s" % (dirname, err))
else:
def visit(dummy, dirname, names):
if "issue.xml" in names:
try:
self._normalize_issue_dir_with_dtd(dirname)
self._found_issues.append(dirname)
except Exception as err:
register_exception()
print("ERROR: can't normalize %s: %s"
% (dirname, err))
walk(self.path, visit, None)
def _extract_correct_dtd_package(self, si_name, path):
try:
ZipFile(eval("CFG_ELSEVIER_%s_PATH" % si_name.upper())).extractall(path)
except Exception as e:
raise e
for filename in listdir(join(path, si_name)):
rename(join(path, si_name, filename), join(path, filename))
def _normalize_issue_dir_with_dtd(self, path):
"""
issue.xml from Elsevier assume the existence of a local DTD.
This procedure install the DTDs next to the issue.xml file
and normalize it using xmllint in order to resolve all namespaces
and references.
"""
if exists(join(path, 'resolved_issue.xml')):
return
issue_xml_content = open(join(path, 'issue.xml')).read()
sis = ['si510.dtd', 'si520.dtd', 'si540.dtd']
tmp_extracted = 0
for si in sis:
if si in issue_xml_content:
self._extract_correct_dtd_package(si.split('.')[0], path)
tmp_extracted = 1
if not tmp_extracted:
message = "It looks like the path " + path
message += " does not contain an si510, si520 or si540 in issue.xml file"
self.logger.error(message)
raise ValueError(message)
command = ["xmllint", "--format", "--loaddtd",
join(path, 'issue.xml'),
"--output", join(path, 'resolved_issue.xml')]
dummy, dummy, cmd_err = run_shell_command(command)
if cmd_err:
message = "Error in cleaning %s: %s" % (
join(path, 'issue.xml'), cmd_err)
self.logger.error(message)
raise ValueError(message)
def _normalize_article_dir_with_dtd(self, path):
"""
main.xml from Elsevier assume the existence of a local DTD.
This procedure install the DTDs next to the main.xml file
and normalize it using xmllint in order to resolve all namespaces
and references.
"""
if exists(join(path, 'resolved_main.xml')):
return
main_xml_content = open(join(path, 'main.xml')).read()
arts = ['art501.dtd','art510.dtd','art520.dtd','art540.dtd']
tmp_extracted = 0
for art in arts:
if art in main_xml_content:
self._extract_correct_dtd_package(art.split('.')[0], path)
tmp_extracted = 1
if not tmp_extracted:
message = "It looks like the path " + path
message += "does not contain an art501, art510, art520 or art540 in main.xml file"
self.logger.error(message)
raise ValueError(message)
command = ["xmllint", "--format", "--loaddtd",
join(path, 'main.xml'),
"--output", join(path, 'resolved_main.xml')]
dummy, dummy, cmd_err = run_shell_command(command)
if cmd_err:
message = "Error in cleaning %s: %s" % (
join(path, 'main.xml'), cmd_err)
self.logger.error(message)
raise ValueError(message)
    def _add_references(self, xml_doc, rec, refextract_callback=None):
        """Append one MARC 999C5 field per reference found in *xml_doc*.

        Structured references are mapped subfield-by-subfield; raw-text
        references are either run through *refextract_callback* (when
        given) or stored verbatim in subfield 'm'.
        """
        for label, authors, doi, issue, page, title, volume, year,\
                textref, ext_link, isjournal, comment, journal, publisher,\
                editors, book_title in self.get_references(xml_doc):
            subfields = []
            if textref and not authors:
                # Unstructured reference: only a raw citation string.
                textref = textref.replace('\"', '\'')
                if refextract_callback:
                    # The callback returns MARCXML; copy its subfields over.
                    ref_xml = refextract_callback(textref)
                    dom = xml.dom.minidom.parseString(ref_xml)
                    fields = dom.getElementsByTagName("datafield")[0]
                    fields = fields.getElementsByTagName("subfield")
                    for field in fields:
                        data = field.firstChild.data
                        code = field.getAttribute("code")
                        if code == 'r':
                            # Normalize unicode dashes in report numbers.
                            data = fix_dashes(data)
                        subfields.append((code, data))
                    if fields:
                        subfields.append(('9', 'refextract'))
                else:
                    subfields.append(('m', textref))
                if label:
                    # Strip brackets/dots from the citation label.
                    label = re.sub("[\[\].)]", "", label)
                    subfields.append(('o', label))
                if subfields:
                    record_add_field(rec, '999', ind1='C', ind2='5',
                                     subfields=subfields)
            else:
                # Structured reference: map each parsed element onto the
                # corresponding 999C5 subfield.
                if doi:
                    subfields.append(('a', doi))
                for author in authors:
                    subfields.append(('h', author))
                if ext_link:
                    ext_link = fix_dashes(ext_link)
                    subfields.append(('r', ext_link))
                if title:
                    subfields.append(('t', title))
                elif textref:
                    subfields.append(('m', textref))
                if publisher:
                    subfields.append(('p', publisher))
                if volume:
                    subfields.append(('v', volume))
                if year:
                    subfields.append(('y', year))
                if comment:
                    subfields.append(('m', comment))
                for editor in editors:
                    subfields.append(('e', editor))
                if book_title:
                    subfields.append(('q', book_title))
                if label:
                    label = re.sub("[\[\].)]", "", label)
                    subfields.append(('o', label))
                if journal:
                    # Build the "journal,volume,page" pubnote in 's'.
                    # NOTE(review): self.journal_mappings is only set in
                    # CONSYN mode -- confirm this path is CONSYN-only.
                    journal, vol = fix_journal_name(journal,
                                                    self.journal_mappings)
                    volume = vol + volume
                    if volume and page:
                        journal = journal + "," + volume + "," + page
                        subfields.append(('s', journal))
                    elif volume:
                        journal = journal + "," + volume
                        subfields.append(('s', journal))
                    else:
                        subfields.append(('s', journal))
                if textref:
                    subfields.append(('m', textref))
                if subfields:
                    record_add_field(rec, '999', ind1='C', ind2='5',
                                     subfields=subfields)
def _build_doi_mapping(self):
self._dois = {}
for path in self._found_issues:
xml_doc = parse(open(join(path, "resolved_issue.xml")))
jid = get_value_in_tag(xml_doc, "jid")
journal = CFG_ELSEVIER_JID_MAP.get(jid, jid)
issn = get_value_in_tag(xml_doc, "ce:issn")
volume = get_value_in_tag(xml_doc, "vol-first")
issue = get_value_in_tag(xml_doc, "iss-first")
year = get_value_in_tag(xml_doc, "start-date")[:4]
start_date = get_value_in_tag(xml_doc, "start-date")
if len(start_date) is 8:
start_date = time.strftime(
'%Y-%m-%d', time.strptime(start_date, '%Y%m%d'))
elif len(start_date) is 6:
start_date = time.strftime(
'%Y-%m', time.strptime(start_date, '%Y%m'))
for item in xml_doc.getElementsByTagName("ce:include-item"):
doi = get_value_in_tag(item, "ce:doi")
first_page = get_value_in_tag(item, "ce:first-page")
last_page = get_value_in_tag(item, "ce:last-page")
self._dois[doi] = (journal, issn, volume, issue,
first_page, last_page, year, start_date)
def _get_doi(self, xml_doc):
try:
return get_value_in_tag(xml_doc, "ce:doi")
except Exception:
print("Can't find doi", file=sys.stderr)
def get_title(self, xml_doc):
try:
return get_value_in_tag(xml_doc, "ce:title")
except Exception:
print("Can't find title", file=sys.stderr)
def get_doctype(self, xml_doc):
doctype = xml_doc.getElementsByTagName('cja:converted-article')
if not doctype:
doctype = xml_doc.getElementsByTagName('ja:article')
if not doctype:
doctype = xml_doc.getElementsByTagName('ja:simple-article')
try:
doctype = doctype[0].getAttribute('docsubtype')
except IndexError:
print('Cannot find doctype!!!')
return ''
return doctype
def get_abstract(self, xml_doc):
try:
abstract_sec = xml_doc.getElementsByTagName("ce:abstract-sec")[0]
return get_value_in_tag(abstract_sec, "ce:simple-para")
except Exception:
print("Can't find abstract", file=sys.stderr)
def get_keywords(self, xml_doc):
head = xml_doc.getElementsByTagName("ja:head")
if not head:
head = xml_doc.getElementsByTagName("cja:head")
if not head:
keywords = xml_doc.getElementsByTagName("ce:keyword")
else:
keywords = head[0].getElementsByTagName("ce:keyword")
return [get_value_in_tag(keyword, "ce:text")
for keyword in keywords
if get_value_in_tag(keyword, "ce:text")]
def get_copyright(self, xml_doc):
try:
copyright = get_value_in_tag(xml_doc, "ce:copyright")
if not copyright:
copyright = get_value_in_tag(xml_doc, "prism:copyright")
return copyright
except Exception:
print("Can't find copyright", file=sys.stderr)
def get_ref_link(self, xml_doc, name):
links = xml_doc.getElementsByTagName('ce:inter-ref')
ret = None
for link in links:
if name in link.getAttribute("xlink:href").encode('utf-8'):
ret = xml_to_text(link).strip()
return ret
    def _author_dic_from_xml(self, author):
        """Build an author dict from a ce:author element.

        Keys are added only when present in the XML: surname, given_name,
        initials, orcid, email, cross_ref (list of affiliation ids).
        """
        tmp = {}
        surname = get_value_in_tag(author, "ce:surname")
        if surname:
            tmp["surname"] = surname
        given_name = get_value_in_tag(author, "ce:given-name")
        if given_name:
            tmp["given_name"] = given_name
        initials = get_value_in_tag(author, "ce:initials")
        if initials:
            tmp["initials"] = initials
        orcid = author.getAttribute('orcid').encode('utf-8')
        if orcid:
            tmp["orcid"] = orcid
        emails = author.getElementsByTagName("ce:e-address")
        for email in emails:
            # Keep only plain e-mail addresses; the type attribute may be
            # empty or identify another address kind (e.g. a URL).
            if email.getAttribute("type").encode('utf-8') in ('email', ''):
                tmp["email"] = xml_to_text(email)
                break
        cross_refs = author.getElementsByTagName("ce:cross-ref")
        if cross_refs:
            # refids link the author to ce:affiliation elements by id.
            tmp["cross_ref"] = []
            for cross_ref in cross_refs:
                tmp["cross_ref"].append(
                    cross_ref.getAttribute("refid").encode('utf-8'))
        return tmp
    def _affiliation_from_sa_field(self, affiliation):
        """Return the affiliation text for a ce:affiliation element.

        Prefers the structured sa:affiliation content; falls back to the
        free-text ce:textfn with any leading numeric label stripped.

        :raises IndexError: when neither field yields a value -- the
            caller (_find_affiliations) relies on this as the
            "affiliation missing" signal.
        """
        sa_affiliation = affiliation.getElementsByTagName('sa:affiliation')
        if sa_affiliation:
            return xml_to_text(sa_affiliation[0], ', ')
        else:
            # Drop a leading enumeration label such as "1 ".
            affiliation = re.sub(r'^(\d+\ ?)',"",get_value_in_tag(affiliation, "ce:textfn"))
            if affiliation:
                return affiliation
            else:
                raise IndexError
def _find_affiliations(self, xml_doc, doi):
try:
return dict((aff.getAttribute("id").encode('utf-8'),
self._affiliation_from_sa_field(aff))
for aff in xml_doc.getElementsByTagName("ce:affiliation"))
except IndexError:
message = "Elsevier paper: {0} is missing sa:affiliation."
register_exception(alert_admin=True, prefix=message.format(doi))
def _add_affiliations_to_author(self, author, affs):
if affs:
try:
author['affiliation'].extend(affs)
except KeyError:
author['affiliation'] = affs
return len(affs)
def _add_referenced_affiliation(self, author, affiliations):
affs = [affiliations[ref] for ref in author.get("cross_ref", [])
if ref in affiliations]
return self._add_affiliations_to_author(author, affs)
def _add_group_affiliation(self, author, xml_author):
affs = [get_value_in_tag(aff, "ce:textfn") for aff in
xml_author.parentNode.getElementsByTagName('ce:affiliation')]
return self._add_affiliations_to_author(author, affs)
def _get_direct_children(self, element, tagname):
affs = []
for child in element.childNodes:
try:
if child.tagName == tagname:
affs.append(child)
except AttributeError:
pass
return affs
def _add_global_affiliation(self, author, xml_author):
affs = []
# get author_group of author, already done in group_affiliation
# this goes higher in the hierarchy
parent = xml_author.parentNode
while True:
try:
parent = parent.parentNode
affs.extend([get_value_in_tag(aff, "ce:textfn") for aff
in self._get_direct_cildren(parent,
'ce:affiliation')])
except AttributeError:
break
return self._add_affiliations_to_author(author, affs)
def _add_affiliations(self, authors, xml_authors, affiliations):
for xml_author, author in zip(xml_authors, authors):
if not self._add_referenced_affiliation(author, affiliations):
self._add_group_affiliation(author, xml_author)
self._add_global_affiliation(author, xml_author)
def _add_orcids(self, authors, xml_authors):
for author, xml_author in zip(authors, xml_authors):
try:
orcid = xml_author.getAttribute('orcid')
if orcid:
author['orcid'] = 'ORCID:{0}'.format(orcid)
except IndexError:
continue
def get_authors(self, xml_doc):
xml_authors = xml_doc.getElementsByTagName("ce:author")
authors = [self._author_dic_from_xml(author) for author
in xml_authors]
doi = self._get_doi(xml_doc)
self._add_affiliations(authors, xml_authors,
self._find_affiliations(xml_doc, doi))
self._add_orcids(authors, xml_authors)
return authors
def get_publication_information(self, xml_doc, path=''):
if self.CONSYN:
publication = get_value_in_tag(xml_doc, "prism:publicationName")
doi = get_value_in_tag(xml_doc, "prism:doi")
issn = get_value_in_tag(xml_doc, "prism:issn")
issue = get_value_in_tag(xml_doc, "prism:number")
first_page = get_value_in_tag(xml_doc, "prism:startingPage")
last_page = get_value_in_tag(xml_doc, "prism:endingPage")
journal = publication.split(",")[0]
journal, volume = fix_journal_name(journal, self.journal_mappings)
try:
vol = publication.split(",")[1].strip()
if vol.startswith("Section"):
vol = vol[7:].strip()
if vol and not volume:
volume = vol
except IndexError:
pass
vol = get_value_in_tag(xml_doc, "prism:volume")
if vol is "":
# if volume is not present try to harvest it
try:
session = requests.session()
url = 'http://www.sciencedirect.com/science/article/pii'\
+ path.split('/')[-1]
r = session.get(url)
parsed_html = BeautifulSoup(r.text)
info = parsed_html.body.find(
'p', attrs={'class': 'volIssue'}).text.split()
for s in info:
if unicode(s).find(u'\xe2') > 0:
first_page = s.rsplit(u'\xe2')[0]
last_page = s.rsplit(u'\x93')[1]
if info[1].lower() != 'online':
vol = info[1][:-1]
except:
pass
if vol:
volume += vol
start_date = self.get_publication_date(xml_doc)
year = start_date.split("-")[0]
doi = get_value_in_tag(xml_doc, "ce:doi")
return (journal, issn, volume, issue, first_page,
last_page, year, start_date, doi)
else:
doi = self._get_doi(xml_doc)
try:
return self._dois[doi] + (doi, )
except KeyError:
return ('', '', '', '', '', '', '', '', doi)
def get_publication_date(self, xml_doc):
"""Return the best effort start_date."""
start_date = get_value_in_tag(xml_doc, "prism:coverDate")
if not start_date:
start_date = get_value_in_tag(xml_doc, "prism:coverDisplayDate")
if not start_date:
start_date = get_value_in_tag(xml_doc, 'oa:openAccessEffective')
if start_date:
start_date = datetime.datetime.strptime(
start_date, "%Y-%m-%dT%H:%M:%SZ"
)
return start_date.strftime("%Y-%m-%d")
import dateutil.parser
try:
date = dateutil.parser.parse(start_date)
except ValueError:
return ''
# Special case where we ignore the deduced day form dateutil
# in case it was not given in the first place.
if len(start_date.split(" ")) == 3:
return date.strftime("%Y-%m-%d")
else:
return date.strftime("%Y-%m")
else:
if len(start_date) is 8:
start_date = time.strftime(
'%Y-%m-%d', time.strptime(start_date, '%Y%m%d'))
elif len(start_date) is 6:
start_date = time.strftime(
'%Y-%m', time.strptime(start_date, '%Y%m'))
return start_date
    def _get_ref(self, ref, label):
        """Parse one bibliography reference element into the 16-tuple
        consumed by _add_references.

        Returns (label, authors, doi, issue, page, title, volume, year,
        textref, ext_link, isjournal, comment, journal, publisher,
        editors, book_title). Journal articles, edited books, conference
        proceedings and plain books are each mapped slightly differently.
        """
        doi = get_value_in_tag(ref, "ce:doi")
        page = get_value_in_tag(ref, "sb:first-page")
        issue = get_value_in_tag(ref, "sb:issue")
        title = get_value_in_tag(ref, "sb:maintitle")
        volume = get_value_in_tag(ref, "sb:volume-nr")
        tmp_issues = ref.getElementsByTagName('sb:issue')
        if tmp_issues:
            # For journal references the year lives on the issue element.
            year = get_value_in_tag(tmp_issues[0], "sb:date")
        else:
            year = ''
        textref = ref.getElementsByTagName("ce:textref")
        if textref:
            textref = xml_to_text(textref[0])
        ext_link = format_arxiv_id(self.get_ref_link(ref, 'arxiv'))
        authors = []
        for author in ref.getElementsByTagName("sb:author"):
            given_name = get_value_in_tag(author, "ce:given-name")
            surname = get_value_in_tag(author, "ce:surname")
            if given_name:
                name = "%s, %s" % (surname, given_name)
            else:
                name = surname
            authors.append(name)
        if ext_link and ext_link.lower().startswith('arxiv'):
            # check if the identifier contains
            # digits seperated by dot
            regex = r'\d*\.\d*'
            if not re.search(regex, ext_link):
                # Old-style identifier: drop the 'arXiv:' prefix.
                ext_link = ext_link[6:]
        comment = get_value_in_tag(ref, "sb:comment")
        links = []
        for link in ref.getElementsByTagName("ce:inter-ref"):
            links.append(xml_to_text(link))
        # The contribution title (if any) supersedes sb:maintitle.
        title = ""
        try:
            container = ref.getElementsByTagName("sb:contribution")[0]
            title = container.getElementsByTagName("sb:maintitle")[0]
            title = xml_to_text(title)
        except IndexError:
            title = ''
        except TypeError:
            title = ''
        isjournal = ref.getElementsByTagName("sb:issue")
        journal = ""
        if isjournal:
            isjournal = True
            if not page:
                # Some references keep the page in the comment field.
                page = comment
            container = ref.getElementsByTagName("sb:issue")[0]
            journal = get_value_in_tag(container, "sb:maintitle")
        edited_book = ref.getElementsByTagName("sb:edited-book")
        editors = []
        book_title = ""
        publisher = ""
        if edited_book:
            # treat as a journal
            if ref.getElementsByTagName("sb:book-series"):
                container = ref.getElementsByTagName("sb:book-series")[0]
                journal = get_value_in_tag(container, "sb:maintitle")
                year = get_value_in_tag(ref, "sb:date")
                isjournal = True
            # conference
            elif ref.getElementsByTagName("sb:conference"):
                container = ref.getElementsByTagName("sb:edited-book")[0]
                maintitle = get_value_in_tag(container, "sb:maintitle")
                conference = get_value_in_tag(
                    container, "sb:conference")
                date = get_value_in_tag(container, "sb:date")
                # use this variable in order to get in the 'm' field
                publisher = maintitle + ", " + conference + ", " + date
            else:
                container = ref.getElementsByTagName(
                    "sb:edited-book")[0]
                if ref.getElementsByTagName("sb:editors"):
                    for editor in ref.getElementsByTagName("sb:editor"):
                        surname = get_value_in_tag(editor, "ce:surname")
                        firstname = get_value_in_tag(editor, "ce:given-name")
                        editors.append("%s,%s" % (surname, firstname))
                if title:
                    # Contribution title present: the book title goes in 'q'.
                    book_title = get_value_in_tag(
                        container, "sb:maintitle")
                else:
                    title = get_value_in_tag(container, "sb:maintitle")
                year = get_value_in_tag(container, "sb:date")
                if ref.getElementsByTagName("sb:publisher"):
                    container = ref.getElementsByTagName("sb:publisher")[0]
                    location = get_value_in_tag(container, "sb:location")
                    publisher = get_value_in_tag(container, "sb:name")
                    if location:
                        publisher = location + ": " + publisher
        if ref.getElementsByTagName("sb:book"):
            if ref.getElementsByTagName("sb:book-series"):
                # Append the series title and volume to the book title.
                book_series = ref.getElementsByTagName(
                    "sb:book-series")[0]
                title += ", " + \
                    get_value_in_tag(book_series, "sb:maintitle")
                title += ", " + \
                    get_value_in_tag(book_series, "sb:volume-nr")
            publisher = get_value_in_tag(ref, "sb:publisher")
        if not year:
            year = get_value_in_tag(ref, "sb:date")
        # Keep only the digits of the year, e.g. '(2004)' -> '2004'.
        year = re.sub(r'\D', '', year)
        return (label, authors, doi, issue, page, title, volume,
                year, textref, ext_link, isjournal, comment, journal,
                publisher, editors, book_title)
def get_references(self, xml_doc):
for ref in xml_doc.getElementsByTagName("ce:bib-reference"):
label = get_value_in_tag(ref, "ce:label")
innerrefs = ref.getElementsByTagName("sb:reference")
if not innerrefs:
yield self._get_ref(ref, label)
for inner in innerrefs:
yield self._get_ref(inner, label)
def get_article_journal(self, xml_doc):
return CFG_ELSEVIER_JID_MAP[get_value_in_tag(xml_doc, "jid")]
def get_article(self, path):
if path.endswith('.xml'):
data_file = path
else:
data_file = open(join(path, "resolved_main.xml"))
return parse(data_file)
def get_elsevier_version(self, name):
try:
ret = name[0:5]
if ret[4] is "A":
ret = ret + "B"
return ret
except Exception:
raise
    def get_pdfa_record(self, path=None):
        """Build a MARCXML upload attaching the PDF/A from a VTEX package
        to the (usually pre-existing) record with the same DOI.

        :param path: article directory containing the VTEX delivery.
        :returns: MARCXML string ready for bibupload --correct.
        """
        from invenio.search_engine import perform_request_search
        xml_doc = self.get_article(path)
        rec = create_record()
        # Only the DOI (last tuple element) is needed here.
        dummy, dummy, dummy, dummy, dummy, dummy, dummy,\
            dummy, doi = self.get_publication_information(xml_doc)
        recid = perform_request_search(p='0247_a:"%s" AND NOT 980:"DELETED"' % (doi,))
        if recid:
            # Existing record: attach by record id (001).
            record_add_field(rec, '001', controlfield_value=recid[0])
        else:
            record_add_field(rec, '024', ind1='7', subfields=[('a', doi),
                                                              ('2', 'DOI')])
            message = ('Adding PDF/A. No paper with this DOI: '
                       '%s. Trying to add it anyway.') % (doi,)
            self.logger.error(message)
        try:
            if exists(join(path, 'main_a-2b.pdf')):
                # Proper PDF/A variant delivered by VTEX.
                record_add_field(
                    rec, 'FFT', subfields=[('a', join(path, 'main_a-2b.pdf')),
                                           ('n', 'main'),
                                           ('f', '.pdf;pdfa')])
                self.logger.debug('Adding PDF/A to record: %s' % (doi,))
            elif exists(join(path, 'main.pdf')):
                # Fall back to the plain PDF.
                record_add_field(
                    rec, 'FFT', subfields=[('a', join(path, 'main.pdf'))])
                message = 'No PDF/A in VTEX package for record: ' + doi
                self.logger.debug(message)
            else:
                message = "Record %s doesn't contain PDF file." % (doi,)
                raise MissingFFTError(message)
        except MissingFFTError:
            message = "Elsevier paper: %s is missing PDF." % (doi,)
            register_exception(alert_admin=True, prefix=message)
            self.logger.warning(message)
        ## copy other formats to bibupload file
        if recid:
            from invenio.bibdocfile import BibRecDocs
            record = BibRecDocs(recid[0])
            for bibfile in record.list_latest_files():
                if bibfile.get_format() != '.pdf;pdfa':
                    record_add_field(rec,
                                     'FFT',
                                     subfields=[('a', bibfile.get_full_path()),
                                                ('n', bibfile.get_name()),
                                                ('f', bibfile.get_format())]
                                     )
        return record_xml_output(rec)
def get_license(self, xml_doc):
license = ''
license_url = ''
for tag in xml_doc.getElementsByTagName('oa:openAccessInformation'):
license_url = get_value_in_tag(tag, 'oa:userLicense')
if license_url.startswith('http://creativecommons.org/licenses/by/3.0'):
license = 'CC-BY-3.0'
return license, license_url
def get_record(self, path=None, no_pdf=False,
test=False, refextract_callback=None):
"""Convert a record to MARCXML format.
:param path: path to a record.
:type path: string
:param test: flag to determine if it is a test call.
:type test: bool
:param refextract_callback: callback to be used to extract
unstructured references. It should
return a marcxml formated string
of the reference.
:type refextract_callback: callable
:returns: marcxml formated string.
"""
xml_doc = self.get_article(path)
rec = create_record()
title = self.get_title(xml_doc)
if title:
record_add_field(rec, '245', subfields=[('a', title)])
(journal, dummy, volume, issue, first_page, last_page, year,
start_date, doi) = self.get_publication_information(xml_doc, path)
if not journal:
journal = self.get_article_journal(xml_doc)
if start_date:
record_add_field(rec, '260', subfields=[('c', start_date),
('t', 'published')])
else:
record_add_field(
rec, '260', subfields=[('c', time.strftime('%Y-%m-%d'))])
if doi:
record_add_field(rec, '024', ind1='7', subfields=[('a', doi),
('2', 'DOI')])
license, license_url = self.get_license(xml_doc)
if license and license_url:
record_add_field(rec, '540', subfields=[('a', license),
('u', license_url)])
elif license_url:
record_add_field(rec, '540', subfields=[('u', license_url)])
self.logger.info("Creating record: %s %s" % (path, doi))
authors = self.get_authors(xml_doc)
first_author = True
for author in authors:
author_name = (author['surname'], author.get(
'given_name') or author.get('initials'))
subfields = [('a', '%s, %s' % author_name)]
if 'orcid' in author:
subfields.append(('j', author['orcid']))
if 'affiliation' in author:
for aff in author["affiliation"]:
subfields.append(('v', aff))
if self.extract_nations:
add_nations_field(subfields)
if author.get('email'):
subfields.append(('m', author['email']))
if first_author:
record_add_field(rec, '100', subfields=subfields)
first_author = False
else:
record_add_field(rec, '700', subfields=subfields)
abstract = self.get_abstract(xml_doc)
if abstract:
record_add_field(rec, '520', subfields=[('a', abstract),
('9', 'Elsevier')])
record_copyright = self.get_copyright(xml_doc)
if record_copyright:
record_add_field(rec, '542', subfields=[('f', record_copyright)])
keywords = self.get_keywords(xml_doc)
if self.CONSYN:
for tag in xml_doc.getElementsByTagName('ce:collaboration'):
collaboration = get_value_in_tag(tag, 'ce:text')
if collaboration:
record_add_field(rec, '710',
subfields=[('g', collaboration)])
# We add subjects also as author keywords
subjects = xml_doc.getElementsByTagName('dct:subject')
for subject in subjects:
for listitem in subject.getElementsByTagName('rdf:li'):
keyword = xml_to_text(listitem)
if keyword not in keywords:
keywords.append(keyword)
for keyword in keywords:
record_add_field(rec, '653', ind1='1',
subfields=[('a', keyword),
('9', 'author')])
journal, dummy = fix_journal_name(journal.strip(),
self.journal_mappings)
subfields = []
doctype = self.get_doctype(xml_doc)
try:
page_count = int(last_page) - int(first_page) + 1
record_add_field(rec, '300',
subfields=[('a', str(page_count))])
except ValueError: # do nothing
pass
if doctype == 'err':
subfields.append(('m', 'Erratum'))
elif doctype == 'add':
subfields.append(('m', 'Addendum'))
elif doctype == 'pub':
subfields.append(('m', 'Publisher Note'))
elif doctype == 'rev':
record_add_field(rec, '980', subfields=[('a', 'Review')])
if journal:
subfields.append(('p', journal))
if first_page and last_page:
subfields.append(('c', '%s-%s' %
(first_page, last_page)))
elif first_page:
subfields.append(('c', first_page))
if volume:
subfields.append(('v', volume))
if year:
subfields.append(('y', year))
record_add_field(rec, '773', subfields=subfields)
if not test:
if license:
url = 'http://www.sciencedirect.com/science/article/pii/'\
+ path.split('/')[-1][:-4]
record_add_field(rec, '856', ind1='4',
subfields=[('u', url),
('y', 'Elsevier server')])
record_add_field(rec, 'FFT', subfields=[('a', path),
('t', 'INSPIRE-PUBLIC'),
('d', 'Fulltext')])
else:
record_add_field(rec, 'FFT', subfields=[('a', path),
('t', 'Elsevier'),
('o', 'HIDDEN')])
record_add_field(rec, '980', subfields=[('a', 'HEP')])
record_add_field(rec, '980', subfields=[('a', 'Citeable')])
record_add_field(rec, '980', subfields=[('a', 'Published')])
self._add_references(xml_doc, rec, refextract_callback)
else:
licence = 'http://creativecommons.org/licenses/by/3.0/'
record_add_field(rec,
'540',
subfields=[('a', 'CC-BY-3.0'), ('u', licence)])
if keywords:
for keyword in keywords:
record_add_field(
rec, '653', ind1='1', subfields=[('a', keyword),
('9', 'author')])
pages = ''
if first_page and last_page:
pages = '{0}-{1}'.format(first_page, last_page)
elif first_page:
pages = first_page
subfields = filter(lambda x: x[1] and x[1] != '-', [('p', journal),
('v', volume),
('n', issue),
('c', pages),
('y', year)])
record_add_field(rec, '773', subfields=subfields)
if not no_pdf:
from invenio.search_engine import perform_request_search
query = '0247_a:"%s" AND NOT 980:DELETED"' % (doi,)
prev_version = perform_request_search(p=query)
old_pdf = False
if prev_version:
from invenio.bibdocfile import BibRecDocs
prev_rec = BibRecDocs(prev_version[0])
try:
pdf_path = prev_rec.get_bibdoc('main')
pdf_path = pdf_path.get_file(
".pdf;pdfa", exact_docformat=True)
pdf_path = pdf_path.fullpath
old_pdf = True
record_add_field(rec, 'FFT',
subfields=[('a', pdf_path),
('n', 'main'),
('f', '.pdf;pdfa')])
message = ('Leaving previously delivered PDF/A for: '
+ doi)
self.logger.info(message)
except:
pass
try:
if exists(join(path, 'main_a-2b.pdf')):
pdf_path = join(path, 'main_a-2b.pdf')
record_add_field(rec, 'FFT',
subfields=[('a', pdf_path),
('n', 'main'),
('f', '.pdf;pdfa')])
self.logger.debug('Adding PDF/A to record: %s'
% (doi,))
elif exists(join(path, 'main.pdf')):
pdf_path = join(path, 'main.pdf')
record_add_field(rec, 'FFT', subfields=[('a', pdf_path)])
else:
if not old_pdf:
message = "Record " + doi
message += " doesn't contain PDF file."
self.logger.warning(message)
raise MissingFFTError(message)
except MissingFFTError:
message = "Elsevier paper: %s is missing PDF." % (doi,)
register_exception(alert_admin=True, prefix=message)
version = self.get_elsevier_version(find_package_name(path))
record_add_field(rec, '583', subfields=[('l', version)])
xml_path = join(path, 'main.xml')
record_add_field(rec, 'FFT', subfields=[('a', xml_path)])
record_add_field(rec, '980', subfields=[('a', 'SCOAP3'),
('b', 'Elsevier')])
try:
return record_xml_output(rec)
except UnicodeDecodeError:
message = "Found a bad char in the file for the article " + doi
sys.stderr.write(message)
return ""
    def bibupload_it(self):
        """Queue bibupload tasks for all harvested articles.

        Non-VTEX articles are converted to full MARCXML records and
        submitted with -i -r (insert/replace); VTEX packages carry only
        a PDF/A and are submitted separately with -c (correct).
        """
        from invenio.bibtask import task_low_level_submission
        print(self.found_articles)
        if self.found_articles:
            if [x for x in self.found_articles if "vtex" not in x]:
                self.logger.debug("Preparing bibupload.")
                # Collect all records into one <collection> on shared disk.
                fd, name = mkstemp(suffix='.xml',
                                   prefix='bibupload_scoap3_',
                                   dir=CFG_TMPSHAREDDIR)
                out = fdopen(fd, 'w')
                print("<collection>", file=out)
                for i, path in enumerate(self.found_articles):
                    if "vtex" not in path:
                        print(self.get_record(path),
                              file=out)
                        print(path, i + 1, "out of", len(self.found_articles))
                        xml_doc = self.get_article(path)
                        doi = self._get_doi(xml_doc)
                        # Remember which delivery package each DOI came from.
                        package_name = filter(lambda x: 'cern' in x.lower() or 'vtex' in x.lower(), path.split('/'))
                        if package_name:
                            self.doi_package_name_mapping.append((package_name[0], doi))
                print("</collection>", file=out)
                out.close()
                task_low_level_submission(
                    "bibupload", "admin", "-N", "Elsevier", "-i", "-r", name)
            if [x for x in self.found_articles if "vtex" in x]:
                # for VTEX files with PDF/A
                self.logger.debug("Preparing bibupload for PDF/As.")
                fd_vtex, name_vtex = mkstemp(
                    suffix='.xml', prefix='bibupload_scoap3_',
                    dir=CFG_TMPSHAREDDIR)
                out = fdopen(fd_vtex, 'w')
                print("<collection>", file=out)
                # enumerate remember progress of previous one
                for i, path in enumerate(self.found_articles):
                    if "vtex" in path:
                        print(self.get_pdfa_record(path), file=out)
                        print(path, i + 1, "out of", len(self.found_articles))
                print("</collection>", file=out)
                out.close()
                task_low_level_submission("bibupload", "admin", "-N",
                                          "Elsevier:VTEX", "-c", name_vtex)
| jalavik/harvesting-kit | harvestingkit/elsevier_package.py | Python | gpl-2.0 | 49,386 | [
"VisIt"
] | 08b8964cd5d464fda0757ec3db7ad52c9626c5bec49e3ae7111126cf91bfe4a4 |
# Django
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
# Tower
from awx.conf import fields, register
from awx.conf import settings_registry
# Define a conf.py file within your app and register each setting similarly to
# the example below. Any field class from Django REST Framework or subclass
# thereof can be used for validation/conversion of the setting. All keyword
# arguments to the register function (except field_class, category,
# category_slug, depends_on, placeholder) will be used to initialize
# the field_class.
# Example writable setting: choose which cowsay template Ansible uses.
# ChoiceField restricts the stored value to one of the slugs below.
register(
    'ANSIBLE_COW_SELECTION',
    field_class=fields.ChoiceField,
    choices=[
        ('bud-frogs', _('Bud Frogs')),
        ('bunny', _('Bunny')),
        ('cheese', _('Cheese')),
        ('daemon', _('Daemon')),
        ('default', _('Default Cow')),
        ('dragon', _('Dragon')),
        ('elephant-in-snake', _('Elephant in Snake')),
        ('elephant', _('Elephant')),
        ('eyes', _('Eyes')),
        ('hellokitty', _('Hello Kitty')),
        ('kitty', _('Kitty')),
        ('luke-koala', _('Luke Koala')),
        ('meow', _('Meow')),
        ('milk', _('Milk')),
        ('moofasa', _('Moofasa')),
        ('moose', _('Moose')),
        ('ren', _('Ren')),
        ('sheep', _('Sheep')),
        ('small', _('Small Cow')),
        ('stegosaurus', _('Stegosaurus')),
        ('stimpy', _('Stimpy')),
        ('supermilker', _('Super Milker')),
        ('three-eyes', _('Three Eyes')),
        ('turkey', _('Turkey')),
        ('turtle', _('Turtle')),
        ('tux', _('Tux')),
        ('udder', _('Udder')),
        ('vader-koala', _('Vader Koala')),
        ('vader', _('Vader')),
        ('www', _('WWW')),
    ],
    default='default',
    label=_('Cow Selection'),
    help_text=_('Select which cow to use with cowsay when running jobs.'),
    category=_('Cows'),
    # Optional; category_slug will be slugified version of category if not
    # explicitly provided.
    category_slug='cows',
)
def _get_read_only_ansible_cow_selection_default():
    """Compute the default for the read-only example setting.

    Mirrors the live value of ``ANSIBLE_COW_SELECTION`` from Django
    settings, falling back to a placeholder string when it is unset.
    """
    fallback = 'No default cow!'
    return getattr(settings, 'ANSIBLE_COW_SELECTION', fallback)
# Example read-only setting whose value is derived from another setting.
register(
    'READONLY_ANSIBLE_COW_SELECTION',
    field_class=fields.CharField,
    # read_only must be set via kwargs even if field_class sets it.
    read_only=True,
    # default can be a callable to dynamically compute the value; should be in
    # the plain JSON format stored in the DB and used in the API.
    default=_get_read_only_ansible_cow_selection_default,
    label=_('Example Read-Only Setting'),
    help_text=_('Example setting that cannot be changed.'),
    category=_('Cows'),
    category_slug='cows',
    # Optional; list of other settings this read-only setting depends on. When
    # the other settings change, the cached value for this setting will be
    # cleared to require it to be recomputed.
    depends_on=['ANSIBLE_COW_SELECTION'],
    # Optional; field is stored encrypted in the database and only $encrypted$
    # is returned via the API.
    encrypted=True,
)
# Example per-user setting (category_slug='user' scopes it to each user).
register(
    'EXAMPLE_USER_SETTING',
    field_class=fields.CharField,
    allow_blank=True,
    label=_('Example Setting'),
    help_text=_('Example setting which can be different for each user.'),
    category=_('User'),
    category_slug='user',
    default='',
)
# Unregister the example settings above.
# (This module is documentation-by-example only; nothing stays registered.)
settings_registry.unregister('ANSIBLE_COW_SELECTION')
settings_registry.unregister('READONLY_ANSIBLE_COW_SELECTION')
settings_registry.unregister('EXAMPLE_USER_SETTING')
| GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx/conf/conf.py | Python | apache-2.0 | 3,506 | [
"MOOSE"
] | 092025adf9fec34c20130f030e36b9e1c89168157a38bcc8b96e53417c06551b |
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with the sprotXX.dat file from
SwissProt.
http://www.expasy.ch/sprot/sprot-top.html
Tested with:
Release 37, Release 38, Release 39
Limited testing with:
Release 51, 54
Classes:
Record Holds SwissProt data.
Reference Holds reference data from a SwissProt entry.
Iterator Iterates over entries in a SwissProt file.
Dictionary Accesses a SwissProt file using a dictionary interface.
RecordParser Parses a SwissProt record into a Record object.
SequenceParser Parses a SwissProt record into a SeqRecord object.
_Scanner Scans SwissProt-formatted data.
_RecordConsumer Consumes SwissProt data to a SProt.Record object.
_SequenceConsumer Consumes SwissProt data to a SeqRecord object.
Functions:
index_file Index a SwissProt file for a Dictionary.
"""
from types import *
import os
from Bio import File
from Bio import Index
from Bio import Alphabet
from Bio import Seq
from Bio import SeqRecord
from Bio.ParserSupport import *
# Characters stripped from the ends of flat-file fields: whitespace plus
# the trailing punctuation (., ; ,) that terminates SwissProt line values.
_CHOMP = " \n\r\t.,;" #whitespace and trailing punctuation
class Record:
    """Holds information from a SwissProt record.

    Members:
    entry_name        Name of this entry, e.g. RL1_ECOLI.
    data_class        Either 'STANDARD' or 'PRELIMINARY'.
    molecule_type     Type of molecule, 'PRT',
    sequence_length   Number of residues.
    accessions        List of the accession numbers, e.g. ['P00321']
    created           A tuple of (date, release).
    sequence_update   A tuple of (date, release).
    annotation_update A tuple of (date, release).
    description       Free-format description.
    gene_name         Gene name.  See userman.txt for description.
    organism          The source of the sequence.
    organelle         The origin of the sequence.
    organism_classification  The taxonomy classification.  List of strings.
    taxonomy_id       A list of NCBI taxonomy id's.
    host_organism     A list of NCBI taxonomy id's of the hosts of a virus.
    references        List of Reference objects.
    comments          List of strings.
    cross_references  List of tuples (db, id1[, id2][, id3]).
    keywords          List of the keywords.
    features          List of tuples (key name, from, to, description).
                      from and to can be either integers for the residue
                      numbers, '<', '>', or '?'
    seqinfo           tuple of (length, molecular weight, CRC32 value)
    sequence          The sequence.
    """
    def __init__(self):
        # Scalar fields: unknown (None) until the parser fills them in.
        for name in ('entry_name', 'data_class', 'molecule_type',
                     'sequence_length', 'created', 'sequence_update',
                     'annotation_update', 'seqinfo'):
            setattr(self, name, None)
        # Free-text fields accumulated line by line while parsing.
        for name in ('description', 'gene_name', 'organism',
                     'organelle', 'sequence'):
            setattr(self, name, '')
        # List-valued fields; each instance gets its own fresh list.
        for name in ('accessions', 'organism_classification', 'taxonomy_id',
                     'host_organism', 'references', 'comments',
                     'cross_references', 'keywords', 'features'):
            setattr(self, name, [])
class Reference:
    """Holds information from 1 references in a SwissProt entry.

    Members:
    number      Number of reference in an entry.
    positions   Describes extent of work.  list of strings.
    comments    Comments.  List of (token, text).
    references  References.  List of (dbname, identifier)
    authors     The authors of the work.
    title       Title of the work.
    location    A citation for the work.
    """
    def __init__(self):
        # Reference number within the entry; set from the RN line.
        self.number = None
        # List-valued members get a fresh list per instance.
        for name in ('positions', 'comments', 'references'):
            setattr(self, name, [])
        # Free-text members are built up incrementally during parsing.
        for name in ('authors', 'title', 'location'):
            setattr(self, name, '')
class Iterator:
    """Returns one record at a time from a SwissProt file.

    Methods:
    next   Return the next record from the stream, or None.
    """
    def __init__(self, handle, parser=None):
        """__init__(self, handle, parser=None)

        Create a new iterator.  handle is a file-like object.  parser
        is an optional Parser object to change the results into another form.
        If set to None, then the raw contents of the file will be returned.
        """
        # NOTE(review): Python 2 type check; accepts real files (FileType)
        # and old-style class instances (InstanceType) only.
        if type(handle) is not FileType and type(handle) is not InstanceType:
            raise ValueError, "I expected a file handle or file-like object"
        self._uhandle = File.UndoHandle(handle)
        self._parser = parser

    def next(self):
        """next(self) -> object

        Return the next swissprot record from the file.  If no more records,
        return None.
        """
        # Collect lines up to and including the '//' record terminator.
        lines = []
        while 1:
            line = self._uhandle.readline()
            if not line:
                break
            lines.append(line)
            if line[:2] == '//':
                break
        if not lines:
            return None
        data = ''.join(lines)
        # Hand the raw text to the parser, if one was supplied.
        if self._parser is not None:
            return self._parser.parse(File.StringHandle(data))
        return data

    def __iter__(self):
        # iter(callable, sentinel): call self.next repeatedly until it
        # returns None (end of file).
        return iter(self.next, None)
class Dictionary:
    """Accesses a SwissProt file using a dictionary interface.

    The data file must first be indexed with the index_file function;
    keys are the identifiers recorded in that index.
    """
    # Reserved index key under which the path of the data file is stored.
    __filename_key = '__filename'

    def __init__(self, indexname, parser=None):
        """__init__(self, indexname, parser=None)

        Open a SwissProt Dictionary.  indexname is the name of the
        index for the dictionary.  The index should have been created
        using the index_file function.  parser is an optional Parser
        object to change the results into another form.  If set to None,
        then the raw contents of the file will be returned.
        """
        self._index = Index.Index(indexname)
        self._handle = open(self._index[self.__filename_key])
        self._parser = parser

    def __len__(self):
        return len(self._index)

    def __getitem__(self, key):
        # The index maps key -> (byte offset, length).  Use 'length'
        # rather than shadowing the builtin len().
        start, length = self._index[key]
        self._handle.seek(start)
        data = self._handle.read(length)
        if self._parser is not None:
            return self._parser.parse(File.StringHandle(data))
        return data

    def __getattr__(self, name):
        # Delegate any other attribute lookups to the underlying index.
        return getattr(self._index, name)

    def keys(self):
        # I only want to expose the keys for SwissProt.
        k = self._index.keys()
        k.remove(self.__filename_key)
        return k
class ExPASyDictionary:
"""Access SwissProt at ExPASy using a read-only dictionary interface.
"""
def __init__(self, delay=5.0, parser=None):
"""__init__(self, delay=5.0, parser=None)
Create a new Dictionary to access SwissProt. parser is an optional
parser (e.g. SProt.RecordParser) object to change the results
into another form. If set to None, then the raw contents of the
file will be returned. delay is the number of seconds to wait
between each query.
"""
import warnings
from Bio.WWW import RequestLimiter
warnings.warn("Bio.SwissProt.ExPASyDictionary is deprecated. Please use the function Bio.ExPASy.get_sprot_raw instead.",
DeprecationWarning)
self.parser = parser
self.limiter = RequestLimiter(delay)
def __len__(self):
raise NotImplementedError, "SwissProt contains lots of entries"
def clear(self):
raise NotImplementedError, "This is a read-only dictionary"
def __setitem__(self, key, item):
raise NotImplementedError, "This is a read-only dictionary"
def update(self):
raise NotImplementedError, "This is a read-only dictionary"
def copy(self):
raise NotImplementedError, "You don't need to do this..."
def keys(self):
raise NotImplementedError, "You don't really want to do this..."
def items(self):
raise NotImplementedError, "You don't really want to do this..."
def values(self):
raise NotImplementedError, "You don't really want to do this..."
def has_key(self, id):
"""has_key(self, id) -> bool"""
try:
self[id]
except KeyError:
return 0
return 1
def get(self, id, failobj=None):
try:
return self[id]
except KeyError:
return failobj
raise "How did I get here?"
def __getitem__(self, id):
"""__getitem__(self, id) -> object
Return a SwissProt entry. id is either the id or accession
for the entry. Raises a KeyError if there's an error.
"""
from Bio.WWW import ExPASy
# First, check to see if enough time has passed since my
# last query.
self.limiter.wait()
try:
handle = ExPASy.get_sprot_raw(id)
except IOError:
raise KeyError, id
if self.parser is not None:
return self.parser.parse(handle)
return handle.read()
class RecordParser(AbstractParser):
    """Parses SwissProt data into a Record object.
    """
    def __init__(self):
        # The scanner tokenises the flat file into events; the consumer
        # turns those events into a Record instance.
        self._scanner = _Scanner()
        self._consumer = _RecordConsumer()
    def parse(self, handle):
        # Feed the whole handle through the scanner, then return the
        # Record the consumer accumulated.
        self._scanner.feed(handle, self._consumer)
        return self._consumer.data
class SequenceParser(AbstractParser):
    """Parses SwissProt data into a standard SeqRecord object.
    """
    def __init__(self, alphabet = Alphabet.generic_protein):
        """Initialize a SequenceParser.

        Arguments:
        o alphabet - The alphabet to use for the generated Seq objects. If
        not supplied this will default to the generic protein alphabet.
        """
        # Same scanner as RecordParser, but the consumer builds a SeqRecord
        # instead of a SProt.Record.
        self._scanner = _Scanner()
        self._consumer = _SequenceConsumer(alphabet)
    def parse(self, handle):
        # Scan the handle and hand back the accumulated SeqRecord.
        self._scanner.feed(handle, self._consumer)
        return self._consumer.data
class _Scanner:
    """Scans SwissProt-formatted data.

    Drives a Consumer: for each recognised line type (ID, AC, DT, ...)
    it calls the matching consumer callback with the raw line.

    Tested with:
    Release 37
    Release 38
    """
    def feed(self, handle, consumer):
        """feed(self, handle, consumer)

        Feed in SwissProt data for scanning.  handle is a file-like
        object that contains swissprot data.  consumer is a
        Consumer object that will receive events as the report is scanned.
        """
        # Wrap in an UndoHandle so we can peek ahead without consuming.
        if isinstance(handle, File.UndoHandle):
            uhandle = handle
        else:
            uhandle = File.UndoHandle(handle)
        self._scan_record(uhandle, consumer)

    def _skip_starstar(self, uhandle) :
        """Ignores any lines starting **"""
        #See Bug 2353, some files from the EBI have extra lines
        #starting "**" (two asterisks/stars), usually between the
        #features and sequence but not all the time.  They appear
        #to be unofficial automated annotations. e.g.
        #**
        #** #################    INTERNAL SECTION    ##################
        #**HA SAM; Annotated by PicoHamap 1.88; MF_01138.1; 09-NOV-2003.
        while "**" == uhandle.peekline()[:2] :
            skip = uhandle.readline()
            #print "Skipping line: %s" % skip.rstrip()

    def _scan_record(self, uhandle, consumer):
        # Run every line-type scanner in the fixed flat-file order.
        consumer.start_record()
        for fn in self._scan_fns:
            self._skip_starstar(uhandle)
            fn(self, uhandle, consumer)
            # In Release 38, ID N33_HUMAN has a DR buried within comments.
            # Check for this and do more comments, if necessary.
            # XXX handle this better
            # (im_func: Python 2 unbound-method comparison.)
            if fn is self._scan_dr.im_func:
                self._scan_cc(uhandle, consumer)
                self._scan_dr(uhandle, consumer)
        consumer.end_record()

    def _scan_line(self, line_type, uhandle, event_fn,
                   exactly_one=None, one_or_more=None, any_number=None,
                   up_to_one=None):
        # Generic line matcher: consume lines starting with line_type and
        # forward each to event_fn, with the requested cardinality.
        # Callers must set exactly one of exactly_one, one_or_more, or
        # any_number to a true value.  I do not explicitly check to
        # make sure this function is called correctly.
        # This does not guarantee any parameter safety, but I
        # like the readability.  The other strategy I tried was have
        # parameters min_lines, max_lines.
        if exactly_one or one_or_more:
            read_and_call(uhandle, event_fn, start=line_type)
        if one_or_more or any_number:
            while 1:
                if not attempt_read_and_call(uhandle, event_fn,
                                             start=line_type):
                    break
        if up_to_one:
            attempt_read_and_call(uhandle, event_fn, start=line_type)

    def _scan_id(self, uhandle, consumer):
        self._scan_line('ID', uhandle, consumer.identification, exactly_one=1)

    def _scan_ac(self, uhandle, consumer):
        # Until release 38, this used to match exactly_one.
        # However, in release 39, 1A02_HUMAN has 2 AC lines, and the
        # definition needed to be expanded.
        self._scan_line('AC', uhandle, consumer.accession, any_number=1)

    def _scan_dt(self, uhandle, consumer):
        # Created date, sequence-update date, and (optionally) the
        # annotation-update date.
        self._scan_line('DT', uhandle, consumer.date, exactly_one=1)
        self._scan_line('DT', uhandle, consumer.date, exactly_one=1)
        # IPI doesn't necessarily contain the third line about annotations
        self._scan_line('DT', uhandle, consumer.date, up_to_one=1)

    def _scan_de(self, uhandle, consumer):
        # IPI can be missing a DE line
        self._scan_line('DE', uhandle, consumer.description, any_number=1)

    def _scan_gn(self, uhandle, consumer):
        self._scan_line('GN', uhandle, consumer.gene_name, any_number=1)

    def _scan_os(self, uhandle, consumer):
        self._scan_line('OS', uhandle, consumer.organism_species,
                        one_or_more=1)

    def _scan_og(self, uhandle, consumer):
        self._scan_line('OG', uhandle, consumer.organelle, any_number=1)

    def _scan_oc(self, uhandle, consumer):
        self._scan_line('OC', uhandle, consumer.organism_classification,
                        one_or_more=1)

    def _scan_ox(self, uhandle, consumer):
        self._scan_line('OX', uhandle, consumer.taxonomy_id,
                        any_number=1)

    def _scan_oh(self, uhandle, consumer):
        # viral host organism. introduced after SwissProt 39.
        self._scan_line('OH', uhandle, consumer.organism_host, any_number=1)

    def _scan_reference(self, uhandle, consumer):
        # Loop over reference blocks; each starts with an RN line.
        while 1:
            if safe_peekline(uhandle)[:2] != 'RN':
                break
            self._scan_rn(uhandle, consumer)
            self._scan_rp(uhandle, consumer)
            self._scan_rc(uhandle, consumer)
            self._scan_rx(uhandle, consumer)
            # ws:2001-12-05 added, for record with RL before RA
            self._scan_rl(uhandle, consumer)
            self._scan_ra(uhandle, consumer)
            #EBI copy of P72010 is missing the RT line, and has one
            #of their ** lines in its place noting "** /NO TITLE."
            #See also bug 2353
            self._skip_starstar(uhandle)
            self._scan_rt(uhandle, consumer)
            self._scan_rl(uhandle, consumer)

    def _scan_rn(self, uhandle, consumer):
        self._scan_line('RN', uhandle, consumer.reference_number,
                        exactly_one=1)

    def _scan_rp(self, uhandle, consumer):
        self._scan_line('RP', uhandle, consumer.reference_position,
                        one_or_more=1)

    def _scan_rc(self, uhandle, consumer):
        self._scan_line('RC', uhandle, consumer.reference_comment,
                        any_number=1)

    def _scan_rx(self, uhandle, consumer):
        self._scan_line('RX', uhandle, consumer.reference_cross_reference,
                        any_number=1)

    def _scan_ra(self, uhandle, consumer):
        # In UniProt release 1.12 of 6/21/04, there is a new RG
        # (Reference Group) line, which references a group instead of
        # an author.  Each block must have at least 1 RA or RG line.
        self._scan_line('RA', uhandle, consumer.reference_author,
                        any_number=1)
        self._scan_line('RG', uhandle, consumer.reference_author,
                        any_number=1)
        # PRKN_HUMAN has RG lines, then RA lines.  The best solution
        # is to write code that accepts either of the line types.
        # This is the quick solution...
        self._scan_line('RA', uhandle, consumer.reference_author,
                        any_number=1)

    def _scan_rt(self, uhandle, consumer):
        self._scan_line('RT', uhandle, consumer.reference_title,
                        any_number=1)

    def _scan_rl(self, uhandle, consumer):
        # This was one_or_more, but P82909 in TrEMBL 16.0 does not
        # have one.
        self._scan_line('RL', uhandle, consumer.reference_location,
                        any_number=1)

    def _scan_cc(self, uhandle, consumer):
        self._scan_line('CC', uhandle, consumer.comment, any_number=1)

    def _scan_dr(self, uhandle, consumer):
        self._scan_line('DR', uhandle, consumer.database_cross_reference,
                        any_number=1)

    def _scan_kw(self, uhandle, consumer):
        self._scan_line('KW', uhandle, consumer.keyword, any_number=1)

    def _scan_ft(self, uhandle, consumer):
        self._scan_line('FT', uhandle, consumer.feature_table, any_number=1)

    def _scan_pe(self, uhandle, consumer):
        self._scan_line('PE', uhandle, consumer.protein_existence, any_number=1)

    def _scan_sq(self, uhandle, consumer):
        self._scan_line('SQ', uhandle, consumer.sequence_header, exactly_one=1)

    def _scan_sequence_data(self, uhandle, consumer):
        # Residue lines are indented (start with spaces) rather than tagged.
        self._scan_line('  ', uhandle, consumer.sequence_data, one_or_more=1)

    def _scan_terminator(self, uhandle, consumer):
        self._scan_line('//', uhandle, consumer.terminator, exactly_one=1)

    # Ordered list of unbound scanner methods; _scan_record calls these
    # in sequence, mirroring the flat-file line-type order.
    _scan_fns = [
        _scan_id,
        _scan_ac,
        _scan_dt,
        _scan_de,
        _scan_gn,
        _scan_os,
        _scan_og,
        _scan_oc,
        _scan_ox,
        _scan_oh,
        _scan_reference,
        _scan_cc,
        _scan_dr,
        _scan_pe,
        _scan_kw,
        _scan_ft,
        _scan_sq,
        _scan_sequence_data,
        _scan_terminator
    ]
class _RecordConsumer(AbstractConsumer):
"""Consumer that converts a SwissProt record to a Record object.
Members:
data Record with SwissProt data.
"""
def __init__(self):
self.data = None
def __repr__(self) :
return "Bio.SwissProt.SProt._RecordConsumer()"
def start_record(self):
self.data = Record()
def end_record(self):
self._clean_record(self.data)
def identification(self, line):
cols = line.split()
#Prior to release 51, included with MoleculeType:
#ID EntryName DataClass; MoleculeType; SequenceLength.
#
#Newer files lack the MoleculeType:
#ID EntryName DataClass; SequenceLength.
#
#Note that cols is split on white space, so the length
#should become two fields (number and units)
if len(cols) == 6 :
self.data.entry_name = cols[1]
self.data.data_class = cols[2].rstrip(_CHOMP) # don't want ';'
self.data.molecule_type = cols[3].rstrip(_CHOMP) # don't want ';'
self.data.sequence_length = int(cols[4])
elif len(cols) == 5 :
self.data.entry_name = cols[1]
self.data.data_class = cols[2].rstrip(_CHOMP) # don't want ';'
self.data.molecule_type = None
self.data.sequence_length = int(cols[3])
else :
#Should we print a warning an continue?
raise ValueError("ID line has unrecognised format:\n"+line)
# data class can be 'STANDARD' or 'PRELIMINARY'
# ws:2001-12-05 added IPI
# pjc:2006-11-02 added 'Reviewed' and 'Unreviewed'
if self.data.data_class not in ['STANDARD', 'PRELIMINARY', 'IPI',
'Reviewed', 'Unreviewed']:
raise ValueError, "Unrecognized data class %s in line\n%s" % \
(self.data.data_class, line)
# molecule_type should be 'PRT' for PRoTein
# Note that has been removed in recent releases (set to None)
if self.data.molecule_type is not None \
and self.data.molecule_type != 'PRT':
raise ValueError, "Unrecognized molecule type %s in line\n%s" % \
(self.data.molecule_type, line)
def accession(self, line):
cols = line[5:].rstrip(_CHOMP).strip().split(';')
for ac in cols:
if ac.strip() :
#remove any leading or trailing white space
self.data.accessions.append(ac.strip())
def date(self, line):
uprline = string.upper(line)
cols = line.rstrip().split()
if uprline.find('CREATED') >= 0 \
or uprline.find('LAST SEQUENCE UPDATE') >= 0 \
or uprline.find('LAST ANNOTATION UPDATE') >= 0:
# Old style DT line
# =================
# e.g.
# DT 01-FEB-1995 (Rel. 31, Created)
# DT 01-FEB-1995 (Rel. 31, Last sequence update)
# DT 01-OCT-2000 (Rel. 40, Last annotation update)
#
# or:
# DT 08-JAN-2002 (IPI Human rel. 2.3, Created)
# ...
# find where the version information will be located
# This is needed for when you have cases like IPI where
# the release verison is in a different spot:
# DT 08-JAN-2002 (IPI Human rel. 2.3, Created)
uprcols = uprline.split()
rel_index = -1
for index in range(len(uprcols)):
if uprcols[index].find("REL.") >= 0:
rel_index = index
assert rel_index >= 0, \
"Could not find Rel. in DT line: %s" % (line)
version_index = rel_index + 1
# get the version information
str_version = cols[version_index].rstrip(_CHOMP)
# no version number
if str_version == '':
version = 0
# dot versioned
elif str_version.find(".") >= 0:
version = str_version
# integer versioned
else:
version = int(str_version)
if uprline.find('CREATED') >= 0:
self.data.created = cols[1], version
elif uprline.find('LAST SEQUENCE UPDATE') >= 0:
self.data.sequence_update = cols[1], version
elif uprline.find( 'LAST ANNOTATION UPDATE') >= 0:
self.data.annotation_update = cols[1], version
else:
assert False, "Shouldn't reach this line!"
elif uprline.find('INTEGRATED INTO') >= 0 \
or uprline.find('SEQUENCE VERSION') >= 0 \
or uprline.find('ENTRY VERSION') >= 0:
# New style DT line
# =================
# As of UniProt Knowledgebase release 7.0 (including
# Swiss-Prot release 49.0 and TrEMBL release 32.0) the
# format of the DT lines and the version information
# in them was changed - the release number was dropped.
#
# For more information see bug 1948 and
# http://ca.expasy.org/sprot/relnotes/sp_news.html#rel7.0
#
# e.g.
# DT 01-JAN-1998, integrated into UniProtKB/Swiss-Prot.
# DT 15-OCT-2001, sequence version 3.
# DT 01-APR-2004, entry version 14.
#
#This is a new style DT line...
# The date should be in string cols[1]
# Get the version number if there is one.
# For the three DT lines above: 0, 3, 14
try:
version = int(cols[-1])
except ValueError :
version = 0
# Re-use the historical property names, even though
# the meaning has changed slighty:
if uprline.find("INTEGRATED") >= 0:
self.data.created = cols[1], version
elif uprline.find('SEQUENCE VERSION') >= 0:
self.data.sequence_update = cols[1], version
elif uprline.find( 'ENTRY VERSION') >= 0:
self.data.annotation_update = cols[1], version
else:
assert False, "Shouldn't reach this line!"
else:
raise ValueError, "I don't understand the date line %s" % line
def description(self, line):
self.data.description += line[5:]
def gene_name(self, line):
self.data.gene_name += line[5:]
def organism_species(self, line):
self.data.organism += line[5:]
def organelle(self, line):
self.data.organelle += line[5:]
def organism_classification(self, line):
line = line[5:].rstrip(_CHOMP)
cols = line.split(';')
for col in cols:
self.data.organism_classification.append(col.lstrip())
def taxonomy_id(self, line):
# The OX line is in the format:
# OX DESCRIPTION=ID[, ID]...;
# If there are too many id's to fit onto a line, then the ID's
# continue directly onto the next line, e.g.
# OX DESCRIPTION=ID[, ID]...
# OX ID[, ID]...;
# Currently, the description is always "NCBI_TaxID".
# To parse this, I need to check to see whether I'm at the
# first line. If I am, grab the description and make sure
# it's an NCBI ID. Then, grab all the id's.
line = line[5:].rstrip(_CHOMP)
index = line.find('=')
if index >= 0:
descr = line[:index]
assert descr == "NCBI_TaxID", "Unexpected taxonomy type %s" % descr
ids = line[index+1:].split(',')
else:
ids = line.split(',')
self.data.taxonomy_id.extend([id.strip() for id in ids])
def organism_host(self, line):
# Line type OH (Organism Host) for viral hosts
# same code as in taxonomy_id()
line = line[5:].rstrip(_CHOMP)
index = line.find('=')
if index >= 0:
descr = line[:index]
assert descr == "NCBI_TaxID", "Unexpected taxonomy type %s" % descr
ids = line[index+1:].split(',')
else:
ids = line.split(',')
self.data.host_organism.extend([id.strip() for id in ids])
def reference_number(self, line):
rn = line[5:].rstrip()
assert rn[0] == '[' and rn[-1] == ']', "Missing brackets %s" % rn
ref = Reference()
ref.number = int(rn[1:-1])
self.data.references.append(ref)
def reference_position(self, line):
assert self.data.references, "RP: missing RN"
self.data.references[-1].positions.append(line[5:].rstrip())
def reference_comment(self, line):
assert self.data.references, "RC: missing RN"
cols = line[5:].rstrip().split( ';')
ref = self.data.references[-1]
for col in cols:
if not col: # last column will be the empty string
continue
# The token is everything before the first '=' character.
index = col.find('=')
token, text = col[:index], col[index+1:]
# According to the spec, there should only be 1 '='
# character. However, there are too many exceptions to
# handle, so we'll ease up and allow anything after the
# first '='.
#if col == ' STRAIN=TISSUE=BRAIN':
# # from CSP_MOUSE, release 38
# token, text = "TISSUE", "BRAIN"
#elif col == ' STRAIN=NCIB 9816-4, AND STRAIN=G7 / ATCC 17485':
# # from NDOA_PSEPU, release 38
# token, text = "STRAIN", "NCIB 9816-4 AND G7 / ATCC 17485"
#elif col == ' STRAIN=ISOLATE=NO 27, ANNO 1987' or \
# col == ' STRAIN=ISOLATE=NO 27 / ANNO 1987':
# # from NU3M_BALPH, release 38, release 39
# token, text = "STRAIN", "ISOLATE NO 27, ANNO 1987"
#else:
# token, text = string.split(col, '=')
ref.comments.append((token.lstrip(), text))
def reference_cross_reference(self, line):
assert self.data.references, "RX: missing RN"
# The basic (older?) RX line is of the form:
# RX MEDLINE; 85132727.
# but there are variants of this that need to be dealt with (see below)
# CLD1_HUMAN in Release 39 and DADR_DIDMA in Release 33
# have extraneous information in the RX line. Check for
# this and chop it out of the line.
# (noticed by katel@worldpath.net)
ind = line.find('[NCBI, ExPASy, Israel, Japan]')
if ind >= 0:
line = line[:ind]
# RX lines can also be used of the form
# RX PubMed=9603189;
# reported by edvard@farmasi.uit.no
# and these can be more complicated like:
# RX MEDLINE=95385798; PubMed=7656980;
# RX PubMed=15060122; DOI=10.1136/jmg 2003.012781;
# We look for these cases first and deal with them
if line.find("=") != -1:
cols = line[2:].split("; ")
cols = [x.strip() for x in cols]
cols = [x for x in cols if x]
for col in cols:
x = col.split("=")
assert len(x) == 2, "I don't understand RX line %s" % line
key, value = x[0].rstrip(_CHOMP), x[1].rstrip(_CHOMP)
ref = self.data.references[-1].references
ref.append((key, value))
# otherwise we assume we have the type 'RX MEDLINE; 85132727.'
else:
cols = line.split()
# normally we split into the three parts
assert len(cols) == 3, "I don't understand RX line %s" % line
self.data.references[-1].references.append(
(cols[1].rstrip(_CHOMP), cols[2].rstrip(_CHOMP)))
def reference_author(self, line):
assert self.data.references, "RA: missing RN"
ref = self.data.references[-1]
ref.authors += line[5:]
def reference_title(self, line):
assert self.data.references, "RT: missing RN"
ref = self.data.references[-1]
ref.title += line[5:]
def reference_location(self, line):
assert self.data.references, "RL: missing RN"
ref = self.data.references[-1]
ref.location += line[5:]
def comment(self, line):
if line[5:8] == '-!-': # Make a new comment
self.data.comments.append(line[9:])
elif line[5:8] == ' ': # add to the previous comment
if not self.data.comments:
# TCMO_STRGA in Release 37 has comment with no topic
self.data.comments.append(line[9:])
else:
self.data.comments[-1] += line[9:]
elif line[5:8] == '---':
# If there are no comments, and it's not the closing line,
# make a new comment.
if not self.data.comments or self.data.comments[-1][:3] != '---':
self.data.comments.append(line[5:])
else:
self.data.comments[-1] += line[5:]
else: # copyright notice
self.data.comments[-1] += line[5:]
def database_cross_reference(self, line):
# From CLD1_HUMAN, Release 39:
# DR EMBL; [snip]; -. [EMBL / GenBank / DDBJ] [CoDingSequence]
# DR PRODOM [Domain structure / List of seq. sharing at least 1 domai
# DR SWISS-2DPAGE; GET REGION ON 2D PAGE.
line = line[5:]
# Remove the comments at the end of the line
i = line.find('[')
if i >= 0:
line = line[:i]
cols = line.rstrip(_CHOMP).split(';')
cols = [col.lstrip() for col in cols]
self.data.cross_references.append(tuple(cols))
def keyword(self, line):
cols = line[5:].rstrip(_CHOMP).split(';')
self.data.keywords.extend([c.lstrip() for c in cols])
def feature_table(self, line):
line = line[5:] # get rid of junk in front
name = line[0:8].rstrip()
try:
from_res = int(line[9:15])
except ValueError:
from_res = line[9:15].lstrip()
try:
to_res = int(line[16:22])
except ValueError:
to_res = line[16:22].lstrip()
description = line[29:70].rstrip()
#if there is a feature_id (FTId), store it away
if line[29:35]==r"/FTId=":
ft_id = line[35:70].rstrip()[:-1]
else:
ft_id =""
if not name: # is continuation of last one
assert not from_res and not to_res
name, from_res, to_res, old_description,old_ft_id = self.data.features[-1]
del self.data.features[-1]
description = "%s %s" % (old_description, description)
# special case -- VARSPLIC, reported by edvard@farmasi.uit.no
if name == "VARSPLIC":
description = self._fix_varsplic_sequences(description)
self.data.features.append((name, from_res, to_res, description,ft_id))
def _fix_varsplic_sequences(self, description):
"""Remove unwanted spaces in sequences.
During line carryover, the sequences in VARSPLIC can get mangled
with unwanted spaces like:
'DISSTKLQALPSHGLESIQT -> PCRATGWSPFRRSSPC LPTH'
We want to check for this case and correct it as it happens.
"""
descr_cols = description.split(" -> ")
if len(descr_cols) == 2:
first_seq = descr_cols[0]
second_seq = descr_cols[1]
extra_info = ''
# we might have more information at the end of the
# second sequence, which should be in parenthesis
extra_info_pos = second_seq.find(" (")
if extra_info_pos != -1:
extra_info = second_seq[extra_info_pos:]
second_seq = second_seq[:extra_info_pos]
# now clean spaces out of the first and second string
first_seq = first_seq.replace(" ", "")
second_seq = second_seq.replace(" ", "")
# reassemble the description
description = first_seq + " -> " + second_seq + extra_info
return description
    def protein_existence(self, line):
        """Handle a PE (protein existence) line; currently ignored."""
        #TODO - Record this information?
        pass
def sequence_header(self, line):
cols = line.split()
assert len(cols) == 8, "I don't understand SQ line %s" % line
# Do more checking here?
self.data.seqinfo = int(cols[2]), int(cols[4]), cols[6]
def sequence_data(self, line):
seq = line.replace(" ", "").rstrip()
self.data.sequence = self.data.sequence + seq
    def terminator(self, line):
        """Handle the '//' record terminator line (nothing to do)."""
        pass
#def _clean(self, line, rstrip=1):
# if rstrip:
# return string.rstrip(line[5:])
# return line[5:]
def _clean_record(self, rec):
# Remove trailing newlines
members = ['description', 'gene_name', 'organism', 'organelle']
for m in members:
attr = getattr(rec, m)
setattr(rec, m, attr.rstrip())
for ref in rec.references:
self._clean_references(ref)
def _clean_references(self, ref):
# Remove trailing newlines
members = ['authors', 'title', 'location']
for m in members:
attr = getattr(ref, m)
setattr(ref, m, attr.rstrip())
class _SequenceConsumer(AbstractConsumer):
    """Consumer that converts a SwissProt record to a SeqRecord object.

    Members:
    data        SeqRecord being built from the SwissProt data.
    alphabet    The alphabet the generated Seq objects will have.
    """
    #TODO - Cope with references as done for GenBank
    def __init__(self, alphabet = Alphabet.generic_protein):
        """Initialize a Sequence Consumer

        Arguments:
        o alphabet - The alphabet to use for the generated Seq objects. If
        not supplied this will default to the generic protein alphabet.
        """
        self.data = None
        self.alphabet = alphabet

    def start_record(self):
        """Begin a new record with an empty SeqRecord."""
        seq = Seq.Seq("", self.alphabet)
        self.data = SeqRecord.SeqRecord(seq)
        self.data.description = ""
        self.data.name = ""
        self._current_ref = None

    def end_record(self):
        """Finish the record: flush any pending reference, tidy fields."""
        if self._current_ref is not None:
            self.data.annotations['references'].append(self._current_ref)
            self._current_ref = None
        self.data.description = self.data.description.rstrip()

    def identification(self, line):
        """ID line, the entry name becomes the record name.

        Note the line still starts with the two letter line type code,
        so the entry name is the second whitespace separated field.
        """
        cols = line.split()
        self.data.name = cols[1]

    def accession(self, line):
        """AC line(s); all accessions annotated, the first used as id."""
        #Note that files can and often do contain multiple AC lines.
        ids = line[5:].strip().split(';')
        #Remove any white space
        ids = [x.strip() for x in ids if x.strip()]
        #Use the first as the ID, but record them ALL in the annotations
        try:
            self.data.annotations['accessions'].extend(ids)
        except KeyError:
            self.data.annotations['accessions'] = ids
        #Use the FIRST accession as the ID, not the first on this line!
        self.data.id = self.data.annotations['accessions'][0]

    def description(self, line):
        """DE line, accumulated (newline separated) in the description."""
        self.data.description = self.data.description + \
                                line[5:].strip() + "\n"

    def sequence_data(self, line):
        """Sequence line, appended (blanks removed) to the record's seq."""
        seq = Seq.Seq(line.replace(" ", "").rstrip(),
                      self.alphabet)
        self.data.seq = self.data.seq + seq

    def gene_name(self, line):
        """GN line, stored under the 'gene_name' annotation key."""
        #We already store the identification/accession as the records name/id
        try:
            self.data.annotations['gene_name'] += line[5:]
        except KeyError:
            self.data.annotations['gene_name'] = line[5:]

    def comment(self, line):
        """CC line, appended to the 'comment' annotation."""
        #Try and agree with SeqRecord convention from the GenBank parser,
        #which stores the comments as a long string with newlines
        #with key 'comment'
        try:
            self.data.annotations['comment'] += "\n" + line[5:]
        except KeyError:
            self.data.annotations['comment'] = line[5:]
        #TODO - Follow SwissProt conventions more closely?

    def database_cross_reference(self, line):
        """DR line, stored as DATABASE:PRIMARY strings in dbxrefs."""
        #Format of the line is described in the manual dated 04-Dec-2007 as:
        #DR  DATABASE; PRIMARY; SECONDARY[; TERTIARY][; QUATERNARY].
        #However, some older files only seem to have a single identifier:
        #DR  DATABASE; PRIMARY.
        #
        #Also must cope with things like this from Tests/SwissProt/sp007,
        #DR  PRODOM [Domain structure / List of seq. sharing at least 1 domain]
        #
        #Store these in the dbxref list, but for consistency with
        #the GenBank parser and with what BioSQL can cope with,
        #store only DATABASE_IDENTIFIER:PRIMARY_IDENTIFIER
        parts = [x.strip() for x in line[5:].strip(_CHOMP).split(";")]
        if len(parts) > 1:
            value = "%s:%s" % (parts[0], parts[1])
            #Avoid duplicate entries
            if value not in self.data.dbxrefs:
                self.data.dbxrefs.append(value)

    def date(self, line):
        """DT line, created/updated dates stored in the annotations."""
        #Fixed: the line still starts with the 'DT' line type code, so
        #the date itself is the SECOND whitespace separated field
        #(split()[0] was always just 'DT'; compare identification()
        #which takes cols[1] for the same reason).
        date_str = line.split()[1]
        #line.upper() replaces the deprecated string.upper(line)
        uprline = line.upper()
        if uprline.find('CREATED') >= 0:
            #Try and agree with SeqRecord convention from the GenBank parser,
            #which stores the submitted date as 'date'
            self.data.annotations['date'] = date_str
        elif uprline.find('LAST SEQUENCE UPDATE') >= 0:
            #There is no existing convention from the GenBank SeqRecord parser
            self.data.annotations['date_last_sequence_update'] = date_str
        elif uprline.find('LAST ANNOTATION UPDATE') >= 0:
            #There is no existing convention from the GenBank SeqRecord parser
            self.data.annotations['date_last_annotation_update'] = date_str

    def keyword(self, line):
        """KW line, keywords appended to the 'keywords' annotation list."""
        #Try and agree with SeqRecord convention from the GenBank parser,
        #which stores a list as 'keywords'
        cols = [c.strip() for c in line[5:].rstrip(_CHOMP).split(';')]
        #List comprehension rather than filter() so this stays a real
        #list under Python 3 as well
        cols = [c for c in cols if c]
        try:
            #Extend any existing list of keywords
            self.data.annotations['keywords'].extend(cols)
        except KeyError:
            #Create the list of keywords
            self.data.annotations['keywords'] = cols

    def organism_species(self, line):
        """OS line, appended to the 'organism' annotation string."""
        #Try and agree with SeqRecord convention from the GenBank parser,
        #which stores the organism as a string with key 'organism'
        data = line[5:].rstrip(_CHOMP)
        try:
            #Append to any existing data split over multiple lines
            self.data.annotations['organism'] += " " + data
        except KeyError:
            self.data.annotations['organism'] = data

    def organism_host(self, line):
        """OH line, host NCBI taxonomy ids under 'organism_host'."""
        #There is no SeqRecord convention from the GenBank parser
        data = line[5:].rstrip(_CHOMP)
        index = data.find('=')
        if index >= 0:
            descr = data[:index]
            assert descr == "NCBI_TaxID", "Unexpected taxonomy type %s" % descr
            ids = data[index+1:].split(',')
        else:
            ids = data.split(',')
        try:
            #Append to any existing data
            self.data.annotations['organism_host'].extend(ids)
        except KeyError:
            self.data.annotations['organism_host'] = ids

    def organism_classification(self, line):
        """OC line, lineage appended to the 'taxonomy' annotation list."""
        #Try and agree with SeqRecord convention from the GenBank parser,
        #which stores this taxonomy lineage as a list of strings with
        #key 'taxonomy'.
        #Note that 'ncbi_taxid' is used for the taxonomy ID (line OX)
        line = line[5:].rstrip(_CHOMP)
        cols = [col.strip() for col in line.split(';')]
        try:
            #Append to any existing data
            self.data.annotations['taxonomy'].extend(cols)
        except KeyError:
            self.data.annotations['taxonomy'] = cols

    def taxonomy_id(self, line):
        """OX line, NCBI taxon ids appended to 'ncbi_taxid'."""
        #Try and agree with SeqRecord convention expected in BioSQL
        #the NCBI taxon id with key 'ncbi_taxid'.
        #Note that 'taxonomy' is used for the taxonomy lineage
        #(held as a list of strings, line type OC)
        line = line[5:].rstrip(_CHOMP)
        index = line.find('=')
        if index >= 0:
            descr = line[:index]
            assert descr == "NCBI_TaxID", "Unexpected taxonomy type %s" % descr
            ids = line[index+1:].split(',')
        else:
            ids = line.split(',')
        try:
            #Append to any existing data
            self.data.annotations['ncbi_taxid'].extend(ids)
        except KeyError:
            self.data.annotations['ncbi_taxid'] = ids

    def reference_number(self, line):
        """RN line, reference number (start of new reference)."""
        from Bio.SeqFeature import Reference
        # if we have a current reference that hasn't been added to
        # the list of references, add it.
        if self._current_ref is not None:
            self.data.annotations['references'].append(self._current_ref)
        else:
            self.data.annotations['references'] = []
        self._current_ref = Reference()

    def reference_position(self, line):
        """RP line, reference position."""
        assert self._current_ref is not None, "RP: missing RN"
        #Should try and store this in self._current_ref.location
        #but the SwissProt locations don't match easily to the
        #format used in GenBank...
        pass

    def reference_cross_reference(self, line):
        """RX line, reference cross-references."""
        assert self._current_ref is not None, "RX: missing RN"
        # The basic (older?) RX line is of the form:
        # RX   MEDLINE; 85132727.
        # or more recently:
        # RX   MEDLINE=95385798; PubMed=7656980;
        # RX   PubMed=15060122; DOI=10.1136/jmg 2003.012781;
        # We look for these cases first and deal with them
        if line.find("=") != -1:
            cols = line[2:].split("; ")
            cols = [x.strip() for x in cols]
            cols = [x for x in cols if x]
            for col in cols:
                x = col.split("=")
                assert len(x) == 2, "I don't understand RX line %s" % line
                key, value = x[0].rstrip(_CHOMP), x[1].rstrip(_CHOMP)
                if key == "MEDLINE":
                    self._current_ref.medline_id = value
                elif key == "PubMed":
                    self._current_ref.pubmed_id = value
                else:
                    #Sadly the SeqFeature.Reference object doesn't
                    #support anything else (yet)
                    pass
        # otherwise we assume we have the type 'RX   MEDLINE; 85132727.'
        else:
            # CLD1_HUMAN in Release 39 and DADR_DIDMA in Release 33
            # have extraneous information in the RX line. Check for
            # this and chop it out of the line.
            # (noticed by katel@worldpath.net)
            ind = line.find('[NCBI, ExPASy, Israel, Japan]')
            if ind >= 0:
                line = line[:ind]
            cols = line.split()
            # normally we split into the three parts
            assert len(cols) == 3, "I don't understand RX line %s" % line
            key = cols[1].rstrip(_CHOMP)
            value = cols[2].rstrip(_CHOMP)
            if key == "MEDLINE":
                self._current_ref.medline_id = value
            elif key == "PubMed":
                self._current_ref.pubmed_id = value
            else:
                #Sadly the SeqFeature.Reference object doesn't
                #support anything else (yet)
                pass

    def reference_author(self, line):
        """RA line, reference author(s)."""
        assert self._current_ref is not None, "RA: missing RN"
        self._current_ref.authors += line[5:].rstrip("\n")

    def reference_title(self, line):
        """RT line, reference title."""
        assert self._current_ref is not None, "RT: missing RN"
        self._current_ref.title += line[5:].rstrip("\n")

    def reference_location(self, line):
        """RL line, reference 'location' - journal, volume, pages, year."""
        assert self._current_ref is not None, "RL: missing RN"
        self._current_ref.journal += line[5:].rstrip("\n")

    def reference_comment(self, line):
        """RC line, reference comment."""
        assert self._current_ref is not None, "RC: missing RN"
        #This has a key=value; structure...
        #Can we do a better job with the current Reference class?
        self._current_ref.comment += line[5:].rstrip("\n")
def index_file(filename, indexname, rec2key=None):
    """index_file(filename, indexname, rec2key=None)

    Index a SwissProt file.  filename is the name of the file.
    indexname is the name of the dictionary.  rec2key is an
    optional callback that takes a Record and generates a unique key
    (e.g. the accession number) for the record.  If not specified,
    the entry name will be used.

    Raises ValueError if filename does not exist, and KeyError for an
    empty or duplicate record key.
    """
    if not os.path.exists(filename):
        # raise-as-call form works on both Python 2 and Python 3
        # (the old 'raise ValueError, msg' statement is Python 2 only).
        raise ValueError("%s does not exist" % filename)

    index = Index.Index(indexname, truncate=1)
    index[Dictionary._Dictionary__filename_key] = filename

    # 'record_iter' rather than 'iter' so the builtin is not shadowed.
    record_iter = Iterator(open(filename), parser=RecordParser())
    while True:
        # Remember the byte offset so the record can be re-read later;
        # the length is measured even for the final (None) iteration.
        start = record_iter._uhandle.tell()
        rec = record_iter.next()
        length = record_iter._uhandle.tell() - start
        if rec is None:
            break
        if rec2key is not None:
            key = rec2key(rec)
        else:
            key = rec.entry_name
        if not key:
            raise KeyError("empty sequence key was produced")
        elif index.has_key(key):
            # NOTE(review): has_key is Python 2 style; kept because the
            # Index class's support for 'in' is not visible from here.
            raise KeyError("duplicate key %s found" % key)
        index[key] = start, length
if __name__ == "__main__" :
print "Quick self test..."
example_filename = "../../Tests/SwissProt/sp008"
import os
if not os.path.isfile(example_filename) :
print "Missing test file %s" % example_filename
else :
#Try parsing it!
handle = open(example_filename)
for record in Iterator(handle, RecordParser()) :
print record.entry_name
print ",".join(record.accessions)
print record.keywords
print repr(record.organism)
print record.sequence[:20] + "..."
handle.close()
handle = open(example_filename)
for record in Iterator(handle, SequenceParser()) :
print record.name
print record.id
print record.annotations['keywords']
print repr(record.annotations['organism'])
print record.seq.tostring()[:20] + "..."
handle.close()
| dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/SwissProt/SProt.py | Python | apache-2.0 | 50,022 | [
"Biopython"
] | 62a9e3c7a5b038c8f0ad43042caa583fed78dbe6781e9e7bea7bfd6107c2692e |
# Copyright (c) 2007-2009 Pedro Matiello <pmatiello@gmail.com>
# Johannes Reinhardt <jreinhardt@ist-dein-freund.de>
# Rhys Ulerich <rhys.ulerich@gmail.com>
# Roy Smith <roy@panix.com>
# Salim Fadhley <sal@stodge.org>
# Tomaz Kovacic <tomaz.kovacic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Minimization and maximization algorithms.
@sort: cut_tree, cut_value, heuristic_search, maximum_flow,
minimal_spanning_tree, shortest_path, shortest_path_bellman_ford
"""
from pygraph.algorithms.utils import heappush, heappop
from pygraph.classes.exceptions import NodeUnreachable
from pygraph.classes.exceptions import NegativeWeightCycleError
from pygraph.classes.digraph import digraph
# Minimal spanning tree
def minimal_spanning_tree(graph, root=None):
    """
    Minimal spanning tree.

    @attention: Minimal spanning tree is meaningful only for weighted graphs.

    @type  graph: graph
    @param graph: Graph.

    @type  root: node
    @param root: Optional root node (will explore only root's connected component)

    @rtype:  dictionary
    @return: Generated spanning tree (child node -> parent node).
    """
    visited = []        # nodes already absorbed into the tree
    spanning_tree = {}  # child -> parent mapping being built

    # Seed the search: either from the requested root, or from an
    # arbitrary component (marked with the dummy value 1).
    if root is not None:
        visited.append(root)
        spanning_tree[root] = None
        nroot = root
    else:
        nroot = 1

    while nroot is not None:
        ledge = _lightest_edge(graph, visited)
        if ledge == (-1, -1):
            # Component exhausted: stop if a root was requested,
            # otherwise restart from the next unvisited component.
            if root is not None:
                break
            nroot = _first_unvisited(graph, visited)
            if nroot is not None:
                spanning_tree[nroot] = None
                visited.append(nroot)
        else:
            spanning_tree[ledge[1]] = ledge[0]
            visited.append(ledge[1])

    return spanning_tree


def _first_unvisited(graph, visited):
    """
    Return first unvisited node.

    @type  graph: graph
    @param graph: Graph.

    @type  visited: list
    @param visited: List of nodes.

    @rtype:  node
    @return: First unvisited node, or None when all are visited.
    """
    for node in graph:
        if node not in visited:
            return node
    return None


def _lightest_edge(graph, visited):
    """
    Return the lightest edge in graph going from a visited node to an unvisited one.

    @type  graph: graph
    @param graph: Graph.

    @type  visited: list
    @param visited: List of nodes.

    @rtype:  tuple
    @return: Lightest crossing edge, or (-1, -1) when none exists.
    """
    best_edge = (-1, -1)
    best_weight = -1
    for src in visited:
        for dst in graph[src]:
            if dst in visited:
                continue
            w = graph.edge_weight((src, dst))
            if best_weight < 0 or w < best_weight:
                best_edge = (src, dst)
                best_weight = w
    return best_edge
# Shortest Path
def shortest_path(graph, source):
    """
    Return the shortest path distance between source and all other nodes
    using Dijkstra's algorithm.

    @attention: All weights must be nonnegative.

    @see: shortest_path_bellman_ford

    @type  graph: graph, digraph
    @param graph: Graph.

    @type  source: node
    @param source: Node from which to start the search.

    @rtype:  tuple
    @return: A tuple containing two dictionaries, each keyed by target nodes.
        1. Shortest path spanning tree
        2. Shortest distance from given source to each target node
    Inaccessible target nodes do not appear in either dictionary.
    """
    dist = {source: 0}
    previous = {source: None}
    unprocessed = graph.nodes()

    while unprocessed:
        # Pick the unprocessed node with the smallest known distance.
        # (Linear scan; a priority queue would be asymptotically better.)
        closest = unprocessed[0]
        for candidate in unprocessed[1:]:
            if closest not in dist or \
               (candidate in dist and dist[candidate] < dist[closest]):
                closest = candidate
        unprocessed.remove(closest)

        # Nodes with no known distance are unreachable; skip them.
        if closest not in dist:
            continue

        # Relax every edge leading to a still-unprocessed neighbor.
        for neighbor in graph[closest]:
            if neighbor not in unprocessed:
                continue
            alt = dist[closest] + graph.edge_weight((closest, neighbor))
            if neighbor not in dist or alt < dist[neighbor]:
                dist[neighbor] = alt
                previous[neighbor] = closest

    return previous, dist
def shortest_path_bellman_ford(graph, source):
    """
    Return the shortest path distance between the source node and all other
    nodes in the graph using Bellman-Ford's algorithm.

    This algorithm is useful when you have a weighted (and obviously
    a directed) graph with negative weights.

    @attention: The algorithm can detect negative weight cycles and will raise
    an exception. It's meaningful only for directed weighted graphs.

    @see: shortest_path

    @type  graph: digraph
    @param graph: Digraph

    @type  source: node
    @param source: Source node of the graph

    @raise NegativeWeightCycleError: raises if it finds a negative weight cycle.
    If this condition is met d(v) > d(u) + W(u, v) then raise the error.

    @rtype: tuple
    @return: A tuple containing two dictionaries, each keyed by target nodes.
        (same as shortest_path function that implements Dijkstra's algorithm)
        1. Shortest path spanning tree
        2. Shortest distance from given source to each target node
    Inaccessible target nodes do not appear in either dictionary.
    """
    # initialize the required data structures
    distance = {source: 0}
    predecessor = {source: None}

    # Relax every edge |V| - 1 times: any shortest path has at most
    # |V| - 1 edges, so after these passes all of them are discovered.
    # (The previous range(1, order - 1) performed one pass too few and,
    # for a two-node graph, no passes at all.)
    for _ in range(graph.order() - 1):
        for src, dst in graph.edges():
            weight = graph.edge_weight((src, dst))
            if src in distance and \
               (dst not in distance or distance[src] + weight < distance[dst]):
                distance[dst] = distance[src] + weight
                predecessor[dst] = src

    # One extra round of checks: any edge that can still be relaxed
    # proves a negative weight cycle.  Edges out of unreachable nodes
    # are skipped instead of raising KeyError.
    for src, dst in graph.edges():
        if src in distance and dst in distance and \
           distance[src] + graph.edge_weight((src, dst)) < distance[dst]:
            raise NegativeWeightCycleError("Detected a negative weight cycle on edge (%s, %s)" % (src, dst))

    return predecessor, distance
#Heuristics search
def heuristic_search(graph, start, goal, heuristic):
    """
    A* search algorithm.

    A set of heuristics is available under C{graph.algorithms.heuristics}.
    User-created heuristics are allowed too.

    @type  graph: graph, digraph
    @param graph: Graph

    @type  start: node
    @param start: Start node

    @type  goal: node
    @param goal: Goal node

    @type  heuristic: function
    @param heuristic: Heuristic function

    @rtype:  list
    @return: Optimized path from start to goal node
    """
    # Each queue entry is (priority, node, cost-to-reach, parent).
    queue = [(0, start, 0, None)]

    # best_seen maps queued nodes to (cost of best discovered path,
    # memoised heuristic value): the heuristic runs at most once per
    # node and dominated entries are not re-queued.
    best_seen = {}

    # Maps fully-expanded nodes to the parent closest to the start.
    explored = {}

    while queue:
        _, node, cost, parent = heappop(queue)
        if node == goal:
            path = [node] + list(_reconstruct_path(parent, explored))
            path.reverse()
            return path
        if node in explored:
            continue
        explored[node] = parent
        for succ in graph[node]:
            if succ in explored:
                continue
            succ_cost = cost + graph.edge_weight((node, succ))
            if succ in best_seen:
                queued_cost, h = best_seen[succ]
                if queued_cost <= succ_cost:
                    continue
                # A dominated entry for succ stays in the queue:
                # filtering it out would mean scanning the whole heap,
                # so it is simply skipped when popped a second time.
            else:
                h = heuristic(succ, goal)
            best_seen[succ] = succ_cost, h
            heappush(queue, (succ_cost + h, succ, succ_cost, node))
    raise NodeUnreachable(start, goal)


def _reconstruct_path(node, parents):
    """Yield nodes from *node* back to the start by following parents."""
    while node is not None:
        yield node
        node = parents[node]
#maximum flow/minimum cut
def maximum_flow(graph, source, sink, caps = None):
    """
    Find a maximum flow and minimum cut of a directed graph by the Edmonds-Karp algorithm.

    @type  graph: digraph
    @param graph: Graph

    @type  source: node
    @param source: Source of the flow

    @type  sink: node
    @param sink: Sink of the flow

    @type  caps: dictionary
    @param caps: Dictionary specifying a maximum capacity for each edge. If not given, the weight of the edge
    will be used as its capacity. Otherwise, for each edge (a,b), caps[(a,b)] should be given.

    @rtype:  tuple
    @return: A tuple containing two dictionaries
        1. contains the flow through each edge for a maximal flow through the graph
        2. contains to which component of a minimum cut each node belongs
    """
    # Handle optional argument: default each edge's capacity to its
    # weight.  ('is None' rather than '== None': identity is the correct
    # test for the missing-argument sentinel.)
    if caps is None:
        caps = {}
        for edge in graph.edges():
            caps[edge] = graph.edge_weight((edge[0], edge[1]))

    # f: current flow per edge.
    # label: augmenting-path labels per node; [] means "unlabelled".
    #   (The lists are only compared and rebound, never mutated, so
    #   sharing one [] between all keys via fromkeys is safe here.)
    # u: whether a labelled node has already been scanned.
    # d: residual capacity along the path that labelled the node.
    f = dict.fromkeys(graph.edges(), 0)
    label = dict.fromkeys(graph.nodes(), [])
    label[source] = ['-', float('Inf')]
    u = dict.fromkeys(graph.nodes(), False)
    d = dict.fromkeys(graph.nodes(), float('Inf'))
    # queue for labelling
    q = [source]

    finished = False
    while not finished:
        # choose first labelled vertex with u == false
        for i in range(len(q)):
            if not u[q[i]]:
                v = q.pop(i)
                break

        # Try to extend the labelling from v: forward along edges with
        # residual capacity, backward along edges carrying flow.
        for w in graph.neighbors(v):
            if label[w] == [] and f[(v,w)] < caps[(v,w)]:
                d[w] = min(caps[(v,w)] - f[(v,w)], d[v])
                label[w] = [v, '+', d[w]]
                q.append(w)
        for w in graph.incidents(v):
            if label[w] == [] and f[(w,v)] > 0:
                d[w] = min(f[(w,v)], d[v])
                label[w] = [v, '-', d[w]]
                q.append(w)
        u[v] = True

        # If the sink was labelled, augment the flow along the labelled
        # path by the bottleneck value delta, then restart labelling.
        if label[sink] != []:
            delta = label[sink][-1]
            w = sink
            while w != source:
                v = label[w][0]
                if label[w][1] == '-':
                    f[(w,v)] = f[(w,v)] - delta
                else:
                    f[(v,w)] = f[(v,w)] + delta
                w = v
            # reset labels
            label = dict.fromkeys(graph.nodes(), [])
            label[source] = ['-', float('Inf')]
            q = [source]
            u = dict.fromkeys(graph.nodes(), False)
            d = dict.fromkeys(graph.nodes(), float('Inf'))

        # Finished once every labelled vertex has been scanned and the
        # sink could not be reached any more.
        finished = True
        for node in graph.nodes():
            if label[node] != [] and u[node] == False:
                finished = False

    # Nodes left unlabelled lie on the sink side of the minimum cut.
    cut = {}
    for node in graph.nodes():
        if label[node] == []:
            cut[node] = 1
        else:
            cut[node] = 0
    return (f, cut)
def cut_value(graph, flow, cut):
    """
    Calculate the value of a cut.

    @type  graph: digraph
    @param graph: Graph

    @type  flow: dictionary
    @param flow: Dictionary containing a flow for each edge.

    @type  cut: dictionary
    @param cut: Dictionary mapping each node to a subset index. The function only considers the flow between
    nodes with 0 and 1.

    @rtype:  float
    @return: The value of the flow between the subsets 0 and 1
    """
    # Partition the nodes according to the cut (other indices ignored).
    source_side = [node for node in cut.keys() if cut[node] == 0]
    sink_side = [node for node in cut.keys() if cut[node] == 1]

    # Net value = flow leaving the source side minus flow entering it.
    value = 0
    for node in source_side:
        for succ in graph.neighbors(node):
            if succ in sink_side:
                value += flow[(node, succ)]
        for pred in graph.incidents(node):
            if pred in sink_side:
                value -= flow[(pred, node)]
    return value
def cut_tree(igraph, caps = None):
    """
    Construct a Gomory-Hu cut tree by applying the algorithm of Gusfield.

    @type  igraph: graph
    @param igraph: Graph

    @type  caps: dictionary
    @param caps: Dictionary specifying a maximum capacity for each edge. If not given, the weight of the edge
    will be used as its capacity. Otherwise, for each edge (a,b), caps[(a,b)] should be given.

    @rtype:  dictionary
    @return: Gomory-Hu cut tree as a dictionary, where each edge is associated with its weight
    """
    #maximum flow needs a digraph, we get a graph
    #I think this conversion relies on implementation details outside the api and may break in the future
    graph = digraph()
    graph.add_graph(igraph)

    #handle optional argument
    # NOTE(review): 'not caps' also triggers for an explicitly passed empty
    # dict, silently rebuilding capacities from edge weights -- presumably
    # intended, but it differs from maximum_flow's None check; confirm.
    if not caps:
        caps = {}
        for edge in graph.edges():
            caps[edge] = igraph.edge_weight(edge)

    #temporary flow variable
    f = {}

    #we use a numbering of the nodes for easier handling
    n = {}
    N = 0
    for node in graph.nodes():
        n[N] = node
        N = N + 1

    #predecessor function: every node initially hangs off node 0
    p = {}.fromkeys(range(N),0)
    p[0] = None

    # Gusfield's algorithm: one max-flow computation per non-root node s,
    # each time between s and its current tree predecessor t.
    for s in range(1,N):
        t = p[s]
        S = []
        #max flow calculation
        (flow,cut) = maximum_flow(graph,n[s],n[t],caps)
        # S collects the (numbered) nodes on s's side of the minimum cut.
        for i in range(N):
            if cut[n[i]] == 0:
                S.append(i)
        value = cut_value(graph,flow,cut)
        f[s] = value
        # Re-hang nodes that fell on s's side of the cut under s.
        for i in range(N):
            if i == s:
                continue
            if i in S and p[i] == t:
                p[i] = s
        # If t's own predecessor is on s's side, s takes t's place in
        # the tree and inherits t's flow value.
        if p[t] in S:
            p[s] = p[t]
            p[t] = s
            f[s] = f[t]
            f[t] = value

    #cut tree is a dictionary, where each edge is associated with its weight
    b = {}
    for i in range(1,N):
        b[(n[i],n[p[i]])] = f[i]
    return b
| cwacek/experimentor_tools | pygraph/algorithms/minmax.py | Python | mit | 15,697 | [
"VisIt"
] | 569e2906f03aacc1157a47d852c8c748c47a2d91cf14ba653f9cc53d6ad11eae |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.