text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
"""
Bok choy acceptance tests for conditionals in the LMS
"""
from flaky import flaky
from capa.tests.response_xml_factory import StringResponseXMLFactory
from common.test.acceptance.tests.helpers import UniqueCourseTest
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.conditional import ConditionalPage, POLL_ANSWER
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.pages.studio.auto_auth import AutoAuthPage
class ConditionalTest(UniqueCourseTest):
    """
    Test the conditional module in the lms.
    """

    def setUp(self):
        super(ConditionalTest, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        # Log in as a non-staff user so that we exercise the student view.
        AutoAuthPage(
            self.browser,
            course_id=self.course_id,
            staff=False
        ).visit()

    def install_course_fixture(self, block_type='problem'):
        """
        Install a course fixture containing a conditional block gated on a
        source block of the given ``block_type`` ('problem' or 'poll').

        Raises NotImplementedError for any other ``block_type``.
        """
        course_fixture = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name'],
        )
        vertical = XBlockFixtureDesc('vertical', 'Test Unit')
        # Populate the course fixture with the chapter/sequential/vertical
        # scaffolding that will host the conditional modules.
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    vertical
                )
            )
        )
        course_fixture.install()

        # Construct the source block that the conditional depends on.
        if block_type == 'problem':
            problem_factory = StringResponseXMLFactory()
            # NOTE: build_xml() returns the XML string directly.  The
            # original code had a stray trailing comma that wrapped the
            # result in a one-element tuple and then indexed into it.
            problem_xml = problem_factory.build_xml(
                question_text='The answer is "correct string"',
                case_sensitive=False,
                answer='correct string',
            )
            source_block = XBlockFixtureDesc(
                'problem', 'Test Problem', data=problem_xml)
            conditional_attr = 'attempted'
            conditional_value = 'True'
        elif block_type == 'poll':
            source_block = XBlockFixtureDesc(
                'poll_question',
                'Conditional Poll',
                question='Is this a good poll?',
                answers=[
                    {'id': 'yes', 'text': POLL_ANSWER},
                    {'id': 'no', 'text': 'Of course not!'}
                ],
            )
            conditional_attr = 'poll_answer'
            conditional_value = 'yes'
        else:
            raise NotImplementedError()
        course_fixture.create_xblock(vertical.locator, source_block)

        # Create the conditional: its children are shown only once the
        # source block's attribute matches the expected value.
        conditional = XBlockFixtureDesc(
            'conditional',
            'Test Conditional',
            sources_list=[source_block.locator],
            conditional_attr=conditional_attr,
            conditional_value=conditional_value
        )
        result_block = XBlockFixtureDesc(
            'html', 'Conditional Contents',
            # Fixed mismatched markup: the <div> was previously closed
            # with a </p> tag.
            data='<html><div class="hidden-contents">Hidden Contents</div></html>'
        )
        course_fixture.create_xblock(vertical.locator, conditional)
        course_fixture.create_xblock(conditional.locator, result_block)

    def test_conditional_hides_content(self):
        """Content behind an unmet conditional is not visible."""
        self.install_course_fixture()
        self.courseware_page.visit()
        conditional_page = ConditionalPage(self.browser)
        self.assertFalse(conditional_page.is_content_visible())

    def test_conditional_displays_content(self):
        """Answering the gating problem reveals the conditional content."""
        self.install_course_fixture()
        self.courseware_page.visit()
        # Answer the problem.
        problem_page = ProblemPage(self.browser)
        problem_page.fill_answer('correct string')
        problem_page.click_submit()
        # The conditional does not update on its own, so we need to reload the page.
        self.courseware_page.visit()
        # Verify that we can see the content.
        conditional_page = ConditionalPage(self.browser)
        self.assertTrue(conditional_page.is_content_visible())

    @flaky  # TNL-5770
    def test_conditional_handles_polls(self):
        """Answering the gating poll reveals the conditional content."""
        self.install_course_fixture(block_type='poll')
        self.courseware_page.visit()
        # Fill in the conditional page poll.
        conditional_page = ConditionalPage(self.browser)
        conditional_page.fill_in_poll()
        # The conditional does not update on its own, so we need to reload the page.
        self.courseware_page.visit()
        self.assertTrue(conditional_page.is_content_visible())
| itsjeyd/edx-platform | common/test/acceptance/tests/lms/test_conditional.py | Python | agpl-3.0 | 4,885 | [
"VisIt"
] | 6b893e75a8743b3ffea1b19a21657f2a4e898481e4fec30705af0827b639feee |
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import vtk
import chigger
# Camera positioned to view the scaled mug result.
view = vtk.vtkCamera()
view.SetViewUp(-0.0180, 0.8826, 0.4699)
view.SetPosition(-1.2854, -10.1975, 19.2304)
view.SetFocalPoint(0.0000, 0.0000, 0.1250)

# Stretch the geometry by a factor of two along the x-axis.
scale_filter = chigger.filters.TransformFilter(scale=[2, 1, 1])

exodus_reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e')
result = chigger.exodus.ExodusResult(exodus_reader, variable='diffused',
                                     camera=view, cmap='viridis',
                                     filters=[scale_filter])
result.setOptions('colorbar', visible=False)

window = chigger.RenderWindow(result, size=[300, 300], test=True)
window.write('scale.png')
window.start()
| nuclear-wizard/moose | python/chigger/tests/transform/scale.py | Python | lgpl-2.1 | 942 | [
"MOOSE",
"VTK"
] | 3d01d1cb0892eca98d6029ed993278cddc5523e8a84ab65e98e16446b1e3a4ef |
#!/usr/bin/env python
''' VIC exact restart testing '''
import numpy as np
import datetime
import os
import glob
import xarray as xr
import warnings
from test_utils import read_vic_ascii
from tonic.testing import VICTestError
def prepare_restart_run_periods(restart_dict, state_basedir):
    ''' For restart tests, read the full running period and split dates and
    produce the list of run periods (NOTE: all restart runs always start
    from the beginning of the starting date (i.e., second 0) and end at the
    end of the ending date (i.e., second 0 of the next day))

    Parameters
    ----------
    restart_dict: <class 'configobj.Section'>
        A section of the config file for exact restart test setup; must
        contain 'start_date', 'end_date' and 'split_dates' ('split_dates'
        is a single 'YYYY-MM-DD' string or a list of such strings).
    state_basedir: <str>
        Basedirectory of output state files.
        For restart tests, state files will be output as:
            <state_basedir>/<run_start_date>_<run_end_date>/<state_file>

    Returns
    ----------
    run_periods: <list>
        A list of running periods, including the full-period run, and all
        splitted runs in order. Each element of run_periods is a dict with
        keys:
            start_date  # datetime.datetime
            end_date    # datetime.datetime
            init_state  # None, or full path of the initial state file
                        # e.g., '/path/19490101_19490105/states_19490106_00000'
    '''
    # --- Read in full running period --- #
    start_date = datetime.datetime.strptime(restart_dict['start_date'],
                                            '%Y-%m-%d')
    end_date = datetime.datetime.strptime(restart_dict['end_date'],
                                          '%Y-%m-%d')
    # --- Identify each of the splitted running periods --- #
    if not isinstance(restart_dict['split_dates'], list):
        # A single split date was configured.
        list_split_dates = [datetime.datetime.strptime(
                                    restart_dict['split_dates'],
                                    '%Y-%m-%d')]
    else:
        # BUG FIX: parse each date string individually.  The original code
        # passed the whole list to strptime(), which raises a TypeError
        # whenever more than one split date is configured.
        list_split_dates = [
            datetime.datetime.strptime(d, '%Y-%m-%d')
            for d in restart_dict['split_dates']]
    # --- Prepare running periods --- #
    run_periods = []
    # Append the full run (no initial state).
    run_periods.append(dict(start_date=start_date, end_date=end_date,
                            init_state=None))
    # First splitted running period - start_date to first split date,
    # also with no initial state.
    run_periods.append(dict(start_date=start_date,
                            end_date=list_split_dates[0],
                            init_state=None))
    # Middle split periods - each starts the day after the previous split
    # date and is initialized from the state the previous period saved.
    for i in range(len(list_split_dates) - 1):
        run_periods.append(dict(
            start_date=list_split_dates[i] + datetime.timedelta(days=1),
            end_date=list_split_dates[i + 1],
            init_state=_end_of_period_state_path(state_basedir,
                                                 run_periods[-1])))
    # Last splitted running period - day after last split date to end_date.
    run_periods.append(dict(
        start_date=list_split_dates[-1] + datetime.timedelta(days=1),
        end_date=end_date,
        init_state=_end_of_period_state_path(state_basedir,
                                             run_periods[-1])))
    return run_periods


def _end_of_period_state_path(state_basedir, run_period):
    ''' Return the path of the state file saved at the end of run_period.

    The state file is timestamped at second 0 of the day after the
    period's end date, i.e.:
        <state_basedir>/<start>_<end>/states_<end + 1 day>_00000

    Parameters
    ----------
    state_basedir: <str>
        Basedirectory of output state files
    run_period: <dict>
        A run period dict with 'start_date' and 'end_date' keys
    '''
    return os.path.join(
        state_basedir,
        '{}_{}'.format(run_period['start_date'].strftime("%Y%m%d"),
                       run_period['end_date'].strftime("%Y%m%d")),
        'states_{}_{:05d}'.format(
            (run_period['end_date'] +
             datetime.timedelta(days=1)).strftime("%Y%m%d"), 0))
def setup_subdirs_and_fill_in_global_param_restart_test(
        s, run_periods, driver, result_basedir, state_basedir, test_data_dir):
    ''' Fill in global parameter options for multiple runs for restart testing

    Parameters
    ----------
    s: <string.Template>
        Template of the global param file to be filled in
    run_periods: <list>
        A list of running periods. Return from prepare_restart_run_periods()
    driver: <str>
        'classic' or 'image'
    result_basedir: <str>
        Base directory of output fluxes results; running periods are
        subdirectories under the base directory
    state_basedir: <str>
        Base directory of output state results; running periods are
        subdirectories under the base directory
    test_data_dir: <str>
        Base directory of test data

    Returns
    ----------
    list_global_param: <list>
        A list of global parameter strings to be run with parameters filled in
    '''
    filled_templates = []
    for period in run_periods:
        period_start = period['start_date']
        period_end = period['end_date']
        period_tag = '{}_{}'.format(period_start.strftime("%Y%m%d"),
                                    period_end.strftime("%Y%m%d"))
        # Per-period output subdirectories for fluxes and states.
        flux_dir = os.path.join(result_basedir, period_tag)
        states_dir = os.path.join(state_basedir, period_tag)
        os.makedirs(flux_dir, exist_ok=True)
        os.makedirs(states_dir, exist_ok=True)
        # Build the INIT_STATE line for the global parameter file; a
        # commented-out placeholder is used when there is no initial state.
        if period['init_state'] is None:
            init_state = '#INIT_STATE'
        else:
            init_state = 'INIT_STATE {}'.format(period['init_state'])
            if driver == 'image':
                # The image driver names state files 'basepath.*' instead
                # of 'basepath_*', and they end with ".nc".
                init_state = init_state.replace("states_", "states.") + '.nc'
        # Output state is dated the day after the period ends.
        out_state_date = period_end + datetime.timedelta(days=1)
        filled_templates.append(s.safe_substitute(
            test_data_dir=test_data_dir,
            result_dir=flux_dir,
            state_dir=states_dir,
            startyear=period_start.year,
            startmonth=period_start.month,
            startday=period_start.day,
            endyear=period_end.year,
            endmonth=period_end.month,
            endday=period_end.day,
            init_state=init_state,
            stateyear=out_state_date.year,
            statemonth=out_state_date.month,
            stateday=out_state_date.day,
            statesec=0))
    return filled_templates
def check_exact_restart_fluxes(result_basedir, driver, run_periods):
    ''' Checks whether all the fluxes are the same w/ or w/o restart.

    Reads the flux output of the full run, then for each split run period
    compares its flux output against the corresponding time slice of the
    full run.  Raises (via numpy.testing) if any values differ.

    Parameters
    ----------
    result_basedir: <str>
        Base directory of output fluxes results; running periods are
        subdirectories under the base directory
    driver: <str>
        'classic' or 'image'
    run_periods: <list>
        A list of running periods. Return from prepare_restart_run_periods()

    Require:
    ----------
    xarray
    glob
    os
    numpy
    warnings
    read_vic_ascii
    '''
    # --- Extract full run period --- #
    # The first element of run_periods is the full (unsplit) run.
    run_full_start_date = run_periods[0]['start_date']
    run_full_end_date = run_periods[0]['end_date']
    # --- Read full run fluxes --- #
    result_dir = os.path.join(
        result_basedir,
        '{}_{}'.format(run_full_start_date.strftime('%Y%m%d'),
                       run_full_end_date.strftime('%Y%m%d')))
    if driver == 'classic':
        # Read in each of the output flux files
        # --- a dict of flux at each grid cell, keyed by flux basename ---#
        dict_df_full_run = {}
        for fname in glob.glob(os.path.join(result_dir, '*')):
            df = read_vic_ascii(fname)
            dict_df_full_run[os.path.basename(fname)] = df
    elif driver == 'image':
        # The image driver writes a single netCDF file per run; warn if
        # more than one is found (only the first is used).
        if len(glob.glob(os.path.join(result_dir, '*.nc'))) > 1:
            warnings.warn('More than one netCDF file found under '
                          'directory {}'.format(result_dir))
        fname = glob.glob(os.path.join(result_dir, '*.nc'))[0]
        ds_full_run = xr.open_dataset(fname)
    # --- Loop over the result of each split run period --- #
    for i, run_period in enumerate(run_periods):
        # Skip the full run
        if i == 0:
            continue
        # Extract running period
        start_date = run_period['start_date']
        end_date = run_period['end_date']
        # Loop over each of the output flux files
        result_dir = os.path.join(
            result_basedir,
            '{}_{}'.format(start_date.strftime('%Y%m%d'),
                           end_date.strftime('%Y%m%d')))
        if driver == 'classic':
            for flux_basename in dict_df_full_run.keys():
                # Read in flux data
                fname = os.path.join(result_dir, flux_basename)
                df = read_vic_ascii(fname)
                # Extract the same period from the full run
                df_full_run_split_period = \
                    dict_df_full_run[flux_basename].truncate(df.index[0],
                                                             df.index[-1])
                # Compare split run fluxes with full run.  ASCII output
                # has limited precision, so compare to 6 decimals only.
                np.testing.assert_almost_equal(df.values,
                                               df_full_run_split_period.values,
                                               decimal=6,
                                               err_msg='fluxes are not a '
                                                       'close match')
        elif driver == 'image':
            # Read in flux data
            if len(glob.glob(os.path.join(result_dir, '*.nc'))) > 1:
                warnings.warn('More than one netCDF file found under'
                              'directory {}'.format(result_dir))
            fname = glob.glob(os.path.join(result_dir, '*.nc'))[0]
            ds = xr.open_dataset(fname)
            # Extract the same period from the full run
            ds_full_run_split_period = ds_full_run.sel(time=slice(
                start_date.strftime('%Y%m%d'),
                end_date.strftime('%Y%m%d')))
            # Compare split run fluxes with full run; netCDF output is
            # checked for an exact (bitwise) match, variable by variable.
            for var in ds_full_run.data_vars:
                np.testing.assert_array_equal(
                    ds[var].values, ds_full_run_split_period[var].values,
                    err_msg='Fluxes are not an exact match for %s' % var)
def check_exact_restart_states(state_basedir, driver, run_periods,
                               state_format='ASCII'):
    ''' Checks whether all the states are the same w/ or w/o restart.
    Only test the state at the last time step.

    Compares the state saved at the end of the full run against the state
    saved at the end of the last split-run period.

    Parameters
    ----------
    state_basedir: <str>
        Base directory of output state results; running periods are
        subdirectories under the base directory
    driver: <str>
        'classic' or 'image'
    run_periods: <list>
        A list of running periods. Return from prepare_restart_run_periods()
    state_format: <str>
        state file format, 'ASCII' or 'BINARY'; only need to specify when
        driver=='classic'
    '''
    # --- Read the state at the end of the full run --- #
    # Extract full run period (first element of run_periods)
    run_full_start_date = run_periods[0]['start_date']
    run_full_end_date = run_periods[0]['end_date']
    # Read the state file; it is timestamped at second 0 of the day after
    # the run's end date.
    if driver == 'classic':
        state_fname = os.path.join(
            state_basedir,
            '{}_{}'.format(
                run_full_start_date.strftime('%Y%m%d'),
                run_full_end_date.strftime('%Y%m%d')),
            'states_{}_{:05d}'.format(
                (run_full_end_date +
                 datetime.timedelta(
                     days=1)).strftime('%Y%m%d'),
                0))
        if state_format == 'ASCII':
            states_full_run = read_ascii_state(state_fname)
        elif state_format == 'BINARY':
            states_full_run = read_binary_state(state_fname)
    elif driver == 'image':
        # Image driver state files are netCDF and named 'states.<date>_<sec>.nc'
        state_fname = os.path.join(
            state_basedir,
            '{}_{}'.format(
                run_full_start_date.strftime('%Y%m%d'),
                run_full_end_date.strftime('%Y%m%d')),
            'states.{}_{:05d}.nc'.format(
                (run_full_end_date +
                 datetime.timedelta(
                     days=1)).strftime('%Y%m%d'),
                0))
        ds_states_full_run = xr.open_dataset(state_fname)
    # --- Compare split run states with full run --- #
    # Extract the last split run period; its final state should match the
    # full run's final state.
    run_last_period_start_date = run_periods[-1]['start_date']
    run_last_period_end_date = run_periods[-1]['end_date']
    if driver == 'classic':
        # Read the state file at the end of the last period of run
        state_fname = os.path.join(
            state_basedir,
            '{}_{}'.format(
                run_last_period_start_date.strftime('%Y%m%d'),
                run_last_period_end_date.strftime('%Y%m%d')),
            'states_{}_{:05d}'.format(
                (run_last_period_end_date +
                 datetime.timedelta(
                     days=1)).strftime('%Y%m%d'),
                0))
        if state_format == 'ASCII':
            states = read_ascii_state(state_fname)
        elif state_format == 'BINARY':
            states = read_binary_state(state_fname)
        # Compare split run states with full run
        # --- If ASCII state file, check if almost the same (text output
        # has limited precision) ---#
        if state_format == 'ASCII':
            np.testing.assert_almost_equal(states, states_full_run, decimal=3,
                                           err_msg='States are not a '
                                                   'close match')
        # --- If BINARY state file, check if exactly the same ---#
        elif state_format == 'BINARY':
            if states != states_full_run:
                raise VICTestError('Restart causes inexact state outputs!')
    elif driver == 'image':
        # Read the state file at the end of the last period of run
        state_fname = os.path.join(
            state_basedir,
            '{}_{}'.format(
                run_last_period_start_date.strftime('%Y%m%d'),
                run_last_period_end_date.strftime('%Y%m%d')),
            'states.{}_{:05d}.nc'.format(
                (run_last_period_end_date +
                 datetime.timedelta(
                     days=1)).strftime('%Y%m%d'),
                0))
        ds_states = xr.open_dataset(state_fname)
        # Compare split run states with full run, variable by variable;
        # netCDF states must match exactly.
        for var in ds_states.data_vars:
            np.testing.assert_array_equal(ds_states[var].values,
                                          ds_states_full_run[var].values,
                                          err_msg='states are not an '
                                                  'exact match for %s' % var)
def read_ascii_state(state_fname):
    ''' Read an ASCII-format state file into an array of numbers

    Parameters
    ----------
    state_fname: <str>
        Path of the state file to be read

    Returns
    ----------
    states: <np.array>
        A np.array of float numbers parsed from the state file
    '''
    with open(state_fname, 'r') as f:
        tokens = f.read().split()
    # Convert every whitespace-separated token to a float.
    return np.asarray([float(token) for token in tokens])
def read_binary_state(state_fname):
    ''' Read a binary-format state file and return its raw content.
    (The original docstring incorrectly said "ascii format".)

    Parameters
    ----------
    state_fname: <str>
        Path of the state file to be read

    Returns
    ----------
    states: <bytes>
        The full binary state file content
    '''
    with open(state_fname, 'rb') as f:
        states = f.read()
    return states
| wietsefranssen/VIC | tests/test_restart.py | Python | gpl-2.0 | 16,190 | [
"NetCDF"
] | bda4a94893392333cb94ba13e432161c752d2632b7630ea93c2d12ef313b4215 |
# -*- coding: utf-8 -*-
#
# AtHomePowerlineServer - networked server for CM11/CM11A/XTB-232 X10 controllers
# Copyright © 2014, 2019 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
#
# Device bright
#
import commands.ServerCommand as ServerCommand
import drivers.X10ControllerAdapter
#######################################################################
# Command handler for bright command
class DeviceBright(ServerCommand.ServerCommand):
    """Command handler for the X10 device "bright" command."""

    def Execute(self, request):
        """Brighten a device and return a response dict describing the outcome."""
        adapter = drivers.X10ControllerAdapter.X10ControllerAdapter
        succeeded = adapter.DeviceBright(
            request["args"]["house-device-code"],
            int(request["args"]["bright-amount"]))

        # Build the response, carrying the controller's last error code.
        response = DeviceBright.CreateResponse(request["request"])
        response['result-code'] = adapter.GetLastErrorCode()
        if succeeded:
            response['message'] = "Success"
        else:
            response['error'] = adapter.GetLastError()
            response['message'] = "Failure"
        return response
| dhocker/athomepowerlineserver | commands/DeviceBright.py | Python | gpl-3.0 | 1,552 | [
"xTB"
] | 7958bfae5a888f9c2fdb97bfe817c20f0bf5810b2389a17dd039eab0e37f1f5b |
# Copyright 2009-2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""View classes for ITranslationMessage interface."""
__metaclass__ = type
__all__ = [
'BaseTranslationView',
'contains_translations',
'convert_translationmessage_to_submission',
'CurrentTranslationMessageAppMenus',
'CurrentTranslationMessageFacets',
'CurrentTranslationMessageIndexView',
'CurrentTranslationMessagePageView',
'CurrentTranslationMessageView',
'CurrentTranslationMessageZoomedView',
'revert_unselected_translations',
'TranslationMessageSuggestions',
]
import cgi
import datetime
import operator
import re
import urllib
import pytz
from z3c.ptcompat import ViewPageTemplateFile
from zope import datetime as zope_datetime
from zope.component import getUtility
from zope.formlib.interfaces import IInputWidget
from zope.formlib.utility import setUpWidgets
from zope.formlib.widget import CustomWidgetFactory
from zope.formlib.widgets import DropdownWidget
from zope.interface import implements
from zope.schema.vocabulary import getVocabularyRegistry
from lp.app.errors import UnexpectedFormData
from lp.services.propertycache import cachedproperty
from lp.services.webapp import (
ApplicationMenu,
canonical_url,
enabled_with_permission,
LaunchpadView,
Link,
urlparse,
)
from lp.services.webapp.batching import BatchNavigator
from lp.services.webapp.escaping import structured
from lp.services.webapp.interfaces import ILaunchBag
from lp.services.webapp.publisher import RedirectionView
from lp.translations.browser.browser_helpers import (
contract_rosetta_escapes,
convert_newlines_to_web_form,
count_lines,
text_to_html,
)
from lp.translations.browser.potemplate import POTemplateFacets
from lp.translations.interfaces.pofile import IPOFileAlternativeLanguage
from lp.translations.interfaces.side import ITranslationSideTraitsSet
from lp.translations.interfaces.translationmessage import (
ITranslationMessage,
ITranslationMessageSet,
ITranslationMessageSuggestions,
RosettaTranslationOrigin,
TranslationConflict,
)
from lp.translations.interfaces.translations import TranslationConstants
from lp.translations.interfaces.translationsperson import ITranslationsPerson
from lp.translations.model import pofilestatsjob
from lp.translations.utilities.sanitize import (
sanitize_translations_from_webui,
)
from lp.translations.utilities.validate import GettextValidationError
def revert_unselected_translations(translations, current_message,
                                   plural_indices_to_store):
    """Revert translations that the user entered but did not select.

    :param translations: a dict mapping plural forms to their respective
        translation strings.
    :param current_message: the current `TranslationMessage`. Its
        translations are substituted for corresponding ones that the
        user entered without selecting their radio buttons.
    :param plural_indices_to_store: a sequence of plural form numbers
        that the user did select new translations for.
    :return: a dict similar to `translations`, but with any translations
        that are not in `plural_indices_to_store` reset to what they
        were in `current_message` (if any).
    """
    if current_message is None:
        original_translations = {}
    else:
        original_translations = dict(enumerate(current_message.translations))
    output = {}
    # Use .items() rather than the Python-2-only .iteritems() so the code
    # also works under Python 3; behavior is identical.
    for plural_form, translation in translations.items():
        if plural_form in plural_indices_to_store:
            # The user explicitly selected this plural form: keep it.
            output[plural_form] = translation
        elif original_translations.get(plural_form) is None:
            # Not selected and no previous translation: revert to empty.
            output[plural_form] = u''
        else:
            # Not selected: fall back to the current message's translation.
            output[plural_form] = original_translations[plural_form]
    return output
def contains_translations(translations):
    """Does `translations` contain any nonempty translations?

    :param translations: a dict mapping plural forms to their respective
        translation strings (or None).
    :return: True if at least one value is a nonempty string.
    """
    # Use .values() rather than the Python-2-only .itervalues() so the
    # code also works under Python 3; behavior is identical.
    for text in translations.values():
        if text is not None and len(text) != 0:
            return True
    return False
class POTMsgSetBatchNavigator(BatchNavigator):
    # Navigates POTMsgSets one message at a time: every batch has size 1
    # and batch URLs carry the message sequence number in the URL path
    # ('.../<sequence>/+translate') instead of ?start=/?batch= parameters.

    def __init__(self, results, request, start=0, size=1):
        """Constructs a BatchNavigator instance.

        results is an iterable of results. request is the web request
        being processed. size is accepted for interface compatibility but
        is always forced to 1 (this navigator only batches one element).

        The starting position is derived from the POTMsgSet sequence
        number embedded in the URL path rather than from a 'start' form
        variable.
        """
        schema, netloc, path, parameters, query, fragment = (
            urlparse(str(request.URL)))
        # For safety, delete the start and batch variables, if they
        # appear in the URL. The situation in which 'start' appears
        # today is when the alternative language form is posted back and
        # includes it.
        if 'start' in request:
            del request.form['start']
        if 'batch' in request.form:
            del request.form['batch']
        # Note: the BatchNavigator has now been updated so that it
        # gets the parameters out of the request.query_string_params
        # dict by default. Therefore, we'll remove the 'start' option
        # from request.query_string_params as well.
        if 'start' in request.query_string_params:
            del request.query_string_params['start']
        if 'batch' in request.query_string_params:
            del request.query_string_params['batch']
        # 'path' will be like: 'POTURL/LANGCODE/POTSEQUENCE/+translate' and
        # we are interested on the POTSEQUENCE.
        self.start_path, pot_sequence, self.page = path.rsplit('/', 2)
        try:
            # The URLs we use to navigate thru POTMsgSet objects start with 1,
            # while the batching machinery starts with 0, that's why we need
            # to remove '1'.
            start_value = int(pot_sequence) - 1
        except ValueError:
            # Path segment was not a number; fall back to the caller's start.
            start_value = start
        # This batch navigator class only supports batching of 1 element.
        size = 1
        BatchNavigator.__init__(self, results, request, start_value, size)

    def generateBatchURL(self, batch, backwards=False):
        """Return a custom batch URL for `ITranslationMessage`'s views."""
        url = ""
        if batch is None:
            return url
        assert batch.size == 1, 'The batch size must be 1.'
        # Rebuild the path with the 1-based sequence number of this batch.
        sequence = batch.startNumber()
        url = '/'.join([self.start_path, str(sequence), self.page])
        # getCleanQueryString ensures we get rid of any bogus 'start' or
        # 'batch' form variables we may have received via the URL.
        qs = self.getCleanQueryString()
        if qs:
            # There are arguments that we should preserve.
            url = '%s?%s' % (url, qs)
        return url
class CustomDropdownWidget(DropdownWidget):
    """A dropdown widget that renders its <select> without a wrapping div."""

    def _div(self, cssClass, contents, **kw):
        """Render the select widget without the div tag."""
        return contents
class CurrentTranslationMessageFacets(POTemplateFacets):
    """Facet menus for `ITranslationMessage` pages.

    Delegates to the facets of the message's POTemplate.
    """
    usedfor = ITranslationMessage

    def __init__(self, context):
        # Use the enclosing POTemplate as the facet context.
        POTemplateFacets.__init__(self, context.browser_pofile.potemplate)
class CurrentTranslationMessageAppMenus(ApplicationMenu):
    """Application menu (translations facet) for `ITranslationMessage`."""
    usedfor = ITranslationMessage
    facet = 'translations'
    links = ['overview', 'translate', 'upload', 'download']

    def overview(self):
        """Link back to the PO file overview (one level up)."""
        text = 'Overview'
        return Link('../', text)

    def translate(self):
        """Link to the multi-message translate view."""
        text = 'Translate many'
        return Link('../+translate', text, icon='languages')

    @enabled_with_permission('launchpad.Edit')
    def upload(self):
        """Link to the translation file upload view (edit permission only)."""
        text = 'Upload a file'
        return Link('../+upload', text, icon='edit')

    def download(self):
        """Link to the translation export/download view."""
        text = 'Download'
        return Link('../+export', text, icon='download')
class CurrentTranslationMessageIndexView(RedirectionView):
    """A view to forward to the translation form."""

    def __init__(self, context, request):
        # The message's default page is its +translate form.
        target = canonical_url(context, view_name='+translate')
        super(CurrentTranslationMessageIndexView, self).__init__(
            target, request)
def _getSuggestionFromFormId(form_id):
    """Return the suggestion associated with the given form element ID.

    The ID is in the format generated by `POSubmission.makeHTMLID`:
    'msgset_<potmsgset>_<lang>_suggestion_<message id>_<plural form>'.

    :raises UnexpectedFormData: if `form_id` does not match that format.
    """
    # BUG FIX: use a raw string for the pattern.  '\d' and '\S' are
    # invalid escape sequences in an ordinary string literal (a
    # DeprecationWarning and, on modern Python, a SyntaxError).
    expr_match = re.search(
        r'msgset_(\d+)_(\S+)_suggestion_(\d+)_(\d+)', form_id)
    if expr_match is None:
        raise UnexpectedFormData(
            'The given form ID (%s) is not valid' % form_id)
    # Extract the suggestion (translation message) ID and plural form.
    suggestion_id = int(expr_match.group(3))
    plural_form = int(expr_match.group(4))
    translationmessage = getUtility(ITranslationMessageSet).getByID(
        suggestion_id)
    return translationmessage.translations[plural_form]
class BaseTranslationView(LaunchpadView):
"""Base class that implements a framework for modifying translations.
This class provides a basis for building a batched translation page.
It relies on one or more subviews being used to actually display the
translations and form elements. It processes the form submitted and
constructs data which can be then fed back into the subviews.
The subviews must be (or behave like) CurrentTranslationMessageViews.
Child classes must define:
- self.pofile
- _buildBatchNavigator()
- _initializeTranslationMessageViews()
- _submitTranslations()
"""
pofile = None
@property
def label(self):
"""The label will be used as the main page heading."""
if self.form_is_writeable:
form_label = 'Translating into %s'
else:
form_label = 'Browsing %s translation'
return form_label % self.context.language.englishname
    def initialize(self):
        """Set up the view: parse form state and dispatch a POST submission.

        Requires the child class to have set self.pofile.  Handles three
        paths: missing plural-form information (error notification and
        bail out), a POST with 'submit_translations' (store translations
        and possibly redirect), and plain rendering (build subviews).
        """
        assert self.pofile, "Child class must define self.pofile"
        self.has_plural_form_information = (
            self.pofile.hasPluralFormInformation())
        # These two dictionaries hold translation data parsed from the
        # form submission. They exist mainly because of the need to
        # redisplay posted translations when they contain errors; if not
        # _submitTranslations could take care of parsing and saving
        # translations without the need to store them in instance
        # variables. To understand more about how they work, see
        # _extractFormPostedTranslations, _prepareView and
        # _storeTranslations.
        self.form_posted_translations = {}
        self.form_posted_translations_has_store_flag = {}
        self.form_posted_needsreview = {}
        self.form_posted_diverge = {}
        self.form_posted_dismiss_suggestions = {}
        if not self.has_plural_form_information:
            # This POFile needs administrator setup.
            self.request.response.addErrorNotification(
                structured("""
            <p>
            Launchpad can’t handle the plural items in this file,
            because it doesn’t yet know how plural forms work for %s.
            </p>
            <p>
            If you have this information, please visit the
            <a href="https://answers.launchpad.net/rosetta/">Answers</a>
            application to see whether anyone has submitted it yet. If not,
            please file the information there as a question. The preferred
            format for such questions is described in the
            <a href="https://help.launchpad.net/FAQ/Translations">Frequently
            Asked Questions list</a>.
            </p>
            <p>
            This only needs to be done once per language. Thanks for helping
            Launchpad Translations.
            </p>
            """, self.pofile.language.englishname))
            return
        self._initializeAltLanguage()
        method = self.request.method
        if method == 'POST':
            self.lock_timestamp = self._extractLockTimestamp()
            self._checkSubmitConditions()
        else:
            # It's not a POST, so we should generate lock_timestamp.
            UTC = pytz.timezone('UTC')
            self.lock_timestamp = datetime.datetime.now(UTC)
        # The batch navigator needs to be initialized early, before
        # _submitTranslations is called; the reason for this is that
        # _submitTranslations, in the case of no errors, redirects to
        # the next batch page.
        self.batchnav = self._buildBatchNavigator()
        # These two variables are stored for the sole purpose of being
        # output in hidden inputs that preserve the current navigation
        # when submitting forms.
        self.start = self.batchnav.start
        self.size = self.batchnav.currentBatch().size
        if method == 'POST' and 'submit_translations' in self.request.form:
            if self._submitTranslations():
                # If no errors occurred, adios. Otherwise, we need to set up
                # the subviews for error display and correction.
                return
        # Slave view initialization depends on _submitTranslations being
        # called, because the form data needs to be passed in to it --
        # again, because of error handling.
        self._initializeTranslationMessageViews()
    def _extractLockTimestamp(self):
        """Extract the lock timestamp from the request.

        The lock_timestamp is used to detect conflicting concurrent
        translation updates: if the translation that is being changed
        has been set after the current form was generated, the user
        chose a translation based on outdated information. In that
        case there is a conflict.

        :return: a timezone-aware datetime, or None if the form value is
            missing or malformed.
        """
        try:
            return zope_datetime.parseDatetimetz(
                self.request.form.get('lock_timestamp', u''))
        except zope_datetime.DateTimeError:
            # invalid format. Either we don't have the timestamp in the
            # submitted form or it has the wrong format.
            return None
def _checkSubmitConditions(self):
    """Verify that this submission is possible and valid.

    :raises: `UnexpectedFormData` if conditions are not met. In
        principle the user should not have been given the option to
        submit the current request.
    """
    if self.user is None:
        raise UnexpectedFormData("You are not logged in.")
    # A user who explicitly declined the translations licensing
    # agreement may not post translations.  Users who have not yet
    # made a decision are allowed through at this point; they may be
    # project owners making corrections.
    agreement = ITranslationsPerson(
        self.user).translations_relicensing_agreement
    has_declined = agreement is not None and not agreement
    if has_declined:
        raise UnexpectedFormData(
            "You can't post translations since you have not agreed to "
            "our translations licensing terms.")
    if self.lock_timestamp is None:
        raise UnexpectedFormData(
            "Your form submission did not contain the lock_timestamp "
            "that tells Launchpad when the submitted form was generated.")
@cachedproperty
def share_with_other_side(self):
    """Should these translations be shared with the other side?

    Delegates the decision to the template's translation policy.
    """
    pofile = self.pofile
    policy = pofile.potemplate.getTranslationPolicy()
    return policy.sharesTranslationsWithOtherSide(
        self.user, pofile.language,
        sourcepackage=pofile.potemplate.sourcepackage)
#
# API Hooks
#
def _buildBatchNavigator(self):
    """Return a `BatchNavigator` over this view's `POTMsgSet`s.

    Hook for subclasses; the base class offers no batch of its own.
    """
    raise NotImplementedError
def _initializeTranslationMessageViews(self):
    """Create whatever per-message subviews this page needs.

    Hook for subclasses; the base class builds nothing.
    """
    raise NotImplementedError
def _submitTranslations(self):
    """Process a form POST containing translations.

    Subclasses must find each `TranslationMessage` updated in the
    posted form, feed it to `_receiveTranslations`, surface any
    resulting errors (using `addErrorNotification`), and call
    `_redirectToNextPage` when everything went fine.

    :return: True when processing succeeded; False when errors
        occurred.
    """
    raise NotImplementedError
#
# Helper methods that should be used for TranslationMessageView.__init__()
# and _submitTranslations().
#
def _receiveTranslations(self, potmsgset):
    """Process and store submitted translations for `potmsgset`.

    :param potmsgset: the `POTMsgSet` whose submitted form data is
        processed.
    :return: An error string in case of failure, or None otherwise.
    """
    try:
        self._storeTranslations(potmsgset)
    except GettextValidationError as e:
        # The submitted text failed gettext validation; nothing was
        # stored.  Report the validator's message back to the form.
        return unicode(e)
    except TranslationConflict:
        # Someone else changed this translation while the form was
        # open; the submission was stored as suggestions instead.
        # The translations are demoted to suggestions, but they may
        # still affect the "messages with new suggestions" filter.
        self._observeTranslationUpdate(potmsgset)
        return """
            This translation has changed since you last saw it. To avoid
            accidentally reverting work done by others, we added your
            translations as suggestions. Please review the current
            values.
            """
    else:
        # Success: let subclasses react to the stored translation.
        self._observeTranslationUpdate(potmsgset)
        return None
def _storeTranslations(self, potmsgset):
    """Store the translation submitted for a POTMsgSet.

    :param potmsgset: the `POTMsgSet` whose form data should be
        extracted and stored.
    :raises GettextValidationError: if the submitted translation
        fails gettext validation. The translation is not stored.
    :raises TranslationConflict: if the current translations have
        changed since the translator/reviewer last saw them. The
        submitted translations are stored as suggestions.
    """
    # Parse this message's data out of the submitted form; results
    # land in the self.form_posted_* dictionaries.
    self._extractFormPostedTranslations(potmsgset)
    if self.form_posted_dismiss_suggestions.get(potmsgset, False):
        # The user asked to dismiss all suggestions for this message;
        # nothing else needs storing.
        potmsgset.dismissAllSuggestions(
            self.pofile, self.user, self.lock_timestamp)
        return
    translations = self.form_posted_translations.get(potmsgset, {})
    if not translations:
        # A post with no content -- not an error, but nothing to be
        # done.
        return
    template = self.pofile.potemplate
    language = self.pofile.language
    current_message = potmsgset.getCurrentTranslation(
        template, language, template.translation_side)
    # Keep the current text for plural forms whose "store" flag was
    # not selected in the form.
    translations = revert_unselected_translations(
        translations, current_message,
        self.form_posted_translations_has_store_flag.get(potmsgset, []))
    translations = sanitize_translations_from_webui(
        potmsgset.singular_text, translations, self.pofile.plural_forms)
    has_translations = contains_translations(translations)
    if current_message is None and not has_translations:
        # There is no current translation yet, neither we get any
        # translation submitted, so we don't need to store anything.
        return
    force_suggestion = self.form_posted_needsreview.get(potmsgset, False)
    force_diverge = self.form_posted_diverge.get(potmsgset, False)
    # May raise GettextValidationError, in which case nothing below
    # runs and nothing is stored.
    potmsgset.validateTranslations(translations)
    # Anything flagged "needs review", or posted by a non-official
    # translator, only enters as a suggestion.
    is_suggestion = (
        force_suggestion or not self.user_is_official_translator)
    if has_translations or not is_suggestion:
        message = potmsgset.submitSuggestion(
            self.pofile, self.user, translations)
    if self.user_is_official_translator:
        if force_suggestion:
            # The translator has requested that this translation
            # be reviewed. That means we clear the current
            # translation, demoting the existing message to a
            # suggestion.
            if not has_translations:
                # Forcing a suggestion has a different meaning
                # for an empty translation: "someone should review
                # the _existing_ translation." Which also means
                # that the existing translation is demoted to a
                # suggestion.
                potmsgset.resetCurrentTranslation(
                    self.pofile, lock_timestamp=self.lock_timestamp,
                    share_with_other_side=self.share_with_other_side)
        else:
            # Here is_suggestion was False, so the branch above ran
            # and 'message' is guaranteed to be bound.
            self._approveTranslation(
                message, force_diverge=force_diverge)
def _approveTranslation(self, message, force_diverge=False):
    """Make `message` the current translation.

    :param message: the `TranslationMessage` to approve.
    :param force_diverge: when True, approve the message as a
        translation diverged to this template only.
    """
    if force_diverge:
        message.approveAsDiverged(
            self.pofile, self.user, lock_timestamp=self.lock_timestamp)
        return
    message.approve(
        self.pofile, self.user,
        share_with_other_side=self.share_with_other_side,
        lock_timestamp=self.lock_timestamp)
def _areSuggestionsEmpty(self, suggestions):
    """Return True if all suggestions are empty strings or None.

    :param suggestions: a mapping of plural-form index to suggestion
        text.
    """
    return all(
        suggestions[key] is None or suggestions[key] == ""
        for key in suggestions)
def _prepareView(self, view_class, current_translation_message,
                 pofile, can_edit, error=None):
    """Collect data and build a `TranslationMessageView` for display.

    :param view_class: the subview class to instantiate.
    :param current_translation_message: the message being rendered.
    :param pofile: the `POFile` being displayed or edited.
    :param can_edit: whether the user may edit translations in `pofile`.
    :param error: an error string for this message, or None.
    :return: an instance of `view_class`, primed with whatever form
        data was posted for this message.
    """
    # XXX: kiko 2006-09-27:
    # It would be nice if we could easily check if this is being
    # called in the right order, after _storeTranslations().
    potmsgset = current_translation_message.potmsgset
    # Get translations that the user typed in the form.
    posted = self.form_posted_translations.get(potmsgset, None)
    # Get the flags set by the user to note whether 'New suggestion'
    # should be taken in consideration.
    plural_indices_to_store = (
        self.form_posted_translations_has_store_flag.get(potmsgset, []))
    # Prepare the content of the translation form: for each plural
    # form, carry over the user-submitted value if there is one,
    # otherwise store nothing for it.
    translations = {}
    for plural_index in range(current_translation_message.plural_forms):
        if posted is not None and posted[plural_index] is not None:
            translations[plural_index] = posted[plural_index]
        else:
            translations[plural_index] = None
    # Re-render with the same 'Needs review' value the user submitted.
    force_suggestion = self.form_posted_needsreview.get(potmsgset, False)
    # Re-render with the same 'Diverge' value the user submitted.
    # Bug fix: this used to read the flag back out of
    # form_posted_needsreview, which either raised KeyError or
    # returned the wrong flag.
    force_diverge = self.form_posted_diverge.get(potmsgset, False)
    return view_class(
        current_translation_message, self.request,
        plural_indices_to_store, translations, force_suggestion,
        force_diverge, error, self.second_lang_code,
        self.form_is_writeable, pofile=pofile, can_edit=can_edit)
#
# Internals
#
def _initializeAltLanguage(self):
    """Initialize the alternative language widget and check form data.

    Sets self.alternative_language_widget and self.second_lang_code.
    """
    alternative_language = None
    second_lang_code = self.request.form.get("field.alternative_language")
    fallback_language = self.pofile.language.alt_suggestion_language
    if isinstance(second_lang_code, list):
        # self._redirect() was generating duplicate params in the URL.
        # We may be able to remove this guard.
        raise UnexpectedFormData(
            "You specified more than one alternative language; "
            "only one is currently supported.")
    if second_lang_code:
        try:
            translatable_vocabulary = getVocabularyRegistry().get(
                None, 'TranslatableLanguage')
            language_term = (
                translatable_vocabulary.getTermByToken(second_lang_code))
            alternative_language = language_term.value
        except LookupError:
            # Oops, a bogus code was provided in the request.
            # This is UnexpectedFormData caused by a hacked URL, or an
            # old URL. The alternative_language field used to use
            # LanguageVocabulary that contained untranslatable languages.
            second_lang_code = None
    elif fallback_language is not None:
        # If there's a standard alternative language and no
        # user-specified language was provided, preselect it.
        alternative_language = fallback_language
        second_lang_code = fallback_language.code
    else:
        # The second_lang_code is None and there is no fallback_language.
        # This is probably a parent language or an English variant.
        pass
    # Whatever alternative language choice came out of all that, ignore it
    # if the user has preferred languages and the alternative language
    # isn't among them. Otherwise we'd be initializing this dropdown to a
    # choice it didn't in fact contain, resulting in an oops.
    if alternative_language is not None:
        user = getUtility(ILaunchBag).user
        if user is not None:
            translations_person = ITranslationsPerson(user)
            choices = set(translations_person.translatable_languages)
            if choices and alternative_language not in choices:
                # NOTE(review): the notification links to self.user's
                # language settings, while the preference check above
                # used the launchbag user -- presumably the same
                # person; confirm.
                editlanguages_url = canonical_url(
                    self.user, view_name="+editlanguages")
                self.request.response.addInfoNotification(structured(
                    u"Not showing suggestions from selected alternative "
                    "language %(alternative)s. If you wish to see "
                    "suggestions from this language, "
                    '<a href="%(editlanguages_url)s">'
                    "add it to your preferred languages</a> first.",
                    alternative=alternative_language.displayname,
                    editlanguages_url=editlanguages_url,
                    ))
                alternative_language = None
                second_lang_code = None
    initial_values = {}
    if alternative_language is not None:
        initial_values['alternative_language'] = alternative_language
    # Build the dropdown widget, preseeding it with whichever
    # alternative-language choice survived the checks above.
    self.alternative_language_widget = CustomWidgetFactory(
        CustomDropdownWidget)
    setUpWidgets(
        self, IPOFileAlternativeLanguage, IInputWidget,
        names=['alternative_language'], initial=initial_values)
    # We store second_lang_code for use in hidden inputs in the
    # other forms in the translation pages.
    self.second_lang_code = second_lang_code
@property
def user_is_official_translator(self):
    """Whether the current user may edit this `POFile`'s translations
    directly (as opposed to only adding suggestions).
    """
    pofile = self.pofile
    return pofile.canEditTranslations(self.user)
@cachedproperty
def form_is_writeable(self):
    """Whether the form should accept write operations.

    True when the user can at least add suggestions to this `POFile`.
    """
    pofile = self.pofile
    return pofile.canAddSuggestions(self.user)
def _extractFormPostedTranslations(self, potmsgset):
    """Look for translations for this `POTMsgSet` in the form submitted.

    Store the new translations at self.form_posted_translations and its
    fuzzy status at self.form_posted_needsreview, keyed on the
    `POTMsgSet`.  Also fills self.form_posted_diverge,
    self.form_posted_dismiss_suggestions and
    self.form_posted_translations_has_store_flag.

    In this method, we look for various keys in the form, and use them as
    follows:

    * 'msgset_ID' to know if self is part of the submitted form. If it
      isn't found, we stop parsing the form and return.
    * 'msgset_ID_LANGCODE_translation_PLURALFORM': Those will be the
      submitted translations and we will have as many entries as plural
      forms the language self.context.language has. This identifier
      format is generated by `TranslationMessage.makeHTMLID`.
    * 'msgset_ID_LANGCODE_needsreview': If present, will note that the
      'needs review' flag has been set for the given translations.

    In all those form keys, 'ID' is the ID of the `POTMsgSet`.
    """
    form = self.request.form
    potmsgset_ID = potmsgset.id
    language_code = self.pofile.language.code
    msgset_ID = 'msgset_%d' % potmsgset_ID
    if msgset_ID not in form:
        # If this form does not have data about the msgset id, then
        # do nothing at all.
        return
    msgset_ID_LANGCODE_needsreview = 'msgset_%d_%s_needsreview' % (
        potmsgset_ID, language_code)
    self.form_posted_needsreview[potmsgset] = (
        msgset_ID_LANGCODE_needsreview in form)
    msgset_ID_diverge = 'msgset_%d_diverge' % (
        potmsgset_ID)
    self.form_posted_diverge[potmsgset] = (
        msgset_ID_diverge in form)
    msgset_ID_dismiss = 'msgset_%d_dismiss' % potmsgset_ID
    self.form_posted_dismiss_suggestions[potmsgset] = (
        msgset_ID_dismiss in form)
    # Note the trailing underscore: we append the plural form
    # number later.
    msgset_ID_LANGCODE_translation_ = 'msgset_%d_%s_translation_' % (
        potmsgset_ID, language_code)
    msgset_ID_LANGCODE_translation_GREATER_PLURALFORM_new = '%s%d_new' % (
        msgset_ID_LANGCODE_translation_,
        TranslationConstants.MAX_PLURAL_FORMS)
    if msgset_ID_LANGCODE_translation_GREATER_PLURALFORM_new in form:
        # The plural form translation generation rules created too many
        # fields, or the form was hacked.
        raise AssertionError(
            'More than %d plural forms were submitted!'
            % TranslationConstants.MAX_PLURAL_FORMS)
    # Extract the translations from the form, and store them in
    # self.form_posted_translations. We try plural forms in turn,
    # starting at 0.
    for pluralform in xrange(TranslationConstants.MAX_PLURAL_FORMS):
        msgset_ID_LANGCODE_translation_PLURALFORM_new = '%s%d_new' % (
            msgset_ID_LANGCODE_translation_, pluralform)
        if msgset_ID_LANGCODE_translation_PLURALFORM_new not in form:
            # Stop when we reach the first plural form which is
            # missing from the form.
            break
        # Get new value introduced by the user.
        raw_value = form[msgset_ID_LANGCODE_translation_PLURALFORM_new]
        value = contract_rosetta_escapes(raw_value)
        if self.user_is_official_translator:
            # Let's see the section that we are interested on based on the
            # radio button that is selected.
            msgset_ID_LANGCODE_translation_PLURALFORM_radiobutton = (
                '%s%d_radiobutton' % (
                    msgset_ID_LANGCODE_translation_, pluralform))
            selected_translation_key = form.get(
                msgset_ID_LANGCODE_translation_PLURALFORM_radiobutton)
            if selected_translation_key is None:
                # The radiobutton was missing from the form; either
                # it wasn't rendered to the end-user or no buttons
                # were selected.
                continue
            # We are going to check whether the radio button is for
            # current translation, suggestion or the new translation
            # field.
            current_translation_message = (
                potmsgset.getCurrentTranslationMessageOrDummy(
                    self.pofile))
            if (selected_translation_key !=
                    msgset_ID_LANGCODE_translation_PLURALFORM_new):
                # It's either current translation or an existing
                # suggestion.
                # Let's override 'value' with the selected suggestion
                # value.
                if 'suggestion' in selected_translation_key:
                    value = _getSuggestionFromFormId(
                        selected_translation_key)
                elif current_translation_message.translations[
                        pluralform] is not None:
                    # It's current translation.
                    value = current_translation_message.translations[
                        pluralform]
                else:
                    # Current translation is None, this code expects u''
                    # when there is no translation.
                    value = u''
            # Current user is an official translator and the radio button
            # for 'New translation' is selected, so we are sure we want to
            # store this submission.
            store = True
        else:
            # Note whether this translation should be stored in our
            # database as a new suggestion.
            msgset_ID_LANGCODE_translation_PLURALFORM_new_checkbox = (
                '%s_checkbox'
                % msgset_ID_LANGCODE_translation_PLURALFORM_new)
            store = (
                msgset_ID_LANGCODE_translation_PLURALFORM_new_checkbox
                in form)
        if potmsgset not in self.form_posted_translations:
            self.form_posted_translations[potmsgset] = {}
        self.form_posted_translations[potmsgset][pluralform] = value
        if potmsgset not in self.form_posted_translations_has_store_flag:
            self.form_posted_translations_has_store_flag[potmsgset] = []
        if store:
            self.form_posted_translations_has_store_flag[
                potmsgset].append(pluralform)
def _observeTranslationUpdate(self, potmsgset):
    """Hook: a translation for `potmsgset` was successfully stored.

    The base implementation does nothing.  Subclasses should redefine
    this method if they need to watch the successful calls to
    `potmsgset.updateTranslation`.
    """
#
# Redirection
#
def _buildRedirectParams(self):
    """Construct parameters for redirection.

    Redefine this method if you have additional parameters to preserve.

    :return: a dict of query parameters to carry over on redirects.
    """
    if not self.second_lang_code:
        return {}
    return {'field.alternative_language': self.second_lang_code}
def _redirect(self, new_url):
    """Redirect to the given url adding the selected filtering rules.

    :param new_url: the target URL; must not be None.  An empty
        string means "redirect to the current request URL, keeping
        its query string".
    """
    assert new_url is not None, ('The new URL cannot be None.')
    if not new_url:
        new_url = str(self.request.URL)
        if self.request.get('QUERY_STRING'):
            new_url += '?%s' % self.request.get('QUERY_STRING')
    # Get the default values for several parameters.
    parameters = self._buildRedirectParams()
    if '?' in new_url:
        # Get current query string.  Bug fix: use maxsplit=1 so a
        # second '?' inside the query string no longer raises
        # "too many values to unpack".
        base_url, old_query_string = new_url.split('?', 1)
        query_parts = cgi.parse_qsl(
            old_query_string, strict_parsing=False)
        # Combine parameters provided by _buildRedirectParams with those
        # that came with our page request. The latter take precedence.
        combined_parameters = {}
        combined_parameters.update(parameters)
        for (key, value) in query_parts:
            combined_parameters[key] = value
        parameters = combined_parameters
    else:
        base_url = new_url
    # Sort for a stable query-string ordering.
    new_query = urllib.urlencode(sorted(parameters.items()))
    if new_query:
        new_url = '%s?%s' % (base_url, new_query)
    self.request.response.redirect(new_url)
def _redirectToNextPage(self):
    """After a successful submission, redirect to the next batch page."""
    # Schedule this POFile to have its statistics updated.
    pofilestatsjob.schedule(self.pofile)
    next_url = self.batchnav.nextBatchURL()
    if not next_url:
        # Already on the last batch (None or '') -- wrap around to
        # the first one.
        next_url = self.batchnav.firstBatchURL()
    if next_url is None:
        # No first batch either: stay at whatever URL we are at.
        next_url = ''
    self._redirect(next_url)
class CurrentTranslationMessagePageView(BaseTranslationView):
    """A view for the page that renders a single translation.

    See `BaseTranslationView` for details on how this works.
    """

    def initialize(self):
        self.pofile = self.context.browser_pofile
        # Since we are only displaying a single message, we only hold on to
        # one error for it. The variable is set to the failing
        # TranslationMessage (a device of
        # BaseTranslationView._storeTranslations) via _submitTranslations.
        self.error = None
        self.translationmessage_view = None
        BaseTranslationView.initialize(self)

    #
    # BaseTranslationView API
    #

    def _buildBatchNavigator(self):
        """See `BaseTranslationView._buildBatchNavigator`."""
        # Batches of one: this page shows a single message at a time.
        return POTMsgSetBatchNavigator(
            self.pofile.potemplate.getPOTMsgSets(), self.request, size=1)

    def _initializeTranslationMessageViews(self):
        """See `BaseTranslationView._initializeTranslationMessageViews`."""
        pofile = self.pofile
        can_edit = pofile.canEditTranslations(self.user)
        self.translationmessage_view = self._prepareView(
            CurrentTranslationMessageZoomedView, self.context, pofile=pofile,
            can_edit=can_edit, error=self.error)

    def _submitTranslations(self):
        """See `BaseTranslationView._submitTranslations`."""
        self.error = self._receiveTranslations(self.context.potmsgset)
        if self.error:
            self.request.response.addErrorNotification(
                "There is an error in the translation you provided. "
                "Please correct it before continuing.")
            return False
        self._redirectToNextPage()
        return True

    def _messages_html_id(self):
        """Return the HTML ids of the 'new translation' fields, in order.

        Returns an empty list when the subview was not created (e.g.
        unknown plural forms prevented translation) or the form is
        read-only.
        """
        order = []
        message = self.translationmessage_view
        # If we don't know about plural forms, or there are some other
        # reason that prevent translations, translationmessage_view is
        # not created
        if ((message is not None) and (message.form_is_writeable)):
            for dictionary in message.translation_dictionaries:
                order.append(
                    dictionary['html_id_translation'] + '_new')
        return order

    @property
    def autofocus_html_id(self):
        """HTML id of the field to focus on page load, or ''."""
        # Compute the id list once; the previous code rebuilt it twice
        # (once for the emptiness check, once for the first element).
        html_ids = self._messages_html_id()
        if html_ids:
            return html_ids[0]
        return ""

    @property
    def translations_order(self):
        """Space-separated tab order of the translation fields."""
        return ' '.join(self._messages_html_id())
class CurrentTranslationMessageView(LaunchpadView):
"""Holds all data needed to show an ITranslationMessage.
This view class could be used directly or as part of the POFileView class
in which case, we would have up to 100 instances of this class using the
same information at self.form.
"""
# Instead of registering in ZCML, we indicate the template here and avoid
# the adapter lookup when constructing these subviews.
template = ViewPageTemplateFile(
'../templates/currenttranslationmessage-translate-one.pt')
def __init__(self, current_translation_message, request,
             plural_indices_to_store, translations, force_suggestion,
             force_diverge, error, second_lang_code, form_is_writeable,
             pofile, can_edit):
    """Primes the view with information that is gathered by a parent view.

    :param plural_indices_to_store: A dictionary that indicates whether
        the translation associated should be stored in our database or
        ignored. It's indexed by plural form.
    :param translations: A dictionary indexed by plural form index;
        BaseTranslationView constructed it based on form-submitted
        translations.
    :param force_suggestion: Should this be a suggestion even for editors.
    :param force_diverge: Should this translation be diverged.
    :param error: The error related to self.context submission or None.
    :param second_lang_code: The result of submiting
        field.alternative_value.
    :param form_is_writeable: Whether the form should accept write
        operations
    :param pofile: The `POFile` that's being displayed or edited.
    :param can_edit: Whether the user has editing privileges on `pofile`.
    """
    LaunchpadView.__init__(self, current_translation_message, request)
    self.pofile = pofile
    self.plural_indices_to_store = plural_indices_to_store
    self.translations = translations
    self.error = error
    self.force_suggestion = force_suggestion
    self.force_diverge = force_diverge
    self.user_is_official_translator = can_edit
    self.form_is_writeable = form_is_writeable
    # Work out the current translation on the other side
    # (Ubuntu <-> upstream), if it differs from this one.
    side_traits = getUtility(ITranslationSideTraitsSet).getForTemplate(
        pofile.potemplate)
    if side_traits.other_side_traits.getFlag(self.context):
        # The shared translation for the other side matches the current
        # one.
        self.other_translationmessage = None
    else:
        self.other_translationmessage = (
            self.context.potmsgset.getCurrentTranslation(
                self.pofile.potemplate, self.pofile.language,
                side_traits.other_side_traits.side))
    if self.context.potemplate is None:
        # Current translation is shared.
        self.shared_translationmessage = None
    else:
        # Current translation is diverged, find the shared one.
        shared_translationmessage = (
            self.context.potmsgset.getCurrentTranslation(
                None, self.pofile.language, side_traits.side))
        if (shared_translationmessage == self.other_translationmessage):
            # If it matches the other message, we don't care.
            self.shared_translationmessage = None
        else:
            self.shared_translationmessage = shared_translationmessage
    self.other_title = "In %s:" % (
        side_traits.other_side_traits.displayname)
    # Dismissal flags; _set_dismiss_flags() activates the right ones
    # later.
    self.can_confirm_and_dismiss = False
    self.can_dismiss_on_empty = False
    self.can_dismiss_on_plural = False
    self.can_dismiss_other = False
    # Initialize to True, allowing POFileTranslateView to override.
    self.zoomed_in_view = True
    # Set up alternative language variables.
    # XXX: kiko 2006-09-27:
    # This could be made much simpler if we built suggestions externally
    # in the parent view, as suggested in initialize() below.
    self.sec_lang = None
    self.second_lang_potmsgset = None
    if second_lang_code is not None:
        potemplate = self.pofile.potemplate
        second_lang_pofile = potemplate.getPOFileByLang(second_lang_code)
        if second_lang_pofile:
            self.sec_lang = second_lang_pofile.language
def initialize(self):
    """Build the per-plural-form dictionaries the template renders."""
    # XXX: kiko 2006-09-27:
    # The heart of the optimization problem here is that
    # _buildAllSuggestions() is very expensive. We need to move to
    # building suggestions and active texts in one fell swoop in the
    # parent view, and then supplying them all via __init__(). This
    # would cut the number of (expensive) queries per-page by an
    # order of 30.
    # This code is where we hit the database collecting suggestions for
    # this ITranslationMessage.
    # We store lists of TranslationMessageSuggestions objects in a
    # suggestion_blocks dictionary, keyed on plural form index; this
    # allows us later to just iterate over them in the view code
    # using a generic template.
    self.pluralform_indices = range(self.context.plural_forms)
    self._buildAllSuggestions()
    # If existing translation is shared, and a user is
    # an official translator, they can diverge a translation.
    self.allow_diverging = (self.zoomed_in_view and
                            self.user_is_official_translator and
                            self.context.potemplate is None)
    if self.allow_diverging:
        # Compute the series title shown next to the diverge option.
        if self.pofile.potemplate.productseries is not None:
            self.current_series = self.pofile.potemplate.productseries
            self.current_series_title = "%s %s" % (
                self.current_series.product.displayname,
                self.current_series.name)
        else:
            self.current_series = self.pofile.potemplate.distroseries
            self.current_series_title = "%s %s" % (
                self.current_series.distribution.displayname,
                self.current_series.name)
    # Initialize the translation dictionaries used from the
    # translation form.
    self.translation_dictionaries = []
    for index in self.pluralform_indices:
        current_translation = self.getCurrentTranslation(index)
        other_translation = self.getOtherTranslation(index)
        shared_translation = self.getSharedTranslation(index)
        submitted_translation = self.getSubmittedTranslation(index)
        # Multi-line texts get a different rendering in the form.
        is_multi_line = (count_lines(current_translation) > 1 or
                         count_lines(submitted_translation) > 1 or
                         count_lines(self.singular_text) > 1 or
                         count_lines(self.plural_text) > 1)
        is_same_translator = (
            self.context.submitter == self.context.reviewer)
        is_same_date = (
            self.context.date_created == self.context.date_reviewed)
        if self.other_translationmessage is None:
            other_submission = None
        else:
            pofile = (
                self.other_translationmessage.ensureBrowserPOFile())
            if pofile is None:
                other_submission = None
            else:
                other_submission = (
                    convert_translationmessage_to_submission(
                        message=self.other_translationmessage,
                        current_message=self.context,
                        plural_form=index,
                        pofile=pofile,
                        legal_warning_needed=False,
                        is_empty=False,
                        other_side=True,
                        local_to_pofile=True))
        diverged_and_have_shared = (
            self.context.potemplate is not None and
            self.shared_translationmessage is not None)
        if diverged_and_have_shared:
            pofile = self.shared_translationmessage.ensureBrowserPOFile()
            if pofile is None:
                shared_submission = None
            else:
                shared_submission = (
                    convert_translationmessage_to_submission(
                        message=self.shared_translationmessage,
                        current_message=self.context,
                        plural_form=index,
                        pofile=pofile,
                        legal_warning_needed=False,
                        is_empty=False,
                        local_to_pofile=True))
        else:
            shared_submission = None
        translation_entry = {
            'plural_index': index,
            'current_translation': text_to_html(
                current_translation, self.context.potmsgset.flags),
            'submitted_translation': submitted_translation,
            'other_translation': text_to_html(
                other_translation, self.context.potmsgset.flags),
            'other_translation_message': other_submission,
            'shared_translation': text_to_html(
                shared_translation, self.context.potmsgset.flags),
            'shared_translation_message': shared_submission,
            'suggestion_block': self.suggestion_blocks[index],
            'suggestions_count': self.suggestions_count[index],
            'store_flag': index in self.plural_indices_to_store,
            'is_multi_line': is_multi_line,
            'same_translator_and_reviewer': (is_same_translator and
                                             is_same_date),
            'html_id_translation':
                self.context.makeHTMLID('translation_%d' % index),
        }
        if self.message_must_be_hidden:
            # We must hide the translation because it may have private
            # info that we don't want to show to anonymous users.
            translation_entry['current_translation'] = u'''
                To prevent privacy issues, this translation is not
                available to anonymous users,<br />
                if you want to see it, please, <a href="+login">log in</a>
                first.'''
        self.translation_dictionaries.append(translation_entry)
    self.html_id = self.context.potmsgset.makeHTMLID()
    # HTML id for singular form of this message
    self.html_id_singular = self.context.makeHTMLID('translation_0')
def _set_dismiss_flags(self, local_suggestions, other):
    """Set dismissal flags.

    The flags have been initialized to False in the constructor. This
    method activates the right ones.

    :param local_suggestions: The list of local suggestions.
    :param other: The translation on the other side for this
        message or None if there is no such translation.
    """
    # Only official translators can dismiss anything.
    if not self.user_is_official_translator:
        return
    # An other-side translation can be dismissed when it is newer
    # than this message's last review.
    if other is None:
        self.can_dismiss_other = False
    else:
        last_review = self.context.date_reviewed
        self.can_dismiss_other = (
            last_review is None or last_review < other.date_created)
    # With no local suggestions and no new other-side translation,
    # nothing can be dismissed.
    if not local_suggestions and not self.can_dismiss_other:
        return
    if self.is_plural:
        self.can_dismiss_on_plural = True
    elif self.getCurrentTranslation(0) is None:
        self.can_dismiss_on_empty = True
    else:
        self.can_confirm_and_dismiss = True
def _setOnePOFile(self, messages):
    """Return a list of messages that all have a browser_pofile set.

    Messages that already carry a browser_pofile are kept as-is; for
    the others a POFile is looked up via `getOnePOFile` and attached.
    Messages for which no POFile can be found are left out of the
    result.
    """
    usable_messages = []
    for message in messages:
        if message.browser_pofile is None:
            pofile = message.getOnePOFile()
            if pofile is None:
                # No POFile for this message; drop it.
                continue
            message.setPOFile(pofile)
        usable_messages.append(message)
    return usable_messages
def _buildAllSuggestions(self):
    """Builds all suggestions and puts them into suggestions_block.

    This method does the ugly nitty gritty of making sure we don't
    display duplicated suggestions; this is done by checking the
    translation strings in each submission and grabbing only one
    submission per string.

    The decreasing order of preference this method encodes is:
     - Non-active translations to this context and to the pofile
       from which this translation was imported (non_editor)
     - Active translations to other contexts (elsewhere)
     - Non-editor translations to other contexts (wiki)
    """
    # Prepare suggestions storage.
    self.suggestion_blocks = {}
    self.suggestions_count = {}
    if self.message_must_be_hidden:
        # We must hide all suggestions because this message may contain
        # private information that we don't want to show to anonymous
        # users, such as email addresses.
        for index in self.pluralform_indices:
            self.suggestion_blocks[index] = []
            self.suggestions_count[index] = 0
        return
    language = self.pofile.language
    potmsgset = self.context.potmsgset
    other = self.other_translationmessage
    # Show suggestions only when you can actually do something with them
    # (i.e. you are logged in and have access to at least submit
    # suggestions).
    if self.form_is_writeable:
        # Get a list of local suggestions for this message: local are
        # those who have been submitted directly against it and are
        # newer than the date of the last review.
        local = sorted(
            potmsgset.getLocalTranslationMessages(
                self.pofile.potemplate,
                language),
            key=operator.attrgetter("date_created"),
            reverse=True)
        self._set_dismiss_flags(local, other)
        for suggestion in local:
            suggestion.setPOFile(self.pofile)
        # Get a list of translations which are _used_ as translations
        # for this same message in a different translation template.
        used_languages = [language]
        if self.sec_lang is not None:
            used_languages.append(self.sec_lang)
        # One combined query for both "used" and "suggested" external
        # messages, in all languages of interest.
        translations = (
            potmsgset.getExternallySuggestedOrUsedTranslationMessages(
                suggested_languages=[language],
                used_languages=used_languages))
        alt_external = translations[self.sec_lang].used
        externally_used = self._setOnePOFile(sorted(
            translations[language].used,
            key=operator.attrgetter("date_created"),
            reverse=True))
        # Get a list of translations which are suggested as
        # translations for this same message in a different translation
        # template, but are not used.
        externally_suggested = self._setOnePOFile(sorted(
            translations[language].suggested,
            key=operator.attrgetter("date_created"),
            reverse=True))
    else:
        # Don't show suggestions for anonymous users.
        local = externally_used = externally_suggested = []
    # Fetch a list of current and externally used translations for
    # this message in an alternative language.
    alt_submissions = []
    if self.sec_lang is None:
        alt_title = None
    else:
        # User is asking for alternative language suggestions.
        alt_pofile = self.pofile.potemplate.getPOFileByLang(
            self.sec_lang.code)
        alt_current = potmsgset.getCurrentTranslation(
            self.pofile.potemplate, self.sec_lang,
            self.pofile.potemplate.translation_side)
        if alt_current is not None:
            alt_submissions.append(alt_current)
        if not self.form_is_writeable:
            # The writeable branch above already fetched alt_external
            # as part of the combined query.
            alt_external = list(
                potmsgset.getExternallyUsedTranslationMessages(
                    self.sec_lang))
        alt_submissions.extend(alt_external)
        for suggestion in alt_submissions:
            suggestion.setPOFile(alt_pofile)
        alt_title = self.sec_lang.englishname
    # To maintain compatibility with the old DB model as much as possible,
    # let's split out all the submissions by their plural form.
    # Builds ITranslationMessageSuggestions for each type of the
    # suggestion per plural form.
    for index in self.pluralform_indices:
        # Seed the duplicate filter with translations already shown
        # for this plural form.
        self.seen_translations = set([self.context.translations[index]])
        if other is not None:
            self.seen_translations.add(other.translations[index])
        local_suggestions = (
            self._buildTranslationMessageSuggestions(
                'Suggestions', local, index, local_to_pofile=True))
        externally_used_suggestions = (
            self._buildTranslationMessageSuggestions(
                'Used in', externally_used, index, legal_warning=True))
        externally_suggested_suggestions = (
            self._buildTranslationMessageSuggestions(
                'Suggested in', externally_suggested, index,
                legal_warning=True))
        alternate_language_suggestions = (
            self._buildTranslationMessageSuggestions(
                alt_title, alt_submissions, index))
        self.suggestion_blocks[index] = [
            local_suggestions, externally_used_suggestions,
            externally_suggested_suggestions,
            alternate_language_suggestions]
        self.suggestions_count[index] = (
            len(local_suggestions.submissions) +
            len(externally_used_suggestions.submissions) +
            len(externally_suggested_suggestions.submissions) +
            len(alternate_language_suggestions.submissions))
def _buildTranslationMessageSuggestions(self, title, suggestions, index,
                                        legal_warning=False,
                                        local_to_pofile=False):
    """Build the filtered suggestion group shown in the view.

    :param title: Heading for this group of suggestions.
    :param suggestions: Candidate translation messages.
    :param index: The plural form the suggestions apply to.
    :param legal_warning: Whether a legal-warning check is requested.
    :param local_to_pofile: Whether the suggestions come from this POFile.
    """
    # Cap at max_entries and thread the running seen-translations set
    # through, so later groups do not repeat earlier suggestions.
    batch = TranslationMessageSuggestions(
        title, self.context,
        suggestions[:self.max_entries],
        self.user_is_official_translator, self.form_is_writeable,
        index, self.seen_translations, legal_warning=legal_warning,
        local_to_pofile=local_to_pofile)
    self.seen_translations = batch.seen_translations
    return batch
def getOfficialTranslation(self, index, is_other=False, is_shared=False):
    """Return current translation on either side for plural form 'index'.

    Returns None when no translation exists for the requested form.
    """
    assert index in self.pluralform_indices, (
        'There is no plural form #%d for %s language' % (
            index, self.pofile.language.displayname))
    # Pick the message whose translation we should return.
    if is_shared:
        if self.shared_translationmessage is None:
            return None
        source = self.shared_translationmessage
    elif is_other and self.other_translationmessage is not None:
        source = self.other_translationmessage
    else:
        source = self.context
    translation = source.translations[index]
    if translation is None:
        return None
    # Stored translations may use '\n', '\r' or '\r\n' (matching the
    # msgid); web forms expect '\r\n', so normalize before display.
    return convert_newlines_to_web_form(translation)
def getCurrentTranslation(self, index):
    """Return this side's current translation for plural form 'index'."""
    return self.getOfficialTranslation(index, is_other=False, is_shared=False)
def getOtherTranslation(self, index):
    """Return the other side's translation for plural form 'index'."""
    return self.getOfficialTranslation(index, is_other=True)
def getSharedTranslation(self, index):
    """Return the shared translation for plural form 'index'."""
    return self.getOfficialTranslation(index, is_shared=True)
def getSubmittedTranslation(self, index):
    """Return the translation submitted for plural form 'index'."""
    assert index in self.pluralform_indices, (
        'There is no plural form #%d for %s language' % (
            index, self.pofile.language.displayname))
    # Stored line endings vary ('\n', '\r' or '\r\n', matching the text
    # to translate); web forms expect '\r\n', so normalize for display.
    return convert_newlines_to_web_form(self.translations[index])
#
# Display-related methods
#
@cachedproperty
def is_plural(self):
    """Whether this message defines plural forms."""
    # A non-None plural_text on the msgid means plural forms exist.
    return self.context.potmsgset.plural_text is not None
@cachedproperty
def message_must_be_hidden(self):
    """Whether this message must be hidden from anonymous viewers.

    Messages are always shown to logged-in users.  Messages likely to
    contain email addresses must be withheld from anonymous visitors to
    keep them out of search engines, spam lists etc.
    """
    if self.user is not None:
        # Logged-in users always see the message.
        return False
    # Anonymous viewer: defer to the msgid's own hiding flag.
    return self.context.potmsgset.hide_translations_from_anonymous
@property
def translation_credits(self):
    """Return automatically created translation if defined, or None."""
    potmsgset = self.context.potmsgset
    # Only meaningful for translation-credit messages.
    assert potmsgset.is_translation_credit
    return text_to_html(
        self.pofile.prepareTranslationCredits(potmsgset),
        potmsgset.flags)
@cachedproperty
def sequence(self):
    """Return the position number of this potmsgset in the pofile."""
    return self.context.potmsgset.getSequence(self.pofile.potemplate)
@property
def singular_text(self):
    """Return the singular form prepared to render in a web page."""
    potmsgset = self.context.potmsgset
    return text_to_html(potmsgset.singular_text, potmsgset.flags)
@property
def plural_text(self):
    """Return a plural form prepared to render in a web page.

    If there is no plural form, return None.
    """
    potmsgset = self.context.potmsgset
    return text_to_html(potmsgset.plural_text, potmsgset.flags)
# XXX mpt 2006-09-15: Detecting tabs, newlines, and leading/trailing
# spaces is being done one way here, and another way in the functions
# above.
@property
def text_has_tab(self):
    """Whether the text to translate contains tab characters."""
    potmsgset = self.context.potmsgset
    if '\t' in potmsgset.singular_text:
        return True
    plural = potmsgset.plural_text
    return plural is not None and '\t' in plural
@property
def text_has_newline(self):
    """Whether the text to translate contains newline characters."""
    potmsgset = self.context.potmsgset
    if '\n' in potmsgset.singular_text:
        return True
    plural = potmsgset.plural_text
    return plural is not None and '\n' in plural
@property
def text_has_leading_or_trailing_space(self):
    """Whether any line of the text has leading or trailing spaces."""
    potmsgset = self.context.potmsgset
    texts = [potmsgset.singular_text]
    if potmsgset.plural_text is not None:
        texts.append(potmsgset.plural_text)
    return any(
        line.startswith(' ') or line.endswith(' ')
        for text in texts
        for line in text.splitlines())
@property
def source_comment(self):
    """Return the source code comments for this ITranslationMessage."""
    potmsgset = self.context.potmsgset
    return potmsgset.sourcecomment
@property
def comment(self):
    """Return the translator comments for this ITranslationMessage."""
    # Translator comments live on the message itself, not the potmsgset.
    return self.context.comment
@property
def file_references(self):
    """Return the file references for this ITranslationMessage."""
    potmsgset = self.context.potmsgset
    return potmsgset.filereferences
@property
def zoom_url(self):
    """Return the URL the zoom icon should link to."""
    # XXX: kiko 2006-09-27: Preserve second_lang_code and other form
    # parameters?
    return canonical_url(self.context) + '/+translate'
@property
def zoom_alt(self):
    """Alt text for the zoom icon."""
    return 'View all details of this message'
@property
def zoom_link_id(self):
    """HTML id for the zoom link, unique per message."""
    return "zoom-" + str(self.context.id)
@property
def zoom_icon(self):
    """Icon name shown for zooming into the single-message view."""
    return 'zoom-in'
@property
def max_entries(self):
    """Return the max number of entries to show as suggestions.

    If there is no limit, we return None.
    """
    return 3
@property
def dismissable_class(self):
    """The class string for dismissable parts."""
    html_id = self.html_id
    return "%s_dismissable %s_dismissable_button" % (html_id, html_id)
@property
def dismissable_class_other(self):
    """The class string for dismissable other translations."""
    if not self.can_dismiss_other:
        # Buttons are always dismissable, even when the row is not.
        return "%s_dismissable_button" % self.html_id
    return self.dismissable_class
class CurrentTranslationMessageZoomedView(CurrentTranslationMessageView):
    """A view that displays a `TranslationMessage`, but zoomed in.

    See `TranslationMessagePageView`.
    """
    zoom_link_id = 'zoom-out'

    @property
    def zoom_url(self):
        """Link back to the parent batch of messages."""
        # We are viewing this class directly from an ITranslationMessage.
        # XXX: kiko 2006-09-27: Preserve second_lang_code and other form
        # parameters?
        batch_url = '/+translate?start=%d' % (self.sequence - 1)
        return canonical_url(self.pofile) + batch_url

    @property
    def zoom_alt(self):
        """Alt text for the zoom-out icon."""
        return 'Return to multiple messages view.'

    @property
    def zoom_icon(self):
        """Icon name shown for zooming back out."""
        return 'zoom-out'

    @property
    def max_entries(self):
        """The zoomed view shows suggestions without limit."""
        return None
class TranslationMessageSuggestions:
    """See `ITranslationMessageSuggestions`."""
    implements(ITranslationMessageSuggestions)

    def __init__(self, title, translation, submissions,
                 user_is_official_translator, form_is_writeable,
                 plural_form, seen_translations=None, legal_warning=False,
                 local_to_pofile=False):
        self.title = title
        self.potmsgset = translation.potmsgset
        self.pofile = translation.browser_pofile
        self.user_is_official_translator = user_is_official_translator
        self.form_is_writeable = form_is_writeable
        self.submissions = []
        if seen_translations is None:
            seen_translations = set()
        for submission in submissions:
            # Languages with an unknown plural-form count fall back to
            # the default of two forms.
            total_plural_forms = submission.language.pluralforms
            if total_plural_forms is None:
                total_plural_forms = 2
            if (plural_form >= total_plural_forms or
                    plural_form >= len(submission.translations)):
                # This submission has no translation for the requested
                # plural form, so it is not a viable suggestion here.
                continue
            candidate = submission.translations[plural_form]
            if candidate is None or candidate in seen_translations:
                # Empty or already offered elsewhere; skip duplicates.
                continue
            seen_translations.add(candidate)
            self.submissions.append(
                convert_translationmessage_to_submission(
                    submission,
                    translation,
                    plural_form,
                    self.pofile,
                    legal_warning,
                    is_empty=False,
                    local_to_pofile=local_to_pofile))
        self.seen_translations = seen_translations
class Submission:
    """A plain attribute holder for a submission rendered in the view.

    Instances are populated by `convert_translationmessage_to_submission`.
    """
def convert_translationmessage_to_submission(
    message, current_message, plural_form, pofile, legal_warning_needed,
    is_empty=False, other_side=False, local_to_pofile=False):
    """Turn a TranslationMessage to an object used for rendering a submission.

    :param message: A TranslationMessage.
    :param current_message: The message currently shown, used to build
        HTML ids.
    :param plural_form: A plural form to prepare a submission for.
    :param pofile: A containing PO file where suggestion is being rendered.
    :param legal_warning_needed: Whether a warning check is needed.
    :param is_empty: Is the submission empty or not.
    :param other_side: Whether this is the other side's current message.
    :param local_to_pofile: Whether the suggestion is local to this POFile.
    """
    submission = Submission()
    submission.is_traversable = (message.sequence != 0)
    submission.translationmessage = message
    # Mirror a fixed set of attributes straight off the message.
    submission.id = message.id
    submission.language = message.language
    submission.potmsgset = message.potmsgset
    submission.date_created = message.date_created
    submission.pofile = message.browser_pofile
    submission.person = message.submitter
    submission.is_empty = is_empty
    submission.plural_index = plural_form
    submission.suggestion_text = text_to_html(
        message.translations[plural_form],
        message.potmsgset.flags)
    submission.is_local_to_pofile = local_to_pofile
    # The legal warning applies only when the message's origin is SCM.
    submission.legal_warning = legal_warning_needed and (
        message.origin == RosettaTranslationOrigin.SCM)
    submission.suggestion_html_id = (
        current_message.potmsgset.makeHTMLID(u'%s_suggestion_%s_%s' % (
            message.language.code, message.id,
            plural_form)))
    if other_side:
        submission.row_html_id = current_message.potmsgset.makeHTMLID(
            'other')
        submission.origin_html_id = submission.row_html_id + '_origin'
    else:
        submission.row_html_id = ''
        submission.origin_html_id = submission.suggestion_html_id + '_origin'
    submission.translation_html_id = (
        current_message.makeHTMLID(
            u'translation_%s' % (plural_form)))
    dismissable = message.potmsgset.makeHTMLID(u'dismissable_button')
    if local_to_pofile:
        dismissable += u' ' + message.potmsgset.makeHTMLID(u'dismissable')
    submission.suggestion_dismissable_class = dismissable
    return submission
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/translations/browser/translationmessage.py | Python | agpl-3.0 | 71,809 | [
"VisIt"
] | 7689d5b4258f9f8c31bfc1070a6ce9833571a3166d15062e24969d6f38d2dcfd |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 - 2015 -- Lars Heuer <heuer[at]semagia.com>
# All rights reserved.
#
# License: BSD, see LICENSE.txt for more details.
#
"""\
Tests classificationist parsing.
:author: Lars Heuer (heuer[at]semagia.com)
:organization: Semagia - <http://www.semagia.com/>
:license: BSD license
"""
from nose.tools import eq_, ok_
from cablemap.core import cable_by_id
from cablemap.core.reader import parse_classified_by
_TEST_DATA = (
(u'10TOKYO397', u'Marc Wall', u'''FIELD
REF: STATE 015541
Classified By: Acting Deputy Chief of Mission Marc Wall for Reasons 1.4
(b) and (d)
¶1. (C) SUM'''),
(u'10GENEVA249', u'Rose E. Gottemoeller', u'''REF: 10 GENEVA 231 (SFO-GVA-VIII-088) CLASSIFIED BY: Rose E. Gottemoeller, Assistant Secretary, Department of State, VCI; REASON: 1.4(B), (D) '''),
(u'10GENEVA247', u'Rose E. Gottemoeller', u'''REF: 10 GENEVA 245 (SFO-GVA-VIII-086) CLASSIFIED BY: Rose E. Gottemoeller, Assistant Secretary, Department of State, VCI; REASON: 1.4(B), (D) ¶1. (U) This '''),
(u'10UNVIEVIENNA77', u'Glyn T. Davies', u'''\nClassified By: Ambassador Glyn T. Davies for reasons 1.4 b and d '''),
(u'10WARSAW117', u'F. Daniel Sainz', u'''\nClassified By: Political Counselor F. Daniel Sainz for Reasons 1.4 (b) and (d) '''),
(u'10STATE16019', u'Karin L. Look', u'''\nClassified By: Karin L. Look, Acting ASSISTANT SECRETARY, VCI. Reason: 1.4 (b) and (d).'''),
(u'10LILONGWE59', u'Bodde Peter', u'''\nCLASSIFIED BY: Bodde Peter, Ambassador; REASON: 1.4(B) '''),
(u'95ZAGREB4339', u'ROBERT P. FINN', u'''
1. (U) CLASSIFIED BY ROBERT P. FINN, DEPUTY CHIEF OF
MISSION. REASON: 1.5 (D)
'''),
(u'95DAMASCUS5748', u'CHRISTOPHER W.S. ROSS', u'''SUBJECT: HAFIZ AL-ASAD: LAST DEFENDER OF ARABS
1. CONFIDENTIAL - ENTIRE TEXT. CLASSIFIED BY:
CHRISTOPHER W.S. ROSS, AMBASSADOR. REASON: 1.5 (D) .
2. SUMMAR'''),
(u'95TELAVIV17504', (), u'''
1. CONFIDENTIAL - ENTIRE TEXT. CLASSIFIED BY SECTION 1.5 (B)
AND (D). NIACT PRECEDENCE BECAUSE OF GOVERNMENT CRISIS IN
ISRAEL.
2. SU'''),
(u'95RIYADH5221', u'THEODORE KATTOUF', u'''
1. CONFIDENTIAL - ENTIRE TEXT. CLASSIFIED BY DCM
THEODORE KATTOUF - 1.5 B,D.
2. (C)'''),
(u'96ADDISABABA1545', u'JEFFREY JACOBS', u'''
1. (U) CLASSIFIED BY POLOFF JEFFREY JACOBS, 1.5 (D).
2. (C)'''),
(u'96AMMAN2094', u'ROBERT BEECROFT', u'''
1. (U) CLASSIFIED BY CHARGE ROBERT BEECROFT; REASON 1.5 (D).
2. (C) '''),
(u'96STATE86789', u'MARY BETH LEONARD', u'''
1. CLASSIFIED BY AF/C - MARY BETH LEONARD, REASON 1.5
(D). '''),
(u'96NAIROBI6573', u'TIMOTHY CARNEY', u'''
1. CLASSIFIED BY AMBASSADOR TO SUDAN TIMOTHY CARNEY.
REASON 1.5(D).
'''),
(u'96RIYADH2406', u'THEODORE KATTOUF', u'''SUBJECT: CROWN PRINCE ABDULLAH THE DIPLOMAT
1. (U) CLASSIFIED BY CDA THEODORE KATTOUF, REASON 1.5.D.
2. '''),
(u'96RIYADH2696', u'THEODORE KATTOUF', u'''
1. (U) CLASSIFIED BY CHARGE D'AFFAIRES THEODORE
KATTOUF: 1.5 B, D.
'''),
(u'96ISLAMABAD5972', u'THOMAS W. SIMONS, JR.', u'''
1. (U) CLASSIFIED BY THOMAS W. SIMONS, JR., AMBASSADOR.
REASON: 1.5 (B), (C) AND (D).
'''),
(u'96ISLAMABAD5972', u'Thomas W. Simons, Jr.', u'''
1. (U) CLASSIFIED BY THOMAS W. SIMONS, JR., AMBASSADOR.
REASON: 1.5 (B), (C) AND (D).
''', True),
(u'96STATE183372', u'LEE 0. COLDREN', u'''
1. (U) CLASSIFIED BY LEE 0. COLDREN, DIRECTOR, SA/PAB,
DEPARTMENT OF STATE. REASON: 1.5(D).
'''),
(u'96STATE183372', u'Lee O. Coldren', u'''
1. (U) CLASSIFIED BY LEE 0. COLDREN, DIRECTOR, SA/PAB,
DEPARTMENT OF STATE. REASON: 1.5(D).
''', True),
(u'96ASHGABAT2612', u'TATIANA C. GFOELLER', u'''
1. (U) CLASSIFIED BY CHARGE TATIANA C. GFOELLER.
REASON: 1.5 D.
'''),
(u'96BOGOTA8773', u'S.K. ABEYTA', u'''
1. CLASSIFIED BY POL/ECONOFF. S.K. ABEYTA. REASON: 1.5(D)
'''),
(u'96STATE194868', u'E. GIBSON LANPHER, JR.', u'''
1. (U) CLASSIFIED BY E. GIBSON LANPHER, JR., ACTING
ASSISTANT SECRETARY OF STATE FOR SOUTH ASIAN AFFAIRS,
DEPARTMENT OF STATE. REASON: 1.5(D).
'''),
(u'96JAKARTA7841', u'ED MCWILLIAMS', u'''
1. (U) CLASSIFIED BY POL COUNSELOR ED MCWILLIAMS;
REASON 1.5(D)
'''),
(u'96JERUSALEM3094', u'EDWARD G. ABINGTON, JR.', u'''
1. CLASSIFIED BY CONSUL GENERAL EDWARD G. ABINGTON, JR. REASON
1.5 (B) AND (D).
'''),
(u'96BOGOTA10967', u'S.K. ABEYTA', u'''
1. (U) CLASSIFIED BY POL/ECONOFF S.K. ABEYTA. REASON 1.5(D).
'''),
(u'04MUSCAT2112', u'Richard L. Baltimore, III', u'''
Classified By: Ambassador Richard L. Baltimore, III.
Reasons: 1.4 (b) and (d).
'''),
(u'04MUSCAT2112', u'Richard L. Baltimore, III', u'''
Classified By: Ambassador Richard L. Baltimore, III.
Reasons: 1.4 (b) and (d).
''', True),
(u'05OTTAWA1975', u'Patricia Kim-Scott', u'''
Classified By: Pol/Mil Officer Patricia Kim-Scott. Reason E.O. 12958,
1.4 (b) and (d).
'''),
(u'05BOGOTA6208', u'William B. Wood', u'''
Classified By: Ambassador William B. Wood; reasons 1.4
(b) and (d)
'''),
(u'05TAIPEI2839', u'Douglas Paal', u'''
Classified By: AIT Director Douglas Paal, Reason(s): 1.4 (B/D).
'''),
(u'05DHAKA3073', u'D.C. McCullough', u'''
Classified By: A/DCM D.C. McCullough, reason para 1.4 (b)
'''),
(u'09NAIROBI1132', u'Jessica Davis Ba', u'''
Classified By: Pol/Econ Officer Jessica Davis Ba for reasons 1.4(b) and
(d)
'''),
(u'08ROME1541', u'Liz Dibble', u'''
Classified By: Classified by DCM Liz Dibble for reasons 1.4 (b) and
(d).
'''),
(u'06BAGHDAD2082', u'DANIEL SPECKHARD', ur'''
Classified By: CHARGE D\'AFFAIRES DANIEL SPECKHARD FOR REASONS 1.4 (A),
(B) AND (D)
'''),
(u'05ANKARA4653', u'Nancy McEldowney', u'''
Classified By: (U) CDA Nancy McEldowney; E.O. 12958, reasons 1.4 (b,d)
'''),
(u'05QUITO2057', u'LARRY L. MEMMOTT', u'''
Classified By: ECON LARRY L. MEMMOTT, REASONS 1.4 (B,D)
'''),
(u'06HONGKONG3559', u'LAURENT CHARBONNET', u'''
CLASSIFIED BY: ACTING DEPUTY PRINCIPAL OFFICER LAURENT CHARBONNET. REA
SONS: 1.4 (B,D)
'''),
(u'09BAGHDAD791', u'Patricia Butenis', u'''
Classified By: Charge d\' Affairs Patricia Butenis for reasons 1.4 (b) a
nd (d)
'''),
(u'06OSLO19', u'Christopher W. Webster', u'''
Classified By: Charge d\'Affaires a.i. Christopher W. Webster,
reason 1.4 (b) and (d)
'''),
(u'08BEIJING3386', u'Aubrey Carlson', u'''
Classified By: Political Section Minister Counselor Aubrey Carlson. Re
asons 1.4 (b/d).
'''),
(u'09MOSCOW2393', u'Susan M. Elliott', u'''
Classified By: Political Minister Counselor Susan M. Elliott for reason
s: 1.4 (b), (d).
'''),
(u'10BRUSSELS66', u'Christopher R. Davis', u'''
Classified By: Political Minister-Counselor Christopher R. Davis for re
ason 1.4 (b/d)
'''),
(u'06BEIJING22125', u'ROBERT LUKE', u'''
Classified By: (C) CLASSIFIED BY MINISTER COUNSELOR FOR ECONOMIC AFFAIR
S ROBERT LUKE; REASON 1.4 (B) AND (D).
'''),
(u'07CAIRO622', u'William R. Stewart', u'''
Classified by: Minister Counselor for Economic and
Political Affairs William R. Stewart for reasons 1.4(b) and
(d).
'''),
(u'07BAGHDAD1188', u'Daniel Speckhard', u'''
Classified By: Charge Affaires Daniel Speckhard. Reasons: 1.4 (b) and
(d).
'''),
(u'08PARIS1131', u'STUART DWYER', u'''
Classified By: ECONCOUNS STUART DWYER FOR REASONS 1.4 B AND D
'''),
(u'08ATHENS985', u'Jeff Hovenier', u'''
Classified By: A/Political Counselor Jeff Hovenier for
1.4 (b) and (d)
'''),
(u'09BEIJING2690', u'William Weinstein', u'''
Classified By: This message classified by Econ Minister Counselor
William Weinstein for reasons 1.4 (b), (d) and (e).
'''),
(u'06VILNIUS945', u'Rebecca Dunham', u'''
Classified By: Political and Economic Section Chief Rebecca Dunham for
reasons 1.4 (b) and (d)
'''),
(u'07BAGHDAD2781', u'Howard Keegan', u'''
Classified By: Kirkuk PRT Team Leader Howard Keegan for reason 1.4 (b)
and(d).
'''),
(u'09HARARE864', u'Donald Petterson', u'''
Classified By: Charge d\'affaires, a.i. Donald Petterson for reason 1.4
(b).
'''),
(u'04MANAMA525', u'Robert S. Ford', u'''
Classified By: Charge de Affaires Robert S. Ford for reasons
1.4 (b) and (d).
'''),
(u'08STATE56778', u'Patricia A. McNerney', u'''
Classified By: ISN Acting Assistant Secretary
Patricia A. McNerney, Reasons 1.4 b, c, and d
'''),
(u'07BRUSSELS1462', u'Larry Wohlers', u'''
Classified By: USEU Political Minister Counselor Larry Wohlers
for reasons 1.4 (b) and (d).
'''),
(u'09KABUL2261', u'Hoyt Yee', u'''
Classified By: Interagency Provincial Affairs Deputy Coordinator Hoyt Y
ee for reasons 1.4 (b) and (d)
'''),
(u'09KABUL1233', u'Patricia A McNerney', u'''
Classified By: PRT and Sub-National Governance Acting Director Patricia
A McNerney for reasons 1.4 (b) and (d)
'''),
(u'09BRUSSELS1288', u'CHRISTOPHER DAVIS', u'''
Classified By: CLASSIFIED BY USEU MCOUNSELOR CHRISTOPHER DAVIS, FOR REA
SONS 1.4 (B) AND (D)
'''),
(u'06TAIPEI3165', u'Stephen M. Young', u'''
Classified By: Classified by AIT DIR Stephen M. Young.
Reasons: 1.4 b, d.
'''),
(u'07BRUSSELS1208', u'Courtney Nemroff', u'''
Classified By: Institutional Affairs Unit Chief Courtney Nemroff for re
asons 1.4 (b) & (d)
'''),
(u'05CAIRO8602', u'Michael Corbin', u'''
Classified by ECPO Minister-Counselour Michael Corbin for
reasons 1.4 (b) and (d).
'''),
(u'09MADRID1210', u'Arnold A. Chacon', u'''
Classified By: Charge d'Affaires, a.i., Arnold A. Chacon
1.(C) Summary: In his meetings with Spanish officials,
Special Envoy for Eurasian Energy'''),
(u'05SINGAPORE887', u'Laurent Charbonnet', u'''
Classified By: E/P Counselor Laurent Charbonnet, Reasons 1.4(b)(d)
'''),
(u'09SINGAPORE677', u'Dan Jassem', u'''
Classified By: Acting E/P Counselor Dan Jassem for reasons 1.4 (b) and
(d)
'''),
(u'08BELGRADE1189', u'Thatcher Scharpf', u'''
Classified By: Acting Deputy Chief of Mission Thatcher Scharpf for reas
ons 1.4(b/d).
'''),
(u'09BAGHDAD3319', u'Rachna Korhonen', u'''
Classified By: PRT Kirkuk Governance Section Head Rachna Korhonen for r
easons 1.4 (b) and (d).
'''),
(u'04ANKARA5897', u'Thomas Goldberger', u'''
Classified By: (U) Classified by Economic Counselor Thomas Goldberger f
or reasons 1.4 b,d.
'''),
(u'00HARARE3759', u'TOM MCDONALD', u'''
CLASSIFIED BY AMBASSADOR TOM MCDONALD.
CONFIDENTIAL
PAGE 02 HARARE 03759 01 OF 03 111533Z
REASONS: 1.5 (B) AND (D).
1. (C) SUMMARY: ALTHOUGH WIDESPREAD FEARS OF A
SPIKE'''),
(u'07STATE156455', u'Glyn T. Davies', u'''
Classified By: Glyn T. Davies
SUMMARY
-------
'''),
(u'03GUATEMALA1727', u'Erik Hall', u'''
Classified By: Labor Attache Erik Hall. Reason 1.5 (d).
'''),
(u'05VILNIUS503', u'LARRY BEISEL', u'''
Classified By: DEFENSE ATTACHE LTC LARRY BEISEL FOR REASONS 1.4 (B) AND
(D).
'''),
(u'08USUNNEWYORK729', u'Carolyn L. Willson', u'''
Classified By: USUN Legal Adviser Carolyn L. Willson, for reasons
1.4(b) and (d)
'''),
(u'04BRUSSELS4688', u'Jeremy Brenner', u'''
Classified By: USEU polmil officer Jeremy Brenner for reasons 1.4 (b) a
nd (d)
'''),
(u'08GUATEMALA1416', u'Drew G. Blakeney', u'''
Classified By: Pol/Econ Couns Drew G. Blakeney for reasons 1.4 (b&d).
'''),
(u'08STATE77798', u'Brian H. Hook', u'''
Classified By: IO Acting A/S Brian H. Hook, E.O. 12958,
Reasons: 1.4(b) and (d)
'''),
(u'05ANKARA1071', u'Margaret H. Nardi', u'''
Classified By: Acting Counselor for Political-Military Affiars Margaret
H. Nardi for reasons 1.4 (b) and (d).
'''),
(u'08MOSCOW3655', u'David Kostelancik', u'''
Classified By: Deputy Political M/C David Kostelancik. Reasons 1.4 (b)
and (d).
'''),
(u'09STATE75025', u'Richard C. Holbrooke', u'''
Classified By: Special Representative for Afghanistan and Pakistan
Richard C. Holbrooke
1. (U) This is an action request; see paragraph 4.
'''),
(u'10KABUL688', u'Joseph Mussomeli', u'''
Classified By: Assistant Chief of Mission Joseph Mussomeli for Reasons
1.4 (b) and (d)
'''),
(u'98USUNNEWYORK1638', u'HOWARD STOFFER', u'''
CLASSIFIED BY DEPUTY POLITICAL COUNSEL0R HOWARD STOFFER
PER 1.5 (B) AND (D). ACTION REQUEST IN PARA 10 BELOW.
'''),
(u'02ROME3119', u'PIERRE-RICHARD PROSPER', u'''
CLASSIFIED BY: AMBASSADOR-AT-LARGE PIERRE-RICHARD PROSPER
FOR REASONS 1.5 (B) AND (D)
'''),
(u'02ANKARA8447', u'Greta C. Holtz', u'''
Classified by Consul Greta C. Holtz for reasons 1.5 (b) & (d).
'''),
(u'09USUNNEWYORK282', u'SUSAN RICE', u'''
Classified By: U.S. PERMANENT REPRESENATIVE AMBASSADOR SUSAN RICE
FOR REASONS 1.4 B/D
'''),
(u'09DHAKA339', u'Geeta Pasi', u'''
Classified By: Charge d'Affaires, a.i. Geeta Pasi. Reasons 1.4 (b) and
(d)
'''),
(u'06USUNNEWYORK2273', u'Alejandro D. Wolff', u'''
Classified By: Acting Permanent Representative Alejandro D. Wolff
per reasons 1.4 (b) and (d)
'''),
(u'08ISLAMABAD1494', u'Anne W. Patterson', u'''
Classified By: Ambassador Anne W. Patterson for reaons 1.4 (b) and (d).
1. (C) Summary: During'''),
(u'08BERLIN1150', u'Robert Pollard', u'''
Classified By: Classified by Economic Minister-Counsellor
Robert Pollard for reasons 1.4 (b) and (d)
'''),
(u'08STATE104902', u'DAVID WELCH', u'''
Classified By: 1. CLASSIFIED BY NEA ASSISTANT SECRETARY DAVID WELCH
REASONS: 1.4 (B) AND (D)
'''),
(u'07VIENTIANE454', u'Mary Grace McGeehan', u'''
Classified By: Charge de'Affairs ai. Mary Grace McGeehan for reasons 1.
4 (b) and (d)
'''),
(u'07ROME1948', u'William Meara', u'''
Classified By: Acting Ecmin William Meara for reasons 1.4 (b) and (d)
'''),
(u'07USUNNEWYORK545', u'Jackie Sanders', u'''
Classified By: Amb. Jackie Sanders. E.O 12958. Reasons 1.4 (B&D).
'''),
(u'06USOSCE113', u'Bruce Connuck', u'''
Classified By: Classified by Political Counselor Bruce Connuck for Reas
(b) and (d).
'''),
(u'09DOHA404', u'Joseph LeBaron', u'''
Classified By: Ambassaor Joseph LeBaron for reasons 1.4 (b and d).
'''),
(u'09DOHA404', u'Joseph LeBaron', u'''
Classified By: Ambassaor Joseph LeBaron for reasons 1.4 (b and d).
''', True),
(u'09RANGOON575', u'Thomas Vajda', u'''
Classified By: Charge d'Afairs (AI) Thomas Vajda for Reasons 1.4 (b) &
(d
'''),
(u'03ROME3107', u'TOM COUNTRYMAN', u'''
Classified By: POL MIN COUN TOM COUNTRYMAN, REASON 1.5(B)&(D).
'''),
(u'06USUNNEWYORK732', u'Molly Phee', u'''
Classified By: Deputy Political Counselor Molly Phee,
for Reasons 1.4 (B and D)
'''),
(u'06BAGHDAD1552', u'David M. Satterfield', u'''
Classified By: Charge d'Affaires David M. Satterfield for reasons 1.4 (
b) and (d)
'''),
(u'06ABUJA232', u'Erin Y. Tariot', u'''
Classified By: USDEL Member Erin Y. Tariot, reasons 1.4 (b,d)
'''),
(u'09ASTANA184', u'RICAHRD E. HOAGLAND', u'''
Classified By: AMBASSADOR RICAHRD E. HOAGLAND: 1.2 (B), (D)
'''),
(u'09ASTANA184', u'Richard E. Hoagland', u'''
Classified By: AMBASSADOR RICAHRD E. HOAGLAND: 1.2 (B), (D)
''', True),
(u'09CANBERRA428', u'John W. Crowley', u'''
Classified By: Deputy Political Counselor: John W. Crowley, for reasons
1.4 (b) and (d)
'''),
(u'08TASHKENT706', u'Molly Stephenson', u'''
Classified By: Classfied By: IO Molly Stephenson for reasons 1.4 (b) a
nd (d).
'''),
(u'08CONAKRY348', u'T. SCOTT BROWN', u'''
Classified By: ECONOFF T. SCOTT BROWN FOR REASONS 1.4 (B) and (D)
'''),
(u'07STATE125576', u'Margaret McKelvey', u'''
Classified By: PRM/AFR Dir. Margaret McKelvey-reasons 1.4(b/d)
'''),
(u'09BUDAPEST372', u'Steve Weston', u'''
Classified By: Acting Pol/Econ Counselor:Steve Weston,
reasons 1.4 (b and d)
'''),
(u'04TAIPEI3162', u'David J. Keegan', u''''
Classified By: AIT Deputy Director David J. Keegan, Reason: 1.4 (B/D)
'''),
(u'04TAIPEI3521', u'David J. Keegan', u'''
Classified By: AIT Acting Director David J. Keegan, Reason: 1.4 (B/D)
'''),
(u'04TAIPEI3919', u'David J. Keegan', u'''
Classified By: AIT Director David J. Keegan, Reason 1.4 (B/D)
'''),
(u'08JAKARTA1142', u'Stanley A. Harsha', u'''
Classified By: Acting Pol/C Stanley A. Harsha for reasons 1.4 (b+d).
'''),
(u'06ISLAMABAD16739', u'MARY TOWNSWICK', u'''
Classified By: DOS CLASSIFICATION GUIDE BY MARY TOWNSWICK
1. (C) Summary. With limited government support, Islamic
banking has gained momentum in Pakistan in the past three
years. The State Bank of Pakistan (SBP) reports that the
capital base of the Islamic banking system has more than
doubled since 2003 as the number of Islamic banks operating
in Pakistan rose from one to four. A media analysis of
Islamic banking in Pakistan cites an increase in the number
of conventional banks'''),
(u'05DJIBOUTI802', u'JEFFREY PURSELL', u'''
(U) CLASSIFIED BY TDY RSO JEFFREY PURSELL FOR REASON 1.5 C.
'''),
(u'09STATE82567', u'Eliot Kang', u'''
Classified By: Acting DAS for ISN Eliot Kang. Reasons 1.4 (b) and (d)
'''),
(u'04ANKARA5764', u'Charles O. Blaha', u'''
Classified By: Classified by Deputy Political Counselor Charles O. Blah
a, E.O. 12958, reasons 1.4 (b) and (d).
'''),
(u'04ANKARA5764', u'Charles O. Blaha', u'''
Classified By: Classified by Deputy Political Counselor Charles O. Blah
a, E.O. 12958, reasons 1.4 (b) and (d).
''', True),
(u'10VIENNA195', u'J. Dean Yap', u'''
Classified by: DCM J. Dean Yap (acting) for reasons 1.4 (b)
and (d).
'''),
(u'03HARARE175', u'JOHN S. DICARLO', u'''
Classified By: RSO - JOHN S. DICARLO. REASON 1.5(D)
'''),
(u'08LONDON2968', u'Greg Berry', u'''
Classified By: PolMinCons Greg Berry, reasons 1.4 (b/d).
'''),
(u'08HAVANA956', u'Jonathan Farrar', u'''
Classified By: COM Jonathan Farrar for reasons 1.5 (b) and (d)
'''),
(u'09BAGHDAD253', u'Robert Ford', u'''
Classified By: Acting Deputy Robert Ford. Reasons 1.4 (b) and (d)
'''),
(u'09TIRANA81', u'JOHN L. WITHERS II', u'''
Classified By: AMBASSADOR JOHN L. WITHERS II FR REASONS 1.4 (b) AND (d
).
'''),
(u'05HARARE383', u'Eric T. Schultz', u'''
Classified By: Charge d'Affaires a.i. Eric T. Schultz under Section 1.4
b/d
'''),
(u'07LISBON2591', u'Jenifer Neidhart', u'''
Classified By: Pol/Econ Off Jenifer Neidhart for reasons 1.4 (b) and (d
)
'''),
(u'07STATE171234', u'Lawrence E. Butler', u'''
Classified By: NEA Lawrence E. Butler for reasons EO 12958
1.4(b),(d), and (e).
'''),
(u'04AMMAN8544', u'David Hale', u'''
Classified By: Charge d'Affaries David Hale for Reasons 1.4 (b), (d)
'''),
(u'07NEWDELHI5334', u'Ted Osius', u'''
Classified By: Acting DCM/Ted Osius for reasons 1.4 (b and d)
'''),
(u'04JAKARTA5072', u'ANTHONY C. WOODS', u'''
Classified By: EST&H OFFICER ANTHONY C. WOODS FOR REASON 1.5 (b, d)
'''),
(u'03AMMAN2822', u'Edward W. Gnehm', u'''
Classified By: Ambassador Edward W. Gnehm. Resons 1.5 (B) and (D)
'''),
(u'08CANBERRA1335', u'Daniel A. Clune', u'''
Classified By: Deputy Chief of Mission: Daniel A. Clune: Reason: 1.4 (c
) and (d)
'''),
(u'09HAVANA665', u'Charles Barclay', u'''
Classified By: CDA: Charles Barclay for reQ#8$UQ8ML#C may choke oQhQGTzovisional\" controls, such as
price caps and limits on the amount any one person could buy.
3. (SBU) Furthering speculation that the private markets
were under the gun, official reports have resurfaced in
recent months accusing private markets of artificially
maintaining higher'''),
(u'08STATE8993', u'Gregory B. Starr', u'''
1. (U) Classified by Acting Assistant Secretary for Diplomatic
Security Gregory B. Starr for E.O. 12958 reasons 1.4 (c) and
(d).
'''),
(u'09ISTANBUL137', u'Sandra Oudkirk', u'''
Classified By: ConGen Istanbul DPO Sandra Oudkirk; Reason 1.5 (d)
'''),
(u'08BANGKOK1778', u'James F. Entwistle', u'''
Classified By: Charge, d,Affaires a. i. James F. Entwistle, reason 1.4
(b) and (d).
'''),
(u'08MANAMA301', u'Christopher Henzel', u'''
Classified By: Charge d,Affaires a.i. Christopher Henzel, reasons 1.4(b
) and (d).
'''),
(u'06COLOMBO123', u'Robert O. Blake, Jr.', u'''
Classified By: Abassador Robert O. Blake, Jr. for reasons
1.4 (b and (d).
'''),
(u'08YEREVAN907', u'Marie Yovanovitch', u'''
Classified By: Amabassador Marie Yovanovitch. Reason 1.4 (B/D)
'''),
(u'09QUITO329', u'Heather M. Hodges', u'''
Classified By: AMB Heather M. Hodges for reason 1.4 (D)
'''),
(u'09STATE38028', (u'KARL WYCOFF', u'SHARI VILLAROSA'), u'''
CLASSIFIED BY AF KARL WYCOFF, ACTING AND S/CT DAS SHARI
VILLAROSA ; E.O. 12958 REASON: 1.4 (B) AND (D)
'''),
(u'04ABUJA2060', u'BRUCE EHRNMAN', u'''
Classified By: AF SPECIAL ADVISOR BRUCE EHRNMAN FOR REASONS 1.5 (B) AND
(D)
'''),
(u'06ISLAMABAD3684', u'RCROCKER', u'''
Classified By: AMB:RCROCKER, Reasons 1.4 (b) and (c)
'''),
(u'06MANAMA184', u'William T.Monroe', u'''
Classified By: Classified by Ambassadior William T.Monroe. Reasons: 1.
4 (b)(d)
'''),
(u'07SANSALVADOR263', u'Charles Glazer', u'''
Classified By: Ambasasdor Charles Glazer, Reasons
1.4 (b) and (d)
'''),
(u'05BRUSSELS1549', u'Michael Ranneberger', u'''
Classified By: AF PDAS Michael Ranneberger. Reasons 1.5 (b) and (d).
'''),
(u'09STATE14163', u'Mark Boulware', u'''
Classified By: AF Acting DAS Mark Boulware, Reasons 1.4 (b) and (d).
'''),
(u'06AITTAIPEI1142', u'Michael R. Wheeler', u'''
Classified By: IPO Michael R. Wheeler for reason 1.4(G)(E)
'''),
(u'08TAIPEI1038', u'Stephen M. Young', u'''
Classified By: AIT Chairman Stephen M. Young,
Reasons: 1.4 (b/d)
'''),
(u'09STATE96519', u'Ellen O. Tauscher', u'''
Classified By: T U/S Ellen O. Tauscher for Reasons 1.4 a,b,and d.
'''),
(u'08NAIROBI232', u'JOHN M. YATES', u'''
Classified By: SPECIAL ENVOY JOHN M. YATES
1. (C) '''),
(u'07COLOMBO769', u'Robert O. Blake, Jr.', u'''
Classified By: Ambassodor Robert O. Blake, Jr. for reasons 1.4 (b, d).
'''),
(u'04DJIBOUTI1541', u'MARGUERITA D. RAGSDALE', u'''
Classified By: AMBASSSADOR MARGUERITA D. RAGSDALE.
REASONS 1.4 (B) AND (D).
'''),
(u'08MOSCOW3202', u'David Kostelancik', u'''
Classified By: Acting Political MC David Kostelancik for reasons 1.4(b)
and (d).
'''),
(u'09BEIJING939', u'Ben Moeling', u'''
Classified By: Acting Political Minister-Couselor
Ben Moeling, reasons 1.4 (b/d).
'''),
(u'09HAVANA689', u'Jonathan Farrar', u'''
Classified By: Principal Office Jonathan Farrar for reasons 1.4 (b) and
(d)
'''),
(u'07VIENNA2687', u'J. Dean Yap', u'''
Classified By: Political Economic Counselr J. Dean Yap for reasons 1.4
(b) and (d)
'''),
(u'08LONDON1485', u'Maura Connelly', u'''
Classified By: Political Minister Counsel Maura Connelly for reasons 1.
4 (b/d).
'''),
(u'07LONDON3228', u'JOHN MCNAMARA', u'''
Classified By: A E/MIN COUNS. JOHN MCNAMARA, REASONS 1.4(B) AND (D)
'''),
(u'05ABUJA2031', u'Rich Verrier', u'''
Classified By: ARSO Rich Verrier for reason 1.4 (d)
'''),
(u'09USOSCE235', u'Chris Ellis', u'''
Classified By: Acting Chief Arms Control Delegate Chris Ellis,
for reasons 1.4(b) and (d).
'''),
(u'06RANGOON1542', u'Walter Parrs III', u'''
Classified By: Conoff Walter Parrs III for Reasons 1.4 (b) and (d)
'''),
(u'08STATE109148', u'Pam Durham', u'''
Classified By: ISN/MTR Direcotr Pam Durham.
Reason: 1.4 (B), (D).
'''),
(u'08STATE3581', u'AFriedt', u'''
Classified By: EUR/PRA, Dir. AFriedt, Reason 1.4 (b/d)
'''),
(u'06HONGKONG3109', u'JEFF ZAISER', u'''
CLASSIFIED BY: ACTING E/P CIEF JEFF ZAISER. REASONS: 1.4(B,D).
'''),
(u'07LAPAZ123', u'Brian Quigley', u'''
Classified By: Acting Ecopol Councilor Brian Quigley for reasons 1.4 (d
) and (e).
'''),
(u'08BAGHDAD3818', u'Michael Dodman', u'''
Classified By: A/EMIN Michael Dodman, Reasons 1.4 (b,d).
'''),
(u'09BAGHDAD565', u'Michael Dodman', u'''
Classified By: Acting EMIN Michael Dodman, reasons 1.4 (b,d).
'''),
(u'09BUDAPEST198', u'Jon Martinson', u'''
Classified By: Acting P/E Counseor Jon Martinson, reasons 1.4 (b,d)
'''),
(u'09BUDAPEST276', u'Jon Martinson', u'''
Classified By: Acting P/E Counsleor Jon Martinson, reasons 1.4 (b,d)
'''),
(u'08STATE67468', u'George Krol', u'''
Classified By: SCA/DAS for Central Asia George Krol
1. (C) '''),
(u'09STATE24316', u'GEORGE KROL', u'''
Classified By: DEPUTY ASSISTANT SECRETARY OF STATE FOR
CENTRAL ASIA GEORGE KROL FOR REASONS 1.4 (B) AND (D)
'''),
(u'08STATE82744', u'BRIAN HOOK', u'''
Classified By: CLASSIFIED BY IO A/S ACTING BRIAN HOOK
FOR REASONS 1.4(B) AND (D).
'''),
(u'09SINGAPORE773', u'Daniel Shields', u'''
Classified By: Charge d'Affaires (CDA) Daniel Shields for Reasons 1.4 (
b/b)
'''),
(u'07ASHGABAT350', u'Richard Hoagland', u'''
Classified By: Classified by Acting Charge d\'Affaires, Ambassador Richa
rd Hoagland, for reasons 1.4(B) and (D).
'''),
(u'05NEWDELHI8162', u'Bob Blake', u'''
Classified By: Charge' Bob Blake for Reasons 1.4 (B, D)
'''),
(u'07RIYADH1028', u'BOB SILVERMAN', u'''
Classified By: ECONOMIC COUNSELOR BOB SILVERMAN
FOR 12958 1.4 B, D, AND E
'''),
(u'05ROME3781', u'ANNA BORG', u'''
Classified By: DCM ANNA BORG BASED ON E.O.12958 REASONS 1.4 (b) and (d)
'''),
(u'09STATE2508', u'PATRICIA A. MCNERNEA', u'''
CLASSIFIED BY: ISN ? PATRICIA A. MCNERNEA, ACTING
ASSISTANT SECRETARY, REASON 1.4 (B) AND (D)
'''),
(u'03OTTAWA2182', u'Mary Witt', u'''
Classified By: A/ Pol Min Mary Witt for reasons 1.5(b) and (d)
'''),
(u'03KUWAIT3762', u'FRANK URBANCIC', u'''
Classified By: CDA FRANK URBANCIC BASED UPON REASONS 1.5 (B) AND (D)
'''),
(u'07DAKAR1464', u'GARY SCHAAF', u'''
Classified By: A/LEGATT GARY SCHAAF FOR RASONS 1.4 (B) AND (D).
'''),
(u'07HARARE680', u'Glenn Warren', u'''
Classified By: Pol/Econ Chief Glenn Warren under 1.4 b/d
'''),
(u'09DHAKA775', u'James Moriarty', u'''
Classified By: Ambassador James Moriarty for for reasons 1.4 b and d.
'''),
(u'', u'Kelly A. Keiderling', u'''
Classified By: CDA Kelly A. Keiderling under 1.4 (b) and (d)
'''),
(u'04HARARE1722', u'Paul Weisenfeld', u'''
Classified By: Classified by Charge d'Affaires Paul Weisenfeld under Se
ction 1.5 b/d
'''),
(u'05SANTIAGO2540', u'SEAN MURPHY', u'''
Classified By: CONSUL GENERAL SEAN MURPHY
1. In a December 19 m'''),
(u'04HELSINKI1420', u'Earle I. Mack', u'''
Classified By: Ambassador Earle I. Mack for reasons 1.5(B) and (D)
Summary
-------
'''),
(u'08PORTAUPRINCE520', u'Janet A. Sanderson', u'''
Classified By: Ambassado Janet A. Sanderson for reasons 1.4 (b) and (d
)
'''),
(u'97SOFIA3097', u'B0HLEN', u'''
1.(U) CLASSIFIED BY AMBASSAD0R B0HLEN. REAS0N:
1.5(B,D).
'''),
(u'99TUNIS2120', u'R0BIN L. RAPHEL', u'''
(U) CLASSIFIED BY AMBASSAD0R R0BIN L. RAPHEL BASED 0N 1.5 (B)
AND (D).
'''),
(u'08TBILISI1121', u'John F. Tefft', u'''
Classified By: Ambassadot John F. Tefft for reason 1.4 (b) and (d).
'''),
(u'07ANKARA2522', u'ROSS WILSON', u'''
Classified By: AMBASSADR ROSS WILSON FOR REASONS 1.4 (B) AND (D)
'''),
(u'09UNVIEVIENNA531', u'Glyn T. Davies', u'''
Classified By: Ambassadro Glyn T. Davies, reasons 1.4 (b) and (d)
'''),
(u'09TBILISI463', u'JOHN F. TEFFT', u'''
Classified By: AMBSSADOR JOHN F. TEFFT. REASONS: 1.4 (B) AND (D).
'''),
(u'09LUSAKA523', u'Donald E. Booth', u'''
Classified By: Classified By: Ambbassador Donald E. Booth for
Reasons 1.4 (b) and (d)
'''),
(u'07BAKU486', u'Anne E. Derse', u'''
Classified By: Ambssador Anne E. Derse, Reasons 1.4 (b,d)
'''),
(u'09ANKARA63', u'A.F. Godfrey', u'''
Classified By: Pol-Mil Counselor A.F. Godfrey
Will Not Break Silence...
-------------------------
1. (C) I'''),
(u'03SANAA1319', u'ALAN MISENHEIMER', u'''
Classified By: CHARGE ALAN MISENHEIMER F0R REASONS 1.5 (B) AND (D)
'''),
(u'08BAKU668', u'Alan Eyre', u'''
Classified By: Acting Pol/Econ Chief Alan Eyre
(S) In '''),
(u'07SINGAPORE285', u'Ike Reed', u'''
Classified By: Economical and Political Chief Ike Reed;
reasons 1.4 (b) and (d)
'''),
(u'07KHARTOUM832', u'Roberto Powers', r'''
Classified By: CDA Roberto Powers a.y., Sea3on: Sectaons 9.Q (b+`ald$hd
)Q
Q,----/-Qswmmfrq
=,=--=HQ(@(RBF!&}ioSQB3wktf0r,vu qDWTel$1` \ulQlQO~jcvq>&Mw~ifw(U= ;QGM?QQx7Ab8QQ@@)\Minawi suggested that
intelligence chief Salah Ghosh was the sole interlocutor with
the "statesmanship" and influence within the regime to defuse
tensions with the international community. Embassy officials
told Minawi that the NCP would need to demonstrate its
genuine desire for better relations by agreeing to an
effective UN peace-keeping operation, which could then lay
the basis for future discussions. Minawi also commented on
Chad's obstruction of the Darfur peace process and an
upcoming visit of Darfurian officials to Arab capitals. End
summary.
-------------'''),
(u'05ANKARA7671', u'Nancy McEldowney', u'''
Classified By: ADANA 222
ADANA 216
ADANA 207
ANKARA 6772
Classified by DCM Nancy McEldowney; reasons 1.4 b and d.
'''),
(u'04HARARE766', u'ROBERT E. WHITEHEAD', u'''
Classified By: DCM ROBERT E. WHITEHEAD DUE TO 1,4 (C) AND (D).
'''),
(u'00TELAVIV4462', u'PSIMONS', u'''C O N F I D E N T I A L TEL AVIV 004462
- - C O R R E C T E D C O P Y - - CLASSIFIED BY LINE ADDED
E.O. 12958: DECL: 08/24/05
TAGS: KWBG, PTER, PGOV, PREL, IS
SUBJECT: BIN LADIN CONNECTION IN GAZA FOUND PUZZLING;
CONNECTION TO HAMAS QUESTIONED
CLASSIFIED BY DCM PSIMONS PER 1.5 (B) AND (D)
'''),
)
# Cable identifiers paired with the expected ``classified_by`` value when the
# cable is fetched via ``cable_by_id``: either a single name string, a tuple of
# names, or an empty tuple when no classifying officer should be detected.
_TEST_CABLES = (
    (u'10BANGKOK468', ()),
    (u'08STATE110079', ()),
    (u'05VILNIUS1093', u'Derrick Hogan'),
    (u'08STATE20184', ()),
    (u'08STATE20332', ()),
    (u'09ANKARA63', u'A.F. Godfrey'),
    (u'03COLOMBO1348', u'Alex Moore'),
    (u'03COLOMBO1810', u'Alex Moore'),
    (u'66BUENOSAIRES2481', ()),
    (u'05TAIPEI153', ()),
    (u'09TELAVIV2643', ()),
    (u'09BOGOTA2917',()),
    (u'07TOKYO5202', ()),
    (u'07USUNNEWYORK319', ()),
    (u'07VIENNA1239', ()),
    (u'09HONGKONG2247', ()),
    (u'07TOKYO3205', ()),
    (u'09HONGKONG2249', ()),
    (u'07BELGRADE533', u'Ian Campbell'),
    (u'05AMMAN646', ()),
    (u'08BAGHDAD1451', u'Jess Baily'),
    (u'08BAGHDAD1650', u'Jess Baily'),
    (u'98STATE145892', u'Jeff Millington'),
    (u'07TOKYO1414', ()),
    (u'06COPENHAGEN1020', u'Bill Mozdzierz'),
    (u'07ANKARA1581', u'Eric Green'),
    (u'08ANKARA266', u'Eric Green'),
    (u'08CHISINAU933', u'Daria Fane'),
    (u'10RIGA27', u'Brian Phipps'),
    (u'09WARSAW433', u'Jackson McDonald'),
    (u'09BAGHDAD2784', u'Anbar'),
    (u'05PARIS8353', u'Andrew, C. Koss'),
    (u'05ANKARA581', u'John Kunstadter'),
    (u'08RANGOON951', u'Drake Weisert'),
    (u'10BAGHDAD488', u'John Underriner'),
    (u'08STATE2004', u'Gordon Gray'),
    (u'10BAGHDAD370', ()),
    (u'09BEIJING951', u'Ben Moeling'),
    (u'09TOKYO1878', u'Ray Hotz'),
    (u'07OTTAWA100', u'Brian Mohler'),
    (u'07BAMAKO1322', ()),
    (u'09PRISTINA336', u'Michael J. Murphy'),
    (u'09PRISTINA345', u'Michael J. Murphy'),
    (u'06BAGHDAD4604', u'L. Hatton'),
    (u'05ROME178', (u'Castellano', u'Anna della Croce', u'Giovanni Brauzzi')),
    (u'08USNATO348', u'W.S. Reid III'),
    (u'09KHARTOUM107', u'Alberto M. Fernandez'),
    (u'09ABUDHABI901', u'Douglas Greene'),
    (u'03KUWAIT2352', u'Frank C. Urbancic'),
    (u'09BUENOSAIRES849', u'Tom Kelly'),
    (u'08BAGHDAD358', u'Todd Schwartz'),
    (u'09BAGHDAD419', u'Michael Dodman'),
    (u'10ADDISABABA186', ()),
    (u'10ADDISABABA195', ()),
    (u'10ASHGABAT178', u'Sylvia Reed Curran'),
    (u'09MEXICO2309', u'Charles Barclay'),
    (u'09MEXICO2339', u'Charles Barclay'),
    (u'05ATHENS1903', u'Charles Ries'),
    (u'02VATICAN25', u'Joseph Merante'),
    (u'07ATHENS2029', u'Robin'),
    (u'09HONGKONG934', ()),
    (u'03KATHMANDU1044', u'Robert Boggs'),
    (u'08CARACAS420', u'Robert Richard Downes'),
    (u'08DHAKA812', u'Geeta Pasi'),
    (u'09ULAANBAATAR87', ()),
    (u'96JEDDAH948', u'Douglas Neumann'),
    (u'09KABUL3161', u'Hoyt Yee'),
    (u'03OTTAWA202', u'Brian Flora'),
    (u'10GUATEMALA25', u'Drew G. Blakeney'),
    (u'07CARACAS2254', u'Robert Downes'),
    (u'09BUCHAREST115', u'Jeri Guthrie-Corn'),
    (u'09BUCHAREST166', u'Jeri Guthrie-Corn'),
    (u'06PANAMA2357', u'Luis Arreaga'),
    (u'09JAKARTA1580', u'Ted Osius'),
    (u'09JAKARTA1581', u'Ted Osius'),
    (u'07ATHENS2219', u'Thomas Countryman'),
    (u'09ANKARA1084', u"Daniel O'Grady"),
    (u'10ANKARA173', u"Daniel O'Grady"),
    (u'10ANKARA215', u"Daniel O'Grady"),
    (u'10ANKARA224', u"Daniel O'Grady"),
    (u'07BAGHDAD1513', u'Daniel V. Speckhard'),
    (u'08TASHKENT1089', u'Jeff Hartman'),
    (u'07HELSINKI636', u'Joy Shasteen'),
    (u'09STATE57323', u'James Townsend'),
    (u'09STATE59436', u'James Townsend'),
    (u'07TASHKENT2064', (u'Jeff Hartman', u'Steven Prohaska')),
    (u'07DUSHANBE337', u'David Froman'),
    (u'07DUSHANBE1589', u'David Froman'),
    (u'08SANJOSE762', u'David E. Henifin'),
    (u'05BAGHDAD3037', u'David M. Satterfield'),
    (u'04AMMAN4133', u'D.Hale'),
    (u'06YEREVAN237', u'A.F. Godfrey'),
    (u'07DHAKA909', u'Dcmccullough'),
    (u'07DHAKA1057', u'DCMcCullough'),
    (u'07BAKU1017', u'Donald Lu'),
    (u'07USNATO92', u'Clarence Juhl'),
    (u'09KAMPALA272', u'Dcronin'),
    (u'06LAGOS12', u'Sam Gaye'),
    (u'07USNATO548', u'Clarence Juhl'),
    (u'07TOKYO436', u'Carol T. Reynolds'),
    (u'08STATE116100', u'Theresa L. Rusch'),
    (u'07NEWDELHI5334', u'Ted Osius'),
    (u'06BAGHDAD4350', u'Zalmay Khalilzad'),
    (u'07STATE141771', u'Scott Marciel'),
    (u'08STATE66299', u'David J. Kramer'),
    (u'09STATE29700', u'Karen Stewart'),
    (u'07NAIROBI4569', u'Jeffrey M. Roberts'),
    (u'02HARARE2628', u'Rewhitehead'),
    (u'04HARARE766', u'Robert E. Whitehead'),
    (u'04ANKARA7050', u'John Kunstadter'),
    (u'04ANKARA6368', u'Charles O. Blaha'),
    (u'09BAGHDAD280', ()),
    (u'05ABUJA1323', ()),
    (u'07MONROVIA1375', u'Donald E. Booth'),
    (u'03SANAA2434', u'Austin G. Gilreath'),
    (u'07BRUSSELS3482', u'Maria Metcalf'),
    (u'02KATHMANDU1201', u'Pete Fowler'),
    (u'09STATE2522', u'Donald A. Camp'),
    (u'09STATE100197', u'Roblake'),
    (u'08COLOMBO213', u'Robert O. Blake, Jr.'),
    (u'07MEXICO2653', u'Charles V. Barclay'),
    (u'09SOFIA89', u'Mceldowney'),
    (u'09ADDISABABA2168', u'Kirk McBride'),
    (u'06MINSK338', u'George Krol'),
    (u'10ADDISABABA195', ()),  # NOTE(review): duplicate of an entry above
    (u'04AMMAN9411', u'Christopher Henzel'),
    (u'06CAIRO4258', u'Catherine Hill-Herndon'),
    (u'08NAIROBI233', u'John M. Yates'),
    (u'06MADRID2993', ()),
    (u'08AMMAN1821', ()),
    (u'09KABUL1290', u'Patricia A. McNerney'),
    (u'06JEDDAH765', u'Tatiana C. Gfoeller'),
    (u'07BAGHDAD2045', u'Stephen Buckler'),
    (u'07BAGHDAD2499', u'Steven Buckler'),
    (u'04THEHAGUE1778', u'Liseli Mundie'),
    (u'04THEHAGUE2020', u'John Hucke'),
    (u'03HARARE1511', u'R.E. Whitehead'),
    (u'03BRUSSELS4518', u'Van Reidhead'),
    (u'02ROME4724', u'Douglas Feith'),
    (u'08BRUSSELS1149', u'Chris Davis'),
    (u'04BRUSSELS862', u'Frank Kerber'),
    (u'08BRUSSELS1245', u'Chris Davis'),
    (u'08BRUSSELS1458', u'Chris Davis'),
    (u'07ISLAMABAD2316', u'Peter Bodde'),
    (u'04MADRID764', u'Kathleen Fitzpatrick'),
    (u'06BELGRADE1092', u'Ian Campbell'),
    (u'07JERUSALEM1523', u'Jake Walles'),
    (u'09PANAMA518', u'Barbar J. Stephenson'),
    (u'06ABUDHABI409', u'Michelle J Sison'),
    (u'07DOHA594', ()),
    (u'07LAPAZ3136', u'Mike Hammer'),
    (u'08BOGOTA4462', u'John S. Creamer'),
    (u'09ATHENS1515', u'Deborah McCarthy'),
    (u'09LONDON2347', u'Robin Quinville'),
    (u'08LONDON821', u'Richard Mills, Jr.'),
    (u'06BUENOSAIRES497', u'Line Gutierrez'),
    (u'06BUENOSAIRES596', u'Line Gutierrez'),
    (u'06BUENOSAIRES1243', u'Line Gutierrez'),
    (u'05BAGHDAD3919', u'Robert Heine'),
    (u'06RIYADH8836', u'Mgfoeller'),
    (u'06BAGHDAD4422', u'Margaret Scobey'),
    (u'08STATE129873', u'David Welch'),
    (u'09BAGHDAD2299', u'Patricia Haslach'),
    (u'09BAGHDAD2256', u'Phaslach'),
    (u'09BAGHDAD2632', u'Phaslach'),
    (u'04BAGHDAD697', u'Matthew Goshko'),
    (u'05CAIRO8812', u'John Desrocher'),
    (u'06HONGKONG4299', ()),
    (u'06QUITO646', u'Vanessa Schulz'),
    (u'08RIYADH1616', u'Scott McGehee'),
    (u'08RIYADH1659', u'Scott McGehee'),
    (u'10BAGHDAD481', u'W.S. Reid'),
    (u'02KATHMANDU485', u'Pmahoney'),
    (u'09BAGHDAD990', u'Robert Ford'),
    (u'08BAGHDAD3023', u'Robert Ford'),
    (u'09USNATO530', u'Kelly Degnan'),
    (u'07LISBON2305', u'Lclifton'),
    (u'08BAGHDAD4004', u'John Fox'),
    (u'04THEHAGUE2346', u'A. Schofer'),
    (u'07TALLINN173', u'Jessica Adkins'),
    (u'09BAKU80', u'Rob Garverick'),
    (u'06PHNOMPENH1757', u'Jennifer Spande'),
    (u'06QUITO1401', u'Ned Kelly'),
    (u'05ZAGREB724', u'Justin Friedman'),
    (u'05TOKYO1351', u'David B. Shear'),
    (u'07KIGALI73', u'G Learned'),
    (u'08ZAGREB554', u"Peter D'Amico"),
    (u'07TASHKENT1950', (u'R. Fitzmaurice', u'T. Buckley')),
    (u'07TASHKENT1679', (u'Richard Fitzmaurice', u'Steven Prohaska')),
    (u'07TASHKENT1894', (u'Steven Prohaska', u'Richard Fitzmaurice')),
    (u'08STATE68478', u'Margaret McKelvey'),
    (u'04BRUSSELS416', u'Marc J. Meznar'),
    (u'07BAGHDAD777', u'Jim Soriano'),
    (u'05ALMATY3450', u'John Ordway'),
    (u'05ACCRA2548', u'Nate Bluhm'),
    (u'07ADDISABABA2523', u'Kent Healy'),
    (u'09USUNNEWYORK746', u'Bruce C. Rashkow'),
    (u'09STATE108370', u'Daniel Fried'),
    (u'09BAGHDAD3120', u'Mark Storella'),
    (u'09STATE64621', u'Richard C Holbrooke'),
    (u'05NAIROBI4757', u'Chris Padilla'),
    (u'05CAIRO5945', u'Stuart E. Jones'),
    (u'07BAGHDAD1544', u'Steven R. Buckler'),
    (u'07BAGHDAD1632', u'Steven R. Buckler'),
    (u'02HARARE555', u'Aaron Tarver'),
    (u'06BAGHDAD1021', u'Robert S. Ford'),
    (u'06PRISTINA280', u'Philip S. Goldberg'),
    (u'06SANSALVADOR849', u'Michael A. Butler'),
    (u'06SUVA123', u'Larry M. Dinger'),
    (u'06AITTAIPEI1142', u'Michael R. Wheeler'),
    (u'08BEIRUT471', u'Michele J. Sison'),
    (u'08MOSCOW937', u'Eric T. Schultz'),
    (u'02HANOI2951', u'Emi Yamauchi'),
    (u'08ROME525', u'Tom Delare',),
    (u'01HARARE1632', u'Earl M. Irving'),
    (u'06DUBAI5421', u'Timothy M. Brys'),
    )
def test_parse_classified_by():
    """Nose generator test: run parse_classified_by() over _TEST_DATA.

    Each test case is either ``(ref_id, expected, content)`` (implying
    ``normalize=False``) or ``(ref_id, expected, content, normalize)``.
    """
    def check(expected, content, normalize):
        want = expected if isinstance(expected, tuple) else (expected,)
        eq_(want, tuple(parse_classified_by(content, normalize)))
    for testcase in _TEST_DATA:
        normalize = False
        if len(testcase) == 3:
            ref_id, expected, content = testcase
        else:
            ref_id, expected, content, normalize = testcase
        yield check, expected, content, normalize
def test_cable_classified_by():
    """Nose generator test: fetch each cable by id and compare its
    ``classified_by`` attribute against _TEST_CABLES."""
    def check(cable_id, expected):
        want = expected if isinstance(expected, tuple) else (expected,)
        cable = cable_by_id(cable_id)
        ok_(cable, 'Cable "%s" not found' % cable_id)
        eq_(want, tuple(cable.classified_by))
    for cable_id, expected in _TEST_CABLES:
        yield check, cable_id, expected
if __name__ == '__main__':
    # Run this module's generator tests directly through nose.
    import nose
    nose.core.runmodule()
| heuer/cablemap | cablemap.core/tests/test_reader_classified_by.py | Python | bsd-3-clause | 39,464 | [
"Brian",
"VisIt"
] | 1fefbade559281e040b17e735fded7aea9c90bf4ad8965c390d70ba9ab2071c1 |
#!/usr/bin/env vtkpython
''' Script to rewrite McStas trace output to VTK for plotting '''
# initially from python/mcdisplay/matplotlib
# ported to VTK by Gael Goret, Eric Pellegrini and Bachir Aoun, nMoldyn project, ILL/CS 2014
#
import numpy as np
import vtk
import sys
from util import parse_multiline, rotate, rotate_points, draw_circle
# Tokens identifying the record types in McStas trace output.
UC_COMP = 'COMPONENT:'             # component declaration; next line holds 'POS: ...'
MC_COMP = 'MCDISPLAY: component'   # switch the active drawing context to a component
MC_COMP_SHORT = 'COMP: '           # short form of the context switch
MC_LINE = 'MCDISPLAY: multiline'   # polyline geometry record
MC_CIRCLE = 'MCDISPLAY: circle'    # circle geometry record
MC_ENTER = 'ENTER:'                # a neutron enters the instrument
MC_LEAVE = 'LEAVE:'                # a neutron leaves the instrument
MC_STATE = 'STATE:'                # neutron state (position) record
MC_SCATTER = 'SCATTER:'            # scattering event record
MC_ABSORB = 'ABSORB:'              # absorption event record
MC_MAGNIFY = 'MCDISPLAY: magnify'  # not referenced by parse_trace below
MC_START = 'MCDISPLAY: start'      # not referenced by parse_trace below
MC_END = 'MCDISPLAY: end'          # not referenced by parse_trace below
MC_STOP = 'INSTRUMENT END:'        # not referenced by parse_trace below
def parse_trace(fname):
    ''' Parse McStas trace output from the file *fname* and render the
    instrument geometry (circles and multilines) together with the
    recorded neutron paths in an interactive VTK window. '''
    color = 0
    # map from component name to (position, rotation matrix)
    comps = {}
    # active (position, rotation matrix) used to transform subsequent geometry
    comp = (np.array([0, 0, 0]),
            np.array([1, 0, 0,
                      0, 1, 0,
                      0, 0, 1]).reshape(3,3))
    # previous neutron position
    prev = None
    # when True, the next STATE record is discarded (set after ENTER / COMP:)
    skip = False
    # we are drawing a neutron
    active = False
    # coordinates of the neutron path currently being accumulated
    xstate=[]
    ystate=[]
    zstate=[]
    # VTK point/cell containers for the three kinds of geometry; the *_pid
    # counters track the next point id in each container.
    circlePoints = vtk.vtkPoints()
    circleLines = vtk.vtkCellArray()
    circle_pid = 0
    multiPoints = vtk.vtkPoints()
    multiLines = vtk.vtkCellArray()
    multi_pid = 0
    neutronPoints = vtk.vtkPoints()
    neutronLines = vtk.vtkCellArray()
    neutron_pid = 0
    f = open(fname, 'r')
    lines = f.readlines()
    for i, line in enumerate(lines):
        if not line:
            break
        line = line.strip()
        # register components
        if line.startswith(UC_COMP):
            # grab info line (position + flattened rotation matrix)
            info = lines[i+1]
            assert info[:4] == 'POS:'
            nums = [x.strip() for x in info[4:].split(',')]
            # extract fields
            name = line[len(UC_COMP):].strip(' "\n')
            pos = np.array([float(x) for x in nums[:3]])
            # read flat 3x3 rotation matrix
            rot = np.array([float(x) for x in nums[3:3+9]]).reshape(3, 3)
            comps[name] = (pos, rot)
        # switch perspective to a previously registered component
        elif line.startswith(MC_COMP):
            color += 1
            comp = comps[line[len(MC_COMP) + 1:]]
        elif line.startswith(MC_COMP_SHORT):
            name = line[len(MC_COMP_SHORT) + 1:].strip('"')
            comp = comps[name]
            skip = True
        # process multiline: append its points and connect consecutive ones
        elif line.startswith(MC_LINE):
            points = parse_multiline(line[len(MC_LINE):].strip('()'))
            points.pop(0)
            coords = rotate_points(points, comp)
            beg = multi_pid
            for p in coords:
                multiPoints.InsertNextPoint(p)
                multi_pid += 1
            end = multi_pid
            for idx in range(beg, end-1):
                vline = vtk.vtkLine()
                vline.GetPointIds().SetId(0,idx)
                vline.GetPointIds().SetId(1,idx +1)
                multiLines.InsertNextCell(vline)
        # process circle: tessellate and connect consecutive points
        elif line.startswith(MC_CIRCLE):
            xyz = 'xyz'
            items = line[len(MC_CIRCLE):].strip('()').split(',')
            # plane (axis indices of the two in-plane directions)
            pla = [xyz.find(a) for a in items[0].strip("''")]
            # center and radius
            pos = [float(x) for x in items[1:4]]
            rad = float(items[4])
            coords = draw_circle(pla, pos, rad, comp)
            beg = circle_pid
            for p in coords:
                circlePoints.InsertNextPoint(p)
                circle_pid += 1
            end = circle_pid
            for idx in range(beg, end-1):
                vline = vtk.vtkLine()
                vline.GetPointIds().SetId(0,idx)
                vline.GetPointIds().SetId(1,idx +1)
                circleLines.InsertNextCell(vline)
        # activate neutron when it enters; reset the path accumulators
        elif line.startswith(MC_ENTER):
            prev = None
            skip = True
            active = True
            color = 0
            xstate=[]
            ystate=[]
            zstate=[]
        # deactivate neutron when it leaves; flush its path into the
        # neutron point/cell containers
        elif line.startswith(MC_LEAVE):
            coords = np.column_stack([xstate, ystate, zstate])
            beg = neutron_pid
            for p in coords:
                neutronPoints.InsertNextPoint(p)
                neutron_pid += 1
            end = neutron_pid
            for idx in range(beg, end-1):
                vline = vtk.vtkLine()
                vline.GetPointIds().SetId(0,idx)
                vline.GetPointIds().SetId(1,idx +1)
                neutronLines.InsertNextCell(vline)
            active = False
            prev = None
        elif line.startswith(MC_ABSORB):
            # absorption ends the path implicitly; nothing to draw here
            pass
        # register state and scatter records as points on the neutron path
        elif line.startswith(MC_STATE) or line.startswith(MC_SCATTER):
            if not active:
                continue
            if skip:
                # discard the first record after ENTER / COMP:
                skip = False
                continue
            xyz = [float(x) for x in line[line.find(':')+1:].split(',')[:3]]
            xyz = rotate(xyz, comp)
            if prev is not None:
                xstate.append(xyz[0])
                ystate.append(xyz[1])
                zstate.append(xyz[2])
            prev = xyz
            xstate.append(prev[0])
            ystate.append(prev[1])
            zstate.append(prev[2])
    f.close()
    # Build polydata + mapper + actor for the circle geometry.
    circlePolydata =vtk.vtkPolyData()
    circlePolydata.SetPoints(circlePoints)
    circlePolydata.SetLines(circleLines)
    circle_mapper = vtk.vtkPolyDataMapper()
    try:
        circle_mapper.SetInputData(circlePolydata) # VTK Python >= 6
    except:
        circle_mapper.SetInput(circlePolydata) # VTK Python >= 5.8
    circle_actor = vtk.vtkActor()
    circle_actor.SetMapper(circle_mapper)
    circle_actor.GetProperty().SetAmbient(0.2)
    circle_actor.GetProperty().SetDiffuse(0.5)
    circle_actor.GetProperty().SetSpecular(0.3)
    circle_actor.GetProperty().SetColor(0,0.7,0.7)
    circle_actor.GetProperty().SetLineWidth(3)
    # Build polydata + mapper + actor for the multiline geometry.
    multiPolydata =vtk.vtkPolyData()
    multiPolydata.SetPoints(multiPoints)
    multiPolydata.SetLines(multiLines)
    multi_mapper = vtk.vtkPolyDataMapper()
    try:
        multi_mapper.SetInputData(multiPolydata) # VTK Python >= 6
    except:
        multi_mapper.SetInput(multiPolydata) # VTK Python >= 5.8
    multi_actor = vtk.vtkActor()
    multi_actor.SetMapper(multi_mapper)
    multi_actor.GetProperty().SetAmbient(0.2)
    multi_actor.GetProperty().SetDiffuse(0.5)
    multi_actor.GetProperty().SetSpecular(0.3)
    multi_actor.GetProperty().SetColor(1,0,0.5)
    multi_actor.GetProperty().SetLineWidth(3)
    # Build polydata + mapper + actor for the neutron paths.
    neutronPolydata =vtk.vtkPolyData()
    neutronPolydata.SetPoints(neutronPoints)
    neutronPolydata.SetLines(neutronLines)
    neutron_mapper = vtk.vtkPolyDataMapper()
    try:
        neutron_mapper.SetInputData(neutronPolydata) # VTK Python >= 6
    except:
        neutron_mapper.SetInput(neutronPolydata) # VTK Python >= 5.8
    neutron_actor = vtk.vtkActor()
    neutron_actor.SetMapper(neutron_mapper)
    neutron_actor.GetProperty().SetAmbient(0.2)
    neutron_actor.GetProperty().SetDiffuse(0.5)
    neutron_actor.GetProperty().SetSpecular(0.3)
    neutron_actor.GetProperty().SetColor(1,1,1)
    neutron_actor.GetProperty().SetLineWidth(2)
    # Assemble renderer and interactive window; Start() blocks until the
    # window is closed.
    renderer = vtk.vtkRenderer()
    renderer.AddActor(circle_actor)
    renderer.AddActor(multi_actor)
    renderer.AddActor(neutron_actor)
    renderer.SetBackground(0, 0, 0)
    renwin = vtk.vtkRenderWindow()
    renwin.AddRenderer(renderer)
    iren = vtk.vtkRenderWindowInteractor()
    istyle = vtk.vtkInteractorStyleTrackballCamera()
    iren.SetInteractorStyle(istyle)
    iren.SetRenderWindow(renwin)
    iren.Initialize()
    renwin.Render()
    iren.Start()
if __name__ == '__main__':
    # Usage: mcdisplay.py <trace-file>
    parse_trace(sys.argv[1])
| mads-bertelsen/McCode | tools/Python/obsoleted/mcdisplay-VTK/mcdisplay.py | Python | gpl-2.0 | 8,027 | [
"VTK"
] | ef8a2bff5db76568b3e32eb6b20aea633ef01a200464f48b91616ba8fc3b8ef0 |
import random
import tictactoe
import numpy as np
from math import sqrt, log
class Node:
    """One node of the Monte-Carlo (UCT) search tree.

    ``wins`` is always counted from the viewpoint of ``playerJustMoved``.
    A game ``state`` must be supplied at construction time; the
    constructor crashes otherwise.
    """
    def __init__(self, move=None, parent=None, state=None):
        # Move that led from the parent to this node ("None" at the root).
        self.move = move
        # Parent node ("None" at the root).
        self.parentNode = parent
        self.childNodes = []
        self.wins = 0
        self.visits = 0
        self.value = 0.0
        # Moves not yet expanded into child nodes.
        self.untriedMoves = state.GetMoves()
        # The only part of the state the node needs later on.
        self.playerJustMoved = state.playerJustMoved

    def UCTSelectChild(self):
        """Return the child with the highest UCB1 score.

        A constant UCTK could scale the exploration term
        ``sqrt(2*log(self.visits)/c.visits)`` to trade off exploration
        against exploitation; here it is effectively 1.
        """
        def ucb1(child):
            exploit = float(child.wins) / child.visits
            explore = sqrt(2 * log(self.visits) / child.visits)
            return exploit + explore
        ranked = sorted(self.childNodes, key=ucb1)
        return ranked[-1]

    def AddChild(self, m, s):
        """Expand move ``m`` into a child node built from state ``s``.

        Removes ``m`` from ``untriedMoves`` and returns the new child.
        """
        child = Node(move=m, parent=self, state=s)
        self.untriedMoves.remove(m)
        self.childNodes.append(child)
        return child

    def Update(self, result):
        """Record one more visit with ``result`` additional wins.

        ``result`` must be scored from playerJustMoved's point of view.
        """
        self.visits += 1
        self.wins += result
        self.value = self.wins / float(self.visits)

    def __repr__(self):
        return "[M:%s W/V:%s/%s U:%s]" % (
            self.move, self.wins, self.visits, self.untriedMoves)

    def TreeToString(self, indent):
        """Render this subtree, one node per line, indented by depth."""
        pieces = [self.IndentString(indent), str(self)]
        pieces.extend(child.TreeToString(indent + 1)
                      for child in self.childNodes)
        return "".join(pieces)

    def IndentString(self, indent):
        # Newline followed by one "| " marker per level of depth.
        return "\n" + "| " * indent

    def ChildrenToString(self):
        """One line per direct child."""
        return "".join(str(child) + "\n" for child in self.childNodes)
def UCT(rootstate, itermax, verbose=False):
    """Run a UCT search from ``rootstate`` for ``itermax`` iterations and
    return the best move found.

    Assumes two alternating players (player 1 starts) with game results
    in the range [0.0, 1.0].

    :param rootstate: game state providing ``Clone``, ``DoMove``,
        ``GetMoves``, ``HasWinning`` and ``GetResult``
    :param itermax: number of search iterations (simulations)
    :param verbose: if True, dump the search tree to stdout afterwards
    """
    rootnode = Node(state=rootstate)
    for _ in range(itermax):
        node = rootnode
        state = rootstate.Clone()
        # Select: descend while the node is fully expanded and non-terminal.
        while not node.untriedMoves and node.childNodes:
            node = node.UCTSelectChild()
            state.DoMove(node.move)
        # Expand: if we can expand (i.e. state/node is non-terminal),
        # play one untried move, add a child for it, and continue from
        # that child.
        if node.untriedMoves:
            m = random.choice(node.untriedMoves)
            state.DoMove(m)
            node = node.AddChild(m, state)  # add child and descend tree
        # Rollout: play random moves until the game ends.  The legal-move
        # list is computed once per step (the original called GetMoves()
        # twice per iteration).
        moves = state.GetMoves()
        while moves and not state.HasWinning():
            state.DoMove(random.choice(moves))
            moves = state.GetMoves()
        # Backpropagate from the expanded node up to the root; the state
        # is terminal here, so GetResult is valid.
        while node is not None:
            # Score from the point of view of node.playerJustMoved.
            node.Update(state.GetResult(node.playerJustMoved))
            node = node.parentNode
    # Output some information about the tree - can be omitted.
    if verbose:
        print(rootnode.TreeToString(0))
        print(rootnode.ChildrenToString())
    # Return the move that was most visited (same tie-breaking as before:
    # the last child in ascending visit order).
    return sorted(rootnode.childNodes, key=lambda c: c.visits)[-1].move
def UCTPlayGame(game_number, verbose=False):
    """Play one tic-tac-toe game between two UCT players of different
    strengths (10 vs 40 iterations) and report the outcome.

    ``game_number`` is unused; it exists so the function can be mapped
    over a range of game indices.  Returns ``(winning, winner)`` where
    ``winning`` is True for a decisive game and ``winner`` is the
    winning player number (None on a draw).
    """
    state = tictactoe.TicTacToe()
    while state.GetMoves() != []:
        if verbose:
            print(str(state))
        # Player 1 is the weaker searcher (fewer UCT iterations).
        iterations = 10 if state.LastPlayer() == 1 else 40
        move = UCT(rootstate=state, itermax=iterations, verbose=False)
        if verbose:
            print("Best Move: " + str(move) + "\n")
        state.DoMove(move)
        if state.HasWinning():
            break
    outcome = state.GetResult(state.LastPlayer())
    winner = None
    if outcome == 1:
        winner = state.LastPlayer()
    elif outcome == -1:
        winner = 3 - state.LastPlayer()
    winning = winner is not None
    if verbose:
        if winning:
            print("Player " + str(winner) + " wins!")
        else:
            print("Nobody wins!")
        print(str(state))
    return (winning, winner)
if __name__ == "__main__":
    # Play several games to the end using UCT for both players, in
    # parallel worker processes, then summarise the outcomes.
    from multiprocessing import Pool
    from tqdm import tqdm
    number_of_games = 1000
    results_list = []
    # Fan the games out over a process pool with a progress bar.
    pool = Pool()
    for result in tqdm(pool.imap_unordered(UCTPlayGame, range(number_of_games)),
                       total=number_of_games):
        results_list.append(result)
    # Compile results: pos counts decisive games, neg counts draws.
    pos = 0
    neg = 0
    player1 = 0
    player2 = 0
    for winning, winner in results_list:
        if winning:
            pos += 1
            if winner == 1:
                player1 += 1
            elif winner:
                player2 += 1
        else:
            neg += 1
    print("Number of positives: %d / %d" % (pos, number_of_games))
    print("Number of negatives: %d / %d" % (neg, number_of_games))
    # Guard against ZeroDivisionError when every game is a draw.
    if pos:
        print("Player1 wins: %2.2f%%" % (float(player1) / pos * 100))
        print("Player2 wins: %2.2f%%" % (float(player2) / pos * 100))
    else:
        print("No decisive games were played.")
"VisIt"
] | 83b5d22391d78bfe18eb87e16cd91742ded4a27f075eeb26fafca1f1c2764705 |
#!/usr/bin/env python3
import fastentrypoints # NOQA pylint: disable=unused-import
from setuptools import setup, find_packages
def read_file(fn):
    """Return the entire contents of text file *fn* as a string.

    The file is decoded as UTF-8 explicitly so the build does not depend
    on the platform's default locale encoding (the long description read
    from README.rst may contain non-ASCII characters).
    """
    with open(fn, encoding="utf-8") as f:
        return f.read()
# Package metadata and dependencies for the ymp pipeline system.
setup(
    name="ymp",
    use_scm_version={'write_to': 'src/ymp/_version.py'},
    description="Flexible multi-omic pipeline system",
    long_description=read_file("README.rst"),
    long_description_content_type="text/x-rst",
    url="https://github.com/epruesse/ymp",
    author="Elmar Pruesse",
    author_email="elmar@pruesse.net",
    license="GPL-3",
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Natural Language :: English',
        'Operating System :: MacOS',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
    ],
    platforms=["linux", "macos"],
    keywords=("bioinformatics pipeline workflow automation "
              "rnaseq genomics metagenomics "
              "conda bioconda snakemake"),
    project_urls={
        'Documentation': 'https://ymp.readthedocs.io',
        'Source': 'https://github.com/epruesse/ymp',
    },
    packages=find_packages('src'),
    package_dir={'': 'src'},
    zip_safe=False,
    setup_requires=[
        'setuptools_scm>=3.4',
        'setuptools>=42',
        'wheel',
        'pytest-runner',
    ],
    install_requires=[
        'snakemake>=6.0.5',
        'Click',
        'Click-completion',
        'ruamel.yaml>0.15',
        'drmaa',
        'pandas>=0.20',
        'openpyxl',  # excel support
        'coloredlogs',
        'xdg',  # user paths
        # merged the duplicate pins 'tqdm >4.21' and 'tqdm>=4.21.0' into
        # the single stricter requirement
        'tqdm>=4.21.0',
        'aiohttp',
    ],
    tests_require=[
        'networkx>=2',
        'pytest-xdist',
        'pytest-logging',
        'pytest-timeout',
        'pygraphviz',
        'pytest',
        'yappi',
        'pytest-cov',
        'codecov'
    ],
    extras_require={
        'docs': [
            'sphinx',
            'cloud_sptheme',
            'sphinxcontrib-fulltoc',
            'sphinx-click',
            'sphinx_autodoc_typehints',
            'ftputil',
        ]
    },
    python_requires='>=3.6',
    include_package_data=True,
    entry_points='''
        [console_scripts]
        ymp=ymp.cli:main
    ''',
)
| epruesse/ymp | setup.py | Python | gpl-3.0 | 2,505 | [
"Bioconda"
] | ea6e9ff0c2826d85d15a61f377a2da05ef239443bf6211bdf2854e282156b3be |
import openvoronoi as ovd
import ovdvtk
import time
import vtk
import datetime
import math
import random
import os
def drawLine(myscreen, p1, p2):
    """Add a yellow line actor from *p1* to *p2* (at z=0) to *myscreen*."""
    start = (p1.x, p1.y, 0)
    end = (p2.x, p2.y, 0)
    myscreen.addActor(ovdvtk.Line(p1=start, p2=end, color=ovdvtk.yellow))
def writeFrame(w2if, lwr, n):
    """Refresh the window-to-image filter and point the PNG writer at the
    n-th numbered frame file; the actual Write() call is left disabled."""
    w2if.Modified()
    frame_name = "/frames/vd500_zoomout" + ("%05d" % n) + ".png"
    lwr.SetFileName(os.getcwd() + frame_name)
    # lwr.Write()
def regularGridGenerators(far, Nmax):
    """Return generator points on a regular square grid spanning
    [-0.7*far, 0.7*far] in x and y, shuffled into random insertion order.

    Note: only rows*rows = int(sqrt(Nmax))**2 points are produced, which
    can be fewer than Nmax.
    """
    # REGULAR GRID
    rows = int(math.sqrt(Nmax))
    print "rows= ", rows
    gpos = [-0.7 * far, 1.4 * far / float(rows - 1)] # start, stride
    plist = []
    for n in range(rows):
        for m in range(rows):
            x = gpos[0] + gpos[1] * n
            y = gpos[0] + gpos[1] * m
            # rotation (disabled)
            # alfa = 0
            # xt=x
            # yt=y
            # x = xt*math.cos(alfa)-yt*math.sin(alfa)
            # y = xt*math.sin(alfa)+yt*math.cos(alfa)
            plist.append(ovd.Point(x, y))
    # Shuffle so the diagram is built in random insertion order.
    random.shuffle(plist)
    return plist
def randomGenerators(far, Nmax):
    """Return Nmax generator points drawn uniformly at random from the
    axis-aligned square inscribed in the circle of radius *far*."""
    half_side = (1.0 / math.sqrt(2)) * far
    return [ovd.Point(-half_side + 2 * half_side * random.random(),
                      -half_side + 2 * half_side * random.random())
            for _ in range(Nmax)]
def circleGenerators(far, Nmax):
# POINTS ON A CIRCLE
# """
# cpos=[50,50]
# npts = 100
dalfa = float(2 * math.pi) / float(Nmax - 1)
# dgamma= 10*2*math.pi/npts
# alfa=0
# ofs=10
plist = []
radius = 0.81234 * float(far)
for n in range(Nmax):
x = float(radius) * math.cos(float(n) * float(dalfa))
y = float(radius) * math.sin(float(n) * float(dalfa))
plist.append(ovd.Point(x, y))
# random.shuffle(plist)
return plist
if __name__ == "__main__":
# print ocl.revision()
myscreen = ovdvtk.VTKScreen(width=1024, height=720) # (width=1920, height=1080)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInputConnection(w2if.GetOutputPort())
# w2if.Modified()
# lwr.SetFileName("tux1.png")
scale = 1
myscreen.render()
random.seed(2)
far = 1
camPos = far
zmult = 4
# camPos/float(1000)
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
vd = ovd.VoronoiDiagram(far, 120)
print ovd.version()
vd.check()
print "created."
# for vtk visualization
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
vod.textScale = 0.02
vod.vertexRadius = 0.0031
vod.drawVertices = 0
vod.drawVertexIndex = 0
vod.drawGenerators = 0
vod.offsetEdges = 0
vd.setEdgeOffset(0.05)
Nmax = 6
plist = randomGenerators(far, Nmax)
# plist = regularGridGenerators(far, Nmax)
# plist = circleGenerators(far, Nmax)
# plist = randomGenerators(far, Nmax)
# plist = []
# plist.append( ovd.Point(0.0,0.1) )
# plist.append( ovd.Point(0,0.9) )
# plist.append( ovd.Point(-0.15, -0.15) )
# + regularGridGenerators(far, Nmax) + circleGenerators(far, Nmax)
# plist = [ovd.Point(0,0)]
print plist
times = []
t_before = time.time()
n = 0
id_list = []
# vd.debug_on()
for p in plist:
print n, " adding ", p
id_list.append(vd.addVertexSite(p))
n = n + 1
t_after = time.time()
calctime = t_after - t_before
times.append(calctime)
id1 = id_list[0]
id2 = id_list[1]
id3 = id_list[2]
id4 = id_list[3]
# print "add segment ",id1, " to ", id2
vd.debug_on()
vd.addLineSite(id1, id2)
vd.addLineSite(id3, id4)
t_after = time.time()
calctime = t_after - t_before
times.append(calctime)
if Nmax == 0:
Nmax = 1
print " VD done in ", calctime, " s, ", calctime / Nmax, " s per generator"
vod.setVDText2(times)
vod.setAll()
myscreen.render()
print "PYTHON All DONE."
myscreen.render()
myscreen.iren.Start()
| aewallin/openvoronoi | python_examples/line-segment/lineseg_1.py | Python | lgpl-2.1 | 4,285 | [
"VTK"
] | f9373f94585b880c9a5ea1b25dd5fc9236b34d9ff350e3c86b0aa56c5d906fe6 |
# Dilate 3D horizontal
# May 2016 - Martijn Koopman
# ToDo: Clean code; pass variable dims to function GetArrValue() and SetArrValue() as arguments
# User parameters
radius = 2 # 2 * 10 cm on both sides + 10 cm in middle = 50cm
vertical_extrusion = 1 # 19 * 10 cm = 190 cm
# Input
idi = self.GetInput()
dims = idi.GetDimensions()
numTuples = dims[0]*dims[1]*dims[2]
input_arr = idi.GetPointData().GetScalars()
intermediate_arr = vtk.vtkTypeUInt32Array()
intermediate_arr.SetName('intermediate')
intermediate_arr.SetNumberOfComponents(1)
intermediate_arr.SetNumberOfTuples(numTuples)
# Output
ido = self.GetOutput()
output_arr = vtk.vtkTypeUInt32Array()
output_arr.SetName('scalar')
output_arr.SetNumberOfComponents(1)
output_arr.SetNumberOfTuples(numTuples)
# Copy input array
for i in range(0, numTuples):
if input_arr.GetValue(i) == 0:
output_arr.SetValue(i,0)
intermediate_arr.SetValue(i,0)
else:
output_arr.SetValue(i,1)
intermediate_arr.SetValue(i,1)
# Utility functions
def GetArrValue(arr, pos):
if pos[0] < 0 or pos[0] >= dims[0] or pos[1] < 0 or pos[1] >= dims[1] or pos[2] < 0 or pos[2] >= dims[2]:
return 0
else:
i = pos[0] + (pos[1] * dims[0]) + (pos[2] * dims[0] * dims[1])
return arr.GetValue(i)
def SetArrValue(arr, pos, val):
if pos[0] < 0 or pos[0] >= dims[0] or pos[1] < 0 or pos[1] >= dims[1] or pos[2] < 0 or pos[2] >= dims[2]:
return
i = pos[0] + (pos[1] * dims[0]) + (pos[2] * dims[0] * dims[1])
arr.SetValue(i, val)
# Dilate vertically
for x in range(dims[0]):
for y in range(dims[1]):
for z in range(dims[2]):
if GetArrValue(input_arr, (x,y,z)) > 0:
# Found an obstacle -> create vertical buffer
for z_offset in range(1, vertical_extrusion+1):
SetArrValue(intermediate_arr, (x, y, z-z_offset), 1)
# Dilate horizontally
for x in range(dims[0]):
for y in range(dims[1]):
for z in range(dims[2]):
if GetArrValue(intermediate_arr, (x,y,z)) > 0:
# Create horizontal buffer
for x_offset in range(-radius, radius+1):
x_n = x + x_offset
for y_offset in range(-radius, radius+1):
y_n = y + y_offset
# Check if neighbour is within radius of circle
# X^2 + Y^2 <= R^2
if ((x_offset*x_offset) + (y_offset*y_offset)) <= (radius*radius):
# Create buffer voxel
SetArrValue(output_arr, (x_n, y_n, z), 1)
ido.GetPointData().SetScalars(output_arr) | martijnkoopman/thesis | 1_dilate.py | Python | gpl-3.0 | 2,869 | [
"VTK"
] | 4a52dde59133dc9dcd76bf97df7e69f438d1ea3056c8267600df6ffd2eb41425 |
#!/usr/bin/env python
"""
Complement regions.
usage: %prog in_file out_file
-1, --cols1=N,N,N,N: Columns for chrom, start, end, strand in file
-l, --lengths=N: Filename of .len file for species (chromosome lengths)
-a, --all: Complement all chromosomes (Genome-wide complement)
"""
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
import sys, traceback, fileinput
from warnings import warn
from bx.intervals import *
from bx.intervals.io import *
from bx.intervals.operations.complement import complement
from bx.intervals.operations.subtract import subtract
from bx.cookbook import doc_optparse
from galaxy.tools.util.galaxyops import *
assert sys.version_info[:2] >= ( 2, 4 )
def main():
allchroms = False
upstream_pad = 0
downstream_pad = 0
options, args = doc_optparse.parse( __doc__ )
try:
chr_col_1, start_col_1, end_col_1, strand_col_1 = parse_cols_arg( options.cols1 )
lengths = options.lengths
if options.all: allchroms = True
in_fname, out_fname = args
except:
doc_optparse.exception()
g1 = NiceReaderWrapper( fileinput.FileInput( in_fname ),
chrom_col=chr_col_1,
start_col=start_col_1,
end_col=end_col_1,
strand_col=strand_col_1,
fix_strand=True )
lens = dict()
chroms = list()
# dbfile is used to determine the length of each chromosome. The lengths
# are added to the lens dict and passed copmlement operation code in bx.
dbfile = fileinput.FileInput( lengths )
if dbfile:
if not allchroms:
try:
for line in dbfile:
fields = line.split("\t")
lens[fields[0]] = int(fields[1])
except:
# assume LEN doesn't exist or is corrupt somehow
pass
elif allchroms:
try:
for line in dbfile:
fields = line.split("\t")
end = int(fields[1])
chroms.append("\t".join([fields[0],"0",str(end)]))
except:
pass
# Safety...if the dbfile didn't exist and we're on allchroms, then
# default to generic complement
if allchroms and len(chroms) == 0:
allchroms = False
if allchroms:
chromReader = GenomicIntervalReader(chroms)
generator = subtract([chromReader, g1])
else:
generator = complement(g1, lens)
out_file = open( out_fname, "w" )
try:
for interval in generator:
if type( interval ) is GenomicInterval:
out_file.write( "%s\n" % "\t".join( interval ) )
else:
out_file.write( "%s\n" % interval )
except ParseError, exc:
out_file.close()
fail( "Invalid file format: %s" % str( exc ) )
out_file.close()
if g1.skipped > 0:
print skipped( g1, filedesc="" )
if __name__ == "__main__":
main()
| volpino/Yeps-EURAC | tools/new_operations/gops_complement.py | Python | mit | 3,083 | [
"Galaxy"
] | a3ea6473c03089ee67160fa15fce7c2ba339c865b5652cff582317337d117727 |
## numpy-oldnumeric calls replaced by custom script; 09/06/2016
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
Store and manipulate coordinates and atom information.
"""
import biskit.tools as T
from biskit import molUtils
from biskit import mathUtils
from biskit import match2seq
from biskit import rmsFit
from biskit.core.localpath import LocalPath
from biskit.errors import BiskitError
from biskit.profileCollection import ProfileCollection, ProfileError
from biskit.core.pdbparserFactory import PDBParserFactory
from biskit.core.pdbparseFile import PDBParseFile
from biskit import biounit as BU
from biskit.core import oldnumeric as N0
from biskit.core.scientificIO import PDB as IO
from biskit import EHandler
from biskit.future import Residue
import biskit as B
import numpy as N
import os, sys
import copy
import time
import string
import types
import functools
class PDBProfiles( ProfileCollection ):
"""
A ProfileCollection that triggers an update() of its parent :class:`PDBModel`
if an empty or (optionally) missing profile is requested.
.. seealso:: `biskit.ProfileCollection`
"""
def __init__(self, model=None, profiles=None, infos=None ):
"""
:param model: parent model of this ProfileCollection
:type model: PDBModel
:param profiles: dictionary of existing profiles
:type profiles: { 'name' : list/array }
:param infos: dictionary of existing meta infos
:type infos: { 'name' : { 'date' : ... } }
"""
ProfileCollection.__init__( self, profiles=profiles, infos=infos )
self.model = model
def clone(self):
"""
Include reference to (same) parent model.
"""
return self.__class__(self.model, copy.deepcopy(self.profiles),
copy.deepcopy(self.infos))
## should work but leads to some kind of loop condition
## def profLength(self, default=0):
## r = ProfileCollection.profLength(self, default=None)
## if r is not None:
## return r
##
## if self.model and self.model.xyz is not None:
## return len(self.model.xyz)
##
## return default
def get( self, name, default=None, update=True, updateMissing=False ):
"""
Fetch a profile::
get( profKey, [default] ) -> list or array of values
Or:
get( (profKey, infoKey), [default] ) -> single value of metainfo dict
This method extends the standard :class:`ProfileCollection.get` by the
ability to fetch empty (None) or missing profiles from a source
file or model.
:param name: profile key or profile and info key
:type name: str OR (str, str)
:param default: default result if no profile is found,
if None and no profile is found, attempt update
:type default: any
:param update: update from source before returning empty profile [0]
:type update: bool
:param updateMissing: update from source before reporting missing
profile [0]
:type updateMissing: bool
:raise ProfileError: if no profile is found with |name|
"""
try:
r = ProfileCollection.get( self, name, default=default)
except ProfileError as e:
if updateMissing:
r = None
else:
raise ProfileError(e)
if r is None and (update or updateMissing):
## only read PDB source if this is indeed useful
if not name in PDBModel.PDB_KEYS and \
PDBParseFile.supports( self.model.validSource() ):
return None
self.model.update( updateMissing=updateMissing )
## try again
r = ProfileCollection.get(self, name )
return r
class PDBResidueProfiles(PDBProfiles):
"""Work in progress -- give residue profiles a default length"""
def profLength(self, default=0):
r = ProfileCollection.profLength(self, default=None)
if r is not None:
return r
if self.model and self.model._resIndex is not None:
return len(self.model._resIndex)
return default
class PDBError(BiskitError):
pass
class PDBIndexError( PDBError):
"""Errors warning of issues with residue or chain index."""
pass
class PDBModel:
"""
Store and manipulate coordinates and atom infos stemming from a PDB file.
Coordinates are stored in the numpy array 'xyz'; the additional atom infos
from the PDB (name, residue_name, and many more) are efficiently stored in
a :class:`PDBProfiles` instance 'atoms' which can be used to also associate
arbitrary other data to the atoms. Moreover, a similar collection
'residues' can hold data associated to residues (but is initially empty).
A normal dictionary 'info' accepts any information about the whole model.
For detailed documentation,
see http://biskit.pasteur.fr/doc/handling_structures/PDBModel
@todo:
* outsource validSource into PDBParserFactory
* prevent repeated loading of test PDB for each test
"""
#: keys of all atom profiles that are read directly from the PDB file
PDB_KEYS = ['name', 'residue_number', 'insertion_code', 'alternate',
'name_original', 'chain_id', 'occupancy', 'element',
'segment_id', 'charge', 'residue_name', 'after_ter',
'serial_number', 'type', 'temperature_factor']
def __init__( self, source=None, pdbCode=None, noxyz=0, skipRes=None,
headPatterns=[] ):
"""
Examples:
- `PDBModel()` creates an empty Model to which coordinates (field xyz)
and PDB records (atom profiles) have still to be added.
- `PDBModel( file_name )` creates a complete model with coordinates
and PDB records from file_name (pdb, pdb.gz, or pickled PDBModel)
- `PDBModel( PDBModel )` creates a copy of the given model
- `PDBModel( PDBModel, noxyz=1 )` creates a copy without coordinates
:param source: str, file name of pdb/pdb.gz file OR pickled PDBModel OR
PDBModel, template structure to copy atoms/xyz field from
:type source: str or PDBModel
:param pdbCode: PDB code, is extracted from file name otherwise
:type pdbCode: str or None
:param noxyz: 0 (default) || 1, create without coordinates
:type noxyz: 0||1
:param headPatterns: [(putIntoKey, regex)] extract given REMARK values
:type headPatterns: [(str, str)]
:raise PDBError: if file exists but can't be read
"""
self.source = source
if type( source ) is str and ( len(source) != 4 or \
os.path.isfile( source ) ):
self.source = LocalPath( source )
self.__validSource = 0
self.fileName = None
self.pdbCode = pdbCode
self.xyz = None
#: save atom-/residue-based values
self.residues = PDBResidueProfiles( self )
self.atoms = PDBProfiles( self )
#: cached atom masks, calculated when first needed
self.__maskCA = None
self.__maskBB = None
self.__maskHeavy = None
#: cache position of chain breaks (clear when xyz changes)
self.__chainBreaks = None
#: starting positions of each residue
self._resIndex = None
#: starting positions of each chain
self._chainIndex = None
if noxyz:
## trick update to leave xyz untouched
self.xyz = 0
#: monitor changes of coordinates
self.xyzChanged = 0
self.forcePickle = 0
#: version as of creation of this object
self.initVersion = B.__version__
#: to collect further informations
self.info = { 'date':T.dateSortString() }
if source != None:
self.update( skipRes=skipRes, updateMissing=1, force=1,
headPatterns=headPatterns )
if noxyz:
## discard coordinates, even when read from PDB file
self.xyz = None
def __getstate__(self):
"""
called before pickling the object. (but also called by deepcopy)
"""
self.slim()
self.forcePickle = 0
return self.__dict__
def __setstate__(self, state ):
"""
called for unpickling the object.
"""
## until 2.0.1 PDBModel.atoms contained a list of dictionaries
## now we merged this info into PDBModel.aProfiles and renamed
## aProfiles back to .atoms
if 'atoms' in state and type( state['atoms'] ) in [list, type(None)] :
state['old_atoms'] = state['atoms']
del state['atoms']
self.__dict__ = state
## backwards compability
self.__defaults()
def __len__(self):
return self.lenAtoms()
def __getitem__( self, k ):
"""
Get atom profile or profile item or CrossView for one atom::
m['prof1'] <==> m.atoms.get( 'prof1' )
m['prof1','info1'] <==> m.atoms.get( 'prof1','info1' )
m[10] <==> CrossView( m.atoms, 10 )
:return: profile OR meta infos thereof OR CrossView dict
:rtype: list OR array OR any OR CrossView
"""
if type( k ) is str:
if k in self.atoms:
return self.atoms.get( k )
if k in self.residues:
return self.residues.get( k )
if k in self.info:
return self.info[ k ]
return self.profile( k )
if type( k ) is tuple:
return self.profileInfo( k[0] )[ k[1] ]
return self.atoms[k]
def __setitem__( self, k, v ):
"""
Set atom profile or profile item (or meta info)::
m['prof1'] = range(10)
is same as <==> m.atoms.set( 'prof1', range(10) )
OR <==> m.residues.set( 'prof1', range(10) )
m['prof1','info1'] = 'comment'
is same as <==> m.atoms.setInfo('prof1',info1='comment')
OR <==> m.residues.setInfo('prof1',info1='comment')
m['version'] = '1.0.0' <==> m.info['version'] = '1.0.0'
(but only if 'version' already exists in `m.info`)
:return: item
:rtype: any
"""
if type( k ) is str:
if k in self.atoms:
return self.atoms.set( k, v )
if k in self.residues:
return self.residues.set( k, v )
if k in self.info:
self.info[ k ] = v
return
if v is not None and len( v ) == self.lenAtoms():
return self.atoms.set( k, v )
if v is not None and len( v ) == self.lenResidues():
return self.residues.set( k, v )
raise ProfileError('Value cannot clearly be assigned to either atom or '+\
'residue profiles')
if type( k ) is tuple:
key, infokey = k
if key in self.residues:
self.residues[key, infokey] = v
return
self.atoms[key, infokey] = v
return
raise ProfileError('Cannot interpret %r as profile name or profile info record' % k)
def __getslice__( self, *arg ):
"""
Get list of CrossViews::
m[0:100:5] <==> [ CrossView(m.atoms, i) for i in range(0,100,5) ]
"""
return self.atoms.__getslice__( *arg )
def __iter__( self ):
return self.atoms.iterCrossViews()
def __repr__( self ):
code = self.pdbCode or ''
return '[%s %s %5i atoms, %4i residues, %2i chains]' % \
( self.__class__.__name__, code, self.lenAtoms(lookup=False),
self.lenResidues(), self.lenChains() )
def __str__( self ):
return self.__repr__()
def report( self, prnt=True, plot=False, clipseq=60 ):
"""
Print (or return) a brief description of this model.
:param prnt: directly print report to STDOUT (default True)
:type prnt: bool
:param plot: show simple 2-D line plot using gnuplot [False]
:type plot: bool
:param clipseq: clip chain sequences at this number of letters [60]
:type clipseq: int
:return: if prnt==True: None, else: formatted description of this model
:rtype: None or str
"""
r = self.__repr__()
for c in range( self.lenChains() ):
m = self.takeChains( [c] )
r += '\n\t* chain %-2i(%s): %s' % ( c, m['chain_id'][0],
T.clipStr( m.sequence(), clipseq ) )
r += '\n source: ' + repr( self.source )
r += '\n %2i atom profiles: %s' % ( len( self.atoms ),
T.clipStr( repr(list(self.atoms.keys())), 57 ))
r += '\n %2i residue profiles: %s' % ( len( self.residues ),
T.clipStr( repr(list(self.residues.keys())), 57 ))
r += '\n %2i info records: %s' % ( len( self.info ),
T.clipStr( repr( list(self.info.keys()) ), 57 ))
if plot:
self.plot()
if prnt:
print(r)
else:
return r
def plot( self, hetatm=False ):
"""
Get a quick & dirty overview over the content of a PDBModel. plot
simply creates a 2-D plot of all x-coordinates versus all y coordinates,
colored by chain. This is obviously not publication-quality ;-).
Use the Biskit.Pymoler class for real visalization.
:param hetatm: include hetero & solvent atoms (default False)
:type hetatm: bool
"""
from biskit import gnuplot
m = self
if not hetatm:
mask = self.maskHetatm()
mask = mask + m.maskSolvent()
m = self.compress( N0.logical_not( mask ) )
chains = [ self.takeChains( [i] ) for i in range( m.lenChains())]
xy = [ list(zip( m.xyz[:,0], m.xyz[:,1] )) for m in chains ]
gnuplot.plot( *xy )
def __vintageCompatibility( self ):
"""
backward compatibility to vintage PDBModels < 2.0.0
"""
## convert first generation profile dictionaries into new ProfileCollections
if 'resProfiles' in self.__dict__:
self.residues=PDBProfiles( self,
profiles=getattr(self,'resProfiles',{}),
infos=getattr(self,'resProfiles_info',{}) )
try:
del self.resProfiles; del self.resProfiles_info
except: pass
if 'atomProfiles' in self.__dict__:
self.atoms=PDBProfiles( self,
profiles=getattr(self,'atomProfiles',{}),
infos=getattr(self,'atomProfiles_info',{}) )
try:
del self.atomProfiles; del self.atomProfiles_info
except: pass
## fix old bug: slim() was creating a self.xyx instead of xyz
if getattr( self, 'xyx', 0 ):
del self.xyx
## first generation source was just a simple string
if type( self.source ) == str:
self.source = LocalPath( self.source )
self.__validSource = getattr( self, '_PDBModel__validSource', 0)
self.initVersion = getattr( self, 'initVersion', 'old PDBModel')
self.forcePickle = getattr( self, 'forcePickle', 0 )
self.info = getattr( self, 'info', { 'date':T.dateSortString() } )
## fix previous bug; old PDBModel pickles often have stray terAtoms
## records
if getattr( self, '_PDBParseFile__terAtoms', None) is not None:
del self._PDBParseFile__terAtoms
def __defaults(self ):
"""
backwards compatibility to earlier pickled models
"""
self.__vintageCompatibility()
## if there were not even old profiles...
if getattr( self, 'atoms', 0) is 0:
self.atoms = PDBProfiles(self)
if getattr( self, 'residues', 0) is 0:
self.residues = PDBResidueProfiles(self)
## between release 2.0.1 and 2.1, aProfiles were renamed to atoms
if getattr( self, 'aProfiles', None) is not None:
self.atoms = self.aProfiles
del self.aProfiles
## between release 2.0.1 and 2.1, rProfiles were renamed to residues
if getattr( self, 'rProfiles', None) is not None:
self.residues = self.rProfiles
del self.rProfiles
## old aProfiles and rProfiles didn't keep a reference to the parent
if getattr( self.atoms, 'model', 0) is 0:
self.atoms.model = self
self.residues.model = self
## biskit <= 2.0.1 kept PDB infos in list of dictionaries
atoms = getattr( self, 'old_atoms', 0)
if not atoms is 0:
## atoms to be fetched from external source
if atoms is None:
for k in self.PDB_KEYS:
self.atoms.set( k, None, changed=0 )
else:
atoms = B.DictList( atoms )
for k in self.PDB_KEYS:
self.atoms.set( k, atoms.valuesOf( k ),
changed=getattr(self, 'atomsChanged',1) )
del self.old_atoms
del self.atomsChanged
## biskit <= 2.0.1 kept positions of TER records in a separate list
ter_atoms = getattr( self, '_PDBModel__terAtoms', 0)
if ter_atoms:
mask = N0.zeros( self.atoms.profLength() )
N0.put( mask, ter_atoms, 1 )
self.atoms.set('after_ter', mask,
comment='rebuilt from old PDBModel.__terAtoms')
if ter_atoms is not 0:
del self.__terAtoms
## biskit <= 2.0.1 cached a volatile index in __resIndex & __chainIndex
self._resIndex = getattr( self, '_resIndex', None)
self._chainIndex=getattr( self, '_chainIndex', None)
if getattr( self, '__resIndex', None) is not None:
try:
del self.__resIndex, self.__chainIndex
except:
print('DEBUG ', T.lastError())
pass
self.__maskCA = getattr( self, '__maskCA', None )
self.__maskBB = getattr( self, '__maskBB', None )
self.__maskHeavy = getattr( self, '__maskHeavy', None )
## test cases of biskit < 2.3 still contain Numeric arrays
if self.xyz is not None and type( self.xyz ) is not N.ndarray:
self.xyz = N0.array( self.xyz )
if self._resIndex is not None and type( self._resIndex ) is not N.ndarray:
self._resIndex = N0.array( self._resIndex )
if self._chainIndex is not None and type(self._chainIndex) is not N.ndarray:
self._chainIndex = N0.array( self._chainIndex )
try:
del self.caMask, self.bbMask, self.heavyMask
except:
pass
def update( self, skipRes=None, updateMissing=0, force=0,
headPatterns=[] ):
"""
Read coordinates, atoms, fileName, etc. from PDB or
pickled PDBModel - but only if they are currently empty.
The atomsChanged and xyzChanged flags are not changed.
:param skipRes: names of residues to skip if updating from PDB
:type skipRes: list of str
:param updateMissing: 0(default): update only existing profiles
:type updateMissing: 0|1
:param force: ignore invalid source (0) or report error (1)
:type force: 0|1
:param headPatterns: [(putIntoKey, regex)] extract given REMARKS
:type headPatterns: [(str, str)]
:raise PDBError: if file can't be unpickled or read:
"""
source = self.validSource()
if source is None and force:
raise PDBError( str(self.source) + ' is not a valid source.')
if source is None:
return
parser = PDBParserFactory.getParser( source )
parser.update(self, source, skipRes=skipRes,
updateMissing=updateMissing, force=force,
headPatterns=headPatterns )
def setXyz(self, xyz ):
"""
Replace coordinates.
:param xyz: Numpy array ( 3 x N_atoms ) of float
:type xyz: array
:return: array( 3 x N_atoms ) or None, old coordinates
:rtype: array
"""
old = self.xyz
self.xyz = xyz
self.xyzChanged = self.xyzChanged or \
not mathUtils.arrayEqual(self.xyz,old )
return old
def setSource( self, source ):
"""
:param source: LocalPath OR PDBModel OR str
"""
if type( source ) == str and len( source ) != 4:
self.source = LocalPath( source )
else:
self.source = source
self.__validSource = 0
def getXyz( self, mask=None ):
"""
Get coordinates, fetch from source PDB or pickled PDBModel,
if necessary.
:param mask: atom mask
:type mask: list of int OR array of 1||0
:return: xyz-coordinates, array( 3 x N_atoms, Float32 )
:rtype: array
"""
if self.xyz is None and self.validSource() is not None:
self.update( force=1 )
if self.xyz is None:
## empty array that can be concatenated to other xyz arrays
return N0.zeros( (0,3), N0.Float32 )
if mask is None:
return self.xyz
return N0.compress( mask, self.xyz, 0 )
def getAtoms( self, mask=None ):
"""
Get atom CrossViews that can be used like dictionaries.
Note that the direct manipulation of individual profiles is more
efficient than the manipulation of CrossViews (on profiles)!
:param mask: atom mask
:type mask: list of int OR array of 1||0
:return: list of CrossView dictionaries
:rtype: [ :class:`ProfileCollection.CrossView` ]
"""
r = self.atoms.toCrossViews()
if mask is None:
return r
return [ r[i] for i in N0.nonzero( mask ) ]
def profile( self, name, default=None, update=True, updateMissing=False ):
"""
Use::
profile( name, updateMissing=0) -> atom or residue profile
:param name: name to access profile
:type name: str
:param default: default result if no profile is found, if None,
try to update from source and raise error [None]
:type default: any
:param update: update from source before returning empty profile [True]
:type update: bool
:param updateMissing: update from source before reporting missing
profile [False]
:type updateMissing: 0||1
:raise ProfileError: if neither atom- nor rProfiles contains |name|
"""
if updateMissing and not name in self.atoms and \
not name in self.residues:
self.update( updateMissing=True )
if name in self.atoms:
return self.atoms.get( name, default,
update=update, updateMissing=0)
if name in self.residues:
return self.residues.get( name, default,
update=update, updateMissing=0)
if default is not None:
return default
raise ProfileError( 'No profile info found with name '+str(name))
def profileInfo( self, name, updateMissing=0 ):
"""
Use:
profileInfo( name ) -> dict with infos about profile
:param name: name to access profile
:type name: str
:param updateMissing: update from source before reporting missing \
profile. Guaranteed infos are:
- 'version' (str)
- 'comment' (str)
- 'changed' (1||0)
:type updateMissing: 0|1
:raise ProfileError: if neither atom - nor rProfiles contains |name|
"""
if updateMissing and not name in self.atoms and \
not name in self.residues:
self.update()
if name in self.atoms:
return self.atoms.getInfo( name )
if name in self.residues:
return self.residues.getInfo( name )
raise ProfileError( 'No profile info found with name '+str(name))
def removeProfile( self, *names ):
"""
Remove residue or atom profile(s)
Use:
removeProfile( str_name [,name2, name3] ) -> 1|0,
:param names: name or list of residue or atom profiles
:type names: str OR list of str
:return: 1 if at least 1 profile has been deleted,
0 if none has been found
:rtype: int
"""
r = 0
for n in names:
if n in self.atoms:
del self.atoms[n]
r = 1
if n in self.residues:
del self.residues[n]
r = 1
return r
def xyzIsChanged(self):
"""
Tell if xyz or atoms have been changed compared to source file or
source object (which can be still in memory).
:return: xyz field has been changed with respect to source
:rtype: (1||0, 1||0)
"""
return self.xyzChanged
def xyzChangedFromDisc(self):
"""
Tell whether xyz can currently be reconstructed from a
source on disc. Same as xyzChanged() unless source is another not yet
saved PDBModel instance that made changes relative to its own source.
:return: xyz has been changed
:rtype: bool
"""
if self.validSource() is None:
return True
if isinstance( self.source, B.PDBModel ):
return self.xyzIsChanged() or \
self.source.xyzChangedFromDisc()
return self.xyzIsChanged()
def profileChangedFromDisc(self, pname):
"""
Check if profile has changed compared to source.
:return: 1, if profile |pname| can currently not be
reconstructed from a source on disc.
:rtype: int
:raise ProfileError: if there is no atom or res profile with pname
"""
if self.validSource() is None:
return True
if isinstance( self.source, B.PDBModel ):
return self.profileInfo( pname )['changed'] or \
self.source.profileChangedFromDisc( pname )
return self.profileInfo( pname )['changed']
def __slimProfiles(self):
"""
Remove profiles, that haven't been changed from a direct
or indirect source on disc
**AUTOMATICALLY CALLED BEFORE PICKLING and by deepcopy**
"""
for key in self.residues:
if not self.profileChangedFromDisc( key ):
self.residues.set( key, None )
for key in self.atoms:
if not self.profileChangedFromDisc( key ):
self.atoms.set( key, None )
def slim( self ):
"""
Remove xyz array and profiles if they haven't been changed and
could hence be loaded from the source file (only if there is a source
file...).
**AUTOMATICALLY CALLED BEFORE PICKLING**
**Currently also called by deepcopy via getstate**
"""
## remove atoms/coordinates if they are unchanged from an existing
## source
## override this behaviour with forcePickle
if not self.forcePickle:
if not self.xyzChangedFromDisc():
self.xyz = None
if type( self.xyz ) is N0.arraytype and self.xyz.dtype.char != 'f':
self.xyz = self.xyz.astype(N0.Float32)
self.__slimProfiles()
self.__maskCA = self.__maskBB = self.__maskHeavy = None
self.__validSource = 0
def validSource(self):
"""
Check for a valid source on disk.
:return: str or PDBModel, None if this model has no valid source
:rtype: str or PDBModel or None
"""
if self.__validSource == 0:
if isinstance( self.source, LocalPath ) and self.source.exists():
self.__validSource = self.source.local()
else:
if isinstance( self.source, B.PDBModel ):
self.__validSource = self.source
else:
## risky: the PDB code may not exist!
if type( self.source ) is str and len( self.source )==4:
self.__validSource = self.source
else:
self.__validSource = None
return self.__validSource
def sourceFile( self ):
"""
Name of pickled source or PDB file. If this model has another
PDBModel as source, the request is passed on to this one.
:return: file name of pickled source or PDB file
:rtype: str
:raise PDBError: if there is no valid source
"""
s = self.validSource()
if s is None:
raise PDBError('no valid source')
if type( s ) == str:
return s
return self.source.sourceFile()
def disconnect( self ):
"""
Disconnect this model from its source (if any).
.. note::
If this model has an (in-memory) PDBModel instance as source,
the entries of 'atoms' could still reference the same dictionaries.
"""
self.update()
try:
self.fileName = self.fileName or self.sourceFile()
except:
pass
self.setSource( None )
self.xyzChanged = 1
for p in self.residues:
self.residues.setInfo( p, changed=1 )
for p in self.atoms:
self.atoms.setInfo( p, changed=1 )
def getPdbCode(self):
"""
Return pdb code of model.
:return: pdb code
:rtype: str
"""
return self.pdbCode
def setPdbCode(self, code ):
"""
Set model pdb code.
:param code: new pdb code
:type code: str
"""
self.pdbCode = code
def sequence(self, mask=None, xtable=molUtils.xxDic):
    """
    Amino acid sequence in one letter code.

    :param mask: atom mask, to apply before (default None)
    :type mask: list or array
    :param xtable: dict {str:str}, additional residue:single_letter mapping
                   for non-standard residues (default molUtils.xxDic)
                   [currently not used]
    :type xtable: dict
    :return: 1-letter-code AA sequence (based on first atom of each res.)
    :rtype: str
    """
    first_atoms = self.resIndex()

    if mask is not None:
        ## restrict to residues whose first atom survives the mask
        flags = N0.zeros(self.lenAtoms())
        N0.put(flags, first_atoms, 1)
        first_atoms = N0.nonzero(mask * flags)

    res_names = self.atoms['residue_name']
    picked = [res_names[i] for i in first_atoms]

    return ''.join(molUtils.singleAA(picked, xtable))
def xplor2amber( self, aatm=True, parm10=False ):
    """
    Rename atoms so that tleap from Amber can read the PDB.
    If HIS residues contain atoms named HE2 or/and HD1, the residue
    name is changed to HIE, HID or HIP, respectively. Disulfide bonds
    are not yet identified - CYS -> CYX renaming must be done manually
    (see AmberParmBuilder for an example).
    Internally amber uses H atom names ala HD21 while (old) standard pdb
    files use 1HD2. By default, ambpdb produces 'standard' pdb atom names
    but it can output the less ambiguous amber names with switch -aatm.

    :param aatm: use, for example, HG23 instead of 3HG2 (default: True)
    :type aatm: bool
    :param parm10: adapt nucleic acid atom names to 2010 Amber forcefield
    :type parm10: bool
    """
    numbers = list(map( str, list(range(10)) ))

    ## nucleic acid atom names have changed in parm10;
    if parm10: ## this evidently is a bit of a hack. Should be revisited.

        def __parm10rename( a ):
            ## primed sugar atoms and terminal/phosphate names were
            ## renamed in the 2010 force field
            if "'1" in a: return a.replace( "'1", "'" )
            if "'2" in a: return a.replace( "'2", "''" )
            if a == 'O1P': return 'OP1'
            if a == 'O2P': return 'OP2'
            if a == 'H5T': return "HO5'"
            if a == 'H3T': return "HO3'"
            return a

        self.atoms['name'] = list(map( __parm10rename, self.atoms['name'] ))

    ## residue boundaries, with a terminal entry so each residue has
    ## a [first, last) atom range
    resI = self.resIndex().tolist()
    resI = N0.concatenate( (resI, [len(self)] ) )

    names = self.atoms['name']
    resnames = self.atoms['residue_name']

    for i in range( len(resI)-1 ):
        first = resI[i]
        last = resI[i+1]

        res = resnames[first]

        for j in range(first, last):

            if aatm:
                a = names[j]

                ## move a leading digit to the end: '3HG2' -> 'HG23'
                if len(a)>2 and a[0] in numbers:
                    names[j] = a[1:] + a[0]

        if res == 'HIS':
            anames = names[first:last]

            ## protonation state determines the Amber residue name;
            ## HIP (both protons) is checked last so it wins
            if 'HE2' in anames: resnames[first:last]= ['HIE'] *(last-first)
            if 'HD1' in anames: resnames[first:last]= ['HID'] *(last-first)
            if 'HE2' in anames and 'HD1' in anames:
                resnames[first:last] = ['HIP'] *(last-first)
def renameAmberRes(self):
    """
    Rename special residue names from Amber back into standard names
    (i.e. CYX -> CYS, HIE/HID/HIP -> HIS).
    """
    resnames = self.atoms['residue_name']

    for i, res in enumerate(resnames):
        if res == 'CYX':
            resnames[i] = 'CYS'
        elif res in ('HIE', 'HID', 'HIP'):
            resnames[i] = 'HIS'
def writePdb( self, fname, ter=1, amber=0, original=0, left=0, wrap=0,
              headlines=None, taillines=None):
    """
    Save model as PDB file.

    :param fname: name of new file
    :type fname: str
    :param ter: Option of how to treat the terminal record:
                  * 0 - don't write any TER statements
                  * 1 - restore original TER statements (doesn't work
                    if preceeding atom has been deleted) [default]
                  * 2 - put TER between all detected chains
                  * 3 - as 2 but also detect and split discontinuous chains
    :type ter: int
    :param amber: amber formatted atom names
                  (implies ter=3, left=1, wrap=0) (default 0)
    :type amber: 1||0
    :param original: revert atom names to the ones parsed in from PDB
                     (default 0)
    :type original: 1||0
    :param left: left-align atom names (as in amber pdbs) (default 0)
    :type left: 1||0
    :param wrap: write e.g. 'NH12' as '2NH1' (default 0)
    :type wrap: 1||0
    :param headlines: [( str, dict or str)], record / data tuples written
                      before the coordinate section, e.g.
                      [ ('SEQRES', ' 1 A 22 ALA GLY ALA'), ]
    :type headlines: list of tuples
    :param taillines: same as headlines but appended at the end of file
    :type taillines: list of tuples
    """
    try:
        f = IO.PDBFile( fname, mode='w' )

        numbers = list(map( str, list(range(10)) ))

        ## amber imposes its own TER / atom-name conventions; remember
        ## the affected profiles so they can be restored afterwards
        if amber:
            __resnames = copy.copy(self.atoms['residue_name'])
            __anames = copy.copy(self.atoms['name'])
            self.xplor2amber()
            ter = ter if ter != 1 else 3  ## adjust unless changed from default
            wrap = 0
            left = 1

        terIndex = []  ## ter == 0: no TER statements at all

        if ter == 2 or ter == 3:
            ## tolist to workaround numeric bug
            terIndex = N0.array( self.chainIndex( breaks=(ter==3) ).tolist()[1:] )
        if ter == 1:
            terIndex = N0.nonzero( self.atoms['after_ter'] )

        if headlines:
            for l in headlines:
                f.writeLine( l[0], l[1] )

        i = -1
        for a in self.atoms.toDicts():
            i += 1

            ## fetch coordinates Vector
            a['position'] = self.xyz[ i ]

            aname = a['name']

            if original and not amber:
                aname = a['name_original']

            ## PDBFile prints atom names 1 column too far left
            if wrap and len(aname) == 4 and aname[0] in numbers:
                aname = aname[1:] + aname[0]
            if not left and len(aname) < 4:
                aname = ' ' + aname.strip()

            a['name'] = aname

            ## write line
            f.writeLine( a['type'], a )

            ## write TER line with details from previous atom
            if (ter > 0 and i+1 in terIndex):
                f.writeLine('TER', a )

        ## BUGFIX: taillines used to be written before the coordinate
        ## section; they now go at the end of the file, as documented
        if taillines:
            for l in taillines:
                f.writeLine( l[0], l[1] )

        f.close()

        ## restore profiles modified by xplor2amber
        if amber:
            self.atoms['residue_name'] = __resnames
            self.atoms['name'] = __anames

    except Exception:
        EHandler.error( "Error writing "+fname )
def saveAs(self, path):
    """
    Pickle this PDBModel to a file, set the 'source' field to this file
    name and mark atoms, xyz, and profiles as unchanged.

    Normal pickling of the object will only dump those data that can not
    be reconstructed from the source of this model (if any). saveAs
    creates a 'new source' without further dependencies.

    :param path: target file name
    :type path: str OR LocalPath instance
    :raise PDBError: if the target file cannot be opened for writing
    """
    try:
        self.update()

        ## pickle all atoms, coordinates, profiles, even if unchanged
        self.forcePickle = 1

        self.setSource( path )

        self.xyzChanged = 0
        for p in self.residues:
            self.residues.setInfo( p, changed=0 )
        for p in self.atoms:
            self.atoms.setInfo( p, changed=0 )

        T.dump( self, str(path) )

    except IOError as err:
        ## chain the original error so the root cause stays visible
        raise PDBError("Can't open %s for writing." %
                       T.absfile(str(path))) from err
def maskF(self, atomFunction, numpy=1 ):
    """
    Create a list with the result of atomFunction( atom ) for each
    atom. (Depending on the return value of atomFunction, the
    result is not necessarily a mask of 0 and 1. Creating masks
    should be just the most common usage).

    Note:
        This method is slow compared to maskFrom because the dictionaries
        that are given to the atomFunction have to be created from aProfiles
        on the fly. If performance matters, better combine the result from
        several maskFrom calls, e.g. instead of::

          r = m.maskF( lambda a: a['name']=='CA' and a['residue_name']=='ALA' )

        use::

          r = m.maskFrom( 'name', 'CA' ) * m.maskFrom('residue_name', 'ALA')

    :param atomFunction: function( dict_from_aProfiles.toDict() ),
                         true || false (Condition)
    :type atomFunction: function
    :param numpy: 1(default)||0, convert result to Numpy array of int
    :type numpy: int
    :return: Numpy array( [0,1,1,0,0,0,1,0,..], Int) or list
    :rtype: array or list
    """
    ## fast path: map the function over all atoms in one go
    try:
        result = list(map( atomFunction, self.atoms.toDicts() ))
    except:
        ## fall-back solution: assign 0 to all entries that raise
        ## exception
        EHandler.warning("mask(): Error while mapping funtion "+
                         "to all atoms.")
        result = []

        for a in self.atoms.iterDicts():
            try:
                result.append( atomFunction( a ) )
            ## put 0 if something goes wrong
            except :
                EHandler.warning("mask(): Error while save-mapping ")
                result.append(0)

    if numpy:
        return N0.array( result )
    return result
def maskFrom(self, key, cond):
    """
    Create an atom mask from the values of a specific profile.

    Example, the following three statements are equivalent:

      >>> mask = m.maskFrom( 'name', 'CA' )
      >>> mask = m.maskFrom( 'name', lambda a: a == 'CA' )
      >>> mask = N0.array( [ a == 'CA' for a in m.atoms['name'] ] )

    However, the same can be also achieved with standard numpy operators:

      >>> mask = numpy.array(m.atoms['name']) == 'CA'

    :param key: the name of the profile to use
    :type key: str
    :param cond: either a function accepting a single value or a value or
                 an iterable of values (to allow several alternatives)
    :type cond: function OR any OR [ any ]
    :return: array or list of indices where condition is met
    :rtype: list or array of int
    """
    values = self.atoms[key]

    ## condition given as function
    if type(cond) is types.FunctionType:
        return N0.array([cond(v) for v in values])

    ## several allowed values given
    if type(cond) in [list, tuple]:
        return N0.array([v in cond for v in values])

    ## one allowed value given
    ## Numeric splits lists of str into 2-D char arrays, 'O' prevents that
    return N0.array(values) == cond
def maskCA(self, force=0):
    """
    Short cut for mask of all CA atoms.

    :param force: force calculation even if cached mask is available
    :type force: 0||1
    :return: array( 1 x N_atoms ) of 0||1
    :rtype: array
    """
    if force or self.__maskCA is None:
        self.__maskCA = self.maskFrom('name', 'CA')  ## cache

    return self.__maskCA
def maskBB(self, force=0, solvent=0):
    """
    Short cut for mask of all backbone atoms. Supports standard protein
    and DNA atom names. Any residues classified as solvent (water, ions)
    are filtered out.

    :param force: force calculation even if cached mask is available
    :type force: 0||1
    :param solvent: include solvent residues (default: false)
    :type solvent: 1||0
    :return: array( 1 x N_atoms ) of 0||1
    :rtype: array
    """
    ## serve from cache when possible (solvent variant is never cached)
    if self.__maskBB is not None and not force and not solvent:
        return self.__maskBB

    mask = self.maskFrom('name', ['CA', 'C', 'N', 'O', 'H', 'OXT',
                                  "P", "O5'", "C5'", "C4'", "C3'", "O3'"])
    if solvent:
        return mask  ## don't cache

    mask = N0.logical_not(self.maskSolvent()) * mask
    self.__maskBB = mask  ## cache
    return mask
def maskHeavy(self, force=0):
    """
    Short cut for mask of all heavy atoms ('element' != H).

    :param force: force calculation even if cached mask is available
    :type force: 0||1
    :return: array( 1 x N_atoms ) of 0||1
    :rtype: array
    """
    if force or self.__maskHeavy is None:
        ## every non-hydrogen element counts as heavy
        self.__maskHeavy = self.maskFrom('element', lambda e: e != 'H')

    return self.__maskHeavy
def maskH(self):
    """
    Short cut for mask of hydrogens. ('element' == H)

    :return: array( 1 x N_atoms ) of 0||1
    :rtype: array
    """
    heavy = self.maskHeavy()
    return N0.logical_not(heavy)
def _maskCB(self):
    """
    Short cut for mask of all CB *and* CA of GLY.

    :return: mask of all CB and CA of GLY
    :rtype: array
    """
    def is_cb(a):
        ## GLY has no CB; its CA substitutes
        if a['name'] == 'CB':
            return True
        return a['residue_name'] == 'GLY' and a['name'] == 'CA'

    return self.maskF(is_cb)
def maskCB(self):
    """
    Short cut for mask of all CB *plus* CA of GLY.

    :return: mask of all CB plus CA of GLY
    :rtype: array
    """
    cb_mask = self.maskFrom('name', 'CB')
    gly_mask = self.maskFrom('residue_name', 'GLY')

    ## GLY has no CB; its CA stands in
    return cb_mask + (gly_mask * self.maskCA())
def maskH2O(self):
    """
    Short cut for mask of all atoms in residues named TIP3, HOH and WAT.

    :return: array( 1 x N_atoms ) of 0||1
    :rtype: array
    """
    water_residues = ['TIP3', 'HOH', 'WAT']
    return self.maskFrom('residue_name', water_residues)
def maskSolvent(self):
    """
    Short cut for mask of all atoms in residues named
    TIP3, HOH, WAT, Na+, Cl-, CA, ZN.

    :return: array( 1 x N_atoms ) of 0||1
    :rtype: array
    """
    solvent_residues = ['TIP3', 'HOH', 'WAT', 'Na+', 'Cl-', 'CA', 'ZN']
    return self.maskFrom('residue_name', solvent_residues)
def maskHetatm(self):
    """
    Short cut for mask of all HETATM records.

    :return: array( 1 x N_atoms ) of 0||1
    :rtype: array
    """
    record_type = 'HETATM'
    return self.maskFrom('type', record_type)
def maskProtein(self, standard=0):
    """
    Short cut for mask containing all atoms of amino acids.

    :param standard: only standard residue names (not CYX, NME,..)
                     (default 0)
    :type standard: 0|1
    :return: array( 1 x N_atoms ) of 0||1,
             mask of all protein atoms (based on residue name)
    :rtype: array
    """
    aa_dic = molUtils.aaDicStandard if standard else molUtils.aaDic

    ## case-insensitive membership test
    known = set(k.upper() for k in aa_dic.keys())

    return N0.array(
        [res.upper() in known for res in self.atoms['residue_name']])
def maskDNA(self):
    """
    Short cut for mask of all atoms in DNA (based on residue name).

    :return: array( 1 x N_atoms ) of 0||1
    :rtype: array
    """
    dna_residues = ['DA', 'DC', 'DG', 'DT']
    return self.maskFrom('residue_name', dna_residues)
def maskRNA(self):
    """
    Short cut for mask of all atoms in RNA (based on residue name).

    :return: array( 1 x N_atoms ) of 0||1
    :rtype: array
    """
    rna_residues = ['A', 'C', 'G', 'U']
    return self.maskFrom('residue_name', rna_residues)
def maskNA(self):
    """
    Short cut for mask of all atoms in DNA or RNA
    (based on residue name).

    :return: array( 1 x N_atoms ) of 0||1
    :rtype: array
    """
    na_residues = ['A', 'C', 'G', 'U', 'T', 'DA', 'DC', 'DG', 'DT']
    return self.maskFrom('residue_name', na_residues)
def indicesFrom(self, key, cond):
    """
    Get atom indices conforming condition applied to an atom profile.
    Corresponds to::

      >>> numpy.nonzero( m.maskFrom( key, cond) )

    :param key: the name of the profile to use
    :type key: str
    :param cond: either a function accepting a single value or a value or
                 an iterable of values
    :type cond: function OR any OR [any]
    :return: array of indices where condition is met
    :rtype: array of int
    """
    mask = self.maskFrom(key, cond)
    return N0.nonzero(mask)
def indices( self, what ):
    """
    Get atom indices conforming condition. This is a convenience method
    to 'normalize' different kind of selections (masks, atom names,
    indices, functions) to indices as they are e.g. required by
    :class:`PDBModel.take`.

    :param what: Selection::
         - function applied to each atom entry,
           e.g. lambda a: a['residue_name']=='GLY'
         - list of str, allowed atom names
         - list of int, allowed atom indices OR mask with only 1 and 0
         - int, single allowed atom index
    :type what: function OR list of str or int OR int
    :return: array of atom indices
    :rtype: Numeric array
    :raise PDBError: if what is neither of above
    """
    ## lambda funcion
    if type( what ) is types.FunctionType:
        return N0.nonzero( self.maskF( what) )

    if type( what ) is list or type( what ) is N0.arraytype:

        ## atom names
        if type( what[0] ) == str:
            return self.indicesFrom( 'name', what )

        if isinstance( what[0] , int) or \
           (isinstance(what, N.ndarray) and what.dtype in [int, bool]):

            ## mask: same length as the model and only 0 / 1 values
            if len( what ) == self.lenAtoms() and max( what ) < 2:
                return N0.nonzero( what )

            ## already a list of indices -> pass through unchanged
            else:
                return what

    ## single index -> wrap into a one-element array
    if isinstance( what , (int, N.integer)):
        return N0.array( [what], N0.Int )

    raise PDBError("PDBModel.indices(): Could not interpret condition ")
def mask( self, what ):
    """
    Get atom mask. This is a convenience method to 'normalize'
    different kind of selections (masks, atom names, indices,
    functions) to a mask as it is e.g. required by
    :class:`PDBModel.compress`.

    :param what: Selection::
         - function applied to each atom entry,
           e.g. lambda a: a['residue_name']=='GLY'
         - list of str, allowed atom names
         - list of int, allowed atom indices OR mask with
           only 1 and 0
         - int, single allowed atom index
    :type what: function OR list of str or int OR int
    :return: N_atoms x 1 (0||1 )
    :rtype: Numeric array
    :raise PDBError: if what is neither of above
    """
    ## lambda funcion
    if type( what ) == types.FunctionType:
        return self.maskF( what )

    if type( what ) == list or type( what ) is N0.arraytype:

        ## atom names
        if type( what[0] ) == str:
            return self.maskFrom( 'name', what)

        if isinstance( what[0] , int) or \
           (isinstance(what, N.ndarray) and what.dtype in [int, bool]):

            ## mask: same length as the model and only 0 / 1 values
            if len( what ) == self.lenAtoms() and max( what ) < 2:
                return what

            ## list of indices -> convert to mask
            else:
                r = N0.zeros( self.lenAtoms(),N0.Int )
                N0.put( r, what, 1 )
                return r

    ## single index; CONSISTENCY FIX: also accept numpy integer scalars,
    ## exactly as PDBModel.indices() does
    if isinstance( what , (int, N.integer)):
        return self.mask( [what] )

    raise PDBError("PDBModel.mask(): Could not interpret condition ")
def index2map(self, index, len_i):
    """
    Create a map of len_i length, giving the residue(/chain) number of
    each atom, from a list of residue(/chain) starting positions.

    :param index: list of starting positions, e.g. [0, 3, 8]
    :type index: [ int ] or array of int
    :param len_i: length of target map, e.g. 10
    :type len_i: int
    :return: list mapping atom positions to residue(/chain) number,
             e.g. [0,0,0, 1,1,1,1,1, 2,2] for the example above
    :rtype: array of int (of len_i length)
    """
    ## close the last segment with a terminal boundary
    bounds = N0.concatenate((index, [len_i]))

    ## number of atoms in each residue / chain
    sizes = bounds[1:] - bounds[:-1]
    ## Numeric: sizes = N0.take( bounds, range(1, len(bounds)) ) - bounds[:-1]

    return N0.repeat(list(range(len(sizes))), sizes.astype(N0.Int32))
def map2index(self, imap):
    """
    Identify the starting positions of each residue(/chain) from a map
    giving the residue(/chain) number of each atom.

    :param imap: something like [0,0,0,1,1,1,1,1,2,2,2,...]
    :type imap: [ int ]
    :return: list of starting positions, e.g. [0, 3, 8, ...] in above ex.
    :rtype: array of int
    """
    try:
        ## duplicate last entry so the difference array keeps full length
        padded = N0.concatenate((imap, [imap[-1]]))
        jumps = padded[1:] - padded[:-1]

        starts = N0.nonzero(jumps) + 1
        return N0.concatenate(([0], starts))

    except IndexError:
        ## empty imap parameter
        return N0.zeros(0)
def extendMask(self, mask, index, len_i):
    """
    Translate a mask that is defined, e.g., on residues(/chains) to a
    mask that is defined on atoms.

    :param mask: mask marking positions in the list of residues or chains
    :type mask: [ bool ] or array of bool or of 1||0
    :param index: starting positions of all residues or chains
    :type index: [ int ] or array of int
    :param len_i: length of target mask
    :type len_i: int
    :return: mask that blows up the residue / chain mask to an atom mask
    :rtype: array of bool
    """
    bounds = N0.concatenate((index, [len_i]))
    sizes = bounds[1:] - bounds[:-1]

    ## repeat each flag once per atom of its residue / chain
    return N0.repeat(mask, sizes.astype(N0.Int32))
def extendIndex( self, i, index, len_i ):
    """
    Translate a list of positions that is defined, e.g., on residues
    (/chains) to a list of atom positions AND also return the starting
    position of each residue (/chain) in the new sub-list of atoms.

    :param i: positions in higher level list of residues or chains
              (negative values count from the end)
    :type i: [ int ] or array of int
    :param index: atomic starting positions of all residues or chains
    :type index: [ int ] or array of int
    :param len_i: length of atom index (total number of atoms)
    :type len_i: int
    :return: (ri, rindex) - atom positions & new index
    :rtype: array of int, array of int
    :raise PDBError: if any position in i is out of range
    """
    ## catch invalid indices
    i = self.__convert_negative_indices( i, len( index ) )

    if len(i)==0:
        return i, N0.array( [], int )

    if max( i ) >= len( index ) or min( i ) < 0:
        raise PDBError("invalid indices")

    ## last atom of each residue / chain
    stop = N0.concatenate( (index[1:], [len_i]) ) - 1

    ## first and last atom of each *selected* residue / chain
    ifrom = N0.take( index, i )
    ito = N0.take( stop, i )

    ## number of atoms in each of the new residues
    rangelen = ito - ifrom + 1

    ## starting position of each selected residue in the result array
    rindex = N0.concatenate( ([0], N0.cumsum( rangelen[:-1] )) )

    ## (1) repeat position of first atom in each residue as often as there
    ## are atoms in this residue. (2) add a range array so that numbers
    ## are increasing from each atom to the next but (3) reset the added
    ## range to 0 at each residue starting position (-delta).
    ri = N0.repeat( ifrom, rangelen )
    delta = N0.repeat( rindex, rangelen )

    ri = ri + N0.arange( len(ri), dtype=N0.Int32 ) - delta

    return ri, rindex
def atom2resMask(self, atomMask):
    """
    Mask (set 0) residues for which all atoms are masked (0) in atomMask.

    :param atomMask: list/array of int, 1 x N_atoms
    :type atomMask: list/array of int
    :return: 1 x N_residues (0||1)
    :rtype: array of int
    """
    result = N0.zeros(self.lenResidues())

    ## residues with at least one selected atom
    hit_residues = self.atom2resIndices(N0.nonzero(atomMask))
    N0.put(result, hit_residues, 1)

    return result
def atom2resIndices(self, indices):
    """
    Get list of indices of residues for which any atom is in indices.

    Note: in the current implementation, the resulting residues are
    returned in their old order, regardless of the order of input
    positions.

    :param indices: list of atom indices
    :type indices: list of int
    :return: indices of residues
    :rtype: list of int
    """
    ## residue number of every selected atom
    picked_map = N0.take(self.resMap(), indices)

    ## position of each residue's first occurrence within the selection
    starts = self.map2index(picked_map)

    return N0.take(picked_map, starts)
def res2atomMask(self, resMask):
    """
    Convert residue mask to atom mask.

    :param resMask: list/array of int, 1 x N_residues
    :type resMask: list/array of int
    :return: 1 x N_atoms
    :rtype: array of int
    """
    ## blow residue flags up to one flag per atom
    return self.extendMask(resMask, self.resIndex(), self.lenAtoms())
def __convert_negative_indices(self, indices, length):
    """
    Replace negative indices by their positive equivalent
    (counting from the end).

    :return: modified copy of indices (or unchanged indices itself)
    :rtype: array of int
    """
    ## nothing to do: empty selection or no negative entries
    if len(indices) == 0 or min(indices) >= 0:
        return indices

    indices = copy.copy(N0.array(indices))

    ## add `length` exactly at the negative positions
    offsets = N0.zeros(len(indices))
    N0.put(offsets, N.flatnonzero(indices < 0), length)

    return indices + offsets
def res2atomIndices( self, indices ):
    """
    Convert residue indices to atom indices.

    :param indices: list/array of residue indices
    :type indices: list/array of int
    :return: array of atom positions
    :rtype: array of int
    :raise PDBError: if any index is out of range
    """
    ## BUGFIX: was 'max(indices) > lenResidues()' -- the invalid index
    ## lenResidues() slipped through and only failed later inside
    ## extendIndex with a less specific error message
    if max( indices ) >= self.lenResidues() or min( indices ) < 0:
        raise PDBError("invalid residue indices")

    return self.extendIndex( indices, self.resIndex(), self.lenAtoms() )[0]
def atom2chainIndices(self, indices, breaks=0):
    """
    Convert atom indices to chain indices. Each chain is only
    returned once.

    :param indices: list of atom indices
    :type indices: list of int
    :param breaks: look for chain breaks in backbone coordinates (def. 0)
    :type breaks: 0||1
    :return: chains of any atom which is in indices
    :rtype: list of int
    """
    ## chain number of every selected atom
    picked_map = N0.take(self.chainMap(breaks=breaks), indices)

    ## position of each chain's first occurrence within the selection
    starts = self.map2index(picked_map)

    return N0.take(picked_map, starts)
def atom2chainMask(self, atomMask, breaks=0):
    """
    Mask (set to 0) chains for which all atoms are masked (0) in atomMask.
    Put another way: mark all chains that contain any atom that is marked
    '1' in atomMask.

    :param atomMask: list/array of int, 1 x N_atoms
    :type atomMask: list/array of int
    :param breaks: look for chain breaks in backbone coordinates (def. 0)
    :type breaks: 0||1
    :return: 1 x N_chains (0||1)
    :rtype: array of int
    """
    result = N0.zeros(self.lenChains(breaks=breaks))

    hit_chains = self.atom2chainIndices(N0.nonzero(atomMask),
                                        breaks=breaks)
    N0.put(result, hit_chains, 1)

    return result
def chain2atomMask(self, chainMask, breaks=0):
    """
    Convert chain mask to atom mask.

    :param chainMask: list/array of int, 1 x N_chains
    :type chainMask: list/array of int
    :param breaks: look for chain breaks in backbone coordinates (def. 0)
    :type breaks: 0||1
    :return: 1 x N_atoms
    :rtype: array of int
    """
    chain_starts = self.chainIndex(breaks=breaks)
    return self.extendMask(chainMask, chain_starts, self.lenAtoms())
def chain2atomIndices( self, indices, breaks=0 ):
    """
    Convert chain indices into atom indices.

    :param indices: list/array of chain indices (negative values count
                    from the end)
    :type indices: list/array of int
    :param breaks: look for chain breaks in backbone coordinates (def. 0)
    :type breaks: 0||1
    :return: array of atom positions
    :rtype: array of int
    :raise PDBError: if any chain index is out of range
    """
    ## NOTE(review): the '>' bound lets a positive index equal to
    ## lenChains() slip through here; extendIndex then raises
    ## PDBError("invalid indices") -- confirm before tightening to '>='
    if max( N0.absolute(indices) ) > self.lenChains( breaks=breaks ):
        raise PDBError("invalid chain indices")

    return self.extendIndex( indices, self.chainIndex( breaks=breaks ),
                             self.lenAtoms() )[0]
def res2atomProfile(self, p):
    """
    Get an atom profile where each atom has the value its residue has
    in the residue profile.

    :param p: name of existing residue profile OR
              [ any ], list of lenResidues() length
    :type p: str
    :return: [ any ] OR array, atom profile
    :rtype: list or array
    """
    if type(p) is str:
        p = self.residues.get(p)

    as_array = isinstance(p, N0.arraytype)

    ## look up each atom's residue value via the atom -> residue map
    rmap = self.resMap()
    result = [p[res] for res in rmap]

    if as_array:
        result = N0.array(result)

    return result
def atom2resProfile(self, p, f=None):
    """
    Get a residue profile where each residue has the value that its first
    atom has in the atom profile.

    :param p: name of existing atom profile OR
              [ any ], list of lenAtoms() length
    :type p: str
    :param f: function to calculate single residue from many atom values
              f( [atom_value1, atom_value2,...] ) -> res_value
              (default None, simply take value of first atom in each res.)
    :type f: func
    :return: [ any ] OR array, residue profile
    :rtype: list or array
    """
    if type(p) is str:
        p = self.atoms.get(p)

    as_array = isinstance(p, N0.arraytype)

    if f:
        ## reduce each residue's atom values with the given function
        r = N0.array([f(values) for values in self.profile2resList(p)])
    else:
        ## take the value of the first atom of each residue
        r = N0.take(p, self.resIndex())

    if as_array:
        return r
    return r.tolist()
def profile2mask(self, profName, cutoff_min=None, cutoff_max=None):
    """
    Create a mask from an atom or residue profile by value cutoffs.

    profile2mask( str_profname, [cutoff_min, cutoff_max=None])

    :param cutoff_min: low value cutoff (all values >= cutoff_min)
    :type cutoff_min: float
    :param cutoff_max: high value cutoff (all values < cutoff_max)
    :type cutoff_max: float
    :return: mask len( profile(profName) ) x 1||0
    :rtype: array
    :raise ProfileError: if no profile is found with name profName
    """
    ## an atom profile of that name takes precedence over a residue one
    source = self.atoms if profName in self.atoms else self.residues
    return source.profile2mask(profName, cutoff_min, cutoff_max)
def profile2atomMask(self, profName, cutoff_min=None, cutoff_max=None):
    """
    profile2atomMask( str_profname, [cutoff_min, cutoff_max=None])

    Same as :class:`profile2mask`, but converts a residue mask to an
    atom mask.

    :param cutoff_min: low value cutoff
    :type cutoff_min: float
    :param cutoff_max: high value cutoff
    :type cutoff_max: float
    :return: mask N_atoms x 1|0
    :rtype: array
    :raise ProfileError: if no profile is found with name profName
    """
    mask = self.profile2mask(profName, cutoff_min, cutoff_max)

    ## a residue profile yields a residue-length mask -> blow up to atoms
    if len(mask) == self.lenResidues():
        mask = self.res2atomMask(mask)

    return mask
def profile2resList(self, p):
    """
    Group the profile values of each residue's atoms into a separate list.

    :param p: name of existing atom profile OR
              [ any ], list of lenAtoms() length
    :return: a list (one entry per residue) of lists (one entry per
             residue atom)
    :rtype: [ [ any ] ]
    """
    if type(p) is str:
        p = self.atoms.get(p)

    starts = self.resIndex()     # first atom of each residue
    stops = self.resEndIndex()   # last atom of each residue

    return [p[starts[res]: stops[res] + 1]
            for res in range(self.lenResidues())]
def mergeChains( self, c1, id='', segid='', rmOxt=True,
                 renumberAtoms=False, renumberResidues=True):
    """
    Merge two adjacent chains. This merely removes all internal markers
    for a chain boundary. Atom content or coordinates are not modified.

    PDBModel tracks chain boundaries in an internal _chainIndex. However,
    there are cases when this chainIndex needs to be re-built and new
    chain boundaries are then infered from jumps in chain- or segment
    labelling or residue numbering. mergeChains automatically
    re-assigns PDB chain- and segment IDs as well as residue numbering
    to prepare for this situation.

    :param c1: first of the two chains to be merged (negative values
               count from the end)
    :type c1: int
    :param id: chain ID of the new chain (default: ID of first chain)
    :type id: str
    :param segid: new chain's segid (default: SEGID of first chain)
    :type segid: str
    :param rmOxt: remove OXT / OT2 atoms of the first chain (default True)
    :type rmOxt: bool
    :param renumberAtoms: rewrite PDB serial numbering of the adjacent
                          chain to be consequtive to the last atom of the
                          first chain (default: False)
    :type renumberAtoms: bool
    :param renumberResidues: shift PDB residue numbering so that the first
                             residue of the adjacent chain follows the
                             previous residue. Other than for atom
                             numbering, later jumps in residue numbering
                             are preserved. (default: True)
    :type renumberResidues: bool
    """
    ## support negative chain positions (count from the end)
    c1 = self.__convert_negative_indices( [c1], self.lenChains() )[0]

    oldI = self.chainIndex()
    assert len(oldI) > c1 + 1, 'no adjacent chain to be merged'

    ## remove chain boundary from chainIndex
    self._chainIndex = N0.concatenate( (oldI[:c1+1], oldI[c1+2:] ) )

    ## starting and ending position of (old) second chain
    i_start= oldI[ c1 ]
    if len( oldI ) > c1+2:
        i_next = oldI[ c1+2 ]
    else:
        i_next = len( self )

    i_scar = oldI[ c1+1 ] ## (old) starting position of second chain
    n_atoms= i_next - i_start

    ## remove trace of PDB TER statement (if any)
    self['after_ter'][ i_scar ] = 0

    ## harmonize chain ID
    id = id or self['chain_id'][i_scar-1]
    self['chain_id'][ i_start : i_next ] = [ id ] * n_atoms

    ## harmonize segID
    segid = segid or self['segment_id'][i_scar-1]
    self['segment_id'][ i_start : i_next ] = [ segid ] * n_atoms

    ## harmonize PDB residue numbering (by *shifting* current numbers)
    if renumberResidues:
        first = self['residue_number'][ i_scar-1 ] + 1
        delta = first - self['residue_number'][ i_scar ]
        profile = self['residue_number'][i_scar:i_next] + delta
        self['residue_number'][i_scar:i_next] = profile

    ## harmonize PDB atom numbering (by *rewriting* current numbers)
    ## usually though, atoms already have consequtive numbering through the PDB
    if renumberAtoms:
        n = i_next - i_scar
        first = self['serial_number'][ i_scar-1 ] + 1
        self['serial_number'][i_scar:i_next] = N0.arange(first, first + n)

    ## remove OXT and OT2 if requested
    if rmOxt:
        ## overkill: we actually would only need to look into last residue
        anames = N0.array( self.atoms['name'][i_start:i_scar] )
        i_oxt = N.flatnonzero( N0.logical_or( anames=='OXT', anames=='OT2' ))

        if len( i_oxt ) > 0:
            self.remove( i_oxt )
def mergeResidues( self, r1, name='', residue_number=None,
                   chain_id='', segment_id='',
                   renumberAtoms=False ):
    """
    Merge two adjacent residues. Duplicate atoms are labelled with
    alternate codes 'A' (first occurrence) to 'B' or later.

    :param r1: first of the two residues to be merged (negative values
               count from the end)
    :type r1: int
    :param name: name of the new residue (default: name of first residue)
    :type name: str
    :param residue_number: PDB residue number of the merged residue
                           (default: number of the first residue)
    :type residue_number: int
    :param chain_id: chain ID of the merged residue (default: first one)
    :type chain_id: str
    :param segment_id: segment ID of the merged residue (default: first)
    :type segment_id: str
    :param renumberAtoms: rewrite PDB serial numbers of the second
                          residue's atoms (default: False)
    :type renumberAtoms: bool
    """
    r1 = self.__convert_negative_indices( [r1], self.lenResidues() )[0]

    oldI = self.resIndex()
    assert len(oldI) > r1 + 1, 'no adjacent residue to be merged'

    ## remove residue boundary from residue Index
    self._resIndex = N0.concatenate( (oldI[:r1+1], oldI[r1+2:] ) )

    ## starting and ending position of new fused and (old) second residue
    i_start= oldI[ r1 ]
    if len( oldI ) > r1+2:
        i_next = oldI[ r1+2 ]
    else:
        i_next = len( self )

    i_scar = oldI[ r1+1 ] ## (old) starting position of second residue
    n_atoms= i_next - i_start

    ## move PDB TER statement (if any) to end of fused residue
    if i_next < len( self ):
        self['after_ter'][ i_next ] = self['after_ter'][ i_scar ]
    self['after_ter'][ i_scar ] = 0

    ## harmonize residue name
    name = name or self['residue_name'][i_scar-1]
    self['residue_name'][ i_start : i_next ] = [ name ] * n_atoms

    ## harmonize chain ID
    id = chain_id or self['chain_id'][i_scar-1]
    self['chain_id'][ i_start : i_next ] = [ id ] * n_atoms

    ## harmonize segID
    segid = segment_id or self['segment_id'][i_scar-1]
    self['segment_id'][ i_start : i_next ] = [ segid ] * n_atoms

    ## harmonize PDB residue numbering
    residue_number = residue_number or self['residue_number'][i_scar-1]
    self['residue_number'][i_start:i_next] = [residue_number] * n_atoms

    ## harmonize PDB atom numbering (by *rewriting* current numbers)
    if renumberAtoms:
        n = i_next - i_scar
        first = self['serial_number'][ i_scar-1 ] + 1
        self['serial_number'][i_scar:i_next] = N0.arange(first, first + n)

    ## shift chain boundary (if any) to end of fused residue
    ## unless it's the end of the model or there is already a boundary there
    if i_scar in self.chainIndex():
        i = N.flatnonzero( self._chainIndex == i_scar )[0]

        if (not i_next in self._chainIndex) and (i_next != len(self)):
            self._chainIndex[ i ] = i_next
        else:
            ## BUGFIX: N0.concatenate expects a single sequence of arrays;
            ## the two slices were passed as separate positional arguments
            ## before, which raises TypeError
            self._chainIndex = N0.concatenate( (self._chainIndex[:i],
                                                self._chainIndex[i+1:]) )

    ## mark duplicate atoms in the 'alternate' field of the new residue
    r = Residue( self, r1 )
    r.labelDuplicateAtoms()
def concat( self, *models, **kw ):
    """
    Concatenate atoms, coordinates and profiles. source and fileName
    are lost, so are profiles that are not available in all models.
    model0.concat( model1 [, model2, ..]) -> single PDBModel.
    :param models: models to concatenate
    :type models: one or more PDBModel instances
    :param newRes: treat beginning of second model as new residue (True)
    :type newRes: bool
    :param newChain: treat beginning of second model as new chain (True)
    :type newChain: bool
    Note: info records of given models are lost.
    """
    newRes = kw.get('newRes', True)
    newChain= kw.get('newChain', True)
    ## recursion anchor: nothing left to concatenate
    if len( models ) == 0:
        return self
    m = models[0]
    r = self.__class__()
    self.update() ## trigger update if xyz or any profile is None
    r.setXyz( N0.concatenate( ( self.getXyz(), m.getXyz() ) ) )
    r.setPdbCode( self.pdbCode )
    ## concatenate atom and residue profiles; profiles that are not
    ## present in both models are dropped by ProfileCollection.concat
    r.atoms = self.atoms.concat( m.atoms, )
    r.residues = self.residues.concat( m.residues, )
    ## re-point profile collections to the new result model
    r.residues.model = r
    r.atoms.model = r
    ## shift second model's residue / chain indices behind our atoms
    append_I = m.resIndex() + self.lenAtoms()
    r._resIndex = N0.concatenate((self.resIndex(), append_I ))
    append_I = m.chainIndex() +self.lenAtoms()
    r._chainIndex =N0.concatenate((self.chainIndex( singleRes=1 ), append_I))
    ## remove traces of residue or chain breaks
    if not newChain:
        r.mergeChains( self.lenChains() - 1 )
    if not newRes:
        r.mergeResidues( self.lenResidues() -1 )
    r.info = copy.deepcopy( self.info )
    ## leads to bug 3611835
    ## try:
    ##     k = max(self.biounit.keys())+1
    ##     r.residues['biomol'][self.lenResidues():] += k
    ##     r.biounit = self.biounit.append(m.biounit)
    ##     r.biounit.model = r
    ## except AttributeError:
    ##     pass
    ## recurse into the remaining models
    return r.concat( *models[1:] )
## def removeChainBreaks( self, chains, breaks=False ):
## """
## Remove chain boundaries *before* given chain indices.
## Example:
## removeChainBreaks( [1,3] ) --> removes the first and third chain
## break but keeps the second, e.g. this joins first and second chain
## but also second and third chain.
## Coordinates are not modified. removeChainBreaks( [0] ) doesn't make
## sense.
## :param chains: [ int ], chain breaks
## """
## if 0 in chains:
## raise PDBError, 'cannot remove chain break 0'
## cindex = self.chainIndex( breaks=breaks )
## ## simple removal of terminal OXT and TER label, make it more robust!
## remove = []
## for i in chains:
## lastatom = cindex[i] - 1
## if self[ lastatom ]['name'] in ['OXT', 'OT2']:
## remove += [ lastatom ]
## self['after_ter'][lastatom+1] = 0
## self.remove( remove )
## ## update chain index
## cindex = self.chainIndex( breaks=breaks )
## mask = N0.ones( len( cindex ) )
## N0.put( mask, chains, 0 )
## self._chainIndex = N0.compress( mask, cindex )
def take( self, i, rindex=None, cindex=None,
          *initArgs, **initKw ):
    """
    Extract a PDBModel with a subset of atoms:
    take( atomIndices ) -> PDBModel
    All other PDBModel methods that extract portions of the model (e.g.
    compress, takeChains, takeResidues, keep, clone, remove) are ultimately
    using `take()` at their core.
    Note:
    take employs fast numpy vector mapping methods to re-calculate
    the residue and chain index of the result model. The methods generally
    work but there is one scenario were this mechanism can fail: If take
    is used to create repetitions of residues or chains directly next to
    each other, these residues or chains can get accidentally merged. For
    this reason, calling methods can optionally pre-calculate and provide
    a correct version of the new residue or chain index (which will then
    be used as is).
    :param i: atomIndices, positions to take in the order to take
    :type i: list/array of int
    :param rindex: optional residue index for result model after extraction
    :type rindex: array of int
    :param cindex: optional chain index for result model after extraction
    :type cindex: array of int
    :param initArgs: any number of additional arguments for constructor of \
    result model
    :param initKw: any additional keyword arguments for constructure of \
    result model
    :return: new PDBModel or sub-class
    :rtype: PDBModel
    """
    r = self.__class__( *initArgs, **initKw )
    ## the easy part: extract coordinates and atoms
    r.xyz = N0.take( self.getXyz(), i )
    ## mark coordinates changed unless extraction kept them identical
    r.xyzChanged = self.xyzChanged or not N.all(r.xyz == self.xyz)
    r.atoms = self.atoms.take( i, r )
    ## more tricky: rescue residue borders and extract residue profiles
    new_resmap = N0.take( self.resMap(), i )
    if rindex is not None:
        r._resIndex = rindex
    else:
        ## this can fail if residues are repeated in the selection
        r._resIndex = self.map2index( new_resmap )
    ## original residue position of each residue kept in the result
    i_res = N0.take( new_resmap, r._resIndex )
    r.residues = self.residues.take( i_res, r )
    ## now the same with chain borders (and later profiles)
    if cindex is not None:
        r._chainIndex = cindex
    else:
        ## this can fail if chains are repeated next to each other in i
        new_chainmap = N0.take( self.chainMap(), i )
        r._chainIndex = self.map2index( new_chainmap )
    ## copy non-sequential infos
    r.info = copy.deepcopy( self.info )
    r.pdbCode = self.pdbCode
    r.fileName = self.fileName
    r.source = self.source
    ## copy the biounit
    ## try:
    ##     r.biounit = self.biounit.take(i)
    ##     r.biounit.model = r
    ## except AttributeError:
    ##     pass
    return r
def keep( self, i ):
    """
    Replace atoms,coordinates,profiles of this(!) model with sub-set.
    (in-place version of N0.take() )
    :param i: atom positions to be kept
    :type i: list or array of int
    """
    ## heuristic guard: an all-0/1 sequence as long as the model is
    ## probably a mask rather than a list of atom positions
    if len(i)==self.lenAtoms() and max(i)<2:
        EHandler.warning('dont use PDBModel.keep() with mask.', trace=0)
    r = self.take( i )
    ## adopt all data of the extracted copy in place
    self.xyz = r.xyz
    self.xyzChanged = r.xyzChanged
    self._resIndex = r._resIndex
    self._chainIndex= r._chainIndex
    self.atoms = r.atoms
    self.residues = r.residues
    self.info = r.info
    ## invalidate cached masks and chain-break positions
    self.__maskCA = self.__maskBB = self.__maskHeavy = None
    self.__chainBreaks = None
def clone( self ):
    """
    Create an independent copy of this model.
    :return: PDBModel / subclass, copy of this model
             (see comments to numpy.take())
    :rtype: PDBModel
    """
    ## extracting every atom position yields a full deep copy
    all_positions = self.atomRange()
    return self.take( all_positions )
def compress( self, mask, *initArgs, **initKw ):
    """
    Extract all atoms flagged with 1 in mask.
    compress( mask ) -> PDBModel
    :param mask: array( 1 x N_atoms of 1 or 0 ):
                 * 1 .. keep this atom
    :type mask: array
    :return: new PDBModel containing only the masked atoms
    :rtype: PDBModel
    """
    ## convert the 0/1 mask into explicit atom positions
    kept = N0.nonzero( mask )
    return self.take( kept, *initArgs, **initKw )
def remove( self, what ):
    """
    Convenience access to the 3 different remove methods.
    The mask used to remove atoms is returned. This mask can be used
    to apply the same change to another array of same dimension as
    the old(!) xyz and atoms.
    :param what: Decription of what to remove:
          - function( atom_dict ) -> 1 || 0 (1..remove) OR
          - list of int [4, 5, 6, 200, 201..], indices of atoms to remove
          - list of int [11111100001101011100..N_atoms], mask (1..remove)
          - int, remove atom with this index
    :type what: list of int or int
    :return: array(1 x N_atoms_old) of 0||1, mask used to compress the
             atoms and xyz arrays.
    :rtype: array
    :raise PDBError: if what is neither of above
    """
    ## self.mask() translates any of the accepted descriptions into a
    ## removal mask; invert it to obtain the atoms to keep
    mask = N0.logical_not( self.mask( what ) )
    self.keep( N0.nonzero(mask) )
    return mask
def takeResidues( self, i ):
    """
    Copy the given residues into a new model.
    :param i: residue indices
    :type i: [ int ]
    :return: PDBModel with given residues in given order
    :rtype: PDBModel
    """
    ## translate residue positions into atom positions plus the
    ## pre-computed residue index of the result model
    atom_pos, res_index = self.extendIndex( i, self.resIndex(),
                                            self.lenAtoms() )
    return self.take( atom_pos, rindex=res_index )
def takeChains( self, chains, breaks=0, force=0 ):
    """
    Get copy of this model with only the given chains.
    Note, there is one very special scenario where chain boundaries can get
    lost: If breaks=1 (chain positions are based on normal chain boundaries
    as well as structure-based chain break detection) AND one or more
    chains are extracted several times next to each other, for example
    chains=[0, 1, 1, 2], then the repeated chain will be merged. So in
    the given example, the new model would have chainLength()==3. This case
    is tested for and a PDBIndexError is raised. Override with force=1 and
    proceed at your own risk. Which, in this case, simply means you should
    re-calculate the chain index after takeChains(). Example::
    >>> repeat = model.takeChains( [0,0,0], breaks=1, force=1 )
    >>> repeat.chainIndex( force=1, cache=1 )
    This works because the new model will have back-jumps in residue
    numbering.
    :param chains: list of chains, e.g. [0,2] for first and third
    :type chains: list of int
    :param breaks: split chains at chain breaks (default 0)
    :type breaks: 0|1
    :param maxDist: (if breaks=1) chain break threshold in Angstrom
    :type maxDist: float
    :param force: override check for chain repeats (only for breaks==1)
    :type force: bool
    :return: PDBModel consisting of the given chains in the given order
    :rtype: PDBModel
    """
    ## i, index = self.chain2atomIndices( chains, breaks=breaks )
    ## translate chain positions into atom positions + new chain index
    i, index = self.extendIndex( chains, self.chainIndex( breaks=breaks ),
                                 self.lenAtoms() )
    if not breaks or len(chains)==0:
        return self.take( i, cindex=index )
    ## test for repeats:
    ## delta == 0 marks a chain immediately repeated in the selection
    if not force:
        chains = N0.array( chains, int )
        delta = N0.concatenate( (chains[1:], [chains[-1]+1]) ) - chains
        if not N.all( delta != 0 ):
            raise PDBIndexError('Chain boundaries cannot be preserved for repeats.' +\
                  "Use 'force=1' to override, then re-calculate chainIndex().")
    ## Give up on repeat treatement:
    ## more important: the new model's chain index should NOT include breaks
    return self.take( i )
def addChainFromSegid(self, verbose=1):
    """
    Copy the last letter of each atom's segment ID into its chain ID.
    Atoms with an empty or unusable segment ID keep their chain ID
    and trigger a warning.
    :param verbose: report atoms whose segid could not be used (def 1)
    :type verbose: 1|0
    """
    chain_ids = self.atoms['chain_id']
    segment_ids = self.atoms['segment_id']
    for i in self.atomRange():
        try:
            chain_ids[i] = segment_ids[i][-1]
        except (IndexError, TypeError):
            ## empty segid ('' has no [-1]) or non-string entry
            ## bug fix: previous code referenced undefined name 'a'
            ## (NameError) and mis-reported the method as addChainId()
            if verbose:
                EHandler.warning(
                    "addChainFromSegid(): Problem with atom "+str(i))
def addChainId( self, first_id=None, keep_old=0, breaks=0 ):
    """
    Assign consecutive chain identifiers A - Z to all atoms.
    :param first_id: str (A - Z), first letter instead of 'A'
    :type first_id: str
    :param keep_old: don't override existing chain IDs (default 0)
    :type keep_old: 1|0
    :param breaks: consider chain break as start of new chain (default 0)
    :type breaks: 1|0
    :raise PDBError: if there are more chains than available letters
    """
    ids = self.atoms['chain_id']
    ## collect existing chain IDs (one per residue start) so that they
    ## are neither overridden nor re-used for new chains
    old_chains = []
    if keep_old:
        old_chains = N0.take( ids, self.resIndex() )
        old_chains = mathUtils.nonredundant( old_chains )
        if '' in old_chains: old_chains.remove('')
    letters = string.ascii_uppercase
    if first_id:
        letters = letters[ letters.index( first_id ): ]
    ## remove letters already used by existing chain IDs
    letters = mathUtils.difference( letters, old_chains )
    chainMap = self.chainMap( breaks=breaks )
    try:
        for i in self.atomRange():
            if not (keep_old and ids[i] in old_chains):
                ids[i] = letters[ chainMap[i] ]
    except IndexError:
        raise PDBError('Too many chains, running out of letters.')
def renumberResidues( self, mask=None, start=1, addChainId=1 ):
    """
    Make all residue numbers consecutive and remove any insertion
    code letters. Note that a backward jump in residue numbering
    (among other things) is interpreted as end of chain by
    chainMap() and chainIndex() when a PDB file is loaded.
    :param mask: [ 0||1 x N_atoms ] atom mask to apply BEFORE
    :type mask: list of int
    :param start: starting number (default 1)
    :type start: int
    :param addChainId: add chain IDs if they are missing
    :type addChainId: 1|0
    """
    if addChainId:
        self.addChainId( keep_old=1, breaks=0 )
    ## walk residues in order, assigning consecutive numbers
    for number, residue in enumerate( self.resList( mask ), start ):
        for atom in residue:
            atom['residue_number'] = number
            atom['insertion_code'] = ''
def atomRange( self ):
    """
    All atom positions of this model.
    >>> m.atomRange() == range( m.lenAtoms() )
    :return: [0, 1, .. lenAtoms()-1]
    :rtype: [ int ]
    """
    n_atoms = self.lenAtoms()
    return list( range( 0, n_atoms ) )
def lenAtoms( self, lookup=True ):
    """
    Number of atoms in model.
    :param lookup: if needed, load structure from source to determine
                   the atom count (default True)
    :type lookup: bool
    :return: number of atoms
    :rtype: int
    """
    ## cheapest: coordinates are already loaded
    if not self.xyz is None:
        return len( self.xyz )
    ## next: an atom profile may know its length without coordinates
    if len( self.atoms ) > 0:
        r = self.atoms.profLength( default=-1 )
        if r != -1:
            return r
    ## nothing loaded and lookup disabled -> report empty model
    if self.source is None and not lookup:
        return 0
    ## last resort: force loading of coordinates from source
    return len( self.getXyz() )
def lenResidues( self ):
    """
    Count residues in this model.
    :return: total number of residues
    :rtype: int
    """
    ## the residue index holds one entry per residue
    return len( self.resIndex() )
def lenChains( self, breaks=0, maxDist=None, singleRes=0, solvent=0 ):
    """
    Count chains in this model.
    :param breaks: detect chain breaks from backbone atom distances (def 0)
    :type breaks: 0||1
    :param maxDist: maximal distance between consequtive residues
                    [ None ] .. defaults to twice the average distance
    :type maxDist: float
    :param singleRes: allow chains consisting of single residues (def 0)
    :type singleRes: 1||0
    :param solvent: also check solvent residues for "chain breaks" (def 0)
    :type solvent: 1||0
    :return: total number of chains
    :rtype: int
    """
    try:
        index = self.chainIndex( breaks=breaks, maxDist=maxDist,
                                 singleRes=singleRes, solvent=solvent )
    except IndexError:
        ## empty residue map -> no chains at all
        return 0
    return len( index )
def resList( self, mask=None ):
    """
    Return list of lists of atom pseudo dictionaries per residue,
    which allows to iterate over residues and atoms of residues.
    :param mask: [ 0||1 x N_atoms ] atom mask to apply BEFORE
    :type mask: list of int (1||0) or None
    :return: a list (one per residue) of lists (one per atom) of dictionaries
    ::
      [ [ CrossView{'name':'N', ' residue_name':'LEU', ..},
          CrossView{'name':'CA', 'residue_name':'LEU', ..} ],
        [ CrossView{'name':'CA', 'residue_name':'GLY', ..}, .. ]
      ]
    :rtype: [ [ `biskit.ProfileCollection.CrossView` ] ]
    """
    ## append total atom count so each residue has a start AND end position
    ri = N0.concatenate( (self.resIndex( mask=mask ), [self.lenAtoms()] ) )
    resLen = len( ri ) - 1
    atoms = self.getAtoms()
    ## apply the same mask to the atom views so positions stay in sync
    ## with the masked residue index
    if mask is not None:
        atoms = N0.compress( mask, atoms ).tolist()
    ## slice the flat atom list into per-residue chunks
    return [ atoms[ ri[res] : ri[res+1] ] for res in range( resLen ) ]
def resModels( self, i=None ):
    """
    Create one new PDBModel for each residue in the parent PDBModel.
    :param i: range of residue positions (default: all residues)
    :type i: [ int ] or array( int )
    :return: list of PDBModels, one for each residue
    :rtype: [ `PDBModel` ]
    """
    starts = self.resIndex()
    ends   = self.resEndIndex()
    if i is None:
        i = N0.arange( len( starts ) )
    ## extract the atom span of every requested residue
    models = []
    for pos in i:
        models.append( self.take( N0.arange( starts[pos], ends[pos]+1 ) ) )
    return models
def resMapOriginal(self, mask=None):
    """
    Generate list to map from any atom to its ORIGINAL(!) PDB
    residue number.
    :param mask: [00111101011100111...] consider atom: yes or no
                 len(mask) == N_atoms
    :type mask: list of int (1||0)
    :return: list all [000111111333344444..] with residue number
             for each atom
    :rtype: list of int
    """
    numbers = self.atoms['residue_number']
    ## no mask -> consider every atom
    if mask is None:
        mask = N0.ones( self.lenAtoms(), N0.Int )
    return N0.compress( mask, numbers )
def __inferResIndex( self ):
    """
    Determine residue borders from atom profiles. A new residue starts
    whenever residue number, residue name, segment id or insertion code
    changes between two consecutive atoms.
    :return: starting position of each residue
    :rtype: array of int
    """
    result = []
    if self.lenAtoms() == 0:
        return N0.array( result, N0.Int )
    ## sentinel values that cannot match any real first atom
    lastResNumber = -100
    lastResName = ''
    lastAlt = 'x'
    lastSegid = -1
    res_nrs = self.atoms['residue_number']
    res_nam = self.atoms['residue_name']
    ins_cod = self.atoms['insertion_code']
    seg_id  = self.atoms['segment_id']
    ## record the position of every first atom of a residue
    ## (removed unused 'index' counter from the original version)
    for i in range( self.lenAtoms() ):
        if res_nrs[i] != lastResNumber or \
           res_nam[i] != lastResName or \
           seg_id[i] != lastSegid or \
           ins_cod[i] != lastAlt:
            ## start of new residue
            lastResNumber = res_nrs[i]
            lastResName = res_nam[i]
            lastAlt = ins_cod[i]
            lastSegid = seg_id[i]
            result.append( i )
    return N0.array(result, N0.Int)
def resIndex( self, mask=None, force=0, cache=1 ):
    """
    Get the position of the each residue's first atom.
    :param force: re-calculate even if cached result is available (def 0)
    :type force: 1||0
    :param cache: cache the result if new (def 1)
    :type cache: 1||0
    :param mask: atom mask to apply before (i.e. result indices refer to \
    compressed model)
    :type mask: list of int (1||0)
    :return: index of the first atom of each residue
    :rtype: list of int
    """
    ## fast path: cached index is only valid for the unmasked model
    if self._resIndex is not None and not force and mask is None:
        return self._resIndex
    r = self.__inferResIndex()
    if mask is not None:
        ## index -> per-atom map, apply mask, then map -> index again
        m = self.index2map( r, len( mask ) )
        m = N0.compress( mask, m )
        r = self.map2index( m )
    ## only cache results that describe the full, unmasked model
    if mask is None and cache:
        self._resIndex = r
    return r
def resMap( self, force=0, cache=1 ):
    """
    Get list to map from any atom to a continuous residue numbering
    (starting with 0). A new residue is assumed to start whenever the
    'residue_number' or the 'residue_name' record changes between 2
    atoms.
    See :class:`resList()` for an example of how to use the residue map.
    :param force: recalculate map even if cached one is available (def 0)
    :type force: 0||1
    :param cache: cache new map (def 1)
    :type cache: 0||1
    :return: array [00011111122223333..], residue index for each atom
    :rtype: list of int
    """
    ## expand the residue start index into a per-atom map
    index = self.resIndex( force=force, cache=cache )
    return self.index2map( index, self.lenAtoms() )
def resEndIndex( self ):
    """
    Get the position of the each residue's last atom.
    :return: index of the last atom of each residue
    :rtype: list of int
    """
    starts = self.resIndex()
    ## each residue ends just before the next one starts;
    ## the last residue ends at the final atom of the model
    next_start = N0.concatenate( ( starts[1:], [self.lenAtoms()] ) )
    return next_start - 1
def __inferChainIndex( self ):
    """
    Determine chain borders from atom profiles. A new chain starts
    whenever the chain id or segment id changes, the residue numbering
    jumps backwards, or an atom is flagged as following a TER record.
    :return: starting position of each chain
    :rtype: array of int
    """
    result = []
    if self.lenAtoms() == 0:
        return N0.array( result, N0.Int )
    ## sentinel values that cannot match any real first atom
    lastResidue = -100
    lastChainID = None
    lastSegID = None
    chn_ids = self.atoms['chain_id']
    seg_ids = self.atoms['segment_id']
    res_nrs = self.atoms['residue_number']
    ter_atm = self.atoms['after_ter']
    for i in self.atomRange():
        ## res_nrs[i] < lastResidue: backward jump in numbering
        ## ter_atm[i]: atom directly after a PDB TER record
        if chn_ids[i] != lastChainID or \
           seg_ids[i] != lastSegID or \
           res_nrs[i] < lastResidue or \
           ter_atm[i]:
            result.append( i )
        lastResidue = res_nrs[i]
        lastChainID = chn_ids[i]
        lastSegID = seg_ids[i]
    return N0.array( result, N0.Int )
def __filterSingleResChains( self, chainindex, ignore_resnumbers=0 ):
    """
    Join chains containing single residues with identical name into
    one chain. Typically required for waters or ions if they are
    separated by TER or picked up by the chain break detection.
    :param chainindex: first-atom position of each chain
    :type chainindex: array of int
    :param ignore_resnumbers: merge even without consecutive residue
                              numbering (def 0)
    :type ignore_resnumbers: 1||0
    :return: chainindex with single-residue chain starts removed
    :rtype: array of int
    """
    # residue name of first atom of each chain
    res_names = N0.take( self.atoms['residue_name'], chainindex )
    # residue number of first atom of each chain
    res_nmbrs = N0.take( self.atoms['residue_number'], chainindex )
    # chain id of first atom of each chain
    chain_ids = N0.take( self.atoms['chain_id'], chainindex )
    #segid of first atom of each chain
    seg_ids = N0.take( self.atoms['segment_id'], chainindex )
    ## prepend sentinel entries so each chain can be compared to its
    ## predecessor with a simple shifted-array comparison
    res_names = N0.concatenate( (['-1'], res_names) )
    chain_ids = N0.concatenate( (['-1'], chain_ids) )
    seg_ids = N0.concatenate( (['-1'], seg_ids ) )
    res_nmbrs = N0.concatenate( ([-100], res_nmbrs) )
    ## delta == 1 means this chain's first residue directly follows
    ## the previous chain's first residue in PDB numbering
    delta = res_nmbrs[1:] - res_nmbrs[:-1]
    same_name = res_names[1:] == res_names[:-1]
    same_chain= chain_ids[1:] == chain_ids[:-1]
    same_seg  = seg_ids[1:] == seg_ids[:-1]
    if ignore_resnumbers:
        delta = N0.ones( len(delta), N0.Int )
    ## a chain start is dropped (merged into predecessor) only if name,
    ## chain id, segid all match and (unless ignored) numbering is consecutive
    is_single = (delta==1) \
        * same_name * same_chain * same_seg
    return N0.compress( N0.logical_not(is_single), chainindex)
def chainIndex( self, breaks=0, maxDist=None, force=0, cache=0,
                singleRes=0, solvent=0 ):
    """
    Get indices of first atom of each chain.
    :param breaks: split chains at chain breaks (def 0)
    :type breaks: 1||0
    :param maxDist: (if breaks=1) chain break threshold in Angstrom
    :type maxDist: float
    :param force: re-analyze residue numbering, chain and segids to
                  find chain boundaries, use with care! (def 0)
    :type force: 1||0
    :param cache: cache new index even if it was derrived from
                  non-default parameters (def 0)
                  **Note:** a simple m.chainIndex() will always cache
    :type cache: 1||0
    :param singleRes: allow chains consisting of single residues (def 0)
                      Otherwise group consecutive residues with identical
                      name into one chain.
    :type singleRes: 1||0
    :param solvent: also check solvent residues for "chain breaks"
                    (default: false)
    :type solvent: 1||0
    :return: array (1 x N_chains) of int
    :rtype: list of int
    """
    ## fast track: cached index is only valid for default parameters
    if not (breaks or force or maxDist or solvent) \
       and self._chainIndex is not None:
        return self._chainIndex
    r = self._chainIndex
    if r is None or force:
        r = self.__inferChainIndex()
    if breaks:
        break_pos = self.chainBreaks( breaks_only=1, maxDist=maxDist,
                                      solvent=solvent, force=force )
        break_pos = break_pos + 1 ## chainBreaks reports last atom of each chain
        ## merge structural break positions into the regular chain starts
        r = mathUtils.union( break_pos, r )
        r.sort()
    ## filter out chains consisting only of a single residue
    if len(r)>0 and not singleRes:
        ## r = self.__filterSingleResChains( r, ignore_resnumbers=breaks )
        r = self.__filterSingleResChains( r, ignore_resnumbers=False)
    ## cache the result if it has been computed with default parameters
    if not(breaks or force or maxDist or singleRes or solvent) or cache:
        self._chainIndex = r
    return N0.array( r, N0.Int )
def chainEndIndex( self, breaks=0, solvent=0 ):
    """
    Get the position of each chain's last atom.
    :param breaks: split chains at chain breaks (def 0)
    :type breaks: 1||0
    :param solvent: also check solvent residues for "chain breaks" (def 0)
    :type solvent: 1||0
    :return: index of the last atom of each chain
    :rtype: list of int
    """
    r = self.chainIndex( breaks=breaks, solvent=solvent )
    ## each chain ends just before the next one starts; the last chain
    ## ends at the final atom of the model
    return N0.concatenate( (r[1:], [self.lenAtoms()]) ) - 1
def chainMap( self, breaks=0, maxDist=None ):
    """
    Get chain index of each atom. A new chain is started between 2 atoms if
    the chain_id or segment_id changes, the residue numbering jumps back or
    a TER record was found.
    :param breaks: split chains at chain breaks (def 0)
    :type breaks: 1||0
    :param maxDist: (if breaks=1) chain break threshold in Angstrom
    :type maxDist: float
    :return: array 1 x N_atoms of int, e.g. [000000011111111111122222...]
    :rtype: list of int
    """
    ## expand the chain start index into a per-atom map
    index = self.chainIndex( breaks=breaks, maxDist=maxDist )
    return self.index2map( index, self.lenAtoms() )
def chainBreaks( self, breaks_only=1, maxDist=None, force=0, solvent=0,
                 z=6. ):
    """
    Identify discontinuities in the molecule's backbone. By default,
    breaks are identified from the distribution of distances between the
    last backbone atom of a residue and the first backbone atom of the
    next residue. The median distance and standard deviation are
    determined iteratively and outliers (i.e. breaks) are identified
    as any pairs of residues with a distance that is more than z standard
    deviations (default 6) above the median. This heuristics can be
    overriden by specifiying a hard distance cutoff (maxDist).
    :param breaks_only: don't report ends of regular chains (def 1)
    :type breaks_only: 1|0
    :param maxDist: maximal distance between consequtive residues
                    [ None ] .. defaults median + z * standard dev.
    :type maxDist: float
    :param z: z-score for outlier distances between residues (def 6.)
    :type z: float
    :param solvent: also check selected solvent residues (buggy!) (def 0)
    :type solvent: 1||0
    :param force: force re-calculation, do not use cached positions (def 0)
    :type force: 1||0
    :return: atom indices of last atom **before** a probable chain break
    :rtype: list of int
    """
    ## cached result is only valid for the default parameter set
    if self.__chainBreaks is not None and not force and \
       maxDist is None and breaks_only and not solvent and z==6.:
        r = self.__chainBreaks
    else:
        i_bb = N0.nonzero( self.maskBB( solvent=solvent ) )
        ## outlier detection only works with more than 2,
        ## hard cutoff works with more than 1
        if len(i_bb) < 2:
            r = []
        else:
            ## work on a backbone-only copy of the model
            bb   = self.take( i_bb )
            bb_ri= bb.resIndex()
            bb_re= bb.resEndIndex()
            ## xyz = [ bb.xyz[ bb_ri[i] : bb_ri[i+1] ] for i in range(len(bb_ri)-1) ]
            ## xyz +=[ bb.xyz[ bb_ri[-1]: len(bb) ] ]
            ## centroid = N0.array([ N0.average( x ) for x in xyz ])
            ## coordinates of each residue's last and the next
            ## residue's first backbone atom
            last = N0.take( bb.xyz, bb_re )[:-1]
            first= N0.take( bb.xyz, bb_ri )[1:]
            ## dist = N0.sqrt( N0.sum( N0.power(centroid[:-1]-centroid[1:],2),
            ##                         axis=1 ) )
            dist = N0.sqrt( N0.sum( N0.power(last-first,2), axis=1 ) )
            outliers, median, sd = mathUtils.outliers( dist, z=z, it=5 )
            ## get distances above mean
            cutoff = maxDist or median + z * sd
            r = N0.nonzero( N0.greater( dist, cutoff ) )
            if len(r) > 0:
                ## ## can probably be simplified with self.resEndIndex()
                ## ri = self.resIndex()
                ## ri_to_e = {}
                ## for i in range( len(ri)-1 ):
                ##     ri_to_e[ ri[i] ] = ri[ i+1 ]-1
                ##
                ## ## map back to the original atom indices
                ## r = [ ri_to_e[ i_bb[ bb_ri[i] ] ] for i in r ]
                ## this replacement didn't work out (fails PDBCleaner testcase):
                ## map break positions from residue back to atom numbering
                re = self.resEndIndex()
                r = [ re[i] for i in r ]
        if breaks_only:
            ## discard breaks that coincide with regular chain starts
            ri = self.chainIndex( breaks=0, solvent=solvent )
            r = [ x for x in r if not x+1 in ri ]
        ## only cache results computed with default parameters
        if maxDist is None and not solvent and z==6.:
            self.__chainBreaks = r
    return N0.array( r, int )
def removeRes( self, what ):
    """
    Remove all atoms of certain residues.
    :param what: name(s) or position(s) of residue(s) to be removed
    :type what: str OR [ str ] OR int OR [ int ]
    :return: result of remove() for recognized input, False otherwise
    """
    ## bug fix: the old test read
    ##     (not isinstance(what, list)) or isinstance(what, ndarray)
    ## because of operator precedence -- ndarrays were wrapped into a
    ## one-element list and then fell through the type checks below
    if not isinstance( what, (list, N.ndarray) ):
        what = T.toList( what )
    ## nothing to do for an empty selection (used to raise IndexError)
    if len( what ) == 0:
        return False
    ## isinstance (rather than type()) also accepts numpy scalars
    if isinstance( what[0], str ):
        return self.remove( self.maskFrom( 'residue_name', what) )
    if isinstance( what[0], (int, N.integer) ):
        return self.remove( self.res2atomIndices( what ) )
    return False
def rms( self, other, mask=None, mask_fit=None, fit=1, n_it=1 ):
    """
    Rmsd between two PDBModels.
    :param other: other model to compare this one with
    :type other: PDBModel
    :param mask: atom mask for rmsd calculation
    :type mask: list of int
    :param mask_fit: atom mask for superposition (default: same as mask)
    :type mask_fit: list of int
    :param fit: superimpose first (default 1)
    :type fit: 1||0
    :param n_it: number of fit iterations::
                  1 - classic single fit (default)
                  0 - until convergence, kicking out outliers on the way
    :type n_it: int
    :return: rms in Angstrom
    :rtype: float
    """
    x, y = self.getXyz(), other.getXyz()
    if mask_fit is None: mask_fit = mask
    if fit:
        fx, fy = x, y
        ## superposition may use a different atom subset than the rmsd
        if mask_fit is not None:
            fx = N0.compress( mask_fit, x, 0 )
            fy = N0.compress( mask_fit, y, 0 )
        ## find transformation for best match
        r, t = rmsFit.match( fx, fy, n_iterations=n_it )[0]
        ## transform coordinates
        y = N0.dot(y, N0.transpose(r)) + t
    if mask is not None:
        x = N0.compress( mask, x, 0 )
        y = N0.compress( mask, y, 0 )
    ## calculate row distances
    d = N0.sqrt(N0.sum(N0.power(x - y, 2), 1))
    return N0.sqrt( N0.average(d**2) )
def transformation( self, refModel, mask=None, n_it=1,
                    z=2, eps_rmsd=0.5, eps_stdv=0.05,
                    profname='rms_outlier'):
    """
    Get the transformation matrix which least-square fits this model
    onto the other model.
    :param refModel: reference PDBModel
    :type refModel: PDBModel
    :param mask: atom mask for superposition
    :type mask: list of int
    :param n_it: number of fit iterations::
                  1 - classic single fit (default)
                  0 - until convergence
    :type n_it: int
    :param z: number of standard deviations for outlier definition
              (default 2)
    :type z: float
    :param eps_rmsd: tolerance in rmsd (default 0.5)
    :type eps_rmsd: float
    :param eps_stdv: tolerance in standard deviations (default 0.05)
    :type eps_stdv: float
    :param profname: name of new atom profile getting outlier flag
    :type profname: str
    :return: array(3 x 3), array(3 x 1) - rotation and translation matrices
    :rtype: array, array
    """
    x, y = refModel.getXyz(), self.getXyz()
    if mask is not None:
        x = N0.compress( mask, x, 0 )
        y = N0.compress( mask, y, 0 )
        outlier_mask = N0.zeros( N0.sum(mask) )
    else:
        outlier_mask = N0.zeros( len(self) )
    r, iter_trace = rmsFit.match( x, y, n_iterations=n_it, z=z,
                                  eps_rmsd=eps_rmsd, eps_stdv=eps_stdv)
    ## the last entry of the iteration trace holds the outlier positions
    N0.put( outlier_mask, iter_trace[-1][-1], 1 )
    ## record outliers in an atom profile (only for iterative fitting)
    if n_it != 1:
        self.atoms.set( profname, outlier_mask, mask=mask,
                        default=1,
                        comment='outliers in last iterative fitting',
                        n_iterations=len( iter_trace ) )
    return r
def transform( self, *rt ):
    """
    Transform coordinates of PDBModel.
    :param rt: rotational and translation array:
               array( 4 x 4 ) OR array(3 x 3), array(3 x 1)
    :type rt: array OR array, array
    :return: PDBModel with transformed coordinates
    :rtype: PDBModel
    """
    ## got result tuple from transformation() without unpacking
    if len( rt ) == 1 and type( rt[0] ) is tuple:
        rt = rt[0]
    if len(rt) == 2:
        ## separate rotation and translation arrays given
        r, t = rt[0], rt[1]
    else:
        ## single 4 x 4 matrix: rotation in the upper left 3 x 3 block,
        ## translation in the last column
        rt = rt[0]
        r, t = (rt[0:3,0:3], rt[0:3, 3])
    result = self.clone()
    result.setXyz( N0.dot( self.getXyz(), N0.transpose(r) ) + t )
    return result
def fit( self, refModel, mask=None, n_it=1,
         z=2, eps_rmsd=0.5, eps_stdv=0.05,
         profname='rms_outlier'):
    """
    Least-square fit this model onto refModel.
    :param refModel: reference PDBModel
    :type refModel: PDBModel
    :param mask: atom mask for superposition
    :type mask: list of int (1||0)
    :param n_it: number of fit iterations::
                  1 - classic single fit (default)
                  0 - until convergence
    :type n_it: int
    :param z: number of standard deviations for outlier definition
              (default 2)
    :type z: float
    :param eps_rmsd: tolerance in rmsd (default 0.5)
    :type eps_rmsd: float
    :param eps_stdv: tolerance in standard deviations (default 0.05)
    :type eps_stdv: float
    :param profname: name of new atom profile containing outlier flag
    :type profname: str
    :return: PDBModel with transformed coordinates
    :rtype: PDBModel
    """
    ## bug fix: z was accepted but never forwarded, so non-default z
    ## values were silently ignored by the outlier detection
    return self.transform(
        self.transformation( refModel, mask, n_it, z=z,
                             eps_rmsd=eps_rmsd,
                             eps_stdv=eps_stdv, profname=profname ) )
def magicFit( self, refModel, mask=None ):
    """
    Superimpose this model onto a ref. model with similar atom content.
    magicFit( refModel [, mask ] ) -> PDBModel (or subclass )
    :param refModel: reference PDBModel
    :type refModel: PDBModel
    :param mask: atom mask to use for the fit
    :type mask: list of int (1||0)
    :return: fitted PDBModel or sub-class
    :rtype: PDBModel
    """
    probe = self if mask is None else self.compress( mask )
    ## restrict both models to their equivalent atoms
    i_probe, i_ref = probe.compareAtoms( refModel )
    probe     = probe.take( i_probe )
    reference = refModel.take( i_ref )
    ## best-match transformation between the two matched atom sets
    rot, trans = rmsFit.findTransformation( reference.getXyz(),
                                            probe.getXyz() )
    return self.transform( rot, trans )
def structureFit( self, refModel, mask=None ):
    """
    Structure-align this model onto a reference model using the external
    TM-Align program (which needs to be installed).
    structureFit( refModel [, mask] ) -> PDBModel (or subclass)
    The result model has additional TM-Align statistics in its info record:
        r = m.structureFit( ref )
        r.info['tm_score'] -> TM-Align score
    the other keys are: 'tm_rmsd', 'tm_len', 'tm_id'
    .. seealso:: `biskit.TMAlign`
    :param refModel: reference PDBModel
    :type refModel: PDBModel
    :param mask: atom mask to use for the fit
    :type mask: list of int (1||0)
    :return: fitted PDBModel or sub-class
    :rtype: PDBModel
    """
    from biskit.exe import tmalign
    if mask is not None:
        m_this = self.compress( mask )
    else:
        m_this = self
    tm = tmalign.TMAlign( m_this, refModel )
    ## run() is called for its side effect of computing the alignment;
    ## its return value (previously bound to an unused local) is discarded
    tm.run()
    return tm.applyTransformation( self )
def centered( self, mask=None ):
    """
    Get a copy of this model with centered coordinates.
    :param mask: atom mask applied before calculating the center
    :type mask: list of int (1||0)
    :return: model with centered coordinates
    :rtype: PDBModel
    """
    if mask is None:
        mask = N0.ones( len(self) )
    result = self.clone()
    xyz = result.getXyz()
    ## shift all coordinates by the average of the masked atoms
    offset = N0.average( N0.compress( mask, xyz, 0 ) )
    result.setXyz( xyz - offset )
    return result
def center( self, mask=None ):
    """
    Geometric center of model.
    :param mask: atom mask applied before calculating the center
    :type mask: list of int (1||0)
    :return: xyz coordinates of center
    :rtype: (float, float, float)
    """
    xyz = self.getXyz()
    if mask is not None:
        xyz = N0.compress( mask, xyz, axis=0 )
    return N0.average( xyz )
def centerOfMass( self ):
    """
    Center of mass of PDBModel.
    :return: array(Float32)
    :rtype: (float, float, float)
    """
    ## mass-weighted mean of all atom coordinates
    weights = self.masses()
    return mathUtils.wMean( self.getXyz(), weights )
def masses( self ):
    """
    Collect the molecular weight of all atoms in PDBModel.
    :return: 1-D array with mass of every atom in 1/12 of C12 mass.
    :rtype: array of floats
    :raise PDBError: if the model contains elements of unknown mass
    """
    try:
        weights = [ molUtils.atomMasses[ elem ]
                    for elem in self.atoms['element'] ]
    except KeyError as missing:
        ## element symbol not found in the mass table
        raise PDBError('Cannot find mass for '+str(missing))
    return N0.array( weights )
def mass( self ):
    """
    Molecular weight of PDBModel.
    :return: total mass in 1/12 of C12 mass
    :rtype: float
    :raise PDBError: if the model contains elements of unknown mass
    """
    per_atom = self.masses()
    return N0.sum( per_atom )
def residusMaximus( self, atomValues, mask=None ):
    """
    Take list of value per atom, return list where all atoms of any
    residue are set to the highest value of any atom in that residue.
    (after applying mask)
    :param atomValues: values per atom
    :type atomValues: list
    :param mask: atom mask
    :type mask: list of int (1||0)
    :return: array with values set to the maximal intra-residue value
    :rtype: array of float
    """
    if mask is None:
        mask = N0.ones( len(atomValues) )
    ## eliminate all values that do not belong to the selected atoms
    ## (masked-out positions become 0 but keep their place)
    masked = atomValues * mask
    result = []
    ## set all atoms of each residue to uniform value
    for res in range( 0, self.lenResidues() ):
        ## get atom entries for this residue
        resAtoms = N0.compress( N0.equal( self.resMap(), res ), masked )
        ## get maximum value
        masterValue = max( resAtoms )
        ## extend result by one entry per atom, all set to the maximum
        result += resAtoms * 0.0 + masterValue
    return N0.array( result, N0.Float32 )
def argsort( self, cmpfunc=None, key=None ):
    """
    Prepare sorting atoms within residues according to comparison function.

    :param cmpfunc: old style function(m.atoms[i], m.atoms[j]) -> -1, 0, +1
    :type  cmpfunc: function
    :param key: new style sort key function(m.atoms[i]) -> sortable;
                takes precedence over cmpfunc if given
    :type  key: function

    :return: suggested position of each atom in re-sorted model
             ( e.g. [2,1,4,6,5,0,..] )
    :rtype: list of int
    """
    ## cmp vanished in python 3.x (but still available in past.builtins)
    cmp = lambda x, y: (x > y) - (x < y)

    if key is None:
        ## by default sort alphabetically by atom name
        cmpfunc = cmpfunc or ( lambda a1, a2: cmp(a1['name'],a2['name']) )

    result = []
    pos = 0

    ## get sort list for each residue
    for resAtoms in self.resList():

        resIndex = list(range(0, len( resAtoms )))

        if key is not None:
            ## new-style: key function operating directly on atom dicts
            f_key = lambda i, atoms=resAtoms: key( atoms[i] )
        else:
            ## convert atom-based function into index-based function
            f_cmp = lambda i,j,atoms=resAtoms : cmpfunc( atoms[i], atoms[j] )
            ## convert py 2.x cmp to py 3.x key method
            f_key = functools.cmp_to_key(f_cmp)

        ## get sortMap for this residue (numbering starts with 0)
        resIndex.sort( key=f_key )

        ## add first residue atom's position in self.atoms to each entry
        resIndex = list(map( lambda i, delta=pos: i + delta, resIndex ))

        ## concatenate to result list
        result += resIndex

        pos += len( resAtoms )

    return result
def sort( self, sortArg=None ):
    """
    Apply a given sort list to the atoms of this model.

    :param sortArg: sort list; defaults to alphabetical argsort()
    :type  sortArg: list of int

    :return: copy of this model with re-sorted atoms (see numpy.take() )
    :rtype: PDBModel
    """
    ## fall back to alphabetical ordering when no sort list is given
    positions = sortArg or self.argsort()
    return self.take( positions )
def unsort( self, sortList ):
    """
    Undo a previous sorting on the model itself (no copy).

    :param sortList: sort list used for previous sorting.
    :type  sortList: list of int

    :return: the (back)sort list used ( to undo the undo...)
    :rtype: list of int

    :raise PDBError: if sorting changed atom number
    """
    ## prepare sort functions (py 2.x / py 3.x)
    cmp = lambda x, y: (x > y) - (x < y)
    f_key = functools.cmp_to_key(lambda i,j, l=sortList: cmp(l[i],l[j]))

    ## get new sort list that reverts the old one:
    ## sorting positions by their value in sortList yields the inverse map
    backSortList = list(range(0, self.lenAtoms()))
    backSortList.sort( key=f_key )

    ## get re-sorted PDBModel copy (in place via keep)
    self.keep( backSortList )

    return backSortList
def atomNames(self, start=None, stop=None):
    """
    Return a list of atom names from start to stop RESIDUE index

    :param start: index of first residue (default: 0)
    :type  start: int
    :param stop: index of last residue, inclusive (default: last residue)
    :type  stop: int

    :return: ['C','CA','CB' .... ]
    :rtype: list of str
    """
    ## By default return list of all atoms
    start = start or 0

    if stop is None:    ## don't use "stop = stop or ..", stop might be 0!
        stop = self.lenResidues()-1

    ## first get atom indexes of residues
    i = self.resIndex()[start]

    if stop == self.lenResidues()-1:
        ## last residue: no next residue to look up, take all remaining atoms
        j = self.lenAtoms()
    else:
        j = self.resIndex()[stop+1]

    return self.atoms['name'][i:j]
def __testDict_and( self, dic, condition ):
    """
    Test if **all** key-value pairs of condition are matched in dic

    :param condition: {..}, key-value pairs to be matched
    :type  condition: dictionary
    :param dic: {..}, dictionary to be tested
    :type  dic: dictionary

    :return: 1|0, 1 if all key-value pairs of condition are matched in dic
    :rtype: 1|0
    """
    ## every condition value list must contain the corresponding dic entry
    matched = all( dic.get( k, None ) in v for k, v in condition.items() )
    return 1 if matched else 0
def __testDict_or( self, dic, condition ):
    """
    Test if **any** key-value pairs of condition are matched in dic

    :param condition: {..}, key-value pairs to be matched
    :type  condition: dictionary
    :param dic: {..}, dictionary to be tested
    :type  dic: dictionary

    :return: 1|0, 1 if any key-value pairs of condition are matched in dic
    :rtype: 1|0
    """
    ## a single matching key-value pair is enough
    matched = any( dic.get( k, None ) in v for k, v in condition.items() )
    return 1 if matched else 0
def filterIndex( self, mode=0, **kw ):
    """
    Get atom positions that match a combination of key=values.
    E.g. filter( chain_id='A', name=['CA','CB'] ) -> index

    :param mode: 0 combine with AND (default), 1 combine with OR
    :type  mode: 0||1
    :param kw: combination of atom dictionary keys and
               values/list of values that will be used to filter
    :type  kw: filter options, see example

    :return: sort list
    :rtype: list of int
    """
    ## cache to minimize function lookup
    atoms = self.atoms.toDicts()

    if mode == 0:
        f_test = self.__testDict_and
    else:
        f_test = self.__testDict_or

    ## normalize all filter values to lists so membership tests work
    for k in kw:
        kw[ k ] = T.toList( kw[ k ] )

    r = [ i for i in range(self.lenAtoms()) if f_test( atoms[i], kw ) ]
    return r
def filter( self, mode=0, **kw):
    """
    Extract atoms that match a combination of key=values.
    E.g. filter( chain_id='A', name=['CA','CB'] ) -> PDBModel

    :param mode: 0 combine with AND (default), 1 combine with OR
    :type  mode: 0||1
    :param kw: combination of atom dictionary keys and
               values/list of values that will be used to filter
    :type  kw: filter options, see example

    :return: filtered PDBModel
    :rtype: PDBModel
    """
    ## delegate matching to filterIndex, then extract the matching atoms
    return self.take( self.filterIndex( mode=mode, **kw ) )
def equals(self, ref, start=None, stop=None):
    """
    Compares the residue and atom sequence in the given range.
    Coordinates are not checked, other profiles are not checked.

    :param ref: reference model to compare against
    :type  ref: PDBModel
    :param start: index of first residue
    :type  start: int
    :param stop: index of last residue
    :type  stop: int

    :return: [ 1||0, 1||0 ],
             first position: sequence identity 0|1,
             second position: atom identity 0|1
    :rtype: list of int
    """
    ## By default compare all residues
    start = start or 0
    if stop is None:    ## don't use stop = stop or .. stop might be 0!
        stop = self.lenResidues()

    ## set to 1 when identical
    seqID, atmID = 0, 0

    ## compare sequences
    if self.sequence()[start:stop] == ref.sequence()[start:stop]:
        seqID = 1

    ## then compare atoms (atomNames takes an inclusive stop residue)
    if self.atomNames(start,stop-1) == ref.atomNames(start,stop-1):
        atmID = 1

    return [seqID, atmID]
def compareAtoms( self, ref ):
    """
    Get list of atom indices for this and reference model that converts
    both into 2 models with identical residue and atom content.

    E.g.
     >>> m2 = m1.sort()    ## m2 has now different atom order
     >>> i2, i1 = m2.compareAtoms( m1 )
     >>> m1 = m1.take( i1 ); m2 = m2.take( i2 )
     >>> m1.atomNames() == m2.atomNames()  ## m2 has again same atom order

    :param ref: reference model to compare with
    :type  ref: PDBModel

    :return: indices, indices_ref
    :rtype: ([int], [int])
    """
    ## compare sequences
    seqMask, seqMask_ref = match2seq.compareModels(self, ref)

    ## get list of matching RESIDUES
    equal = N0.nonzero(seqMask)
    equal_ref = N0.nonzero(seqMask_ref)

    result, result_ref = [], []

    rI = self.resIndex()
    rIref = ref.resIndex()

    ## check that all atoms are equal in matching residues
    for i in range(0, len(equal)):

        ## atom name lists for current residue
        aa = self.atomNames( equal[i],equal[i] )
        aa_ref = ref.atomNames( equal_ref[i],equal_ref[i] )

        ## starting atom of current residue
        ind = rI[ equal[i] ]
        ind_ref = rIref[ equal_ref[i] ]

        for j in range( len(aa_ref) ):

            try:
                ##shortcut for mostly equal models: same atom at same position
                if aa_ref[j] == aa[j]:      ## throws IndexError
                    result     += [ind + j]
                    result_ref += [ind_ref + j]
                    continue
            except IndexError:
                pass

            try:
                ## atom exists but at a different position within the residue
                pos = aa.index( aa_ref[j] ) ## throws ValueError
                result     += [ind + pos]
                result_ref += [ind_ref + j]
            except ValueError:
                ## atom missing in self -- skip it
                pass

    return result, result_ref
def unequalAtoms( self, ref, i=None, iref=None ):
    """
    Identify atoms that are not matching between two models.
    This method returns somewhat of the opposite of compareAtoms().
    Not matching means: (1) residue is missing, (2) missing atom within a
    residue, (3) atom name is different. Differences in coordinates or
    other atom profiles are NOT evaluated and will be ignored.
    (not speed-optimized)

    :param ref: reference model to compare to
    :type  ref: PDBModel
    :param i: pre-computed positions that are equal in this model \
              (first value returned by compareAtoms() )
    :type  i: array( int ) or [ int ]
    :param iref: pre-computed positions that are equal in ref model \
              (second value returned by compareAtoms() )
    :type  iref: array( int ) or [ int ]

    :return: mismatching atoms of self, mismatching atoms of ref
    :rtype: array(int), array(int)
    """
    if i is None or iref is None:
        i, iref = self.compareAtoms( ref )

    ## start with everything flagged as unequal, then clear matched positions
    mask_self = N0.ones( len(self), int )
    mask_ref  = N0.ones( len(ref ), int )

    N0.put( mask_self, i, 0 )
    N0.put( mask_ref, iref, 0 )

    return N0.nonzero( mask_self ), N0.nonzero( mask_ref )
def reportAtoms( self, i=None, n=None ):
    """
    :param i: optional list of atom positions to report (default: all)
    :type  i: [ int ]
    :param n: maximum number of atoms to list; note this defaults to the
              length of the FULL model, before any selection by `i`
    :type  n: int

    :return: formatted string with atom and residue names similar to PDB
    :rtype: str
    """
    m = self
    n = n or len(m)

    if i is not None:
        m = self.take( i )

    ## PDB-like line: serial, atom name, residue name, chain, residue number
    s = '%(serial_number)4i %(name)5s %(residue_name)3s %(chain_id)1s '+\
        '%(residue_number)3i'

    atm = [ s % a for a in m ]
    r = '\n'.join( atm[:n] )

    if n < len( m ):
        r += ' ...'

    return r
def __chainFraction( self, chain, ref ):
    """
    Look how well a given chain matches a continuous stretch of residues
    in ref.

    :param chain: chain index
    :type  chain: int
    :param ref: reference PDBModel
    :type  ref: PDBModel

    :return: average relative length of matching chain fragments
    :rtype: float
    """
    m = self.takeChains([chain])
    if len(m) == 0:
        return 0

    ## atoms of this chain that have a counterpart in ref
    m_cast = m.take( m.compareAtoms( ref )[0] )

    f = 1. * len( m_cast )
    if f > 0:
        ## penalize matches that are scattered over many fragments
        f = f / m_cast.lenChains( breaks=1, maxDist=5.)

    ## normalize by total chain length
    f = f / len( m )

    return f
def compareChains( self, ref, breaks=0, fractLimit=0.2 ):
    """
    Get list of corresponding chain indices for this and reference model.
    Use takeChains() to create two models with identical chain content and
    order from the result of this function.

    :param ref: reference PDBModel
    :type  ref: PDBModel
    :param breaks: look for chain breaks in backbone coordinates
    :type  breaks: 1||0
    :param fractLimit: minimum fraction of matching fragment length below
                       which a chain match is discarded
    :type  fractLimit: float

    :return: chainIndices, chainIndices_ref
    :rtype: ([int], [int])
    """
    i, i_ref = self.compareAtoms( ref )

    c0  = self.atom2chainIndices( i, breaks=breaks )
    c_r = ref.atom2chainIndices( i_ref, breaks=breaks )

    ## dirty hack to throw out single matching residues
    c0 = [ c for c in c0 \
           if self.__chainFraction( c, ref ) > fractLimit  ]
    c_r= [ c for c in c_r \
           if ref.__chainFraction( c, self ) > fractLimit  ]

    return c0, c_r
def biomodel(self, assembly = 0):
    """
    Return the 'biologically relevant assembly' of this model
    according to the information in the PDB's BIOMT record (captured in
    info['BIOMT']).
    This removes redundant chains and performs symmetry operations to
    complete multimeric structures. Some PDBs define several alternative
    biological units: usually (0) the author-defined one and (1)
    software-defined -- see :class:`lenBiounits`.

    Note: The BIOMT data are currently not updated during take/compress
    calls which may change chain indices and content. This method is
    therefore best run on an original PDB record before any other
    modifications are performed.

    :param assembly: assembly index (default: 0 .. author-determined unit)
    :type  assembly: int
    :return: PDBModel; biologically relevant assembly, or self if no
             BIOMT record is available
    """
    try:
        biounit = BU.BioUnit(self, self.info['BIOMT'])
        r = biounit.makeMultimer(assembly)
    except AttributeError:
        ## no BIOMT information -- return the model unchanged
        r = self

    return r
def lenBiounits (self):
    """
    Number of biological assemblies defined in PDB BIOMT record, if any.

    :return: number of alternative biological assemblies defined in
             PDB header (0 if no BIOMT record is available)
    :rtype: int
    """
    try:
        biounit = BU.BioUnit(self, self.info['BIOMT'])
        return len(list(biounit.keys()))
    except AttributeError:
        ## no BIOMT information attached to this model
        return 0
def atomkey( self, compress=True ):
    """
    Create a string key encoding the atom content of this model independent
    of the order in which atoms appear within residues. Atom names are
    simply sorted alphabetically within residues and then concatenated.

    :param compress: compress key with zlib (default: true)
    :type  compress: bool

    :return: key formed from sorted atom content of model; bytes if
             compressed, str otherwise
    :rtype: bytes or str
    """
    import zlib
    rindex = N0.concatenate( (self.resIndex(), [len(self)] ) )

    r = ''
    if len(rindex) == 0:
        return r

    anames = self.atoms['name'] ## cache for faster access

    for i in range(len(rindex)-1):
        a = anames[rindex[i]:rindex[i+1]]
        a.sort()
        a = ''.join(a)
        r = r + a

    if compress:
        ## zlib.compress requires bytes in python 3 -- encode first
        return zlib.compress(r.encode('utf-8'))
    return r
#############
## TESTING
#############
import biskit.test as BT
class _TestData(object):
    """Class-level cache so the test PDB is loaded only once (see Test.prepare)."""
    ## cached PDBModel instance, filled lazily by Test.prepare()
    MODEL = None
class Test(BT.BiskitTest):
    """Test class """

    #: load test PDB once into class rather than 3 times into every instance
    ## for some reason doesn't actually work
    MODEL = None

    def prepare(self):
        import tempfile

        ## loading output file from X-plor (cached in _TestData across runs)
        self.MODEL = _TestData.MODEL or B.PDBModel( T.testRoot()+'/com/1BGS.pdb')
        _TestData.MODEL = self.MODEL
        self.m = self.MODEL

        ## temporary output file names, removed again in cleanUp()
        self.fout_pdb = tempfile.mktemp( '_test1.pdb' )
        self.fout1 = tempfile.mktemp( '_test1.model' )
        self.fout2 = tempfile.mktemp( '_test2.model' )

    def cleanUp( self ):
        T.tryRemove( self.fout1 )
        T.tryRemove( self.fout2 )
        T.tryRemove( self.fout_pdb )

    def test_removeRes(self):
        """PDBModel.removeRes test"""
        t = time.time()
        ## work on a copy so the shared cached model stays intact
        self._m = self.m.clone()
        self._m.removeRes(['TIP3', 'HOH'])
        self.assertEqual( len(self._m), 1968)
        self.assertAlmostEqual( self._m.mass(), 21325.90004, 3 )
        if self.local: print("removeRes: ", time.time() - t)

    def test_chainMethods(self):
        """PDBModel chain methods test"""
        ## X-plor doesn't write chainIds, so during the simulation
        ## we store them in the last letter of the segId. Here we
        ## restore the chainId.
        self._m = self.m.clone()
        self._m.addChainFromSegid()

        ## start positions of all chains
        chainIdx = self._m.chainIndex().tolist()

        ## print some chain info
        if self.local:
            print('The molecule consists of %i chains'% self.m.lenChains())
            print('\tChainId \tFirst atom')
            for i in chainIdx:
                print('\t%s \t\t%i'%(self._m.atoms['chain_id'][i], int(i)))

        ## iterate over all chains
        for c in range( 0, len( chainIdx ) ):

            if self.local:
                print("chain ", c, " starts with ", end=' ')
                print(self._m.atoms['residue_name'][ chainIdx[c] ], end=' ')
                print(" and has sequence: ")

            ## mask out atoms of all other chains
            chainMask = N0.equal( self._m.chainMap( breaks=1 ), c )
            if self.local:
                print(self._m.sequence( chainMask ))

        self.assertEqual( self._m.lenChains(), 4)

    def test_sorting(self):
        """PDBModel sorting test"""
        if self.local:
            print("sorting atoms alphabetically...")
        m2 = self.m.compress( self.m.maskProtein() )
        sort = m2.argsort()
        m2 = m2.sort( sort )

        self.assertAlmostEqual( N0.sum( m2.centerOfMass() ), 23.1032009125,2)

    def test_chainBreaks(self):
        """PDBModel chain break handling and writing test"""
        self.m4 = B.PDBModel( T.testRoot()+'/com/1BGS_original.pdb')
        self.assertEqual( self.m4.lenChains(), 9 )
        self.assertEqual( self.m4.lenChains( breaks=1 ), 9 )
        self.assertEqual( self.m4.lenChains( breaks=1, singleRes=1, solvent=1),
                          9 )
        self.m4.writePdb( self.fout_pdb, ter=2 )

    def test_chainBreaks2(self):
        """PDBModel more complicated chain break detection"""
        self.m5 = B.PDBModel( T.testRoot()+'/pdbclean/foldx_citche.pdb')
        breaks = self.m5.chainBreaks()
        self.assertEqual( len(breaks), 2 )

        ## limitation of the method: same model but now with capping residues
        ## filling the gap
        self.m6 = B.PDBModel( T.testRoot()+'/pdbclean/citche_capped.pdb')
        breaks = self.m6.chainBreaks()
        self.assertEqual( len(breaks), 1 )

    def test_chainSingleResidues( self ):
        """PDBModel single residue chain test"""
        self.m5 = B.PDBModel( T.testRoot() + '/amber/1HPT_0.pdb' )
        self.assertTrue( self.m5.lenChains() < 10, 'single residue chains' )

    def test_rename(self):
        """PDBModel renameAmberRes tests"""
        self.m3 = B.PDBModel( T.testRoot()+'/amber/leap/1HPT_dry.pdb')

        ## count Amber-specific residue name variants before renaming
        n_cyx = self.m3.atoms['residue_name'].count('CYX')
        n_hid = self.m3.atoms['residue_name'].count('HID')
        n_hip = self.m3.atoms['residue_name'].count('HIP')
        n_hie = self.m3.atoms['residue_name'].count('HIE')
        n_hix = n_hid + n_hie + n_hip

        self.m3.renameAmberRes()

        ## all variants should be folded back into the standard names
        self.assertEqual(n_cyx, self.m3.atoms['residue_name'].count('CYS'))
        self.assertEqual(n_hix, self.m3.atoms['residue_name'].count('HIS'))

    def test_xplor2amber(self):
        """PDBModel xplor2amber test"""
        ## test is simply back-converting a PDB comming from 'ambpdb -aatm'
        ## a better test would be starting from an actually xplor-generated PDB
        m1 = B.PDBModel( T.testRoot() +'/amber/1HPT_0dry.pdb' )
        m1.xplor2amber( aatm=True )

        m2 = B.PDBModel( T.testRoot() + '/amber/1HPT_0dry_amberformat.pdb' )

        self.assertEqual( m1.atomNames(), m2.atomNames() )

    def test_report(self):
        """PDBModel report&plot test"""
        self.report_output = self.m.report( prnt=self.local,
                                            plot=(self.local or self.VERBOSITY > 2) )

    def test_sourceHandling(self):
        """PDBModel source / disconnection tests"""
        self._m = self.m.clone()
        anames = self.m.atoms['name']
        xyz0   = self.m.getXyz()[0]

        self._m.slim()

        ## _m2 uses _m1 as source
        self._m2 = B.PDBModel( self._m )
        l1 = self._m2.atoms['name']
        self.assertEqual( l1, anames )

        ## remove unchanged profiles and coordinates
        self._m2.slim()

        ## fetch them again from source (of source)
        l2 = self._m2.atoms['name']
        self.assertEqual( l2, anames )

        ## disconnect _m from PDB file source
        self._m.saveAs( self.fout2 )

        self._m2.slim()
        self._m.slim()

        ## this should now trigger the reloading of fout2
        self.assertEqual( self._m2.atoms['name'], anames )
        self.assertTrue( N.all( self._m2.getXyz()[0] == xyz0) )

        ## after disconnection, slim() should not have any effect
        self._m2.disconnect()
        self._m2.slim()
        self.assertTrue( self._m2.atoms.profiles['name'] is not None )

    def test_mergeChains( self ):
        """PDBModel.mergeChains test"""
        m = self.m.takeChains( [0] )
        res_numbers = m['residue_number']
        atm_numbers = m['serial_number']
        chain_ids   = m['chain_id']

        ## split the chain into two models, then re-concatenate
        m1 = m.takeResidues( list(range(3)) )
        m2 = m.takeResidues( list(range(3, m.lenResidues())) )
        m2.renumberResidues()
        m2['chain_id'] = len(m2) * ['X']
        self.m1 = m1
        self.m2 = m2
        self.r = m1.concat( m2 )
        r = self.r

        self.assertTrue( r.lenChains() == m.lenChains() + 1 )

        ## merging should restore the original chain layout and numbering
        r.mergeChains( 0 )
        self.r = r
        self.assertTrue( r.lenChains() == m.lenChains() )
        self.assertTrue( N.all( N0.array(r['chain_id']) == chain_ids ) )
        self.assertTrue( N.all( N0.array(r['residue_number']) == res_numbers ) )

    def test_mergeResidues( self ):
        """PDBModel.mergeResidues test"""
        m = self.m.clone()
        gg_position = m.sequence().find( 'GG' )
        len_r = m.lenResidues()
        len_a = m.lenAtoms()
        r_gly = Residue( m, gg_position )

        m.mergeResidues( gg_position )
        r_gly.reset()

        ## one residue fewer, but the atom count is unchanged
        self.assertEqual( m.lenResidues(), len_r - 1 )
        self.assertEqual( m.lenAtoms(), len_a )
        self.assertEqual( len( r_gly ), 2 * 5 )

    def test_getset(self):
        """PDBModel.__get/set__ test"""
        self.assertEqual( self.m[10]['occupancy'], 1.0 )
        self.assertEqual( self.m['chain_id', 'changed'], 0 )
        self.assertEqual( len(self.m['chain_id'] ), len( self.m ) )
        self.assertTrue( type( self.m['date']) is str )

        ## assigning a residue-length profile stores it as residue profile
        self.m['resname'] = self.m.atom2resProfile('residue_name')
        self.assertEqual( len( self.m['resname'] ), self.m.lenResidues() )

        ## unknown keys fall through to the info dictionary
        self.m.info['tested'] = False
        self.m['tested'] = True
        self.assertTrue( self.m.info['tested'] )

        self.m['serial_number', 'default'] = 1
        self.assertTrue( self.m.atoms['serial_number','default'] == 1 )

        self.m['resname', 'changed'] = 0
        self.assertFalse( self.m.residues.isChanged( 'resname' ) )

        self.m['index'] = list(range( len( self.m)))
        self.assertTrue( self.m['index'][-1] == len( self.m ) - 1 )

    def test_slice(self):
        """PDBModel.__slice__ test"""
        self.assertTrue( len( self.m[0:100:20] ) == 5 )

    def test_various(self):
        """PDBModel various tests"""
        ## an empty model must still return an array for coordinates
        m = PDBModel()
        self.assertEqual( type( m.getXyz() ), N.ndarray )

    def test_compareChains(self):
        """PDBModel.compareChains test"""
        m = self.m.clone()
        m2 = PDBModel()
        ## extract first 100 atoms of each chain
        for i in range(m.lenChains()):
            m2 = m2.concat( m.takeChains([i]).take( list(range(100)) ) )

        m3 = m2.takeChains( [2,3,0,1] )  ## re-order chains

        i, iref = m2.compareChains( m3 )
class TestExe( BT.BiskitTest ):
    """PDBModel tests that rely on external applications"""

    ## only run when externally-dependent tests are requested
    TAGS = [BT.EXE]

    def test_structureFit( self ):
        """PDBModel.structureFit test"""
        m   = T.load( T.testRoot( 'tmalign/1huy_citrine.model' ) )
        ref = T.load( T.testRoot( 'tmalign/1zgp_dsred_dimer.model' ) )
        ref = ref.takeChains( [0] )

        ## superimpose m onto ref using the external TM-align program
        r = m.structureFit( ref )
        diff = r.centerOfMass() - ref.centerOfMass()

        if self.local:
            print('center of mass deviation: \n%r' % diff)

        self.assertEqual( r.info['tm_rmsd'], 1.76 )
        self.assertTrue( N.all( N0.absolute(diff) < 1 ),
                         'superposition failed: %r' % diff)
def clock( s, ns=None ): ## pragma: no cover
    """
    Profile the python statement *s* and print the 20 most expensive
    calls (sorted by cumulative time) plus their callers.

    :param s: python statement to profile
    :type  s: str
    :param ns: namespace in which to execute the statement
               (default: this module's globals)
    :type  ns: dict

    :return: return value of cProfile.runctx (None)
    """
    import cProfile

    if ns is None:
        ns = globals()

    ## NOTE: the previous implementation tried locals().update(ns), which
    ## has no effect inside a function in CPython; runctx passes the
    ## namespace to the profiled statement explicitly instead.
    r = cProfile.runctx( s, ns, ns, 'report.out' )

    ## Analyzing
    import pstats
    p = pstats.Stats('report.out')
    p.strip_dirs()

    ## long steps and methods calling them
    p.sort_stats('cumulative').print_stats(20)
    p.print_callers( 20 )

    return r
if __name__ == '__main__':
    ## run this module's test suite via the biskit test framework
    BT.localTest()
| graik/biskit | biskit/pdbModel.py | Python | gpl-3.0 | 139,409 | [
"Amber"
] | 92d01e54ec6aa43175b6b278e7c7cdf3bc716babffba74af9759036673c082d2 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 18 16:46:57 2015
@author: liorf
"""
from numpy import *
from matplotlib.mlab import find
from scipy.stats import mode, chisquare
import time
def clean_tree_for_pickle(tree_node):
    """
    Recursively strip unpicklable members (query lambdas) from a decision
    tree. Mutates the tree in place; also returns the node for convenience.
    Note: returns None early (without cleaning sons) for leaf nodes whose
    chosen_query is already None.
    """
    # tree_node.relations=None
    if tree_node.chosen_query is None:
        return
    try:
        ## justify may itself be a tree node (recursive classifier)
        clean_tree_for_pickle(tree_node.justify)
    except:  ## NOTE(review): bare except -- silently skips non-tree justify values
        pass
    #for (tree_root,_,_) in tree_node.cool_things:
    #    clean_tree_for_pickle(tree_root)
    tree_node.chosen_query=None
    for son in tree_node.sons.values():
        clean_tree_for_pickle(son)
    return tree_node
def entropy(tags):
    '''Shannon entropy of a tagging: 0 if all tags are equal, 1 for a
    uniform binary split; lower = better.
    Assumes binary 0-1 tagging only (not +-1 !!!).
    entropy = sum(-f*log2(f)) where f are the frequencies of each value'''
    total = len(tags) * 1.0
    ## relative frequency of each distinct tag value
    freqs = [count_nonzero(tags == val) / total for val in frozenset(tags)]
    return -sum([f * log2(f) for f in freqs])
def statistic_test(tagging, feature_values):
    '''need to compare the two sides I split (how many of each label in each one)

    NOTE(review): the unconditional return on the first line disables this
    test entirely -- everything below it is dead code kept for reference.
    When enabled it runs a chi-square test between the label counts on the
    two sides of a binary split (high statistic + low p-value -> good).'''
    return 0.0,0.0
    if len(frozenset(feature_values))>2:
        return 0.0,0.0 #only works for 2 values
    locs= find(feature_values==1)
    locs2= find(feature_values!=1)
    observed= array([len(find(tagging[locs]==1)),len(find(tagging[locs]!=1))])
    expected= array([len(find(tagging[locs2]==1)),len(find(tagging[locs2]!=1))])
    if any(expected==0):
        if any(observed==0):
            return inf, 0.0 #this is good for us
        return chisquare(expected, observed)
    return chisquare(observed, expected) #high stat+low p->good
def info_gain(curr_node_tags, feature_values): #0 if same divide, 1 if perfect
    '''computes simple info-gain for a split:
    IG = H(T) - sum_v P(v) * H(T | feature == v)'''
    curr_ent = entropy(curr_node_tags) #current entropy H(T)
    #sum over all values: #elements with this value/#total elements * entropy(elements with this value)
    cond_ent = 0.0
    total_elem_sz = 1.0*len(feature_values)
    for value in frozenset(feature_values):
        ## indices of the elements taking this feature value
        locs= find(feature_values == value)
        value_prob = len(locs)/total_elem_sz
        cond_ent += value_prob*entropy(curr_node_tags[locs])
    return curr_ent- cond_ent
def ig_ratio(curr_node_tags, feature_values, discard_na=False):
    '''Information-gain ratio: info_gain normalized by the intrinsic value
    (split entropy) of the feature, which penalizes many-valued features.

    discard_na: if True, elements whose feature value is -1 (N/A) are
    removed before computing the ratio.'''
    intrinsic_val= 0.0
    if discard_na is True:
        inds= find(feature_values!=-1)
        if len(inds)==0:
            print 'something has gone horribly wrong!'
            return 0.0
        feature_values= feature_values[inds]
        curr_node_tags= curr_node_tags[inds]
    total_elem_sz = 1.0*len(feature_values)
    for value in frozenset(feature_values):
        locs= find(feature_values == value)
        value_prob = len(locs)/total_elem_sz
        ## intrinsic value accumulates P(v)*log2(P(v)) (negated below)
        intrinsic_val += value_prob*log2(value_prob)
    if intrinsic_val==0.0: #labels are all the same! can just return ig(thing) (which is 0)
        return info_gain(curr_node_tags, feature_values)
    return -1*info_gain(curr_node_tags, feature_values)/intrinsic_val
def is_relation_key(x, relation):
    '''Return the members of x that appear as keys of relation.

    x: iterable of candidate keys
    relation: dict mapping keys to related objects
    returns: list of the elements of x that are keys of relation'''
    ## dict.has_key() was removed in python 3; `in` works in both 2 and 3
    return [y for y in x if y in relation]
def is_set_valued(relation,relname):
    '''Decide whether a relation maps keys to lists (set-valued) rather than
    to single objects.

    NOTE(review): three alternative implementations are stacked here; only
    the FIRST return executes (domain-specific name test). The second
    (yago-specific) and third (general but slow) lines are dead code kept
    for reference.'''
    return relname.startswith('reverse_') or relname=='type' or relname=='possible_cure' or (relname.startswith('drug_') and not relname=='drug_moiety')
    return relname.startswith('reverse_') or relname=='type' #yago
    return isinstance(relation.values()[0], list) #general, slow
def is_in_relation(x, relation,relname, *args):
    '''Collect everything related to the members of x through the relation.

    args[0]=optional target: if given, return whether that target is among
    the related objects (bool) instead of the list itself.
    x is a single object (iterable of keys). this works fine with [] as param'''
    res=[]
    flag= is_set_valued(relation,relname)
    if flag is False:
        ## single-valued relation: each key maps to one object
        for y in x:
            bob = relation.get(y)
            if bob is not None:
                res+=[bob]
    else: #relation is reversed
        ## set-valued relation: each key maps to a list of objects
        for y in x:
            res+= relation.get(y, [])
    if len(args)==0:
        return res #list of strings
    return args[0] in res
def relabel(complex_objs, old_tagging, majority=True):
    '''Flatten set-valued objects and derive a tag for each distinct item:
    majority vote over the tags of the objects containing it (majority=True),
    or keep only items with a fully consistent tag (majority=False).

    complex_objs: list of iterables of items
    old_tagging:  tag (1 / not-1) per complex object
    returns: (items, tags) -- object array of 1-item lists and their tags'''
    ## per item: [count of positive-tagged parents, count of negative ones]
    val_map={}
    for i,obj in enumerate(complex_objs):
        for item in obj:
            ## dict.has_key() was removed in python 3; `in` works in 2 and 3
            if item not in val_map:
                val_map[item]=[0,0]
            if old_tagging[i]==1:
                val_map[item][0]+=1
            else:
                val_map[item][1]+=1
    blarf=[[a] for a,counts in val_map.items() if counts[0]-counts[1]!=0] #only take items which are true majority
    items= array(blarf, dtype=object)
    tags=[]
    for i,item in enumerate(items):
        label_counts=val_map[item[0]]
        if majority:
            ## sign trick maps positive majority -> 1, negative -> 0
            tags.append((1+sign(label_counts[0]-label_counts[1]))/2)
        else:
            if label_counts[0]==0:
                tags.append(0)
            elif label_counts[1]==0:
                tags.append(1)
            else:
                tags.append(-1) #inconsistent -> dropped below
    tags=array(tags)
    if majority:
        return items,tags
    idxs=find(tags>=0)
    return items[idxs], tags[idxs]
def apply_transforms_other(relations, transforms, objects):
    '''Chain-apply each relation in transforms to every object set.

    transforms is list of relation+direction pairs.
    objects is set of objects (set of sets)'''
    curr_objs=objects
    for relation in transforms:
        ## replace each object set by everything it relates to
        curr_objs= [is_in_relation(obj, relations[relation], relation) for obj in curr_objs]
    return curr_objs
def apply_transforms(relations, transforms, objects):
    '''Like apply_transforms_other, but the FIRST transform only filters
    objects down to the keys of that relation.

    transforms is list of relation+direction pairs.
    objects is set of objects (set of sets)

    NOTE(review): the loop after the second return is unreachable dead code
    kept from an earlier version.'''
    if len(transforms)==0:
        return objects
    curr_objs=[is_relation_key(x, relations[transforms[0]]) for x in objects]
    return apply_transforms_other(relations, transforms[1:], curr_objs)
    for relation in transforms[1:]:
        curr_objs= [is_in_relation(obj, relations[relation], relation) for obj in curr_objs]
    return curr_objs
def split_and_subtree(query_chosen, recursive_step_obj):
    '''Apply the chosen query to all objects of a node and create one child
    node per distinct query result. Returns the pieces that make up the
    node's final result tuple.'''
    query_results=array([query_chosen(x) for x in recursive_step_obj.objects])
    for val in frozenset(query_results):
        ## child node gets the subset of objects answering `val`
        inds=find(query_results==val)
        recursive_step_obj.sons[val]= TreeRecursiveSRLStep(recursive_step_obj.objects[inds], recursive_step_obj.tagging[inds], recursive_step_obj.relations,
                                                          recursive_step_obj.transforms, recursive_step_obj.n, recursive_step_obj.MAX_DEPTH,
                                                          recursive_step_obj.SPLIT_THRESH, recursive_step_obj.logfile, recursive_step_obj.stopthresh, recursive_step_obj.cond)
    return query_chosen,recursive_step_obj.sons, recursive_step_obj.good_recs, recursive_step_obj.good_recs_justify, recursive_step_obj.good_recs_trees
def ig_from_one_retag(tagging):
    '''Best information-gain ratio achievable by singling out just one
    element of each tag value -- used as the minimum bar a real query
    must beat.'''
    curr_max= -1.0
    for value in frozenset(tagging):
        ## build an indicator vector marking only the first occurrence
        ind= find(tagging==value)[0]
        tag_pos= zeros(len(tagging))
        tag_pos[ind]= 1
        curr_max= max(curr_max, ig_ratio(tagging, tag_pos))
    return curr_max
## tuning constants for the tree learner
MAX_SIZE= 1500 #TODO: change this in future(needed to make it run fast)
IGTHRESH=0.01   ## legacy minimum information-gain threshold (referenced in comments)
P_THRESH=0.001  ## p-value cutoff for the chi-square split test (0.1% level)
#BAD_RELATION=False
class TreeRecursiveSRLStep(object):
    def __init__(self, objects, tagging, relations, steps_to_curr, n, MAX_DEPTH, SPLIT_THRESH, logfile, stopthresh, cond=False):
        """
        One node of the recursive SRL decision tree.

        objects:       training objects (each an iterable of items)
        tagging:       tag per object (1 / not-1)
        relations:     dict of relation name -> relation dict
        steps_to_curr: chain of relation transforms leading to this node
        n:             number of relations to sample for recursive queries
        MAX_DEPTH:     remaining recursion depth budget
        SPLIT_THRESH:  minimum node size for splitting
        logfile:       open file object for progress logging
        stopthresh:    node size below which only word queries are tried
        cond:          if True, disable the recursive (relational) queries
        """
        self.relations= relations
        self.objects =array(objects)
        self.tagging=tagging
        if len(objects) > 0:
            ## majority tag of this node (prediction if it becomes a leaf)
            self.chosen_tag= mode(tagging)[0]
        else:
            ## empty node: mark as a dead leaf immediately
            self.chosen_query=None
            self.justify='no objects'
            self.chosen_tag=None
        self.transforms= steps_to_curr
        self.ig = None
        self.chosen_query=None
        self.cond=cond
        self.n=n
        self.MAX_DEPTH=MAX_DEPTH
        self.SPLIT_THRESH=SPLIT_THRESH
        self.logfile= logfile
        self.sons= {}
        self.cool_things=[]
        self.is_rec= False
        ## recursive queries that beat the best simple query (with metadata)
        self.good_recs= []
        self.good_recs_justify= []
        self.good_recs_trees= []
        self.stopthresh= stopthresh
        self.logfile.write(' '*len(self.transforms)+'Created node. Num objects: '+str(len(self.tagging))+': '+str(len(find(self.tagging==1)))+' positive, '+str(len(find(self.tagging!=1)))+' negative.\n' )#'Positive: '+str(self.objects[find(self.tagging==1)])+' \n Negative: '+str(self.objects[find(self.tagging!=1)])+'\n')
    def pick_split_query(self):
        '''pick one query(if simple query on objects give high IG, do that, otherwise go recursive and build tree as query'''
        '''never have to worry about len(transforms)>0'''
        ## ---- phase 1: best simple has-word query ----
        avg_word_ig=0.0
        all_words=set()
        for words in self.objects:
            all_words.update(words)
        max_ig,best_word=-1.0,''
        for word in all_words:
            word_ig= ig_ratio(self.tagging, array([1 if (word in obj) else 0 for obj in self.objects]))
            avg_word_ig+=word_ig
            if word_ig>max_ig:
                max_ig,best_word=word_ig,word
        self.chosen_query, self.ig, self.justify=lambda x: 1 if (best_word in x) else 0, max_ig, 'hasword:'+str(best_word)
        avg_word_ig=avg_word_ig/len(all_words)
        ## small/terminal node: accept the word query or give up
        if self.cond is True or self.MAX_DEPTH==0 or len(self.objects)< self.stopthresh:
            if self.ig <= ig_from_one_retag(self.tagging): #no query is useful enough
                self.chosen_query=None
                self.justify='nothing useful for tagging'
                return None,self.sons, [], [], []
            self.logfile.write('chose query: '+self.justify+'. ig is '+str(self.ig)+'\n')
            return split_and_subtree(self.chosen_query, self)
        ## ---- phase 2: try recursive (relation-based) queries ----
        #Build relation-based features(super table) for objects, see if any query good enough
        relevant_features= [] #list of relation,direction pairs that are relevant(to pick from)
        for relation in self.relations.keys():
            feature_vals=[is_relation_key(obj, self.relations[relation]) for obj in self.objects]
            val_lens=[len(val) for val in feature_vals]
            if sum(val_lens)==0 : #no objects have relevant values. This may leave us with objects whose feature values are [], which means any query will return false...
                continue #not relevant
            relevant_features.append(relation)
        min_ig_required= ig_from_one_retag(self.tagging)
        if self.ig <= min_ig_required: #was IGTHRESH
            self.chosen_query=None
            self.justify='not good enough'
            return None,self.sons, [], [], []
        if len(relevant_features)==0:
            print 'no relations can be used on this problem!'
            self.logfile.write('chose query: '+self.justify+'. ig is '+str(self.ig)+'\n')
            return split_and_subtree(self.chosen_query, self)
        #sample relevent features n times(with replacement, so recursive n is the amount chosen)
        choices=random.choice(relevant_features, self.n, True)
        temp={}
        for relation in choices:
            if temp.has_key(relation):
                temp[relation]+=1
                continue
            temp[relation]=1
        worthy_relations= temp.items()
        self.bttoo=worthy_relations
        tree_ig=0.0
        best_ig= self.ig
        before_all= time.time()
        ## try a recursive classifier per sampled relation
        for relation_used_for_recursive,rel_n in worthy_relations:
            feature_vals=[is_relation_key(obj, self.relations[relation_used_for_recursive]) for obj in self.objects]
            new_objs, new_tagging= relabel(feature_vals, self.tagging) #flatten+relabel
            #3)call TreeRecursiveSRLClassifier
            classifier_chosen= TreeRecursiveSRLClassifier(new_objs, new_tagging, self.relations, self.transforms+[relation_used_for_recursive], rel_n, self.MAX_DEPTH,self.SPLIT_THRESH, self.logfile, self.cond)
            inds= [i for i,v in enumerate(feature_vals) if len(v)>0]
            def rep_zero(x):
                ## avoid division by zero in the log-ratio below
                if x==0:
                    return 1
                return x
            blop=0.
            if len(new_tagging)!=0:
                blop=len(find(new_tagging!=mode(new_tagging)[0]))*1.0/len(new_tagging)
            self.logfile.write('trying out tree with transform:'+str(self.transforms+[relation_used_for_recursive])+'. Number of N/A:'+str(len([p for p in feature_vals if len(p)==0]))+'. Ratio of new/old misclass ratios: '+str(
                (blop)/rep_zero(len(find(self.tagging[inds]!=self.chosen_tag))*1.0/len(inds)) ) +'\n')
            before=time.time()
            classifier_chosen.train_vld_local()
            self.logfile.write('tree tried! Time: '+str(time.time()-before)+'\n')
            #TODO: FIXME!!!!!! predict shouldn't work on x but rather do something smart...
            clf_labels=array([classifier_chosen.predict(x) for x in self.objects])
            tree_ig=ig_ratio(self.tagging, clf_labels)
            tree_ig_penalty=1 #TODO? something to do with tree size and depth?
            self.cool_things.append((classifier_chosen.transforms,tree_ig,self.ig))
            if tree_ig/tree_ig_penalty >= best_ig: #any better than non-rec
                ## remember every recursive query that beats the word query
                self.good_recs.append(lambda x,c=classifier_chosen: c.predict(x))
                self.good_recs_justify.append(str(classifier_chosen.transforms))
                self.good_recs_trees.append((relation_used_for_recursive,classifier_chosen))
            if tree_ig/tree_ig_penalty >= self.ig: #if tree is better, it's the new classifier
                test_statistic, p_val= statistic_test(self.tagging, clf_labels) #high stat+low p->good
                if p_val > P_THRESH: #significance check against P_THRESH
                    continue
                self.is_rec= True
                self.logfile.write('chose tree with: '+str(self.transforms+[relation_used_for_recursive])+'. ig is '+str(tree_ig)+'\n')
                self.chosen_query= lambda x, b=classifier_chosen: b.predict(x)
                self.ig, self.justify= tree_ig, classifier_chosen.query_tree
            else:
                del classifier_chosen
        self.logfile.write('finished recursive part for node. Time: '+str(time.time()-before_all)+'\n')
        if self.ig <= 0: #no query is useful
            self.chosen_query=None
            self.justify='nothing useful for tagging'
            return None,self.sons, [],[], []
        self.logfile.write('chose query: '+str(self.justify)+'. ig is '+str(self.ig)+'\n')
        return split_and_subtree(self.chosen_query, self)
def filter_bad_rels(self, relations, value_things):
    """Drop candidate relations that cannot support a useful recursive step.

    `relations` is the list of candidate relation names to recurse on and
    `value_things` their matching average information-gain scores (parallel
    lists).  Returns (kept_relations, numpy array of their scores).
    """
    #filter transforms+non-relevant since doesn't apply
    #relations-relations I consider moving to
    new_rel_fet=[]
    new_avg_ig=[]
    for i,relation in enumerate(relations):
        # Never immediately step back through the relation (or its inverse)
        # that this node was reached by.
        if len(self.transforms)>1 and (relation=='reverse_'+self.transforms[-1] or (relation==self.transforms[-1].replace('reverse_','') and relation!=self.transforms[-1]) or (len(self.transforms)>1 and relation==self.transforms[-1])) :
            continue #no using the relation you came with on the way back...
        if value_things[i]<=0.0:
            continue #ig is 0->no point
        barf=False
        # The objects as they would look after following `relation` one step.
        new_objs=apply_transforms_other(self.relations, [relation], self.objects)
        if sum([len(obj) for obj in new_objs])==0:#all objects are []
            continue
        # Keep `relation` only if some OTHER relation still applies to the
        # transformed objects; otherwise there would be nothing left to
        # split on after recursing.
        for other_rel in self.relations.keys():
            if other_rel==relation or other_rel=='reverse_'+relation or other_rel==relation.replace('reverse_',''):
                continue
            feature_vals= [is_in_relation(obj, self.relations[other_rel],other_rel) for obj in new_objs]#apply_transforms_other(self.relations, [other_rel],new_objs)#
            val_lens=[len(val) for val in feature_vals]
            if sum(val_lens)>0 :
                barf=True
                break
        if barf:
            new_rel_fet.append(relation)
            new_avg_ig.append(value_things[i])
    return new_rel_fet, array(new_avg_ig)
def pick_split_vld_local(self):
    '''never have to worry about len(transforms==0)'''
    # Choose the best split for this node.  Stage 1 scores every
    # (relation, constant) membership query; stage 2 samples promising
    # relations and tries recursively trained sub-trees on them.  Returns
    # (None, self.sons, [], [], []) when the node becomes a leaf, otherwise
    # the result of split_and_subtree(...).
    relevant_features= [] #list of relation,direction pairs that are relevant(to pick from)
    relation_avg_igs= [] #in alg 3 we only
    self.chosen_query=None
    self.ig= -1.0#best known error: treat me as leaf
    best_ig, relation_used, constant= self.ig,None,''
    for relation in self.relations.keys():
        # Skip the relation (or its inverse) this node was reached through.
        if relation=='reverse_'+self.transforms[-1] or (relation==self.transforms[-1].replace('reverse_','') and relation!=self.transforms[-1]) or (len(self.transforms)>1 and relation==self.transforms[-1]) :
            continue #no using the relation you came with on the way back...
        feature_vals=[is_in_relation(obj, self.relations[relation],relation) for obj in self.objects] #apply_transforms_other(self.relations, [relation], self.objects) #
        val_lens=[len(val) for val in feature_vals]
        if sum(val_lens)==0 : #no objects have relevant values. This may leave us with objects whose feature values are [], which means any query will return false...
            continue #not relevant
        # Collect every constant reachable from the objects via this relation.
        relation_constants= set()
        for obj in feature_vals:
            for const in obj:
                relation_constants.add(const)
        sz=len(relation_constants)
        if sz>=MAX_SIZE:
            continue #For now, skip.
        # None stands for the "object has no value for this relation" query.
        relation_constants.add(None)
        avg_for_rel=0.0
        for const in relation_constants:
            # These lambdas capture the loop variables late, but they are
            # evaluated immediately below, so the capture is safe here.
            if const is None:
                query= lambda x: 1 if len(is_in_relation(x, self.relations[relation],relation))==0 else 0
            else:
                query= lambda x: 1 if is_in_relation(x, self.relations[relation],relation,const) else 0
            ig_for_const= ig_ratio(self.tagging, array([query(x) for x in self.objects]))
            avg_for_rel+=ig_for_const
            if ig_for_const>best_ig:
                best_ig, relation_used, constant= ig_for_const, relation,const
        relevant_features.append(relation)
        relation_avg_igs.append(avg_for_rel/len(relation_constants))
    #1)pick some relation from relevant_features(how?)
    if len(relevant_features)==0:
        self.chosen_query=None
        self.justify='no features'
        return None,self.sons, [],[], []
    # Bind the best non-recursive query found above; relation_used/constant
    # are fixed from this point on, so plain closure capture is fine.
    if constant is None:
        self.chosen_query= lambda x: 1 if len(is_in_relation(x, self.relations[relation_used],relation_used))==0 else 0
    else:
        self.chosen_query= lambda x: 1 if is_in_relation(x, self.relations[relation_used],relation_used,constant) else 0
    self.ig, self.justify= best_ig, 'hasword(X),X in relation: %s with %s'%(relation_used, constant)
    min_ig_required= ig_from_one_retag(self.tagging)
    if self.ig<= min_ig_required:
        self.chosen_query=None
        self.justify='not good enough'
        return None,self.sons, [],[], []
    # Statistical sanity check of the chosen query against the tagging.
    clf_tagging= array([self.chosen_query(x) for x in self.objects])
    test_val, p_val= statistic_test(self.tagging, clf_tagging) #high stat+low p->good
    if p_val > P_THRESH: #10% confidence level
        self.chosen_query=None
        self.justify='not good enough'
        return None,self.sons, [],[], []
    if len(self.transforms)>= self.MAX_DEPTH:
        # Depth budget exhausted: keep the non-recursive query and split now.
        self.justify=self.justify+' and max depth reached'
        self.logfile.write(' '*len(self.transforms)+'chose query: '+self.justify+'. ig is '+str(self.ig)+'\n')
        return split_and_subtree(self.chosen_query, self)
    relevant_features, relation_avg_igs =self.filter_bad_rels(relevant_features, relation_avg_igs)
    if len(relevant_features)==0: #had feature, now I don't
        self.justify=self.justify+' also cannot recurse'
        self.logfile.write(' '*len(self.transforms)+'chose query: '+self.justify+'. ig is '+str(self.ig)+'\n')
        return split_and_subtree(self.chosen_query, self)
    #sample relevent features n times(with replacement, so recursive n is the amount chosen)
    # numpy-style choice(population, size, replace, p): relations are sampled
    # proportionally to their average information gain.
    choices=random.choice(relevant_features, self.n, True, relation_avg_igs/sum(relation_avg_igs))
    temp={}
    for relation in choices:
        if temp.has_key(relation):
            temp[relation]+=1
            continue
        temp[relation]=1
    worthy_relations= temp.items()
    self.bttoo=worthy_relations
    tree_ig=0.0
    before_all= time.time()
    for relation_used_for_recursive,new_n in worthy_relations:
        feature_vals=[is_in_relation(obj, self.relations[relation_used_for_recursive],relation_used_for_recursive) for obj in self.objects]#apply_transforms_other(self.relations, [relation_used_for_recursive], self.objects) #
        new_objs, new_tagging= relabel(feature_vals, self.tagging) #flatten+relabel
        #3)call TreeRecursiveSRLClassifier
        classifier_chosen= TreeRecursiveSRLClassifier(new_objs, new_tagging, self.relations, self.transforms+[relation_used_for_recursive], new_n ,self.MAX_DEPTH, self.SPLIT_THRESH,self.logfile, self.cond)
        inds= [i for i,v in enumerate(feature_vals) if len(v)>0]
        def rep_zero(x):
            # Guard against a zero denominator in the misclassification ratio.
            if x==0:
                return 1
            return x
        blop=0.
        if len(new_tagging)!=0:
            blop=len(find(new_tagging!=mode(new_tagging)[0]))*1.0/len(new_tagging)
        self.logfile.write(' '*len(self.transforms+['a'])+'trying out tree with transform:'+str(self.transforms+[relation_used_for_recursive])+'. Number of N/A:'+str(len([p for p in feature_vals if len(p)==0]))+'. Ratio of new/old misclass ratios: '+str(
            (blop)/rep_zero(len(find(self.tagging[inds]!=self.chosen_tag))*1.0/len(inds)) ) +'\n')
        before= time.time()
        classifier_chosen.train_vld_local()
        self.logfile.write(' '*len(self.transforms)+'tree tried! Time: '+str(time.time()-before)+'\n')
        # The default argument pins `classifier_chosen` at definition time so
        # the lambda does not pick up later loop iterations.
        query=lambda x, b=classifier_chosen: b.predict(x, True)
        clf_tagging= array([query(x) for x in self.objects])
        tree_ig=ig_ratio(self.tagging, clf_tagging)
        tree_ig_penalty=1.0 #TODO? something to do with tree size and depth?
        self.cool_things.append((classifier_chosen.transforms,tree_ig,self.ig))
        if tree_ig/tree_ig_penalty >= self.ig: #if tree is better, it's the new classifier
            test_val, p_val= statistic_test(self.tagging, clf_tagging) #high stat+low p->good
            if p_val > P_THRESH: #1% confidence level
                continue #tree not good enough!
            self.is_rec= True
            self.logfile.write(' '*len(self.transforms)+'chose tree with: '+str(self.transforms+[relation_used_for_recursive])+'. ig is '+str(tree_ig)+'\n')
            self.chosen_query= lambda x, b=classifier_chosen: b.predict(x, True)
            self.ig, self.justify= tree_ig, classifier_chosen.query_tree
        else:
            del classifier_chosen
    self.logfile.write(' '*len(self.transforms)+'finished recursive part for node. Time: '+str(time.time()-before_all)+'\n')
    if self.ig <= 0 : #no query is useful
        self.justify='nothing useful for tagging'
        return None,self.sons, [],[], []
    self.logfile.write(' '*len(self.transforms)+'chose query: '+str(self.justify)+'. ig is '+str(self.ig)+'\n')
    return split_and_subtree(self.chosen_query, self)
class TreeRecursiveSRLClassifier(object):
    """Decision tree over relational data.

    A node may split either on a direct (relation, constant) membership
    query or on a recursively trained sub-tree reached by following a
    relation (a "transform") to a derived set of objects.
    """
    def __init__(self, objects, tagging, relations, transforms, n, MAX_DEPTH, SPLIT_THRESH, logfile, cond=False):
        # objects/tagging: the training examples and their labels.
        # relations: relation name -> mapping, consulted by the queries.
        # transforms: the chain of relations followed to reach this
        # sub-problem (empty list at the top level).
        # n: number of recursive candidates sampled per node.
        self.relations= relations
        self.objects =objects
        self.tagging=tagging
        self.transforms=transforms
        self.cond=cond
        self.n=n
        self.MAX_DEPTH=MAX_DEPTH
        self.SPLIT_THRESH=SPLIT_THRESH
        self.logfile= logfile
        self.logfile.write(' '*len(self.transforms)+'Created tree with transforms: '+str(self.transforms)+'\n')
        self.recursive_features=[] #the important thing in the end!
        self.feature_justify= [] #the relation used for tree?
        self.feature_trees= []
    def train(self, stopthresh):
        """Grow the tree breadth-first via pick_split_query, accumulating the
        recursive features/justifications/trees discovered at each node."""
        num_nodes= 0
        depth= 0
        self.tree_sets= [TreeRecursiveSRLStep(self.objects, self.tagging, self.relations, self.transforms, self.n, self.MAX_DEPTH, self.SPLIT_THRESH, self.logfile, stopthresh, self.cond)] #initally all in same node
        self.tree_sets[0].depth= 1
        # tree_sets grows while we iterate over it; appending sons at the end
        # is what makes this a breadth-first expansion.
        for node in self.tree_sets:
            if len(node.objects)<=self.SPLIT_THRESH or all(node.tagging==node.chosen_tag):#consistent/too small to split
                node.justify='leafed(thresh/constistant)'
                node.chosen_query=None
                self.logfile.write('node became leaf\n')
                continue #leaf
            _,sons, rec_feature, justify, trees =node.pick_split_query()
            if len(sons.keys())==0:
                node.justify='leafed(weird stuff)'
                node.chosen_query=None
                self.logfile.write('node became leaf\n')
                continue#another leaf case...
            num_nodes+=1
            self.recursive_features.extend(rec_feature)
            self.feature_justify.extend(justify)
            self.feature_trees.extend(trees)
            depth= max(depth, node.depth)
            for son in sons.values():
                son.depth= node.depth+1
            self.tree_sets.extend(sons.values())
        self.query_tree=self.tree_sets[0] #root
        self.logfile.write('training done. num_nodes: '+str(num_nodes)+'. depth: '+str(depth)+'\n')
    def train_vld_local(self):
        """Same breadth-first growth as train(), but nodes split with
        pick_split_vld_local and an infinite stop threshold."""
        num_nodes= 0
        depth= 0
        self.tree_sets=[TreeRecursiveSRLStep(self.objects, self.tagging, self.relations, self.transforms,self.n, self.MAX_DEPTH, self.SPLIT_THRESH,self.logfile, inf, self.cond)] #initally all in same node
        self.tree_sets[0].depth= 1
        self.query_tree=self.tree_sets[0] #root
        for node in self.tree_sets:
            if (len(node.objects)<self.SPLIT_THRESH or all(node.tagging==node.chosen_tag)):#consistent/too small to split
                node.justify='leafed(thresh/constistant)'
                node.chosen_query=None
                self.logfile.write(' '*len(self.transforms)+'node became leaf\n')
                continue #leaf
            _,sons,_,_,_ =node.pick_split_vld_local()
            if len(sons.keys())==0:
                node.justify='leafed(weird stuff)'
                node.chosen_query=None
                self.logfile.write(' '*len(self.transforms)+'node became leaf\n')
                continue#another leaf case...
            num_nodes+=1
            depth= max(depth, node.depth)
            for son in sons.values():
                son.depth= node.depth+1
            self.tree_sets.extend(sons.values())
        self.query_tree=self.tree_sets[0] #root
        self.logfile.write(' '*len(self.transforms)+'training done. num_nodes: '+str(num_nodes)+'. depth: '+str(depth)+'\n')
    def predict(self, new_object, flag=False):
        """Classify new_object by walking the query tree from the root.

        With flag=True only the last transform is applied (used when the
        caller has already transformed the object).  Returns the reached
        leaf's tag, or NA_VAL (-100) when the object has no value for a
        required relation.
        """
        NA_VAL= -100
        curr_node= self.query_tree
        if curr_node.chosen_tag is None:#edge case in the case of consistent
            return 0#some arbitrary rule
        while curr_node.chosen_query is not None:
            if len(curr_node.sons.keys())==1: #only one son
                curr_node=curr_node.sons[curr_node.sons.keys()[0]]
                continue
            transformed_obj= apply_transforms(curr_node.relations, curr_node.transforms, [new_object])
            if flag:
                transformed_obj= apply_transforms_other(curr_node.relations, curr_node.transforms[-1:], [new_object])
            query_val= None
            if len(transformed_obj[0])==0:
                query_val= NA_VAL
                if len(self.transforms)>0:
                    return NA_VAL
                #if not lvl0, return -1 for this
            elif not curr_node.is_rec:
                query_val= curr_node.chosen_query(transformed_obj[0])
            else:
                # Recursive split: query each transformed element separately
                # and take the majority vote over the answers.
                vals=[]
                if len(self.transforms)==0: #need apply trans
                    vals= [curr_node.chosen_query([x]) for x in transformed_obj[0] if len(apply_transforms(curr_node.relations, curr_node.justify.transforms, [[x]])[0])>0]
                else:
                    vals= [curr_node.chosen_query([x]) for x in transformed_obj[0] if len(apply_transforms_other(curr_node.relations, curr_node.justify.transforms[-1:], [[x]])[0])>0]
                if len(vals)>0:
                    query_val= int(mode(vals)[0][0]) #ISSUE: mode is problem if equal...
                else:
                    query_val= NA_VAL #query for tree is -1
            tmp= curr_node.chosen_tag
            curr_node=curr_node.sons.get(query_val)
            if curr_node is None: #tried tree that has no N/A in train, but does in test/ example was []
                return tmp #best possible guess
        return int(curr_node.chosen_tag)
class FeatureGenerationFromRDF(object):
    """Derives new boolean features for tagged objects from an RDF-style
    relation dictionary, by training recursive SRL trees and harvesting the
    queries they discover."""

    def __init__(self, objects, tagging, relations):
        self.objects = objects
        self.tagging = tagging
        self.relations = relations
        self.new_features = []    # callables: object -> 0/1
        self.new_justify = []     # human-readable description per feature
        self.feature_trees = []   # the trees that produced the features

    def generate_features(self, n, max_depth, split_thresh, logfile, STOPTHRESH=10, version=1):
        """Populate new_features/new_justify/feature_trees.

        version 1 trains a single tree on all objects; version 2 trains ten
        trees, each on a random half of the objects, and pools the results.
        """
        if version == 1:
            learner = TreeRecursiveSRLClassifier(self.objects, self.tagging, self.relations, [], n, max_depth, split_thresh, logfile)
            learner.train(STOPTHRESH)  # STOPTHRESH: minimum number of objects
            self.new_features = list(learner.recursive_features)
            self.new_justify = list(learner.feature_justify)
            self.feature_trees = list(learner.feature_trees)
        elif version == 2:
            for _ in xrange(10):
                subset = random.choice(len(self.objects), len(self.objects) / 2, replace=False)
                learner = TreeRecursiveSRLClassifier(self.objects[subset], self.tagging[subset], self.relations, [], n, max_depth, split_thresh, logfile)
                learner.train(STOPTHRESH)
                self.new_features.extend(learner.recursive_features)
                self.new_justify.extend(learner.feature_justify)
                self.feature_trees.extend(learner.feature_trees)
        return

    def get_new_table(self, test):
        """Build bag-of-words + generated-feature matrices.

        Returns (train matrix, tagging, test matrix, feature names,
        feature trees); columns are the training vocabulary followed by the
        generated features.
        """
        vocabulary = set()
        for words in self.objects:
            vocabulary.update(words)
        total_cols = len(vocabulary) + len(self.new_features)
        self.table = zeros((len(self.objects), total_cols))
        self.test = zeros((len(test), total_cols))
        self.feature_names = []
        # One indicator column per training-set word.
        for col, word in enumerate(vocabulary):
            self.table[:, col] = array([1 if (word in obj) else 0 for obj in self.objects])
            self.test[:, col] = array([1 if (word in obj) else 0 for obj in test])
            self.feature_names.append('has word:%s' % (word))
        # Then one column per generated relational feature.
        offset = len(vocabulary)
        for col, feature in enumerate(self.new_features):
            self.table[:, offset + col] = array([feature(obj) for obj in self.objects])
            self.test[:, offset + col] = array([feature(obj) for obj in test])
            self.feature_names.append(self.new_justify[col])
        return self.table, self.tagging, self.test, self.feature_names, self.feature_trees
if __name__=='__main__':
    #Toy example for debugging
    # Target concept: headlines about fruits/vegetables that originate from
    # the Americas.  Some mention produce, some mention America, some both,
    # some neither.
    messages=['cocoa price increase in 1964 because of cuban_missile_crisis',
    'cocoa kill person according to research from france university',
    'rice price worldwide in constant decrease due to export increase from china since 1990',
    'pineapple cake serve in oslo_peace_conference',
    'apple is not actual forbidden fruit scientist say actually pear instead',
    '20 person dead 40 injure in earthquake in turkey',
    'u.s. is no longer largest consumer of goods according to survey',
    'potato consumption in u.s. increase due new potato craze',
    'edward_snoden_leak put nsa in bad spot president barrack_obama to give statement tomorrow',
    'dog not allergic to cocoa according to new study from mit',
    'ireland_potato_famine memorial day riot cause 4 dead', #hard one since potato is america but still stuff. Mby a noisy example?
    'wheat and cucumber consumption on worldwide decline except in u.s.',
    'new corn based recipe will rock your word',
    'broccoli vote funny word of year read more inside',
    'new president of mexico allergic to avocado cannot eat guacamole',
    'india origin of moussaka eggplant import to turkey from india',
    'oslo_peace_conference best thing ever',
    '10 year since oslo_peace_conference what change',
    'cuban_missile_crisis cause rise in potato price',
    'paris celebrate memorial french_revolution with cake',
    'orange most cultivate fruit in world according to recent survey',
    'sweet_potato diet increase in popularity due to celebrity endorsement',
    'cat allergic to pepper according to new study from mit',
    'ginger cost skyrocket due to u.s. sushi craze in los_angeles',
    'bible forbid sweet_potato according to rabi from israel',
    '2016_olympics possible not take place in brazil but in mexico',
    'canada_squash soup recipe popular in u.s.'
    ] #messages on fruits/veggies that originally from america is concept. have some fruit, some america, some both, some neither
    msg_objs=array([a.split(' ') for a in messages], dtype=object)
    # Labels are given as +/-1 and remapped to 1/0.
    message_labels = (array([1,1,-1,1,-1,-1,-1,1,-1,1,1,-1,1,-1,1,-1,-1,
    -1,1,-1,-1,1,1,-1,1,-1,1])+1)/2
    test_msgs= ['potato and tomato sound the same and also come from same continent list of 10 things from the new world which surprise',
    '2014_israel_president_election soon 6 candidate for title',
    'eggplant soup popular in asia',
    'pumpkin cost worldwide increase 40 percent during halloween',
    'tomato favourite fruit of italy',
    'massive news coverage of 2016_olympics expect due to location',
    'rice has medical quality according to mit research',
    'pumpkin may color urine if consume in large quantity',
    'religious riot in the_netherlands',
    'cocoa ban in china lifted']#this test set is too hard. pumpkin is impossible, and cocoa_ban is kind of also impossible
    test=[a.split(' ') for a in test_msgs]
    test_lbl= (array([1,-1,-1,1,1,-1,-1,1,-1,1])+1)/2
    vld_msgs=['rome less visit than vatican_city according to census data',
    'why the french_revolution help shape the world today',
    'sweet_potato famine suspect in ireland connection to ireland_potato_famine suspect',
    'doctor treat cancer with salad claim pepper and tomato have medicinal effects',
    'russia annex crimea_peninsula president vladimir_putin to make statement',
    'fish cost worldwide increase due to over-fishing',
    'cocoa flavor orange tree develop in mit',
    'pineapple goes well with avocado according to flavor specialist',
    'orange orange in the_netherlands',
    'corn voted most corny new world food']
    vld=[a.split(' ') for a in vld_msgs]
    vld_lbls=(array([-1,-1,1,1,-1,-1,1,1,-1,1])+1)/2
    # Hand-built background knowledge: each relation maps a token to a value;
    # the junk entries ('blarf', 'f', ...) act as noise.
    relations={}
    relations['type']={'potato':'vegetable', 'cuban_missile_crisis':'event', 'cocoa':'fruit', 'france':'country', 'rice':'cereal', 'china':'country', 'pineapple':'fruit', 'oslo_peace_conference':'event'
    , 'apple':'fruit', 'pear':'fruit', 'turkey':'country', 'u.s.':'country', 'edward_snoden_leak':'event', 'nsa':'organization', 'obama':'person', 'dog':'animal', 'mit':'university',
    'ireland_potato_famine':'event', 'wheat':'cereal', 'cucumber':'vegetable', 'chile':'country', 'cuba':'country', 'venezuela':'country', 'brazil':'country', 'norway':'country',
    'italy':'country', 'syria':'country', 'india':'country', 'norway':'country', 'ireland':'country', 'north_america':'continent', 'south_america':'continent', 'europe':'continent',
    'asia':'continent', 'tomato':'fruit', '2014_israel_president_election':'event', 'israel':'country', 'mexico':'country'}
    relations['country_of_origin']={'potato':'chile', 'cocoa':'venezuela', 'rice':'china', 'pineapple':'brazil', 'apple':'turkey', 'pear':'italy', 'wheat':'syria', 'cucumber':'india', 'tomato':'mexico',
    'broccoli':'italy', 'corn':'mexico', 'avocado':'mexico', 'eggplant':'india', 'orange':'china', 'sweet_potato':'peru','pumpkin':'u.s.','pepper':'mexico','ginger':'china', 'canada_squash':'canada',
    'blarf':'ggg','nof':'fluff','poo':'goffof','fgfgfgfg':'gggg','a':'b', 'r':'f','t':'t'}#applys to fruits/vegs/crops
    relations['continent']={'cuba':'south_america', 'france':'europe', 'china':'asia', 'norway':'europe', 'turkey':'asia', 'u.s.':'north_america',
    'chile':'south_america', 'venezuela':'south_america', 'brazil':'south_america', 'italy':'europe', 'ireland':'europe', 'syria':'asia', 'india':'asia',
    'mexico':'south_america', 'israel':'asia', 'vatican':'europe','russia':'asia', 'peru':'south_america', 'canada':'north_america',
    'f':'g','b':'c','ggg':'fff','fluff':'t','t':'t','d':'d'}#apply to country
    relations['capital_of']={'paris':'france', 'washington_dc':'u.s.','china':'beijing','mexico':'mexico_city','brazil':'brasilia','cuba':'havana','norway':'oslo','turkey':'ankara','chile':'santiago','venezuela':'caracas','italy':'rome','vatican':'vatican_city','ireland':'dublin','syria':'damascus','india':'new_delhi', 'russia':'muscow',
    'f':'f','r':'r','d':'d','q':'p','fff':'ffg'}
    relations['city_of']={'paris':'france','los_angeles':'u.s.', 'washington_dc':'u.s.','china':'beijing','mexico':'mexico_city','brazil':'brasilia','cuba':'havana','norway':'oslo','turkey':'ankara','chile':'santiago','venezuela':'caracas','italy':'rome','vatican':'vatican_city','ireland':'dublin','syria':'damascus','india':'new_delhi', 'russia':'muscow',
    'f':'f','t':'t','q':'q','p':'p'}
    relations['president_of']={'vladimir_putin':'russia','barrack_obama':'u.s.',
    'q':'f', 'r':'r', 'f':'f','b':'c','t':'t','d':'d'}
    # relations['calorie_content_kcal']={'tomato':18, 'potato':77, 'rice':365, 'pineapple':50, 'apple':52, 'pear':57, 'wheat':327, 'cucumber':16}#apply to fruit/vegetable, numeric. missing for cocoa
    relations['happend_in_place']={'cuban_missile_crisis':'cuba', 'oslo_peace_conference':'norway', 'edward_snoden_leak':'u.s.', 'ireland_potato_famine':'ireland', '2014_israel_president_election':'israel','french_revolution':'france','2016_olympics':'brazil', 'cocoa_ban':'china',
    'fu':'f','r':'r','b':'b','c':'c','d':'d'}#apply to event(cuba missile crisis)
    #relations['happend_on_date']={'cuban_missile_crisis':1962, 'oslo_peace_conference':1993, 'edward_snoden_leak':2013, 'ireland_potato_famine':1845, '2014_israel_president_election':2014} #apply to event, numeric
    # Add a reverse_<name> relation (value -> list of keys) for each relation.
    # Python 2: keys() returns a list snapshot, so inserting the reverse_*
    # entries while looping is safe here.
    for key in relations.keys():
        new_key= 'reverse_'+key
        relations[new_key]= {}
        for (a,b) in relations[key].items():
            if relations[new_key].has_key(b):
                relations[new_key][b].append(a)
                continue
            relations[new_key][b]= [a]
    logfile= open('run_log.txt','w')
    # Generate relational features, then evaluate with an off-the-shelf SVM.
    blor= FeatureGenerationFromRDF(msg_objs, message_labels, relations)
    before=time.time()
    blor.generate_features(200, 2, 3, logfile, 1, 1)
    #blah3=TreeRecursiveSRLClassifier(msg_objs, message_labels, relations, [], 200, 2, 3, logfile)
    #blah3.train(1)
    print time.time()-before
    logfile.close()
    trn, trn_lbl, tst, feature_names, floo= blor.get_new_table(test)
    from sklearn.svm import SVC
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.feature_selection import SelectKBest
    # feature_selector= SelectKBest(chi2, k=100)
    # filtered_trn= feature_selector.fit_transform(trn, trn_lbl)
    # filtered_tst= feature_selector.transform(tst)
    blah3= SVC(kernel='linear', C=inf)
    # blah3= KNeighborsClassifier(n_neighbors=5)
    # blah3= DecisionTreeClassifier(criterion='entropy', min_samples_split=2)
    blah3.fit(trn, trn_lbl)
    # Report train/test error rates and the number of generated features.
    pred3trn=blah3.predict(trn)
    print mean(pred3trn!=trn_lbl)
    pred3tst=blah3.predict(tst)
    print mean(pred3tst!=test_lbl)
    print len(blor.new_features)
| lioritan/Thesis | problems/alg10_ficuslike.py | Python | gpl-2.0 | 41,817 | [
"VisIt"
] | e2217bb11ba632cc7f320a00a2c637ba8a69019efb21c36afdf099a887892bed |
'''
Finding matches and no matches between a fasta and a BLAST file
Usage: python find_match_2.py <fasta_file> <blast_file> <kind of sequence in blast rna/prot>
Author: Nicolas Schmelling
'''
import sys
from Bio import SeqIO
def compare(fasta_file, blast_file, kind):
    """Report which FASTA records have a hit in a tabular BLAST output.

    Writes 'match_<kind>.txt' (FASTA description + BLAST subject id, one per
    line) for records whose id appears as a query in the BLAST file, and
    'no_match_<kind>.txt' (FASTA description only) for the rest, then prints
    the two counts.

    fasta_file -- path to the FASTA input
    blast_file -- path to whitespace-separated BLAST output
                  (column 1 = query id, column 2 = subject id)
    kind       -- label used in the output file names (e.g. 'rna'/'prot')
    """
    RNA_hit = {}
    fasta_hit = {}
    num_match = 0
    num_no_match = 0

    # Map query id -> subject id from the BLAST table.
    with open(blast_file, 'r') as blast:
        for line in blast:
            fields = line.split()
            RNA_hit[fields[0]] = fields[1]

    # Map record id -> full description for every FASTA sequence.
    for seq_record in SeqIO.parse(fasta_file, "fasta"):
        fasta_hit[seq_record.id] = seq_record.description

    # Fix: open the report files in `with` blocks so both are closed even if
    # a write fails (the original leaked the handles on any exception).
    with open('match_' + kind + '.txt', 'w') as match:
        with open('no_match_' + kind + '.txt', 'w') as no_match:
            for hit in fasta_hit:
                if hit in RNA_hit:
                    match.write(str(fasta_hit.get(hit)) + '\t' + str(RNA_hit.get(hit)) + '\n')
                    num_match += 1
                else:
                    no_match.write(str(fasta_hit.get(hit)) + '\n')
                    num_no_match += 1

    # Single parenthesized argument: valid in both Python 2 and Python 3.
    print('Number of matches: ' + str(num_match))
    print('Number of no matches: ' + str(num_no_match))
if __name__ == "__main__":
    # CLI: <fasta_file> <blast_file> <kind of sequence in blast rna/prot>
    fasta_file, blast_file, kind = sys.argv[1], sys.argv[2], sys.argv[3]
    compare(fasta_file, blast_file, kind)
| luizirber/galGal | scripts/find_match_2.py | Python | bsd-3-clause | 1,332 | [
"BLAST"
] | 37fad626644d52e710f41032992c1330a455594f7d6388ba031c8208e2420e62 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
from math import *
from random import *
import numpy as np
class Distributions():
    """Abstract base class for value distributions."""

    def __init__(self):
        """A distribution is a set of values with certain statistical properties.

        Subclasses must provide:
          - getNext()        -- the next value of the distribution
          - getData(n)       -- the next n values (implemented here via getNext)
          - getDescription() -- dict of parameters and state variables
        """

    def getNext(self):
        """Return the next value; must be overridden by subclasses."""
        raise Exception("getNext must be implemented by all subclasses")

    def getData(self, n):
        """Return the next n values of the distribution as a list."""
        values = []
        for _ in range(n):
            values.append(self.getNext())
        return values

    def getDescription(self):
        """Return distribution parameters/state; must be overridden."""
        raise Exception("getDescription must be implemented by all subclasses")
class SineWave(Distributions):
    """Emits successive samples of a sine wave.

    params may supply 'period' (default pi), 'amplitude' (default 1) and
    'phaseShift' (default 0); supplied keys are pop()ed from the caller's
    dict, matching the historical behavior of this class.
    """

    def __init__(self, params={}):
        self.period = params.pop('period') if 'period' in params else pi
        self.amplitude = params.pop('amplitude') if 'amplitude' in params else 1
        self.phaseShift = params.pop('phaseShift') if 'phaseShift' in params else 0
        self.valueNum = 0  # index of the next sample to emit

    def getNext(self):
        """Return the sample at the current index and advance the index."""
        angle = 2 * pi * (self.period) * self.valueNum * (pi / 180) - self.phaseShift
        sample = self.amplitude * np.sin(angle)
        self.valueNum += 1
        return sample

    def getData(self, numOfValues):
        """Return the next numOfValues samples as a list."""
        return Distributions.getData(self, numOfValues)

    def getDescription(self):
        """Return the wave parameters and how many samples were emitted."""
        return dict(name='SineWave', period=self.period,
                    amplitude=self.amplitude, phaseShift=self.phaseShift,
                    numOfValues=self.valueNum)
class RandomCategories(Distributions):
    """Emits random lowercase category strings of length 3..15."""

    def __init__(self, params={}):
        self.valueNum = 0  # count of values emitted so far
        self.alphabet = 'abcdefghijklmnopqrstuvwxyz'

    def getNext(self):
        """Return a random string of 3-15 distinct lowercase letters."""
        self.valueNum += 1
        length = randint(3, 15)
        return ''.join(sample(self.alphabet, length))

    def getData(self, numOfValues):
        """Return the next numOfValues category strings as a list."""
        return Distributions.getData(self, numOfValues)

    def getDescription(self):
        """Return the distribution name and emitted-value count."""
        return dict(name='Random Categories', numOfValues=self.valueNum)
class GaussianDistribution(Distributions):
    """Generates a gaussian distribution"""
    def __init__(self, params={}):
        # params must contain 'numOfValues'; 'mean' (default 0) and 'std'
        # (default 0.6) are optional.  Keys are pop()ed, so the caller's
        # dict is mutated.  The whole sample is pre-drawn with numpy here.
        self.valueNum=0
        assert 'numOfValues' in params
        self.numOfValues = params.pop('numOfValues')
        if 'mean' in params: self.mean = params.pop('mean')
        else: self.mean = 0
        if 'std' in params: self.std=params.pop('std')
        else: self.std = 0.6
        self.records = np.random.normal(self.mean, self.std, self.numOfValues)
    def getNext(self):
        # Fails via assert once the pre-drawn pool is exhausted.
        assert (self.numOfValues>self.valueNum)
        nextValue = self.records[self.valueNum]
        self.valueNum+=1
        return nextValue
    def getData(self):
        # Unlike the base class, takes no argument: returns all numOfValues
        # pre-drawn samples.
        return Distributions.getData(self, self.numOfValues)
    def getDescription(self):
        description = dict(name='GaussianDistribution', mean=self.mean,
        standardDeviation=self.std, numOfValues=self.valueNum)
        # NOTE(review): a trailing `return description` appears to be
        # truncated from this view of the file -- confirm against the source.
| EricSB/nupic | src/nupic/data/generators/distributions.py | Python | agpl-3.0 | 4,449 | [
"Gaussian"
] | ab3b74cba6dfaa319b87af053cf87beca3dd0b13126d40459fb913f3c774a667 |
# This script is executed in the main console namespace so
# that all the variables defined here become console variables.
from __future__ import division
import director
from director import irisdriver
import os
import sys
import PythonQt
import json
from PythonQt import QtCore, QtGui
from time import time
import imp
import director.applogic as app
from director import drcargs
from director import vtkAll as vtk
from director import matlab
from director import jointcontrol
from director import callbacks
from director import camerabookmarks
from director import cameracontrol
from director import cameracontrolpanel
from director import bihandeddemo
from director import debrisdemo
from director import doordemo
from director import drilldemo
from director import valvedemo
from director import drivingplanner
from director import egressplanner
from director import polarisplatformplanner
from director import surprisetask
from director import continuouswalkingdemo
from director import sitstandplanner
from director import walkingtestdemo
from director import terraintask
from director import ikplanner
from director import objectmodel as om
from director import spreadsheet
from director import transformUtils
from director import tdx
from director import skybox
from director import perception
from director import segmentation
from director import cameraview
from director import colorize
from director import drakevisualizer
from director.fieldcontainer import FieldContainer
from director import robotstate
from director import roboturdf
from director import robotsystem
from director import affordancepanel
from director import filterUtils
from director import footstepsdriver
from director import footstepsdriverpanel
from director import framevisualization
from director import lcmloggerwidget
from director import lcmgl
from director import lcmoctomap
from director import lcmcollections
from director import atlasdriver
from director import atlasdriverpanel
from director import multisensepanel
from director import navigationpanel
from director import handcontrolpanel
from director import sensordatarequestpanel
from director import tasklaunchpanel
from director.jointpropagator import JointPropagator
from director import planningutils
from director import viewcolors
from director import coursemodel
from director import copmonitor
from director import robotplanlistener
from director import handdriver
from director import planplayback
from director import playbackpanel
from director import screengrabberpanel
from director import splinewidget
from director import teleoppanel
from director import motionplanningpanel
from director import vtkNumpy as vnp
from director import visualization as vis
from director import actionhandlers
from director.timercallback import TimerCallback
from director.pointpicker import PointPicker, ImagePointPicker
from director import segmentationpanel
from director import lcmUtils
from director.utime import getUtime
from director.shallowCopy import shallowCopy
from director import segmentationroutines
from director import trackers
from director import gamepad
from director import blackoutmonitor
from director.tasks import robottasks as rt
from director.tasks import taskmanagerwidget
from director.tasks.descriptions import loadTaskDescriptions
import drc as lcmdrc
import bot_core as lcmbotcore
import maps as lcmmaps
import atlas
from collections import OrderedDict
import functools
import math
import numpy as np
from director.debugVis import DebugData
from director import ioUtils as io
# --- Application bootstrap ---------------------------------------------------
# NOTE(review): drcargs, app, om, cameracontrol and segmentation are imported
# earlier in this file, above this section.
drcargs.requireStrict()
drcargs.args()
# Inject the app-level names (main window, menus, console, ...) into this
# module's globals.
app.startup(globals())
om.init(app.getMainWindow().objectTree(), app.getMainWindow().propertiesPanel())
actionhandlers.init()
# Convenience aliases for the embedded Python console.
quit = app.quit
exit = quit
view = app.getDRCView()
camera = view.camera()
tree = app.getMainWindow().objectTree()
orbit = cameracontrol.OrbitController(view)
showPolyData = segmentation.showPolyData
updatePolyData = segmentation.updatePolyData
###############################################################################
# Build the robot system (models, joint controllers, drivers, planners, ...)
# and dump all of its fields (robotStateModel, ikPlanner, manipPlanner, ...)
# into module globals for use by the rest of this script.
robotSystem = robotsystem.create(view)
globals().update(dict(robotSystem))
# Feature flags controlling which subsystems/panels are initialized below.
# They may be overridden by the director config (see the
# enableComponents/disableComponents handling further down).
useIk = True
useRobotState = True
usePerception = True
useGrid = True
useSpreadsheet = True
useFootsteps = True
useHands = True
usePlanning = True
useHumanoidDRCDemos = True
useAtlasDriver = True
useLCMGL = True
useOctomap = True
useCollections = True
useLightColorScheme = True
useLoggingWidget = True
useDrakeVisualizer = True
useNavigationPanel = True
useFootContactVis = False
useFallDetectorVis = True
useCameraFrustumVisualizer = True
useControllerRate = True
useForceDisplay = True
useSkybox = False
useDataFiles = True
useGamepad = True
useBlackoutText = False
useRandomWalk = True
useCOPMonitor = True
useCourseModel = False
useLimitJointsSentToPlanner = False
useFeetlessRobot = False
# Sensor Flags
useKinect = False
useMultisense = True
useOpenniDepthImage = False
# Signal maps shared with the spreadsheet panel below.
poseCollection = PythonQt.dd.ddSignalMap()
costCollection = PythonQt.dd.ddSignalMap()
# Apply per-user overrides from the director config: a fixed-base arm flag and
# lists of component names to force off/on. This file is Python 2 (print
# statements); at module scope locals() is globals(), so the assignments below
# really do update the use* flags defined above.
if 'userConfig' in drcargs.getDirectorConfig():
    if 'fixedBaseArm' in drcargs.getDirectorConfig()['userConfig']:
        ikPlanner.fixedBaseArm = True
if 'disableComponents' in drcargs.getDirectorConfig():
    for component in drcargs.getDirectorConfig()['disableComponents']:
        print "Disabling", component
        locals()[component] = False
if 'enableComponents' in drcargs.getDirectorConfig():
    for component in drcargs.getDirectorConfig()['enableComponents']:
        print "Enabling", component
        locals()[component] = True
if useSpreadsheet:
    spreadsheet.init(poseCollection, costCollection)
if useIk:
    def onIkStartup(ikServer, startSuccess):
        # Called once when the MATLAB planning server finishes starting.
        if startSuccess:
            app.getMainWindow().statusBar().showMessage('Planning server started.', 2000)
        else:
            app.showErrorMessage('Error detected while starting the matlab planning server. '
                                 'Please check the output console for more information.', title='Error starting matlab')
    ikServer.outputConsole = app.getOutputConsole()
    ikServer.infoFunc = app.displaySnoptInfo
    ikServer.connectStartupCompleted(onIkStartup)
    # startIkServer comes from the robotSystem globals injected above.
    startIkServer()
if useAtlasDriver:
    atlasdriver.systemStatus.outputConsole = app.getOutputConsole()
    atlasdriverpanel.init(atlasDriver)
else:
    app.removeToolbarMacro('ActionAtlasDriverPanel')
if usePerception:
    segmentationpanel.init()
    cameraview.init()
    colorize.init()
    cameraview.cameraView.initImageRotations(robotStateModel)
    cameraview.cameraView.rayCallback = segmentation.extractPointsAlongClickRay
    if useMultisense:
        multisensepanel.init(perception.multisenseDriver)
    else:
        app.removeToolbarMacro('ActionMultisensePanel')
    sensordatarequestpanel.init()
    # for kintinuous, use 'CAMERA_FUSED', 'CAMERA_TSDF'
    disparityPointCloud = segmentation.DisparityPointCloudItem('stereo point cloud', 'MULTISENSE_CAMERA', 'MULTISENSE_CAMERA_LEFT', cameraview.imageManager)
    disparityPointCloud.addToView(view)
    om.addToObjectModel(disparityPointCloud, parentObj=om.findObjectByName('sensors'))
    def createPointerTracker():
        # Lazy factory: builds a tracker for the hand-held pointer tip using
        # the stereo point cloud (used by the drill demo section below).
        return trackers.PointerTracker(robotStateModel, disparityPointCloud)
    if useOpenniDepthImage:
        openniDepthPointCloud = segmentation.DisparityPointCloudItem('openni point cloud', 'OPENNI_FRAME', 'OPENNI_FRAME_LEFT', cameraview.imageManager)
        openniDepthPointCloud.addToView(view)
        om.addToObjectModel(openniDepthPointCloud, parentObj=om.findObjectByName('sensors'))
if useGrid:
    grid = vis.showGrid(view, color=[0,0,0], alpha=0.1)
    grid.setProperty('Surface Mode', 'Surface with edges')
# Light color scheme by default; the menu action below toggles to dark.
app.setBackgroundColor([0.3, 0.3, 0.35], [0.95,0.95,1])
viewOptions = vis.ViewOptionsItem(view)
om.addToObjectModel(viewOptions, parentObj=om.findObjectByName('sensors'))
viewBackgroundLightHandler = viewcolors.ViewBackgroundLightHandler(viewOptions, grid,
                            app.getToolsMenuActions()['ActionToggleBackgroundLight'])
if not useLightColorScheme:
    viewBackgroundLightHandler.action.trigger()
# Optional panels and LCM-backed renderers. When a panel is disabled, its
# toolbar shortcut is removed so the UI stays consistent.
if useHands:
    handcontrolpanel.init(lHandDriver, rHandDriver, robotStateModel, robotStateJointController, view)
else:
    app.removeToolbarMacro('ActionHandControlPanel')
if useFootsteps:
    footstepsPanel = footstepsdriverpanel.init(footstepsDriver, robotStateModel, robotStateJointController, irisDriver)
else:
    app.removeToolbarMacro('ActionFootstepPanel')
if useLCMGL:
    lcmglManager = lcmgl.init(view)
    app.MenuActionToggleHelper('Tools', 'Renderer - LCM GL', lcmglManager.isEnabled, lcmglManager.setEnabled)
if useOctomap:
    octomapManager = lcmoctomap.init(view)
    app.MenuActionToggleHelper('Tools', 'Renderer - Octomap', octomapManager.isEnabled, octomapManager.setEnabled)
if useCollections:
    collectionsManager = lcmcollections.init(view)
    app.MenuActionToggleHelper('Tools', 'Renderer - Collections', collectionsManager.isEnabled, collectionsManager.setEnabled)
if useDrakeVisualizer:
    drakeVisualizer = drakevisualizer.DrakeVisualizer(view)
    app.MenuActionToggleHelper('Tools', 'Renderer - Drake', drakeVisualizer.isEnabled, drakeVisualizer.setEnabled)
if useNavigationPanel:
    navigationPanel = navigationpanel.init(robotStateJointController, footstepsDriver)
    picker = PointPicker(view, callback=navigationPanel.pointPickerStoredFootsteps, numberOfPoints=2)
    #picker.start()
if usePlanning:
    # --- plan playback helpers ----------------------------------------------
    # Animate plans on the translucent playback robot model.
    def showPose(pose):
        playbackRobotModel.setProperty('Visible', True)
        playbackJointController.setPose('show_pose', pose)
    def playPlan(plan):
        playPlans([plan])
    def playPlans(plans):
        planPlayback.stopAnimation()
        playbackRobotModel.setProperty('Visible', True)
        planPlayback.playPlans(plans, playbackJointController)
    def playManipPlan():
        playPlan(manipPlanner.lastManipPlan)
    def playWalkingPlan():
        playPlan(footstepsDriver.lastWalkingPlan)
    def plotManipPlan():
        planPlayback.plotPlan(manipPlanner.lastManipPlan)
    # Convenience posture plans computed from the current estimated state.
    def planStand():
        ikPlanner.computeStandPlan(robotStateJointController.q)
    def planNominal():
        ikPlanner.computeNominalPlan(robotStateJointController.q)
    def planHomeStand():
        ''' Move the robot back to a safe posture, 1m above its feet, w/o moving the hands '''
        ikPlanner.computeHomeStandPlan(robotStateJointController.q, footstepsDriver.getFeetMidPoint(robotStateModel), 1.0167)
    def planHomeNominal():
        ''' Move the robot back to a safe posture, 1m above its feet, w/o moving the hands '''
        ikPlanner.computeHomeNominalPlan(robotStateJointController.q, footstepsDriver.getFeetMidPoint(robotStateModel), 1.0167)
    if useMultisense:
        # Segmentation helpers that operate on the latest lidar revolution.
        def fitDrillMultisense():
            pd = om.findObjectByName('Multisense').model.revPolyData
            om.removeFromObjectModel(om.findObjectByName('debug'))
            segmentation.findAndFitDrillBarrel(pd)
        def refitBlocks(autoApprove=True):
            polyData = om.findObjectByName('Multisense').model.revPolyData
            segmentation.updateBlockAffordances(polyData)
            if autoApprove:
                approveRefit()
        def approveRefit():
            # Copy each '<name> refit' block's pose and geometry back onto the
            # original affordance, then hide the refit copy.
            for obj in om.getObjects():
                if isinstance(obj, segmentation.BlockAffordanceItem):
                    if 'refit' in obj.getProperty('Name'):
                        originalObj = om.findObjectByName(obj.getProperty('Name').replace(' refit', ''))
                        if originalObj:
                            originalObj.params = obj.params
                            originalObj.polyData.DeepCopy(obj.polyData)
                            originalObj.actor.GetUserTransform().SetMatrix(obj.actor.GetUserTransform().GetMatrix())
                            originalObj.actor.GetUserTransform().Modified()
                            obj.setProperty('Visible', False)
    # Request map/depth products from the maps server over LCM.
    def sendDataRequest(requestType, repeatTime=0.0):
        msg = lcmmaps.data_request_t()
        msg.type = requestType
        msg.period = int(repeatTime*10) # period is specified in tenths of a second
        msgList = lcmmaps.data_request_list_t()
        msgList.utime = getUtime()
        msgList.requests = [msg]
        msgList.num_requests = len(msgList.requests)
        lcmUtils.publish('DATA_REQUEST', msgList)
    def sendSceneHeightRequest(repeatTime=0.0):
        sendDataRequest(lcmmaps.data_request_t.HEIGHT_MAP_SCENE, repeatTime)
    def sendWorkspaceDepthRequest(repeatTime=0.0):
        sendDataRequest(lcmmaps.data_request_t.DEPTH_MAP_WORKSPACE_C, repeatTime)
    def sendSceneDepthRequest(repeatTime=0.0):
        sendDataRequest(lcmmaps.data_request_t.DEPTH_MAP_SCENE, repeatTime)
    def sendFusedDepthRequest(repeatTime=0.0):
        sendDataRequest(lcmmaps.data_request_t.FUSED_DEPTH, repeatTime)
    def sendFusedHeightRequest(repeatTime=0.0):
        sendDataRequest(lcmmaps.data_request_t.FUSED_HEIGHT, repeatTime)
    # Mirror hand/neck joints from the state model onto the teleop and
    # playback models; those joints are not carried by plan messages.
    handJoints = []
    if drcargs.args().directorConfigFile.find('atlas') != -1:
        handJoints = roboturdf.getRobotiqJoints() + ['neck_ay']
    else:
        for handModel in ikPlanner.handModels:
            handJoints += handModel.handModel.model.getJointNames()
        # filter base joints out
        handJoints = [ joint for joint in handJoints if joint.find('base')==-1 ]
    teleopJointPropagator = JointPropagator(robotStateModel, teleopRobotModel, handJoints)
    playbackJointPropagator = JointPropagator(robotStateModel, playbackRobotModel, handJoints)
    def doPropagation(model=None):
        # Only propagate to models that are currently visible.
        if teleopRobotModel.getProperty('Visible'):
            teleopJointPropagator.doPropagation()
        if playbackRobotModel.getProperty('Visible'):
            playbackJointPropagator.doPropagation()
    robotStateModel.connectModelChanged(doPropagation)
    #app.addToolbarMacro('scene height', sendSceneHeightRequest)
    #app.addToolbarMacro('scene depth', sendSceneDepthRequest)
    #app.addToolbarMacro('stereo height', sendFusedHeightRequest)
    #app.addToolbarMacro('stereo depth', sendFusedDepthRequest)
    if useLimitJointsSentToPlanner:
        planningUtils.clampToJointLimits = True
    jointLimitChecker = teleoppanel.JointLimitChecker(robotStateModel, robotStateJointController)
    jointLimitChecker.setupMenuAction()
    jointLimitChecker.start()
    if useMultisense:
        spindleSpinChecker = multisensepanel.SpindleSpinChecker(spindleMonitor)
        spindleSpinChecker.setupMenuAction()
    postureShortcuts = teleoppanel.PosturePlanShortcuts(robotStateJointController, ikPlanner, planningUtils)
    if useMultisense:
        # Toggle automatic drill fitting on each lidar revolution.
        def drillTrackerOn():
            om.findObjectByName('Multisense').model.showRevolutionCallback = fitDrillMultisense
        def drillTrackerOff():
            om.findObjectByName('Multisense').model.showRevolutionCallback = None
    def fitPosts():
        segmentation.fitVerticalPosts(segmentation.getCurrentRevolutionData())
        affordancePanel.onGetRaycastTerrain()
    ikPlanner.addPostureGoalListener(robotStateJointController)
    playbackpanel.addPanelToMainWindow(playbackPanel)
    teleoppanel.addPanelToMainWindow(teleopPanel)
    motionPlanningPanel = motionplanningpanel.init(planningUtils, robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController,
                                                   ikPlanner, manipPlanner, affordanceManager, playbackPanel.setPlan, playbackPanel.hidePlan, footstepsDriver)
    if useGamepad:
        gamePad = gamepad.Gamepad(teleopPanel, teleopJointController, ikPlanner, view)
    if useBlackoutText:
        blackoutMonitor = blackoutmonitor.BlackoutMonitor(robotStateJointController, view, cameraview, mapServerSource)
    taskPanels = OrderedDict()
    if useHumanoidDRCDemos:
        # Construct the DRC task demos and their task panels.
        debrisDemo = debrisdemo.DebrisPlannerDemo(robotStateModel, robotStateJointController, playbackRobotModel,
                                                  ikPlanner, manipPlanner, atlasdriver.driver, lHandDriver,
                                                  perception.multisenseDriver, refitBlocks)
        drillDemo = drilldemo.DrillPlannerDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner,
                                               lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver,
                                               fitDrillMultisense, robotStateJointController,
                                               playPlans, teleopPanel.showPose, cameraview, segmentationpanel)
        drillTaskPanel = drilldemo.DrillTaskPanel(drillDemo)
        valveDemo = valvedemo.ValvePlannerDemo(robotStateModel, footstepsDriver, footstepsPanel, manipPlanner, ikPlanner,
                                               lHandDriver, rHandDriver, robotStateJointController)
        valveTaskPanel = valvedemo.ValveTaskPanel(valveDemo)
        continuouswalkingDemo = continuouswalkingdemo.ContinousWalkingDemo(robotStateModel, footstepsPanel, footstepsDriver, playbackPanel, robotStateJointController, ikPlanner,
                                                                           teleopJointController, navigationPanel, cameraview)
        continuousWalkingTaskPanel = continuouswalkingdemo.ContinuousWalkingTaskPanel(continuouswalkingDemo)
        useDrivingPlanner = drivingplanner.DrivingPlanner.isCompatibleWithConfig()
        if useDrivingPlanner:
            drivingPlannerPanel = drivingplanner.DrivingPlannerPanel(robotSystem)
        walkingDemo = walkingtestdemo.walkingTestDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner,
                                                      lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver,
                                                      robotStateJointController,
                                                      playPlans, showPose)
        bihandedDemo = bihandeddemo.BihandedPlannerDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner,
                                                        lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver,
                                                        fitDrillMultisense, robotStateJointController,
                                                        playPlans, showPose, cameraview, segmentationpanel)
        doorDemo = doordemo.DoorDemo(robotStateModel, footstepsDriver, manipPlanner, ikPlanner,
                                     lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver,
                                     fitDrillMultisense, robotStateJointController,
                                     playPlans, showPose)
        doorTaskPanel = doordemo.DoorTaskPanel(doorDemo)
        terrainTaskPanel = terraintask.TerrainTaskPanel(robotSystem)
        terrainTask = terrainTaskPanel.terrainTask
        surpriseTaskPanel = surprisetask.SurpriseTaskPanel(robotSystem)
        surpriseTask = surpriseTaskPanel.planner
        egressPanel = egressplanner.EgressPanel(robotSystem)
        egressPlanner = egressPanel.egressPlanner
        # Register each task panel with the task launcher, in display order.
        if useDrivingPlanner:
            taskPanels['Driving'] = drivingPlannerPanel.widget
        taskPanels['Egress'] = egressPanel.widget
        taskPanels['Door'] = doorTaskPanel.widget
        taskPanels['Valve'] = valveTaskPanel.widget
        taskPanels['Drill'] = drillTaskPanel.widget
        taskPanels['Surprise'] = surpriseTaskPanel.widget
        taskPanels['Terrain'] = terrainTaskPanel.widget
        taskPanels['Continuous Walking'] = continuousWalkingTaskPanel.widget
        tasklaunchpanel.init(taskPanels)
    splinewidget.init(view, handFactory, robotStateModel)
    rt.robotSystem = robotSystem
    taskManagerPanel = taskmanagerwidget.init()
    # Load the (name, description) task library entries into the queue widget.
    for taskDescription in loadTaskDescriptions():
        taskManagerPanel.taskQueueWidget.loadTaskDescription(taskDescription[0], taskDescription[1])
    taskManagerPanel.taskQueueWidget.setCurrentQueue('Task library')
    # Lock down the auto-created scene objects so users cannot delete them.
    for obj in om.getObjects():
        obj.setProperty('Deletable', False)
if useCOPMonitor and not ikPlanner.fixedBaseArm:
    copMonitor = copmonitor.COPMonitor(robotSystem, view);
if useLoggingWidget:
    w = lcmloggerwidget.LCMLoggerWidget(statusBar=app.getMainWindow().statusBar())
    app.getMainWindow().statusBar().addPermanentWidget(w.button)
if useControllerRate:
    class ControllerRateLabel(object):
        '''
        Displays a controller frequency in the status bar
        '''
        def __init__(self, atlasDriver, statusBar):
            self.atlasDriver = atlasDriver
            self.label = QtGui.QLabel('')
            statusBar.addPermanentWidget(self.label)
            # Poll the driver once per second.
            self.timer = TimerCallback(targetFps=1)
            self.timer.callback = self.showRate
            self.timer.start()
        def showRate(self):
            rate = self.atlasDriver.getControllerRate()
            rate = 'unknown' if rate is None else '%d hz' % rate
            self.label.text = 'Controller rate: %s' % rate
    controllerRateLabel = ControllerRateLabel(atlasDriver, app.getMainWindow().statusBar())
if useForceDisplay:
    class LCMForceDisplay(object):
        '''
        Displays foot force sensor signals in a status bar widget or label widget
        '''
        def onRobotState(self,msg):
            self.l_foot_force_z = msg.force_torque.l_foot_force_z
            self.r_foot_force_z = msg.force_torque.r_foot_force_z
        def __init__(self, channel, statusBar=None):
            self.sub = lcmUtils.addSubscriber(channel, lcmbotcore.robot_state_t, self.onRobotState)
            self.label = QtGui.QLabel('')
            statusBar.addPermanentWidget(self.label)
            # Refresh the label at 10 Hz regardless of message rate.
            self.timer = TimerCallback(targetFps=10)
            self.timer.callback = self.showRate
            self.timer.start()
            self.l_foot_force_z = 0
            self.r_foot_force_z = 0
        def __del__(self):
            lcmUtils.removeSubscriber(self.sub)
        def showRate(self):
            # NOTE(review): this 'global' declaration appears unused here.
            global leftInContact, rightInContact
            self.label.text = '%.2f | %.2f' % (self.l_foot_force_z,self.r_foot_force_z)
    rateComputer = LCMForceDisplay('EST_ROBOT_STATE', app.getMainWindow().statusBar())
if useSkybox:
    skyboxDataDir = os.path.expanduser('~/Downloads/skybox')
    imageMap = skybox.getSkyboxImages(skyboxDataDir)
    skyboxObjs = skybox.createSkybox(imageMap, view)
    skybox.connectSkyboxCamera(view)
    #skybox.createTextureGround(os.path.join(skyboxDataDir, 'Dirt_seamless.jpg'), view)
    #view.camera().SetViewAngle(60)
class RobotLinkHighligher(object):
    '''
    Temporarily recolors robot links, remembering each link's original color
    so it can be restored later.
    '''

    def __init__(self, robotModel):
        self.robotModel = robotModel
        # Maps linkName -> color saved before the first highlight.
        self.previousColors = {}

    def highlightLink(self, linkName, color):
        '''Tint linkName with an RGB triple in [0, 1]; no-op for unknown links.'''
        currentColor = self.robotModel.model.getLinkColor(linkName)
        if not currentColor.isValid():
            return
        # Remember only the first pre-highlight color for this link.
        self.previousColors.setdefault(linkName, currentColor)
        alpha = self.robotModel.getProperty('Alpha')
        red, green, blue = [channel * 255 for channel in color]
        self.robotModel.model.setLinkColor(linkName, QtGui.QColor(red, green, blue, alpha * 255))

    def dehighlightLink(self, linkName):
        '''Restore the color saved by highlightLink; no-op if not highlighted.'''
        savedColor = self.previousColors.pop(linkName, None)
        if savedColor is None:
            return
        savedColor.setAlpha(self.robotModel.getProperty('Alpha') * 255)
        self.robotModel.model.setLinkColor(linkName, savedColor)
robotHighlighter = RobotLinkHighligher(robotStateModel)
if useFootContactVis:
    class LCMContactDisplay(object):
        '''
        Displays (controller) contact state by changing foot mesh color
        '''
        def onFootContact(self, msg):
            # Highlight each foot blue while it is in contact.
            for linkName, inContact in [[self.leftFootLink, msg.left_contact > 0.0], [self.rightFootLink, msg.right_contact > 0.0]]:
                if inContact:
                    robotHighlighter.highlightLink(linkName, [0, 0, 1])
                else:
                    robotHighlighter.dehighlightLink(linkName)
        def __init__(self, channel):
            self.leftFootLink = drcargs.getDirectorConfig()['leftFootLink']
            self.rightFootLink = drcargs.getDirectorConfig()['rightFootLink']
            footContactSub = lcmUtils.addSubscriber(channel, lcmdrc.foot_contact_estimate_t, self.onFootContact)
            footContactSub.setSpeedLimit(60)
    footContactVis = LCMContactDisplay('FOOT_CONTACT_ESTIMATE')
if useFallDetectorVis:
    def onPlanStatus(msg):
        # Tint torso/pelvis orange while recovering, red while bracing.
        links = ['pelvis', 'utorso']
        if msg.plan_type == lcmdrc.plan_status_t.RECOVERING:
            for link in links:
                robotHighlighter.highlightLink(link, [1,0.4,0.0])
        elif msg.plan_type == lcmdrc.plan_status_t.BRACING:
            for link in links:
                robotHighlighter.highlightLink(link, [1, 0, 0])
        else:
            for link in links:
                robotHighlighter.dehighlightLink(link)
    fallDetectorSub = lcmUtils.addSubscriber("PLAN_EXECUTION_STATUS", lcmdrc.plan_status_t, onPlanStatus)
    fallDetectorSub.setSpeedLimit(10)
if useDataFiles:
    # Open any data files passed on the command line.
    for filename in drcargs.args().data_files:
        actionhandlers.onOpenFile(filename)
if useCameraFrustumVisualizer and cameraview.CameraFrustumVisualizer.isCompatibleWithConfig():
    cameraFrustumVisualizer = cameraview.CameraFrustumVisualizer(robotStateModel, cameraview.imageManager, 'MULTISENSE_CAMERA_LEFT')
class ImageOverlayManager(object):
    '''
    Shows one of the camera image views as a widget overlaid on the 3D view.

    The overlay is self.desiredWidth pixels wide; its height follows the
    image's aspect ratio. Optionally attaches an ImagePointPicker.
    '''

    def __init__(self):
        self.viewName = 'MULTISENSE_CAMERA_LEFT'
        self.desiredWidth = 400
        self.position = [0, 0]
        self.usePicker = False
        self.imageView = None
        self.imagePicker = None
        self._prevParent = None
        # Fallback size so show() works even before the first image arrives
        # (previously self.size was unset in that case and show() raised).
        self.size = [self.desiredWidth, self.desiredWidth]
        self._updateAspectRatio()

    def setWidth(self, width):
        '''Change the overlay width and re-show it at the matching height.'''
        self.desiredWidth = width
        self._updateAspectRatio()
        self.hide()
        self.show()

    def _updateAspectRatio(self):
        # Recompute self.size from the current image extent; keeps the previous
        # size when no image has been received yet (extent components are -1).
        imageExtent = cameraview.imageManager.images[self.viewName].GetExtent()
        if imageExtent[1] != -1 and imageExtent[3] != -1:
            self.imageSize = [imageExtent[1]+1, imageExtent[3]+1]
            # BUGFIX: use true division. Under Python 2 the original integer
            # division truncated the aspect ratio (e.g. 1024x544 -> ratio 1,
            # producing a square overlay) and raised ZeroDivisionError for
            # images taller than they are wide.
            imageAspectRatio = float(self.imageSize[0]) / self.imageSize[1]
            self.size = [self.desiredWidth, int(round(self.desiredWidth / imageAspectRatio))]

    def show(self):
        '''Reparent the image view into the 3D view as an overlay widget.'''
        if self.imageView:
            return  # already shown
        imageView = cameraview.views[self.viewName]
        self.imageView = imageView
        self._prevParent = imageView.view.parent()
        self._updateAspectRatio()
        imageView.view.hide()
        imageView.view.setParent(view)
        imageView.view.resize(self.size[0], self.size[1])
        imageView.view.move(self.position[0], self.position[1])
        imageView.view.show()
        if self.usePicker:
            self.imagePicker = ImagePointPicker(imageView)
            self.imagePicker.start()

    def hide(self):
        '''Restore the image view to its previous parent widget.'''
        if self.imageView:
            self.imageView.view.hide()
            self.imageView.view.setParent(self._prevParent)
            self.imageView.view.show()
            self.imageView = None
        if self.imagePicker:
            self.imagePicker.stop()
class ToggleImageViewHandler(object):
    '''Wires the Tools menu 'ActionToggleImageView' checkbox to show/hide a manager.'''

    def __init__(self, manager):
        self.manager = manager
        self.action = app.getToolsMenuActions()['ActionToggleImageView']
        self.action.connect('triggered()', self.toggle)

    def toggle(self):
        '''Show the managed overlay when the action is checked, hide it otherwise.'''
        (self.manager.show if self.action.checked else self.manager.hide)()
# Image overlay plumbing. NOTE(review): the menu toggle handler is bound to
# imageWidget, not imageOverlayManager.
imageOverlayManager = ImageOverlayManager()
imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'MULTISENSE_CAMERA_LEFT', view, visible=False)
imageViewHandler = ToggleImageViewHandler(imageWidget)
setImageWidgetSource = imageWidget.setImageName
# Remaining panels: screen grabber, frame visualization, affordances, camera.
screengrabberpanel.init(view)
framevisualization.init(view)
affordancePanel = affordancepanel.init(view, affordanceManager, robotStateJointController, raycastDriver)
camerabookmarks.init(view)
cameraControlPanel = cameracontrolpanel.CameraControlPanel(view)
app.addWidgetToDock(cameraControlPanel.widget, action=None).hide()
def getLinkFrame(linkName, model=None):
    '''Return the frame of linkName; model defaults to the robot state model.'''
    return (model or robotStateModel).getLinkFrame(linkName)
def getBotFrame(frameName):
    '''Look up frameName relative to 'local' in the bot frames queue as a vtkTransform.'''
    transform = vtk.vtkTransform()
    transform.PostMultiply()
    cameraview.imageManager.queue.getTransform(frameName, 'local', transform)
    return transform
def showLinkFrame(linkName, model=None):
    '''Visualize linkName's frame under 'link frames'; raise if the link is unknown.'''
    linkFrame = getLinkFrame(linkName, model)
    if linkFrame:
        return vis.updateFrame(linkFrame, linkName, parent='link frames')
    raise Exception('Link not found: ' + linkName)
def sendEstRobotState(pose=None):
    '''Publish the given drake pose (default: current estimated pose) as EST_ROBOT_STATE.'''
    if pose is None:
        pose = robotStateJointController.q
    msg = robotstate.drakePoseToRobotState(pose)
    lcmUtils.publish('EST_ROBOT_STATE', msg)
# Timer that can republish the estimated state periodically (not started here).
estRobotStatePublisher = TimerCallback(callback=sendEstRobotState)
def enableArmEncoders():
    '''Ask the controller to use the arm encoders (utime=1 encodes "enable").'''
    enableMsg = lcmdrc.utime_t()
    enableMsg.utime = 1
    lcmUtils.publish('ENABLE_ENCODERS', enableMsg)
def disableArmEncoders():
    '''Ask the controller to stop using the arm encoders (utime=-1 encodes "disable").'''
    disableMsg = lcmdrc.utime_t()
    disableMsg.utime = -1
    lcmUtils.publish('ENABLE_ENCODERS', disableMsg)
def sendDesiredPumpPsi(desiredPsi):
    '''Forward a hydraulic pump pressure setpoint, in psi, to the Atlas driver.'''
    atlasDriver.sendDesiredPumpPsi(desiredPsi)
app.setCameraTerrainModeEnabled(view, True)
app.resetCamera(viewDirection=[-1,0,0], view=view)
# Drill Demo Functions for in-image rendering:
# This whole section is disabled (useDrillDemo = False); kept for reference.
useDrillDemo = False
if useDrillDemo:
    def spawnHandAtCurrentLocation(side='left'):
        # NOTE(review): 'side is' compares identity, not equality; it relies
        # on CPython interning of short string literals. Should be '=='.
        if (side is 'left'):
            tf = transformUtils.copyFrame( getLinkFrame( 'l_hand_face') )
            handFactory.placeHandModelWithTransform( tf , app.getCurrentView(), 'left')
        else:
            tf = transformUtils.copyFrame( getLinkFrame( 'right_pointer_tip') )
            handFactory.placeHandModelWithTransform( tf , app.getCurrentView(), 'right')
    def drawFrameInCamera(t, frameName='new frame',visible=True):
        # Project a 3D frame into the left camera image and show it as overlay
        # polydata under 'camera overlay'.
        v = imageView.view
        q = cameraview.imageManager.queue
        localToCameraT = vtk.vtkTransform()
        q.getTransform('local', 'MULTISENSE_CAMERA_LEFT', localToCameraT)
        res = vis.showFrame( vtk.vtkTransform() , 'temp',view=v, visible=True, scale = 0.2)
        om.removeFromObjectModel(res)
        pd = res.polyData
        pd = filterUtils.transformPolyData(pd, t)
        pd = filterUtils.transformPolyData(pd, localToCameraT)
        q.projectPoints('MULTISENSE_CAMERA_LEFT', pd )
        vis.showPolyData(pd, ('overlay ' + frameName), view=v, colorByName='Axes',parent='camera overlay',visible=visible)
    def drawObjectInCamera(objectName,visible=True):
        # Same as drawFrameInCamera, but projects a named object's polydata.
        v = imageView.view
        q = cameraview.imageManager.queue
        localToCameraT = vtk.vtkTransform()
        q.getTransform('local', 'MULTISENSE_CAMERA_LEFT', localToCameraT)
        obj = om.findObjectByName(objectName)
        if obj is None:
            return
        objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform())
        objPolyDataOriginal = obj.polyData
        pd = objPolyDataOriginal
        pd = filterUtils.transformPolyData(pd, objToLocalT)
        pd = filterUtils.transformPolyData(pd, localToCameraT)
        q.projectPoints('MULTISENSE_CAMERA_LEFT', pd)
        vis.showPolyData(pd, ('overlay ' + objectName), view=v, color=[0,1,0],parent='camera overlay',visible=visible)
    def projectDrillDemoInCamera():
        # Re-render the drill, its button frames, and the sensed pointer tip
        # into the left camera image.
        q = om.findObjectByName('camera overlay')
        om.removeFromObjectModel(q)
        imageView = cameraview.views['MULTISENSE_CAMERA_LEFT']
        imageView.imageActor.SetOpacity(.2)
        drawFrameInCamera(drillDemo.drill.frame.transform, 'drill frame',visible=False)
        tf = transformUtils.copyFrame( drillDemo.drill.frame.transform )
        tf.PreMultiply()
        tf.Concatenate( drillDemo.drill.drillToButtonTransform )
        drawFrameInCamera(tf, 'drill button')
        tf2 = transformUtils.copyFrame( tf )
        tf2.PreMultiply()
        tf2.Concatenate( transformUtils.frameFromPositionAndRPY( [0,0,0] , [180,0,0] ) )
        drawFrameInCamera(tf2, 'drill button flip')
        drawObjectInCamera('drill',visible=False)
        drawObjectInCamera('sensed pointer tip')
        obj = om.findObjectByName('sensed pointer tip frame')
        if (obj is not None):
            drawFrameInCamera(obj.transform, 'sensed pointer tip frame',visible=False)
        #drawObjectInCamera('left robotiq',visible=False)
        #drawObjectInCamera('right pointer',visible=False)
        v = imageView.view
        v.render()
        showImageOverlay()
    drillDemo.pointerTracker = createPointerTracker()
    drillDemo.projectCallback = projectDrillDemoInCamera
    drillYawPreTransform = vtk.vtkTransform()
    drillYawPreTransform.PostMultiply()
    def onDrillYawSliderChanged(value):
        # Slider runs 0..360; map to a -180..180 degree yaw offset.
        yawOffset = value - 180.0
        drillDemo.drillYawSliderValue = yawOffset
        drillDemo.updateDrillToHand()
    app.getMainWindow().macrosToolBar().addWidget(QtGui.QLabel('drill yaw:'))
    slider = QtGui.QSlider(QtCore.Qt.Horizontal)
    slider.setMaximum(360)
    slider.setValue(180)
    slider.setMaximumWidth(200)
    slider.connect('valueChanged(int)', onDrillYawSliderChanged)
    app.getMainWindow().macrosToolBar().addWidget(slider)
    def sendPointerPrep():
        drillDemo.planPointerPressGaze(-0.05)
    def sendPointerPress():
        drillDemo.planPointerPressGaze(0.01)
    def sendPointerPressDeep():
        drillDemo.planPointerPressGaze(0.015)
    app.addToolbarMacro('drill posture', drillDemo.planBothRaisePowerOn)
    app.addToolbarMacro('pointer prep', sendPointerPrep)
    app.addToolbarMacro('pointer press', sendPointerPress)
    app.addToolbarMacro('pointer press deep', sendPointerPressDeep)
    import signal
    def sendMatlabSigint():
        # Interrupt the MATLAB planning subprocess, like Ctrl+C in its console.
        ikServer.comm.client.proc.send_signal(signal.SIGINT)
    #app.addToolbarMacro('Ctrl+C MATLAB', sendMatlabSigint)
class AffordanceTextureUpdater(object):
    '''Periodically applies or removes camera textures on affordances (10 Hz).'''
    def __init__(self, affordanceManager):
        self.affordanceManager = affordanceManager
        self.timer = TimerCallback(targetFps=10)
        self.timer.callback = self.updateTextures
        self.timer.start()
    def updateTexture(self, obj):
        # Apply or remove the camera texture based on the object's property.
        if obj.getProperty('Camera Texture Enabled'):
            cameraview.applyCameraTexture(obj, cameraview.imageManager)
        else:
            cameraview.disableCameraTexture(obj)
        obj._renderAllViews()
    def updateTextures(self):
        # NOTE(review): iterates the module-level affordanceManager rather than
        # self.affordanceManager -- the same object here, but worth confirming.
        for aff in affordanceManager.getAffordances():
            self.updateTexture(aff)
affordanceTextureUpdater = AffordanceTextureUpdater(affordanceManager)
def drawCenterOfMass(model):
    # Draw the model's center of mass, projected down to the feet-midpoint height.
    stanceFrame = footstepsDriver.getFeetMidPoint(model)
    com = list(model.model.getCenterOfMass())
    com[2] = stanceFrame.GetPosition()[2]
    d = DebugData()
    d.addSphere(com, radius=0.015)
    obj = vis.updatePolyData(d.getPolyData(), 'COM %s' % model.getProperty('Name'), color=[1,0,0], visible=False, parent=model)
def initCenterOfMassVisulization():
    # (sic) Keep a COM marker on each robot model, updated on model changes.
    for model in [robotStateModel, teleopRobotModel, playbackRobotModel]:
        model.connectModelChanged(drawCenterOfMass)
        drawCenterOfMass(model)
if not ikPlanner.fixedBaseArm:
    initCenterOfMassVisulization()
class RobotMoverWidget(object):
    '''
    Interactive frame widget for dragging the robot's floating base. Edits to
    the widget frame are written back to the joint controller as 'moved_pose'.
    '''

    def __init__(self, jointController):
        self.jointController = jointController
        basePosition = jointController.q[:3]
        baseRpy = jointController.q[3:6]
        widgetFrame = transformUtils.frameFromPositionAndRPY(basePosition, np.degrees(baseRpy))
        self.frame = vis.showFrame(widgetFrame, 'mover widget', scale=0.3)
        self.frame.setProperty('Edit', True)
        self.frame.connectFrameModified(self.onFrameModified)

    def onFrameModified(self, frame):
        '''Copy the widget frame's pose into a 'moved_pose' on the joint controller.'''
        movedPose = self.jointController.q.copy()
        movedPose[:3] = self.frame.transform.GetPosition()
        movedPose[3:6] = transformUtils.rollPitchYawFromTransform(self.frame.transform)
        self.jointController.setPose('moved_pose', movedPose)
class RobotGridUpdater(object):
    '''Re-centers the ground grid under the robot in 10 m steps as it walks.'''
    def __init__(self, gridFrame, robotModel, jointController):
        self.gridFrame = gridFrame
        self.robotModel = robotModel
        self.jointController = jointController
        self.robotModel.connectModelChanged(self.updateGrid)
    def updateGrid(self, model):
        pos = self.jointController.q[:3]
        # Python 2 integer division: snaps x/y to 10 m cells and z (minus the
        # 0.85 m nominal pelvis height) to 1 m steps.
        x = int(np.round(pos[0])) / 10
        y = int(np.round(pos[1])) / 10
        z = int(np.round(pos[2] - 0.85)) / 1
        t = vtk.vtkTransform()
        t.Translate((x*10,y*10,z))
        self.gridFrame.copyFrame(t)
#gridUpdater = RobotGridUpdater(grid.getChildFrame(), robotStateModel, robotStateJointController)
class IgnoreOldStateMessagesSelector(object):
    '''Checkable Tools menu item toggling jointController.ignoreOldStateMessages.'''
    def __init__(self, jointController):
        self.jointController = jointController
        self.action = app.addMenuAction('Tools', 'Ignore Old State Messages')
        self.action.setCheckable(True)
        self.action.setChecked(self.jointController.ignoreOldStateMessages)
        self.action.connect('triggered()', self.toggle)
    def toggle(self):
        self.jointController.ignoreOldStateMessages = bool(self.action.checked)
IgnoreOldStateMessagesSelector(robotStateJointController)
class RandomWalk(object):
    '''
    Commands the robot to walk to random nearby goals whenever the plan
    status reports it is standing. Call start() to subscribe; stop() to halt.
    '''
    def __init__(self, max_distance_per_plan=2):
        self.subs = []
        # Half-range (meters) of the uniform x/y goal offset.
        self.max_distance_per_plan=max_distance_per_plan
    def handleStatus(self, msg):
        if msg.plan_type == msg.STANDING:
            # Random goal within +/- max_distance_per_plan in x/y, ground
            # height 0.84 m below the pelvis, and a random yaw offset within
            # +/- 180 (np.degrees(np.pi) == 180).
            # NOTE(review): q[5] (yaw) is combined directly with that
            # degree-valued offset -- confirm the units agree downstream.
            goal = transformUtils.frameFromPositionAndRPY(
                np.array([robotStateJointController.q[0] + 2 * self.max_distance_per_plan * (np.random.random() - 0.5),
                          robotStateJointController.q[1] + 2 * self.max_distance_per_plan * (np.random.random() - 0.5),
                          robotStateJointController.q[2] - 0.84]),
                [0, 0, robotStateJointController.q[5] + 2 * np.degrees(np.pi) * (np.random.random() - 0.5)])
            request = footstepsDriver.constructFootstepPlanRequest(robotStateJointController.q, goal)
            request.params.max_num_steps = 18
            footstepsDriver.sendFootstepPlanRequest(request)
    def handleFootstepPlan(self, msg):
        # Immediately execute whatever footstep plan comes back.
        footstepsDriver.commitFootstepPlan(msg)
    def start(self):
        # Throttle status handling to one message per 5 seconds.
        sub = lcmUtils.addSubscriber('PLAN_EXECUTION_STATUS', lcmdrc.plan_status_t, self.handleStatus)
        sub.setSpeedLimit(0.2)
        self.subs.append(sub)
        self.subs.append(lcmUtils.addSubscriber('FOOTSTEP_PLAN_RESPONSE', lcmdrc.footstep_plan_t, self.handleFootstepPlan))
    def stop(self):
        for sub in self.subs:
            lcmUtils.removeSubscriber(sub)
# Optional extras, then user-provided startup scripts.
if useRandomWalk:
    randomWalk = RandomWalk()
if useCourseModel:
    courseModel = coursemodel.CourseModel()
if useKinect:
    import kinectlcm
    imageOverlayManager.viewName = "KINECT_RGB"
    #kinectlcm.startButton()
if useFeetlessRobot:
    ikPlanner.robotNoFeet = True
# Execute any user startup scripts (Python 2 execfile) with this module's
# globals visible to them.
for scriptArgs in drcargs.args().scripts:
    execfile(scriptArgs[0])
| patmarion/director | src/python/director/startup.py | Python | bsd-3-clause | 38,781 | [
"VTK"
] | 62924e445d9d0b89b8cd1149e4e311d77305c49f5be81b72006da6504280efbe |
#
# Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
# All rights reserved
#
import numpy as np
import math
class ProteinBase(object):
    '''
    Base class for the Protein setup classes.

    Accumulates rigid-body placement (translation/rotation), extra bonds
    (general and disulfide), and additional Amber parameter files (prep,
    frcmod, lib), and renders them as command strings (translate, bond,
    loadAmberPrep, loadAmberParams, ...) via the _gen_* helpers.
    '''
def __init__(self):
self._translation_vector = np.zeros(3)
self._rotatation_matrix = np.eye(3)
self._disulfide_list = []
self._general_bond = []
self._prep_files = []
self._frcmod_files = []
self._lib_files = []
def set_translation(self, translation_vector):
'''
Set the translation vector.
:param translation_vector: ``numpy.array(3)`` in nanometers
Translation happens after rotation.
'''
self._translation_vector = np.array(translation_vector)
def set_rotation(self, rotation_axis, theta):
'''
Set the rotation.
:param rotation_axis: ``numpy.array(3)`` in nanometers
:param theta: angle of rotation in degrees
Rotation happens after translation.
'''
theta = theta * 180 / math.pi
rotation_axis = rotation_axis / np.linalg.norm(rotation_axis)
a = np.cos(theta / 2.)
b, c, d = -rotation_axis * np.sin(theta / 2.)
self._rotatation_matrix = np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
[2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
[2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
def add_bond(self, res_index_i, res_index_j, atom_name_i, atom_name_j, bond_type):
'''
Add a general bond.
:param res_index_i: one-based index of residue i
:param res_index_j: one-based index of residue j
:param atom_name_i: string name of i
:param atom_name_j: string name of j
:param bond_type: string specifying the "S", "D","T"... bond
.. note::
indexing starts from one and the residue numbering from the PDB file is ignored.
'''
self._general_bond.append((res_index_i, res_index_j,atom_name_i,atom_name_j,bond_type))
def add_disulfide(self, res_index_i, res_index_j):
'''
Add a disulfide bond.
:param res_index_i: one-based index of residue i
:param res_index_j: one-based index of residue j
.. note::
indexing starts from one and the residue numbering from the PDB file is ignored. When loading
from a PDB or creating a sequence, residue name must be CYX, not CYS.
'''
self._disulfide_list.append((res_index_i, res_index_j))
def add_prep_file(self,fname):
'''
Add a prep file.
This will be needed when using residues that
are not defined in the general amber force field
'''
self._prep_files.append(fname)
def add_frcmod_file(self,fname):
'''
Add a frcmod file.
This will be needed when using residues that
are not defined in the general amber force field
'''
self._frcmod_files.append(fname)
def add_lib_file(self,fname):
'''
Add a lib file.
This will be needed when using residues that
are not defined in the general amber force field
'''
self._lib_files.append(fname)
def _gen_translation_string(self, mol_id):
return '''translate {mol_id} {{ {x} {y} {z} }}'''.format(mol_id=mol_id,
x=self._translation_vector[0],
y=self._translation_vector[1],
z=self._translation_vector[2])
def _gen_rotation_string(self, mol_id):
return ''
def _gen_bond_string(self,mol_id):
bond_strings = []
for i,j,a,b,t in self._general_bond:
d = 'bond {mol_id}.{i}.{a} {mol_id}.{j}.{b} "{t}"'.format(mol_id=mol_id, i=i, j=j, a=a, b=b, t=t)
bond_strings.append(d)
return bond_strings
def _gen_disulfide_string(self, mol_id):
disulfide_strings = []
for i, j in self._disulfide_list:
d = 'bond {mol_id}.{i}.SG {mol_id}.{j}.SG'.format(mol_id=mol_id, i=i, j=j)
disulfide_strings.append(d)
return disulfide_strings
def _gen_read_prep_string(self):
prep_string = []
for p in self._prep_files:
prep_string.append('loadAmberPrep {}'.format(p))
return prep_string
def _gen_read_frcmod_string(self):
frcmod_string = []
for p in self._frcmod_files:
frcmod_string.append('loadAmberParams {}'.format(p))
return frcmod_string
def _gen_read_lib_string(self):
lib_string = []
for p in self._lib_files:
lib_string.append('loadoff {}'.format(p))
return lib_string
class ProteinMoleculeFromSequence(ProteinBase):
    '''
    Build a protein molecule from an amino-acid sequence.

    AmberTools (tleap) does the heavy lifting; this class only emits the
    commands. The sequence uses Amber/Leap residue names, with special
    NRES/CRES variants for the N- and C-termini and alternative names for
    different protonation states (e.g. ASH for neutral ASP).

    :param sequence: sequence of the protein to create, in Leap format
    '''

    def __init__(self, sequence):
        super(ProteinMoleculeFromSequence, self).__init__()
        self._sequence = sequence

    def prepare_for_tleap(self, mol_id):
        # A sequence-based molecule needs no files staged on disk.
        pass

    def generate_tleap_input(self, mol_id):
        # Parameter files first, then the sequence, then bonds and transforms.
        commands = ['source leaprc.gaff']
        for extra in (self._gen_read_frcmod_string(),
                      self._gen_read_prep_string(),
                      self._gen_read_lib_string()):
            commands.extend(extra)
        commands.append('{mol_id} = sequence {{ {seq} }}'.format(mol_id=mol_id, seq=self._sequence))
        commands.extend(self._gen_disulfide_string(mol_id))
        commands.extend(self._gen_bond_string(mol_id))
        commands.append(self._gen_rotation_string(mol_id))
        commands.append(self._gen_translation_string(mol_id))
        return commands
class ProteinMoleculeFromPdbFile(ProteinBase):
    '''
    Build a protein molecule from an existing PDB file.

    AmberTools does the heavy lifting; the PDB contents are read once at
    construction time and re-written next to the tleap input later.

    :param pdb_path: string path to the pdb file

    .. note::
        no processing happens to this pdb file. It must be understandable by
        tleap; any atom/residue additions, deletions or renames must be done
        before MELD is invoked.
    '''

    def __init__(self, pdb_path):
        super(ProteinMoleculeFromPdbFile, self).__init__()
        with open(pdb_path) as source:
            self._pdb_contents = source.read()

    def prepare_for_tleap(self, mol_id):
        # Stage the cached PDB text in the current working directory under a
        # name derived from mol_id, where generate_tleap_input will load it.
        target = '{mol_id}.pdb'.format(mol_id=mol_id)
        with open(target, 'w') as sink:
            sink.write(self._pdb_contents)

    def generate_tleap_input(self, mol_id):
        # Parameter files first, then the PDB load, then bonds and transforms.
        commands = ['source leaprc.gaff']
        commands.extend(self._gen_read_frcmod_string())
        commands.extend(self._gen_read_prep_string())
        commands.extend(self._gen_read_lib_string())
        commands.append('{mol_id} = loadPdb {mol_id}.pdb'.format(mol_id=mol_id))
        commands.extend(self._gen_bond_string(mol_id))
        commands.extend(self._gen_disulfide_string(mol_id))
        commands.append(self._gen_rotation_string(mol_id))
        commands.append(self._gen_translation_string(mol_id))
        return commands
| laufercenter/meld | meld/system/protein.py | Python | mit | 7,985 | [
"Amber"
] | cf5492a674cbbdb8758e4d7b6fba8184bb211e1fc49aa474c87ae86f5e04b436 |
# -*- encoding: utf-8 -*-
from shapely.wkt import loads as wkt_loads
import dsl
from . import FixtureTest
class RestAreaServices(FixtureTest):
    # Integration tests: OSM highway=rest_area / highway=services features
    # should surface in the tiles as kind 'rest_area' / 'service_area',
    # as POIs when mapped as nodes and as landuse polygons when mapped as
    # areas. The long WKT/property literals below are fixture data captured
    # from openstreetmap.org; their exact values are part of the test.
    def test_rest_area_node(self):
        # Point-mapped rest area -> POI, visible from z13.
        self.generate_fixtures(dsl.way(159773030, wkt_loads('POINT (-76.73912905210828 40.99079246918038)'), {u'source': u'openstreetmap.org', u'highway': u'rest_area', u'name': u'Foo Rest Area'})) # noqa
        self.assert_has_feature(
            16, 18798, 24573, 'pois',
            {'kind': 'rest_area', 'id': 159773030, 'min_zoom': 13})
    def test_rest_area_way(self):
        # Way: Crystal Springs Rest Area (97057565)
        # Area-mapped rest area -> landuse polygon with its own sort_rank.
        self.generate_fixtures(dsl.way(97057565, wkt_loads('POLYGON ((-122.365488944754 37.54048597695269, -122.363673359734 37.53894968362569, -122.363521634282 37.53881541316188, -122.363421831454 37.53868392047339, -122.363355445955 37.53852749658311, -122.363020823511 37.53696630264469, -122.36330406232 37.5367516065384, -122.365488944754 37.54048597695269))'), {u'drinking_water': u'yes', u'toilets': u'yes', u'handicapped_accessible': u'yes', u'vending': u'yes', u'name': u'Crystal Springs Rest Area', u'sanitation': u'no', u'area': u'yes', u'route': u'280', u'way_area': u'35229.6', u'pet_area': u'yes', u'phone': u'yes', u'picnic_tables': u'yes', u'source': u'openstreetmap.org', u'addr:county': u'San Mateo', u'attribution': u'Caltrans', u'caltrans:district': u'4', u'highway': u'rest_area', u'description': u'Near San Francisco Reservoir'})) # noqa
        self.assert_has_feature(
            16, 10492, 25385, 'landuse',
            {'kind': 'rest_area', 'id': 97057565, 'sort_rank': 44})
    def test_service_area_node(self):
        # NOTE: this has been remapped as an area now. the test data here
        # is superseded by the 1698-too-many-service-areas test.
        # node: Tiffin River
        # Point-mapped services -> POI, but only at the max zoom (17).
        self.generate_fixtures(dsl.way(200412620, wkt_loads('POINT (-84.41292493378698 41.6045519557572)'), {u'source': u'openstreetmap.org', u'name': u'Tiffin River', u'highway': u'services'})) # noqa
        self.assert_has_feature(
            16, 17401, 24424, 'pois',
            {'kind': 'service_area', 'id': 200412620, 'min_zoom': 17})
    def test_service_area_way(self):
        # Way: Nicole Driveway (274732386)
        # Area-mapped services -> landuse polygon.
        self.generate_fixtures(dsl.way(274732386, wkt_loads('POLYGON ((-120.123766060274 38.09757738412661, -120.123761209371 38.0977196908478, -120.123658621766 38.0979925683359, -120.123633379106 38.0982482663423, -120.123585319239 38.098378271151, -120.123533216952 38.09837445372108, -120.123577234401 38.09825915310519, -120.123617928083 38.09797468287368, -120.123713957987 38.09768759586379, -120.123702639215 38.09747749355018, -120.123762826339 38.09746978790201, -120.123766060274 38.09757738412661))'), {u'source': u'openstreetmap.org', u'way_area': u'744.019', u'name': u'Nicole Driveway', u'highway': u'services', u'area': u'yes'})) # noqa
        self.assert_has_feature(
            16, 10900, 25256, 'landuse',
            {'kind': 'service_area', 'id': 274732386, 'sort_rank': 45})
| mapzen/vector-datasource | integration-test/480-rest_area-services.py | Python | mit | 3,053 | [
"CRYSTAL"
] | 7a46067d45c4a97461567614e7b069a7b99b5299446a7c8b59a60ed9684d058b |
#!/usr/bin/env python3
'''
Write a function named printTable() that takes a list of lists
of strings and displays it in a well-organized table with each
column right-justified. Assume that all the inner lists will
contain the same number of strings. For example, the value
could look like this:
tableData = [['apples', 'oranges', 'cherries', 'banana'],
['Alice', 'Bob', 'Carol', 'David'],
['dogs', 'cats', 'moose', 'goose']]
Your printTable() function would print the following:
apples Alice dogs
oranges Bob cats
cherries Carol moose
banana David goose
'''
# Sample data: a list of COLUMNS (not rows) -- each inner list holds the
# cells of one column of the table to be printed.
tableData = [['apples', 'oranges', 'cherries', 'banana'],
             ['Alice', 'Bob', 'Carol', 'David'],
             ['dogs', 'cats', 'moose', 'goose']]
def printTable(table):
    """Print *table* as a right-justified text table.

    :param table: list of columns, each column a list of strings; all
        columns are assumed to have the same number of entries.

    Each cell is padded to the widest string in its column; cells are
    separated (and each output line terminated) by a single space, matching
    the layout of the original implementation. Prints nothing for an empty
    table (the previous version crashed with an IndexError on ``table[0]``).
    """
    if not table or not table[0]:
        return
    # Field width for each column = length of its widest entry.
    colWidths = [max(len(item) for item in column) for column in table]
    # zip(*table) transposes the column-major data into printable rows.
    for row in zip(*table):
        for item, width in zip(row, colWidths):
            print(item.rjust(width), end=' ')
        print('')
printTable(tableData)
| apaksoy/automatetheboringstuff | practice projects/chap 06/table printer chap 6.py | Python | mit | 1,327 | [
"MOOSE"
] | d859b323da58ce8c884fbce6d575decac5f72914736c4c90cd8b89478748d1b5 |
"""
sentry.web.frontend.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.contrib import messages
from django.contrib.auth import login as login_user, authenticate
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db import IntegrityError, transaction
from django.http import HttpResponseRedirect, Http404
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.utils import timezone
from django.utils.translation import ugettext as _
from sudo.decorators import sudo_required
from sentry.models import (
UserEmail, LostPasswordHash, Project, UserOption, Authenticator
)
from sentry.signals import email_verified
from sentry.web.decorators import login_required, signed_auth_required
from sentry.web.forms.accounts import (
AccountSettingsForm, AppearanceSettingsForm,
RecoverPasswordForm, ChangePasswordRecoverForm,
EmailForm
)
from sentry.web.helpers import render_to_response
from sentry.utils import auth
def send_password_recovery_mail(user):
    """Ensure *user* has a usable LostPasswordHash and mail the reset link.

    Returns the (possibly refreshed) LostPasswordHash instance.
    """
    recovery, _ = LostPasswordHash.objects.get_or_create(user=user)
    if not recovery.is_valid():
        # Stale entry: stamp a fresh creation time and regenerate the token.
        recovery.date_added = timezone.now()
        recovery.set_hash()
        recovery.save()
    recovery.send_recover_mail()
    return recovery
@login_required
def login_redirect(request):
    """Bounce an authenticated user to their post-login destination."""
    return HttpResponseRedirect(auth.get_login_redirect(request))
def expired(request, user):
    """Handle an expired recovery link: send a fresh one and tell the user."""
    recovery = send_password_recovery_mail(user)
    context = {'email': recovery.user.email}
    return render_to_response('sentry/account/recover/expired.html', context, request)
def recover(request):
    """Render the password-recovery form; on valid submit, mail a reset link."""
    form = RecoverPasswordForm(request.POST or None)
    if not form.is_valid():
        # GET, or POST with errors: (re)display the form.
        return render_to_response('sentry/account/recover/index.html',
                                  {'form': form}, request)
    recovery = send_password_recovery_mail(form.cleaned_data['user'])
    return render_to_response('sentry/account/recover/sent.html',
                              {'email': recovery.user.email}, request)
def recover_confirm(request, user_id, hash):
    """Second step of password recovery: validate the emailed token and, on a
    valid POST, set the new password and log the user in.

    :param user_id: user primary key embedded in the recovery link
    :param hash: one-time token embedded in the recovery link
    """
    try:
        password_hash = LostPasswordHash.objects.get(user=user_id, hash=hash)
        if not password_hash.is_valid():
            # Expired token: discard it and treat it like an unknown one.
            password_hash.delete()
            raise LostPasswordHash.DoesNotExist
        user = password_hash.user
    except LostPasswordHash.DoesNotExist:
        context = {}
        tpl = 'sentry/account/recover/failure.html'
    else:
        tpl = 'sentry/account/recover/confirm.html'
        if request.method == 'POST':
            form = ChangePasswordRecoverForm(request.POST)
            if form.is_valid():
                user.set_password(form.cleaned_data['password'])
                user.save()
                # Ugly way of doing this, but Django requires the backend be set
                # (authenticate() attaches the auth backend needed by login).
                user = authenticate(
                    username=user.username,
                    password=form.cleaned_data['password'],
                )
                login_user(request, user)
                # Token is single-use: remove it now that the password is set.
                password_hash.delete()
                return login_redirect(request)
        else:
            form = ChangePasswordRecoverForm()
        context = {
            'form': form,
        }
    return render_to_response(tpl, context, request)
@login_required
def start_confirm_email(request):
    """(Re)send verification emails for any unverified addresses, then
    redirect back to the email settings page with a status message."""
    if request.user.has_unverified_emails():
        request.user.send_confirm_emails()
        pending = [e.email for e in request.user.get_unverified_emails()]
        msg = _('A verification email has been sent to %s.') % ', '.join(pending)
    else:
        msg = _('Your email (%s) has already been verified.') % request.user.email
    messages.add_message(request, messages.SUCCESS, msg)
    return HttpResponseRedirect(reverse('sentry-account-settings-emails'))
def confirm_email(request, user_id, hash):
    """Handle the link from a verification email: mark the address verified.

    Success message is assumed up front and only downgraded to an error when
    the token is bad AND the user still has something left to verify -- a
    stale link for an already-verified address is reported as success.
    """
    msg = _('Thanks for confirming your email')
    level = messages.SUCCESS
    try:
        email = UserEmail.objects.get(user=user_id, validation_hash=hash)
        if not email.hash_is_valid():
            # Expired token behaves the same as an unknown one.
            raise UserEmail.DoesNotExist
    except UserEmail.DoesNotExist:
        if request.user.is_anonymous() or request.user.has_unverified_emails():
            msg = _('There was an error confirming your email. Please try again or '
                    'visit your Account Settings to resend the verification email.')
            level = messages.ERROR
    else:
        email.is_verified = True
        email.validation_hash = ''
        email.save()
        # Notify listeners (e.g. invite acceptance) that this address is live.
        email_verified.send(email=email.email, sender=email)
    messages.add_message(request, level, msg)
    return HttpResponseRedirect(reverse('sentry-account-settings-emails'))
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def settings(request):
    """Account settings page: edit email/username/name.

    When the primary email changes, the old UserEmail row is dropped, a new
    one is created (guarded against races on the unique constraint), and a
    fresh verification email is sent.
    """
    user = request.user
    form = AccountSettingsForm(
        user, request.POST or None,
        initial={
            'email': UserEmail.get_primary_email(user).email,
            'username': user.username,
            'name': user.name,
        },
    )
    if form.is_valid():
        old_email = user.email
        form.save()
        # remove previously valid email address
        # TODO(dcramer): we should maintain validation here when we support
        # multiple email addresses
        if request.user.email != old_email:
            UserEmail.objects.filter(user=user, email=old_email).delete()
            try:
                # Nested atomic() so an IntegrityError (row already exists,
                # e.g. created concurrently) doesn't poison the outer
                # transaction started by the decorator.
                with transaction.atomic():
                    user_email = UserEmail.objects.create(
                        user=user,
                        email=user.email,
                    )
            except IntegrityError:
                pass
            else:
                user_email.set_hash()
                user_email.save()
            user.send_confirm_emails()
        messages.add_message(
            request, messages.SUCCESS, 'Your settings were saved.')
        return HttpResponseRedirect(request.path)
    context = csrf(request)
    context.update({
        'form': form,
        'page': 'settings',
        'has_2fa': Authenticator.objects.user_has_2fa(request.user),
        'AUTH_PROVIDERS': auth.get_auth_providers(),
    })
    return render_to_response('sentry/account/settings.html', context, request)
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
def twofactor_settings(request):
    """Two-factor security page: list all 2FA interfaces (enrolled or not)
    for the current user; the 'back' button returns to account settings."""
    interfaces = Authenticator.objects.all_interfaces_for_user(
        request.user, return_missing=True)
    if request.method == 'POST' and 'back' in request.POST:
        return HttpResponseRedirect(reverse('sentry-account-settings'))
    # Backup interfaces (e.g. recovery codes) don't count as "having 2FA".
    enrolled = any(i.is_enrolled and not i.is_backup_interface for i in interfaces)
    context = csrf(request)
    context.update({
        'page': 'security',
        'has_2fa': enrolled,
        'interfaces': interfaces,
    })
    return render_to_response('sentry/account/twofactor.html', context, request)
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def avatar_settings(request):
    """Avatar settings page (the actual upload is handled client-side)."""
    context = csrf(request)
    context.update({
        'page': 'avatar',
        'AUTH_PROVIDERS': auth.get_auth_providers(),
    })
    return render_to_response('sentry/account/avatar.html', context, request)
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def appearance_settings(request):
    """Appearance settings page: language, stacktrace order, timezone, clock."""
    from django.conf import settings
    options = UserOption.objects.get_all_values(user=request.user, project=None)
    # Fall back to request/site defaults for anything the user hasn't set.
    initial = {
        'language': options.get('language') or request.LANGUAGE_CODE,
        'stacktrace_order': int(options.get('stacktrace_order', -1) or -1),
        'timezone': options.get('timezone') or settings.SENTRY_DEFAULT_TIME_ZONE,
        'clock_24_hours': options.get('clock_24_hours') or False,
    }
    form = AppearanceSettingsForm(request.user, request.POST or None, initial=initial)
    if form.is_valid():
        form.save()
        messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
        return HttpResponseRedirect(request.path)
    context = csrf(request)
    context.update({
        'form': form,
        'page': 'appearance',
        'AUTH_PROVIDERS': auth.get_auth_providers(),
    })
    return render_to_response('sentry/account/appearance.html', context, request)
@csrf_protect
@never_cache
@signed_auth_required
@transaction.atomic
def email_unsubscribe_project(request, project_id):
    """Unsubscribe the user from a project's alert emails.

    Only reachable through the signed link embedded in alert emails; any
    other access path 404s, as does an unknown project id.
    """
    if not request.user_from_signed_request:
        raise Http404()
    try:
        target_project = Project.objects.get(pk=project_id)
    except Project.DoesNotExist:
        raise Http404()
    if request.method == 'POST':
        # 'cancel' means the user backed out; otherwise disable mail alerts.
        if 'cancel' not in request.POST:
            UserOption.objects.set_value(
                request.user, target_project, 'mail:alert', 0)
        return HttpResponseRedirect(auth.get_login_url())
    context = csrf(request)
    context['project'] = target_project
    return render_to_response('sentry/account/email_unsubscribe_project.html',
                              context, request)
@csrf_protect
@never_cache
@login_required
def list_identities(request):
    """List the user's linked social-auth identities."""
    from social_auth.models import UserSocialAuth
    identities = list(UserSocialAuth.objects.filter(user=request.user))
    providers = auth.get_auth_providers()
    context = csrf(request)
    context.update({
        'identity_list': identities,
        'page': 'identities',
        'AUTH_PROVIDERS': providers,
    })
    return render_to_response('sentry/account/identities.html', context, request)
@csrf_protect
@never_cache
@login_required
def show_emails(request):
    """Email management page: change the primary email, add an alternative
    email, or remove an address.

    Changing the primary email migrates any UserOption rows whose value was
    the old address, drops the old UserEmail row, and creates + verifies the
    new one. Removal is handled first and short-circuits the form logic.
    """
    user = request.user
    primary_email = UserEmail.get_primary_email(user)
    alt_emails = user.emails.all().exclude(email=primary_email.email)
    email_form = EmailForm(user, request.POST or None,
        initial={
            'primary_email': primary_email.email,
        },
    )
    if 'remove' in request.POST:
        # Deletion path: drop the posted address for this user.
        # NOTE(review): there is no guard here against removing the primary
        # address -- presumably the UI prevents it; confirm.
        email = request.POST.get('email')
        del_email = UserEmail.objects.filter(user=user, email=email)
        del_email.delete()
        return HttpResponseRedirect(request.path)
    if email_form.is_valid():
        old_email = user.email
        email_form.save()
        if user.email != old_email:
            # Re-point any user options that stored the old address.
            useroptions = UserOption.objects.filter(user=user, value=old_email)
            for option in useroptions:
                option.value = user.email
                option.save()
            UserEmail.objects.filter(user=user, email=old_email).delete()
            try:
                # Nested atomic() guards against the unique-constraint race
                # when the row already exists.
                with transaction.atomic():
                    user_email = UserEmail.objects.create(
                        user=user,
                        email=user.email,
                    )
            except IntegrityError:
                pass
            else:
                user_email.set_hash()
                user_email.save()
            user.send_confirm_emails()
        alternative_email = email_form.cleaned_data['alt_email']
        # check if this alternative email already exists for user
        if alternative_email and not UserEmail.objects.filter(user=user, email=alternative_email):
            # create alternative email for user
            try:
                with transaction.atomic():
                    new_email = UserEmail.objects.create(
                        user=user,
                        email=alternative_email
                    )
            except IntegrityError:
                pass
            else:
                new_email.set_hash()
                new_email.save()
            # send confirmation emails to any non verified emails
            user.send_confirm_emails()
        messages.add_message(
            request, messages.SUCCESS, 'Your settings were saved.')
        return HttpResponseRedirect(request.path)
    context = csrf(request)
    context.update({
        'email_form': email_form,
        'primary_email': primary_email,
        'alt_emails': alt_emails,
        'page': 'emails',
        'AUTH_PROVIDERS': auth.get_auth_providers(),
    })
    return render_to_response('sentry/account/emails.html', context, request)
| fotinakis/sentry | src/sentry/web/frontend/accounts.py | Python | bsd-3-clause | 12,500 | [
"VisIt"
] | 930fde4de89da4a7abcc885824768fcfcfa6eb99755263c1303db08adda3c3b2 |
"""
File: pitch_constraint_solver.py
Purpose: For a melody define as a p_map, and a set of policy constraints, solve for compatible melodies
satisfying constraints, as a set of p_map's.
"""
from melody.solver.p_map import PMap
from structure.note import Note
from misc.ordered_set import OrderedSet
class PitchConstraintSolver(object):
    """
    Implementation class for a constraint solver that attempts to find pitch solutions to pitch constraints.

    The solver performs a depth-first backtracking search over a PMap
    (mapping each constrained "actor" note to a target ContextualNote),
    trying candidate pitches that satisfy every policy a note participates
    in, and collecting fully-assigned maps in self.full_results.
    """
    def __init__(self, policies):
        """
        Constructor.
        :param policies: non-null set of policies
        """
        if policies is None or (not isinstance(policies, set) and not isinstance(policies, list) and \
                not isinstance(policies, OrderedSet)):
            raise Exception('Policies must be non-null and a Set')
        self._policies = OrderedSet(policies)
        # Map: actor note (v_note) -> list of policies it participates in.
        self.v_policy_map = dict()
        self._build_v_policy_map()
        # Search bookkeeping: cap on full solutions, count found, results.
        self.__instance_limit = 0
        self.__num_instances = 0
        self.__full_results = list()
    @property
    def policies(self):
        # Expose the policies as a plain list copy.
        return [p for p in self._policies]
    @property
    def num_instances(self):
        # Number of full solutions found so far in the current solve() run.
        return self.__num_instances
    @property
    def instance_limit(self):
        # Cap on full solutions; -1 means unlimited.
        return self.__instance_limit
    @property
    def full_results(self):
        # List of fully-assigned PMaps found by solve().
        return self.__full_results
    def solve(self, p_map_param, instance_limit=-1, accept_partials=False):
        """
        Solve the constraints constraint system using p_map_param as the start.
        :param p_map_param: Initial PMap to fill out.
        :param instance_limit: Number of full results to limit search; -1 no limit
        :param accept_partials: Boolean, True means return some partial results
        :return: (full_results, partial_results) -- partials empty unless accept_partials.
        """
        p_map = p_map_param if isinstance(p_map_param, PMap) else PMap(p_map_param)
        self._check_p_map(p_map)
        self.__instance_limit = instance_limit
        self.__num_instances = 0  # reset
        self.__full_results = list()
        partial_results = [p_map]
        # Unassigned actors, ordered by fewest candidate values first
        # (most-constrained-first heuristic).
        unsolved_nodes = [t[0] for t in self._build_potential_values(p_map, p_map.keys())]
        if len(unsolved_nodes) == 0:
            raise Exception('Policies insufficient for solution or parameter map is full.')
        while len(unsolved_nodes) != 0:
            v_node = unsolved_nodes[0]
            results_prime = list()
            for p_map in partial_results:
                if p_map[v_node] is None or p_map[v_node].note is None:
                    visit_results = self._visit(p_map, v_node)
                    if len(visit_results) == 0:
                        if accept_partials:  # if accept partials, keep in partial results.
                            results_prime.append(p_map)
                    else:
                        results_prime.extend(visit_results)  # note: contains possible extensions of pmap
                else:  # v_note already filled, keep it in partial_results.
                    results_prime.append(p_map)
            partial_results = results_prime
            if self.instance_limit != -1 and len(partial_results) >= self.instance_limit:
                break
            unsolved_nodes.remove(v_node)
        return self.full_results, partial_results if accept_partials else list()
    def _check_p_map(self, p_map):
        # Every actor mentioned by the policies must be a key of the PMap.
        for key in self.v_policy_map.keys():
            if key not in p_map.keys():
                raise Exception('PMap keys and policy actor keys do not match.')
    @staticmethod
    def pmap_full(p_map):
        # True when every target in the map has an assigned note.
        v = [p_map[key].note for key in p_map.keys()]
        if None in v:
            return False
        return True
    def _visit(self, p_map, v_note):
        """
        Recursive method used to derive sets of solution to the constraints constraint problem.
        :param p_map: PMap
        :param v_note: ContextualNote, source key of PMap
        :return: A set of pmaps

        For each candidate pitch of v_note, the value is tentatively written
        into p_map, all unassigned "peer" actors sharing a policy with
        v_note are solved depth-first, and the value is cleared again before
        trying the next candidate (classic backtracking).
        """
        if p_map[v_note].note is not None:  # setting means visited
            return {}
        results = OrderedSet()
        # list of tuples (v_note, {solution to v_note's policies})
        result_values = self._build_potential_values(p_map, {v_note})
        if len(result_values) == 0:
            return results
        for value in result_values[0][1]:  # [0] is for v_note; [1] is the set of values.
            p_map[v_note].note = value
            value_results = OrderedSet()  # results for this value for v_note + solutions for ALL peers to v_note
            # Advantage in the following, setting above partially solves each policy v_note is involved in.
            # For this 'value' for v_note, dive depth first through all v_note peers (all policies v_note is in)
            # which collectively we call a branch.
            # peer_candidates are all unassigned actors in v_note's policies.
            peer_candidates = self._candidate_closure(p_map, v_note)
            if len(peer_candidates) == 0:
                # We reached the end of a branch, save the result.
                if self._full_check_and_validate(p_map):
                    if self.instance_limit != -1 and self.__num_instances >= self.instance_limit:
                        return results
                    self.full_results.append(p_map.replicate())
                    self.__num_instances = self.__num_instances + 1
                else:
                    value_results.add(p_map.replicate())  # We only need a shallow copy
            else:
                for c_note in peer_candidates:
                    if self.instance_limit != -1 and self.__num_instances >= self.instance_limit:
                        results = results.union(value_results)
                        return results
                    if len(value_results) == 0:  # first time through this loop per value, visit with p_map!
                        value_results = self._visit(p_map, c_note)
                        if len(value_results) == 0:  # Indicates failure to assign c_note, move to next value.
                            break  # If one peer fails, they all will, for this 'value'!
                    else:
                        value_results_copy = value_results.copy()
                        # for peer c_note, if all r below fails (len(cand_results) == 0) should we also move on to
                        # next value? Add 'found' flag, set after union, after loop, check if False, to break
                        found = False
                        for r in value_results_copy:
                            if r[c_note].note is None:
                                cand_results = self._visit(r, c_note)
                                if len(cand_results) != 0:
                                    value_results = value_results.union(cand_results)
                                    found = True
                                value_results.remove(r)  # r has no c_note assigned, what was returned did!
                                # If not, r's peers cannot be assigned!
                        if found is False:  # Same as if part, if c_note produces no results, it cannot be assigned.
                            break  # If one peer fails, they all will!
                results = results.union(value_results)
            # Backtrack: clear the tentative assignment before the next value.
            p_map[v_note].note = None
        return results
    def _build_v_policy_map(self):
        # Invert the policy list: actor note -> policies it appears in.
        for p in self.policies:
            for v_note in p.actors:
                if v_note not in self.v_policy_map:
                    self.v_policy_map[v_note] = []
                self.v_policy_map[v_note].append(p)
    def _build_potential_values(self, p_map, v_notes):
        """
        Compute a list of tuples (v_note, {solution to v_note's policies}), the list being sorted by the number of
        solution values.
        :param p_map: PMap
        :param v_notes: list/set of ContextualNote sources to PMap
        :return: list of tuples (v_note, {solution to v_note's policies})

        Notes already assigned, notes not covered by any policy, and notes
        with no satisfying values are omitted from the result.
        """
        ranked_list = list()  # A list of tuples (v_note, {solution values})
        for v_note in v_notes:
            if p_map[v_note] is not None and p_map[v_note].note is not None:
                continue
            if v_note not in self.v_policy_map.keys():
                continue
            values = self._policy_values(p_map, v_note)
            if len(values) == 0:
                continue
            ranked_list.append((v_note, values))
        ranked_list = sorted(ranked_list, key=lambda x: len(x[1]))
        return ranked_list
    def _policy_values(self, p_map, v_note):
        """
        For v_note, find all note values for its target that satisfy all policies in which v_note is involved.
        :param p_map: PMap
        :param v_note: ContextualNote
        :return: A set of notes with same duration as v_note, varying in pitch.
        """
        pitches = None
        # Intersect the pitch sets allowed by each policy v_note belongs to.
        for p in self.v_policy_map[v_note]:
            p_values = p.values(p_map, v_note)
            if p_values is None:
                returned_pitches = OrderedSet()  # None means p cannot be satisfied!
            else:
                returned_pitches = OrderedSet()
                for n in p_values:
                    returned_pitches.add(n.diatonic_pitch)
            pitches = returned_pitches if pitches is None else pitches.intersection(returned_pitches)
        # Re-wrap surviving pitches as Notes carrying v_note's duration.
        retset = OrderedSet()
        for p in pitches:
            retset.add(Note(p, v_note.base_duration, v_note.num_dots))
        return retset
    def _candidate_closure(self, p_map, v_note):
        """
        Find all policies that have v_note as a parameter, and collect their unassigned actors into a set
        without replication.
        :param p_map: PMap
        :param v_note: ContextualNote
        :return: OrderedSet of unassigned peer actors.
        """
        policies = self.v_policy_map[v_note]
        candidates = OrderedSet()
        for p in policies:
            candidates = candidates.union(p_map.unassigned_actors(p))
        return candidates
    def _full_check_and_validate(self, p_map):
        """
        Check if p_map parameter is full and if so, satisfies all policies.
        :param p_map: p_map to be checked
        :return: True only when fully assigned AND every policy verifies.
        """
        if PitchConstraintSolver.pmap_full(p_map):
            for p in self._policies:
                if not p.verify(p_map):
                    return False
            return True
        else:
            return False
    @staticmethod
    def print_starting_nodes(starting_nodes):
        # Debug helper: print each (v_note, candidate pitches) pair, ordered
        # by the note's absolute position.
        lst = list()
        for t in starting_nodes:
            s = t[1]
            s_text = '[]' if len(s) == 0 else ', '.join('{0}'.format(n.diatonic_pitch) for n in s)
            full_text = '{0} <== {1}'.format(t[0], s_text)
            lst.append((t[0], full_text))
        ss = sorted(lst, key=lambda x: x[0].get_absolute_position())
        for i in range(0, len(ss)):
            s = ss[i]
            print('[{0}] {1}'.format(i, s[1]))
'''
Suppose we have a set of non-essential constraints.
Start by solving on the essential ones.
Then:
For each c in non-essentials.
p_map = PMap()
pick a v_note in c.actors and get its set of solutions
for each result in solutions:
p_map[v_note] = value
for each peer in c.actor not v_note:
solve for peer's values
recurse this process until all peer's are met.
This give a set of pmaps that solve c
cmap[c] = pmaps
Sort this map by len(pmaps) least to highest.
For each pmap in essential pmaps:
for each c in non-essential ordered by low #pmaps
for each p in cmap[c]:
pmap = pmap 'ored' p
recurse on this process until you meet some threshold on the solution set size.
'''
    def solve1(self, p_map_param, accept_partials=False, instance_limit=-1):
        """
        Solve the constraints constraint system using p_map as the start.
        :param p_map_param: initial PMap (or dict convertible to PMap)
        :param accept_partials: True to return partial results on failure
        :param instance_limit: cap on number of results; -1 means no limit
        :return: list of solution PMaps (possibly partial)

        NOTE(review): older solve variant kept alongside solve(); iterates
        until the set of unassigned starting nodes stops changing.
        """
        p_map = p_map_param if isinstance(p_map_param, PMap) else PMap(p_map_param)
        self._check_p_map(p_map)
        self.__instance_limit = instance_limit
        self.__num_instances = 0  # reset
        results = [p_map]
        # list of tuples (v_note, {solution to v_note's policies})
        starting_nodes_prime = self._build_potential_values(p_map, p_map.keys())
        # PitchConstraintSolver.print_starting_nodes(starting_nodes_prime)
        if len(starting_nodes_prime) == 0:
            raise Exception('Insufficient initial information for solution')
        starting_nodes = list()  # list of tuples (v_note, {solution to v_note's policies})
        has_failures = False
        while {a[0] for a in starting_nodes_prime} != {a[0] for a in starting_nodes} and len(starting_nodes_prime) != 0:
            starting_nodes = starting_nodes_prime
            results_prime = list()
            for p_map in results:
                results_prime.extend(self._visit(p_map, starting_nodes[0][0]))
            results = results_prime
            if len(results) == 0:
                has_failures = True
                break
            # TODO: This is an issue when has_failures is True and so results is empty.
            # Idea: remove starting_nodes[0][0] and use next, and if values set is empty, drop out?
            starting_nodes_prime = self._build_potential_values(results[0], results[0].keys())
        if has_failures:
            return results if accept_partials else list()
        return results
    def _visit_old(self, p_map, v_note):
        """
        Recursive method used to derive sets of solution to the constraints constraint problem.
        :param p_map: PMap
        :param v_note: ContextualNote, source key of PMap
        :return: A set of pmaps

        NOTE(review): superseded by _visit (which adds full-result
        collection via _full_check_and_validate); kept for reference.
        """
        if p_map[v_note].note is not None:  # setting means visited
            return {}
        results = set()
        result_values = self._build_potential_values(p_map, {v_note})
        if len(result_values) == 0:
            p_map[v_note].note = None
            return results
        for value in result_values[0][1]:
            value_results = set()  # results for this value for v_note + solutions for all peers to v_mote
            p_map[v_note].note = value
            # Advantage in following peers as above setting partially solves each policy v_note is involved in.
            peer_candidates = self._candidate_closure(p_map, v_note)
            if len(peer_candidates) == 0:
                pmap_full = PitchConstraintSolver.pmap_full(p_map)
                if pmap_full and self.instance_limit != -1 and self.__num_instances >= self.instance_limit:
                    results = results.union(value_results)
                    return results
                # We reached the end of a branch, save the result.
                value_results.add(p_map.replicate())  # We only need a shallow copy
                if pmap_full:
                    self.__num_instances = self.__num_instances + 1
            else:
                for c_note in peer_candidates:
                    if len(value_results) == 0:  # first time through, visit with p_map!
                        value_results = self._visit(p_map, c_note)
                        if len(value_results) == 0:  # Indicates failure to assign c_note, move to next value.
                            break  # If one peer fails, they all will!
                    else:
                        if self.instance_limit != -1 and self.__num_instances >= self.instance_limit:
                            results = results.union(value_results)
                            return results
                        value_results_copy = value_results.copy()
                        # for peer c_note, if all r below fails (len(cand_results) == 0) should we also move on to
                        # next value? Add 'found' flag, set after union, after loop, check if False, to break
                        found = False
                        for r in value_results_copy:
                            if r[c_note].note is None:
                                cand_results = self._visit(r, c_note)
                                if len(cand_results) != 0:
                                    value_results = value_results.union(cand_results)
                                    found = True
                                value_results.remove(r)
                        if found is False:  # Same as if part, if c_note produces not results, it cannot be assigned.
                            break  # If one peer fails, they all will!
                results = results.union(value_results)
            p_map[v_note].note = None
        return results
| dpazel/music_rep | melody/solver/pitch_constraint_solver.py | Python | mit | 17,648 | [
"VisIt"
] | a51cf8e0f37ff5e6f22af278d3312491f58f9733bd3065b5ee76b32dd7d64b3d |
#
# Copyright (C) 2008, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision$
# $Date$
# $Author$
# $HeadURL$
class glue_test:
    """Tiny test harness: counts checks and failures for a named caller."""
    callerName = "not set"
    failCount = 0
    testCount = 0

    def __init__(self, callerName):
        self.callerName = callerName
        self.failCount = 0
        self.testCount = 0

    def check_fail(self, didFail):
        # Every call counts as one test; a truthy flag records a failure.
        self.testCount += 1
        if didFail:
            self.failCount += 1
            print("Failed test "+str(self.testCount))

    def getFailCount(self):
        return self.failCount

    def get_summary(self):
        # One-line human-readable pass/fail summary for this caller.
        if self.failCount > 0:
            return "Failed "+str(self.failCount)+" / "+str(self.testCount)+" in "+self.callerName
        return "Passed all "+str(self.testCount)+" checks in "+self.callerName
| pclarke91/rl-glue-ext | projects/codecs/Python/src/tests/glue_test.py | Python | apache-2.0 | 1,296 | [
"Brian"
] | 9a84daf9d99b2f2b0e72f0605ce3ab814b86429946dbe6798e320a04344d462c |
# This file is adapted from https://github.com/ray-project/ray/blob/master
# /examples/rl_pong/driver.py
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# play Pong https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import time
import gym
import numpy as np
import ray
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca import OrcaContext
# Force a UTF-8 locale so child processes inherit a sane encoding.
os.environ["LANG"] = "C.UTF-8"
# Define some hyperparameters.
# The number of hidden layer neurons.
H = 200
# Step size used by the RMSProp update in the training loop below.
learning_rate = 1e-4
# Discount factor for reward.
gamma = 0.99
# The decay factor for RMSProp leaky sum of grad^2.
decay_rate = 0.99
# The input dimensionality: 80x80 grid.
D = 80 * 80
def sigmoid(x):
    """Logistic "squashing" function mapping any real input into (0, 1)."""
    z = np.exp(-x)
    return 1.0 / (1.0 + z)
def preprocess(img):
    """Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector.

    NOTE: the background-erasing assignments write through a view of the
    caller's array, so the input frame is modified in place.
    """
    # Crop rows 35..194 of the image.
    img = img[35:195]
    # Downsample by factor of 2 and keep only the first colour channel.
    img = img[::2, ::2, 0]
    # Erase background (background type 1).
    img[img == 144] = 0
    # Erase background (background type 2).
    img[img == 109] = 0
    # Set everything else (paddles, ball) to 1.
    img[img != 0] = 1
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float (== float64) is what the old alias meant.
    return img.astype(float).ravel()
def discount_rewards(r):
    """take 1D float array of rewards and compute discounted reward"""
    out = np.zeros_like(r)
    acc = 0
    for t in reversed(range(r.size)):
        # A nonzero reward marks a game boundary in Pong: restart the sum.
        if r[t] != 0:
            acc = 0
        acc = acc * gamma + r[t]
        out[t] = acc
    return out
# defines the policy network
# x is a vector that holds the preprocessed pixel information
def policy_forward(x, model):
    """Run the two-layer policy net.

    Returns (probability of taking action 2 i.e. UP, hidden activations).
    """
    hidden = np.dot(model["W1"], x)  # hidden layer neuron activations
    hidden[hidden < 0] = 0  # ReLU nonlinearity: threshold at zero
    log_odds = np.dot(model["W2"], hidden)  # log probability of going up
    # Logistic squashing (inlined sigmoid, identical arithmetic).
    prob_up = 1.0 / (1.0 + np.exp(-log_odds))
    return prob_up, hidden
def policy_backward(eph, epx, epdlogp, model):
    """backward pass. (eph is array of intermediate hidden states)

    Returns the gradients {'W1': ..., 'W2': ...} for the rollout.
    """
    grad_w2 = np.dot(eph.T, epdlogp).ravel()
    dhidden = np.outer(epdlogp, model["W2"])
    # ReLU gradient: zero wherever the hidden unit was inactive.
    dhidden[eph <= 0] = 0
    grad_w1 = np.dot(dhidden.T, epx)
    return {"W1": grad_w1, "W2": grad_w2}
@ray.remote
class PongEnv(object):
    """Ray actor wrapping one Gym Pong environment.

    Each actor plays one full episode with the supplied policy weights and
    returns the policy gradient plus the total episode reward.
    """
    def __init__(self):
        # Tell numpy to only use one core. If we don't do this, each actor may
        # try to use all of the cores and the resulting contention may result
        # in no speedup over the serial version. Note that if numpy is using
        # OpenBLAS, then you need to set OPENBLAS_NUM_THREADS=1, and you
        # probably need to do it from the command line (so it happens before
        # numpy is imported).
        os.environ["MKL_NUM_THREADS"] = "1"
        self.env = gym.make("Pong-v0")
    def compute_gradient(self, model):
        """Play one episode with *model* and return (gradients, reward_sum)."""
        # model = {'W1':W1, 'W2':W2}
        # given a model, run for one episode and return the parameter
        # to be updated and sum(reward)
        # Reset the game.
        observation = self.env.reset()
        # Note that prev_x is used in computing the difference frame.
        prev_x = None
        xs, hs, dlogps, drs = [], [], [], []
        reward_sum = 0
        done = False
        while not done:
            cur_x = preprocess(observation)
            x = cur_x - prev_x if prev_x is not None else np.zeros(D)
            prev_x = cur_x
            # feed difference frames into the network
            # so that it can detect motion
            aprob, h = policy_forward(x, model)
            # Sample an action.
            action = 2 if np.random.uniform() < aprob else 3
            # The observation.
            xs.append(x)
            # The hidden state.
            hs.append(h)
            y = 1 if action == 2 else 0  # A "fake label".
            # The gradient that encourages the action that was taken to be
            # taken (see http://cs231n.github.io/neural-networks-2/#losses if
            # confused).
            dlogps.append(y - aprob)
            observation, reward, done, info = self.env.step(action)
            reward_sum += reward
            # Record reward (has to be done after we call step() to get reward
            # for previous action).
            drs.append(reward)
        # Stack the per-timestep records into (T, ...) arrays for backprop.
        epx = np.vstack(xs)
        eph = np.vstack(hs)
        epdlogp = np.vstack(dlogps)
        epr = np.vstack(drs)
        # Reset the array memory.
        xs, hs, dlogps, drs = [], [], [], []
        # Compute the discounted reward backward through time.
        discounted_epr = discount_rewards(epr)
        # Standardize the rewards to be unit normal (helps control the gradient
        # estimator variance).
        discounted_epr -= np.mean(discounted_epr)
        discounted_epr /= np.std(discounted_epr)
        # Modulate the gradient with advantage (the policy gradient magic
        # happens right here).
        epdlogp *= discounted_epr
        return policy_backward(eph, epx, epdlogp, model), reward_sum
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Train an RL agent")
    parser.add_argument('--cluster_mode', type=str, default="local",
                        help='The mode for the Spark cluster. local, yarn or spark-submit.')
    parser.add_argument("--batch_size", default=10, type=int,
                        help="The number of roll-outs to do per batch.")
    parser.add_argument("--iterations", default=-1, type=int,
                        help="The number of model updates to perform. By "
                             "default, training will not terminate.")
    parser.add_argument("--slave_num", type=int, default=2,
                        help="The number of slave nodes")
    parser.add_argument("--executor_cores", type=int, default=8,
                        help="The number of driver's cpu cores you want to use."
                             "You can change it depending on your own cluster setting.")
    parser.add_argument("--executor_memory", type=str, default="10g",
                        help="The size of slave(executor)'s memory you want to use."
                             "You can change it depending on your own cluster setting.")
    parser.add_argument("--driver_memory", type=str, default="2g",
                        help="The size of driver's memory you want to use."
                             "You can change it depending on your own cluster setting.")
    parser.add_argument("--driver_cores", type=int, default=8,
                        help="The number of driver's cpu cores you want to use."
                             "You can change it depending on your own cluster setting.")
    parser.add_argument("--extra_executor_memory_for_ray", type=str, default="20g",
                        help="The extra executor memory to store some data."
                             "You can change it depending on your own cluster setting.")
    parser.add_argument("--object_store_memory", type=str, default="4g",
                        help="The memory to store data on local."
                             "You can change it depending on your own cluster setting.")
    args = parser.parse_args()
    cluster_mode = args.cluster_mode
    # Bring up Spark + Ray through Orca; the three modes only differ in how
    # resources are requested.
    if cluster_mode.startswith("yarn"):
        sc = init_orca_context(cluster_mode=cluster_mode,
                               cores=args.executor_cores,
                               memory=args.executor_memory,
                               init_ray_on_spark=True,
                               num_executors=args.slave_num,
                               driver_memory=args.driver_memory,
                               driver_cores=args.driver_cores,
                               extra_executor_memory_for_ray=args.extra_executor_memory_for_ray,
                               object_store_memory=args.object_store_memory)
        ray_ctx = OrcaContext.get_ray_context()
    elif cluster_mode == "local":
        sc = init_orca_context(cores=args.driver_cores)
        ray_ctx = OrcaContext.get_ray_context()
    elif cluster_mode == "spark-submit":
        sc = init_orca_context(cluster_mode=cluster_mode)
        ray_ctx = OrcaContext.get_ray_context()
    else:
        print("init_orca_context failed. cluster_mode should be one of 'local', 'yarn' and 'spark-submit' but got "
              + cluster_mode)
    batch_size = args.batch_size
    # Run the reinforcement learning.
    running_reward = None
    batch_num = 1
    # Two-layer policy: W1 is (H, D) hidden weights, W2 is (H,) output weights.
    model = {}
    # "Xavier" initialization.
    model["W1"] = np.random.randn(H, D) / np.sqrt(D)
    model["W2"] = np.random.randn(H) / np.sqrt(H)
    # Update buffers that add up gradients over a batch.
    grad_buffer = {k: np.zeros_like(v) for k, v in model.items()}
    # Update the rmsprop memory.
    rmsprop_cache = {k: np.zeros_like(v) for k, v in model.items()}
    # One remote environment actor per rollout in a batch.
    actors = [PongEnv.remote() for _ in range(batch_size)]
    iteration = 0
    while iteration != args.iterations:
        iteration += 1
        model_id = ray.put(model)
        actions = []
        # Launch tasks to compute gradients from multiple rollouts in parallel.
        start_time = time.time()
        # run rall_out for batch_size times
        for i in range(batch_size):
            # compute_gradient returns two variables, so action_id is a list
            action_id = actors[i].compute_gradient.remote(model_id)
            actions.append(action_id)
        for i in range(batch_size):
            # wait for one actor to finish its operation
            # action_id is the ready object id
            action_id, actions = ray.wait(actions)
            grad, reward_sum = ray.get(action_id[0])
            # Accumulate the gradient of each weight parameter over batch.
            for k in model:
                grad_buffer[k] += grad[k]
            # Exponential moving average of episode reward for progress logging.
            running_reward = (reward_sum if running_reward is None else
                              running_reward * 0.99 + reward_sum * 0.01)
        end_time = time.time()
        print("Batch {} computed {} rollouts in {} seconds, "
              "running mean is {}".format(batch_num, batch_size,
                                          end_time - start_time,
                                          running_reward))
        # update gradient after one iteration
        # RMSProp: scale the step by a leaky moving average of squared grads.
        for k, v in model.items():
            g = grad_buffer[k]
            rmsprop_cache[k] = (decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g ** 2)
            model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)
            # Reset the batch gradient buffer.
            grad_buffer[k] = np.zeros_like(v)
        batch_num += 1
    stop_orca_context()
| intel-analytics/BigDL | python/orca/example/ray_on_spark/rl_pong/rl_pong.py | Python | apache-2.0 | 11,878 | [
"NEURON",
"ORCA"
] | 8808839e8dece9d03373e09de8394e82f79a89fadd07b5b2e0252a1d0f608e99 |
import numpy as np
from ase.lattice import bulk
from gpaw import GPAW
from gpaw.response.df import DielectricFunction
# Plane-wave ground state of bulk silicon followed by a linear-response
# dielectric-function calculation (ASE + GPAW).
# NOTE(review): ase.lattice.bulk and atoms.set_calculator are deprecated in
# modern ASE (ase.build.bulk / atoms.calc) -- confirm the pinned ASE version.
# Part 1: Ground state calculation
atoms = bulk('Si', 'diamond', a=5.431) # Generate diamond crystal structure for silicon
calc = GPAW(mode='pw', kpts=(4,4,4)) # GPAW calculator initialization
atoms.set_calculator(calc)
atoms.get_potential_energy() # Ground state calculation is performed
calc.write('si.gpw', 'all') # Use 'all' option to write wavefunction
# Part 2 : Spectrum calculation # DF: dielectric function object
df = DielectricFunction(calc='si.gpw', # Ground state gpw file (with wavefunction) as input
                        domega0=0.05) # Using nonlinear frequency grid
df.get_dielectric_function() # By default, a file called 'df.csv' is generated
| robwarm/gpaw-symm | doc/tutorials/dielectric_response/silicon_ABS_simpleversion.py | Python | gpl-3.0 | 860 | [
"ASE",
"CRYSTAL",
"GPAW"
] | fdffb6f1d902422c6a3f81077527dc824df8410445a6ede774b5e12980aeffcf |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
template source file.
https://github.com/kallimachos/template
Copyright (C) 2019 Brian Moss
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
def square(x):
    """Return the square of x.

    :param x: number to square
    :type x: int
    :returns: square of x
    :rtype: int

    >>> square(5)
    25
    """
    result = x * x
    return result
if __name__ == "__main__":
    # Demo entry point: print the square of 5 when run as a script.
    print(square(5))
| kallimachos/template | template/template.py | Python | gpl-3.0 | 988 | [
"Brian"
] | 9bbbfb6f9a3626ea6b6f2d8821c8fe9d890440ccfba5a7e49dbf9cd9920dea57 |
# Copyright 2014 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import string
import random
import itertools
from Bio import Alphabet, pairwise2
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
import numpy as np
import scipy as sp
import scipy.stats
from jellyfish import hamming_distance
from ulutil import unafold
from ulutil.pyutils import as_handle
random.seed()
# ==============================
# = General sequence utilities =
# ==============================
def substitute(seq,pos,sub):
    """Return *seq* with the character at index *pos* replaced by *sub*."""
    head, tail = seq[:pos], seq[pos+1:]
    return head + sub + tail
# Byte-translation table mapping each IUPAC nucleotide code to its complement
# (Python 2 string.maketrans).
complement_table = string.maketrans('ACGTRYSWKMBDHVN','TGCAYRSWMKVHDBN')
def reverse(seq):
    """Return the characters of *seq* in reverse order."""
    return ''.join(reversed(seq))
def complement(seq):
    """Return the IUPAC-aware complement of *seq*, uppercased."""
    upper_seq = seq.upper()
    return upper_seq.translate(complement_table)
def reverse_complement(seq):
    """Compute reverse complement of sequence.

    Mindful of IUPAC ambiguities.
    Return all uppercase.
    """
    return complement(seq)[::-1]
def translate(seq):
    # Translate a DNA string to protein; gap characters ('-') are first
    # masked to 'N' so Biopython will accept them.
    # NOTE(review): Seq.tostring() is Python-2-era Biopython API (removed in
    # modern Biopython, use str(seq)) -- confirm the pinned version.
    return Seq(seq.replace('-','N'),Alphabet.DNAAlphabet()).translate().tostring()
def gc_content(seq):
    """Return the G+C fraction of *seq* (case-insensitive), as a float.

    Fix: an empty sequence now yields 0.0 instead of raising
    ZeroDivisionError.
    """
    if not seq:
        return 0.0
    lowered = seq.lower()
    gc = lowered.count('g') + lowered.count('c')
    return float(gc) / len(lowered)
def random_dna_seq(n):
    """Return a uniformly random DNA string of length *n* over ACGT.

    Fix: the original built the string via reduce() with repeated
    concatenation (quadratic, and reduce/xrange are Python-2-only builtins);
    str.join over a generator is linear and works on Python 2 and 3.
    """
    return ''.join(random.choice('ACGT') for _ in range(n))
# Best global alignment of two sequences via Biopython pairwise2
# (match=0.5, mismatch=-0.75, gap open=-2.0, gap extend=-1.5); returns the
# single top-scoring alignment tuple.
global_align = lambda seq1,seq2: pairwise2.align.globalms(seq1,seq2,0.5,-0.75,-2.,-1.5,one_alignment_only=True)[0]
def percent_id(seq1,seq2):
    """Percent identity of two sequences after global alignment."""
    aligned = global_align(seq1, seq2)
    mismatch_frac = hamming_distance(aligned[0], aligned[1]) / float(len(aligned[0]))
    return (1. - mismatch_frac) * 100.
# barcode mapping fns
def barcode_hamming(observed,barcodes):
    """Find the candidate barcode closest to *observed* by Hamming distance.

    observed -- SeqRecord of the barcode
    barcodes -- list of barcode possibilities (python strings)

    Returns a (barcode, distance) tuple for the closest candidate; ties are
    broken by list order (min keeps the first).
    """
    obs_seq = observed.seq.tostring()
    distances = [(barcode,hamming_distance(obs_seq,barcode)) for barcode in barcodes]
    closest = min(distances,key=lambda p: p[1])
    return closest # tuple of (barcode, distance)
def barcode_probabilities(observed,barcodes):
    """Probability of each candidate barcode given the observed read.

    observed -- 'fastq' SeqRecord of the barcode (with phred qualities)
    barcodes -- list of barcode possibilities (python strings)

    Per-base phred qualities weight matches by log1p(-10**(-q/10)) and
    mismatches by the D term below; positions where the read is 'N' are
    ignored.  Returns a numpy array normalised to sum to 1.
    NOTE(review): the mismatch term D uses log(3) where log(10) might be
    expected for log(10**(-q/10)/3) -- confirm the intended error model.

    Fix: dtype=np.int -- the np.int alias was removed in NumPy 1.24; the
    builtin int is what it meant.
    """
    obs_seq = np.array(list(observed.seq.tostring()))
    obs_qual = np.array(observed.letter_annotations['phred_quality'])
    barcodes = np.array([list(bc) for bc in barcodes])
    # choice selects per-position: 0 = ignore ('N'), 1 = match, 2 = mismatch
    choice = np.zeros(barcodes.shape, dtype=int)
    choice[barcodes == obs_seq] = 1
    choice[barcodes != obs_seq] = 2
    choice[:, obs_seq == 'N'] = 0
    N = np.zeros((1,barcodes.shape[1]))
    E = np.log1p(-np.power(10, -obs_qual / 10.))
    D = -np.log(3) - (obs_qual / 10.) * np.log(3)
    B = np.exp(np.sum(np.choose(choice, [N,E,D]), axis=1))
    return B / np.sum(B)
def barcode_entropy(observed, barcodes):
    """Entropy (in nats) of the probabilistic barcode assignment.

    observed -- 'fastq' SeqRecord of the barcode
    barcodes -- list of barcode possibilities (python strings)
    """
    return sp.stats.entropy(barcode_probabilities(observed, barcodes))
# for generating 'safe' filenames from identifiers
# Maps filesystem-unsafe characters to safe ones: '/', '*', '|', '>', '<'
# and space become '_'; '+' becomes 'p' (Python 2 string.maketrans).
cleanup_table = string.maketrans('/*|><+ ','_____p_')
def cleanup_id(identifier):
    # Return *identifier* with unsafe characters replaced so it can be used
    # as a filename.
    return identifier.translate(cleanup_table)
def seqhist(seqlist):
    """Tally how many times each sequence occurs in *seqlist*."""
    counts = dict()
    for item in seqlist:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
    return counts
def seqmode(seqs):
    """Return the most common sequence.

    *seqs* may be a list of sequences or an already-tallied {seq: count}
    mapping (as produced by seqhist).

    Fix: dict.iterkeys() is Python-2-only; iterating the mapping directly
    is equivalent and works on both Python 2 and 3.
    """
    if isinstance(seqs, list):
        seqs = seqhist(seqs)
    return max(seqs, key=lambda k: seqs[k])
def dimer_dG(seq1,seq2):
    """Compute a primer-dimer score using UNAFOLD hybrid_min

    Hybridises every pair of 3'-anchored suffixes of the two primers and
    returns the negated minimum free energy over all suffix lengths
    (larger score = more stable dimer).
    """
    scores = []
    subseqs1 = []
    subseqs2 = []
    # Build suffixes of increasing length, both anchored at the 3' ends.
    for i in xrange( min(len(seq1),len(seq2)) ):
        subseqs1.append( seq1[-i-1:] )
        subseqs2.append( seq2[-i-1:] )
    scores = unafold.hybrid_min_list(subseqs1,subseqs2,NA='DNA')
    return -min(scores)
def dimer_overlap(seq1,seq2,weight_3=10):
    """Compute a primer-dimer score by counting overlaps

    weight_3 is the num of 3' bases to add extra weight to either primer
    NOTE(review): seq2rc is computed from seq1, not seq2 -- possibly
    intentional (self-dimer scoring?) but the variable name suggests seq2;
    verify against callers.
    """
    # +1 per matching (case-insensitive) aligned base, -1 per mismatch.
    overlap_score = lambda s1,s2: sum(1 if c1.lower() == c2.lower() else -1 for c1, c2 in itertools.izip(s1,s2))
    seq2rc = reverse_complement(seq1)
    scores = []
    # Slide the 3' end of seq1 against the reverse complement, scoring every
    # overlap length; the 3'-terminal weight_3 bases count double.
    for i in xrange( min(len(seq1),len(seq2)) ):
        subseq1 = seq1[-i-1:]
        subseq2 = seq2rc[:i+1]
        score = 0
        if (i+1) <= 2*weight_3:
            score += overlap_score(subseq1,subseq2) * 2
        else:
            score += overlap_score(subseq1[:weight_3],subseq2[:weight_3]) * 2
            score += overlap_score(subseq1[weight_3:-weight_3],subseq2[weight_3:-weight_3])
            score += overlap_score(subseq1[-weight_3:],subseq2[-weight_3:]) * 2
        scores.append(score)
    return max(scores)
# ==========================
# = Manual FASTA iteration =
# ==========================
# taken from biopython
# Python-2 str.translate helpers: *identity* maps every byte to itself;
# deleting the ASCII letters from it leaves *nonalpha*, the set of
# non-letter bytes used as deletechars in FastaIterator below.
identity = string.maketrans('','')
nonalpha = identity.translate(identity,string.ascii_letters)
def FastaIterator(handleish,title2ids=lambda s: s):
    """Yield (description, sequence) tuples from a FASTA file or handle.

    handleish -- filename or open handle (see pyutils.as_handle)
    title2ids -- callable applied to each '>' header line (sans '>')
    Non-letter characters (whitespace, digits) are stripped from sequence
    lines.  Adapted from Biopython's FASTA parser.
    """
    with as_handle(handleish,'r') as handle:
        # Skip any leading junk until the first '>' header (or EOF).
        while True:
            line = handle.readline()
            if line == '' : return
            if line[0] == '>':
                break
        while True:
            if line[0] != '>':
                raise ValueError("Records in Fasta files should start with '>' character")
            descr = title2ids(line[1:].rstrip())
            fullline = ''
            line = handle.readline()
            # Accumulate sequence lines until the next header or EOF.
            while True:
                if not line : break
                if line[0] == '>': break
                fullline += line.translate(identity,nonalpha)
                line = handle.readline()
            yield (descr,fullline)
            if not line : return #StopIteration
        assert False, "Should not reach this line"
# ============================
# = biopython-specific tools =
# ============================
def make_SeqRecord(name,seq):
    """Wrap a raw string in a SeqRecord whose id/name/description are *name*."""
    seq_obj = Seq(seq)
    return SeqRecord(seq_obj, id=name, name=name, description=name)
def get_string(seqobj):
    """Coerce a SeqRecord, Seq, or plain string into an uppercase string.

    Fix: unsupported input types previously fell through all branches and
    raised an unhelpful UnboundLocalError on the return; they now raise an
    explicit TypeError.
    """
    if isinstance(seqobj,SeqRecord):
        return seqobj.seq.tostring().upper()
    elif isinstance(seqobj,Seq):
        return seqobj.tostring().upper()
    elif isinstance(seqobj,str):
        return seqobj.upper()
    raise TypeError("cannot extract sequence string from %r" % type(seqobj))
def get_features(feature_list,feature_type):
    """Return the features in *feature_list* whose .type equals *feature_type*."""
    return [feature for feature in feature_list if feature.type == feature_type]
def advance_to_features(feature_iter,feature_types):
    """Consume *feature_iter* until a feature whose .type is in *feature_types*.

    feature_types is a list of acceptable stopping points.  Returns that
    feature; raises ValueError if the iterator is exhausted first.

    Fix: the Python-2-only ``raise E, msg`` statement form is replaced by
    the call form, which is valid on Python 2 and 3.
    """
    for feature in feature_iter:
        if feature.type in feature_types:
            return feature
    raise ValueError("didn't find %s in record" % feature_types)
def advance_to_feature(feature_iter,feature_type):
    # Single-type convenience wrapper around advance_to_features.
    return advance_to_features(feature_iter,[feature_type])
def map_feature( feature, coord_mapping, offset=0, erase=[] ):
    """Deep-copy *feature*, remapping its coordinates through *coord_mapping*.

    coord_mapping maps an old position to a list of candidate new positions;
    the last candidate is used for the start and the first for the end, and
    *offset* is added to both.  Qualifier keys listed in *erase* are dropped.
    NOTE(review): the mutable default ``erase=[]`` is only iterated, never
    mutated here, so it is harmless -- but a tuple default would be safer.
    """
    new_feature = copy.deepcopy(feature)
    new_start = coord_mapping[feature.location.start.position][-1] + offset
    new_end = coord_mapping[feature.location.end.position][0] + offset
    new_location = FeatureLocation(new_start,new_end)
    new_feature.location = new_location
    for qual in erase:
        new_feature.qualifiers.pop(qual,None)
    return new_feature
def copy_features( record_from, record_to, coord_mapping, offset=0, erase=[], replace=False ):
    """Copy all features from *record_from* into *record_to*, remapping
    coordinates via map_feature.

    If *replace* is true, any pre-existing features in *record_to* whose
    type matches a copied feature's type are removed afterwards (indices
    are popped in reverse order so earlier removals don't shift later ones).
    """
    if replace:
        # index record_to features:
        feature_index = {}
        for (i,feature) in enumerate(record_to.features):
            feature_index.setdefault(feature.type,[]).append(i)
        feat_idx_to_delete = []
    for feature in record_from.features:
        if replace:
            feat_idx_to_delete += feature_index.get(feature.type,[])
        new_feature = map_feature( feature, coord_mapping, offset, erase )
        record_to.features.append(new_feature)
    if replace:
        for idx in sorted(feat_idx_to_delete,reverse=True):
            record_to.features.pop(idx)
def translate_features( record ):
    """Set each feature's 'translation' qualifier from the record sequence.

    Honours the feature's 'codon_start' qualifier (1-based; defaults to 1)
    as a reading-frame offset.  Mutates *record* in place.
    """
    for feature in record.features:
        offset = int(feature.qualifiers.get('codon_start',[1])[0]) - 1
        feature.qualifiers['translation'] = feature.extract(record.seq)[offset:].translate()
# SeqRecord <-> JSON-serializable
def simplifySeq(seq):
    """Flatten a Bio.Seq into a JSON-serialisable dict (see complicateSeq)."""
    obj = {}
    obj['__Seq__'] = True
    obj['seq'] = seq.tostring()
    # Recover the alphabet class name by stripping the trailing '()' from
    # its repr -- fragile, but matches complicateSeq's getattr lookup.
    obj['alphabet'] = seq.alphabet.__repr__().rstrip(')').rstrip('(')
    return obj
def complicateSeq(obj):
    """Rebuild a Bio.Seq from the dict produced by simplifySeq.

    Fixes two defects in the original:
    * the IUPAC lookup ran unconditionally, so an alphabet found in
      ``Bio.Alphabet`` (e.g. DNAAlphabet) was discarded, and an
      AttributeError escaped whenever the name was missing from IUPAC;
    * the Python-2-only ``raise E, msg`` statement form is replaced by the
      call form.
    """
    if '__Seq__' not in obj:
        raise ValueError("object must be converable to Bio.Seq")
    # Figure out which alphabet to use: generic alphabets first, then the
    # IUPAC namespace; let AttributeError propagate if neither has it.
    try:
        alphabet = getattr(Alphabet, obj['alphabet'])()
    except AttributeError:
        alphabet = getattr(Alphabet.IUPAC, obj['alphabet'])()
    seq = Seq(obj['seq'],alphabet=alphabet)
    return seq
def simplifySeqFeature(feature):
    """Flatten a Bio.SeqFeature into a JSON-serialisable dict."""
    loc = feature.location
    return {
        '__SeqFeature__': True,
        'location': (loc.nofuzzy_start, loc.nofuzzy_end),
        'type': feature.type,
        'strand': feature.strand,
        'id': feature.id,
        'qualifiers': feature.qualifiers,
    }
def complicateSeqFeature(obj):
    """Rebuild a Bio.SeqFeature from the dict produced by simplifySeqFeature.

    Fix: the Python-2-only ``raise E, msg`` statement form is replaced by
    the call form, which is valid on Python 2 and 3.
    """
    if '__SeqFeature__' not in obj:
        raise ValueError("object must be converable to Bio.SeqFeature")
    location = FeatureLocation(*obj['location'])
    feature = SeqFeature(location=location,type=obj['type'],strand=obj['strand'],id=obj['id'],qualifiers=obj['qualifiers'])
    return feature
def simplifySeqRecord(record):
    """Flatten a Bio.SeqRecord (and its Seq and features) into a
    JSON-serialisable dict (see complicateSeqRecord for the inverse)."""
    obj = {}
    obj['__SeqRecord__'] = True
    obj['seq'] = simplifySeq(record.seq)
    obj['id'] = record.id
    obj['name'] = record.name
    obj['description'] = record.description
    obj['dbxrefs'] = record.dbxrefs
    obj['annotations'] = record.annotations
    obj['letter_annotations'] = record.letter_annotations # should work because it is actually a _RestrictedDict obj which subclasses dict
    obj['features'] = map(simplifySeqFeature,record.features)
    return obj
def complicateSeqRecord(obj):
    """Rebuild a Bio.SeqRecord from the dict produced by simplifySeqRecord."""
    if '__SeqRecord__' not in obj:
        raise ValueError, "object must be converable to Bio.SeqRecord"
    features = map(complicateSeqFeature,obj['features'])
    record = SeqRecord(seq=complicateSeq(obj['seq']),id=obj['id'],name=obj['name'],description=obj['description'],dbxrefs=obj['dbxrefs'],features=features,annotations=obj['annotations'],letter_annotations=obj['letter_annotations'])
    return record
| churchlab/ulutil | ulutil/seqtools.py | Python | apache-2.0 | 11,310 | [
"Biopython"
] | d6fa99f57c5efc12d926d75eb78569edd9ce4ca0c4673ff4d89eceaa316cfd37 |
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
""" This module provides the LDAP base functions
with a subset of the functions from the real ldap module. """
import logging
import ssl
from typing import Callable, Generator, Optional, Tuple, TypeVar
from urllib.parse import urlparse
import ldap3
import ldap3.core.exceptions as exceptions
logger = logging.getLogger(__name__)
def _debug(*argv):
argv = [str(arg) for arg in argv]
logger.debug(" ".join(argv))
Entity = TypeVar('Entity')
class LdapBase(object):
""" The vase LDAP connection class. """
def __init__(self, settings_dict: dict) -> None:
self.settings_dict = settings_dict
self._obj = None
self._connection_class = ldap3.Connection
def close(self) -> None:
if self._obj is not None:
self._obj.unbind()
self._obj = None
#########################
# Connection Management #
#########################
def set_connection_class(self, connection_class):
self._connection_class = connection_class
def check_password(self, dn: str, password: str) -> bool:
try:
conn = self._connect(user=dn, password=password)
conn.unbind()
return True
except exceptions.LDAPInvalidCredentialsResult:
return False
except exceptions.LDAPUnwillingToPerformResult:
return False
def _connect(self, user: str, password: str) -> ldap3.Connection:
settings = self.settings_dict
_debug("connecting")
url = urlparse(settings['URI'])
if url.scheme == "ldaps":
use_ssl = True
elif url.scheme == "ldap":
use_ssl = False
else:
raise RuntimeError("Unknown scheme '%s'" % url.scheme)
if ":" in url.netloc:
host, port = url.netloc.split(":")
port = int(port)
else:
host = url.netloc
if use_ssl:
port = 636
else:
port = 389
start_tls = False
if 'START_TLS' in settings and settings['START_TLS']:
start_tls = True
tls = None
if use_ssl or start_tls:
tls = ldap3.Tls()
if 'TLS_CA' in settings and settings['TLS_CA']:
tls.ca_certs_file = settings['TLS_CA']
if 'REQUIRE_TLS' in settings and settings['REQUIRE_TLS']:
tls.validate = ssl.CERT_REQUIRED
s = ldap3.Server(host, port=port, use_ssl=use_ssl, tls=tls)
c = self._connection_class(
s, # client_strategy=ldap3.STRATEGY_SYNC_RESTARTABLE,
user=user, password=password, authentication=ldap3.SIMPLE)
c.strategy.restartable_sleep_time = 0
c.strategy.restartable_tries = 1
c.raise_exceptions = True
c.open()
if start_tls:
c.start_tls()
try:
c.bind()
except: # noqa: E722
c.unbind()
raise
return c
def _reconnect(self) -> None:
settings = self.settings_dict
try:
self._obj = self._connect(
user=settings['USER'], password=settings['PASSWORD'])
except Exception:
self._obj = None
raise
assert self._obj is not None
def _do_with_retry(self, fn: Callable[[ldap3.Connection], Entity]) -> Entity:
if self._obj is None:
self._reconnect()
assert self._obj is not None
try:
return fn(self._obj)
except ldap3.core.exceptions.LDAPSessionTerminatedByServerError:
# if it fails, reconnect then retry
_debug("SERVER_DOWN, reconnecting")
self._reconnect()
return fn(self._obj)
###################
# read only stuff #
###################
def search(self, base, scope, filterstr='(objectClass=*)',
attrlist=None, limit=None) -> Generator[Tuple[str, dict], None, None]:
"""
Search for entries in LDAP database.
"""
_debug("search", base, scope, filterstr, attrlist, limit)
# first results
if attrlist is None:
attrlist = ldap3.ALL_ATTRIBUTES
elif isinstance(attrlist, set):
attrlist = list(attrlist)
def first_results(obj):
_debug("---> searching ldap", limit)
obj.search(
base, filterstr, scope, attributes=attrlist, paged_size=limit)
return obj.response
# get the 1st result
result_list = self._do_with_retry(first_results)
# Loop over list of search results
for result_item in result_list:
# skip searchResRef for now
if result_item['type'] != "searchResEntry":
continue
dn = result_item['dn']
attributes = result_item['raw_attributes']
# did we already retrieve this from cache?
_debug("---> got ldap result", dn)
_debug("---> yielding", result_item)
yield (dn, attributes)
# we are finished - return results, eat cake
_debug("---> done")
return
####################
# Cache Management #
####################
def reset(self, force_flush_cache: bool = False) -> None:
"""
Reset transaction back to original state, discarding all
uncompleted transactions.
"""
pass
##########################
# Transaction Management #
##########################
# Fake it
def is_dirty(self) -> bool:
""" Are there uncommitted changes? """
raise NotImplementedError()
def is_managed(self) -> bool:
""" Are we inside transaction management? """
raise NotImplementedError()
def enter_transaction_management(self) -> None:
""" Start a transaction. """
raise NotImplementedError()
def leave_transaction_management(self) -> None:
"""
End a transaction. Must not be dirty when doing so. ie. commit() or
rollback() must be called if changes made. If dirty, changes will be
discarded.
"""
raise NotImplementedError()
def commit(self) -> None:
"""
Attempt to commit all changes to LDAP database. i.e. forget all
rollbacks. However stay inside transaction management.
"""
raise NotImplementedError()
def rollback(self) -> None:
"""
Roll back to previous database state. However stay inside transaction
management.
"""
raise NotImplementedError()
##################################
# Functions needing Transactions #
##################################
def add(self, dn: str, mod_list: dict) -> None:
"""
Add a DN to the LDAP database; See ldap module. Doesn't return a result
if transactions enabled.
"""
raise NotImplementedError()
def modify(self, dn: str, mod_list: dict) -> None:
"""
Modify a DN in the LDAP database; See ldap module. Doesn't return a
result if transactions enabled.
"""
raise NotImplementedError()
def modify_no_rollback(self, dn: str, mod_list: dict) -> None:
"""
Modify a DN in the LDAP database; See ldap module. Doesn't return a
result if transactions enabled.
"""
raise NotImplementedError()
def delete(self, dn: str) -> None:
"""
delete a dn in the ldap database; see ldap module. doesn't return a
result if transactions enabled.
"""
raise NotImplementedError()
def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:
"""
rename a dn in the ldap database; see ldap module. doesn't return a
result if transactions enabled.
"""
raise NotImplementedError()
| Karaage-Cluster/python-tldap | tldap/backend/base.py | Python | gpl-3.0 | 8,635 | [
"Brian"
] | 65e56cb3c6d6f832af0ffebede404de9a0aeee298cb0700fb90d85c90282ab79 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from itertools import count
import re, os, cStringIO, time, cgi, string, urlparse
from xml.dom import minidom as dom
from xml.sax.handler import ErrorHandler, feature_validation
from xml.dom.pulldom import SAX2DOM
from xml.sax import make_parser
from xml.sax.xmlreader import InputSource
from twisted.python import htmlizer, text
from twisted.python.filepath import FilePath
from twisted.python.deprecate import deprecated
from twisted.python.versions import Version
from twisted.web import domhelpers
import process, latex, indexer, numberer, htmlbook
# relative links to html files
def fixLinks(document, ext):
    """
    Rewrite links to XHTML lore input documents so they point to lore XHTML
    output documents.

    Any node with an C{href} attribute which does not contain a value starting
    with C{http}, C{https}, C{ftp}, or C{mailto} and which does not have a
    C{class} attribute of C{absolute} or which contains C{listing} and which
    does point to an URL ending with C{html} will have that attribute value
    rewritten so that the filename extension is C{ext} instead of C{html}.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @type ext: C{str}
    @param ext: The extension to use when selecting an output file name.  This
    replaces the extension of the input file name.

    @return: C{None}
    """
    supported_schemes=['http', 'https', 'ftp', 'mailto']
    for node in domhelpers.findElementsWithAttribute(document, 'href'):
        href = node.getAttribute("href")
        if urlparse.urlparse(href)[0] in supported_schemes:
            # Absolute URL with a recognized scheme: leave untouched.
            continue
        if node.getAttribute("class") == "absolute":
            continue
        if node.getAttribute("class").find('listing') != -1:
            continue

        # This is a relative link, so it should be munged.
        if href.endswith('html') or href[:href.rfind('#')].endswith('html'):
            fname, fext = os.path.splitext(href)
            if '#' in fext:
                # Keep the fragment identifier while swapping the extension.
                fext = ext+'#'+fext.split('#', 1)[1]
            else:
                fext = ext
            node.setAttribute("href", fname + fext)
def addMtime(document, fullpath):
    """
    Set the last modified time of the given document.

    Every node whose C{class} attribute is C{mtime} receives a text child
    holding the modification time of C{fullpath}, rendered by L{time.ctime}.

    @type document: A DOM Node or Document
    @param document: The output template which defines the presentation of the
    last modified time.

    @type fullpath: C{str}
    @param fullpath: The file name from which to take the last modified time.

    @return: C{None}
    """
    mtimeNodes = domhelpers.findElementsWithAttribute(document, "class", "mtime")
    for mtimeNode in mtimeNodes:
        stamp = dom.Text()
        stamp.data = time.ctime(os.path.getmtime(fullpath))
        mtimeNode.appendChild(stamp)
def _getAPI(node):
"""
Retrieve the fully qualified Python name represented by the given node.
The name is represented by one or two aspects of the node: the value of the
node's first child forms the end of the name. If the node has a C{base}
attribute, that attribute's value is prepended to the node's value, with
C{.} separating the two parts.
@rtype: C{str}
@return: The fully qualified Python name.
"""
base = ""
if node.hasAttribute("base"):
base = node.getAttribute("base") + "."
return base+node.childNodes[0].nodeValue
def fixAPI(document, url):
    """
    Replace API references with links to API documentation.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @type url: C{str}
    @param url: A string which will be interpolated with the fully qualified
    Python name of any API reference encountered in the input document, the
    result of which will be used as a link to API documentation for that name
    in the output document.

    @return: C{None}
    """
    # API references
    for node in domhelpers.findElementsWithAttribute(document, "class", "API"):
        fullname = _getAPI(node)
        anchor = dom.Element('a')
        anchor.setAttribute('href', url % (fullname,))
        anchor.setAttribute('title', fullname)
        # Move all existing children of the reference node into the new
        # anchor, so the original markup survives inside the link.
        while node.childNodes:
            child = node.childNodes[0]
            node.removeChild(child)
            anchor.appendChild(child)
        node.appendChild(anchor)
        if node.hasAttribute('base'):
            # The base attribute is already folded into the link; drop it so
            # it does not leak into the output document.
            node.removeAttribute('base')
def fontifyPython(document):
    """
    Syntax color any node in the given document which contains a Python source
    listing.

    A node is considered a Python listing when it is a C{pre} element whose
    C{class} attribute is exactly C{python}.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @return: C{None}
    """
    def isPythonListing(candidate):
        # Only <pre class="python"> elements are colorized.
        return (candidate.nodeName == 'pre'
                and candidate.hasAttribute('class')
                and candidate.getAttribute('class') == 'python')

    for listing in domhelpers.findElements(document, isPythonListing):
        fontifyPythonNode(listing)
def fontifyPythonNode(node):
    """
    Syntax color the given node containing Python source code.

    The node must have a parent.

    @return: C{None}
    """
    oldio = cStringIO.StringIO()
    # Extract the raw source text from the DOM node, undoing entity escaping.
    latex.getLatexText(node, oldio.write,
                       entities={'lt': '<', 'gt': '>', 'amp': '&'})
    # Normalize trailing whitespace and guarantee a final newline before
    # counting lines and running the colorizer.
    oldio = cStringIO.StringIO(oldio.getvalue().strip()+'\n')
    howManyLines = len(oldio.getvalue().splitlines())
    newio = cStringIO.StringIO()
    htmlizer.filter(oldio, newio, writer=htmlizer.SmallerHTMLWriter)
    lineLabels = _makeLineNumbers(howManyLines)
    newel = dom.parseString(newio.getvalue()).documentElement
    newel.setAttribute("class", "python")
    # Swap the colorized markup in for the original listing, then prepend the
    # line-number gutter.
    node.parentNode.replaceChild(newel, node)
    newel.insertBefore(lineLabels, newel.firstChild)
def addPyListings(document, dir):
    """
    Insert Python source listings into the given document from files in the
    given directory based on C{py-listing} nodes.

    Any node in C{document} with a C{class} attribute set to C{py-listing} will
    have source lines taken from the file named in that node's C{href}
    attribute (searched for in C{dir}) inserted in place of that node.

    If a node has a C{skipLines} attribute, its value will be parsed as an
    integer and that many lines will be skipped at the beginning of the source
    file.

    @type document: A DOM Node or Document
    @param document: The document within which to make listing replacements.

    @type dir: C{str}
    @param dir: The directory in which to find source files containing the
    referenced Python listings.

    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(document, "class",
                                                     "py-listing"):
        filename = node.getAttribute("href")
        outfile = cStringIO.StringIO()
        # Strip trailing whitespace from every source line before processing.
        lines = map(string.rstrip, open(os.path.join(dir, filename)).readlines())

        # An absent skipLines attribute means skip nothing.
        skip = node.getAttribute('skipLines') or 0
        lines = lines[int(skip):]
        howManyLines = len(lines)
        data = '\n'.join(lines)

        data = cStringIO.StringIO(text.removeLeadingTrailingBlanks(data))
        htmlizer.filter(data, outfile, writer=htmlizer.SmallerHTMLWriter)
        sourceNode = dom.parseString(outfile.getvalue()).documentElement
        # Prepend the line-number gutter to the colorized listing.
        sourceNode.insertBefore(_makeLineNumbers(howManyLines), sourceNode.firstChild)
        _replaceWithListing(node, sourceNode.toxml(), filename, "py-listing")
def _makeLineNumbers(howMany):
"""
Return an element which will render line numbers for a source listing.
@param howMany: The number of lines in the source listing.
@type howMany: C{int}
@return: An L{dom.Element} which can be added to the document before
the source listing to add line numbers to it.
"""
# Figure out how many digits wide the widest line number label will be.
width = len(str(howMany))
# Render all the line labels with appropriate padding
labels = ['%*d' % (width, i) for i in range(1, howMany + 1)]
# Create a p element with the right style containing the labels
p = dom.Element('p')
p.setAttribute('class', 'py-linenumber')
t = dom.Text()
t.data = '\n'.join(labels) + '\n'
p.appendChild(t)
return p
def _replaceWithListing(node, val, filename, class_):
    """
    Replace C{node} with a listing C{div} containing C{val} and a caption.

    The caption is the text content of the original node, unless that text is
    just the base name of C{filename}, in which case the generic caption
    C{Source listing} is used instead.  The caption links to C{filename}.

    @param node: The DOM node to replace; it must have a parent.
    @param val: Markup (a string) for the listing body.
    @param filename: The file the listing was taken from.
    @param class_: The C{class} attribute for the generated C{div}.

    @return: C{None}
    """
    captionTitle = domhelpers.getNodeText(node)
    if captionTitle == os.path.basename(filename):
        captionTitle = 'Source listing'
    text = ('<div class="%s">%s<div class="caption">%s - '
            '<a href="%s"><span class="filename">%s</span></a></div></div>' %
            (class_, val, captionTitle, filename, filename))
    newnode = dom.parseString(text).documentElement
    node.parentNode.replaceChild(newnode, node)
def addHTMLListings(document, dir):
    """
    Insert HTML source listings into the given document from files in the given
    directory based on C{html-listing} nodes.

    Any node in C{document} with a C{class} attribute set to C{html-listing}
    will have source lines taken from the file named in that node's C{href}
    attribute (searched for in C{dir}) inserted in place of that node.

    @type document: A DOM Node or Document
    @param document: The document within which to make listing replacements.

    @type dir: C{str}
    @param dir: The directory in which to find source files containing the
    referenced HTML listings.

    @return: C{None}
    """
    listingNodes = domhelpers.findElementsWithAttribute(
        document, "class", "html-listing")
    for listingNode in listingNodes:
        sourcePath = listingNode.getAttribute("href")
        # Escape the raw HTML so it displays as text rather than markup.
        escaped = cgi.escape(open(os.path.join(dir, sourcePath)).read())
        markup = '<pre class="htmlsource">\n%s</pre>' % (escaped,)
        _replaceWithListing(listingNode, markup, sourcePath, "html-listing")
def addPlainListings(document, dir):
    """
    Insert text listings into the given document from files in the given
    directory based on C{listing} nodes.

    Any node in C{document} with a C{class} attribute set to C{listing} will
    have source lines taken from the file named in that node's C{href}
    attribute (searched for in C{dir}) inserted in place of that node.

    @type document: A DOM Node or Document
    @param document: The document within which to make listing replacements.

    @type dir: C{str}
    @param dir: The directory in which to find source files containing the
    referenced text listings.

    @return: C{None}
    """
    listingNodes = domhelpers.findElementsWithAttribute(
        document, "class", "listing")
    for listingNode in listingNodes:
        sourcePath = listingNode.getAttribute("href")
        # Escape the file contents so they render literally inside <pre>.
        escaped = cgi.escape(open(os.path.join(dir, sourcePath)).read())
        markup = '<pre>\n%s</pre>' % (escaped,)
        _replaceWithListing(listingNode, markup, sourcePath, "listing")
def getHeaders(document):
    """
    Return all H2 and H3 nodes in the given document.

    @type document: A DOM Node or Document

    @rtype: C{list}
    """
    headerPattern = re.compile('h[23]$')

    def isSectionHeader(node):
        # Match h2/h3 by node name; the anchored pattern rejects h20 etc.
        return headerPattern.match(node.nodeName)

    return domhelpers.findElements(document, isSectionHeader)
def generateToC(document):
    """
    Create a table of contents for the given document.

    @type document: A DOM Node or Document

    @rtype: A DOM Node
    @return: a Node containing a table of contents based on the headers of the
    given document.
    """
    subHeaders = None
    headers = []
    # Group each h2 with the h3s that follow it, preserving document order.
    for element in getHeaders(document):
        if element.tagName == 'h2':
            subHeaders = []
            headers.append((element, subHeaders))
        elif subHeaders is None:
            # An h3 before any h2 has no section to belong to.
            raise ValueError(
                "No H3 element is allowed until after an H2 element")
        else:
            subHeaders.append(element)

    # Generator for unique anchor names ("auto0", "auto1", ...).
    auto = count().next

    def addItem(headerElement, parent):
        # Build a list item linking to the header, and add a named anchor
        # inside the header itself as the link target.
        anchor = dom.Element('a')
        name = 'auto%d' % (auto(),)
        anchor.setAttribute('href', '#' + name)
        text = dom.Text()
        text.data = domhelpers.getNodeText(headerElement)
        anchor.appendChild(text)
        headerNameItem = dom.Element('li')
        headerNameItem.appendChild(anchor)
        parent.appendChild(headerNameItem)
        anchor = dom.Element('a')
        anchor.setAttribute('name', name)
        headerElement.appendChild(anchor)

    toc = dom.Element('ol')
    for headerElement, subHeaders in headers:
        addItem(headerElement, toc)
        if subHeaders:
            # Nest the h3 entries in an inner list under the h2 entry.
            subtoc = dom.Element('ul')
            toc.appendChild(subtoc)
            for subHeaderElement in subHeaders:
                addItem(subHeaderElement, subtoc)

    return toc
def putInToC(document, toc):
    """
    Insert the given table of contents into the given document.

    The first node with a C{class} attribute set to C{toc} has its children
    replaced with C{toc}.  If no such node exists, nothing happens.

    @type document: A DOM Node or Document
    @type toc: A DOM Node
    """
    tocContainers = domhelpers.findElementsWithAttribute(document, 'class', 'toc')
    if tocContainers:
        # Overwrite the child list wholesale with the generated ToC.
        tocContainers[0].childNodes = [toc]
def removeH1(document):
    """
    Replace all C{h1} nodes in the given document with empty C{span} nodes.

    C{h1} nodes mark up document sections and the output template is given an
    opportunity to present this information in a different way.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @return: C{None}
    """
    # NOTE: a single placeholder element instance is reused for every
    # replacement, mirroring the original behaviour of this function.
    placeholder = dom.Element('span')
    for headerNode in domhelpers.findNodesNamed(document, 'h1'):
        headerNode.parentNode.replaceChild(placeholder, headerNode)
def footnotes(document):
    """
    Find footnotes in the given document, move them to the end of the body, and
    generate links to them.

    A footnote is any node with a C{class} attribute set to C{footnote}.
    Footnote links are generated as superscript.  Footnotes are collected in a
    C{ol} node at the end of the document.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @return: C{None}
    """
    footnotes = domhelpers.findElementsWithAttribute(document, "class",
                                                     "footnote")
    if not footnotes:
        return
    footnoteElement = dom.Element('ol')
    id = 1
    for footnote in footnotes:
        # Superscript link that replaces the inline footnote text; its title
        # carries the whitespace-normalized footnote content as a tooltip.
        href = dom.parseString('<a href="#footnote-%(id)d">'
                               '<super>%(id)d</super></a>'
                               % vars()).documentElement
        text = ' '.join(domhelpers.getNodeText(footnote).split())
        href.setAttribute('title', text)
        # Named anchor wrapping the footnote body, collected into the list
        # appended at the end of the document body.
        target = dom.Element('a')
        target.setAttribute('name', 'footnote-%d' % (id,))
        target.childNodes = [footnote]
        footnoteContent = dom.Element('li')
        footnoteContent.childNodes = [target]
        footnoteElement.childNodes.append(footnoteContent)
        footnote.parentNode.replaceChild(href, footnote)
        id += 1
    body = domhelpers.findNodesNamed(document, "body")[0]
    header = dom.parseString('<h2>Footnotes</h2>').documentElement
    body.childNodes.append(header)
    body.childNodes.append(footnoteElement)
def notes(document):
    """
    Find notes in the given document and mark them up as such.

    A note is any node with a C{class} attribute set to C{note}; each one gets
    a C{<strong>Note: </strong>} prefix prepended to its children.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @return: C{None}
    """
    prefix = dom.parseString('<strong>Note: </strong>').documentElement
    for noteNode in domhelpers.findElementsWithAttribute(document, "class",
                                                         "note"):
        # NOTE: the same prefix node instance is shared by every note; the
        # childNodes list is manipulated directly, mirroring the original.
        noteNode.childNodes.insert(0, prefix)
def compareMarkPos(a, b):
    """
    Perform in every way identically to L{cmp} for valid inputs.

    @param a: A (line, column) pair to compare.
    @param b: A (line, column) pair to compare.
    """
    # Order by line first; the column only breaks ties.
    linecmp = cmp(a[0], b[0])
    if linecmp:
        return linecmp
    return cmp(a[1], b[1])
compareMarkPos = deprecated(Version('Twisted', 9, 0, 0))(compareMarkPos)
def comparePosition(firstElement, secondElement):
    """
    Compare the two elements given by their position in the document or
    documents they were parsed from.

    @type firstElement: C{dom.Element}
    @type secondElement: C{dom.Element}

    @return: C{-1}, C{0}, or C{1}, with the same meanings as the return value
    of L{cmp}.
    """
    # _markpos is presumably a (line, column) recorded at parse time --
    # NOTE(review): set outside this function; confirm against the parser.
    return cmp(firstElement._markpos, secondElement._markpos)
comparePosition = deprecated(Version('Twisted', 9, 0, 0))(comparePosition)
def findNodeJustBefore(target, nodes):
    """
    Find the last Element which is a sibling of C{target} and is in C{nodes}.

    @param target: A node the previous sibling of which to return.
    @param nodes: A list of nodes which might be the right node.

    @return: The previous sibling of C{target}.
    @raise RuntimeError: If no node from C{nodes} precedes C{target} at any
        ancestry level.
    """
    current = target
    while current is not None:
        # Walk backwards through the siblings at this level.
        sibling = current.previousSibling
        while sibling is not None:
            if sibling in nodes:
                return sibling
            sibling = sibling.previousSibling
        # Nothing found here; retry among the parent's preceding siblings.
        current = current.parentNode
    raise RuntimeError("Oops")
def getFirstAncestorWithSectionHeader(entry):
    """
    Visit the ancestors of C{entry} until one with at least one C{h2} child
    node is found, then return all of that node's C{h2} child nodes.

    @type entry: A DOM Node
    @param entry: The node from which to begin traversal.  This node itself is
    excluded from consideration.

    @rtype: C{list} of DOM Nodes
    @return: All C{h2} nodes of the ultimately selected parent node, or an
        empty list when no ancestor contains one.
    """
    # getParents()[0] is the entry itself, so skip it.
    for ancestor in domhelpers.getParents(entry)[1:]:
        headers = domhelpers.findNodesNamed(ancestor, "h2")
        if headers:
            return headers
    return []
def getSectionNumber(header):
    """
    Retrieve the section number of the given node.

    This is probably intended to interact in a rather specific way with
    L{numberDocument}: the section number is the value of the header's first
    child.

    @type header: A DOM Node or L{None}
    @param header: The section from which to extract a number.

    @return: C{None} or a C{str} giving the section number.
    """
    if header:
        return domhelpers.gatherTextNodes(header.childNodes[0])
    return None
def getSectionReference(entry):
    """
    Find the section number which contains the given node.

    This function looks at the given node's ancestry until it finds a node
    which defines a section, then returns that section's number.

    @type entry: A DOM Node
    @param entry: The node for which to determine the section.

    @rtype: C{str}
    @return: The section number, as returned by C{getSectionNumber} of the
    first ancestor of C{entry} which defines a section, as determined by
    L{getFirstAncestorWithSectionHeader}.
    """
    candidateHeaders = getFirstAncestorWithSectionHeader(entry)
    sectionHeader = findNodeJustBefore(entry, candidateHeaders)
    return getSectionNumber(sectionHeader)
def index(document, filename, chapterReference):
    """
    Extract index entries from the given document and store them for later use
    and insert named anchors so that the index can link back to those entries.

    Any node with a C{class} attribute set to C{index} is considered an index
    entry.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @type filename: C{str}
    @param filename: A link to the output for the given document which will be
    included in the index to link to any index entry found here.

    @type chapterReference: ???
    @param chapterReference: ???

    @return: C{None}
    """
    entries = domhelpers.findElementsWithAttribute(document, "class", "index")
    if not entries:
        return
    i = 0;
    for entry in entries:
        i += 1
        anchor = 'index%02d' % i
        if chapterReference:
            # Prefer the enclosing section number; fall back to the chapter.
            ref = getSectionReference(entry) or chapterReference
        else:
            ref = 'link'
        indexer.addEntry(filename, anchor, entry.getAttribute('value'), ref)
        # Turn the index marker into a named anchor in place.
        # does nodeName even affect anything?
        entry.nodeName = entry.tagName = entry.endTagName = 'a'
        for attrName in entry.attributes.keys():
            entry.removeAttribute(attrName)
        entry.setAttribute('name', anchor)
def setIndexLink(template, indexFilename):
    """
    Insert a link to an index document.

    Any node with a C{class} attribute set to C{index-link} will have its tag
    name changed to C{a} and its C{href} attribute set to C{indexFilename}.
    When C{indexFilename} is C{None}, all index-link nodes are removed
    instead.

    @type template: A DOM Node or Document
    @param template: The output template which defines the presentation of the
    version information.

    @type indexFilename: C{str} or C{None}
    @param indexFilename: The address of the index document to which to link.

    @return: C{None}
    """
    for linkNode in domhelpers.findElementsWithAttribute(template,
                                                         "class",
                                                         "index-link"):
        if indexFilename is None:
            linkNode.parentNode.removeChild(linkNode)
        else:
            # Rewrite the placeholder as an anchor, clearing any stray
            # attributes before setting the href.
            linkNode.nodeName = linkNode.tagName = linkNode.endTagName = 'a'
            for attributeName in list(linkNode.attributes.keys()):
                linkNode.removeAttribute(attributeName)
            linkNode.setAttribute('href', indexFilename)
def numberDocument(document, chapterNumber):
    """
    Number the sections of the given document.

    A dot-separated chapter, section number is added to the beginning of each
    section, as defined by C{h2} nodes.

    This is probably intended to interact in a rather specific way with
    L{getSectionNumber}.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @type chapterNumber: C{int}
    @param chapterNumber: The chapter number of this content in an overall
    document.

    @return: C{None}
    """
    sectionNumber = 1
    for headerNode in domhelpers.findNodesNamed(document, "h2"):
        # Prefix each h2 with "<chapter>.<section> ".
        numberLabel = dom.Text()
        numberLabel.data = "%s.%d " % (chapterNumber, sectionNumber)
        headerNode.insertBefore(numberLabel, headerNode.firstChild)
        sectionNumber += 1
def fixRelativeLinks(document, linkrel):
    """
    Replace relative links in C{src} and C{href} attributes with links relative
    to C{linkrel}.

    @type document: A DOM Node or Document
    @param document: The output template.

    @type linkrel: C{str}
    @param linkrel: An prefix to apply to all relative links in C{src} or
    C{href} attributes in the input document when generating the output
    document.
    """
    for attributeName in ('src', 'href'):
        for node in domhelpers.findElementsWithAttribute(document, attributeName):
            target = node.getAttribute(attributeName)
            # Leave absolute URLs and root-relative paths alone.
            if not (target.startswith('http') or target.startswith('/')):
                node.setAttribute(attributeName, linkrel + target)
def setTitle(template, title, chapterNumber):
    """
    Add title and chapter number information to the template document.

    The title is added to the end of the first C{title} tag and the end of the
    first tag with a C{class} attribute set to C{title}.  If specified, the
    chapter is inserted before the title.

    @type template: A DOM Node or Document
    @param template: The output template which defines the presentation of the
    version information.

    @type title: C{list} of DOM Nodes
    @param title: Nodes from the input document defining its title.

    @type chapterNumber: C{int}
    @param chapterNumber: The chapter number of this content in an overall
    document.  If not applicable, any C{False} value will result in this
    information being omitted.

    @return: C{None}
    """
    if numberer.getNumberSections() and chapterNumber:
        titleNode = dom.Text()
        # This is necessary in order for cloning below to work.  See Python
        # issue 4851.
        titleNode.ownerDocument = template.ownerDocument
        titleNode.data = '%s. ' % (chapterNumber,)
        title.insert(0, titleNode)

    # Clone the title nodes into both the <title> element and any element
    # carrying class="title"; cloning keeps the source nodes reusable.
    for nodeList in (domhelpers.findNodesNamed(template, "title"),
                     domhelpers.findElementsWithAttribute(template, "class",
                                                          'title')):
        if nodeList:
            for titleNode in title:
                nodeList[0].appendChild(titleNode.cloneNode(True))
def setAuthors(template, authors):
    """
    Add author information to the template document.

    Names and contact information for authors are added to each node with a
    C{class} attribute set to C{authors} and to the template head as C{link}
    nodes.

    @type template: A DOM Node or Document
    @param template: The output template which defines the presentation of the
    version information.

    @type authors: C{list} of two-tuples of C{str}
    @param authors: List of names and contact information for the authors of
    the input document.

    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(template,
                                                     "class", 'authors'):

        # First, similarly to setTitle, insert text into an <div
        # class="authors">
        container = dom.Element('span')
        for name, href in authors:
            anchor = dom.Element('a')
            anchor.setAttribute('href', href)
            anchorText = dom.Text()
            anchorText.data = name
            anchor.appendChild(anchorText)
            if (name, href) == authors[-1]:
                # Last author: no trailing separator; prefix "and " when
                # there is more than one author.
                if len(authors) == 1:
                    container.appendChild(anchor)
                else:
                    andText = dom.Text()
                    andText.data = 'and '
                    container.appendChild(andText)
                    container.appendChild(anchor)
            else:
                # Non-final authors are comma-separated.
                container.appendChild(anchor)
                commaText = dom.Text()
                commaText.data = ', '
                container.appendChild(commaText)
        node.appendChild(container)

    # Second, add appropriate <link rel="author" ...> tags to the <head>.
    head = domhelpers.findNodesNamed(template, 'head')[0]
    authors = [dom.parseString('<link rel="author" href="%s" title="%s"/>'
                               % (href, name)).childNodes[0]
               for name, href in authors]
    head.childNodes.extend(authors)
def setVersion(template, version):
    """
    Add a version indicator to the given template.

    Every node whose C{class} attribute is C{version} gets a text child
    containing C{version}.

    @type template: A DOM Node or Document
    @param template: The output template which defines the presentation of the
    version information.

    @type version: C{str}
    @param version: The version string to add to the template.

    @return: C{None}
    """
    for versionNode in domhelpers.findElementsWithAttribute(template, "class",
                                                            "version"):
        versionText = dom.Text()
        versionText.data = version
        versionNode.appendChild(versionText)
def getOutputFileName(originalFileName, outputExtension, index=None):
    """
    Return a filename which is the same as C{originalFileName} except for the
    extension, which is replaced with C{outputExtension}.

    For example, if C{originalFileName} is C{'/foo/bar.baz'} and
    C{outputExtension} is C{'.quux'}, the return value will be
    C{'/foo/bar.quux'}.

    @type originalFileName: C{str}
    @type outputExtension: C{str}
    @param index: ignored, never passed.
    @rtype: C{str}
    """
    base, _ = os.path.splitext(originalFileName)
    return base + outputExtension
def munge(document, template, linkrel, dir, fullpath, ext, url, config, outfileGenerator=getOutputFileName):
    """
    Mutate C{template} until it resembles C{document}.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @type template: A DOM Node or Document
    @param template: The template document which defines the desired
    presentation format of the content.

    @type linkrel: C{str}
    @param linkrel: An prefix to apply to all relative links in C{src} or
    C{href} attributes in the input document when generating the output
    document.

    @type dir: C{str}
    @param dir: The directory in which to search for source listing files.

    @type fullpath: C{str}
    @param fullpath: The file name which contained the input document.

    @type ext: C{str}
    @param ext: The extension to use when selecting an output file name.  This
    replaces the extension of the input file name.

    @type url: C{str}
    @param url: A string which will be interpolated with the fully qualified
    Python name of any API reference encountered in the input document, the
    result of which will be used as a link to API documentation for that name
    in the output document.

    @type config: C{dict}
    @param config: Further specification of the desired form of the output.
    Valid keys in this dictionary::

        noapi: If present and set to a True value, links to API documentation
               will not be generated.

        version: A string which will be included in the output to indicate the
                 version of this documentation.

    @type outfileGenerator: Callable of C{str}, C{str} returning C{str}
    @param outfileGenerator: Output filename factory.  This is invoked with the
    intput filename and C{ext} and the output document is serialized to the
    file with the name returned.

    @return: C{None}
    """
    # NOTE: the order of these transformations is significant; several of
    # them rewrite nodes that later steps search for.
    fixRelativeLinks(template, linkrel)
    addMtime(template, fullpath)
    removeH1(document)
    if not config.get('noapi', False):
        fixAPI(document, url)
    fontifyPython(document)
    fixLinks(document, ext)
    addPyListings(document, dir)
    addHTMLListings(document, dir)
    addPlainListings(document, dir)
    putInToC(template, generateToC(document))
    footnotes(document)
    notes(document)

    setIndexLink(template, indexer.getIndexFilename())
    setVersion(template, config.get('version', ''))

    # Insert the document into the template
    chapterNumber = htmlbook.getNumber(fullpath)
    title = domhelpers.findNodesNamed(document, 'title')[0].childNodes
    setTitle(template, title, chapterNumber)
    if numberer.getNumberSections() and chapterNumber:
        numberDocument(document, chapterNumber)
    # Record index entries against the eventual output file name.
    index(document, outfileGenerator(os.path.split(fullpath)[1], ext),
          htmlbook.getReference(fullpath))

    # Collect author metadata from <link rel="author"> nodes in the input.
    authors = domhelpers.findNodesNamed(document, 'link')
    authors = [(node.getAttribute('title') or '',
                node.getAttribute('href') or '')
               for node in authors
               if node.getAttribute('rel') == 'author']
    setAuthors(template, authors)

    # Graft the document body into the template's body placeholder.
    body = domhelpers.findNodesNamed(document, "body")[0]
    tmplbody = domhelpers.findElementsWithAttribute(template, "class",
                                                    "body")[0]
    tmplbody.childNodes = body.childNodes
    tmplbody.setAttribute("class", "content")
class _LocationReportingErrorHandler(ErrorHandler):
    """
    Define a SAX error handler which can report the location of fatal
    errors.

    Unlike the errors reported during parsing by other APIs in the xml
    package, this one tries to improve mismatched tag errors by including the
    location of both the relevant opening and closing tags.
    """
    def __init__(self, contentHandler):
        # The content handler tracks open-tag locations; see
        # _TagTrackingContentHandler._locationStack.
        self.contentHandler = contentHandler

    def fatalError(self, err):
        # Unfortunately, the underlying expat error code is only exposed as
        # a string.  I surely do hope no one ever goes and localizes expat.
        if err.getMessage() == 'mismatched tag':
            # Pair the close-tag location from the error with the open-tag
            # location recorded by the content handler.
            expect, begLine, begCol = self.contentHandler._locationStack[-1]
            endLine, endCol = err.getLineNumber(), err.getColumnNumber()
            raise process.ProcessingFailure(
                "mismatched close tag at line %d, column %d; expected </%s> "
                "(from line %d, column %d)" % (
                    endLine, endCol, expect, begLine, begCol))
        raise process.ProcessingFailure(
            '%s at line %d, column %d' % (err.getMessage(),
                                          err.getLineNumber(),
                                          err.getColumnNumber()))
class _TagTrackingContentHandler(SAX2DOM):
    """
    Define a SAX content handler which keeps track of the start location of
    all open tags.  This information is used by the above defined error
    handler to report useful locations when a fatal error is encountered.
    """
    def __init__(self):
        SAX2DOM.__init__(self)
        # Stack of (tag name, line, column) for every currently-open element.
        self._locationStack = []

    def setDocumentLocator(self, locator):
        # Keep the locator so start/end element handlers can query positions.
        self._docLocator = locator
        SAX2DOM.setDocumentLocator(self, locator)

    def startElement(self, name, attrs):
        self._locationStack.append((name, self._docLocator.getLineNumber(), self._docLocator.getColumnNumber()))
        SAX2DOM.startElement(self, name, attrs)

    def endElement(self, name):
        # Matched close tag: discard the corresponding open-tag record.
        self._locationStack.pop()
        SAX2DOM.endElement(self, name)
class _LocalEntityResolver(object):
    """
    Implement DTD loading (from a local source) for the limited number of
    DTDs which are allowed for Lore input documents.

    @ivar filename: The name of the file containing the lore input
        document.

    @ivar knownDTDs: A mapping from DTD system identifiers to L{FilePath}
        instances pointing to the corresponding DTD.
    """
    # Map the permitted system identifiers onto files shipped alongside this
    # module; the None key serves documents with no DTD declaration at all.
    s = FilePath(__file__).sibling
    knownDTDs = {
        None: s("xhtml1-strict.dtd"),
        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd": s("xhtml1-strict.dtd"),
        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd": s("xhtml1-transitional.dtd"),
        "xhtml-lat1.ent": s("xhtml-lat1.ent"),
        "xhtml-symbol.ent": s("xhtml-symbol.ent"),
        "xhtml-special.ent": s("xhtml-special.ent"),
        }
    del s

    def __init__(self, filename):
        self.filename = filename

    def resolveEntity(self, publicId, systemId):
        # Serve the DTD from disk instead of fetching it over the network;
        # anything not in the whitelist is a processing error.
        source = InputSource()
        source.setSystemId(systemId)
        try:
            dtdPath = self.knownDTDs[systemId]
        except KeyError:
            raise process.ProcessingFailure(
                "Invalid DTD system identifier (%r) in %s.  Only "
                "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd "
                "is allowed." % (systemId, self.filename))
        source.setByteStream(dtdPath.open())
        return source
def parseFileAndReport(filename, _open=file):
    """
    Parse and return the contents of the given lore XHTML document.

    @type filename: C{str}
    @param filename: The name of a file containing a lore XHTML document to
    load.

    @param _open: A callable used to open the file; parameterized for testing.

    @raise process.ProcessingFailure: When the contents of the specified file
    cannot be parsed.

    @rtype: A DOM Document
    @return: The document contained in C{filename}.
    """
    content = _TagTrackingContentHandler()
    error = _LocationReportingErrorHandler(content)
    parser = make_parser()
    parser.setContentHandler(content)
    parser.setErrorHandler(error)

    # In order to call a method on the expat parser which will be used by this
    # parser, we need the expat parser to be created.  This doesn't happen
    # until reset is called, normally by the parser's parse method.  That's too
    # late for us, since it will then go on to parse the document without
    # letting us do any extra set up.  So, force the expat parser to be created
    # here, and then disable reset so that the parser created is the one
    # actually used to parse our document.  Resetting is only needed if more
    # than one document is going to be parsed, and that isn't the case here.
    parser.reset()
    parser.reset = lambda: None

    # This is necessary to make the xhtml1 transitional declaration optional.
    # It causes LocalEntityResolver.resolveEntity(None, None) to be called.
    # LocalEntityResolver handles that case by giving out the xhtml1
    # transitional dtd.  Unfortunately, there is no public API for manipulating
    # the expat parser when using xml.sax.  Using the private _parser attribute
    # may break.  It's also possible that make_parser will return a parser
    # which doesn't use expat, but uses some other parser.  Oh well. :(
    # -exarkun
    parser._parser.UseForeignDTD(True)
    parser.setEntityResolver(_LocalEntityResolver(filename))

    # This is probably no-op because expat is not a validating parser.  Who
    # knows though, maybe you figured out a way to not use expat.
    parser.setFeature(feature_validation, False)

    fObj = _open(filename)
    try:
        try:
            parser.parse(fObj)
        except IOError, e:
            # Re-raise I/O problems with the offending filename attached.
            raise process.ProcessingFailure(
                e.strerror + ", filename was '" + filename + "'")
    finally:
        fObj.close()
    return content.document
def makeSureDirectoryExists(filename):
    """
    Create the directory which will contain C{filename}, if it does not
    already exist.

    @type filename: C{str}
    @param filename: A path whose parent directory should exist afterwards.
    The path is made absolute before its directory component is examined.

    @return: C{None}
    """
    filename = os.path.abspath(filename)
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError:
            # Another process may have created the directory between the
            # existence check and makedirs (TOCTOU); only propagate the
            # error if the directory really is still missing.
            if not os.path.isdir(dirname):
                raise
def doFile(filename, linkrel, ext, url, templ, options={}, outfileGenerator=getOutputFileName):
    """
    Process the input document at C{filename} and write an output document.

    @type filename: C{str}
    @param filename: The path to the input file which will be processed.

    @type linkrel: C{str}
    @param linkrel: An prefix to apply to all relative links in C{src} or
    C{href} attributes in the input document when generating the output
    document.

    @type ext: C{str}
    @param ext: The extension to use when selecting an output file name.  This
    replaces the extension of the input file name.

    @type url: C{str}
    @param url: A string which will be interpolated with the fully qualified
    Python name of any API reference encountered in the input document, the
    result of which will be used as a link to API documentation for that name
    in the output document.

    @type templ: A DOM Node or Document
    @param templ: The template on which the output document will be based.
    This is mutated and then serialized to the output file.

    @type options: C{dict}
    @param options: Further specification of the desired form of the output.
    Valid keys in this dictionary::

        noapi: If present and set to a True value, links to API documentation
               will not be generated.

        version: A string which will be included in the output to indicate the
                 version of this documentation.

    @type outfileGenerator: Callable of C{str}, C{str} returning C{str}
    @param outfileGenerator: Output filename factory.  This is invoked with the
    intput filename and C{ext} and the output document is serialized to the
    file with the name returned.

    @return: C{None}
    """
    doc = parseFileAndReport(filename)
    # Work on a deep copy so the caller's template can be reused for other
    # input documents.
    clonedNode = templ.cloneNode(1)
    munge(doc, clonedNode, linkrel, os.path.dirname(filename), filename, ext,
          url, options, outfileGenerator)
    newFilename = outfileGenerator(filename, ext)
    _writeDocument(newFilename, clonedNode)
def _writeDocument(newFilename, clonedNode):
    """
    Serialize the given node to XML into the named file.

    @param newFilename: The name of the file to which the XML will be
        written.  If this is in a directory which does not exist, the
        directory will be created.

    @param clonedNode: The root DOM node which will be serialized.

    @return: C{None}
    """
    makeSureDirectoryExists(newFilename)
    f = open(newFilename, 'w')
    try:
        # Ensure the file handle is released even if serialization or
        # the write itself fails part way through.
        f.write(clonedNode.toxml('utf-8'))
    finally:
        f.close()
| mzdaniel/oh-mainline | vendor/packages/twisted/twisted/lore/tree.py | Python | agpl-3.0 | 40,046 | [
"VisIt"
] | 7f078ee6de629f101ab5038123dc64038fe604f5bf482c5ee5c965d27ab3350c |
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""
Quasi-Newton algorithm
"""
__docformat__ = 'reStructuredText'
import numpy as np
import weakref,time,sys
from ase import *
def f(lamda, Gbar, b, radius):
    """
    Trust-radius constraint function for the level-shift search.

    Given the gradient projections ``Gbar`` onto the Hessian eigenmodes
    with eigenvalues ``b``, return ``radius**2`` minus the squared length
    of the shifted Newton step ``Gbar / (b - lamda)``.  The root of this
    function in ``lamda`` gives a step of length exactly ``radius``.
    """
    shifted = Gbar / (b - lamda)
    return radius ** 2 - np.dot(shifted, shifted)
def scale_radius_energy(f, r):
    """
    Return a multiplicative trust-radius update factor from the
    energy-prediction quality measure ``f`` (small ``f`` means the
    quadratic model predicted the energy change well).

    Each threshold that ``f`` falls below compounds a growth factor of
    1.4; each threshold it exceeds compounds a shrink factor of 1/1.4,
    exactly reproducing the original cascade of if-statements.
    The current radius ``r`` is accepted but not used.
    """
    scale = 1.0
    # Good prediction: grow once per satisfied upper bound.
    for upper in (0.01, 0.05, 0.10, 0.40):
        if f < upper:
            scale *= 1.4
    # Poor prediction: shrink once per exceeded lower bound.
    for lower in (0.5, 0.7, 1.0):
        if f > lower:
            scale *= 1. / 1.4
    return scale
def scale_radius_force(f, r):
    """
    Return a multiplicative trust-radius update factor from the
    force-prediction factor ``f`` (a value near 1 means the quadratic
    model predicted the gradient change well).

    The deviation ``g = |f - 1|`` is scored exactly like in
    scale_radius_energy: each threshold ``g`` falls below compounds a
    growth factor of 1.4, each threshold it exceeds compounds 1/1.4.
    The current radius ``r`` is accepted but not used.
    """
    scale = 1.0
    deviation = abs(f - 1)
    for upper in (0.01, 0.05, 0.10, 0.40):
        if deviation < upper:
            scale *= 1.4
    for lower in (0.5, 0.7, 1.0):
        if deviation > lower:
            scale *= 1. / 1.4
    return scale
def find_lamda(upperlimit, Gbar, b, radius):
    """
    Find the level shift ``lamda`` for which the shifted Newton step has
    length exactly ``radius``, i.e. the root of f(lamda, Gbar, b, radius),
    by interval bisection.

    ``upperlimit`` must be chosen by the caller so that f(upperlimit) < 0
    (the unshifted step is too long); the lower bracket is found by
    walking downwards in fixed steps until f becomes non-negative.
    """
    lowerlimit = upperlimit
    eps = 1e-12          # NOTE(review): unused here; kept as-is.
    step = 0.1
    # Walk the lower end down until f changes sign, so that
    # [lowerlimit, upperlimit] brackets the root.
    while f(lowerlimit, Gbar, b, radius) < 0:
        lowerlimit -= step

    converged = False
    while not converged:
        midt = (upperlimit + lowerlimit) / 2.
        lamda = midt
        fmidt = f(midt, Gbar, b, radius)
        fupper = f(upperlimit, Gbar, b, radius)
        flower = f(lowerlimit, Gbar, b, radius)
        # Keep the half-interval across which f changes sign.
        if fupper * fmidt < 0:
            lowerlimit = midt
        else:
            upperlimit = midt
        # Stop when the bracket is tighter than 1e-6.
        if abs(upperlimit - lowerlimit) < 1e-6:
            converged = True
    return lamda
def get_hessian_inertia(eigenvalues):
    """
    Return the inertia of the Hessian, i.e. the number of negative
    eigenvalues, assuming ``eigenvalues`` is sorted in ascending order
    (as returned by numpy.linalg.eigh).

    The first three eigenvalues are echoed to stdout for diagnostics,
    matching the output format of the original Python 2 print statement.
    """
    # sys.stdout.write is portable across Python 2 and 3; the original
    # ``print a, b, c`` separated items with single spaces.
    sys.stdout.write('eigenvalues  %s %s %s\n'
                     % (eigenvalues[0], eigenvalues[1], eigenvalues[2]))
    n = 0
    # Bounds check added: the original raised IndexError when every
    # eigenvalue was negative.
    while n < len(eigenvalues) and eigenvalues[n] < 0:
        n += 1
    return n
from numpy.linalg import eigh, solve
from ase.optimize import Optimizer
class GoodOldQuasiNewton(Optimizer):
    """
    Quasi-Newton structure optimizer with trust-radius step control.

    An approximate Hessian is built up iteratively (BFGS, Powell or
    Bofill updates) and each step is obtained by diagonalizing it and
    applying a level shift so the step stays within the current trust
    radius.  With ``transitionstate=True`` the optimizer walks to a
    first-order saddle point instead of a minimum.
    """

    def __init__(self, atoms, restart=None, logfile='-', trajectory=None,
                 fmax=None, converged=None,
                 hessianupdate='BFGS', hessian=None, forcemin=True,
                 verbosity=None, maxradius=None,
                 diagonal=20., radius=None,
                 transitionstate=False):
        """
        Parameters:

        atoms: the system to optimize.
        hessianupdate: 'BFGS', 'Powell', or any other value for Bofill.
        forcemin: use the quadratic energy estimate to control the trust
            radius (disabled automatically for nudged-elastic-band runs).
        diagonal: diagonal value used for the default initial Hessian.
        radius / maxradius: initial and maximal trust radius; defaults
            scale with the system size sqrt(3N).
        transitionstate: if True, converge to a first-order saddle point.

        NOTE(review): ``fmax``, ``converged`` and ``hessian`` are
        accepted but never used here; an explicit Hessian must be
        supplied via set_hessian()/read_hessian().  Kept for backward
        compatibility.
        """
        Optimizer.__init__(self, atoms, restart, logfile, trajectory)

        self.eps = 1e-12
        self.hessianupdate = hessianupdate
        self.forcemin = forcemin
        self.verbosity = verbosity
        self.diagonal = diagonal

        self.atoms = atoms

        # 3N degrees of freedom; default radii scale with sqrt(3N).
        n = len(self.atoms) * 3
        if radius is None:
            self.radius = 0.05 * np.sqrt(n) / 10.0
        else:
            self.radius = radius

        if maxradius is None:
            self.maxradius = 0.5 * np.sqrt(n)
        else:
            self.maxradius = maxradius

        # 0.01 < radius < maxradius
        self.radius = max(min(self.radius, self.maxradius), 0.0001)

        self.transitionstate = transitionstate

        # check if this is a nudged elastic band calculation
        if hasattr(atoms, 'springconstant'):
            self.forcemin = False

        self.t0 = time.time()

    def initialize(self):
        # Optimizer hook; no extra per-run state is needed.
        pass

    def write_log(self, text):
        # Append a line to the log file (if any) and flush immediately
        # so progress is visible during long runs.
        if self.logfile is not None:
            self.logfile.write(text + '\n')
            self.logfile.flush()

    def set_max_radius(self, maxradius):
        # Lower the trust-radius ceiling and clip the current radius.
        self.maxradius = maxradius
        self.radius = min(self.maxradius, self.radius)

    def set_hessian(self, hessian):
        self.hessian = hessian

    def get_hessian(self):
        # Lazily fall back to the default (scaled unit) Hessian.
        if not hasattr(self, 'hessian'):
            self.set_default_hessian()
        return self.hessian

    def set_default_hessian(self):
        # set unit matrix scaled by ``diagonal`` as the initial guess
        n = len(self.atoms) * 3
        hessian = np.zeros((n, n))
        for i in range(n):
            hessian[i][i] = self.diagonal
        self.set_hessian(hessian)

    def read_hessian(self, filename):
        # Load a previously pickled Hessian matrix.
        import cPickle
        f = open(filename, 'r')
        self.set_hessian(cPickle.load(f))
        f.close()

    def write_hessian(self, filename):
        # Pickle the current Hessian (paropen: master writes in
        # parallel runs).
        import cPickle
        f = paropen(filename, 'w')
        cPickle.dump(self.get_hessian(), f)
        f.close()

    def write_to_restartfile(self):
        # Dump the full optimizer state needed to resume a run.
        import cPickle
        f = paropen(self.restartfile, 'w')
        cPickle.dump((self.oldpos,
                      self.oldG,
                      self.oldenergy,
                      self.radius,
                      self.hessian,
                      self.energy_estimate), f)
        f.close()

    def update_hessian(self, pos, G):
        # Update the approximate Hessian from the gradient change since
        # the last accepted step; on the very first call just make sure
        # a Hessian exists.
        import copy
        if hasattr(self, 'oldG'):
            if self.hessianupdate == 'BFGS':
                self.update_hessian_bfgs(pos, G)
            elif self.hessianupdate == 'Powell':
                self.update_hessian_powell(pos, G)
            else:
                self.update_hessian_bofill(pos, G)
        else:
            if not hasattr(self, 'hessian'):
                self.set_default_hessian()

        # Remember the current point for the next update.
        self.oldpos = copy.copy(pos)
        self.oldG = copy.copy(G)

        if self.verbosity:
            print 'hessian ', self.hessian

    def update_hessian_bfgs(self, pos, G):
        # Standard BFGS rank-two update of the Hessian.
        n = len(self.hessian)
        dgrad = G - self.oldG
        dpos = pos - self.oldpos
        absdpos = np.sqrt(np.dot(dpos, dpos))  # NOTE(review): unused below
        dotg = np.dot(dgrad, dpos)
        tvec = np.dot(dpos, self.hessian)
        dott = np.dot(dpos, tvec)
        # Skip the update if either denominator is numerically zero.
        if (abs(dott) > self.eps) and (abs(dotg) > self.eps):
            for i in range(n):
                for j in range(n):
                    h = dgrad[i] * dgrad[j] / dotg - tvec[i] * tvec[j] / dott
                    self.hessian[i][j] += h

    def update_hessian_powell(self, pos, G):
        # Powell symmetric update; unlike BFGS it does not force the
        # Hessian to stay positive definite.
        n = len(self.hessian)
        dgrad = G - self.oldG
        dpos = pos - self.oldpos
        absdpos = np.dot(dpos, dpos)
        if absdpos < self.eps:
            return
        dotg = np.dot(dgrad, dpos)
        # Residual between the actual and the predicted gradient change.
        tvec = dgrad - np.dot(dpos, self.hessian)
        tvecdot = np.dot(tvec, tvec)       # NOTE(review): unused below
        tvecdpos = np.dot(tvec, dpos)
        ddot = tvecdpos / absdpos
        dott = np.dot(dpos, tvec)
        if (abs(dott) > self.eps) and (abs(dotg) > self.eps):
            for i in range(n):
                for j in range(n):
                    h = tvec[i] * dpos[j] + dpos[i] * tvec[j] - ddot * dpos[i] * dpos[j]
                    h *= 1. / absdpos
                    self.hessian[i][j] += h

    def update_hessian_bofill(self, pos, G):
        # Bofill update: interpolation between the symmetric rank-one
        # and Powell updates, commonly used for saddle-point searches.
        print 'update Bofill'
        n = len(self.hessian)
        dgrad = G - self.oldG
        dpos = pos - self.oldpos
        absdpos = np.dot(dpos, dpos)
        if absdpos < self.eps:
            return
        dotg = np.dot(dgrad, dpos)
        tvec = dgrad - np.dot(dpos, self.hessian)
        tvecdot = np.dot(tvec, tvec)
        tvecdpos = np.dot(tvec, dpos)
        ddot = tvecdpos / absdpos
        # Mixing coefficients between the Powell and SR1 contributions.
        coef1 = 1. - tvecdpos * tvecdpos / (absdpos * tvecdot)
        coef2 = (1. - coef1) * absdpos / tvecdpos
        coef3 = coef1 * tvecdpos / absdpos
        dott = np.dot(dpos, tvec)
        if (abs(dott) > self.eps) and (abs(dotg) > self.eps):
            for i in range(n):
                for j in range(n):
                    h = coef1 * (tvec[i] * dpos[j] + dpos[i] * tvec[j]) - dpos[i] * dpos[j] * coef3 + coef2 * tvec[i] * tvec[j]
                    h *= 1. / absdpos
                    self.hessian[i][j] += h

    def step(self, f):
        """ Do one QN step
        """
        # NOTE(review): the ``f`` argument (forces supplied by the
        # Optimizer framework) shadows the module-level function ``f``
        # and is not used directly; energy, forces and positions are
        # re-read from the atoms object instead.
        pos = self.atoms.get_positions().ravel()
        G = -self.atoms.get_forces().ravel()
        energy = self.atoms.get_potential_energy()

        self.write_iteration(energy, G)

        if hasattr(self, 'oldenergy'):
            self.write_log('energies ' + str(energy) + ' ' + str(self.oldenergy))
            # Allowed energy increase before a step is rejected: tight
            # for plain minimization, looser for NEB and saddle searches.
            if self.forcemin:
                de = 1e-4
            else:
                de = 1e-2
            if self.transitionstate:
                de = 0.2
            if (energy - self.oldenergy) > de:
                # Step raised the energy too much: undo it and retry
                # with half the trust radius.
                self.write_log('reject step')
                self.atoms.set_positions(self.oldpos.reshape((-1, 3)))
                G = self.oldG
                energy = self.oldenergy
                self.radius *= 0.5
            else:
                self.update_hessian(pos, G)
                de = energy - self.oldenergy
                f = 1.0
                if self.forcemin:
                    self.write_log("energy change; actual: %f estimated: %f " % (de, self.energy_estimate))
                    if abs(self.energy_estimate) > self.eps:
                        # Ratio of actual to predicted energy change
                        # drives the trust-radius update.
                        f = abs((de / self.energy_estimate) - 1)
                        self.write_log('Energy prediction factor ' + str(f))
                        # fg = self.get_force_prediction(G)
                        self.radius *= scale_radius_energy(f, self.radius)
                else:
                    self.write_log("energy change; actual: %f " % (de))
                    self.radius *= 1.5

                fg = self.get_force_prediction(G)
                self.write_log("Scale factors %f %f " % (scale_radius_energy(f, self.radius),
                                                         scale_radius_force(fg, self.radius)))
            self.radius = max(min(self.radius, self.maxradius), 0.0001)
        else:
            # First step: no history yet, just initialize the Hessian.
            self.update_hessian(pos, G)

        self.write_log("new radius %f " % (self.radius))
        self.oldenergy = energy

        # Diagonalize the Hessian; eigh returns eigenvalues in
        # ascending order, eigenvectors as columns (hence the transpose).
        b, V = eigh(self.hessian)
        V = V.T.copy()
        self.V = V

        # calculate projection of G onto eigenvectors V
        Gbar = np.dot(G, np.transpose(V))

        # Level shifts keeping the step inside the trust radius (and,
        # for saddle searches, pushing mode 0 uphill).
        lamdas = self.get_lambdas(b, Gbar)

        D = -Gbar / (b - lamdas)

        # Transform the step back from the eigenvector basis.
        n = len(D)
        step = np.zeros((n))
        for i in range(n):
            step += D[i] * V[i]
        pos = self.atoms.get_positions().ravel()
        pos += step

        # Store the model predictions used to judge the next step.
        energy_estimate = self.get_energy_estimate(D, Gbar, b)
        self.energy_estimate = energy_estimate
        self.gbar_estimate = self.get_gbar_estimate(D, Gbar, b)
        self.old_gbar = Gbar

        self.atoms.set_positions(pos.reshape((-1, 3)))

    def get_energy_estimate(self, D, Gbar, b):
        # Second-order Taylor estimate of the energy change along D.
        de = 0.0
        for n in range(len(D)):
            de += D[n] * Gbar[n] + 0.5 * D[n] * b[n] * D[n]
        return de

    def get_gbar_estimate(self, D, Gbar, b):
        # First-order estimate of the projected gradient after the step.
        gbar_est = (D * b) + Gbar
        self.write_log('Abs Gbar estimate ' + str(np.dot(gbar_est, gbar_est)))
        return gbar_est

    def get_lambdas(self, b, Gbar):
        """
        Return the level shifts applied to the Hessian eigenvalues.

        If the plain Newton step is already inside the trust radius (and
        the Hessian inertia is right for the kind of stationary point
        sought), the shifts are zero.  Otherwise a common shift is found
        by bisection so that the step length equals the trust radius;
        for transition-state searches the sign of the shift on the
        lowest mode is flipped so that mode is followed uphill.
        """
        lamdas = np.zeros((len(b)))
        D = -Gbar / b
        #absD = np.sqrt(np.sum(D**2))
        absD = np.sqrt(np.dot(D, D))
        eps = 1e-12
        nminus = self.get_hessian_inertia(b)

        if absD < self.radius:
            if not self.transitionstate:
                self.write_log('Newton step')
                return lamdas
            else:
                # A saddle search needs exactly one negative mode.
                if nminus == 1:
                    self.write_log('Newton step')
                    return lamdas
                else:
                    self.write_log("Wrong inertia of Hessian matrix: %2.2f %2.2f " % (b[0], b[1]))
        else:
            self.write_log("Corrected Newton step: abs(D) = %2.2f " % (absD))

        if not self.transitionstate:
            # upper limit: the shift must lie below the lowest eigenvalue
            upperlimit = min(0, b[0]) - eps
            lowerlimit = upperlimit
            lamda = find_lamda(upperlimit, Gbar, b, self.radius)
            lamdas += lamda
        else:
            # upperlimit
            upperlimit = min(-b[0], b[1], 0) - eps
            lamda = find_lamda(upperlimit, Gbar, b, self.radius)
            lamdas += lamda
            # Reverse the shift on the lowest mode to walk uphill there.
            lamdas[0] -= 2 * lamda

        return lamdas

    def print_hessian(self):
        # Dump the Hessian matrix to stdout, one row per line.
        hessian = self.get_hessian()
        n = len(hessian)
        for i in range(n):
            for j in range(n):
                print "%2.4f " % (hessian[i][j]),
            print " "

    def get_hessian_inertia(self, eigenvalues):
        # return number of negative modes
        # (assumes eigenvalues sorted ascending; at least three entries)
        self.write_log("eigenvalues %2.2f %2.2f %2.2f " % (eigenvalues[0],
                                                           eigenvalues[1],
                                                           eigenvalues[2]))
        n = 0
        while eigenvalues[n] < 0:
            n += 1
        return n

    def get_force_prediction(self, G):
        # return measure of how well the forces are predicted: the
        # normalized overlap of the actual and predicted gradient change
        # in the eigenvector basis (1.0 means perfect prediction).
        Gbar = np.dot(G, np.transpose(self.V))
        dGbar_actual = Gbar - self.old_gbar
        dGbar_predicted = Gbar - self.gbar_estimate
        f = np.dot(dGbar_actual, dGbar_predicted) / np.dot(dGbar_actual, dGbar_actual)
        self.write_log('Force prediction factor ' + str(f))
        return f

    def write_iteration(self, energy, G):
        # Hook for subclasses; intentionally a no-op here.
        pass
| freephys/python_ase | ase/optimize/oldqn.py | Python | gpl-3.0 | 11,761 | [
"ASE"
] | d7051a5081e15ef9b76b209bd62ce73ad0e3383007ec56c55b88d49558a89ab8 |
# -*- coding: utf-8 -*-
#
# Natural Language Toolkit: Snowball Stemmer
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Peter Michael Stahl <pemistahl@gmail.com>
# Peter Ljunglof <peter.ljunglof@heatherleaf.se> (revisions)
# Algorithms: Dr Martin Porter <martin@tartarus.org>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
u"""
Snowball stemmers and an accompanying demo function
This module provides a port of the Snowball stemmers
developed by U{Dr Martin Porter<http://tartarus.org/~martin/>}.
There is also a demo function demonstrating the different
algorithms. It can be invoked directly on the command line.
For more information take a look into the class C{SnowballStemmer}.
@author: Peter Michael Stahl
@contact: pemistahl@gmail.com
@contact: U{http://twitter.com/pemistahl}
"""
from api import *
from nltk.corpus import stopwords
from nltk.stem import porter
class SnowballStemmer(StemmerI):
    u"""
    A word stemmer based on the Snowball stemming algorithms.

    At the moment, this port is able to stem words from fourteen
    languages: Danish, Dutch, English, Finnish, French, German,
    Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian,
    Spanish and Swedish.

    Furthermore, there is also the original English Porter algorithm:

        Porter, M. \"An algorithm for suffix stripping.\"
        Program 14.3 (1980): 130-137.

    The algorithms have been developed by
    U{Dr Martin Porter<http://tartarus.org/~martin/>}.
    These stemmers are called Snowball, because he invented
    a programming language with this name for creating
    new stemming algorithms. There is more information available
    on the U{Snowball Website<http://snowball.tartarus.org/>}.

    The stemmer is invoked as shown below:

    >>> from nltk import SnowballStemmer
    >>> SnowballStemmer.languages # See which languages are supported
    ('danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian',
    'italian', 'norwegian', 'porter', 'portuguese', 'romanian',
    'russian', 'spanish', 'swedish')
    >>> stemmer = SnowballStemmer("german") # Choose a language
    >>> stemmer.stem(u"Autobahnen") # Stem a word
    u'autobahn'

    Invoking the stemmers that way is useful if you do not know the
    language to be stemmed at runtime. Alternatively, if you already know
    the language, then you can invoke the language specific stemmer directly:

    >>> from nltk.stem.snowball import GermanStemmer
    >>> stemmer = GermanStemmer()
    >>> stemmer.stem(u"Autobahnen")
    u'autobahn'

    @author: Peter Michael Stahl
    @contact: pemistahl@gmail.com
    @contact: U{http://twitter.com/pemistahl}
    @cvar languages: A tuple that contains the available language names
    @type languages: C{tuple}
    @ivar stopwords: A list that contains stopwords for the respective language
                     in Unicode format.
    @type stopwords: C{list}
    """

    languages = ("danish", "dutch", "english", "finnish", "french", "german",
                 "hungarian", "italian", "norwegian", "porter", "portuguese",
                 "romanian", "russian", "spanish", "swedish")

    def __init__(self, language, ignore_stopwords=False):
        u"""
        Create a language specific instance of the Snowball stemmer.

        @param language: The language whose subclass is instantiated.
        @type language: C{str, unicode}
        @param ignore_stopwords: If set to C{True}, stopwords are
                                 not stemmed and returned unchanged.
                                 Set to C{False} by default.
        @type ignore_stopwords: C{bool}
        @raise ValueError: If there is no stemmer for the specified
                           language, a C{ValueError} is raised.
        """
        if language not in self.languages:
            raise ValueError(u"The language '%s' is not supported." % language)
        # Dispatch to the language-specific class defined in this
        # module, e.g. "german" -> GermanStemmer.
        stemmerclass = globals()[language.capitalize() + "Stemmer"]
        self.stemmer = stemmerclass(ignore_stopwords)
        # Delegate stemming and stopword handling to the wrapped stemmer.
        self.stem = self.stemmer.stem
        self.stopwords = self.stemmer.stopwords
class _LanguageSpecificStemmer(StemmerI):
    u"""
    This helper subclass offers the possibility
    to invoke a specific stemmer directly.
    This is useful if you already know the language to be stemmed at runtime.
    """

    def __init__(self, ignore_stopwords=False):
        u"""
        Create an instance of the Snowball stemmer.

        @param ignore_stopwords: If set to C{True}, stopwords are
                                 not stemmed and returned unchanged.
                                 Set to C{False} by default.
        @type ignore_stopwords: C{bool}
        @raise ValueError: If no stopword list exists for the derived
                           class's language.
        """
        # The language is the name of the class, minus the final "Stemmer".
        language = type(self).__name__.lower()
        if language.endswith("stemmer"):
            language = language[:-7]

        self.stopwords = set()
        if ignore_stopwords:
            try:
                # The NLTK stopword corpus stores UTF-8 encoded byte
                # strings; decode them once here.
                for word in stopwords.words(language):
                    self.stopwords.add(word.decode("utf-8"))
            except IOError:
                # No stopword corpus is available for this language.
                raise ValueError("%r has no list of stopwords. Please set"
                                 " 'ignore_stopwords' to 'False'." % self)

    def __repr__(self):
        u"""
        Print out the string representation of the respective class.
        """
        return "<%s>" % type(self).__name__
class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer):
    """
    A word stemmer based on the original Porter stemming algorithm.

        Porter, M. \"An algorithm for suffix stripping.\"
        Program 14.3 (1980): 130-137.

    A few minor modifications have been made to Porter's basic
    algorithm.  See the source code of the module
    L{nltk.stem.porter} for more information.
    """
    def __init__(self, ignore_stopwords=False):
        # Initialize both bases explicitly: the stopword handling from
        # _LanguageSpecificStemmer and the actual stemming machinery
        # (including the stem() method) from porter.PorterStemmer.
        _LanguageSpecificStemmer.__init__(self, ignore_stopwords)
        porter.PorterStemmer.__init__(self)
class _ScandinavianStemmer(_LanguageSpecificStemmer):
    u"""
    This subclass encapsulates a method for defining the string region R1.
    It is used by the Danish, Norwegian, and Swedish stemmer.
    """

    def _r1_scandinavian(self, word, vowels):
        u"""
        Return the region R1 that is used by the Scandinavian stemmers.

        R1 is the region after the first non-vowel following a vowel,
        or is the null region at the end of the word if there is no
        such non-vowel. But then R1 is adjusted so that the region
        before it contains at least three letters.

        @param word: The word whose region R1 is determined.
        @type word: C{str, unicode}
        @param vowels: The vowels of the respective language that are
                       used to determine the region R1.
        @type vowels: C{unicode}
        @return: C{r1}, the region R1 for the respective word.
        @rtype: C{unicode}
        @note: This helper method is invoked by the respective stem method of
               the subclasses L{DanishStemmer}, L{NorwegianStemmer}, and
               L{SwedishStemmer}. It is not to be invoked directly!
        """
        r1 = u""
        for i in xrange(1, len(word)):
            if word[i] not in vowels and word[i-1] in vowels:
                # Enforce the "at least three letters before R1" rule:
                # if the first non-vowel comes too early, R1 starts at
                # position 3 instead.
                # NOTE(review): ``len(word[:i+1]) > 0`` is always true
                # here since i >= 1; kept for fidelity.
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) >= 3:
                    r1 = word[i+1:]
                else:
                    return word
                break
        return r1
class _StandardStemmer(_LanguageSpecificStemmer):
    u"""
    This subclass encapsulates two methods for defining the standard versions
    of the string regions R1, R2, and RV.
    """

    def _r1r2_standard(self, word, vowels):
        u"""
        Return the standard interpretations of the string regions R1 and R2.

        R1 is the region after the first non-vowel following a vowel,
        or is the null region at the end of the word if there is no
        such non-vowel.

        R2 is the region after the first non-vowel following a vowel
        in R1, or is the null region at the end of the word if there
        is no such non-vowel.

        @param word: The word whose regions R1 and R2 are determined.
        @type word: C{str, unicode}
        @param vowels: The vowels of the respective language that are
                       used to determine the regions R1 and R2.
        @type vowels: C{unicode}
        @return: C{(r1,r2)}, the regions R1 and R2 for the respective word.
        @rtype: C{tuple}
        @note: This helper method is invoked by the respective stem method of
               the subclasses L{DutchStemmer}, L{FinnishStemmer},
               L{FrenchStemmer}, L{GermanStemmer}, L{ItalianStemmer},
               L{PortugueseStemmer}, L{RomanianStemmer}, and L{SpanishStemmer}.
               It is not to be invoked directly!
        @note: A detailed description of how to define R1 and R2
               can be found under U{http://snowball.tartarus.org/
               texts/r1r2.html}.
        """
        r1 = u""
        r2 = u""
        for i in xrange(1, len(word)):
            if word[i] not in vowels and word[i-1] in vowels:
                r1 = word[i+1:]
                break

        for i in xrange(1, len(r1)):
            if r1[i] not in vowels and r1[i-1] in vowels:
                r2 = r1[i+1:]
                break

        return (r1, r2)

    def _rv_standard(self, word, vowels):
        u"""
        Return the standard interpretation of the string region RV.

        If the second letter is a consonant, RV is the region after the
        next following vowel. If the first two letters are vowels, RV is
        the region after the next following consonant. Otherwise, RV is
        the region after the third letter.

        @param word: The word whose region RV is determined.
        @type word: C{str, unicode}
        @param vowels: The vowels of the respective language that are
                       used to determine the region RV.
        @type vowels: C{unicode}
        @return: C{rv}, the region RV for the respective word.
        @rtype: C{unicode}
        @note: This helper method is invoked by the respective stem method of
               the subclasses L{ItalianStemmer}, L{PortugueseStemmer},
               L{RomanianStemmer}, and L{SpanishStemmer}. It is not to be
               invoked directly!
        """
        rv = u""
        if len(word) >= 2:
            if word[1] not in vowels:
                # Second letter is a consonant: RV is the region after
                # the next following vowel.
                for i in xrange(2, len(word)):
                    if word[i] in vowels:
                        rv = word[i+1:]
                        break
            elif word[0] in vowels:
                # First two letters are vowels: RV is the region after
                # the next following consonant.
                # BUGFIX: the previous test ``word[:2] in vowels`` was a
                # substring check against the vowel string and therefore
                # missed vowel pairs (e.g. "oa") that are not adjacent
                # in it.  Since word[1] is known to be a vowel here,
                # checking word[0] implements the Snowball definition.
                for i in xrange(2, len(word)):
                    if word[i] not in vowels:
                        rv = word[i+1:]
                        break
            else:
                # Consonant followed by a vowel: RV is the region after
                # the third letter.
                rv = word[3:]

        return rv
class DanishStemmer(_ScandinavianStemmer):
    u"""
    The Danish Snowball stemmer.

    @cvar __vowels: The Danish vowels.
    @type __vowels: C{unicode}
    @cvar __consonants: The Danish consonants.
    @type __consonants: C{unicode}
    @cvar __double_consonants: The Danish double consonants.
    @type __double_consonants: C{tuple}
    @cvar __s_ending: Letters that may directly appear before a word final 's'.
    @type __s_ending: C{unicode}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    @type __step2_suffixes: C{tuple}
    @cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    @type __step3_suffixes: C{tuple}
    @note: A detailed description of the Danish
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /danish/stemmer.html}.
    """
    # The language's vowels and other important characters are defined.
    __vowels = u"aeiouy\xE6\xE5\xF8"
    __consonants = u"bcdfghjklmnpqrstvwxz"
    __double_consonants = (u"bb", u"cc", u"dd", u"ff", u"gg", u"hh", u"jj",
                           u"kk", u"ll", u"mm", u"nn", u"pp", u"qq", u"rr",
                           u"ss", u"tt", u"vv", u"ww", u"xx", u"zz")
    __s_ending = u"abcdfghjklmnoprtvyz\xE5"

    # The different suffixes, divided into the algorithm's steps
    # and organized by length, are listed in tuples.
    __step1_suffixes = (u"erendes", u"erende", u"hedens", u"ethed",
                        u"erede", u"heden", u"heder", u"endes",
                        u"ernes", u"erens", u"erets", u"ered",
                        u"ende", u"erne", u"eren", u"erer", u"heds",
                        u"enes", u"eres", u"eret", u"hed", u"ene", u"ere",
                        u"ens", u"ers", u"ets", u"en", u"er", u"es", u"et",
                        u"e", u"s")
    __step2_suffixes = (u"gd", u"dt", u"gt", u"kt")
    __step3_suffixes = (u"elig", u"l\xF8st", u"lig", u"els", u"ig")

    def stem(self, word):
        u"""
        Stem a Danish word and return the stemmed form.

        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        # Every word is put into lower case for normalization.
        word = word.lower()

        if word in self.stopwords:
            return word

        # After this, the required regions are generated
        # by the respective helper method.
        r1 = self._r1_scandinavian(word, self.__vowels)

        # Then the actual stemming process starts.
        # Every new step is explicitly indicated
        # according to the descriptions on the Snowball website.

        # STEP 1: strip the longest matching suffix found in R1.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == u"s":
                    # A final 's' is removed only after an allowed letter.
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break

        # STEP 2: these consonant-pair endings lose their last letter.
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word = word[:-1]
                r1 = r1[:-1]
                break

        # STEP 3
        if r1.endswith(u"igst"):
            word = word[:-2]
            r1 = r1[:-2]

        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix == u"l\xF8st":
                    # "løst" is only shortened to "løs".
                    word = word[:-1]
                    r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]

                    # After deletion, repeat the step-2 check once.
                    if r1.endswith(self.__step2_suffixes):
                        word = word[:-1]
                        r1 = r1[:-1]
                break

        # STEP 4: Undouble a final double consonant (on long words only).
        for double_cons in self.__double_consonants:
            if word.endswith(double_cons) and len(word) > 3:
                word = word[:-1]
                break

        return word
class DutchStemmer(_StandardStemmer):
    u"""
    The Dutch Snowball stemmer.

    @cvar __vowels: The Dutch vowels.
    @type __vowels: C{unicode}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm.
    @type __step3b_suffixes: C{tuple}
    @note: A detailed description of the Dutch
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /dutch/stemmer.html}.
    """
    __vowels = u"aeiouy\xE8"
    __step1_suffixes = (u"heden", u"ene", u"en", u"se", u"s")
    __step3b_suffixes = (u"baar", u"lijk", u"bar", u"end", u"ing", u"ig")

    def stem(self, word):
        u"""
        Stem a Dutch word and return the stemmed form.

        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        # Records whether step 2 removed a final 'e'; step 3b's "bar"
        # suffix is only removed if it did.
        step2_success = False

        # Vowel accents are removed.
        word = (word.replace(u"\xE4", u"a").replace(u"\xE1", u"a")
                    .replace(u"\xEB", u"e").replace(u"\xE9", u"e")
                    .replace(u"\xED", u"i").replace(u"\xEF", u"i")
                    .replace(u"\xF6", u"o").replace(u"\xF3", u"o")
                    .replace(u"\xFC", u"u").replace(u"\xFA", u"u"))

        # An initial 'y', a 'y' after a vowel,
        # and an 'i' between self.__vowels is put into upper case.
        # As from now these are treated as consonants.
        if word.startswith(u"y"):
            word = u"".join((u"Y", word[1:]))

        for i in xrange(1, len(word)):
            if word[i-1] in self.__vowels and word[i] == u"y":
                word = u"".join((word[:i], u"Y", word[i+1:]))

        for i in xrange(1, len(word)-1):
            if (word[i-1] in self.__vowels and word[i] == u"i" and
                word[i+1] in self.__vowels):
                word = u"".join((word[:i], u"I", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)

        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        for i in xrange(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) == 0:
                    return word
                break

        # STEP 1: strip plural/genitive suffixes found in R1.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == u"heden":
                    # "heden" -> "heid" (plural of -heid nouns).
                    word = u"".join((word[:-5], u"heid"))
                    r1 = u"".join((r1[:-5], u"heid"))
                    if r2.endswith(u"heden"):
                        r2 = u"".join((r2[:-5], u"heid"))

                elif (suffix in (u"ene", u"en") and
                      not word.endswith(u"heden") and
                      word[-len(suffix)-1] not in self.__vowels and
                      word[-len(suffix)-3:-len(suffix)] != u"gem"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    # Undouble a resulting kk/dd/tt ending.
                    if word.endswith((u"kk", u"dd", u"tt")):
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]

                elif (suffix in (u"se", u"s") and
                      word[-len(suffix)-1] not in self.__vowels and
                      word[-len(suffix)-1] != u"j"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 2: delete a final 'e' in R1 preceded by a consonant.
        if r1.endswith(u"e") and word[-2] not in self.__vowels:
            step2_success = True
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]

            if word.endswith((u"kk", u"dd", u"tt")):
                word = word[:-1]
                r1 = r1[:-1]
                r2 = r2[:-1]

        # STEP 3a: delete "heid" in R2 (not after 'c'), then possibly
        # a following "en".
        if r2.endswith(u"heid") and word[-5] != u"c":
            word = word[:-4]
            r1 = r1[:-4]
            r2 = r2[:-4]

            if (r1.endswith(u"en") and word[-3] not in self.__vowels and
                word[-5:-2] != u"gem"):
                word = word[:-2]
                r1 = r1[:-2]
                r2 = r2[:-2]

                if word.endswith((u"kk", u"dd", u"tt")):
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]

        # STEP 3b: Derivational suffixes
        for suffix in self.__step3b_suffixes:
            if r2.endswith(suffix):
                if suffix in (u"end", u"ing"):
                    word = word[:-3]
                    r2 = r2[:-3]

                    if r2.endswith(u"ig") and word[-3] != u"e":
                        word = word[:-2]
                    else:
                        if word.endswith((u"kk", u"dd", u"tt")):
                            word = word[:-1]

                elif suffix == u"ig" and word[-3] != u"e":
                    word = word[:-2]

                elif suffix == u"lijk":
                    word = word[:-4]
                    r1 = r1[:-4]

                    # Repeat step 2 once after removing "lijk".
                    if r1.endswith(u"e") and word[-2] not in self.__vowels:
                        word = word[:-1]

                        if word.endswith((u"kk", u"dd", u"tt")):
                            word = word[:-1]

                elif suffix == u"baar":
                    word = word[:-4]

                elif suffix == u"bar" and step2_success:
                    word = word[:-3]
                break

        # STEP 4: Undouble vowel
        if len(word) >= 4:
            if word[-1] not in self.__vowels and word[-1] != u"I":
                if word[-3:-1] in (u"aa", u"ee", u"oo", u"uu"):
                    if word[-4] not in self.__vowels:
                        word = u"".join((word[:-3], word[-3], word[-1]))

        # All occurrences of 'I' and 'Y' are put back into lower case.
        word = word.replace(u"I", u"i").replace(u"Y", u"y")

        return word
class EnglishStemmer(_StandardStemmer):
u"""
The English Snowball stemmer.
@cvar __vowels: The English vowels.
@type __vowels: C{unicode}
@cvar __double_consonants: The English double consonants.
@type __double_consonants: C{tuple}
@cvar __li_ending: Letters that may directly appear before a word final 'li'.
@type __li_ending: C{unicode}
@cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
@type __step0_suffixes: C{tuple}
@cvar __step1a_suffixes: Suffixes to be deleted in step 1a of the algorithm.
@type __step1a_suffixes: C{tuple}
@cvar __step1b_suffixes: Suffixes to be deleted in step 1b of the algorithm.
@type __step1b_suffixes: C{tuple}
@cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
@type __step2_suffixes: C{tuple}
@cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
@type __step3_suffixes: C{tuple}
@cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
@type __step4_suffixes: C{tuple}
@cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
@type __step5_suffixes: C{tuple}
@cvar __special_words: A dictionary containing words
which have to be stemmed specially.
@type __special_words C{dict}
@note: A detailed description of the English
stemming algorithm can be found under
U{http://snowball.tartarus.org/algorithms
/english/stemmer.html}.
"""
__vowels = u"aeiouy"
__double_consonants = (u"bb", u"dd", u"ff", u"gg", u"mm", u"nn",
u"pp", u"rr", u"tt")
__li_ending = u"cdeghkmnrt"
__step0_suffixes = (u"'s'", u"'s", u"'")
__step1a_suffixes = (u"sses", u"ied", u"ies", u"us", u"ss", u"s")
__step1b_suffixes = (u"eedly", u"ingly", u"edly", u"eed", u"ing", u"ed")
__step2_suffixes = (u'ization', u'ational', u'fulness', u'ousness',
u'iveness', u'tional', u'biliti', u'lessli',
u'entli', u'ation', u'alism', u'aliti', u'ousli',
u'iviti', u'fulli', u'enci', u'anci', u'abli',
u'izer', u'ator', u'alli', u'bli', u'ogi', u'li')
__step3_suffixes = (u'ational', u'tional', u'alize', u'icate', u'iciti',
u'ative', u'ical', u'ness', u'ful')
__step4_suffixes = (u'ement', u'ance', u'ence', u'able', u'ible', u'ment',
u'ant', u'ent', u'ism', u'ate', u'iti', u'ous',
u'ive', u'ize', u'ion', u'al', u'er', u'ic')
__step5_suffixes = (u"e", u"l")
__special_words = {u"skis" : u"ski",
u"skies" : u"sky",
u"dying" : u"die",
u"lying" : u"lie",
u"tying" : u"tie",
u"idly" : u"idl",
u"gently" : u"gentl",
u"ugly" : u"ugli",
u"early" : u"earli",
u"only" : u"onli",
u"singly" : u"singl",
u"sky" : u"sky",
u"news" : u"news",
u"howe" : u"howe",
u"atlas" : u"atlas",
u"cosmos" : u"cosmos",
u"bias" : u"bias",
u"andes" : u"andes",
u"inning" : u"inning",
u"innings" : u"inning",
u"outing" : u"outing",
u"outings" : u"outing",
u"canning" : u"canning",
u"cannings" : u"canning",
u"herring" : u"herring",
u"herrings" : u"herring",
u"earring" : u"earring",
u"earrings" : u"earring",
u"proceed" : u"proceed",
u"proceeds" : u"proceed",
u"proceeded" : u"proceed",
u"proceeding" : u"proceed",
u"exceed" : u"exceed",
u"exceeds" : u"exceed",
u"exceeded" : u"exceed",
u"exceeding" : u"exceed",
u"succeed" : u"succeed",
u"succeeds" : u"succeed",
u"succeeded" : u"succeed",
u"succeeding" : u"succeed"}
def stem(self, word):
    u"""
    Stem an English word and return the stemmed form.
    @param word: The word that is stemmed.
    @type word: C{str, unicode}
    @return: The stemmed form.
    @rtype: C{unicode}
    """
    # NOTE: throughout this method, r1 and r2 (the Snowball regions R1/R2)
    # are kept in lock-step with `word`: every slice applied to `word` is
    # mirrored on r1/r2 so that region membership tests stay valid.
    word = word.lower()

    # Stopwords and very short words are returned unchanged.
    if word in self.stopwords or len(word) <= 2:
        return word

    # Irregular forms are resolved by direct table lookup.
    elif word in self.__special_words.keys():
        return self.__special_words[word]

    # Map the different apostrophe characters to a single consistent one
    word = (word.replace(u"\u2019", u"\x27")
                .replace(u"\u2018", u"\x27")
                .replace(u"\u201B", u"\x27"))

    # Drop a leading apostrophe.
    if word.startswith(u"\x27"):
        word = word[1:]

    # Upper-case 'y' marks positions that must be treated as consonants;
    # it is lowered back at the very end.
    if word.startswith(u"y"):
        word = "".join((u"Y", word[1:]))

    for i in xrange(1, len(word)):
        if word[i-1] in self.__vowels and word[i] == u"y":
            word = "".join((word[:i], u"Y", word[i+1:]))

    step1a_vowel_found = False
    step1b_vowel_found = False

    r1 = u""
    r2 = u""

    # Exceptional R1 prefixes defined by the Snowball spec: for these
    # words R1 starts right after the listed prefix rather than after
    # the first vowel-consonant pair.
    if word.startswith((u"gener", u"commun", u"arsen")):
        if word.startswith((u"gener", u"arsen")):
            r1 = word[5:]
        else:
            r1 = word[6:]

        for i in xrange(1, len(r1)):
            if r1[i] not in self.__vowels and r1[i-1] in self.__vowels:
                r2 = r1[i+1:]
                break
    else:
        r1, r2 = self._r1r2_standard(word, self.__vowels)

    # STEP 0
    # Strip possessive endings.
    for suffix in self.__step0_suffixes:
        if word.endswith(suffix):
            word = word[:-len(suffix)]
            r1 = r1[:-len(suffix)]
            r2 = r2[:-len(suffix)]
            break

    # STEP 1a
    for suffix in self.__step1a_suffixes:
        if word.endswith(suffix):

            if suffix == u"sses":
                word = word[:-2]
                r1 = r1[:-2]
                r2 = r2[:-2]

            elif suffix in (u"ied", u"ies"):
                # "ied"/"ies" -> "i" after a stem longer than one letter,
                # otherwise -> "ie" (e.g. "ties" -> "tie").
                if len(word[:-len(suffix)]) > 1:
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]
                else:
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]

            elif suffix == u"s":
                # Plural "s" is removed only if a vowel precedes the
                # immediately-preceding letter.
                for letter in word[:-2]:
                    if letter in self.__vowels:
                        step1a_vowel_found = True
                        break

                if step1a_vowel_found:
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]
            break

    # STEP 1b
    for suffix in self.__step1b_suffixes:
        if word.endswith(suffix):
            if suffix in (u"eed", u"eedly"):

                if r1.endswith(suffix):
                    word = u"".join((word[:-len(suffix)], u"ee"))

                    if len(r1) >= len(suffix):
                        r1 = u"".join((r1[:-len(suffix)], u"ee"))
                    else:
                        r1 = u""

                    if len(r2) >= len(suffix):
                        r2 = u"".join((r2[:-len(suffix)], u"ee"))
                    else:
                        r2 = u""
            else:
                # "ed"/"ing" families are removed only when the remaining
                # stem contains a vowel.
                for letter in word[:-len(suffix)]:
                    if letter in self.__vowels:
                        step1b_vowel_found = True
                        break

                if step1b_vowel_found:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]

                    if word.endswith((u"at", u"bl", u"iz")):
                        word = u"".join((word, u"e"))
                        r1 = u"".join((r1, u"e"))

                        if len(word) > 5 or len(r1) >=3:
                            r2 = u"".join((r2, u"e"))

                    elif word.endswith(self.__double_consonants):
                        # Undouble a trailing double consonant.
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]

                    elif ((r1 == u"" and len(word) >= 3 and
                           word[-1] not in self.__vowels and
                           word[-1] not in u"wxY" and
                           word[-2] in self.__vowels and
                           word[-3] not in self.__vowels)
                          or
                          (r1 == u"" and len(word) == 2 and
                           word[0] in self.__vowels and
                           word[1] not in self.__vowels)):
                        # Short-word rule: the stem ends in a "short
                        # syllable", so restore a final "e".
                        word = u"".join((word, u"e"))

                        if len(r1) > 0:
                            r1 = u"".join((r1, u"e"))

                        if len(r2) > 0:
                            r2 = u"".join((r2, u"e"))
            break

    # STEP 1c
    # Final "y"/"Y" after a consonant becomes "i" (e.g. "cry" -> "cri").
    if word[-1] in u"yY" and word[-2] not in self.__vowels and len(word) > 2:
        word = u"".join((word[:-1], u"i"))
        if len(r1) >= 1:
            r1 = u"".join((r1[:-1], u"i"))
        else:
            r1 = u""

        if len(r2) >= 1:
            r2 = u"".join((r2[:-1], u"i"))
        else:
            r2 = u""

    # STEP 2
    # Map derivational suffixes to shorter forms when they lie in R1.
    for suffix in self.__step2_suffixes:
        if word.endswith(suffix):
            if r1.endswith(suffix):
                if suffix == u"tional":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]

                elif suffix in (u"enci", u"anci", u"abli"):
                    word = u"".join((word[:-1], u"e"))

                    if len(r1) >= 1:
                        r1 = u"".join((r1[:-1], u"e"))
                    else:
                        r1 = u""

                    if len(r2) >= 1:
                        r2 = u"".join((r2[:-1], u"e"))
                    else:
                        r2 = u""

                elif suffix == u"entli":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]

                elif suffix in (u"izer", u"ization"):
                    word = u"".join((word[:-len(suffix)], u"ize"))

                    if len(r1) >= len(suffix):
                        r1 = u"".join((r1[:-len(suffix)], u"ize"))
                    else:
                        r1 = u""

                    if len(r2) >= len(suffix):
                        r2 = u"".join((r2[:-len(suffix)], u"ize"))
                    else:
                        r2 = u""

                elif suffix in (u"ational", u"ation", u"ator"):
                    word = u"".join((word[:-len(suffix)], u"ate"))

                    if len(r1) >= len(suffix):
                        r1 = u"".join((r1[:-len(suffix)], u"ate"))
                    else:
                        r1 = u""

                    if len(r2) >= len(suffix):
                        r2 = u"".join((r2[:-len(suffix)], u"ate"))
                    else:
                        # NOTE(review): this fallback is u"e" while sibling
                        # branches use u"" — it matches the upstream NLTK
                        # implementation, so it is kept as-is.
                        r2 = u"e"

                elif suffix in (u"alism", u"aliti", u"alli"):
                    word = u"".join((word[:-len(suffix)], u"al"))

                    if len(r1) >= len(suffix):
                        r1 = u"".join((r1[:-len(suffix)], u"al"))
                    else:
                        r1 = u""

                    if len(r2) >= len(suffix):
                        r2 = u"".join((r2[:-len(suffix)], u"al"))
                    else:
                        r2 = u""

                elif suffix == u"fulness":
                    word = word[:-4]
                    r1 = r1[:-4]
                    r2 = r2[:-4]

                elif suffix in (u"ousli", u"ousness"):
                    word = u"".join((word[:-len(suffix)], u"ous"))

                    if len(r1) >= len(suffix):
                        r1 = u"".join((r1[:-len(suffix)], u"ous"))
                    else:
                        r1 = u""

                    if len(r2) >= len(suffix):
                        r2 = u"".join((r2[:-len(suffix)], u"ous"))
                    else:
                        r2 = u""

                elif suffix in (u"iveness", u"iviti"):
                    word = u"".join((word[:-len(suffix)], u"ive"))

                    if len(r1) >= len(suffix):
                        r1 = u"".join((r1[:-len(suffix)], u"ive"))
                    else:
                        r1 = u""

                    if len(r2) >= len(suffix):
                        r2 = u"".join((r2[:-len(suffix)], u"ive"))
                    else:
                        # NOTE(review): u"e" fallback as in upstream NLTK.
                        r2 = u"e"

                elif suffix in (u"biliti", u"bli"):
                    word = u"".join((word[:-len(suffix)], u"ble"))

                    if len(r1) >= len(suffix):
                        r1 = u"".join((r1[:-len(suffix)], u"ble"))
                    else:
                        r1 = u""

                    if len(r2) >= len(suffix):
                        r2 = u"".join((r2[:-len(suffix)], u"ble"))
                    else:
                        r2 = u""

                elif suffix == u"ogi" and word[-4] == u"l":
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]

                elif suffix in (u"fulli", u"lessli"):
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]

                elif suffix == u"li" and word[-3] in self.__li_ending:
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]
            break

    # STEP 3
    for suffix in self.__step3_suffixes:
        if word.endswith(suffix):
            if r1.endswith(suffix):
                if suffix == u"tional":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]

                elif suffix == u"ational":
                    word = u"".join((word[:-len(suffix)], u"ate"))

                    if len(r1) >= len(suffix):
                        r1 = u"".join((r1[:-len(suffix)], u"ate"))
                    else:
                        r1 = u""

                    if len(r2) >= len(suffix):
                        r2 = u"".join((r2[:-len(suffix)], u"ate"))
                    else:
                        r2 = u""

                elif suffix == u"alize":
                    word = word[:-3]
                    r1 = r1[:-3]
                    r2 = r2[:-3]

                elif suffix in (u"icate", u"iciti", u"ical"):
                    word = u"".join((word[:-len(suffix)], u"ic"))

                    if len(r1) >= len(suffix):
                        r1 = u"".join((r1[:-len(suffix)], u"ic"))
                    else:
                        r1 = u""

                    if len(r2) >= len(suffix):
                        r2 = u"".join((r2[:-len(suffix)], u"ic"))
                    else:
                        r2 = u""

                elif suffix in (u"ful", u"ness"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]

                elif suffix == u"ative" and r2.endswith(suffix):
                    # "ative" is deleted only when it also lies in R2.
                    word = word[:-5]
                    r1 = r1[:-5]
                    r2 = r2[:-5]
            break

    # STEP 4
    # Residual suffixes are deleted only when they lie in R2.
    for suffix in self.__step4_suffixes:
        if word.endswith(suffix):
            if r2.endswith(suffix):
                if suffix == u"ion":
                    # "ion" is removed only after "s" or "t".
                    if word[-4] in u"st":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
            break

    # STEP 5
    if r2.endswith(u"l") and word[-2] == u"l":
        word = word[:-1]
    elif r2.endswith(u"e"):
        word = word[:-1]
    elif r1.endswith(u"e"):
        # Drop a final "e" in R1 unless it follows a short syllable.
        if len(word) >= 4 and (word[-2] in self.__vowels or
                               word[-2] in u"wxY" or
                               word[-3] not in self.__vowels or
                               word[-4] in self.__vowels):
            word = word[:-1]

    # Restore consonant-'Y' markers to lowercase.
    word = word.replace(u"Y", u"y")

    return word
class FinnishStemmer(_StandardStemmer):
    u"""
    The Finnish Snowball stemmer.
    @cvar __vowels: The Finnish vowels.
    @type __vowels: C{unicode}
    @cvar __restricted_vowels: A subset of the Finnish vowels.
    @type __restricted_vowels: C{unicode}
    @cvar __long_vowels: The Finnish vowels in their long forms.
    @type __long_vowels: C{tuple}
    @cvar __consonants: The Finnish consonants.
    @type __consonants: C{unicode}
    @cvar __double_consonants: The Finnish double consonants.
    @type __double_consonants: C{tuple}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    @type __step2_suffixes: C{tuple}
    @cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    @type __step3_suffixes: C{tuple}
    @cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    @type __step4_suffixes: C{tuple}
    @note: A detailed description of the Finnish
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /finnish/stemmer.html}.
    """
    # \xE4 = a-umlaut, \xF6 = o-umlaut (Latin-1 escapes used throughout).
    __vowels = u"aeiouy\xE4\xF6"
    __restricted_vowels = u"aeiou\xE4\xF6"
    __long_vowels = (u"aa", u"ee", u"ii", u"oo", u"uu", u"\xE4\xE4",
                     u"\xF6\xF6")
    __consonants = u"bcdfghjklmnpqrstvwxz"
    __double_consonants = (u"bb", u"cc", u"dd", u"ff", u"gg", u"hh", u"jj",
                           u"kk", u"ll", u"mm", u"nn", u"pp", u"qq", u"rr",
                           u"ss", u"tt", u"vv", u"ww", u"xx", u"zz")
    __step1_suffixes = (u'kaan', u'k\xE4\xE4n', u'sti', u'kin', u'han',
                        u'h\xE4n', u'ko', u'k\xF6', u'pa', u'p\xE4')
    __step2_suffixes = (u'nsa', u'ns\xE4', u'mme', u'nne', u'si', u'ni',
                        u'an', u'\xE4n', u'en')
    __step3_suffixes = (u'siin', u'tten', u'seen', u'han', u'hen', u'hin',
                        u'hon', u'h\xE4n', u'h\xF6n', u'den', u'tta',
                        u'tt\xE4', u'ssa', u'ss\xE4', u'sta',
                        u'st\xE4', u'lla', u'll\xE4', u'lta',
                        u'lt\xE4', u'lle', u'ksi', u'ine', u'ta',
                        u't\xE4', u'na', u'n\xE4', u'a', u'\xE4',
                        u'n')
    __step4_suffixes = (u'impi', u'impa', u'imp\xE4', u'immi', u'imma',
                        u'imm\xE4', u'mpi', u'mpa', u'mp\xE4', u'mmi',
                        u'mma', u'mm\xE4', u'eja', u'ej\xE4')

    def stem(self, word):
        u"""
        Stem a Finnish word and return the stemmed form.
        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        # r1/r2 are the Snowball regions; they are sliced in lock-step
        # with `word` so region membership tests stay valid.
        word = word.lower()

        if word in self.stopwords:
            return word

        step3_success = False

        r1, r2 = self._r1r2_standard(word, self.__vowels)

        # STEP 1: Particles etc.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == u"sti":
                    # "sti" is removed only when it lies in R2.
                    if suffix in r2:
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    # Other particles require a preceding "n", "t" or vowel.
                    if word[-len(suffix)-1] in u"ntaeiouy\xE4\xF6":
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                break

        # STEP 2: Possessives
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == u"si":
                    # "si" is removed unless preceded by "k".
                    if word[-3] != u"k":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                elif suffix == u"ni":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]
                    # Restore "kse" to its base form "ksi".
                    if word.endswith(u"kse"):
                        word = u"".join((word[:-3], u"ksi"))

                    if r1.endswith(u"kse"):
                        r1 = u"".join((r1[:-3], u"ksi"))

                    if r2.endswith(u"kse"):
                        r2 = u"".join((r2[:-3], u"ksi"))

                elif suffix == u"an":
                    # Removed only after the listed case endings.
                    if (word[-4:-2] in (u"ta", u"na") or
                        word[-5:-2] in (u"ssa", u"sta", u"lla", u"lta")):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                elif suffix == u"\xE4n":
                    if (word[-4:-2] in (u"t\xE4", u"n\xE4") or
                        word[-5:-2] in (u"ss\xE4", u"st\xE4",
                                        u"ll\xE4", u"lt\xE4")):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                elif suffix == u"en":
                    if word[-5:-2] in (u"lle", u"ine"):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    # Remaining three-letter possessives (nsa/nsä/mme/nne).
                    word = word[:-3]
                    r1 = r1[:-3]
                    r2 = r2[:-3]
                break

        # STEP 3: Cases
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix in (u"han", u"hen", u"hin", u"hon", u"h\xE4n",
                              u"h\xF6n"):
                    # Illative "hVn" endings require the matching vowel
                    # before them (e.g. "ahan" -> drop "han").
                    if ((suffix == u"han" and word[-4] == u"a") or
                        (suffix == u"hen" and word[-4] == u"e") or
                        (suffix == u"hin" and word[-4] == u"i") or
                        (suffix == u"hon" and word[-4] == u"o") or
                        (suffix == u"h\xE4n" and word[-4] == u"\xE4") or
                        (suffix == u"h\xF6n" and word[-4] == u"\xF6")):
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                        step3_success = True

                elif suffix in (u"siin", u"den", u"tten"):
                    # Require "Vi" (restricted vowel + i) before the suffix;
                    # otherwise fall through to try a shorter suffix.
                    if (word[-len(suffix)-1] == u"i" and
                        word[-len(suffix)-2] in self.__restricted_vowels):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        step3_success = True
                    else:
                        continue

                elif suffix == u"seen":
                    # Requires a preceding long vowel.
                    if word[-6:-4] in self.__long_vowels:
                        word = word[:-4]
                        r1 = r1[:-4]
                        r2 = r2[:-4]
                        step3_success = True
                    else:
                        continue

                elif suffix in (u"a", u"\xE4"):
                    # Partitive "a"/"ä" after consonant+vowel.
                    if word[-2] in self.__vowels and word[-3] in self.__consonants:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                        step3_success = True

                elif suffix in (u"tta", u"tt\xE4"):
                    # Abessive, removed only after "e".
                    if word[-4] == u"e":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                        step3_success = True

                elif suffix == u"n":
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]
                    step3_success = True

                    # After removing "n", also shorten a trailing "ie" or
                    # long vowel.
                    if word[-2:] == u"ie" or word[-2:] in self.__long_vowels:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    step3_success = True
                break

        # STEP 4: Other endings
        for suffix in self.__step4_suffixes:
            if r2.endswith(suffix):
                if suffix in (u"mpi", u"mpa", u"mp\xE4", u"mmi", u"mma",
                              u"mm\xE4"):
                    # Comparative endings, removed unless preceded by "po".
                    if word[-5:-3] != u"po":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 5: Plurals
        if step3_success and len(r1) >= 1 and r1[-1] in u"ij":
            word = word[:-1]
            r1 = r1[:-1]

        elif (not step3_success and len(r1) >= 2 and
              r1[-1] == u"t" and r1[-2] in self.__vowels):
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]
            # Superlative endings after the removed "t".
            if r2.endswith(u"imma"):
                word = word[:-4]
                r1 = r1[:-4]
            elif r2.endswith(u"mma") and r2[-5:-3] != u"po":
                word = word[:-3]
                r1 = r1[:-3]

        # STEP 6: Tidying up
        if r1[-2:] in self.__long_vowels:
            word = word[:-1]
            r1 = r1[:-1]

        if (len(r1) >= 2 and r1[-2] in self.__consonants and
            r1[-1] in u"a\xE4ei"):
            word = word[:-1]
            r1 = r1[:-1]

        if r1.endswith((u"oj", u"uj")):
            word = word[:-1]
            r1 = r1[:-1]

        if r1.endswith(u"jo"):
            word = word[:-1]
            r1 = r1[:-1]

        # If the word ends with a double consonant
        # followed by zero or more vowels, the last consonant is removed.
        for i in xrange(1, len(word)):
            if word[-i] in self.__vowels:
                continue
            else:
                if i == 1:
                    if word[-i-1:] in self.__double_consonants:
                        word = word[:-1]
                else:
                    if word[-i-1:-i+1] in self.__double_consonants:
                        word = u"".join((word[:-i], word[-i+1:]))
                break

        return word
class FrenchStemmer(_StandardStemmer):
    u"""
    The French Snowball stemmer.
    @cvar __vowels: The French vowels.
    @type __vowels: C{unicode}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    @type __step2a_suffixes: C{tuple}
    @cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    @type __step2b_suffixes: C{tuple}
    @cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    @type __step4_suffixes: C{tuple}
    @note: A detailed description of the French
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /french/stemmer.html}.
    """
    # Vowels include the accented forms (Latin-1 escapes).
    __vowels = u"aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9"
    # Suffix tables are ordered longest-first so the longest match wins.
    __step1_suffixes = (u'issements', u'issement', u'atrices', u'atrice',
                        u'ateurs', u'ations', u'logies', u'usions',
                        u'utions', u'ements', u'amment', u'emment',
                        u'ances', u'iqUes', u'ismes', u'ables', u'istes',
                        u'ateur', u'ation', u'logie', u'usion', u'ution',
                        u'ences', u'ement', u'euses', u'ments', u'ance',
                        u'iqUe', u'isme', u'able', u'iste', u'ence',
                        u'it\xE9s', u'ives', u'eaux', u'euse', u'ment',
                        u'eux', u'it\xE9', u'ive', u'ifs', u'aux', u'if')
    __step2a_suffixes = (u'issaIent', u'issantes', u'iraIent', u'issante',
                         u'issants', u'issions', u'irions', u'issais',
                         u'issait', u'issant', u'issent', u'issiez', u'issons',
                         u'irais', u'irait', u'irent', u'iriez', u'irons',
                         u'iront', u'isses', u'issez', u'\xEEmes',
                         u'\xEEtes', u'irai', u'iras', u'irez', u'isse',
                         u'ies', u'ira', u'\xEEt', u'ie', u'ir', u'is',
                         u'it', u'i')
    __step2b_suffixes = (u'eraIent', u'assions', u'erions', u'assent',
                         u'assiez', u'\xE8rent', u'erais', u'erait',
                         u'eriez', u'erons', u'eront', u'aIent', u'antes',
                         u'asses', u'ions', u'erai', u'eras', u'erez',
                         u'\xE2mes', u'\xE2tes', u'ante', u'ants',
                         u'asse', u'\xE9es', u'era', u'iez', u'ais',
                         u'ait', u'ant', u'\xE9e', u'\xE9s', u'er',
                         u'ez', u'\xE2t', u'ai', u'as', u'\xE9', u'a')
    __step4_suffixes = (u'i\xE8re', u'I\xE8re', u'ion', u'ier', u'Ier',
                        u'e', u'\xEB')

    def stem(self, word):
        u"""
        Stem a French word and return the stemmed form.
        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False
        rv_ending_found = False
        step2a_success = False
        step2b_success = False

        # Upper-case letters (U, I, Y) mark positions that must be treated
        # as consonants; they are lowered back at the very end.
        # Every occurrence of 'u' after 'q' is put into upper case.
        for i in xrange(1, len(word)):
            if word[i-1] == u"q" and word[i] == u"u":
                word = u"".join((word[:i], u"U", word[i+1:]))

        # Every occurrence of 'u' and 'i'
        # between vowels is put into upper case.
        # Every occurrence of 'y' preceded or
        # followed by a vowel is also put into upper case.
        for i in xrange(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == u"u":
                    word = u"".join((word[:i], u"U", word[i+1:]))

                elif word[i] == u"i":
                    word = u"".join((word[:i], u"I", word[i+1:]))

            if word[i-1] in self.__vowels or word[i+1] in self.__vowels:
                if word[i] == u"y":
                    word = u"".join((word[:i], u"Y", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self.__rv_french(word, self.__vowels)

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == u"eaux":
                    word = word[:-1]
                    step1_success = True

                elif suffix in (u"euse", u"euses"):
                    if suffix in r2:
                        word = word[:-len(suffix)]
                        step1_success = True

                    elif suffix in r1:
                        word = u"".join((word[:-len(suffix)], u"eux"))
                        step1_success = True

                elif suffix in (u"ement", u"ements") and suffix in rv:
                    word = word[:-len(suffix)]
                    step1_success = True

                    # Handle the residual stem left after "ement(s)".
                    if word[-2:] == u"iv" and u"iv" in r2:
                        word = word[:-2]

                        if word[-2:] == u"at" and u"at" in r2:
                            word = word[:-2]

                    elif word[-3:] == u"eus":
                        if u"eus" in r2:
                            word = word[:-3]
                        elif u"eus" in r1:
                            word = u"".join((word[:-1], u"x"))

                    elif word[-3:] in (u"abl", u"iqU"):
                        if u"abl" in r2 or u"iqU" in r2:
                            word = word[:-3]

                    elif word[-3:] in (u"i\xE8r", u"I\xE8r"):
                        if u"i\xE8r" in rv or u"I\xE8r" in rv:
                            word = u"".join((word[:-3], u"i"))

                elif suffix == u"amment" and suffix in rv:
                    # "amment" -> "ant"; treated as an rv ending so the
                    # verb-suffix steps may still apply.
                    word = u"".join((word[:-6], u"ant"))
                    rv = u"".join((rv[:-6], u"ant"))
                    rv_ending_found = True

                elif suffix == u"emment" and suffix in rv:
                    word = u"".join((word[:-6], u"ent"))
                    rv_ending_found = True

                elif (suffix in (u"ment", u"ments") and suffix in rv and
                      not rv.startswith(suffix) and
                      rv[rv.rindex(suffix)-1] in self.__vowels):
                    # "ment(s)" requires a vowel before it within RV.
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    rv_ending_found = True

                elif suffix == u"aux" and suffix in r1:
                    # "aux" -> "al".
                    word = u"".join((word[:-2], u"l"))
                    step1_success = True

                elif (suffix in (u"issement", u"issements") and suffix in r1
                      and word[-len(suffix)-1] not in self.__vowels):
                    word = word[:-len(suffix)]
                    step1_success = True

                elif suffix in (u"ance", u"iqUe", u"isme", u"able", u"iste",
                                u"eux", u"ances", u"iqUes", u"ismes",
                                u"ables", u"istes") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True

                elif suffix in (u"atrice", u"ateur", u"ation", u"atrices",
                                u"ateurs", u"ations") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True

                    if word[-2:] == u"ic":
                        if u"ic" in r2:
                            word = word[:-2]
                        else:
                            word = u"".join((word[:-2], u"iqU"))

                elif suffix in (u"logie", u"logies") and suffix in r2:
                    word = u"".join((word[:-len(suffix)], u"log"))
                    step1_success = True

                elif (suffix in (u"usion", u"ution", u"usions", u"utions") and
                      suffix in r2):
                    word = u"".join((word[:-len(suffix)], u"u"))
                    step1_success = True

                elif suffix in (u"ence", u"ences") and suffix in r2:
                    word = u"".join((word[:-len(suffix)], u"ent"))
                    step1_success = True

                elif suffix in (u"it\xE9", u"it\xE9s") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True

                    # Residual stems after "ité(s)".
                    if word[-4:] == u"abil":
                        if u"abil" in r2:
                            word = word[:-4]
                        else:
                            word = u"".join((word[:-2], u"l"))

                    elif word[-2:] == u"ic":
                        if u"ic" in r2:
                            word = word[:-2]
                        else:
                            word = u"".join((word[:-2], u"iqU"))

                    elif word[-2:] == u"iv":
                        if u"iv" in r2:
                            word = word[:-2]

                elif (suffix in (u"if", u"ive", u"ifs", u"ives") and
                      suffix in r2):
                    word = word[:-len(suffix)]
                    step1_success = True

                    if word[-2:] == u"at" and u"at" in r2:
                        word = word[:-2]

                        if word[-2:] == u"ic":
                            if u"ic" in r2:
                                word = word[:-2]
                            else:
                                word = u"".join((word[:-2], u"iqU"))
                break

        # STEP 2a: Verb suffixes beginning 'i'
        if not step1_success or rv_ending_found:
            for suffix in self.__step2a_suffixes:
                if word.endswith(suffix):
                    # Must lie in RV and be preceded by a non-vowel in RV.
                    if (suffix in rv and len(rv) > len(suffix) and
                        rv[rv.rindex(suffix)-1] not in self.__vowels):
                        word = word[:-len(suffix)]
                        step2a_success = True
                    break

            # STEP 2b: Other verb suffixes
            if not step2a_success:
                for suffix in self.__step2b_suffixes:
                    if rv.endswith(suffix):
                        if suffix == u"ions" and u"ions" in r2:
                            word = word[:-4]
                            step2b_success = True

                        elif suffix in (u'eraIent', u'erions', u'\xE8rent',
                                        u'erais', u'erait', u'eriez',
                                        u'erons', u'eront', u'erai', u'eras',
                                        u'erez', u'\xE9es', u'era', u'iez',
                                        u'\xE9e', u'\xE9s', u'er', u'ez',
                                        u'\xE9'):
                            word = word[:-len(suffix)]
                            step2b_success = True

                        elif suffix in (u'assions', u'assent', u'assiez',
                                        u'aIent', u'antes', u'asses',
                                        u'\xE2mes', u'\xE2tes', u'ante',
                                        u'ants', u'asse', u'ais', u'ait',
                                        u'ant', u'\xE2t', u'ai', u'as',
                                        u'a'):
                            word = word[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            step2b_success = True
                            # A preceding "e" in RV is also removed.
                            if rv.endswith(u"e"):
                                word = word[:-1]
                        break

        # STEP 3
        if step1_success or step2a_success or step2b_success:
            if word[-1] == u"Y":
                word = u"".join((word[:-1], u"i"))
            elif word[-1] == u"\xE7":
                # c-cedilla -> c
                word = u"".join((word[:-1], u"c"))

        # STEP 4: Residual suffixes
        else:
            if (len(word) >= 2 and word[-1] == u"s" and
                word[-2] not in u"aiou\xE8s"):
                word = word[:-1]

            for suffix in self.__step4_suffixes:
                if word.endswith(suffix):
                    if suffix in rv:
                        if (suffix == u"ion" and suffix in r2 and
                            rv[-4] in u"st"):
                            word = word[:-3]

                        elif suffix in (u"ier", u"i\xE8re", u"Ier",
                                        u"I\xE8re"):
                            word = u"".join((word[:-len(suffix)], u"i"))

                        elif suffix == u"e":
                            word = word[:-1]

                        elif suffix == u"\xEB" and word[-3:-1] == u"gu":
                            word = word[:-1]
                    break

        # STEP 5: Undouble
        if word.endswith((u"enn", u"onn", u"ett", u"ell", u"eill")):
            word = word[:-1]

        # STEP 6: Un-accent
        # Replace a final é/è followed only by non-vowels with "e".
        for i in xrange(1, len(word)):
            if word[-i] not in self.__vowels:
                # NOTE(review): this increment has no effect (the for-loop
                # rebinds i each iteration); it matches the upstream NLTK
                # code and is kept as-is.
                i += 1
            else:
                if i != 1 and word[-i] in (u"\xE9", u"\xE8"):
                    word = u"".join((word[:-i], u"e", word[-i+1:]))
                break

        # Restore the consonant markers to lowercase.
        word = (word.replace(u"I", u"i")
                    .replace(u"U", u"u")
                    .replace(u"Y", u"y"))

        return word

    def __rv_french(self, word, vowels):
        u"""
        Return the region RV that is used by the French stemmer.
        If the word begins with two vowels, RV is the region after
        the third letter. Otherwise, it is the region after the first
        vowel not at the beginning of the word, or the end of the word
        if these positions cannot be found. (Exceptionally, u'par',
        u'col' or u'tap' at the beginning of a word is also taken to
        define RV as the region to their right.)
        @param word: The French word whose region RV is determined.
        @type word: C{str, unicode}
        @param vowels: The French vowels that are used to determine
                       the region RV.
        @type vowels: C{unicode}
        @return: C{rv}, the region RV for the respective French word.
        @rtype: C{unicode}
        @note: This helper method is invoked by the stem method of
               the subclass L{FrenchStemmer}. It is not to be invoked directly!
        """
        rv = u""
        if len(word) >= 2:
            # Special prefixes and vowel-vowel starts: RV begins after
            # the third letter.
            if (word.startswith((u"par", u"col", u"tap")) or
                (word[0] in vowels and word[1] in vowels)):
                rv = word[3:]
            else:
                # Otherwise RV starts after the first vowel that is not
                # at the beginning of the word.
                for i in xrange(1, len(word)):
                    if word[i] in vowels:
                        rv = word[i+1:]
                        break

        return rv
class GermanStemmer(_StandardStemmer):
    u"""
    The German Snowball stemmer.
    @cvar __vowels: The German vowels.
    @type __vowels: C{unicode}
    @cvar __s_ending: Letters that may directly appear before a word final 's'.
    @type __s_ending: C{unicode}
    @cvar __st_ending: Letter that may directly appear before a word final 'st'.
    @type __st_ending: C{unicode}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    @type __step2_suffixes: C{tuple}
    @cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    @type __step3_suffixes: C{tuple}
    @note: A detailed description of the German
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /german/stemmer.html}.
    """
    # \xE4/\xF6/\xFC are a/o/u-umlaut (Latin-1 escapes).
    __vowels = u"aeiouy\xE4\xF6\xFC"
    __s_ending = u"bdfghklmnrt"
    __st_ending = u"bdfghklmnt"
    __step1_suffixes = (u"ern", u"em", u"er", u"en", u"es", u"e", u"s")
    __step2_suffixes = (u"est", u"en", u"er", u"st")
    __step3_suffixes = (u"isch", u"lich", u"heit", u"keit",
                        u"end", u"ung", u"ig", u"ik")

    def stem(self, word):
        u"""
        Stem a German word and return the stemmed form.
        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        # Replace sharp s (eszett) with "ss".
        word = word.replace(u"\xDF", u"ss")

        # Every occurrence of 'u' and 'y'
        # between vowels is put into upper case.
        # (Marks them as consonants; lowered back at the end.)
        for i in xrange(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == u"u":
                    word = u"".join((word[:i], u"U", word[i+1:]))

                elif word[i] == u"y":
                    word = u"".join((word[:i], u"Y", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)

        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        for i in xrange(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) == 0:
                    # NOTE(review): unreachable (i >= 1 makes the prefix
                    # non-empty); kept to match the upstream NLTK code.
                    return word
                break

        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                # "niss" + e/en/es: drop the suffix plus one "s".
                if (suffix in (u"en", u"es", u"e") and
                    word[-len(suffix)-4:-len(suffix)] == u"niss"):
                    word = word[:-len(suffix)-1]
                    r1 = r1[:-len(suffix)-1]
                    r2 = r2[:-len(suffix)-1]

                elif suffix == u"s":
                    # Final "s" only after a valid s-ending letter.
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 2
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == u"st":
                    # "st" only after a valid st-ending and a stem of
                    # at least 3 letters.
                    if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 3: Derivational suffixes
        for suffix in self.__step3_suffixes:
            if r2.endswith(suffix):
                if suffix in (u"end", u"ung"):
                    # Also strip a preceding "ig" unless it follows "e".
                    if (u"ig" in r2[-len(suffix)-2:-len(suffix)] and
                        u"e" not in r2[-len(suffix)-3:-len(suffix)-2]):
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]

                elif (suffix in (u"ig", u"ik", u"isch") and
                      u"e" not in r2[-len(suffix)-1:-len(suffix)]):
                    word = word[:-len(suffix)]

                elif suffix in (u"lich", u"heit"):
                    # Also strip a preceding "er" or "en" if in R1.
                    if (u"er" in r1[-len(suffix)-2:-len(suffix)] or
                        u"en" in r1[-len(suffix)-2:-len(suffix)]):
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]

                elif suffix == u"keit":
                    # Also strip a preceding "lich" or "ig" if in R2.
                    if u"lich" in r2[-len(suffix)-4:-len(suffix)]:
                        word = word[:-len(suffix)-4]

                    elif u"ig" in r2[-len(suffix)-2:-len(suffix)]:
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]
                break

        # Umlaut accents are removed and
        # 'u' and 'y' are put back into lower case.
        word = (word.replace(u"\xE4", u"a").replace(u"\xF6", u"o")
                    .replace(u"\xFC", u"u").replace(u"U", u"u")
                    .replace(u"Y", u"y"))

        return word
class HungarianStemmer(_LanguageSpecificStemmer):
u"""
The Hungarian Snowball stemmer.
@cvar __vowels: The Hungarian vowels.
@type __vowels: C{unicode}
@cvar __digraphs: The Hungarian digraphs.
@type __digraphs: C{tuple}
@cvar __double_consonants: The Hungarian double consonants.
@type __double_consonants: C{tuple}
@cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
@type __step1_suffixes: C{tuple}
@cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
@type __step2_suffixes: C{tuple}
@cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
@type __step3_suffixes: C{tuple}
@cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
@type __step4_suffixes: C{tuple}
@cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
@type __step5_suffixes: C{tuple}
@cvar __step6_suffixes: Suffixes to be deleted in step 6 of the algorithm.
@type __step6_suffixes: C{tuple}
@cvar __step7_suffixes: Suffixes to be deleted in step 7 of the algorithm.
@type __step7_suffixes: C{tuple}
@cvar __step8_suffixes: Suffixes to be deleted in step 8 of the algorithm.
@type __step8_suffixes: C{tuple}
@cvar __step9_suffixes: Suffixes to be deleted in step 9 of the algorithm.
@type __step9_suffixes: C{tuple}
@note: A detailed description of the Hungarian
stemming algorithm can be found under
U{http://snowball.tartarus.org/algorithms
/hungarian/stemmer.html}.
"""
__vowels = u"aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB"
__digraphs = (u"cs", u"dz", u"dzs", u"gy", u"ly", u"ny", u"ty", u"zs")
__double_consonants = (u"bb", u"cc", u"ccs", u"dd", u"ff", u"gg",
u"ggy", u"jj", u"kk", u"ll", u"lly", u"mm",
u"nn", u"nny", u"pp", u"rr", u"ss", u"ssz",
u"tt", u"tty", u"vv", u"zz", u"zzs")
__step1_suffixes = (u"al", u"el")
__step2_suffixes = (u'k\xE9ppen', u'onk\xE9nt', u'enk\xE9nt',
u'ank\xE9nt', u'k\xE9pp', u'k\xE9nt', u'ban',
u'ben', u'nak', u'nek', u'val', u'vel', u't\xF3l',
u't\xF5l', u'r\xF3l', u'r\xF5l', u'b\xF3l',
u'b\xF5l', u'hoz', u'hez', u'h\xF6z',
u'n\xE1l', u'n\xE9l', u'\xE9rt', u'kor',
u'ba', u'be', u'ra', u're', u'ig', u'at', u'et',
u'ot', u'\xF6t', u'ul', u'\xFCl', u'v\xE1',
u'v\xE9', u'en', u'on', u'an', u'\xF6n',
u'n', u't')
__step3_suffixes = (u"\xE1nk\xE9nt", u"\xE1n", u"\xE9n")
__step4_suffixes = (u'astul', u'est\xFCl', u'\xE1stul',
u'\xE9st\xFCl', u'stul', u'st\xFCl')
__step5_suffixes = (u"\xE1", u"\xE9")
__step6_suffixes = (u'ok\xE9', u'\xF6k\xE9', u'ak\xE9',
u'ek\xE9', u'\xE1k\xE9', u'\xE1\xE9i',
u'\xE9k\xE9', u'\xE9\xE9i', u'k\xE9',
u'\xE9i', u'\xE9\xE9', u'\xE9')
__step7_suffixes = (u'\xE1juk', u'\xE9j\xFCk', u'\xFCnk',
u'unk', u'juk', u'j\xFCk', u'\xE1nk',
u'\xE9nk', u'nk', u'uk', u'\xFCk', u'em',
u'om', u'am', u'od', u'ed', u'ad', u'\xF6d',
u'ja', u'je', u'\xE1m', u'\xE1d', u'\xE9m',
u'\xE9d', u'm', u'd', u'a', u'e', u'o',
u'\xE1', u'\xE9')
__step8_suffixes = (u'jaitok', u'jeitek', u'jaink', u'jeink', u'aitok',
u'eitek', u'\xE1itok', u'\xE9itek', u'jaim',
u'jeim', u'jaid', u'jeid', u'eink', u'aink',
u'itek', u'jeik', u'jaik', u'\xE1ink',
u'\xE9ink', u'aim', u'eim', u'aid', u'eid',
u'jai', u'jei', u'ink', u'aik', u'eik',
u'\xE1im', u'\xE1id', u'\xE1ik', u'\xE9im',
u'\xE9id', u'\xE9ik', u'im', u'id', u'ai',
u'ei', u'ik', u'\xE1i', u'\xE9i', u'i')
__step9_suffixes = (u"\xE1k", u"\xE9k", u"\xF6k", u"ok",
u"ek", u"ak", u"k")
def stem(self, word):
u"""
Stem an Hungarian word and return the stemmed form.
@param word: The word that is stemmed.
@type word: C{str, unicode}
@return: The stemmed form.
@rtype: C{unicode}
"""
word = word.lower()
if word in self.stopwords:
return word
r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs)
# STEP 1: Remove instrumental case
if r1.endswith(self.__step1_suffixes):
for double_cons in self.__double_consonants:
if word[-2-len(double_cons):-2] == double_cons:
word = u"".join((word[:-4], word[-3]))
if r1[-2-len(double_cons):-2] == double_cons:
r1 = u"".join((r1[:-4], r1[-3]))
break
# STEP 2: Remove frequent cases
for suffix in self.__step2_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
if r1.endswith(u"\xE1"):
word = u"".join((word[:-1], u"a"))
r1 = u"".join((r1[:-1], u"a"))
elif r1.endswith(u"\xE9"):
word = u"".join((word[:-1], u"e"))
r1 = u"".join((r1[:-1], u"e"))
break
# STEP 3: Remove special cases
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix == u"\xE9n":
word = u"".join((word[:-2], u"e"))
r1 = u"".join((r1[:-2], u"e"))
else:
word = u"".join((word[:-len(suffix)], u"a"))
r1 = u"".join((r1[:-len(suffix)], u"a"))
break
# STEP 4: Remove other cases
for suffix in self.__step4_suffixes:
if r1.endswith(suffix):
if suffix == u"\xE1stul":
word = u"".join((word[:-5], u"a"))
r1 = u"".join((r1[:-5], u"a"))
elif suffix == u"\xE9st\xFCl":
word = u"".join((word[:-5], u"e"))
r1 = u"".join((r1[:-5], u"e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 5: Remove factive case
for suffix in self.__step5_suffixes:
if r1.endswith(suffix):
for double_cons in self.__double_consonants:
if word[-1-len(double_cons):-1] == double_cons:
word = u"".join((word[:-3], word[-2]))
if r1[-1-len(double_cons):-1] == double_cons:
r1 = u"".join((r1[:-3], r1[-2]))
break
# STEP 6: Remove owned
for suffix in self.__step6_suffixes:
if r1.endswith(suffix):
if suffix in (u"\xE1k\xE9", u"\xE1\xE9i"):
word = u"".join((word[:-3], u"a"))
r1 = u"".join((r1[:-3], u"a"))
elif suffix in (u"\xE9k\xE9", u"\xE9\xE9i",
u"\xE9\xE9"):
word = u"".join((word[:-len(suffix)], u"e"))
r1 = u"".join((r1[:-len(suffix)], u"e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 7: Remove singular owner suffixes
for suffix in self.__step7_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix in (u"\xE1nk", u"\xE1juk", u"\xE1m",
u"\xE1d", u"\xE1"):
word = u"".join((word[:-len(suffix)], u"a"))
r1 = u"".join((r1[:-len(suffix)], u"a"))
elif suffix in (u"\xE9nk", u"\xE9j\xFCk",
u"\xE9m", u"\xE9d", u"\xE9"):
word = u"".join((word[:-len(suffix)], u"e"))
r1 = u"".join((r1[:-len(suffix)], u"e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 8: Remove plural owner suffixes
for suffix in self.__step8_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix in (u"\xE1im", u"\xE1id", u"\xE1i",
u"\xE1ink", u"\xE1itok", u"\xE1ik"):
word = u"".join((word[:-len(suffix)], u"a"))
r1 = u"".join((r1[:-len(suffix)], u"a"))
elif suffix in (u"\xE9im", u"\xE9id", u"\xE9i",
u"\xE9ink", u"\xE9itek", u"\xE9ik"):
word = u"".join((word[:-len(suffix)], u"e"))
r1 = u"".join((r1[:-len(suffix)], u"e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 9: Remove plural suffixes
for suffix in self.__step9_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix == u"\xE1k":
word = u"".join((word[:-2], u"a"))
elif suffix == u"\xE9k":
word = u"".join((word[:-2], u"e"))
else:
word = word[:-len(suffix)]
break
return word
def __r1_hungarian(self, word, vowels, digraphs):
    u"""
    Return the region R1 that is used by the Hungarian stemmer.

    If the word begins with a vowel, R1 is defined as the region
    after the first consonant or digraph (= two letters stand for
    one phoneme) in the word. If the word begins with a consonant,
    it is defined as the region after the first vowel in the word.
    If the word does not contain both a vowel and consonant, R1
    is the null region at the end of the word.

    @param word: The Hungarian word whose region R1 is determined.
    @type word: C{str, unicode}
    @param vowels: The Hungarian vowels that are used to determine
                   the region R1.
    @type vowels: C{unicode}
    @param digraphs: The digraphs that are used to determine the
                     region R1.
    @type digraphs: C{tuple}
    @return: C{r1}, the region R1 for the respective word.
    @rtype: C{unicode}
    @note: This helper method is invoked by the stem method of the subclass
           L{HungarianStemmer}. It is not to be invoked directly!
    """
    r1 = u""
    if word[0] in vowels:
        # Word starts with a vowel: R1 begins after the first digraph,
        # if one occurs anywhere past position 0 ...
        for digraph in digraphs:
            if digraph in word[1:]:
                # NOTE(review): word.index(digraph[-1]) finds the first
                # occurrence of the digraph's LAST letter anywhere in the
                # word, which may precede the digraph itself.  Also, the
                # digraphs are tried in tuple order, not in order of
                # appearance in the word.  Both quirks mirror the original
                # implementation -- confirm against the Snowball Hungarian
                # reference before changing.
                r1 = word[word.index(digraph[-1])+1:]
                return r1
        # ... otherwise R1 begins after the first consonant.
        for i in xrange(1, len(word)):
            if word[i] not in vowels:
                r1 = word[i+1:]
                break
    else:
        # Word starts with a consonant: R1 begins after the first vowel.
        for i in xrange(1, len(word)):
            if word[i] in vowels:
                r1 = word[i+1:]
                break
    # Falls through with r1 == u"" when no boundary was found
    # (word lacks a vowel or a consonant).
    return r1
class ItalianStemmer(_StandardStemmer):

    u"""
    The Italian Snowball stemmer.

    @cvar __vowels: The Italian vowels.
    @type __vowels: C{unicode}
    @cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    @type __step0_suffixes: C{tuple}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    @type __step2_suffixes: C{tuple}
    @note: A detailed description of the Italian
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /italian/stemmer.html}.
    """

    __vowels = u"aeiou\xE0\xE8\xEC\xF2\xF9"
    # The suffix tuples list longer suffixes before shorter ones, so the
    # first 'endswith' match found in the loops below is the longest one.
    __step0_suffixes = (u'gliela', u'gliele', u'glieli', u'glielo',
                        u'gliene', u'sene', u'mela', u'mele', u'meli',
                        u'melo', u'mene', u'tela', u'tele', u'teli',
                        u'telo', u'tene', u'cela', u'cele', u'celi',
                        u'celo', u'cene', u'vela', u'vele', u'veli',
                        u'velo', u'vene', u'gli', u'ci', u'la', u'le',
                        u'li', u'lo', u'mi', u'ne', u'si', u'ti', u'vi')
    __step1_suffixes = (u'atrice', u'atrici', u'azione', u'azioni',
                        u'uzione', u'uzioni', u'usione', u'usioni',
                        u'amento', u'amenti', u'imento', u'imenti',
                        u'amente', u'abile', u'abili', u'ibile', u'ibili',
                        u'mente', u'atore', u'atori', u'logia', u'logie',
                        u'anza', u'anze', u'iche', u'ichi', u'ismo',
                        u'ismi', u'ista', u'iste', u'isti', u'ist\xE0',
                        u'ist\xE8', u'ist\xEC', u'ante', u'anti',
                        u'enza', u'enze', u'ico', u'ici', u'ica', u'ice',
                        u'oso', u'osi', u'osa', u'ose', u'it\xE0',
                        u'ivo', u'ivi', u'iva', u'ive')
    __step2_suffixes = (u'erebbero', u'irebbero', u'assero', u'assimo',
                        u'eranno', u'erebbe', u'eremmo', u'ereste',
                        u'eresti', u'essero', u'iranno', u'irebbe',
                        u'iremmo', u'ireste', u'iresti', u'iscano',
                        u'iscono', u'issero', u'arono', u'avamo', u'avano',
                        u'avate', u'eremo', u'erete', u'erono', u'evamo',
                        u'evano', u'evate', u'iremo', u'irete', u'irono',
                        u'ivamo', u'ivano', u'ivate', u'ammo', u'ando',
                        u'asse', u'assi', u'emmo', u'enda', u'ende',
                        u'endi', u'endo', u'erai', u'erei', u'Yamo',
                        u'iamo', u'immo', u'irai', u'irei', u'isca',
                        u'isce', u'isci', u'isco', u'ano', u'are', u'ata',
                        u'ate', u'ati', u'ato', u'ava', u'avi', u'avo',
                        u'er\xE0', u'ere', u'er\xF2', u'ete', u'eva',
                        u'evi', u'evo', u'ir\xE0', u'ire', u'ir\xF2',
                        u'ita', u'ite', u'iti', u'ito', u'iva', u'ivi',
                        u'ivo', u'ono', u'uta', u'ute', u'uti', u'uto',
                        u'ar', u'ir')

    def stem(self, word):
        u"""
        Stem an Italian word and return the stemmed form.

        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        word = word.lower()
        if word in self.stopwords:
            return word

        step1_success = False

        # All acute accents are replaced by grave accents.
        word = (word.replace(u"\xE1", u"\xE0")
                    .replace(u"\xE9", u"\xE8")
                    .replace(u"\xED", u"\xEC")
                    .replace(u"\xF3", u"\xF2")
                    .replace(u"\xFA", u"\xF9"))

        # Every occurrence of 'u' after 'q'
        # is put into upper case.
        for i in xrange(1, len(word)):
            if word[i-1] == u"q" and word[i] == u"u":
                word = u"".join((word[:i], u"U", word[i+1:]))

        # Every occurrence of 'u' and 'i'
        # between vowels is put into upper case
        # (marks them as non-vowels for region computation;
        # undone at the very end of this method).
        for i in xrange(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == u"u":
                    word = u"".join((word[:i], u"U", word[i+1:]))
                elif word[i] == u"i":
                    word = u"".join((word[:i], u"I", word[i+1:]))

        # r1/r2/rv are suffix regions of 'word'; every trim applied to
        # 'word' below is mirrored on the regions to keep them in sync.
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Attached pronoun
        for suffix in self.__step0_suffixes:
            if rv.endswith(suffix):
                # The pronoun is handled only when preceded by
                # 'ando'/'endo' (deleted) or 'ar'/'er'/'ir' (replaced
                # by 'e'); any other match falls through unchanged.
                if rv[-len(suffix)-4:-len(suffix)] in (u"ando", u"endo"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                elif (rv[-len(suffix)-2:-len(suffix)] in
                      (u"ar", u"er", u"ir")):
                    word = u"".join((word[:-len(suffix)], u"e"))
                    r1 = u"".join((r1[:-len(suffix)], u"e"))
                    r2 = u"".join((r2[:-len(suffix)], u"e"))
                    rv = u"".join((rv[:-len(suffix)], u"e"))
                break

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == u"amente" and r1.endswith(suffix):
                    step1_success = True
                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]
                    # Strip a residual 'iv' (optionally followed by 'at'),
                    # 'os'/'ic', or 'abil' still found in R2.
                    if r2.endswith(u"iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]
                        if r2.endswith(u"at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif r2.endswith((u"os", u"ic")):
                        word = word[:-2]
                        rv = rv[:-2]
                    elif r2.endswith(u"abil"):
                        word = word[:-4]
                        rv = rv[:-4]
                elif (suffix in (u"amento", u"amenti",
                                 u"imento", u"imenti") and
                      rv.endswith(suffix)):
                    step1_success = True
                    word = word[:-6]
                    rv = rv[:-6]
                elif r2.endswith(suffix):
                    step1_success = True
                    if suffix in (u"azione", u"azioni", u"atore", u"atori"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith(u"ic"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif suffix in (u"logia", u"logie"):
                        word = word[:-2]
                        # NOTE(review): mirrors the upstream code; the
                        # Snowball spec suggests this should be rv[:-2]
                        # rather than word[:-2] -- confirm before changing.
                        rv = word[:-2]
                    elif suffix in (u"uzione", u"uzioni",
                                    u"usione", u"usioni"):
                        word = word[:-5]
                        rv = rv[:-5]
                    elif suffix in (u"enza", u"enze"):
                        # 'enza'/'enze' -> 'ente'
                        word = u"".join((word[:-2], u"te"))
                        rv = u"".join((rv[:-2], u"te"))
                    elif suffix == u"it\xE0":
                        word = word[:-3]
                        r2 = r2[:-3]
                        rv = rv[:-3]
                        if r2.endswith((u"ic", u"iv")):
                            word = word[:-2]
                            rv = rv[:-2]
                        elif r2.endswith(u"abil"):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in (u"ivo", u"ivi", u"iva", u"ive"):
                        word = word[:-3]
                        r2 = r2[:-3]
                        rv = rv[:-3]
                        if r2.endswith(u"at"):
                            word = word[:-2]
                            r2 = r2[:-2]
                            rv = rv[:-2]
                            if r2.endswith(u"ic"):
                                word = word[:-2]
                                rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break

        # STEP 2: Verb suffixes (only if step 1 removed nothing)
        if not step1_success:
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 3a: Delete a final vowel in RV, then a final 'i'
        # that is still in RV.
        if rv.endswith((u"a", u"e", u"i", u"o", u"\xE0", u"\xE8",
                        u"\xEC", u"\xF2")):
            word = word[:-1]
            rv = rv[:-1]
            if rv.endswith(u"i"):
                word = word[:-1]
                rv = rv[:-1]

        # STEP 3b: 'ch'/'gh' in RV lose their final 'h'.
        if rv.endswith((u"ch", u"gh")):
            word = word[:-1]

        # Undo the 'I'/'U' marking applied before region computation.
        word = word.replace(u"I", u"i").replace(u"U", u"u")
        return word
class NorwegianStemmer(_ScandinavianStemmer):

    u"""
    The Norwegian Snowball stemmer.

    @cvar __vowels: The Norwegian vowels.
    @type __vowels: C{unicode}
    @cvar __s_ending: Letters that may directly appear before a word final 's'.
    @type __s_ending: C{unicode}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    @type __step2_suffixes: C{tuple}
    @cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    @type __step3_suffixes: C{tuple}
    @note: A detailed description of the Norwegian
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /norwegian/stemmer.html}.
    """

    __vowels = u"aeiouy\xE6\xE5\xF8"
    __s_ending = u"bcdfghjlmnoprtvyz"
    # Longer suffixes come first so the first match is the longest one.
    __step1_suffixes = (u"hetenes", u"hetene", u"hetens", u"heter",
                        u"heten", u"endes", u"ande", u"ende", u"edes",
                        u"enes", u"erte", u"ede", u"ane", u"ene", u"ens",
                        u"ers", u"ets", u"het", u"ast", u"ert", u"en",
                        u"ar", u"er", u"as", u"es", u"et", u"a", u"e", u"s")
    __step2_suffixes = (u"dt", u"vt")
    __step3_suffixes = (u"hetslov", u"eleg", u"elig", u"elov", u"slov",
                        u"leg", u"eig", u"lig", u"els", u"lov", u"ig")

    def stem(self, word):
        u"""
        Stem a Norwegian word and return the stemmed form.

        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        word = word.lower()
        if word in self.stopwords:
            return word

        r1 = self._r1_scandinavian(word, self.__vowels)

        # STEP 1: Strip the main inflectional suffix found in R1.
        matched = next((s for s in self.__step1_suffixes
                        if r1.endswith(s)), None)
        if matched is not None:
            if matched in (u"erte", u"ert"):
                # 'erte'/'ert' are replaced by 'er' rather than deleted.
                word = word[:-len(matched)] + u"er"
                r1 = r1[:-len(matched)] + u"er"
            elif matched == u"s":
                # A final 's' may only be dropped after a "legal"
                # s-ending letter, or after 'k' preceded by a non-vowel.
                removable = (word[-2] in self.__s_ending or
                             (word[-2] == u"k" and
                              word[-3] not in self.__vowels))
                if removable:
                    word = word[:-1]
                    r1 = r1[:-1]
            else:
                word = word[:-len(matched)]
                r1 = r1[:-len(matched)]

        # STEP 2: 'dt'/'vt' in R1 lose only their final 't'.
        if any(r1.endswith(s) for s in self.__step2_suffixes):
            word = word[:-1]
            r1 = r1[:-1]

        # STEP 3: Delete a residual derivational suffix found in R1.
        matched = next((s for s in self.__step3_suffixes
                        if r1.endswith(s)), None)
        if matched is not None:
            word = word[:-len(matched)]

        return word
class PortugueseStemmer(_StandardStemmer):

    u"""
    The Portuguese Snowball stemmer.

    @cvar __vowels: The Portuguese vowels.
    @type __vowels: C{unicode}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    @type __step2_suffixes: C{tuple}
    @cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    @type __step4_suffixes: C{tuple}
    @note: A detailed description of the Portuguese
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /portuguese/stemmer.html}.
    """

    __vowels = u"aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4"
    # The suffix tuples list longer suffixes before shorter ones, so the
    # first 'endswith' match found in the loops below is the longest one.
    __step1_suffixes = (u'amentos', u'imentos', u'uciones', u'amento',
                        u'imento', u'adoras', u'adores', u'a\xE7o~es',
                        u'log\xEDas', u'\xEAncias', u'amente',
                        u'idades', u'ismos', u'istas', u'adora',
                        u'a\xE7a~o', u'antes', u'\xE2ncia',
                        u'log\xEDa', u'uci\xF3n', u'\xEAncia',
                        u'mente', u'idade', u'ezas', u'icos', u'icas',
                        u'ismo', u'\xE1vel', u'\xEDvel', u'ista',
                        u'osos', u'osas', u'ador', u'ante', u'ivas',
                        u'ivos', u'iras', u'eza', u'ico', u'ica',
                        u'oso', u'osa', u'iva', u'ivo', u'ira')
    __step2_suffixes = (u'ar\xEDamos', u'er\xEDamos', u'ir\xEDamos',
                        u'\xE1ssemos', u'\xEAssemos', u'\xEDssemos',
                        u'ar\xEDeis', u'er\xEDeis', u'ir\xEDeis',
                        u'\xE1sseis', u'\xE9sseis', u'\xEDsseis',
                        u'\xE1ramos', u'\xE9ramos', u'\xEDramos',
                        u'\xE1vamos', u'aremos', u'eremos', u'iremos',
                        u'ariam', u'eriam', u'iriam', u'assem', u'essem',
                        u'issem', u'ara~o', u'era~o', u'ira~o', u'arias',
                        u'erias', u'irias', u'ardes', u'erdes', u'irdes',
                        u'asses', u'esses', u'isses', u'astes', u'estes',
                        u'istes', u'\xE1reis', u'areis', u'\xE9reis',
                        u'ereis', u'\xEDreis', u'ireis', u'\xE1veis',
                        u'\xEDamos', u'armos', u'ermos', u'irmos',
                        u'aria', u'eria', u'iria', u'asse', u'esse',
                        u'isse', u'aste', u'este', u'iste', u'arei',
                        u'erei', u'irei', u'aram', u'eram', u'iram',
                        u'avam', u'arem', u'erem', u'irem',
                        u'ando', u'endo', u'indo', u'adas', u'idas',
                        u'ar\xE1s', u'aras', u'er\xE1s', u'eras',
                        u'ir\xE1s', u'avas', u'ares', u'eres', u'ires',
                        u'\xEDeis', u'ados', u'idos', u'\xE1mos',
                        u'amos', u'emos', u'imos', u'iras', u'ada', u'ida',
                        u'ar\xE1', u'ara', u'er\xE1', u'era',
                        u'ir\xE1', u'ava', u'iam', u'ado', u'ido',
                        u'ias', u'ais', u'eis', u'ira', u'ia', u'ei', u'am',
                        u'em', u'ar', u'er', u'ir', u'as',
                        u'es', u'is', u'eu', u'iu', u'ou')
    __step4_suffixes = (u"os", u"a", u"i", u"o", u"\xE1",
                        u"\xED", u"\xF3")

    def stem(self, word):
        u"""
        Stem a Portuguese word and return the stemmed form.

        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        word = word.lower()
        if word in self.stopwords:
            return word

        step1_success = False
        step2_success = False

        # Nasal vowels are temporarily replaced by the two-character
        # markers 'a~'/'o~' (restored at the very end of this method).
        word = (word.replace(u"\xE3", u"a~")
                    .replace(u"\xF5", u"o~"))

        # r1/r2/rv are suffix regions of 'word'; every trim applied to
        # 'word' below is mirrored on the regions to keep them in sync.
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == u"amente" and r1.endswith(suffix):
                    step1_success = True
                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]
                    # Strip a residual 'iv' (optionally followed by 'at'),
                    # or 'os'/'ic'/'ad', still found in R2.
                    if r2.endswith(u"iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]
                        if r2.endswith(u"at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif r2.endswith((u"os", u"ic", u"ad")):
                        word = word[:-2]
                        rv = rv[:-2]
                elif (suffix in (u"ira", u"iras") and rv.endswith(suffix) and
                      word[-len(suffix)-1:-len(suffix)] == u"e"):
                    # 'eira'/'eiras' -> 'eir'
                    step1_success = True
                    word = u"".join((word[:-len(suffix)], u"ir"))
                    rv = u"".join((rv[:-len(suffix)], u"ir"))
                elif r2.endswith(suffix):
                    step1_success = True
                    if suffix in (u"log\xEDa", u"log\xEDas"):
                        word = word[:-2]
                        rv = rv[:-2]
                    elif suffix in (u"uci\xF3n", u"uciones"):
                        word = u"".join((word[:-len(suffix)], u"u"))
                        rv = u"".join((rv[:-len(suffix)], u"u"))
                    elif suffix in (u"\xEAncia", u"\xEAncias"):
                        word = u"".join((word[:-len(suffix)], u"ente"))
                        rv = u"".join((rv[:-len(suffix)], u"ente"))
                    elif suffix == u"mente":
                        word = word[:-5]
                        r2 = r2[:-5]
                        rv = rv[:-5]
                        if r2.endswith((u"ante", u"avel", u"\xEDvel")):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in (u"idade", u"idades"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith((u"ic", u"iv")):
                            word = word[:-2]
                            rv = rv[:-2]
                        elif r2.endswith(u"abil"):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in (u"iva", u"ivo", u"ivas", u"ivos"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith(u"at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break

        # STEP 2: Verb suffixes (only if step 1 removed nothing)
        if not step1_success:
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    step2_success = True
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 3: Drop a final 'i' preceded by 'c' (only if a suffix
        # was removed in step 1 or 2).
        if step1_success or step2_success:
            if rv.endswith(u"i") and word[-2] == u"c":
                word = word[:-1]
                rv = rv[:-1]

        # STEP 4: Residual suffix (only if neither step 1 nor 2 fired)
        if not step1_success and not step2_success:
            for suffix in self.__step4_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 5: Final-e removal and cedilla normalization
        if rv.endswith((u"e", u"\xE9", u"\xEA")):
            word = word[:-1]
            rv = rv[:-1]
            if ((word.endswith(u"gu") and rv.endswith(u"u")) or
                (word.endswith(u"ci") and rv.endswith(u"i"))):
                word = word[:-1]
        elif word.endswith(u"\xE7"):
            word = u"".join((word[:-1], u"c"))

        # Restore the nasal-vowel markers introduced at the top.
        word = word.replace(u"a~", u"\xE3").replace(u"o~", u"\xF5")
        return word
class RomanianStemmer(_StandardStemmer):

    u"""
    The Romanian Snowball stemmer.

    @cvar __vowels: The Romanian vowels.
    @type __vowels: C{unicode}
    @cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    @type __step0_suffixes: C{tuple}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    @type __step2_suffixes: C{tuple}
    @cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    @type __step3_suffixes: C{tuple}
    @note: A detailed description of the Romanian
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /romanian/stemmer.html}.
    """

    __vowels = u"aeiou\u0103\xE2\xEE"
    # The suffix tuples list longer suffixes before shorter ones, so the
    # first 'endswith' match found in the loops below is the longest one.
    __step0_suffixes = (u'iilor', u'ului', u'elor', u'iile', u'ilor',
                        u'atei', u'a\u0163ie', u'a\u0163ia', u'aua',
                        u'ele', u'iua', u'iei', u'ile', u'ul', u'ea',
                        u'ii')
    __step1_suffixes = (u'abilitate', u'abilitati', u'abilit\u0103\u0163i',
                        u'ibilitate', u'abilit\u0103i', u'ivitate',
                        u'ivitati', u'ivit\u0103\u0163i', u'icitate',
                        u'icitati', u'icit\u0103\u0163i', u'icatori',
                        u'ivit\u0103i', u'icit\u0103i', u'icator',
                        u'a\u0163iune', u'atoare', u'\u0103toare',
                        u'i\u0163iune', u'itoare', u'iciva', u'icive',
                        u'icivi', u'iciv\u0103', u'icala', u'icale',
                        u'icali', u'ical\u0103', u'ativa', u'ative',
                        u'ativi', u'ativ\u0103', u'atori', u'\u0103tori',
                        u'itiva', u'itive', u'itivi', u'itiv\u0103',
                        u'itori', u'iciv', u'ical', u'ativ', u'ator',
                        u'\u0103tor', u'itiv', u'itor')
    __step2_suffixes = (u'abila', u'abile', u'abili', u'abil\u0103',
                        u'ibila', u'ibile', u'ibili', u'ibil\u0103',
                        u'atori', u'itate', u'itati', u'it\u0103\u0163i',
                        u'abil', u'ibil', u'oasa', u'oas\u0103', u'oase',
                        u'anta', u'ante', u'anti', u'ant\u0103', u'ator',
                        u'it\u0103i', u'iune', u'iuni', u'isme', u'ista',
                        u'iste', u'isti', u'ist\u0103', u'i\u015Fti',
                        u'ata', u'at\u0103', u'ati', u'ate', u'uta',
                        u'ut\u0103', u'uti', u'ute', u'ita', u'it\u0103',
                        u'iti', u'ite', u'ica', u'ice', u'ici', u'ic\u0103',
                        u'osi', u'o\u015Fi', u'ant', u'iva', u'ive', u'ivi',
                        u'iv\u0103', u'ism', u'ist', u'at', u'ut', u'it',
                        u'ic', u'os', u'iv')
    __step3_suffixes = (u'seser\u0103\u0163i', u'aser\u0103\u0163i',
                        u'iser\u0103\u0163i', u'\xE2ser\u0103\u0163i',
                        u'user\u0103\u0163i', u'seser\u0103m',
                        u'aser\u0103m', u'iser\u0103m', u'\xE2ser\u0103m',
                        u'user\u0103m', u'ser\u0103\u0163i', u'sese\u015Fi',
                        u'seser\u0103', u'easc\u0103', u'ar\u0103\u0163i',
                        u'ur\u0103\u0163i', u'ir\u0103\u0163i',
                        u'\xE2r\u0103\u0163i', u'ase\u015Fi',
                        u'aser\u0103', u'ise\u015Fi', u'iser\u0103',
                        u'\xe2se\u015Fi', u'\xE2ser\u0103',
                        u'use\u015Fi', u'user\u0103', u'ser\u0103m',
                        u'sesem', u'indu', u'\xE2ndu', u'eaz\u0103',
                        u'e\u015Fti', u'e\u015Fte', u'\u0103\u015Fti',
                        u'\u0103\u015Fte', u'ea\u0163i', u'ia\u0163i',
                        u'ar\u0103m', u'ur\u0103m', u'ir\u0103m',
                        u'\xE2r\u0103m', u'asem', u'isem',
                        u'\xE2sem', u'usem', u'se\u015Fi', u'ser\u0103',
                        u'sese', u'are', u'ere', u'ire', u'\xE2re',
                        u'ind', u'\xE2nd', u'eze', u'ezi', u'esc',
                        u'\u0103sc', u'eam', u'eai', u'eau', u'iam',
                        u'iai', u'iau', u'a\u015Fi', u'ar\u0103',
                        u'u\u015Fi', u'ur\u0103', u'i\u015Fi', u'ir\u0103',
                        u'\xE2\u015Fi', u'\xe2r\u0103', u'ase',
                        u'ise', u'\xE2se', u'use', u'a\u0163i',
                        u'e\u0163i', u'i\u0163i', u'\xe2\u0163i', u'sei',
                        u'ez', u'am', u'ai', u'au', u'ea', u'ia', u'ui',
                        u'\xE2i', u'\u0103m', u'em', u'im', u'\xE2m',
                        u'se')

    def stem(self, word):
        u"""
        Stem a Romanian word and return the stemmed form.

        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        word = word.lower()
        if word in self.stopwords:
            return word

        step1_success = False
        step2_success = False

        # 'u'/'i' between vowels are marked upper case so they count as
        # non-vowels for region computation; undone at the end.
        for i in xrange(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == u"u":
                    word = u"".join((word[:i], u"U", word[i+1:]))
                elif word[i] == u"i":
                    word = u"".join((word[:i], u"I", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Removal of plurals and other simplifications
        # NOTE(review): this implementation tests region membership with
        # substring containment ('suffix in r1' / 'suffix in rv') rather
        # than endswith; this mirrors the upstream code -- confirm against
        # the Snowball reference before changing.
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                if suffix in r1:
                    if suffix in (u"ul", u"ului"):
                        word = word[:-len(suffix)]
                        if suffix in rv:
                            rv = rv[:-len(suffix)]
                        else:
                            rv = u""
                    elif (suffix == u"aua" or suffix == u"atei" or
                          (suffix == u"ile" and word[-5:-3] != u"ab")):
                        word = word[:-2]
                    elif suffix in (u"ea", u"ele", u"elor"):
                        word = u"".join((word[:-len(suffix)], u"e"))
                        if suffix in rv:
                            rv = u"".join((rv[:-len(suffix)], u"e"))
                        else:
                            rv = u""
                    elif suffix in (u"ii", u"iua", u"iei",
                                    u"iile", u"iilor", u"ilor"):
                        word = u"".join((word[:-len(suffix)], u"i"))
                        if suffix in rv:
                            rv = u"".join((rv[:-len(suffix)], u"i"))
                        else:
                            rv = u""
                    elif suffix in (u"a\u0163ie", u"a\u0163ia"):
                        word = word[:-1]
                break

        # STEP 1: Reduction of combining suffixes.  Repeats until no
        # replacement was made in a full pass over the suffix list.
        while True:

            replacement_done = False

            for suffix in self.__step1_suffixes:
                if word.endswith(suffix):
                    if suffix in r1:
                        step1_success = True
                        replacement_done = True

                        if suffix in (u"abilitate", u"abilitati",
                                      u"abilit\u0103i",
                                      u"abilit\u0103\u0163i"):
                            word = u"".join((word[:-len(suffix)], u"abil"))
                        elif suffix == u"ibilitate":
                            word = word[:-5]
                        elif suffix in (u"ivitate", u"ivitati",
                                        u"ivit\u0103i",
                                        u"ivit\u0103\u0163i"):
                            word = u"".join((word[:-len(suffix)], u"iv"))
                        elif suffix in (u"icitate", u"icitati", u"icit\u0103i",
                                        u"icit\u0103\u0163i", u"icator",
                                        u"icatori", u"iciv", u"iciva",
                                        u"icive", u"icivi", u"iciv\u0103",
                                        u"ical", u"icala", u"icale", u"icali",
                                        u"ical\u0103"):
                            word = u"".join((word[:-len(suffix)], u"ic"))
                        elif suffix in (u"ativ", u"ativa", u"ative", u"ativi",
                                        u"ativ\u0103", u"a\u0163iune",
                                        u"atoare", u"ator", u"atori",
                                        u"\u0103toare",
                                        u"\u0103tor", u"\u0103tori"):
                            word = u"".join((word[:-len(suffix)], u"at"))

                            if suffix in r2:
                                r2 = u"".join((r2[:-len(suffix)], u"at"))
                        elif suffix in (u"itiv", u"itiva", u"itive", u"itivi",
                                        u"itiv\u0103", u"i\u0163iune",
                                        u"itoare", u"itor", u"itori"):
                            word = u"".join((word[:-len(suffix)], u"it"))

                            if suffix in r2:
                                r2 = u"".join((r2[:-len(suffix)], u"it"))
                    else:
                        step1_success = False
                    break

            if not replacement_done:
                break

        # STEP 2: Removal of standard suffixes
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if suffix in r2:
                    step2_success = True
                    if suffix in (u"iune", u"iuni"):
                        if word[-5] == u"\u0163":
                            word = u"".join((word[:-5], u"t"))
                    elif suffix in (u"ism", u"isme", u"ist", u"ista", u"iste",
                                    u"isti", u"ist\u0103", u"i\u015Fti"):
                        word = u"".join((word[:-len(suffix)], u"ist"))
                    else:
                        word = word[:-len(suffix)]
                break

        # STEP 3: Removal of verb suffixes
        # (only if neither step 1 nor step 2 fired)
        if not step1_success and not step2_success:
            for suffix in self.__step3_suffixes:
                if word.endswith(suffix):
                    if suffix in rv:
                        if suffix in (u'seser\u0103\u0163i', u'seser\u0103m',
                                      u'ser\u0103\u0163i', u'sese\u015Fi',
                                      u'seser\u0103', u'ser\u0103m', u'sesem',
                                      u'se\u015Fi', u'ser\u0103', u'sese',
                                      u'a\u0163i', u'e\u0163i', u'i\u0163i',
                                      u'\xE2\u0163i', u'sei', u'\u0103m',
                                      u'em', u'im', u'\xE2m', u'se'):
                            word = word[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                        else:
                            # Other verb suffixes are removed only when the
                            # letter preceding the suffix in RV is not one of
                            # the listed vowels.  NOTE(review): rv.index finds
                            # the FIRST occurrence of the suffix in RV, which
                            # need not be the final one; mirrors upstream.
                            if (not rv.startswith(suffix) and
                                rv[rv.index(suffix)-1] not in
                                u"aeio\u0103\xE2\xEE"):
                                word = word[:-len(suffix)]
                    break

        # STEP 4: Removal of final vowel
        for suffix in (u"ie", u"a", u"e", u"i", u"\u0103"):
            if word.endswith(suffix):
                if suffix in rv:
                    word = word[:-len(suffix)]
                break

        # Undo the 'I'/'U' marking applied before region computation.
        word = word.replace(u"I", u"i").replace(u"U", u"u")
        return word
class RussianStemmer(_LanguageSpecificStemmer):
u"""
The Russian Snowball stemmer.
@cvar __perfective_gerund_suffixes: Suffixes to be deleted.
@type __perfective_gerund_suffixes: C{tuple}
@cvar __adjectival_suffixes: Suffixes to be deleted.
@type __adjectival_suffixes: C{tuple}
@cvar __reflexive_suffixes: Suffixes to be deleted.
@type __reflexive_suffixes: C{tuple}
@cvar __verb_suffixes: Suffixes to be deleted.
@type __verb_suffixes: C{tuple}
@cvar __noun_suffixes: Suffixes to be deleted.
@type __noun_suffixes: C{tuple}
@cvar __superlative_suffixes: Suffixes to be deleted.
@type __superlative_suffixes: C{tuple}
@cvar __derivational_suffixes: Suffixes to be deleted.
@type __derivational_suffixes: C{tuple}
@note: A detailed description of the Russian
stemming algorithm can be found under
U{http://snowball.tartarus.org/algorithms
/russian/stemmer.html}.
"""
__perfective_gerund_suffixes = (u"ivshis'", u"yvshis'", u"vshis'",
u"ivshi", u"yvshi", u"vshi", u"iv",
u"yv", u"v")
__adjectival_suffixes = (u'ui^ushchi^ui^u', u'ui^ushchi^ai^a',
u'ui^ushchimi', u'ui^ushchymi', u'ui^ushchego',
u'ui^ushchogo', u'ui^ushchemu', u'ui^ushchomu',
u'ui^ushchikh', u'ui^ushchykh',
u'ui^ushchui^u', u'ui^ushchaia',
u'ui^ushchoi^u', u'ui^ushchei^u',
u'i^ushchi^ui^u', u'i^ushchi^ai^a',
u'ui^ushchee', u'ui^ushchie',
u'ui^ushchye', u'ui^ushchoe', u'ui^ushchei`',
u'ui^ushchii`', u'ui^ushchyi`',
u'ui^ushchoi`', u'ui^ushchem', u'ui^ushchim',
u'ui^ushchym', u'ui^ushchom', u'i^ushchimi',
u'i^ushchymi', u'i^ushchego', u'i^ushchogo',
u'i^ushchemu', u'i^ushchomu', u'i^ushchikh',
u'i^ushchykh', u'i^ushchui^u', u'i^ushchai^a',
u'i^ushchoi^u', u'i^ushchei^u', u'i^ushchee',
u'i^ushchie', u'i^ushchye', u'i^ushchoe',
u'i^ushchei`', u'i^ushchii`',
u'i^ushchyi`', u'i^ushchoi`', u'i^ushchem',
u'i^ushchim', u'i^ushchym', u'i^ushchom',
u'shchi^ui^u', u'shchi^ai^a', u'ivshi^ui^u',
u'ivshi^ai^a', u'yvshi^ui^u', u'yvshi^ai^a',
u'shchimi', u'shchymi', u'shchego', u'shchogo',
u'shchemu', u'shchomu', u'shchikh', u'shchykh',
u'shchui^u', u'shchai^a', u'shchoi^u',
u'shchei^u', u'ivshimi', u'ivshymi',
u'ivshego', u'ivshogo', u'ivshemu', u'ivshomu',
u'ivshikh', u'ivshykh', u'ivshui^u',
u'ivshai^a', u'ivshoi^u', u'ivshei^u',
u'yvshimi', u'yvshymi', u'yvshego', u'yvshogo',
u'yvshemu', u'yvshomu', u'yvshikh', u'yvshykh',
u'yvshui^u', u'yvshai^a', u'yvshoi^u',
u'yvshei^u', u'vshi^ui^u', u'vshi^ai^a',
u'shchee', u'shchie', u'shchye', u'shchoe',
u'shchei`', u'shchii`', u'shchyi`', u'shchoi`',
u'shchem', u'shchim', u'shchym', u'shchom',
u'ivshee', u'ivshie', u'ivshye', u'ivshoe',
u'ivshei`', u'ivshii`', u'ivshyi`',
u'ivshoi`', u'ivshem', u'ivshim', u'ivshym',
u'ivshom', u'yvshee', u'yvshie', u'yvshye',
u'yvshoe', u'yvshei`', u'yvshii`',
u'yvshyi`', u'yvshoi`', u'yvshem',
u'yvshim', u'yvshym', u'yvshom', u'vshimi',
u'vshymi', u'vshego', u'vshogo', u'vshemu',
u'vshomu', u'vshikh', u'vshykh', u'vshui^u',
u'vshai^a', u'vshoi^u', u'vshei^u',
u'emi^ui^u', u'emi^ai^a', u'nni^ui^u',
u'nni^ai^a', u'vshee',
u'vshie', u'vshye', u'vshoe', u'vshei`',
u'vshii`', u'vshyi`', u'vshoi`',
u'vshem', u'vshim', u'vshym', u'vshom',
u'emimi', u'emymi', u'emego', u'emogo',
u'ememu', u'emomu', u'emikh', u'emykh',
u'emui^u', u'emai^a', u'emoi^u', u'emei^u',
u'nnimi', u'nnymi', u'nnego', u'nnogo',
u'nnemu', u'nnomu', u'nnikh', u'nnykh',
u'nnui^u', u'nnai^a', u'nnoi^u', u'nnei^u',
u'emee', u'emie', u'emye', u'emoe',
u'emei`', u'emii`', u'emyi`',
u'emoi`', u'emem', u'emim', u'emym',
u'emom', u'nnee', u'nnie', u'nnye', u'nnoe',
u'nnei`', u'nnii`', u'nnyi`',
u'nnoi`', u'nnem', u'nnim', u'nnym',
u'nnom', u'i^ui^u', u'i^ai^a', u'imi', u'ymi',
u'ego', u'ogo', u'emu', u'omu', u'ikh',
u'ykh', u'ui^u', u'ai^a', u'oi^u', u'ei^u',
u'ee', u'ie', u'ye', u'oe', u'ei`',
u'ii`', u'yi`', u'oi`', u'em',
u'im', u'ym', u'om')
__reflexive_suffixes = (u"si^a", u"s'")
__verb_suffixes = (u"esh'", u'ei`te', u'ui`te', u'ui^ut',
u"ish'", u'ete', u'i`te', u'i^ut', u'nno',
u'ila', u'yla', u'ena', u'ite', u'ili', u'yli',
u'ilo', u'ylo', u'eno', u'i^at', u'uet', u'eny',
u"it'", u"yt'", u'ui^u', u'la', u'na', u'li',
u'em', u'lo', u'no', u'et', u'ny', u"t'",
u'ei`', u'ui`', u'il', u'yl', u'im',
u'ym', u'en', u'it', u'yt', u'i^u', u'i`',
u'l', u'n')
__noun_suffixes = (u'ii^ami', u'ii^akh', u'i^ami', u'ii^am', u'i^akh',
u'ami', u'iei`', u'i^am', u'iem', u'akh',
u'ii^u', u"'i^u", u'ii^a', u"'i^a", u'ev', u'ov',
u'ie', u"'e", u'ei', u'ii', u'ei`',
u'oi`', u'ii`', u'em', u'am', u'om',
u'i^u', u'i^a', u'a', u'e', u'i', u'i`',
u'o', u'u', u'y', u"'")
__superlative_suffixes = (u"ei`she", u"ei`sh")
__derivational_suffixes = (u"ost'", u"ost")
def stem(self, word):
    u"""
    Stem a Russian word and return the stemmed form.

    Words containing characters outside Latin-1 are assumed to be
    Cyrillic and are transliterated to the Roman alphabet for the
    duration of the algorithm, then transliterated back at the end.

    @param word: The word that is stemmed.
    @type word: C{str, unicode}
    @return: The stemmed form.
    @rtype: C{unicode}
    """
    if word in self.stopwords:
        return word

    # Detect any character outside the Latin-1 range.
    # NOTE(review): membership in xrange(256) is a linear scan in
    # Python 2; 'ord(word[i]) > 255' would be an equivalent O(1) test.
    chr_exceeded = False
    for i in xrange(len(word)):
        if ord(word[i]) not in xrange(256):
            chr_exceeded = True
            break

    if chr_exceeded:
        word = self.__cyrillic_to_roman(word)

    step1_success = False
    adjectival_removed = False
    verb_removed = False
    undouble_success = False
    superlative_removed = False

    rv, r2 = self.__regions_russian(word)

    # Step 1: perfective gerund; otherwise reflexive, then
    # adjectival / verb / noun endings, tried in that order.
    for suffix in self.__perfective_gerund_suffixes:
        if rv.endswith(suffix):
            # Group-1 gerund endings only count when preceded by
            # 'a' or the 'i^a' transliteration of Cyrillic 'ya'.
            if suffix in (u"v", u"vshi", u"vshis'"):
                if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                    rv[-len(suffix)-1:-len(suffix)] == "a"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    step1_success = True
                    break
            else:
                word = word[:-len(suffix)]
                r2 = r2[:-len(suffix)]
                rv = rv[:-len(suffix)]
                step1_success = True
                break

    if not step1_success:
        for suffix in self.__reflexive_suffixes:
            if rv.endswith(suffix):
                word = word[:-len(suffix)]
                r2 = r2[:-len(suffix)]
                rv = rv[:-len(suffix)]
                break

        for suffix in self.__adjectival_suffixes:
            if rv.endswith(suffix):
                # Participle-derived adjectival endings (this group)
                # only count when preceded by 'a' / 'i^a'.
                if suffix in (u'i^ushchi^ui^u', u'i^ushchi^ai^a',
                              u'i^ushchui^u', u'i^ushchai^a', u'i^ushchoi^u',
                              u'i^ushchei^u', u'i^ushchimi', u'i^ushchymi',
                              u'i^ushchego', u'i^ushchogo', u'i^ushchemu',
                              u'i^ushchomu', u'i^ushchikh', u'i^ushchykh',
                              u'shchi^ui^u', u'shchi^ai^a', u'i^ushchee',
                              u'i^ushchie', u'i^ushchye', u'i^ushchoe',
                              u'i^ushchei`', u'i^ushchii`', u'i^ushchyi`',
                              u'i^ushchoi`', u'i^ushchem', u'i^ushchim',
                              u'i^ushchym', u'i^ushchom', u'vshi^ui^u',
                              u'vshi^ai^a', u'shchui^u', u'shchai^a',
                              u'shchoi^u', u'shchei^u', u'emi^ui^u',
                              u'emi^ai^a', u'nni^ui^u', u'nni^ai^a',
                              u'shchimi', u'shchymi', u'shchego', u'shchogo',
                              u'shchemu', u'shchomu', u'shchikh', u'shchykh',
                              u'vshui^u', u'vshai^a', u'vshoi^u', u'vshei^u',
                              u'shchee', u'shchie', u'shchye', u'shchoe',
                              u'shchei`', u'shchii`', u'shchyi`', u'shchoi`',
                              u'shchem', u'shchim', u'shchym', u'shchom',
                              u'vshimi', u'vshymi', u'vshego', u'vshogo',
                              u'vshemu', u'vshomu', u'vshikh', u'vshykh',
                              u'emui^u', u'emai^a', u'emoi^u', u'emei^u',
                              u'nnui^u', u'nnai^a', u'nnoi^u', u'nnei^u',
                              u'vshee', u'vshie', u'vshye', u'vshoe',
                              u'vshei`', u'vshii`', u'vshyi`', u'vshoi`',
                              u'vshem', u'vshim', u'vshym', u'vshom',
                              u'emimi', u'emymi', u'emego', u'emogo',
                              u'ememu', u'emomu', u'emikh', u'emykh',
                              u'nnimi', u'nnymi', u'nnego', u'nnogo',
                              u'nnemu', u'nnomu', u'nnikh', u'nnykh',
                              u'emee', u'emie', u'emye', u'emoe', u'emei`',
                              u'emii`', u'emyi`', u'emoi`', u'emem', u'emim',
                              u'emym', u'emom', u'nnee', u'nnie', u'nnye',
                              u'nnoe', u'nnei`', u'nnii`', u'nnyi`', u'nnoi`',
                              u'nnem', u'nnim', u'nnym', u'nnom'):
                    if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                        rv[-len(suffix)-1:-len(suffix)] == "a"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        adjectival_removed = True
                        break
                else:
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    adjectival_removed = True
                    break

        if not adjectival_removed:
            for suffix in self.__verb_suffixes:
                if rv.endswith(suffix):
                    # Group-1 verb endings only count when preceded
                    # by 'a' / 'i^a'.
                    if suffix in (u"la", u"na", u"ete", u"i`te", u"li",
                                  u"i`", u"l", u"em", u"n", u"lo", u"no",
                                  u"et", u"i^ut", u"ny", u"t'", u"esh'",
                                  u"nno"):
                        if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                            rv[-len(suffix)-1:-len(suffix)] == "a"):
                            word = word[:-len(suffix)]
                            r2 = r2[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            verb_removed = True
                            break
                    else:
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        verb_removed = True
                        break

        if not adjectival_removed and not verb_removed:
            for suffix in self.__noun_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

    # Step 2: remove a final 'i' in RV.
    if rv.endswith("i"):
        word = word[:-1]
        r2 = r2[:-1]

    # Step 3: remove a derivational suffix found in R2.
    for suffix in self.__derivational_suffixes:
        if r2.endswith(suffix):
            word = word[:-len(suffix)]
            break

    # Step 4: undouble a final 'nn', else strip a superlative suffix
    # (then undouble), else drop a final soft sign.
    if word.endswith("nn"):
        word = word[:-1]
        undouble_success = True

    if not undouble_success:
        for suffix in self.__superlative_suffixes:
            if word.endswith(suffix):
                word = word[:-len(suffix)]
                superlative_removed = True
                break
        if word.endswith("nn"):
            word = word[:-1]

    if not undouble_success and not superlative_removed:
        if word.endswith("'"):
            word = word[:-1]

    # Transliterate back to Cyrillic if the input was Cyrillic.
    if chr_exceeded:
        word = self.__roman_to_cyrillic(word)
    return word
def __regions_russian(self, word):
    u"""
    Return the regions RV and R2 which are used by the Russian stemmer.

    In any word, RV is the region after the first vowel,
    or the end of the word if it contains no vowel.

    R2 is the region after the first non-vowel following
    a vowel in R1, or the end of the word if there is no such non-vowel.

    R1 is the region after the first non-vowel following a vowel,
    or the end of the word if there is no such non-vowel.

    @param word: The Russian word whose regions RV and R2 are determined.
    @type word: C{str, unicode}
    @return: C{(rv, r2)}, the regions RV and R2 for the
             respective Russian word.
    @rtype: C{tuple}
    @note: This helper method is invoked by the stem method of the subclass
           L{RussianStemmer}. It is not to be invoked directly!
    """
    r1 = u""
    r2 = u""
    rv = u""

    # The transliterated digraphs i^a / i^u / e` are collapsed to the
    # single markers A / U / E so each vowel occupies one position while
    # the regions are located; the markers are expanded again below.
    vowels = (u"A", u"U", u"E", u"a", u"e", u"i", u"o", u"u", u"y")
    word = (word.replace(u"i^a", u"A")
                .replace(u"i^u", u"U")
                .replace(u"e`", u"E"))

    # R1: after the first non-vowel that follows a vowel.
    for i in xrange(1, len(word)):
        if word[i] not in vowels and word[i-1] in vowels:
            r1 = word[i+1:]
            break

    # R2: same rule applied within R1.
    for i in xrange(1, len(r1)):
        if r1[i] not in vowels and r1[i-1] in vowels:
            r2 = r1[i+1:]
            break

    # RV: after the first vowel in the word.
    for i in xrange(len(word)):
        if word[i] in vowels:
            rv = word[i+1:]
            break

    # Expand the single-character vowel markers back to their
    # transliterated forms.  (r1 is only an intermediate here and is
    # not returned.)
    r2 = (r2.replace(u"A", u"i^a")
            .replace(u"U", u"i^u")
            .replace(u"E", u"e`"))
    rv = (rv.replace(u"A", u"i^a")
            .replace(u"U", u"i^u")
            .replace(u"E", u"e`"))
    return (rv, r2)
def __cyrillic_to_roman(self, word):
u"""
Transliterate a Russian word into the Roman alphabet.
A Russian word whose letters consist of the Cyrillic
alphabet are transliterated into the Roman alphabet
in order to ease the forthcoming stemming process.
@param word: The word that is transliterated.
@type word: C{unicode}
@return: C{word}, the transliterated word.
@rtype: C{unicode}
@note: This helper method is invoked by the stem method of the subclass
L{RussianStemmer}. It is not to be invoked directly!
"""
word = (word.replace(u"\u0410", u"a").replace(u"\u0430", u"a")
.replace(u"\u0411", u"b").replace(u"\u0431", u"b")
.replace(u"\u0412", u"v").replace(u"\u0432", u"v")
.replace(u"\u0413", u"g").replace(u"\u0433", u"g")
.replace(u"\u0414", u"d").replace(u"\u0434", u"d")
.replace(u"\u0415", u"e").replace(u"\u0435", u"e")
.replace(u"\u0401", u"e").replace(u"\u0451", u"e")
.replace(u"\u0416", u"zh").replace(u"\u0436", u"zh")
.replace(u"\u0417", u"z").replace(u"\u0437", u"z")
.replace(u"\u0418", u"i").replace(u"\u0438", u"i")
.replace(u"\u0419", u"i`").replace(u"\u0439", u"i`")
.replace(u"\u041A", u"k").replace(u"\u043A", u"k")
.replace(u"\u041B", u"l").replace(u"\u043B", u"l")
.replace(u"\u041C", u"m").replace(u"\u043C", u"m")
.replace(u"\u041D", u"n").replace(u"\u043D", u"n")
.replace(u"\u041E", u"o").replace(u"\u043E", u"o")
.replace(u"\u041F", u"p").replace(u"\u043F", u"p")
.replace(u"\u0420", u"r").replace(u"\u0440", u"r")
.replace(u"\u0421", u"s").replace(u"\u0441", u"s")
.replace(u"\u0422", u"t").replace(u"\u0442", u"t")
.replace(u"\u0423", u"u").replace(u"\u0443", u"u")
.replace(u"\u0424", u"f").replace(u"\u0444", u"f")
.replace(u"\u0425", u"kh").replace(u"\u0445", u"kh")
.replace(u"\u0426", u"t^s").replace(u"\u0446", u"t^s")
.replace(u"\u0427", u"ch").replace(u"\u0447", u"ch")
.replace(u"\u0428", u"sh").replace(u"\u0448", u"sh")
.replace(u"\u0429", u"shch").replace(u"\u0449", u"shch")
.replace(u"\u042A", u"''").replace(u"\u044A", u"''")
.replace(u"\u042B", u"y").replace(u"\u044B", u"y")
.replace(u"\u042C", u"'").replace(u"\u044C", u"'")
.replace(u"\u042D", u"e`").replace(u"\u044D", u"e`")
.replace(u"\u042E", u"i^u").replace(u"\u044E", u"i^u")
.replace(u"\u042F", u"i^a").replace(u"\u044F", u"i^a"))
return word
def __roman_to_cyrillic(self, word):
u"""
Transliterate a Russian word back into the Cyrillic alphabet.
A Russian word formerly transliterated into the Roman alphabet
in order to ease the stemming process, is transliterated back
into the Cyrillic alphabet, its original form.
@param word: The word that is transliterated.
@type word: C{str, unicode}
@return: C{word}, the transliterated word.
@rtype: C{unicode}
@note: This helper method is invoked by the stem method of the subclass
L{RussianStemmer}. It is not to be invoked directly!
"""
word = (word.replace(u"i^u", u"\u044E").replace(u"i^a", u"\u044F")
.replace(u"shch", u"\u0449").replace(u"kh", u"\u0445")
.replace(u"t^s", u"\u0446").replace(u"ch", u"\u0447")
.replace(u"e`", u"\u044D").replace(u"i`", u"\u0439")
.replace(u"sh", u"\u0448").replace(u"k", u"\u043A")
.replace(u"e", u"\u0435").replace(u"zh", u"\u0436")
.replace(u"a", u"\u0430").replace(u"b", u"\u0431")
.replace(u"v", u"\u0432").replace(u"g", u"\u0433")
.replace(u"d", u"\u0434").replace(u"e", u"\u0435")
.replace(u"z", u"\u0437").replace(u"i", u"\u0438")
.replace(u"l", u"\u043B").replace(u"m", u"\u043C")
.replace(u"n", u"\u043D").replace(u"o", u"\u043E")
.replace(u"p", u"\u043F").replace(u"r", u"\u0440")
.replace(u"s", u"\u0441").replace(u"t", u"\u0442")
.replace(u"u", u"\u0443").replace(u"f", u"\u0444")
.replace(u"''", u"\u044A").replace(u"y", u"\u044B")
.replace(u"'", u"\u044C"))
return word
class SpanishStemmer(_StandardStemmer):

    u"""
    The Spanish Snowball stemmer.

    @cvar __vowels: The Spanish vowels.
    @type __vowels: C{unicode}
    @cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    @type __step0_suffixes: C{tuple}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    @type __step2a_suffixes: C{tuple}
    @cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    @type __step2b_suffixes: C{tuple}
    @cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    @type __step3_suffixes: C{tuple}
    @note: A detailed description of the Spanish
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /spanish/stemmer.html}.
    """

    __vowels = u"aeiou\xE1\xE9\xED\xF3\xFA\xFC"
    # Suffix tuples are ordered longest-first so the first match found in
    # each loop of stem() is the longest applicable suffix.
    __step0_suffixes = (u"selas", u"selos", u"sela", u"selo", u"las",
                        u"les", u"los", u"nos", u"me", u"se", u"la", u"le",
                        u"lo")
    __step1_suffixes = (u'amientos', u'imientos', u'amiento', u'imiento',
                        u'aciones', u'uciones', u'adoras', u'adores',
                        u'ancias', u'log\xEDas', u'encias', u'amente',
                        u'idades', u'anzas', u'ismos', u'ables', u'ibles',
                        u'istas', u'adora', u'aci\xF3n', u'antes',
                        u'ancia', u'log\xEDa', u'uci\xf3n', u'encia',
                        u'mente', u'anza', u'icos', u'icas', u'ismo',
                        u'able', u'ible', u'ista', u'osos', u'osas',
                        u'ador', u'ante', u'idad', u'ivas', u'ivos',
                        u'ico',
                        u'ica', u'oso', u'osa', u'iva', u'ivo')
    __step2a_suffixes = (u'yeron', u'yendo', u'yamos', u'yais', u'yan',
                         u'yen', u'yas', u'yes', u'ya', u'ye', u'yo',
                         u'y\xF3')
    __step2b_suffixes = (u'ar\xEDamos', u'er\xEDamos', u'ir\xEDamos',
                         u'i\xE9ramos', u'i\xE9semos', u'ar\xEDais',
                         u'aremos', u'er\xEDais', u'eremos',
                         u'ir\xEDais', u'iremos', u'ierais', u'ieseis',
                         u'asteis', u'isteis', u'\xE1bamos',
                         u'\xE1ramos', u'\xE1semos', u'ar\xEDan',
                         u'ar\xEDas', u'ar\xE9is', u'er\xEDan',
                         u'er\xEDas', u'er\xE9is', u'ir\xEDan',
                         u'ir\xEDas', u'ir\xE9is',
                         u'ieran', u'iesen', u'ieron', u'iendo', u'ieras',
                         u'ieses', u'abais', u'arais', u'aseis',
                         u'\xE9amos', u'ar\xE1n', u'ar\xE1s',
                         u'ar\xEDa', u'er\xE1n', u'er\xE1s',
                         u'er\xEDa', u'ir\xE1n', u'ir\xE1s',
                         u'ir\xEDa', u'iera', u'iese', u'aste', u'iste',
                         u'aban', u'aran', u'asen', u'aron', u'ando',
                         u'abas', u'adas', u'idas', u'aras', u'ases',
                         u'\xEDais', u'ados', u'idos', u'amos', u'imos',
                         u'emos', u'ar\xE1', u'ar\xE9', u'er\xE1',
                         u'er\xE9', u'ir\xE1', u'ir\xE9', u'aba',
                         u'ada', u'ida', u'ara', u'ase', u'\xEDan',
                         u'ado', u'ido', u'\xEDas', u'\xE1is',
                         u'\xE9is', u'\xEDa', u'ad', u'ed', u'id',
                         u'an', u'i\xF3', u'ar', u'er', u'ir', u'as',
                         u'\xEDs', u'en', u'es')
    __step3_suffixes = (u"os", u"a", u"e", u"o", u"\xE1",
                        u"\xE9", u"\xED", u"\xF3")

    def stem(self, word):
        u"""
        Stem a Spanish word and return the stemmed form.

        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        word = word.lower()

        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word

        # Records whether step 1 removed a suffix; the verb-suffix steps
        # 2a/2b below only run when it did not.
        step1_success = False

        # R1/R2/RV regions as defined by the Snowball framework; suffix
        # matches are validated against these regions, while the actual
        # truncation is applied to word/r1/r2/rv in parallel.
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Attached pronoun
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                if rv.endswith(suffix):
                    if rv[:-len(suffix)].endswith((u"i\xE9ndo",
                                                   u"\xE1ndo",
                                                   u"\xE1r", u"\xE9r",
                                                   u"\xEDr")):
                        # Remove the pronoun and de-accent the preceding
                        # (gerund/infinitive) verb ending.
                        word = (word[:-len(suffix)].replace(u"\xE1", u"a")
                                                   .replace(u"\xE9", u"e")
                                                   .replace(u"\xED", u"i"))
                        r1 = (r1[:-len(suffix)].replace(u"\xE1", u"a")
                                               .replace(u"\xE9", u"e")
                                               .replace(u"\xED", u"i"))
                        r2 = (r2[:-len(suffix)].replace(u"\xE1", u"a")
                                               .replace(u"\xE9", u"e")
                                               .replace(u"\xED", u"i"))
                        rv = (rv[:-len(suffix)].replace(u"\xE1", u"a")
                                               .replace(u"\xE9", u"e")
                                               .replace(u"\xED", u"i"))

                    elif rv[:-len(suffix)].endswith((u"ando", u"iendo",
                                                     u"ar", u"er", u"ir")):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                    elif (rv[:-len(suffix)].endswith(u"yendo") and
                          word[:-len(suffix)].endswith(u"uyendo")):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                # Only the longest matching pronoun suffix is considered.
                break

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == u"amente" and r1.endswith(suffix):
                    step1_success = True
                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]

                    if r2.endswith(u"iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]

                        if r2.endswith(u"at"):
                            word = word[:-2]
                            rv = rv[:-2]

                    elif r2.endswith((u"os", u"ic", u"ad")):
                        word = word[:-2]
                        rv = rv[:-2]

                elif r2.endswith(suffix):
                    step1_success = True
                    if suffix in (u"adora", u"ador", u"aci\xF3n", u"adoras",
                                  u"adores", u"aciones", u"ante", u"antes",
                                  u"ancia", u"ancias"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        if r2.endswith(u"ic"):
                            word = word[:-2]
                            rv = rv[:-2]

                    elif suffix in (u"log\xEDa", u"log\xEDas"):
                        # NOTE(review): str.replace substitutes every
                        # occurrence, not just the trailing suffix; this
                        # mirrors the original implementation -- verify
                        # against the Snowball definition if stems of
                        # words containing the suffix internally look odd.
                        word = word.replace(suffix, u"log")
                        rv = rv.replace(suffix, u"log")

                    elif suffix in (u"uci\xF3n", u"uciones"):
                        word = word.replace(suffix, u"u")
                        rv = rv.replace(suffix, u"u")

                    elif suffix in (u"encia", u"encias"):
                        word = word.replace(suffix, u"ente")
                        rv = rv.replace(suffix, u"ente")

                    elif suffix == u"mente":
                        word = word[:-5]
                        r2 = r2[:-5]
                        rv = rv[:-5]

                        if r2.endswith((u"ante", u"able", u"ible")):
                            word = word[:-4]
                            rv = rv[:-4]

                    elif suffix in (u"idad", u"idades"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        for pre_suff in (u"abil", u"ic", u"iv"):
                            if r2.endswith(pre_suff):
                                word = word[:-len(pre_suff)]
                                rv = rv[:-len(pre_suff)]

                    elif suffix in (u"ivo", u"iva", u"ivos", u"ivas"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith(u"at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break

        # STEP 2a: Verb suffixes beginning 'y'
        # (steps 2a and 2b only run when step 1 removed nothing)
        if not step1_success:
            for suffix in self.__step2a_suffixes:
                if (rv.endswith(suffix) and
                    word[-len(suffix)-1:-len(suffix)] == u"u"):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 2b: Other verb suffixes
            for suffix in self.__step2b_suffixes:
                if rv.endswith(suffix):
                    if suffix in (u"en", u"es", u"\xE9is", u"emos"):
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        # Drop the 'u' of a preceding "gu".
                        if word.endswith(u"gu"):
                            word = word[:-1]

                        if rv.endswith(u"gu"):
                            rv = rv[:-1]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                    break

        # STEP 3: Residual suffix
        for suffix in self.__step3_suffixes:
            if rv.endswith(suffix):
                if suffix in (u"e", u"\xE9"):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]

                    # NOTE(review): rv[-1] raises IndexError when rv is
                    # empty after the truncation above; presumably a word
                    # ending "gu..e" always leaves a non-empty rv --
                    # confirm against the Snowball spec.
                    if word[-2:] == u"gu" and rv[-1] == u"u":
                        word = word[:-1]
                else:
                    word = word[:-len(suffix)]
                break

        # Finally, strip the acute accents.
        word = (word.replace(u"\xE1", u"a").replace(u"\xE9", u"e")
                    .replace(u"\xED", u"i").replace(u"\xF3", u"o")
                    .replace(u"\xFA", u"u"))

        return word
class SwedishStemmer(_ScandinavianStemmer):

    u"""
    The Swedish Snowball stemmer.

    @cvar __vowels: The Swedish vowels.
    @type __vowels: C{unicode}
    @cvar __s_ending: Letters that may directly appear before a word final 's'.
    @type __s_ending: C{unicode}
    @cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    @type __step1_suffixes: C{tuple}
    @cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    @type __step2_suffixes: C{tuple}
    @cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    @type __step3_suffixes: C{tuple}
    @note: A detailed description of the Swedish
           stemming algorithm can be found under
           U{http://snowball.tartarus.org/algorithms
           /swedish/stemmer.html}.
    """

    __vowels = u"aeiouy\xE4\xE5\xF6"
    __s_ending = u"bcdfghjklmnoprtvy"
    # Ordered longest-first so the first match is the longest suffix.
    __step1_suffixes = (u"heterna", u"hetens", u"heter", u"heten",
                        u"anden", u"arnas", u"ernas", u"ornas", u"andes",
                        u"andet", u"arens", u"arna", u"erna", u"orna",
                        u"ande", u"arne", u"aste", u"aren", u"ades",
                        u"erns", u"ade", u"are", u"ern", u"ens", u"het",
                        u"ast", u"ad", u"en", u"ar", u"er", u"or", u"as",
                        u"es", u"at", u"a", u"e", u"s")
    __step2_suffixes = (u"dd", u"gd", u"nn", u"dt", u"gt", u"kt", u"tt")
    __step3_suffixes = (u"fullt", u"l\xF6st", u"els", u"lig", u"ig")

    def stem(self, word):
        u"""
        Stem a Swedish word and return the stemmed form.

        @param word: The word that is stemmed.
        @type word: C{str, unicode}
        @return: The stemmed form.
        @rtype: C{unicode}
        """
        word = word.lower()

        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word

        # R1 region per the Scandinavian Snowball convention; suffixes are
        # matched in R1 but removed from both word and r1.
        r1 = self._r1_scandinavian(word, self.__vowels)

        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == u"s":
                    # A final 's' is only dropped after a valid s-ending
                    # consonant.  NOTE(review): word[-2] would raise
                    # IndexError for a one-character word; presumably r1
                    # can never end in "s" in that case -- confirm in
                    # _r1_scandinavian.
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break

        # STEP 2: undouble a final double consonant (only the last
        # character is removed, per the algorithm).
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word = word[:-1]
                r1 = r1[:-1]
                break

        # STEP 3
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix in (u"els", u"lig", u"ig"):
                    # These suffixes are deleted entirely ...
                    word = word[:-len(suffix)]
                elif suffix in (u"fullt", u"l\xF6st"):
                    # ... while "fullt"/"löst" just lose the final 't'.
                    word = word[:-1]
                break

        return word
def demo():
    u"""
    This function provides a demonstration of the Snowball stemmers.

    After invoking this function and specifying a language,
    it stems an excerpt of the Universal Declaration of Human Rights
    (which is a part of the NLTK corpus collection) and then prints
    out the original and the stemmed text.
    """
    import re
    from nltk.corpus import udhr

    # Maps each supported language to its UDHR corpus file; "porter"
    # (the Porter stemmer) reuses the English text.
    udhr_corpus = {"danish": "Danish_Dansk-Latin1",
                   "dutch": "Dutch_Nederlands-Latin1",
                   "english": "English-Latin1",
                   "finnish": "Finnish_Suomi-Latin1",
                   "french": "French_Francais-Latin1",
                   "german": "German_Deutsch-Latin1",
                   "hungarian": "Hungarian_Magyar-UTF8",
                   "italian": "Italian_Italiano-Latin1",
                   "norwegian": "Norwegian-Latin1",
                   "porter": "English-Latin1",
                   "portuguese": "Portuguese_Portugues-Latin1",
                   "romanian": "Romanian_Romana-Latin2",
                   "russian": "Russian-UTF8",
                   "spanish": "Spanish-Latin1",
                   "swedish": "Swedish_Svenska-Latin1",
                   }

    print u"\n"
    print u"******************************"
    print u"Demo for the Snowball stemmers"
    print u"******************************"

    # Prompt until the user enters a known language or 'exit'.
    while True:

        language = raw_input(u"Please enter the name of the language " +
                             u"to be demonstrated\n" +
                             u"/".join(SnowballStemmer.languages) +
                             u"\n" +
                             u"(enter 'exit' in order to leave): ")

        if language == u"exit":
            break

        if language not in SnowballStemmer.languages:
            print (u"\nOops, there is no stemmer for this language. " +
                   u"Please try again.\n")
            continue

        stemmer = SnowballStemmer(language)
        # Stem the first 300 words of the UDHR excerpt.
        excerpt = udhr.words(udhr_corpus[language]) [:300]

        stemmed = u" ".join([stemmer.stem(word) for word in excerpt])
        # Re-wrap the output text at roughly 70 columns.
        stemmed = re.sub(r"(.{,70})\s", r'\1\n', stemmed+u' ').rstrip()
        excerpt = u" ".join(excerpt)
        excerpt = re.sub(r"(.{,70})\s", r'\1\n', excerpt+u' ').rstrip()

        print u"\n"
        print u'-' * 70
        print u'ORIGINAL'.center(70)
        print excerpt
        print u"\n\n"
        print u'STEMMED RESULTS'.center(70)
        print stemmed
        print u'-' * 70
        print u"\n"
if __name__ == u"__main__":
demo() | markgw/jazzparser | lib/nltk/stem/snowball.py | Python | gpl-3.0 | 155,023 | [
"ASE"
] | 9e8ef4ac59b6511cbcc8168c6184b26656e6de89240b2bafc43bdac0b2b5f5e0 |
'''
This example uses Kivy Garden Graph addon to draw graphs plotting the
accelerometer values in X,Y and Z axes.
The package is installed in the directory: ./libs/garden/garden.graph
To read more about kivy garden, visit: http://kivy-garden.github.io/.
'''
import kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from kivy.clock import Clock
from plyer import accelerometer
from kivy.garden.graph import MeshLinePlot
kivy.require('1.8.0')
class AccelerometerDemo(BoxLayout):
    """Root widget: plots the accelerometer X/Y/Z values on a garden graph.

    The graph widget and the toggle button are looked up through ``self.ids``,
    so they are presumably defined in the accompanying .kv file.
    """

    def __init__(self):
        super().__init__()
        self.sensorEnabled = False
        self.graph = self.ids.graph_plot

        # One MeshLinePlot per axis: X red, Y green, Z blue.
        self.plot = []
        self.plot.append(MeshLinePlot(color=[1, 0, 0, 1]))  # X - Red
        self.plot.append(MeshLinePlot(color=[0, 1, 0, 1]))  # Y - Green
        self.plot.append(MeshLinePlot(color=[0, 0, 1, 1]))  # Z - Blue

        self.reset_plots()

        for plot in self.plot:
            self.graph.add_plot(plot)

    def reset_plots(self):
        """Clear all three plots and restart the sample counter."""
        for plot in self.plot:
            plot.points = [(0, 0)]
        self.counter = 1

    def do_toggle(self):
        """Start or stop sampling at 20 Hz; show a popup when unsupported."""
        try:
            if not self.sensorEnabled:
                accelerometer.enable()
                Clock.schedule_interval(self.get_acceleration, 1 / 20.)

                self.sensorEnabled = True
                self.ids.toggle_button.text = "Stop Accelerometer"
            else:
                accelerometer.disable()
                self.reset_plots()
                Clock.unschedule(self.get_acceleration)

                self.sensorEnabled = False
                self.ids.toggle_button.text = "Start Accelerometer"
        except NotImplementedError:
            # plyer raises this on platforms without an accelerometer
            # implementation.
            popup = ErrorPopup()
            popup.open()

    def get_acceleration(self, dt):
        """Clock callback: append the latest (x, y, z) reading to the plots."""
        if self.counter == 100:
            # Keep a 100-sample window: drop the oldest point and shift
            # every remaining timestamp one slot to the left.
            for plot in self.plot:
                del plot.points[0]
                plot.points[:] = [(i[0] - 1, i[1]) for i in plot.points[:]]

            self.counter = 99

        val = accelerometer.acceleration[:3]

        # plyer returns (None, None, None) until a reading is available.
        if val != (None, None, None):
            self.plot[0].points.append((self.counter, val[0]))
            self.plot[1].points.append((self.counter, val[1]))
            self.plot[2].points.append((self.counter, val[2]))

            self.counter += 1
class AccelerometerDemoApp(App):
    """Application wrapper around the AccelerometerDemo root widget."""

    def build(self):
        # The widget tree/ids are presumably supplied by the matching
        # .kv file (kivy convention) -- confirm against the example dir.
        return AccelerometerDemo()

    def on_pause(self):
        # Returning True lets the app pause (mobile) instead of stopping.
        return True
class ErrorPopup(Popup):
    # Shown when the platform has no accelerometer support; its content is
    # presumably defined in the .kv file.
    pass
if __name__ == '__main__':
AccelerometerDemoApp().run()
| kivy/plyer | examples/accelerometer/using_graph/main.py | Python | mit | 2,722 | [
"VisIt"
] | b0929d057476b2433deeb26eb730c5e70e0f5b05e4188ad231944e7e2101cb35 |
# proxy module
from __future__ import absolute_import
from mayavi.filters.gaussian_splatter import *
| enthought/etsproxy | enthought/mayavi/filters/gaussian_splatter.py | Python | bsd-3-clause | 101 | [
"Mayavi"
] | c6e5d85b178fb5042ba9a9a87d34c2b2713c99ca71bede682a2af824eeb7e397 |
# PEP 8 (E731): use def rather than binding a lambda to a name, so the
# function gets a proper __name__ for tracebacks.
def add(x, y):
    """Return the sum of x and y."""
    return x + y


print(add(2, 3))

names = ['David Beazley', 'Brian Jones', 'Raymond Hettinger', 'Ned Batchelder']
# Sort people by their lowercased last name.
print(sorted(names, key=lambda name: name.split()[-1].lower()))
"Brian"
] | b05c4e391a8761fe8f2379e1005ef69b26d516dc9fb208c58f9936b4e836da6a |
# coding: utf-8
""" Interpolate absolute magnitudes from the PARSEC isochrones """
from __future__ import division, print_function
__author__ = "Andy Casey <arc@ast.cam.ac.uk>"
# Standard library
from glob import glob
# Third party
import numpy as np
import scipy.interpolate
def load_parsec_isochrone(filename):
    """Load the relevant information from one PARSEC isochrone file.

    The CMD output columns are::

        0:Z 1:log(age) 2:M_ini 3:M_act 4:logL/Lo 5:logTe 6:logG 7:mbol
        8:J 9:H 10:Ks 11:int_IMF 12:stage

    We keep (J, H, Ks, logTe, logG) -- magnitudes first, which makes the
    later split into interpolation values/points easier -- and append the
    overall metallicity [M/H] parsed from the header as a sixth column.

    :param filename: path to a PARSEC/CMD isochrone table.
    :returns: ndarray of shape (n_points, 6).
    """
    isochrone_data = np.loadtxt(filename, usecols=(8, 9, 10, 5, 6))

    # Parse the overall metallicity from the "# Isochrone Z = ... [M/H] = ..."
    # header comment.  It is usually the 12th line, but some CMD versions
    # emit one extra header line first.
    with open(filename, 'r') as fp:
        content = fp.readlines()

    try:
        feh = float(content[11].split()[10])
    except (IndexError, ValueError):
        # Narrowed from a bare except: only a shifted/short header
        # (IndexError) or a non-numeric token (ValueError) should trigger
        # the fallback; anything else (e.g. KeyboardInterrupt) propagates.
        feh = float(content[12].split()[10])

    # Put [M/H] as a constant column next to the data array.
    feh_column = np.array([feh] * len(isochrone_data)).reshape(-1, 1)
    isochrone_data = np.hstack([isochrone_data, feh_column])

    return isochrone_data
def load_all_parsec_isochrones(filenames):
    """Stack every isochrone file into one array and split it for
    interpolation.

    Returns ``(points, values)`` where ``points`` holds (teff, logg, feh)
    columns and ``values`` the (J, H, K) magnitude columns.
    """
    # Collect the rows of every file into one flat list.
    rows = []
    for filename in filenames:
        for row in load_parsec_isochrone(filename):
            rows.append(row)

    combined = np.array(rows)

    # Columns 0-2 are what we interpolate (J, H, K); columns 3-5 are the
    # coordinates we interpolate over (teff, logg, feh).
    values = combined[:, :3]
    points = combined[:, 3:]

    return (points, values)
class MagnitudeInterpolator(object):
    """Interpolate absolute (J, H, K) magnitudes on a PARSEC isochrone grid."""

    def __init__(self, filenames):
        points, values = load_all_parsec_isochrones(filenames)

        # Convert logarithmic quantities to linear space:
        # points are (log Teff, log g, [M/H]) and the magnitudes are
        # logarithmic by definition.
        points = 10**points
        values = 10**values

        self.interpolator = scipy.interpolate.LinearNDInterpolator(points, values)

    def __call__(self, point):
        """Interpolate absolute magnitudes (J, H, K) for one point.

        :param point: sequence of (teff, logg, [M/H]); teff is linear,
            logg and [M/H] are logarithmic and converted internally.
        :returns: array of absolute (J, H, K) magnitudes.

        The input sequence is not modified (the previous implementation
        overwrote point[1] and point[2] in the caller's list).
        """
        teff, logg, feh = point[0], point[1], point[2]
        absolute_magnitudes = self.interpolator(teff, 10**logg, 10**feh)

        # Convert the (linear) interpolated magnitudes back to log space.
        return np.log10(absolute_magnitudes)

    def pdf(self, point, uncertainties, size=100):
        """Monte-Carlo propagate Gaussian parameter uncertainties.

        :param point: (teff, logg, [M/H]) central values.
        :param uncertainties: 1-sigma errors on the same three quantities.
        :param size: number of random draws.
        :returns: for each of J, H, K a tuple
            (median, +error, -error) from the 16/50/84th percentiles.
        """
        teff, logg, feh = point
        u_teff, u_logg, u_feh = uncertainties

        teff_pdf = np.random.normal(teff, u_teff, size)
        logg_pdf = np.random.normal(logg, u_logg, size)
        feh_pdf = np.random.normal(feh, u_feh, size)

        magnitude_distributions = np.zeros((size, 3))
        # (removed a stray no-op statement "magnitude_distributions[:]"
        # that evaluated a slice and discarded it)
        for i, (teff_i, logg_i, feh_i) in enumerate(zip(teff_pdf, logg_pdf, feh_pdf)):
            point_i = [teff_i, logg_i, feh_i]
            magnitude_distributions[i, :] = self(point_i)

        def q(v):
            # (median, +err, -err) from the (16, 50, 84) percentiles.
            return (v[1], v[2] - v[1], v[1] - v[0])

        quantiles = []
        for i in range(3):
            quantiles.append(q(np.percentile(magnitude_distributions[:, i], [16, 50, 84])))
        return quantiles
if __name__ == "__main__":
"""
# Create an interpolator
magnitudes = MagnitudeInterpolator(glob("isochrones/PARSEC*.dat"))
# Example: Some set of stellar parameters:
teff, logg, feh = [5777., 4.445, 0.]
j_absolute, h_absolute, k_absolute = magnitudes([teff, logg, feh])
print("For Teff = {0:.0f}, logg = {1:.2f}, [Fe/H] = {2:.2f} absolute magnitudes are:"
" J = {3:.2f}, H = {4:.2f}, K = {5:.2f}".format(teff, logg, feh, j_absolute,
h_absolute, k_absolute))
# Example: Propagate symmetric uncertainties in stellar parameters:
teff, logg, feh = [5000, 2.2, -1]
u_teff, u_logg, u_feh = [250, 0.2, 0.1]
print("For Teff = {0:.0f} +/- {1:.0f} K, logg = {2:.2f} +/- {3:.2f}, [Fe/H] = {4:.2f} +/- {5:.2f}:".format(
teff, u_teff, logg, u_logg, feh, u_feh))
bands = "JHK"
# Since we don't have posterior distributions and we just have a mean and standard
# deviation, we will approximate this as a gaussian and sample it N times
pdf = magnitudes.pdf([teff, logg, feh], [u_teff, u_logg, u_feh])
for band, quantiles in zip(bands, pdf):
print(" {0} = {1:.2f} +/- (+{2:.2f}, -{3:.2f})".format(band, quantiles[0], quantiles[1], quantiles[2]))
"""
magnitudes = MagnitudeInterpolator(glob("isochrones/PARSEC*.dat"))
# Example: Some set of stellar parameters:
teff, logg, feh = [5777., 4.445, 0.]
j_absolute, h_absolute, k_absolute = magnitudes([teff, logg, feh])
print("With 1 Gyr isochrones..")
print("For Teff = {0:.0f}, logg = {1:.2f}, [Fe/H] = {2:.2f} absolute magnitudes are:"
" J = {3:.2f}, H = {4:.2f}, K = {5:.2f}".format(teff, logg, feh, j_absolute,
h_absolute, k_absolute))
magnitudes = MagnitudeInterpolator(glob("isochrones/ness*.dat"))
teff, logg, feh = [5777., 4.445, 0.]
j_absolute, h_absolute, k_absolute = magnitudes([teff, logg, feh])
print("With 10 Gyr isochrones..")
print("For Teff = {0:.0f}, logg = {1:.2f}, [Fe/H] = {2:.2f} absolute magnitudes are:"
" J = {3:.2f}, H = {4:.2f}, K = {5:.2f}".format(teff, logg, feh, j_absolute,
h_absolute, k_absolute))
with open("testandy.txt", "r") as fp:
lines = fp.readlines()[1:]
rows = []
for line in lines:
teff, logg, feh, k_apparent, A_k = map(float, line.split())
j_absolute, h_absolute, k_absolute = magnitudes([teff, logg, feh])
k_corrected = k_apparent - max([A_k, 0])
mu = k_corrected - k_absolute
distance = 10**(1 + mu/5.0)
distance /= 1000. # in kpc
rows.append({
"Teff": teff,
"logg": logg,
"[Fe/H]": feh,
"K_apparent": k_apparent,
"A_k": A_k,
"J_absolute": j_absolute,
"H_absolute": h_absolute,
"K_absolute": k_absolute,
"Distance": distance
})
from astropy.table import Table
results = Table(rows=rows)
results.write("results.csv")
| davidwhogg/Platypus | code/distances/distances.py | Python | mit | 7,656 | [
"Gaussian"
] | 39800fe1ee2cff63c024a3ff83218303157cd9f6d9829d583f8e5b5d75f677cc |
""" A computing element class that uses sudo
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pwd
import stat
import errno
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
__RCSID__ = "$Id$"
class SudoComputingElement(ComputingElement):
    """A computing element that runs each payload as a separate local user
    via sudo, keeping the payload proxy apart from the pilot credentials."""

    #############################################################################
    def __init__(self, ceUniqueID):
        """Standard constructor."""
        super(SudoComputingElement, self).__init__(ceUniqueID)
        self.ceType = "Sudo"
        self.submittedJobs = 0
        self.runningJobs = 0

        self.processors = int(self.ceParameters.get("NumberOfProcessors", 1))
        # One job at a time: MaxTotalJobs is forced to 1.
        self.ceParameters["MaxTotalJobs"] = 1

    #############################################################################
    def submitJob(self, executableFile, proxy=None, **kwargs):
        """Method to submit job, overridden from super-class.

        :param str executableFile: file to execute via systemCall.
                                   Normally the JobWrapperTemplate when invoked by the JobAgent.
        :param str proxy: the proxy used for running the job (the payload). It will be dumped to a file.
        """
        # NOTE(review): when no proxy is given, payloadProxy stays "" and is
        # later passed to sudoExecute()/os.chown -- confirm callers always
        # supply a proxy.
        payloadProxy = ""
        if proxy:
            self.log.verbose("Setting up proxy for payload")
            result = self.writeProxyToFile(proxy)
            if not result["OK"]:
                return result
            payloadProxy = result["Value"]  # payload proxy file location

        if "X509_USER_PROXY" not in os.environ:
            self.log.error("X509_USER_PROXY variable for pilot proxy not found in local environment")
            return S_ERROR(DErrno.EPROXYFIND, "X509_USER_PROXY not found")

        # See if a fixed value has been given
        payloadUsername = self.ceParameters.get("PayloadUser")

        if payloadUsername:
            self.log.info("Payload username %s from PayloadUser in ceParameters" % payloadUsername)
        else:
            # First username in the sequence to use when running payload job
            # If first is pltXXp00 then have pltXXp01, pltXXp02, ...
            try:
                baseUsername = self.ceParameters.get("BaseUsername")
                baseCounter = int(baseUsername[-2:])
                self.log.info("Base username from BaseUsername in ceParameters : %s" % baseUsername)
            except Exception:
                # Last chance to get PayloadUsername
                if "USER" not in os.environ:
                    self.log.error('PayloadUser, BaseUsername and os.environ["USER"] are not properly defined')
                    return S_ERROR(errno.EINVAL, "No correct payload username provided")
                baseUsername = os.environ["USER"] + "00p00"
                baseCounter = 0
                self.log.info("Base username from $USER + 00p00 : %s" % baseUsername)

            # Next one in the sequence, derived from the jobs counter
            payloadUsername = baseUsername[:-2] + ("%02d" % (baseCounter + self.submittedJobs))
            self.log.info("Payload username set to %s using jobs counter" % payloadUsername)

        try:
            payloadUID = pwd.getpwnam(payloadUsername).pw_uid
            payloadGID = pwd.getpwnam(payloadUsername).pw_gid
        except KeyError:
            error = S_ERROR('User "' + str(payloadUsername) + '" does not exist!')
            return error

        self.log.verbose("Starting process for monitoring payload proxy")
        # Periodically renew/copy the payload proxy while the job runs.
        gThreadScheduler.addPeriodicTask(
            self.proxyCheckPeriod,
            self.monitorProxy,
            taskArgs=(payloadProxy, payloadUsername, payloadUID, payloadGID),
            executions=0,
            elapsedTime=0,
        )

        # Submit job
        self.log.info("Changing permissions of executable (%s) to 0755" % executableFile)
        self.submittedJobs += 1

        try:
            os.chmod(
                os.path.abspath(executableFile),
                stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
            )
        except OSError as x:
            self.log.error("Failed to change permissions of executable to 0755 with exception", "\n%s" % (x))

        result = self.sudoExecute(
            os.path.abspath(executableFile), payloadProxy, payloadUsername, payloadUID, payloadGID
        )
        # BUGFIX: no runningJobs decrement here.  sudoExecute() increments
        # and decrements the counter itself around the (synchronous)
        # payload run; the previous unmatched "self.runningJobs -= 1" at
        # this point drove the counter negative after every job.
        if not result["OK"]:
            self.log.error("Failed sudoExecute", result)
            return result

        self.log.debug("Sudo CE result OK")
        return S_OK()

    #############################################################################
    def sudoExecute(self, executableFile, payloadProxy, payloadUsername, payloadUID, payloadGID):
        """Run sudo with checking of the exit status code.

        :param str executableFile: absolute path of the wrapper to run.
        :param str payloadProxy: pilot-owned proxy file for the payload.
        :param str payloadUsername: local account the payload runs under.
        :param int payloadUID: UID of that account.
        :param int payloadGID: GID of that account (per-user group).
        """
        # We now implement a file giveaway using groups, to avoid any need to sudo to root.
        # Each payload user must have their own group. The pilot user must be a member
        # of all of these groups. This allows the pilot user to set the group of the
        # payloadProxy file to be that of the payload user. The payload user can then
        # read it and make a copy of it (/tmp/x509up_uNNNN) that it owns. Some grid
        # commands check that the proxy is owned by the current user so the copy stage
        # is necessary.

        # 1) Make sure the payload user can read its proxy via its per-user group
        os.chown(payloadProxy, -1, payloadGID)
        os.chmod(payloadProxy, stat.S_IRUSR + stat.S_IWUSR + stat.S_IRGRP)

        # 2) Now create a copy of the proxy owned by the payload user
        result = shellCall(
            0,
            '/usr/bin/sudo -u %s sh -c "cp -f %s /tmp/x509up_u%d ; chmod 0400 /tmp/x509up_u%d"'
            % (payloadUsername, payloadProxy, payloadUID, payloadUID),
            callbackFunction=self.sendOutput,
        )

        # 3) Make sure the current directory is +rwx by the pilot's group
        #    (needed for InstallDIRAC but not for LHCbInstallDIRAC, for example)
        os.chmod(".", os.stat(".").st_mode | stat.S_IRWXG)

        # Run the executable (the wrapper in fact), forwarding the relevant
        # environment and pointing X509_USER_PROXY at the payload's copy.
        cmd = "/usr/bin/sudo -u %s " % payloadUsername
        cmd += "PATH=$PATH "
        cmd += "DIRACSYSCONFIG=/scratch/%s/pilot.cfg " % os.environ.get("USER", "")
        cmd += "LD_LIBRARY_PATH=$LD_LIBRARY_PATH "
        cmd += "PYTHONPATH=$PYTHONPATH "
        cmd += "X509_CERT_DIR=$X509_CERT_DIR "
        cmd += "X509_USER_PROXY=/tmp/x509up_u%d sh -c '%s'" % (payloadUID, executableFile)
        self.log.info("CE submission command is: %s" % cmd)

        # The counter is balanced locally: incremented for the duration of
        # the (blocking) shellCall, decremented right after.
        self.runningJobs += 1
        result = shellCall(0, cmd, callbackFunction=self.sendOutput)
        self.runningJobs -= 1
        if not result["OK"]:
            result["Value"] = (0, "", "")
            return result

        resultTuple = result["Value"]
        status = resultTuple[0]
        stdOutput = resultTuple[1]
        stdError = resultTuple[2]
        self.log.info("Status after the sudo execution is %s" % str(status))
        if status > 128:
            error = S_ERROR(status)
            error["Value"] = (status, stdOutput, stdError)
            return error

        return result

    #############################################################################
    def getCEStatus(self):
        """Method to return information on running and pending jobs."""
        result = S_OK()
        result["SubmittedJobs"] = self.submittedJobs
        result["RunningJobs"] = self.runningJobs
        result["WaitingJobs"] = 0

        # processors
        result["AvailableProcessors"] = self.processors
        return result

    #############################################################################
    def monitorProxy(self, payloadProxy, payloadUsername, payloadUID, payloadGID):
        """Monitor the payload proxy and renew as necessary.

        Invoked periodically by gThreadScheduler (see submitJob).
        """
        retVal = self._monitorProxy(payloadProxy)
        if not retVal["OK"]:
            # Failed to renew the proxy, nothing else to be done
            return retVal

        if not retVal["Value"]:
            # No need to renew the proxy, nothing else to be done
            return retVal

        self.log.info("Re-executing sudo to make renewed payload proxy available as before")

        # New version of the proxy file, so we have to do the copy again

        # 1) Make sure the payload user can read its proxy via its per-user group
        os.chown(payloadProxy, -1, payloadGID)
        os.chmod(payloadProxy, stat.S_IRUSR + stat.S_IWUSR + stat.S_IRGRP)

        # 2) Now recreate the copy of the proxy owned by the payload user
        cmd = '/usr/bin/sudo -u %s sh -c "cp -f %s /tmp/x509up_u%d ; chmod 0400 /tmp/x509up_u%d"' % (
            payloadUsername,
            payloadProxy,
            payloadUID,
            payloadUID,
        )
        result = shellCall(0, cmd, callbackFunction=self.sendOutput)
        if not result["OK"]:
            self.log.error("Could not recreate the copy of the proxy", "CMD: %s; %s" % (cmd, result["Message"]))

        return S_OK("Proxy checked")
| ic-hep/DIRAC | src/DIRAC/Resources/Computing/SudoComputingElement.py | Python | gpl-3.0 | 9,479 | [
"DIRAC"
] | be6647e7712f8a14cf55e63aaf65dc99924e1db2bb1f786454e79627ec8d438d |
import os
import sys
class Atom(object):
    """Atom Class: one ATOM/HETATM record parsed from a PDB file.

    Field names follow the PDB-format ATOM record; the defaults mark
    "unset" values (-1 for numeric ids, blanks for text fields).
    """
    def __init__(self, rectype="ATOM", id=-1, name="", altLoc=" ", resName=" ",
                 chainID=-1, resSeq=-1, iCode=" ",
                 x=0, y=0, z=0, occupancy=" ", tempFactor=" ", charge=" "):
        self.rectype = rectype        # record type: "ATOM" or "HETATM"
        self.id = id                  # atom serial number
        self.name = name              # atom name, e.g. "CA"
        self.altLoc = altLoc          # alternate location indicator
        self.resName = resName        # residue name, e.g. "ALA"
        self.chainID = chainID        # chain identifier
        self.resSeq = resSeq          # residue sequence number
        self.iCode = iCode            # insertion code
        self.x = x                    # coordinates (presumably Angstroms,
        self.y = y                    # as in the PDB spec -- not verified
        self.z = z                    # from this file alone)
        self.occupancy = occupancy
        self.tempFactor = tempFactor  # temperature (B) factor
        self.charge = charge
class Bond(object):
    """A bond between two atom serial numbers (as found in CONECT records)."""

    def __init__(self, atom1=0, atom2=0):
        self.atom1, self.atom2 = atom1, atom2
class Residue(object):
    """A residue: id and name plus the atoms that belong to it and its chain.

    ``atoms`` defaults to a fresh list per instance (never a shared default).
    """

    def __init__(self, id=-1, name="", atoms=None, chain=""):
        self.id = id
        self.name = name
        # Identity test (`is None`) instead of the original `== None`
        # equality test; behavior is otherwise unchanged.
        if atoms is None:
            self.atoms = []
        else:
            self.atoms = atoms
        self.chain = chain
class Chain(object):
    """A chain: id and name plus the residues that belong to it.

    ``residues`` defaults to a fresh list per instance (never a shared default).
    """

    def __init__(self, id=-1, name="", residues=None):
        self.id = id
        self.name = name
        # Identity test (`is None`) instead of the original `== None`.
        if residues is None:
            self.residues = []
        else:
            self.residues = residues
class Molecule(object):
    """A molecule: atoms and residues keyed by id, plus bond and chain lists.

    All container arguments default to fresh per-instance containers
    (dicts for ``atoms``/``residues``, lists for ``bonds``/``chains``).
    """

    def __init__(self, id=0, name="", atoms=None, bonds=None, residues=None,
                 chains=None):
        self.id = id
        self.name = name
        # Identity tests (`is None`) instead of the original `== None`;
        # behavior is otherwise unchanged.
        if atoms is None:
            self.atoms = {}
        else:
            self.atoms = atoms
        if bonds is None:
            self.bonds = []
        else:
            self.bonds = bonds
        if residues is None:
            self.residues = {}
        else:
            self.residues = residues
        if chains is None:
            self.chains = []
        else:
            self.chains = chains

    def residue_total(self):
        """Number of residues in the molecule."""
        return len(self.residues)

    def atom_total(self):
        """Number of atoms in the molecule."""
        return len(self.atoms)

    def bond_total(self):
        """Number of bonds in the molecule."""
        return len(self.bonds)

    def chain_total(self):
        """Number of chains in the molecule."""
        return len(self.chains)
class pyPDB(object):
    """Parser/manipulator for a single PDB file (Python 2 code).

    The file is read eagerly on construction and exposed as a Molecule via
    ``self.molecule``; atom selections and hydrogen-reduced views are kept
    on the instance.
    """
    def __init__(self, filename):
        self.filename = filename
        self.molecule = None
        self.selectedAtoms = []  # atoms picked via selectAtom()/selectAtoms()
        self.reduced = []        # non-hydrogen atoms collected by reduce()
        self._readFile()         # parse immediately; may print warnings
        self.verbose = False     # controls _log() output in reorderResidues()
    def _readFile(self):
        """Parse ATOM/HETATM, CONECT and TER records into a Molecule."""
        # NOTE(review): Molecule's first positional parameter is ``id``, so
        # the filename lands in ``molecule.id``; the name is set just below.
        m = Molecule(self.filename)
        m.name = os.path.splitext(self.filename)[0].lower()
        # Normalise Windows line endings before splitting into lines.
        f = open(self.filename, 'r').read().replace('\r\n', '\n')
        l = 0  # line counter; incremented but otherwise unused
        temp_chain = []  # residues accumulated since the last TER record
        chain_no = 1
        for line in f.splitlines():
            l += 1
            if (line[0:4] == 'ATOM' or line[0:6] == 'HETATM'):
                # get atom information
                atom = self._readAtom(line)
                # add atom to molecule atoms
                m.atoms[atom.id] = atom
                if atom.resSeq not in m.residues.keys():
                    # new residue
                    r = Residue()
                    r.id = atom.resSeq
                    r.name = atom.resName
                    r.atoms = [atom.id]
                    chain_name = line[21:22]
                    r.chain = chain_name
                    m.residues[r.id] = r
                    temp_chain.append(r)
                else:
                    # new atom to residue
                    # NOTE(review): appends the Atom *object* here, but the
                    # list was seeded with atom *ids* above — Residue.atoms
                    # ends up mixing ints and Atom instances.
                    m.residues[atom.resSeq].atoms.append(atom)
            if line[0:6] == 'CONECT':
                bonds_in_line = self._readBonds(line)
                for bond in bonds_in_line:
                    m.bonds.append(bond)
            if 'TER' in line:
                # Close the chain accumulated so far at each TER record.
                c = Chain()
                c.name = line[21:22]
                c.residues = temp_chain
                c.id = chain_no
                m.chains.append(c)
                temp_chain = []
                chain_no = chain_no + 1
        if m.bond_total() == 0:
            print 'Warning: No CONECT info, so no bond analysis.'
        if 'TER' not in f:
            print 'Warning: No TER statement, so no chains are built.\n'
        self.molecule = m
    def _readAtom(self, line):
        """Build an Atom from one fixed-column ATOM/HETATM record.

        NOTE(review): some slices are off by one against the official PDB
        column specification (e.g. the serial number occupies columns 6-10,
        here read as line[7:11]) — verify against the target files.
        """
        a = Atom()
        a.rectype = line[0:6]  # ATOM or HETATM
        iid = line[7:11].strip()
        a.id = int(iid)
        a.name = line[12:14].strip()
        a.altLoc = line[16]
        a.resName = line[17:20]
        a.chainID = line[21]
        a.resSeq = int(line[22:26])
        a.iCode = line[26]
        a.x = float(line[30:37])
        a.y = float(line[38:45])
        a.z = float(line[46:53])
        a.occupancy = line[54:59]
        a.tempFactor = line[60:65]
        a.charge = line[78:89]
        return a
    def _readBonds(self, line):
        """Expand one CONECT record: the first atom field is bonded to each
        of the remaining fields; returns the resulting Bond list."""
        fields = line.split()
        bonds = []
        n = 2
        while n < len(fields):
            bond = Bond()
            bond.atom1 = int(fields[1])
            bond.atom2 = int(fields[n])
            bonds.append(bond)
            n += 1
        return bonds
    def distanceBetweenAtoms(self, atomid1, atomid2):
        """Euclidean distance between two atoms, truncated (not rounded)
        to two decimal places."""
        import numpy
        atom1 = self.molecule.atoms[atomid1]
        atom2 = self.molecule.atoms[atomid2]
        a = numpy.array((atom1.x, atom1.y, atom1.z))
        b = numpy.array((atom2.x, atom2.y, atom2.z))
        dist = numpy.linalg.norm(a - b)
        # Truncation via int(); 3.999 -> 3.99, not 4.0.
        return int(dist * 100) / 100.00
    def atomsWithinDistanceOfAtom(self, atomid, distance):
        """Return (atoms, distances) for every other atom within ``distance``
        of ``atomid``; the hits also replace ``self.selectedAtoms``."""
        referenceAtom = self.molecule.atoms[atomid]  # unused; kept as-is
        atomsWithinDistance = []
        atomDistances = []
        self.selectedAtoms = []
        for key in self.molecule.atoms:
            # Distance is computed twice per hit; acceptable for small files.
            if self.distanceBetweenAtoms(atomid, self.molecule.atoms[key].id) <= distance:
                if self.molecule.atoms[key].id != atomid:
                    atomsWithinDistance.append(self.molecule.atoms[key])
                    d = self.distanceBetweenAtoms(
                        atomid, self.molecule.atoms[key].id)
                    atomDistances.append(d)
                    self.selectedAtoms.append(self.molecule.atoms[key])
        return (atomsWithinDistance, atomDistances)
    def toJSON(self):
        """Summary counts formatted as a hand-built JSON string."""
        ret = '{ \n'
        ret += '\t "atom_total": {0},\n'.format(self.molecule.atom_total())
        ret += '\t "residue_total": {0},\n'.format(self.molecule.residue_total())
        ret += '\t "bond_total": {0}'.format(self.molecule.bond_total())
        ret += '\n}'
        return ret
    def distanceMap(self):
        """All-pairs distance matrix as a list of lists (O(n^2) calls)."""
        n1 = 0  # unused; kept as-is
        dist_map = []
        for atom in self.molecule.atoms:
            atom1 = self.molecule.atoms[atom]
            temp_distances = []
            for a2 in self.molecule.atoms:
                atom2 = self.molecule.atoms[a2]
                temp_distances.append(
                    self.distanceBetweenAtoms(atom1.id, atom2.id))
            dist_map.append(temp_distances)
        return dist_map
    def plotDistanceMap(self, save=False, directory='', close=False):
        """Render distanceMap() as a heat map; optionally save it to
        '<directory>distance_map.pdf' (no path separator is inserted)."""
        import numpy
        import matplotlib.pyplot as plt
        m = self.distanceMap()
        matrix = numpy.matrix(m)
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.set_aspect('equal')
        plt.title('Distance Map')
        extent = self.molecule.atom_total() + 0.5
        plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.hot,
                   extent=(0.5, extent, 0.5, extent))
        plt.colorbar()
        if save == True:
            plt.savefig('{}distance_map.pdf'.format(directory))
        if close == True:
            plt.close()
        else:
            plt.show()
    def selectAtom(self, atomid):
        """Add one atom to the current selection (skips duplicates)."""
        atom = self.molecule.atoms[atomid]
        alreadySelected = False
        for atoms in self.selectedAtoms:
            if atomid == atoms.id:
                alreadySelected = True
        if alreadySelected == False:
            self.selectedAtoms.append(atom)
        return self  # enables chaining
    def selectAtoms(self, atomids=[]):
        """Add several atoms to the selection, skipping duplicates.

        NOTE(review): mutable default argument; harmless here because the
        list is only iterated, but worth changing to ``None``.
        """
        for atom in atomids:
            alreadySelected = False
            for atoms in self.selectedAtoms:
                if atom == atoms.id:
                    alreadySelected = True
            if alreadySelected == False:
                self.selectedAtoms.append(self.molecule.atoms[atom])
    def reduce(self):
        """Collect atoms whose name does not contain 'H' into self.reduced.

        NOTE(review): the substring test also drops heavy atoms with an 'H'
        in the name (e.g. 'NH1', 'OH') — confirm that is acceptable.
        """
        for atom in self.molecule.atoms:
            if 'H' not in self.molecule.atoms[atom].name:
                self.reduced.append(self.molecule.atoms[atom])
        return self.reduced
    def listResiduesFromAtoms(self, atoms):
        """Map a list of Atoms to the (deduplicated) ids of their residues.

        NOTE(review): ``atom not in residues`` compares Atom objects against
        a list of Residues (always true), and Atom defines no
        ``residue_id`` attribute — as written this raises AttributeError
        unless the attribute is set elsewhere.
        """
        residues = []
        for atom in atoms:
            if atom not in residues:
                residues.append(self.molecule.residues[atom.residue_id])
        temp_residue_list = []
        for residue in residues:
            if residue.id not in temp_residue_list:
                temp_residue_list.append(residue.id)
        return temp_residue_list
    def toAmberMask(self, key='residues'):
        """Format the current selection as a comma-separated Amber mask of
        residue ids (key='residues') or atom ids (key='atoms')."""
        ret = ''
        i = 1
        if(key == 'residues'):
            for residue in self.listResiduesFromAtoms(self.selectedAtoms):
                # Last element gets no trailing comma.
                if i == len(self.listResiduesFromAtoms(self.selectedAtoms)):
                    comma = ''
                else:
                    comma = ','
                ret += '{0}{1}'.format(residue, comma)
                i = i + 1
            return ret
        elif(key == 'atoms'):
            for atom in self.selectedAtoms:
                if i == len(self.selectedAtoms):
                    comma = ''
                else:
                    comma = ','
                ret += '{0}{1}'.format(atom.id, comma)
                i = i + 1
            return ret
    def removeSelection(self):
        """Clear the selection; returns self for chaining."""
        self.selectedAtoms = []
        return self
    def writePDB(self):
        """Print formatted ATOM lines for the selection (or every atom)."""
        if not self.selectedAtoms:
            atomsToWrite = self.molecule.atoms
        else:
            atomsToWrite = []
            for atom in self.selectedAtoms:
                atomsToWrite.append(atom.id)
        # Iterating the atoms dict yields ids; the list branch holds ids too.
        for atom in sorted(atomsToWrite, key=lambda k: k):
            a = self.molecule.atoms[atom]
            print self._get_atom_line(a)
    def _get_atom_line(self, a):
        """Format one Atom as a fixed-width PDB ATOM record string."""
        # COLUMNS        DATA TYPE     FIELD        DEFINITION
        # ---------------------------------------------------------------------
        # 0 -  5        Record name   "ATOM  "
        # 6 - 10        Integer       serial       Atom serial number.
        # 12 - 15        Atom          name         Atom name.
        # 16             Character     altLoc       Alternate location indicator.
        # 17 - 19        Residue name  resName      Residue name.
        # 21             Character     chainID      Chain identifier.
        # 22 - 25        Integer       resSeq       Residue sequence number.
        # 26             AChar         iCode        Code for insertion of residues.
        # 30 - 37        Real(8.3)     x            Orthogonal coordinates for X in Angstroms.
        # 38 - 45        Real(8.3)     y            Orthogonal coordinates for Y in Angstroms.
        # 46 - 53        Real(8.3)     z            Orthogonal coordinates for Z in Angstroms.
        # 54 - 59        Real(6.2)     occupancy    Occupancy.
        # 60 - 65        Real(6.2)     tempFactor   Temperature factor.
        # 76 - 77        LString(2)    element      Element symbol, right-justified.
        # 78 - 89        LString(2)    charge       Charge on the atom.
        #          1         2         3         4         5         6         7         8
        # 012345678901234567890123456789012345678901234567890123456789012345678901234567890
        # MODEL        1
        # ATOM      1  N   LEU A  25      80.669  55.349  53.905  1.00 39.12           N
        # ATOM      1  N   LEU A   2      80.660  55.340  53.900  1.0
        args = (a.rectype, a.id, a.name, a.altLoc, a.resName,
                a.chainID, a.resSeq, a.iCode,
                a.x, a.y, a.z, a.occupancy)
        return "%s%5i %-4s%c%3s %c%4i%c   %8.3f%8.3f%8.3f%s" % args
    def translateCoordinates(self, translateVector):
        """Shift the selection (or all atoms) by ``translateVector``.

        NOTE(review): references ``numpy`` but this module only imports
        os/sys at top level (numpy is imported function-locally elsewhere),
        so this raises NameError as written.
        """
        if not self.selectedAtoms:
            atoms_list = self.molecule.atoms
        else:
            atoms_list = []
            for atom in self.selectedAtoms:
                atoms_list.append(atom.id)
        for atom in atoms_list:
            a = self.molecule.atoms[atom]
            a1 = numpy.array([a.x, a.y, a.z])
            a2 = numpy.array(translateVector)
            s = numpy.add(a1, a2)
            self.molecule.atoms[atom].x = s[0]
            self.molecule.atoms[atom].y = s[1]
            self.molecule.atoms[atom].z = s[2]
        return self
    def reorderResidues(self):
        """Renumber residues consecutively from 1, logging via _log()."""
        counter = 1
        _log(self.verbose, 'Reordering Residues:', colour="red", bold=True)
        for i in self.molecule.residues:
            res = self.molecule.residues[i]
            _log(self.verbose, '{}-{} ----> {}'.format(res.name, res.id, counter))
            res.id = counter
            counter = counter + 1
        return self
    def describeResidues(self):
        """Human-readable table of residue id/name/chain for this molecule."""
        description = "{}\n----------------------------------\n".format(
            os.path.basename(self.molecule.name))
        for i in self.molecule.residues:
            description += "Residue ID: {:3g} -- Residue Name: {} -- Chain ID: {}\n".format(
                self.molecule.residues[i].id, self.molecule.residues[i].name, self.molecule.residues[i].chain)
        return description
def mergePDBs(pdbs, output):
    """Concatenate the ATOM records of several PDB files into ``output``,
    terminating each input file's contribution with a TER record.

    Returns the output path.
    """
    # TODO: Deal with two residues having same ID but are on different chains
    with open(output, 'w') as outfile:
        for fname in pdbs:
            with open(fname) as infile:
                atom_lines = [line for line in infile if line[0:4] == 'ATOM']
            outfile.writelines(atom_lines)
            outfile.write("TER\n")
    return output
def _log(verbose=True, message="", colour=None, background=None, bold=False, underline=False, inverted=False, run=False):
    """Print ``message`` with ANSI SGR styling when ``verbose`` is true.

    ``colour``/``background`` pick from the tables below; ``bold``,
    ``underline`` and ``inverted`` wrap the message in the matching SGR
    on/off pairs. ``run=True`` suppresses the trailing newline (Python 2
    trailing-comma print) so successive calls stay on one line.
    """
    if verbose:
        # Foreground codes (bright variants, SGR 90-97).
        colours = {
            'black': '90',
            'red': '91',
            'green': '92',
            'yellow': '93',
            'blue': '94',
            'magenta': '95',
            'cyan': '96',
            'white': '97'
        }
        # Background codes (SGR 49 default, 100-107 bright variants).
        backgrounds = {
            'default': '49',
            'black': '100',
            'red': '101',
            'green': '102',
            'yellow': '103',
            'blue': '104',
            'magenta': '105',
            'cyan': '106',
            'white': '107'
        }
        # NOTE(review): '\033[21m' is used to end bold; on some terminals
        # that code means "double underline" — '\033[22m' is the usual reset.
        if bold: message = '\033[1m' + message + '\033[21m'
        if underline: message = '\033[4m' + message + '\033[24m'
        if background is not None: message = '\033[' + backgrounds[background] + 'm' + message + '\033[49m'
        if colour is not None: message = '\033[' + colours[colour] + 'm' + message + '\033[0m'
        if inverted: message = '\033[7m' + message + '\033[27m'
        if run:
            print message,
        else:
            print message
    return
def unZip(archive, uncompressed):
    """Decompress a gzip ``archive`` to ``uncompressed`` and delete the archive.

    Streams the data with shutil.copyfileobj instead of loading the whole
    file into memory, and uses context managers so the handles are closed
    even on error (the original leaked both handles on failure).

    Returns True on success; propagates OSError/IOError on failure.
    """
    import gzip
    import shutil
    with gzip.open(archive, 'rb') as f:
        with open(uncompressed, 'wb') as g:
            shutil.copyfileobj(f, g)
    # Only remove the archive after the decompressed copy is safely written.
    os.remove(archive)
    return True
def downloadPDB(pdbCode, output=""):
    """Fetch ``<pdbCode>.pdb.gz`` from RCSB, decompress it, return its path.

    The archive is downloaded to the working directory, then unZip()
    decompresses it to ``output`` (or ``<pdbCode>.pdb`` when ``output`` is
    empty) and deletes the archive.

    NOTE(review): Python 2 API — under Python 3 this would be
    ``urllib.request.urlretrieve``; also note the plain-http URL.
    """
    import urllib
    pdb = "{pdbid}.pdb.gz".format(pdbid=pdbCode)
    url = "http://www.rcsb.org/pdb/files/{pdb}".format(pdb=pdb)
    urllib.urlretrieve(url, pdb)
    if output == "":
        output_path = "{pdbid}.pdb".format(pdbid=pdbCode)
    else:
        output_path = output
    unZip(pdb, output_path)
    return output_path
if __name__ == '__main__':
    # Demo / usage examples; most calls are intentionally left commented out.
    # load pdb
    #p = pyPDB('pdbs/gly.pdb')
    # if you need to download the pdb, you can load it straight away
    #p2 = pyPDB(downloadPDB('1P47', 'pdbs/1P47.pdb'))
    # we can merge two pdb files:
    p3 = pyPDB('pdbs/multiple.pdbqt')
    p3.verbose = True
    # after merging, we probably need to reorder the residues:
    # p3.reorderResidues()
    # and also describe the residues:
    # p3.describeResidues()
    # translate the coordinates (or selection)
    # p.translateCoordinates([10,5,1])
    # for atom in p.molecule.atoms:
    #    a = p.molecule.atoms[atom]
    #    print "[{0:g}, {1:g}, {2:g}]".format(a.x, a.y, a.z)
    # select one atom
    # p.selectAtom(4)
    # select multiple atoms individually (this continues after the previous one)
    # p.selectAtom(5).selectAtom(6)
    # or select multiple atoms all in one go
    # p.selectAtoms([4, 5, 6])
    # the 'p' pyPDB instance now has a selectedAtoms attribute that is iterable:
    # for atom in p.selectedAtoms:
    #     print '{}{}'.format(atom.name, atom.id)
    # calculate a distance map
    # print p.distanceMap()
    # and also plot it
    # p.plotDistanceMap(save=False, close=True)
    # calculate the distance between two atoms
    # print p.distanceBetweenAtoms(8, 9)
    # calculate atoms within a given distance of another atom
    # print p.atomsWithinDistanceOfAtom(10, 3)
    # you can iterate over something like the above such as:
    # atomsWithinDistance = p.atomsWithinDistanceOfAtom(10, 3)
    # i = 0
    # for x in atomsWithinDistance[0]:
    #     print 'Atom {}{} is within {} of {}{}: {}'.format(x.name, x.id, 3,
    #            p.molecule.atoms[10].name, 10, atomsWithinDistance[1][i])
    #     i += 1
    # or even make an amber mask:
    # print p.toAmberMask('atoms')
    # output a description of 'p' as json
    # print p.toJSON()
    # reduce a pdb:
    # p.reduce()
    # ...which can be iterated over:
    # for atom in p.reduce():
    #     print '{}{}'.format(atom.name, atom.id)
    # the selection (or all atoms if no selection) can be written to a pdb file
    # p.writePDB()
    # the selection can be removed using
    # p.removeSelection()
| iamgp/pyPDB | pyPDB.py | Python | gpl-3.0 | 17,831 | [
"Amber"
] | 50735a6a58ec9eae0942cc142867084fcf84f5501c28f1b1b7c095b0b3cca7d8 |
# coding: utf-8
import os
from os.path import join, exists
import shutil
import pybel
from .config import CALCULATE_DATA_PATH
from .mopac import MopacModel
from .gaussian_optimize import GaussianOptimizeModel
from utils import chemistry_logger
from django.conf import settings
from chemistry.calcore.utils import CalcoreCmd
class Converter():
    """Turn SMILES strings and .mol files into MOPAC/Gaussian/Dragon inputs.

    Accepts SMILES codes and/or paths to .mol files, converts them to the
    input formats the downstream models need and drops the results into
    the per-model Dragon/MOPAC/Gaussian directories.
    """
    def __init__(self, smiles=None, molfiles=None, model_name=None):
        self.__smilenum_list = []
        self.__molfile = []
        self.model_name = model_name
        # smiles may be a list, or a single string of comma-separated SMILES
        if isinstance(smiles, list):
            self.__smilenum_list = [s for s in smiles]
        elif isinstance(smiles, basestring):
            self.__smilenum_list = smiles.split(',')
        # molfiles are full filesystem paths; silently drop missing files
        if isinstance(molfiles, list):
            self.__molfile = [f for f in molfiles if exists(f)]
        elif isinstance(molfiles, basestring):
            self.__molfile = [f for f in molfiles.split(',') if exists(f)]
    def smile2_3d(self, smile):
        """Convert one SMILES string to a 3D .mol file; returns its path."""
        mymol = pybel.readstring('smi', smile)
        if self.model_name == 'logPL':
            # Only the logPL model requires explicit hydrogens.
            mymol.addh()
        mymol.make3D()
        name = self.format_filename(smile)
        mol_fpath = join(settings.MOL_ABSTRACT_FILE_PATH, '%s.mol' % name)
        mymol.write('mol', mol_fpath, overwrite=True)
        return mol_fpath
    def iter_smiles_files(self, src_list, src_type):
        """Yield (element, name, dragon_dpath, mopac_dpath, mop_fpath) per
        input, creating the Dragon/MOPAC output directories as a side effect.

        ``src_type`` is 'smile' (names derived from the SMILES string) or
        'file' (names derived from the file's basename).
        """
        for element in src_list:
            if src_type == 'smile':
                name = self.format_filename(element)
            elif src_type == 'file':
                name = os.path.basename(element).split('.')[0]
            else:
                raise Exception('No support %s' % src_type)
            dragon_dpath = join(CALCULATE_DATA_PATH.DRAGON, self.model_name, name)
            mopac_dpath = join(CALCULATE_DATA_PATH.MOPAC, self.model_name, name)
            mop_fpath = join(mopac_dpath, '%s.mop' % name)
            if not os.path.exists(dragon_dpath):
                os.makedirs(dragon_dpath)
            if not exists(mopac_dpath):
                os.makedirs(mopac_dpath)
            yield (element, name, dragon_dpath, mopac_dpath, mop_fpath)
    def mol2dragon_folder(self):
        """Produce .mop inputs for every SMILES/mol file, then run the
        MOPAC optimization step that feeds Dragon."""
        mop_fname_set = set()
        # STEP: convert each SMILES via obabel into a .mop file placed in
        # the per-molecule MOPAC directory
        for element in self.iter_smiles_files(self.__smilenum_list, 'smile'):
            smile, name, dragon_dpath, mopac_dpath, mop_fpath = element
            # obabel's '-:' input reads the SMILES string directly
            cmd = 'obabel -:"%s" -o mop -O "%s" --gen3D' % (smile,
                                                            mop_fpath)
            chemistry_logger.info('mop2mopac, smi->mop: %s' % cmd)
            CalcoreCmd(cmd, output=mop_fpath).run()
            # Rewrite the header of the generated .mop file with the
            # model-specific MOPAC keyword line.
            lines = []
            with open(mop_fpath, 'rb') as f:
                lines = f.readlines()
            if self.model_name in ('logKOA',):
                lines[0] = 'EF GNORM=0.0001 MMOK GEO-OK PM3\n'
            elif self.model_name in ('logRP',):
                lines[0] = 'eps=78.6 EF GNORM =0.0100 MMOK GEO-OK PM6 MULLIK GRAPH ESR HYPERFINE POLAR PRECISE BOND PI ENPART DEBUG\n'
            elif self.model_name in ('logPL',):
                lines[0] = 'EF GNORM=0.01 MMOK GEO-OK PM6 MULLIK POLAR\n'
            elif self.model_name in ('logO3', ):
                lines[0] = 'EF GNORM=0.001 PM6 MULLIK GRAPH ESR HYPERFINE POLAR\n'
            elif self.model_name in ('logBDG',):
                lines[0] = 'EF GNORM=0.1 MMOK GEO-OK PM5\n'
            else:
                # NOTE(review): no trailing newline here, so this fallback
                # header runs into the next line — confirm intended.
                lines[0] = 'no keywords'
            lines[1] = mopac_dpath + "\n"
            with open(mop_fpath, 'wb') as f:
                f.writelines(lines)
            mop_fname_set.add('%s.mop' % name)
        for element in self.iter_smiles_files(self.__molfile, 'file'):
            mol_fpath, name, dragon_dpath, mopac_dpath, _ = element
            mop_fpath = mol2mop(mol_fpath)
            if not os.path.exists(mopac_dpath):
                os.makedirs(mopac_dpath)
            shutil.copy(mop_fpath, mopac_dpath)
            mop_fname_set.add('%s.mop' % name)
        # Optimize with MOPAC for Dragon (input: the generated .mop files)
        mop = MopacModel(mop_fname_set)
        mop.opt4dragon(self.model_name)
    def mol2gjf2dragon_folder(self):
        """Produce Gaussian .gjf inputs (via 3D .mol files) and copy the
        .mol files into the Dragon directories, then run the Gaussian step."""
        gaussian_files_set = set()
        for element in self.iter_smiles_files(self.__smilenum_list, 'smile'):
            smile, name, dragon_dpath, mopac_dpath, mop_fpath = element
            gaussian_dpath = join(CALCULATE_DATA_PATH.GAUSSIAN, self.model_name, name)
            # smile-> mol
            mol_fpath = self.smile2_3d(smile)
            # mol -> gjf file
            gjf_fpath = mol2gjf(mol_fpath, self.model_name)
            if not os.path.exists(dragon_dpath):
                os.makedirs(dragon_dpath)
            if not os.path.exists(gaussian_dpath):
                os.makedirs(gaussian_dpath)
            shutil.copy(mol_fpath, dragon_dpath)
            shutil.copy(gjf_fpath, gaussian_dpath)
            gaussian_files_set.add('%s.gjf' % name)
        for element in self.iter_smiles_files(self.__molfile, 'file'):
            mol_fpath, name, dragon_dpath, mopac_dpath, mop_fpath = element
            if not os.path.exists(dragon_dpath):
                os.makedirs(dragon_dpath)
            shutil.copy(mol_fpath, dragon_dpath)
            gaussian_files_set.add('%s.gjf' % name)
        # FIXME: is the Gaussian calculation slow? (translated from original)
        chemistry_logger.info('GaussianOptimizeModel gjf4dragon')
        gjf = GaussianOptimizeModel(gaussian_files_set)
        gjf.gjf4dragon(self.model_name)
    def format_filename(self, filename):
        """Make a SMILES string filesystem-safe ('\\' -> '#', '/' -> '$')."""
        return filename.replace('\\', '#').replace('/', '$')
    def get_smilenum_list(self):
        """Accessor for the (name-mangled) parsed SMILES list."""
        return self.__smilenum_list
    def get_molfile(self):
        """Accessor for the (name-mangled) list of existing .mol paths."""
        return self.__molfile
def mol2mop(fpath):
    """Convert a .mol file to a MOPAC .mop input file.

    Coordinate lines are recognised by their 4th whitespace-separated field
    looking like an element symbol; everything else is skipped. The .mop
    file (PM3 keyword header, blank title block, then the atoms) is written
    under settings.MOL_ABSTRACT_FILE_PATH and its path returned.
    """
    fname = os.path.basename(fpath)
    fname_no_ext = fname.split('.')[0]
    content = []
    with open(fpath, 'r') as f:
        for line in f.readlines():
            try:
                values = line.split()
                # Element symbols sort between 'A' and 'Z'; short/other
                # lines raise IndexError and are deliberately skipped.
                if 'A' < values[3] < 'Z':
                    content.append(' %s %s %s %s\n' % (values[3], values[0],
                                                       values[1], values[2]))
            except Exception:
                pass
    # Keyword header + blank title block, then the coordinate lines.
    mop_list = ['EF GNORM=0.0001 MMOK GEO-OK PM3\n', '\n\r\n'] + content
    mop_fpath = join(settings.MOL_ABSTRACT_FILE_PATH, '%s.mop' % fname_no_ext)
    # FIXME: concurrent writers using the same molecule name will collide
    with open(mop_fpath, 'w') as f:
        f.writelines(mop_list)
    return mop_fpath
def mol2gjf(fpath, model_name):
    """Convert a .mol file to a Gaussian .gjf input for ``model_name``.

    Builds the %chk/%nproc/%mem header, a model-specific route line, the
    charge/multiplicity line and the coordinates, plus a genecp basis-set
    tail for logKOH models containing heavy elements. Returns the path of
    the written .gjf file (under settings.MOL_ABSTRACT_FILE_PATH).
    """
    if model_name in ('logKOH', 'logKOH_T'):
        # Presence flags per element, used to pick the basis set below.
        element = dict.fromkeys([
            'H', 'C', 'N', 'O', 'F', 'P', 'S', 'Cl',
            'Se', 'Br', 'I', 'Si', 'Hg', 'Pb'], 0)
    fname = os.path.basename(fpath)
    fname_no_ext = fname.split('.')[0]
    content = []
    with open(fpath, 'r') as f:
        for line in f.readlines():
            try:
                values = line.split()
                # Coordinate lines: 4th field looks like an element symbol.
                if 'A' < values[3] < 'Z':
                    content.append(' %s %s %s %s\n' % (values[3], values[0],
                                                       values[1], values[2]))
                    if model_name in ('logKOH', 'logKOH_T'):
                        element[values[3]] = 1
            except Exception:
                # Short/other lines raise IndexError; deliberately skipped.
                pass
    gjf_list = []
    gjf_list.append('%%chk=%s.chk\n' % fname_no_ext)
    gjf_list.append('%nproc=2\n')
    gjf_list.append('%mem=2GB\n')
    if model_name in ('logKOH', 'logKOH_T'):
        # Any heavy element (I/Si/Hg/Pb) forces the genecp route + ECP tail.
        element_op = (element['I'] | element['Si'] | element['Hg'] |
                      element['Pb']) == 1
        if element_op:
            gjf_list.append('#p opt freq b3lyp/genecp scf=tight int=ultrafine\n')
        else:
            gjf_list.append('#p opt freq b3lyp/6-311+G(d,p) scf=tight int=ultrafine\n')
    elif model_name in ("logKOC", "logBCF"):
        gjf_list.append('#p opt freq b3lyp/6-31+g(d,p) SCRF=(IEFPCM,SOLVENT=WATER)\n')
    gjf_list.append('\n')
    gjf_list.append('Title Card Required\n')
    gjf_list.append('\n')
    gjf_list.append('0 1\n')
    gjf_list.extend(content)
    gjf_list.append('\n')
    # ``element_op`` only exists for logKOH models; the leading membership
    # test short-circuits, so no NameError for other models.
    if model_name in ('logKOH', 'logKOH_T') and element_op:
        tempC = ""
        tempHg = ""
        # Heavy elements get LANL2DZ (and are zeroed so the second loop
        # collects only the light elements for 6-31+g(d,p)).
        for t in ('I', 'Si', 'Hg', 'Pb'):
            if element[t] == 1:
                element[t] = 0
                tempHg += "%s " % t
        for key in element:
            if element[key] == 1:
                tempC += "%s " % key
        gjf_list.append('%s 0\n' % tempC)
        gjf_list.append('6-31+g(d,p)\n')
        gjf_list.append('****\n')
        gjf_list.append('%s 0\n' % tempHg)
        gjf_list.append('LANL2DZ\n')
        gjf_list.append('****\n\n')
        gjf_list.append('%s 0\n' % tempHg)
        gjf_list.append('LANL2DZ\n\n')
    gjf_fpath = join(settings.MOL_ABSTRACT_FILE_PATH, '%s.gjf' % fname_no_ext)
    chemistry_logger.info('mol->gjf gjf path: %s' % gjf_fpath)
    chemistry_logger.info('mol->gjf, content: %s' % gjf_list)
    with open(gjf_fpath, 'w') as f:
        f.writelines(tuple(gjf_list))
    return gjf_fpath
| chemistryTools/ChemToolsWebService | chemistry/calcore/converters.py | Python | agpl-3.0 | 9,523 | [
"Gaussian",
"MOPAC",
"Pybel"
] | d0983166ddabb26de37857ade0571f33e61ecec10dc0193c6489ea5079c62813 |
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
    """Discrete sub-sampled Ricker (Mexican hat) wavelet.

    Samples the wavelet centered at ``center`` with scale ``width`` on the
    integer grid 0..resolution-1 and returns it as a 1-D array.
    """
    x = np.linspace(0, resolution - 1, resolution)
    # BUG FIX: the amplitude used ``np.pi ** 1 / 4`` which parses as
    # (pi**1)/4; the Ricker normalization constant is pi**(1/4), i.e.
    # A = 2 / (sqrt(3*width) * pi**0.25).
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
         * (1 - ((x - center) ** 2 / width ** 2))
         * np.exp((-(x - center) ** 2) / (2 * width ** 2)))
    return x
def ricker_matrix(width, resolution, n_components):
    """Dictionary of Ricker (Mexican hat) wavelets.

    Returns an (n_components, resolution) array whose rows are unit-norm
    wavelets with centers evenly spread over the signal support.
    """
    # Callers pass results of division (floats under true division /
    # np.floor); modern NumPy rejects float counts, so coerce explicitly.
    # Backward compatible: integral inputs are unchanged.
    n_components = int(n_components)
    centers = np.linspace(0, resolution - 1, n_components)
    D = np.empty((n_components, resolution))
    for i, center in enumerate(centers):
        D[i] = ricker_function(resolution, center, width)
    # Normalize each atom to unit L2 norm.
    D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
    return D
resolution = 1024
subsampling = 3  # subsampling factor
width = 100
# NOTE(review): under Python 3 this is a float (true division); the same
# applies to the np.floor(...) below — both feed ricker_matrix as counts.
n_components = resolution / subsampling

# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
                        n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=np.floor(n_components / 5))
                for w in (10, 50, 100, 500, 1000))]

# Generate a signal: piecewise constant, +3 on the first quarter and -1
# elsewhere.
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.

# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15, 'navy'),
              ('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
                                         ('fixed width', 'multiple widths'))):
    plt.subplot(1, 2, subplot + 1)
    plt.title('Sparse coding against %s dictionary' % title)
    plt.plot(y, lw=lw, linestyle='--', label='Original signal')
    # Do a wavelet approximation
    # (note: the inner loop variable shadows the outer ``title``)
    for title, algo, alpha, n_nonzero, color in estimators:
        coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
                            transform_alpha=alpha, transform_algorithm=algo)
        x = coder.transform(y.reshape(1, -1))
        density = len(np.flatnonzero(x))
        x = np.ravel(np.dot(x, D))
        squared_error = np.sum((y - x) ** 2)
        plt.plot(x, color=color, lw=lw,
                 label='%s: %s nonzero coefs,\n%.2f error'
                 % (title, density, squared_error))

    # Soft thresholding debiasing: least-squares refit on the support
    # selected by thresholding.
    coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
                        transform_alpha=20)
    x = coder.transform(y.reshape(1, -1))
    _, idx = np.where(x != 0)
    # NOTE(review): np.linalg.lstsq without rcond emits a FutureWarning on
    # newer NumPy.
    x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
    x = np.ravel(np.dot(x, D))
    squared_error = np.sum((y - x) ** 2)
    plt.plot(x, color='darkorange', lw=lw,
             label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
             % (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| kashif/scikit-learn | examples/decomposition/plot_sparse_coding.py | Python | bsd-3-clause | 4,037 | [
"Gaussian"
] | 1f09918937c8f110679230266819cbbfe176adcb532cca0cfe89cae298753ad7 |
'''
Author: S.T. Castle
Created: 2015-03-15
'''
#import math
import numpy as np
from scipy import ndimage
from scipy import stats
import scipy.ndimage.filters
import scipy.linalg
#import skimage.feature
import cv2
from matplotlib import pyplot as plt
def main():
'''
Run the explicit coherence enhancing filter with spatial adaptive
elliptical kernel from F.Li et al. 2012.
'''
# Params.
window_size = 7
sigma = 1 # Standard deviation of initial Gaussian kernel.
rho = 6 # Std dev of Gaussian kernel used to compute structure tensor.
gamma = 0.05
eps = np.spacing(1) # Very small positive number.
filename = 'fingerprint1.png'
# Open as grayscale image.
orig_img = cv2.imread(filename, 0)
print 'Opened ' + filename
#plt.subplot(111),plt.imshow(img, cmap = 'gray')
#plt.title('Input image'), plt.xticks([]), plt.yticks([])
#plt.show()
# Convolve image with a Gaussian kernel with standard deviation sigma.
img = scipy.ndimage.filters.gaussian_filter(orig_img, sigma)
#plt.subplot(111),plt.imshow(img, cmap = 'gray')
#plt.title('Input image'), plt.xticks([]), plt.yticks([])
#plt.show()
print 'shape of img:',
print img.shape
# Compute the 2D structure tensor of the image.
# The structure tensor is:
# [j11 j12]
# [j12 j22]
#j11, j12, j22 = skimage.feature.structure_tensor(img, sigma=sigma)
j11, j12, j22 = structure_tensor(img, sigma=sigma)
#print 'j11'
#print j11
#print 'j12'
#print j12
#print 'j22'
#print j22
print 'shape of j11:',
print j11.shape
print 'shape of J:',
print np.array([[j11,j12],[j12,j22]]).shape
# Compute eigenvalues mu1, mu2 of structure tensor. mu1 >= mu2.
mu1 = (j11 + j22) / 2 + np.sqrt(4 * j12 ** 2 + (j11 - j22) ** 2) / 2
mu2 = (j11 + j22) / 2 - np.sqrt(4 * j12 ** 2 + (j11 - j22) ** 2) / 2
print 'shape of mu1:',
print mu1.shape
# Compute corresponding normalized eigenvectors v1, v2.
v1 = np.asarray([ 2*j12,
j22-j11 + np.sqrt((j11-j22)**2 + 4*(j12**2)) ])
# Rearrange axis so that v1 is indexed as (x,y,(eigvector))
v1 = np.rollaxis(v1,0,3)
#print 'mu1'
#print mu1
#print 'mu2'
#print mu2
#print 'v1'
#print v1
#print 'v2'
#print v2
print 'shape of v1:',
print v1.shape
#print 'v1[0] =',
#print v1[0]
#print 'v1[0][0] =',
#print v1[0][0]
#print v1
# Compute theta based on the angle of v1 and the positive direction of
# the horizontal axis.
# cos(theta) = x / magnitude.
# If the magnitude is 0, then just try setting theta=0 for now.
print 'Calculating theta...'
theta = np.empty((v1.shape[0], v1.shape[1]))
for i in xrange(v1.shape[0]):
for j in xrange(v1.shape[1]):
v = v1[i][j]
mag = float(magnitude(v))
if mag:
theta[i][j] = np.arccos(v[0]/magnitude(v))
else:
theta[i][j] = 0
print 'Done.'
print 'shape of theta:',
print theta.shape
# Now that necessary values are calculated, proceed to filtering.
print 'Filtering...'
fimg = np.empty_like(img) # Create a blank array for the filtered image.
rad = window_size/2 # Radius of the filtering window.
sig1 = 10*gamma
# Current pixel is (x1,x2) and neighbor is (y1,y2).
height = img.shape[0]
width = img.shape[1]
for x1 in xrange(height):
for x2 in xrange(width):
eig1 = mu1[x1][x2]
eig2 = mu2[x1][x2]
ang = theta[x1][x2]
sig2 = 10*(gamma+(1-gamma)*np.exp(-1/((eig1-eig2)**2+eps)))
wt_const = 1/(2*np.pi*sig1*sig2) # Constant factor for weighting.
# Add weighted value from neighbor pixel y.
sum = 0
wt_sum = 0 # Sum of the weights for normalization scaling.
for i in xrange(-rad,rad+1):
y1 = x1+i
if (y1 < 0) or (y1 >= height):
continue
for j in xrange(-rad,rad+1):
y2 = x2+i
if (y2 < 0) or (y2 >= width):
continue
# Calculate weight of neighboring position y.
s = (y1-x1)*np.cos(ang) + (y2-x2)*np.sin(ang)
t = -(y1-x1)*np.sin(ang) + (y2-x2)*np.cos(ang)
wt = wt_const * np.exp( -s**2/(2*sig1**2) - t**2/(2*sig2**2) )
sum = sum + wt*orig_img[y1][y2] # Use original image or blurred?
wt_sum = wt_sum + wt
# Set value of this pixel x.
#sum = sum * (1.0/wt_sum) # Scale the pixel value.
fimg[x1][x2] = sum
print x1
print 'Done.'
# Display original and filtered images.
plt.subplot(121),plt.imshow(img, cmap = 'gray')
plt.title('Input image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(fimg, cmap = 'gray')
plt.title('Filtered Image'), plt.xticks([]), plt.yticks([])
plt.show()
def magnitude(v):
    """Euclidean length of the vector ``v``."""
    squared_length = np.dot(v, v)
    return np.sqrt(squared_length)
# from skimage !!!!
def _compute_derivatives(image, mode='constant', cval=0):
    """Compute x- and y-derivatives of ``image`` with the Sobel operator.

    Parameters
    ----------
    image : ndarray
        Input image.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How values outside the image borders are handled.
    cval : float, optional
        Constant border value used with mode 'constant'.

    Returns
    -------
    imx, imy : ndarray
        Derivatives in the x- and y-directions respectively.
    """
    # axis=1 differentiates across columns (x), axis=0 across rows (y).
    imx = ndimage.sobel(image, axis=1, mode=mode, cval=cval)
    imy = ndimage.sobel(image, axis=0, mode=mode, cval=cval)
    return imx, imy
def structure_tensor(image, sigma=1, mode='constant', cval=0):
    """Compute structure tensor using sum of squared differences.

    The structure tensor A is defined as::

        A = [Axx Axy]
            [Axy Ayy]

    approximated per pixel by Gaussian-weighted (std ``sigma``) sums of
    squared differences over a local window.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation of the Gaussian weighting kernel.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How values outside the image borders are handled.
    cval : float, optional
        Constant border value used with mode 'constant'.

    Returns
    -------
    Axx, Axy, Ayy : ndarray
        Structure-tensor elements for each pixel of the input image.

    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
    >>> Axx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  4.,  0.,  4.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    imx, imy = _compute_derivatives(image, mode=mode, cval=cval)

    def _smooth(component):
        # Gaussian-weighted local summation of one product image.
        return ndimage.gaussian_filter(component, sigma, mode=mode, cval=cval)

    Axx = _smooth(imx * imx)
    Axy = _smooth(imx * imy)
    Ayy = _smooth(imy * imy)
    return Axx, Axy, Ayy
if __name__ == '__main__':
    # Run the demo filter when executed as a script.
    main()
| castlest/shell-detection | coherence-elliptical-kernel/main.py | Python | bsd-3-clause | 7,932 | [
"Gaussian"
] | c8b64818f4016da1940eeb3b8af33e683b158908384160f38f756b0d76414300 |
# -*- coding: utf-8 -*-
"""
Regression tests for the Test Client, especially the customized assertions.
"""
import os
import warnings
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse
from django.template import (TemplateDoesNotExist, TemplateSyntaxError,
Context, Template, loader)
import django.template.context
from django.test import Client, TestCase
from django.test.client import encode_file, RequestFactory
from django.test.utils import ContextList, override_settings
class AssertContainsTests(TestCase):
def setUp(self):
    # Point TEMPLATE_DIRS at this test app's templates directory,
    # remembering the previous value so tearDown() can restore it.
    self.old_templates = settings.TEMPLATE_DIRS
    settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
def tearDown(self):
    # Restore the TEMPLATE_DIRS value saved in setUp().
    settings.TEMPLATE_DIRS = self.old_templates
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/test_client_regress/no_template_view/')
self.assertNotContains(response, 'never')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertContains(response, 'text', status_code=999)
except AssertionError, e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999)
except AssertionError, e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'once')
except AssertionError, e:
self.assertIn("Response should not contain 'once'", str(e))
try:
self.assertNotContains(response, 'once', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Response should not contain 'once'", str(e))
try:
self.assertContains(response, 'never', 1)
except AssertionError, e:
self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'never', 1, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'once', 0)
except AssertionError, e:
self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 0, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 2)
except AssertionError, e:
self.assertIn("Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'once', 2, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'twice', 1)
except AssertionError, e:
self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'twice', 1, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'thrice')
except AssertionError, e:
self.assertIn("Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', 3)
except AssertionError, e:
self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e))
try:
self.assertContains(response, 'thrice', 3, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e))
def test_unicode_contains(self):
"Unicode characters can be found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertContains(r, u'さかき')
self.assertContains(r, '\xe5\xb3\xa0'.decode('utf-8'))
def test_unicode_not_contains(self):
"Unicode characters can be searched for, and not found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertNotContains(r, u'はたけ')
self.assertNotContains(r, '\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8'))
class AssertTemplateUsedTests(TestCase):
    """
    Regression tests for assertTemplateUsed()/assertTemplateNotUsed() failure
    messages, covering zero, one, and multiple rendering contexts.

    Fix: ``except AssertionError, e`` modernized to the PEP 3110 ``as`` form.
    NOTE(review): the try/excepts pass silently if the inner assertion
    unexpectedly succeeds; consider ``else: self.fail(...)``.
    """
    fixtures = ['testdata.json']

    def test_no_context(self):
        "Template usage assertions work then templates aren't in use"
        response = self.client.get('/test_client_regress/no_template_view/')
        # Check that the no template case doesn't mess with the template assertions
        self.assertTemplateNotUsed(response, 'GET Template')
        try:
            self.assertTemplateUsed(response, 'GET Template')
        except AssertionError as e:
            self.assertIn("No templates used to render the response", str(e))
        try:
            self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: No templates used to render the response", str(e))

    def test_single_context(self):
        "Template assertions work when there is a single context"
        response = self.client.get('/test_client/post_view/', {})
        try:
            self.assertTemplateNotUsed(response, 'Empty GET Template')
        except AssertionError as e:
            self.assertIn("Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
        try:
            self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
        try:
            self.assertTemplateUsed(response, 'Empty POST Template')
        except AssertionError as e:
            self.assertIn("Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
        try:
            self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))

    def test_multiple_context(self):
        "Template assertions work when there are multiple contexts"
        post_data = {
            'text': 'Hello World',
            'email': 'foo@example.com',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data OK')
        try:
            self.assertTemplateNotUsed(response, "form_view.html")
        except AssertionError as e:
            self.assertIn("Template 'form_view.html' was used unexpectedly in rendering the response", str(e))
        try:
            self.assertTemplateNotUsed(response, 'base.html')
        except AssertionError as e:
            self.assertIn("Template 'base.html' was used unexpectedly in rendering the response", str(e))
        try:
            self.assertTemplateUsed(response, "Valid POST Template")
        except AssertionError as e:
            self.assertIn("Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html", str(e))
class AssertRedirectsTests(TestCase):
    """
    Regression tests for assertRedirects() failure messages and for
    redirect-chain following (follow=True) across all HTTP verbs.

    Fix: ``except AssertionError, e`` modernized to the PEP 3110 ``as`` form.
    NOTE(review): the try/excepts pass silently if the inner assertion
    unexpectedly succeeds; consider ``else: self.fail(...)``.
    """

    def test_redirect_page(self):
        "An assertion is raised if the original page couldn't be retrieved as expected"
        # This page will redirect with code 301, not 302
        response = self.client.get('/test_client/permanent_redirect_view/')
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))

    def test_lost_query(self):
        "An assertion is raised if the redirect location doesn't preserve GET parameters"
        response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e))
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e))

    def test_incorrect_target(self):
        "An assertion is raised if the response redirects to another target"
        response = self.client.get('/test_client/permanent_redirect_view/')
        try:
            # Should redirect to get_view
            self.assertRedirects(response, '/test_client/some_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))

    def test_target_page(self):
        "An assertion is raised if the response redirect target cannot be retrieved as expected"
        response = self.client.get('/test_client/double_redirect_view/')
        try:
            # The redirect target responds with a 301 code, not 200
            self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/')
        except AssertionError as e:
            self.assertIn("Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
        try:
            # The redirect target responds with a 301 code, not 200
            self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e))

    def test_redirect_chain(self):
        "You can follow a redirect chain of multiple redirects"
        response = self.client.get('/test_client_regress/redirects/further/more/', {}, follow=True)
        self.assertRedirects(response, '/test_client_regress/no_template_view/',
            status_code=301, target_status_code=200)
        self.assertEqual(len(response.redirect_chain), 1)
        self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/no_template_view/', 301))

    def test_multiple_redirect_chain(self):
        "You can follow a redirect chain of multiple redirects"
        response = self.client.get('/test_client_regress/redirects/', {}, follow=True)
        self.assertRedirects(response, '/test_client_regress/no_template_view/',
            status_code=301, target_status_code=200)
        # Each hop of the chain is recorded, in order.
        self.assertEqual(len(response.redirect_chain), 3)
        self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/redirects/further/', 301))
        self.assertEqual(response.redirect_chain[1], ('http://testserver/test_client_regress/redirects/further/more/', 301))
        self.assertEqual(response.redirect_chain[2], ('http://testserver/test_client_regress/no_template_view/', 301))

    def test_redirect_chain_to_non_existent(self):
        "You can follow a chain to a non-existent view"
        response = self.client.get('/test_client_regress/redirect_to_non_existent_view2/', {}, follow=True)
        self.assertRedirects(response, '/test_client_regress/non_existent_view/',
            status_code=301, target_status_code=404)

    def test_redirect_chain_to_self(self):
        "Redirections to self are caught and escaped"
        response = self.client.get('/test_client_regress/redirect_to_self/', {}, follow=True)
        # The chain of redirects stops once the cycle is detected.
        self.assertRedirects(response, '/test_client_regress/redirect_to_self/',
            status_code=301, target_status_code=301)
        self.assertEqual(len(response.redirect_chain), 2)

    def test_circular_redirect(self):
        "Circular redirect chains are caught and escaped"
        response = self.client.get('/test_client_regress/circular_redirect_1/', {}, follow=True)
        # The chain of redirects will get back to the starting point, but stop there.
        self.assertRedirects(response, '/test_client_regress/circular_redirect_2/',
            status_code=301, target_status_code=301)
        self.assertEqual(len(response.redirect_chain), 4)

    def test_redirect_chain_post(self):
        "A redirect chain will be followed from an initial POST post"
        response = self.client.post('/test_client_regress/redirects/',
            {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response,
            '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_head(self):
        "A redirect chain will be followed from an initial HEAD request"
        response = self.client.head('/test_client_regress/redirects/',
            {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response,
            '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_options(self):
        "A redirect chain will be followed from an initial OPTIONS request"
        response = self.client.options('/test_client_regress/redirects/',
            {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response,
            '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_put(self):
        "A redirect chain will be followed from an initial PUT request"
        response = self.client.put('/test_client_regress/redirects/',
            {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response,
            '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_delete(self):
        "A redirect chain will be followed from an initial DELETE request"
        response = self.client.delete('/test_client_regress/redirects/',
            {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response,
            '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_on_non_redirect_page(self):
        "An assertion is raised if the original page couldn't be retrieved as expected"
        # This page will redirect with code 301, not 302
        response = self.client.get('/test_client/get_view/', follow=True)
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))

    def test_redirect_on_non_redirect_page(self):
        "An assertion is raised if the original page couldn't be retrieved as expected"
        # This page will redirect with code 301, not 302
        response = self.client.get('/test_client/get_view/')
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
class AssertFormErrorTests(TestCase):
    """
    Regression tests for assertFormError() failure messages: unknown form,
    unknown field, field without errors, missing field error, and missing
    non-field error.

    Fix: ``except AssertionError, e`` modernized to the PEP 3110 ``as`` form.
    NOTE(review): the try/excepts pass silently if the inner assertion
    unexpectedly succeeds; consider ``else: self.fail(...)``.
    """

    def test_unknown_form(self):
        "An assertion is raised if the form name is unknown"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        try:
            self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
        except AssertionError as e:
            self.assertIn("The form 'wrong_form' was not used to render the response", str(e))
        try:
            self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: The form 'wrong_form' was not used to render the response", str(e))

    def test_unknown_field(self):
        "An assertion is raised if the field name is unknown"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        try:
            self.assertFormError(response, 'form', 'some_field', 'Some error.')
        except AssertionError as e:
            self.assertIn("The form 'form' in context 0 does not contain the field 'some_field'", str(e))
        try:
            self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: The form 'form' in context 0 does not contain the field 'some_field'", str(e))

    def test_noerror_field(self):
        "An assertion is raised if the field doesn't have any errors"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        try:
            self.assertFormError(response, 'form', 'value', 'Some error.')
        except AssertionError as e:
            self.assertIn("The field 'value' on form 'form' in context 0 contains no errors", str(e))
        try:
            self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: The field 'value' on form 'form' in context 0 contains no errors", str(e))

    def test_unknown_error(self):
        "An assertion is raised if the field doesn't contain the provided error"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        try:
            self.assertFormError(response, 'form', 'email', 'Some error.')
        except AssertionError as e:
            self.assertIn("The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])", str(e))
        try:
            self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])", str(e))

    def test_unknown_nonfield_error(self):
        """
        Checks that an assertion is raised if the form's non field errors
        doesn't contain the provided error.
        """
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        try:
            self.assertFormError(response, 'form', None, 'Some error.')
        except AssertionError as e:
            self.assertIn("The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
        try:
            self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
class LoginTests(TestCase):
    """assertRedirects() must use the client that issued the request."""
    fixtures = ['testdata']

    def test_login_different_client(self):
        "Check that using a different test client doesn't violate authentication"
        # Authenticate with a brand-new client instance, not self.client.
        other = Client()
        logged_in = other.login(username='testclient', password='password')
        self.assertTrue(logged_in, 'Could not log in')
        # Fetch a login-protected redirecting page with the second client.
        resp = other.get("/test_client_regress/login_protected_redirect_view/")
        # self.client is still anonymous here; assertRedirects has to follow
        # the redirect with the client that produced the response.
        self.assertRedirects(resp, "http://testserver/test_client_regress/get_view/")
class SessionEngineTests(TestCase):
    """Login works with a custom session engine that rewrites session keys."""
    fixtures = ['testdata']

    def setUp(self):
        # Swap in the test session engine; the original is restored in tearDown.
        self._saved_engine = settings.SESSION_ENGINE
        settings.SESSION_ENGINE = 'regressiontests.test_client_regress.session'

    def tearDown(self):
        settings.SESSION_ENGINE = self._saved_engine

    def test_login(self):
        "A session engine that modifies the session key can be used to log in"
        self.assertTrue(
            self.client.login(username='testclient', password='password'),
            'Could not log in')
        # A login-protected page must now be reachable as the right user.
        resp = self.client.get("/test_client/login_protected_view/")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context['user'].username, 'testclient')
# Re-run the session-engine tests with 'django.contrib.sessions' stripped
# from INSTALLED_APPS.
@override_settings(INSTALLED_APPS=tuple(
    app for app in settings.INSTALLED_APPS if app != 'django.contrib.sessions'))
class NoSessionsAppInstalled(SessionEngineTests):
    """#7836 - Test client can exercise sessions even when 'django.contrib.sessions' isn't installed."""

    def test_session(self):
        # Hitting this view stores a session variable...
        resp = self.client.get('/test_client_regress/set_session/')
        self.assertEqual(resp.status_code, 200)
        # ...which must be visible through the client's session proxy.
        self.assertEqual(self.client.session['session_var'], 'YES')
class URLEscapingTests(TestCase):
    """URL arguments that need escaping are handled by GET and POST alike."""

    def test_simple_argument_get(self):
        "Get a view that has a simple string argument"
        resp = self.client.get(reverse('arg_view', args=['Slartibartfast']))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'Howdy, Slartibartfast')

    def test_argument_with_space_get(self):
        "Get a view that has a string argument that requires escaping"
        resp = self.client.get(reverse('arg_view', args=['Arthur Dent']))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'Hi, Arthur')

    def test_simple_argument_post(self):
        "Post for a view that has a simple string argument"
        resp = self.client.post(reverse('arg_view', args=['Slartibartfast']))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'Howdy, Slartibartfast')

    def test_argument_with_space_post(self):
        "Post for a view that has a string argument that requires escaping"
        resp = self.client.post(reverse('arg_view', args=['Arthur Dent']))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'Hi, Arthur')
class ExceptionTests(TestCase):
    """#5836 -- an exception raised in one request must not leak into later ones."""
    fixtures = ['testdata.json']

    def test_exception_cleared(self):
        "#5836 - A stale user exception isn't re-raised by the test client."
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        try:
            # Unused local 'response' removed: the return value was never read.
            self.client.get("/test_client_regress/staff_only/")
            self.fail("General users should not be able to visit this page")
        except SuspiciousOperation:
            pass
        # At this point, an exception has been raised, and should be cleared.
        # This next operation should be successful; if it isn't we have a problem.
        login = self.client.login(username='staff', password='password')
        self.assertTrue(login, 'Could not log in')
        try:
            self.client.get("/test_client_regress/staff_only/")
        except SuspiciousOperation:
            self.fail("Staff should be able to visit this page")
class TemplateExceptionTests(TestCase):
    """Template errors during 404 rendering propagate to the test client."""

    def setUp(self):
        # Reset the loaders so they don't try to render cached templates.
        if loader.template_source_loaders is not None:
            for template_loader in loader.template_source_loaders:
                if hasattr(template_loader, 'reset'):
                    template_loader.reset()
        self.old_templates = settings.TEMPLATE_DIRS
        settings.TEMPLATE_DIRS = ()

    def tearDown(self):
        settings.TEMPLATE_DIRS = self.old_templates

    def test_no_404_template(self):
        "Missing templates are correctly reported by test client"
        try:
            # Unused local 'response' removed: only the exception matters.
            self.client.get("/no_such_view/")
            self.fail("Should get error about missing template")
        except TemplateDoesNotExist:
            pass

    def test_bad_404_template(self):
        "Errors found when rendering 404 error templates are re-raised"
        settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'bad_templates'),)
        try:
            # Unused local 'response' removed: only the exception matters.
            self.client.get("/no_such_view/")
            self.fail("Should get error about syntax error in template")
        except TemplateSyntaxError:
            pass
# URLconf substitution is checked by a pair of tests: this one verifies the
# override takes effect, and zzUrlconfSubstitutionTests (run later thanks to
# alphabetical test ordering) verifies it is reverted on teardown.
class UrlconfSubstitutionTests(TestCase):
    urls = 'regressiontests.test_client_regress.urls'

    def test_urlconf_was_changed(self):
        "TestCase can enforce a custom URLconf on a per-test basis"
        # Under the substituted URLconf the view reverses without the app prefix.
        self.assertEqual(reverse('arg_view', args=['somename']),
                         '/arg_view/somename/')
# Must run *after* UrlconfSubstitutionTests; the "zz" prefix guarantees that
# under alphabetical test ordering.
class zzUrlconfSubstitutionTests(TestCase):
    def test_urlconf_was_reverted(self):
        "URLconf is reverted to original value after modification in a TestCase"
        # With the override gone, reversing includes the app prefix again.
        self.assertEqual(reverse('arg_view', args=['somename']),
                         '/test_client_regress/arg_view/somename/')
class ContextTests(TestCase):
    """
    response.context behaves as a single Context or a ContextList depending on
    how many contexts were used to render the response.

    Fix: ``except KeyError, e`` modernized to the PEP 3110 ``as`` form.
    """
    fixtures = ['testdata']

    def test_single_context(self):
        "Context variables can be retrieved from a single context"
        response = self.client.get("/test_client_regress/request_data/", data={'foo':'whiz'})
        self.assertEqual(response.context.__class__, Context)
        self.assertTrue('get-foo' in response.context)
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['data'], 'sausage')
        try:
            response.context['does-not-exist']
            self.fail('Should not be able to retrieve non-existent key')
        except KeyError as e:
            self.assertEqual(e.args[0], 'does-not-exist')

    def test_inherited_context(self):
        "Context variables can be retrieved from a list of contexts"
        response = self.client.get("/test_client_regress/request_data_extended/", data={'foo':'whiz'})
        self.assertEqual(response.context.__class__, ContextList)
        self.assertEqual(len(response.context), 2)
        self.assertTrue('get-foo' in response.context)
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['data'], 'bacon')
        try:
            response.context['does-not-exist']
            self.fail('Should not be able to retrieve non-existent key')
        except KeyError as e:
            self.assertEqual(e.args[0], 'does-not-exist')

    def test_15368(self):
        # Need to insert a context processor that assumes certain things about
        # the request instance. This triggers a bug caused by some ways of
        # copying RequestContext.
        try:
            django.template.context._standard_context_processors = (lambda request: {'path': request.special_path},)
            response = self.client.get("/test_client_regress/request_context_view/")
            self.assertContains(response, 'Path: /test_client_regress/request_context_view/')
        finally:
            # Always restore the private processor list, even on failure.
            django.template.context._standard_context_processors = None
class SessionTests(TestCase):
    """Session state survives login, and logout is safe in any state."""
    fixtures = ['testdata.json']

    def test_session(self):
        "The session isn't lost if a user logs in"
        # No session variable exists initially.
        resp = self.client.get('/test_client_regress/check_session/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'NO')
        # Set a session variable...
        resp = self.client.get('/test_client_regress/set_session/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'set_session')
        # ...and confirm the session was modified.
        resp = self.client.get('/test_client_regress/check_session/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'YES')
        # Logging in must not discard the existing session data.
        self.assertTrue(
            self.client.login(username='testclient', password='password'),
            'Could not log in')
        resp = self.client.get('/test_client_regress/check_session/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'YES')

    def test_logout(self):
        """Logout should work whether the user is logged in or not (#9978)."""
        # Logout with no active session.
        self.client.logout()
        self.assertTrue(
            self.client.login(username='testclient', password='password'),
            'Could not log in')
        # Logout while logged in, then a second redundant logout.
        self.client.logout()
        self.client.logout()
class RequestMethodTests(TestCase):
    """The test client dispatches each HTTP verb to the view correctly."""

    def test_get(self):
        "Request a view via request method GET"
        resp = self.client.get('/test_client_regress/request_methods/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'request method: GET')

    def test_post(self):
        "Request a view via request method POST"
        resp = self.client.post('/test_client_regress/request_methods/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'request method: POST')

    def test_head(self):
        "Request a view via request method HEAD"
        resp = self.client.head('/test_client_regress/request_methods/')
        self.assertEqual(resp.status_code, 200)
        # HEAD responses carry no body at all.
        self.assertNotEqual(resp.content, 'request method: HEAD')
        self.assertEqual(resp.content, '')

    def test_options(self):
        "Request a view via request method OPTIONS"
        resp = self.client.options('/test_client_regress/request_methods/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'request method: OPTIONS')

    def test_put(self):
        "Request a view via request method PUT"
        resp = self.client.put('/test_client_regress/request_methods/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'request method: PUT')

    def test_delete(self):
        "Request a view via request method DELETE"
        resp = self.client.delete('/test_client_regress/request_methods/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'request method: DELETE')
class RequestMethodStringDataTests(TestCase):
    """Raw string payloads can be sent with POST and PUT (#11371)."""

    def test_post(self):
        "Request a view with string data via request method POST"
        # Regression test for #11371
        payload = u'{"test": "json"}'
        resp = self.client.post('/test_client_regress/request_methods/', data=payload, content_type='application/json')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'request method: POST')

    def test_put(self):
        "Request a view with string data via request method PUT"
        # Regression test for #11371
        payload = u'{"test": "json"}'
        resp = self.client.put('/test_client_regress/request_methods/', data=payload, content_type='application/json')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'request method: PUT')
class QueryStringTests(TestCase):
    """Query-string handling for GET-like versus POST-like requests."""

    def test_get_like_requests(self):
        # Every GET-like verb treats `data` as the query string.
        for verb in ('get','head','options','put','delete'):
            send = getattr(self.client, verb)
            # A GET-like request can pass a query string as data
            resp = send("/test_client_regress/request_data/", data={'foo':'whiz'})
            self.assertEqual(resp.context['get-foo'], 'whiz')
            self.assertEqual(resp.context['request-foo'], 'whiz')
            # A GET-like request can pass a query string as part of the URL
            resp = send("/test_client_regress/request_data/?foo=whiz")
            self.assertEqual(resp.context['get-foo'], 'whiz')
            self.assertEqual(resp.context['request-foo'], 'whiz')
            # Data provided in the URL is overridden by actual form data.
            resp = send("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
            self.assertEqual(resp.context['get-foo'], 'bang')
            self.assertEqual(resp.context['request-foo'], 'bang')
            resp = send("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
            self.assertEqual(resp.context['get-foo'], None)
            self.assertEqual(resp.context['get-bar'], 'bang')
            self.assertEqual(resp.context['request-foo'], None)
            self.assertEqual(resp.context['request-bar'], 'bang')

    def test_post_like_requests(self):
        # A POST-like request can pass a query string as data
        resp = self.client.post("/test_client_regress/request_data/", data={'foo':'whiz'})
        self.assertEqual(resp.context['get-foo'], None)
        self.assertEqual(resp.context['post-foo'], 'whiz')
        # A POST-like request can pass a query string as part of the URL
        resp = self.client.post("/test_client_regress/request_data/?foo=whiz")
        self.assertEqual(resp.context['get-foo'], 'whiz')
        self.assertEqual(resp.context['post-foo'], None)
        self.assertEqual(resp.context['request-foo'], 'whiz')
        # POST data provided in the URL augments actual form data
        resp = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
        self.assertEqual(resp.context['get-foo'], 'whiz')
        self.assertEqual(resp.context['post-foo'], 'bang')
        self.assertEqual(resp.context['request-foo'], 'bang')
        resp = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
        self.assertEqual(resp.context['get-foo'], 'whiz')
        self.assertEqual(resp.context['get-bar'], None)
        self.assertEqual(resp.context['post-foo'], None)
        self.assertEqual(resp.context['post-bar'], 'bang')
        self.assertEqual(resp.context['request-foo'], 'whiz')
        self.assertEqual(resp.context['request-bar'], 'bang')
class UnicodePayloadTests(TestCase):
    """Unicode JSON bodies survive a POST round-trip. Regression for #10571."""

    def _post_json(self, payload, content_type):
        # Send the raw payload to the echo view with the given content type.
        return self.client.post("/test_client_regress/parse_unicode_json/",
                                payload, content_type=content_type)

    def test_simple_unicode_payload(self):
        "A simple ASCII-only unicode JSON document can be POSTed"
        payload = u'{"english": "mountain pass"}'
        response = self._post_json(payload, "application/json")
        self.assertEqual(response.content, payload)

    def test_unicode_payload_utf8(self):
        "A non-ASCII unicode data encoded as UTF-8 can be POSTed"
        payload = u'{"dog": "собака"}'
        response = self._post_json(payload, "application/json; charset=utf-8")
        self.assertEqual(response.content, payload.encode('utf-8'))

    def test_unicode_payload_utf16(self):
        "A non-ASCII unicode data encoded as UTF-16 can be POSTed"
        payload = u'{"dog": "собака"}'
        response = self._post_json(payload, "application/json; charset=utf-16")
        self.assertEqual(response.content, payload.encode('utf-16'))

    def test_unicode_payload_non_utf(self):
        "A non-ASCII unicode data as a non-UTF based encoding can be POSTed"
        payload = u'{"dog": "собака"}'
        response = self._post_json(payload, "application/json; charset=koi8-r")
        self.assertEqual(response.content, payload.encode('koi8-r'))
class DummyFile(object):
    """Minimal file-like stub: carries a name and returns a canned payload."""

    def __init__(self, filename):
        # Real file objects expose their path via .name; mirror that.
        self.name = filename

    def read(self):
        # Fixed content so encoding tests can assert on it exactly.
        return 'TEST_FILE_CONTENT'
class UploadedFileEncodingTest(TestCase):
    """Checks for the multipart helper ``encode_file``.

    The helper returns a list of lines: the opening boundary, the
    Content-Disposition header, the Content-Type header, and the raw file
    content as the last element.
    """
    def test_file_encoding(self):
        encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin'))
        # Element 0: opening boundary marker.
        self.assertEqual('--TEST_BOUNDARY', encoded_file[0])
        # Element 1: form-data disposition carrying key and filename.
        self.assertEqual('Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1])
        # The raw file content is always the final element.
        self.assertEqual('TEST_FILE_CONTENT', encoded_file[-1])
    def test_guesses_content_type_on_file_encoding(self):
        # Element 2 is the Content-Type header, guessed from the extension.
        self.assertEqual('Content-Type: application/octet-stream',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2])
        self.assertEqual('Content-Type: text/plain',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2])
        # The platform mimetypes registry maps .zip differently across
        # systems, so accept any of the common registrations.
        self.assertIn(encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2], (
            'Content-Type: application/x-compress',
            'Content-Type: application/x-zip',
            'Content-Type: application/x-zip-compressed',
            'Content-Type: application/zip',))
        # Unknown extensions fall back to the generic binary type.
        self.assertEqual('Content-Type: application/octet-stream',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2])
class RequestHeadersTest(TestCase):
    """Custom HTTP headers passed as extra kwargs reach the view."""
    def test_client_headers(self):
        "A test client can receive custom headers"
        # The HTTP_X_ARG_CHECK kwarg becomes a request header; the view
        # echoes it back so we can verify it arrived intact.
        response = self.client.get("/test_client_regress/check_headers/", HTTP_X_ARG_CHECK='Testing 123')
        self.assertEqual(response.content, "HTTP_X_ARG_CHECK: Testing 123")
        self.assertEqual(response.status_code, 200)
    def test_client_headers_redirect(self):
        "Test client headers are preserved through redirects"
        # follow=True makes the client traverse the 301, so the content
        # checked below comes from the redirect target.
        response = self.client.get("/test_client_regress/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123')
        self.assertEqual(response.content, "HTTP_X_ARG_CHECK: Testing 123")
        self.assertRedirects(response, '/test_client_regress/check_headers/',
                             status_code=301, target_status_code=200)
class ResponseTemplateDeprecationTests(TestCase):
    """
    Response.template still works backwards-compatibly, but with pending deprecation warning. Refs #12226.
    """
    def setUp(self):
        # Snapshot the warning filters, then silence DeprecationWarning so
        # the legacy attribute access below doesn't pollute test output.
        self.save_warnings_state()
        warnings.filterwarnings('ignore', category=DeprecationWarning)
    def tearDown(self):
        # Restore the filter state captured in setUp.
        self.restore_warnings_state()
    def test_response_template_data(self):
        response = self.client.get("/test_client_regress/request_data/", data={'foo':'whiz'})
        # The deprecated attribute still resolves to a Template instance.
        self.assertEqual(response.template.__class__, Template)
        self.assertEqual(response.template.name, 'base.html')
    def test_response_no_template(self):
        response = self.client.get("/test_client_regress/request_methods/")
        # When the view renders no template, the attribute is None.
        self.assertEqual(response.template, None)
class RawPostDataTest(TestCase):
    "Access to request.raw_post_data from the test client."
    def test_raw_post_data(self):
        """A GET through the test client must allow raw_post_data access.

        Refs #14753. Only the absence of an AssertionError raised inside
        the view matters, so the response object itself is discarded
        (the original bound it to an unused local).
        """
        try:
            self.client.get("/test_client_regress/raw_post_data/")
        except AssertionError:
            self.fail("Accessing request.raw_post_data from a view fetched with GET by the test client shouldn't fail.")
class RequestFactoryStateTest(TestCase):
    """Regression tests for #15929."""
    # These tests are checking that certain middleware don't change certain
    # global state. Alternatively, from the point of view of a test, they are
    # ensuring test isolation behaviour. So, unusually, it doesn't make sense to
    # run the tests individually, and if any are failing it is confusing to run
    # them with any other set of tests.
    def setUp(self):
        self.factory = RequestFactory()
    def common_test_that_should_always_pass(self):
        # A factory-built request must not carry a `user` attribute; if it
        # does, some earlier client/middleware use leaked global state.
        request = self.factory.get('/')
        request.session = {}
        self.assertFalse(hasattr(request, 'user'))
    def test_request(self):
        # Baseline: no client activity before the check.
        self.common_test_that_should_always_pass()
    def test_request_after_client(self):
        # apart from the next line the three tests are identical
        self.client.get('/')
        self.common_test_that_should_always_pass()
    def test_request_after_client_2(self):
        # This test is executed after the previous one
        self.common_test_that_should_always_pass()
| mitsuhiko/django | tests/regressiontests/test_client_regress/models.py | Python | bsd-3-clause | 45,179 | [
"VisIt"
] | c928a1d3d02f542ee83afa689f1b6f2be24f970068e85c26c95aafa2bf297143 |
"""Support for creating packages of data
Data Bundles are packages of data that simplify the process of finding,
cleaning, transforming and loading popular datasets. The data bundle format,
tools and management processes are designed to make common public data sets easy
to use and share, while allowing users to audit how the data they use has been
acquired and processed. The Data Bundle concept includes the data format, a
definition for bundle configuration and meta data, tools for manipulating
bundles, and a process for acquiring, processing and managing data. The goal of
a data bundle is for data analysts to be able to run few simple commands to find
a dataset and load it into a relational database.
Visit http://wiki.clarinova.com/display/CKDB/Data+Bundles for more
information.
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
__author__ = "Eric Busboom"
__copyright__ = "Copyright (c) 2013 Clarinova"
__credits__ = []
__license__ = "Revised BSD"
__version__ = '0.0.2'
__maintainer__ = "Eric Busboom"
__email__ = "eric@clarinova.com"
__status__ = "Development"
def resolve_id(arg, bundle=None, library=None):
    '''Resolve any of the many ways of identifying a partition or
    bundle into an ObjectNumber for a Dataset or Partition.

    Accepts an id string, an ObjectNumber (returned unchanged), an
    Identity, or any object exposing an ``identity`` attribute.

    Bug fix: the original tested ``bundle is not None`` twice, making the
    ``library`` lookup branch unreachable; the second test now checks
    ``library``.
    '''
    from identity import ObjectNumber, Identity
    if isinstance(arg, basestring):
        on = ObjectNumber.parse(arg)
    elif isinstance(arg, ObjectNumber):
        return arg
    elif isinstance(arg, Identity):
        if not arg.id_:
            if bundle is not None:
                raise NotImplementedError("Database lookup for Identity Id via bundle is not yet implemented")
            elif library is not None:
                raise NotImplementedError("Database lookup for Identity Id via library is not yet implemented")
            else:
                raise Exception("Identity does not have an id_ defined")
        on = ObjectNumber.parse(arg.id_)
    else:
        # Hope that the object has an identity attribute.
        on = ObjectNumber.parse(arg.identity.id_)
    return on
| treyhunner/databundles | databundles/__init__.py | Python | bsd-3-clause | 2,193 | [
"VisIt"
] | 31ecf81abafb7c6ceb65dd22b6fb31327b0aeb35ed09d9c95676210fe752655e |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import textwrap
"""
This module implements input and output processing from QChem.
"""
import copy
import re
import numpy as np
from string import Template
import six
from monty.io import zopen
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import Molecule
from pymatgen.core.units import Energy, FloatWithUnit
from monty.json import MSONable
from pymatgen.util.coord_utils import get_angle
__author__ = "Xiaohui Qu"
__copyright__ = "Copyright 2013, The Electrolyte Genome Project"
__version__ = "0.1"
__maintainer__ = "Xiaohui Qu"
__email__ = "xhqu1981@gmail.com"
__date__ = "11/4/13"
class QcTask(MSONable):
"""
An object representing a QChem input file.
Args:
molecule: The input molecule. If it is None of string "read",
QChem will read geometry from checkpoint file. If it is a
Molecule object, QcInput will convert it into Cartesian
coordinates. Valid values: pymatgen Molecule object, "read", None
Defaults to None.
charge (int): Charge of the molecule. If None, charge on molecule is
used. Defaults to None.
spin_multiplicity (int): Spin multiplicity of molecule. Defaults to
None, which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
jobtype (str): The type the QChem job. "SP" for Single Point Energy,
"opt" for geometry optimization, "freq" for
vibrational frequency.
title (str): Comments for the job. Defaults to None. Which means the
$comment section will be discarded.
exchange (str): The exchange methods of the theory. Examples including:
"B" (in pure BLYP), "PW91", "PBE", "TPSS".
Defaults to "HF".
This parameter can also be common names of hybrid
functionals, such as B3LYP, TPSSh, XYGJOS. In such cases,
the correlation parameter should be left as None.
correlation (str): The correlation level of the theory. Example
including: "MP2", "RI-MP2", "CCSD(T)", "LYP", "PBE", "TPSS"
Defaults to None.
basis_set (str/dict): The basis set.
If it is a dict, each element can use different basis set.
aux_basis_set (str/dict): Auxiliary basis set. For methods,
like RI-MP2, XYG3, OXYJ-OS, auxiliary basis set is required.
If it is a dict, each element can use different auxiliary
basis set.
ecp: Effective core potential (ECP) to be used.
If it is a dict, each element can use different ECP.
rem_params (dict): The parameters supposed to write in the $rem
section. Dict of key/value pairs.
Example: {"scf_algorithm": "diis_gdm", "scf_max_cycles": 100}
optional_params (dict): The parameter for keywords other than $rem
section. Dict of key/value pairs.
Example: {"basis": {"Li": "cc-PVTZ", "B": "aug-cc-PVTZ",
"F": "aug-cc-PVTZ"} "ecp": {"Cd": "srsc", "Br": "srlc"}}
ghost_atom (list): List of ghost atoms indices. Indices start from 0.
The ghost atom will be represented in the form of @element_symbol
"""
# Recognised optional input sections (beyond $rem) that may appear in a
# QChem input deck.
optional_keywords_list = {"basis", "basis2", "ecp", "empirical_dispersion",
                          "external_charges", "force_field_params",
                          "intracule", "isotopes", "aux_basis",
                          "localized_diabatization", "multipole_field",
                          "nbo", "occupied", "swap_occupied_virtual", "opt",
                          "pcm", "pcm_solvent", "solvent", "plots", "qm_atoms", "svp",
                          "svpirf", "van_der_waals", "xc_functional",
                          "cdft", "efp_fragments", "efp_params", "alist"}
# User-facing synonyms normalised to the canonical $rem keys/values.
alternative_keys = {"job_type": "jobtype",
                    "symmetry_ignore": "sym_ignore",
                    "scf_max_cycles": "max_scf_cycles"}
alternative_values = {"optimization": "opt",
                      "frequency": "freq"}
# Raw strings: "\w", "\s", "\-" etc. are invalid escape sequences in
# ordinary string literals (DeprecationWarning since Python 3.6); the
# compiled patterns are unchanged.
zmat_patt = re.compile(r"^(\w+)*([\s,]+(\w+)[\s,]+(\w+))*[\-\.\s,\w]*$")
xyz_patt = re.compile(r"^(\w+)[\s,]+([\d\.eE\-]+)[\s,]+([\d\.eE\-]+)[\s,]+"
                      r"([\d\.eE\-]+)[\-\.\s,\w.]*$")
def __init__(self, molecule=None, charge=None, spin_multiplicity=None,
             jobtype='SP', title=None, exchange="HF", correlation=None,
             basis_set="6-31+G*", aux_basis_set=None, ecp=None,
             rem_params=None, optional_params=None, ghost_atoms=None):
    """Validate the inputs and build ``self.params``, the dict-of-sections
    representation that ``__str__`` later renders as a QChem input file.
    See the class docstring for argument semantics."""
    # No molecule means "read geometry from the checkpoint file".
    self.mol = copy.deepcopy(molecule) if molecule else "read"
    self.charge = charge
    self.spin_multiplicity = spin_multiplicity
    if isinstance(self.mol, six.string_types):
        self.mol = self.mol.lower()
        if self.mol != "read":
            raise ValueError('The only accept text value for mol is "read"')
    elif isinstance(self.mol, list):
        # Fragment input: a list of Molecules with explicit overall
        # charge/multiplicity that must be consistent with the fragments.
        for m in self.mol:
            if not isinstance(m, Molecule):
                raise ValueError("In case of type list, every element of mol must be a pymatgen Molecule")
        if self.charge is None or self.spin_multiplicity is None:
            raise ValueError("For fragments molecule section input, charge and spin_multiplicity "
                             "must be specificed")
        total_charge = sum([m.charge for m in self.mol])
        total_unpaired_electron = sum([m.spin_multiplicity-1 for m in self.mol])
        if total_charge != self.charge:
            raise ValueError("The charge of the molecule doesn't equal to the sum of the fragment charges")
        # Parity check: unpaired-electron counts must agree mod 2.
        if total_unpaired_electron % 2 != (self.spin_multiplicity - 1) % 2:
            raise ValueError("Spin multiplicity of molecule and fragments doesn't match")
    elif isinstance(self.mol, Molecule):
        self.charge = charge if charge is not None else self.mol.charge
        # Ghost atoms contribute basis functions but no electrons, so
        # subtract their electrons when checking charge/spin consistency.
        ghost_nelectrons = 0
        if ghost_atoms:
            for i in ghost_atoms:
                site = self.mol.sites[i]
                for sp, amt in site.species_and_occu.items():
                    ghost_nelectrons += sp.Z * amt
        nelectrons = self.mol.charge + self.mol.nelectrons - ghost_nelectrons - self.charge
        if spin_multiplicity is not None:
            self.spin_multiplicity = spin_multiplicity
            # electrons + multiplicity must be odd for a valid state.
            if (nelectrons + spin_multiplicity) % 2 != 1:
                raise ValueError("Charge of {} and spin multiplicity of {} "
                                 "is not possible for this molecule"
                                 .format(self.charge, spin_multiplicity))
        else:
            # Default: singlet for even electron count, doublet for odd.
            self.spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
    else:
        raise ValueError("The molecule must be a pymatgen Molecule "
                         "object or read/None or list of pymatgen Molecule")
    if (self.charge is None) != (self.spin_multiplicity is None):
        raise ValueError("spin multiplicity must be set together")
    if self.charge is not None and isinstance(self.mol, Molecule) and not ghost_atoms:
        self.mol.set_charge_and_spin(self.charge, self.spin_multiplicity)
    self.params = dict()
    if title is not None:
        self.params["comment"] = self._wrap_comment(title)
    if "rem" not in self.params:
        self.params["rem"] = dict()
    self.params["rem"]["exchange"] = exchange.lower()
    available_jobtypes = {"sp", "opt", "ts", "freq", "force", "rpath",
                          "nmr", "bsse", "eda", "pes_scan", "fsm", "aimd",
                          "pimc", "makeefp"}
    # Normalise synonyms (e.g. "optimization" -> "opt") before validating.
    jt = jobtype.lower()
    if jt in self.alternative_values:
        jt = self.alternative_values[jt]
    if jt not in available_jobtypes:
        raise ValueError("Job type " + jobtype + " is not supported yet")
    self.params["rem"]["jobtype"] = jobtype.lower()
    if correlation is not None:
        self.params["rem"]["correlation"] = correlation.lower()
    if rem_params is not None:
        # Lower-case keys/values and map synonyms to canonical $rem names.
        for k, v in rem_params.items():
            k = k.lower()
            if k in self.alternative_keys:
                k = self.alternative_keys[k]
            if isinstance(v, six.string_types):
                v = str(v).lower()
                if v in self.alternative_values:
                    # noinspection PyTypeChecker
                    v = self.alternative_values[v]
                self.params["rem"][k] = v
            elif isinstance(v, int) or isinstance(v, float):
                self.params["rem"][k] = v
            else:
                raise ValueError("The value in $rem can only be Integer "
                                 "or string")
    if optional_params:
        # Reject any section name outside the known optional keywords.
        op_key = set([k.lower() for k in optional_params.keys()])
        if len(op_key - self.optional_keywords_list) > 0:
            invalid_keys = op_key - self.optional_keywords_list
            raise ValueError(','.join(['$' + k for k in invalid_keys]) +
                             'is not a valid optional section')
        self.params.update(optional_params)
    self.set_basis_set(basis_set)
    if aux_basis_set is None:
        # RI-type methods need an auxiliary basis; infer one for the
        # common Pople bases, otherwise the user must supply it.
        if self._aux_basis_required():
            if isinstance(self.params["rem"]["basis"], six.string_types):
                if self.params["rem"]["basis"].startswith("6-31+g"):
                    self.set_auxiliary_basis_set("rimp2-aug-cc-pvdz")
                elif self.params["rem"]["basis"].startswith("6-311+g"):
                    self.set_auxiliary_basis_set("rimp2-aug-cc-pvtz")
            if "aux_basis" not in self.params["rem"]:
                raise ValueError("Auxiliary basis set is missing")
    else:
        self.set_auxiliary_basis_set(aux_basis_set)
    if ecp:
        self.set_ecp(ecp)
    self.ghost_atoms = ghost_atoms
    if self.ghost_atoms:
        if not isinstance(self.ghost_atoms, list):
            raise ValueError("ghost_atoms must be a list of integers")
        for atom in self.ghost_atoms:
            if not isinstance(atom, int):
                raise ValueError("Each element of ghost atom list must an integer")
def _aux_basis_required(self):
    """Return True when the chosen method needs an auxiliary (RI) basis.

    True for the XYG-family double-hybrid functionals and for any
    "ri"-prefixed correlation method. Returns False explicitly instead
    of the original implicit ``None`` fall-through.
    """
    if self.params["rem"]["exchange"] in ['xygjos', 'xyg3', 'lxygjos']:
        return True
    if 'correlation' in self.params["rem"]:
        if self.params["rem"]["correlation"].startswith("ri"):
            return True
    return False
def set_basis_set(self, basis_set):
    """Set the basis set.

    Accepts a single basis name (str), a per-element mapping (dict of
    element symbol -> basis name), or a per-atom list of
    (element, basis) pairs ordered like the molecule's sites.

    Raises:
        ValueError: when a dict/list specification does not cover the
            molecule's elements exactly.
    """
    if isinstance(basis_set, six.string_types):
        self.params["rem"]["basis"] = str(basis_set).lower()
        # A concrete basis name makes any stale $basis section obsolete.
        if basis_set.lower() not in ["gen", "mixed"]:
            self.params.pop("basis", None)
    elif isinstance(basis_set, dict):
        # Per-element basis: $rem says "gen" and $basis lists each element.
        self.params["rem"]["basis"] = "gen"
        bs = dict()
        for element, basis in basis_set.items():
            bs[element.strip().capitalize()] = basis.lower()
        self.params["basis"] = bs
        if self.mol:
            # Element sets of molecule and basis spec must match exactly.
            mol_elements = set([site.species_string for site
                                in self.mol.sites])
            basis_elements = set(self.params["basis"].keys())
            if len(mol_elements - basis_elements) > 0:
                raise ValueError("The basis set for elements " +
                                 ", ".join(
                                     list(mol_elements - basis_elements)) +
                                 " is missing")
            if len(basis_elements - mol_elements) > 0:
                raise ValueError("Basis set error: the molecule "
                                 "doesn't contain element " +
                                 ", ".join(basis_elements - mol_elements))
    elif isinstance(basis_set, list):
        # Per-atom basis: entry order must mirror the site order.
        self.params["rem"]["basis"] = "mixed"
        bs = [(a[0].capitalize(), a[1].lower()) for a in basis_set]
        self.params["basis"] = bs
        if len(self.mol) != len(basis_set):
            raise ValueError("Must specific a basis set for every atom")
        mol_elements = [site.species_string for site in self.mol.sites]
        basis_elements = [a[0] for a in bs]
        if mol_elements != basis_elements:
            raise ValueError("Elements in molecule and mixed basis set don't match")
    else:
        raise Exception('Can\'t handle type "{}"'.format(type(basis_set)))
def set_partial_hessian_atoms(self, alist, phess=1):
    """Request a partial Hessian frequency calculation.

    Args:
        alist: list of integer atom indices included in the partial
            Hessian ($alist section); its length is written as N_SOL.
        phess: value for the PHESS $rem variable; 1 is stored as True.

    Raises:
        ValueError: if any entry of alist is not an int.
    """
    for a in alist:
        if not isinstance(a, int):
            # Fixed garbled message ("the parament alist must a list ...").
            raise ValueError("alist must be a list of atom indices")
    self.params["rem"]["n_sol"] = len(alist)
    if phess == 1:
        self.params["rem"]["phess"] = True
    else:
        self.params["rem"]["phess"] = phess
    # Partial Hessian only makes sense for a frequency job.
    self.params["rem"]["jobtype"] = "freq"
    self.params["alist"] = alist
def set_basis2(self, basis2_basis_set):
    """Set the secondary (BASIS2) basis set used for dual-basis methods.

    Accepts a single name (str), a per-element mapping (dict), or a
    per-atom list of (element, basis) pairs, mirroring
    :meth:`set_basis_set`.
    """
    if isinstance(basis2_basis_set, six.string_types):
        self.params["rem"]["basis2"] = basis2_basis_set.lower()
        # A concrete name invalidates any stale $basis2 section.
        if basis2_basis_set.lower() not in ["basis2_gen", "basis2_mixed"]:
            self.params.pop("basis2", None)
    elif isinstance(basis2_basis_set, dict):
        self.params["rem"]["basis2"] = "basis2_gen"
        bs = dict()
        for element, basis in basis2_basis_set.items():
            bs[element.strip().capitalize()] = basis.lower()
        self.params["basis2"] = bs
        if self.mol:
            # Element sets of molecule and spec must match exactly.
            mol_elements = set([site.species_string for site
                                in self.mol.sites])
            basis_elements = set(self.params["basis2"].keys())
            if len(mol_elements - basis_elements) > 0:
                raise ValueError("The BASIS2 basis set for "
                                 "elements " +
                                 ", ".join(
                                     list(mol_elements - basis_elements)) +
                                 " is missing")
            if len(basis_elements - mol_elements) > 0:
                raise ValueError("BASIS2 basis set error: the "
                                 "molecule doesn't contain element " +
                                 ", ".join(basis_elements - mol_elements))
    elif isinstance(basis2_basis_set, list):
        # Per-atom spec: entry order must mirror the site order.
        self.params["rem"]["basis2"] = "basis2_mixed"
        bs = [(a[0].capitalize(), a[1].lower()) for a in basis2_basis_set]
        self.params["basis2"] = bs
        if len(self.mol) != len(basis2_basis_set):
            raise ValueError("Must specific a BASIS2 basis set for every atom")
        mol_elements = [site.species_string for site in self.mol.sites]
        basis_elements = [a[0] for a in bs]
        if mol_elements != basis_elements:
            raise ValueError("Elements in molecule and mixed basis set don't match")
    else:
        raise Exception('Can\'t handle type "{}"'.format(type(basis2_basis_set)))
def set_auxiliary_basis_set(self, aux_basis_set):
    """Set the auxiliary (RI) basis set.

    Accepts a single name (str), a per-element mapping (dict), or a
    per-atom list of (element, basis) pairs, mirroring
    :meth:`set_basis_set`.

    Fixes two garbled error messages from the original
    ("Auxiliary asis set error", "Must specific a auxiliary ...").
    """
    if isinstance(aux_basis_set, six.string_types):
        self.params["rem"]["aux_basis"] = aux_basis_set.lower()
        # A concrete name invalidates any stale $aux_basis section.
        if aux_basis_set.lower() not in ["gen", "mixed"]:
            self.params.pop("aux_basis", None)
    elif isinstance(aux_basis_set, dict):
        self.params["rem"]["aux_basis"] = "gen"
        bs = dict()
        for element, basis in aux_basis_set.items():
            bs[element.strip().capitalize()] = basis.lower()
        self.params["aux_basis"] = bs
        if self.mol:
            # Element sets of molecule and spec must match exactly.
            mol_elements = set([site.species_string for site
                                in self.mol.sites])
            basis_elements = set(self.params["aux_basis"].keys())
            if len(mol_elements - basis_elements) > 0:
                raise ValueError("The auxiliary basis set for "
                                 "elements " +
                                 ", ".join(
                                     list(mol_elements - basis_elements)) +
                                 " is missing")
            if len(basis_elements - mol_elements) > 0:
                raise ValueError("Auxiliary basis set error: the "
                                 "molecule doesn't contain element " +
                                 ", ".join(basis_elements - mol_elements))
    elif isinstance(aux_basis_set, list):
        # Per-atom spec: entry order must mirror the site order.
        self.params["rem"]["aux_basis"] = "mixed"
        bs = [(a[0].capitalize(), a[1].lower()) for a in aux_basis_set]
        self.params["aux_basis"] = bs
        if len(self.mol) != len(aux_basis_set):
            raise ValueError("Must specify an auxiliary basis set for every atom")
        mol_elements = [site.species_string for site in self.mol.sites]
        basis_elements = [a[0] for a in bs]
        if mol_elements != basis_elements:
            raise ValueError("Elements in molecule and mixed basis set don't match")
    else:
        raise Exception('Can\'t handle type "{}"'.format(type(aux_basis_set)))
def set_ecp(self, ecp):
    """Select effective core potentials, globally (str) or per element (dict)."""
    if isinstance(ecp, six.string_types):
        # One ECP name applied to the whole system.
        self.params["rem"]["ecp"] = ecp.lower()
    elif isinstance(ecp, dict):
        # Per-element ECPs: $rem says "gen" and $ecp lists each element.
        self.params["rem"]["ecp"] = "gen"
        self.params["ecp"] = {el.strip().capitalize(): name.lower()
                              for el, name in ecp.items()}
        if self.mol:
            ecp_elements = set(self.params["ecp"].keys())
            mol_elements = set(site.species_string for site in self.mol.sites)
            # Every element given an ECP must exist in the molecule.
            unknown = ecp_elements - mol_elements
            if len(unknown) > 0:
                raise ValueError("ECP error: the molecule "
                                 "doesn't contain element " +
                                 ", ".join(unknown))
@property
def molecule(self):
    """The molecule stored for this task (a Molecule, a list of fragment
    Molecules, or the string "read")."""
    return self.mol
def set_memory(self, total=None, static=None):
    """
    Set the maximum allowed memory.

    Args:
        total: The total memory. Integer. Unit: MBytes. If set to None,
            this parameter will be neglected.
        static: The static memory. Integer. Unit MBytes. If set to None,
            this parameter will be neglected.
    """
    # Truthiness check mirrors the original: None (and 0) are ignored.
    for rem_key, amount in (("mem_total", total), ("mem_static", static)):
        if amount:
            self.params["rem"][rem_key] = amount
def set_max_num_of_scratch_files(self, num=16):
    """
    In QChem, the size of a single scratch file is limited to 2GB. By
    default, the max number of scratch files is 16, corresponding to
    32GB of scratch space. If you want to use more scratch disk space,
    you need to increase the number of scratch files:

    Args:
        num: The max number of the scratch files. (Integer)
    """
    self.params["rem"]["max_sub_file_num"] = num
def set_scf_algorithm_and_iterations(self, algorithm="diis",
                                     iterations=50):
    """
    Set algorithm used for converging SCF and max number of SCF iterations.

    Args:
        algorithm: The algorithm used for converging SCF. (str)
        iterations: The max number of SCF iterations. (Integer)
    """
    supported = {"diis", "dm", "diis_dm", "diis_gdm", "gdm",
                 "rca", "rca_diis", "roothaan"}
    algo = algorithm.lower()
    if algo not in supported:
        raise ValueError("Algorithm " + algorithm +
                         " is not available in QChem")
    self.params["rem"]["scf_algorithm"] = algo
    self.params["rem"]["max_scf_cycles"] = iterations
def set_scf_convergence_threshold(self, exponent=8):
    """
    SCF is considered converged when the wavefunction error is less than
    10**(-exponent).
    In QChem, the default values are:
    5	For single point energy calculations.
    7	For geometry optimizations and vibrational analysis.
    8	For SSG calculations

    Args:
        exponent: The exponent of the threshold. (Integer)
    """
    self.params["rem"]["scf_convergence"] = exponent
def set_integral_threshold(self, thresh=12):
    """
    Cutoff for neglect of two electron integrals. 10−THRESH (THRESH <= 14).
    In QChem, the default values are:
    8	For single point energies.
    10	For optimizations and frequency calculations.
    14	For coupled-cluster calculations.

    Args:
        thresh: The exponent of the threshold. (Integer)
    """
    self.params["rem"]["thresh"] = thresh
def set_dft_grid(self, radical_points=128, angular_points=302,
                 grid_type="Lebedev"):
    """
    Set the grid for DFT numerical integrations.

    Args:
        radical_points: Radical points. (Integer)
        angular_points: Angular points. (Integer)
        grid_type: The type of of the grid. There are two standard grids:
            SG-1 and SG-0. The other two supported grids are "Lebedev" and
            "Gauss-Legendre"
    """
    # Only these Lebedev angular-point counts are accepted by QChem.
    lebedev_angular_points = {6, 18, 26, 38, 50, 74, 86, 110, 146,
                              170, 194, 230, 266, 302, 350, 434,
                              590, 770, 974, 1202, 1454, 1730,
                              2030, 2354, 2702, 3074, 3470, 3890,
                              4334, 4802, 5294}
    gt = grid_type.lower()
    if gt == "sg-0":
        self.params["rem"]["xc_grid"] = 0
    elif gt == "sg-1":
        self.params["rem"]["xc_grid"] = 1
    elif gt in ("lebedev", "gauss-legendre"):
        if gt == "lebedev" and \
                angular_points not in lebedev_angular_points:
            raise ValueError(str(angular_points) + " is not a valid "
                             "Lebedev angular points number")
        # Gauss-Legendre grids are encoded with a leading minus sign.
        prefix = "" if gt == "lebedev" else "-"
        self.params["rem"]["xc_grid"] = "{pre}{rp:06d}{ap:06d}".format(
            pre=prefix, rp=radical_points, ap=angular_points)
    else:
        raise ValueError("Grid type " + grid_type + " is not supported "
                         "currently")
def set_scf_initial_guess(self, guess="SAD"):
    """
    Set initial guess method to be used for SCF

    Args:
        guess: The initial guess method. (str)
    """
    # Renamed the misspelled local ("availabel_guesses").
    known_guesses = {"core", "sad", "gwh", "read", "fragmo"}
    if guess.lower() not in known_guesses:
        raise ValueError("The guess method " + guess + " is not supported "
                         "yet")
    self.params["rem"]["scf_guess"] = guess.lower()
def set_geom_max_iterations(self, iterations):
    """
    Set the max iterations of geometry optimization.

    Args:
        iterations: the maximum iterations of geometry optimization.
            (Integer)
    """
    self.params["rem"]["geom_opt_max_cycles"] = iterations
def set_geom_opt_coords_type(self, coords_type="internal_switch"):
    """
    Set the coordinates system used in geometry optimization.
    "cartesian" --- always cartesian coordinates.
    "internal" --- always internal coordinates.
    "internal-switch" --- try internal coordinates first, if fails, switch
    to cartesian coordinates.
    "z-matrix" --- always z-matrix coordinates.
    "z-matrix-switch" --- try z-matrix first, if fails, switch to
    cartesian coordinates.

    Args:
        coords_type: The type of the coordinates. (str)

    Bug fixes: the documented default "internal_switch" (underscore) was
    not a key of the hyphenated map, so calling with the default always
    raised; underscores are now normalised to hyphens. Also fixed the
    "Coodinate" typo in the error message.
    """
    coords_map = {"cartesian": 0, "internal": 1, "internal-switch": -1,
                  "z-matrix": 2, "z-matrix-switch": -2}
    # Accept underscore spellings (e.g. the default "internal_switch").
    key = coords_type.lower().replace("_", "-")
    if key not in coords_map:
        raise ValueError("Coordinate system " + coords_type + " is not "
                         "supported yet")
    self.params["rem"]["geom_opt_coords"] = coords_map[key]
def scale_geom_opt_threshold(self, gradient=0.1, displacement=0.1,
                             energy=0.1):
    """
    Adjust the convergence criteria of geometry optimization.

    Args:
        gradient: the scale factor for gradient criteria. If less than
            1.0, you are tightening the threshold. The base value is
            300 × 10E−6
        displacement: the scale factor for atomic displacement. If less
            then 1.0, you are tightening the threshold. The base value is
            1200 × 10E−6
        energy: the scale factor for energy change between successive
            iterations. If less than 1.0, you are tightening the
            threshold. The base value is 100 × 10E−8.
    """
    # Scaled values are truncated to int; a factor below 1/(base-1)
    # would truncate to 0, i.e. an impossible threshold, so reject it.
    if gradient < 1.0/(300-1) or displacement < 1.0/(1200-1) or \
            energy < 1.0/(100-1):
        raise ValueError("The geometry optimization convergence criteria "
                         "is too tight")
    self.params["rem"]["geom_opt_tol_gradient"] = int(gradient * 300)
    self.params["rem"]["geom_opt_tol_displacement"] = int(displacement *
                                                          1200)
    self.params["rem"]["geom_opt_tol_energy"] = int(energy * 100)
def set_geom_opt_use_gdiis(self, subspace_size=None):
    """
    Use GDIIS algorithm in geometry optimization.

    Args:
        subspace_size: The size of the DIIS subsapce. None for default
            value. The default value is min(NDEG, NATOMS, 4) NDEG = number
            of moleculardegrees of freedom.
    """
    if subspace_size is None:
        # -1 tells QChem to pick its own default subspace size.
        subspace_size = -1
    self.params["rem"]["geom_opt_max_diis"] = subspace_size
def disable_symmetry(self):
    """
    Turn the symmetry off.
    """
    # SYM_IGNORE skips symmetry detection; SYMMETRY disables its use.
    self.params["rem"]["sym_ignore"] = True
    self.params["rem"]["symmetry"] = False
def use_cosmo(self, dielectric_constant=78.4):
    """
    Set the solvent model to COSMO.

    Args:
        dielectric_constant: the dielectric constant for the solvent.
            The default (78.4) corresponds to water.
    """
    self.params["rem"]["solvent_method"] = "cosmo"
    self.params["rem"]["solvent_dielectric"] = dielectric_constant
def use_pcm(self, pcm_params=None, solvent_key="solvent", solvent_params=None,
            radii_force_field=None):
    """
    Set the solvent model to PCM. Default parameters are trying to comply to
    gaussian default value

    Args:
        pcm_params (dict): The parameters of "$pcm" section.
        solvent_key (str): for versions < 4.2 the section name is "pcm_solvent"
        solvent_params (dict): The parameters of solvent_key section
        radii_force_field (str): The force fied used to set the solute
            radii. Default to UFF.
    """
    self.params["pcm"] = dict()
    self.params[solvent_key] = dict()
    default_pcm_params = {"Theory": "SSVPE",
                          "vdwScale": 1.1,
                          "Radii": "UFF"}
    if not solvent_params:
        # Default dielectric corresponds to water.
        solvent_params = {"Dielectric": 78.3553}
    # User-supplied $pcm settings first, then fill in defaults for any
    # keys the user did not set.
    if pcm_params:
        for k, v in pcm_params.items():
            self.params["pcm"][k.lower()] = v.lower() \
                if isinstance(v, six.string_types) else v
    for k, v in default_pcm_params.items():
        if k.lower() not in self.params["pcm"].keys():
            self.params["pcm"][k.lower()] = v.lower() \
                if isinstance(v, six.string_types) else v
    for k, v in solvent_params.items():
        self.params[solvent_key][k.lower()] = v.lower() \
            if isinstance(v, six.string_types) else copy.deepcopy(v)
    self.params["rem"]["solvent_method"] = "pcm"
    if radii_force_field:
        self.params["pcm"]["radii"] = "bondi"
        # NOTE(review): "force_fied" looks like a typo for "force_field",
        # but renaming the $rem key would change every generated input
        # file — confirm against the QChem manual before fixing.
        self.params["rem"]["force_fied"] = radii_force_field.lower()
def __str__(self):
    """Render the full QChem input deck, one $section block at a time."""
    section_order = ["comment", "molecule", "rem"] + \
        sorted(list(self.optional_keywords_list))
    out = []
    for section in section_order:
        # $molecule is always emitted; other sections only when present.
        if section == "molecule" or section in self.params:
            # Dispatch to the matching _format_<section> helper.
            formatter = getattr(self, "_format_" + section)
            out.append("$" + section)
            out.extend(formatter())
            out.append("$end")
            out.append('\n')
    return '\n'.join(out)
@classmethod
def _wrap_comment(cls, comment):
    """Wrap the $comment text to 70 columns.

    Text before the first '<' is treated as a plain title and re-wrapped;
    everything from the '<' onward keeps its own line breaks, with only
    over-long lines re-wrapped individually.
    """
    ml_section_start = comment.find('<')
    if ml_section_start >= 0:
        title_section = comment[0:ml_section_start]
        ml_section = comment[ml_section_start:]
    else:
        title_section = comment
        ml_section = ''
    wrapped_title_lines = textwrap.wrap(title_section.strip(), width=70, initial_indent=' ')
    wrapped_ml_lines = []
    for l in ml_section.splitlines():
        if len(l) > 70:
            wrapped_ml_lines.extend(textwrap.wrap(l.strip(), width=70, initial_indent=' '))
        else:
            wrapped_ml_lines.append(l)
    return '\n'.join(wrapped_title_lines + wrapped_ml_lines)
def _format_comment(self):
    """Return the $comment section body as a list of wrapped lines."""
    return self._wrap_comment(self.params["comment"]).splitlines()
def _format_alist(self):
    """Return the $alist section body: one indented atom index per line."""
    return [" {}".format(x) for x in self.params["alist"]]
def _format_molecule(self):
    """Return the $molecule section body as a list of lines."""
    lines = []

    def inner_format_mol(m2, index_base):
        # Format one molecule's sites; index_base offsets site indices so
        # ghost-atom indices stay global across fragment molecules.
        mol_lines = []
        for i, site in enumerate(m2.sites):
            # Ghost atoms are written with a leading '@'.
            ghost = "@" if self.ghost_atoms \
                and i + index_base in self.ghost_atoms else ""
            atom = "{ghost:s}{element:s}".format(ghost=ghost,
                                                 element=site.species_string)
            mol_lines.append(" {atom:<4} {x:>17.8f} {y:>17.8f} "
                             "{z:>17.8f}".format(atom=atom, x=site.x,
                                                 y=site.y, z=site.z))
        return mol_lines

    if self.charge is not None:
        lines.append(" {charge:d}  {multi:d}".format(charge=self
                     .charge, multi=self.spin_multiplicity))
    if isinstance(self.mol, six.string_types) and self.mol == "read":
        # Geometry comes from the checkpoint file.
        lines.append(" read")
    elif isinstance(self.mol, list):
        # Fragment input: each fragment gets a "--" separator and its own
        # charge/multiplicity line.
        starting_index = 0
        for m in self.mol:
            lines.append("--")
            lines.append(" {charge:d}  {multi:d}".format(
                charge=m.charge, multi=m.spin_multiplicity))
            lines.extend(inner_format_mol(m, starting_index))
            starting_index += len(m)
    else:
        lines.extend(inner_format_mol(self.mol, 0))
    return lines
def _format_rem(self):
    """Format $rem lines: jobtype/exchange/basis first, the rest sorted."""
    rem = self.params["rem"]
    keys = set(rem.keys())
    # Right-align names to the longest key for a tidy column.
    width = max([len(k) for k in keys]) if keys else 0
    line_fmt = Template(" {name:>$name_width} = "
                        "{value}").substitute(name_width=width)
    priority_keys = ["jobtype", "exchange", "basis"]
    ordered_keys = priority_keys + sorted(keys - set(priority_keys))
    return [line_fmt.format(name=k, value=rem[k]) for k in ordered_keys]
def _format_basis(self):
    """Render the $basis section.

    A dict maps element -> basis name; a list gives a per-atom
    (element, basis) pair for each atom, written with 1-based indices.
    """
    lines = []
    basis_spec = self.params["basis"]
    if isinstance(basis_spec, dict):
        for element in sorted(basis_spec):
            lines.extend([" " + element,
                          " " + basis_spec[element],
                          " ****"])
    elif isinstance(basis_spec, list):
        for i, (element, bs) in enumerate(basis_spec):
            lines.append(" {element:2s} {number:3d}".format(element=element, number=i+1))
            lines.append(" {}".format(bs))
            lines.append(" ****")
    return lines
def _format_aux_basis(self):
    """Render the $aux_basis section (per-element dict or per-atom list)."""
    lines = []
    aux_spec = self.params["aux_basis"]
    if isinstance(aux_spec, dict):
        for element in sorted(aux_spec):
            lines.extend([" " + element,
                          " " + aux_spec[element],
                          " ****"])
    else:
        for i, (element, bs) in enumerate(aux_spec):
            lines.append(" {element:2s} {number:3d}".format(element=element, number=i+1))
            lines.append(" {}".format(bs))
            lines.append(" ****")
    return lines
def _format_basis2(self):
    """Render the $basis2 section (per-element dict or per-atom list)."""
    lines = []
    basis2_spec = self.params["basis2"]
    if isinstance(basis2_spec, dict):
        for element in sorted(basis2_spec):
            lines.extend([" " + element,
                          " " + basis2_spec[element],
                          " ****"])
    else:
        for i, (element, bs) in enumerate(basis2_spec):
            lines.append(" {element:2s} {number:3d}".format(element=element, number=i+1))
            lines.append(" {}".format(bs))
            lines.append(" ****")
    return lines
def _format_ecp(self):
    """Render the $ecp section: element, ECP name, "****" per element."""
    lines = []
    ecp_spec = self.params["ecp"]
    for element in sorted(ecp_spec):
        lines.extend([" " + element,
                      " " + ecp_spec[element],
                      " ****"])
    return lines
def _format_pcm(self):
    """Render the $pcm section with right-aligned keys in sorted order."""
    pcm_dict = self.params["pcm"]
    name_width = max(map(len, pcm_dict)) if pcm_dict else 0
    line_fmt = Template(" {name:>$name_width} "
                        "{value}").substitute(name_width=name_width)
    return [line_fmt.format(name=key, value=pcm_dict[key])
            for key in sorted(pcm_dict)]
def _format_pcm_solvent(self, key="pcm_solvent"):
    """Render the $pcm_solvent (or $solvent) section.

    Keys dielectric, nonels, nsolventatoms and solventatom (when present)
    are written first, then the rest in sorted order. A "solventatom"
    entry is a list of 4-tuples; each tuple gets its own line.
    """
    pp_format_template = Template(" {name:>$name_width} "
                                  "{value}")
    name_width = 0
    for name in self.params[key].keys():
        if len(name) > name_width:
            name_width = len(name)
    rem = pp_format_template.substitute(name_width=name_width)
    lines = []
    all_keys = set(self.params[key].keys())
    # Preserve the conventional ordering for the well-known keys.
    priority_keys = []
    for k in ["dielectric", "nonels", "nsolventatoms", "solventatom"]:
        if k in all_keys:
            priority_keys.append(k)
    additional_keys = all_keys - set(priority_keys)
    ordered_keys = priority_keys + sorted(list(additional_keys))
    for name in ordered_keys:
        value = self.params[key][name]
        if name == "solventatom":
            # One formatted line per (int, int, int, float) solvent atom
            # record; deepcopy so rebinding `value` can't touch params.
            for v in copy.deepcopy(value):
                value = "{:<4d} {:<4d} {:<4d} {:4.2f}".format(*v)
                lines.append(rem.format(name=name, value=value))
            continue
        lines.append(rem.format(name=name, value=value))
    return lines
def _format_solvent(self):
    """Render the $solvent section (same layout as $pcm_solvent)."""
    return self._format_pcm_solvent("solvent")
def _format_opt(self):
    """Render the $opt section.

    Only the CONSTRAINT sub-section is currently supported; each entry in
    params['opt'] is a list whose first item names the constraint type
    and whose remaining items are its arguments.
    """
    constraint_lines = ['CONSTRAINT']
    for vals in self.params['opt']:
        if vals[0] in ['outp', 'tors', 'linc', 'linp']:
            # Four-atom constraints: four indices plus a target value.
            constraint_lines.append("{vals[0]} {vals[1]} {vals[2]} {vals[3]} {vals[4]} {vals[5]}".format(vals=vals))
        elif vals[0] == 'stre':
            # Bond stretch: two atom indices.
            constraint_lines.append("{vals[0]} {vals[1]} {vals[2]}".format(vals=vals))
        elif vals[0] == 'bend':
            # Angle bend: three atom indices.
            constraint_lines.append("{vals[0]} {vals[1]} {vals[2]} {vals[3]} {vals[4]}".format(vals=vals))
    constraint_lines.append('ENDCONSTRAINT')
    # Further opt keywords can extend this list in the future.
    return constraint_lines
def as_dict(self):
    """Serialize this task to an MSON-style dict.

    Returns:
        dict with @module/@class markers, the molecule (string, dict or
        list of dicts), charge, spin multiplicity, params, and the ghost
        atom indices when present.

    Raises:
        ValueError: if self.mol is not a string, Molecule, or list.
    """
    if isinstance(self.mol, six.string_types):
        mol_dict = self.mol
    elif isinstance(self.mol, Molecule):
        mol_dict = self.mol.as_dict()
    elif isinstance(self.mol, list):
        mol_dict = [m.as_dict() for m in self.mol]
    else:
        # Fixed typo in the error message ("Unknow" -> "Unknown").
        raise ValueError('Unknown molecule type "{}"'.format(type(self.mol)))
    d = {"@module": self.__class__.__module__,
         "@class": self.__class__.__name__,
         "molecule": mol_dict,
         "charge": self.charge,
         "spin_multiplicity": self.spin_multiplicity,
         "params": self.params}
    if self.ghost_atoms:
        d["ghost_atoms"] = self.ghost_atoms
    return d
@classmethod
def from_dict(cls, d):
    """Reconstruct a QcTask from its as_dict() representation.

    Args:
        d: dict produced by QcTask.as_dict().

    Returns:
        QcTask

    Raises:
        ValueError: if the "molecule" entry is not "read", a dict, or a
            list of dicts.
    """
    if d["molecule"] == "read":
        mol = "read"
    elif isinstance(d["molecule"], dict):
        mol = Molecule.from_dict(d["molecule"])
    elif isinstance(d["molecule"], list):
        mol = [Molecule.from_dict(m) for m in d["molecule"]]
    else:
        # Fixed typo in the error message ("Unknow" -> "Unknown").
        raise ValueError('Unknown molecule type "{}"'.format(type(d["molecule"])))
    jobtype = d["params"]["rem"]["jobtype"]
    title = d["params"].get("comment", None)
    exchange = d["params"]["rem"]["exchange"]
    correlation = d["params"]["rem"].get("correlation", None)
    basis_set = d["params"]["rem"]["basis"]
    aux_basis_set = d["params"]["rem"].get("aux_basis", None)
    ecp = d["params"]["rem"].get("ecp", None)
    ghost_atoms = d.get("ghost_atoms", None)
    # Every section other than $comment and $rem is an optional parameter.
    optional_params = None
    op_keys = set(d["params"].keys()) - {"comment", "rem"}
    if len(op_keys) > 0:
        optional_params = dict()
        for k in op_keys:
            optional_params[k] = d["params"][k]
    return QcTask(molecule=mol, charge=d["charge"],
                  spin_multiplicity=d["spin_multiplicity"],
                  jobtype=jobtype, title=title,
                  exchange=exchange, correlation=correlation,
                  basis_set=basis_set, aux_basis_set=aux_basis_set,
                  ecp=ecp, rem_params=d["params"]["rem"],
                  optional_params=optional_params,
                  ghost_atoms=ghost_atoms)
def write_file(self, filename):
    """Write this task to *filename* (zopen transparently handles .gz)."""
    with zopen(filename, "wt") as f:
        f.write(str(self))
@classmethod
def from_file(cls, filename):
    """Parse a QChem input file from disk."""
    with zopen(filename, "rt") as f:
        contents = f.read()
    return cls.from_string(contents)
@classmethod
def from_string(cls, contents):
    """
    Creates QcInput from a string.

    Args:
        contents: String representing a QChem input file.

    Returns:
        QcInput object
    """
    mol = None
    charge = None
    spin_multiplicity = None
    params = dict()
    lines = contents.split('\n')
    # State for the "$<section> ... $end" scanner.
    parse_section = False
    section_name = None
    section_text = []
    ghost_atoms = None
    for line_num, line in enumerate(lines):
        l = line.strip().lower()
        if len(l) == 0:
            continue
        # Outside a section, only "$<name>" openers are legal.
        if (not parse_section) and (l == "$end" or not l.startswith("$")):
            raise ValueError("Format error, parsing failed")
        if parse_section and l != "$end":
            section_text.append(line)
        if l.startswith("$") and not parse_section:
            parse_section = True
            section_name = l[1:]
            available_sections = ["comment", "molecule", "rem"] + \
                sorted(list(cls.optional_keywords_list))
            if section_name not in available_sections:
                raise ValueError("Unrecognized keyword " + line.strip() +
                                 " at line " + str(line_num))
            if section_name in params:
                raise ValueError("duplicated keyword " + line.strip() +
                                 "at line " + str(line_num))
        if parse_section and l == "$end":
            # Dispatch to the matching _parse_<section> classmethod;
            # __get__ binds the unbound classmethod to QcTask.
            func_name = "_parse_" + section_name
            if func_name not in QcTask.__dict__:
                raise Exception(func_name + " is not implemented yet, "
                                "please implement it")
            parse_func = QcTask.__dict__[func_name].__get__(None, QcTask)
            if section_name == "molecule":
                mol, charge, spin_multiplicity, ghost_atoms = parse_func(section_text)
            else:
                d = parse_func(section_text)
                params[section_name] = d
            parse_section = False
            section_name = None
            section_text = []
    if parse_section:
        raise ValueError("Format error. " + section_name + " is not "
                         "terminated")
    jobtype = params["rem"]["jobtype"]
    title = params.get("comment", None)
    exchange = params["rem"].get("exchange", "hf")
    correlation = params["rem"].get("correlation", None)
    basis_set = params["rem"]["basis"]
    aux_basis_set = params["rem"].get("aux_basis", None)
    ecp = params["rem"].get("ecp", None)
    # Every section other than $comment and $rem is an optional parameter.
    optional_params = None
    op_keys = set(params.keys()) - {"comment", "rem"}
    if len(op_keys) > 0:
        optional_params = dict()
        for k in op_keys:
            optional_params[k] = params[k]
    return QcTask(molecule=mol, charge=charge,
                  spin_multiplicity=spin_multiplicity,
                  jobtype=jobtype, title=title,
                  exchange=exchange, correlation=correlation,
                  basis_set=basis_set, aux_basis_set=aux_basis_set,
                  ecp=ecp, rem_params=params["rem"],
                  optional_params=optional_params,
                  ghost_atoms=ghost_atoms)
@classmethod
def _parse_comment(cls, contents):
    """Join the $comment section lines into a single stripped string."""
    joined = '\n'.join(contents)
    return joined.strip()
@classmethod
def _parse_coords(cls, coord_lines):
    """
    Helper method to parse coordinates. Copied from GaussianInput class.
    """
    # First pass: collect named Z-matrix variables ("R1 = 1.5" style).
    paras = {}
    var_pattern = re.compile("^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
    for l in coord_lines:
        m = var_pattern.match(l.strip())
        if m:
            paras[m.group(1)] = float(m.group(2))
    species = []
    coords = []
    # Stores whether a Zmatrix format is detected. Once a zmatrix format
    # is detected, it is assumed for the remaining of the parsing.
    zmode = False
    for l in coord_lines:
        l = l.strip()
        if not l:
            break
        if (not zmode) and cls.xyz_patt.match(l):
            # Cartesian line: element followed by x y z (an optional extra
            # leading token shifts the coordinate columns by one).
            m = cls.xyz_patt.match(l)
            species.append(m.group(1))
            toks = re.split("[,\s]+", l.strip())
            if len(toks) > 4:
                coords.append(list(map(float, toks[2:5])))
            else:
                coords.append(list(map(float, toks[1:4])))
        elif cls.zmat_patt.match(l):
            zmode = True
            toks = re.split("[,\s]+", l.strip())
            species.append(toks[0])
            toks.pop(0)
            if len(toks) == 0:
                # First atom is placed at the origin.
                coords.append(np.array([0.0, 0.0, 0.0]))
            else:
                # Split remaining tokens into reference-atom indices (nn)
                # and geometric parameters (length, angle, dihedral).
                nn = []
                parameters = []
                while len(toks) > 1:
                    ind = toks.pop(0)
                    data = toks.pop(0)
                    try:
                        nn.append(int(ind))
                    except ValueError:
                        # Reference given by atom label instead of index.
                        nn.append(species.index(ind) + 1)
                    try:
                        val = float(data)
                        parameters.append(val)
                    except ValueError:
                        # Symbolic variable; a "-name" prefix negates it.
                        if data.startswith("-"):
                            parameters.append(-paras[data[1:]])
                        else:
                            parameters.append(paras[data])
                if len(nn) == 1:
                    # Second atom: along z at the bond length.
                    coords.append(np.array(
                        [0.0, 0.0, float(parameters[0])]))
                elif len(nn) == 2:
                    # Third atom: bond length + angle, rotated about y.
                    coords1 = coords[nn[0] - 1]
                    coords2 = coords[nn[1] - 1]
                    bl = parameters[0]
                    angle = parameters[1]
                    axis = [0, 1, 0]
                    op = SymmOp.from_origin_axis_angle(coords1, axis,
                                                       angle, False)
                    coord = op.operate(coords2)
                    vec = coord - coords1
                    coord = vec * bl / np.linalg.norm(vec) + coords1
                    coords.append(coord)
                elif len(nn) == 3:
                    # General atom: bond length + angle + dihedral; the
                    # second rotation corrects the dihedral by the angle
                    # already introduced (adj).
                    coords1 = coords[nn[0] - 1]
                    coords2 = coords[nn[1] - 1]
                    coords3 = coords[nn[2] - 1]
                    bl = parameters[0]
                    angle = parameters[1]
                    dih = parameters[2]
                    v1 = coords3 - coords2
                    v2 = coords1 - coords2
                    axis = np.cross(v1, v2)
                    op = SymmOp.from_origin_axis_angle(
                        coords1, axis, angle, False)
                    coord = op.operate(coords2)
                    v1 = coord - coords1
                    v2 = coords1 - coords2
                    v3 = np.cross(v1, v2)
                    adj = get_angle(v3, axis)
                    axis = coords1 - coords2
                    op = SymmOp.from_origin_axis_angle(
                        coords1, axis, dih - adj, False)
                    coord = op.operate(coord)
                    vec = coord - coords1
                    coord = vec * bl / np.linalg.norm(vec) + coords1
                    coords.append(coord)

    def parse_species(sp_str):
        """
        The species specification can take many forms. E.g.,
        simple integers representing atomic numbers ("8"),
        actual species string ("C") or a labelled species ("C1").
        Sometimes, the species string is also not properly capitalized,
        e.g, ("c1"). This method should take care of these known formats.
        """
        try:
            return int(sp_str)
        except ValueError:
            sp = re.sub("\d", "", sp_str)
            return sp.capitalize()

    species = list(map(parse_species, species))
    return Molecule(species, coords)
@classmethod
def _parse_molecule(cls, contents):
    """Parse the $molecule section.

    Returns a (mol, charge, spin_multiplicity, ghost_atoms) tuple, where
    mol is "read", a single Molecule, or a list of fragment Molecules.
    """

    def parse_ghost_indices(coord_text_lines):
        # Ghost atoms are marked with "@" in the coordinate lines; return
        # their indices plus the lines with the markers stripped.
        no_ghost_text = [l.replace("@", "") for l in coord_text_lines]
        ghosts = []
        for index, l in enumerate(coord_text_lines):
            l = l.strip()
            if not l:
                break
            if "@" in l:
                ghosts.append(index)
        return ghosts, no_ghost_text

    text = copy.deepcopy(contents[:2])
    charge_multi_pattern = re.compile('\s*(?P<charge>'
                                      '[-+]?\d+)\s+(?P<multi>\d+)')
    line = text.pop(0)
    m = charge_multi_pattern.match(line)
    if m:
        charge = int(m.group("charge"))
        spin_multiplicity = int(m.group("multi"))
        line = text.pop(0)
    else:
        charge = None
        spin_multiplicity = None
    if line.strip().lower() == "read":
        # Geometry is read from a previous job's scratch files.
        return "read", charge, spin_multiplicity, None
    elif charge is None or spin_multiplicity is None:
        raise ValueError("Charge or spin multiplicity is not found")
    else:
        if contents[1].strip()[0:2] == "--":
            # Fragment ("--"-separated) molecule specification: each
            # fragment carries its own charge/multiplicity header line.
            chunks = "\n".join(contents[2:]).split("--\n")
            mol = []
            ghost_atoms = []
            starting_index = 0
            for chunk in chunks:
                frag_contents = chunk.split("\n")
                m = charge_multi_pattern.match(frag_contents[0])
                if m:
                    fragment_charge = int(m.group("charge"))
                    fragment_spin_multiplicity = int(m.group("multi"))
                else:
                    raise Exception("charge and spin multiplicity must be specified for each fragment")
                gh, coord_lines = parse_ghost_indices(frag_contents[1:])
                fragment = cls._parse_coords(coord_lines)
                fragment.set_charge_and_spin(fragment_charge, fragment_spin_multiplicity)
                mol.append(fragment)
                # Ghost indices are global across all fragments.
                ghost_atoms.extend([i+starting_index for i in gh])
                starting_index += len(fragment)
        else:
            ghost_atoms, coord_lines = parse_ghost_indices(contents[1:])
            mol = cls._parse_coords(coord_lines)
            # With ghost atoms present the electron count is ambiguous,
            # so charge/spin are left on the QcTask rather than the mol.
            if len(ghost_atoms) == 0:
                mol.set_charge_and_spin(charge, spin_multiplicity)
        ghost_atoms = ghost_atoms if len(ghost_atoms) > 0 else None
        return mol, charge, spin_multiplicity, ghost_atoms
@classmethod
def _parse_rem(cls, contents):
    """Parse the $rem section into a dict.

    Keys are lowercased and mapped through cls.alternative_keys; values
    are coerced to bool/int/float where possible, except for "xc_grid"
    whose value is kept verbatim.

    Raises:
        ValueError: if a line has fewer than two tokens.
    """
    d = dict()
    int_pattern = re.compile('^[-+]?\d+$')
    float_pattern = re.compile('^[-+]?\d+\.\d+([eE][-+]?\d+)?$')
    for line in contents:
        tokens = line.strip().replace("=", ' ').split()
        if len(tokens) < 2:
            raise ValueError("Can't parse $rem section, there should be "
                             "at least two field: key and value!")
        k1, v = tokens[:2]
        k2 = k1.lower()
        if k2 in cls.alternative_keys:
            k2 = cls.alternative_keys[k2]
        if v in cls.alternative_values:
            # Bug fix: look up the canonical value; previously the whole
            # alternative_values dict was assigned to v.
            v = cls.alternative_values[v]
        if k2 == "xc_grid":
            # Grid specs like "000075000302" must stay as strings.
            d[k2] = v
        elif v == "True":
            d[k2] = True
        elif v == "False":
            d[k2] = False
        elif int_pattern.match(v):
            d[k2] = int(v)
        elif float_pattern.match(v):
            d[k2] = float(v)
        else:
            d[k2] = v.lower()
    return d
@classmethod
def _parse_aux_basis(cls, contents):
    """Parse the $aux_basis section.

    Returns a list of (element, basis) tuples for the per-atom form
    (header line "element index"), otherwise a {element: basis} dict.
    """
    if len(contents) % 3 != 0:
        raise ValueError("Auxiliary basis set section format error")
    header_tokens = contents[0].split()
    triplets = [contents[j:j + 3] for j in range(0, len(contents), 3)]
    if len(header_tokens) == 2 and int(header_tokens[1]) > 0:
        # Per-atom form: enforce consecutive 1-based atom numbering.
        bs = []
        for i, ch in enumerate(triplets):
            element, number = ch[0].split()
            if int(number) != i+1:
                raise ValueError("Atom order number doesn't match in $aux_basis section")
            bs.append((element.strip().capitalize(), ch[1].strip().lower()))
    else:
        # Per-element form.
        bs = dict()
        for ch in triplets:
            bs[ch[0].strip().capitalize()] = ch[1].strip().lower()
    return bs
@classmethod
def _parse_basis2(cls, contents):
    """Parse the $basis2 section.

    Returns a list of (element, basis) tuples for the per-atom form,
    otherwise a {element: basis} dict.

    Raises:
        ValueError: on malformed sections or non-consecutive atom numbers.
    """
    if len(contents) % 3 != 0:
        # Bug fix: error messages used to say "Auxiliary basis set" /
        # "$aux_basis" (copy-paste from _parse_aux_basis).
        raise ValueError("Basis2 section format error")
    chunks = zip(*[iter(contents)]*3)
    t = contents[0].split()
    if len(t) == 2 and int(t[1]) > 0:
        # Per-atom form: enforce consecutive 1-based atom numbering.
        bs = []
        for i, ch in enumerate(chunks):
            element, number = ch[0].split()
            basis = ch[1]
            if int(number) != i+1:
                raise ValueError("Atom order number doesn't match in $basis2 section")
            bs.append((element.strip().capitalize(), basis.strip().lower()))
    else:
        # Per-element form.
        bs = dict()
        for ch in chunks:
            element, basis = ch[:2]
            bs[element.strip().capitalize()] = basis.strip().lower()
    return bs
@classmethod
def _parse_basis(cls, contents):
    """Parse the $basis section.

    Returns a list of (element, basis) tuples for the per-atom form
    (header line "element index"), otherwise a {element: basis} dict.
    """
    if len(contents) % 3 != 0:
        raise ValueError("Basis set section format error")
    header_tokens = contents[0].split()
    triplets = [contents[j:j + 3] for j in range(0, len(contents), 3)]
    if len(header_tokens) == 2 and int(header_tokens[1]) > 0:
        # Per-atom form: enforce consecutive 1-based atom numbering.
        bs = []
        for i, ch in enumerate(triplets):
            element, number = ch[0].split()
            if int(number) != i+1:
                raise ValueError("Atom order number doesn't match in $basis section")
            bs.append((element.strip().capitalize(), ch[1].strip().lower()))
    else:
        # Per-element form.
        bs = dict()
        for ch in triplets:
            bs[ch[0].strip().capitalize()] = ch[1].strip().lower()
    return bs
@classmethod
def _parse_ecp(cls, contents):
    """Parse the $ecp section into a {element: ecp_name} dict."""
    if len(contents) % 3 != 0:
        raise ValueError("ECP section format error")
    d = dict()
    # Each entry is a triplet: element line, ECP line, "****" separator.
    for j in range(0, len(contents), 3):
        element, ecp = contents[j], contents[j + 1]
        d[element.strip().capitalize()] = ecp.strip().lower()
    return d
@classmethod
def _parse_alist(cls, contents):
    """Parse the $alist section into a flat list of atom indices."""
    return [int(token) for line in contents for token in line.split()]
@classmethod
def _parse_pcm(cls, contents):
    """Parse the $pcm section into a dict.

    Keys are lowercased and mapped through cls.alternative_keys; values
    are coerced to bool/int/float where possible.

    Raises:
        ValueError: if a line has fewer than two tokens.
    """
    d = dict()
    int_pattern = re.compile('^[-+]?\d+$')
    float_pattern = re.compile('^[-+]?\d+\.\d+([eE][-+]?\d+)?$')
    for line in contents:
        tokens = line.strip().replace("=", ' ').split()
        if len(tokens) < 2:
            raise ValueError("Can't parse $pcm section, there should be "
                             "at least two field: key and value!")
        k1, v = tokens[:2]
        k2 = k1.lower()
        if k2 in cls.alternative_keys:
            k2 = cls.alternative_keys[k2]
        if v in cls.alternative_values:
            # Bug fix: look up the canonical value; previously the whole
            # alternative_values dict was assigned to v.
            v = cls.alternative_values[v]
        if v == "True":
            d[k2] = True
        elif v == "False":
            d[k2] = False
        elif int_pattern.match(v):
            d[k2] = int(v)
        elif float_pattern.match(v):
            d[k2] = float(v)
        else:
            d[k2] = v.lower()
    return d
@classmethod
def _parse_pcm_solvent(cls, contents):
    """Parse the $pcm_solvent (or $solvent) section into a dict.

    "solventatom" lines accumulate into a list of [int, int, int, float]
    records; other values are coerced to bool/int/float where possible.

    Raises:
        ValueError: if a line has fewer than two tokens.
    """
    d = dict()
    int_pattern = re.compile('^[-+]?\d+$')
    float_pattern = re.compile('^[-+]?\d+\.\d+([eE][-+]?\d+)?$')
    for line in contents:
        tokens = line.strip().replace("=", ' ').split()
        if len(tokens) < 2:
            raise ValueError("Can't parse $pcm_solvent section, "
                             "there should be at least two field: "
                             "key and value!")
        k1, v = tokens[:2]
        k2 = k1.lower()
        if k2 in cls.alternative_keys:
            k2 = cls.alternative_keys[k2]
        if v in cls.alternative_values:
            # Bug fix: look up the canonical value; previously the whole
            # alternative_values dict was assigned to v.
            v = cls.alternative_values[v]
        if k2 == "solventatom":
            v = [int(i) for i in tokens[1:4]]
            # noinspection PyTypeChecker
            v.append(float(tokens[4]))
            if k2 not in d:
                d[k2] = [v]
            else:
                d[k2].append(v)
        elif v == "True":
            d[k2] = True
        elif v == "False":
            d[k2] = False
        elif int_pattern.match(v):
            d[k2] = int(v)
        elif float_pattern.match(v):
            d[k2] = float(v)
        else:
            d[k2] = v.lower()
    return d
@classmethod
def _parse_solvent(cls, contents):
    """Parse the $solvent section (same grammar as $pcm_solvent)."""
    parsed = cls._parse_pcm_solvent(contents)
    return parsed
@classmethod
def _parse_opt(cls, contents):
    """Parse the $opt section; only the CONSTRAINT sub-section is read.

    Each constraint line becomes a list of tokens with ints and floats
    coerced to their numeric types.
    """
    int_pattern = re.compile('^[-+]?\d+$')
    float_pattern = re.compile('^[-+]?\d+\.\d+([eE][-+]?\d+)?$')

    def coerce(token):
        # int first, then float, otherwise keep the raw string.
        if int_pattern.match(token):
            return int(token)
        if float_pattern.match(token):
            return float(token)
        return token

    opt_list = []
    in_constraints = False
    for line in contents:
        if re.match('ENDCONSTRAINT', line):
            in_constraints = False
        elif in_constraints:
            opt_list.append([coerce(tok) for tok in line.strip().split()])
        elif re.match('CONSTRAINT', line):
            # Checked last so the CONSTRAINT header itself isn't recorded.
            in_constraints = True
    return opt_list
class QcInput(MSONable):
    """
    An object representing a multiple step QChem input file.

    Args:
        jobs: The QChem jobs (List of QcTask object)
    """

    def __init__(self, jobs):
        # Accept a single QcTask for convenience; normalize to a list.
        jobs = jobs if isinstance(jobs, list) else [jobs]
        for j in jobs:
            if not isinstance(j, QcTask):
                # Bug fix: the message used to misname the required type
                # as "QcInput" while the check is for QcTask.
                raise ValueError("jobs must be a list of QcTask objects")
        self.jobs = jobs

    def __str__(self):
        # QChem separates successive jobs with the "@@@" marker.
        return "\n@@@\n\n\n".join([str(j) for j in self.jobs])

    def write_file(self, filename):
        """Write the multi-step input to *filename* (zopen handles .gz)."""
        with zopen(filename, "wt") as f:
            f.write(self.__str__())

    def as_dict(self):
        """Serialize to an MSON-style dict."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "jobs": [j.as_dict() for j in self.jobs]}

    @classmethod
    def from_dict(cls, d):
        """Rebuild a QcInput from its as_dict() representation."""
        jobs = [QcTask.from_dict(j) for j in d["jobs"]]
        return QcInput(jobs)

    @classmethod
    def from_string(cls, contents):
        """Parse a multi-job input; jobs are separated by "@@@"."""
        qc_contents = contents.split("@@@")
        jobs = [QcTask.from_string(cont) for cont in qc_contents]
        return QcInput(jobs)

    @classmethod
    def from_file(cls, filename):
        """Parse a multi-job input file from disk."""
        with zopen(filename, "rt") as f:
            return cls.from_string(f.read())
class QcOutput(object):
    """Parser for a QChem output file, possibly containing multiple jobs.

    Parsed results live in ``self.data``: one dict per job (see
    ``_parse_job`` for the keys).
    """

    # Conversion factor from kcal/mol to eV (used for thermal corrections).
    kcal_per_mol_2_eV = 4.3363E-2

    def __init__(self, filename):
        """Read *filename* and parse every job chunk it contains."""
        self.filename = filename
        # Jobs inside one output file are separated by "Running Job i of n"
        # banners (several historical formats); split the text on them.
        split_pattern = "\n\nRunning Job \d+ of \d+ \S+|" \
                        "[*]{61}\nJob \d+ of \d+ \n[*]{61}|" \
                        "\n.*time.*\nRunning Job \d+ of \d+ \S+"
        try:
            with zopen(filename, "rt") as f:
                data = f.read()
        except UnicodeDecodeError:
            # Some QChem outputs contain non-UTF-8 bytes; retry as latin-1.
            with zopen(filename, "rb") as f:
                data = f.read().decode("latin-1")
        try:
            chunks = re.split(split_pattern, data)
            # noinspection PyTypeChecker
            self.data = list(map(self._parse_job, chunks))
        except UnicodeDecodeError:
            data = data.decode("latin-1")
            chunks = re.split(split_pattern, data)
            # noinspection PyTypeChecker
            self.data = list(map(self._parse_job, chunks))

    @property
    def final_energy(self):
        # Last (name, energy) entry of the last job -> the energy value.
        return self.data[-1]["energies"][-1][-1]

    @property
    def final_structure(self):
        # Last geometry of the last job.
        return self.data[-1]["molecules"][-1]

    @classmethod
    def _expected_successful_pattern(cls, qctask):
        """Regex texts that must all appear in a successful run of *qctask*."""
        text = ["Convergence criterion met"]
        if "correlation" in qctask.params["rem"]:
            if "ccsd" in qctask.params["rem"]["correlation"]\
                    or "qcisd" in qctask.params["rem"]["correlation"]:
                text.append('CC.*converged')
        if qctask.params["rem"]["jobtype"] == "opt"\
                or qctask.params["rem"]["jobtype"] == "ts":
            text.append("OPTIMIZATION CONVERGED")
        if qctask.params["rem"]["jobtype"] == "freq":
            text.append("VIBRATIONAL ANALYSIS")
        if qctask.params["rem"]["jobtype"] == "gradient":
            text.append("Gradient of SCF Energy")
        return text

    @classmethod
    def _parse_job(cls, output):
        """Parse one job's text into a result dict.

        The returned dict includes energies, molecules, charges,
        gradients, frequencies, SCF iterations, HOMO/LUMOs, BSSE,
        thermal corrections, the echoed input task, and error labels.
        """
        # --- regexes for the quantities reported by QChem ---
        scf_energy_pattern = re.compile("Total energy in the final basis set ="
                                        "\s+(?P<energy>-\d+\.\d+)")
        corr_energy_pattern = re.compile("(?P<name>[A-Z\-\(\)0-9]+)\s+"
                                         "([tT]otal\s+)?[eE]nergy\s+=\s+"
                                         "(?P<energy>-\d+\.\d+)")
        coord_pattern = re.compile("\s*\d+\s+(?P<element>[A-Z][a-zH]*)\s+"
                                   "(?P<x>\-?\d+\.\d+)\s+"
                                   "(?P<y>\-?\d+\.\d+)\s+"
                                   "(?P<z>\-?\d+\.\d+)")
        num_ele_pattern = re.compile("There are\s+(?P<alpha>\d+)\s+alpha "
                                     "and\s+(?P<beta>\d+)\s+beta electrons")
        total_charge_pattern = re.compile("Sum of atomic charges ="
                                          "\s+(?P<charge>\-?\d+\.\d+)")
        scf_iter_pattern = re.compile("\d+\s*(?P<energy>\-\d+\.\d+)\s+"
                                      "(?P<diis_error>\d+\.\d+E[-+]\d+)")
        zpe_pattern = re.compile("Zero point vibrational energy:"
                                 "\s+(?P<zpe>\d+\.\d+)\s+kcal/mol")
        thermal_corr_pattern = re.compile("(?P<name>\S.*\S):\s+"
                                          "(?P<correction>\d+\.\d+)\s+"
                                          "k?cal/mol")
        detailed_charge_pattern = re.compile("(Ground-State )?(?P<method>\w+)( Net)?"
                                             " Atomic Charges")
        nbo_charge_pattern = re.compile("(?P<element>[A-Z][a-z]{0,2})\s*(?P<no>\d+)\s+(?P<charge>\-?\d\.\d+)"
                                        "\s+(?P<core>\-?\d+\.\d+)\s+(?P<valence>\-?\d+\.\d+)"
                                        "\s+(?P<rydberg>\-?\d+\.\d+)\s+(?P<total>\-?\d+\.\d+)"
                                        "(\s+(?P<spin>\-?\d\.\d+))?")
        nbo_wavefunction_type_pattern = re.compile("This is an? (?P<type>\w+\-\w+) NBO calculation")
        bsse_pattern = re.compile("DE, kJ/mol\s+(?P<raw_be>\-?\d+\.?\d+([eE]\d+)?)\s+"
                                  "(?P<corrected_be>\-?\d+\.?\d+([eE]\d+)?)")
        float_pattern = re.compile("\-?\d+\.?\d+([eE]\d+)?$")
        # Known failure signatures -> human-readable error labels.
        error_defs = (
            (re.compile("Convergence failure"), "Bad SCF convergence"),
            (re.compile("Coordinates do not transform within specified "
                        "threshold"), "autoz error"),
            (re.compile("MAXIMUM OPTIMIZATION CYCLES REACHED"),
             "Geometry optimization failed"),
            (re.compile("\s+[Nn][Aa][Nn]\s+"), "NAN values"),
            (re.compile("energy\s+=\s*(\*)+"), "Numerical disaster"),
            (re.compile("NewFileMan::OpenFile\(\):\s+nopenfiles=\d+\s+"
                        "maxopenfiles=\d+s+errno=\d+"), "Open file error"),
            (re.compile("Application \d+ exit codes: 1[34]\d+"), "Exit Code 134"),
            (re.compile("Negative overlap matrix eigenvalue. Tighten integral "
                        "threshold \(REM_THRESH\)!"), "Negative Eigen"),
            (re.compile("Unable to allocate requested memory in mega_alloc"),
             "Insufficient static memory"),
            (re.compile("Application \d+ exit signals: Killed"),
             "Killed"),
            (re.compile("UNABLE TO DETERMINE Lamda IN FormD"),
             "Lamda Determination Failed"),
            (re.compile("Job too small. Please specify .*CPSCF_NSEG"),
             "Freq Job Too Small"),
            (re.compile("Not enough total memory"),
             "Not Enough Total Memory"),
            (re.compile("Use of \$pcm_solvent section has been deprecated starting in Q-Chem"),
             "pcm_solvent deprecated")
        )
        # --- accumulators and state flags for the line-by-line scan ---
        energies = []
        scf_iters = []
        coords = []
        species = []
        molecules = []
        gradients = []
        freqs = []
        vib_freqs = []
        vib_modes = []
        grad_comp = None
        errors = []
        parse_input = False
        parse_coords = False
        parse_scf_iter = False
        parse_gradient = False
        parse_freq = False
        parse_modes = False
        qctask_lines = []
        qctask = None
        jobtype = None
        charge = None
        spin_multiplicity = None
        thermal_corr = dict()
        properly_terminated = False
        pop_method = None
        parse_charge = False
        nbo_available = False
        nbo_charge_header = None
        parse_nbo_charge = False
        charges = dict()
        scf_successful = False
        opt_successful = False
        parse_alpha_homo = False
        parse_alpha_lumo = False
        parse_beta_homo = False
        parse_beta_lumo = False
        current_alpha_homo = None
        current_alpha_lumo = None
        current_beta_homo = None
        homo_lumo = []
        bsse = None
        hiershfiled_pop = False
        for line in output.split("\n"):
            # Error signatures can appear anywhere in the output.
            for ep, message in error_defs:
                if ep.search(line):
                    if message == "NAN values":
                        # Timing lines legitimately contain "nan"-like text.
                        if "time" in line:
                            continue
                    errors.append(message)
            if parse_input:
                # Collect the echoed input deck until the dashed separator,
                # then rebuild the QcTask from it.
                if "-" * 50 in line:
                    if len(qctask_lines) == 0:
                        continue
                    else:
                        qctask = QcTask.from_string('\n'.join(qctask_lines))
                        jobtype = qctask.params["rem"]["jobtype"]
                        parse_input = False
                    continue
                qctask_lines.append(line)
            elif parse_coords:
                # Collect the "Standard Nuclear Orientation" geometry table.
                if "-" * 50 in line:
                    if len(coords) == 0:
                        continue
                    else:
                        if qctask and qctask.ghost_atoms:
                            # Restore real element symbols for ghost sites.
                            if isinstance(qctask.mol, Molecule):
                                for i in qctask.ghost_atoms:
                                    species[i] = qctask.mol.sites[i].specie.symbol
                        molecules.append(Molecule(species, coords))
                        coords = []
                        species = []
                        parse_coords = False
                    continue
                if "Atom" in line:
                    continue
                m = coord_pattern.match(line)
                coords.append([float(m.group("x")), float(m.group("y")),
                               float(m.group("z"))])
                species.append(m.group("element"))
            elif parse_scf_iter:
                # Collect (energy, DIIS error) pairs for the current SCF.
                if "SCF time: CPU" in line:
                    parse_scf_iter = False
                    continue
                if 'Convergence criterion met' in line:
                    scf_successful = True
                m = scf_iter_pattern.search(line)
                if m:
                    scf_iters[-1].append((float(m.group("energy")),
                                          float(m.group("diis_error"))))
            elif parse_gradient:
                # Gradient tables come in 3-row chunks (x, y, z components);
                # each completed chunk is transposed into per-atom vectors.
                if "Max gradient component" in line:
                    gradients[-1]["max_gradient"] = \
                        float(line.split("=")[1])
                    if grad_comp:
                        if len(grad_comp) == 3:
                            gradients[-1]["gradients"].extend(zip(*grad_comp))
                        else:
                            raise Exception("Gradient section parsing failed")
                    continue
                elif "RMS gradient" in line:
                    gradients[-1]["rms_gradient"] = \
                        float(line.split("=")[1])
                    parse_gradient = False
                    grad_comp = None
                    continue
                elif "." not in line:
                    # An index-only header row starts a new 3-row chunk.
                    if grad_comp:
                        if len(grad_comp) == 3:
                            gradients[-1]["gradients"].extend(zip(*grad_comp))
                        else:
                            raise Exception("Gradient section parsing failed")
                    grad_comp = []
                else:
                    # Fixed-width columns can run together; insert a space
                    # before crowded columns so split() works.
                    grad_line_token = list(line)
                    grad_crowd = False
                    grad_line_final = line
                    for i in range(5, len(line), 12):
                        c = grad_line_token[i]
                        if not c.isspace():
                            grad_crowd = True
                            if ' ' in grad_line_token[i+1: i+6+1] or \
                                    len(grad_line_token[i+1: i+6+1]) < 6:
                                continue
                            grad_line_token[i-1] = ' '
                    if grad_crowd:
                        grad_line_final = ''.join(grad_line_token)
                    grad_comp.append([float(x) for x
                                      in grad_line_final.strip().split()[1:]])
            elif parse_freq:
                if parse_modes:
                    # Displacement rows: one (x, y, z) triple per mode column.
                    if "TransDip" in line:
                        parse_modes = False
                        for freq, mode in zip(vib_freqs, zip(*vib_modes)):
                            freqs.append({"frequency": freq,
                                          "vib_mode": mode})
                        vib_modes = []
                        continue
                    dis_flat = [float(x) for x in line.strip().split()[1:]]
                    dis_atom = zip(*([iter(dis_flat)]*3))
                    vib_modes.append(dis_atom)
                if "STANDARD THERMODYNAMIC QUANTITIES" in line\
                        or "Imaginary Frequencies" in line:
                    parse_freq = False
                    continue
                if "Frequency:" in line:
                    vib_freqs = [float(vib) for vib
                                 in line.strip().strip().split()[1:]]
                elif "X Y Z" in line:
                    parse_modes = True
                    continue
            elif parse_charge:
                # Population-analysis charge table (3rd column per atom).
                if '-'*20 in line:
                    if len(charges[pop_method]) == 0:
                        continue
                    else:
                        pop_method = None
                        parse_charge = False
                else:
                    if len(line.strip()) == 0 or\
                            'Atom' in line:
                        continue
                    else:
                        charges[pop_method].append(float(line.split()[2]))
            elif parse_nbo_charge:
                # NBO charge table; terminated by a "====" rule line.
                if '-'*20 in line:
                    if len(charges[pop_method]) == 0:
                        continue
                elif "="*20 in line:
                    pop_method = None
                    parse_nbo_charge = False
                else:
                    m = nbo_charge_pattern.search(line)
                    if m:
                        charges[pop_method].append(float(m.group("charge")))
                    else:
                        raise Exception("Can't find NBO charges")
            elif parse_alpha_homo:
                # The last occupied alpha eigenvalue seen is the HOMO.
                if "-- Occupied --" in line:
                    continue
                elif "-- Virtual --" in line:
                    parse_alpha_homo = False
                    parse_alpha_lumo = True
                    continue
                else:
                    tokens = line.split()
                    m = float_pattern.search(tokens[-1])
                    if m:
                        current_alpha_homo = float(m.group(0))
                    continue
            elif parse_alpha_lumo:
                # First virtual alpha eigenvalue is the LUMO.
                current_alpha_lumo = float(line.split()[0])
                parse_alpha_lumo = False
                continue
            elif parse_beta_homo:
                if "-- Occupied --" in line:
                    continue
                elif "-- Virtual --" in line:
                    parse_beta_homo = False
                    parse_beta_lumo = True
                    continue
                else:
                    tokens = line.split()
                    m = float_pattern.search(tokens[-1])
                    if m:
                        current_beta_homo = float(m.group(0))
                    continue
            elif parse_beta_lumo:
                # Combine alpha/beta: HOMO is the higher occupied, LUMO the
                # lower virtual; convert from Hartree to eV.
                current_beta_lumo = float(line.split()[0])
                parse_beta_lumo = False
                if isinstance(current_alpha_homo, float) and isinstance(current_beta_homo, float):
                    current_homo = max([current_alpha_homo, current_beta_homo])
                else:
                    current_homo = 0.0
                if isinstance(current_alpha_lumo, float) and isinstance(current_beta_lumo, float):
                    current_lumo = min([current_alpha_lumo, current_beta_lumo])
                else:
                    current_lumo = 0.0
                homo_lumo.append([Energy(current_homo, "Ha").to("eV"),
                                  Energy(current_lumo, "Ha").to("eV")])
                current_alpha_homo = None
                current_alpha_lumo = None
                current_beta_homo = None
                continue
            elif "-" * 50 in line and not (current_alpha_lumo is None):
                # Restricted calculation: no beta block follows, so the
                # alpha HOMO/LUMO pair is recorded on its own.
                homo_lumo.append([Energy(current_alpha_homo, "Ha").to("eV"),
                                  Energy(current_alpha_lumo, "Ha").to("eV")])
                current_alpha_homo = None
                current_alpha_lumo = None
                current_beta_homo = None
                continue
            else:
                # Default state: look for scalar quantities and for section
                # headers that switch the scanner into one of the modes.
                if spin_multiplicity is None:
                    m = num_ele_pattern.search(line)
                    if m:
                        spin_multiplicity = int(m.group("alpha")) - \
                            int(m.group("beta")) + 1
                if charge is None:
                    m = total_charge_pattern.search(line)
                    if m:
                        charge = int(float(m.group("charge")))
                if jobtype and jobtype == "freq":
                    m = zpe_pattern.search(line)
                    if m:
                        zpe = float(m.group("zpe"))
                        thermal_corr["ZPE"] = zpe
                    m = thermal_corr_pattern.search(line)
                    if m:
                        thermal_corr[m.group("name")] = \
                            float(m.group("correction"))
                m = bsse_pattern.search(line)
                if m:
                    raw_be = float(m.group("raw_be"))
                    corrected_be = float(m.group("corrected_be"))
                    bsse_fwu = FloatWithUnit(raw_be - corrected_be, "kJ mol^-1")
                    bsse = bsse_fwu.to('eV atom^-1').real
                name = None
                energy = None
                m = scf_energy_pattern.search(line)
                if m:
                    name = "SCF"
                    energy = Energy(m.group("energy"), "Ha").to("eV")
                m = corr_energy_pattern.search(line)
                if m and m.group("name") != "SCF":
                    name = m.group("name")
                    energy = Energy(m.group("energy"), "Ha").to("eV")
                m = detailed_charge_pattern.search(line)
                if m:
                    pop_method = m.group("method").lower()
                    parse_charge = True
                    charges[pop_method] = []
                if nbo_available:
                    if nbo_charge_header is None:
                        # Determine which NBO table header to expect from
                        # the wavefunction type (closed- vs open-shell).
                        m = nbo_wavefunction_type_pattern.search(line)
                        if m:
                            nbo_wavefunction_type = m.group("type")
                            nbo_charge_header_dict = {
                                "closed-shell": "Atom No Charge Core "
                                                "Valence Rydberg Total",
                                "open-shell": "Atom No Charge Core "
                                              "Valence Rydberg Total Density"}
                            nbo_charge_header = nbo_charge_header_dict[nbo_wavefunction_type]
                        continue
                    if nbo_charge_header in line:
                        pop_method = "nbo"
                        parse_nbo_charge = True
                        charges[pop_method] = []
                if "N A T U R A L B O N D O R B I T A L A N A L Y S I S" in line:
                    nbo_available = True
                if name and energy:
                    energies.append(tuple([name, energy]))
                if "User input:" in line:
                    parse_input = True
                elif "Standard Nuclear Orientation (Angstroms)" in line:
                    parse_coords = True
                elif "Performing Hirshfeld population analysis" in line:
                    hiershfiled_pop = True
                elif "Hirshfeld: atomic densities completed" in line:
                    hiershfiled_pop = False
                elif ("Cycle Energy DIIS Error" in line
                      or "Cycle Energy RMS Gradient" in line)\
                        and not hiershfiled_pop:
                    parse_scf_iter = True
                    scf_iters.append([])
                    scf_successful = False
                elif "Gradient of SCF Energy" in line:
                    parse_gradient = True
                    gradients.append({"gradients": []})
                elif "VIBRATIONAL ANALYSIS" in line:
                    parse_freq = True
                elif "Alpha MOs" in line:
                    parse_alpha_homo = True
                    parse_alpha_lumo = False
                elif "Beta MOs" in line:
                    parse_beta_homo = True
                    parse_beta_lumo = False
                elif "Thank you very much for using Q-Chem." in line:
                    properly_terminated = True
                elif "OPTIMIZATION CONVERGED" in line:
                    opt_successful = True
        # --- post-scan fix-ups and sanity checks ---
        if charge is None:
            errors.append("Molecular charge is not found")
        elif spin_multiplicity is None:
            errors.append("Molecular spin multipilicity is not found")
        else:
            for mol in molecules:
                # Ghost-atom runs keep charge/spin on the task, not the mol.
                if qctask is None or qctask.ghost_atoms is None:
                    mol.set_charge_and_spin(charge, spin_multiplicity)
        for k in thermal_corr.keys():
            # Convert kcal/mol (entropies are cal/mol.K) to eV.
            v = thermal_corr[k]
            if "Entropy" in k:
                v *= cls.kcal_per_mol_2_eV * 1.0E-3
            else:
                v *= cls.kcal_per_mol_2_eV
            thermal_corr[k] = v
        solvent_method = "NA"
        if qctask:
            if "solvent_method" in qctask.params["rem"]:
                solvent_method = qctask.params["rem"]["solvent_method"]
        else:
            errors.append("No input text")
        if not scf_successful:
            if 'Bad SCF convergence' not in errors:
                errors.append('Bad SCF convergence')
        if jobtype == 'opt':
            if not opt_successful:
                if 'Geometry optimization failed' not in errors:
                    errors.append('Geometry optimization failed')
        if len(errors) == 0:
            # Only trust a clean run if the expected success markers exist.
            for text in cls._expected_successful_pattern(qctask):
                success_pattern = re.compile(text)
                if not success_pattern.search(output):
                    errors.append("Can't find text to indicate success")
        data = {
            "jobtype": jobtype,
            "energies": energies,
            "HOMO/LUMOs": homo_lumo,
            "bsse": bsse,
            'charges': charges,
            "corrections": thermal_corr,
            "molecules": molecules,
            "errors": errors,
            "has_error": len(errors) > 0,
            "frequencies": freqs,
            "gradients": gradients,
            "input": qctask,
            "gracefully_terminated": properly_terminated,
            "scf_iteration_energies": scf_iters,
            "solvent_method": solvent_method
        }
        return data
| aykol/pymatgen | pymatgen/io/qchem.py | Python | mit | 82,167 | [
"Gaussian",
"Q-Chem",
"pymatgen"
] | 2bd97a7965fb59251d8c960b69c1e6cd8ee1c214747a0dfc4a15d170eb69bd58 |
# Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
r"""Tools for mimicing the API of the Common Data Model (CDM).
The CDM is a data model for representing a wide array of data. The
goal is to be a simple, universal interface to different datasets. This API is a Python
implementation in the spirit of the original Java interface in netCDF-Java.
"""
from collections import OrderedDict
import numpy as np
class AttributeContainer(object):
r"""Handle maintaining a list of netCDF attributes.
Implements the attribute handling for other CDM classes.
"""
def __init__(self):
r"""Initialize an :class:`AttributeContainer`."""
self._attrs = []
def ncattrs(self):
r"""Get a list of the names of the netCDF attributes.
Returns
-------
List[str]
"""
return self._attrs
def __setattr__(self, key, value):
"""Handle setting attributes."""
if hasattr(self, '_attrs'):
self._attrs.append(key)
self.__dict__[key] = value
def __delattr__(self, item):
"""Handle attribute deletion."""
self.__dict__.pop(item)
if hasattr(self, '_attrs'):
self._attrs.remove(item)
class Group(AttributeContainer):
r"""Holds dimensions and variables.
Every CDM dataset has at least a root group.
"""
def __init__(self, parent, name):
r"""Initialize this :class:`Group`.
Instead of constructing a :class:`Group` directly, you should use
:meth:`~Group.createGroup`.
Parameters
----------
parent : Group or None
The parent Group for this one. Passing in :data:`None` implies that this is
the root :class:`Group`.
name : str
The name of this group
See Also
--------
Group.createGroup
"""
self.parent = parent
if parent:
self.parent.groups[name] = self
#: :desc: The name of the :class:`Group`
#: :type: str
self.name = name
#: :desc: Any Groups nested within this one
#: :type: dict[str, Group]
self.groups = OrderedDict()
#: :desc: Variables contained within this group
#: :type: dict[str, Variable]
self.variables = OrderedDict()
#: :desc: Dimensions contained within this group
#: :type: dict[str, Dimension]
self.dimensions = OrderedDict()
# Do this last so earlier attributes aren't captured
super(Group, self).__init__()
# CamelCase API names for netcdf4-python compatibility
def createGroup(self, name): # noqa: N802
"""Create a new Group as a descendant of this one.
Parameters
----------
name : str
The name of the new Group.
Returns
-------
Group
The newly created :class:`Group`
"""
grp = Group(self, name)
self.groups[name] = grp
return grp
def createDimension(self, name, size): # noqa: N802
"""Create a new :class:`Dimension` in this :class:`Group`.
Parameters
----------
name : str
The name of the new Dimension.
size : int
The size of the Dimension
Returns
-------
Dimension
The newly created :class:`Dimension`
"""
dim = Dimension(self, name, size)
self.dimensions[name] = dim
return dim
def createVariable(self, name, datatype, dimensions=(), fill_value=None, # noqa: N802
wrap_array=None):
"""Create a new Variable in this Group.
Parameters
----------
name : str
The name of the new Variable.
datatype : str or numpy.dtype
A valid Numpy dtype that describes the layout of the data within the Variable.
dimensions : tuple[str], optional
The dimensions of this Variable. Defaults to empty, which implies a scalar
variable.
fill_value : number, optional
A scalar value that is used to fill the created storage. Defaults to None, which
performs no filling, leaving the storage uninitialized.
wrap_array : numpy.ndarray, optional
Instead of creating an array, the Variable instance will assume ownership of the
passed in array as its data storage. This is a performance optimization to avoid
copying large data blocks. Defaults to None, which means a new array will be
created.
Returns
-------
Variable
The newly created :class:`Variable`
"""
var = Variable(self, name, datatype, dimensions, fill_value, wrap_array)
self.variables[name] = var
return var
def __str__(self):
"""Return a string representation of the Group."""
print_groups = []
if self.name:
print_groups.append(self.name)
if self.groups:
print_groups.append('Groups:')
for group in self.groups.values():
print_groups.append(str(group))
if self.dimensions:
print_groups.append('\nDimensions:')
for dim in self.dimensions.values():
print_groups.append(str(dim))
if self.variables:
print_groups.append('\nVariables:')
for var in self.variables.values():
print_groups.append(str(var))
if self.ncattrs():
print_groups.append('\nAttributes:')
for att in self.ncattrs():
print_groups.append('\t{0}: {1}'.format(att, getattr(self, att)))
return '\n'.join(print_groups)
class Dataset(Group):
r"""Represents a set of data using the Common Data Model (CDM).
This is currently only a wrapper around the root Group.
"""
def __init__(self):
"""Initialize a Dataset."""
super(Dataset, self).__init__(None, 'root')
class Variable(AttributeContainer):
r"""Holds typed data (using a :class:`numpy.ndarray`), as well as attributes (e.g. units).
In addition to its various attributes, the Variable supports getting *and* setting data
using the ``[]`` operator and indices or slices. Getting data returns
:class:`numpy.ndarray` instances.
"""
def __init__(self, group, name, datatype, dimensions, fill_value, wrap_array):
"""Initialize a Variable.
Instead of constructing a Variable directly, you should use
:meth:`Group.createVariable`.
Parameters
----------
group : Group
The parent :class:`Group` that owns this Variable.
name : str
The name of this Variable.
datatype : str or numpy.dtype
A valid Numpy dtype that describes the layout of each element of the data
dimensions : tuple[str], optional
The dimensions of this Variable. Defaults to empty, which implies a scalar
variable.
fill_value : scalar, optional
A scalar value that is used to fill the created storage. Defaults to None, which
performs no filling, leaving the storage uninitialized.
wrap_array : numpy.ndarray, optional
Instead of creating an array, the Variable instance will assume ownership of the
passed in array as its data storage. This is a performance optimization to avoid
copying large data blocks. Defaults to None, which means a new array will be
created.
See Also
--------
Group.createVariable
"""
# Initialize internal vars
self._group = group
self._name = name
self._dimensions = tuple(dimensions)
# Set the storage--create/wrap as necessary
shape = tuple(len(group.dimensions.get(d)) for d in dimensions)
if wrap_array is not None:
if shape != wrap_array.shape:
raise ValueError('Array to wrap does not match dimensions.')
self._data = wrap_array
else:
self._data = np.empty(shape, dtype=datatype)
if fill_value is not None:
self._data.fill(fill_value)
# Do this last so earlier attributes aren't captured
super(Variable, self).__init__()
# Not a property to maintain compatibility with NetCDF4 python
def group(self):
"""Get the Group that owns this Variable.
Returns
-------
Group
The parent Group.
"""
return self._group
@property
def name(self):
"""str: the name of the variable."""
return self._name
@property
def size(self):
"""int: the total number of elements."""
return self._data.size
@property
def shape(self):
"""tuple[int]: Describes the size of the Variable along each of its dimensions."""
return self._data.shape
@property
def ndim(self):
"""int: the number of dimensions used by this variable."""
return self._data.ndim
@property
def dtype(self):
"""numpy.dtype: Describes the layout of each element of the data."""
return self._data.dtype
@property
def datatype(self):
"""numpy.dtype: Describes the layout of each element of the data."""
return self._data.dtype
@property
def dimensions(self):
"""tuple[str]: all the names of :class:`Dimension` used by this :class:`Variable`."""
return self._dimensions
def __setitem__(self, ind, value):
"""Handle setting values on the Variable."""
self._data[ind] = value
def __getitem__(self, ind):
"""Handle getting values from the Variable."""
return self._data[ind]
def __str__(self):
"""Return a string representation of the Variable."""
groups = [str(type(self)) +
': {0.datatype} {0.name}({1})'.format(self, ', '.join(self.dimensions))]
for att in self.ncattrs():
groups.append('\t{0}: {1}'.format(att, getattr(self, att)))
if self.ndim:
if self.ndim > 1:
shape_str = str(self.shape)
else:
shape_str = str(self.shape[0])
groups.append('\tshape = ' + shape_str)
return '\n'.join(groups)
# Punting on unlimited dimensions for now since we're relying upon numpy for storage
# We don't intend to be a full file API or anything, just need to be able to represent
# other files using a common API.
class Dimension(object):
r"""Represent a shared dimension between different Variables.
For instance, variables that are dependent upon a common set of times.
"""
def __init__(self, group, name, size=None):
"""Initialize a Dimension.
Instead of constructing a Dimension directly, you should use ``Group.createDimension``.
Parameters
----------
group : Group
The parent Group that owns this Variable.
name : str
The name of this Variable.
size : int or None, optional
The size of the Dimension. Defaults to None, which implies an empty dimension.
See Also
--------
Group.createDimension
"""
self._group = group
#: :desc: The name of the Dimension
#: :type: str
self.name = name
#: :desc: The size of this Dimension
#: :type: int
self.size = size
# Not a property to maintain compatibility with NetCDF4 python
def group(self):
"""Get the Group that owns this Dimension.
Returns
-------
Group
The parent Group.
"""
return self._group
def __len__(self):
"""Return the length of this Dimension."""
return self.size
def __str__(self):
"""Return a string representation of this Dimension."""
return '{0}: name = {1.name}, size = {1.size}'.format(type(self), self)
# Not sure if this lives long-term or not
def cf_to_proj(var):
r"""Convert a Variable with projection information to a Proj.4 Projection instance.
The attributes of this Variable must conform to the Climate and Forecasting (CF)
netCDF conventions.
Parameters
----------
var : Variable
The projection variable with appropriate attributes.
"""
import pyproj
kwargs = dict(lat_0=var.latitude_of_projection_origin,
a=var.earth_radius, b=var.earth_radius)
if var.grid_mapping_name == 'lambert_conformal_conic':
kwargs['proj'] = 'lcc'
kwargs['lon_0'] = var.longitude_of_central_meridian
kwargs['lat_1'] = var.standard_parallel
kwargs['lat_2'] = var.standard_parallel
elif var.grid_mapping_name == 'polar_stereographic':
kwargs['proj'] = 'stere'
kwargs['lon_0'] = var.straight_vertical_longitude_from_pole
kwargs['lat_0'] = var.latitude_of_projection_origin
kwargs['lat_ts'] = var.standard_parallel
kwargs['x_0'] = False # Easting
kwargs['y_0'] = False # Northing
elif var.grid_mapping_name == 'mercator':
kwargs['proj'] = 'merc'
kwargs['lon_0'] = var.longitude_of_projection_origin
kwargs['lat_ts'] = var.standard_parallel
kwargs['x_0'] = False # Easting
kwargs['y_0'] = False # Northing
return pyproj.Proj(**kwargs)
| ahill818/MetPy | metpy/io/cdm.py | Python | bsd-3-clause | 13,610 | [
"NetCDF"
] | 53242a0df34be8e7698c2f12c959c64e579bb5a48e57b4825eda73a2b41ce738 |
#!/usr/bin/python
'''
Create Video Statistics
'''
import os, sys
import csv
import re
import json
import gsutil
import bqutil
import datetime
import process_tracking_logs
from path import Path as path
from collections import OrderedDict
from collections import defaultdict
from check_schema_tracking_log import schema2dict, check_schema
from load_course_sql import find_course_sql_dir, openfile
from unidecode import unidecode
import re
from time import sleep
import urllib2
import json
import os
import datetime
import gzip
#-----------------------------------------------------------------------------
# CONSTANTS
#-----------------------------------------------------------------------------
VIDEO_LENGTH = 'video_length'
VIDEO_ID = 'youtube_id'
YOUTUBE_PARTS = "contentDetails,statistics"
MIN_IN_SECS = 60
HOURS_IN_SECS = MIN_IN_SECS * 60
DAYS_IN_SECS = HOURS_IN_SECS * 24
WEEKS_IN_SECS = DAYS_IN_SECS * 7
MONTHS_IN_SECS = WEEKS_IN_SECS * 4
YEAR_IN_SECS = MONTHS_IN_SECS * 12
TABLE_VIDEO_STATS = 'video_stats'
TABLE_VIDEO_STATS_PER_DAY = 'video_stats_day'
TABLE_VIDEO_AXIS = 'video_axis'
TABLE_COURSE_AXIS = 'course_axis'
TABLE_PERSON_COURSE_VIDEO_WATCHED = "person_course_video_watched"
FILENAME_VIDEO_AXIS = TABLE_VIDEO_AXIS + ".json.gz"
SCHEMA_VIDEO_AXIS = 'schemas/schema_video_axis.json'
SCHEMA_VIDEO_AXIS_NAME = 'video_axis'
DATE_DEFAULT_START = '20120101'
DATE_DEFAULT_END = datetime.datetime.today().strftime("%Y%m%d")
DATE_DEFAULT_END_NEW = datetime.datetime.today().strftime("%Y-%m-%d")
#-----------------------------------------------------------------------------
# METHODS
#-----------------------------------------------------------------------------
def analyze_videos(course_id, api_key=None, basedir=None,
datedir=None, force_recompute=False,
use_dataset_latest=False,
use_latest_sql_dir=False,
):
make_video_stats(course_id, api_key, basedir, datedir, force_recompute, use_dataset_latest, use_latest_sql_dir)
pass # Add new video stat methods here
def make_video_stats(course_id, api_key, basedir, datedir, force_recompute, use_dataset_latest, use_latest_sql_dir):
'''
Create Video stats for Videos Viewed and Videos Watched.
First create a video axis, based on course axis. Then use tracking logs to count up videos viewed and videos watched
'''
assert api_key is not None, "[analyze videos]: Public API Key is missing from configuration file. Visit https://developers.google.com/console/help/new/#generatingdevkeys for details on how to generate public key, and then add to edx2bigquery_config.py as API_KEY variable"
# Get Course Dir path
basedir = path(basedir or '')
course_dir = course_id.replace('/','__')
lfp = find_course_sql_dir(course_id, basedir, datedir, use_dataset_latest or use_latest_sql_dir)
# get schema
mypath = os.path.dirname(os.path.realpath(__file__))
SCHEMA_FILE = '%s/%s' % ( mypath, SCHEMA_VIDEO_AXIS )
the_schema = json.loads(open(SCHEMA_FILE).read())[ SCHEMA_VIDEO_AXIS_NAME ]
the_dict_schema = schema2dict(the_schema)
# Create initial video axis
videoAxisExists = False
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
va_date = None
try:
tinfo = bqutil.get_bq_table_info(dataset, TABLE_VIDEO_AXIS )
assert tinfo is not None, "[analyze videos] %s.%s does not exist. First time creating table" % ( dataset, TABLE_VIDEO_AXIS )
videoAxisExists = True
va_date = tinfo['lastModifiedTime'] # datetime
except (AssertionError, Exception) as err:
print "%s --> Attempting to process %s table" % ( str(err), TABLE_VIDEO_AXIS )
sys.stdout.flush()
# get course axis time
ca_date = None
try:
tinfo = bqutil.get_bq_table_info(dataset, TABLE_COURSE_AXIS )
ca_date = tinfo['lastModifiedTime'] # datetime
except (AssertionError, Exception) as err:
pass
if videoAxisExists and (not force_recompute) and ca_date and va_date and (ca_date > va_date):
force_recompute = True
print "video_axis exists, but has date %s, older than course_axis date %s; forcing recompute" % (va_date, ca_date)
sys.stdout.flush()
if not videoAxisExists or force_recompute:
force_recompute = True
createVideoAxis(course_id=course_id, force_recompute=force_recompute, use_dataset_latest=use_dataset_latest)
# Get video lengths
va = bqutil.get_table_data(dataset, TABLE_VIDEO_AXIS)
assert va is not None, "[analyze videos] Possibly no data in video axis table. Check course axis table"
va_bqdata = va['data']
fileoutput = lfp / FILENAME_VIDEO_AXIS
getYoutubeDurations( dataset=dataset, bq_table_input=va_bqdata, api_key=api_key, outputfilename=fileoutput, schema=the_dict_schema, force_recompute=force_recompute )
# upload and import video axis
gsfn = gsutil.gs_path_from_course_id(course_id, use_dataset_latest=use_dataset_latest) / FILENAME_VIDEO_AXIS
gsutil.upload_file_to_gs(fileoutput, gsfn)
table = TABLE_VIDEO_AXIS
bqutil.load_data_to_table(dataset, table, gsfn, the_schema, wait=True)
else:
print "[analyze videos] %s.%s already exists (and force recompute not specified). Skipping step to generate %s using latest course axis" % ( dataset, TABLE_VIDEO_AXIS, TABLE_VIDEO_AXIS )
# Lastly, create video stats
createVideoStats_day( course_id, force_recompute=force_recompute, use_dataset_latest=use_dataset_latest )
createVideoStats( course_id, force_recompute=force_recompute, use_dataset_latest=use_dataset_latest )
# also create person_course_video_watched
createPersonCourseVideo( course_id, force_recompute=force_recompute, use_dataset_latest=use_dataset_latest )
#-----------------------------------------------------------------------------
def createVideoAxis(course_id, force_recompute=False, use_dataset_latest=False):
'''
Video axis depends on the current course axis, and looks for the category field defines as video.
In addition, the edx video id is extracted (with the full path stripped, in order to generalize tracking log searches for video ids where it
was found that some courses contained the full path beginning with i4x, while other courses only had the edx video id), youtube id
and the chapter name / index for that respective video
'''
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
table = TABLE_VIDEO_AXIS
# Get Video results
the_sql = """
SELECT chapters.index as index_chapter,
videos.index as index_video,
videos.category as category,
videos.course_id as course_id,
videos.name as name,
videos.vid_id as video_id,
videos.yt_id as youtube_id,
chapters.name as chapter_name
FROM ( SELECT index, category, course_id, name, chapter_mid,
#REGEXP_REPLACE(module_id, '[.]', '_') as vid_id, # vid id containing full path
REGEXP_EXTRACT(REGEXP_REPLACE(module_id, '[.]', '_'), r'(?:.*\/)(.*)') as vid_id, # Only containing video id
REGEXP_EXTRACT(data.ytid, r'\:(.*)') as yt_id,
FROM [{dataset}.course_axis]
WHERE category = "video") as videos
LEFT JOIN
( SELECT name, module_id, index
FROM [{dataset}.course_axis]
) as chapters
ON videos.chapter_mid = chapters.module_id
ORDER BY videos.index asc
""".format(dataset=dataset)
print "[analyze_videos] Creating %s.%s table for %s" % (dataset, TABLE_VIDEO_AXIS, course_id)
sys.stdout.flush()
try:
tinfo = bqutil.get_bq_table_info(dataset, TABLE_COURSE_AXIS )
assert tinfo is not None, "[analyze videos] %s table depends on %s, which does not exist" % ( TABLE_VIDEO_AXIS, TABLE_COURSE_AXIS )
except (AssertionError, Exception) as err:
print " --> Err: missing %s.%s? Skipping creation of %s" % ( dataset, TABLE_COURSE_AXIS, TABLE_VIDEO_AXIS )
sys.stdout.flush()
return
bqdat = bqutil.get_bq_table(dataset, table, the_sql, force_query=force_recompute,
depends_on=["%s.course_axis" % (dataset)],
)
return bqdat
#-----------------------------------------------------------------------------
def createVideoStats_day( course_id, force_recompute=False, use_dataset_latest=False, skip_last_day=False, end_date=None):
'''
Create video statistics per ay for viewed by looking for users who had a video position > 0, and watched by looking for users who had a video
position > 95% of the total video length duration.
'''
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
logs = bqutil.course_id2dataset(course_id, dtype='logs')
table = TABLE_VIDEO_STATS_PER_DAY
the_sql = """
SELECT date(time)as date, username,
#module_id as video_id,
#REGEXP_REPLACE(REGEXP_EXTRACT(JSON_EXTRACT(event, '$.id'), r'(?:i4x-)(.*)(?:"$)'), '-', '/') as video_id, # Old method takes full video id path
(case when REGEXP_MATCH( JSON_EXTRACT(event, '$.id') , r'([-])' ) then REGEXP_EXTRACT(REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(JSON_EXTRACT(event, '$.id'), '-', '/'), '"', ''), 'i4x/', ''), r'(?:.*\/)(.*)') else REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(JSON_EXTRACT(event, '$.id'), '-', '/'), '"', ''), 'i4x/', '') end) as video_id, # This takes video id only
max(case when JSON_EXTRACT_SCALAR(event, '$.speed') is not null then float(JSON_EXTRACT_SCALAR(event,'$.speed'))*float(JSON_EXTRACT_SCALAR(event, '$.currentTime')) else float(JSON_EXTRACT_SCALAR(event, '$.currentTime')) end) as position,
FROM {DATASETS}
WHERE (event_type = "play_video" or event_type = "pause_video" or event_type = "stop_video") and
event is not null
group by username, video_id, date
order by date
"""
try:
tinfo = bqutil.get_bq_table_info(dataset, TABLE_VIDEO_STATS_PER_DAY )
assert tinfo is not None, "[analyze_videos] Creating %s.%s table for %s" % (dataset, TABLE_VIDEO_STATS_PER_DAY, course_id)
print "[analyze_videos] Appending latest data to %s.%s table for %s" % (dataset, TABLE_VIDEO_STATS_PER_DAY, course_id)
sys.stdout.flush()
except (AssertionError, Exception) as err:
print str(err)
sys.stdout.flush()
print " --> Missing %s.%s? Attempting to create..." % ( dataset, TABLE_VIDEO_STATS_PER_DAY )
sys.stdout.flush()
pass
print "=== Processing Video Stats Per Day for %s (start %s)" % (course_id, datetime.datetime.now())
sys.stdout.flush()
def gdf(row):
return datetime.datetime.strptime(row['date'], '%Y-%m-%d')
process_tracking_logs.run_query_on_tracking_logs(the_sql, table, course_id, force_recompute=force_recompute,
use_dataset_latest=use_dataset_latest,
get_date_function=gdf,
skip_last_day=skip_last_day)
print "Done with Video Stats Per Day for %s (end %s)" % (course_id, datetime.datetime.now())
print "="*77
sys.stdout.flush()
#-----------------------------------------------------------------------------
def createVideoStats( course_id, force_recompute=False, use_dataset_latest=False ):
'''
Final step for video stats is to run through daily video stats table and aggregate for entire course for videos watch and videos viewed
Join results with video axis to get detailed metadata per video for dashboard data
'''
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
logs = bqutil.course_id2dataset(course_id, dtype='logs')
table = TABLE_VIDEO_STATS
the_sql = """
SELECT index_chapter,
index_video,
name,
video_id,
chapter_name,
sum(case when position > 0 then 1 else 0 end) as videos_viewed,
sum(case when position > video_length*0.95 then 1 else 0 end) as videos_watched,
FROM (
SELECT username, index_chapter,
index_video,
name,
video_id,
chapter_name,
max(position) as position,
video_length,
FROM (SELECT * FROM [{dataset}.{videostatsperday}]) as video_log,
LEFT JOIN EACH
(SELECT video_length,
video_id as vid_id,
name,
index_video,
index_chapter,
chapter_name
FROM [{dataset}.{videoaxis}]
) as video_axis
ON video_log.video_id = video_axis.vid_id
WHERE video_id is not null and username is not null
group by username, video_id, name, index_chapter, index_video, chapter_name, video_length
order by video_id asc)
GROUP BY video_id, index_chapter, index_video, name, chapter_name
ORDER BY index_video asc;
""".format(dataset=dataset, videoaxis=TABLE_VIDEO_AXIS, videostatsperday=TABLE_VIDEO_STATS_PER_DAY)
print "[analyze_videos] Creating %s.%s table for %s" % (dataset, TABLE_VIDEO_STATS, course_id)
sys.stdout.flush()
try:
tinfo_va = bqutil.get_bq_table_info( dataset, TABLE_VIDEO_AXIS )
trows_va = int(tinfo_va['numRows'])
tinfo_va_day = bqutil.get_bq_table_info( dataset, TABLE_VIDEO_STATS_PER_DAY )
trows_va_day = int(tinfo_va['numRows'])
assert tinfo_va is not None and trows_va != 0, "[analyze videos] %s table depends on %s, which does not exist" % ( TABLE_VIDEO_STATS, TABLE_VIDEO_AXIS )
assert tinfo_va_day is not None and trows_va_day != 0, "[analyze videos] %s table depends on %s, which does not exist" % ( TABLE_VIDEO_STATS, TABLE_VIDEO_STATS_PER_DAY )
except (AssertionError, Exception) as err:
print " --> Err: missing %s.%s and/or %s (including 0 rows in table)? Skipping creation of %s" % ( dataset, TABLE_VIDEO_AXIS, TABLE_VIDEO_STATS_PER_DAY, TABLE_VIDEO_STATS )
sys.stdout.flush()
return
bqdat = bqutil.get_bq_table(dataset, table, the_sql, force_query=force_recompute,
depends_on=["%s.%s" % (dataset, TABLE_VIDEO_AXIS)],
)
return bqdat
#-----------------------------------------------------------------------------
def createPersonCourseVideo( course_id, force_recompute=False, use_dataset_latest=False ):
'''
Create the person_course_video_watched table, based on video_stats.
Each row gives the number of unique videos watched by a given user, for the given course.
'''
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
table = TABLE_PERSON_COURSE_VIDEO_WATCHED
the_sql = """
SELECT user_id,
"{course_id}" as course_id,
count(*) n_unique_videos_watched,
count(*) / n_total_videos as fract_total_videos_watched,
viewed, certified, verified
FROM
(
SELECT PC.user_id as user_id, UV.username as username,
video_id,
n_views,
NV.n_total_videos as n_total_videos,
certified,
viewed,
(mode=="verified") as verified,
FROM
(
SELECT username, video_id, count(*) as n_views
FROM [{dataset}.video_stats_day]
GROUP BY username, video_id
) UV
JOIN [{dataset}.person_course] PC
on UV.username = PC.username
CROSS JOIN
(
SELECT count(*) as n_total_videos
FROM [{dataset}.video_axis]
) NV
WHERE ((PC.roles = 'Student') OR (PC.roles is NULL)) # accommodate case when roles.csv is missing
# WHERE PC.roles = 'Student'
)
GROUP BY user_id, certified, viewed, verified, n_total_videos
order by user_id
"""
the_sql = the_sql.format(course_id=course_id, dataset=dataset)
bqdat = bqutil.get_bq_table(dataset, table, the_sql, force_query=force_recompute,
depends_on=["%s.%s" % (dataset, TABLE_VIDEO_STATS)],
newer_than=datetime.datetime( 2017, 2, 6, 18, 30 ),
startIndex=-2)
if not bqdat:
nfound = 0
else:
nfound = bqutil.get_bq_table_size_rows(dataset, table)
print "--> Done with %s for %s, %d entries found" % (table, course_id, nfound)
sys.stdout.flush()
return bqdat
#-----------------------------------------------------------------------------
def createVideoStats_obsolete( course_id, force_recompute=False, use_dataset_latest=False, startDate=DATE_DEFAULT_START, endDate=DATE_DEFAULT_END ):
'''
Create video statistics for viewed by looking for users who had a video position > 0, and watched by looking for users who had a video
position > 95% of the total video length duration.
This was the original method used, but is not the most efficient since it queries entire log set. Instead, generate video stats per day, then incrementally
append to that data table as the daily log data comes in.
'''
dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)
logs = bqutil.course_id2dataset(course_id, dtype='logs')
table = TABLE_VIDEO_STATS
the_sql = """
SELECT index_chapter,
index_video,
name,
video_id,
chapter_name,
sum(case when position > 0 then 1 else 0 end) as videos_viewed,
sum(case when position > video_length*0.95 then 1 else 0 end) as videos_watched,
FROM (SELECT username,
#module_id as video_id,
#REGEXP_REPLACE(REGEXP_EXTRACT(JSON_EXTRACT(event, '$.id'), r'(?:i4x-)(.*)(?:"$)'), '-', '/') as video_id, # Old method takes full video id path
(case when REGEXP_MATCH( JSON_EXTRACT(event, '$.id') , r'[-]' ) then REGEXP_EXTRACT(REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(JSON_EXTRACT(event, '$.id'), '-', '/'), '"', ''), 'i4x/', ''), r'(?:.*\/)(.*)') else REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(JSON_EXTRACT(event, '$.id'), '-', '/'), '"', ''), 'i4x/', '') end) as video_id, # This takes video id only
max(case when JSON_EXTRACT_SCALAR(event, '$.speed') is not null then float(JSON_EXTRACT_SCALAR(event,'$.speed'))*float(JSON_EXTRACT_SCALAR(event, '$.currentTime')) else float(JSON_EXTRACT_SCALAR(event, '$.currentTime')) end) as position,
FROM (TABLE_QUERY({logs},
"integer(regexp_extract(table_id, r'tracklog_([0-9]+)')) BETWEEN {start_date} and {end_date}"))
WHERE (event_type = "play_video" or event_type = "pause_video" or event_type = "stop_video") and
event is not null
group by username, video_id
order by username, video_id) as video_log,
LEFT JOIN EACH
(SELECT video_length,
video_id as vid_id,
name,
index_video,
index_chapter,
chapter_name
FROM [{dataset}.{videoaxis}]
) as {videoaxis}
ON video_log.video_id = {videoaxis}.vid_id
WHERE video_id is not null
group by video_id, name, index_chapter, index_video, chapter_name
order by index_video asc;
""".format(dataset=dataset,start_date=startDate,end_date=endDate,logs=logs, videoaxis=TABLE_VIDEO_AXIS)
print "[analyze_videos] Creating %s.%s table for %s" % (dataset, TABLE_VIDEO_STATS, course_id)
sys.stdout.flush()
try:
tinfo = bqutil.get_bq_table_info(dataset, TABLE_VIDEO_AXIS )
assert tinfo is not None, "[analyze videos] %s table depends on %s, which does not exist" % ( TABLE_VIDEO_STATS, TABLE_VIDEO_AXIS )
except (AssertionError, Exception) as err:
print " --> Err: missing %s.%s? Skipping creation of %s" % ( dataset, TABLE_VIDEO_AXIS, TABLE_VIDEO_STATS )
sys.stdout.flush()
return
bqdat = bqutil.get_bq_table(dataset, table, the_sql, force_query=force_recompute,
depends_on=["%s.%s" % (dataset, TABLE_VIDEO_AXIS)],
)
return bqdat
#-----------------------------------------------------------------------------
def get_youtube_api_stats(youtube_id, api_key, part, delay_secs=0):
'''
Youtube video duration lookup, using specified API_KEY from configuration file
Visit https://developers.google.com/console/help/new/#generatingdevkeys for details on how to generate public key
'''
if youtube_id is '': return None
sleep(delay_secs)
try:
assert api_key is not None, "[analyze videos] Public API Key is missing from configuration file."
#url = "http://gdata.youtube.com/feeds/api/videos/" + youtube_id + "?v=2&alt=jsonc" # Version 2 API has been deprecated
url = "https://www.googleapis.com/youtube/v3/videos?part=" + part + "&id=" + youtube_id + "&key=" + api_key # Version 3.0 API
data = urllib2.urlopen(url).read().decode("utf-8")
except (AssertionError, Exception) as err:
error = str(err)
if "504" in error or "403" in error:
# rate-limit issue: try again with double timeout
if delay_secs > MIN_IN_SECS:
print "[Giving up] %s\n%s" % (youtube_id, url)
return None, None
new_delay = max(1.0, delay_secs * 2.0)
print "[Rate-limit] <%s> - Trying again with delay: %s" % (youtube_id, str(new_delay))
return get_youtube_api_stats(youtube_id=youtube_id, api_key=api_key, delay_secs=new_delay)
else:
print "[Error] <%s> - Unable to get duration.\n%s" % (youtube_id, url)
raise
d = json.loads(data)
contentDetails = d['items'][0]['contentDetails']
statistics = d['items'][0]['statistics']
return contentDetails, statistics
#-----------------------------------------------------------------------------
def parseISOduration(isodata):
'''
Parses time duration for video length
'''
# see http://en.wikipedia.org/wiki/ISO_8601#Durations
ISO_8601_period_rx = re.compile(
'P' # designates a period
'(?:(?P<years>\d+)Y)?' # years
'(?:(?P<months>\d+)M)?' # months
'(?:(?P<weeks>\d+)W)?' # weeks
'(?:(?P<days>\d+)D)?' # days
'(?:T' # time part must begin with a T
'(?:(?P<hours>\d+)H)?' # hourss
'(?:(?P<minutes>\d+)M)?' # minutes
'(?:(?P<seconds>\d+)S)?' # seconds
')?' # end of time part
)
parsedISOdata = ISO_8601_period_rx.match(isodata).groupdict()
return parsedISOdata
#-----------------------------------------------------------------------------
def getTotalTimeSecs(data):
'''
Convert parsed time duration dict into seconds
'''
sec = 0
for timeData in data:
if data[timeData] is not None:
if timeData == 'years':
sec = sec + int(data[timeData])*YEAR_IN_SECS
if timeData == 'months':
sec = sec + int(data[timeData])*MONTHS_IN_SECS
if timeData == 'weeks':
sec = sec + int(data[timeData])*WEEKS_IN_SECS
if timeData == 'hours':
sec = sec + int(data[timeData])*HOURS_IN_SECS
if timeData == 'minutes':
sec = sec + int(data[timeData])*MIN_IN_SECS
if timeData == 'seconds':
sec = sec + int(data[timeData])
return sec
#-----------------------------------------------------------------------------
def findVideoLength(dataset, youtube_id, api_key=None):
'''
Handle video length lookup
'''
try:
youtube_id = unidecode(youtube_id)
except Exception as err:
print "youtube_id is not ascii? ytid=", youtube_id
return 0
try:
assert youtube_id is not None, "[analyze videos] youtube id does not exist"
content, stats = get_youtube_api_stats(youtube_id=youtube_id, api_key=api_key, part=YOUTUBE_PARTS)
durationDict = parseISOduration(content['duration'].encode("ascii","ignore"))
length = getTotalTimeSecs(durationDict)
print "[analyze videos] totalTime for youtube video %s is %s sec" % (youtube_id, length)
except (AssertionError, Exception) as err:
print "Failed to lookup video length for %s! Error=%s, data=%s" % (youtube_id, err, dataset)
length = 0
return length
#-----------------------------------------------------------------------------
def openfile(fn, mode='r'):
    '''
    Open a file, transparently handling gzip compression.

    If fn does not exist and has no .gz suffix, a gzipped sibling
    (fn + '.gz') is tried instead. Names ending in .gz are opened via
    gzip. Returns None when opening for read and no file is found.
    '''
    if not fn.endswith('.gz') and not os.path.exists(fn):
        fn = fn + '.gz'
    if mode == 'r' and not os.path.exists(fn):
        # no readable file under either name
        return None
    opener = gzip.GzipFile if fn.endswith('.gz') else open
    return opener(fn, mode)
#-----------------------------------------------------------------------------
def getYoutubeDurations(dataset, bq_table_input, api_key, outputfilename, schema, force_recompute):
    '''
    Add youtube durations to Video Axis file using youtube id's and then write out to specified local path to prep for google storage / bigquery upload

    dataset: name used for logging and passed through to findVideoLength
    bq_table_input: iterable of row dicts from the source BigQuery table
    api_key: YouTube Data API key for duration lookups
    outputfilename: local output path (gzip handled by openfile)
    schema: dict of allowed field names; rows are filtered to these keys
    force_recompute: when True, re-fetch each video's length from YouTube
    '''
    fp = openfile(outputfilename, 'w')
    linecnt = 0
    for row_dict in bq_table_input:
        linecnt += 1
        verified_row = OrderedDict()
        # Initial pass-through of keys in current row
        for keys in row_dict:
            # Only include keys defined in schema
            if keys in schema.keys():
                verified_row[keys] = row_dict[keys]
        # Recompute Video Length durations
        if force_recompute:
            verified_row[VIDEO_LENGTH] = findVideoLength( dataset=dataset, youtube_id=verified_row[VIDEO_ID], api_key=api_key )
        # Ensure schema type
        check_schema(linecnt, verified_row, the_ds=schema, coerce=True)
        # Best-effort write: a bad row is logged and skipped, not fatal
        try:
            fp.write(json.dumps(verified_row)+'\n')
        except Exception as err:
            print "Failed to write line %s! Error=%s, data=%s" % (linecnt, str(err), dataset)
    fp.close()
#-----------------------------------------------------------------------------
| mitodl/edx2bigquery | edx2bigquery/make_video_analysis.py | Python | gpl-2.0 | 28,500 | [
"VisIt"
] | afa8613cd549298f67c06377e8f3a44e5c0431f8bfdb41e619ef9147954fdcbe |
#~~~~~~~GLOBAL IMPORTS~~~~~~~#
# Standard library packages import
from os import path
# Local library packages import
from pyDNA.Utilities import mkdir, import_seq, file_basename, file_name, file_extension, fgunzip
from BlastnWrapper import Aligner
from MakeblastdbWrapper import NewDB, ExistingDB
#~~~~~~~MAIN METHODS~~~~~~~#
def align (query_list,
    subject_db = None,
    subject_fasta = None,
    aligner = "blastn",
    align_opt = "",
    num_threads = 1,
    db_maker = "makeblastdb",
    db_opt = "",
    db_outdir = "./blast_db/",
    db_outname = "out"):
    """
    Main function of RefMasker that integrates database creation, blast and homology masking
    * Instantiate Blast database and blastn object
    * Perform iterative blasts of query sequences against the subject database and create a list of
    hits.
    @param query_list List of paths indicating fasta files containing query sequences (can be
    gzipped). Fasta can contain multiple sequences.
    @param subject_db Basename of file from a blast database created by "makeblastdb" if available
    @param subject_fasta Reference fasta file. Required if no ref_index is given (can be gzipped)
    @param aligner Path to the blastn executable. Not required if blast+ is added to your path
    @param align_opt Blastn command line options as a string
    @param num_threads Number of threads blastn may use
    @param db_maker Path to the makeblastdb executable. Not required if blast+ is added to your path
    @param db_opt makeblastdb command line options as a string
    @param db_outdir Directory where to store the database files
    @param db_outname Basename of the database files
    @return A list of BlastHit objects
    """
    # Try to import an existing database; exceptions are used as control
    # flow here -- any failure (no db given, invalid db) falls through to
    # building a fresh database from the fasta file.
    try:
        if not subject_db:
            raise Exception("No Blast database was provided")
        print("Existing database provided")
        db = ExistingDB(subject_db)
    # If no DB or if an error occured during validation of the existing DB = create a new db
    except Exception as E:
        print (E)
        # Verify the presence of the reference fasta file
        if not subject_fasta or not path.isfile (subject_fasta):
            raise Exception("Invalid or no fasta file provided. Cannot create a database")
        print ("Generate a database...")
        mkdir(db_outdir)
        db_path = path.join (db_outdir, db_outname)
        # Create the new database
        db = NewDB(ref_path=subject_fasta, db_path=db_path, makeblastdb_opt=db_opt, makeblastdb=db_maker)
    # Initialise a Blastn object
    blast = Aligner(db, align_opt, aligner, num_threads)
    #~print (repr(blast))
    # Generate a list of hit containing hits of all sequence in query list in subject
    hit_list = []
    # Extend the list of hits for each query in a bigger list.
    for query in query_list:
        hit_list.extend(blast.align(query))
    return hit_list
| a-slide/pyDNA | Blast/Blastn.py | Python | gpl-2.0 | 2,949 | [
"BLAST"
] | b0dd40c0c71618993a682bd8d8432a5e49274e460cf80b19d2920a398aba2539 |
from base import *
def prim_equal(p1, p2):
    # Structural equality for primitive types: equal iff both sides are
    # the same primitive constructor (int/float/str/char/bool).
    return match((p1, p2),
        ("(PInt(), PInt())", lambda: True),
        ("(PFloat(), PFloat())", lambda: True),
        ("(PStr(), PStr())", lambda: True),
        ("(PChar(), PChar())", lambda: True),
        ("(PBool(), PBool())", lambda: True),
        ("_", lambda: False))
def array_kinds_equal(k1, k2):
    # Equality for array kinds: GC-managed, boxed, or raw; kinds are
    # equal only when both sides are the same constructor.
    return match((k1, k2),
        ("(AGC(), AGC())", lambda: True),
        ("(ABoxed(), ABoxed())", lambda: True),
        ("(ARaw(), ARaw())", lambda: True),
        ("_", lambda: False))
def _type_tuple_equal(ts1, ts2):
    # Two tuple types match when they have the same arity and their
    # component types are pairwise equal.
    if len(ts1) != len(ts2):
        return False
    return all(type_equal(a, b) for a, b in ezip(ts1, ts2))
def _type_func_equal(as1, r1, m1, as2, r2, m2):
    # Function types match when arity, argument types, result, and
    # metadata all agree (checked in that order, short-circuiting).
    if len(as1) != len(as2):
        return False
    if not all(type_equal(p, q) for p, q in ezip(as1, as2)):
        return False
    return results_equal(r1, r2) and metas_equal(m1, m2)
def _type_data_equal(d1, ts1, d2, ts2):
    # Data/ctor types match only for the *identical* definition object
    # (identity, not structural) with pairwise-equal type applications.
    if d1 is not d2 or len(ts1) != len(ts2):
        return False
    return all(type_equal(a, b) for a, b in ezip(ts1, ts2))
def _type_array_equal(t1, k1, t2, k2):
    # Arrays are equal when both the element type and the array kind agree.
    return type_equal(t1, t2) and array_kinds_equal(k1, k2)
def type_equal(a, b):
    # Structural equality over the whole type AST. Identical objects
    # short-circuit; otherwise dispatch on the pair of constructors.
    if a is b:
        return True
    return match((a, b),
        ("(TVar(a), TVar(b))", lambda a, b: a is b),
        ("(TPrim(a), TPrim(b))", prim_equal),
        ("(TTuple(ts1), TTuple(ts2))", _type_tuple_equal),
        ("(TFunc(args1, r1, m1), TFunc(args2, r2, m2))", _type_func_equal),
        ("(TData(d1, ts1), TData(d2, ts2))", _type_data_equal),
        ("(TCtor(c1, ts1), TCtor(c2, ts2))", _type_data_equal),
        ("(TArray(t1, k1), TArray(t2, k2))", _type_array_equal),
        ("(TWeak(a), TWeak(b))", type_equal),
        ("_", lambda: False))
def results_equal(a, b):
    # Equality for function results: Ret types compare structurally;
    # Void/Bottom only match themselves.
    return match((a, b), ("(Ret(a), Ret(b))", type_equal),
        ("(Void(), Void())", lambda: True),
        ("(Bottom(), Bottom())", lambda: True),
        ("_", lambda: False))
def _get_name(a):
    # Best-effort display name: use the Name extrinsic when present,
    # otherwise fall back to a repr-based placeholder.
    if a and has_extrinsic(Name, a):
        return extrinsic(Name, a)
    try:
        return '<%r?>' % (a,)
    except:
        # repr itself can blow up on malformed objects
        return '<??!>'
# Environment carrying the set of types currently being rendered; used
# by _type_repr to detect and break cycles in recursive type structures.
REPRENV = new_env('REPRENV', set([Type]))
def _meta_type_repr(t, j):
    # Render j on behalf of t; the assert guards against t rendering
    # itself through this path (which would recurse forever).
    assert t is not j
    return _type_repr(j)
def _type_repr(t):
    # Render one type node as a colorized string. The REPRENV set tracks
    # every type currently being rendered further up the call stack so
    # cyclic structures print a placeholder instead of recursing forever.
    seen = env(REPRENV)
    if t in seen:
        return '<cyclic 0x%x>' % id(t)
    seen.add(t)
    rstr = match(t, ("TVar(v)", lambda v: col('Green', _get_name(v))),
        ("TPrim(PInt())", lambda: 'int'),
        ("TPrim(PFloat())", lambda: 'float'),
        ("TPrim(PStr())", lambda: 'str'),
        ("TPrim(PChar())", lambda: 'char'),
        ("TPrim(PBool())", lambda: 'bool'),
        ("TTuple(ts)", lambda ts: fmtcol('^Cyan^t(^N{0}^Cyan)^N',
            (col('Cyan', ', ').join(map(_type_repr, ts))))),
        ("TArray(t, kind)", _tarray_repr),
        ("TFunc(ps, res, m)", _func_repr),
        ("TData(d, ps)", _tdata_repr),
        ("TCtor(c, ps)", _tdata_repr),
        ("TWeak(t)", lambda t: '*%s' % (_type_repr(t),)),
        ("CMeta(cell)", repr),
        ("_", lambda: mark('<bad type %s>' % type(t))))
    seen.remove(t)
    return rstr
def _func_repr(ps, result, meta):
    # Render a function type: parameter list, '->', result, plus metadata
    # markers ('held' params, 'noenv', required environment names).
    if len(ps) == 0:
        s = 'void'
    elif len(ps) == 1 and not meta.params[0].held:
        # single plain parameter needs no surrounding parens
        s = _type_repr(ps[0])
    else:
        bits = [col('Cyan', '(')]
        first = True
        for param, pmeta in ezip(ps, meta.params):
            if first:
                first = False
            else:
                bits.append(col('Cyan', ', '))
            bits.append(_type_repr(param))
            if pmeta.held:
                bits.append(col('LG', ' held'))
        bits.append(col('Cyan', ')'))
        s = ''.join(bits)
    ret = match(result, ('Ret(t)', _type_repr),
        ('Void()', lambda: 'void'),
        ('Bottom()', lambda: 'noreturn'))
    if not meta.envParam:
        ret += col('LG', ' noenv')
    for environ in meta.requiredEnvs:
        ret += fmtcol(' ^LG{0}^N', extrinsic(Name, environ))
    return fmtcol('{0} ^Cyan->^N {1}', s, ret)
def _tdata_repr(dt, apps):
    # Datatype with no type applications renders as its bare name;
    # otherwise as name(app1, app2, ...).
    if not apps:
        return _get_name(dt)
    return fmtcol('{0}^LG(^N{1}^LG)^N', dt,
        col('Cyan', ', ').join(map(_type_repr, apps)))
def _tarray_repr(t, kind):
    # Array kind prefix: '' for GC-managed, 'x' for boxed, 'r' for raw.
    pfx = match(kind, ("AGC()", lambda: ''),
        ("ABoxed()", lambda: 'x'),
        ("ARaw()", lambda: 'r'))
    return fmtcol('^Red{0}^N[{1}]', pfx, _type_repr(t))
def cyclic_check_type_repr(t):
    # Entry point for rendering a type: runs _type_repr under a fresh
    # REPRENV cycle-detection set.
    return in_env(REPRENV, set(), lambda: _type_repr(t))
def _inject_type_reprs():
temp = globals()
for t in temp:
if len(t) > 1 and t[0] == 'T' and t[1].isupper():
temp[t].__repr__ = cyclic_check_type_repr
_inject_type_reprs()
def map_type_vars(f, t):
    """Rebuild type *t*, applying f to every typevar node inside it.

    f receives each TVar node and returns its replacement type; every
    other node is reconstructed around its mapped children.
    """
    # critical function: dispatch on the concrete class (not the match()
    # DSL) for speed on this hot path
    cls = t.__class__
    if cls is TPrim:
        return t
    elif cls is TVar:
        return f(t)
    elif cls is TData:
        return TData(t.data, [map_type_vars(f, a) for a in t.appTypes])
    elif cls is TCtor:
        # bug fix: previously read `m.ctor`, but `m` is undefined in this
        # branch (NameError) -- the ctor belongs to the node being mapped
        return TCtor(t.ctor, [map_type_vars(f, a) for a in t.appTypes])
    elif cls is TFunc:
        ps = [map_type_vars(f, p) for p in t.paramTypes]
        m = match(t.result)
        if m('Ret(t)'):
            m.ret(Ret(map_type_vars(f, m.t)))
        elif m('Void()'):
            m.ret(Void())
        elif m('Bottom()'):
            m.ret(Bottom())
        return TFunc(ps, m.result(), copy_meta(t.meta))
    elif cls is TTuple:
        return TTuple([map_type_vars(f, tt) for tt in t.tupleTypes])
    elif cls is TArray:
        return TArray(map_type_vars(f, t.elemType), t.arrayKind)
    elif cls is TWeak:
        return TWeak(map_type_vars(f, t.refType))
    else:
        assert False
def occurs(typeVar, t):
    """Occurs check: True if *typeVar* appears anywhere inside type *t*.

    Used to reject self-referential substitutions (see checked_subst).
    """
    # fix: removed two leftover `visit`/`visit_many` lambdas that
    # referenced undefined names (f, visit_type_vars) and were never called
    cls = t.__class__
    if cls is TPrim:
        return False
    elif cls is TVar:
        return t.typeVar is typeVar
    elif cls is TData or cls is TCtor:
        return any(occurs(typeVar, a) for a in t.appTypes)
    elif cls is TFunc:
        if any(occurs(typeVar, p) for p in t.paramTypes):
            return True
        if t.result.__class__ is Ret and occurs(typeVar, t.result.type):
            return True
        return False
    elif cls is TTuple:
        return any(occurs(typeVar, tt) for tt in t.tupleTypes)
    elif cls is TArray:
        return occurs(typeVar, t.elemType)
    elif cls is TWeak:
        return occurs(typeVar, t.refType)
    else:
        assert False
def subst_affects(mapping, t):
    """True if applying the typevar->type *mapping* to *t* would change it,
    i.e. any typevar in t's tree is a key of the mapping.
    """
    # fix: removed two leftover `visit`/`visit_many` lambdas that
    # referenced undefined names (f, visit_type_vars) and were never called
    cls = t.__class__
    if cls is TPrim:
        return False
    elif cls is TVar:
        return t.typeVar in mapping
    elif cls is TData or cls is TCtor:
        return any(subst_affects(mapping, a) for a in t.appTypes)
    elif cls is TFunc:
        if any(subst_affects(mapping, p) for p in t.paramTypes):
            return True
        if t.result.__class__ is Ret and subst_affects(mapping, t.result.type):
            return True
        return False
    elif cls is TTuple:
        return any(subst_affects(mapping, tt) for tt in t.tupleTypes)
    elif cls is TArray:
        return subst_affects(mapping, t.elemType)
    elif cls is TWeak:
        return subst_affects(mapping, t.refType)
    else:
        assert False
def app_map(data, appTs):
    # Build a typevar -> application-type mapping for a datatype,
    # omitting identity entries (a tvar applied to itself).
    mapping = {}
    for tvar, appT in ezip(data.tvars, appTs):
        is_identity = isinstance(appT, TVar) and appT.typeVar is tvar
        if not is_identity:
            mapping[tvar] = appT
    return mapping
def subst(mapping, t):
    # Apply a typevar->type substitution to t; vars absent from the
    # mapping are left unchanged.
    def replace(tv):
        return mapping.get(tv.typeVar, tv)
    return map_type_vars(replace, t)
# Make sure the input is sane and non-redundant
def checked_subst(mapping, t):
    # Strict variant of subst: rejects self-referential replacements
    # (occurs check), an empty mapping, and any mapping entry that is
    # never actually used while rewriting t.
    for tvar, rt in mapping.iteritems():
        assert not occurs(tvar, rt), "%s occurs in replacement %s" % (tvar, rt)
    unseen = set(mapping)
    assert len(unseen) > 0, "Empty substitution for %s" % (t,)
    def app(st):
        # replace the var and tick it off the unseen set
        tvar = st.typeVar
        if tvar in mapping:
            st = mapping[tvar]
            if tvar in unseen:
                unseen.remove(tvar)
        return st
    s = map_type_vars(app, t)
    assert len(unseen) == 0, "Typevars %s unused in subst for %s" % (unseen, t)
    return s
def is_strong_type(t):
    # A "strong" type is any compound type constructor (data, ctor,
    # array, tuple, function) as opposed to prims/vars/weak refs.
    return matches(t, "TData(_, _) or TCtor(_, _) or TArray(_, _)" +
        " or TTuple(_) or TFunc(_, _, _)")
def ctor_type(ctor, dtT):
    # Build the function type of a data constructor: its field types as
    # parameters (strong fields marked in the meta) returning dtT.
    fieldTypes = [f.type for f in ctor.fields]
    fieldMetas = [ParamMeta(is_strong_type(ft)) for ft in fieldTypes]
    return TFunc(fieldTypes, Ret(dtT), basic_meta(fieldMetas))
# Surface-syntax type signatures for the built-in functions and
# operators known to the compiler (parsed into real types elsewhere).
builtins_types = {
    'True': 'bool', 'False': 'bool', 'not': 'bool -> bool',
    '+': '(int, int) -> int', '-': '(int, int) -> int',
    '*': '(int, int) -> int', '//': '(int, int) -> int',
    '%': '(int, int) -> int',
    'negate': 'int -> int', 'fnegate': 'float -> float',
    'fadd': '(float, float) -> float', 'fsub': '(float, float) -> float',
    'fmul': '(float, float) -> float', 'fdiv': '(float, float) -> float',
    'float': 'int -> float', 'int': 'float -> int',
    '&': '(int, int) -> int', '|': '(int, int) -> int',
    '^': '(int, int) -> int',
    '==': '(int, int) -> bool', '!=': '(int, int) -> bool',
    '<': '(int, int) -> bool', '>': '(int, int) -> bool',
    '<=': '(int, int) -> bool', '>=': '(int, int) -> bool',
    'is': '(a, a) -> bool', 'is not': '(a, a) -> bool',
    'len': '[a] -> int', 'subscript': '([a], int) -> a',
    'rawlen': 'r[a] -> int', 'intsubscript': '(r[int], int) -> int',
    'buffer': 'int -> str', 'free_buffer': 'str -> void',
}
# vi: set sw=4 ts=4 sts=4 tw=79 ai et nocindent:
| pshc/archipelago | types_builtin.py | Python | mit | 10,301 | [
"VisIt"
] | 00fce1422b6f62b25808e3f5ba25a09b550e368abbd765276068471b4e92559d |
from __future__ import print_function, division
import os
import tempfile
import h5py
import numpy as np
from numpy.testing import assert_array_almost_equal_nulp
from ...dust import IsotropicDust
from .. import Model
from ...util.functions import random_id
def get_test_dust(set_emissivities=True):
    # Minimal two-point isotropic dust model shared across the test suite.
    # Pass set_emissivities=False to skip the LTE emissivity computation
    # for tests that do not need emission.
    dust = IsotropicDust([3.e9, 3.e16], [0.5, 0.5], [1., 1.])
    if set_emissivities:
        dust.set_lte_emissivities(n_temp=10, temp_min=0.1, temp_max=1600.)
    return dust
def get_realistic_test_dust():
    # Isotropic dust with a frequency-dependent opacity/albedo curve,
    # plus LTE emissivities over a wide temperature range.
    nu = [3.e7, 1.e10, 2.e11, 2.e12, 2.e13, 2.e14, 2.e15, 2.e16, 2.e17]
    chi = [1.e-11, 2.e-6, 2.e-3, 0.2, 13., 90., 1000., 700., 700.]
    albedo = [0., 0., 0., 0., 0.1, 0.5, 0.4, 0.4, 0.4]
    dust = IsotropicDust(nu, albedo, chi)
    dust.set_lte_emissivities(n_temp=40, temp_min=0.1, temp_max=100000.)
    return dust
def get_highly_reflective_dust():
    # Dust with flat, high opacity (chi=100) and high albedo (0.7) at
    # every frequency -- used to exercise scattering-dominated paths.
    nu = [3.e7, 1.e10, 2.e11, 2.e12, 2.e13, 2.e14, 2.e15, 2.e16, 2.e17]
    chi = np.repeat(100., len(nu))
    albedo = np.repeat(0.7, len(nu))
    dust = IsotropicDust(nu, albedo, chi)
    dust.set_lte_emissivities(n_temp=40, temp_min=0.1, temp_max=100000.)
    return dust
def get_test_model_noimaging():
    # Smallest runnable model: a single-cell cartesian grid, one point
    # source, one initial iteration, and no imaging photons.
    model = Model()
    model.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
    model.set_n_photons(initial=1, imaging=0)
    model.set_n_initial_iterations(1)
    source = model.add_point_source()
    source.luminosity = 1.
    source.temperature = 1000.
    return model
def assert_identical_results(file1, file2):
    """Assert that two HDF5 output files are identical.

    Compares the full dataset/group hierarchy, dataset contents (to
    within a nulp tolerance, per-column for structured arrays), and all
    attributes except a list of known time-/version-dependent ones.

    Fix: the h5py file handles were previously never closed (and leaked
    whenever an assertion fired); both files are now opened via context
    managers.
    """
    # List of attributes to exclude from checking (time-dependent)
    EXCLUDE_ATTR = ['date_started', 'date_ended', 'cpu_time', 'python_version', 'fortran_version', 'd_min', 'd_max']
    # TODO
    # For now, also exclude 'killed' attributes because they have been moved
    # to a different group, but not worth re-generating all the reference
    # models just for this. However, update this next time the reference
    # models are re-generated.
    EXCLUDE_ATTR += ['killed_photons_geo_initial',
                     'killed_photons_int_initial',
                     'killed_photons_geo',
                     'killed_photons_int']

    def _collect(f):
        # Gather dataset and group paths; visititems order is not
        # guaranteed, so sort both lists for a stable comparison.
        datasets, groups = [], []

        def visitor(name, obj):
            if isinstance(obj, h5py.Dataset):
                datasets.append(name)
            elif isinstance(obj, h5py.Group):
                groups.append(name)

        f.visititems(visitor)
        datasets.sort()
        groups.sort()
        return datasets, groups

    with h5py.File(file1, 'r') as f1, h5py.File(file2, 'r') as f2:

        data1, group1 = _collect(f1)
        data2, group2 = _collect(f2)

        # Check if list of datasets is the same
        assert data1 == data2

        # Loop over datasets to check content
        TOLERANCE = 1000
        for d in data1:
            a1 = np.array(f1[d])
            a2 = np.array(f2[d])
            if a1.dtype.kind == 'V':  # structured array: compare per column
                for col in a1.dtype.fields:
                    assert_array_almost_equal_nulp(a1[col], a2[col], TOLERANCE)
            else:  # normal array
                assert_array_almost_equal_nulp(a1, a2, TOLERANCE)

        # Check if list of groups is the same
        assert group1 == group2

        # Loop over all groups and datasets to check attributes
        for item in ['/'] + data1 + group1:

            attr1 = sorted(f1[item].attrs.keys())
            attr2 = sorted(f2[item].attrs.keys())

            for e in EXCLUDE_ATTR:
                if e in attr1:
                    attr1.remove(e)
                if e in attr2:
                    attr2.remove(e)

            assert attr1 == attr2

            for a in attr1:
                if a not in EXCLUDE_ATTR:
                    assert f1[item].attrs[a] == f2[item].attrs[a]
| hyperion-rt/hyperion | hyperion/model/tests/test_helpers.py | Python | bsd-2-clause | 4,063 | [
"VisIt"
] | 089c70896c1e61535e2b976bfdb559570849cff808f8488097b0a1e971c6e72b |
""" This is a test of the chain
DataStoreClient -> DataStoreHandler -> AccountingDB
It supposes that the DB is present, and that the service is running
this is pytest!
"""
# pylint: disable=invalid-name,wrong-import-position
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
gLogger.setLevel('DEBUG')
def createAccountingRecord():
    """Build a DataOperation accounting record pre-filled with fixed
    test values for a successful putAndRegister operation."""
    record = DataOperation()
    record.setValuesFromDict({
        'OperationType': 'putAndRegister',
        'User': 'system',
        'Protocol': 'DataManager',
        'RegistrationTime': 0.0,
        'RegistrationOK': 0,
        'RegistrationTotal': 0,
        'Destination': 'se',
        'TransferTotal': 1,
        'TransferOK': 1,
        'TransferSize': 1,
        'TransferTime': 0.0,
        'FinalStatus': 'Successful',
        'Source': 'testSite',
    })
    return record
def test_addAndRemove():
    # Round-trips one accounting record through the running DataStore
    # service: register + commit, then remove it so the DB is left clean.
    # just inserting one record
    record = createAccountingRecord()
    record.setStartTime()
    record.setEndTime()
    res = gDataStoreClient.addRegister(record)
    assert res['OK']
    res = gDataStoreClient.commit()
    assert res['OK']
    # now removing that record
    res = gDataStoreClient.remove(record)
    assert res['OK']
| fstagni/DIRAC | tests/Integration/AccountingSystem/Test_DataStoreClient.py | Python | gpl-3.0 | 1,561 | [
"DIRAC"
] | 587708a3519e726e9b0a3d6f40eb440212318c029554938b6490d49e675f6a24 |
import datetime
from dateutil.relativedelta import relativedelta
from django.utils.timezone import now, get_current_timezone
from unittest import mock
from factory.fuzzy import FuzzyDate, FuzzyInteger, FuzzyChoice
from freezegun import freeze_time
from jmespath import search as s
from django.urls import reverse
from django.core.management import call_command
from fixturedb.factories.win import create_win_factory
from mi.models import (
OverseasRegionGroup,
OverseasRegion,
FinancialYear,
OverseasRegionGroupYear,
OverseasRegionYear,
)
from mi.tests.base_test_case import (
MiApiViewsBaseTestCase,
MiApiViewsWithWinsBaseTestCase,
)
from mi.tests.utils import GenericTopNonHvcWinsTestMixin, GenericWinTableTestMixin
from mi.utils import sort_campaigns_by
from mi.views.region_views import BaseOverseasRegionGroupMIView
class BaseOverseasRegionGroupMIViewTestCase(MiApiViewsBaseTestCase):
    """Unit tests for BaseOverseasRegionGroupMIView itself (queryset and
    serializer wiring), independent of URL routing."""

    view_class = BaseOverseasRegionGroupMIView

    def setUp(self):
        super().setUp()
        self.view = self.view_class()
        self.all_os_region_groups = list(OverseasRegionGroup.objects.all())

    def test_queryset_is_unfiltered(self):
        # the view must expose every OverseasRegionGroup, unfiltered
        self.assertEqual(
            len(self.all_os_region_groups),
            self.view.get_queryset().count()
        )

    def test_get_result_uses_serializer(self):
        # get_results must serialize each group via the group serializer,
        # once per group
        with mock.patch('mi.views.region_views.OverseasRegionGroupSerializer') as mocked_serializer:
            mocked_serializer.data = {}
            self.view.get_results()
            self.assertTrue(mocked_serializer.called)
            self.assertEqual(
                len(mocked_serializer.call_args_list),
                len(self.all_os_region_groups)
            )
class OverseasRegionGroupListViewTestCase(MiApiViewsBaseTestCase):
    """Tests for the `OverseasRegionGroup` list endpoint: the region-group
    hierarchy is scoped per financial year via the ?year= parameter."""

    def setUp(self):
        super().setUp()
        self.maxDiff = None

    @classmethod
    def setUpTestData(cls):
        # clear out existing hierarchy for this test
        OverseasRegionGroupYear.objects.all().delete()
        OverseasRegionGroup.objects.all().delete()
        osr = OverseasRegion.objects.all()
        OverseasRegionYear.objects.filter(overseas_region__in=osr).delete()
        osr.delete()
        cls.fy2017 = FinancialYear.objects.get(id=2017)
        cls.fy2016 = FinancialYear.objects.get(id=2016)
        cls.region1 = OverseasRegion.objects.create(name='test1')
        cls.region2 = OverseasRegion.objects.create(name='test2')
        cls.group1 = OverseasRegionGroup.objects.create(name='group 1')
        cls.group2 = OverseasRegionGroup.objects.create(name='group 2')
        # group 1 only exists in FY2016, group 2 only in FY2017
        OverseasRegionGroupYear.objects.create(
            group=cls.group1, financial_year=cls.fy2016, region=cls.region1)
        OverseasRegionGroupYear.objects.create(
            group=cls.group2, financial_year=cls.fy2017, region=cls.region2)

    def test_os_region_groups_list_2016(self):
        """ test `OverseasRegionGroup` list API"""
        self.url = reverse('mi:overseas_region_groups') + "?year=2016"
        self.expected_response = [
            {
                'name': 'group 1',
                'id': self.group1.id,
                'regions': [{'name': 'test1', 'id': self.region1.id}]
            }
        ]
        # fix: the response was built but never asserted, so this test
        # previously checked nothing
        self.assertResponse()

    def test_os_region_groups_list_2017(self):
        """ test `OverseasRegionGroup` list API"""
        self.url = reverse('mi:overseas_region_groups') + "?year=2017"
        self.expected_response = [
            {
                'name': 'group 2',
                'id': self.group2.id,
                'regions': [{'name': 'test2', 'id': self.region2.id}]
            }
        ]
        self.assertResponse()

    def test_os_region_groups_list_no_duplicates(self):
        """ test `OverseasRegionGroup` list API"""
        # group 1 now also exists in 2017; each group must appear once
        OverseasRegionGroupYear.objects.create(
            group=self.group1, financial_year=self.fy2017, region=self.region1)
        self.url = reverse('mi:overseas_region_groups') + "?year=2017"
        self.expected_response = [
            {
                'name': 'group 1',
                'id': self.group1.id,
                'regions': [{'name': 'test1', 'id': self.region1.id}]
            },
            {
                'name': 'group 2',
                'id': self.group2.id,
                'regions': [{'name': 'test2', 'id': self.region2.id}]
            }
        ]
        self.assertResponse()
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class OverseasRegionBaseViewTestCase(MiApiViewsWithWinsBaseTestCase):
    """Shared fixtures and helpers for the overseas-region MI view tests.

    Time is frozen so that financial-year boundaries are deterministic.
    """

    view_base_url = reverse('mi:overseas_regions')
    export_value = 100000
    # reference dates: one win date in each financial year, the last day
    # of FY2016, and the frozen "today" used by the tests
    win_date_2017 = datetime.datetime(2017, 4, 25, tzinfo=get_current_timezone())
    win_date_2016 = datetime.datetime(2016, 4, 25, tzinfo=get_current_timezone())
    fy_2016_last_date = datetime.datetime(2017, 3, 31, tzinfo=get_current_timezone())
    frozen_date_17 = datetime.datetime(2017, 5, 30, tzinfo=get_current_timezone())

    def get_url_for_year(self, year, base_url=None):
        # append ?year=... to the view URL (defaults to the class base URL)
        if not base_url:
            base_url = self.view_base_url
        return '{base}?year={year}'.format(base=base_url, year=year)

    def assert_result_count(self, expected_length):
        self.assertEqual(
            expected_length,
            len(self._api_response_data)
        )

    @property
    def countries(self):
        # lower-cased region names present in the current API response
        return {x['name'].lower() for x in self._api_response_data}
class OverseasRegionListViewTestCase(OverseasRegionBaseViewTestCase):
    """Checks that the region list endpoint reflects the year-specific
    region hierarchy (e.g. 'Africa' only exists from 2017 onwards)."""

    view_base_url = reverse('mi:overseas_regions')

    def test_list_returns_only_countries_for_2016(self):
        self.url = self.get_url_for_year(2016)
        self.assert_result_count(17)
        # Africa region should only be in 2017 data
        self.assertFalse('africa' in self.countries)
        self.assertTrue('north africa' in self.countries)

    def test_list_only_returns_countries_for_2017(self):
        self.url = self.get_url_for_year(2017)
        self.assert_result_count(15)
        self.assertTrue('africa' in self.countries)
        # North Africa still in 2017
        self.assertTrue('north africa' in self.countries)
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class OverseasRegionOverviewTestCase(OverseasRegionBaseViewTestCase):
view_base_url = reverse('mi:overseas_region_overview')
@classmethod
def setUpClass(cls):
super().setUpClass()
call_command('create_missing_hvcs', verbose=False)
def setUp(self):
super().setUp()
self._win_factory_function = create_win_factory(
self.user, sector_choices=self.TEAM_1_SECTORS)
self.export_value = 777777
def test_list_returns_only_countries_for_2016(self):
self.url = self.get_url_for_year(2016)
self.assert_result_count(17)
# Africa region should only be in 2017 data
self.assertFalse('africa' in self.countries)
self.assertTrue('north africa' in self.countries)
def test_list_only_returns_countries_for_2017(self):
self.url = self.get_url_for_year(2017)
self.assert_result_count(15)
self.assertTrue('africa' in self.countries)
# North Africa still in 2017
self.assertTrue('north africa' in self.countries)
def test_overview_value_1_win(self):
w1 = self._create_hvc_win(
hvc_code='E016', win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value
)
self.assertEqual(w1.country.code, 'CA')
self.url = self.get_url_for_year(2017)
data = self._api_response_data
na_data = [x for x in data if x['name'] == 'North America'][0]
self.assertEqual(w1.total_expected_export_value, na_data[
'values']['hvc']['current']['confirmed'])
def test_overview_value_2_wins_same_region(self):
w1 = self._create_hvc_win(
hvc_code='E016',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value
)
w2 = self._create_hvc_win(
hvc_code='E016',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=1
)
self.assertEqual(w1.country.code, w2.country.code)
self.url = self.get_url_for_year(2017)
data = self._api_response_data
na_data = [x for x in data if x['name'] == 'North America'][0]
self.assertEqual(w1.total_expected_export_value + 1,
na_data['values']['hvc']['current']['confirmed'])
def test_overview_value_2_wins_different_regions(self):
w1 = self._create_hvc_win(
hvc_code='E016', win_date=self.win_date_2017, confirm=True,
fin_year=2017, export_value=self.export_value
)
w2 = self._create_hvc_win(
hvc_code='E119', win_date=self.win_date_2017,
confirm=True, fin_year=2017, export_value=1
)
self.assertEqual(w1.country.code, w2.country.code)
self.url = self.get_url_for_year(2017)
data = self._api_response_data
na_data = [x for x in data if x['name'] == 'North America'][0]
we_data = [x for x in data if x['name'] == 'Western Europe'][0]
self.assertEqual(w1.total_expected_export_value, na_data[
'values']['hvc']['current']['confirmed'])
self.assertEqual(w2.total_expected_export_value, we_data[
'values']['hvc']['current']['confirmed'])
def test_overview_1_unconfirmed_and_1_confirmed_same_year(self):
w1 = self._create_hvc_win(
hvc_code='E016', win_date=self.win_date_2017, confirm=True,
fin_year=2017, export_value=self.export_value
)
w2 = self._create_hvc_win(
hvc_code='E016', win_date=self.win_date_2017, confirm=False,
fin_year=2017, export_value=1
)
self.assertEqual(w1.country.code, w2.country.code)
self.url = self.get_url_for_year(2017)
data = self._api_response_data
na_data = [x for x in data if x['name'] == 'North America'][0]
self.assertEqual(w1.total_expected_export_value, na_data[
'values']['hvc']['current']['confirmed'])
self.assertEqual(w2.total_expected_export_value, na_data[
'values']['hvc']['current']['unconfirmed'])
def test_overview_1_unconfirmed_in_current_year_should_not_show_up_in_last_year(self):
w1 = self._create_hvc_win(
hvc_code='E016', win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=self.export_value
)
self.url = self.get_url_for_year(2017)
data = self._api_response_data
na_data = [x for x in data if x['name'] == 'North America'][0]
self.assertEqual(w1.total_expected_export_value, na_data[
'values']['hvc']['current']['unconfirmed'])
self.assertEqual(0, na_data['values']['hvc']['current']['confirmed'])
self.url = self.get_url_for_year(2016)
data_2016 = self._api_response_data
self.assertEqual(
0,
s("sum([?name=='North America'].values.*.current[].[confirmed,unconfirmed][])", data_2016)
)
def test_overview_1_unconfirmed_last_year_should_not_show_up_in_last_year(self):
w1 = self._create_hvc_win(
hvc_code='E016', win_date=self.frozen_date,
confirm=False, fin_year=2016, export_value=self.export_value
)
self.url = self.get_url_for_year(2016)
data_2016 = self._api_response_data
self.assertEqual(
0,
s("sum([?name=='North America'].values.*.current[].[confirmed,unconfirmed][])", data_2016)
)
# it should be in this year
self.url = self.get_url_for_year(2017)
data_2017 = self._api_response_data
na_data_2017 = [x for x in data_2017 if x[
'name'] == 'North America'][0]
self.assertEqual(w1.total_expected_export_value, na_data_2017[
'values']['hvc']['current']['unconfirmed'])
def test_overview_1_unconfirmed_last_year_should_show_up_in_new_region_if_country_has_moved_regions(self):
w1 = self._create_hvc_win(
hvc_code='E016', win_date=self.frozen_date,
confirm=False, fin_year=2016, export_value=self.export_value
)
self.url = self.get_url_for_year(2016)
data_2016 = self._api_response_data
na_data_2016 = [x for x in data_2016 if x[
'name'] == 'North America'][0]
self.assertEqual(0, na_data_2016['values'][
'hvc']['current']['confirmed'])
self.assertEqual(0, na_data_2016['values'][
'hvc']['current']['unconfirmed'])
self.assertEqual(w1.country.code, 'CA')
# move Canada to a different region
region_year = OverseasRegionYear.objects.get(
country__country='CA', financial_year_id=2017)
region_year.overseas_region = OverseasRegion.objects.get(
name='Western Europe')
region_year.save()
# it should be in this year
self.url = self.get_url_for_year(2017)
data_2017 = self._api_response_data
na_data_2017 = s("[?name=='North America']|[0]", data_2017)
we_data_2017 = s("[?name=='Western Europe']|[0]", data_2017)
self.assertEqual(0, na_data_2017['values'][
'hvc']['current']['unconfirmed'])
self.assertEqual(w1.total_expected_export_value, we_data_2017[
'values']['hvc']['current']['unconfirmed'])
# Non HVC
def test_non_hvc_win_in_overview_confirmed_current_year(self):
w1 = self._create_non_hvc_win(
win_date=self.win_date_2017, export_value=self.export_value,
confirm=True, country='CA', fin_year=2017
)
self.url = self.get_url_for_year(2017)
data_2017 = self._api_response_data
na_data_2017 = s("[?name=='North America']|[0]", data_2017)
self.assertEqual(w1.total_expected_export_value,
na_data_2017['values']['non_hvc']['current']['confirmed'])
self.url = self.get_url_for_year(2016)
data_2016 = self._api_response_data
self.assertEqual(
0,
s("sum([?name=='North America'].values.*.current[].[confirmed,unconfirmed][])", data_2016)
)
def test_non_hvc_win_in_overview_unconfirmed_current_year(self):
w1 = self._create_non_hvc_win(
win_date=self.win_date_2017, export_value=self.export_value,
confirm=False, country='CA', fin_year=2017
)
self.url = self.get_url_for_year(2017)
data_2017 = self._api_response_data
na_data_2017 = s("[?name=='North America']|[0]", data_2017)
self.assertEqual(w1.total_expected_export_value,
na_data_2017['values']['non_hvc']['current']['unconfirmed'])
self.url = self.get_url_for_year(2016)
data_2016 = self._api_response_data
self.assertEqual(
0,
s("sum([?name=='North America'].values.*.current[].[confirmed,unconfirmed][])", data_2016)
)
def test_2_non_hvc_win_in_overview_both_confirmed_current_year(self):
self._create_non_hvc_win(
win_date=self.win_date_2017,
export_value=self.export_value + 1,
confirm=True,
country='CA',
fin_year=2017
)
self._create_non_hvc_win(
win_date=self.win_date_2017,
export_value=self.export_value - 1,
confirm=True,
country='CA',
fin_year=2017
)
self.url = self.get_url_for_year(2017)
data_2017 = self._api_response_data
na_data_2017 = s("[?name=='North America']|[0]", data_2017)
self.assertEqual(
self.export_value * 2,
na_data_2017['values']['non_hvc']['current']['confirmed']
)
self.url = self.get_url_for_year(2016)
data_2016 = self._api_response_data
self.assertEqual(
0,
s("sum([?name=='North America'].values.*.current[].[confirmed,unconfirmed][])", data_2016)
)
def test_2_non_hvc_win_in_overview_confirmed_and_unconfirmed_current_year(self):
w1 = self._create_non_hvc_win(
win_date=self.win_date_2017,
export_value=self.export_value + 1,
confirm=True,
country='CA',
fin_year=2017
)
w2 = self._create_non_hvc_win(
win_date=self.win_date_2017,
export_value=self.export_value - 1,
confirm=False,
country='CA',
fin_year=2017
)
self.url = self.get_url_for_year(2017)
data_2017 = self._api_response_data
na_data_2017 = s("[?name=='North America']|[0]", data_2017)
self.assertEqual(w1.total_expected_export_value, na_data_2017[
'values']['non_hvc']['current']['confirmed'])
self.assertEqual(w2.total_expected_export_value, na_data_2017[
'values']['non_hvc']['current']['unconfirmed'])
    def test_5_non_hvc_win_in_overview_confirmed_2016_for_2016(self):
        """Five 2016-confirmed non-HVC wins appear only in the 2016 totals, not in 2017."""
        num_to_create = 5
        for i in range(num_to_create):
            self._create_non_hvc_win(
                win_date=self.frozen_date,
                export_value=self.export_value,
                confirm=True,
                country='CA',
                fin_year=2016
            )
        # should not be in 2017
        self.url = self.get_url_for_year(2017)
        data_2017 = self._api_response_data
        self.assertEqual(
            0,
            s("sum([?name=='North America'].values.*.current[].[confirmed,unconfirmed][])", data_2017)
        )
        # should show up in 2016
        self.url = self.get_url_for_year(2016)
        data_2016 = self._api_response_data
        na_data_2016 = s("[?name=='North America']|[0]", data_2016)
        self.assertEqual(self.export_value * num_to_create,
                         na_data_2016['values']['non_hvc']['current']['confirmed'])
    def test_overview_non_hvc_1_unconfirmed_last_year_should_show_up_in_new_region_if_country_has_moved_regions(self):
        """An unconfirmed win counts against the region its country belongs to in the year viewed."""
        w1 = self._create_non_hvc_win(
            win_date=self.frozen_date,
            confirm=False,
            fin_year=2016,
            export_value=self.export_value,
            country='CA'
        )
        self.url = self.get_url_for_year(2016)
        data_2016 = self._api_response_data
        na_data_2016 = s("[?name=='North America']|[0]", data_2016)
        # the unconfirmed win is not counted in 2016's current non-HVC totals
        self.assertEqual(
            0,
            s("sum(values.non_hvc.current.[*][])", na_data_2016)
        )
        self.assertEqual(w1.country.code, 'CA')
        # move Canada to a different region for financial year 2017
        region_year = OverseasRegionYear.objects.get(
            country__country='CA', financial_year_id=2017)
        region_year.overseas_region = OverseasRegion.objects.get(
            name='Western Europe')
        region_year.save()
        # it should be counted under the new region in 2017, not the old one
        self.url = self.get_url_for_year(2017)
        data_2017 = self._api_response_data
        na_data_2017 = s("[?name=='North America']|[0]", data_2017)
        we_data_2017 = s("[?name=='Western Europe']|[0]", data_2017)
        self.assertEqual(
            0,
            na_data_2017['values']['non_hvc']['current']['unconfirmed']
        )
        self.assertEqual(
            w1.total_expected_export_value,
            we_data_2017['values']['non_hvc']['current']['unconfirmed']
        )
class OverseasRegionCampaignsTestCase(OverseasRegionBaseViewTestCase):
    """Tests for the overseas region campaigns endpoint, defaulting to region id 10."""
    list_regions_base_url = reverse('mi:overseas_regions')
    view_base_url = reverse('mi:overseas_region_campaigns', kwargs={"region_id": 10})
    # HVC campaign codes used for the region under test, per financial year
    CEN_16_HVCS = ["E017", "E018", "E019", "E020",]
    CEN_17_HVCS = ["E017", "E018", "E019", "E020", "E219", "E220", "E221", "E222",]
    TEST_CAMPAIGN_ID = "E017"
    TARGET_E017 = 30000000
    PRORATED_TARGET = 2465760  # target based on the frozen date
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        call_command('create_missing_hvcs', verbose=False)
    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        # tests default to the 2017 URL; individual tests override as needed
        self.url = self.get_url_for_year(2017)
    def test_campaigns_list_2016(self):
        """2016 campaign list matches the hvcs campaign list and has 4 entries."""
        self.url = self.get_url_for_year(2016)
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), len(
            api_response["hvcs"]["campaigns"]))
        self.assertEqual(len(api_response["campaigns"]), 4)
def test_campaigns_2016_no_duplicates(self):
list_regions_url = self.get_url_for_year(year=2016,
base_url=self.list_regions_base_url)
all_regions = self._get_api_response(list_regions_url).data["results"]
for region in all_regions:
region_url = reverse('mi:overseas_region_campaigns',
kwargs={"region_id": region["id"]})
self.url = self.get_url_for_year(2016, base_url=region_url)
api_response = self._api_response_data
for campaign in api_response["campaigns"]:
dups = s("campaigns[?campaign_id=='{}'].campaign".format(campaign["campaign_id"]),
api_response)
self.assertTrue(len(dups) == 1)
    def test_campaigns_list_2017(self):
        """2017 campaign list matches the hvcs campaign list and has 8 entries."""
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), len(
            api_response["hvcs"]["campaigns"]))
        self.assertEqual(len(api_response["campaigns"]), 8)
def test_campaigns_list_2017_no_duplicates(self):
list_regions_url = self.get_url_for_year(year=2017,
base_url=self.list_regions_base_url)
all_regions = self._get_api_response(list_regions_url).data["results"]
for region in all_regions:
region_url = reverse('mi:overseas_region_campaigns',
kwargs={"region_id": region["id"]})
self.url = self.get_url_for_year(2017, base_url=region_url)
api_response = self._api_response_data
for campaign in api_response["campaigns"]:
dups = s("campaigns[?campaign_id=='{}'].campaign".format(campaign["campaign_id"]),
api_response)
self.assertTrue(len(dups) == 1)
    def test_campaigns_json_2016_no_wins(self):
        """With no wins, the 2016 payload contains zeroed totals for each 2016 campaign."""
        self.url = self.get_url_for_year(2016)
        self.expected_response = {
            "campaigns": [],
            "name": "Central European Network",
            "hvcs": {
                "campaigns": [
                    "HVC: E017",
                    "HVC: E018",
                    "HVC: E019",
                    "HVC: E020",
                ],
                "target": self.CAMPAIGN_TARGET * len(self.CEN_16_HVCS)
            },
            "avg_time_to_confirm": 0.0
        }
        # build one all-zero campaign entry per 2016 HVC code
        campaigns = []
        for hvc_code in self.CEN_16_HVCS:
            campaigns.append({
                "campaign": "HVC",
                "campaign_id": hvc_code,
                "totals": {
                    "hvc": {
                        "value": {
                            "unconfirmed": 0,
                            "confirmed": 0,
                            "total": 0
                        },
                        "number": {
                            "unconfirmed": 0,
                            "confirmed": 0,
                            "total": 0
                        }
                    },
                    "change": "up",
                    "progress": {
                        "unconfirmed_percent": 0.0,
                        "confirmed_percent": 0.0,
                        "status": "red"
                    },
                    "target": self.CAMPAIGN_TARGET
                }
            })
        # response orders campaigns the same way, so sort the expectation to match
        self.expected_response["campaigns"] = sorted(campaigns, key=sort_campaigns_by, reverse=True)
        self.assertResponse()
    def test_avg_time_to_confirm_unconfirmed_wins(self):
        """ Average time to confirm will be zero, if there are no confirmed wins """
        # one unconfirmed win per 2016 campaign code
        for hvc_code in self.CEN_16_HVCS:
            self._create_hvc_win(hvc_code=hvc_code, confirm=False)
        api_response = self._api_response_data
        expected_avg_time = 0.0
        response_avg_time = api_response["avg_time_to_confirm"]
        self.assertEqual(expected_avg_time, response_avg_time)
    def test_avg_time_to_confirm_wins_confirmed_nextday(self):
        """ Test average time to confirm when all wins confirmed in one day """
        for hvc_code in self.CEN_16_HVCS:
            self._create_hvc_win(
                hvc_code=hvc_code,
                win_date=self.win_date_2017,
                notify_date=self.win_date_2017,
                # every win confirmed exactly one day after notification
                response_date=self.win_date_2017 + datetime.timedelta(days=1),
                confirm=True,
                fin_year=2017,
                export_value=self.export_value,
                country='HU'
            )
        api_response = self._api_response_data
        expected_avg_time = 1.0
        response_avg_time = api_response["avg_time_to_confirm"]
        self.assertEqual(expected_avg_time, response_avg_time)
def test_avg_time_to_confirm_wins_confirmed_randomly(self):
"""
Average time to confirm should be more than one,
when wins took more than one day to be confirmed
"""
for hvc_code in self.CEN_16_HVCS:
response_date = FuzzyDate(datetime.datetime(2017, 5, 27),
datetime.datetime(2017, 5, 31)).evaluate(2, None, False)
self._create_hvc_win(
hvc_code=hvc_code,
win_date=self.win_date_2017,
notify_date=self.win_date_2017,
response_date=response_date,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
api_response = self._api_response_data
response_avg_time = api_response["avg_time_to_confirm"]
self.assertTrue(response_avg_time > 1.0)
    def test_campaigns_count_no_wins(self):
        """ Make sure number of campaigns returned have no effect when there are no wins """
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), len(self.CEN_17_HVCS))
    def test_campaigns_count_unconfirmed_wins(self):
        """ unconfirmed wins shouldn't have any effect on number of campaigns """
        for hvc_code in self.CEN_16_HVCS:
            self._create_hvc_win(
                hvc_code=hvc_code,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=self.export_value,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), len(self.CEN_17_HVCS))
    def test_campaigns_count_confirmed_wins(self):
        """ confirmed HVC wins shouldn't have any effect on number of campaigns """
        for hvc_code in self.CEN_16_HVCS:
            self._create_hvc_win(
                hvc_code=hvc_code,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=self.export_value,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), len(self.CEN_17_HVCS))
    def test_campaigns_count_unconfirmed_nonhvc_wins(self):
        """ unconfirmed non-hvc wins shouldn't have any effect on number of campaigns """
        for _ in self.CEN_16_HVCS:
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=self.export_value,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), len(self.CEN_17_HVCS))
    def test_campaigns_count_confirmed_nonhvc_wins(self):
        """ confirmed non-hvc wins shouldn't have any effect on number of campaigns """
        for _ in self.CEN_16_HVCS:
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=self.export_value,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(len(api_response["campaigns"]), len(self.CEN_17_HVCS))
    # progress colour tests: only confirmed HVC wins can move E017 off "red"
    def test_campaign_progress_colour_no_wins(self):
        """ Given the 'Frozen datetime', progress colour will be Red if there are no wins """
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")
    def test_campaign_progress_colour_unconfirmed_wins_red(self):
        """ Given the 'Frozen datetime', progress colour will be Red if there are no confirmed wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=100000,
                country='HU'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")
    def test_campaign_progress_colour_confirmed_wins_red(self):
        """ Given the 'Frozen datetime', progress colour will be Red if there are not enough confirmed wins """
        self._create_hvc_win(
            hvc_code=self.TEST_CAMPAIGN_ID,
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=100000,
            country='HU'
        )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")
    def test_campaign_progress_colour_nonhvc_wins_red(self):
        """ Given the 'Frozen datetime', progress colour will be Red if there are only non-hvc wins """
        for _ in range(1, 11):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=100000,
                country='HU'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")
    def test_campaign_progress_colour_nonhvc_confirmed_wins_red(self):
        """ Given the 'Frozen datetime', progress colour will be Red if there are only confirmed non-hvc wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=100000,
                country='HU'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")
    # boundary tests against the prorated run-rate thresholds:
    # < 25% => red, 25%-44% => amber, >= 45% => green (per the assertions below)
    def test_campaign_progress_colour_confirmed_wins_amber(self):
        """
        Given the 'Frozen datetime', progress colour will be Amber
        if there only few confirmed wins to take runrate past 25% but still less than 45%
        """
        export_val = self.PRORATED_TARGET * 30 / 100
        self._create_hvc_win(
            hvc_code=self.TEST_CAMPAIGN_ID,
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=export_val,
            country='HU'
        )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "amber")
    def test_campaign_progress_confirmed_wins_50_green(self):
        """ Progress colour should be green if there are enough win to take runrate past 45% """
        for _ in range(1, 5):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=3000000,
                country='HU'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "green")
    def test_campaign_progress_confirmed_wins_45_green(self):
        """ Boundary Testing for Green:
        Progress colour should be green if there are enough win to take runrate past 45% """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=791700,
                country='HU'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "green")
    def test_campaign_progress_confirmed_wins_44_amber(self):
        """
        Boundary testing for Amber: Given the 'Frozen datetime', progress colour will be Amber
        if there only few confirmed wins to take runrate past 25% but still less than 45%
        """
        export_val = self.PRORATED_TARGET * 44 / 100
        # spread the 44% value across 10 wins
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=export_val/10,
                country='HU'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "amber")
    def test_campaign_progress_confirmed_wins_25_amber(self):
        """
        Boundary testing for Amber: Given the 'Frozen datetime', progress colour will be Amber
        if there only few confirmed wins to take runrate past 25% but still less than 45%
        """
        export_val = self.PRORATED_TARGET * 25 / 100
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=export_val/10,
                country='HU'
            )
        # extra win to nudge the total just past the 25% boundary
        self._create_hvc_win(hvc_code=self.TEST_CAMPAIGN_ID, export_value=146700, confirm=True)
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "amber")
    def test_campaign_progress_confirmed_wins_24_red(self):
        """ Boundary testing for red: Anything less than 25% runrate of progress should be Red """
        export_val = self.PRORATED_TARGET * 24 / 100
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=export_val/10,
                country='HU'
            )
        api_response = self._api_response_data
        e017_status = s("campaigns[?campaign_id=='{}'].totals.progress.status".format(self.TEST_CAMPAIGN_ID),
                        api_response)[0]
        self.assertEqual(e017_status, "red")
    def test_campaign_progress_percent_no_wins(self):
        """ Both progress percentages will be 0, if there are no HVC wins at all """
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
    def test_campaign_progress_percent_unconfirmed_wins(self):
        """ Confirmed percent stays 0 with only unconfirmed wins; unconfirmed percent reflects them """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
        # 10 x 300000 = 3000000, i.e. 10% of the 30M E017 target
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10.0)
    def test_campaign_progress_percent_confirmed_wins_1(self):
        """ Test simple progress percent """
        self._create_hvc_win(
            hvc_code=self.TEST_CAMPAIGN_ID,
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=300000,
            country='HU'
        )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 1.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
    def test_campaign_progress_percent_nonhvc_wins(self):
        """ Non hvc wins shouldn't affect progress percent """
        for _ in range(1, 11):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=100000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
    def test_campaign_progress_percent_nonhvc_confirmed_wins(self):
        """ Non hvc confirmed wins shouldn't affect progress percent """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
    def test_campaign_progress_percent_confirmed_wins_20(self):
        """ Check 20% progress percent """
        for _ in range(1, 3):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=3000000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.confirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 20.0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.progress.unconfirmed_percent"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0.0)
    def test_campaign_hvc_number_no_wins(self):
        """ HVC number shouldn't be affected when there are no wins """
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
    def test_campaign_hvc_number_only_nonhvc_wins(self):
        """ HVC number shouldn't be affected when there are only non-hvc wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
    def test_campaign_hvc_number_only_nonhvc_confirmed_wins(self):
        """ HVC number shouldn't be affected when there are only confirmed non-hvc wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
    def test_campaign_hvc_number_unconfirmed_wins(self):
        """ Check HVC number with unconfirmed HVC wins """
        # range(1, 11) creates 10 wins
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
    def test_campaign_hvc_number_confirmed_wins(self):
        """ Check HVC number with confirmed HVC wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
    def test_campaign_hvc_number_mixed_wins(self):
        """ Check HVC numbers with both confirmed and unconfirmed HVC wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 10)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.number.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 20)
    def test_campaign_hvc_value_no_wins(self):
        """ HVC value will be 0 with there are no wins """
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
    def test_campaign_hvc_value_only_nonhvc_wins(self):
        """ HVC value will be 0 when there are only unconfirmed non-HVC wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
    def test_campaign_hvc_value_only_nonhvc_confirmed_wins(self):
        """ HVC value will be 0 when there are only confirmed non-HVC wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
    def test_campaign_hvc_value_unconfirmed_wins(self):
        """ Check HVC value when there are unconfirmed wins """
        # 10 wins of 300000 => 3000000 unconfirmed
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
    def test_campaign_hvc_value_confirmed_wins(self):
        """ Check HVC value when there are confirmed wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 0)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
    def test_campaign_hvc_value_mixed_wins(self):
        """ Check HVC value when there are both confirmed and unconfirmed wins """
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        for _ in range(1, 11):
            self._create_hvc_win(
                hvc_code=self.TEST_CAMPAIGN_ID,
                win_date=self.win_date_2017,
                confirm=True,
                fin_year=2017,
                export_value=300000,
                country='HU'
            )
        api_response = self._api_response_data
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.confirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.unconfirmed"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 3000000)
        self.assertEqual(s("campaigns[?campaign_id=='{}'].totals.hvc.value.total"
                           .format(self.TEST_CAMPAIGN_ID), api_response)[0], 6000000)
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class OverseasRegionDetailsTestCase(OverseasRegionBaseViewTestCase):
    """Tests for the overseas region detail endpoint; defaults to the CEN region URL."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        call_command('create_missing_hvcs', verbose=False)
    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        # default view under test is the CEN region detail URL (region_id 10)
        self.view_base_url = self.cen_region_url
    we_region_url = reverse('mi:overseas_region_detail', kwargs={"region_id": 5})
    cen_region_url = reverse('mi:overseas_region_detail', kwargs={"region_id": 10})
    latam_region_url = reverse('mi:overseas_region_detail', kwargs={"region_id": 6})
    # regions assumed to exist in only one financial year — used by the 404 tests below
    region_url_2016_only = reverse('mi:overseas_region_detail', kwargs={"region_id": 15})
    region_url_2017_only = reverse('mi:overseas_region_detail', kwargs={"region_id": 18})
    def test_2017_region_in_2016_404(self):
        """A region that only exists in 2017 returns 404 when queried for 2016."""
        self.view_base_url = self.region_url_2017_only
        self.url = self.get_url_for_year(2016)
        self._get_api_response(self.url, status_code=404)
    def test_2016_region_in_2017_404(self):
        """A region that only exists in 2016 returns 404 when queried for 2017."""
        self.view_base_url = self.region_url_2016_only
        self.url = self.get_url_for_year(2017)
        self._get_api_response(self.url, status_code=404)
    def test_details_no_wins_2016(self):
        """With no wins, every 2016 export and non-export figure is zero."""
        self.url = self.get_url_for_year(2016)
        api_response = self._api_response_data
        self.assertEqual(api_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["hvc"]["value"]["total"], 0)
        self.assertEqual(api_response["wins"]["export"]["hvc"]["number"]["total"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["value"]["total"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["number"]["total"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["value"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["number"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["value"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["number"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["value"]["total"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["number"]["total"], 0)
    def test_details_no_wins_2017(self):
        """With no wins, every 2017 export and non-export figure is zero."""
        self.url = self.get_url_for_year(2017)
        api_response = self._api_response_data
        self.assertEqual(api_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["hvc"]["value"]["total"], 0)
        self.assertEqual(api_response["wins"]["export"]["hvc"]["number"]["total"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["value"]["total"], 0)
        self.assertEqual(api_response["wins"]["export"]["non_hvc"]["number"]["total"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["value"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["number"]["confirmed"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["value"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["number"]["unconfirmed"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["value"]["total"], 0)
        self.assertEqual(api_response["wins"]["non_export"]["number"]["total"], 0)
    def test_details_cen_hvc_win_for_2017_in_2017(self):
        """A confirmed 2017 HVC win shows up in the 2017 detail figures."""
        self._create_hvc_win(
            hvc_code='E017',
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=self.export_value,
            country='HU'
        )
        self.url = self.get_url_for_year(2017)
        cen_response = self._api_response_data
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], self.export_value)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 1)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], self.export_value)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 1)
    def test_details_cen_hvc_win_for_2017_in_2016(self):
        """A confirmed 2017 HVC win does not appear in the 2016 detail figures."""
        self._create_hvc_win(
            hvc_code='E017',
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=self.export_value,
            country='HU'
        )
        self.url = self.get_url_for_year(2016)
        cen_response = self._api_response_data
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 0)
    def test_details_cen_hvc_win_for_2016_in_2016(self):
        """A confirmed 2016 HVC win shows up in the 2016 detail figures."""
        self._create_hvc_win(
            hvc_code='E017',
            win_date=self.win_date_2016,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='HU'
        )
        self.url = self.get_url_for_year(2016)
        cen_response = self._api_response_data
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], self.export_value)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 1)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], self.export_value)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 1)
    def test_details_cen_hvc_win_for_2016_in_2017(self):
        """A confirmed 2016 HVC win does not appear in the 2017 detail figures."""
        self._create_hvc_win(
            hvc_code='E017',
            win_date=self.win_date_2016,
            confirm=True,
            fin_year=2016,
            export_value=self.export_value,
            country='HU'
        )
        self.url = self.get_url_for_year(2017)
        cen_response = self._api_response_data
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], 0)
        self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 0)
def test_details_cen_hvc_win_confirmed_in_2016_appears_in_2016(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 1)
def test_details_cen_hvc_win_confirmed_in_2016_doesnt_appears_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 0)
def test_details_cen_hvc_win_from_2016_confirmed_in_2017_doesnt_appears_in_2016(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
response_date=self.win_date_2017,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 0)
def test_details_cen_hvc_win_from_2016_confirmed_in_2017_appears_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
response_date=self.win_date_2017,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 1)
def test_details_hvc_win_from_other_region_but_cen_country_doesnt_appear_in_cen(self):
self._create_hvc_win(
hvc_code='E016',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 0)
def test_details_hvc_win_from_other_region_other_country_doesnt_appear_in_cen(self):
self._create_hvc_win(
hvc_code='E016',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='CA'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 0)
def test_details_cen_hvc_win_unconfirmed_in_2016_appears_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.frozen_date,
confirm=False,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 0)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 1)
def test_details_cen_hvc_win_unconfirmed_in_2017_appears_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 0)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 1)
def test_details_unconfirmed_hvc_win_last_year_should_show_up_in_new_region_if_country_has_moved_regions(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.frozen_date,
confirm=False,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
# check in CEN first
self.view_base_url = self.cen_region_url
self.url = self.get_url_for_year(2017)
data_2016 = self._api_response_data
self.assertEqual(data_2016["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(data_2016["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(data_2016["wins"]["export"]["hvc"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(data_2016["wins"]["export"]["hvc"]["number"]["unconfirmed"], 1)
self.assertEqual(data_2016["wins"]["export"]["hvc"]["value"]["total"], self.export_value)
self.assertEqual(data_2016["wins"]["export"]["hvc"]["number"]["total"], 1)
# move HU to a different region
region_year = OverseasRegionYear.objects.get(
country__country='HU', financial_year_id=2017)
region_year.overseas_region = OverseasRegion.objects.get(
name='Western Europe')
region_year.save()
# it should be in within Western Europe region this year
self.view_base_url = self.we_region_url
self.url = self.get_url_for_year(2017)
data_2017 = self._api_response_data
self.assertEqual(data_2017["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(data_2017["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(data_2017["wins"]["export"]["hvc"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(data_2017["wins"]["export"]["hvc"]["number"]["unconfirmed"], 1)
self.assertEqual(data_2017["wins"]["export"]["hvc"]["value"]["total"], self.export_value)
self.assertEqual(data_2017["wins"]["export"]["hvc"]["number"]["total"], 1)
def test_details_hvc_win_HU_as_secondary_market_doesnt_appear_in_latam(self):
""" A win to MX, but HVC from North America.
This win would appear in North America, but not in Latin America region"""
self._create_hvc_win(
hvc_code='E192',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='MX'
)
self.view_base_url = self.latam_region_url
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["hvc"]["number"]["total"], 0)
# Non-HVC
def test_details_cen_non_hvc_win_for_2017_in_2017(self):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 1)
def test_details_cen_non_hvc_win_for_2017_in_2016(self):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 0)
def test_details_cen_non_hvc_win_for_2016_in_2016(self):
self._create_non_hvc_win(
win_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 1)
def test_details_cen_non_hvc_win_for_2016_in_2017(self):
self._create_non_hvc_win(
win_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 0)
def test_details_cen_non_hvc_win_confirmed_in_2016_appears_in_2016(self):
self._create_non_hvc_win(
win_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 1)
def test_details_cen_non_hvc_win_confirmed_in_2016_doesnt_appears_in_2017(self):
self._create_non_hvc_win(
win_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 0)
def test_details_cen_non_hvc_win_from_2016_confirmed_in_2017_doesnt_appears_in_2016(self):
self._create_non_hvc_win(
win_date=self.win_date_2016,
response_date=self.win_date_2017,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 0)
def test_details_cen_non_hvc_win_from_2016_confirmed_in_2017_appears_in_2017(self):
self._create_non_hvc_win(
win_date=self.win_date_2016,
response_date=self.win_date_2017,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 1)
def test_details_non_hvc_win_from_secondary_market_appears_in_cen(self):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='PL'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 1)
def test_details_non_hvc_win_from_other_region_other_country_doesnt_appear_in_cen(self):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='CA'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 0)
    # NOTE(review): the leading underscore keeps this out of the test run.
    # Its name says "from cen region other country" but the body is identical
    # to the enabled 'other_region_other_country' test above and uses 'CA',
    # which is not a CEN country -- confirm the intent before re-enabling.
    def _test_details_non_hvc_win_from_cen_region_other_country_doesnt_appear_in_cen(self):
        self._create_non_hvc_win(
            win_date=self.win_date_2017,
            confirm=True,
            fin_year=2017,
            export_value=self.export_value,
            country='CA'
        )
        self.url = self.get_url_for_year(2017)
        cen_response = self._api_response_data
        self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
        self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], 0)
        self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 0)
def test_details_cen_non_hvc_win_unconfirmed_in_2016_appears_in_2017(self):
self._create_non_hvc_win(
win_date=self.frozen_date,
confirm=False,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 0)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 1)
def test_details_cen_non_hvc_win_unconfirmed_in_2017_appears_in_2017(self):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 0)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 1)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["value"]["total"], self.export_value)
self.assertEqual(cen_response["wins"]["export"]["non_hvc"]["number"]["total"], 1)
def test_details_unconfirmed_non_hvc_win_last_year_should_show_up_in_new_region_if_country_has_moved_regions(self):
self._create_non_hvc_win(
win_date=self.frozen_date,
confirm=False,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
# check in CEN first
self.view_base_url = self.cen_region_url
self.url = self.get_url_for_year(2017)
data_2016 = self._api_response_data
self.assertEqual(data_2016["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(data_2016["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(data_2016["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(data_2016["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 1)
self.assertEqual(data_2016["wins"]["export"]["non_hvc"]["value"]["total"], self.export_value)
self.assertEqual(data_2016["wins"]["export"]["non_hvc"]["number"]["total"], 1)
# move HU to a different region
region_year = OverseasRegionYear.objects.get(
country__country='HU', financial_year_id=2017)
region_year.overseas_region = OverseasRegion.objects.get(
name='Western Europe')
region_year.save()
# it should be in within Western Europe region this year
self.view_base_url = self.we_region_url
self.url = self.get_url_for_year(2017)
data_2017 = self._api_response_data
self.assertEqual(data_2017["wins"]["export"]["non_hvc"]["value"]["confirmed"], 0)
self.assertEqual(data_2017["wins"]["export"]["non_hvc"]["number"]["confirmed"], 0)
self.assertEqual(data_2017["wins"]["export"]["non_hvc"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(data_2017["wins"]["export"]["non_hvc"]["number"]["unconfirmed"], 1)
self.assertEqual(data_2017["wins"]["export"]["non_hvc"]["value"]["total"], self.export_value)
self.assertEqual(data_2017["wins"]["export"]["non_hvc"]["number"]["total"], 1)
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class OverseasRegionMonthsTestCase(OverseasRegionBaseViewTestCase):
    """Month-by-month cumulative totals for an overseas region."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        call_command('create_missing_hvcs', verbose=False)

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        self.test_region = OverseasRegion.objects.get(name='Western Europe')
        self.view_base_url = reverse(
            'mi:overseas_region_monthly',
            kwargs={'region_id': self.test_region.id},
        )

    def test_get_with_no_data(self):
        """With no wins, every monthly count and value must be zero."""
        self.url = self.get_url_for_year(2017)
        response = self._api_response_data
        # there should be no wins at all
        self.assertEqual(s('sum(months[].totals.*[].*[].number.*[])', response), 0)
        # and every value in the wins breakdown should be 0
        self.assertEqual(s('sum(months[].totals.*[].*[].*[].*[])', response), 0)

    def test_get_with_1_win(self):
        """A single confirmed win appears only from its month onwards."""
        export_value = 123456
        self._create_hvc_win(
            hvc_code='E011', win_date=now(), response_date=now(),
            confirm=True, fin_year=2017, export_value=export_value)
        self.url = self.get_url_for_year(2017)
        response = self._api_response_data
        wins_in_may = s("months[?date=='2017-05'].totals.export.hvc.number.total | [0]", response)
        wins_in_april = s("months[?date=='2017-04'].totals.export.hvc.number.total | [0]", response)
        # no wins in the previous month, one in the current month
        self.assertEqual(wins_in_april, 0)
        self.assertEqual(wins_in_may, 1)
        # the summed value should match the single win created above
        self.assertEqual(
            s("sum(months[].totals.export.hvc.value.total)", response),
            export_value)

    def test_group_by_month_from_data(self):
        """Monthly totals are cumulative across the financial year."""
        export_value = 123456
        self._create_hvc_win(
            hvc_code='E011', win_date=now(), response_date=now(),
            confirm=True, fin_year=2017, export_value=export_value)
        self._create_hvc_win(
            hvc_code='E011', win_date=now() + relativedelta(months=-1),
            confirm=True, fin_year=2017, export_value=export_value)
        self.url = self.get_url_for_year(2017)
        response = self._api_response_data
        wins_in_may = s("months[?date=='2017-05'].totals.export.hvc.number.total | [0]", response)
        wins_in_april = s("months[?date=='2017-04'].totals.export.hvc.number.total | [0]", response)
        # April only sees April's win
        self.assertEqual(wins_in_april, 1)
        # May is cumulative: April's win plus May's win
        self.assertEqual(wins_in_may, 2)
        # values are cumulative too: 1x in April + 2x in May = 3x overall
        self.assertEqual(
            s("sum(months[].totals.export.hvc.value.total)", response),
            export_value * 3)
class OverseasRegionsTopNonHvcWinsTestCase(OverseasRegionBaseViewTestCase, GenericTopNonHvcWinsTestMixin):
    """Runs the generic top non-HVC wins tests against the region endpoint."""

    export_value = 9999
    fin_years = [2016, 2017]

    TEST_COUNTRY_CODE = 'FR'

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        call_command('create_missing_hvcs', verbose=False)

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        self.test_region = OverseasRegion.objects.get(name='Western Europe')
        self.view_base_url = reverse(
            'mi:overseas_region_top_nonhvc',
            kwargs={'region_id': self.test_region.id},
        )
class OverseasRegionsWinTableTestCase(OverseasRegionBaseViewTestCase, GenericWinTableTestMixin):
    """Win-table endpoint tests for overseas regions."""

    export_value = 100000
    fin_years = [2016, 2017]

    TEST_COUNTRY_CODE = 'FR'

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        call_command('create_missing_hvcs', verbose=False)

    def setUp(self):
        super().setUp()
        self._win_factory_function = create_win_factory(
            self.user, sector_choices=self.TEAM_1_SECTORS)
        self.test_region = OverseasRegion.objects.get(name='Western Europe')
        self.expected_response = {
            "os_region": {
                "id": str(self.test_region.id),
                "name": self.test_region.name,
            },
            "wins": {
                "hvc": []
            }
        }
        self.os_win_table_url = reverse(
            'mi:overseas_region_win_table',
            kwargs={'region_id': self.test_region.id},
        )
        # a region id that does not exist, used to exercise the 404 paths
        self.os_win_table_url_invalid = reverse(
            'mi:overseas_region_win_table',
            kwargs={'region_id': 100},
        )
        self.view_base_url = self.os_win_table_url

    def test_2017_win_table_in_2016_404(self):
        """An unknown region id returns 404 for the 2016 win table."""
        self.view_base_url = self.os_win_table_url_invalid
        self.url = self.get_url_for_year(2016)
        self._get_api_response(self.url, status_code=404)

    def test_2016_win_table_in_2017_404(self):
        """An unknown region id returns 404 for the 2017 win table."""
        self.view_base_url = self.os_win_table_url_invalid
        self.url = self.get_url_for_year(2017)
        self._get_api_response(self.url, status_code=404)
| UKTradeInvestment/export-wins-data | mi/tests/test_region_views.py | Python | gpl-3.0 | 95,185 | [
"Amber"
] | ab814ae87b11ea8ea335b2c358b2d3d80cfe3426d07766a720f6ee1d3c8fbd79 |
#!/usr/bin/env python
"""
Read a maf and output a single block fasta file, concatenating blocks
usage %prog species1,species2 maf_file out_file
"""
#Dan Blankenberg
import sys
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.align import maf
from galaxy.tools.util import maf_utilities
assert sys.version_info[:2] >= ( 2, 4 )
def __main__():
    """Concatenate MAF blocks into one gapped FASTA record per species.

    Command line (see module docstring): species-list maf-file out-file.
    Python 2 code (legacy `except Exception, e` / print-statement syntax).
    """
    # argv[1]: comma-separated species list, or empty for "all species"
    try:
        species = maf_utilities.parse_species_option( sys.argv[1] )
    except Exception, e:
        maf_utilities.tool_fail( "Error determining species value: %s" % e )
    # argv[2]: input MAF path
    try:
        input_filename = sys.argv[2]
    except Exception, e:
        maf_utilities.tool_fail( "Error reading MAF filename: %s" % e )
    # argv[3]: output FASTA path
    try:
        file_out = open( sys.argv[3], 'w' )
    except Exception, e:
        maf_utilities.tool_fail( "Error opening file for output: %s" % e )
    if species:
        print "Restricted to species: %s" % ', '.join( species )
    else:
        print "Not restricted to species."
    # with no restriction, scan the MAF once to collect every species present
    if not species:
        try:
            species = maf_utilities.get_species_in_maf( input_filename )
        except Exception, e:
            maf_utilities.tool_fail( "Error determining species in input MAF: %s" % e )
    # NOTE: the input MAF is re-read once per species; acceptable for the
    # intended tool usage, but O(species * file-size) for large inputs.
    for spec in species:
        file_out.write( ">" + spec + "\n" )
        try:
            for start_block in maf.Reader( open( input_filename, 'r' ) ):
                for block in maf_utilities.iter_blocks_split_by_species( start_block ):
                    block.remove_all_gap_columns() #remove extra gaps
                    component = block.get_component_by_src_start( spec ) #blocks only have one occurrence of a particular species, so this is safe
                    if component:
                        file_out.write( component.text )
                    else:
                        # species absent from this block: pad with gaps so all
                        # concatenated sequences stay column-aligned
                        file_out.write( "-" * block.text_size )
        except Exception, e:
            maf_utilities.tool_fail( "Your MAF file appears to be malformed: %s" % e )
        file_out.write( "\n" )
    file_out.close()
if __name__ == "__main__": __main__()
| volpino/Yeps-EURAC | tools/maf/maf_to_fasta_concat.py | Python | mit | 2,085 | [
"Galaxy"
] | c5c4649d9be10e352d3c2ed48eaa67b3158ac1403bd70c586badbf2197f0a1bc |
#!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import argparse
import collections
from typing import Deque, Dict, Iterable, Iterator, List, Optional, Tuple, cast
from pysam import VCFRecord
import paleomix.common.vcfwrap as vcfwrap
# Larger than any real record position; used as the end sentinel by _trim_chunk.
_INF = float("inf")
# Rough number of records to keep in memory at once
_CHUNK_SIZE = 10000
def add_varfilter_options(parser: argparse.ArgumentParser) -> None:
    """Register the variant-filtering command-line options on *parser*.

    Mirrors most of the options offered by samtools' vcfutils.pl varFilter
    (see the "Options adapted from varFilter" section below), plus
    paleomix-specific options. Modifies *parser* in place and returns None.
    """
    parser.add_argument(
        "--homozygous-chromosome",
        action="append",
        default=[],
        help="Filter heterozygous SNPs observed on this chromosome (e.g. chrX)",
    )
    parser.add_argument(
        "-q",
        "--min-quality",
        type=int,
        default=30,
        help="Minimum Phred score recorded in the QUAL column",
    )
    # No longer supported options: kept hidden so old command lines still parse.
    parser.add_argument("-f", "--min-allele-frequency", help=argparse.SUPPRESS)
    parser.add_argument("-b", "--pileup", help=argparse.SUPPRESS)
    parser.add_argument(
        "-k",
        "--keep-ambigious-genotypes",
        default=False,
        action="store_true",
        help="Keep SNPs without a most likely genotype (based on PL)",
    )
    # Options adapted from varFilter
    parser.add_argument(
        "-Q",
        "--min-mapping-quality",
        type=int,
        default=10,
        help="Minimum RMS mapping quality for SNPs",
    )
    parser.add_argument(
        "-d",
        "--min-read-depth",
        type=int,
        default=8,
        help="Minimum read depth",
    )
    parser.add_argument(
        "-D",
        "--max-read-depth",
        type=int,
        default=10000000,
        help="Maximum read depth",
    )
    parser.add_argument(
        "-a",
        "--min-num-alt-bases",
        type=int,
        default=2,
        help="Minimum number of alternative bases observed for variants",
    )
    parser.add_argument(
        "-w",
        "--min-distance-to-indels",
        type=int,
        default=3,
        help="SNP within INT bp around a gap to be filtered",
    )
    parser.add_argument(
        "-W",
        "--min-distance-between-indels",
        type=int,
        default=10,
        help="Window size for filtering adjacent gaps",
    )
    parser.add_argument(
        "-1",
        "--min-strand-bias",
        type=float,
        default=1e-4,
        help="Min P-value for strand bias (given PV4)",
    )
    parser.add_argument(
        "-2",
        "--min-baseq-bias",
        type=float,
        default=1e-100,
        help="Min P-value for baseQ bias (given PV4)",
    )
    parser.add_argument(
        "-3",
        "--min-mapq-bias",
        type=float,
        default=0,
        help="Min P-value for mapQ bias (given PV4)",
    )
    parser.add_argument(
        "-4",
        "--min-end-distance-bias",
        type=float,
        default=1e-4,
        help="Min P-value for end distance bias (given PV4)",
    )
def describe_filters(options: argparse.Namespace) -> Dict[str, str]:
    """Map each filter tag (as written to the FILTER column by this module)
    to a short human-readable description of the criterion it represents."""
    tag_descriptions = (
        ("HET", "Heterozygous SNPs observed on homozygous chromosome (e.g. chrX)"),
        (
            "q:%i" % options.min_quality,
            "Minimum Phred score recorded in the QUAL column",
        ),
        ("k", "SNPs without a most likely genotype (based on PL)"),
        ("Q:%i" % options.min_mapping_quality, "Minimum RMS mapping quality"),
        ("d:%i" % options.min_read_depth, "Minimum read depth"),
        ("D:%i" % options.max_read_depth, "Maximum read depth"),
        (
            "a:%i" % options.min_num_alt_bases,
            "Minimum number of alternative bases observed for variants",
        ),
        ("w:%i" % options.min_distance_to_indels, "SNP within INT bp around a gap"),
        (
            "W:%i" % options.min_distance_between_indels,
            "Indel within INT bp of another indel",
        ),
        ("1:%e" % options.min_strand_bias, "Min P-value for strand bias (given PV4)"),
        ("2:%e" % options.min_baseq_bias, "Min P-value for baseQ bias (given PV4)"),
        ("3:%e" % options.min_mapq_bias, "Min P-value for mapQ bias (given PV4)"),
        (
            "4:%e" % options.min_end_distance_bias,
            "Min P-value for end distance bias (given PV4)",
        ),
    )
    return dict(tag_descriptions)
def filter_vcfs(
    options: argparse.Namespace,
    vcfs: Iterable[VCFRecord],
) -> Iterator[VCFRecord]:
    """Lazily filter a stream of VCF records, yielding each record with its
    FILTER column set to "PASS" or to the accumulated filter tags.

    Records are processed in chunks so the indel-proximity filters can see
    neighbouring records without holding the whole file in memory."""
    stream = iter(vcfs)
    window = collections.deque()  # type: Deque[Optional[VCFRecord]]
    while _read_chunk(stream, window):
        _filter_by_indels(options, window)
        _filter_by_properties(options, window)
        for record in _trim_chunk(options, window):
            if record.filter == ".":
                record.filter = "PASS"
            yield record
def _read_chunk(
    vcfs: Iterator[VCFRecord],
    chunk: Deque[Optional[VCFRecord]],
) -> bool:
    """Top up `chunk` with up to _CHUNK_SIZE records from `vcfs`.

    Appends a single None sentinel once the input is exhausted. Returns True
    while the chunk holds more than one element (i.e. there is still real
    work beyond a lone sentinel)."""
    exhausted = False
    while len(chunk) < _CHUNK_SIZE and not exhausted:
        try:
            chunk.append(next(vcfs))
        except StopIteration:
            # Mark end-of-stream for downstream consumers (_trim_chunk).
            chunk.append(None)
            exhausted = True
    return len(chunk) > 1
def _trim_chunk(
    options: argparse.Namespace,
    chunk: Deque[Optional[VCFRecord]],
) -> Iterator[VCFRecord]:
    """Pop and yield records from the front of `chunk` that are far enough
    from the chunk's final record to be unaffected by indel-proximity
    filtering of records not yet read; the remainder stays queued for the
    next round."""
    # A record within this distance of the chunk's end may still interact
    # with unread records, so it must not be flushed yet.
    min_distance = max(
        options.min_distance_between_indels, options.min_distance_to_indels
    )
    if not chunk:
        return
    record = chunk[-1]
    if record is None:
        # End-of-stream sentinel appended by _read_chunk: flush everything by
        # using a contig name no real record can match and an infinite position.
        end_chr = "!@#$%^&*()_+"
        end_pos = _INF
        chunk.pop()
    else:
        end_chr = record.contig
        end_pos = record.pos
    while chunk:
        vcf = cast(VCFRecord, chunk[0])
        if vcf.contig == end_chr:
            # 'length' will become a too large value for heterozygous SNPs,
            # but it is faster than having to parse every position, and has
            # no effect on the final results.
            length = max(len(vcf.ref), len(vcf.alt))
            # Still within range of the chunk's final record; stop flushing.
            if (vcf.pos + length + min_distance) >= end_pos:
                break
        yield cast(VCFRecord, chunk.popleft())
def _group_indels_near_position(
    indels: Iterable[VCFRecord],
    distance: int,
) -> Dict[int, List[VCFRecord]]:
    """Build a mapping from position to the indels overlapping or within
    `distance` bp of that position.

    A zero distance yields an empty mapping (no blacklisting)."""
    by_position = collections.defaultdict(list)  # type: Dict[int, List[VCFRecord]]
    if distance:
        for record in indels:
            # Bases covered beyond the shared prefix base; using REF alone is
            # sufficient since alternative alleles need not be considered for
            # ambiguous indels in low-complexity regions.
            covered = len(record.ref) - 1
            # record.pos is the base just before the insertion/deletion, so the
            # blacklisted (inclusive) window is centred one base further on.
            first = record.pos + 1 - distance
            last = record.pos + 1 + distance + covered
            for position in range(first, last + 1):
                by_position[position].append(record)
    return by_position
def _select_best_indel(indels: Iterable[VCFRecord]) -> VCFRecord:
    """Return the indel with the highest QUAL score; ties are broken in
    favour of the earliest position (hence the negated position in the key)."""
    return max(indels, key=lambda record: (float(record.qual), -record.pos))
def _filter_by_indels(
    options: argparse.Namespace,
    chunk: Deque[Optional[VCFRecord]],
) -> None:
    """Filters a list of SNPs and Indels, such that no SNP is closer to
    an indel than the value set in options.min_distance_to_indels, and
    such that no two indels too close. If two or more indels are within
    this distance, the indel with the highest QUAL score is retained. When
    no unique highest QUAL score exists, an arbitrary indel is retained
    among those indels with the highest QUAL score. SNPs are filtered
    based on prefiltered Indels."""
    indels = [vcf for vcf in chunk if vcf is not None and vcfwrap.is_indel(vcf)]
    # Positions within min_distance_between_indels of any indel -> the set of
    # indels competing at that position.
    distance_between = options.min_distance_between_indels
    indel_blacklist = _group_indels_near_position(indels, distance_between)
    # Positions within min_distance_to_indels of any indel; SNPs here fail.
    distance_to = options.min_distance_to_indels
    snp_blacklist = _group_indels_near_position(indels, distance_to)
    for vcf in chunk:
        if vcf is None:
            # End-of-stream sentinel appended by _read_chunk.
            continue
        elif vcfwrap.is_indel(vcf):
            # Look up competitors at this indel's first affected base; the
            # [vcf] default keeps the indel when blacklisting is disabled.
            blacklisted = indel_blacklist.get(vcf.pos + 1, [vcf])
            if vcf is not _select_best_indel(blacklisted):
                _mark_as_filtered(vcf, "W:%i" % distance_between)
        elif (vcf.alt != ".") and (vcf.pos in snp_blacklist):
            # TODO: How to handle heterozygous SNPs near
            _mark_as_filtered(vcf, "w:%i" % distance_to)
def _filter_by_properties(
    options: argparse.Namespace,
    vcfs: Deque[Optional[VCFRecord]],
) -> None:
    """Filters a list of SNPs/indels based on the various properties recorded in
    the info column, and others. This mirrors most of the filtering carried out
    by vcfutils.pl varFilter."""
    for vcf in vcfs:
        if vcf is None:
            # End-of-stream sentinel appended by _read_chunk; nothing to do.
            continue
        elif float(vcf.qual) < options.min_quality:
            # Note: the record is marked, not skipped; later checks still run.
            _mark_as_filtered(vcf, "q:%i" % options.min_quality)
        # Parse the INFO column into a key/value mapping; flag-style fields
        # (no "=") map to None.
        properties = {}  # type: Dict[str, Optional[str]]
        for field in vcf.info.split(";"):
            if "=" in field:
                key, value = field.split("=")
            else:
                key, value = field, None
            properties[key] = value
        # NOTE(review): assumes every record carries a DP field; a record
        # without one would raise KeyError here -- confirm against callers.
        read_depth = float(properties["DP"])
        if options.min_read_depth > read_depth:
            _mark_as_filtered(vcf, "d:%i" % options.min_read_depth)
        elif options.max_read_depth < read_depth:
            _mark_as_filtered(vcf, "D:%i" % options.max_read_depth)
        mapping_qual = properties.get("MQ")
        if mapping_qual is not None and options.min_mapping_quality:
            # "." means no usable value and is treated as failing the check.
            if mapping_qual == "." or float(mapping_qual) < options.min_mapping_quality:
                _mark_as_filtered(vcf, "Q:%i" % options.min_mapping_quality)
        if "PV4" in properties:
            # PV4: four comma-separated P-values, checked in order against the
            # strand / baseQ / mapQ / end-distance bias thresholds (see the
            # matching -1/-2/-3/-4 options).
            pv4 = [float(value) for value in properties["PV4"].split(",")]
            if pv4[0] < options.min_strand_bias:
                _mark_as_filtered(vcf, "1:%e" % options.min_strand_bias)
            if pv4[1] < options.min_baseq_bias:
                _mark_as_filtered(vcf, "2:%e" % options.min_baseq_bias)
            if pv4[2] < options.min_mapq_bias:
                _mark_as_filtered(vcf, "3:%e" % options.min_mapq_bias)
            if pv4[3] < options.min_end_distance_bias:
                _mark_as_filtered(vcf, "4:%e" % options.min_end_distance_bias)
        if vcf.alt != ".":
            # DP4: four comma-separated counts; the last two are taken as the
            # forward/reverse support for the alternative allele.
            _, _, alt_fw, alt_rev = map(int, properties["DP4"].split(","))
            if (alt_fw + alt_rev) < options.min_num_alt_bases:
                _mark_as_filtered(vcf, "a:%i" % options.min_num_alt_bases)
            ml_genotype = vcfwrap.get_ml_genotype(vcf)
            if (ml_genotype == ("N", "N")) and not options.keep_ambigious_genotypes:
                # No most likely genotype
                _mark_as_filtered(vcf, "k")
            if ml_genotype[0] != ml_genotype[1]:
                # Heterozygous call on a chromosome declared homozygous.
                if vcf.contig in options.homozygous_chromosome:
                    _mark_as_filtered(vcf, "HET")
def _mark_as_filtered(vcf: VCFRecord, filter_name: str) -> bool:
    """Append `filter_name` to the record's FILTER column.

    An unfiltered column ("." or "PASS") is replaced outright; otherwise the
    tag is appended with a ";" separator unless already present. Returns True
    if the column was modified."""
    current = vcf.filter
    if current in (".", "PASS"):
        vcf.filter = filter_name
        return True
    if filter_name in current.split(";"):
        # Already marked with this tag; avoid duplicate entries.
        return False
    vcf.filter = current + ";" + filter_name
    return True
| MikkelSchubert/paleomix | paleomix/common/vcffilter.py | Python | mit | 12,710 | [
"pysam"
] | a2c27c9e6ee73872688c3703742967d685cf932058eb50263a1feee6686b3c29 |
from math import sqrt,log,cos,sin,pi
from random import random
# Constants
Z = 79  # Atomic number of the target nucleus (79 = gold)
e = 1.602e-19  # Elementary charge, in coulombs
E = 7.7e6*e  # Alpha-particle kinetic energy: 7.7 MeV converted to joules
epsilon0 = 8.854e-12  # Vacuum permittivity, in F/m
a0 = 5.292e-11  # Bohr radius, in metres
sigma = a0/100  # Standard deviation of the Gaussian beam profile (see gaussian())
N = 1000000  # Number of simulated alpha particles
# Function to generate two Gaussian random numbers
def gaussian():
    """Draw one pair of independent Gaussian random numbers (mean 0,
    standard deviation `sigma`) via the polar Box-Muller transform."""
    radius = sqrt(-2*sigma*sigma*log(1-random()))
    angle = 2*pi*random()
    return radius*cos(angle), radius*sin(angle)
# Main program
count = 0
for i in range(N):
    # Sample the particle's (x, y) offset from the beam axis.
    x,y = gaussian()
    b = sqrt(x*x+y*y)  # impact parameter
    # Count the particle as reflected when its impact parameter falls below
    # the Z*e^2/(2*pi*epsilon0*E) threshold used here.
    if b<Z*e*e/(2*pi*epsilon0*E):
        count += 1
print(count,"particles were reflected out of",N)
| KiMiralles/Python-Learning | Computational Physics Newman/Book Resources/rutherford.py | Python | gpl-3.0 | 559 | [
"Gaussian"
] | e426a68536de8ec39780c102f4761d5546d3a995bb5960dbab28afc67317b414 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.migration_service import (
MigrationServiceAsyncClient,
)
from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceClient
from google.cloud.aiplatform_v1.services.migration_service import pagers
from google.cloud.aiplatform_v1.services.migration_service import transports
from google.cloud.aiplatform_v1.types import migratable_resource
from google.cloud.aiplatform_v1.types import migration_service
from google.longrunning import operations_pb2
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
    """Fake mTLS client-certificate callback returning a dummy (cert, key) pair."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a sentinel endpoint when *client* defaults to localhost, so
    endpoint-selection tests can tell the default apart from an override;
    otherwise return the client's own default endpoint."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """mTLS endpoint derivation handles every flavour of input host."""
    convert = MigrationServiceClient._get_default_mtls_endpoint
    # None passes through untouched.
    assert convert(None) is None
    # (input, expected) pairs: plain, already-mtls, sandbox, sandbox-mtls,
    # and a non-Google host that must be returned unchanged.
    cases = [
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in cases:
        assert convert(endpoint) == expected
@pytest.mark.parametrize(
    "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,]
)
def test_migration_service_client_from_service_account_info(client_class):
    """Clients built via from_service_account_info() carry the factory's
    credentials and target the default API host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.MigrationServiceGrpcTransport, "grpc"),
        (transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_migration_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """with_always_use_jwt_access() is invoked on the credentials if and only
    if the transport is constructed with always_use_jwt_access=True."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,]
)
def test_migration_service_client_from_service_account_file(client_class):
    """Both from_service_account_file() and its _json alias carry the
    factory's credentials and target the default API host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "aiplatform.googleapis.com:443"
def test_migration_service_client_get_transport_class():
    """get_transport_class() returns a known transport by default and maps
    the "grpc" label to the synchronous gRPC transport."""
    transport = MigrationServiceClient.get_transport_class()
    available_transports = [
        transports.MigrationServiceGrpcTransport,
    ]
    assert transport in available_transports
    transport = MigrationServiceClient.get_transport_class("grpc")
    assert transport == transports.MigrationServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    MigrationServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MigrationServiceClient),
)
@mock.patch.object(
    MigrationServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MigrationServiceAsyncClient),
)
def test_migration_service_client_client_options(
    client_class, transport_class, transport_name
):
    """client_options handling: pre-built transports, api_endpoint overrides,
    each GOOGLE_API_USE_MTLS_ENDPOINT value, an unsupported
    GOOGLE_API_USE_CLIENT_CERTIFICATE value, and quota_project_id."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(MigrationServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(MigrationServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            MigrationServiceClient,
            transports.MigrationServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            MigrationServiceClient,
            transports.MigrationServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    MigrationServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MigrationServiceClient),
)
@mock.patch.object(
    MigrationServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MigrationServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_migration_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the
    mTLS endpoint exactly when a client certificate is available (explicit
    or via ADC) and GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class", [MigrationServiceClient, MigrationServiceAsyncClient]
)
@mock.patch.object(
    MigrationServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MigrationServiceClient),
)
@mock.patch.object(
    MigrationServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MigrationServiceAsyncClient),
)
def test_migration_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source() honours the
    GOOGLE_API_USE_CLIENT_CERTIFICATE and GOOGLE_API_USE_MTLS_ENDPOINT
    environment variables for every supported combination."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_migration_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            MigrationServiceClient,
            transports.MigrationServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_migration_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials file supplied via client_options is forwarded to the
    transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_migration_service_client_client_options_from_dict():
    """client_options may be passed as a plain dict; api_endpoint is honoured."""
    with mock.patch(
        "google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = MigrationServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            MigrationServiceClient,
            transports.MigrationServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_migration_service_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file are forwarded to the transport and used
    (instead of ADC) when the gRPC channel is created."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
"request_type", [migration_service.SearchMigratableResourcesRequest, dict,]
)
def test_search_migratable_resources(request_type, transport: str = "grpc"):
client = MigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_migratable_resources), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = migration_service.SearchMigratableResourcesResponse(
next_page_token="next_page_token_value",
)
response = client.search_migratable_resources(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == migration_service.SearchMigratableResourcesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchMigratableResourcesPager)
assert response.next_page_token == "next_page_token_value"
def test_search_migratable_resources_empty_call():
    """Coverage failsafe: calling with no request and no flattened args works."""
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the stub so the zero-argument invocation never leaves the process.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as stub:
        client.search_migratable_resources()
        stub.assert_called()
        # A default (empty) request proto is synthesized for the call.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == migration_service.SearchMigratableResourcesRequest()
@pytest.mark.asyncio
async def test_search_migratable_resources_async(
    transport: str = "grpc_asyncio",
    request_type=migration_service.SearchMigratableResourcesRequest,
):
    """Async search RPC: the request is forwarded and an async pager is returned."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices; the API itself is mocked out.
    request = request_type()
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as stub:
        # Wrap the canned response in a fake awaitable unary-unary call.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            migration_service.SearchMigratableResourcesResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.search_migratable_resources(request)
    # The stub must have been invoked with an equivalent request proto.
    assert len(stub.mock_calls)
    _, call_args, _ = stub.mock_calls[0]
    assert call_args[0] == migration_service.SearchMigratableResourcesRequest()
    # The async surface yields the async pager type with the token surfaced.
    assert isinstance(response, pagers.SearchMigratableResourcesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_migratable_resources_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_search_migratable_resources_async(request_type=dict)
def test_search_migratable_resources_field_headers():
    """Routing fields in the request must surface as x-goog-request-params."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate a URI-visible field so a routing header has to be emitted.
    request = migration_service.SearchMigratableResourcesRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as stub:
        stub.return_value = migration_service.SearchMigratableResourcesResponse()
        client.search_migratable_resources(request)
        # The very request object we built is what reaches the stub.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request
    # The routing header derived from `parent` must be present in metadata.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_search_migratable_resources_field_headers_async():
    """Async surface must also emit the x-goog-request-params routing header."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-visible field so a routing header has to be emitted.
    request = migration_service.SearchMigratableResourcesRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            migration_service.SearchMigratableResourcesResponse()
        )
        await client.search_migratable_resources(request)
        # The very request object we built is what reaches the stub.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request
    # The routing header derived from `parent` must be present in metadata.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_search_migratable_resources_flattened():
    """Flattened keyword arguments must be folded into the request proto."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as stub:
        stub.return_value = migration_service.SearchMigratableResourcesResponse()
        # Invoke with the flattened `parent` keyword instead of a request object.
        client.search_migratable_resources(parent="parent_value",)
        # The single underlying call must carry the flattened value.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].parent == "parent_value"
def test_search_migratable_resources_flattened_error():
    """Mixing a request object with flattened fields is rejected."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both forms at once is ambiguous and must raise.
    with pytest.raises(ValueError):
        client.search_migratable_resources(
            migration_service.SearchMigratableResourcesRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_search_migratable_resources_flattened_async():
    """Async flattened keyword arguments must be folded into the request proto."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (Fix: a preceding bare-response assignment was dead code — it was
        # immediately overwritten — so only the awaitable fake call remains.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            migration_service.SearchMigratableResourcesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.search_migratable_resources(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_search_migratable_resources_flattened_error_async():
    """Async surface also rejects mixing a request object with flattened fields."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both forms at once is ambiguous and must raise.
    with pytest.raises(ValueError):
        await client.search_migratable_resources(
            migration_service.SearchMigratableResourcesRequest(), parent="parent_value",
        )
def test_search_migratable_resources_pager(transport_name: str = "grpc"):
    """Iterating the pager yields every resource across all mocked pages.

    Also checks that the routing-header metadata is attached to the pager.
    """
    client = MigrationServiceClient(
        # Fix: AnonymousCredentials must be instantiated, not passed as a class.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 resources.
        call.side_effect = (
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
                next_page_token="abc",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[], next_page_token="def",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[migratable_resource.MigratableResource(),],
                next_page_token="ghi",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.search_migratable_resources(request={})
        # The pager carries the expected routing metadata.
        assert pager._metadata == metadata
        # Consuming the pager walks all four pages (3 + 0 + 1 + 2 items).
        results = [i for i in pager]
        assert len(results) == 6
        assert all(
            isinstance(i, migratable_resource.MigratableResource) for i in results
        )
def test_search_migratable_resources_pages(transport_name: str = "grpc"):
    """The pager's `pages` iterator exposes each raw page with its token."""
    client = MigrationServiceClient(
        # Fix: AnonymousCredentials must be instantiated, not passed as a class.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
                next_page_token="abc",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[], next_page_token="def",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[migratable_resource.MigratableResource(),],
                next_page_token="ghi",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.search_migratable_resources(request={}).pages)
        # Page tokens must line up; the final page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_migratable_resources_async_pager():
    """Async-iterating the pager yields every resource across all mocked pages."""
    client = MigrationServiceAsyncClient(
        # Fix: AnonymousCredentials must be instantiated, not passed as a class.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 resources.
        call.side_effect = (
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
                next_page_token="abc",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[], next_page_token="def",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[migratable_resource.MigratableResource(),],
                next_page_token="ghi",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.search_migratable_resources(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 resources in total.
        assert len(responses) == 6
        assert all(
            isinstance(i, migratable_resource.MigratableResource) for i in responses
        )
@pytest.mark.asyncio
async def test_search_migratable_resources_async_pages():
    """The async pager's `pages` iterator exposes each raw page with its token."""
    client = MigrationServiceAsyncClient(
        # Fix: AnonymousCredentials must be instantiated, not passed as a class.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
                next_page_token="abc",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[], next_page_token="def",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[migratable_resource.MigratableResource(),],
                next_page_token="ghi",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.search_migratable_resources(request={})).pages:
            pages.append(page_)
        # Page tokens must line up; the final page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [migration_service.BatchMigrateResourcesRequest, dict,]
)
def test_batch_migrate_resources(request_type, transport: str = "grpc"):
    """Sync batch-migrate: forwards the request and returns an LRO future."""
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request of either supported type suffices; the API is mocked.
    request = request_type()
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.batch_migrate_resources(request)
        # Exactly one underlying call with an equivalent request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == migration_service.BatchMigrateResourcesRequest()
    # The surface wraps the operation in an LRO future.
    assert isinstance(response, future.Future)
def test_batch_migrate_resources_empty_call():
    """Coverage failsafe: calling with no request and no flattened args works."""
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the stub so the zero-argument invocation never leaves the process.
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as stub:
        client.batch_migrate_resources()
        stub.assert_called()
        # A default (empty) request proto is synthesized for the call.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == migration_service.BatchMigrateResourcesRequest()
@pytest.mark.asyncio
async def test_batch_migrate_resources_async(
    transport: str = "grpc_asyncio",
    request_type=migration_service.BatchMigrateResourcesRequest,
):
    """Async batch-migrate: the request is forwarded and an LRO future returned."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices; the API itself is mocked out.
    request = request_type()
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as stub:
        # Wrap a canned long-running operation in a fake awaitable call.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.batch_migrate_resources(request)
        # The stub received an equivalent request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == migration_service.BatchMigrateResourcesRequest()
    # The surface wraps the operation in an LRO future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_migrate_resources_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_batch_migrate_resources_async(request_type=dict)
def test_batch_migrate_resources_field_headers():
    """Routing fields in the request must surface as x-goog-request-params."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Populate a URI-visible field so a routing header has to be emitted.
    request = migration_service.BatchMigrateResourcesRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.batch_migrate_resources(request)
        # The very request object we built is what reaches the stub.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request
    # The routing header derived from `parent` must be present in metadata.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_batch_migrate_resources_field_headers_async():
    """Async surface must also emit the x-goog-request-params routing header."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-visible field so a routing header has to be emitted.
    request = migration_service.BatchMigrateResourcesRequest()
    request.parent = "parent/value"
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.batch_migrate_resources(request)
        # The very request object we built is what reaches the stub.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request
    # The routing header derived from `parent` must be present in metadata.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_batch_migrate_resources_flattened():
    """Flattened parent/migrate_resource_requests kwargs populate the request."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        # Build the nested migrate request once and reuse it for the assertion.
        sample_request = migration_service.MigrateResourceRequest(
            migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
                endpoint="endpoint_value"
            )
        )
        # Invoke with flattened keywords rather than a request object.
        client.batch_migrate_resources(
            parent="parent_value", migrate_resource_requests=[sample_request],
        )
        # The single underlying call carries both flattened values.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].parent == "parent_value"
        assert call_args[0].migrate_resource_requests == [sample_request]
def test_batch_migrate_resources_flattened_error():
    """Mixing a request object with flattened fields is rejected."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Build the nested migrate request up front for readability.
    extra_request = migration_service.MigrateResourceRequest(
        migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
            endpoint="endpoint_value"
        )
    )
    # Supplying both forms at once is ambiguous and must raise.
    with pytest.raises(ValueError):
        client.batch_migrate_resources(
            migration_service.BatchMigrateResourcesRequest(),
            parent="parent_value",
            migrate_resource_requests=[extra_request],
        )
@pytest.mark.asyncio
async def test_batch_migrate_resources_flattened_async():
    """Async flattened kwargs must be folded into the batch-migrate request."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (Fix: a preceding bare-Operation assignment was dead code — it was
        # immediately overwritten — so only the awaitable fake call remains.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.batch_migrate_resources(
            parent="parent_value",
            migrate_resource_requests=[
                migration_service.MigrateResourceRequest(
                    migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
                        endpoint="endpoint_value"
                    )
                )
            ],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].migrate_resource_requests
        mock_val = [
            migration_service.MigrateResourceRequest(
                migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
                    endpoint="endpoint_value"
                )
            )
        ]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_migrate_resources_flattened_error_async():
    """Async surface also rejects mixing a request object with flattened fields."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Build the nested migrate request up front for readability.
    extra_request = migration_service.MigrateResourceRequest(
        migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
            endpoint="endpoint_value"
        )
    )
    # Supplying both forms at once is ambiguous and must raise.
    with pytest.raises(ValueError):
        await client.batch_migrate_resources(
            migration_service.BatchMigrateResourcesRequest(),
            parent="parent_value",
            migrate_resource_requests=[extra_request],
        )
def test_credentials_transport_error():
    """Conflicting credential/transport client arguments must raise ValueError."""
    # Credentials object together with a transport instance: rejected.
    prebuilt = transports.MigrationServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        MigrationServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=prebuilt,
        )
    # Credentials file together with a transport instance: rejected.
    prebuilt = transports.MigrationServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        MigrationServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=prebuilt,
        )
    # An api_key together with a transport instance: rejected.
    prebuilt = transports.MigrationServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    key_options = client_options.ClientOptions()
    key_options.api_key = "api_key"
    with pytest.raises(ValueError):
        MigrationServiceClient(client_options=key_options, transport=prebuilt)
    # An api_key together with a credentials object: rejected.
    key_options = mock.Mock()
    key_options.api_key = "api_key"
    with pytest.raises(ValueError):
        MigrationServiceClient(
            client_options=key_options,
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Scopes in client options together with a transport instance: rejected.
    prebuilt = transports.MigrationServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        MigrationServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=prebuilt,
        )
def test_transport_instance():
    """A pre-built transport instance is adopted verbatim by the client."""
    prebuilt = transports.MigrationServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The client must hold the exact object it was given, not a copy.
    assert MigrationServiceClient(transport=prebuilt).transport is prebuilt
def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable underlying channel."""
    for transport_cls in (
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # The lazily-created channel must be truthy once accessed.
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to application default credentials when none given."""
    with mock.patch.object(google.auth, "default") as adc_mock:
        adc_mock.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc_mock.assert_called_once()
def test_transport_grpc_default():
    """With no transport argument the client defaults to the sync gRPC transport."""
    default_client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        default_client.transport, transports.MigrationServiceGrpcTransport
    )
def test_migration_service_base_transport_error():
    """Supplying both credentials and credentials_file is a duplicate-args error."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.MigrationServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_migration_service_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport with its __init__ patched out.
    with mock.patch(
        "google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport.__init__"
    ) as patched_init:
        patched_init.return_value = None
        transport = transports.MigrationServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # Each generated method stub must refuse to run on the base class.
        for rpc_name in ("search_migratable_resources", "batch_migrate_resources"):
            with pytest.raises(NotImplementedError):
                getattr(transport, rpc_name)(request=object())
        with pytest.raises(NotImplementedError):
            transport.close()
        # The LRO operations client property is likewise abstract.
        with pytest.raises(NotImplementedError):
            transport.operations_client
def test_migration_service_base_transport_with_credentials_file():
    """A credentials file is loaded with the service's default scopes."""
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as file_loader, mock.patch(
        "google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages"
    ) as prep_mock:
        prep_mock.return_value = None
        file_loader.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.MigrationServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        # The loader must see the file path, default scopes, and quota project.
        file_loader.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_migration_service_base_transport_with_adc():
    """With neither credentials nor a file, the base transport falls back to ADC."""
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc_mock, mock.patch(
        "google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages"
    ) as prep_mock:
        prep_mock.return_value = None
        adc_mock.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.MigrationServiceTransport()
        adc_mock.assert_called_once()
def test_migration_service_auth_adc():
    """The client picks up ADC with the cloud-platform default scope."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc_mock:
        adc_mock.return_value = (ga_credentials.AnonymousCredentials(), None)
        MigrationServiceClient()
        adc_mock.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
def test_migration_service_transport_auth_adc(transport_class):
    """Concrete transports forward scopes and quota project into ADC lookup."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc_mock:
        adc_mock.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc_mock.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.MigrationServiceGrpcTransport, grpc_helpers),
        (transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_migration_service_transport_create_channel(transport_class, grpc_helpers):
    """Transports built from ADC forward scopes/quota project to create_channel."""
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc_mock, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as channel_factory:
        adc_creds = ga_credentials.AnonymousCredentials()
        adc_mock.return_value = (adc_creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The channel factory must see the ADC credentials plus all pass-throughs.
        channel_factory.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=adc_creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
def test_migration_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS client certs: an explicit ssl_channel_credentials is passed straight
    through; otherwise client_cert_source_for_mtls builds grpc SSL credentials."""
    cred = ga_credentials.AnonymousCredentials()

    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        # The provided SSL credentials must reach the channel factory unchanged.
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The callback's cert/key pair must feed grpc.ssl_channel_credentials.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_migration_service_host_no_port():
    """An endpoint without a port gets the default :443 appended."""
    opts = client_options.ClientOptions(api_endpoint="aiplatform.googleapis.com")
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=opts,
    )
    assert client.transport._host == "aiplatform.googleapis.com:443"
def test_migration_service_host_with_port():
    """An endpoint with an explicit port is preserved verbatim."""
    opts = client_options.ClientOptions(api_endpoint="aiplatform.googleapis.com:8000")
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=opts,
    )
    assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_migration_service_grpc_transport_channel():
    """A caller-supplied channel is used verbatim by the sync gRPC transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.MigrationServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: identity comparison with None (PEP 8), not `== None`.
    assert transport._ssl_channel_credentials is None
def test_migration_service_grpc_asyncio_transport_channel():
    """A caller-supplied channel is used verbatim by the asyncio gRPC transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.MigrationServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: identity comparison with None (PEP 8), not `== None`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
def test_migration_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated mTLS args: a client cert source builds SSL channel credentials
    and the channel is created against the mTLS endpoint."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            # Pre-wire the mocks: SSL creds and the channel the factory returns.
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # The deprecated arguments must still work, but emit a warning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()

            # The callback's cert/key pair feeds grpc.ssl_channel_credentials.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The channel is created against the mTLS endpoint with those creds.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
def test_migration_service_transport_channel_mtls_with_adc(transport_class):
    """Deprecated mTLS args: with no client cert source, SslCredentials derived
    from ADC are used for the mTLS channel."""
    mock_ssl_cred = mock.Mock()
    # Replace google.auth's SslCredentials so no real certificates are loaded.
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # Using api_mtls_endpoint without client_cert_source still warns.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )

            # The ADC-derived SSL credentials must reach the channel factory.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_migration_service_grpc_lro_client():
    """The sync gRPC transport exposes a memoized api-core OperationsClient."""
    transport = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    ).transport
    ops_client = transport.operations_client
    assert isinstance(ops_client, operations_v1.OperationsClient)
    # The property must cache: repeated access yields the very same object.
    assert ops_client is transport.operations_client
def test_migration_service_grpc_lro_async_client():
    """The asyncio gRPC transport exposes a memoized OperationsAsyncClient."""
    transport = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    ).transport
    ops_client = transport.operations_client
    assert isinstance(ops_client, operations_v1.OperationsAsyncClient)
    # The property must cache: repeated access yields the very same object.
    assert ops_client is transport.operations_client
def test_annotated_dataset_path():
    """annotated_dataset_path builds the expected resource name."""
    proj, ds, ann = "squid", "clam", "whelk"
    want = f"projects/{proj}/datasets/{ds}/annotatedDatasets/{ann}"
    assert MigrationServiceClient.annotated_dataset_path(proj, ds, ann) == want
def test_parse_annotated_dataset_path():
    """Round-trip an annotated-dataset path through the parser."""
    parts = {
        "project": "octopus",
        "dataset": "oyster",
        "annotated_dataset": "nudibranch",
    }
    path = MigrationServiceClient.annotated_dataset_path(**parts)
    # Parsing the constructed path must recover the original components.
    assert MigrationServiceClient.parse_annotated_dataset_path(path) == parts
def test_dataset_path():
    """dataset_path builds a location-qualified dataset resource name."""
    proj, loc, ds = "cuttlefish", "mussel", "winkle"
    want = f"projects/{proj}/locations/{loc}/datasets/{ds}"
    assert MigrationServiceClient.dataset_path(proj, loc, ds) == want
def test_parse_dataset_path():
    """Round-trip a dataset path through the parser."""
    parts = {"project": "nautilus", "location": "scallop", "dataset": "abalone"}
    path = MigrationServiceClient.dataset_path(**parts)
    # Parsing the constructed path must recover the original components.
    assert MigrationServiceClient.parse_dataset_path(path) == parts
def test_dataset_path2():
    """Second fixture set for dataset_path.

    Renamed from ``test_dataset_path``: the duplicate definition shadowed the
    earlier one, so pytest only ever collected the last of them.
    """
    project = "squid"
    location = "clam"
    dataset = "whelk"
    expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
        project=project, location=location, dataset=dataset,
    )
    actual = MigrationServiceClient.dataset_path(project, location, dataset)
    assert expected == actual
def test_parse_dataset_path2():
    """Second fixture set for parse_dataset_path.

    Renamed from ``test_parse_dataset_path``: the duplicate definition
    shadowed the earlier one, so pytest only ever collected the last of them.
    """
    expected = {
        "project": "octopus",
        "location": "oyster",
        "dataset": "nudibranch",
    }
    path = MigrationServiceClient.dataset_path(**expected)
    # Check that the path construction is reversible.
    actual = MigrationServiceClient.parse_dataset_path(path)
    assert expected == actual
def test_dataset_path_without_location():
    """dataset_path variant with no location segment.

    Renamed from ``test_dataset_path``: three definitions shared that name,
    so only the last one was collected by pytest. This variant exercises the
    two-argument ``projects/{project}/datasets/{dataset}`` form.
    """
    project = "cuttlefish"
    dataset = "mussel"
    expected = "projects/{project}/datasets/{dataset}".format(
        project=project, dataset=dataset,
    )
    actual = MigrationServiceClient.dataset_path(project, dataset)
    assert expected == actual
def test_parse_dataset_path_without_location():
    """parse_dataset_path variant with no location segment.

    Renamed from ``test_parse_dataset_path``: three definitions shared that
    name, so only the last one was collected by pytest.
    """
    expected = {
        "project": "winkle",
        "dataset": "nautilus",
    }
    path = MigrationServiceClient.dataset_path(**expected)
    # Check that the path construction is reversible.
    actual = MigrationServiceClient.parse_dataset_path(path)
    assert expected == actual
def test_model_path():
    """model_path builds a location-qualified model resource name."""
    proj, loc, mdl = "scallop", "abalone", "squid"
    want = f"projects/{proj}/locations/{loc}/models/{mdl}"
    assert MigrationServiceClient.model_path(proj, loc, mdl) == want
def test_parse_model_path():
    """Round-trip a model path through the parser."""
    parts = {"project": "clam", "location": "whelk", "model": "octopus"}
    path = MigrationServiceClient.model_path(**parts)
    # Parsing the constructed path must recover the original components.
    assert MigrationServiceClient.parse_model_path(path) == parts
def test_model_path2():
    """Second fixture set for model_path.

    Renamed from ``test_model_path``: the duplicate definition shadowed the
    earlier one, so pytest only ever collected the last of them.
    """
    project = "oyster"
    location = "nudibranch"
    model = "cuttlefish"
    expected = "projects/{project}/locations/{location}/models/{model}".format(
        project=project, location=location, model=model,
    )
    actual = MigrationServiceClient.model_path(project, location, model)
    assert expected == actual
def test_parse_model_path2():
    """Second fixture set for parse_model_path.

    Renamed from ``test_parse_model_path``: the duplicate definition shadowed
    the earlier one, so pytest only ever collected the last of them.
    """
    expected = {
        "project": "mussel",
        "location": "winkle",
        "model": "nautilus",
    }
    path = MigrationServiceClient.model_path(**expected)
    # Check that the path construction is reversible.
    actual = MigrationServiceClient.parse_model_path(path)
    assert expected == actual
def test_version_path():
    """version_path builds a model-version resource name."""
    proj, mdl, ver = "scallop", "abalone", "squid"
    want = f"projects/{proj}/models/{mdl}/versions/{ver}"
    assert MigrationServiceClient.version_path(proj, mdl, ver) == want
def test_parse_version_path():
    """Round-trip a version path through the parser."""
    parts = {"project": "clam", "model": "whelk", "version": "octopus"}
    path = MigrationServiceClient.version_path(**parts)
    # Parsing the constructed path must recover the original components.
    assert MigrationServiceClient.parse_version_path(path) == parts
def test_common_billing_account_path():
    """common_billing_account_path formats a billingAccounts/ resource name."""
    account = "oyster"
    got = MigrationServiceClient.common_billing_account_path(account)
    assert got == f"billingAccounts/{account}"
def test_parse_common_billing_account_path():
    """Round-trip a billing-account path through the parser."""
    parts = {"billing_account": "nudibranch"}
    path = MigrationServiceClient.common_billing_account_path(**parts)
    assert MigrationServiceClient.parse_common_billing_account_path(path) == parts
def test_common_folder_path():
    """common_folder_path formats a folders/ resource name."""
    folder = "cuttlefish"
    assert MigrationServiceClient.common_folder_path(folder) == f"folders/{folder}"
def test_parse_common_folder_path():
    """Round-trip a folder path through the parser."""
    parts = {"folder": "mussel"}
    path = MigrationServiceClient.common_folder_path(**parts)
    assert MigrationServiceClient.parse_common_folder_path(path) == parts
def test_common_organization_path():
    """common_organization_path formats an organizations/ resource name."""
    org = "winkle"
    got = MigrationServiceClient.common_organization_path(org)
    assert got == f"organizations/{org}"
def test_parse_common_organization_path():
    """Round-trip an organization path through the parser."""
    parts = {"organization": "nautilus"}
    path = MigrationServiceClient.common_organization_path(**parts)
    assert MigrationServiceClient.parse_common_organization_path(path) == parts
def test_common_project_path():
    """common_project_path formats a projects/ resource name."""
    proj = "scallop"
    assert MigrationServiceClient.common_project_path(proj) == f"projects/{proj}"
def test_parse_common_project_path():
    """Round-trip a project path through the parser."""
    parts = {"project": "abalone"}
    path = MigrationServiceClient.common_project_path(**parts)
    assert MigrationServiceClient.parse_common_project_path(path) == parts
def test_common_location_path():
    """common_location_path formats a project/location resource name."""
    proj, loc = "squid", "clam"
    want = f"projects/{proj}/locations/{loc}"
    assert MigrationServiceClient.common_location_path(proj, loc) == want
def test_parse_common_location_path():
    """Round-trip a location path through the parser."""
    parts = {"project": "whelk", "location": "octopus"}
    path = MigrationServiceClient.common_location_path(**parts)
    assert MigrationServiceClient.parse_common_location_path(path) == parts
def test_client_with_default_client_info():
    """client_info must reach _prep_wrapped_messages via both entry points."""
    info = gapic_v1.client_info.ClientInfo()

    # Path 1: through the high-level client constructor.
    with mock.patch.object(
        transports.MigrationServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        MigrationServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=info,
        )
        prep.assert_called_once_with(info)

    # Path 2: through a directly constructed transport.
    with mock.patch.object(
        transports.MigrationServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = MigrationServiceClient.get_transport_class()
        transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=info,
        )
        prep.assert_called_once_with(info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Async client context manager closes the grpc channel only on exit."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    channel_type = type(client.transport.grpc_channel)
    with mock.patch.object(channel_type, "close") as close:
        async with client:
            # Still inside the context: nothing closed yet.
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Sync client context manager closes its transport channel on exit.

    The local mapping was renamed from ``transports`` to ``close_names`` —
    the old name shadowed the module-level ``transports`` import inside this
    function, which was confusing and error-prone.
    """
    close_names = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in close_names.items():
        client = MigrationServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport_name,
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                # Still inside the context: nothing closed yet.
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client delegates context-manager exit to its transport's close().

    The local list was renamed from ``transports`` to ``transport_names`` —
    the old name shadowed the module-level ``transports`` import inside this
    function.
    """
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = MigrationServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport_name,
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (MigrationServiceClient, transports.MigrationServiceGrpcTransport),
        (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option is exchanged for API-key credentials which
    are then handed to the transport constructor."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        api_key_cred = mock.Mock()
        get_api_key_credentials.return_value = api_key_cred
        opts = client_options.ClientOptions()
        opts.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=opts)
            # The transport must be built with the API-key credentials and
            # the standard defaults for everything else.
            patched.assert_called_once_with(
                credentials=api_key_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| googleapis/python-aiplatform | tests/unit/gapic/aiplatform_v1/test_migration_service.py | Python | apache-2.0 | 80,019 | [
"Octopus"
] | fae7a5c648fa6a1af177dee41656499884f7852ca105e47059c52152e10cc4d6 |
# -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2007 Donald N. Allingham, Martin Hawlisch
# Copyright (C) 2009 Yevgeny Zegzda <ezegjda@ya.ru>
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from html import escape
import math
import os
import pickle
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GdkPixbuf
from gi.repository import PangoCairo
import cairo
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import ChildRef, ChildRefType, Family
from gramps.gui.views.navigationview import NavigationView
from gramps.gui.editors import FilterEditor
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.utils.alive import probably_alive
from gramps.gen.utils.file import media_path_full
from gramps.gen.utils.db import find_children, find_parents, find_witnessed_people
from gramps.gen.utils.libformatting import FormattingHelper
from gramps.gen.utils.thumbnails import get_thumbnail_path
from gramps.gen.errors import WindowActiveError
from gramps.gui.editors import EditPerson, EditFamily
from gramps.gui.ddtargets import DdTargets
from gramps.gen.config import config
from gramps.gui.views.bookmarks import PersonBookmarks
from gramps.gen.const import CUSTOM_FILTERS
from gramps.gui.dialog import RunDatabaseRepair, ErrorDialog
from gramps.gui.utils import color_graph_box, hex_to_rgb_float, is_right_click
from gramps.gen.constfunc import lin
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.utils.symbols import Symbols
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
# Marker prefix for person widgets — consumed elsewhere in this view
# (usage outside this chunk; presumably widget position keys).
_PERSON = "p"
# Abbreviations shown next to vital events; the text before '|' is a
# translation context consumed by sgettext and stripped at runtime.
_BORN = _('short for born|b.')
_DIED = _('short for died|d.')
_BAPT = _('short for baptized|bap.')
_CHRI = _('short for christened|chr.')
_BURI = _('short for buried|bur.')
_CREM = _('short for cremated|crem.')
class _PersonWidgetBase(Gtk.DrawingArea):
    """
    Default set up for person widgets.
    Set up drag options and button release events.
    """
    def __init__(self, view, format_helper, person):
        """
        :param view: the owning pedigree view; used for navigation callbacks
        :param format_helper: FormattingHelper producing the display text
        :param person: the Person this widget represents, or None for an
            empty placeholder (no click/drag handlers are connected then)
        """
        Gtk.DrawingArea.__init__(self)
        self.view = view
        self.format_helper = format_helper
        self.person = person
        self.force_mouse_over = False
        # True while a drag that started on this widget is in progress;
        # used to suppress the click action on button release.
        self.in_drag = False
        self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
        self.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK)
        if self.person:
            self.connect("button-release-event", self.cb_on_button_release)
            self.connect("drag_data_get", self.cb_drag_data_get)
            self.connect("drag_begin", self.cb_drag_begin)
            self.connect("drag_end", self.cb_drag_end)
            # Enable drag
            self.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,
                                 [],
                                 Gdk.DragAction.COPY)
            tglist = Gtk.TargetList.new([])
            tglist.add(DdTargets.PERSON_LINK.atom_drag_type,
                       DdTargets.PERSON_LINK.target_flags,
                       DdTargets.PERSON_LINK.app_id)
            #allow drag to a text document, info on drag_get will be 0L !
            tglist.add_text_targets(0)
            self.drag_source_set_target_list(tglist)

    def cb_drag_begin(self, widget, data):
        """Set up some initial conditions for drag. Set up icon."""
        self.in_drag = True
        self.drag_source_set_icon_name('gramps-person')

    def cb_drag_end(self, widget, data):
        """Clear the in-drag flag when the drag finishes."""
        self.in_drag = False

    def cb_drag_data_get(self, widget, context, sel_data, info, time):
        """
        Returned parameters after drag.
        Specified for 'person-link', for others return text info about person.
        """
        tgs = [x.name() for x in context.list_targets()]
        if info == DdTargets.PERSON_LINK.app_id:
            # Internal Gramps drop target: serialize a
            # (drag_type, widget id, person handle, 0) tuple.
            data = (DdTargets.PERSON_LINK.drag_type,
                    id(self), self.person.get_handle(), 0)
            sel_data.set(sel_data.get_target(), 8, pickle.dumps(data))
        elif ('TEXT' in tgs or 'text/plain' in tgs) and info == 0:
            # External text drop target: plain formatted person summary.
            sel_data.set_text(self.format_helper.format_person(self.person, 11), -1)

    def cb_on_button_release(self, widget, event):
        """
        Default action for release event from mouse.
        Change active person to current.
        """
        if self.in_drag:
            # A drag just happened; don't treat the release as a click.
            return False
        if event.button == 1 and event.type == Gdk.EventType.BUTTON_RELEASE:
            self.view.cb_childmenu_changed(None, self.person.get_handle())
            return True
        return False

    def get_image(self, dbstate, person):
        """
        Return a thumbnail image path for the given person, or None when the
        person has no image media.
        """
        image_path = None
        media_list = person.get_media_list()
        if media_list:
            # Use the person's first (primary) media reference.
            photo = media_list[0]
            object_handle = photo.get_reference_handle()
            obj = dbstate.db.get_media_from_handle(
                object_handle)
            if obj:
                mtype = obj.get_mime_type()
                if mtype and mtype[0:5] == "image":
                    image_path = get_thumbnail_path(
                        media_path_full(
                            dbstate.db,
                            obj.get_path()),
                        rectangle=photo.get_rectangle())
        return image_path
class PersonBoxWidgetCairo(_PersonWidgetBase):
    """Draw person box using cairo library"""
    def __init__(self, view, format_helper, dbstate, person, alive, maxlines,
                 image=None, tags=False):
        """
        :param view: owning pedigree view
        :param format_helper: FormattingHelper for the markup text
        :param dbstate: database state, used for media and tag lookups
        :param person: Person to show, or None for an empty box
        :param alive: when False, a "deceased" corner mark is drawn
        :param maxlines: maximum number of text lines shown in the box
        :param image: when true, attempt to show the person's primary photo
        :param tags: when true, use the person's tag color as background
        """
        _PersonWidgetBase.__init__(self, view, format_helper, person)
        # Minimum size; draw() enlarges the request to fit text and image.
        self.set_size_request(120, 25)
        # Required for tooltip and mouse-over
        self.add_events(Gdk.EventMask.ENTER_NOTIFY_MASK)
        # Required for tooltip and mouse-over
        self.add_events(Gdk.EventMask.LEAVE_NOTIFY_MASK)
        self.alive = alive
        self.maxlines = maxlines
        # True while the pointer hovers over the box (drawn with a thicker
        # border). NOTE: 'hightlight' is a historical typo for 'highlight',
        # kept as-is to avoid touching behavior.
        self.hightlight = False
        self.connect("draw", self.draw)
        self.text = ""
        if self.person:
            self.text = self.format_helper.format_person(self.person,
                                                         self.maxlines, True)
            gender = self.person.get_gender()
        else:
            gender = None
        self.bgcolor, self.bordercolor = color_graph_box(alive, gender)
        if tags and person:
            for tag_handle in person.get_tag_list():
                # For the complete tag, don't modify the default color
                # which is black
                tag = dbstate.db.get_tag_from_handle(tag_handle)
                if tag.get_color() not in ("#000000", "#000000000000"):
                    self.bgcolor = tag.get_color()
        # Convert the '#rrggbb' strings to float RGB tuples for cairo.
        self.bgcolor = hex_to_rgb_float(self.bgcolor)
        self.bordercolor = hex_to_rgb_float(self.bordercolor)
        self.img_surf = None
        if image:
            image_path = self.get_image(dbstate, person)
            if image_path and os.path.exists(image_path):
                with open(image_path, 'rb') as image:
                    self.img_surf = cairo.ImageSurface.create_from_png(image)
        # enable mouse-over
        self.connect("enter-notify-event", self.cb_on_enter)
        # enable mouse-out
        self.connect("leave-notify-event", self.cb_on_leave)
        self.context = None
        # Pango layout for the box text; built lazily on first draw.
        self.textlayout = None

    def cb_on_enter(self, widget, event):
        """On mouse-over highlight border"""
        if self.person or self.force_mouse_over:
            self.hightlight = True
            self.queue_draw()

    def cb_on_leave(self, widget, event):
        """On mouse-out normal border"""
        self.hightlight = False
        self.queue_draw()

    def draw(self, widget, context):
        """
        Redraw the contents of the widget: shadow, background, optional
        photo, deceased mark, border and finally the text layout.
        """
        def _boxpath(context, alloc):
            # Rounded-rectangle path for the box outline; reused for the
            # shadow, clip region, background fill and border stroke.
            #context.new_path()
            context.move_to(0, 5)
            context.curve_to(0, 2, 2, 0, 5, 0)
            context.line_to(alloc.width-8, 0)
            context.curve_to(alloc.width-5, 0,
                             alloc.width-3, 2,
                             alloc.width-3, 5)
            context.line_to(alloc.width-3, alloc.height-8)
            context.curve_to(alloc.width-3, alloc.height-5,
                             alloc.width-5, alloc.height-3,
                             alloc.width-8, alloc.height-3)
            context.line_to(5, alloc.height-3)
            context.curve_to(2, alloc.height-3,
                             0, alloc.height-5,
                             0, alloc.height-8)
            context.close_path()

        # pylint: disable-msg=E1101
        minw = 120
        minh = 25
        # NOTE(review): alw/alh are recomputed below after set_size_request;
        # these first two reads appear unused.
        alw = self.get_allocated_width()
        alh = self.get_allocated_height()
        if not self.textlayout:
            # Build the Pango layout once; the draw context is needed here.
            self.textlayout = PangoCairo.create_layout(context)
            # The following seems like it Should work, but it doesn't
            # font_desc = self.get_style_context().get_property(
            #     "font", Gtk.StateFlags.NORMAL)
            font_desc = self.get_style_context().get_font(Gtk.StateFlags.NORMAL)
            self.textlayout.set_font_description(font_desc)
            self.textlayout.set_markup(self.text, -1)
        # Grow the size request so text (and photo, if any) fit.
        size = self.textlayout.get_pixel_size()
        xmin = size[0] + 12
        ymin = size[1] + 11
        if self.img_surf:
            xmin += self.img_surf.get_width()
            ymin = max(ymin, self.img_surf.get_height()+4)
        self.set_size_request(max(xmin, minw), max(ymin, minh))
        alloc = self.get_allocation()
        alw = self.get_allocated_width()
        alh = self.get_allocated_height()
        # widget area for debugging
        ##context.rectangle(0, 0, alloc.width, alloc.height)
        ##context.set_source_rgb(1, 0, 1)
        ##context.fill_preserve()
        ##context.stroke()
        # Create box shape and store path
        context.save()
        # shadow: the box path offset by (3, 3), semi-transparent border color
        context.translate(3, 3)
        _boxpath(context, alloc)
        context.set_source_rgba(*(self.bordercolor[:3] + (0.4,)))
        context.fill_preserve()
        context.set_line_width(0)
        context.stroke()
        context.restore()
        context.save()
        # box shape used for clipping
        _boxpath(context, alloc)
        context.clip()
        # background (while clipped)
        _boxpath(context, alloc)
        context.set_source_rgb(*self.bgcolor[:3])
        context.fill_preserve()
        context.stroke()
        # image, right-aligned inside the box
        if self.img_surf:
            context.set_source_surface(self.img_surf,
                alloc.width-4-self.img_surf.get_width(), 1)
            context.paint()
        # Mark deceased: diagonal stroke across the top-left corner
        context.new_path()
        if self.person and not self.alive:
            context.set_source_rgb(0, 0, 0)
            context.set_line_width(2)
            context.move_to(0, 10)
            context.line_to(10, 0)
            context.stroke()
        #border: thicker while the pointer hovers over the widget
        _boxpath(context, alloc)
        if self.hightlight:
            context.set_line_width(5)
        else:
            context.set_line_width(2)
        context.set_source_rgb(*self.bordercolor[:3])
        context.stroke()
        context.restore()
        context.save()
        # text
        context.move_to(5, 4)
        context.set_source_rgb(0, 0, 0)
        PangoCairo.show_layout(context, self.textlayout)
        context.restore()
        context.get_target().flush()
class LineWidget(Gtk.DrawingArea):
    """
    Draw lines linking Person boxes - Types A and C.
    """
    def __init__(self, child, father, frel, mother, mrel, direction):
        """
        :param child: the child's box widget
        :param father: the father's box widget, or None
        :param frel: truthy when the father relation is birth (solid line)
        :param mother: the mother's box widget, or None
        :param mrel: truthy when the mother relation is birth (solid line)
        :param direction: tree direction code; 2/3 draw horizontally,
            other values vertically (see expose)
        """
        Gtk.DrawingArea.__init__(self)
        self.child_box = child
        self.father_box = father
        self.mother_box = mother
        self.frel = frel
        self.mrel = mrel
        self.direction = direction
        self.connect("draw", self.expose)

    def expose(self, widget, context):
        """
        Redraw the contents of the widget: compute the child/parent anchor
        coordinates from the boxes' allocations, then draw one link per
        present parent.
        """
        self.set_size_request(20, 20)
        context.set_source_rgb(0.,0.,0.)

        # pylint: disable-msg=E1101
        alloc = self.get_allocation()
        child = self.child_box.get_allocation()

        if self.father_box:
            father = self.father_box.get_allocation()
        if self.mother_box:
            mother = self.mother_box.get_allocation()

        if self.direction in [2, 3]: # horizontal
            child_side = 0
            centre = alloc.width / 2
            parent_side = alloc.width
            middle = child.y - alloc.y + child.height / 2
            if self.father_box:
                father_side = father.height / 2
            if self.mother_box:
                mother_side = alloc.height - mother.height / 2
        else:
            child_side = 0
            centre = alloc.height / 2
            parent_side = alloc.height
            middle = child.x - alloc.x + child.width / 2
            if self.father_box:
                father_side = father.width / 2
            if self.mother_box:
                mother_side = alloc.width - mother.width / 2

        if self.direction in [1, 3]: # bottom to top or right to left
            # Mirror the link: child and parent ends swap sides.
            child_side = parent_side
            parent_side = 0

        # NOTE(review): the father link is drawn with self.mrel and the
        # mother link with self.frel — looks swapped; confirm against the
        # argument order used by the caller before changing.
        if self.father_box:
            self.draw_link(context, parent_side, middle, child_side, centre,
                           father_side, self.mrel)
        if self.mother_box:
            self.draw_link(context, parent_side, middle, child_side, centre,
                           mother_side, self.frel)

    def draw_link(self, cr, parent_side, middle, child_side, centre, side, rela):
        """
        Draw a link between parent and child.
        Solid for a birth relation (rela truthy), dashed otherwise.
        """
        cr.set_line_width(3)
        if rela:
            cr.set_dash([], 0) #SOLID
        else:
            cr.set_dash([9.], 1) #DASH
        # Three segments: parent→corner, corner→corner, corner→child.
        self.draw_line(cr, parent_side, side, centre, side)
        self.draw_line(cr, centre, side, centre, middle, True)
        self.draw_line(cr, centre, middle, child_side, middle, True)
        cr.stroke()

    def draw_line(self, cr, x_from, y_from, x_to, y_to, join=False):
        """
        Draw a single line in a link; coordinates are transposed for the
        vertical orientations. With join=True the segment continues the
        current path instead of starting a new one.
        """
        # pylint: disable-msg=E1101
        if self.direction in [2, 3]: # horizontal
            if not join:
                cr.move_to(x_from, y_from)
            cr.line_to(x_to, y_to)
        else:
            if not join:
                cr.move_to(y_from, x_from)
            cr.line_to(y_to, x_to)
class LineWidget2(Gtk.DrawingArea):
    """
    Draw lines linking Person boxes - Type B.
    """
    def __init__(self, male, rela, direction):
        """
        :param male: truthy when this is the father's link (affects which
            end of the widget the child anchor sits on)
        :param rela: truthy for a birth relation (solid line), else dashed
        :param direction: tree direction code; 2/3 draw horizontally
        """
        Gtk.DrawingArea.__init__(self)
        self.male = male
        self.rela = rela
        self.direction = direction
        self.connect("draw", self.expose)

    def expose(self, widget, context):
        """
        Redraw the contents of the widget: a two-segment line from the child
        edge through the widget centre to the parent edge.
        """
        self.set_size_request(20, -1)
        context.set_source_rgb(0.,0.,0.)

        # pylint: disable-msg=E1101
        alloc = self.get_allocation()
        if self.direction in [2, 3]: # horizontal
            child_x = alloc.width / 2
            child_y = alloc.height
            parent_x = alloc.width
            parent_y = alloc.height / 2
            mid_x = alloc.width / 2
            mid_y = alloc.height / 2
        else:
            child_y = alloc.width
            child_x = alloc.height / 2
            parent_y = alloc.width / 2
            parent_x = alloc.height
            mid_y = alloc.width / 2
            mid_x = alloc.height / 2

        context.set_line_width(3)
        if self.rela:
            context.set_dash([], 0) #SOLID
        else:
            context.set_dash([9.], 1) #DASH

        if self.direction in [1, 3]:
            # Mirrored orientations: parent anchor moves to the near edge.
            parent_x = 0
        if not self.male:
            # Mother link starts from the opposite end of the child edge.
            child_y = 0

        self.draw_line(context, child_x, child_y, mid_x, mid_y)
        self.draw_line(context, mid_x, mid_y, parent_x, parent_y, True)

    def draw_line(self, cr, x_from, y_from, x_to, y_to, join=False):
        """
        Draw a single line in a link; coordinates are transposed for the
        vertical orientations. With join=True the segment continues the
        current path instead of starting a new one.
        """
        # pylint: disable-msg=E1101
        if self.direction in [2, 3]: # horizontal
            if not join:
                cr.move_to(x_from, y_from)
            cr.line_to(x_to, y_to)
        else:
            if not join:
                cr.move_to(y_from, x_from)
            cr.line_to(y_to, x_to)
#-------------------------------------------------------------------------
#
# PedigreeView
#
#-------------------------------------------------------------------------
class PedigreeView(NavigationView):
"""
View for pedigree tree.
Displays the ancestors of a selected individual.
"""
#settings in the config file
CONFIGSETTINGS = (
('interface.pedview-tree-size', 5),
('interface.pedview-layout', 0),
('interface.pedview-show-images', True),
('interface.pedview-show-marriage', True),
('interface.pedview-show-tags', False),
('interface.pedview-tree-direction', 2),
('interface.pedview-show-unknown-people', True),
)
FLEUR_CURSOR = Gdk.Cursor.new_for_display(Gdk.Display.get_default(),
Gdk.CursorType.FLEUR)
    def __init__(self, pdata, dbstate, uistate, nav_group=0):
        """
        :param pdata: plugin data passed through to NavigationView
        :param dbstate: database state; 'database-changed' is tracked
        :param uistate: UI state; display-format changes trigger a rebuild
        :param nav_group: navigation group for history handling
        """
        NavigationView.__init__(self, _('Pedigree'), pdata, dbstate, uistate,
                                      PersonBookmarks, nav_group)
        self.dbstate = dbstate
        self.uistate = uistate
        self.dbstate.connect('database-changed', self.change_db)
        # Any change in how names/places/fonts are displayed invalidates
        # the rendered tree, so rebuild on those signals.
        uistate.connect('nameformat-changed', self.person_rebuild)
        uistate.connect('placeformat-changed', self.person_rebuild)
        uistate.connect('font-changed', self.person_rebuild)
        self.format_helper = FormattingHelper(self.dbstate, self.uistate)

        # Depth of tree.
        self._depth = 1
        # Variables for drag and scroll
        self._last_x = 0
        self._last_y = 0
        self._in_move = False
        self.key_active_changed = None
        # GTK objects
        self.scrolledwindow = None
        self.table = None

        self.additional_uis.append(self.additional_ui)

        # Automatic resize
        self.force_size = self._config.get('interface.pedview-tree-size')
        # Nice tree
        self.tree_style = self._config.get('interface.pedview-layout')
        # Show photos of persons
        self.show_images = self._config.get('interface.pedview-show-images')
        # Hide marriage data by default
        self.show_marriage_data = self._config.get(
                                'interface.pedview-show-marriage')
        # Show person with tag color
        self.show_tag_color = self._config.get('interface.pedview-show-tags')
        # Tree draw direction
        self.tree_direction = self._config.get('interface.pedview-tree-direction')
        # Directions 0/1 are vertical, 2/3 horizontal (see direction codes).
        self.cb_change_scroll_direction(None, self.tree_direction < 2)
        # Show on not unknown people.
        # Default - not show, for mo fast display hight tree
        self.show_unknown_people = self._config.get(
                                'interface.pedview-show-unknown-people')

        # use symbols
        self.symbols = Symbols()
        self.uistate.connect('font-changed', self.reload_symbols)
def reload_symbols(self):
dth_idx = self.uistate.death_symbol
if self.uistate.symbols:
self.bth = self.symbols.get_symbol_for_string(self.symbols.SYMBOL_BIRTH)
self.dth = self.symbols.get_death_symbol_for_char(dth_idx)
else:
self.bth = self.symbols.get_symbol_fallback(self.symbols.SYMBOL_BIRTH)
self.dth = self.symbols.get_death_symbol_fallback(dth_idx)
def get_handle_from_gramps_id(self, gid):
"""
returns the handle of the specified object
"""
obj = self.dbstate.db.get_person_from_gramps_id(gid)
if obj:
return obj.get_handle()
else:
return None
def change_page(self):
"""Called when the page changes."""
NavigationView.change_page(self)
self.uistate.clear_filter_results()
if self.dirty:
self.rebuild_trees(self.get_active())
    def get_stock(self):
        """
        The category stock icon name for this view.
        """
        return 'gramps-pedigree'
    def get_viewtype_stock(self):
        """Stock icon name for this type of view within the category.
        """
        return 'gramps-pedigree'
    def build_widget(self):
        """
        Builds the interface and returns a Gtk.Container type that
        contains the interface. This container will be inserted into
        a Gtk.ScrolledWindow page.
        """
        self.scrolledwindow = Gtk.ScrolledWindow(hadjustment=None,
                                                 vadjustment=None)
        self.scrolledwindow.set_policy(Gtk.PolicyType.AUTOMATIC,
                                       Gtk.PolicyType.AUTOMATIC)
        self.scrolledwindow.add_events(Gdk.EventMask.SCROLL_MASK)
        self.scrolledwindow.connect("scroll-event", self.cb_bg_scroll_event)
        event_box = Gtk.EventBox()
        # Required for drag-scroll events and popup menu
        event_box.add_events(Gdk.EventMask.BUTTON_PRESS_MASK
                             | Gdk.EventMask.BUTTON_RELEASE_MASK
                             | Gdk.EventMask.BUTTON1_MOTION_MASK)
        # Signal begin drag-scroll
        event_box.connect("button-press-event", self.cb_bg_button_press)
        # Signal end drag-scroll and popup menu
        event_box.connect("button-release-event", self.cb_bg_button_release)
        #Signal for controll motion-notify when left mouse button pressed
        event_box.connect("motion-notify-event", self.cb_bg_motion_notify_event)
        self.scrolledwindow.add(event_box)

        self.table = Gtk.Grid()
        # force LTR layout of the tree, even though the text might be RTL!
        # this way the horizontal scroll preferences will be correct always
        if self.table.get_direction() == Gtk.TextDirection.RTL:
            self.table.set_direction(Gtk.TextDirection.LTR)
            self.table.set_halign(Gtk.Align.END)
        event_box.add(self.table)
        event_box.get_parent().set_shadow_type(Gtk.ShadowType.NONE)
        self.table.set_row_spacing(1)
        self.table.set_column_spacing(0)

        return self.scrolledwindow
additional_ui = [ # Defines the UI string for UIManager
'''
<placeholder id="CommonGo">
<section>
<item>
<attribute name="action">win.Back</attribute>
<attribute name="label" translatable="yes">_Back</attribute>
</item>
<item>
<attribute name="action">win.Forward</attribute>
<attribute name="label" translatable="yes">_Forward</attribute>
</item>
</section>
<section>
<item>
<attribute name="action">win.HomePerson</attribute>
<attribute name="label" translatable="yes">_Home</attribute>
</item>
</section>
</placeholder>
''',
'''
<section id="AddEditBook">
<item>
<attribute name="action">win.AddBook</attribute>
<attribute name="label" translatable="yes">_Add Bookmark</attribute>
</item>
<item>
<attribute name="action">win.EditBook</attribute>
<attribute name="label" translatable="no">%s...</attribute>
</item>
</section>
''' % _('Organize Bookmarks'),
'''
<placeholder id='otheredit'>
<item>
<attribute name="action">win.FilterEdit</attribute>
<attribute name="label" translatable="yes">'''
'''Person Filter Editor</attribute>
</item>
</placeholder>
''', # Following are the Toolbar items
'''
<placeholder id='CommonNavigation'>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-previous</property>
<property name="action-name">win.Back</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the previous object in the history</property>
<property name="label" translatable="yes">_Back</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-next</property>
<property name="action-name">win.Forward</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the next object in the history</property>
<property name="label" translatable="yes">_Forward</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-home</property>
<property name="action-name">win.HomePerson</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the default person</property>
<property name="label" translatable="yes">_Home</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
</placeholder>
''']
def define_actions(self):
    """
    Required define_actions function for PageView. Builds the action
    group information required. We extend beyond the normal here,
    since we want to have more than one action group for the PersonView.
    Most PageViews really won't care about this.

    Special action groups for Forward and Back are created to allow the
    handling of navigation buttons. Forward and Back allow the user to
    advance or retreat throughout the history, and we want to be able to
    toggle these when at the beginning or the end of the history.
    """
    # Base class registers the standard navigation actions first.
    NavigationView.define_actions(self)
    self._add_action('FilterEdit', self.cb_filter_editor)
    # Keyboard shortcuts: F2 jumps to the home person; <PRIMARY>J is
    # bound to self.jump (provided by the navigation base view).
    self._add_action('F2', self.kb_goto_home, 'F2')
    self._add_action('PRIMARY-J', self.jump, '<PRIMARY>J')
def cb_filter_editor(self, *obj):
    """
    Display the person filter editor.
    """
    try:
        FilterEditor('Person', CUSTOM_FILTERS,
                     self.dbstate, self.uistate)
    except WindowActiveError:
        # The editor window is already open; nothing to do.
        return
def build_tree(self):
    """
    This is called by the parent class when the view becomes visible. Since
    all handling of visibility is now in rebuild_trees, see that for more
    information.
    """
    try:
        active = self.get_active()
        if active:
            self.rebuild_trees(active)
        else:
            self.rebuild_trees(None)
    except AttributeError as msg:
        # A corrupt database can surface as an AttributeError while
        # walking the tree; offer the repair tool instead of crashing.
        RunDatabaseRepair(str(msg),
                          parent=self.uistate.window)
def _connect_db_signals(self):
    """
    Connect database signals.

    Any person/family change may alter the displayed ancestry, so all
    of these trigger a full tree rebuild via person_rebuild().
    """
    self._add_db_signal('person-add', self.person_rebuild)
    self._add_db_signal('person-update', self.person_rebuild)
    self._add_db_signal('person-delete', self.person_rebuild)
    # person-rebuild additionally refreshes the bookmarks display.
    self._add_db_signal('person-rebuild', self.person_rebuild_bm)
    self._add_db_signal('family-update', self.person_rebuild)
    self._add_db_signal('family-add', self.person_rebuild)
    self._add_db_signal('family-delete', self.person_rebuild)
    self._add_db_signal('family-rebuild', self.person_rebuild)
    # Events (e.g. birth) affect box contents, so rebuild on those too.
    self._add_db_signal('event-update', self.person_rebuild)
def change_db(self, db):
    """
    Callback associated with DbState. Whenever the database
    changes, this task is called. In this case, we rebuild the
    columns, and connect signals to the connected database. There
    is no need to store the database, since we will get the value
    from self.state.db.
    """
    self._change_db(db)
    if self.active:
        # Only redraw bookmarks when the view is visible.
        self.bookmarks.redraw()
    self.build_tree()
def navigation_type(self):
    """
    Report which primary-object history this view navigates.

    :returns: the primary object type name, always ``'Person'``.
    """
    return 'Person'
def can_configure(self):
    """
    Report that this view provides a configuration dialog.

    See :class:`~gui.views.pageview.PageView`.

    :returns: True
    """
    return True
def on_delete(self):
    # Persist the view configuration before the view is torn down.
    self._config.save()
    NavigationView.on_delete(self)
def goto_handle(self, handle=None):
    """
    Rebuild the tree with the given person handle as the root.

    A handle that no longer resolves to a person (or no handle at all)
    produces an empty tree.
    """
    self.dirty = True
    if handle:
        person = self.dbstate.db.get_person_from_handle(handle)
        if person:
            self.rebuild_trees(handle)
        else:
            self.rebuild_trees(None)
    else:
        self.rebuild_trees(None)
    self.uistate.modify_statusbar(self.dbstate)
def person_rebuild_bm(self, dummy=None):
    """Large change to person database: rebuild tree and bookmarks."""
    self.person_rebuild(dummy)
    if self.active:
        # Bookmarked people may have changed wholesale; redraw them.
        self.bookmarks.redraw()
def person_rebuild(self, dummy=None):
    """Callback function for database-change signals: rebuild the tree."""
    # Cached formatted names/symbols may be stale after a DB change.
    self.format_helper.clear_cache()
    self.format_helper.reload_symbols()
    self.dirty = True
    if self.active:
        # Only redraw immediately when visible; otherwise the dirty
        # flag defers the rebuild until the view is shown again.
        self.rebuild_trees(self.get_active())
def rebuild_trees(self, person_handle):
    """
    Rebuild tree with root person_handle.
    Called from many functions, whenever a full redraw of the tree
    is needed.
    """
    person = None
    if person_handle:
        person = self.dbstate.db.get_person_from_handle(person_handle)
    self.dirty = False
    # Style 1 ("compact") only has static layouts up to 5 generations.
    if self.tree_style == 1 and (
            self.force_size > 5 or self.force_size == 0):
        self.force_size = 5
    # A position definition is a tuple of nodes.
    # Each node consists of a tuple of:
    # (person box rectangle, connection, marriage box rectangle)
    # A rectangle is a tuple of the format (x, y, width, height)
    # A connection is either a line or a tuple of two lines.
    # A line is of the format (x, y, height). Lines have a width of 1.
    if self.tree_style == 1:
        if self.force_size == 2:
            pos = (((0, 3, 3, 3), ((1, 0, 3), (1, 6, 3)), (3, 3, 2, 3)),
                   ((2, 0, 3, 3), None, None),
                   ((2, 6, 3, 3), None, None))
        elif self.force_size == 3:
            pos = (((0, 4, 3, 5), ((1, 1, 3), (1, 9, 3)), (3, 5, 2, 3)),
                   ((2, 1, 3, 3), ((3, 0, 1), (3, 4, 1)), (5, 1, 2, 3)),
                   ((2, 9, 3, 3), ((3, 8, 1), (3, 12, 1)), (5, 9, 2, 3)),
                   ((4, 0, 3, 1), None, None),
                   ((4, 4, 3, 1), None, None),
                   ((4, 8, 3, 1), None, None),
                   ((4, 12, 3, 1), None, None))
        elif self.force_size == 4:
            pos = (((0, 5, 3, 5), ((1, 2, 3), (1, 10, 3)), (3, 6, 2, 3)),
                   ((2, 2, 3, 3), ((3, 1, 1), (3, 5, 1)), (5, 3, 2, 1)),
                   ((2, 10, 3, 3), ((3, 9, 1), (3, 13, 1)), (5, 11, 2, 1)),
                   ((4, 1, 3, 1), ((5, 0, 1), (5, 2, 1)), (7, 1, 2, 1)),
                   ((4, 5, 3, 1), ((5, 4, 1), (5, 6, 1)), (7, 5, 2, 1)),
                   ((4, 9, 3, 1), ((5, 8, 1), (5, 10, 1)), (7, 9, 2, 1)),
                   ((4, 13, 3, 1), ((5, 12, 1), (5, 14, 1)), (7, 13, 2, 1)),
                   ((6, 0, 3, 1), None, None),
                   ((6, 2, 3, 1), None, None),
                   ((6, 4, 3, 1), None, None),
                   ((6, 6, 3, 1), None, None),
                   ((6, 8, 3, 1), None, None),
                   ((6, 10, 3, 1), None, None),
                   ((6, 12, 3, 1), None, None),
                   ((6, 14, 3, 1), None, None))
        elif self.force_size == 5:
            pos = (((0, 10, 3, 11), ((1, 5, 5), (1, 21, 5)), (3, 13, 2, 5)),
                   ((2, 5, 3, 5), ((3, 2, 3), (3, 10, 3)), (5, 6, 2, 3)),
                   ((2, 21, 3, 5), ((3, 18, 3), (3, 26, 3)), (5, 22, 2, 3)),
                   ((4, 2, 3, 3), ((5, 1, 1), (5, 5, 1)), (7, 3, 2, 1)),
                   ((4, 10, 3, 3), ((5, 9, 1), (5, 13, 1)), (7, 11, 2, 1)),
                   ((4, 18, 3, 3), ((5, 17, 1), (5, 21, 1)), (7, 19, 2, 1)),
                   ((4, 26, 3, 3), ((5, 25, 1), (5, 29, 1)), (7, 27, 2, 1)),
                   ((6, 1, 3, 1), ((7, 0, 1), (7, 2, 1)), (9, 1, 2, 1)),
                   ((6, 5, 3, 1), ((7, 4, 1), (7, 6, 1)), (9, 5, 2, 1)),
                   ((6, 9, 3, 1), ((7, 8, 1), (7, 10, 1)), (9, 9, 2, 1)),
                   ((6, 13, 3, 1), ((7, 12, 1), (7, 14, 1)), (9, 13, 2, 1)),
                   ((6, 17, 3, 1), ((7, 16, 1), (7, 18, 1)), (9, 17, 2, 1)),
                   ((6, 21, 3, 1), ((7, 20, 1), (7, 22, 1)), (9, 21, 2, 1)),
                   ((6, 25, 3, 1), ((7, 24, 1), (7, 26, 1)), (9, 25, 2, 1)),
                   ((6, 29, 3, 1), ((7, 28, 1), (7, 30, 1)), (9, 29, 2, 1)),
                   ((8, 0, 3, 1), None, None),
                   ((8, 2, 3, 1), None, None),
                   ((8, 4, 3, 1), None, None),
                   ((8, 6, 3, 1), None, None),
                   ((8, 8, 3, 1), None, None),
                   ((8, 10, 3, 1), None, None),
                   ((8, 12, 3, 1), None, None),
                   ((8, 14, 3, 1), None, None),
                   ((8, 16, 3, 1), None, None),
                   ((8, 18, 3, 1), None, None),
                   ((8, 20, 3, 1), None, None),
                   ((8, 22, 3, 1), None, None),
                   ((8, 24, 3, 1), None, None),
                   ((8, 26, 3, 1), None, None),
                   ((8, 28, 3, 1), None, None),
                   ((8, 30, 3, 1), None, None))
    else:
        # Other styles compute positions dynamically in rebuild().
        pos = None
    # Build ancestor tree only once for all different sizes
    self._depth = 1
    lst = [None] * (2**self.force_size)
    self.find_tree(person, 0, 1, lst)
    # Purge current table content
    for child in self.table.get_children():
        child.destroy()
    ##self.table = Gtk.Grid()
    if person:
        self.rebuild(self.table, pos, lst, self.force_size)
def rebuild(self, table_widget, positions, lst, size):
    """
    Function called from rebuild_trees.
    For table_widget (Gtk.Grid) place list of person, use positions array.
    For style C position calculated, for others style use static positions.
    All display options process in this function.

    :param table_widget: the Gtk.Grid to populate.
    :param positions: static layout tuples (style 1) or None.
    :param lst: ancestor list built by find_tree(); each entry is
        [person, child_ref relation, family, alive, widget] or None.
    :param size: number of generations to show.
    """
    # Calculate maximum table size
    xmax = 0
    ymax = 0
    if self.tree_style == 0:
        xmax = 2 * size
        ymax = 2 ** size
    elif self.tree_style == 1:
        xmax = 2 * size + 2
        ymax = [0, 10, 14, 16, 32][size - 1]
    elif self.tree_style == 2:
        # For style C reduce the tree depth if the real depth found by
        # find_tree() is less than the configured maximum.
        if self.show_unknown_people:
            self._depth += 1
        if size > self._depth:
            size = self._depth
        xmax = 2 * size
        ymax = 2 ** size * 2
    pbw = None
    for i in range(0, 2 ** size - 1):
        ####################################################################
        # Table placement for person data
        ####################################################################
        if self.tree_style in [0, 2]:
            # Dynamic position of person i in tree: level is the
            # generation (log2 of the heap index), offset the position
            # within that generation.
            width = _width = 1
            height = _height = 3
            level = int(math.log(i+1, 2))
            offset = i + 1 - (2**level)
            if self.tree_style == 0:
                _delta = (2**size) // (2**level)
            else:
                _delta = (2**size) // (2**level) * 2
            x_pos = (1 + _width) * level + 1
            y_pos = _delta // 2 + offset * _delta - 1
            if self.tree_style == 0 and level == size - 1:
                # Last generation gets single-row boxes.
                y_pos = _delta // 2 + offset * _delta
                height = _height = 1
        else:
            # Static positions (style 1).
            try:
                x_pos = positions[i][0][0]+1
                y_pos = positions[i][0][1]+1
                width = positions[i][0][2]
                height = positions[i][0][3]
            except IndexError:  # no position for this person defined
                continue
        last_pbw = pbw
        pbw = None
        if not lst[i] and (
                (self.tree_style in [0, 2] and self.show_unknown_people and
                 lst[((i+1) // 2) - 1]) or self.tree_style == 1):
            #
            # No person -> show empty box
            #
            pbw = PersonBoxWidgetCairo(self, self.format_helper,
                                       self.dbstate, None, False, 0, None,
                                       tags=self.show_tag_color)
            if i > 0 and lst[((i+1) // 2) - 1]:
                # lst[((i+1) // 2) - 1] is the child of this missing
                # parent slot (heap parent index).
                fam_h = None
                fam = lst[((i+1) // 2) - 1][2]
                if fam:
                    fam_h = fam.get_handle()
                if not self.dbstate.db.readonly:
                    pbw.connect("button-press-event",
                                self.cb_missing_parent_button_press,
                                lst[((i+1) // 2) - 1][0].get_handle(), fam_h)
                    pbw.force_mouse_over = True
        elif lst[i]:
            #
            # Person exists -> populate box
            #
            image = False
            if self.show_images and height > 1 and (
                    i < ((2**size-1) // 2) or self.tree_style == 2):
                image = True
            pbw = PersonBoxWidgetCairo(self, self.format_helper,
                                       self.dbstate, lst[i][0], lst[i][3],
                                       height, image,
                                       tags=self.show_tag_color)
            lst[i][4] = pbw
            if height < 7:
                # Small boxes get the full formatted person as a tooltip.
                pbw.set_tooltip_text(self.format_helper.format_person(
                    lst[i][0], 11))
            fam_h = None
            if lst[i][2]:
                fam_h = lst[i][2].get_handle()
            pbw.connect("button-press-event",
                        self.cb_person_button_press,
                        lst[i][0].get_handle(), fam_h)
        if pbw:
            self.attach_widget(table_widget, pbw, xmax,
                               x_pos, x_pos+width, y_pos, y_pos+height)
        ####################################################################
        # Connection lines
        ####################################################################
        if self.tree_style == 1 and (
                positions[i][1] and len(positions[i][1]) == 2):
            # separate boxes for father and mother
            x_pos = positions[i][1][0][0]+1
            y_pos = positions[i][1][0][1]+1
            width = 1
            height = positions[i][1][0][2]
            rela = False
            if lst[2*i+1]:  # Father
                rela = lst[2*i+1][1]
            line = LineWidget2(1, rela, self.tree_direction)
            if lst[i] and lst[i][2]:
                # Required for popup menu
                line.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
                line.connect("button-press-event",
                             self.cb_relation_button_press,
                             lst[i][2].get_handle())
            self.attach_widget(table_widget, line, xmax,
                               x_pos, x_pos+width, y_pos, y_pos+height)
            x_pos = positions[i][1][1][0]+1
            y_pos = positions[i][1][1][1]+1
            rela = False
            if lst[2*i+2]:  # Mother
                rela = lst[2*i+2][1]
            line = LineWidget2(0, rela, self.tree_direction)
            if lst[i] and lst[i][2]:
                # Required for popup menu
                line.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
                line.connect("button-press-event",
                             self.cb_relation_button_press,
                             lst[i][2].get_handle())
            self.attach_widget(table_widget, line, xmax,
                               x_pos, x_pos+width, y_pos, y_pos+height)
        elif self.tree_style in [0, 2] and lst[((i+1) // 2) - 1]:
            # combined line for father and mother
            x_pos = (1 + _width) * level
            y_pos = offset * _delta - (_delta // 2) - 1
            width = 1
            height = _delta + 3
            if self.tree_style == 0 and level == size - 1:
                height -= 2
                y_pos += 1
            if i > 0 and i % 2 == 0 and (pbw or last_pbw):
                # i is even -> mother slot; last_pbw was the father's box.
                frela = mrela = None
                if lst[i]:
                    frela = lst[i][1]
                if lst[i - 1]:
                    mrela = lst[i-1][1]
                line = LineWidget(lst[((i+1) // 2) - 1][4],
                                  last_pbw, frela,
                                  pbw, mrela,
                                  self.tree_direction)
                if lst[((i+1) // 2) - 1] and lst[((i+1) // 2) - 1][2]:
                    # Required for popup menu
                    line.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
                    line.connect("button-press-event",
                                 self.cb_relation_button_press,
                                 lst[((i+1) // 2) - 1][2].get_handle())
                # Required for tooltip and mouse-over
                line.add_events(Gdk.EventMask.ENTER_NOTIFY_MASK)
                # Required for tooltip and mouse-over
                line.add_events(Gdk.EventMask.LEAVE_NOTIFY_MASK)
                line.set_tooltip_text(
                    self.format_helper.format_relation(
                        lst[((i+1) // 2) - 1][2], 11))
                self.attach_widget(table_widget, line, xmax,
                                   x_pos, x_pos+width, y_pos, y_pos+height)
        ####################################################################
        # Show marriage data
        ####################################################################
        if self.show_marriage_data and (
                self.tree_style == 1 and positions[i][2] or
                (self.tree_style in [0, 2] and level+1 < size)):
            if lst[i] and lst[i][2]:
                text = self.format_helper.format_relation(lst[i][2], 1, True)
            else:
                text = " "
            label = Gtk.Label(label=text)
            label.set_justify(Gtk.Justification.LEFT)
            label.set_use_markup(True)
            label.set_line_wrap(True)
            label.set_halign(Gtk.Align.START)
            if self.tree_style in [0, 2]:
                x_pos = (1 + _width) * (level + 1) + 1
                y_pos = _delta // 2 + offset * _delta - 1 + _height // 2
                width = 1
                height = 1
                if self.tree_style == 0 and level < 2 and size > 4:
                    # Boxes can be bigger for lowest levels on larger trees.
                    y_pos -= 2
                    height += 4
            else:
                x_pos = positions[i][2][0]+1
                y_pos = positions[i][2][1]+1
                width = positions[i][2][2]
                height = positions[i][2][3]
            self.attach_widget(table_widget, label, xmax,
                               x_pos, x_pos+width, y_pos, y_pos+height)
    ########################################################################
    # Add navigation arrows
    ########################################################################
    if lst[0]:
        if self.tree_direction == 2:
            child_arrow = "go-previous-symbolic"  # Gtk.ArrowType.LEFT
            parent_arrow = "go-next-symbolic"  # Gtk.ArrowType.RIGHT
        elif self.tree_direction == 0:
            child_arrow = "go-up-symbolic"  # Gtk.ArrowType.UP
            parent_arrow = "go-down-symbolic"  # Gtk.ArrowType.DOWN
        elif self.tree_direction == 1:
            child_arrow = "go-down-symbolic"  # Gtk.ArrowType.DOWN
            parent_arrow = "go-up-symbolic"  # Gtk.ArrowType.UP
        elif self.tree_direction == 3:
            child_arrow = "go-next-symbolic"  # Gtk.ArrowType.RIGHT
            parent_arrow = "go-previous-symbolic"  # Gtk.ArrowType.LEFT
        # GTK will reverse the icons for RTL locales, but we force LTR layout of the table,
        # so reverse the arrows back...
        if self.tree_direction in [2,3] and self.scrolledwindow.get_direction() == Gtk.TextDirection.RTL:
            child_arrow, parent_arrow = parent_arrow, child_arrow
        button = Gtk.Button.new_from_icon_name(child_arrow,
                                               Gtk.IconSize.BUTTON)
        childlist = find_children(self.dbstate.db, lst[0][0])
        if childlist:
            button.connect("clicked", self.cb_on_show_child_menu)
            button.set_tooltip_text(_("Jump to child..."))
        else:
            button.set_sensitive(False)
        ymid = ymax // 2
        self.attach_widget(table_widget, button, xmax,
                           0, 1, ymid, ymid + 1)
        button = Gtk.Button()
        button = Gtk.Button.new_from_icon_name(parent_arrow,
                                               Gtk.IconSize.BUTTON)
        if lst[1]:
            button.connect("clicked", self.cb_childmenu_changed,
                           lst[1][0].handle)
            button.set_tooltip_text(_("Jump to father"))
        else:
            button.set_sensitive(False)
        ymid = ymax // 4
        self.attach_widget(table_widget, button, xmax,
                           xmax, xmax + 1, ymid - 1, ymid + 2)
        button = Gtk.Button()
        button = Gtk.Button.new_from_icon_name(parent_arrow,
                                               Gtk.IconSize.BUTTON)
        if lst[2]:
            button.connect("clicked", self.cb_childmenu_changed,
                           lst[2][0].handle)
            button.set_tooltip_text(_("Jump to mother"))
        else:
            button.set_sensitive(False)
        ymid = ymax // 4 * 3
        self.attach_widget(table_widget, button, xmax,
                           xmax, xmax + 1, ymid - 1, ymid + 2)
    # add dummy widgets into the corners of the table
    # to allow the pedigree to be centered
    ## label = Gtk.Label(label="")
    ## table_widget.attach(label, 0, 1, 0, 1,
    ##                     Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL,
    ##                     Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0)
    ## label = Gtk.Label(label="")
    ## if self.tree_direction in [2, 3]:
    ##     table_widget.attach(label, xmax, xmax+1, ymax, ymax+1,
    ##                         Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL,
    ##                         Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0)
    ## else:
    ##     table_widget.attach(label, ymax, ymax+1, xmax, xmax+1,
    ##                         Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL,
    ##                         Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0)
    debug = False
    if debug:
        used_cells = {}
        xmax = 0
        ymax = 0
        # iterate table to see which cells are used.
        for child in table_widget.get_children():
            left = table_widget.child_get_property(child, "left-attach")
            right = table_widget.child_get_property(child, "right-attach")
            top = table_widget.child_get_property(child, "top-attach")
            bottom = table_widget.child_get_property(child, "bottom-attach")
            for x_pos in range(left, right):
                for y_pos in range(top, bottom):
                    try:
                        used_cells[x_pos][y_pos] = True
                    except KeyError:
                        used_cells[x_pos] = {}
                        used_cells[x_pos][y_pos] = True
                    if y_pos > ymax:
                        ymax = y_pos
                if x_pos > xmax:
                    xmax = x_pos
        for x_pos in range(0, xmax+1):
            for y_pos in range(0, ymax+1):
                try:
                    tmp = used_cells[x_pos][y_pos]
                except KeyError:
                    # fill unused cells
                    label = Gtk.Label(label="%d,%d"%(x_pos, y_pos))
                    frame = Gtk.ScrolledWindow(hadjustment=None,
                                               vadjustment=None)
                    frame.set_shadow_type(Gtk.ShadowType.NONE)
                    frame.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.NEVER)
                    frame.add(label)
                    table_widget.attach(frame, x_pos, y_pos, 1, 1)
    table_widget.show_all()
    # Setup scrollbars to focus on the view's root person
    window = table_widget.get_parent().get_parent().get_parent()
    hadjustment = window.get_hadjustment()
    vadjustment = window.get_vadjustment()
    if self.tree_direction == 2:
        self.update_scrollbar_positions(hadjustment, hadjustment.get_lower())
        self.update_scrollbar_positions(vadjustment,
            (vadjustment.get_upper() - vadjustment.get_page_size()) / 2)
    elif self.tree_direction == 0:
        self.update_scrollbar_positions(hadjustment,
            (hadjustment.get_upper() - hadjustment.get_page_size()) / 2)
        self.update_scrollbar_positions(vadjustment,
            vadjustment.get_upper() - vadjustment.get_page_size())
    elif self.tree_direction == 1:
        self.update_scrollbar_positions(hadjustment,
            (hadjustment.get_upper() - hadjustment.get_page_size()) / 2)
        self.update_scrollbar_positions(vadjustment, vadjustment.get_lower())
    elif self.tree_direction == 3:
        self.update_scrollbar_positions(hadjustment,
            hadjustment.get_upper() - hadjustment.get_page_size())
        self.update_scrollbar_positions(vadjustment,
            (vadjustment.get_upper() - vadjustment.get_page_size()) / 2)
    # Setup mouse wheel scroll direction, depending on tree direction
    if self.tree_direction in [0, 1]:
        self.cb_change_scroll_direction(None, True)
    elif self.tree_direction in [2, 3]:
        self.cb_change_scroll_direction(None, False)
def attach_widget(self, table, widget, xmax, right, left, top, bottom):
    """
    Attach a widget to the table, transposing/mirroring the coordinates
    according to self.tree_direction.

    NOTE(review): despite the names, *right* appears to be the near edge
    and *left* the far edge of the widget's span (right < left), with
    *top*/*bottom* the other axis; *xmax* is used to mirror the layout
    for the reversed directions — confirm against callers.
    """
    if self.tree_direction == 0:  # Vertical (top to bottom)
        table.attach(widget, top, right, bottom-top, left-right)
    elif self.tree_direction == 1:  # Vertical (bottom to top)
        table.attach(widget, top, xmax - left + 1, bottom-top, left - right)
    elif self.tree_direction == 2:  # Horizontal (left to right)
        table.attach(widget, right, top, left-right, bottom-top)
    elif self.tree_direction == 3:  # Horizontal (right to left)
        table.attach(widget, xmax - left + 1, top, left - right, bottom-top)
def cb_home(self, menuitem):
    """Change root person to the database's default (home) person."""
    defperson = self.dbstate.db.get_default_person()
    if defperson:
        self.change_active(defperson.get_handle())
def cb_set_home(self, menuitem, handle):
    """Set the database's default (home) person to *handle* and go there."""
    active = self.uistate.get_active('Person')
    if active:
        self.dbstate.db.set_default_person_handle(handle)
        # Navigate to the newly set home person.
        self.cb_home(None)
def cb_edit_person(self, obj, person_handle):
    """
    Open the person editor for *person_handle*.

    Triggered by a double click on a person box or from the submenu.

    :returns: True when the handle resolves to a person (even if the
        editor window was already open), False otherwise.
    """
    person = self.dbstate.db.get_person_from_handle(person_handle)
    if not person:
        return False
    try:
        EditPerson(self.dbstate, self.uistate, [], person)
    except WindowActiveError:
        # Editor already open for this person; treat as handled.
        pass
    return True
def cb_edit_family(self, obj, family_handle):
    """
    Open the family editor for *family_handle*.

    Triggered by a double click on a family line or from the submenu.

    :returns: True when the handle resolves to a family (even if the
        editor window was already open), False otherwise.
    """
    family = self.dbstate.db.get_family_from_handle(family_handle)
    if not family:
        return False
    try:
        EditFamily(self.dbstate, self.uistate, [], family)
    except WindowActiveError:
        # Editor already open for this family; treat as handled.
        pass
    return True
def cb_add_parents(self, obj, person_handle, family_handle):
    """
    Edit an incomplete family to add the missing parent(s).

    :param person_handle: the child whose parents are being edited.
    :param family_handle: existing family handle, or falsy when no
        parents exist yet.
    """
    if family_handle:  # one parent already exists -> Edit current family
        family = self.dbstate.db.get_family_from_handle(family_handle)
    else:  # no parents -> create new family with the person as child
        family = Family()
        childref = ChildRef()
        childref.set_reference_handle(person_handle)
        family.add_child_ref(childref)
    try:
        EditFamily(self.dbstate, self.uistate, [], family)
    except WindowActiveError:
        # Editor already open; nothing to do.
        return
def cb_copy_person_to_clipboard(self, obj, person_handle):
    """
    Renders the person data into some lines of text and
    puts that into the clipboard.

    :returns: True when the handle resolved to a person, else False.
    """
    person = self.dbstate.db.get_person_from_handle(person_handle)
    if person:
        clipboard = Gtk.Clipboard.get_for_display(Gdk.Display.get_default(),
                                                  Gdk.SELECTION_CLIPBOARD)
        # -1 lets GTK compute the text length itself.
        clipboard.set_text(self.format_helper.format_person(person, 11), -1)
        return True
    return False
def cb_copy_family_to_clipboard(self, obj, family_handle):
    """
    Renders the family data into some lines of text and
    puts that into the clipboard.

    :returns: True when the handle resolved to a family, else False.
    """
    family = self.dbstate.db.get_family_from_handle(family_handle)
    if family:
        clipboard = Gtk.Clipboard.get_for_display(Gdk.Display.get_default(),
                                                  Gdk.SELECTION_CLIPBOARD)
        # -1 lets GTK compute the text length itself.
        clipboard.set_text(self.format_helper.format_relation(family, 11), -1)
        return True
    return False
def cb_on_show_option_menu(self, obj, event, data=None):
    """Show the right-click option menu (navigation + settings)."""
    # Keep a reference on self so the menu is not garbage collected
    # while it is popped up.
    self.menu = Gtk.Menu()
    self.menu.set_reserve_toggle_size(False)
    self.add_nav_portion_to_menu(self.menu, None)
    self.add_settings_to_menu(self.menu)
    self.menu.popup(None, None, None, None, 0, event.time)
    return True
def cb_bg_button_press(self, widget, event):
    """
    Enter scroll (drag) mode when the left mouse button is pressed on
    the background, or show the option menu on right click.
    """
    if event.button == 1 and event.type == Gdk.EventType.BUTTON_PRESS:
        # Start drag-scrolling: remember the press position so motion
        # events can compute deltas.
        widget.get_window().set_cursor(self.FLEUR_CURSOR)
        self._last_x = event.x
        self._last_y = event.y
        self._in_move = True
        return True
    elif is_right_click(event):
        self.cb_on_show_option_menu(widget, event)
        return True
    return False
def cb_bg_button_release(self, widget, event):
    """Exit scroll (drag) mode when the left mouse button is released."""
    if event.button == 1 and event.type == Gdk.EventType.BUTTON_RELEASE:
        # Apply the final motion delta, then restore the cursor.
        self.cb_bg_motion_notify_event(widget, event)
        widget.get_window().set_cursor(None)
        self._in_move = False
        return True
    return False
def cb_bg_motion_notify_event(self, widget, event):
    """Function for motion notify events for drag-and-scroll mode."""
    if self._in_move and (event.type == Gdk.EventType.MOTION_NOTIFY or
                          event.type == Gdk.EventType.BUTTON_RELEASE):
        window = widget.get_parent()
        hadjustment = window.get_hadjustment()
        vadjustment = window.get_vadjustment()
        # Move the viewport opposite to the pointer motion since the
        # last event (grab-and-drag behaviour).
        self.update_scrollbar_positions(vadjustment,
            vadjustment.get_value() - (event.y - self._last_y))
        self.update_scrollbar_positions(hadjustment,
            hadjustment.get_value() - (event.x - self._last_x))
        return True
    return False
def update_scrollbar_positions(self, adjustment, value):
    """
    Clamp *value* to the adjustment's valid range and apply it.

    :param adjustment: a Gtk.Adjustment of a scrollbar.
    :param value: requested position; clamped into
        [lower, upper - page_size] before being applied.
    :returns: True always (convenient for event handlers).
    """
    maximum = adjustment.get_upper() - adjustment.get_page_size()
    # The previous code only clamped against the upper bound, so drag
    # deltas could request positions below get_lower(); clamp both ends.
    if value > maximum:
        value = maximum
    if value < adjustment.get_lower():
        value = adjustment.get_lower()
    adjustment.set_value(value)
    return True
def cb_bg_scroll_event(self, widget, event):
    """
    Remap vertical wheel scrolling to horizontal scrolling when
    self.scroll_direction is set (used for horizontal tree layouts).

    Always returns False so the event continues to propagate to the
    scrolled window with the (possibly rewritten) direction.
    """
    if self.scroll_direction and event.type == Gdk.EventType.SCROLL:
        if event.direction == Gdk.ScrollDirection.UP:
            event.direction = Gdk.ScrollDirection.LEFT
        elif event.direction == Gdk.ScrollDirection.DOWN:
            event.direction = Gdk.ScrollDirection.RIGHT
    return False
def cb_person_button_press(self, obj, event, person_handle, family_handle):
    """
    Call edit person function for mouse left button double click on person
    or submenu for person for mouse right click.
    And setup plug for button press on person widget.

    Always returns True: the event is consumed by the person box.
    """
    if is_right_click(event):
        self.cb_build_full_nav_menu(obj, event,
                                    person_handle, family_handle)
        return True
    elif (event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS
          and event.button == 1):
        self.cb_edit_person(obj, person_handle)
        return True
    return True
def cb_relation_button_press(self, obj, event, family_handle):
    """
    Call edit family function for mouse left button double click
    on family line or call full submenu for mouse right click.
    And setup plug for button press on family line.

    Always returns True: the event is consumed by the line widget.
    """
    if is_right_click(event):
        self.cb_build_relation_nav_menu(obj, event, family_handle)
        return True
    elif (event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS
          and event.button == 1):
        self.cb_edit_family(obj, family_handle)
        return True
    return True
def cb_missing_parent_button_press(self, obj, event,
                                   person_handle, family_handle):
    """
    Call function for not full family for mouse left button double click
    on missing persons or call submenu for mouse right click.
    """
    if (event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS
            and event.button == 1):
        self.cb_add_parents(obj, person_handle, family_handle)
        return True
    elif is_right_click(event):
        self.cb_build_missing_parent_nav_menu(obj, event, person_handle,
                                              family_handle)
        return True
    return False
def cb_on_show_child_menu(self, obj):
    """
    User clicked the button to move to a child of the active person.

    A single child is navigated to directly; multiple children get a
    popup menu. Returns 1 when a person was active, else 0.
    """
    person = self.dbstate.db.get_person_from_handle(self.get_active())
    if person:
        # Build and display the menu attached to the left pointing arrow
        # button. The menu consists of the children of the current root
        # person of the tree. Attach a child to each menu item.
        childlist = find_children(self.dbstate.db, person)
        if len(childlist) == 1:
            child = self.dbstate.db.get_person_from_handle(childlist[0])
            if child:
                self.change_active(childlist[0])
        elif len(childlist) > 1:
            # Keep a reference on self so the popup menu is not
            # garbage collected while shown.
            self.my_menu = Gtk.Menu()
            self.my_menu.set_reserve_toggle_size(False)
            for child_handle in childlist:
                child = self.dbstate.db.get_person_from_handle(child_handle)
                cname = escape(name_displayer.display(child))
                # Children who themselves have children are emphasized.
                if find_children(self.dbstate.db, child):
                    label = Gtk.Label(label='<b><i>%s</i></b>' % cname)
                else:
                    label = Gtk.Label(label=cname)
                label.set_use_markup(True)
                label.show()
                label.set_halign(Gtk.Align.START)
                menuitem = Gtk.MenuItem()
                menuitem.add(label)
                self.my_menu.append(menuitem)
                menuitem.connect("activate", self.cb_childmenu_changed,
                                 child_handle)
                menuitem.show()
            self.my_menu.popup(None, None, None, None, 0, 0)
        return 1
    return 0
def cb_childmenu_changed(self, obj, person_handle):
    """
    Callback for the pulldown menu selection, changing to the person
    attached with the menu item.
    """
    self.change_active(person_handle)
    return True
def cb_change_scroll_direction(self, menuitem, data):
    """
    Menu callback: store the mouse-wheel scroll orientation.

    :param menuitem: the activating menu item (unused).
    :param data: truthy -> horizontal scrolling, falsy -> vertical.
    """
    self.scroll_direction = bool(data)
def kb_goto_home(self, *obj):
    """Go to the home person from the keyboard (F2 accelerator)."""
    self.cb_home(None)
def find_tree(self, person, index, depth, lst, val=0):
    """
    Recursively build a list of ancestors.

    Fills *lst* as a binary heap: person at *index*, father at
    2*index+1, mother at 2*index+2. Each entry is
    [person, val, family, alive, widget-placeholder].

    :param val: the child_ref relation passed down from the caller
        (True for a birth relation — see mrel/frel below).
    """
    if depth > self.force_size or not person:
        return
    # Track the deepest generation actually present (used by rebuild()).
    if self._depth < depth:
        self._depth = depth
    try:
        alive = probably_alive(person, self.dbstate.db)
    except RuntimeError:
        # A cyclic pedigree makes probably_alive recurse forever.
        ErrorDialog(_('Relationship loop detected'),
                    _('A person was found to be his/her own ancestor.'),
                    parent=self.uistate.window)
        alive = False
    lst[index] = [person, val, None, alive, None]
    parent_families = person.get_parent_family_handle_list()
    if parent_families:
        # Only the first (primary) parent family is displayed.
        family_handle = parent_families[0]
    else:
        return
    mrel = True
    frel = True
    family = self.dbstate.db.get_family_from_handle(family_handle)
    if family:
        for child_ref in family.get_child_ref_list():
            if child_ref.ref == person.handle:
                # Remember whether the links to mother/father are
                # birth relations (drawn differently otherwise).
                mrel = child_ref.mrel == ChildRefType.BIRTH
                frel = child_ref.frel == ChildRefType.BIRTH
        # Re-store the entry, now including the parent family.
        lst[index] = [person, val, family, alive, None]
        father_handle = family.get_father_handle()
        if father_handle:
            father = self.dbstate.db.get_person_from_handle(
                father_handle)
            self.find_tree(father, (2*index)+1, depth+1, lst, frel)
        mother_handle = family.get_mother_handle()
        if mother_handle:
            mother = self.dbstate.db.get_person_from_handle(
                mother_handle)
            self.find_tree(mother, (2*index)+2, depth+1, lst, mrel)
def add_nav_portion_to_menu(self, menu, person_handle):
    """
    This function adds a common history-navigation portion
    to the context menu. Used by both build_nav_menu() and
    build_full_nav_menu() methods.
    """
    hobj = self.uistate.get_history(self.navigation_type(),
                                    self.navigation_group())
    home_sensitivity = True
    if not self.dbstate.db.get_default_person():
        home_sensitivity = False
    # bug 4884: need to translate the home label
    entries = [
        (_("Pre_vious"), self.back_clicked, not hobj.at_front()),
        (_("_Next"), self.fwd_clicked, not hobj.at_end()),
        (_("_Home"), self.cb_home, home_sensitivity),
    ]
    for label, callback, sensitivity in entries:
        item = Gtk.MenuItem.new_with_mnemonic(label)
        item.set_sensitive(sensitivity)
        if callback:
            item.connect("activate", callback)
        item.show()
        menu.append(item)
    item = Gtk.MenuItem.new_with_mnemonic(_("Set _Home Person"))
    item.connect("activate", self.cb_set_home, person_handle)
    if person_handle is None:
        # No person under the pointer -> cannot set home person.
        item.set_sensitive(False)
    item.show()
    menu.append(item)
def add_settings_to_menu(self, menu):
    """
    Add frequently used settings to the menu. Most settings will be set
    from the configuration dialog.
    """
    # Separator.
    item = Gtk.SeparatorMenuItem()
    item.show()
    menu.append(item)
    # Mouse scroll direction setting.
    item = Gtk.MenuItem(label=_("Mouse scroll direction"))
    item.set_submenu(Gtk.Menu())
    scroll_direction_menu = item.get_submenu()
    entry = Gtk.RadioMenuItem(label=_("Top <-> Bottom"))
    entry.connect("activate", self.cb_change_scroll_direction, False)
    if self.scroll_direction == False:
        entry.set_active(True)
    entry.show()
    scroll_direction_menu.append(entry)
    entry = Gtk.RadioMenuItem(label=_("Left <-> Right"))
    entry.connect("activate", self.cb_change_scroll_direction, True)
    if self.scroll_direction == True:
        entry.set_active(True)
    entry.show()
    scroll_direction_menu.append(entry)
    scroll_direction_menu.show()
    item.show()
    menu.append(item)
def cb_build_missing_parent_nav_menu(self, obj, event,
                                     person_handle, family_handle):
    """Builds the context menu for a missing parent box."""
    # Keep a reference on self so the menu survives while popped up.
    self.menu = Gtk.Menu()
    self.menu.set_reserve_toggle_size(False)
    add_item = Gtk.MenuItem.new_with_mnemonic(_('_Add'))
    add_item.connect("activate", self.cb_add_parents, person_handle,
                     family_handle)
    add_item.show()
    self.menu.append(add_item)
    # Add a separator line
    add_item = Gtk.SeparatorMenuItem()
    add_item.show()
    self.menu.append(add_item)
    # Add history-based navigation
    self.add_nav_portion_to_menu(self.menu, None)
    self.add_settings_to_menu(self.menu)
    self.menu.popup(None, None, None, None, 0, event.time)
    return 1
def cb_build_full_nav_menu(self, obj, event, person_handle, family_handle):
    """
    Builds the full menu (including Siblings, Spouses, Children,
    and Parents) with navigation.

    :param obj: widget that received the button press (unused)
    :param event: Gdk event; its time stamp is passed to popup()
    :param person_handle: handle of the person the menu is centered on
    :param family_handle: family handle, used when offering to add parents
    :return: 1 when the menu was shown, 0 when the person was not found
    """
    self.menu = Gtk.Menu()
    self.menu.set_reserve_toggle_size(False)
    person = self.dbstate.db.get_person_from_handle(person_handle)
    if not person:
        return 0
    # Entry that navigates to the person itself.
    go_item = Gtk.MenuItem(label=name_displayer.display(person))
    go_item.connect("activate", self.cb_childmenu_changed, person_handle)
    go_item.show()
    self.menu.append(go_item)
    edit_item = Gtk.MenuItem.new_with_mnemonic(_('_Edit'))
    edit_item.connect("activate", self.cb_edit_person, person_handle)
    edit_item.show()
    self.menu.append(edit_item)
    clipboard_item = Gtk.MenuItem.new_with_mnemonic(_('_Copy'))
    clipboard_item.connect("activate", self.cb_copy_person_to_clipboard,
                           person_handle)
    clipboard_item.show()
    self.menu.append(clipboard_item)
    # collect all spouses, parents and children
    # NOTE(review): linked_persons is filled below but the only use (the
    # "Related" filter) is commented out, so it is currently inert.
    linked_persons = []
    # Go over spouses and build their menu
    item = Gtk.MenuItem(label=_("Spouses"))
    fam_list = person.get_family_handle_list()
    no_spouses = 1
    for fam_id in fam_list:
        family = self.dbstate.db.get_family_from_handle(fam_id)
        # The spouse is whichever partner is not the current person.
        if family.get_father_handle() == person.get_handle():
            sp_id = family.get_mother_handle()
        else:
            sp_id = family.get_father_handle()
        spouse = None
        if sp_id:
            spouse = self.dbstate.db.get_person_from_handle(sp_id)
        if not spouse:
            continue
        if no_spouses:
            # Create the submenu lazily, on the first spouse found.
            no_spouses = 0
            item.set_submenu(Gtk.Menu())
            sp_menu = item.get_submenu()
            sp_menu.set_reserve_toggle_size(False)
        sp_item = Gtk.MenuItem(label=name_displayer.display(spouse))
        linked_persons.append(sp_id)
        sp_item.connect("activate", self.cb_childmenu_changed, sp_id)
        sp_item.show()
        sp_menu.append(sp_item)
    if no_spouses:
        # No spouse found: grey the entry out.
        item.set_sensitive(0)
    item.show()
    self.menu.append(item)
    # Go over siblings and build their menu
    item = Gtk.MenuItem(label=_("Siblings"))
    pfam_list = person.get_parent_family_handle_list()
    no_siblings = 1
    for pfam in pfam_list:
        fam = self.dbstate.db.get_family_from_handle(pfam)
        sib_list = fam.get_child_ref_list()
        for sib_ref in sib_list:
            sib_id = sib_ref.ref
            if sib_id == person.get_handle():
                continue
            sib = self.dbstate.db.get_person_from_handle(sib_id)
            if not sib:
                continue
            if no_siblings:
                no_siblings = 0
                item.set_submenu(Gtk.Menu())
                sib_menu = item.get_submenu()
                sib_menu.set_reserve_toggle_size(False)
            # Siblings who have children themselves are shown bold italic.
            if find_children(self.dbstate.db, sib):
                label = Gtk.Label(label='<b><i>%s</i></b>'
                                  % escape(name_displayer.display(sib)))
            else:
                label = Gtk.Label(label=escape(name_displayer.display(sib)))
            sib_item = Gtk.MenuItem()
            label.set_use_markup(True)
            label.show()
            label.set_halign(Gtk.Align.START)
            sib_item.add(label)
            linked_persons.append(sib_id)
            sib_item.connect("activate", self.cb_childmenu_changed, sib_id)
            sib_item.show()
            sib_menu.append(sib_item)
    if no_siblings:
        item.set_sensitive(0)
    item.show()
    self.menu.append(item)
    # Go over children and build their menu
    item = Gtk.MenuItem(label=_("Children"))
    no_children = 1
    childlist = find_children(self.dbstate.db, person)
    for child_handle in childlist:
        child = self.dbstate.db.get_person_from_handle(child_handle)
        if not child:
            continue
        if no_children:
            no_children = 0
            item.set_submenu(Gtk.Menu())
            child_menu = item.get_submenu()
            child_menu.set_reserve_toggle_size(False)
        # Children who have children themselves are shown bold italic.
        if find_children(self.dbstate.db, child):
            label = Gtk.Label(label='<b><i>%s</i></b>'
                              % escape(name_displayer.display(child)))
        else:
            label = Gtk.Label(label=escape(name_displayer.display(child)))
        child_item = Gtk.MenuItem()
        label.set_use_markup(True)
        label.show()
        label.set_halign(Gtk.Align.START)
        child_item.add(label)
        linked_persons.append(child_handle)
        child_item.connect("activate", self.cb_childmenu_changed,
                           child_handle)
        child_item.show()
        child_menu.append(child_item)
    if no_children:
        item.set_sensitive(0)
    item.show()
    self.menu.append(item)
    # Go over parents and build their menu
    item = Gtk.MenuItem(label=_("Parents"))
    no_parents = 1
    par_list = find_parents(self.dbstate.db, person)
    for par_id in par_list:
        par = None
        if par_id:
            par = self.dbstate.db.get_person_from_handle(par_id)
        if not par:
            continue
        if no_parents:
            no_parents = 0
            item.set_submenu(Gtk.Menu())
            par_menu = item.get_submenu()
            par_menu.set_reserve_toggle_size(False)
        # Parents who have parents themselves are shown bold italic.
        if find_parents(self.dbstate.db, par):
            label = Gtk.Label(label='<b><i>%s</i></b>'
                              % escape(name_displayer.display(par)))
        else:
            label = Gtk.Label(label=escape(name_displayer.display(par)))
        par_item = Gtk.MenuItem()
        label.set_use_markup(True)
        label.show()
        label.set_halign(Gtk.Align.START)
        par_item.add(label)
        linked_persons.append(par_id)
        par_item.connect("activate", self.cb_childmenu_changed, par_id)
        par_item.show()
        par_menu.append(par_item)
    if no_parents:
        # With the expanded tree style and unknown people hidden, offer
        # to add new parents instead of just greying the entry out.
        if self.tree_style == 2 and not self.show_unknown_people:
            item.set_submenu(Gtk.Menu())
            par_menu = item.get_submenu()
            par_menu.set_reserve_toggle_size(False)
            par_item = Gtk.MenuItem(label=_("Add New Parents..."))
            par_item.connect("activate", self.cb_add_parents, person_handle,
                             family_handle)
            par_item.show()
            par_menu.append(par_item)
        else:
            item.set_sensitive(0)
    item.show()
    self.menu.append(item)
    # Go over witnessed people ("Related") and build their menu
    item = Gtk.MenuItem(label=_("Related"))
    no_related = 1
    for p_id in find_witnessed_people(self.dbstate.db, person):
        #if p_id in linked_persons:
        #    continue  # skip already listed family members
        per = self.dbstate.db.get_person_from_handle(p_id)
        if not per:
            continue
        if no_related:
            no_related = 0
            item.set_submenu(Gtk.Menu())
            per_menu = item.get_submenu()
            per_menu.set_reserve_toggle_size(False)
        label = Gtk.Label(label=escape(name_displayer.display(per)))
        per_item = Gtk.MenuItem()
        label.set_use_markup(True)
        label.show()
        label.set_halign(Gtk.Align.START)
        per_item.add(label)
        per_item.connect("activate", self.cb_childmenu_changed, p_id)
        per_item.show()
        per_menu.append(per_item)
    if no_related:
        item.set_sensitive(0)
    item.show()
    self.menu.append(item)
    # Add separator line
    item = Gtk.SeparatorMenuItem()
    item.show()
    self.menu.append(item)
    # Add history-based navigation
    self.add_nav_portion_to_menu(self.menu, person_handle)
    self.add_settings_to_menu(self.menu)
    self.menu.popup(None, None, None, None, 0, event.time)
    return 1
def cb_build_relation_nav_menu(self, obj, event, family_handle):
    """Builds the context menu for a parents-child relation line."""
    menu = Gtk.Menu()
    menu.set_reserve_toggle_size(False)
    self.menu = menu
    family = self.dbstate.db.get_family_from_handle(family_handle)
    if not family:
        return 0
    # Edit and copy entries operating on the family record.
    for label, callback in ((_('_Edit'), self.cb_edit_family),
                            (_('_Copy'), self.cb_copy_family_to_clipboard)):
        entry = Gtk.MenuItem.new_with_mnemonic(label)
        entry.connect("activate", callback, family_handle)
        entry.show()
        menu.append(entry)
    # Separator before the navigation portion.
    separator = Gtk.SeparatorMenuItem()
    separator.show()
    menu.append(separator)
    # History-based navigation and quick settings.
    self.add_nav_portion_to_menu(menu, None)
    self.add_settings_to_menu(menu)
    menu.popup(None, None, None, None, 0, event.time)
    return 1
def cb_update_show_tags(self, client, cnxn_id, entry, data):
    """
    Called when the configuration menu changes the tags setting.
    """
    # Config callbacks deliver the value as a string; 'True' enables it.
    self.show_tag_color = entry == 'True'
    self.rebuild_trees(self.get_active())
def cb_update_show_images(self, client, cnxn_id, entry, data):
    """
    Called when the configuration menu changes the images setting.
    """
    # Config callbacks deliver the value as a string; 'True' enables it.
    self.show_images = entry == 'True'
    self.rebuild_trees(self.get_active())
def cb_update_show_marriage(self, client, cnxn_id, entry, data):
    """
    Called when the configuration menu changes the marriage data setting.
    """
    # Config callbacks deliver the value as a string; 'True' enables it.
    self.show_marriage_data = entry == 'True'
    self.rebuild_trees(self.get_active())
def cb_update_show_unknown_people(self, client, cnxn_id, entry, data):
    """
    Called when the configuration menu changes the unknown people setting.
    """
    # Config callbacks deliver the value as a string; 'True' enables it.
    self.show_unknown_people = entry == 'True'
    self.rebuild_trees(self.get_active())
def cb_update_layout(self, obj, constant):
    """
    Called when the configuration menu changes the layout.

    :param obj: the combo box holding the selected layout index
    :param constant: the config key to store the new layout under
    """
    entry = obj.get_active()
    self._config.set(constant, entry)
    self.tree_style = int(entry)
    # Keep the tree-size slider range consistent with the chosen style.
    adj = self.config_size_slider.get_adjustment()
    if entry == 1: # Limit tree size to 5 for the compact style
        adj.set_upper(5)
        if self.force_size > 5:
            # Clamp an oversized setting and move the slider to match.
            self.force_size = 5
            adj.set_value(5)
    else:
        adj.set_upper(9)
    adj.emit("changed")
    self.rebuild_trees(self.get_active())
def cb_update_tree_direction(self, client, cnxn_id, entry, data):
    """
    Called when the configuration menu changes the tree direction.
    """
    # The config callback delivers the value as a string.
    direction = int(entry)
    self.tree_direction = direction
    self.rebuild_trees(self.get_active())
def cb_update_tree_size(self, client, cnxn_id, entry, data):
    """
    Called when the configuration menu changes the tree size.
    """
    # The config callback delivers the value as a string.
    size = int(entry)
    self.force_size = size
    self.rebuild_trees(self.get_active())
def config_connect(self):
    """
    Overwritten from :class:`~gui.views.pageview.PageView` method.

    This method will be called after the ini file is initialized,
    use it to monitor changes in the ini file.
    """
    # Rebuild the tree whenever one of these config keys changes.
    self._config.connect('interface.pedview-show-images',
                         self.cb_update_show_images)
    self._config.connect('interface.pedview-show-marriage',
                         self.cb_update_show_marriage)
    self._config.connect('interface.pedview-show-tags',
                         self.cb_update_show_tags)
    self._config.connect('interface.pedview-show-unknown-people',
                         self.cb_update_show_unknown_people)
    self._config.connect('interface.pedview-tree-direction',
                         self.cb_update_tree_direction)
    self._config.connect('interface.pedview-tree-size',
                         self.cb_update_tree_size)
def _get_configure_page_funcs(self):
    """
    Return a list of functions that create gtk elements to use in the
    notebook pages of the Configure dialog.

    :return: list of functions; each builds one notebook page
    """
    return [self.config_panel]
def config_panel(self, configdialog):
    """
    Function that builds the widget in the configuration dialog.

    :param configdialog: the configuration dialog providing the
                         add_checkbox/add_combo/add_slider helpers
    :return: tuple of (page title, Gtk.Grid with the settings widgets)
    """
    grid = Gtk.Grid()
    grid.set_border_width(12)
    grid.set_column_spacing(6)
    grid.set_row_spacing(6)
    # Boolean display options, one checkbox per config key.
    configdialog.add_checkbox(grid,
                              _('Show images'),
                              0, 'interface.pedview-show-images')
    configdialog.add_checkbox(grid,
                              _('Show marriage data'),
                              1, 'interface.pedview-show-marriage')
    configdialog.add_checkbox(grid,
                              _('Show unknown people'),
                              2, 'interface.pedview-show-unknown-people')
    configdialog.add_checkbox(grid,
                              _('Show tags'),
                              3, 'interface.pedview-show-tags')
    # Layout style; changing it also adjusts the size slider's range.
    configdialog.add_combo(grid,
                           _('Tree style'),
                           4, 'interface.pedview-layout',
                           ((0, _('Standard')),
                            (1, _('Compact')),
                            (2, _('Expanded'))),
                           callback=self.cb_update_layout)
    configdialog.add_combo(grid,
                           _('Tree direction'),
                           5, 'interface.pedview-tree-direction',
                           ((0, _('Vertical (↓)')),
                            (1, _('Vertical (↑)')),
                            (2, _('Horizontal (→)')),
                            (3, _('Horizontal (←)'))))
    # Keep a reference to the slider so cb_update_layout can re-range it.
    self.config_size_slider = configdialog.add_slider(grid,
                                                      _('Tree size'),
                                                      6, 'interface.pedview-tree-size',
                                                      (2, 9))
    return _('Layout'), grid
| sam-m888/gramps | gramps/plugins/view/pedigreeview.py | Python | gpl-2.0 | 84,720 | [
"FLEUR"
] | ef80d635045d8be4c67be5063048acd8cd4f436642b99d0cf47247290be470e6 |
import argparse
import os
from coalib.misc import Constants
from coalib.parsing.filters import available_filters
# argcomplete is a delayed optional import
# This variable may be None, the module, or False
argcomplete = None
class CustomFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A Custom Formatter that will keep the metavars in the usage but remove them
    in the more detailed arguments section.
    """

    def _format_action_invocation(self, action):
        """Return the invocation text shown in the arguments section."""
        if action.option_strings:
            # Optional arguments: list the option strings ("-f, --files")
            # without repeating the metavar after each one.
            return ', '.join(action.option_strings)
        # Positionals have no option strings; show the bare metavar.
        metavar, = self._metavar_formatter(action, action.dest)(1)
        return metavar
class PathArg(str):
    """
    Uni(xi)fying OS-native directory separators in path arguments.

    Removing the pain from interactively using coala in a Windows cmdline,
    because backslashes are interpreted as escaping syntax and therefore
    removed when arguments are turned into coala settings.

    >>> import os
    >>> PathArg(os.path.join('path', 'with', 'separators'))
    'path/with/separators'
    """

    def __new__(cls, path):
        # Normalize the platform separator to '/' so the path survives
        # the argument -> coala setting round trip unchanged.
        normalized = path.replace(os.path.sep, '/')
        return super().__new__(cls, normalized)
def default_arg_parser(formatter_class=None):
    """
    This function creates an ArgParser to parse command line arguments.

    :param formatter_class: Formatting the arg_parser output into a specific
                            form. For example: In the manpage format.
    :return: an ``argparse.ArgumentParser`` pre-populated with all coala
             command line options.
    """
    formatter_class = (CustomFormatter if formatter_class is None
                       else formatter_class)
    # NOTE: the description is a runtime help string shown to users.
    description = """
coala provides a common command-line interface for linting and fixing all your
code, regardless of the programming languages you use.
To find out what kind of analysis coala offers for the languages you use, visit
http://coala.io/languages, or run::
$ coala --show-bears --filter-by language C Python
To perform code analysis, simply specify the analysis routines (bears) and the
files you want it to run on, for example:
spaceBear::
$ coala --bears SpaceConsistencyBear --files **.py
coala can also automatically fix your code:
spacePatchBear::
$ coala --bears SpaceConsistencyBear --files **.py --apply-patches
To run coala without user interaction, run the `coala --non-interactive`,
`coala --json` and `coala --format` commands.
"""
    arg_parser = argparse.ArgumentParser(
        formatter_class=formatter_class,
        prog='coala',
        description=description,
        # Use our own help so that we can put it in the group we want
        add_help=False)
    arg_parser.add_argument('TARGETS',
                            nargs='*',
                            help='sections to be executed exclusively')
    # --- Info group: help and version ---
    info_group = arg_parser.add_argument_group('Info')
    info_group.add_argument('-h',
                            '--help',
                            action='help',
                            help='show this help message and exit')
    info_group.add_argument('-v',
                            '--version',
                            action='version',
                            version=Constants.VERSION)
    # --- Mode group: how coala interacts and reports ---
    mode_group = arg_parser.add_argument_group('Mode')
    mode_group.add_argument(
        '-C', '--non-interactive', const=True, action='store_const',
        help='run coala in non interactive mode')
    mode_group.add_argument(
        '--ci', action='store_const', dest='non_interactive', const=True,
        help='continuous integration run, alias for `--non-interactive`')
    mode_group.add_argument(
        '--json', const=True, action='store_const',
        help='mode in which coala will display output as json')
    mode_group.add_argument(
        '--format', const=True, nargs='?', metavar='STR',
        help='output results with a custom format string, e.g. '
             '"Message: {message}"; possible placeholders: '
             'id, origin, file, line, end_line, column, end_column, '
             'severity, severity_str, message, message_base, '
             'message_arguments, affected_code, source_lines')
    # --- Configuration group: config file discovery and caching ---
    config_group = arg_parser.add_argument_group('Configuration')
    config_group.add_argument(
        '-c', '--config', type=PathArg, nargs=1, metavar='FILE',
        help='configuration file to be used, defaults to {}'.format(
            Constants.local_coafile))
    config_group.add_argument(
        '-F', '--find-config', action='store_const', const=True,
        help='find {} in ancestors of the working directory'.format(
            Constants.local_coafile))
    config_group.add_argument(
        '-I', '--no-config', const=True, action='store_const',
        help='run without using any config file')
    config_group.add_argument(
        '-s', '--save', type=PathArg, nargs='?', const=True, metavar='FILE',
        help='save used arguments to a config file to a {}, the given path, '
             'or at the value of -c'.format(Constants.local_coafile))
    config_group.add_argument(
        '--disable-caching', const=True, action='store_const',
        help='run on all files even if unchanged')
    config_group.add_argument(
        '--flush-cache', const=True, action='store_const',
        help='rebuild the file cache')
    config_group.add_argument(
        '--no-autoapply-warn', const=True, action='store_const',
        help='turn off warning about patches not being auto applicable')
    # --- Inputs group: bears and files to analyze ---
    inputs_group = arg_parser.add_argument_group('Inputs')
    # Keep a reference to the bears action for argcomplete (see below).
    bears = inputs_group.add_argument(
        '-b', '--bears', nargs='+', metavar='NAME',
        help='names of bears to use')
    inputs_group.add_argument(
        '-f', '--files', type=PathArg, nargs='+', metavar='FILE',
        help='files that should be checked')
    inputs_group.add_argument(
        '-i', '--ignore', type=PathArg, nargs='+', metavar='FILE',
        help='files that should be ignored')
    inputs_group.add_argument(
        '--limit-files', type=PathArg, nargs='+', metavar='FILE',
        help="filter the `--files` argument's matches further")
    inputs_group.add_argument(
        '-d', '--bear-dirs', type=PathArg, nargs='+', metavar='DIR',
        help='additional directories which may contain bears')
    # --- Outputs group: verbosity, listing and reporting options ---
    outputs_group = arg_parser.add_argument_group('Outputs')
    outputs_group.add_argument(
        '-V', '--verbose', action='store_const',
        dest='log_level', const='DEBUG',
        help='alias for `-L DEBUG`')
    outputs_group.add_argument(
        '-L', '--log-level', nargs=1,
        choices=['ERROR', 'INFO', 'WARNING', 'DEBUG'], metavar='ENUM',
        help='set log output level to DEBUG/INFO/WARNING/ERROR, '
             'defaults to INFO')
    outputs_group.add_argument(
        '-m', '--min-severity', nargs=1,
        choices=('INFO', 'NORMAL', 'MAJOR'), metavar='ENUM',
        help='set minimal result severity to INFO/NORMAL/MAJOR')
    outputs_group.add_argument(
        '-N', '--no-color', const=True, action='store_const',
        help='display output without coloring (excluding logs)')
    outputs_group.add_argument(
        '-B', '--show-bears', const=True, action='store_const',
        help='list all bears')
    outputs_group.add_argument(
        '-l', '--filter-by-language', nargs='+', metavar='LANG',
        help='filters `--show-bears` by the given languages')
    outputs_group.add_argument(
        '--filter-by', action='append', nargs='+',
        metavar=('FILTER_NAME FILTER_ARG', 'FILTER_ARG'),
        help='filters `--show-bears` by the filter given as argument. '
             'Available filters: {}'.format(', '.join(sorted(
                 available_filters))))
    outputs_group.add_argument(
        '-p', '--show-capabilities', nargs='+', metavar='LANG',
        help='show what coala can fix and detect for the given languages')
    outputs_group.add_argument(
        '-D', '--show-description', const=True, action='store_const',
        help='show bear descriptions for `--show-bears`')
    outputs_group.add_argument(
        '--show-settings', const=True, action='store_const',
        help='show bear settings for `--show-bears`')
    outputs_group.add_argument(
        '--show-details', const=True, action='store_const',
        help='show bear details for `--show-bears`')
    outputs_group.add_argument(
        '--log-json', const=True, action='store_const',
        help='output logs as json along with results'
             ' (must be called with --json)')
    outputs_group.add_argument(
        '-o', '--output', type=PathArg, nargs=1, metavar='FILE',
        help='write results to the given file (must be called with --json)')
    outputs_group.add_argument(
        '-r', '--relpath', nargs='?', const=True,
        help='return relative paths for files (must be called with --json)')
    # --- Developer tools: debugging and profiling are mutually exclusive ---
    devtool_exclusive_group = arg_parser.add_mutually_exclusive_group()
    devtool_exclusive_group.add_argument(
        '--debug-bears', nargs='?', const=True,
        help='Enable bear debugging with pdb, that can help to identify and'
        ' correct errors in bear code. Steps into bear code as soon as being'
        ' executed. To specify which bears to debug, supply bear names as'
        ' additional arguments. If used without arguments, all bears specified'
        ' with --bears will be debugged (even implicit dependency bears).')
    devtool_exclusive_group.add_argument(
        '--profile', nargs='?', const=True,
        help='Enable bear profiling with cProfile. To specify where to dump the'
        ' profiled files, supply the directory path. If specified directory'
        ' does not exist it will be created. If the specified path points to an'
        ' already existing file a error is raised. All bears (even'
        ' implicit dependency bears) in a section will be profiled. Profiled'
        ' data files will have a name format'
        ' ``{section.name}_{bear.name}.prof``.')
    # --- Miscellaneous options ---
    misc_group = arg_parser.add_argument_group('Miscellaneous')
    misc_group.add_argument(
        '-S', '--settings', nargs='+', metavar='SETTING',
        help='arbitrary settings in the form of section.key=value')
    misc_group.add_argument(
        '-a', '--apply-patches', action='store_const',
        dest='default_actions', const='**: ApplyPatchAction',
        help='apply all patches automatically if possible')
    misc_group.add_argument(
        '-j', '--jobs', type=int,
        help='number of jobs to use in parallel')
    misc_group.add_argument(
        '-n', '--no-orig', const=True, action='store_const',
        help="don't create .orig backup files before patching")
    misc_group.add_argument(
        '-A', '--single-action', const=True, action='store_const',
        help='apply a single action for all results')
    misc_group.add_argument(
        '--debug', const=True, action='store_const',
        help='run coala in debug mode, starting ipdb, '
             'which must be separately installed, '
             'on unexpected internal exceptions '
             '(implies --verbose)')
    # Lazily import argcomplete only once per process; the module-level
    # variable acts as a tri-state: None (untried), module, or False.
    global argcomplete
    if argcomplete is None:
        try:
            # Auto completion should be optional, because of somewhat
            # complicated setup.
            import argcomplete
            argcomplete.autocomplete(arg_parser)
        except ImportError:
            argcomplete = False
    if argcomplete:
        try:
            from coalib.collecting.Collectors import (
                _argcomplete_bears_names)
        except ImportError:
            pass
        else:
            # Provide bear-name completion for the -b/--bears option.
            bears.completer = _argcomplete_bears_names
    return arg_parser
| jayvdb/coala | coalib/parsing/DefaultArgParser.py | Python | agpl-3.0 | 11,829 | [
"VisIt"
] | e6020aef8812254e8c2e9c89b64584763eca17b6074c8b9d175877bb829217d9 |
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import unittest
from external.wip import work_in_progress
from .molecule import Molecule
from .resonance import *
from .resonance import _clar_optimization, _clar_transformation
class ResonanceTest(unittest.TestCase):
def testAllylShift(self):
"""Test allyl shift for hexadienyl radical"""
molList = generate_resonance_structures(Molecule(SMILES="C=C[CH]C=CC"))
self.assertEqual(len(molList), 3)
def testOxime(self):
    """Test resonance structure generation for CC=N[O] radical

    Simple case for lone pair <=> radical resonance"""
    molList = generate_resonance_structures(Molecule(SMILES="CC=N[O]"))
    self.assertEqual(len(molList), 3)
    # Generator expressions instead of list comprehensions inside any():
    # any() short-circuits, so intermediate lists are wasted work (C419).
    self.assertTrue(any(any(atom.charge != 0 for atom in mol.vertices)
                        for mol in molList))
def testAzide(self):
    """Test resonance structure generation for ethyl azide

    Simple case for N5dd <=> N5t resonance"""
    molList = generate_resonance_structures(Molecule(SMILES="CCN=[N+]=[N-]"))
    self.assertEqual(len(molList), 3)
    # Generator expressions instead of list comprehensions inside all():
    # all()/any() short-circuit, so intermediate lists are wasted (C419).
    self.assertTrue(all(any(atom.charge != 0 for atom in mol.vertices)
                        for mol in molList))
def testStyryl1(self):
    """Test resonance structure generation for styryl, with radical on branch

    In this case, the radical can be delocalized into the aromatic ring"""
    mol = Molecule(SMILES="c1ccccc1[C]=C")
    self.assertEqual(len(generate_resonance_structures(mol)), 4)
def testStyryl2(self):
    """Test resonance structure generation for styryl, with radical on ring

    In this case, the radical can be delocalized into the aromatic ring"""
    mol = Molecule(SMILES="C=C=C1C=C[CH]C=C1")
    self.assertEqual(len(generate_resonance_structures(mol)), 4)
def testNaphthyl(self):
    """Test resonance structure generation for naphthyl radical

    In this case, the radical is orthogonal to the pi-orbital plane and cannot delocalize"""
    mol = Molecule(SMILES="c12[c]cccc1cccc2")
    self.assertEqual(len(generate_resonance_structures(mol)), 4)
def testMethylNapthalene(self):
    """Test resonance structure generation for methyl naphthalene

    Example of stable polycyclic aromatic species"""
    mol = Molecule(SMILES="CC1=CC=CC2=CC=CC=C12")
    self.assertEqual(len(generate_resonance_structures(mol)), 4)
def testMethylPhenanthrene(self):
    """Test resonance structure generation for methyl phenanthrene

    Example of stable polycyclic aromatic species"""
    mol = Molecule(SMILES="CC1=CC=CC2C3=CC=CC=C3C=CC=21")
    self.assertEqual(len(generate_resonance_structures(mol)), 3)
def testMethylPhenanthreneRadical(self):
    """Test resonance structure generation for methyl phenanthrene radical

    Example radical polycyclic aromatic species where the radical can delocalize"""
    mol = Molecule(SMILES="[CH2]C1=CC=CC2C3=CC=CC=C3C=CC=21")
    self.assertEqual(len(generate_resonance_structures(mol)), 9)
def testAromaticWithLonePairResonance(self):
    """Test resonance structure generation for aromatic species with lone pair <=> radical resonance"""
    mol = Molecule(SMILES="c1ccccc1CC=N[O]")
    self.assertEqual(len(generate_resonance_structures(mol)), 6)
def testAromaticWithNResonance(self):
    """Test resonance structure generation for aromatic species with N5dd <=> N5t resonance"""
    mol = Molecule(SMILES="c1ccccc1CCN=[N+]=[N-]")
    self.assertEqual(len(generate_resonance_structures(mol)), 6)
def testNoClarStructures(self):
    """Test that we can turn off Clar structure generation."""
    mol = Molecule(SMILES='C1=CC=CC2C3=CC=CC=C3C=CC=21')
    structures = generate_resonance_structures(mol, clarStructures=False)
    self.assertEqual(len(structures), 2)
def testC13H11Rad(self):
    """Test resonance structure generation for p-methylbenzylbenzene radical

    Has multiple resonance structures that break aromaticity of a ring"""
    mol = Molecule(SMILES="[CH](c1ccccc1)c1ccc(C)cc1")
    self.assertEqual(len(generate_resonance_structures(mol)), 6)
def testC8H8(self):
    """Test resonance structure generation for 5,6-dimethylene-1,3-cyclohexadiene

    Example of molecule that RDKit considers aromatic, but RMG does not"""
    mol = Molecule(SMILES="C=C1C=CC=CC1=C")
    self.assertEqual(len(generate_resonance_structures(mol)), 1)
def testC8H7J(self):
    """Test resonance structure generation for 5,6-dimethylene-1,3-cyclohexadiene radical

    Example of molecule that RDKit considers aromatic, but RMG does not"""
    mol = Molecule(SMILES="C=C1C=CC=CC1=[CH]")
    self.assertEqual(len(generate_resonance_structures(mol)), 1)
def testC8H7J2(self):
    """Test resonance structure generation for 5,6-dimethylene-1,3-cyclohexadiene radical

    Example of molecule that RDKit considers aromatic, but RMG does not"""
    mol = Molecule(SMILES="C=C1C=[C]C=CC1=C")
    self.assertEqual(len(generate_resonance_structures(mol)), 1)
def test_C9H9_aro(self):
    """Test cyclopropyl benzene radical, aromatic SMILES"""
    structures = generate_resonance_structures(
        Molecule(SMILES="[CH]1CC1c1ccccc1"))
    self.assertEqual(len(structures), 2)
def test_C9H9_kek(self):
    """Test cyclopropyl benzene radical, kekulized SMILES"""
    structures = generate_resonance_structures(
        Molecule(SMILES="[CH]1CC1C1C=CC=CC=1"))
    self.assertEqual(len(structures), 2)
def test_Benzene_aro(self):
    """Test benzene, aromatic SMILES"""
    structures = generate_resonance_structures(
        Molecule(SMILES="c1ccccc1"))
    self.assertEqual(len(structures), 2)
def test_Benzene_kek(self):
    """Test benzene, kekulized SMILES"""
    structures = generate_resonance_structures(
        Molecule(SMILES="C1C=CC=CC=1"))
    self.assertEqual(len(structures), 2)
def test_C9H11_aro(self):
    """Test propylbenzene radical, aromatic SMILES"""
    structures = generate_resonance_structures(
        Molecule(SMILES="[CH2]CCc1ccccc1"))
    self.assertEqual(len(structures), 2)
def test_C10H11_aro(self):
    """Test cyclobutylbenzene radical, aromatic SMILES"""
    structures = generate_resonance_structures(
        Molecule(SMILES="[CH]1CCC1c1ccccc1"))
    self.assertEqual(len(structures), 2)
def test_C9H10_aro(self):
    """Test cyclopropylbenzene, aromatic SMILES"""
    structures = generate_resonance_structures(
        Molecule(SMILES="C1CC1c1ccccc1"))
    self.assertEqual(len(structures), 2)
def test_C10H12_aro(self):
    """Test cyclopropylmethyl benzene, aromatic SMILES"""
    structures = generate_resonance_structures(
        Molecule(SMILES="C1CC1c1c(C)cccc1"))
    self.assertEqual(len(structures), 3)
def test_C9H10_aro_2(self):
    """Test cyclopropyl benzene, generate aromatic resonance isomers"""
    structures = generate_aromatic_resonance_structures(
        Molecule(SMILES="C1CC1c1ccccc1"))
    self.assertEqual(len(structures), 1)
def testFusedAromatic1(self):
    """Test we can make aromatic perylene from both adjlist and SMILES"""
    # Reference structure built explicitly with aromatic ('B') bonds.
    perylene = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {3,B} {6,B} {7,B}
2 C u0 p0 c0 {4,B} {5,B} {8,B}
3 C u0 p0 c0 {1,B} {4,B} {11,B}
4 C u0 p0 c0 {2,B} {3,B} {12,B}
5 C u0 p0 c0 {2,B} {6,B} {15,B}
6 C u0 p0 c0 {1,B} {5,B} {16,B}
7 C u0 p0 c0 {1,B} {9,B} {10,B}
8 C u0 p0 c0 {2,B} {13,B} {14,B}
9 C u0 p0 c0 {7,B} {17,B} {22,S}
10 C u0 p0 c0 {7,B} {18,B} {23,S}
11 C u0 p0 c0 {3,B} {18,B} {25,S}
12 C u0 p0 c0 {4,B} {19,B} {26,S}
13 C u0 p0 c0 {8,B} {19,B} {28,S}
14 C u0 p0 c0 {8,B} {20,B} {29,S}
15 C u0 p0 c0 {5,B} {20,B} {31,S}
16 C u0 p0 c0 {6,B} {17,B} {32,S}
17 C u0 p0 c0 {9,B} {16,B} {21,S}
18 C u0 p0 c0 {10,B} {11,B} {24,S}
19 C u0 p0 c0 {12,B} {13,B} {27,S}
20 C u0 p0 c0 {14,B} {15,B} {30,S}
21 H u0 p0 c0 {17,S}
22 H u0 p0 c0 {9,S}
23 H u0 p0 c0 {10,S}
24 H u0 p0 c0 {18,S}
25 H u0 p0 c0 {11,S}
26 H u0 p0 c0 {12,S}
27 H u0 p0 c0 {19,S}
28 H u0 p0 c0 {13,S}
29 H u0 p0 c0 {14,S}
30 H u0 p0 c0 {20,S}
31 H u0 p0 c0 {15,S}
32 H u0 p0 c0 {16,S}
""")
    perylene2 = Molecule().fromSMILES('c1cc2cccc3c4cccc5cccc(c(c1)c23)c54')
    # for/else: the test fails only when no aromatic resonance structure
    # of the SMILES-derived molecule matches the reference structure.
    for isomer in generate_aromatic_resonance_structures(perylene2):
        if perylene.isIsomorphic(isomer):
            break
    else: # didn't break
        self.fail("{} isn't isomorphic with any aromatic forms of {}".format(
            perylene.toSMILES(),
            perylene2.toSMILES()
        ))
def testFusedAromatic2(self):
    """Test we can make aromatic naphthalene from both adjlist and SMILES"""
    # Reference structure built explicitly with aromatic ('B') bonds.
    naphthalene = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,B} {3,B} {4,B}
2 C u0 p0 c0 {1,B} {5,B} {6,B}
3 C u0 p0 c0 {1,B} {8,B} {13,S}
4 C u0 p0 c0 {1,B} {9,B} {14,S}
5 C u0 p0 c0 {2,B} {10,B} {17,S}
6 C u0 p0 c0 {2,B} {7,B} {18,S}
7 C u0 p0 c0 {6,B} {8,B} {11,S}
8 C u0 p0 c0 {3,B} {7,B} {12,S}
9 C u0 p0 c0 {4,B} {10,B} {15,S}
10 C u0 p0 c0 {5,B} {9,B} {16,S}
11 H u0 p0 c0 {7,S}
12 H u0 p0 c0 {8,S}
13 H u0 p0 c0 {3,S}
14 H u0 p0 c0 {4,S}
15 H u0 p0 c0 {9,S}
16 H u0 p0 c0 {10,S}
17 H u0 p0 c0 {5,S}
18 H u0 p0 c0 {6,S}
""")
    naphthalene2 = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')
    # for/else: the test fails only when no aromatic resonance structure
    # of the SMILES-derived molecule matches the reference structure.
    for isomer in generate_aromatic_resonance_structures(naphthalene2):
        if naphthalene.isIsomorphic(isomer):
            break
    else: # didn't break
        self.fail("{} isn't isomorphic with any aromatic forms of {}".format(
            naphthalene.toSMILES(),
            naphthalene2.toSMILES()
        ))
def testAromaticResonanceStructures(self):
    """Test that generate_aromatic_resonance_structures gives consistent output.

    Check that we get the same resonance structure regardless of which
    structure we start with.
    """
    # Kekulized form, radical on methyl
    struct1 = Molecule().fromAdjacencyList("""
multiplicity 2
1 C u0 p0 c0 {2,S} {3,D} {7,S}
2 C u0 p0 c0 {1,S} {4,S} {8,D}
3 C u0 p0 c0 {1,D} {5,S} {11,S}
4 C u0 p0 c0 {2,S} {9,D} {10,S}
5 C u0 p0 c0 {3,S} {6,D} {15,S}
6 C u0 p0 c0 {5,D} {12,S} {16,S}
7 C u0 p0 c0 {1,S} {12,D} {18,S}
8 C u0 p0 c0 {2,D} {13,S} {19,S}
9 C u0 p0 c0 {4,D} {14,S} {22,S}
10 C u0 p0 c0 {4,S} {11,D} {23,S}
11 C u0 p0 c0 {3,S} {10,D} {24,S}
12 C u0 p0 c0 {6,S} {7,D} {17,S}
13 C u0 p0 c0 {8,S} {14,D} {20,S}
14 C u0 p0 c0 {9,S} {13,D} {21,S}
15 C u1 p0 c0 {5,S} {25,S} {26,S}
16 H u0 p0 c0 {6,S}
17 H u0 p0 c0 {12,S}
18 H u0 p0 c0 {7,S}
19 H u0 p0 c0 {8,S}
20 H u0 p0 c0 {13,S}
21 H u0 p0 c0 {14,S}
22 H u0 p0 c0 {9,S}
23 H u0 p0 c0 {10,S}
24 H u0 p0 c0 {11,S}
25 H u0 p0 c0 {15,S}
26 H u0 p0 c0 {15,S}
""")
    # Kekulized form, radical on ring
    struct2 = Molecule().fromAdjacencyList("""
multiplicity 2
1 C u0 p0 c0 {2,S} {3,S} {7,D}
2 C u0 p0 c0 {1,S} {4,S} {8,D}
3 C u0 p0 c0 {1,S} {5,S} {11,D}
4 C u0 p0 c0 {2,S} {9,S} {10,D}
5 C u0 p0 c0 {3,S} {6,S} {15,D}
6 C u0 p0 c0 {5,S} {12,D} {16,S}
7 C u0 p0 c0 {1,D} {12,S} {17,S}
8 C u0 p0 c0 {2,D} {13,S} {18,S}
9 C u0 p0 c0 {4,S} {14,D} {19,S}
10 C u0 p0 c0 {4,D} {11,S} {20,S}
11 C u0 p0 c0 {3,D} {10,S} {21,S}
12 C u0 p0 c0 {6,D} {7,S} {22,S}
13 C u1 p0 c0 {8,S} {14,S} {23,S}
14 C u0 p0 c0 {9,D} {13,S} {24,S}
15 C u0 p0 c0 {5,D} {25,S} {26,S}
16 H u0 p0 c0 {6,S}
17 H u0 p0 c0 {7,S}
18 H u0 p0 c0 {8,S}
19 H u0 p0 c0 {9,S}
20 H u0 p0 c0 {10,S}
21 H u0 p0 c0 {11,S}
22 H u0 p0 c0 {12,S}
23 H u0 p0 c0 {13,S}
24 H u0 p0 c0 {14,S}
25 H u0 p0 c0 {15,S}
26 H u0 p0 c0 {15,S}
""")
    # Aromatic form
    struct3 = Molecule().fromAdjacencyList("""
multiplicity 2
1 C u0 p0 c0 {2,B} {3,B} {7,B}
2 C u0 p0 c0 {1,B} {4,B} {8,B}
3 C u0 p0 c0 {1,B} {5,B} {11,B}
4 C u0 p0 c0 {2,B} {9,B} {10,B}
5 C u0 p0 c0 {3,B} {6,B} {15,S}
6 C u0 p0 c0 {5,B} {12,B} {16,S}
7 C u0 p0 c0 {1,B} {12,B} {18,S}
8 C u0 p0 c0 {2,B} {13,B} {19,S}
9 C u0 p0 c0 {4,B} {14,B} {22,S}
10 C u0 p0 c0 {4,B} {11,B} {23,S}
11 C u0 p0 c0 {3,B} {10,B} {24,S}
12 C u0 p0 c0 {6,B} {7,B} {17,S}
13 C u0 p0 c0 {8,B} {14,B} {20,S}
14 C u0 p0 c0 {9,B} {13,B} {21,S}
15 C u1 p0 c0 {5,S} {25,S} {26,S}
16 H u0 p0 c0 {6,S}
17 H u0 p0 c0 {12,S}
18 H u0 p0 c0 {7,S}
19 H u0 p0 c0 {8,S}
20 H u0 p0 c0 {13,S}
21 H u0 p0 c0 {14,S}
22 H u0 p0 c0 {9,S}
23 H u0 p0 c0 {10,S}
24 H u0 p0 c0 {11,S}
25 H u0 p0 c0 {15,S}
26 H u0 p0 c0 {15,S}
""")
    results = [generate_aromatic_resonance_structures(struct)
               for struct in (struct1, struct2, struct3)]
    # Each starting point should yield exactly one aromatic structure...
    for result in results:
        self.assertEqual(len(result), 1)
    # ...and all three must agree with each other.
    self.assertTrue(results[0][0].isIsomorphic(results[1][0]))
    self.assertTrue(results[0][0].isIsomorphic(results[2][0]))
def testBridgedAromatic(self):
    """Test that we can handle bridged aromatics.

    This is affected by how rings are perceived: getSmallestSetOfSmallestRings
    gives non-deterministic output, so using getAllCyclesOfSize allows this
    test to pass.
    """
    molecule = Molecule(SMILES='c12c3cccc1c3ccc2')
    expected = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,B} {3,B} {8,B}
2 C u0 p0 c0 {1,B} {4,B} {5,B}
3 C u0 p0 c0 {1,B} {4,S} {6,B}
4 C u0 p0 c0 {2,B} {3,S} {7,B}
5 C u0 p0 c0 {2,B} {9,B} {11,S}
6 C u0 p0 c0 {3,B} {9,B} {13,S}
7 C u0 p0 c0 {4,B} {10,B} {14,S}
8 C u0 p0 c0 {1,B} {10,B} {16,S}
9 C u0 p0 c0 {5,B} {6,B} {12,S}
10 C u0 p0 c0 {7,B} {8,B} {15,S}
11 H u0 p0 c0 {5,S}
12 H u0 p0 c0 {9,S}
13 H u0 p0 c0 {6,S}
14 H u0 p0 c0 {7,S}
15 H u0 p0 c0 {10,S}
16 H u0 p0 c0 {8,S}
""")
    structures = generate_resonance_structures(molecule)
    self.assertEqual(len(structures), 3)
    self.assertTrue(expected.isIsomorphic(structures[1]))
def testPolycyclicAromaticWithNonAromaticRing(self):
    """Test aromatic resonance structure generation with a pseudo-aromatic ring.

    This applies in cases where RDKit misidentifies one ring as aromatic, but
    there are other rings in the molecule that are actually aromatic.
    """
    molecule = Molecule(SMILES='c1c2cccc1C(=C)C=[C]2')
    expected = Molecule().fromAdjacencyList("""
multiplicity 2
1 C u0 p0 c0 {2,S} {4,B} {5,B}
2 C u0 p0 c0 {1,S} {8,S} {9,D}
3 C u0 p0 c0 {4,B} {6,B} {10,S}
4 C u0 p0 c0 {1,B} {3,B} {11,S}
5 C u0 p0 c0 {1,B} {7,B} {14,S}
6 C u0 p0 c0 {3,B} {7,B} {12,S}
7 C u0 p0 c0 {5,B} {6,B} {13,S}
8 C u0 p0 c0 {2,S} {10,D} {15,S}
9 C u0 p0 c0 {2,D} {16,S} {17,S}
10 C u1 p0 c0 {3,S} {8,D}
11 H u0 p0 c0 {4,S}
12 H u0 p0 c0 {6,S}
13 H u0 p0 c0 {7,S}
14 H u0 p0 c0 {5,S}
15 H u0 p0 c0 {8,S}
16 H u0 p0 c0 {9,S}
17 H u0 p0 c0 {9,S}
""")
    structures = generate_resonance_structures(molecule)
    self.assertEqual(len(structures), 2)
    self.assertTrue(expected.isIsomorphic(structures[1]))
def testPolycyclicAromaticWithNonAromaticRing2(self):
    """Test aromatic resonance structure generation with a pseudo-aromatic ring.

    This applies in cases where RDKit misidentifies one ring as aromatic, but
    there are other rings in the molecule that are actually aromatic.
    """
    molecule = Molecule(SMILES='C=C(C1=CC2=C(C=C1C=C3)C4=CC5=CC=CC=C5C=C4C=C2)C3=C')
    expected = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {4,S} {6,B} {11,B}
2 C u0 p0 c0 {3,B} {5,B} {12,B}
3 C u0 p0 c0 {2,B} {9,B} {13,B}
4 C u0 p0 c0 {1,S} {10,S} {23,D}
5 C u0 p0 c0 {2,B} {11,B} {20,B}
6 C u0 p0 c0 {1,B} {12,B} {15,S}
7 C u0 p0 c0 {8,B} {13,B} {17,B}
8 C u0 p0 c0 {7,B} {14,B} {18,B}
9 C u0 p0 c0 {3,B} {14,B} {19,B}
10 C u0 p0 c0 {4,S} {16,S} {24,D}
11 C u0 p0 c0 {1,B} {5,B} {25,S}
12 C u0 p0 c0 {2,B} {6,B} {26,S}
13 C u0 p0 c0 {3,B} {7,B} {29,S}
14 C u0 p0 c0 {8,B} {9,B} {34,S}
15 C u0 p0 c0 {6,S} {16,D} {27,S}
16 C u0 p0 c0 {10,S} {15,D} {28,S}
17 C u0 p0 c0 {7,B} {21,B} {30,S}
18 C u0 p0 c0 {8,B} {22,B} {33,S}
19 C u0 p0 c0 {9,B} {20,B} {35,S}
20 C u0 p0 c0 {5,B} {19,B} {36,S}
21 C u0 p0 c0 {17,B} {22,B} {31,S}
22 C u0 p0 c0 {18,B} {21,B} {32,S}
23 C u0 p0 c0 {4,D} {37,S} {38,S}
24 C u0 p0 c0 {10,D} {39,S} {40,S}
25 H u0 p0 c0 {11,S}
26 H u0 p0 c0 {12,S}
27 H u0 p0 c0 {15,S}
28 H u0 p0 c0 {16,S}
29 H u0 p0 c0 {13,S}
30 H u0 p0 c0 {17,S}
31 H u0 p0 c0 {21,S}
32 H u0 p0 c0 {22,S}
33 H u0 p0 c0 {18,S}
34 H u0 p0 c0 {14,S}
35 H u0 p0 c0 {19,S}
36 H u0 p0 c0 {20,S}
37 H u0 p0 c0 {23,S}
38 H u0 p0 c0 {23,S}
39 H u0 p0 c0 {24,S}
40 H u0 p0 c0 {24,S}
""")
    structures = generate_resonance_structures(molecule)
    self.assertEqual(len(structures), 4)
    self.assertTrue(expected.isIsomorphic(structures[1]))
def testKekulizeBenzene(self):
    """Test that we can kekulize benzene."""
    aromatic = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,B} {6,B} {7,S}
2 C u0 p0 c0 {1,B} {3,B} {8,S}
3 C u0 p0 c0 {2,B} {4,B} {9,S}
4 C u0 p0 c0 {3,B} {5,B} {10,S}
5 C u0 p0 c0 {4,B} {6,B} {11,S}
6 C u0 p0 c0 {1,B} {5,B} {12,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {4,S}
11 H u0 p0 c0 {5,S}
12 H u0 p0 c0 {6,S}
""")
    # Expected alternating single/double bond pattern.
    kekulized = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,D} {6,S} {7,S}
2 C u0 p0 c0 {1,D} {3,S} {8,S}
3 C u0 p0 c0 {2,S} {4,D} {9,S}
4 C u0 p0 c0 {3,D} {5,S} {10,S}
5 C u0 p0 c0 {4,S} {6,D} {11,S}
6 C u0 p0 c0 {1,S} {5,D} {12,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {4,S}
11 H u0 p0 c0 {5,S}
12 H u0 p0 c0 {6,S}
""")
    result = generate_kekule_structure(aromatic)
    self.assertEqual(len(result), 1)
    self.assertTrue(result[0].isIsomorphic(kekulized))
def testKekulizeNaphthalene(self):
    """Test that we can kekulize naphthalene."""
    aromatic = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,B} {3,B} {4,B}
2 C u0 p0 c0 {1,B} {5,B} {6,B}
3 C u0 p0 c0 {1,B} {8,B} {13,S}
4 C u0 p0 c0 {1,B} {9,B} {14,S}
5 C u0 p0 c0 {2,B} {10,B} {17,S}
6 C u0 p0 c0 {2,B} {7,B} {18,S}
7 C u0 p0 c0 {6,B} {8,B} {11,S}
8 C u0 p0 c0 {3,B} {7,B} {12,S}
9 C u0 p0 c0 {4,B} {10,B} {15,S}
10 C u0 p0 c0 {5,B} {9,B} {16,S}
11 H u0 p0 c0 {7,S}
12 H u0 p0 c0 {8,S}
13 H u0 p0 c0 {3,S}
14 H u0 p0 c0 {4,S}
15 H u0 p0 c0 {9,S}
16 H u0 p0 c0 {10,S}
17 H u0 p0 c0 {5,S}
18 H u0 p0 c0 {6,S}
""")
    result = generate_kekule_structure(aromatic)
    self.assertEqual(len(result), 1)
    self.assertFalse(result[0].isAromatic())
    # Collect each bond once (bonds appear in both endpoint atoms' dicts).
    unique_bonds = {bond for atom in result[0].atoms
                    for bond in atom.bonds.itervalues()}
    double_count = sum(1 for bond in unique_bonds if bond.isDouble())
    self.assertEqual(double_count, 5)
def testKekulizePhenanthrene(self):
    """Test that we can kekulize phenanthrene."""
    aromatic = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,B} {3,B} {5,B}
2 C u0 p0 c0 {1,B} {4,B} {9,B}
3 C u0 p0 c0 {1,B} {6,B} {10,B}
4 C u0 p0 c0 {2,B} {7,B} {8,B}
5 C u0 p0 c0 {1,B} {12,B} {17,S}
6 C u0 p0 c0 {3,B} {7,B} {18,S}
7 C u0 p0 c0 {4,B} {6,B} {19,S}
8 C u0 p0 c0 {4,B} {13,B} {20,S}
9 C u0 p0 c0 {2,B} {14,B} {23,S}
10 C u0 p0 c0 {3,B} {11,B} {24,S}
11 C u0 p0 c0 {10,B} {12,B} {15,S}
12 C u0 p0 c0 {5,B} {11,B} {16,S}
13 C u0 p0 c0 {8,B} {14,B} {21,S}
14 C u0 p0 c0 {9,B} {13,B} {22,S}
15 H u0 p0 c0 {11,S}
16 H u0 p0 c0 {12,S}
17 H u0 p0 c0 {5,S}
18 H u0 p0 c0 {6,S}
19 H u0 p0 c0 {7,S}
20 H u0 p0 c0 {8,S}
21 H u0 p0 c0 {13,S}
22 H u0 p0 c0 {14,S}
23 H u0 p0 c0 {9,S}
24 H u0 p0 c0 {10,S}
""")
    result = generate_kekule_structure(aromatic)
    self.assertEqual(len(result), 1)
    self.assertFalse(result[0].isAromatic())
    # Collect each bond once (bonds appear in both endpoint atoms' dicts).
    unique_bonds = {bond for atom in result[0].atoms
                    for bond in atom.bonds.itervalues()}
    double_count = sum(1 for bond in unique_bonds if bond.isDouble())
    self.assertEqual(double_count, 7)
def testKekulizePyrene(self):
    """Test that we can kekulize pyrene."""
    aromatic = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,B} {3,B} {6,B}
2 C u0 p0 c0 {1,B} {4,B} {5,B}
3 C u0 p0 c0 {1,B} {7,B} {8,B}
4 C u0 p0 c0 {2,B} {9,B} {10,B}
5 C u0 p0 c0 {2,B} {11,B} {12,B}
6 C u0 p0 c0 {1,B} {13,B} {14,B}
7 C u0 p0 c0 {3,B} {15,B} {18,S}
8 C u0 p0 c0 {3,B} {9,B} {19,S}
9 C u0 p0 c0 {4,B} {8,B} {20,S}
10 C u0 p0 c0 {4,B} {16,B} {21,S}
11 C u0 p0 c0 {5,B} {16,B} {23,S}
12 C u0 p0 c0 {5,B} {13,B} {24,S}
13 C u0 p0 c0 {6,B} {12,B} {25,S}
14 C u0 p0 c0 {6,B} {15,B} {26,S}
15 C u0 p0 c0 {7,B} {14,B} {17,S}
16 C u0 p0 c0 {10,B} {11,B} {22,S}
17 H u0 p0 c0 {15,S}
18 H u0 p0 c0 {7,S}
19 H u0 p0 c0 {8,S}
20 H u0 p0 c0 {9,S}
21 H u0 p0 c0 {10,S}
22 H u0 p0 c0 {16,S}
23 H u0 p0 c0 {11,S}
24 H u0 p0 c0 {12,S}
25 H u0 p0 c0 {13,S}
26 H u0 p0 c0 {14,S}
""")
    result = generate_kekule_structure(aromatic)
    self.assertEqual(len(result), 1)
    self.assertFalse(result[0].isAromatic())
    # Collect each bond once (bonds appear in both endpoint atoms' dicts).
    unique_bonds = {bond for atom in result[0].atoms
                    for bond in atom.bonds.itervalues()}
    double_count = sum(1 for bond in unique_bonds if bond.isDouble())
    self.assertEqual(double_count, 8)
def testKekulizeCorannulene(self):
    """Test that we can kekulize corannulene."""
    aromatic = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,B} {5,B} {8,B}
2 C u0 p0 c0 {1,B} {3,B} {10,B}
3 C u0 p0 c0 {2,B} {4,B} {9,B}
4 C u0 p0 c0 {3,B} {5,B} {6,B}
5 C u0 p0 c0 {1,B} {4,B} {7,B}
6 C u0 p0 c0 {4,B} {12,B} {13,B}
7 C u0 p0 c0 {5,B} {14,B} {15,B}
8 C u0 p0 c0 {1,B} {16,B} {20,B}
9 C u0 p0 c0 {3,B} {11,B} {17,B}
10 C u0 p0 c0 {2,B} {18,B} {19,B}
11 C u0 p0 c0 {9,B} {12,B} {21,S}
12 C u0 p0 c0 {6,B} {11,B} {22,S}
13 C u0 p0 c0 {6,B} {14,B} {23,S}
14 C u0 p0 c0 {7,B} {13,B} {24,S}
15 C u0 p0 c0 {7,B} {16,B} {25,S}
16 C u0 p0 c0 {8,B} {15,B} {26,S}
17 C u0 p0 c0 {9,B} {18,B} {27,S}
18 C u0 p0 c0 {10,B} {17,B} {28,S}
19 C u0 p0 c0 {10,B} {20,B} {29,S}
20 C u0 p0 c0 {8,B} {19,B} {30,S}
21 H u0 p0 c0 {11,S}
22 H u0 p0 c0 {12,S}
23 H u0 p0 c0 {13,S}
24 H u0 p0 c0 {14,S}
25 H u0 p0 c0 {15,S}
26 H u0 p0 c0 {16,S}
27 H u0 p0 c0 {17,S}
28 H u0 p0 c0 {18,S}
29 H u0 p0 c0 {19,S}
30 H u0 p0 c0 {20,S}
""")
    result = generate_kekule_structure(aromatic)
    self.assertEqual(len(result), 1)
    self.assertFalse(result[0].isAromatic())
    # Collect each bond once (bonds appear in both endpoint atoms' dicts).
    unique_bonds = {bond for atom in result[0].atoms
                    for bond in atom.bonds.itervalues()}
    double_count = sum(1 for bond in unique_bonds if bond.isDouble())
    self.assertEqual(double_count, 10)
def testKekulizeCoronene(self):
    """Test that we can kekulize coronene."""
    aromatic = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,B} {6,B} {12,B}
2 C u0 p0 c0 {1,B} {3,B} {7,B}
3 C u0 p0 c0 {2,B} {4,B} {8,B}
4 C u0 p0 c0 {3,B} {5,B} {9,B}
5 C u0 p0 c0 {4,B} {6,B} {10,B}
6 C u0 p0 c0 {1,B} {5,B} {11,B}
7 C u0 p0 c0 {2,B} {14,B} {15,B}
8 C u0 p0 c0 {3,B} {16,B} {17,B}
9 C u0 p0 c0 {4,B} {18,B} {19,B}
10 C u0 p0 c0 {5,B} {20,B} {21,B}
11 C u0 p0 c0 {6,B} {22,B} {23,B}
12 C u0 p0 c0 {1,B} {13,B} {24,B}
13 C u0 p0 c0 {12,B} {14,B} {25,S}
14 C u0 p0 c0 {7,B} {13,B} {26,S}
15 C u0 p0 c0 {7,B} {16,B} {27,S}
16 C u0 p0 c0 {8,B} {15,B} {28,S}
17 C u0 p0 c0 {8,B} {18,B} {29,S}
18 C u0 p0 c0 {9,B} {17,B} {30,S}
19 C u0 p0 c0 {9,B} {20,B} {31,S}
20 C u0 p0 c0 {10,B} {19,B} {32,S}
21 C u0 p0 c0 {10,B} {22,B} {33,S}
22 C u0 p0 c0 {11,B} {21,B} {34,S}
23 C u0 p0 c0 {11,B} {24,B} {35,S}
24 C u0 p0 c0 {12,B} {23,B} {36,S}
25 H u0 p0 c0 {13,S}
26 H u0 p0 c0 {14,S}
27 H u0 p0 c0 {15,S}
28 H u0 p0 c0 {16,S}
29 H u0 p0 c0 {17,S}
30 H u0 p0 c0 {18,S}
31 H u0 p0 c0 {19,S}
32 H u0 p0 c0 {20,S}
33 H u0 p0 c0 {21,S}
34 H u0 p0 c0 {22,S}
35 H u0 p0 c0 {23,S}
36 H u0 p0 c0 {24,S}
""")
    result = generate_kekule_structure(aromatic)
    self.assertEqual(len(result), 1)
    self.assertFalse(result[0].isAromatic())
    # Collect each bond once (bonds appear in both endpoint atoms' dicts).
    unique_bonds = {bond for atom in result[0].atoms
                    for bond in atom.bonds.itervalues()}
    double_count = sum(1 for bond in unique_bonds if bond.isDouble())
    self.assertEqual(double_count, 12)
def testKekulizeBridgedAromatic(self):
    """Test that we can kekulize a bridged polycyclic aromatic species."""
    aromatic = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,B} {3,S} {6,B}
2 C u0 p0 c0 {1,B} {3,B} {11,S}
3 C u0 p0 c0 {1,S} {2,B} {4,B}
4 C u0 p0 c0 {3,B} {5,B} {12,S}
5 C u0 p0 c0 {4,B} {6,B} {10,B}
6 C u0 p0 c0 {1,B} {5,B} {7,B}
7 C u0 p0 c0 {6,B} {8,B} {13,S}
8 C u0 p0 c0 {7,B} {9,B} {14,S}
9 C u0 p0 c0 {8,B} {10,B} {15,S}
10 C u0 p0 c0 {5,B} {9,B} {16,S}
11 H u0 p0 c0 {2,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {7,S}
14 H u0 p0 c0 {8,S}
15 H u0 p0 c0 {9,S}
16 H u0 p0 c0 {10,S}
""")
    result = generate_kekule_structure(aromatic)
    self.assertEqual(len(result), 1)
    self.assertFalse(result[0].isAromatic())
    # Collect each bond once (bonds appear in both endpoint atoms' dicts).
    unique_bonds = {bond for atom in result[0].atoms
                    for bond in atom.bonds.itervalues()}
    double_count = sum(1 for bond in unique_bonds if bond.isDouble())
    self.assertEqual(double_count, 5)
def testKekulizeResonanceIsomer(self):
    """
    Tests that an aromatic molecule returns at least one Kekulized resonance isomer.

    A molecule formed using an aromatic adjacency list returns both
    the aromatic and a kekulized form as resonance isomers.
    """
    # Old-style adjacency list (no u/p/c flags) for aromatic toluene.
    toluene = Molecule().fromAdjacencyList("""
1 H 0 {2,S}
2 C 0 {3,S} {9,S} {10,S} {1,S}
3 C 0 {4,B} {8,B} {2,S}
4 C 0 {3,B} {5,B} {11,S}
5 C 0 {4,B} {6,B} {12,S}
6 C 0 {5,B} {7,B} {13,S}
7 C 0 {6,B} {8,B} {14,S}
8 C 0 {3,B} {7,B} {15,S}
9 H 0 {2,S}
10 H 0 {2,S}
11 H 0 {4,S}
12 H 0 {5,S}
13 H 0 {6,S}
14 H 0 {7,S}
15 H 0 {8,S}""")
    toluene_kekulized = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,D} {6,S} {7,S}
2 C u0 p0 c0 {1,D} {3,S} {8,S}
3 C u0 p0 c0 {2,S} {4,D} {9,S}
4 C u0 p0 c0 {3,D} {5,S} {10,S}
5 C u0 p0 c0 {4,S} {6,D} {11,S}
6 C u0 p0 c0 {1,S} {5,D} {12,S}
7 C u0 p0 c0 {1,S} {13,S} {14,S} {15,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {4,S}
11 H u0 p0 c0 {5,S}
12 H u0 p0 c0 {6,S}
13 H u0 p0 c0 {7,S}
14 H u0 p0 c0 {7,S}
15 H u0 p0 c0 {7,S}
""")
    # Direct kekulization must give the expected structure...
    kekulized_isomer = generate_kekule_structure(toluene)[0]
    self.assertTrue(kekulized_isomer.isIsomorphic(toluene_kekulized))
    # ...and it must also appear among the generated resonance structures.
    for isomer in generate_resonance_structures(toluene):
        if isomer.isIsomorphic(toluene_kekulized):
            break
    else:  # didn't break
        # self.fail() is clearer than assertTrue(False, ...); the old message
        # also referenced a stale API name and misspelled "toluene".
        self.fail("Didn't find the Kekulized toluene among the structures "
                  "returned by generate_resonance_structures()")
def testMultipleKekulizedResonanceIsomers(self):
    """Test we can make both Kekule structures of o-cresol."""
    adjlist_aromatic = """
1 C u0 p0 c0 {2,S} {9,S} {10,S} {11,S}
2 C u0 p0 c0 {1,S} {3,B} {4,B}
3 C u0 p0 c0 {2,B} {5,B} {8,S}
4 C u0 p0 c0 {2,B} {7,B} {15,S}
5 C u0 p0 c0 {3,B} {6,B} {12,S}
6 C u0 p0 c0 {5,B} {7,B} {13,S}
7 C u0 p0 c0 {4,B} {6,B} {14,S}
8 O u0 p2 c0 {3,S} {16,S}
9 H u0 p0 c0 {1,S}
10 H u0 p0 c0 {1,S}
11 H u0 p0 c0 {1,S}
12 H u0 p0 c0 {5,S}
13 H u0 p0 c0 {6,S}
14 H u0 p0 c0 {7,S}
15 H u0 p0 c0 {4,S}
16 H u0 p0 c0 {8,S}
"""
    starting = Molecule().fromAdjacencyList(adjlist_aromatic)
    self.assertTrue(starting.isAromatic(), "Starting molecule should be aromatic")
    structures = generate_resonance_structures(starting)
    # Expect the aromatic form plus the two distinct Kekule forms.
    self.assertEqual(len(structures), 3, "Didn't generate 3 resonance isomers")
    self.assertFalse(structures[1].isAromatic(), "Second resonance isomer shouldn't be aromatic")
    self.assertFalse(structures[2].isAromatic(), "Third resonance isomer shouldn't be aromatic")
    self.assertFalse(structures[1].isIsomorphic(structures[2]), "Second and third resonance isomers should be different")
def testMultipleKekulizedResonanceIsomersRad(self):
    """Test we can make all resonance structures of the o-cresol radical."""
    adjlist_aromatic = """
1 C u0 p0 c0 {2,S} {9,S} {10,S} {11,S}
2 C u0 p0 c0 {1,S} {3,B} {4,B}
3 C u0 p0 c0 {2,B} {5,B} {8,S}
4 C u0 p0 c0 {2,B} {7,B} {15,S}
5 C u0 p0 c0 {3,B} {6,B} {12,S}
6 C u0 p0 c0 {5,B} {7,B} {13,S}
7 C u0 p0 c0 {4,B} {6,B} {14,S}
8 O u1 p2 c0 {3,S}
9 H u0 p0 c0 {1,S}
10 H u0 p0 c0 {1,S}
11 H u0 p0 c0 {1,S}
12 H u0 p0 c0 {5,S}
13 H u0 p0 c0 {6,S}
14 H u0 p0 c0 {7,S}
15 H u0 p0 c0 {4,S}
"""
    molecule = Molecule().fromAdjacencyList(adjlist_aromatic)
    self.assertTrue(molecule.isAromatic(), "Starting molecule should be aromatic")
    molList = generate_resonance_structures(molecule)
    self.assertEqual(len(molList), 6, "Expected 6 resonance structures, but generated {0}.".format(len(molList)))
    # Exactly one of the generated structures should retain aromaticity.
    aromatic = sum(1 for mol in molList if mol.isAromatic())
    self.assertEqual(aromatic, 1, "Should only have 1 aromatic resonance structure")
@work_in_progress
def testKekulizedResonanceIsomersFused(self):
    """Test we can make aromatic and Kekulized resonance isomers of 2-methylanthracen-1-ol
    This fused ring PAH will be harder"""
    # Four Kekule structures of 2-methylanthracen-1-ol, differing only in
    # the placement of the alternating double bonds across the fused rings.
    kekulized1 = """multiplicity 1
1 C u0 p0 c0 {2,S} {17,S} {18,S} {19,S}
2 C u0 p0 c0 {1,S} {7,S} {10,D}
3 C u0 p0 c0 {4,S} {7,D} {9,S}
4 C u0 p0 c0 {3,S} {8,S} {11,D}
5 C u0 p0 c0 {6,S} {8,D} {12,S}
6 C u0 p0 c0 {5,S} {9,D} {13,S}
7 C u0 p0 c0 {2,S} {3,D} {16,S}
8 C u0 p0 c0 {4,S} {5,D} {22,S}
9 C u0 p0 c0 {3,S} {6,D} {27,S}
10 C u0 p0 c0 {2,D} {11,S} {20,S}
11 C u0 p0 c0 {4,D} {10,S} {21,S}
12 C u0 p0 c0 {5,S} {14,D} {23,S}
13 C u0 p0 c0 {6,S} {15,D} {26,S}
14 C u0 p0 c0 {12,D} {15,S} {24,S}
15 C u0 p0 c0 {13,D} {14,S} {25,S}
16 O u0 p2 c0 {7,S} {28,S}
17 H u0 p0 c0 {1,S}
18 H u0 p0 c0 {1,S}
19 H u0 p0 c0 {1,S}
20 H u0 p0 c0 {10,S}
21 H u0 p0 c0 {11,S}
22 H u0 p0 c0 {8,S}
23 H u0 p0 c0 {12,S}
24 H u0 p0 c0 {14,S}
25 H u0 p0 c0 {15,S}
26 H u0 p0 c0 {13,S}
27 H u0 p0 c0 {9,S}
28 H u0 p0 c0 {16,S}
"""
    kekulized2 = """multiplicity 1
1 C u0 p0 c0 {2,S} {17,S} {18,S} {19,S}
2 C u0 p0 c0 {1,S} {7,D} {10,S}
3 C u0 p0 c0 {4,S} {7,S} {9,D}
4 C u0 p0 c0 {3,S} {8,D} {11,S}
5 C u0 p0 c0 {6,S} {8,S} {12,D}
6 C u0 p0 c0 {5,S} {9,S} {13,D}
7 C u0 p0 c0 {2,D} {3,S} {16,S}
8 C u0 p0 c0 {4,D} {5,S} {22,S}
9 C u0 p0 c0 {3,D} {6,S} {27,S}
10 C u0 p0 c0 {2,S} {11,D} {20,S}
11 C u0 p0 c0 {4,S} {10,D} {21,S}
12 C u0 p0 c0 {5,D} {14,S} {23,S}
13 C u0 p0 c0 {6,D} {15,S} {26,S}
14 C u0 p0 c0 {12,S} {15,D} {24,S}
15 C u0 p0 c0 {13,S} {14,D} {25,S}
16 O u0 p2 c0 {7,S} {28,S}
17 H u0 p0 c0 {1,S}
18 H u0 p0 c0 {1,S}
19 H u0 p0 c0 {1,S}
20 H u0 p0 c0 {10,S}
21 H u0 p0 c0 {11,S}
22 H u0 p0 c0 {8,S}
23 H u0 p0 c0 {12,S}
24 H u0 p0 c0 {14,S}
25 H u0 p0 c0 {15,S}
26 H u0 p0 c0 {13,S}
27 H u0 p0 c0 {9,S}
28 H u0 p0 c0 {16,S}
"""
    kekulized3 = """multiplicity 1
1 C u0 p0 c0 {2,S} {17,S} {18,S} {19,S}
2 C u0 p0 c0 {1,S} {7,D} {10,S}
3 C u0 p0 c0 {4,S} {7,S} {9,D}
4 C u0 p0 c0 {3,S} {8,D} {11,S}
5 C u0 p0 c0 {6,D} {8,S} {12,S}
6 C u0 p0 c0 {5,D} {9,S} {13,S}
7 C u0 p0 c0 {2,D} {3,S} {16,S}
8 C u0 p0 c0 {4,D} {5,S} {20,S}
9 C u0 p0 c0 {3,D} {6,S} {21,S}
10 C u0 p0 c0 {2,S} {11,D} {22,S}
11 C u0 p0 c0 {4,S} {10,D} {23,S}
12 C u0 p0 c0 {5,S} {14,D} {24,S}
13 C u0 p0 c0 {6,S} {15,D} {25,S}
14 C u0 p0 c0 {12,D} {15,S} {26,S}
15 C u0 p0 c0 {13,D} {14,S} {27,S}
16 O u0 p2 c0 {7,S} {28,S}
17 H u0 p0 c0 {1,S}
18 H u0 p0 c0 {1,S}
19 H u0 p0 c0 {1,S}
20 H u0 p0 c0 {8,S}
21 H u0 p0 c0 {9,S}
22 H u0 p0 c0 {10,S}
23 H u0 p0 c0 {11,S}
24 H u0 p0 c0 {12,S}
25 H u0 p0 c0 {13,S}
26 H u0 p0 c0 {14,S}
27 H u0 p0 c0 {15,S}
28 H u0 p0 c0 {16,S}
"""
    kekulized4 = """multiplicity 1
1 C u0 p0 c0 {2,S} {17,S} {18,S} {19,S}
2 C u0 p0 c0 {1,S} {7,D} {10,S}
3 C u0 p0 c0 {4,D} {7,S} {9,S}
4 C u0 p0 c0 {3,D} {8,S} {11,S}
5 C u0 p0 c0 {6,S} {8,D} {12,S}
6 C u0 p0 c0 {5,S} {9,D} {13,S}
7 C u0 p0 c0 {2,D} {3,S} {16,S}
8 C u0 p0 c0 {4,S} {5,D} {20,S}
9 C u0 p0 c0 {3,S} {6,D} {21,S}
10 C u0 p0 c0 {2,S} {11,D} {22,S}
11 C u0 p0 c0 {4,S} {10,D} {23,S}
12 C u0 p0 c0 {5,S} {14,D} {24,S}
13 C u0 p0 c0 {6,S} {15,D} {25,S}
14 C u0 p0 c0 {12,D} {15,S} {26,S}
15 C u0 p0 c0 {13,D} {14,S} {27,S}
16 O u0 p2 c0 {7,S} {28,S}
17 H u0 p0 c0 {1,S}
18 H u0 p0 c0 {1,S}
19 H u0 p0 c0 {1,S}
20 H u0 p0 c0 {8,S}
21 H u0 p0 c0 {9,S}
22 H u0 p0 c0 {10,S}
23 H u0 p0 c0 {11,S}
24 H u0 p0 c0 {12,S}
25 H u0 p0 c0 {13,S}
26 H u0 p0 c0 {14,S}
27 H u0 p0 c0 {15,S}
28 H u0 p0 c0 {16,S}
"""
    m1 = Molecule().fromAdjacencyList(kekulized1)
    m2 = Molecule().fromAdjacencyList(kekulized2)
    m3 = Molecule().fromAdjacencyList(kekulized3)
    m4 = Molecule().fromAdjacencyList(kekulized4)
    resonance_forms = (m1, m2, m3, m4)
    # Starting from each Kekule form, resonance generation must (a) produce an
    # aromatic structure, (b) produce nothing outside the expected set, and
    # (c) recover every expected Kekule form.
    for starting in resonance_forms:
        self.assertFalse(starting.isAromatic(), "Starting molecule should not be aromatic")
        isomers = generate_resonance_structures(starting)
        # print "starting with {0!r} I generated these:".format(starting)
        # print repr(isomers)
        for isomer in isomers:
            if isomer.isAromatic():
                break
        else:  # didn't break
            self.fail("None of the generated resonance isomers {0!r} are aromatic".format(isomers))
        for generated in isomers:
            for expected in resonance_forms:
                if generated.isIsomorphic(expected):
                    break
            else:  # didn't break
                if generated.isAromatic():
                    continue  # because the aromatic isomer isn't in our resonance_forms list
                self.fail("Generated a resonance form {0!r} that was not expected!\n{1}\nAlthough that may be a bug in the unit test (not sure I got them all)".format(generated, generated.toAdjacencyList()))
        for expected in resonance_forms:
            for generated in isomers:
                if expected.isIsomorphic(generated):
                    break
            else:  # didn't break
                self.fail(("Expected a resonance form {0!r} that was not generated.\n"
                           "Only generated these:\n{1}").format(expected, '\n'.join([repr(g) for g in isomers])))
def testKeepIsomorphicStructuresFunctionsWhenTrue(self):
    """Test that keepIsomorphic works for resonance structure generation when True."""
    allyl = Molecule(SMILES='C=C[CH2]')
    allyl.assignAtomIDs()
    structures = generate_resonance_structures(allyl, keepIsomorphic=True)
    # Both mirror-image allyl structures are kept: isomorphic but not identical.
    self.assertEqual(len(structures), 2)
    self.assertTrue(structures[0].isIsomorphic(structures[1]))
    self.assertFalse(structures[0].isIdentical(structures[1]))
def testKeepIsomorphicStructuresFunctionsWhenFalse(self):
    """Test that keepIsomorphic works for resonance structure generation when False."""
    allyl = Molecule(SMILES='C=C[CH2]')
    allyl.assignAtomIDs()
    structures = generate_resonance_structures(allyl, keepIsomorphic=False)
    # The isomorphic duplicate is filtered out, leaving a single structure.
    self.assertEqual(len(structures), 1)
def testFalseNegativeAromaticityPerception(self):
    """Test that we obtain the correct aromatic structure for a monocyclic aromatic that RDKit mis-identifies."""
    mol = Molecule(SMILES='[CH2]C=C1C=CC(=C)C=C1')
    out = generate_resonance_structures(mol)
    aromatic = Molecule().fromAdjacencyList("""
multiplicity 2
1 C u0 p0 c0 {4,B} {5,B} {7,S}
2 C u0 p0 c0 {3,B} {6,B} {8,S}
3 C u0 p0 c0 {2,B} {4,B} {10,S}
4 C u0 p0 c0 {1,B} {3,B} {11,S}
5 C u0 p0 c0 {1,B} {6,B} {13,S}
6 C u0 p0 c0 {2,B} {5,B} {14,S}
7 C u0 p0 c0 {1,S} {9,D} {12,S}
8 C u1 p0 c0 {2,S} {15,S} {16,S}
9 C u0 p0 c0 {7,D} {17,S} {18,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {4,S}
12 H u0 p0 c0 {7,S}
13 H u0 p0 c0 {5,S}
14 H u0 p0 c0 {6,S}
15 H u0 p0 c0 {8,S}
16 H u0 p0 c0 {8,S}
17 H u0 p0 c0 {9,S}
18 H u0 p0 c0 {9,S}
""")
    self.assertEqual(len(out), 5)
    # Generator instead of a list inside any(): short-circuits on first match.
    self.assertTrue(any(m.isIsomorphic(aromatic) for m in out))
def testFalseNegativePolycyclicAromaticityPerception(self):
    """Test that we generate proper structures for a polycyclic aromatic that RDKit mis-identifies."""
    mol = Molecule(SMILES='C=C1C=CC=C2C=C[CH]C=C12')
    out = generate_resonance_structures(mol)
    # Expected Clar-type structure with one aromatic sextet.
    clar = Molecule().fromAdjacencyList("""
multiplicity 2
1 C u0 p0 c0 {2,B} {3,B} {7,S}
2 C u0 p0 c0 {1,B} {5,B} {6,S}
3 C u0 p0 c0 {1,B} {4,B} {11,S}
4 C u0 p0 c0 {3,B} {8,B} {13,S}
5 C u0 p0 c0 {2,B} {8,B} {15,S}
6 C u0 p0 c0 {2,S} {9,D} {16,S}
7 C u0 p0 c0 {1,S} {10,D} {18,S}
8 C u0 p0 c0 {4,B} {5,B} {14,S}
9 C u0 p0 c0 {6,D} {10,S} {17,S}
10 C u0 p0 c0 {7,D} {9,S} {12,S}
11 C u1 p0 c0 {3,S} {19,S} {20,S}
12 H u0 p0 c0 {10,S}
13 H u0 p0 c0 {4,S}
14 H u0 p0 c0 {8,S}
15 H u0 p0 c0 {5,S}
16 H u0 p0 c0 {6,S}
17 H u0 p0 c0 {9,S}
18 H u0 p0 c0 {7,S}
19 H u0 p0 c0 {11,S}
20 H u0 p0 c0 {11,S}
""")
    self.assertEqual(len(out), 6)
    # Generator instead of a list inside any(): short-circuits on first match.
    self.assertTrue(any(m.isIsomorphic(clar) for m in out))
def testFalseNegativePolycylicAromaticityPerception2(self):
    """Test that we obtain the correct aromatic structure for a polycylic aromatic that RDKit mis-identifies."""
    mol = Molecule(SMILES='[CH2]C=C1C=CC(=C)C2=C1C=CC=C2')
    out = generate_resonance_structures(mol)
    aromatic = Molecule().fromAdjacencyList("""
multiplicity 2
1 C u0 p0 c0 {2,B} {4,B} {8,B}
2 C u0 p0 c0 {1,B} {3,B} {7,B}
3 C u0 p0 c0 {2,B} {5,B} {9,S}
4 C u0 p0 c0 {1,B} {6,B} {12,S}
5 C u0 p0 c0 {3,B} {6,B} {15,S}
6 C u0 p0 c0 {4,B} {5,B} {16,S}
7 C u0 p0 c0 {2,B} {10,B} {17,S}
8 C u0 p0 c0 {1,B} {11,B} {20,S}
9 C u0 p0 c0 {3,S} {13,D} {14,S}
10 C u0 p0 c0 {7,B} {11,B} {18,S}
11 C u0 p0 c0 {8,B} {10,B} {19,S}
12 C u1 p0 c0 {4,S} {21,S} {22,S}
13 C u0 p0 c0 {9,D} {23,S} {24,S}
14 H u0 p0 c0 {9,S}
15 H u0 p0 c0 {5,S}
16 H u0 p0 c0 {6,S}
17 H u0 p0 c0 {7,S}
18 H u0 p0 c0 {10,S}
19 H u0 p0 c0 {11,S}
20 H u0 p0 c0 {8,S}
21 H u0 p0 c0 {12,S}
22 H u0 p0 c0 {12,S}
23 H u0 p0 c0 {13,S}
24 H u0 p0 c0 {13,S}
""")
    self.assertEqual(len(out), 7)
    # Generator instead of a list inside any(): short-circuits on first match.
    self.assertTrue(any(m.isIsomorphic(aromatic) for m in out))
class ClarTest(unittest.TestCase):
    """
    Contains unit tests for Clar structure methods.
    """

    def testClarTransformation(self):
        """Test that clarTransformation generates an aromatic ring."""
        mol = Molecule().fromSMILES('c1ccccc1')
        sssr = mol.getSmallestSetOfSmallestRings()
        _clar_transformation(mol, sssr[0])
        mol.updateAtomTypes()
        self.assertTrue(mol.isAromatic())

    def testClarOptimization(self):
        """Test to ensure pi electrons are conserved during optimization."""
        mol = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')  # Naphthalene
        output = _clar_optimization(mol)
        for molecule, asssr, bonds, solution in output:
            # Count pi electrons in molecule
            pi = 0
            for bond in bonds:
                if bond.isDouble():
                    pi += 2
            # Count pi electrons in solution: each sextet (y) contributes 6,
            # each lone double bond (x) contributes 2.
            y = solution[0:len(asssr)]
            x = solution[len(asssr):]
            pi_solution = 6 * sum(y) + 2 * sum(x)
            # Check that both counts give 10 pi electrons
            self.assertEqual(pi, 10)
            self.assertEqual(pi_solution, 10)
            # Check that we only assign 1 aromatic sextet
            self.assertEqual(sum(y), 1)

    def testPhenanthrene(self):
        """Test that we generate 1 Clar structure for phenanthrene."""
        mol = Molecule().fromSMILES('C1=CC=C2C(C=CC3=CC=CC=C32)=C1')
        newmol = generate_clar_structures(mol)
        struct = Molecule().fromAdjacencyList("""1 C u0 p0 c0 {2,S} {3,B} {5,B}
2 C u0 p0 c0 {1,S} {4,B} {9,B}
3 C u0 p0 c0 {1,B} {6,S} {10,B}
4 C u0 p0 c0 {2,B} {7,S} {8,B}
5 C u0 p0 c0 {1,B} {12,B} {17,S}
6 C u0 p0 c0 {3,S} {7,D} {18,S}
7 C u0 p0 c0 {4,S} {6,D} {19,S}
8 C u0 p0 c0 {4,B} {13,B} {20,S}
9 C u0 p0 c0 {2,B} {14,B} {23,S}
10 C u0 p0 c0 {3,B} {11,B} {24,S}
11 C u0 p0 c0 {10,B} {12,B} {15,S}
12 C u0 p0 c0 {5,B} {11,B} {16,S}
13 C u0 p0 c0 {8,B} {14,B} {21,S}
14 C u0 p0 c0 {9,B} {13,B} {22,S}
15 H u0 p0 c0 {11,S}
16 H u0 p0 c0 {12,S}
17 H u0 p0 c0 {5,S}
18 H u0 p0 c0 {6,S}
19 H u0 p0 c0 {7,S}
20 H u0 p0 c0 {8,S}
21 H u0 p0 c0 {13,S}
22 H u0 p0 c0 {14,S}
23 H u0 p0 c0 {9,S}
24 H u0 p0 c0 {10,S}
""")
        self.assertEqual(len(newmol), 1)
        self.assertTrue(newmol[0].isIsomorphic(struct))

    def testPhenalene(self):
        """Test that we generate 2 Clar structures for phenalene.

        Case where there is one non-aromatic ring."""
        mol = Molecule().fromSMILES('C1=CC2=CC=CC3CC=CC(=C1)C=32')
        newmol = generate_clar_structures(mol)
        struct1 = Molecule().fromAdjacencyList("""1 C u0 p0 c0 {2,S} {6,S} {14,S} {15,S}
2 C u0 p0 c0 {1,S} {3,S} {7,D}
3 C u0 p0 c0 {2,S} {4,B} {5,B}
4 C u0 p0 c0 {3,B} {9,B} {10,S}
5 C u0 p0 c0 {3,B} {8,S} {11,B}
6 C u0 p0 c0 {1,S} {8,D} {16,S}
7 C u0 p0 c0 {2,D} {13,S} {21,S}
8 C u0 p0 c0 {5,S} {6,D} {22,S}
9 C u0 p0 c0 {4,B} {12,B} {18,S}
10 C u0 p0 c0 {4,S} {13,D} {19,S}
11 C u0 p0 c0 {5,B} {12,B} {23,S}
12 C u0 p0 c0 {9,B} {11,B} {17,S}
13 C u0 p0 c0 {7,S} {10,D} {20,S}
14 H u0 p0 c0 {1,S}
15 H u0 p0 c0 {1,S}
16 H u0 p0 c0 {6,S}
17 H u0 p0 c0 {12,S}
18 H u0 p0 c0 {9,S}
19 H u0 p0 c0 {10,S}
20 H u0 p0 c0 {13,S}
21 H u0 p0 c0 {7,S}
22 H u0 p0 c0 {8,S}
23 H u0 p0 c0 {11,S}
""")
        struct2 = Molecule().fromAdjacencyList("""1 C u0 p0 c0 {2,S} {6,S} {14,S} {15,S}
2 C u0 p0 c0 {1,S} {3,B} {7,B}
3 C u0 p0 c0 {2,B} {4,B} {5,S}
4 C u0 p0 c0 {3,B} {9,S} {10,B}
5 C u0 p0 c0 {3,S} {8,S} {11,D}
6 C u0 p0 c0 {1,S} {8,D} {16,S}
7 C u0 p0 c0 {2,B} {13,B} {21,S}
8 C u0 p0 c0 {5,S} {6,D} {22,S}
9 C u0 p0 c0 {4,S} {12,D} {18,S}
10 C u0 p0 c0 {4,B} {13,B} {19,S}
11 C u0 p0 c0 {5,D} {12,S} {23,S}
12 C u0 p0 c0 {9,D} {11,S} {17,S}
13 C u0 p0 c0 {7,B} {10,B} {20,S}
14 H u0 p0 c0 {1,S}
15 H u0 p0 c0 {1,S}
16 H u0 p0 c0 {6,S}
17 H u0 p0 c0 {12,S}
18 H u0 p0 c0 {9,S}
19 H u0 p0 c0 {10,S}
20 H u0 p0 c0 {13,S}
21 H u0 p0 c0 {7,S}
22 H u0 p0 c0 {8,S}
23 H u0 p0 c0 {11,S}
""")
        self.assertEqual(len(newmol), 2)
        # Either ordering of the two structures is acceptable.
        self.assertTrue(newmol[0].isIsomorphic(struct1) or newmol[0].isIsomorphic(struct2))
        self.assertTrue(newmol[1].isIsomorphic(struct2) or newmol[1].isIsomorphic(struct1))
        self.assertFalse(newmol[0].isIsomorphic(newmol[1]))

    def testCorannulene(self):
        """Test that we generate 5 Clar structures for corannulene.

        Case where linear relaxation does not give an integer solution."""
        mol = Molecule().fromSMILES('C1=CC2=CC=C3C=CC4=C5C6=C(C2=C35)C1=CC=C6C=C4')
        newmol = generate_clar_structures(mol)
        struct = Molecule().fromAdjacencyList("""1 C u0 p0 c0 {2,S} {5,B} {8,B}
2 C u0 p0 c0 {1,S} {3,B} {10,B}
3 C u0 p0 c0 {2,B} {4,S} {9,B}
4 C u0 p0 c0 {3,S} {5,S} {6,D}
5 C u0 p0 c0 {1,B} {4,S} {7,B}
6 C u0 p0 c0 {4,D} {12,S} {13,S}
7 C u0 p0 c0 {5,B} {14,S} {15,B}
8 C u0 p0 c0 {1,B} {16,B} {20,S}
9 C u0 p0 c0 {3,B} {11,S} {17,B}
10 C u0 p0 c0 {2,B} {18,B} {19,S}
11 C u0 p0 c0 {9,S} {12,D} {21,S}
12 C u0 p0 c0 {6,S} {11,D} {22,S}
13 C u0 p0 c0 {6,S} {14,D} {23,S}
14 C u0 p0 c0 {7,S} {13,D} {24,S}
15 C u0 p0 c0 {7,B} {16,B} {25,S}
16 C u0 p0 c0 {8,B} {15,B} {26,S}
17 C u0 p0 c0 {9,B} {18,B} {27,S}
18 C u0 p0 c0 {10,B} {17,B} {28,S}
19 C u0 p0 c0 {10,S} {20,D} {29,S}
20 C u0 p0 c0 {8,S} {19,D} {30,S}
21 H u0 p0 c0 {11,S}
22 H u0 p0 c0 {12,S}
23 H u0 p0 c0 {13,S}
24 H u0 p0 c0 {14,S}
25 H u0 p0 c0 {15,S}
26 H u0 p0 c0 {16,S}
27 H u0 p0 c0 {17,S}
28 H u0 p0 c0 {18,S}
29 H u0 p0 c0 {19,S}
30 H u0 p0 c0 {20,S}
""")
        self.assertEqual(len(newmol), 5)
        # All five solutions correspond to the same (symmetric) structure.
        for structure in newmol:
            self.assertTrue(structure.isIsomorphic(struct))

    def testExocyclicDB(self):
        """Test that Clar structure generation doesn't modify exocyclic double bonds.

        Important for cases where RDKit considers rings to be aromatic by counting pi-electron contributions
        from exocyclic double bonds, while they don't actually contribute to aromaticity."""
        mol = Molecule(SMILES="C=C1C=CC=CC1=C")
        newmol = generate_clar_structures(mol)
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(len(newmol), 0)
| Molecular-Image-Recognition/Molecular-Image-Recognition | code/rmgpy/molecule/resonanceTest.py | Python | mit | 46,203 | [
"RDKit"
] | 36602f90904e063be216f531fc28dd2360c87a911161d81d6e5d6e2100f35af7 |
from __future__ import print_function
import httplib2
import os
import sys
import dateutil.parser as dateparse
try:
    from apiclient import discovery
    import oauth2client
    from oauth2client import client
    from oauth2client import tools
except ImportError:
    print("""
Google API Modules are required:
pip install --upgrade google-api-python-client
Then visit the link below and follow the instructions to setup your API credentials:
https://developers.google.com/google-apps/calendar/quickstart/python
client_secret.json should be placed in ~/.hotbot
""")
    # Bail out now: without these modules every later reference to
    # discovery/oauth2client/tools would die with a confusing NameError.
    sys.exit(1)
import datetime
import readline
import argparse
from jsonconf import jsonconf
import uuid
from random import randint
from croniter import croniter
import logging

# Module-level logger. Note: must be getLogger(__name__); passing the
# keyword name='__name__' (as before) names the logger the literal
# string "__name__" instead of this module's dotted name.
log = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
log.addHandler(handler)
log.setLevel(logging.INFO)
# pip install --upgrade google-api-python-client
# https://console.developers.google.com/start/api?id=calendar
# https://console.developers.google.com/apis/
SCOPES = 'https://www.googleapis.com/auth/calendar'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'HOTBot'
def get_short_unique():
    """Return a short (8-character) pseudo-unique identifier.

    Taken from the leading characters of a random UUID4, so collisions
    are unlikely but not impossible.
    """
    full_id = str(uuid.uuid4())
    return full_id[:8]
def default_input(prompt, default=None):
    """Prompt the user for input with an editable pre-filled default.

    Uses a readline pre-input hook so *default* appears on the input line
    and can be edited in place; returns whatever the user submits.
    """
    # https://chistera.yi.org/~dato/blog/entries/2008/02/14/python_raw_input_with_an_editable_default_value_using_readline.html
    if not default:
        default = ""

    def pre_input_hook():
        # Inject the default text into the readline buffer before input starts.
        readline.insert_text(default)
        readline.redisplay()
    prompt += ": "
    readline.set_pre_input_hook(pre_input_hook)
    try:
        return raw_input(prompt)
    finally:
        # Always clear the hook so the default doesn't leak into later prompts.
        readline.set_pre_input_hook(None)
def number_ordinal(n):
    """Return *n* with its English ordinal suffix, e.g. 1 -> '1st', 12 -> '12th'.

    The compact suffix-lookup trick is from:
    http://stackoverflow.com/questions/9647202/ordinal-numbers-replacement/20007730#20007730
    """
    # Use floor division (//): the original used '/', which is integer
    # division only under Python 2 and silently breaks the slice index
    # under Python 3's true division.
    return "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
class HOTBot(object):
    """Automates scheduling of recurring social events on a Google Calendar.

    Finds placeholder events named '[<event>]' (optionally '[<event>:<tag>]'),
    picks a random location that has not been used recently, fills in the
    event details from a message template and invites the configured guests.
    Per-event configuration and location-usage history are persisted as JSON
    files under ~/.hotbot.
    """

    # (field key, input prompt) pairs used by manage_list() for guests
    guest_values = [
        ('displayName', 'Guest Name'),
        ('email', 'Guest email')]

    # (field key, input prompt) pairs used by manage_list() for locations
    location_values = [
        ('name', 'Name'),
        ('location', 'Location'),
        ('phone', 'Phone'),
        ('website', 'Website'),
        ('tag', 'Tag (optional)'),
        ('reservation', 'Takes reservations')]

    # Example value for every field available in message templates; also used
    # to test-render templates while the user edits them interactively.
    event_message_fields = [
        ('event', 'HOT'),
        ('day', 10),
        ('day_ordinal', '10TH'),
        ('day_name', 'Thursday'),
        ('month', 2),
        ('month_ordinal', '3rd'),
        ('month_name', 'March'),
        ('year', 2016),
        ('guest_count', 8),
        ('name', "Cthulu's Pub"),
        ('location', "123 Ancient One Ave, R'lyeh, NY"),
        ('phone', '867-5309'),
        ('website', 'http://cthuluspub.hp'),
        ('start_time', '2016-03-10T19:00:00-05:00'),
        ('short_time', '7:00 PM')
    ]

    # Sample template shown to the user when editing messages.
    event_message_example = (
        "It has been decreed, that on {day_name}, the {day_ordinal} "
        "day of {month_name}, {year}, that {event} shall be held at "
        "{name}. The {guest_count} believers shall arrive at "
        "{location} promptly at {short_time}, or risk the wrath of the "
        " Ancient Ones.")

    def __init__(self, event, flags=None):
        """Load (or lazily create) config and history files for *event*.

        event -- event name; also the key for config files and calendar lookup
        flags -- parsed argparse namespace, forwarded to the OAuth flow
        """
        self.flags = flags
        self.service = None
        self.event = event
        conf_dir = self.get_conf_dir()
        self.event_conf = jsonconf(os.path.join(conf_dir, self.event + ".json"))
        self.event_loc_history = jsonconf(os.path.join(conf_dir, self.event + "_history.json"))
        if not self.event_loc_history.locations:
            self.event_loc_history.locations = {}
            self.event_loc_history.save()
        # Google API service is built on first use (see authorize()).
        self.authorized = False

    def get_conf_dir(self):
        """Return ~/.hotbot, creating the directory on first use."""
        home_dir = os.path.expanduser('~')
        conf_dir = os.path.join(home_dir, '.hotbot')
        if not os.path.exists(conf_dir):
            os.makedirs(conf_dir)
        return conf_dir

    def get_credentials(self):
        """Return valid Google OAuth2 credentials for this event.

        Cached credentials are read from ~/.hotbot/<event>_credentials.json;
        when missing or invalid, the interactive OAuth flow is run (using
        ~/.hotbot/client_secret.json) and the result is stored for next time.
        """
        conf_dir = self.get_conf_dir()
        credential_path = os.path.join(conf_dir, self.event + '_credentials.json')
        store = oauth2client.file.Storage(credential_path)
        credentials = store.get()
        if not credentials or credentials.invalid:
            log.warn("No current valid Google credentials. Starting authentication flow...")
            flow = client.flow_from_clientsecrets(os.path.join(conf_dir, 'client_secret.json'),
                                                  'https://www.googleapis.com/auth/calendar')
            flow.user_agent = "HOTBot"
            if self.flags:
                credentials = tools.run_flow(flow, store, self.flags)
            else: # Needed only for compatibility with Python 2.6
                credentials = tools.run(flow, store)
            log.info('Storing credentials to ' + credential_path)
        return credentials

    def authorize(self):
        """Build the Calendar v3 API service object from stored credentials."""
        credentials = self.get_credentials()
        http = credentials.authorize(httplib2.Http())
        self.service = discovery.build('calendar', 'v3', http=http)
        self.authorized = True

    def manage_locations(self):
        """Interactive add/edit/delete of this event's location list."""
        return self.manage_list('locations', HOTBot.location_values,
                                'Current Locations', 'name')

    def reset_loc_history(self, tag=None):
        """Forget which locations with *tag* were used ('*' means untagged)."""
        if not tag:
            tag = '*'
        log.info("Resetting history for [{}] locations...".format(tag))
        # NOTE(review): raises KeyError if this tag has no history entry yet --
        # confirm callers only reach this after the tag's pool is exhausted.
        del self.event_loc_history.locations[tag]
        self.event_loc_history.save()

    def add_loc_history(self, loc):
        """Record location key *loc* as used, binned under its tag."""
        if loc in self.event_conf.locations:
            l = self.event_conf.locations[loc]
            tag = l['tag'].strip().lower()
            if not tag:
                tag = '*'
            if tag not in self.event_loc_history.locations:
                self.event_loc_history.locations[tag] = []
            self.event_loc_history.locations[tag].append(loc)
            self.event_loc_history.save()

    def _get_loc_bins(self):
        """Return {tag: [location keys not yet in the usage history]}."""
        # sort into bins exclusively
        bins = {'*': []}
        for k, l in self.event_conf.locations.iteritems():
            t = l['tag'].strip().lower()
            if not t:
                t = '*'
            if t not in bins:
                bins[t] = []
            # Only offer locations that have not been used since the last reset.
            if (t not in self.event_loc_history.locations or
                    k not in self.event_loc_history.locations[t]):
                bins[t].append(k)
        return bins

    def get_rand_location(self, start_time, tag=None):
        """Choose a random not-recently-used location and build template data.

        start_time -- RFC3339 start-time string from the calendar event
        tag        -- optional location tag restricting the choice
        Returns (location key, location dict, template info dict), or None
        when no location matching *tag* exists at all.
        """
        if tag:
            tag = tag.strip().lower()
        else:
            tag = '*'
        bins = self._get_loc_bins()
        if tag not in bins or len(bins[tag]) == 0:
            # we've used them all, try to reset history
            self.reset_loc_history(tag=tag)
            bins = self._get_loc_bins()
            if tag not in bins or len(bins[tag]) == 0:
                return None
        i = randint(0, len(bins[tag]) - 1)
        key = bins[tag][i]
        loc = self.event_conf.locations[bins[tag][i]]
        # info = location fields plus date/time fields for message templates
        info = dict(loc)
        info['start_time'] = start_time
        time = dateparse.parse(start_time)
        info['day_name'] = time.strftime('%A')
        info['day'] = time.day
        info['day_ordinal'] = number_ordinal(time.day)
        info['year'] = time.year
        info['month'] = time.month
        info['month_name'] = time.strftime('%B')
        info['month_ordinal'] = number_ordinal(time.month)
        # NOTE(review): hour 12 renders as '0' (e.g. '0:30 PM') -- confirm intended.
        info['short_time'] = str(time.hour % 12) + time.strftime(':%M %p')
        info['event'] = self.event
        info['guest_count'] = len(self.event_conf.guests)
        return (key, loc, info)

    def insert_events(self):
        """Interactively create placeholder events from a cron expression.

        Prompts for tag, start date, count, duration and cron schedule, then
        writes one placeholder per occurrence to the host calendar.
        """
        print('\nInsert event placeholders using cron format.'
              '\nSee https://en.wikipedia.org/wiki/Cron format for details.')
        loc_tag = default_input("Location Tag (enter for none)", default="")
        if not loc_tag:
            loc_tag = None
        fmt = '%Y-%m-%d'
        base = datetime.datetime.now()
        def_base = base_str = base.strftime(fmt)
        # Re-prompt until a valid start date is given.
        while True:
            base_str = default_input("Start Date", default=def_base)
            try:
                base = datetime.datetime.strptime(base_str, fmt)
            except:
                print("Invalid Date Format! Use YYYY-MM-DD")
                continue
            break
        count = def_count = 10
        while True:
            count = default_input("# Events to Insert", default=str(def_count))
            try:
                count = int(count)
                if count < 1:
                    raise Exception() # lazy way to handle with less code
            except:
                print("Please enter a valid integer > 0!")
                continue
            break
        duration = def_dur = 60
        while True:
            duration = default_input("Event Duration (min)", default=str(def_dur))
            try:
                duration = int(duration)
                if duration < 10:
                    raise Exception()
            except:
                print("Please enter a valid integer > 10!")
                continue
            break
        cron_fmt = None
        cron = None
        events = []
        event_objs = []
        # Outer loop: preview the generated dates and let the user re-edit
        # the cron expression until they confirm.
        while True:
            while True:
                cron_fmt = default_input("Cron Expression", default=cron_fmt)
                try:
                    cron = croniter(cron_fmt, start_time=base)
                except:
                    print('\nInvalid Cron Expression!'
                          '\nSee https://en.wikipedia.org/wiki/Cron format for examples.')
                    continue
                break
            events = []
            event_objs = []
            for _ in range(count):
                evt = cron.get_next(ret_type=datetime.datetime)
                event_objs.append(evt)
                events.append(evt.strftime(fmt + ' %H:%M'))
            print("Events to be inserted: \n" + ", ".join(events))
            resp = default_input("\nInsert Events (y) or Edit (e)?", default=None)
            if resp.lower().startswith('y'):
                break
        for evt in event_objs:
            # (method name typo 'inser' is the actual spelling of the method)
            self.inser_event_placeholder(evt, duration=duration, loc_tag=loc_tag)

    def manage_messages(self):
        """Interactive add/edit/delete of the event's message templates."""
        key = 'messages'
        fields = [f[0] for f in HOTBot.event_message_fields]
        field_dict = {f[0]: f[1] for f in HOTBot.event_message_fields}
        if not self.event_conf[key]:
            self.event_conf[key] = []

        def delete_item(i):
            # Remove template i after a yes/no confirmation.
            opt = default_input('Confirm delete? (yes/no)').lower()
            if opt.startswith('y'):
                del self.event_conf[key][i]
                self.event_conf.save()

        def check_msg(msg):
            # Test-render msg against the example field values until it
            # formats cleanly and the user accepts; returns the final text.
            while True:
                try:
                    output = msg.format(**field_dict)
                    print('Rendered message: ' + output.replace("\\n", "\n").replace("\\t", "\t"))
                    opt = default_input("\ns (save), e (edit) ?").lower()
                    if opt.startswith('s'):
                        break
                    # continue for anything else, e is just for show
                except KeyError as e:
                    bad_key = e.args[0]
                    print("\nInvalid message field: " + bad_key)
                    print("\nAvailable message fields:\n" + ", ".join(fields))
                except IndexError:
                    print("Text replacement fields must contain a field name!")
                except ValueError as e:
                    print("Invalid formatting: " + e.args[0])
                msg = default_input("\nEdit Message: ", default=msg)
            return msg

        def edit_item(i):
            msg = self.event_conf[key][i]
            msg = default_input("Message", msg)
            msg = check_msg(msg)
            self.event_conf[key][i] = msg
            self.event_conf.save()

        def new_item():
            msg = default_input("New Message")
            msg = check_msg(msg)
            self.event_conf[key].append(msg)
            self.event_conf.save()

        def print_items():
            # List templates (truncated to 70 chars) plus the field help text.
            count = 1
            print('\nMessages for event: {}'.format(self.event))
            for msg in self.event_conf[key]:
                if len(msg) > 70:
                    msg = msg[:70] + "..."
                print('{}: {}'.format(count, msg))
                count += 1
            print("")
            print("Available message fields:\n" + ", ".join(fields))
            print("\nExample message:\n" + HOTBot.event_message_example)
            print("\nOutput:\n" + HOTBot.event_message_example.format(**field_dict))
            print("")
        # Main menu loop: number selects a template, n/d/q are commands.
        while True:
            if len(self.event_conf[key]):
                num = len(self.event_conf[key])
                print_items()
                opt = default_input("\n1-{} (edit), n (new), d (delete), q (quit)".format(num)).lower()
                if opt.startswith('q'):
                    break
                elif opt.startswith('n'):
                    new_item()
                else:
                    delete = False
                    try:
                        if opt.startswith('d'):
                            delete = True
                            opt = default_input("(1-{}) select".format(num))
                        opt_i = int(opt)
                        opt_i -= 1
                        # purely to throw exception if out of bounds
                        self.event_conf[key][opt_i]
                    except (ValueError, IndexError):
                        print("Invalid selection. Must be 1-{} or n/d/q".format(num))
                        continue
                    if delete:
                        delete_item(opt_i)
                    else:
                        edit_item(opt_i)
            else:
                print("\nNo current entries. Please add one first...")
                new_item()

    def get_rand_message(self, info):
        """Return a random message template rendered with *info*.

        Falls back to a minimal contact-info template when no templates
        are configured for the event.
        """
        messages = self.event_conf.messages
        if not messages:
            messages = [
                ("Phone: {phone}"
                 "\nWebsite: {website}"
                 "\nReservations: {reservation}"
                 )
            ]
        i = randint(0, len(messages) - 1)
        # Literal '\n'/'\t' typed by the user become real newlines/tabs here.
        return messages[i].format(**info).replace("\\n", "\n").replace("\\t", "\t")

    def manage_guests(self):
        """Interactive add/edit/delete of this event's guest list."""
        return self.manage_list('guests', HOTBot.guest_values,
                                'Current Guests', 'displayName')

    def manage_list(self, key, values, list_msg, list_field):
        """Generic interactive editor for a dict-of-dicts config section.

        key        -- section name in the event config (e.g. 'guests')
        values     -- list of (field key, input prompt) pairs per entry
        list_msg   -- heading shown above the listing
        list_field -- entry field displayed in the listing
        """
        if not self.event_conf[key]:
            self.event_conf[key] = {}

        def delete_item(i):
            item = self.event_conf[key][i]
            opt = default_input('Confirm delete {}? (yes/no)'.format(item[list_field])).lower()
            if opt.startswith('y'):
                del self.event_conf[key][i]
                self.event_conf.save()

        def edit_item(i):
            item = self.event_conf[key][i]
            for v in values:
                self.event_conf[key][i][v[0]] = default_input(v[1], item[v[0]])
            self.event_conf.save()

        def new_item():
            # New entries are stored under a fresh short unique key.
            item = {}
            for v in values:
                item[v[0]] = default_input(v[1])
            u = get_short_unique()
            self.event_conf[key][u] = item
            self.event_conf.save()

        def print_items():
            count = 1
            print('\n{} for event: {}'.format(list_msg, self.event))
            # key_map translates 1-based menu numbers back to dict keys.
            self.key_map = []
            for i in self.event_conf[key]:
                print('{}: {}'.format(count, self.event_conf[key][i][list_field]))
                self.key_map.append(i)
                count += 1
        while True:
            if len(self.event_conf[key]):
                num = len(self.event_conf[key])
                print_items()
                opt = default_input("\n1-{} (edit), n (new), d (delete), q (quit)".format(num)).lower()
                if opt.startswith('q'):
                    break
                elif opt.startswith('n'):
                    new_item()
                else:
                    delete = False
                    try:
                        if opt.startswith('d'):
                            delete = True
                            opt = default_input("(1-{}) select".format(num))
                        opt_i = int(opt)
                        opt_i -= 1
                        # purely to throw exception if out of bounds
                        self.event_conf[key][self.key_map[opt_i]]
                        opt = self.key_map[opt_i]
                    except (ValueError, IndexError):
                        print("Invalid selection. Must be 1-{} or n/d/q".format(num))
                        continue
                    if delete:
                        delete_item(opt)
                    else:
                        edit_item(opt)
            else:
                print("\nNo current entries. Please add one first...")
                new_item()

    def get_calendars(self):
        """Return {calendar id: summary} for every calendar on the account."""
        if not self.authorized:
            self.authorize()
        cals = self.service.calendarList().list().execute()
        cal_list = {}
        for c in cals['items']:
            cal_list[c['id']] = c['summary']
        return cal_list

    def select_host_calendar(self):
        """Ask the user which calendar hosts this event; store its id in config."""
        if not self.authorized:
            self.authorize()
        cals = self.get_calendars()
        print("\nSelect your host calendar for event: {}".format(self.event))
        id_list = []
        for c in cals:
            id_list.append(c)
            print("{}: {}".format(len(id_list), cals[c]))
        i = raw_input("Choice: ")
        try:
            i = int(float(i))
            cal_id = id_list[i - 1]
            self.event_conf.host_cal = cal_id
        except (ValueError, IndexError):
            # NOTE(review): message says 1..len-1, but valid range is 1..len.
            print("Invalid selection! Must be a number between 1 and {}".format(
                len(id_list) - 1))
        self.event_conf.save()

    def get_cal_events(self, event=None, days_future=7, max_results=None):
        """Return upcoming '[<event>...]' placeholders from the host calendar.

        event       -- event name (defaults to self.event); NOTE(review): the
                       summary filter below uses self.event regardless -- confirm.
        days_future -- search window, in days from now
        max_results -- NOTE(review): accepted but never forwarded to the API
                       call (maxResults is hard-coded to None) -- confirm intent.
        """
        if not self.authorized:
            self.authorize()
        if not event:
            event = self.event
        now = datetime.datetime.utcnow()
        end = (now + datetime.timedelta(days=days_future)).isoformat() + 'Z'
        now = now.isoformat() + 'Z'
        result = self.service.events().list(
            calendarId=self.event_conf.host_cal, timeMin=now, timeMax=end,
            maxResults=None, singleEvents=True,
            orderBy='startTime').execute()
        events = result.get('items', [])
        event_list = []
        for e in events:
            # Placeholders are events whose summary starts with '[<event>'.
            if e['summary'].startswith('[' + self.event):
                event_list.append(e)
        return event_list

    def inser_event_placeholder(self, start, duration=120, loc_tag=None):
        """Create a '[<event>]' or '[<event>:<tag>]' placeholder on the calendar.

        (Method name typo 'inser' is kept: callers use this spelling.)
        start    -- datetime of the event start
        duration -- length in minutes
        loc_tag  -- optional location tag encoded into the summary
        """
        if not self.authorized:
            self.authorize()
        # Use the calendar account's configured timezone for the placeholder.
        tzone = self.service.settings().get(setting='timezone').execute()['value']
        fmt = '%Y-%m-%dT%H:%M:00'
        name = self.event
        if loc_tag:
            name += (":" + loc_tag)
        name = "[" + name + "]"
        end = start + datetime.timedelta(minutes=duration)
        event = {
            'summary': name,
            'start': {
                'dateTime': start.strftime(fmt),
                'timeZone': tzone,
            },
            'end': {
                'dateTime': end.strftime(fmt),
                'timeZone': tzone,
            }
        }
        print("Creating {}, {}...".format(name, start.strftime(fmt)))
        res = self.service.events().insert(calendarId=self.event_conf.host_cal,
                                           body=event).execute()
        print("Created: {}".format(res.get('htmlLink')))

    def update_event(self, event, name, description, location):
        """Fill in a placeholder event and invite all configured guests.

        Returns the updated event resource dict, or None when the API call
        fails (the error is logged, not raised).
        """
        if not self.authorized:
            self.authorize()
        cal_id = self.event_conf.host_cal
        event["summary"] = name
        event['attendees'] = []
        for _, g in self.event_conf.guests.iteritems():
            event['attendees'].append(g)
        event['location'] = location
        event["description"] = description
        result = None
        try:
            result = self.service.events().update(calendarId=cal_id, eventId=event['id'],
                                                  body=event, sendNotifications=True).execute()
        except:
            log.exception("Error updating event!")
        return result
def main(flags=None):
    """Dispatch on parsed command-line flags.

    Management flags (--edit-*, --select-cal, --ins-events) run the matching
    interactive editor. Otherwise the next placeholder event is looked up,
    a location and message are chosen at random, and the calendar entry is
    updated. Exits with status 1 on failure.
    """
    bot = HOTBot(event=flags.event, flags=flags)
    if not bot.event_conf.host_cal:
        log.info("No calendar selected. Loading options...")
        bot.select_host_calendar()
    if flags.edit_guests:
        bot.manage_guests()
    elif flags.edit_loc:
        bot.manage_locations()
    elif flags.edit_msg:
        bot.manage_messages()
    elif flags.select_cal:
        bot.select_host_calendar()
    elif flags.ins_events:
        bot.insert_events()
    else:
        events = bot.get_cal_events(days_future=flags.days)
        if len(events):
            # Only the first (soonest) placeholder is processed per run.
            evt = events[0]
            # '[event:tag]' -> ['event', 'tag']; tag is optional.
            summary = evt['summary'].strip('[').strip(']').strip().split(':')
            tag = None
            if len(summary) > 1:
                tag = summary[1]
            summary = summary[0]
            loc = bot.get_rand_location(evt['start']['dateTime'], tag)
            if not loc:
                log.error("Unable to find location with given parameters!")
                sys.exit(1)
            else:
                key, loc, info = loc
                name = bot.event + " - " + loc['name']
                log.info("Creating event: " + name)
                description = bot.get_rand_message(info)
                location = loc['location']
                result = bot.update_event(evt, name, description, location)
                if not result:
                    log.error("There seems to have been an error updating the event. Try again later...")
                else:
                    # Only mark the location as used after a successful update.
                    bot.add_loc_history(key)
                    log.info("Event update success!")
        else:
            log.error(("No upcoming events found for {} in the next {} days. "
                       "Either create a placeholder in GCal or "
                       "search further into the future with the --days option.")
                      .format(bot.event, flags.days))
            sys.exit(1)
def run_script():
    """Command-line entry point: parse arguments and invoke main().

    Inherits Google's OAuth arguments via tools.argparser; any unexpected
    exception is logged rather than propagated, and logging is always
    shut down cleanly.
    """
    # can use --noauth_local_webserver to manually auth
    parser = argparse.ArgumentParser(description='HOTBot automated event scheduler',
                                     parents=[tools.argparser])
    parser.add_argument(
        "--event",
        required=True,
        help="Event name (used as key for calendar lookup)")
    parser.add_argument(
        "--days",
        help="Number of days in the future to look for events (default: 7)",
        type=int,
        default=7)
    parser.add_argument(
        "--edit-guests", dest='edit_guests',
        default=False, action='store_true',
        help="Edit guests for event")
    parser.add_argument(
        "--edit-loc", dest='edit_loc',
        default=False, action='store_true',
        help="Edit locations for event")
    parser.add_argument(
        "--edit-msg", dest='edit_msg',
        default=False, action='store_true',
        help="Edit possible messages for event")
    parser.add_argument(
        "--select-cal", dest='select_cal',
        default=False, action='store_true',
        help="Select host calendar")
    parser.add_argument(
        "--ins-events", dest='ins_events',
        default=False, action='store_true',
        help="Insert event placeholders into calendar with cron formatting")
    flags = parser.parse_args()
    try:
        main(flags)
    except SystemExit:
        # main() exits deliberately on handled failures; don't log those.
        pass
    except:
        log.exception("Fatal error occured in script: ")
    finally:
        logging.shutdown()


if __name__ == '__main__':
    run_script()
| adammhaile/HOTBot | hotbot/__init__.py | Python | mit | 23,945 | [
"VisIt"
] | 05d55d6904c0266fe67f0f31e4c752b68cbf32a0e766ca34bd7a79848168ef97 |
#!/usr/bin/env python
# EDITABLE SECTIONS ARE MARKED WITH #@#
version="2.5.TAW160730"
authors=["Djurre de Jong", "Jaakko J. Uusitalo", "Tsjerk A. Wassenaar"]
# Parameters are defined for the following (protein) forcefields:
forcefields = ['martini21','martini21p','martini22','martini22p','elnedyn','elnedyn22','elnedyn22p','martini22dna']
notes = [
("DdJ130213","V2.3"),
("DdJ200613","Fixes in cysteine bridge detection and help text."),
("DdJ200820","Fixes in cysteine bridge length and revert elnedyn BB-bonds to bonds."),
("DdJ200826","Inverted 'define NO_RUBBER_BANDS', fixed writing posres when merging and added few comments."),
("DdJ200831","Shortened in-file changelog and fixed some comments."),
("DdJ181013","V2.4"),
]
#
# This program has grown to be pretty complex.
# The routines have been organized in different files.
# For working versions, all files can be incorporated by using the option -cat.
#
# Index of the program files:
#
# 1. Options and documentation @DOC.py
# 2. Description, options and command line parsing @CMD.py
# 3. Helper functions and macros @FUNC.py
# 4. Finegrained to coarsegrained mapping @MAP.py
# 5. Secondary structure determination and interpretation @SS.py
# 6. Force field parameters (MARTINI/ELNEDYN) @FF.py
# 7. Elastic network @ELN.py
# 8. Structure I/O @IO.py
# 9. Topology generation @TOP.py
# 10. Main @MAIN.py
# 11. Web-interface @WEB.py
#
def cat(file_out):
    '''Function to 'compile' the martinize script into one file.

    Concatenates martinize.py and all helper modules listed below into
    *file_out*, dropping the intra-package import lines and stripping the
    'module.' prefixes so the result is one self-contained script. The
    martinize.py body after its __name__ guard is appended last (tail).
    '''
    import re
    files_in = 'martinize.py DOC.py CMD.py FUNC.py MAP.py SS.py '+'.py '.join(forcefields)+'.py ELN.py IO.py TOP.py MAIN.py '
    # pattern1 matches bare module names; pattern2 matches 'module.' prefixes.
    pattern1 = re.compile(files_in.replace('.py ','|')[:-1])
    pattern2 = re.compile(files_in.replace('.py ','\.|')[:-1])
    file_out = open(file_out,'w')
    tail = ''; head = True
    for f in files_in.split():
        for line in open(f).readlines():
            # Split the string to avoid the function finding itself
            if '__na'+'me__' in line:
                head = False
            if head:
                file_out.write(line)
            elif (f == 'martinize.py' and not head) and not ('import' in line and pattern1.search(line)):
                tail += pattern2.sub('',line)
            elif line[0] == '#':
                file_out.write(line)
            elif not ('import' in line and pattern1.search(line)):
                file_out.write(pattern2.sub('',line))
    file_out.write(tail)
###################################
## 1 # OPTIONS AND DOCUMENTATION ## -> @DOC <-
###################################
# This is a simple and versatily option class that allows easy
# definition and parsing of options.
class Option:
    """A single command-line option.

    Bundles the conversion function applied to raw argument strings, the
    number of arguments the option consumes, the current (converted) value,
    and a help description.
    """
    def __init__(self, func=str, num=1, default=None, description=""):
        self.func = func                # conversion applied to each raw argument
        self.num = num                  # number of arguments this option consumes
        self.value = default            # current (converted) value
        self.description = description  # help text shown by help()

    def __nonzero__(self):
        # Truth value (Python 2 protocol): a bool option is true unless its
        # value is exactly False; any other option is true when its value is.
        return self.value != False if self.func == bool else bool(self.value)

    def __str__(self):
        # Empty string when unset/falsy, otherwise the value's string form.
        return str(self.value) if self.value else ""

    def setvalue(self, v):
        """Convert and store argument list *v* (scalar when one element)."""
        self.value = self.func(v[0]) if len(v) == 1 else [self.func(i) for i in v]
# Lists for gathering arguments to options that can be specified
# multiple times on the command line.
# One accumulator list per repeatable option; the matching Option objects
# below use these lists' append methods as their conversion 'func'.
lists = {
    'cystines': [],
    'merges' : [],
    'links' : [],
    'multi' : [],
}
# List of Help text and options.
# This way we can simply print this list if the user wants help.
options = [
# NOTE: Options marked with (+) can be given multiple times on the command line
# option type number default description
"""
MARTINIZE.py is a script to create Coarse Grain Martini input files of
proteins, ready for use in the molecular dynamics simulations package
Gromacs. For more information on the Martini forcefield, see:
www.cgmartini.nl
and read our papers:
Monticelli et al., J. Chem. Theory Comput., 2008, 4(5), 819-834
de Jong et al., J. Chem. Theory Comput., 2013, DOI:10.1021/ct300646g
Primary input/output
--------------------
The input file (-f) should be a coordinate file in PDB or GROMOS
format. The format is inferred from the structure of the file. The
input can also be provided through stdin, allowing piping of
structures. The input structure can have multiple frames/models. If an output
structure file (-x) is given, each frame will be coarse grained,
resulting in a multimodel output structure. Having multiple frames may
also affect the topology. If secondary structure is determined
internally, the structure will be averaged over the frames. Likewise,
interatomic distances, as used for backbone bond lengths in Elnedyn
and in elastic networks, are also averaged over the frames available.
If an output file (-o) is indicated for the topology, that file will
be used for the master topology, using #include statements to link the
moleculetype definitions, which are written to separate files. If no
output filename is given, the topology and the moleculetype
definitions are written to stdout.
Secondary structure
-------------------
The secondary structure plays a central role in the assignment of atom
types and bonded interactions in MARTINI. Martinize allows
specification of the secondary structure as a string (-ss), or as a
file containing a specification in GROMACS' ssdump format
(-ss). Alternatively, DSSP can be used for an on-the-fly assignment of
the secondary structure. For this, the option -dssp has to be used
giving the location of the executable as the argument.
The option -collagen will set the whole structure to collagen. If this
is not what you want (eg only part of the structure is collagen, you
can give a secondary structure file/string (-ss) and specifiy collagen
as "F". Parameters for collagen are taken from: Gautieri et al.,
J. Chem. Theory Comput., 2010, 6, 1210-1218.
With multimodel input files, the secondary structure as determined with
DSSP will be averaged over the frames. In this case, a cutoff
can be specified (-ssc) indicating the fraction of frames to match a
certain secondary structure type for designation.
Topology
--------
Several options are available to tune the resulting topology. By
default, termini are charged, and chain breaks are kept neutral. This
behaviour can be changed using -nt and -cb, respectively.
Disulphide bridges can be specified using -cys. This option can be
given multiple times on the command line. The argument is a pair of
cysteine residues, using the format
chain/resn/resi,chain/resn/resi.
It is also possible to let martinize detect cysteine pairs based on a
cut-off distance of 0.22nm, by giving the keyword 'auto' as argument to -cys.
Alternatively, a different cut-off distance can be specified, which
will also trigger a search of pairs satisfying the distance
criterion (eg: -cys 0.32).
In addition to cystine bridges, links between other atoms can be
specified using -link. This requires specification of the atoms, using
the format
chain/resi/resn/atom,chain/resi/resn/atom,bondlength,forceconstant.
If only two atoms are given, a constraint will be added with length
equal to the (average) distance in the coordinate file. If a bond
length is added, but no force constant, then the bondlength will be
used to set a constraint.
Linking atoms requires that the atoms are part of the same
moleculetype. Therefore any link between chains will cause the chains
to be merged. Merges can also be specified explicitly, using the
option -merge with a comma-separated list of chain identifiers to be
joined into one moleculetype. The option -merge can be used several
times. Note that specifying a chain in several merge groups will cause
all chains involved to be merged into a single moleculetype.
The moleculetype definitions are written to topology include (.itp)
files, using a name consisting of the molecule class (e.g. Protein)
and the chain identifier. With -name a name can be specified instead.
By default, martinize only writes a moleculetype for each unique
molecule, inferred from the sequence and the secondary structure
definition. It is possible to force writing a moleculetype definition
for every single molecule, using -sep.
The option -p can be used to write position restraints, using the
force constant specified with -pf, which is set to 1000 kJ/mol
by default.
For stability, elastic bonds are used to retain the structure of
extended strands. The option -ed causes dihedrals to be used
instead.
Different forcefields can be specified with -ff. All the parameters and
options belonging to that forcefield will be set (eg. bonded interactions,
BB-bead positions, Elastic Network, etc.). By default martini 2.1 is
used.
Elastic network
---------------
Martinize can write an elastic network for atom pairs within a cutoff
distance. The force constant (-ef) and the upper distance bound (-eu)
can be speficied. If a force field with an intrinsic Elastic
network is specified (eg. Elnedyn) with -ff, -elastic in implied and
the default values for the force constant and upper cutoff are used.
However, these can be overwritten.
Multiscaling
------------
Martinize can process a structure to yield a multiscale system,
consisting of a coordinate file with atomistic parts and
corresponding, overlaid coarsegrained parts. For chains that are
multiscaled, rather than writing a full moleculetype definition,
additional [atoms] and [virtual_sitesn] sections are written, to
be appended to the atomistic moleculetype definitions.
The option -multi can be specified multiple times, and takes a chain
identifier as argument. Alternatively, the keyword 'all' can be given
as argument, causing all chains to be multiscaled.
========================================================================\n
""",
("-f", Option(str, 1, None, "Input file (PDB|GRO)")),
("-o", Option(str, 1, None, "Output topology (TOP)")),
("-x", Option(str, 1, None, "Output coarse grained structure (PDB)")),
("-n", Option(str, 1, None, "Output index file with CG (and multiscale) beads.")),
("-nmap", Option(str, 1, None, "Output index file containing per bead mapping.")),
("-v", Option(bool, 0, False, "Verbose. Be load and noisy.")),
("-h", Option(bool, 0, False, "Display this help.")),
("-ss", Option(str, 1, None, "Secondary structure (File or string)")),
("-ssc", Option(float, 1, 0.5, "Cutoff fraction for ss in case of ambiguity (default: 0.5).")),
("-dssp", Option(str, 1, None, "DSSP executable for determining structure")),
# ("-pymol", Option(str, 1, None, "PyMOL executable for determining structure")),
("-collagen", Option(bool, 0, False, "Use collagen parameters")),
("-his", Option(bool, 0, False, "Interactively set the charge of each His-residue.")),
("-nt", Option(bool, 0, False, "Set neutral termini (charged is default)")),
("-cb", Option(bool, 0, False, "Set charges at chain breaks (neutral is default)")),
("-cys", Option(lists['cystines'].append, 1, None, "Disulphide bond (+)")),
("-link", Option(lists['links'].append, 1, None, "Link (+)")),
("-merge", Option(lists['merges'].append, 1, None, "Merge chains: e.g. -merge A,B,C (+)")),
# ("-mixed", Option(bool, 0, False, "Allow chains of mixed type (default: False)")),
("-name", Option(str, 1, None, "Moleculetype name")),
("-p", Option(str, 1, 'None', "Output position restraints (None/All/Backbone) (default: None)")),
("-pf", Option(float, 1, 1000, "Position restraints force constant (default: 1000 kJ/mol/nm^2)")),
("-ed", Option(bool, 0, False, "Use dihedrals for extended regions rather than elastic bonds)")),
("-sep", Option(bool, 0, False, "Write separate topologies for identical chains.")),
("-ff", Option(str, 1,'martini21', "Which forcefield to use: "+' ,'.join(n for n in forcefields[:-1]))),
# Fij = Fc exp( -a (rij - lo)**p )
("-elastic", Option(bool, 0, False, "Write elastic bonds")),
("-ef", Option(float, 1, 500, "Elastic bond force constant Fc")),
("-el", Option(float, 1, 0, "Elastic bond lower cutoff: F = Fc if rij < lo")),
("-eu", Option(float, 1, 0.90, "Elastic bond upper cutoff: F = 0 if rij > up")),
("-ea", Option(float, 1, 0, "Elastic bond decay factor a")),
("-ep", Option(float, 1, 1, "Elastic bond decay power p")),
("-em", Option(float, 1, 0, "Remove elastic bonds with force constant lower than this")),
("-eb", Option(str, 1, 'BB', "Comma separated list of bead names for elastic bonds")),
# ("-hetatm", Option(bool, 0, False, "Include HETATM records from PDB file (Use with care!)")),
("-multi", Option(lists['multi'].append, 1, None, "Chain to be set up for multiscaling (+)")),
]
## Martini Quotes
martiniq = [
("Robert Benchley",
"Why don't you get out of that wet coat and into a dry martini?"),
("James Thurber",
"One martini is all right, two is two many, three is not enough"),
("Philip Larkin",
"The chromatic scale is what you use to give the effect of drinking a quinine martini and having an enema simultaneously."),
("William Emerson, Jr.",
"And when that first martini hits the liver like a silver bullet, there is a sigh of contentment that can be heard in Dubuque."),
("Alec Waugh",
"I am prepared to believe that a dry martini slightly impairs the palate, but think what it does for the soul."),
("Gerald R. Ford",
"The three-martini lunch is the epitome of American efficiency. Where else can you get an earful, a bellyful and a snootful at the same time?"),
("P. G. Wodehouse",
"He was white and shaken, like a dry martini."),
]
desc = ""
def help():
    """Print help text and list of options and end the program."""
    import sys
    # First pass: free-text blocks (plain strings in the options list).
    for item in options:
        if type(item) == str:
            print item
    # Second pass: one line per (flag, Option) pair with its description.
    for item in options:
        if type(item) != str:
            print "%10s %s"%(item[0],item[1].description)
    print
    sys.exit()
##############################
## 2 # COMMAND LINE PARSING ## -> @CMD <-
##############################
import sys,logging
# Helper function to parse atom strings given on the command line:
# resid
# resname/resid
# chain/resname/resid
# resname/resid/atom
# chain/resname/resid/atom
# chain//resid
# chain/resname/atom
def str2atom(a):
    """Parse an atom specification string into (atom, resname, resi, chain).

    Accepted '/'-separated forms:
      resi | resname/resi | chain/resname/resi | resname/resi/atom |
      chain/resname/resi/atom
    Fields that are not given come back as None.
    """
    fields = a.split("/")
    n = len(fields)
    if n == 1:
        # Bare residue number
        return (None, None, int(fields[0]), None)
    if n == 2:
        # Residue name and number, e.g. CYS/123
        return (None, fields[0], int(fields[1]), None)
    if n == 3:
        if fields[2].isdigit():
            # chain/resname/resi
            return (None, fields[1], int(fields[2]), fields[0])
        # resname/resi/atom
        return (fields[2], fields[0], int(fields[1]), None)
    # chain/resname/resi/atom
    return (fields[3], fields[1], int(fields[2]), fields[0])
def option_parser(args,options,lists,version=0):
    """Parse the command line and return the fully processed options dict.

    args    -- raw command-line tokens (consumed destructively)
    options -- list of (flag, Option) pairs interleaved with plain help strings
    lists   -- dict of accumulator lists filled during the pre-scan;
               expects keys 'merges', 'links', 'cystines' and 'multi'
    version -- version tag recorded in the returned dictionary

    Exits the program on -h/--help or when the requested force field
    cannot be located.
    """
    # Check whether there is a request for help
    if '-h' in args or '--help' in args:
        help()
    # Convert the option list to a dictionary, discarding all comments
    options = dict([i for i in options if not type(i) == str])
    # This information we would like to print to some files, so let's put it in our information class
    options['Version'] = version
    options['Arguments'] = args[:]
    # Consume the argument list: each flag pulls as many following values as
    # its Option declares (Option.num).
    # NOTE(review): an unrecognized flag raises KeyError here -- confirm that
    # is the intended failure mode.
    while args:
        ar = args.pop(0)
        options[ar].setvalue([args.pop(0) for i in range(options[ar].num)])
    ## LOGGING ##
    # Set the log level and communicate which options are set and what is happening
    # If 'Verbose' is set, change the logger level
    logLevel = options["-v"] and logging.DEBUG or logging.INFO
    logging.basicConfig(format='%(levelname)-7s %(message)s',level=logLevel)
    logging.info('MARTINIZE, script version %s'%version)
    logging.info('If you use this script please cite:')
    logging.info('de Jong et al., J. Chem. Theory Comput., 2013, DOI:10.1021/ct300646g')
    # To make the program flexible, the forcefield parameters are defined
    # for multiple forcefields. Check if an existing one is requested:
    ###_tmp = __import__(options['-ff'].value.lower())
    ###options['ForceField'] = getattr(_tmp,options['-ff'].value.lower())()
    try:
        try:
            # Try to load the forcefield class from a different file
            _tmp = __import__(options['-ff'].value.lower())
            options['ForceField'] = getattr(_tmp,options['-ff'].value.lower())()
        except:
            # Try to load the forcefield class from the current file
            options['ForceField'] = globals()[options['-ff'].value.lower()]()
    except:
        logging.error("Forcefield '%s' can not be found."%(options['-ff']))
        sys.exit()
    # Process the raw options from the command line
    # Boolean options are set to more intuitive variables
    options['Collagen'] = options['-collagen']
    options['chHIS'] = options['-his']
    options['ChargesAtBreaks'] = options['-cb']
    options['NeutralTermini'] = options['-nt']
    options['ExtendedDihedrals'] = options['-ed']
    options['RetainHETATM'] = False # options['-hetatm']
    options['SeparateTop'] = options['-sep']
    options['MixedChains'] = False # options['-mixed']
    options['ElasticNetwork'] = options['-elastic']
    # Parsing of some other options into variables
    options['ElasticMaximumForce'] = options['-ef'].value
    options['ElasticMinimumForce'] = options['-em'].value
    options['ElasticLowerBound'] = options['-el'].value
    options['ElasticUpperBound'] = options['-eu'].value
    options['ElasticDecayFactor'] = options['-ea'].value
    options['ElasticDecayPower'] = options['-ep'].value
    options['ElasticBeads'] = options['-eb'].value.split(',')
    options['PosResForce'] = options['-pf'].value
    # Position restraint selection: lower-cased, comma separated
    options['PosRes'] = [i.lower() for i in options['-p'].value.split(",")]
    if "none" in options['PosRes']: options['PosRes'] = []
    if "backbone" in options['PosRes']: options['PosRes'].append("BB")
    if options['ForceField'].ElasticNetwork:
        # Some forcefields, like elnedyn, always use an elastic network. This is set in the
        # forcefield file, with the parameter ElasticNetwork.
        options['ElasticNetwork'] = True
    # Merges, links and cystines
    # 'all' wins over individual merge groups
    options['mergeList'] = "all" in lists['merges'] and ["all"] or [i.split(",") for i in lists['merges']]
    # Process links
    linkList = []
    linkListCG = []
    for i in lists['links']:
        ln = i.split(",")
        a, b = str2atom(ln[0]), str2atom(ln[1])
        if len(ln) > 3: # Bond with given length and force constant
            bl, fc = (ln[2] and float(ln[2]) or None, float(ln[3]))
        elif len(a) == 3: # Constraint at given distance
            bl, fc = float(a[2]), None
        else: # Constraint at distance in structure
            bl, fc = None, None
        # Store the link, but do not list the atom name in the
        # atomistic link list. Otherwise it will not get noticed
        # as a valid link when checking for merging chains
        linkList.append(((None,a[1],a[2],a[3]),(None,b[1],b[2],b[3])))
        linkListCG.append((a,b,bl,fc))
    # Cystines -- This should be done for all special bonds listed in the _special_ dictionary
    CystineCheckBonds = False # By default, do not detect cystine bridges
    CystineMaxDist2 = (10*0.22)**2 # Maximum distance (A) for detection of SS bonds
    for i in lists['cystines']:
        if i.lower() == "auto":
            CystineCheckBonds = True
        elif i.replace(".","").isdigit():
            # A number: enable detection with a custom cutoff (given in nm)
            CystineCheckBonds = True
            CystineMaxDist2 = (10*float(i))**2
        else:
            # This item should be a pair of cysteines
            cysA, cysB = [str2atom(j) for j in i.split(",")]
            # Internally we handle the residue number shifted by ord(' ')<<20. We have to add this to the
            # cys-residue numbers given here as well.
            constant = 32<<20
            linkList.append((("SG","CYS",cysA[2]+constant,cysA[3]),("SG","CYS",cysB[2]+constant,cysB[3])))
            linkListCG.append((("SC1","CYS",cysA[2]+constant,cysA[3]),("SC1","CYS",cysB[2]+constant,cysB[3]),-1,-1))
    # Now we have done everything to it, we can add the link/cystine related
    # results to options; 'multi' is not stored anywhere else, so add it too.
    options['linkList'] = linkList
    options['linkListCG'] = linkListCG
    options['CystineCheckBonds'] = CystineCheckBonds
    options['CystineMaxDist2'] = CystineMaxDist2
    options['multi'] = lists['multi']
    logging.info("Chain termini will%s be charged"%(options['NeutralTermini'] and " not" or ""))
    logging.info("Residues at chain brakes will%s be charged"%((not options['ChargesAtBreaks']) and " not" or ""))
    if options.has_key('ForceField'):
        logging.info("The %s forcefield will be used."%(options['ForceField'].name))
    else:
        logging.error("Forcefield '%s' has not been implemented."%(options['-ff']))
        sys.exit()
    if options['ExtendedDihedrals']:
        logging.info('Dihedrals will be used for extended regions. (Elastic bonds may be more stable)')
    else:
        logging.info('Local elastic bonds will be used for extended regions.')
    if options['PosRes']:
        logging.info("Position restraints will be generated.")
        logging.warning("Position restraints are only enabled if -DPOSRES is set in the MDP file")
    if options['MixedChains']:
        logging.warning("So far no parameters for mixed chains are available. This might crash the program!")
    if options['RetainHETATM']:
        logging.warning("I don't know how to handle HETATMs. This will probably crash the program.")
    return options
#################################################
## 3 # HELPER FUNCTIONS, CLASSES AND SHORTCUTS ## -> @FUNC <-
#################################################
import math
#----+------------------+
## A | STRING FUNCTIONS |
#----+------------------+
# Split a string
def spl(x):
    """Return the whitespace-separated tokens of string *x* as a list."""
    return x.split(None)
# Split each argument in a list
def nsplit(*x):
    """Whitespace-split each argument; return the resulting lists of tokens."""
    out = []
    for item in x:
        out.append(item.split())
    return out
# Make a dictionary from two lists
def hash(x,y):
    """Build a dict mapping each item of *x* to the matching item of *y*.

    Pairing stops at the shorter sequence.
    NOTE: intentionally shadows the builtin hash() throughout this script.
    """
    table = {}
    for key, value in zip(x, y):
        table[key] = value
    return table
# Function to reformat pattern strings
def pat(x,c="."):
return x.replace(c,"\x00").split()
# Function to generate formatted strings according to the argument type
def formatString(i):
    """Format a value for topology output.

    Strings pass through unchanged, ints are printed 5 wide, floats as
    %8.5f; anything else (including bool, which fails the exact type
    checks) falls back to str().
    """
    kind = type(i)
    if kind is str:
        return i
    if kind is int:
        return "%5d"%i
    if kind is float:
        return "%8.5f"%i
    return str(i)
#----+----------------+
## B | MATH FUNCTIONS |
#----+----------------+
def cos_angle(a,b):
    """Return the cosine of the angle between vectors *a* and *b*.

    The result is clamped to [-1, 1] to guard against rounding drift.
    """
    dot = sum(i*j for i,j in zip(a,b))
    scale = math.sqrt(sum(i*i for i in a)*sum(j*j for j in b))
    return max(-1, min(1, dot/scale))
def norm2(a):
    """Return the squared Euclidean norm of vector *a*."""
    total = 0
    for component in a:
        total += component*component
    return total
def norm(a):
    """Return the Euclidean norm (length) of vector *a*."""
    return math.sqrt(sum(i*i for i in a))
def distance2(a,b):
    """Return the squared distance between 3D points *a* and *b*."""
    dx = a[0]-b[0]
    dy = a[1]-b[1]
    dz = a[2]-b[2]
    return dx**2 + dy**2 + dz**2
##########################
## 4 # FG -> CG MAPPING ## -> @MAP <-
##########################
# Three- and one-letter style codes for DNA and RNA nucleotides
dnares3 = " DA DC DG DT"
dnares1 = " dA dC dG dT"
rnares3 = " A C G U"
rnares1 = " rA rC rG rU" #
# Amino acid and nucleic acid codes:
# The naming (AA and '3') is not strictly correct when adding DNA/RNA, but we keep it like this for consistency.
AA3 = spl("TRP TYR PHE HIS HIH ARG LYS CYS ASP GLU ILE LEU MET ASN PRO HYP GLN SER THR VAL ALA GLY"+dnares3+rnares3) #@#
AA1 = spl(" W Y F H H R K C D E I L M N P O Q S T V A G"+dnares1+rnares1) #@#
# Dictionaries for conversion from one letter code to three letter code v.v.
AA123, AA321 = hash(AA1,AA3),hash(AA3,AA1)
# Residue classes:
protein = AA3[:-8] # remove eight to get rid of DNA/RNA here.
water = spl("HOH SOL TIP")
lipids = spl("DPP DHP DLP DMP DSP POP DOP DAP DUP DPP DHP DLP DMP DSP PPC DSM DSD DSS")
nucleic = spl("DAD DCY DGU DTH ADE CYT GUA THY URA DA DC DG DT")
# Lookup from residue name to broad residue class
residueTypes = dict(
    [(i,"Protein") for i in protein ]+
    [(i,"Water") for i in water ]+
    [(i,"Lipid") for i in lipids ]+
    [(i,"Nucleic") for i in nucleic ]
)
class CoarseGrained:
    # Class for mapping an atomistic residue list to a coarse-grained one
    # Should get an __init__ function taking a residuelist, atomlist, Pymol selection or ChemPy model
    # The result should be stored in a list-type attribute
    # The class should have pdbstr and grostr methods
    # NOTE(review): aver/map/mapIndex below are defined without self or
    # @staticmethod; confirm how they are invoked at the call sites.

    # Standard mapping groups
    # Protein backbone
    bb = "N CA C O H H1 H2 H3 O1 O2" #@#
    # Lipid tails
    palmitoyl1 = nsplit("C1B C1C C1D C1E","C1F C1G C1H C1I","C1J C1K C1L C1M","C1N C1O C1P") #@#
    palmitoyl2 = nsplit("C2B C2C C2D C2E","C2F C2G C2H C2I","C2J C2K C2L C2M","C2N C2O C2P") #@#
    oleyl1 = nsplit("C1B C1C C1D C1E","C1F C1G C1H","C1I C1J","C1K C1L C1M C1N","C1O C1P C1Q C1R") #@#
    oleyl2 = nsplit("C2B C2C C2D C2E","C2F C2G C2H","C2I C2J","C2K C2L C2M C2N","C2O C2P C2Q C2R") #@#
    #lauroyl1 = []
    #stearoyl1 = []
    #arachidonoyl1 = []
    #linoleyl1 = []
    #hexanoyl1 = []
    # Lipid head groups
    #phoshpatidylcholine =
    phosphatydilethanolamine = nsplit("N H1 H2 H3 CA","CB P OA OB OC OD","CC CD OG C2A OH","CE OE C1A OF") #@#
    phosphatidylglycerol = nsplit("H1 O1 CA H2 O2 CB","CC P OA OB OC OD","CD CE OG C2A OH","CF OE C1A OF") #@#
    #phosphatidylserine =
    # DNA backbone bead groups (a tuple of strings, not nsplit'ed)
    dna_bb = "P OP1 OP2 O5' O3'","C5' O4' C4'","C3' O3' C2' C1'"
    # This is the mapping dictionary
    # For each residue it returns a list, each element of which
    # lists the atom names to be mapped to the corresponding bead.
    # The order should be the standard order of the coarse grained
    # beads for the residue. Only atom names matching with those
    # present in the list of atoms for the residue will be used
    # to determine the bead position. This adds flexibility to the
    # approach, as a single definition can be used for different
    # states of a residue (e.g., GLU/GLUH).
    # For convenience, the list can be specified as a set of strings,
    # converted into a list of lists by 'nsplit' defined above.
    mapping = {
        "ALA": nsplit(bb + " CB"),
        "CYS": nsplit(bb,"CB SG"),
        "ASP": nsplit(bb,"CB CG OD1 OD2"),
        "GLU": nsplit(bb,"CB CG CD OE1 OE2"),
        "PHE": nsplit(bb,"CB CG CD1 HD1","CD2 HD2 CE2 HE2","CE1 HE1 CZ HZ"),
        "GLY": nsplit(bb),
        "HIS": nsplit(bb,"CB CG","CD2 HD2 NE2 HE2","ND1 HD1 CE1 HE1"),
        "HIH": nsplit(bb,"CB CG","CD2 HD2 NE2 HE2","ND1 HD1 CE1 HE1"), # Charged Histidine.
        "ILE": nsplit(bb,"CB CG1 CG2 CD CD1"),
        "LYS": nsplit(bb,"CB CG CD","CE NZ HZ1 HZ2 HZ3"),
        "LEU": nsplit(bb,"CB CG CD1 CD2"),
        "MET": nsplit(bb,"CB CG SD CE"),
        "ASN": nsplit(bb,"CB CG ND1 ND2 OD1 OD2 HD11 HD12 HD21 HD22"),
        "PRO": nsplit(bb,"CB CG CD"),
        "HYP": nsplit(bb,"CB CG CD OD"),
        "GLN": nsplit(bb,"CB CG CD OE1 OE2 NE1 NE2 HE11 HE12 HE21 HE22"),
        "ARG": nsplit(bb,"CB CG CD","NE HE CZ NH1 NH2 HH11 HH12 HH21 HH22"),
        "SER": nsplit(bb,"CB OG HG"),
        "THR": nsplit(bb,"CB OG1 HG1 CG2"),
        "VAL": nsplit(bb,"CB CG1 CG2"),
        "TRP": nsplit(bb,"CB CG CD2","CD1 HD1 NE1 HE1 CE2","CE3 HE3 CZ3 HZ3","CZ2 HZ2 CH2 HH2"),
        "TYR": nsplit(bb,"CB CG CD1 HD1","CD2 HD2 CE2 HE2","CE1 HE1 CZ OH HH"),
        "POPE": phosphatydilethanolamine + palmitoyl1 + oleyl2,
        "DOPE": phosphatydilethanolamine + oleyl1 + oleyl2,
        "DPPE": phosphatydilethanolamine + palmitoyl1 + palmitoyl2,
        "POPG": phosphatidylglycerol + palmitoyl1 + oleyl2,
        "DOPG": phosphatidylglycerol + oleyl1 + oleyl2,
        "DPPG": phosphatidylglycerol + palmitoyl1 + palmitoyl2,
        "DA": nsplit("P OP1 OP2 O5' O3' O1P O2P","C5' O4' C4'","C3' C2' C1'","N9 C4","C8 N7 C5","C6 N6 N1","C2 N3"),
        "DG": nsplit("P OP1 OP2 O5' O3' O1P O2P","C5' O4' C4'","C3' C2' C1'","N9 C4","C8 N7 C5","C6 O6 N1","C2 N2 N3"),
        "DC": nsplit("P OP1 OP2 O5' O3' O1P O2P","C5' O4' C4'","C3' C2' C1'","N1 C6","C5 C4 N4","N3 C2 O2"),
        "DT": nsplit("P OP1 OP2 O5' O3' O1P O2P","C5' O4' C4'","C3' C2' C1'","N1 C6","C5 C4 O4 C7 C5M","N3 C2 O2"),
    }
    # Generic names for side chain beads
    residue_bead_names = spl("BB SC1 SC2 SC3 SC4")
    # Generic names for DNA beads
    residue_bead_names_dna = spl("BB1 BB2 BB3 SC1 SC2 SC3 SC4")
    # This dictionary contains the bead names for all residues,
    # following the order in 'mapping'
    names = {
        "POPE": "NH3 PO4 GL1 GL2 C1A C2A C3A C4A C1B C2B D3B C4B C5B".split(),
        "POPG": "GLC PO4 GL1 GL2 C1A C2A C3A C4A C1B C2B D3B C4B C5B".split()
    }
    # Add default bead names for all amino acids
    names.update([(i,("BB","SC1","SC2","SC3","SC4")) for i in AA3])
    # Add the default bead names for all DNA nucleic acids
    names.update([(i,("BB1","BB2","BB3","SC1","SC2","SC3","SC4")) for i in nucleic])
    # This dictionary allows determining four letter residue names
    # for ones specified with three letters, e.g., resulting from
    # truncation to adhere to the PDB format.
    # Each entry returns a prototypical test, given as a string,
    # and the residue name to be applied if eval(test) is True.
    # This is particularly handy to determine lipid types.
    # The test assumes there is a local or global array 'atoms'
    # containing the atom names of the residue in correct order.
    restest = {
        "POP": [('atoms[0] == "CA"', "POPG"),
                ('atoms[0] == "N"', "POPE")]
    }
    # Crude mass for weighted average. No consideration of united atoms.
    # This will probably give only minor deviations, while also giving less headache
    mass = {'H': 1,'C': 12,'N': 14,'O': 16,'S': 32,'P': 31,'M': 0}

    # Determine average position for a set of weights and coordinates
    # This is a rather specific function that requires a list of items
    # [(m,(x,y,z),id),..] and returns the weighted average of the
    # coordinates and the list of ids mapped to this bead
    def aver(b):
        """Return (centre-of-mass, member ids) for bead data [(m,(x,y,z),id),...]."""
        mwx,ids = zip(*[((m*x,m*y,m*z),i) for m,(x,y,z),i in b]) # Weighted coordinates
        tm = sum(zip(*b)[0]) # Sum of weights
        return [sum(i)/tm for i in zip(*mwx)],ids # Centre of mass

    # Return the CG beads for an atomistic residue, using the mapping specified above
    # The residue 'r' is simply a list of atoms, and each atom is a list:
    # [ name, resname, resid, chain, x, y, z ]
    def map(r,ca2bb = False):
        """Map atomistic residue *r* to CG beads; returns (positions, id-lists)."""
        p = CoarseGrained.mapping[r[0][1]] # Mapping for this residue
        if ca2bb: p[0] = ["CA"] # Elnedyn maps BB to CA, ca2bb is False or True
        # Get the name, mass and coordinates for all atoms in the residue
        # (mass is looked up from the first letter of the atom name)
        a = [(i[0],CoarseGrained.mass.get(i[0][0],0),i[4:]) for i in r]
        # Store weight, coordinate and index for atoms that match a bead
        q = [[(m,coord,a.index((atom,m,coord))) for atom,m,coord in a if atom in i] for i in p]
        # Bead positions
        return zip(*[aver(i) for i in q])

    # Mapping for index file
    def mapIndex(r,ca2bb = False):
        """Like map(), but return the per-bead (mass, coord, index) lists only."""
        p = CoarseGrained.mapping[r[0][1]] # Mapping for this residue
        if ca2bb: p[0] = ["CA"] # Elnedyn maps BB to CA, ca2bb is False or True
        # Get the name, mass and coordinates for all atoms in the residue
        a = [(i[0],CoarseGrained.mass.get(i[0][0],0),i[4:]) for i in r]
        # Store weight, coordinate and index for atoms that match a bead
        return [[(m,coord,a.index((atom,m,coord))) for atom,m,coord in a if atom in i] for i in p]
#############################
## 5 # SECONDARY STRUCTURE ## -> @SS <-
#############################
import logging,os,sys
import subprocess as subp
#----+--------------------------------------+
## A | SECONDARY STRUCTURE TYPE DEFINITIONS |
#----+--------------------------------------+
# This table lists all coarse grained secondary structure types
# The following are matched lists. Make sure they stay matched.
# The lists do not need to be of the same length. The longer list
# will be truncated when combined with a shorter list, e.g. with
# dihedral definitions, which are not present for coil and termini
#
# Human-readable descriptions of the coarse-grained secondary structure
# classes, keyed by their one-letter codes.
ss_names = {
    "F": "Collagenous Fiber", #@#
    "E": "Extended structure (beta sheet)", #@#
    "H": "Helix structure", #@#
    "1": "Helix start (H-bond donor)", #@#
    "2": "Helix end (H-bond acceptor)", #@#
    "3": "Ambivalent helix type (short helices)", #@#
    "T": "Turn", #@#
    "S": "Bend", #@#
    "C": "Coil", #@#
}
# Secondary-structure one-letter codes, in the canonical order used by all
# parameter tables below.
# The previous assignment from ss_names.keys() was dead code: its arbitrary
# dict ordering was immediately overwritten by this explicit ordered list.
bbss = " F E H 1 2 3 T S C".split() # SS one letter
# The following dictionary contains secondary structure types as assigned by
# different programs. The corresponding Martini secondary structure types are
# listed in cgss
#
# NOTE:
# Each list of letters in the dictionary ss should exactly match the list
# in cgss.
#
# Secondary structure codes as emitted by each supported assignment program.
# Each list is positionally matched against 'cgss' below; keep them aligned.
ssdefs = {
    "dssp": list(".HGIBETSC~"), # DSSP one letter secondary structure code #@#
    "pymol": list(".H...S...L"), # Pymol one letter secondary structure code #@#
    "gmx": list(".H...ETS.C"), # Gromacs secondary structure dump code #@#
    "self": list("FHHHEETSCC") # Internal CG secondary structure codes #@#
}
cgss = list("FHHHEETSCC") # Corresponding CG secondary structure types #@#
#----+-------------------------------------------+
## B | SECONDARY STRUCTURE PATTERN SUBSTITUTIONS |
#----+-------------------------------------------+
# For all structure types specific dihedrals may be used if four or
# more consecutive residues are assigned that type.
# Helix start and end regions are special and require assignment of
# specific types. The following pattern substitutions are applied
# (in the given order). A dot matches any other type.
# Patterns can be added to the dictionaries. This only makes sense
# if for each key in patterns there is a matching key in pattypes.
# Helix pattern substitutions: each whitespace-separated pattern in
# 'patterns' is rewritten to the positionally matching entry in 'pattypes'
# (applied in order by typesub). '.' encodes the NUL padding char (see pat).
patterns = {
    "H": pat(".H. .HH. .HHH. .HHHH. .HHHHH. .HHHHHH. .HHHHHHH. .HHHH HHHH.") #@#
}
pattypes = {
    "H": pat(".3. .33. .333. .3333. .13332. .113322. .1113222. .1111 2222.") #@#
}
#----+----------+
## C | INTERNAL |
#----+----------+
# Pymol Colors
# F E H 1 2 3 T S C
# Pymol colour numbers, positionally matched to bbss:
#          F   E   H   1   2   3   T   S   C
ssnum = ( 13, 4, 2, 2, 2, 2, 6, 22, 0 ) #@#
# Dictionary returning a number for a given type of secondary structure
# This can be used for setting the b-factor field for coloring
ss2num = hash(bbss,ssnum)
# List of programs for which secondary structure definitions can be processed
programs = ssdefs.keys()
# Dictionaries mapping ss types to the CG ss types
ssd = dict([ (i, hash(ssdefs[i],cgss)) for i in programs ])
# From the secondary structure dictionaries we create translation tables
# with which all secondary structure types can be processed. Anything
# not listed above will be mapped to C (coil).
# Note, a translation table is a list of 256 characters to map standard
# ascii characters to.
def tt(program):
    """Build the 256-character translation table for *program*.

    Every ss code known for the program maps to its CG type; any other
    character maps to 'C' (coil).
    """
    codes = ssd[program]
    return "".join(codes.get(chr(c), "C") for c in range(256))
# The translation table depends on the program used to obtain the
# secondary structure definitions
# Per-program translation tables (see tt above)
sstt = dict([(i,tt(i)) for i in programs])
# The following translation tables are used to identify stretches of
# a certain type of secondary structure. These translation tables have
# every character, except for the indicated secondary structure, set to
# \x00. This allows summing the sequences after processing to obtain
# a single sequence summarizing all the features.
null = "\x00"
sstd = dict([ (i,ord(i)*null+i+(255-ord(i))*null) for i in cgss ])
# Pattern substitutions
def typesub(seq,patterns,types):
    """Apply ordered pattern-to-type substitutions to an ss sequence.

    *patterns* and *types* are matched positionally; *seq* is padded with
    NUL on both ends so patterns can anchor on the sequence boundaries, and
    the padding is stripped again before returning.
    """
    work = "\x00" + seq + "\x00"
    for old, new in zip(patterns, types):
        work = work.replace(old, new)
    return work[1:-1]
# The following function translates a string encoding the secondary structure
# to a string of corresponding Martini types, taking the origin of the
# secondary structure into account, and replacing termini if requested.
def ssClassification(ss,program="dssp"):
    """Convert a raw secondary structure string to Martini ss types.

    ss      -- secondary structure string as produced by *program*
    program -- origin of the assignment ('dssp', 'pymol', 'gmx' or 'self')

    Returns (ss, typ): the Martini-translated sequence and the fully typed
    sequence in which helix start/end/short patterns have been substituted.
    """
    # Translate dssp/pymol/gmx ss to Martini ss
    ss = ss.translate(sstt[program])
    # Separate the different secondary structure types
    sep = dict([(i,ss.translate(sstd[i])) for i in sstd.keys()])
    # Do type substitutions based on patterns
    # If the ss type is not in the patterns lists, do not substitute
    # (use empty lists for substitutions)
    typ = [ typesub(sep[i],patterns.get(i,[]),pattypes.get(i,[]))
            for i in sstd.keys()]
    # Translate all types to numerical values
    typ = [ [ord(j) for j in list(i)] for i in typ ]
    # Sum characters back to get a full typed sequence
    # (per position, all but one of the separated sequences are NUL)
    typ = "".join([chr(sum(i)) for i in zip(*typ)])
    # Return both the actual as well as the fully typed sequence
    return ss, typ
# The following functions are for determination of secondary structure,
# given a list of atoms. The atom format is generic and can be written out
# as PDB or GRO. The coordinates are in Angstrom.
# NOTE: There is the *OLD* DSSP and the *NEW* DSSP, which require
# different calls. The old version uses '--' to indicate reading from stdin
# whereas the new version uses '-i /dev/stdin'
def call_dssp(chain,atomlist,executable='dsspcmbi'):
    '''Get the secondary structure, by calling to dssp'''
    # chain      -- chain object; only .id is used (for the scratch file name)
    # atomlist   -- atoms as (name, resname, resid, chain, x, y, z) tuples
    # executable -- name or path of the DSSP binary
    # Returns the one-letter secondary structure string for the chain.
    ssdfile = 'chain_%s.ssd'%chain.id
    try:
        # A nonzero exit status for '-V' is taken to indicate the new DSSP,
        # which reads stdin via '-i /dev/stdin'; the old one uses '--'.
        if os.system(executable+" -V 2>/dev/null"):
            logging.debug("New version of DSSP; Executing '%s -i /dev/stdin -o %s'"%(executable,ssdfile))
            p = subp.Popen([executable,"-i","/dev/stdin","-o",ssdfile],stderr=subp.PIPE,stdout=subp.PIPE,stdin=subp.PIPE)
        else:
            logging.debug("Old version of DSSP; Executing '%s -- %s'"%(executable,ssdfile))
            p = subp.Popen([executable,"--",ssdfile],stderr=subp.PIPE,stdout=subp.PIPE,stdin=subp.PIPE)
    except OSError:
        logging.error("A problem occured calling %s."%executable)
        sys.exit(1)
    # Feed the chain to DSSP as PDB records, renaming a terminal O1 to O and
    # skipping hydrogens and the terminal O2, which DSSP does not expect.
    for atom in atomlist:
        if atom[0][:2] == 'O1': atom=('O',)+atom[1:]
        if atom[0][0]!='H' and atom[0][:2]!='O2': p.stdin.write(pdbOut(atom))
    p.stdin.write('TER\n')
    data = p.communicate() # flush stdin and wait; output is read from ssdfile
    p.wait()
    # Parse the .ssd output: residue records start after the header line;
    # column 13 is '!' for chain breaks, column 16 holds the ss code.
    main,ss = 0,''
    for line in open(ssdfile).readlines():
        if main and not line[13] == "!": ss+=line[16]
        # BUGFIX: the header comparison must use DSSP's full 15-character
        # header ('  #  RESIDUE AA'); a shorter literal can never equal
        # line[:15], so no residue line would ever be collected.
        if line[:15] == '  #  RESIDUE AA': main=1
    return ss
# Dispatch table from ss source program to the function that determines
# the secondary structure for a chain (currently only DSSP is implemented).
ssDetermination = {
    "dssp": call_dssp
}
################################
## 6 # FORCE FIELD PARAMETERS ## -> @FF <-
################################
class martini21:
    """Parameter definitions for the MARTINI 2.1 coarse-grained protein force field."""
    def __init__(self):
        # parameters are defined here for the following (protein) forcefields:
        self.name = 'martini21'
        # Charged types:
        self.charges = {"Qd":1, "Qa":-1, "SQd":1, "SQa":-1, "RQd":1, "AQa":-1} #@#
        #----+---------------------+
        ## A | BACKBONE PARAMETERS |
        #----+---------------------+
        #
        # bbss lists the one letter secondary structure code
        # bbdef lists the corresponding default backbone beads
        # bbtyp lists the corresponding residue specific backbone beads
        #
        # bbd lists the structure specific backbone bond lengths
        # bbkb lists the corresponding bond force constants
        #
        # bba lists the structure specific angles
        # bbka lists the corresponding angle force constants
        #
        # bbd lists the structure specific dihedral angles
        # bbkd lists the corresponding force constants
        #
        # -=NOTE=-
        # if the secondary structure types differ between bonded atoms
        # the bond is assigned the lowest corresponding force constant
        #
        # -=NOTE=-
        # if proline is anywhere in the helix, the BBB angle changes for
        # all residues
        #
        ###############################################################################################
        ## BEADS ## #
        # All tuples below are positionally matched to bbss:
        # F E H 1 2 3 T S C # SS one letter
        self.bbdef = spl(" N0 Nda N0 Nd Na Nda Nda P5 P5") # Default beads #@#
        self.bbtyp = { # #@#
            "ALA": spl(" C5 N0 C5 N0 N0 N0 N0 P4 P4"),# ALA specific #@#
            "PRO": spl(" C5 N0 C5 N0 Na N0 N0 Na Na"),# PRO specific #@#
            "HYP": spl(" C5 N0 C5 N0 N0 N0 N0 Na Na") # HYP specific #@#
        } # #@#
        ## BONDS ## #
        self.bbldef = (.365, .350, .350, .350, .350, .350, .350, .350, .350) # BB bond lengths #@#
        self.bbkb = (1250, 1250, 1250, 1250, 1250, 1250, 500, 400, 400) # BB bond kB #@#
        self.bbltyp = {} # #@#
        self.bbkbtyp = {} # #@#
        ## ANGLES ## #
        self.bbadef = ( 119.2,134, 96, 96, 96, 96, 100, 130, 127) # BBB angles #@#
        self.bbka = ( 150, 25, 700, 700, 700, 700, 25, 25, 25) # BBB angle kB #@#
        self.bbatyp = { # #@#
            "PRO": ( 119.2,134, 98, 98, 98, 98, 100, 130, 127), # PRO specific #@#
            "HYP": ( 119.2,134, 98, 98, 98, 98, 100, 130, 127) # PRO specific #@#
        } # #@#
        self.bbkatyp = { # #@#
            "PRO": ( 150, 25, 100, 100, 100, 100, 25, 25, 25), # PRO specific #@#
            "HYP": ( 150, 25, 100, 100, 100, 100, 25, 25, 25) # PRO specific #@#
        } # #@#
        ## DIHEDRALS ## #
        self.bbddef = ( 90.7, 0, -120, -120, -120, -120) # BBBB dihedrals #@#
        self.bbkd = ( 100, 10, 400, 400, 400, 400) # BBBB kB #@#
        self.bbdmul = ( 1, 1, 1, 1, 1, 1) # BBBB mltplcty #@#
        self.bbdtyp = {} # #@#
        self.bbkdtyp = {} # #@#
        #
        ###############################################################################################
        # Some Forcefields use the Ca position to position the BB-bead (me like!)
        # martini 2.1 doesn't
        self.ca2bb = False
        # BBS angle, equal for all ss types
        # Connects BB(i-1),BB(i),SC(i), except for first residue: BB(i+1),BB(i),SC(i)
        # ANGLE Ka
        self.bbsangle = [ 100, 25] #@#
        # Bonds for extended structures (more stable than using dihedrals)
        # LENGTH FORCE
        self.ebonds = { #@#
            'short': [ .640, 2500], #@#
            'long' : [ .970, 2500] #@#
        } #@#
        #----+-----------------------+
        ## B | SIDE CHAIN PARAMETERS |
        #----+-----------------------+
        # To be compatible with Elnedyn, all parameters are explicitly defined, even if they are double.
        self.sidechains = {
            #RES# BEADS BONDS ANGLES DIHEDRALS
            # BB-SC SC-SC BB-SC-SC SC-SC-SC
            "TRP": [spl("SC4 SP1 SC4 SC4"),[(0.300,5000)]+[(0.270,None) for i in range(5)], [(210,50),(90,50),(90,50)], [(0,50),(0,200)]],
            "TYR": [spl("SC4 SC4 SP1"), [(0.320,5000), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
            "PHE": [spl("SC4 SC4 SC4"), [(0.310,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
            "HIS": [spl("SC4 SP1 SP1"), [(0.320,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
            "HIH": [spl("SC4 SP1 SQd"), [(0.320,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
            "ARG": [spl("N0 Qd"), [(0.330,5000), (0.340,5000)], [(180,25)]],
            "LYS": [spl("C3 Qd"), [(0.330,5000), (0.280,5000)], [(180,25)]],
            "CYS": [spl("C5"), [(0.310,7500)]],
            "ASP": [spl("Qa"), [(0.320,7500)]],
            "GLU": [spl("Qa"), [(0.400,5000)]],
            "ILE": [spl("AC1"), [(0.310,None)]],
            "LEU": [spl("AC1"), [(0.330,7500)]],
            "MET": [spl("C5"), [(0.400,2500)]],
            "ASN": [spl("P5"), [(0.320,5000)]],
            "PRO": [spl("AC2"), [(0.300,7500)]],
            "HYP": [spl("P1"), [(0.300,7500)]],
            "GLN": [spl("P4"), [(0.400,5000)]],
            "SER": [spl("P1"), [(0.250,7500)]],
            "THR": [spl("P1"), [(0.260,None)]],
            "VAL": [spl("AC2"), [(0.265,None)]],
            "ALA": [],
            "GLY": [],
        }
        # Not all (eg Elnedyn) forcefields use backbone-backbone-sidechain angles and BBBB-dihedrals.
        self.UseBBSAngles = True
        self.UseBBBBDihedrals = True
        # Martini 2.2p has polar and charged residues with separate charges;
        # plain 2.1 has none, so both lists are empty here.
        self.polar = []
        self.charged = []
        # If masses or charges diverge from standard (45/72 and -/+1) they are defined here.
        self.mass_charge = {
            #RES MASS CHARGE
        }
        # Defines the connectivity between beads
        self.connectivity = {
            #RES BONDS ANGLES DIHEDRALS V-SITE
            "TRP": [[(0,1),(1,2),(1,3),(2,3),(2,4),(3,4)], [(0,1,2),(0,1,3)], [(0,2,3,1),(1,2,4,3)]],
            "TYR": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "PHE": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "HIS": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "HIH": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "GLN": [[(0,1)]],
            "ASN": [[(0,1)]],
            "SER": [[(0,1)]],
            "THR": [[(0,1)]],
            "ARG": [[(0,1),(1,2)], [(0,1,2)]],
            "LYS": [[(0,1),(1,2)], [(0,1,2)]],
            "ASP": [[(0,1)]],
            "GLU": [[(0,1)]],
            "CYS": [[(0,1)]],
            "ILE": [[(0,1)]],
            "LEU": [[(0,1)]],
            "MET": [[(0,1)]],
            "PRO": [[(0,1)]],
            "HYP": [[(0,1)]],
            "VAL": [[(0,1)]],
            "ALA": [],
            "GLY": [],
        }
        #----+----------------+
        ## C | SPECIAL BONDS |
        #----+----------------+
        self.special = {
            # Used for sulfur bridges
            # ATOM 1 ATOM 2 BOND LENGTH FORCE CONSTANT
            (("SC1","CYS"), ("SC1","CYS")): (0.24, None),
        }
        # Whether this force field always applies an elastic network
        # (set per force field; off for martini 2.1)
        self.ElasticNetwork = False
        # Elastic network bonds shouldn't lead to exclusions (type 6)
        # But Elnedyn has been parametrized with type 1.
        self.EBondType = 6
        #----+----------------+
        ## D | INTERNAL STUFF |
        #----+----------------+
        # Derived lookup dictionaries, keyed by ss one-letter code (bbss).
        ## BACKBONE BEAD TYPE ##
        # Dictionary of default bead types (*D)
        self.bbBeadDictD = hash(bbss,self.bbdef)
        # Dictionary of dictionaries of types for specific residues (*S)
        self.bbBeadDictS = dict([(i,hash(bbss,self.bbtyp[i])) for i in self.bbtyp.keys()])
        ## BB BOND TYPE ##
        # Dictionary of default bond types (*D)
        self.bbBondDictD = hash(bbss,zip(self.bbldef,self.bbkb))
        # Dictionary of dictionaries for specific types (*S)
        self.bbBondDictS = dict([(i,hash(bbss,zip(self.bbltyp[i],self.bbkbtyp[i]))) for i in self.bbltyp.keys()])
        # This is tricky to read, but it gives the right bondlength/force constant
        ## BBB ANGLE TYPE ##
        # Dictionary of default angle types (*D)
        self.bbAngleDictD = hash(bbss,zip(self.bbadef,self.bbka))
        # Dictionary of dictionaries for specific types (*S)
        self.bbAngleDictS = dict([(i,hash(bbss,zip(self.bbatyp[i],self.bbkatyp[i]))) for i in self.bbatyp.keys()])
        ## BBBB DIHEDRAL TYPE ##
        # Dictionary of default dihedral types (*D)
        self.bbDihedDictD = hash(bbss,zip(self.bbddef,self.bbkd,self.bbdmul))
        # Dictionary of dictionaries for specific types (*S)
        self.bbDihedDictS = dict([(i,hash(bbss,zip(self.bbdtyp[i],self.bbkdtyp[i]))) for i in self.bbdtyp.keys()])

    # The following function returns the backbone bead for a given residue and
    # secondary structure type.
    # 1. Look up the proper dictionary for the residue
    # 2. Get the proper type from it for the secondary structure
    # If the residue is not in the dictionary of specials, use the default
    # If the secondary structure is not listed (in the residue specific
    # dictionary) revert to the default.
    def bbGetBead(self,r1,ss="C"):
        """Return the backbone bead type for residue *r1* with ss type *ss*."""
        return self.bbBeadDictS.get(r1,self.bbBeadDictD).get(ss,self.bbBeadDictD.get(ss))

    def bbGetBond(self,r,a,ss):
        """Return (length, force constant) for the BB bond between the two
        residues in *r* with ss types *ss*: mean length, weakest constant."""
        # Retrieve parameters for each residue from table defined above
        b1 = self.bbBondDictS.get(r[0],self.bbBondDictD).get(ss[0],self.bbBondDictD.get(ss[0]))
        b2 = self.bbBondDictS.get(r[1],self.bbBondDictD).get(ss[1],self.bbBondDictD.get(ss[1]))
        # Determine which parameters to use for the bond
        return ( (b1[0]+b2[0])/2, min(b1[1],b2[1]) )

    def bbGetAngle(self,r,ca,ss):
        """Return (angle, force constant) for the BBB angle over the three
        residues in *r* with ss types *ss*; *ca* is accepted for interface
        compatibility but not used here."""
        # PRO in helices is dominant
        if r[1] == "PRO" and ss[1] in "H123":
            return self.bbAngleDictS["PRO"].get(ss[1])
        else:
            # Retrieve parameters for each residue from table defined above
            a = [ self.bbAngleDictS.get(r[0],self.bbAngleDictD).get(ss[0],self.bbAngleDictD.get(ss[0])),
                  self.bbAngleDictS.get(r[1],self.bbAngleDictD).get(ss[1],self.bbAngleDictD.get(ss[1])),
                  self.bbAngleDictS.get(r[2],self.bbAngleDictD).get(ss[2],self.bbAngleDictD.get(ss[2])) ]
            # Sort according to force constant
            a.sort(key=lambda i: (i[1],i[0]))
            # This selects the set with the smallest force constant and the smallest angle
            return a[0]

    def messages(self):
        '''Prints any force-field specific logging messages.'''
        import logging
        logging.info('Note: Cysteine bonds are 0.24 nm constraints, instead of the published 0.39nm/5000kJ/mol.')
################################
## 6 # FORCE FIELD PARAMETERS ## -> @FF <-
################################
class martini21p:
def __init__(self):
# parameters are defined here for the following (protein) forcefields:
self.name = 'martini21p'
# Charged types:
self.charges = {"Qd":1, "Qa":-1, "SQd":1, "SQa":-1, "RQd":1, "AQa":-1} #@#
#----+---------------------+
## A | BACKBONE PARAMETERS |
#----+---------------------+
#
# bbss lists the one letter secondary structure code
# bbdef lists the corresponding default backbone beads
# bbtyp lists the corresponding residue specific backbone beads
#
# bbd lists the structure specific backbone bond lengths
# bbkb lists the corresponding bond force constants
#
# bba lists the structure specific angles
# bbka lists the corresponding angle force constants
#
# bbd lists the structure specific dihedral angles
# bbkd lists the corresponding force constants
#
# -=NOTE=-
# if the secondary structure types differ between bonded atoms
# the bond is assigned the lowest corresponding force constant
#
# -=NOTE=-
# if proline is anywhere in the helix, the BBB angle changes for
# all residues
#
###############################################################################################
## BEADS ## #
# F E H 1 2 3 T S C # SS one letter
self.bbdef = spl(" N0 Nda N0 Nd Na Nda Nda P5 P5") # Default beads #@#
self.bbtyp = { # #@#
"ALA": spl(" C5 N0 C5 N0 N0 N0 N0 P4 P4"),# ALA specific #@#
"PRO": spl(" C5 N0 C5 N0 Na N0 N0 Na Na"),# PRO specific #@#
"HYP": spl(" C5 N0 C5 N0 N0 N0 N0 Na Na") # HYP specific #@#
} # #@#
## BONDS ## #
self.bbldef = (.365, .350, .350, .350, .350, .350, .350, .350, .350) # BB bond lengths #@#
self.bbkb = (1250, 1250, 1250, 1250, 1250, 1250, 500, 400, 400) # BB bond kB #@#
self.bbltyp = {} # #@#
self.bbkbtyp = {} # #@#
## ANGLES ## #
self.bbadef = ( 119.2,134, 96, 96, 96, 96, 100, 130, 127) # BBB angles #@#
self.bbka = ( 150, 25, 700, 700, 700, 700, 25, 25, 25) # BBB angle kB #@#
self.bbatyp = { # #@#
"PRO": ( 119.2,134, 98, 98, 98, 98, 100, 130, 127), # PRO specific #@#
"HYP": ( 119.2,134, 98, 98, 98, 98, 100, 130, 127) # PRO specific #@#
} # #@#
self.bbkatyp = { # #@#
"PRO": ( 150, 25, 100, 100, 100, 100, 25, 25, 25), # PRO specific #@#
"HYP": ( 150, 25, 100, 100, 100, 100, 25, 25, 25) # PRO specific #@#
} # #@#
## DIHEDRALS ## #
self.bbddef = ( 90.7, 0, -120, -120, -120, -120) # BBBB dihedrals #@#
self.bbkd = ( 100, 10, 400, 400, 400, 400) # BBBB kB #@#
self.bbdmul = ( 1, 1, 1, 1, 1, 1) # BBBB mltplcty #@#
self.bbdtyp = {} # #@#
self.bbkdtyp = {} # #@#
#
###############################################################################################
# Some Forcefields use the Ca position to position the BB-bead (me like!)
# martini 2.1 doesn't
self.ca2bb = False
# BBS angle, equal for all ss types
# Connects BB(i-1),BB(i),SC(i), except for first residue: BB(i+1),BB(i),SC(i)
# ANGLE Ka
self.bbsangle = [ 100, 25] #@#
# Bonds for extended structures (more stable than using dihedrals)
# LENGTH FORCE
self.ebonds = { #@#
'short': [ .640, 2500], #@#
'long' : [ .970, 2500] #@#
} #@#
#----+-----------------------+
## B | SIDE CHAIN PARAMETERS |
#----+-----------------------+
# To be compatible with Elnedyn, all parameters are explicitly defined, even if they are double.
self.sidechains = {
#RES# BEADS BONDS ANGLES DIHEDRALS
# BB-SC SC-SC BB-SC-SC SC-SC-SC
"TRP": [spl("SC4 SP1 SC4 SC4"),[(0.300,5000)]+[(0.270,None) for i in range(5)], [(210,50),(90,50),(90,50)], [(0,50),(0,200)]],
"TYR": [spl("SC4 SC4 SP1"), [(0.320,5000), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
"PHE": [spl("SC4 SC4 SC4"), [(0.310,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
"HIS": [spl("SC4 SP1 SP1"), [(0.320,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
"HIH": [spl("SC4 SP1 SQd"), [(0.320,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
"ARG": [spl("N0 Qd"), [(0.330,5000), (0.340,5000)], [(180,25)]],
"LYS": [spl("C3 Qd"), [(0.330,5000), (0.280,5000)], [(180,25)]],
"CYS": [spl("C5"), [(0.310,7500)]],
"ASP": [spl("Qa"), [(0.320,7500)]],
"GLU": [spl("Qa"), [(0.400,5000)]],
"ILE": [spl("C1"), [(0.310,None)]],
"LEU": [spl("C1"), [(0.330,7500)]],
"MET": [spl("C5"), [(0.400,2500)]],
"ASN": [spl("P5"), [(0.320,5000)]],
"PRO": [spl("C2"), [(0.300,7500)]],
"HYP": [spl("P1"), [(0.300,7500)]],
"GLN": [spl("P4"), [(0.400,5000)]],
"SER": [spl("P1"), [(0.250,7500)]],
"THR": [spl("P1"), [(0.260,None)]],
"VAL": [spl("C2"), [(0.265,None)]],
"ALA": [],
"GLY": [],
}
# Not all (eg Elnedyn) forcefields use backbone-backbone-sidechain angles and BBBB-dihedrals.
self.UseBBSAngles = True
self.UseBBBBDihedrals = True
# Martini 2.2p has polar and charged residues with seperate charges.
self.polar = []
self.charged = []
# If masses or charged diverge from standard (45/72 and -/+1) they are defined here.
self.mass_charge = {
#RES MASS CHARGE
}
# Defines the connectivity between between beads
self.connectivity = {
#RES BONDS ANGLES DIHEDRALS V-SITE
"TRP": [[(0,1),(1,2),(1,3),(2,3),(2,4),(3,4)], [(0,1,2),(0,1,3)], [(0,2,3,1),(1,2,4,3)]],
"TYR": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
"PHE": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
"HIS": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
"HIH": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
"GLN": [[(0,1)]],
"ASN": [[(0,1)]],
"SER": [[(0,1)]],
"THR": [[(0,1)]],
"ARG": [[(0,1),(1,2)], [(0,1,2)]],
"LYS": [[(0,1),(1,2)], [(0,1,2)]],
"ASP": [[(0,1)]],
"GLU": [[(0,1)]],
"CYS": [[(0,1)]],
"ILE": [[(0,1)]],
"LEU": [[(0,1)]],
"MET": [[(0,1)]],
"PRO": [[(0,1)]],
"HYP": [[(0,1)]],
"VAL": [[(0,1)]],
"ALA": [],
"GLY": [],
}
#----+----------------+
## C | SPECIAL BONDS |
#----+----------------+
self.special = {
# Used for sulfur bridges
# ATOM 1 ATOM 2 BOND LENGTH FORCE CONSTANT
(("SC1","CYS"), ("SC1","CYS")): (0.24, None),
}
# By default use an elastic network
self.ElasticNetwork = False
# Elastic networks bond shouldn't lead to exclusions (type 6)
# But Elnedyn has been parametrized with type 1.
self.EBondType = 6
#----+----------------+
## D | INTERNAL STUFF |
#----+----------------+
## BACKBONE BEAD TYPE ##
# Dictionary of default bead types (*D)
self.bbBeadDictD = hash(bbss,self.bbdef)
# Dictionary of dictionaries of types for specific residues (*S)
self.bbBeadDictS = dict([(i,hash(bbss,self.bbtyp[i])) for i in self.bbtyp.keys()])
## BB BOND TYPE ##
# Dictionary of default abond types (*D)
self.bbBondDictD = hash(bbss,zip(self.bbldef,self.bbkb))
# Dictionary of dictionaries for specific types (*S)
self.bbBondDictS = dict([(i,hash(bbss,zip(self.bbltyp[i],self.bbkbtyp[i]))) for i in self.bbltyp.keys()])
# This is tricky to read, but it gives the right bondlength/force constant
## BBB ANGLE TYPE ##
# Dictionary of default angle types (*D)
self.bbAngleDictD = hash(bbss,zip(self.bbadef,self.bbka))
# Dictionary of dictionaries for specific types (*S)
self.bbAngleDictS = dict([(i,hash(bbss,zip(self.bbatyp[i],self.bbkatyp[i]))) for i in self.bbatyp.keys()])
## BBBB DIHEDRAL TYPE ##
# Dictionary of default dihedral types (*D)
self.bbDihedDictD = hash(bbss,zip(self.bbddef,self.bbkd,self.bbdmul))
# Dictionary of dictionaries for specific types (*S)
self.bbDihedDictS = dict([(i,hash(bbss,zip(self.bbdtyp[i],self.bbkdtyp[i]))) for i in self.bbdtyp.keys()])
# The following function returns the backbone bead for a given residue and
# secondary structure type.
# 1. Look up the proper dictionary for the residue
# 2. Get the proper type from it for the secondary structure
# If the residue is not in the dictionary of specials, use the default
# If the secondary structure is not listed (in the residue specific
# dictionary) revert to the default.
def bbGetBead(self,r1,ss="C"):
    """Return the backbone bead type for residue *r1* with secondary structure *ss*.

    Residue-specific bead tables (bbBeadDictS) take precedence; any residue
    or secondary-structure code not listed there falls back to the default
    table (bbBeadDictD).
    """
    per_residue = self.bbBeadDictS.get(r1, self.bbBeadDictD)
    return per_residue.get(ss, self.bbBeadDictD.get(ss))
def bbGetBond(self,r,a,ss):
    """Return (length, force constant) for the bond between two backbone beads.

    The bond length is the mean of the two residues' table values; the force
    constant is the weaker (smaller) of the two.  The *a* argument is accepted
    for interface compatibility with other force-field classes but unused here.
    """
    def lookup(residue, sstype):
        # Residue-specific table first, default table as fallback.
        table = self.bbBondDictS.get(residue, self.bbBondDictD)
        return table.get(sstype, self.bbBondDictD.get(sstype))
    first = lookup(r[0], ss[0])
    second = lookup(r[1], ss[1])
    return ((first[0] + second[0]) / 2, min(first[1], second[1]))
def bbGetAngle(self,r,ca,ss):
    """Return (angle, force constant) for the BBB angle centred on residue r[1].

    The *ca* argument is accepted for interface compatibility (Elnedyn uses
    Calpha coordinates) but unused here.
    """
    # A proline at the centre of a helical stretch dictates the angle outright.
    if r[1] == "PRO" and ss[1] in "H123":
        return self.bbAngleDictS["PRO"].get(ss[1])
    # Otherwise gather the candidate parameter sets of all three residues,
    # residue-specific tables first, default table as fallback.
    candidates = []
    for idx in (0, 1, 2):
        table = self.bbAngleDictS.get(r[idx], self.bbAngleDictD)
        candidates.append(table.get(ss[idx], self.bbAngleDictD.get(ss[idx])))
    # Pick the set with the smallest force constant; ties broken by smallest angle.
    return min(candidates, key=lambda pair: (pair[1], pair[0]))
def messages(self):
    '''Prints any force-field specific logging messages.'''
    # Import locally, matching every sibling force-field class (martini22,
    # martini22p, elnedyn): without this, the bare `logging` reference raises
    # NameError unless the module happens to import logging at top level.
    import logging
    logging.info('Note: Cysteine bonds are 0.24 nm constraints, instead of the published 0.39nm/5000kJ/mol.')
################################
## 6 # FORCE FIELD PARAMETERS ## -> @FF <-
################################
# New martini 2.2 parameters.
# Changed:
# Unstructured Pro backbone bead
# Proline side chains
# Phe sidechain
# Trp sidechain
# Helix BB-bonds to constraint
class martini22:
    """Parameter set for the MARTINI 2.2 protein force field.

    All parameters are stored as plain instance attributes in __init__; the
    bbGet* methods translate (residue, secondary-structure) pairs into bonded
    parameters.  NOTE(review): `spl`, `bbss` and `hash` (a module-local helper
    that shadows the builtin and zips keys to values) are assumed to be
    defined earlier in the file -- confirm against the module top.  Lines
    tagged '#@#' appear to be machine-readable parameter tags and are left
    byte-for-byte untouched.
    """
    def __init__(self):
        # parameters are defined here for the following (protein) forcefields:
        self.name = 'martini22'
        # Charged types:
        self.charges = {"Qd":1, "Qa":-1, "SQd":1, "SQa":-1, "RQd":1, "AQa":-1}                           #@#
        #----+---------------------+
        ## A | BACKBONE PARAMETERS |
        #----+---------------------+
        #
        # bbss  lists the one letter secondary structure code
        # bbdef lists the corresponding default backbone beads
        # bbtyp lists the corresponding residue specific backbone beads
        #
        # bbd   lists the structure specific backbone bond lengths
        # bbkb  lists the corresponding bond force constants
        #
        # bba   lists the structure specific angles
        # bbka  lists the corresponding angle force constants
        #
        # bbd   lists the structure specific dihedral angles
        # bbkd  lists the corresponding force constants
        #
        # -=NOTE=-
        #  if the secondary structure types differ between bonded atoms
        #  the bond is assigned the lowest corresponding force constant
        #
        # -=NOTE=-
        # if proline is anywhere in the helix, the BBB angle changes for
        # all residues
        #
        ###############################################################################################
        ## BEADS ##                                                                     #
        #                 F     E     H     1     2     3     T     S     C    # SS one letter
        self.bbdef    =    spl(" N0   Nda   N0    Nd    Na   Nda   Nda    P5    P5")  # Default beads   #@#
        self.bbtyp    = {                                                             #                 #@#
                    "ALA": spl(" C5    N0    C5    N0    N0    N0    N0    P4    P4"),  # ALA specific  #@#
                    "PRO": spl(" C5    N0    C5    N0    Na    N0    N0    P4    P4"),  # PRO specific  #@#
                    "HYP": spl(" C5    N0    C5    N0    N0    N0    N0    P4    P4")   # HYP specific  #@#
        }                                                                             #                 #@#
        ## BONDS ##                                                                     #
        self.bbldef   =       (.365, .350, .310, .310, .310, .310,  .350, .350, .350)  # BB bond lengths  #@#
        self.bbkb     =       (1250, 1250, None, None, None, None,  1250, 1250, 1250)  # BB bond kB     #@#
        self.bbltyp   = {}                                                            #                 #@#
        self.bbkbtyp  = {}                                                            #                 #@#
        ## ANGLES ##                                                                    #
        self.bbadef   =       ( 119.2,134,   96,   96,   96,   96,  100,  130,  127)  # BBB angles      #@#
        self.bbka     =       ( 150,   25,  700,  700,  700,  700,   20,   20,   20)  # BBB angle kB    #@#
        self.bbatyp   = {                                                             #                 #@#
               "PRO":         ( 119.2,134,   98,   98,   98,   98,  100,  130,  127), # PRO specific    #@#
               "HYP":         ( 119.2,134,   98,   98,   98,   98,  100,  130,  127)  # PRO specific    #@#
        }                                                                             #                 #@#
        self.bbkatyp  = {                                                             #                 #@#
               "PRO":         ( 150,   25,  100,  100,  100,  100,   25,   25,   25), # PRO specific    #@#
               "HYP":         ( 150,   25,  100,  100,  100,  100,   25,   25,   25)  # PRO specific    #@#
        }                                                                             #                 #@#
        ## DIHEDRALS ##                                                                 #
        self.bbddef   =       ( 90.7,   0, -120, -120, -120, -120)                    # BBBB dihedrals  #@#
        self.bbkd     =       ( 100,   10,  400,  400,  400,  400)                    # BBBB kB         #@#
        self.bbdmul   =       (   1,    1,    1,    1,    1,    1)                    # BBBB mltplcty   #@#
        self.bbdtyp   = {}                                                            #                 #@#
        self.bbkdtyp  = {}                                                            #                 #@#
        #
        ###############################################################################################
        # Some Forcefields use the Ca position to position the BB-bead (me like!)
        # martini 2.1 doesn't
        self.ca2bb = False
        # BBS angle, equal for all ss types
        # Connects BB(i-1),BB(i),SC(i), except for first residue: BB(i+1),BB(i),SC(i)
        #                 ANGLE   Ka
        self.bbsangle =      [   100,  25]                                            #@#
        # Bonds for extended structures (more stable than using dihedrals)
        #                  LENGTH FORCE
        self.ebonds   = {                                                             #@#
            'short': [ .640, 2500],                                                   #@#
            'long' : [ .970, 2500]                                                    #@#
        }                                                                             #@#
        #----+-----------------------+
        ## B | SIDE CHAIN PARAMETERS |
        #----+-----------------------+
        # To be compatible with Elnedyn, all parameters are explicitly defined, even if they are double.
        self.sidechains = {
            #RES#   BEADS                       BONDS                                                           ANGLES              DIHEDRALS
            #                                   BB-SC          SC-SC                                            BB-SC-SC  SC-SC-SC
            "TRP": [spl("SC4 SNd SC5 SC5"),[(0.300,5000)]+[(0.270,None) for i in range(5)], [(210,50),(90,50),(90,50)], [(0,50),(0,200)]],
            "TYR": [spl("SC4 SC4 SP1"), [(0.320,5000), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
            "PHE": [spl("SC5 SC5 SC5"), [(0.310,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
            "HIS": [spl("SC4 SP1 SP1"), [(0.320,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
            "HIH": [spl("SC4 SP1 SQd"), [(0.320,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
            "ARG": [spl("N0 Qd"), [(0.330,5000), (0.340,5000)], [(180,25)]],
            "LYS": [spl("C3 Qd"), [(0.330,5000), (0.280,5000)], [(180,25)]],
            "CYS": [spl("C5"), [(0.310,7500)]],
            "ASP": [spl("Qa"), [(0.320,7500)]],
            "GLU": [spl("Qa"), [(0.400,5000)]],
            "ILE": [spl("AC1"), [(0.310,None)]],
            "LEU": [spl("AC1"), [(0.330,7500)]],
            "MET": [spl("C5"), [(0.400,2500)]],
            "ASN": [spl("P5"), [(0.320,5000)]],
            "PRO": [spl("C3"), [(0.300,7500)]],
            "HYP": [spl("P1"), [(0.300,7500)]],
            "GLN": [spl("P4"), [(0.400,5000)]],
            "SER": [spl("P1"), [(0.250,7500)]],
            "THR": [spl("P1"), [(0.260,None)]],
            "VAL": [spl("AC2"), [(0.265,None)]],
            "ALA": [],
            "GLY": [],
        }
        # Not all (eg Elnedyn) forcefields use backbone-backbone-sidechain angles and BBBB-dihedrals.
        self.UseBBSAngles = True
        self.UseBBBBDihedrals = True
        # Martini 2.2p has polar and charged residues with separate charges.
        self.polar = []
        self.charged = []
        # If masses or charged diverge from standard (45/72 and -/+1) they are defined here.
        self.mass_charge = {
            #RES   MASS    CHARGE
        }
        # Defines the connectivity between beads
        self.connectivity = {
            #RES       BONDS                                             ANGLES                DIHEDRALS       V-SITE
            "TRP": [[(0,1),(1,2),(1,3),(2,3),(2,4),(3,4)], [(0,1,2),(0,1,3)], [(0,2,3,1),(1,2,4,3)]],
            "TYR": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "PHE": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "HIS": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "HIH": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "GLN": [[(0,1)]],
            "ASN": [[(0,1)]],
            "SER": [[(0,1)]],
            "THR": [[(0,1)]],
            "ARG": [[(0,1),(1,2)], [(0,1,2)]],
            "LYS": [[(0,1),(1,2)], [(0,1,2)]],
            "ASP": [[(0,1)]],
            "GLU": [[(0,1)]],
            "CYS": [[(0,1)]],
            "ILE": [[(0,1)]],
            "LEU": [[(0,1)]],
            "MET": [[(0,1)]],
            "PRO": [[(0,1)]],
            "HYP": [[(0,1)]],
            "VAL": [[(0,1)]],
            "ALA": [],
            "GLY": [],
        }
        #----+----------------+
        ## C | SPECIAL BONDS  |
        #----+----------------+
        self.special = {
            # Used for sulfur bridges
            # ATOM 1         ATOM 2          BOND LENGTH   FORCE CONSTANT
            (("SC1","CYS"), ("SC1","CYS")):     (0.24,         None),
        }
        # By default use an elastic network
        self.ElasticNetwork = False
        # Elastic networks bond shouldn't lead to exclusions (type 6)
        # But Elnedyn has been parametrized with type 1.
        self.EBondType = 6
        #----+----------------+
        ## D | INTERNAL STUFF |
        #----+----------------+
        # The dictionaries below are derived lookup tables: secondary-structure
        # letter -> parameter, built once from the flat tuples defined above.
        ## BACKBONE BEAD TYPE ##
        # Dictionary of default bead types (*D)
        self.bbBeadDictD  = hash(bbss,self.bbdef)
        # Dictionary of dictionaries of types for specific residues (*S)
        self.bbBeadDictS  = dict([(i,hash(bbss,self.bbtyp[i])) for i in self.bbtyp.keys()])
        ## BB BOND TYPE ##
        # Dictionary of default abond types (*D)
        self.bbBondDictD = hash(bbss,zip(self.bbldef,self.bbkb))
        # Dictionary of dictionaries for specific types (*S)
        self.bbBondDictS = dict([(i,hash(bbss,zip(self.bbltyp[i],self.bbkbtyp[i]))) for i in self.bbltyp.keys()])
        # This is tricky to read, but it gives the right bondlength/force constant
        ## BBB ANGLE TYPE ##
        # Dictionary of default angle types (*D)
        self.bbAngleDictD = hash(bbss,zip(self.bbadef,self.bbka))
        # Dictionary of dictionaries for specific types (*S)
        self.bbAngleDictS = dict([(i,hash(bbss,zip(self.bbatyp[i],self.bbkatyp[i]))) for i in self.bbatyp.keys()])
        ## BBBB DIHEDRAL TYPE ##
        # Dictionary of default dihedral types (*D)
        self.bbDihedDictD = hash(bbss,zip(self.bbddef,self.bbkd,self.bbdmul))
        # Dictionary of dictionaries for specific types (*S)
        self.bbDihedDictS = dict([(i,hash(bbss,zip(self.bbdtyp[i],self.bbkdtyp[i]))) for i in self.bbdtyp.keys()])
    # The following function returns the backbone bead for a given residue and
    # secondary structure type.
    # 1. Look up the proper dictionary for the residue
    # 2. Get the proper type from it for the secondary structure
    # If the residue is not in the dictionary of specials, use the default
    # If the secondary structure is not listed (in the residue specific
    # dictionary) revert to the default.
    def bbGetBead(self,r1,ss="C"):
        # Return the backbone bead type for residue r1 with secondary structure ss.
        return self.bbBeadDictS.get(r1,self.bbBeadDictD).get(ss,self.bbBeadDictD.get(ss))
    def bbGetBond(self,r,a,ss):
        # Return (length, force constant): mean length of the two residues'
        # values, weakest (smallest) of the two force constants.
        # Retrieve parameters for each residue from table defined above
        b1 = self.bbBondDictS.get(r[0],self.bbBondDictD).get(ss[0],self.bbBondDictD.get(ss[0]))
        b2 = self.bbBondDictS.get(r[1],self.bbBondDictD).get(ss[1],self.bbBondDictD.get(ss[1]))
        # Determine which parameters to use for the bond
        return ( (b1[0]+b2[0])/2, min(b1[1],b2[1]) )
    def bbGetAngle(self,r,ca,ss):
        # Return (angle, force constant) for the BBB angle centred on r[1].
        # PRO in helices is dominant
        if r[1] == "PRO" and ss[1] in "H123":
            return self.bbAngleDictS["PRO"].get(ss[1])
        else:
            # Retrieve parameters for each residue from table defined above
            a = [ self.bbAngleDictS.get(r[0],self.bbAngleDictD).get(ss[0],self.bbAngleDictD.get(ss[0])),
                  self.bbAngleDictS.get(r[1],self.bbAngleDictD).get(ss[1],self.bbAngleDictD.get(ss[1])),
                  self.bbAngleDictS.get(r[2],self.bbAngleDictD).get(ss[2],self.bbAngleDictD.get(ss[2])) ]
            # Sort according to force constant
            a.sort(key=lambda i: (i[1],i[0]))
            # This selects the set with the smallest force constant and the smallest angle
            return a[0]
    def messages(self):
        '''Prints any force-field specific logging messages.'''
        import logging
        logging.warning('Martini version 2.2 is in beta release. It has not been extensively tested and problems might occur.')
        logging.info('Note: Cysteine bonds are 0.24 nm constraints, instead of the published 0.39nm/5000kJ/mol.')
################################
## 6 # FORCE FIELD PARAMETERS ## -> @FF <-
################################
# New martini 2.2p parameters.
# Changed:
# Unstructured Pro backbone bead
# Proline side chains
# Phe sidechain
# Trp sidechain
# Polar beads
# Helix BB-bonds to constraint
# Todo:
# Helix BB-bond length
class martini22p:
    """Parameter set for the MARTINI 2.2p (polarizable side chains) protein force field.

    Like martini22, but polar/charged residues carry explicit partial charges
    on extra 'D' (dummy/charge) beads, reflected in self.polar, self.charged,
    self.mass_charge and the v-site columns of self.sidechains/connectivity.
    NOTE(review): `spl`, `bbss` and `hash` (a module-local helper shadowing
    the builtin) are assumed to be defined earlier in the file.  Lines tagged
    '#@#' appear to be machine-readable parameter tags and are left untouched.
    """
    def __init__(self):
        # parameters are defined here for the following (protein) forcefields:
        self.name = 'martini22p'
        # Charged types:
        self.charges = {"Qd":1, "Qa":-1, "SQd":1, "SQa":-1, "RQd":1, "AQa":-1}                           #@#
        #----+---------------------+
        ## A | BACKBONE PARAMETERS |
        #----+---------------------+
        #
        # bbss  lists the one letter secondary structure code
        # bbdef lists the corresponding default backbone beads
        # bbtyp lists the corresponding residue specific backbone beads
        #
        # bbd   lists the structure specific backbone bond lengths
        # bbkb  lists the corresponding bond force constants
        #
        # bba   lists the structure specific angles
        # bbka  lists the corresponding angle force constants
        #
        # bbd   lists the structure specific dihedral angles
        # bbkd  lists the corresponding force constants
        #
        # -=NOTE=-
        #  if the secondary structure types differ between bonded atoms
        #  the bond is assigned the lowest corresponding force constant
        #
        # -=NOTE=-
        # if proline is anywhere in the helix, the BBB angle changes for
        # all residues
        #
        ###############################################################################################
        ## BEADS ##                                                                     #
        #                 F     E     H     1     2     3     T     S     C    # SS one letter
        self.bbdef    =    spl(" N0   Nda   N0    Nd    Na   Nda   Nda    P5    P5")  # Default beads   #@#
        self.bbtyp    = {                                                             #                 #@#
                    "ALA": spl(" C5    N0    C5    N0    N0    N0    N0    P4    P4"),  # ALA specific  #@#
                    "PRO": spl(" C5    N0    C5    N0    Na    N0    N0    P4    P4"),  # PRO specific  #@#
                    "HYP": spl(" C5    N0    C5    N0    N0    N0    N0    P4    P4")   # HYP specific  #@#
        }                                                                             #                 #@#
        ## BONDS ##                                                                     #
        self.bbldef   =       (.365, .350, .310, .310, .310, .310,  .350, .350, .350)  # BB bond lengths  #@#
        self.bbkb     =       (1250, 1250, None, None, None, None,  1250, 1250, 1250)  # BB bond kB     #@#
        self.bbltyp   = {}                                                            #                 #@#
        self.bbkbtyp  = {}                                                            #                 #@#
        ## ANGLES ##                                                                    #
        self.bbadef   =       ( 119.2,134,   96,   96,   96,   96,  100,  130,  127)  # BBB angles      #@#
        self.bbka     =       ( 150,   25,  700,  700,  700,  700,   25,   25,   25)  # BBB angle kB    #@#
        self.bbatyp   = {                                                             #                 #@#
               "PRO":         ( 119.2,134,   98,   98,   98,   98,  100,  130,  127), # PRO specific    #@#
               "HYP":         ( 119.2,134,   98,   98,   98,   98,  100,  130,  127)  # PRO specific    #@#
        }                                                                             #                 #@#
        self.bbkatyp  = {                                                             #                 #@#
               "PRO":         ( 150,   25,  100,  100,  100,  100,   25,   25,   25), # PRO specific    #@#
               "HYP":         ( 150,   25,  100,  100,  100,  100,   25,   25,   25)  # PRO specific    #@#
        }                                                                             #                 #@#
        ## DIHEDRALS ##                                                                 #
        self.bbddef   =       ( 90.7,   0, -120, -120, -120, -120)                    # BBBB dihedrals  #@#
        self.bbkd     =       ( 100,   10,  400,  400,  400,  400)                    # BBBB kB         #@#
        self.bbdmul   =       (   1,    1,    1,    1,    1,    1)                    # BBBB mltplcty   #@#
        self.bbdtyp   = {}                                                            #                 #@#
        self.bbkdtyp  = {}                                                            #                 #@#
        #
        ###############################################################################################
        # Some Forcefields use the Ca position to position the BB-bead (me like!)
        # martini 2.1 doesn't
        self.ca2bb = False
        # BBS angle, equal for all ss types
        # Connects BB(i-1),BB(i),SC(i), except for first residue: BB(i+1),BB(i),SC(i)
        #                 ANGLE   Ka
        self.bbsangle =      [   100,  25]                                            #@#
        # Bonds for extended structures (more stable than using dihedrals)
        #                  LENGTH FORCE
        self.ebonds   = {                                                             #@#
            'short': [ .640, 2500],                                                   #@#
            'long' : [ .970, 2500]                                                    #@#
        }                                                                             #@#
        #----+-----------------------+
        ## B | SIDE CHAIN PARAMETERS |
        #----+-----------------------+
        # To be compatible with Elnedyn, all parameters are explicitly defined, even if they are double.
        # 'D' beads below are the dummy charge sites of the polarizable side chains.
        self.sidechains = {
            #RES#   BEADS                       BONDS                                                                    ANGLES              DIHEDRALS   V-SITES
            #                                   BB-SC          SC-SC                                                     BB-SC-SC  SC-SC-SC
            "TRP": [spl("SC4 SNd SC5 SC5"),[(0.300,5000)]+[(0.270,None) for i in range(5)], [(210,50),(90,50),(90,50)], [(0,50),(0,200)]],
            "TYR": [spl("SC4 SC4 SP1"), [(0.320,5000), (0.270,None), (0.270,None),(0.270,None)], [(150,50),(150,50)], [(0,50)]],
            "PHE": [spl("SC5 SC5 SC5"), [(0.310,7500), (0.270,None), (0.270,None),(0.270,None)], [(150,50),(150,50)], [(0,50)]],
            "HIS": [spl("SC4 SP1 SP1"), [(0.320,7500), (0.270,None), (0.270,None),(0.270,None)], [(150,50),(150,50)], [(0,50)]],
            "HIH": [spl("SC4 SP1 SQd D"), [(0.320,7500), (0.270,None), (0.270,None),(0.270,None),(0.11,None)],[(150,50),(150,50)], [(0,50)]],
            "GLN": [spl("Nda D D"), [(0.400,5000), (0.280,None)], [], [], [(0.5,)]],
            "ASN": [spl("Nda D D"), [(0.320,5000), (0.280,None)], [], [], [(0.5,)]],
            "SER": [spl("N0 D D"), [(0.250,7500), (0.280,None)], [], [], [(0.5,)]],
            "THR": [spl("N0 D D"), [(0.260,9000), (0.280,None)], [], [], [(0.5,)]],
            "ARG": [spl("N0 Qd D"), [(0.330,5000), (0.340,5000), (0.110,None)], [(180,25)]],
            "LYS": [spl("C3 Qd D"), [(0.330,5000), (0.280,5000), (0.110,None)], [(180,25)]],
            "ASP": [spl("Qa D"), [(0.320,7500), (0.110,None)]],
            "GLU": [spl("Qa D"), [(0.400,5000), (0.110,None)]],
            "CYS": [spl("C5"), [(0.310,7500)]],
            "ILE": [spl("C1"), [(0.310,None)]],
            "LEU": [spl("C1"), [(0.330,7500)]],
            "MET": [spl("C5"), [(0.400,2500)]],
            "PRO": [spl("C3"), [(0.300,7500)]],
            "HYP": [spl("P1"), [(0.300,7500)]],
            "VAL": [spl("C2"), [(0.265,None)]],
            "ALA": [],
            "GLY": [],
        }
        # Not all (eg Elnedyn) forcefields use backbone-backbone-sidechain angles and BBBB-dihedrals.
        self.UseBBSAngles = True
        self.UseBBBBDihedrals = True
        # Martini 2.2p has polar and charged residues with separate charges.
        self.polar   = ["GLN","ASN","SER","THR"]
        self.charged = ["ARG","LYS","ASP","GLU","HIH"]
        # If masses or charged diverge from standard (45/72 and -/+1) they are defined here.
        self.mass_charge = {
            #RES   MASS               CHARGE
            "GLN":[[0,36,36],         [0,0.42,-0.42]],
            "ASN":[[0,36,36],         [0,0.46,-0.46]],
            "SER":[[0,36,36],         [0,0.40,-0.40]],
            "THR":[[0,36,36],         [0,0.36,-0.36]],
            "ARG":[[72,36,36],        [0,0,1]],
            "LYS":[[72,36,36],        [0,0,1]],
            "HIH":[[45,45,36,36],     [0,0,0,1]],
            "ASP":[[36,36],           [0,-1]],
            "GLU":[[36,36],           [0,-1]],
        }
        # Defines the connectivity between beads (fourth column: virtual sites).
        self.connectivity = {
            #RES       BONDS                                             ANGLES                DIHEDRALS    V-SITE
            "TRP": [[(0,1),(1,2),(1,3),(2,3),(2,4),(3,4)], [(0,1,2),(0,1,3)], [(0,2,3,1),(1,2,4,3)]],
            "TYR": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "PHE": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "HIS": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "HIH": [[(0,1),(1,2),(1,3),(2,3),(3,4)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
            "GLN": [[(0,1),(2,3)], [], [], [(1,2,3)]],
            "ASN": [[(0,1),(2,3)], [], [], [(1,2,3)]],
            "SER": [[(0,1),(2,3)], [], [], [(1,2,3)]],
            "THR": [[(0,1),(2,3)], [], [], [(1,2,3)]],
            "ARG": [[(0,1),(1,2),(2,3)], [(0,1,2)]],
            "LYS": [[(0,1),(1,2),(2,3)], [(0,1,2)]],
            "ASP": [[(0,1),(1,2)]],
            "GLU": [[(0,1),(1,2)]],
            "CYS": [[(0,1)]],
            "ILE": [[(0,1)]],
            "LEU": [[(0,1)]],
            "MET": [[(0,1)]],
            "PRO": [[(0,1)]],
            "HYP": [[(0,1)]],
            "VAL": [[(0,1)]],
            "ALA": [],
            "GLY": [],
        }
        #----+----------------+
        ## C | SPECIAL BONDS  |
        #----+----------------+
        self.special = {
            # Used for sulfur bridges
            # ATOM 1         ATOM 2          BOND LENGTH   FORCE CONSTANT
            (("SC1","CYS"), ("SC1","CYS")):     (0.24,         None),
        }
        # By default use an elastic network
        self.ElasticNetwork = False
        # Elastic networks bond shouldn't lead to exclusions (type 6)
        # But Elnedyn has been parametrized with type 1.
        self.EBondType = 6
        #----+----------------+
        ## D | INTERNAL STUFF |
        #----+----------------+
        # Derived lookup tables: secondary-structure letter -> parameter,
        # built once from the flat tuples defined above.
        ## BACKBONE BEAD TYPE ##
        # Dictionary of default bead types (*D)
        self.bbBeadDictD  = hash(bbss,self.bbdef)
        # Dictionary of dictionaries of types for specific residues (*S)
        self.bbBeadDictS  = dict([(i,hash(bbss,self.bbtyp[i])) for i in self.bbtyp.keys()])
        ## BB BOND TYPE ##
        # Dictionary of default abond types (*D)
        self.bbBondDictD = hash(bbss,zip(self.bbldef,self.bbkb))
        # Dictionary of dictionaries for specific types (*S)
        self.bbBondDictS = dict([(i,hash(bbss,zip(self.bbltyp[i],self.bbkbtyp[i]))) for i in self.bbltyp.keys()])
        # This is tricky to read, but it gives the right bondlength/force constant
        ## BBB ANGLE TYPE ##
        # Dictionary of default angle types (*D)
        self.bbAngleDictD = hash(bbss,zip(self.bbadef,self.bbka))
        # Dictionary of dictionaries for specific types (*S)
        self.bbAngleDictS = dict([(i,hash(bbss,zip(self.bbatyp[i],self.bbkatyp[i]))) for i in self.bbatyp.keys()])
        ## BBBB DIHEDRAL TYPE ##
        # Dictionary of default dihedral types (*D)
        self.bbDihedDictD = hash(bbss,zip(self.bbddef,self.bbkd,self.bbdmul))
        # Dictionary of dictionaries for specific types (*S)
        self.bbDihedDictS = dict([(i,hash(bbss,zip(self.bbdtyp[i],self.bbkdtyp[i]))) for i in self.bbdtyp.keys()])
    # The following function returns the backbone bead for a given residue and
    # secondary structure type.
    # 1. Look up the proper dictionary for the residue
    # 2. Get the proper type from it for the secondary structure
    # If the residue is not in the dictionary of specials, use the default
    # If the secondary structure is not listed (in the residue specific
    # dictionary) revert to the default.
    def bbGetBead(self,r1,ss="C"):
        # Return the backbone bead type for residue r1 with secondary structure ss.
        return self.bbBeadDictS.get(r1,self.bbBeadDictD).get(ss,self.bbBeadDictD.get(ss))
    def bbGetBond(self,r,a,ss):
        # Return (length, force constant): mean length of the two residues'
        # values, weakest (smallest) of the two force constants.
        # Retrieve parameters for each residue from table defined above
        b1 = self.bbBondDictS.get(r[0],self.bbBondDictD).get(ss[0],self.bbBondDictD.get(ss[0]))
        b2 = self.bbBondDictS.get(r[1],self.bbBondDictD).get(ss[1],self.bbBondDictD.get(ss[1]))
        # Determine which parameters to use for the bond
        return ( (b1[0]+b2[0])/2, min(b1[1],b2[1]) )
    def bbGetAngle(self,r,ca,ss):
        # Return (angle, force constant) for the BBB angle centred on r[1].
        # PRO in helices is dominant
        if r[1] == "PRO" and ss[1] in "H123":
            return self.bbAngleDictS["PRO"].get(ss[1])
        else:
            # Retrieve parameters for each residue from table defined above
            a = [ self.bbAngleDictS.get(r[0],self.bbAngleDictD).get(ss[0],self.bbAngleDictD.get(ss[0])),
                  self.bbAngleDictS.get(r[1],self.bbAngleDictD).get(ss[1],self.bbAngleDictD.get(ss[1])),
                  self.bbAngleDictS.get(r[2],self.bbAngleDictD).get(ss[2],self.bbAngleDictD.get(ss[2])) ]
            # Sort according to force constant
            a.sort(key=lambda i: (i[1],i[0]))
            # This selects the set with the smallest force constant and the smallest angle
            return a[0]
    def messages(self):
        '''Prints any force-field specific logging messages.'''
        import logging
        logging.warning('Martini version 2.2 is in beta release. It has not been extensively tested and problems might occur.')
        logging.warning('Bead names of charges in sidechains differ between .top/.itp and .pdb.')
        logging.warning('Using names in topology, as Gromacs does, gives the correct result.')
        logging.info('Note: Cysteine bonds are 0.24 nm constraints, instead of the published 0.39nm/5000kJ/mol.')
################################
## 6 # FORCE FIELD PARAMETERS ## -> @FF <-
################################
class elnedyn:
    """Elnedyn (Elastic Network Dynamics) parameter set.

    Differs from the plain MARTINI classes in that the BB bead sits on the
    Calpha position (ca2bb), BB bonds/angles are taken from the input
    structure rather than from tables, and an elastic network is used by
    default.  NOTE(review): `spl`, `bbss`, `hash` (module-local helper
    shadowing the builtin), `distance2`, `cos_angle` and `d2r` are assumed
    to be defined earlier in the file -- confirm against the module top.
    """
    def __init__(self):
        '''The forcefield has been implemented with some changes compared to the published parameters:
        - Backbone-Backbone bonds use high force constant bonds instead of constraints
          (see bbGetBond below and the messages() method).
        - Trp has an extra constraint added to the sidechain.
        - The Backbone-Sidechain bonds with high force constants are replaced by constraints except for Trp.
        '''
        # parameters are defined here for the following (protein) forcefields:
        self.name = 'elnedyn'
        # Charged types:
        self.charges = {"Qd":1, "Qa":-1, "RQd":1, "AQa":-1}                                              #@#
        #----+---------------------+
        ## A | BACKBONE PARAMETERS |
        #----+---------------------+
        #
        # bbss  lists the one letter secondary structure code
        # bbdef lists the corresponding default backbone beads
        # bbtyp lists the corresponding residue specific backbone beads
        #
        # bbd   lists the structure specific backbone bond lengths
        # bbkb  lists the corresponding bond force constants
        #
        # bba   lists the structure specific angles
        # bbka  lists the corresponding angle force constants
        #
        # bbd   lists the structure specific dihedral angles
        # bbkd  lists the corresponding force constants
        #
        # -=NOTE=-
        #  if the secondary structure types differ between bonded atoms
        #  the bond is assigned the lowest corresponding force constant
        #
        # -=NOTE=-
        # if proline is anywhere in the helix, the BBB angle changes for
        # all residues
        #
        ###############################################################################################
        ## BEADS ##                                                                     #
        #                 F     E     H     1     2     3     T     S     C    # SS one letter
        self.bbdef    =    spl(" N0   Nda   N0    Nd    Na   Nda   Nda    P5    P5")  # Default beads   #@#
        self.bbtyp    = {                                                             #                 #@#
                    "ALA": spl(" C5    N0    C5    N0    N0    N0    N0    P4    P4"),  # ALA specific  #@#
                    "PRO": spl(" C5    N0    C5    N0    Na    N0    N0    Na    Na"),  # PRO specific  #@#
                    "HYP": spl(" C5    N0    C5    N0    N0    N0    N0    Na    Na")   # HYP specific  #@#
        }                                                                             #                 #@#
        ## BONDS ##                                                                     #
        self.bbldef   =       (.365, .350, .350, .350, .350, .350,  .350, .350, .350)  # BB bond lengths  #@#
        self.bbkb     =       (1250, 1250, 1250, 1250, 1250, 1250,   500,  400,  400)  # BB bond kB     #@#
        self.bbltyp   = {}                                                            #                 #@#
        self.bbkbtyp  = {}                                                            #                 #@#
        ## ANGLES ##                                                                    #
        self.bbadef   =       (119.2, 134,   96,   96,   96,   96,  100,  130,  127)  # BBB angles      #@#
        self.bbka     =       ( 150,   25,  700,  700,  700,  700,   25,   25,   25)  # BBB angle kB    #@#
        self.bbatyp   = {                                                             #                 #@#
               "PRO":         ( 119.2,134,   98,   98,   98,   98,  100,  130,  127), # PRO specific    #@#
               "HYP":         ( 119.2,134,   98,   98,   98,   98,  100,  130,  127)  # PRO specific    #@#
        }                                                                             #                 #@#
        self.bbkatyp  = {                                                             #                 #@#
               "PRO":         ( 150,   25,  100,  100,  100,  100,   25,   25,   25), # PRO specific    #@#
               "HYP":         ( 150,   25,  100,  100,  100,  100,   25,   25,   25)  # PRO specific    #@#
        }                                                                             #                 #@#
        ## DIHEDRALS ##                                                                 #
        self.bbddef   =       (90.7,    0, -120, -120, -120, -120)                    # BBBB dihedrals  #@#
        self.bbkd     =       ( 100,   10,  400,  400,  400,  400)                    # BBBB kB         #@#
        self.bbdmul   =       (   1,    1,    1,    1,    1,    1)                    # BBBB mltplcty   #@#
        self.bbdtyp   = {}                                                            #                 #@#
        self.bbkdtyp  = {}                                                            #                 #@#
        #
        ###############################################################################################
        # Some Forcefields use the Ca position to position the BB-bead (me like!)
        self.ca2bb = True
        # BBS angle, equal for all ss types
        # Connects BB(i-1),BB(i),SC(i), except for first residue: BB(i+1),BB(i),SC(i)
        #                 ANGLE   Ka
        self.bbsangle =      [   100,  25]                                            #@#
        # Bonds for extended structures (more stable than using dihedrals)
        #                  LENGTH FORCE
        self.ebonds   = {                                                             #@#
            'short': [ .640, 2500],                                                   #@#
            'long' : [ .970, 2500]                                                    #@#
        }                                                                             #@#
        #----+-----------------------+
        ## B | SIDE CHAIN PARAMETERS |
        #----+-----------------------+
        # Sidechain parameters for Elnedyn. (read from cg-2.1.dat).
        # For HIS the order of bonds is changed and a bond with fc=0 is added.
        # In the elnedyn2, TRP has an extra, cross-ring constraint
        self.sidechains = {
            #RES#   BEADS                   BONDS                                                                                           ANGLES                          DIHEDRALS
            'TRP': [spl("SC4 SP1 SC4 SC4"), [(0.255,73000), (0.220,None), (0.250,None), (0.280,None), (0.255,None), (0.35454,None)], [(142,30), (143,20), (104,50)], [(180,200)]],
            'TYR': [spl("SC4 SC4 SP1"),     [(0.335, 6000), (0.335,6000), (0.240,None), (0.310,None), (0.310,None)], [(70,100), (130, 50)]],
            'PHE': [spl("SC4 SC4 SC4"),     [(0.340, 7500), (0.340,7500), (0.240,None), (0.240,None), (0.240,None)], [(70,100), (125,100)]],
            'HIS': [spl("SC4 SP1 SP1"),     [(0.195, None), (0.193,None), (0.295,None), (0.216,None)], [(135,100),(115, 50)]],
            'ARG': [spl("N0 Qd"),           [(0.250,12500), (0.350,6200)], [(150,15)]],
            'LYS': [spl("C3 Qd"),           [(0.250,12500), (0.300,9700)], [(150,20)]],
            'CYS': [spl("C5"),              [(0.240, None)]],
            'ASP': [spl("Qa"),              [(0.255, None)]],
            'GLU': [spl("Qa"),              [(0.310, 2500)]],
            'ILE': [spl("C1"),              [(0.225,13250)]],
            'LEU': [spl("C1"),              [(0.265, None)]],
            'MET': [spl("C5"),              [(0.310, 2800)]],
            'ASN': [spl("P5"),              [(0.250, None)]],
            'PRO': [spl("C2"),              [(0.190, None)]],
            'GLN': [spl("P4"),              [(0.300, 2400)]],
            'SER': [spl("P1"),              [(0.195, None)]],
            'THR': [spl("P1"),              [(0.195, None)]],
            'VAL': [spl("C2"),              [(0.200, None)]],
            'GLY': [],
            'ALA': [],
        }
        # NOTE(review): no 'HYP' entry above, although self.connectivity below
        # lists one -- confirm how HYP is handled for this force field.
        # Not all (eg Elnedyn) forcefields use backbone-backbone-sidechain angles and BBBB-dihedrals.
        self.UseBBSAngles = False
        self.UseBBBBDihedrals = False
        # Martini 2.2p has polar and charged residues with separate charges.
        self.polar = []
        self.charged = []
        # If masses or charged diverge from standard (45/72 and -/+1) they are defined here.
        self.mass_charge = {
            #RES   MASS    CHARGE
        }
        # Defines the connectivity between beads
        # Connectivity records for Elnedyn (read from cg-2.1.dat).
        # For HIS the order of bonds is changed and a bond with fc=0 is added.
        self.connectivity = {
            #RES       BONDS                                                      ANGLES                           DIHEDRALS  V-SITE
            "TRP": [[(0, 1), (1, 2), (2, 4), (4, 3), (3, 1), (1, 4)],[(0, 1, 2), (0, 1, 4), (0, 1, 3)],[(1, 2, 3, 4)]],
            "TYR": [[(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)],[(0, 1, 2), (0, 1, 3)]],
            "PHE": [[(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)],[(0, 1, 2), (0, 1, 3)]],
            "HIS": [[(0, 1), (1, 2), (1, 3), (2, 3)], [(0, 1, 2), (0, 1, 3)]],
            "GLN": [[(0,1)]],
            "ASN": [[(0,1)]],
            "SER": [[(0,1)]],
            "THR": [[(0,1)]],
            "ARG": [[(0,1),(1,2)], [(0,1,2)]],
            "LYS": [[(0,1),(1,2)], [(0,1,2)]],
            "ASP": [[(0,1)]],
            "GLU": [[(0,1)]],
            "CYS": [[(0,1)]],
            "ILE": [[(0,1)]],
            "LEU": [[(0,1)]],
            "MET": [[(0,1)]],
            "PRO": [[(0,1)]],
            "HYP": [[(0,1)]],
            "VAL": [[(0,1)]],
            "ALA": [],
            "GLY": [],
        }
        #----+----------------+
        ## C | SPECIAL BONDS  |
        #----+----------------+
        self.special = {
            # Used for sulfur bridges
            # ATOM 1         ATOM 2          BOND LENGTH   FORCE CONSTANT
            (("SC1","CYS"), ("SC1","CYS")):     (0.24,         None),
        }
        # By default use an elastic network
        self.ElasticNetwork = True
        # Elastic networks bond shouldn't lead to exclusions (type 6)
        # But Elnedyn has been parametrized with type 1.
        self.EBondType = 1
        #----+----------------+
        ## D | INTERNAL STUFF |
        #----+----------------+
        # Derived lookup tables: secondary-structure letter -> parameter,
        # built once from the flat tuples defined above.
        ## BACKBONE BEAD TYPE ##
        # Dictionary of default bead types (*D)
        self.bbBeadDictD  = hash(bbss,self.bbdef)
        # Dictionary of dictionaries of types for specific residues (*S)
        self.bbBeadDictS  = dict([(i,hash(bbss,self.bbtyp[i])) for i in self.bbtyp.keys()])
        ## BB BOND TYPE ##
        # Dictionary of default abond types (*D)
        self.bbBondDictD = hash(bbss,zip(self.bbldef,self.bbkb))
        # Dictionary of dictionaries for specific types (*S)
        self.bbBondDictS = dict([(i,hash(bbss,zip(self.bbltyp[i],self.bbkbtyp[i]))) for i in self.bbltyp.keys()])
        # This is tricky to read, but it gives the right bondlength/force constant
        ## BBB ANGLE TYPE ##
        # Dictionary of default angle types (*D)
        self.bbAngleDictD = hash(bbss,zip(self.bbadef,self.bbka))
        # Dictionary of dictionaries for specific types (*S)
        self.bbAngleDictS = dict([(i,hash(bbss,zip(self.bbatyp[i],self.bbkatyp[i]))) for i in self.bbatyp.keys()])
        ## BBBB DIHEDRAL TYPE ##
        # Dictionary of default dihedral types (*D)
        self.bbDihedDictD = hash(bbss,zip(self.bbddef,self.bbkd,self.bbdmul))
        # Dictionary of dictionaries for specific types (*S)
        self.bbDihedDictS = dict([(i,hash(bbss,zip(self.bbdtyp[i],self.bbkdtyp[i]))) for i in self.bbdtyp.keys()])
    # The following function returns the backbone bead for a given residue and
    # secondary structure type.
    # 1. Look up the proper dictionary for the residue
    # 2. Get the proper type from it for the secondary structure
    # If the residue is not in the dictionary of specials, use the default
    # If the secondary structure is not listed (in the residue specific
    # dictionary) revert to the default.
    def bbGetBead(self,r1,ss="C"):
        # Return the backbone bead type for residue r1 with secondary structure ss.
        return self.bbBeadDictS.get(r1,self.bbBeadDictD).get(ss,self.bbBeadDictD.get(ss))
    # For Elnedyn we need something else to get the bond length (much simpler due to Ca position BB's)
    def bbGetBond(self,r,ca,ss):
        # Return (length, force constant): the length is measured directly from
        # the Calpha coordinates in ca (Angstrom -> nm via /10).
        import math
        # The 150000 forceconstant gave an error message, turning to constraints would be better.
        return ( math.sqrt(distance2(ca[0],ca[1]))/10., 150000 )
    def bbGetAngle(self,r,ca,ss):
        # Return (angle, force constant): the angle is measured from the
        # Calpha coordinates of the three residues (d2r converts rad -> deg,
        # presumably -- confirm against the module-level definition).
        import math
        # Elnedyn takes angles from structure, with fc=40
        return (math.acos(cos_angle([i-j for i,j in zip(ca[0],ca[1])],[i-j for i,j in zip(ca[2],ca[1])]))/d2r, 40)
    def messages(self):
        '''Prints any force-field specific logging messages.'''
        import logging
        logging.info('The Elnedyn forcefield has been implemented with some changes compared to the published parameters:')
        logging.info('- Backbone-Backbone bonds use high force constant bonds instead of constraints.')
        logging.info('- Trp has an extra constrain added to the sidechain.')
        logging.info('- The Backbone sidechain bonds with high force constants are replaced by constraints except for Trp and His.')
        logging.info('- Cysteine bonds are 0.24 nm constraints, instead of the published 0.39nm/5000kJ/mol.')
        logging.warning('Elnedyn topologies might not give numerical stable simulations with a 20fs timestep.')
        logging.warning('This can be solved by setting all S-type bead masses to 72amu.')
        pass  # redundant; kept to preserve the original code byte-for-byte
################################
## 6 # FORCE FIELD PARAMETERS ## -> @FF <-
################################
class elnedyn22:
    '''The forcefield has been implemented with some changes compared to the published parameters:
    - Backbone-Backbone bonds are constraints instead of strong bonds.
    - Trp has an extra constraint added to the sidechain.
    - The Backbone-Sidechain bonds with high force constants are replaced by constraints except for Trp and His.
    '''
    def __init__(self):
        # parameters are defined here for the following (protein) forcefields:
        self.name = 'elnedyn22'
        # Charged types:
        self.charges = {"Qd":1, "Qa":-1, "SQd":1, "SQa":-1, "RQd":1, "AQa":-1}  #@#
        #----+---------------------+
        ## A | BACKBONE PARAMETERS |
        #----+---------------------+
        #
        # bbss  lists the one letter secondary structure code
        # bbdef lists the corresponding default backbone beads
        # bbtyp lists the corresponding residue specific backbone beads
        #
        # bbd   lists the structure specific backbone bond lengths
        # bbkb  lists the corresponding bond force constants
        #
        # bba   lists the structure specific angles
        # bbka  lists the corresponding angle force constants
        #
        # bbd   lists the structure specific dihedral angles
        # bbkd  lists the corresponding force constants
        #
        # -=NOTE=-
        # if the secondary structure types differ between bonded atoms
        # the bond is assigned the lowest corresponding force constant
        #
        # -=NOTE=-
        # if proline is anywhere in the helix, the BBB angle changes for
        # all residues
        #
        ###############################################################################################
        ## BEADS ##                                                       #
        #            F     E     H     1     2     3     T     S     C    # SS one letter
        self.bbdef = spl(" N0 Nda N0 Nd Na Nda Nda P5 P5")                # Default beads   #@#
        self.bbtyp = {                                                    #                 #@#
            "ALA": spl(" C5 N0 C5 N0 N0 N0 N0 P4 P4"),                    # ALA specific    #@#
            "PRO": spl(" C5 N0 C5 N0 Na N0 N0 P4 P4"),                    # PRO specific    #@#
            "HYP": spl(" C5 N0 C5 N0 N0 N0 N0 P4 P4")                     # HYP specific    #@#
        }                                                                 #                 #@#
        ## BONDS ##                                                       #
        self.bbldef = (.365, .350, .350, .350, .350, .350, .350, .350, .350)  # BB bond lengths #@#
        self.bbkb = (1250, 1250, 1250, 1250, 1250, 1250, 500, 400, 400)       # BB bond kB      #@#
        self.bbltyp = {}                                                  #                 #@#
        self.bbkbtyp = {}                                                 #                 #@#
        ## ANGLES ##                                                      #
        self.bbadef = (119.2, 134, 96, 96, 96, 96, 100, 130, 127)         # BBB angles      #@#
        self.bbka = ( 150, 25, 700, 700, 700, 700, 25, 25, 25)            # BBB angle kB    #@#
        self.bbatyp = {                                                   #                 #@#
            "PRO": ( 119.2,134, 98, 98, 98, 98, 100, 130, 127),           # PRO specific    #@#
            "HYP": ( 119.2,134, 98, 98, 98, 98, 100, 130, 127)            # HYP specific    #@#
        }                                                                 #                 #@#
        self.bbkatyp = {                                                  #                 #@#
            "PRO": ( 150, 25, 100, 100, 100, 100, 25, 25, 25),            # PRO specific    #@#
            "HYP": ( 150, 25, 100, 100, 100, 100, 25, 25, 25)             # HYP specific    #@#
        }                                                                 #                 #@#
        ## DIHEDRALS ##                                                   #
        self.bbddef = (90.7, 0, -120, -120, -120, -120)                   # BBBB dihedrals  #@#
        self.bbkd = ( 100, 10, 400, 400, 400, 400)                        # BBBB kB         #@#
        self.bbdmul = ( 1, 1, 1, 1, 1, 1)                                 # BBBB mltplcty   #@#
        self.bbdtyp = {}                                                  #                 #@#
        self.bbkdtyp = {}                                                 #                 #@#
        #
        ###############################################################################################
        # Some Forcefields use the Ca position to position the BB-bead (me like!)
        self.ca2bb = True
        # BBS angle, equal for all ss types
        # Connects BB(i-1),BB(i),SC(i), except for first residue: BB(i+1),BB(i),SC(i)
        #                  ANGLE   Ka
        self.bbsangle = [ 100, 25]  #@#
        # Bonds for extended structures (more stable than using dihedrals)
        #               LENGTH FORCE
        self.ebonds = {            #@#
            'short': [ .640, 2500],  #@#
            'long' : [ .970, 2500]   #@#
        }                          #@#
        #----+-----------------------+
        ## B | SIDE CHAIN PARAMETERS |
        #----+-----------------------+
        # Sidechain parameters for Elnedyn (read from cg-2.1.dat).
        # For HIS the order of bonds is changed and a bond with fc=0 is added.
        # In elnedyn2, TRP has an extra, cross-ring constraint.
        # NOTE: a force constant of None marks a constraint rather than a bond.
        self.sidechains = {
            # RES  BEADS                     BONDS                                                                                      ANGLES                          DIHEDRALS
            'TRP': [spl("SC4 SNd SC5 SC5"), [(0.255,73000), (0.220,None), (0.250,None), (0.280,None), (0.255,None), (0.35454,None)], [(142,30), (143,20), (104,50)], [(180,200)]],
            'TYR': [spl("SC4 SC4 SP1"), [(0.335, 6000), (0.335,6000), (0.240,None), (0.310,None), (0.310,None)], [(70,100), (130, 50)]],
            'PHE': [spl("SC5 SC5 SC5"), [(0.340, 7500), (0.340,7500), (0.240,None), (0.240,None), (0.240,None)], [(70,100), (125,100)]],
            'HIS': [spl("SC4 SP1 SP1"), [(0.195, None), (0.193,None), (0.295,None), (0.216,None)], [(135,100),(115, 50)]],
            'HIH': [spl("SC4 SP1 SP1"), [(0.195, None), (0.193,None), (0.295,None), (0.216,None)], [(135,100),(115, 50)]],
            'ARG': [spl("N0 Qd"), [(0.250,12500), (0.350,6200)], [(150,15)]],
            'LYS': [spl("C3 Qd"), [(0.250,12500), (0.300,9700)], [(150,20)]],
            'CYS': [spl("C5"), [(0.240, None)]],
            'ASP': [spl("Qa"), [(0.255, None)]],
            'GLU': [spl("Qa"), [(0.310, 2500)]],
            'ILE': [spl("C1"), [(0.225,13250)]],
            'LEU': [spl("C1"), [(0.265, None)]],
            'MET': [spl("C5"), [(0.310, 2800)]],
            'ASN': [spl("P5"), [(0.250, None)]],
            'PRO': [spl("C3"), [(0.190, None)]],
            'GLN': [spl("P4"), [(0.300, 2400)]],
            'SER': [spl("P1"), [(0.195, None)]],
            'THR': [spl("P1"), [(0.195, None)]],
            'VAL': [spl("C2"), [(0.200, None)]],
            'GLY': [],
            'ALA': [],
        }
        # Not all (eg Elnedyn) forcefields use backbone-backbone-sidechain angles and BBBB-dihedrals.
        self.UseBBSAngles = False
        self.UseBBBBDihedrals = False
        # Martini 2.2p has polar and charged residues with separate charges.
        self.polar = []
        self.charged = []
        # If masses or charges diverge from standard (45/72 and -/+1) they are defined here.
        self.mass_charge = {
            # RES  MASS  CHARGE
        }
        # Defines the connectivity between beads.
        # Connectivity records for Elnedyn (read from cg-2.1.dat).
        # For HIS the order of bonds is changed and a bond with fc=0 is added.
        self.connectivity = {
            # RES  BONDS                                                 ANGLES                            DIHEDRALS
            "TRP": [[(0, 1), (1, 2), (2, 4), (4, 3), (3, 1), (1, 4)],[(0, 1, 2), (0, 1, 4), (0, 1, 3)],[(1, 2, 3, 4)]],
            "TYR": [[(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)], [(0, 1, 2), (0, 1, 3)]],
            "PHE": [[(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)], [(0, 1, 2), (0, 1, 3)]],
            "HIS": [[(0, 1), (1, 2), (1, 3), (2, 3)], [(0, 1, 2), (0, 1, 3)]],
            "HIH": [[(0, 1), (1, 2), (1, 3), (2, 3)], [(0, 1, 2), (0, 1, 3)]],
            "GLN": [[(0,1)]],
            "ASN": [[(0,1)]],
            "SER": [[(0,1)]],
            "THR": [[(0,1)]],
            "ARG": [[(0,1),(1,2)], [(0,1,2)]],
            "LYS": [[(0,1),(1,2)], [(0,1,2)]],
            "ASP": [[(0,1)]],
            "GLU": [[(0,1)]],
            "CYS": [[(0,1)]],
            "ILE": [[(0,1)]],
            "LEU": [[(0,1)]],
            "MET": [[(0,1)]],
            "PRO": [[(0,1)]],
            "HYP": [[(0,1)]],
            "VAL": [[(0,1)]],
            "ALA": [],
            "GLY": [],
        }
        #----+----------------+
        ## C | SPECIAL BONDS  |
        #----+----------------+
        self.special = {
            # Used for sulfur bridges
            # ATOM 1         ATOM 2          BOND LENGTH  FORCE CONSTANT
            (("SC1","CYS"), ("SC1","CYS")): (0.24, None),
        }
        # By default use an elastic network
        self.ElasticNetwork = True
        # Elastic network bonds shouldn't lead to exclusions (type 6),
        # but Elnedyn has been parametrized with type 1.
        self.EBondType = 1
        #----+----------------+
        ## D | INTERNAL STUFF |
        #----+----------------+
        ## BACKBONE BEAD TYPE ##
        # NOTE: `hash` here is the module-level helper that zips two sequences
        # into a dict (it shadows the builtin), and `bbss` is the module-level
        # list of secondary structure codes.
        # Dictionary of default bead types (*D)
        self.bbBeadDictD = hash(bbss,self.bbdef)
        # Dictionary of dictionaries of types for specific residues (*S)
        self.bbBeadDictS = dict([(i,hash(bbss,self.bbtyp[i])) for i in self.bbtyp.keys()])
        ## BB BOND TYPE ##
        # Dictionary of default bond types (*D)
        self.bbBondDictD = hash(bbss,zip(self.bbldef,self.bbkb))
        # Dictionary of dictionaries for specific types (*S)
        self.bbBondDictS = dict([(i,hash(bbss,zip(self.bbltyp[i],self.bbkbtyp[i]))) for i in self.bbltyp.keys()])
        # This is tricky to read, but it gives the right bondlength/force constant
        ## BBB ANGLE TYPE ##
        # Dictionary of default angle types (*D)
        self.bbAngleDictD = hash(bbss,zip(self.bbadef,self.bbka))
        # Dictionary of dictionaries for specific types (*S)
        self.bbAngleDictS = dict([(i,hash(bbss,zip(self.bbatyp[i],self.bbkatyp[i]))) for i in self.bbatyp.keys()])
        ## BBBB DIHEDRAL TYPE ##
        # Dictionary of default dihedral types (*D)
        self.bbDihedDictD = hash(bbss,zip(self.bbddef,self.bbkd,self.bbdmul))
        # Dictionary of dictionaries for specific types (*S)
        self.bbDihedDictS = dict([(i,hash(bbss,zip(self.bbdtyp[i],self.bbkdtyp[i]))) for i in self.bbdtyp.keys()])

    # The following function returns the backbone bead for a given residue and
    # secondary structure type.
    # 1. Look up the proper dictionary for the residue.
    # 2. Get the proper type from it for the secondary structure.
    # If the residue is not in the dictionary of specials, use the default.
    # If the secondary structure is not listed (in the residue specific
    # dictionary) revert to the default.
    def bbGetBead(self,r1,ss="C"):
        return self.bbBeadDictS.get(r1,self.bbBeadDictD).get(ss,self.bbBeadDictD.get(ss))

    # For Elnedyn we need something else to get the bond length (much simpler due to Ca position BB's).
    def bbGetBond(self,r,ca,ss):
        import math
        # The 150000 forceconstant gave an error message, turning to constraints would be better.
        return ( math.sqrt(distance2(ca[0],ca[1]))/10., 150000 )

    def bbGetAngle(self,r,ca,ss):
        import math
        # Elnedyn takes angles from structure, with fc=40
        return (math.acos(cos_angle([i-j for i,j in zip(ca[0],ca[1])],[i-j for i,j in zip(ca[2],ca[1])]))/d2r, 40)

    def messages(self):
        '''Prints any force-field specific logging messages.'''
        import logging
        logging.info('The elnedyn forcefield has been implemented with some changes compared to the published parameters:')
        logging.info('- Backbone-Backbone bonds use high force constant bonds instead of constraints.')
        logging.info('- Trp has an extra constrain added to the sidechain.')
        logging.info('- The Backbone sidechain bonds with high force constants are replaced by constraints except for Trp and His.')
        logging.info('- Cysteine bonds are 0.24 nm constraints, instead of the published 0.39nm/5000kJ/mol.')
        logging.warning('Elnedyn topologies might not give numerical stable simulations with a 20fs timestep.')
        logging.warning('This can be solved by setting all S-type bead masses to 72amu.')
        logging.warning('Martini version 2.2 is in beta release. It has not been extensively tested and problems might occur.')
        pass
################################
## 6 # FORCE FIELD PARAMETERS ## -> @FF <-
################################
class elnedyn22p:
    '''Elnedyn 2.2 force field with polarizable/charged sidechain variants.

    Same backbone parameters as elnedyn22. Polar residues (GLN, ASN, SER, THR)
    get two charged dummy (D) beads, and charged residues (ARG, LYS, ASP, GLU,
    HIH) get one charged dummy bead; see `self.polar`, `self.charged` and
    `self.mass_charge` below.
    '''
    def __init__(self):
        # parameters are defined here for the following (protein) forcefields:
        self.name = 'elnedyn22p'
        # Charged types:
        self.charges = {"Qd":1, "Qa":-1, "SQd":1, "SQa":-1, "RQd":1, "AQa":-1}  #@#
        #----+---------------------+
        ## A | BACKBONE PARAMETERS |
        #----+---------------------+
        #
        # bbss  lists the one letter secondary structure code
        # bbdef lists the corresponding default backbone beads
        # bbtyp lists the corresponding residue specific backbone beads
        #
        # bbd   lists the structure specific backbone bond lengths
        # bbkb  lists the corresponding bond force constants
        #
        # bba   lists the structure specific angles
        # bbka  lists the corresponding angle force constants
        #
        # bbd   lists the structure specific dihedral angles
        # bbkd  lists the corresponding force constants
        #
        # -=NOTE=-
        # if the secondary structure types differ between bonded atoms
        # the bond is assigned the lowest corresponding force constant
        #
        # -=NOTE=-
        # if proline is anywhere in the helix, the BBB angle changes for
        # all residues
        #
        ###############################################################################################
        ## BEADS ##                                                       #
        #            F     E     H     1     2     3     T     S     C    # SS one letter
        self.bbdef = spl(" N0 Nda N0 Nd Na Nda Nda P5 P5")                # Default beads   #@#
        self.bbtyp = {                                                    #                 #@#
            "ALA": spl(" C5 N0 C5 N0 N0 N0 N0 P4 P4"),                    # ALA specific    #@#
            "PRO": spl(" C5 N0 C5 N0 Na N0 N0 P4 P4"),                    # PRO specific    #@#
            "HYP": spl(" C5 N0 C5 N0 N0 N0 N0 P4 P4")                     # HYP specific    #@#
        }                                                                 #                 #@#
        ## BONDS ##                                                       #
        self.bbldef = (.365, .350, .350, .350, .350, .350, .350, .350, .350)  # BB bond lengths #@#
        self.bbkb = (1250, 1250, 1250, 1250, 1250, 1250, 500, 400, 400)       # BB bond kB      #@#
        self.bbltyp = {}                                                  #                 #@#
        self.bbkbtyp = {}                                                 #                 #@#
        ## ANGLES ##                                                      #
        self.bbadef = (119.2, 134, 96, 96, 96, 96, 100, 130, 127)         # BBB angles      #@#
        self.bbka = ( 150, 25, 700, 700, 700, 700, 25, 25, 25)            # BBB angle kB    #@#
        self.bbatyp = {                                                   #                 #@#
            "PRO": ( 119.2,134, 98, 98, 98, 98, 100, 130, 127),           # PRO specific    #@#
            "HYP": ( 119.2,134, 98, 98, 98, 98, 100, 130, 127)            # HYP specific    #@#
        }                                                                 #                 #@#
        self.bbkatyp = {                                                  #                 #@#
            "PRO": ( 150, 25, 100, 100, 100, 100, 25, 25, 25),            # PRO specific    #@#
            "HYP": ( 150, 25, 100, 100, 100, 100, 25, 25, 25)             # HYP specific    #@#
        }                                                                 #                 #@#
        ## DIHEDRALS ##                                                   #
        self.bbddef = (90.7, 0, -120, -120, -120, -120)                   # BBBB dihedrals  #@#
        self.bbkd = ( 100, 10, 400, 400, 400, 400)                        # BBBB kB         #@#
        self.bbdmul = ( 1, 1, 1, 1, 1, 1)                                 # BBBB mltplcty   #@#
        self.bbdtyp = {}                                                  #                 #@#
        self.bbkdtyp = {}                                                 #                 #@#
        #
        ###############################################################################################
        # Some Forcefields use the Ca position to position the BB-bead (me like!)
        self.ca2bb = True
        # BBS angle, equal for all ss types
        # Connects BB(i-1),BB(i),SC(i), except for first residue: BB(i+1),BB(i),SC(i)
        #                  ANGLE   Ka
        self.bbsangle = [ 100, 25]  #@#
        # Bonds for extended structures (more stable than using dihedrals)
        #               LENGTH FORCE
        self.ebonds = {            #@#
            'short': [ .640, 2500],  #@#
            'long' : [ .970, 2500]   #@#
        }                          #@#
        #----+-----------------------+
        ## B | SIDE CHAIN PARAMETERS |
        #----+-----------------------+
        # Sidechain parameters for Elnedyn (read from cg-2.1.dat).
        # For HIS the order of bonds is changed and a bond with fc=0 is added.
        # In elnedyn2, TRP has an extra, cross-ring constraint.
        # NOTE: a force constant of None marks a constraint rather than a bond;
        # D beads are the charged dummy particles of the polarizable model.
        self.sidechains = {
            # RES  BEADS                     BONDS                                                                                      ANGLES                          DIHEDRALS      V-SITES
            'TRP': [spl("SC4 SNd SC5 SC5"), [(0.255,73000), (0.220,None), (0.250,None), (0.280,None), (0.255,None), (0.35454,None)], [(142,30), (143,20), (104,50)], [(180,200)]],
            'TYR': [spl("SC4 SC4 SP1"), [(0.335, 6000), (0.335,6000), (0.240,None), (0.310,None), (0.310,None)], [(70,100), (130, 50)]],
            'PHE': [spl("SC5 SC5 SC5"), [(0.340, 7500), (0.340,7500), (0.240,None), (0.240,None), (0.240,None)], [(70,100), (125,100)]],
            'HIS': [spl("SC4 SP1 SP1"), [(0.195, None), (0.193,None), (0.295,None), (0.216,None)], [(135,100),(115, 50)]],
            'HIH': [spl("SC4 SP1 SQd"), [(0.195,94000), (0.193,None), (0.295,None), (0.216,None), (0.11,None)], [(135,100),(115, 50)]],
            'GLN': [spl("Nda D D"), [(0.300, 2400), (0.280,None)], [], [], [(0.5,)]],
            'ASN': [spl("Nda D D"), [(0.250,61000), (0.280,None)], [], [], [(0.5,)]],
            'SER': [spl("N0 D D"), [(0.195,94000), (0.280,None)], [], [], [(0.5,)]],
            'THR': [spl("N0 D D"), [(0.195,94000), (0.280,None)], [], [], [(0.5,)]],
            'ARG': [spl("N0 Qd D"), [(0.250,12500), (0.350,6200), (0.110,None)], [(150,15)]],
            'LYS': [spl("C3 Qd D"), [(0.250,12500), (0.300,9700), (0.110,None)], [(150,20)]],
            'ASP': [spl("Qa D"), [(0.255, None), (0.110,None)]],
            'GLU': [spl("Qa D"), [(0.310, 2500), (0.110,None)]],
            'CYS': [spl("C5"), [(0.240, None)]],
            'ILE': [spl("C1"), [(0.225,13250)]],
            'LEU': [spl("C1"), [(0.265, None)]],
            'MET': [spl("C5"), [(0.310, 2800)]],
            'PRO': [spl("C3"), [(0.190, None)]],
            'HYP': [spl("P1"), [(0.190, None)]],
            'VAL': [spl("C2"), [(0.200, None)]],
            'GLY': [],
            'ALA': [],
        }
        # Not all (eg Elnedyn) forcefields use backbone-backbone-sidechain angles and BBBB-dihedrals.
        self.UseBBSAngles = False
        self.UseBBBBDihedrals = False
        # Martini 2.2p has polar and charged residues with separate charges.
        self.polar = ["GLN","ASN","SER","THR"]
        self.charged = ["ARG","LYS","ASP","GLU","HIH"]
        # If masses or charges diverge from standard (45/72 and -/+1) they are defined here.
        self.mass_charge = {
            # RES   MASS                 CHARGE
            "GLN":[[0,36,36],            [0,0.42,-0.42]],
            "ASN":[[0,36,36],            [0,0.46,-0.46]],
            "SER":[[0,36,36],            [0,0.40,-0.40]],
            "THR":[[0,36,36],            [0,0.36,-0.36]],
            "HIH":[[72,72,36,36],        [0,0,0,1]],
            "ARG":[[72,36,36],           [0,0,1]],
            "LYS":[[72,36,36],           [0,0,1]],
            "ASP":[[36,36],              [0,-1]],
            "GLU":[[36,36],              [0,-1]],
        }
        # Defines the connectivity between beads.
        # The polar sidechains have charged dummy beads, connected with a constraint.
        # The charged sidechains have a charged dummy bead.
        self.connectivity = {
            # RES  BONDS                                                 ANGLES                            DIHEDRALS      V-SITE
            "TRP": [[(0, 1), (1, 2), (2, 4), (4, 3), (3, 1), (1, 4)],[(0, 1, 2), (0, 1, 4), (0, 1, 3)],[(1, 2, 3, 4)]],
            "TYR": [[(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)], [(0, 1, 2), (0, 1, 3)]],
            "PHE": [[(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)], [(0, 1, 2), (0, 1, 3)]],
            "HIS": [[(0, 1), (1, 2), (1, 3), (2, 3)], [(0, 1, 2), (0, 1, 3)]],
            "HIH": [[(0, 1), (1, 2), (1, 3), (2, 3), (3, 4)], [(0, 1, 2), (0, 1, 3)], [(0, 2, 3, 1)]],
            "GLN": [[(0, 1), (2, 3)], [], [], [(1,2,3)]],
            "ASN": [[(0, 1), (2, 3)], [], [], [(1,2,3)]],
            "SER": [[(0, 1), (2, 3)], [], [], [(1,2,3)]],
            "THR": [[(0, 1), (2, 3)], [], [], [(1,2,3)]],
            "ARG": [[(0, 1), (1, 2), (2, 3)], [(0,1,2)]],
            "LYS": [[(0, 1), (1, 2), (2, 3)], [(0,1,2)]],
            "ASP": [[(0, 1), (1, 2)]],
            "GLU": [[(0, 1), (1, 2)]],
            "CYS": [[(0, 1)]],
            "ILE": [[(0, 1)]],
            "LEU": [[(0, 1)]],
            "MET": [[(0, 1)]],
            "PRO": [[(0, 1)]],
            "HYP": [[(0, 1)]],
            "VAL": [[(0, 1)]],
            "ALA": [],
            "GLY": [],
        }
        #----+----------------+
        ## C | SPECIAL BONDS  |
        #----+----------------+
        self.special = {
            # Used for sulfur bridges
            # ATOM 1         ATOM 2          BOND LENGTH  FORCE CONSTANT
            (("SC1","CYS"), ("SC1","CYS")): (0.24, None),
        }
        # By default use an elastic network
        self.ElasticNetwork = True
        # Elastic network bonds shouldn't lead to exclusions (type 6),
        # but Elnedyn has been parametrized with type 1.
        self.EBondType = 1
        #----+----------------+
        ## D | INTERNAL STUFF |
        #----+----------------+
        ## BACKBONE BEAD TYPE ##
        # NOTE: `hash` here is the module-level helper that zips two sequences
        # into a dict (it shadows the builtin), and `bbss` is the module-level
        # list of secondary structure codes.
        # Dictionary of default bead types (*D)
        self.bbBeadDictD = hash(bbss,self.bbdef)
        # Dictionary of dictionaries of types for specific residues (*S)
        self.bbBeadDictS = dict([(i,hash(bbss,self.bbtyp[i])) for i in self.bbtyp.keys()])
        ## BB BOND TYPE ##
        # Dictionary of default bond types (*D)
        self.bbBondDictD = hash(bbss,zip(self.bbldef,self.bbkb))
        # Dictionary of dictionaries for specific types (*S)
        self.bbBondDictS = dict([(i,hash(bbss,zip(self.bbltyp[i],self.bbkbtyp[i]))) for i in self.bbltyp.keys()])
        # This is tricky to read, but it gives the right bondlength/force constant
        ## BBB ANGLE TYPE ##
        # Dictionary of default angle types (*D)
        self.bbAngleDictD = hash(bbss,zip(self.bbadef,self.bbka))
        # Dictionary of dictionaries for specific types (*S)
        self.bbAngleDictS = dict([(i,hash(bbss,zip(self.bbatyp[i],self.bbkatyp[i]))) for i in self.bbatyp.keys()])
        ## BBBB DIHEDRAL TYPE ##
        # Dictionary of default dihedral types (*D)
        self.bbDihedDictD = hash(bbss,zip(self.bbddef,self.bbkd,self.bbdmul))
        # Dictionary of dictionaries for specific types (*S)
        self.bbDihedDictS = dict([(i,hash(bbss,zip(self.bbdtyp[i],self.bbkdtyp[i]))) for i in self.bbdtyp.keys()])

    # The following function returns the backbone bead for a given residue and
    # secondary structure type.
    # 1. Look up the proper dictionary for the residue.
    # 2. Get the proper type from it for the secondary structure.
    # If the residue is not in the dictionary of specials, use the default.
    # If the secondary structure is not listed (in the residue specific
    # dictionary) revert to the default.
    def bbGetBead(self,r1,ss="C"):
        return self.bbBeadDictS.get(r1,self.bbBeadDictD).get(ss,self.bbBeadDictD.get(ss))

    # For Elnedyn we need something else to get the bond length (much simpler due to Ca position BB's).
    def bbGetBond(self,r,ca,ss):
        import math
        # The 150000 forceconstant gave an error message, turning to constraints would be better.
        return ( math.sqrt(distance2(ca[0],ca[1]))/10., 150000 )

    def bbGetAngle(self,r,ca,ss):
        import math
        # Elnedyn takes angles from structure, with fc=40
        return (math.acos(cos_angle([i-j for i,j in zip(ca[0],ca[1])],[i-j for i,j in zip(ca[2],ca[1])]))/d2r, 40)

    def messages(self):
        '''Prints any force-field specific logging messages.'''
        import logging
        logging.info('The elnedyn forcefield has been implemented with some changes compared to the published parameters:')
        logging.info('- Backbone-Backbone bonds use high force constant bonds instead of constraints.')
        logging.info('- Trp has an extra constrain added to the sidechain.')
        logging.info('- The Backbone sidechain bonds with high force constants are replaced by constraints except for Trp and His and the polar sidechains.')
        logging.info('- Cysteine bonds are 0.24 nm constraints, instead of the published 0.39nm/5000kJ/mol.')
        logging.warning('Elnedyn topologies might not give numerical stable simulations with a 20fs timestep.')
        logging.warning('This can be solved by setting all S-type bead masses to 72amu.')
        logging.warning('Martini version 2.2 is in beta release. It has not been extensively tested and problems might occur.')
        pass
################################
## 6 # FORCE FIELD PARAMETERS ## -> @FF <-
################################
# New martini 2.2 parameters.
# Changed:
# Unstructured Pro backbone bead
# Proline side chains
# Phe sidechain
# Trp sidechain
# Helix BB-bonds to constraint
class martini22dna:
def __init__(self):
# parameters are defined here for the following (protein) forcefields:
self.name = 'martini22dna'
# Charged types:
self.charges = {"Qd":1, "Qa":-1, "SQd":1, "SQa":-1, "RQd":1, "AQa":-1} #@#
#----+---------------------+
## A | BACKBONE PARAMETERS |
#----+---------------------+
#
# bbss lists the one letter secondary structure code
# bbdef lists the corresponding default backbone beads
# bbtyp lists the corresponding residue specific backbone beads
#
# bbd lists the structure specific backbone bond lengths
# bbkb lists the corresponding bond force constants
#
# bba lists the structure specific angles
# bbka lists the corresponding angle force constants
#
# bbd lists the structure specific dihedral angles
# bbkd lists the corresponding force constants
#
# -=NOTE=-
# if the secondary structure types differ between bonded atoms
# the bond is assigned the lowest corresponding force constant
#
# -=NOTE=-
# if proline is anywhere in the helix, the BBB angle changes for
# all residues
#
###############################################################################################
## BEADS ## #
# F E H 1 2 3 T S C # SS one letter
self.bbdef = spl(" N0 Nda N0 Nd Na Nda Nda P5 P5") # Default beads #@#
self.bbtyp = { # #@#
"ALA": spl(" C5 N0 C5 N0 N0 N0 N0 P4 P4"), # ALA specific #@#
"PRO": spl(" C5 N0 C5 N0 Na N0 N0 P4 P4"), # PRO specific #@#
"HYP": spl(" C5 N0 C5 N0 N0 N0 N0 P4 P4") # HYP specific #@#
} # #@#
## BONDS ## #
self.bbldef = (.365, .350, .310, .310, .310, .310, .350, .350, .350) # BB bond lengths #@#
self.bbkb = (1250, 1250, None, None, None, None, 1250, 1250, 1250) # BB bond kB #@#
self.bbltyp = {} # #@#
self.bbkbtyp = {} # #@#
## ANGLES ## #
self.bbadef = ( 119.2,134, 96, 96, 96, 96, 100, 130, 127) # BBB angles #@#
self.bbka = ( 150, 25, 700, 700, 700, 700, 20, 20, 20) # BBB angle kB #@#
self.bbatyp = { # #@#
"PRO": ( 119.2,134, 98, 98, 98, 98, 100, 130, 127), # PRO specific #@#
"HYP": ( 119.2,134, 98, 98, 98, 98, 100, 130, 127) # PRO specific #@#
} # #@#
self.bbkatyp = { # #@#
"PRO": ( 150, 25, 100, 100, 100, 100, 25, 25, 25), # PRO specific #@#
"HYP": ( 150, 25, 100, 100, 100, 100, 25, 25, 25) # PRO specific #@#
} # #@#
## DIHEDRALS ## #
self.bbddef = ( 90.7, 0, -120, -120, -120, -120) # BBBB dihedrals #@#
self.bbkd = ( 100, 10, 400, 400, 400, 400) # BBBB kB #@#
self.bbdmul = ( 1, 1, 1, 1, 1, 1) # BBBB mltplcty #@#
self.bbdtyp = {} # #@#
self.bbkdtyp = {} # #@#
#
###############################################################################################
# Some Forcefields use the Ca position to position the BB-bead (me like!)
# martini 2.1 doesn't
self.ca2bb = False
# BBS angle, equal for all ss types
# Connects BB(i-1),BB(i),SC(i), except for first residue: BB(i+1),BB(i),SC(i)
# ANGLE Ka
self.bbsangle = [ 100, 25] #@#
# Bonds for extended structures (more stable than using dihedrals)
# LENGTH FORCE
self.ebonds = { #@#
'short': [ .640, 2500], #@#
'long' : [ .970, 2500] #@#
} #@#
#----+-----------------------+
## B | SIDE CHAIN PARAMETERS |
#----+-----------------------+
# To be compatible with Elnedyn, all parameters are explicitly defined, even if they are double.
self.sidechains = {
#RES# BEADS BONDS ANGLES DIHEDRALS
# BB-SC SC-SC BB-SC-SC SC-SC-SC
"TRP": [spl("SC4 SNd SC5 SC5"),[(0.300,5000)]+[(0.270,None) for i in range(5)], [(210,50),(90,50),(90,50)], [(0,50),(0,200)]],
"TYR": [spl("SC4 SC4 SP1"), [(0.320,5000), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
"PHE": [spl("SC5 SC5 SC5"), [(0.310,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
"HIS": [spl("SC4 SP1 SP1"), [(0.320,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
"HIH": [spl("SC4 SP1 SQd"), [(0.320,7500), (0.270,None), (0.270,None),(0.270,None)],[(150,50),(150,50)], [(0,50)]],
"ARG": [spl("N0 Qd"), [(0.330,5000), (0.340,5000)], [(180,25)]],
"LYS": [spl("C3 Qd"), [(0.330,5000), (0.280,5000)], [(180,25)]],
"CYS": [spl("C5"), [(0.310,7500)]],
"ASP": [spl("Qa"), [(0.320,7500)]],
"GLU": [spl("Qa"), [(0.400,5000)]],
"ILE": [spl("AC1"), [(0.310,None)]],
"LEU": [spl("AC1"), [(0.330,7500)]],
"MET": [spl("C5"), [(0.400,2500)]],
"ASN": [spl("P5"), [(0.320,5000)]],
"PRO": [spl("C3"), [(0.300,7500)]],
"HYP": [spl("P1"), [(0.300,7500)]],
"GLN": [spl("P4"), [(0.400,5000)]],
"SER": [spl("P1"), [(0.250,7500)]],
"THR": [spl("P1"), [(0.260,None)]],
"VAL": [spl("AC2"), [(0.265,None)]],
"ALA": [],
"GLY": [],
}
# Not all (eg Elnedyn) forcefields use backbone-backbone-sidechain angles and BBBB-dihedrals.
self.UseBBSAngles = True
self.UseBBBBDihedrals = True
# Martini 2.2p has polar and charged residues with seperate charges.
self.polar = []
self.charged = []
# If masses or charged diverge from standard (45/72 and -/+1) they are defined here.
self.mass_charge = {
#RES MASS CHARGE
}
# Defines the connectivity between between beads
self.aa_connectivity = {
#RES BONDS ANGLES DIHEDRALS V-SITE
"TRP": [[(0,1),(1,2),(1,3),(2,3),(2,4),(3,4)], [(0,1,2),(0,1,3)], [(0,2,3,1),(1,2,4,3)]],
"TYR": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
"PHE": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
"HIS": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
"HIH": [[(0,1),(1,2),(1,3),(2,3)], [(0,1,2),(0,1,3)], [(0,2,3,1)]],
"GLN": [[(0,1)]],
"ASN": [[(0,1)]],
"SER": [[(0,1)]],
"THR": [[(0,1)]],
"ARG": [[(0,1),(1,2)], [(0,1,2)]],
"LYS": [[(0,1),(1,2)], [(0,1,2)]],
"ASP": [[(0,1)]],
"GLU": [[(0,1)]],
"CYS": [[(0,1)]],
"ILE": [[(0,1)]],
"LEU": [[(0,1)]],
"MET": [[(0,1)]],
"PRO": [[(0,1)]],
"HYP": [[(0,1)]],
"VAL": [[(0,1)]],
"ALA": [],
"GLY": [],
}
#----+----------------+
## C | DNA/RNA bases |
#----+----------------+
self.dna_bb = {
'atoms' : spl("Q0 C2 N0"),
'bonds' : [(0.120,5000),(0.220,5000),(0.320,5000)],
'angles' : [(10.0, 100), (20.0, 100), (30.0, 100)],
'dih' : [(10.0, 100, 10), (20.0, 100, 10), (30.0, 100, 10),],
}
self.dna_con = {
'bonds' : [(0,1),(1,2),(2,3)],
'angles' : [],
'dih' : [],
}
# To be compatible with Elnedyn, all parameters are explicitly defined, even if they are double.
self.bases = {
#RES# BEADS BONDS ANGLES DIHEDRALS
# BB-SC SC-SC BB-SC-SC SC-SC-SC
"DA": [spl("SNa SNa SP1 SNa"),
[(0.120,5000),(0.220,5000),(0.320,5000),(0.420,5000),(0.520,5000),],
[(10.0, 100),(20.0, 100),(30.0, 100),(40.0, 100),(50.0, 100),(60.0, 100),(70.0, 100),(80.0, 100)],
[(10.0, 100, 10),(20.0, 100, 10),(30.0, 100, 10),(40.0, 100, 10)],
[(50.0, 100)]],
"DC": [spl("SNa SPa SPd"),
[(0.120,5000),(0.220,5000),(0.320,5000),(0.420,5000),],
[(10.0, 100),(20.0, 100),(30.0, 100),(40.0, 100),(50.0, 100),(60.0, 100),(70.0, 100)],
[(10.0, 100, 10),(20.0, 100, 10),(30.0, 100, 10),(40.0, 100, 10)],
[]],
"DG": [spl("SNa SPd SP1 SNa"),
[(0.120,5000),(0.220,5000),(0.320,5000),(0.420,5000),(0.520,5000),],
[(10.0, 100),(20.0, 100),(30.0, 100),(40.0, 100),(50.0, 100),(60.0, 100),(70.0, 100),(80.0, 100)],
[(10.0, 100, 10),(20.0, 100, 10),(30.0, 100, 10),(40.0, 100, 10)],
[(50.0, 100)]],
"DT": [spl("SNa SP1 Pa"),
[(0.120,5000),(0.220,5000),(0.320,5000),(0.420,5000),],
[(10.0, 100),(20.0, 100),(30.0, 100),(40.0, 100),(50.0, 100),(60.0, 100),(70.0, 100)],
[(10.0, 100, 10),(20.0, 100, 10),(30.0, 100, 10),(40.0, 100, 10)],
[]],
}
self.base_connectivity = {
#RES BONDS ANGLES DIHEDRALS IMPROPERS V-SITE
"DA": [[(2,3),(3,4),(4,5),(5,6),(6,3)],
[(1,2,3), (2,3,4), (2,3,6), (3,4,5), (3,2,7), (4,3,6), (4,5,6), (5,6,3)],
[(0,1,2,3), (1,2,3,4), (7,2,3,4), (3,2,7,8)],
[(3,4,5,6)],
[]],
"DC": [[(2,3),(3,4),(4,5),(5,3)],
[(1,2,3), (2,3,4), (2,3,5), (3,4,5), (3,2,6), (4,3,5), (4,5,3)],
[(0,1,2,3), (1,2,3,4), (6,2,3,4), (3,2,6,7)],
[],
[]],
"DG": [[(2,3),(3,4),(4,5),(5,6),(6,3)],
[(1,2,3), (2,3,4), (2,3,6), (3,4,5), (3,2,7), (4,3,6), (4,5,6), (5,6,3)],
[(0,1,2,3), (1,2,3,4), (7,2,3,4), (3,2,7,8)],
[(3,4,5,6)],
[]],
"DT": [[(2,3),(3,4),(4,5),(5,3)],
[(1,2,3), (2,3,4), (2,3,5), (3,4,5), (3,2,6), (4,3,5), (4,5,3)],
[(0,1,2,3), (1,2,3,4), (6,2,3,4), (3,2,6,7)],
[],
[]],
}
#----+----------------+
## D | SPECIAL BONDS |
#----+----------------+
self.special = {
# Used for sulfur bridges
# ATOM 1 ATOM 2 BOND LENGTH FORCE CONSTANT
(("SC1","CYS"), ("SC1","CYS")): (0.24, None),
}
# By default use an elastic network
self.ElasticNetwork = False
# Elastic networks bond shouldn't lead to exclusions (type 6)
# But Elnedyn has been parametrized with type 1.
self.EBondType = 6
#----+----------------+
## D | INTERNAL STUFF |
#----+----------------+
## BACKBONE BEAD TYPE ##
# Dictionary of default bead types (*D)
self.bbBeadDictD = hash(bbss,self.bbdef)
# Dictionary of dictionaries of types for specific residues (*S)
self.bbBeadDictS = dict([(i,hash(bbss,self.bbtyp[i])) for i in self.bbtyp.keys()])
# combine the connectivity records for different molecule types
self.connectivity = dict(self.base_connectivity.items() + self.aa_connectivity.items())
## BB BOND TYPE ##
# Dictionary of default abond types (*D)
self.bbBondDictD = hash(bbss,zip(self.bbldef,self.bbkb))
# Dictionary of dictionaries for specific types (*S)
self.bbBondDictS = dict([(i,hash(bbss,zip(self.bbltyp[i],self.bbkbtyp[i]))) for i in self.bbltyp.keys()])
# This is tricky to read, but it gives the right bondlength/force constant
## BBB ANGLE TYPE ##
# Dictionary of default angle types (*D)
self.bbAngleDictD = hash(bbss,zip(self.bbadef,self.bbka))
# Dictionary of dictionaries for specific types (*S)
self.bbAngleDictS = dict([(i,hash(bbss,zip(self.bbatyp[i],self.bbkatyp[i]))) for i in self.bbatyp.keys()])
## BBBB DIHEDRAL TYPE ##
# Dictionary of default dihedral types (*D)
self.bbDihedDictD = hash(bbss,zip(self.bbddef,self.bbkd,self.bbdmul))
# Dictionary of dictionaries for specific types (*S)
self.bbDihedDictS = dict([(i,hash(bbss,zip(self.bbdtyp[i],self.bbkdtyp[i]))) for i in self.bbdtyp.keys()])
# The following function returns the backbone bead for a given residue and
# secondary structure type.
# 1. Look up the proper dictionary for the residue
# 2. Get the proper type from it for the secondary structure
# If the residue is not in the dictionary of specials, use the default
# If the secondary structure is not listed (in the residue specific
# dictionary) revert to the default.
def bbGetBead(self,r1,ss="C"):
if r1 in dnares3:
return self.dna_bb['atoms']
elif r1 in rnares3:
return self.rna_bb['atoms']
else:
return self.bbBeadDictS.get(r1,self.bbBeadDictD).get(ss,self.bbBeadDictD.get(ss))
def bbGetBond(self,r,ca,ss):
# Retrieve parameters for each residue from table defined above
if r[0] in dnares3:
if ca == (0, 1):
return self.dna_bb['bonds'][0]
elif ca == (1, 2):
return self.dna_bb['bonds'][1]
else:
return self.dna_bb['bonds'][2]
# This is not implemented properly yet
elif r[0] in rnares3:
return self.rna_bb['bonds']
else:
b1 = self.bbBondDictS.get(r[0],self.bbBondDictD).get(ss[0],self.bbBondDictD.get(ss[0]))
b2 = self.bbBondDictS.get(r[1],self.bbBondDictD).get(ss[1],self.bbBondDictD.get(ss[1]))
# Determine which parameters to use for the bond
return ( (b1[0]+b2[0])/2, min(b1[1],b2[1]) )
    def bbGetAngle(self,r,ca,ss):
        """Return (angle, force constant) for the backbone angle over three residues.

        r:  triplet of residue names
        ca: triplet of bead position indices (used for the DNA backbone lookup)
        ss: triplet of secondary-structure codes
        """
        if r[0] in dnares3:
            # DNA backbone: pick the angle by the connected bead positions.
            if ca == (0, 1, 2):
                return self.dna_bb['angles'][0]
            elif ca == (1, 2, 0):
                return self.dna_bb['angles'][1]
            else:
                return self.dna_bb['angles'][2]
        # This is not implemented properly yet
        elif r[0] in rnares3:
            return self.rna_bb['angles']
        else:
            # PRO in helices is dominant
            if r[1] == "PRO" and ss[1] in "H123":
                return self.bbAngleDictS["PRO"].get(ss[1])
            else:
                # Retrieve parameters for each residue from table defined above
                # (residue-specific table if present, default table otherwise)
                a = [ self.bbAngleDictS.get(r[0],self.bbAngleDictD).get(ss[0],self.bbAngleDictD.get(ss[0])),
                      self.bbAngleDictS.get(r[1],self.bbAngleDictD).get(ss[1],self.bbAngleDictD.get(ss[1])),
                      self.bbAngleDictS.get(r[2],self.bbAngleDictD).get(ss[2],self.bbAngleDictD.get(ss[2])) ]
                # Sort according to force constant
                a.sort(key=lambda i: (i[1],i[0]))
                # This selects the set with the smallest force constant and the smallest angle
                return a[0]
    def bbGetDihedral(self,r,ca,ss):
        """Return dihedral parameters for the backbone torsion over four residues.

        Only the nucleic-acid backbone is handled here; for anything else the
        method falls through and returns None (protein backbone dihedrals are
        assigned via the Dihedral class instead).
        """
        if r[0] in dnares3:
            # DNA backbone: pick the torsion by the connected bead positions.
            if ca == (0, 1, 2, 0):
                return self.dna_bb['dih'][0]
            elif ca == (1, 2, 0, 1):
                return self.dna_bb['dih'][1]
            else:
                return self.dna_bb['dih'][2]
        # This is not implemented properly yet
        elif r[0] in rnares3:
            # NOTE(review): this returns the RNA *angle* table, not a 'dih'
            # table -- looks like a placeholder/copy-paste; confirm upstream.
            return self.rna_bb['angles']
def messages(self):
'''Prints any force-field specific logging messages.'''
import logging
logging.warning('Martini version 2.2dna is not yet finished.')
pass
#########################
## 7 # ELASTIC NETWORK ## -> @ELN <-
#########################
import math
## ELASTIC NETWORK ##
# Only the decay function is defined here, the network
# itself is set up through the Topology class
# The function to determine the decay scaling factor for the elastic network
# force constant, based on the distance and the parameters provided.
# This function is very versatile and can be fitted to most commonly used
# profiles, including a straight line (rate=0)
def decayFunction(distance,shift,rate,power):
    """Decay scaling factor for the elastic-network force constant.

    Computes exp(-rate*(distance-shift)**power); versatile enough to fit
    most common profiles, including a straight line (rate=0).
    """
    offset = distance - shift
    return math.exp(-rate * math.pow(offset, power))
def rubberBands(atomList,lowerBound,upperBound,decayFactor,decayPower,forceConstant,minimumForce):
    """Generate elastic-network ("rubber band") bonds between beads.

    atomList is consumed: entries are (bead, coordinates) pairs.  Each bead
    is paired with every later bead except its two nearest neighbours; a
    bond is emitted when the distance is under upperBound and the decayed
    force constant stays above minimumForce.  Returns bond dictionaries.
    """
    bands  = []
    upper2 = upperBound**2
    while len(atomList) > 3:
        beadI, coordI = atomList.pop(0)
        # Skip the two nearest neighbours of beadI.
        for beadJ, coordJ in atomList[2:]:
            # Mind the nm/A conversion -- This has to be standardized! Global use of nm?
            dist2 = distance2(coordI, coordJ)/100
            if dist2 >= upper2:
                continue
            dij  = math.sqrt(dist2)
            fscl = forceConstant*decayFunction(dij,lowerBound,decayFactor,decayPower)
            if fscl > minimumForce:
                bands.append({
                    "atoms": (beadI[0], beadJ[0]),
                    "parameters": (dij, "%f"%fscl),
                    "comments": "%s%d%s(%s)-%s%d%s(%s)"%(beadI[3],beadI[2],beadI[4],beadI[7],beadJ[3],beadJ[2],beadJ[4],beadJ[7])
                })
    return bands
#######################
## 8 # STRUCTURE I/O ## -> @IO <-
#######################
import logging,math,random,sys
#----+---------+
## A | PDB I/O |
#----+---------+
# Degrees-to-radians conversion factor.
d2r = 3.14159265358979323846264338327950288/180
# Reformatting of lines in structure file
# printf-style templates for writing PDB ATOM and CRYST1 records.
pdbAtomLine = "ATOM %5d %4s%4s %1s%4d%1s %8.3f%8.3f%8.3f%6.2f%6.2f\n"
pdbBoxLine = "CRYST1%9.3f%9.3f%9.3f%7.2f%7.2f%7.2f P 1 1\n"
def pdbBoxString(box):
    """Format a 9-component triclinic box (nm) as a PDB CRYST1 record
    (lengths in Angstrom, angles in degrees)."""
    # Box vectors
    u, v, w = box[0:3], box[3:6], box[6:9]
    # Box vector lengths
    nu,nv,nw = [math.sqrt(norm2(i)) for i in (u,v,w)]
    # Angles between the vectors; default to 90 degrees for zero-length vectors.
    alpha = 90 if nv*nw == 0 else math.acos(cos_angle(v,w))/d2r
    beta  = 90 if nu*nw == 0 else math.acos(cos_angle(u,w))/d2r
    gamma = 90 if nu*nv == 0 else math.acos(cos_angle(u,v))/d2r
    # nm -> Angstrom for the record.
    return pdbBoxLine % (10*norm(u),10*norm(v),10*norm(w),alpha,beta,gamma)
def pdbAtom(a):
    """Parse one PDB coordinate line.

    Returns 0 for a TER record (used downstream as a chain separator),
    otherwise (atom name, residue name, residue id, chain, x, y, z).
    The insertion-code character (column 27) is shifted 20 bits and folded
    into the residue id so numbering stays unique across insertions.
    """
    ##01234567890123456789012345678901234567890123456789012345678901234567890123456789
    ##ATOM   2155 HH11 ARG C 203     116.140  48.800   6.280  1.00  0.00
    if a.startswith("TER"):
        return 0
    name    = a[12:16].strip()
    resname = a[17:20].strip()
    resid   = int(a[22:26]) + (ord(a[26]) << 20)
    chain   = a[21]
    x, y, z = float(a[30:38]), float(a[38:46]), float(a[46:54])
    return (name, resname, resid, chain, x, y, z)
def pdbOut(atom,i=1):
    """Format one coarse-grained atom tuple as a PDB ATOM record.

    atom = (name, resname, resid-with-insertion-code, chain, x, y, z, ...);
    i is the serial number to print.  The insertion code packed into the
    residue id (see pdbAtom) is unpacked again here.
    """
    insc = atom[2]>>20
    resi = atom[2]-(insc<<20)
    if resi > 1000000:
        # With negative residue numbers the insertion code ends up one too low
        # Subtracting from the residue number then gives something very large
        insc += 1
        resi = atom[2]-(insc<<20)
    template = "ATOM  %5i  %-3s %3s%2s%4i%1s   %8.3f%8.3f%8.3f%6.2f%6.2f           %1s  \n"
    fields = (i, atom[0][:3], atom[1], atom[3], resi % 1000, chr(insc)) + atom[4:] + (1, 40, atom[0][0])
    return template % fields
def isPdbAtom(a):
    """True for lines this tool treats as atoms: ATOM always, HETATM only
    when the -hetatm option is set, and TER (parsed as a chain break)."""
    return (a.startswith("ATOM")
            or (options["-hetatm"] and a.startswith("HETATM"))
            or a.startswith("TER"))
def pdbBoxRead(a):
    """Convert a PDB CRYST1 record (lengths in Angstrom, angles in degrees)
    into a triclinic box given as three row vectors in nm."""
    la, lb, lc, aa, ab, ac = [float(i) for i in a.split()[1:7]]
    cosa, cosb = math.cos(d2r*aa), math.cos(d2r*ab)
    cosg, sing = math.cos(d2r*ac), math.sin(d2r*ac)
    # Third box vector; the first two lie along x and in the xy plane.
    wx = 0.1*lc*cosb
    wy = 0.1*lc*(cosa-cosb*cosg)/sing
    wz = math.sqrt(0.01*lc*lc - wx*wx - wy*wy)
    return [0.1*la, 0, 0, 0.1*lb*cosg, 0.1*lb*sing, 0, wx, wy, wz]
# Function for splitting a PDB file in chains, based
# on chain identifiers and TER statements
def pdbChains(pdbAtomList):
    """Split a parsed PDB atom list into chains.

    A falsy entry (the 0 that pdbAtom returns for TER) or a change of the
    chain identifier (field 3) starts a new chain.  Yields non-empty lists.
    """
    current = []
    for atom in pdbAtomList:
        if not atom:
            # TER record: flush whatever was collected so far.
            if current:
                yield current
            else:
                logging.info("Skipping empty chain definition")
            current = []
            continue
        if current and current[-1][3] != atom[3]:
            # Chain identifier changed without a TER: flush as well.
            yield current
            current = [atom]
        else:
            current.append(atom)
    if current:
        yield current
# Simple PDB iterator
def pdbFrameIterator(streamIterator):
    """Iterate models in a PDB stream, yielding (title, atoms, box) per frame.

    A trailing frame without an explicit ENDMDL is emitted as well, but only
    when it actually contains atoms.
    """
    title, atoms, box = [], [], []
    for line in streamIterator:
        if line.startswith("ENDMDL"):
            # End of a model: emit the frame and start collecting the next.
            yield "".join(title), atoms, box
            title, atoms, box = [], [], []
        elif line.startswith("TITLE"):
            title.append(line)
        elif line.startswith("CRYST1"):
            box = pdbBoxRead(line)
        elif line.startswith("ATOM") or line.startswith("HETATM"):
            atoms.append(pdbAtom(line))
    if atoms:
        yield "".join(title), atoms, box
#----+---------+
## B | GRO I/O |
#----+---------+
groline = "%5d%-5s%5s%5d%8.3f%8.3f%8.3f\n"
def groBoxRead(a):
    """Parse the GRO box line into a full 9-component triclinic definition.

    GRO order is xx yy zz [xy xz yx yz zx zy]; missing components are padded
    with zeros so rectangular boxes work too.  Returned order is
    xx,xy,xz,yx,yy,yz,zx,zy,zz.
    """
    fields = [float(i) for i in a.split()]
    fields.extend(6 * [0])
    order = (0, 3, 4, 5, 1, 6, 7, 8, 2)
    return tuple(fields[k] for k in order)
def groAtom(a):
    """Parse one fixed-column GRO atom line.

    Returns (atom name, residue name, residue id, chain, x, y, z) with the
    coordinates converted nm -> Angstrom.  GRO has no chain column, so the
    chain is a single space; the residue id is offset by ord(' ')<<20 to
    stay consistent with pdbAtom's insertion-code packing.
    """
    # 32 equals ord(' '), i.e. an empty insertion code
    constant = 32<<20
    #012345678901234567890123456789012345678901234567890
    #    1PRN      N    1   4.168  11.132   5.291
    name    = a[10:15].strip()
    resname = a[5:10].strip()
    resid   = int(a[:5]) + constant
    x, y, z = 10*float(a[20:28]), 10*float(a[28:36]), 10*float(a[36:44])
    return (name, resname, resid, " ", x, y, z)
# Simple GRO iterator
def groFrameIterator(streamIterator):
    """Iterate frames in a GRO stream, yielding (title, atoms, box) per frame.

    Stops cleanly at end of input or at an empty atom-count line.

    FIX: uses the builtin next() instead of the Python-2-only .next()
    method, so the function also works with Python 3 iterators and file
    objects (next() is available from Python 2.6 on, so Python 2 callers
    are unaffected).
    """
    while True:
        try:
            title = next(streamIterator)
        except StopIteration:
            break
        natoms = next(streamIterator).strip()
        if not natoms:
            break
        natoms = int(natoms)
        # One fixed-column atom line per atom, then the box line.
        atoms = [groAtom(next(streamIterator)) for i in range(natoms)]
        box   = groBoxRead(next(streamIterator))
        yield title, atoms, box
#----+-------------+
## C | GENERAL I/O |
#----+-------------+
# It is not entirely clear where this fits in best.
# Called from main.
def getChargeType(resname,resid,choices):
    '''Get user input for the charge of residues, based on list with choices.'''
    # print(...) with a single argument behaves identically under Python 2's
    # print statement, and dict.items() replaces the Python-2-only
    # iteritems() -- both keep this function forward-compatible.
    print('Which %s type do you want for residue %s:'%(resname,resid+1))
    for i,choice in choices.items():
        print('%s. %s'%(i,choice))
    choice = None
    while choice not in choices.keys():
        # NOTE(review): on Python 2, input() eval()s whatever the user types,
        # which is unsafe on untrusted input; switching to raw_input()/int()
        # would change how the answer is matched against the keys, so it is
        # only flagged here, not changed.
        choice = input('Type a number:')
    return choices[choice]
# *NOTE*: This should probably be a CheckableStream class that
# reads in lines until either of a set of specified conditions
# is met, then setting the type and from thereon functioning as
# a normal stream.
def streamTag(stream):
    """Wrap a structure input and tag it with its file type.

    First yields "GRO" or "PDB" (decided by whether the second line is a
    bare atom count), then yields every line of the input, including the
    two lines consumed for the detection.
    """
    # Accept either an already-open stream or a file path; paths are opened
    # here, transparently handling gzip-compressed files.
    if type(stream) == str:
        if stream.endswith("gz"):
            logging.info('Read input structure from zipped file.')
            src = gzip.open(stream)
        else:
            logging.info('Read input structure from file.')
            src = open(stream)
    else:
        logging.info('Read input structure from command-line')
        src = stream
    # Peek at the first two lines to decide on the format.
    peek = [src.readline(), src.readline()]
    if peek[-1].strip().isdigit():
        # A bare number on line two: GRO (its second line is the atom count).
        logging.info("Input structure is a GRO file. Chains will be labeled consecutively.")
        yield "GRO"
    else:
        # Anything else is assumed to be PDB.
        logging.info("Input structure is a PDB file.")
        yield "PDB"
    # Replay the two peeked lines, then the rest of the stream.
    for line in peek:
        yield line
    for line in src:
        yield line
#----+-----------------+
## D | STRUCTURE STUFF |
#----+-----------------+
# This list allows to retrieve atoms based on the name or the index
# If standard, dictionary type indexing is used, only exact matches are
# returned. Alternatively, partial matching can be achieved by setting
# a second 'True' argument.
class Residue(list):
    """A list of atom tuples with name-based lookup.

    residue[i]            -> i-th atom (plain list behaviour)
    residue["CA"]         -> first atom named "CA", or None
    residue[("C", True)]  -> all atoms whose name contains "C"
    residue[("CA", False)]-> all atoms whose name equals "CA"
    """
    def __getitem__(self, tag):
        if type(tag) == int:
            # Plain positional indexing: defer to list.
            return list.__getitem__(self, tag)
        if type(tag) == str:
            # First atom with a matching name; None when absent.
            for atom in self:
                if atom[0] == tag:
                    return atom
            return
        if tag[1]:
            # (pattern, True): substring matches
            return [atom for atom in self if tag[0] in atom[0]]
        # (name, False): exact matches only
        return [atom for atom in self if atom[0] == tag[0]]
def residues(atomList):
    """Group a flat atom list into Residue objects; consecutive atoms belong
    together while residue name, residue id and chain id are all unchanged."""
    group = [atomList[0]]
    for atom in atomList[1:]:
        same = (atom[1] == group[-1][1] and   # Residue name check
                atom[2] == group[-1][2] and   # Residue id check
                atom[3] == group[-1][3])      # Chain id check
        if same:
            group.append(atom)
        else:
            yield Residue(group)
            group = [atom]
    yield Residue(group)
def residueDistance2(r1,r2):
    """Smallest squared distance between any atom of r1 and any atom of r2."""
    return min(distance2(i, j) for i in r1 for j in r2)
def breaks(residuelist,selection=("N","CA","C"),cutoff=2.5):
    """Detect chain breaks: indices of residues whose selected backbone atoms
    lie further than *cutoff* (squared) from those of the previous residue."""
    # Collect backbone-atom coordinates per residue...
    coords = [[atom[4:] for atom in residue if atom[0] in selection]
              for residue in residuelist]
    # ...dropping residues (e.g. waters in mixed residues) with none selected.
    coords = [res for res in coords if res]
    # Atom order within a residue is not guaranteed, so breaks are inferred
    # from the minimal inter-residue backbone distance, not a fixed pair.
    return [i + 1 for i in range(len(coords) - 1)
            if residueDistance2(coords[i], coords[i + 1]) > cutoff]
def contacts(atoms,cutoff=5):
    """All index pairs (i, j) with i < j whose squared distance is below
    *cutoff*."""
    coords = [atom[4:] for atom in atoms]
    n = len(atoms)
    return [(i, j) for i in range(n - 1) for j in range(i + 1, n)
            if distance2(coords[i], coords[j]) < cutoff]
def add_dummy(beads,dist=0.11,n=2):
    """Append n dummy ('SCD') beads near the last bead in *beads*.

    Each dummy is displaced from the current last bead along a random
    direction of length *dist*, with alternating orientation; as noted in
    the original, this only behaves sensibly for n of 1 or 2.  Returns the
    extended bead list.
    """
    # Random direction in the [-1,1] cube, rescaled to length dist.
    direction = [random.random()*2.-1, random.random()*2.-1, random.random()*2.-1]
    scale  = norm(direction)/dist
    offset = [c/scale for c in direction]
    # Sign flips (and doubles) so the second dummy steps back past the first.
    sign = 1
    for _ in range(n):
        last = beads[-1]
        beads.append(('SCD',
                      tuple(c + sign*d for c, d in zip(last[1], offset)),
                      last[2]))
        sign *= -2
    return beads
def check_merge(chains, m_list=[], l_list=[], ss_cutoff=0):
    """Decide which chains must be merged into one moleculetype.

    chains:    list of Chain objects
    m_list:    merge specifications ('all', chain ids, or one-based numbers)
    l_list:    link specifications; linked chains must end up together
    ss_cutoff: squared distance cutoff for automatic cystine-bridge
               detection between CYS SG atoms (0 disables the search)

    Returns (order, merges): the order in which chains are written, and the
    groups of chain indices that each form one moleculetype.

    NOTE(review): m_list/l_list are mutable default arguments; safe as long
    as no caller mutates them.  Also relies on Python 2's range() returning
    a list (it is reversed and sliced in place below).
    """
    chainIndex = range(len(chains))
    if 'all' in m_list:
        logging.info("All chains will be merged in a single moleculetype.")
        return chainIndex, [chainIndex]
    chainID = [chain.id for chain in chains]
    # Mark the combinations of chains that need to be merged
    merges = []
    if m_list:
        # Build a dictionary of chain IDs versus index
        # To give higher priority to top chains the lists are reversed
        # before building the dictionary
        chainIndex.reverse()
        chainID.reverse()
        dct = dict(zip(chainID,chainIndex))
        chainIndex.reverse()
        # Convert chains in the merge_list to numeric, if necessary
        # NOTE The internal numbering is zero-based, while the
        # command line chain indexing is one-based. We have to add
        # one to the number in the dictionary to bring it on par with
        # the numbering from the command line, but then from the
        # result we need to subtract one again to make indexing
        # zero-based
        merges = [[(i.isdigit() and int(i) or dct[i]+1)-1 for i in j] for j in m_list]
        for i in merges:
            i.sort()
    # Rearrange merge list to a list of pairs
    pairs = [(i[j],i[k]) for i in merges for j in range(len(i)-1) for k in range(j+1,len(i))]
    # Check each combination of chains for connections based on
    # ss-bridges, links and distance restraints
    for i in chainIndex[:-1]:
        for j in chainIndex[i+1:]:
            if (i,j) in pairs:
                continue
            # Check whether any link links these two groups
            for a,b in l_list:
                if ((a in chains[i] and b in chains[j]) or
                    (a in chains[j] and b in chains[i])):
                    logging.info("Merging chains %d and %d to allow link %s"%(i+1,j+1,str((a,b))))
                    pairs.append( i<j and (i,j) or (j,i) )
                    break
            if (i,j) in pairs:
                continue
            # Check whether any cystine bond given links these two groups
            #for a,b in s_list:
            #    if ((a in chains[i] and b in chains[j]) or
            #        (a in chains[j] and b in chains[i])):
            #        logging.info("Merging chains %d and %d to allow cystine bridge"%(i+1,j+1))
            #        pairs.append( i<j and (i,j) or (j,i) )
            #        break
            #if (i,j) in pairs:
            #    continue
            # Check for cystine bridges based on distance
            if not ss_cutoff:
                continue
            # Get SG atoms from cysteines from either chain
            # Check this pair of chains
            for cysA in chains[i]["CYS"]:
                for cysB in chains[j]["CYS"]:
                    d2 = distance2(cysA["SG"][4:7],cysB["SG"][4:7])
                    if d2 <= ss_cutoff:
                        logging.info("Found SS contact linking chains %d and %d (%f nm)"%(i+1,j+1,math.sqrt(d2)/10))
                        pairs.append((i,j))
                        break
                if (i,j) in pairs:
                    break
    # Sort the combinations
    pairs.sort(reverse=True)
    # Collapse the pairwise connections into connected groups ("merges").
    merges = []
    while pairs:
        merges.append(set([pairs[-1][0]]))
        for i in range(len(pairs)-1,-1,-1):
            if pairs[i][0] in merges[-1]:
                merges[-1].add(pairs.pop(i)[1])
            elif pairs[i][1] in merges[-1]:
                merges[-1].add(pairs.pop(i)[0])
    merges = [list(i) for i in merges]
    for i in merges:
        i.sort()
    order = [j for i in merges for j in i]
    if merges:
        logging.warning("Merging chains.")
        logging.warning("This may change the order of atoms and will change the number of topology files.")
        logging.info("Merges: " + ", ".join([str([j+1 for j in i]) for i in merges]))
    if len(merges) == 1 and len(merges[0]) > 1 and set(merges[0]) == set(chainIndex):
        logging.info("All chains will be merged in a single moleculetype")
    # Determine the order for writing; merged chains go first
    merges.extend([[j] for j in chainIndex if not j in order])
    order.extend([j for j in chainIndex if not j in order])
    return order, merges
## !! NOTE !! ##
## XXX The chain class needs to be simplified by extracting things to separate functions/classes
class Chain:
    """One chain from the input structure: residues plus data derived from
    them (sequence, secondary structure, breaks, links) and the
    coarse-grained beads built from them."""
    # Attributes defining a chain
    # When copying a chain, or slicing, the attributes in this list have to
    # be handled accordingly.
    _attributes = ("residues","sequence","seq","ss","ssclass","sstypes")
    def __init__(self,options,residuelist=[],name=None,multiscale=False):
        """Build a chain from a list of residues.

        options:     the global option dictionary (kept on the instance)
        residuelist: list of residues (each a list of atom tuples)
        name:        chain identifier; when absent it is taken from the
                     chain field of the first atom
        multiscale:  flag for multiscale (virtual-site) treatment

        NOTE(review): residuelist is a mutable default argument; safe only
        as long as no caller mutates the default.
        """
        self.residues = residuelist
        self._atoms = [atom[:3] for residue in residuelist for atom in residue]
        self.sequence = [residue[0][1] for residue in residuelist]
        # *NOTE*: Check for unknown residues and remove them if requested
        # before proceeding.
        self.seq = "".join([AA321.get(i,"X") for i in self.sequence])
        self.ss = ""
        self.ssclass = ""
        self.sstypes = ""
        self.mapping = []
        self.multiscale = multiscale
        self.options = options
        # Unknown residues (anything not in the AA321 translation table)
        self.unknowns = "X" in self.seq
        # Determine the type of chain
        self._type = ""
        self.type()
        # Determine number of atoms
        self.natoms = len(self._atoms)
        # BREAKS: List of indices of residues where a new fragment starts
        # Only when polymeric (protein, DNA, RNA, ...)
        # For now, let's remove it for the Nucleic acids...
        self.breaks = self.type() in ("Protein","Mixed") and breaks(self.residues) or []
        # LINKS: List of pairs of pairs of indices of linked residues/atoms
        # This list is used for cysteine bridges and peptide bonds involving side chains
        # The list has items like ((#resi1, #atid1), (#resi2, #atid2))
        # When merging chains, the residue number needs to be updated, but the atom id
        # remains unchanged.
        # For the coarse grained system, it needs to be checked which beads the respective
        # atoms fall in, and bonded terms need to be added there.
        self.links = []
        # Chain identifier; try to read from residue definition if no name is given
        self.id = name or residuelist and residuelist[0][0][3] or ""
        # Container for coarse grained beads
        self._cg = None
def __len__(self):
# Return the number of residues
# DNA/RNA contain non-CAP d/r to indicate type. We remove those first.
return len(''.join(i for i in self.seq if i.isupper()))
def __add__(self,other):
newchain = Chain(name=self.id+"+"+other.id)
# Combine the chain items that can be simply added
for attr in self._attributes:
setattr(newchain, attr, getattr(self,attr) + getattr(other,attr))
# Set chain items, shifting the residue numbers
shift = len(self)
newchain.breaks = self.breaks + [shift] + [i+shift for i in other.breaks]
newchain.links = self.links + [((i[0]+shift,i[1]),(j[0]+shift,j[1])) for i,j in other.links]
newchain.natoms = len(newchain.atoms())
newchain.multiscale = self.multiscale or other.multiscale
# Return the merged chain
return newchain
def __eq__(self,other):
return (self.seq == other.seq and
self.ss == other.ss and
self.breaks == other.breaks and
self.links == other.links and
self.multiscale == other.multiscale)
# Extract a residue by number or the list of residues of a given type
# This facilitates selecting residues for links, like chain["CYS"]
def __getitem__(self,other):
if type(other) == str:
if not other in self.sequence:
return []
return [i for i in self.residues if i[0][1] == other]
elif type(other) == tuple:
# This functionality is set up for links
# between coarse grained beads. So these are
# checked first,
for i in self.cg():
if other == i[:4]:
return i
else:
for i in self.atoms():
if other[:3] == i[:3]:
return i
else:
return []
return self.sequence[other]
    # Extract a piece of a chain as a new chain
    def __getslice__(self,i,j):
        """Return residues i:j as a new Chain (Python 2 slicing protocol)."""
        newchain = Chain(self.options,name=self.id)
        # Extract the slices from all lists
        for attr in self._attributes:
            setattr(newchain, attr, getattr(self,attr)[i:j])
        # Breaks that fall within the start and end of this chain need to be passed on.
        # Residue numbering is increased by 20 bits!!
        # XXX I don't know if this works.
        # NOTE(review): ch_sta/ch_end carry the 20-bit insertion-code offset
        # while crack/link are shifted by <<20 here -- the comparison looks
        # inconsistent; confirm against upstream martinize before relying on it.
        ch_sta,ch_end = newchain.residues[0][0][2],newchain.residues[-1][0][2]
        newchain.breaks = [crack for crack in self.breaks if ch_sta < (crack<<20) < ch_end]
        newchain.links = [link for link in self.links if ch_sta < (link<<20) < ch_end]
        newchain.multiscale = self.multiscale
        newchain.natoms = len(newchain.atoms())
        newchain.type()
        # Return the chain slice
        return newchain
def _contains(self,atomlist,atom):
atnm,resn,resi,chn = atom
# If the chain does not match, bail out
if chn != self.id:
return False
# Check if the whole tuple is in
if atnm and resn and resi:
return (atnm,resn,resi) in self.atoms()
# Fetch atoms with matching residue id
match = (not resi) and atomlist or [j for j in atomlist if j[2] == resi]
if not match:
return False
# Select atoms with matching residue name
match = (not resn) and match or [j for j in match if j[1] == resn]
if not match:
return False
# Check whether the atom is given and listed
if not atnm or [j for j in match if j[0] == atnm]:
return True
# It just is not in the list!
return False
def __contains__(self,other):
return self._contains(self.atoms(),other) or self._contains(self.cg(),other)
def __hash__(self):
return id(self)
def atoms(self):
if not self._atoms:
self._atoms = [atom[:3] for residue in self.residues for atom in residue]
return self._atoms
    # Split a chain based on residue types; each subchain can have only one type
    def split(self):
        """Split this chain into single-type subchains (Protein/Nucleic/...).

        Returns the list of subchains; the trailing slice is always included,
        so an unmixed chain comes back as a one-element list.
        """
        chains = []
        chainStart = 0
        for i in range(len(self.sequence)-1):
            if residueTypes.get(self.sequence[i],"Unknown") != residueTypes.get(self.sequence[i+1],"Unknown"):
                # Use the __getslice__ method to take a part of the chain.
                chains.append(self[chainStart:i+1])
                chainStart = i+1
        if chains:
            logging.debug('Splitting chain %s in %s chains'%(self.id,len(chains)+1))
        return chains + [self[chainStart:]]
def getname(self,basename=None):
name = []
if basename: name.append(basename)
if self.type() and not basename: name.append(self.type())
if type(self.id) == int:
name.append(chr(64+self.id))
elif self.id.strip():
name.append(str(self.id))
return "_".join(name)
def set_ss(self,ss,source="self"):
if len(ss) == 1:
self.ss = len(self)*ss
else:
self.ss = ss
# Infer the Martini backbone secondary structure types
self.ssclass, self.sstypes = ssClassification(self.ss,source)
def dss(self,method=None,executable=None):
# The method should take a list of atoms and return a
# string of secondary structure classifications
if self.type() == "Protein":
if method:
atomlist = [atom for residue in self.residues for atom in residue]
self.set_ss(ssDetermination[method](self,atomlist,executable),source=method)
else:
self.set_ss(len(self)*"C")
else:
self.set_ss(len(self.sequence)*"-")
return self.ss
def type(self,other=None):
if other:
self._type = other
elif not self._type and len(self):
# Determine the type of chain
self._type = set([residueTypes.get(i,"Unknown") for i in set(self.sequence)])
self._type = len(self._type) > 1 and "Mixed" or list(self._type)[0]
return self._type
    # XXX The following (at least the greater part of it) should be made a separate function, put under "MAPPING"
    def cg(self,force=False,com=False):
        """Return the coarse-grained bead list for this chain, building and
        caching it on first use (force=True regenerates the cache).

        Each bead is (name, resname[:3], resid, chain, x, y, z, ss-number).
        com=True enables the DNA-specific shuffling of O3' atoms to the
        following residue, which only matters for center-of-mass mapping.
        """
        # Generate the coarse grained structure
        # Set the b-factor field to something that reflects the secondary structure
        # If the coarse grained structure is set already, just return,
        # unless regeneration is forced.
        if self._cg and not force:
            return self._cg
        self._cg = []
        atid = 1
        bb = [1]
        fail = False
        previous = ''
        for residue,rss,resname in zip(self.residues,self.sstypes,self.sequence):
            # For DNA we need to get the O3' to the following residue when calculating COM
            # The force and com options ensure that this part does not affect itp generation or anything else
            if com:
                # Just an initialization, this should complain if it isn't updated in the loop
                store = 0
                for ind, i in enumerate(residue):
                    if i[0] == "O3'":
                        if previous != '':
                            # Swap in the O3' carried over from the previous residue.
                            residue[ind] = previous
                            previous = i
                        else:
                            store = ind
                            previous = i
                # We couldn't remove the O3' from the 5' end residue during the loop so we do it now
                if store > 0:
                    del residue[store]
            # Check if residues names has changed, for example because user has set residues interactively.
            residue = [(atom[0],resname)+atom[2:] for atom in residue]
            if residue[0][1] in ("SOL","HOH","TIP"):
                continue
            if not residue[0][1] in CoarseGrained.mapping.keys():
                logging.warning("Skipped unknown residue %s\n"%residue[0][1])
                continue
            # Get the mapping for this residue
            # CG.map returns bead coordinates and mapped atoms
            # This will fail if there are (too many) atoms missing, which is
            # only problematic if a mapped structure is written; the topology
            # is inferred from the sequence. So this is the best place to raise
            # an error
            try:
                # NOTE(review): 'map' here shadows the builtin; presumably it
                # is the project's mapping function from the MAPPING section
                # -- confirm there.
                beads, ids = map(residue,ca2bb=self.options['ForceField'].ca2bb)
                beads = zip(CoarseGrained.names[residue[0][1]],beads,ids)
                if residue[0][1] in self.options['ForceField'].polar:
                    beads = add_dummy(beads,dist=0.14,n=2)
                elif residue[0][1] in self.options['ForceField'].charged:
                    beads = add_dummy(beads,dist=0.11,n=1)
            except ValueError:
                logging.error("Too many atoms missing from residue %s %d(ch:%s):",residue[0][1],residue[0][2]-(32<<20),residue[0][3])
                logging.error(repr([ i[0] for i in residue ]))
                fail = True
                # NOTE(review): after a failure 'beads' still holds the
                # previous residue's value (or is unbound for the very first
                # residue) when the loop below runs -- confirm intent.
            for name,(x,y,z),ids in beads:
                # Add the bead with coordinates and secondary structure id to the list
                self._cg.append((name,residue[0][1][:3],residue[0][2],residue[0][3],x,y,z,ss2num[rss]))
                # Add the ids to the list, after converting them to indices to the list of atoms
                self.mapping.append([atid+i for i in ids])
            # Increment the atom id; This pertains to the atoms that are included in the output.
            atid += len(residue)
            # Keep track of the numbers for CONECTing
            bb.append(bb[-1]+len(beads))
        if fail:
            logging.error("Unable to generate coarse grained structure due to missing atoms.")
            sys.exit(1)
        return self._cg
def conect(self):
# Return pairs of numbers that should be CONECTed
# First extract the backbone IDs
cg = self.cg()
bb = [i+1 for i,j in zip(range(len(cg)),cg) if j[0] == "BB"]
bb = zip(bb,bb[1:]+[len(bb)])
# Set the backbone CONECTs (check whether the distance is consistent with binding)
conect = [(i,j) for i,j in bb[:-1] if distance2(cg[i-1][4:7],cg[j-1][4:7]) < 14]
# Now add CONECTs for sidechains
for i,j in bb:
nsc = j-i-1
##################
## 7 # TOPOLOGY ## -> @TOP <-
##################
import logging,math
# This is a generic class for Topology Bonded Type definitions
class Bonded:
    """Base class for bonded topology terms (bonds, angles, dihedrals, ...).

    Instances carry the atom ids, a gromacs interaction type, the parameter
    list, free-form comments and an optional category used for sorting.
    """
    # The init method is generic to the bonded types,
    # but may call the set method if atoms are given
    # as (ID, ResidueName, SecondaryStructure) tuples
    # The set method is specific to the different types.
    def __init__(self,other=None,options=None,**kwargs):
        self.atoms = []
        self.type = -1
        self.parameters = []
        self.comments = []
        self.category = None
        if options and type(options) == dict:
            self.options = options
        if other:
            if other.__class__ == self.__class__:
                # Copy constructor: take over every public attribute.
                for attr in dir(other):
                    if not attr[0] == "_":
                        setattr(self,attr,getattr(other,attr))
            elif type(other) == dict:
                # Initialize attributes from a dictionary by key name.
                for attr in other.keys():
                    setattr(self,attr,other[attr])
            elif type(other) in (list,tuple):
                # A bare sequence is taken to be the atom ids.
                self.atoms = other
        # Keyword arguments set attributes directly, overriding anything
        # copied from 'other'.
        for key in kwargs:
            setattr(self,key,kwargs[key])
        # Atoms given as (ID, ResidueName[, SecondaryStructure]) tuples are
        # resolved to actual parameters by the subclass' set() method.
        if self.atoms and type(self.atoms[0]) == tuple:
            self.set(self.atoms,**kwargs)
    def __nonzero__(self):
        # Python 2 truth value: a term without atoms is "empty".
        return bool(self.atoms)
    def __str__(self):
        if not self.atoms or not self.parameters:
            return ""
        parts = ["%5d" % i for i in self.atoms]
        # For exclusions, no type is defined, which equals -1
        if self.type != -1:
            parts.append(" %5d " % self.type)
        # Print integers and floats in proper format and neglect None terms
        parts.extend([formatString(i) for i in self.parameters if i != None])
        if self.comments:
            parts.append(';')
            if type(self.comments) == str:
                parts.append(self.comments)
            else:
                parts.extend([str(i) for i in self.comments])
        return " ".join(parts)
    def __iadd__(self,num):
        # Shift all atom ids by num (used when concatenating topologies).
        self.atoms = [i+int(num) for i in self.atoms]
        return self
    def __add__(self,num):
        shifted = self.__class__(self)
        shifted += num
        return shifted
    def __eq__(self,other):
        # Compare against a bare id list/tuple, or fully against another term.
        if type(other) in (list,tuple):
            return self.atoms == other
        return self.atoms == other.atoms and self.type == other.type and self.parameters == other.parameters
    # This function needs to be overridden for descendents
    def set(self,atoms,**kwargs):
        pass
# The set method of this class will look up parameters for backbone beads
# Side chain bonds ought to be set directly, using the constructor
# providing atom numbers, bond type, and parameters
# Constraints are bonds with kb = None, which can be extracted
# using the category
class Bond(Bonded):
    """A backbone bond; becomes a constraint when the force constant is None."""
    def set(self,atoms,**kwargs):
        # atoms: (id, resname, ss, ca-position) per bead
        ids,r,ss,ca = zip(*atoms)
        self.atoms = ids
        self.type = 1
        self.positionCa = ca
        self.comments = "%s(%s)-%s(%s)" % (r[0],ss[0],r[1],ss[1])
        # The category can be used to keep bonds sorted
        self.category = kwargs.get("category")
        self.parameters = self.options['ForceField'].bbGetBond(r,ca,ss)
        # Bonds also can be constraints. We could change the type further on, but this is more general.
        # Even better would be to add a new type: BB-Constraint
        if self.parameters[1] == None:
            self.category = 'Constraint'
    def __str__(self):
        # Suppress printing of bonds with a force constant of exactly 0.
        if len(self.parameters) > 1 and self.parameters[1] == 0:
            return ""
        return Bonded.__str__(self)
# Similar to the preceding class
class Angle(Bonded):
    """Backbone angle over three beads; parameters come from the force
    field's bbGetAngle lookup."""
    def set(self,atoms,**kwargs):
        # atoms: (id, resname, ss, ca-position) per bead
        ids,r,ss,ca = zip(*atoms)
        self.atoms = ids
        self.type = 2
        self.positionCa = ca
        self.comments = "%s(%s)-%s(%s)-%s(%s)" % (r[0],ss[0],r[1],ss[1],r[2],ss[2])
        self.category = kwargs.get("category")
        self.parameters = self.options['ForceField'].bbGetAngle(r,ca,ss)
# Similar to the preceding class
class Vsite(Bonded):
    """Virtual-site definition; parameters are passed in directly via the
    'parameters' keyword rather than looked up."""
    def set(self,atoms,**kwargs):
        ids,r,ss,ca = zip(*atoms)
        self.atoms = ids
        self.type = 1
        self.positionCa = ca
        self.comments = "%s"% (r[0])
        self.category = kwargs.get("category")
        self.parameters = kwargs.get("parameters")
# Similar to the preceding class
class Exclusion(Bonded):
    """Non-bonded exclusion; keeps the default type of -1 so that no
    interaction type is printed (see Bonded.__str__)."""
    def set(self,atoms,**kwargs):
        ids,r,ss,ca = zip(*atoms)
        self.atoms = ids
        self.positionCa = ca
        self.comments = "%s"% (r[0])
        self.category = kwargs.get("category")
        self.parameters = kwargs.get("parameters")
# Similar to the preceding class
class Dihedral(Bonded):
    """Backbone dihedral over four beads; the parameters depend on the
    secondary-structure pattern of the four-residue stretch."""
    def set(self,atoms,**kwargs):
        ids,r,ss,ca = zip(*atoms)
        self.atoms = ids
        self.type = 1
        self.positionCa = ca
        self.comments = "%s(%s)-%s(%s)-%s(%s)-%s(%s)" % (r[0],ss[0],r[1],ss[1],r[2],ss[2],r[3],ss[3])
        self.category = kwargs.get("category")
        pattern = ''.join(i for i in ss)
        if pattern == 'FFFF':
            # Collagen
            self.parameters = self.options['ForceField'].bbDihedDictD['F']
        elif pattern == 'EEEE' and self.options['ExtendedDihedrals']:
            # Use dihedrals
            self.parameters = self.options['ForceField'].bbDihedDictD['E']
        elif set(ss).issubset("H123"):
            # Helix
            self.parameters = self.options['ForceField'].bbDihedDictD['H']
        else:
            # No backbone dihedral for anything else.
            self.parameters = None
# This list allows to retrieve Bonded class items based on the category
# If standard, dictionary type indexing is used, only exact matches are
# returned. Alternatively, partial matching can be achieved by setting
# a second 'True' argument.
class CategorizedList(list):
    """A list of categorized items with category-based lookup.

    cl[i]              -> positional access (plain list behaviour)
    cl["cat"]          -> items whose category equals "cat"
    cl[("c", True)]    -> items whose category contains "c"
    cl[("cat", False)] -> exact category matches
    """
    def __getitem__(self,tag):
        if type(tag) == int:
            # Plain positional indexing: defer to list.
            return list.__getitem__(self,tag)
        if type(tag) == str:
            return [item for item in self if item.category == tag]
        if tag[1]:
            # (pattern, True): substring match on the category
            return [item for item in self if tag[0] in item.category]
        return [item for item in self if item.category == tag[0]]
class Topology:
    """A gromacs moleculetype: atoms plus categorized bonded terms, built
    from a Chain, another Topology, or a plain molecule list."""
    def __init__(self,other=None,options=None,name=""):
        """Initialize an (optionally empty) topology.

        other:   a Topology to copy, a Chain to convert, or a molecule list
        options: the global option dictionary (may be None/empty)
        name:    moleculetype name
        """
        self.name = ''
        self.nrexcl = 1
        self.atoms = CategorizedList()
        self.vsites = CategorizedList()
        self.exclusions = CategorizedList()
        self.bonds = CategorizedList()
        self.angles = CategorizedList()
        self.dihedrals = CategorizedList()
        self.impropers = CategorizedList()
        self.constraints = CategorizedList()
        self.posres = CategorizedList()
        self.sequence = []
        self.secstruc = ""
        # Okay, this is sort of funny; we will add a
        #   #define mapping virtual_sitesn
        # to the topology file, followed by a header
        #   [ mapping ]
        self.mapping = []
        # For multiscaling we have to keep track of the number of
        # real atoms that correspond to the beads in the topology
        self.natoms = 0
        if options:
            self.options = options
        else:
            self.options = {}
        # BUGFIX: the multiscale flag used to be read as options['multi']
        # *before* the None check above, which crashed for the declared
        # default options=None; read it from the normalized dict instead.
        self.multiscale = self.options.get('multi', False)
        if not other:
            # Returning an empty instance
            return
        elif isinstance(other,Topology):
            for attrib in ["atoms","vsites","bonds","angles","dihedrals","impropers","constraints","posres"]:
                setattr(self,attrib,getattr(other,attrib,[]))
        elif isinstance(other,Chain):
            if other.type() == "Protein":
                self.fromAminoAcidSequence(other)
            elif other.type() == "Nucleic":
                # Currently there are no Martini Nucleic Acids
                self.fromNucleicAcidSequence(other)
            elif other.type() == "Mixed":
                logging.warning('Mixed Amino Acid /Nucleic Acid chains are not yet implemented')
                # Mixed chains (e.g. a protein with a covalently bound piece
                # of DNA) would have to be split into typed blocks, linked,
                # and recombined -- not implemented.
                pass
            else:
                # This chain should not be polymeric, but a collection of molecules
                # For each unique residue type fetch the proper moleculetype
                self.fromMoleculeList(other)
        if name:
            self.name = name
def __iadd__(self, other):
    """Merge another topology (or anything Topology() accepts) into this one in place.

    Atom numbers, residue numbers and charge-group numbers of the incoming
    topology are shifted so that they continue where this topology ends, and
    every bonded term is shifted by the same atom-number offset.
    """
    if not isinstance(other, Topology):
        other = Topology(other)
    offset = len(self.atoms)
    prev = self.atoms[-1]
    # The following used to work via zip > list expansions > zip back, but that
    # only works if all the tuples in the original list are of equal length.
    # With masses and charges that is not necessarily the case.
    for entry in other.atoms:
        fields = list(entry)
        fields[0] += offset     # Update atom numbers
        fields[2] += prev[2]    # Update residue numbers
        fields[5] += prev[5]    # Update charge group numbers
        self.atoms.append(tuple(fields))
    for section in ("bonds", "vsites", "angles", "dihedrals", "impropers", "constraints", "posres"):
        target = getattr(self, section)
        for term in getattr(other, section):
            target.append(term + offset)
    return self
def __add__(self, other):
    """Return a new Topology that is the concatenation of this one and *other*."""
    combined = Topology(self)
    if not isinstance(other, Topology):
        other = Topology(other)
    combined += other
    return combined
def __str__(self):
    """Render this topology as Gromacs .itp text.

    For a multiscale topology only the virtual-sites/mapping section is
    emitted (it is meant to be appended to an atomistic moleculetype);
    otherwise a complete [moleculetype] with all bonded sections is written.
    Section order matters for Gromacs parsing — do not reorder.
    """
    if self.multiscale:
        out = [ '; MARTINI (%s) Multiscale virtual sites topology section for "%s"' %(self.options['ForceField'].name,self.name) ]
    else:
        string = '; MARTINI (%s) Coarse Grained topology file for "%s"' %(self.options['ForceField'].name, self.name)
        string += '\n; Created by py version %s \n; Using the following options: ' %(self.options['Version'])
        string += ' '.join(self.options['Arguments'])
        out = [ string ]
    if self.sequence:
        out += [
            '; Sequence:',
            '; ' + ''.join([ AA321.get(AA) for AA in self.sequence ]),
            '; Secondary Structure:',
            '; ' + self.secstruc,
        ]
    # Do not print a molecule name when multiscaling
    # In that case, the topology created here needs to be appended
    # at the end of an atomistic moleculetype
    if not self.multiscale:
        out += [ '\n[ moleculetype ]',
                 '; Name Exclusions',
                 '%-15s %3d' % (self.name,self.nrexcl)]
    out.append('\n[ atoms ]')
    # For virtual sites and dummy beads we have to be able to specify the mass.
    # Thus we need two different format strings:
    fs8 = '%5d %5s %5d %5s %5s %5d %7.4f ; %s'
    fs9 = '%5d %5s %5d %5s %5s %5d %7.4f %7.4f ; %s'
    # 9-tuples carry an explicit mass, 8-tuples do not (Py2 and/or idiom)
    out.extend([len(i)==9 and fs9%i or fs8%i for i in self.atoms])
    # Print out the vsites only if they excist. Right now it can only be type 1 virual sites.
    vsites = [str(i) for i in self.vsites]
    if vsites:
        out.append('\n[ virtual_sites2 ]')
        out.extend(vsites)
    # Print out the exclusions only if they excist.
    exclusions = [str(i) for i in self.exclusions]
    if exclusions:
        out.append('\n[ exclusions ]')
        out.extend(exclusions)
    if self.multiscale:
        out += ['\n;\n; Coarse grained to atomistic mapping\n;',
                '#define mapping virtual_sitesn',
                '[ mapping ]']
        for i,j in self.mapping:
            out.append( ("%5d 2 "%i)+" ".join(["%5d"%k for k in j]) )
        logging.info('Created virtual sites section for multiscaled topology')
        # Multiscale output stops here: no bonded sections are written.
        return "\n".join(out)
    # Bonds in order: backbone, backbone-sidechain, sidechain, short elastic, long elastic
    out.append("\n[ bonds ]")
    for bondType,bondDesc in (
            ("BB","Backbone bonds"),
            ("SC","Sidechain bonds"),
            ("Elastic short", "Short elastic bonds for extended regions"),
            ("Elastic long", "Long elastic bonds for extended regions"),
            ("Cystine","Cystine bridges"),
            ("Link","Links")):
        # Bonds with parameters[1] == None are constraints; those go to
        # the [ constraints ] section below instead.
        bonds = [ str(i) for i in self.bonds[bondType] if not i.parameters[1] == None ]
        if bonds:
            out.append("; "+bondDesc)
            out.extend(bonds)
    # Rubber Bands
    bonds = [str(i) for i in self.bonds["Rubber",True]]
    if bonds:
        # Add a CPP style directive to allow control over the elastic network
        out.append("#ifndef NO_RUBBER_BANDS")
        # The GMX preprocessor keeps refusing to correctly parse equations or macros... TAW160730
        # out.append("#ifndef RUBBER_FC\n#define RUBBER_FC %f\n#endif"%self.options['ElasticMaximumForce'])
        out.extend(bonds)
        out.append("#endif")
    # Constraints
    out.append("\n[ constraints ]")
    out.extend([str(i) for i in self.bonds["Constraint"]])
    for bondType,bondDesc in (
            ("Cystine","Cystine bridges"),
            ("Link","Links")):
        bonds = [ str(i) for i in self.bonds[bondType] if i.parameters[1] == None ]
        if bonds:
            out.append("; "+bondDesc)
            out.extend(bonds)
    # Angles
    out.append("\n[ angles ]")
    out.append("; Backbone angles")
    out.extend([str(i) for i in self.angles["BBB"]])
    out.append("; Backbone-sidechain angles")
    out.extend([str(i) for i in self.angles["BBS"]])
    out.append("; Sidechain angles")
    out.extend([str(i) for i in self.angles["SC"]])
    # Dihedrals
    out.append("\n[ dihedrals ]")
    out.append("; Backbone dihedrals")
    out.extend([str(i) for i in self.dihedrals["BBBB"] if i.parameters])
    out.append("; Sidechain improper dihedrals")
    out.extend([str(i) for i in self.dihedrals["SC"] if i.parameters])
    # Postition Restraints
    if self.posres:
        out.append("\n#ifdef POSRES")
        out.append("#ifndef POSRES_FC\n#define POSRES_FC %.2f\n#endif"%self.options['PosResForce'])
        out.append(" [ position_restraints ]")
        out.extend([' %5d 1 POSRES_FC POSRES_FC POSRES_FC'%i for i in self.posres])
        out.append("#endif")
    logging.info('Created coarsegrained topology')
    return "\n".join(out)
# The sequence function can be used to generate the topology for
# a sequence :) either given as sequence or as chain
def fromAminoAcidSequence(self,sequence,secstruc=None,links=None,breaks=None,
                          mapping=None,rubber=False,multi=False):
    """Build the protein topology from a sequence or a Chain instance.

    sequence : Chain instance, or a plain amino-acid sequence
    secstruc : secondary-structure string (used only with a plain sequence)
    links/breaks : chain link and break residue indices (taken from the
                   Chain when one is given)
    mapping  : CG-to-atomistic bead mapping (multiscaling)
    rubber   : add an elastic network over the backbone beads
    multi    : force multiscale treatment
    """
    # Shift for the atom numbers of the atomistic part in a chain
    # that is being multiscaled
    shift = 0
    # First check if we get a sequence or a Chain instance
    if isinstance(sequence, Chain):
        chain = sequence
        links = chain.links
        breaks = chain.breaks
        # If the mapping is not specified, the actual mapping is taken,
        # used to construct the coarse grained system from the atomistic one.
        # The function argument "mapping" could be used to use a default
        # mapping scheme in stead, like the mapping for the GROMOS96 force field.
        mapping = mapping or chain.mapping
        multi = self.options['multi'] or chain.multiscale
        self.secstruc = chain.sstypes or len(chain)*"C"
        self.sequence = chain.sequence
        # If anything hints towards multiscaling, do multiscaling
        self.multiscale = self.multiscale or chain.multiscale or multi
        if self.multiscale:
            shift = self.natoms
            self.natoms += len(chain.atoms())
    elif not secstruc:
        # If no secondary structure is provided, set all to coil
        chain = None
        self.secstruc = len(self.sequence)*"C"
    else:
        # If a secondary structure is provided, use that. chain is none.
        chain = None
        self.secstruc = secstruc
    logging.debug(self.secstruc)
    logging.debug(self.sequence)
    # Fetch the sidechains
    # Pad with empty lists for atoms, bonds, angles
    # and dihedrals, and take the first four lists out
    # This will avoid errors for residues for which
    # these are not defined.
    sc = [(self.options['ForceField'].sidechains[res]+5*[[]])[:5] for res in self.sequence]
    # ID of the first atom/residue
    # The atom number and residue number follow from the last
    # atom c.q. residue id in the list processed in the topology
    # thus far. In the case of multiscaling, the real atoms need
    # also be accounted for.
    startAtom = self.natoms + 1
    startResi = self.atoms and self.atoms[-1][2]+1 or 1
    # Backbone bead atom IDs
    bbid = [startAtom]
    # NOTE(review): zip(*sc)[0] requires Python 2 (zip returns a list there).
    for i in zip(*sc)[0]:
        bbid.append(bbid[-1]+len(i)+1)
    # Calpha positions, to get Elnedyn BBB-angles and BB-bond lengths
    # positionCa = [residue[1][4:] for residue in chain.residues]
    # The old method (line above) assumed no hydrogens: Ca would always be
    # the second atom of the residue. Now we look at the name.
    # NOTE(review): chain can be None here when a plain sequence was passed;
    # this loop would then raise — confirm callers always supply a Chain.
    positionCa = []
    for residue in chain.residues:
        for atom in residue:
            if atom[0] == "CA":
                positionCa.append(atom[4:])
    # Residue numbers for this moleculetype topology
    resid = range(startResi,startResi+len(self.sequence))
    # This contains the information for deriving backbone bead types,
    # bb bond types, bbb/bbs angle types, and bbbb dihedral types and
    # Elnedyn BB-bondlength BBB-angles
    seqss = zip(bbid,self.sequence,self.secstruc,positionCa)
    # Fetch the proper backbone beads
    bb = [self.options['ForceField'].bbGetBead(res,typ) for num,res,typ,Ca in seqss]
    # If termini need to be charged, change the bead types
    if not self.options['NeutralTermini']:
        bb[0] ="Qd"
        bb[-1] = "Qa"
    # If breaks need to be charged, change the bead types
    if self.options['ChargesAtBreaks']:
        for i in breaks:
            bb[i] = "Qd"
            bb[i-1] = "Qa"
    # For backbone parameters, iterate over fragments, inferred from breaks
    for i,j in zip([0]+breaks,breaks+[-1]):
        # Extract the fragment
        frg = j==-1 and seqss[i:] or seqss[i:j]
        # Iterate over backbone bonds
        self.bonds.extend([Bond(pair,category="BB",options=self.options,) for pair in zip(frg,frg[1:])])
        # Iterate over backbone angles
        # Don't skip the first and last residue in the fragment
        self.angles.extend([Angle(triple,options=self.options,category="BBB") for triple in zip(frg,frg[1:],frg[2:])])
        # Get backbone quadruples
        quadruples = zip(frg,frg[1:],frg[2:],frg[3:])
        # No i-1,i,i+1,i+2 interactions defined for Elnedyn
        if self.options['ForceField'].UseBBBBDihedrals:
            # Process dihedrals
            for q in quadruples:
                id,rn,ss,ca = zip(*q)
                # Maybe do local elastic networks
                if ss == ("E","E","E","E") and not self.options['ExtendedDihedrals']:
                    # This one may already be listed as the 2-4 bond of a previous one
                    if not (id[0],id[2]) in self.bonds:
                        self.bonds.append(Bond(options=self.options,atoms=(id[0],id[2]),parameters=self.options['ForceField'].ebonds['short'],type=1,
                                               comments="%s(%s)-%s(%s) 1-3"%(rn[0],id[0],rn[2],id[2]),
                                               category="Elastic short"))
                    self.bonds.append(Bond(options=self.options,atoms=(id[1],id[3]),parameters=self.options['ForceField'].ebonds['short'],type=1,
                                           comments="%s(%s)-%s(%s) 2-4"%(rn[1],id[1],rn[3],id[3]),
                                           category="Elastic short"))
                    self.bonds.append(Bond(options=self.options,atoms=(id[0],id[3]),parameters=self.options['ForceField'].ebonds['long'],type=1,
                                           comments="%s(%s)-%s(%s) 1-4"%(rn[0],id[0],rn[3],id[3]),
                                           category="Elastic long"))
                else:
                    # Since dihedrals can return None, we first collect them separately and then
                    # add the non-None ones to the list
                    dihed = Dihedral(q,options=self.options,category="BBBB")
                    if dihed:
                        self.dihedrals.append(dihed)
        # Elnedyn does not use backbone-backbone-sidechain-angles
        if self.options['ForceField'].UseBBSAngles:
            # Backbone-Backbone-Sidechain angles
            # If the first residue has a sidechain, we take SBB, otherwise we skip it
            # For other sidechains, we 'just' take BBS
            if len(frg) > 1 and frg[1][0]-frg[0][0] > 1:
                self.angles.append(Angle(options=self.options,atoms=(frg[0][0]+1,frg[0][0],frg[1][0]),parameters=self.options['ForceField'].bbsangle,type=2,
                                         comments="%s(%s)-%s(%s) SBB"%(frg[0][1],frg[0][2],frg[1][1],frg[1][2]),
                                         category="BBS"))
            # Start from first residue: connects sidechain of second residue
            for (ai,ni,si,ci),(aj,nj,sj,cj),s in zip(frg[0:],frg[1:],sc[1:]):
                if s[0]:
                    self.angles.append(Angle(options=self.options,atoms=(ai,aj,aj+1),parameters=self.options['ForceField'].bbsangle,type=2,
                                             comments="%s(%s)-%s(%s) SBB"%(ni,si,nj,sj),
                                             category="BBS"))
    # Now do the atom list, and take the sidechains along
    #
    # AtomID AtomType ResidueID ResidueName AtomName ChargeGroup Charge ; Comments
    #
    atid = startAtom
    for resi,resname,bbb,sidechn,ss in zip(resid,self.sequence,bb,sc,self.secstruc):
        scatoms, bon_par, ang_par, dih_par, vsite_par = sidechn
        # Side chain bonded terms
        # Collect bond, angle and dihedral connectivity
        bon_con,ang_con,dih_con,vsite_con = (self.options['ForceField'].connectivity[resname]+4*[[]])[:4]
        # Side Chain Bonds/Constraints
        for atids,par in zip(bon_con,bon_par):
            if par[1] == None:
                # No force constant given: treat as a constraint
                self.bonds.append(Bond(options=self.options,atoms=atids,parameters=[par[0]],type=1,
                                       comments=resname,category="Constraint"))
            else:
                self.bonds.append(Bond(options=self.options,atoms=atids,parameters=par,type=1,
                                       comments=resname,category="SC"))
            # Shift the atom numbers
            self.bonds[-1] += atid
        # Side Chain Angles
        for atids,par in zip(ang_con,ang_par):
            self.angles.append(Angle(options=self.options,atoms=atids,parameters=par,type=2,
                                     comments=resname,category="SC"))
            # Shift the atom numbers
            self.angles[-1] += atid
        # Side Chain Dihedrals
        for atids,par in zip(dih_con,dih_par):
            self.dihedrals.append(Dihedral(options=self.options,atoms=atids,parameters=par,type=2,
                                           comments=resname,category="SC"))
            # Shift the atom numbers
            self.dihedrals[-1] += atid
        # Side Chain V-Sites
        for atids,par in zip(vsite_con,vsite_par):
            self.vsites.append(Vsite(options=self.options,atoms=atids,parameters=par,type=1,
                                     comments=resname,category="SC"))
            # Shift the atom numbers
            self.vsites[-1] += atid
        # Side Chain exclusions
        # The new polarizable forcefield give problems with the charges in the sidechain, if the backbone is also charged.
        # To avoid that, we add explicit exclusions
        if bbb in self.options['ForceField'].charges.keys() and resname in self.options['ForceField'].mass_charge.keys():
            for i in [i for i, d in enumerate(scatoms) if d=='D']:
                self.exclusions.append(Exclusion(options=self.options,atoms=(atid,i+atid+1),comments='%s(%s)'%(resname,resi),parameters=(None,)))
        # All residue atoms
        counter = 0  # Counts over beads
        for atype,aname in zip([bbb]+list(scatoms),CoarseGrained.residue_bead_names):
            if self.multiscale:
                atype,aname = "v"+atype,"v"+aname
            # If mass or charge diverse, we adopt it here.
            # We don't want to do this for BB beads because of charged termini.
            if resname in self.options['ForceField'].mass_charge.keys() and counter != 0:
                M,Q = self.options['ForceField'].mass_charge[resname]
                aname = Q[counter-1]>0 and 'SCP' or Q[counter-1]<0 and 'SCN' or aname
                # 9-tuple: includes explicit mass (see fs9 in __str__)
                self.atoms.append((atid,atype,resi,resname,aname,atid,Q[counter-1],M[counter-1],ss))
            else:
                # 8-tuple: default mass (see fs8 in __str__)
                self.atoms.append((atid,atype,resi,resname,aname,atid,self.options['ForceField'].charges.get(atype,0),ss))
            # Doing this here save going over all the atoms onesmore.
            # Generate position restraints for all atoms or Backbone beads only.
            if 'all' in self.options['PosRes']:
                self.posres.append((atid))
            elif aname in self.options['PosRes']:
                self.posres.append((atid))
            if mapping:
                self.mapping.append((atid,[i+shift for i in mapping[counter]]))
            atid += 1
            counter += 1
    # The rubber bands are best applied outside of the chain class, as that gives
    # more control when chains need to be merged. The possibility to do it on the
    # chain level is retained to allow building a complete chain topology in
    # a straightforward manner after importing this script as module.
    if rubber and chain:
        rubberList = rubberBands(
            [(i,j[4:7]) for i,j in zip(self.atoms,chain.cg()) if i[4] in ElasticBeads],
            ElasticLowerBound,ElasticUpperBound,
            ElasticDecayFactor,ElasticDecayPower,
            ElasticMaximumForce,ElasticMinimumForce)
        self.bonds.extend([Bond(i,options=self.options,type=6,category="Rubber band") for i in rubberList])
    # Note the equivalent of atomistic atoms that have been processed
    if chain and self.multiscale:
        self.natoms += len(chain.atoms())
def fromNucleicAcidSequence(self,sequence,secstruc=None,links=None,breaks=None,
                            mapping=None,rubber=False,multi=False):
    """Build the nucleic-acid topology from a sequence or a Chain instance.

    Mirrors fromAminoAcidSequence, but each residue has THREE backbone
    beads instead of one, plus base ("side chain") beads with extra
    improper dihedrals and full base exclusions.
    """
    # Shift for the atom numbers of the atomistic part in a chain
    # that is being multiscaled
    shift = 0
    # First check if we get a sequence or a Chain instance
    if isinstance(sequence, Chain):
        chain = sequence
        links = chain.links
        breaks = chain.breaks
        # If the mapping is not specified, the actual mapping is taken,
        # used to construct the coarse grained system from the atomistic one.
        # The function argument "mapping" could be used to use a default
        # mapping scheme in stead, like the mapping for the GROMOS96 force field.
        mapping = mapping or chain.mapping
        multi = self.options['multi'] or chain.multiscale
        self.secstruc = chain.sstypes or len(chain)*"C"
        self.sequence = chain.sequence
        # If anything hints towards multiscaling, do multiscaling
        self.multiscale = self.multiscale or chain.multiscale or multi
        if self.multiscale:
            shift = self.natoms
            self.natoms += len(chain.atoms())
    elif not secstruc:
        # If no secondary structure is provided, set all to coil
        chain = None
        self.secstruc = len(self.sequence)*"C"
    else:
        # If a secondary structure is provided, use that. chain is none.
        chain = None
        self.secstruc = secstruc
    logging.debug(self.secstruc)
    logging.debug(self.sequence)
    # Fetch the base information
    # Pad with empty lists for atoms, bonds, angles
    # and dihedrals, and take the first five lists out
    # This will avoid errors for residues for which
    # these are not defined.
    sc = [(self.options['ForceField'].bases[res]+6*[[]])[:6] for res in self.sequence]
    # ID of the first atom/residue
    # The atom number and residue number follow from the last
    # atom c.q. residue id in the list processed in the topology
    # thus far. In the case of multiscaling, the real atoms need
    # also be accounted for.
    startAtom = self.natoms + 1
    startResi = self.atoms and self.atoms[-1][2]+1 or 1
    # Backbone bead atom IDs: three beads per residue
    bbid = [[startAtom,startAtom+1,startAtom+2]]
    # NOTE(review): zip(*sc)[0] requires Python 2 (zip returns a list there).
    for i in zip(*sc)[0]:
        bbid1 = bbid[-1][0]+len(i)+3
        bbid.append([bbid1,bbid1+1,bbid1+2])
        #bbid.append(bbid[-1]+len(i)+1)
    # Residue numbers for this moleculetype topology
    resid = range(startResi,startResi+len(self.sequence))
    # This contains the information for deriving backbone bead types,
    # bb bond types, bbb/bbs angle types, and bbbb dihedral types.
    seqss = zip(bbid,self.sequence,self.secstruc)
    # Fetch the proper backbone beads
    # Since there are three beads we need to split these to the list
    bb = [self.options['ForceField'].bbGetBead(res,typ) for num,res,typ in seqss]
    bb3 = [i for j in bb for i in j]
    # This is going to be usefull for the type of the last backbone bead.
    # If termini need to be charged, change the bead types
    #if not self.options['NeutralTermini']:
    #    bb[0] ="Qd"
    #    bb[-1] = "Qa"
    # If breaks need to be charged, change the bead types
    #if self.options['ChargesAtBreaks']:
    #    for i in breaks:
    #        bb[i] = "Qd"
    #        bb[i-1] = "Qa"
    # For backbone parameters, iterate over fragments, inferred from breaks
    for i,j in zip([0]+breaks,breaks+[-1]):
        # Extract the fragment
        frg = j==-1 and seqss[i:] or seqss[i:j]
        # Expand the 3 bb beads per residue into one long list
        # Resulting list contains three tuples per residue
        # We use the useless ca parameter to get the correct backbone bond from bbGetBond
        frg = [(j[0][i],j[1],j[2],i) for j in frg for i in range(len(j[0]))]
        # Iterate over backbone bonds
        self.bonds.extend([Bond(pair,category="BB",options=self.options,) for pair in zip(frg,frg[1:])])
        # Iterate over backbone angles
        # Don't skip the first and last residue in the fragment
        self.angles.extend([Angle(triple,options=self.options,category="BBB") for triple in zip(frg,frg[1:],frg[2:])])
        # Get backbone quadruples
        quadruples = zip(frg,frg[1:],frg[2:],frg[3:])
        # No i-1,i,i+1,i+2 interactions defined for Elnedyn
        # Process dihedrals
        for q in quadruples:
            id,rn,ss,ca = zip(*q)
            # Since dihedrals can return None, we first collect them separately and then
            # add the non-None ones to the list
            dihed = Dihedral(q,options=self.options,category="BBBB")
            if dihed:
                self.dihedrals.append(dihed)
    # Now do the atom list, and take the sidechains along
    #
    atid = startAtom
    # We need to do some trickery to get all 3 bb beads in to these lists
    # This adds each element to a list three times, feel free to shorten up
    resid3 = [i for i in resid for j in range(3)]
    sequence3 = [i for i in self.sequence for j in range(3)]
    sc3 = [i for i in sc for j in range(3)]
    secstruc3 = [i for i in self.secstruc for j in range(3)]
    count = 0
    for resi,resname,bbb,sidechn,ss in zip(resid3,sequence3,bb3,sc3,secstruc3):
        # We only want one side chain per three backbone beads so this skips the others
        if (count % 3) == 0:
            # Note added impropers in contrast to aa
            scatoms, bon_par, ang_par, dih_par, imp_par, vsite_par = sidechn
            # Side chain bonded terms
            # Collect bond, angle and dihedral connectivity
            # Impropers needed to be added here for DNA
            bon_con,ang_con,dih_con,imp_con,vsite_con = (self.options['ForceField'].connectivity[resname]+5*[[]])[:5]
            # Side Chain Bonds/Constraints
            for atids,par in zip(bon_con,bon_par):
                if par[1] == None:
                    # No force constant given: treat as a constraint
                    self.bonds.append(Bond(options=self.options,atoms=atids,parameters=[par[0]],type=1,
                                           comments=resname,category="Constraint"))
                else:
                    self.bonds.append(Bond(options=self.options,atoms=atids,parameters=par,type=1,
                                           comments=resname,category="SC"))
                # Shift the atom numbers
                self.bonds[-1] += atid
            # Side Chain Angles
            for atids,par in zip(ang_con,ang_par):
                self.angles.append(Angle(options=self.options,atoms=atids,parameters=par,type=2,
                                         comments=resname,category="SC"))
                # Shift the atom numbers
                self.angles[-1] += atid
            # Side Chain Dihedrals
            for atids,par in zip(dih_con,dih_par):
                self.dihedrals.append(Dihedral(options=self.options,atoms=atids,parameters=par,type=1,
                                               comments=resname,category="BSC"))
                # Shift the atom numbers
                self.dihedrals[-1] += atid
            # Side Chain Impropers
            for atids,par in zip(imp_con,imp_par):
                self.dihedrals.append(Dihedral(options=self.options,atoms=atids,parameters=par,type=2,
                                               comments=resname,category="SC"))
                # Shift the atom numbers
                self.dihedrals[-1] += atid
            # Side Chain V-Sites
            for atids,par in zip(vsite_con,vsite_par):
                self.vsites.append(Vsite(options=self.options,atoms=atids,parameters=par,type=1,
                                         comments=resname,category="SC"))
                # Shift the atom numbers
                self.vsites[-1] += atid
            # Currently DNA needs exclusions for the base
            # The loop runs over the first backbone bead so 3 needs to be added to the indices
            for i in range(len(scatoms)):
                for j in range(i+1, len(scatoms)):
                    self.exclusions.append(Exclusion(options=self.options,atoms=(i+atid+3,j+atid+3),comments='%s(%s)'%(resname,resi),parameters=(None,)))
            # All residue atoms
            counter = 0  # Counts over beads
            # Need to tweak this to get all the backbone beads to the list with the side chain
            bbbset = [bb3[count], bb3[count+1], bb3[count+2]]
            for atype,aname in zip(bbbset+list(scatoms),CoarseGrained.residue_bead_names_dna):
                if self.multiscale:
                    atype,aname = "v"+atype,"v"+aname
                self.atoms.append((atid,atype,resi,resname,aname,atid,self.options['ForceField'].charges.get(atype,0),ss))
                # Doing this here saves going over all the atoms onesmore.
                # Generate position restraints for all atoms or Backbone beads only.
                if 'all' in self.options['PosRes']:
                    self.posres.append((atid))
                elif aname in self.options['PosRes']:
                    self.posres.append((atid))
                if mapping:
                    self.mapping.append((atid,[i+shift for i in mapping[counter]]))
                atid += 1
                counter += 1
        count += 1
    # One more thing, we need to remove dihedrals (2) and an angle (1) that reach beyond the 3' end
    # This is stupid to do now but the total number of atoms seems not to be available before
    # This iterate the list in reverse order so that removals don't affect later checks
    for i in range(len(self.dihedrals)-1,-1,-1):
        if (max(self.dihedrals[i].atoms) > self.atoms[-1][0]):
            del self.dihedrals[i]
    for i in range(len(self.angles)-1,-1,-1):
        if (max(self.angles[i].atoms) > self.atoms[-1][0]):
            del self.angles[i]
def fromMoleculeList(self,other):
    """Build the topology from a non-polymeric collection of molecules.

    NOTE(review): not implemented — this is a stub, so chains routed here
    by __init__ currently produce an empty topology.
    """
    pass
#############
## 8 # MAIN # -> @MAIN <-
#############
import sys,logging,random,math,os,re
def main(options):
# Check whether to read from a gro/pdb file or from stdin
# We use an iterator to wrap around the stream to allow
# inferring the file type, without consuming lines already
inStream = streamTag(options["-f"] and options["-f"].value or sys.stdin)
# The streamTag iterator first yields the file type, which
# is used to specify the function for reading frames
fileType = inStream.next()
if fileType == "GRO":
frameIterator = groFrameIterator
else:
frameIterator = pdbFrameIterator
## ITERATE OVER FRAMES IN STRUCTURE FILE ##
# Now iterate over the frames in the stream
# This should become a StructureFile class with a nice .next method
model = 1
cgOutPDB = None
ssTotal = []
cysteines = []
for title,atoms,box in frameIterator(inStream):
if fileType == "PDB":
# The PDB file can have chains, in which case we list and process them specifically
# TER statements are also interpreted as chain separators
# A chain may have breaks in which case the breaking residues are flagged
chains = [ Chain(options,[i for i in residues(chain)]) for chain in pdbChains(atoms) ]
else:
# The GRO file does not define chains. Here breaks in the backbone are
# interpreted as chain separators.
residuelist = [residue for residue in residues(atoms)]
# The breaks are indices to residues
broken = breaks(residuelist)
# Reorder, such that each chain is specified with (i,j,k)
# where i and j are the start and end of the chain, and
# k is a chain identifier
chains = zip([0]+broken,broken+[len(residuelist)],range(len(broken)+1))
chains = [ Chain(options,residuelist[i:j],name=chr(65+k)) for i,j,k in chains ]
for chain in chains:
chain.multiscale = "all" in options['multi'] or chain.id in options['multi']
# Check the chain identifiers
if model == 1 and len(chains) != len(set([i.id for i in chains])):
# Ending down here means that non-consecutive blocks of atoms in the
# PDB file have the same chain ID. The warning pertains to PDB files only,
# since chains from GRO files get a unique chain identifier assigned.
logging.warning("Several chains have identical chain identifiers in the PDB file.")
# Check if chains are of mixed type. If so, split them.
# Note that in some cases HETATM residues are part of a
# chain. This will get problematic. But we cannot cover
# all, probably.
if not options['MixedChains']:
demixedChains = []
for chain in chains:
demixedChains.extend(chain.split())
chains = demixedChains
n = 1
logging.info("Found %d chains:"%len(chains))
for chain in chains:
logging.info(" %2d: %s (%s), %d atoms in %d residues."%(n,chain.id,chain._type,chain.natoms,len(chain)))
n += 1
# Check all chains
keep = []
for chain in chains:
if chain.type() == "Water":
logging.info("Removing %d water molecules (chain %s)."%(len(chain),chain.id))
elif chain.type() in ("Protein","Nucleic"):
keep.append(chain)
# This is currently not active:
elif options['RetainHETATM']:
keep.append(chain)
else:
logging.info("Removing HETATM chain %s consisting of %d residues."%(chain.id,len(chain)))
chains = keep
# Here we interactively check the charge state of resides
# Can be easily expanded to residues other than HIS
for chain in chains:
for i,resname in enumerate(chain.sequence):
if resname == 'HIS' and options['chHIS']:
choices = {0:'HIH',1:'HIS'}
choice = getChargeType(resname,i,choices)
chain.sequence[i] = choice
# Check which chains need merging
if model == 1:
order, merge = check_merge(chains, options['mergeList'], options['linkList'], options['CystineCheckBonds'] and options['CystineMaxDist2'])
# Get the total length of the sequence
seqlength = sum([len(chain) for chain in chains])
logging.info('Total size of the system: %s residues.'%seqlength)
## SECONDARY STRUCTURE
ss = ''
if options['Collagen']:
for chain in chains:
chain.set_ss("F")
ss += chain.ss
elif options["-ss"]:
# XXX We need error-catching here,
# in case the file doesn't excist, or the string contains bogus.
# If the string given for the sequence consists strictly of upper case letters
# and does not appear to be a file, assume it is the secondary structure
ss = options["-ss"].value.replace('~','L').replace(' ','L')
if ss.isalnum() and ss.isupper() and not os.path.exists(options["-ss"].value):
ss = options["-ss"].value
logging.info('Secondary structure read from command-line:\n'+ss)
else:
# There ought to be a file with the name specified
ssfile = [ i.strip() for i in open(options["-ss"].value) ]
# Try to read the file as a Gromacs Secondary Structure Dump
# Those have an integer as first line
if ssfile[0].isdigit():
logging.info('Will read secondary structure from file (assuming Gromacs ssdump).')
ss = "".join([ i for i in ssfile[1:] ])
else:
# Get the secondary structure type from DSSP output
logging.info('Will read secondary structure from file (assuming DSSP output).')
pss = re.compile(r"^([ 0-9-]{4}[0-9]){2}")
ss = "".join([i[16] for i in open(options["-ss"].value) if re.match(pss,i)])
# Now set the secondary structure for each of the chains
sstmp = ss
for chain in chains:
ln = min(len(sstmp),len(chain))
chain.set_ss(sstmp[:ln])
sstmp = ss[:ln]
else:
if options["-dssp"]:
method, executable = "dssp", options["-dssp"].value
#elif options["-pymol"]:
# method, executable = "pymol", options["-pymol"].value
else:
logging.warning("No secondary structure or determination method speficied. Protein chains will be set to 'COIL'.")
method, executable = None, None
for chain in chains:
ss += chain.dss(method, executable)
# Used to be: if method in ("dssp","pymol"): but pymol is not supported
if method in ["dssp"]:
logging.debug('%s determined secondary structure:\n'%method.upper()+ss)
# Collect the secondary structure classifications for different frames
ssTotal.append(ss)
# Write the coarse grained structure if requested
if options["-x"].value:
logging.info("Writing coarse grained structure.")
if cgOutPDB == None:
cgOutPDB = open(options["-x"].value,"w")
cgOutPDB.write("MODEL %8d\n"%model)
cgOutPDB.write(title)
cgOutPDB.write(pdbBoxString(box))
atid = 1
for i in order:
ci = chains[i]
if ci.multiscale:
for r in ci.residues:
for name,resn,resi,chain,x,y,z in r:
insc = resi>>20
resid = resi - (insc<<20)
if resid > 1000000:
insc += 1
resid = resi - (insc<<20)
cgOutPDB.write(pdbAtomLine%(atid,name,resn[:3],chain,resid,chr(insc),x,y,z,1,0))
atid += 1
coarseGrained = ci.cg(com=True)
if coarseGrained:
for name,resn,resi,chain,x,y,z,ssid in coarseGrained:
insc = resi>>20
resid = resi - (insc<<20)
if resid > 1000000:
insc += 1
resid = resi - (insc<<20)
if ci.multiscale:
name = "v"+name
cgOutPDB.write(pdbAtomLine%(atid,name,resn[:3],chain,resid,chr(insc),x,y,z,1,ssid))
atid += 1
cgOutPDB.write("TER\n")
else:
logging.warning("No mapping for coarse graining chain %s (%s); chain is skipped."%(ci.id,ci.type()))
cgOutPDB.write("ENDMDL\n")
# Gather cysteine sulphur coordinates
cyslist = [cys["SG"] for chain in chains for cys in chain["CYS"]]
cysteines.append([cys for cys in cyslist if cys])
model += 1
# Write the index file if requested.
# Mainly of interest for multiscaling.
# Could be improved by adding separate groups for BB, SC, etc.
if options["-n"].value:
logging.info("Writing index file.")
# Lists for All-atom, Virtual sites and Coarse Grain.
NAA,NVZ,NCG = [],[],[]
atid = 1
for i in order:
ci = chains[i]
coarseGrained = ci.cg(force=True)
if ci.multiscale:
NAA.extend([" %5d"%(a+atid) for a in range(ci.natoms)])
atid += ci.natoms
if coarseGrained:
if ci.multiscale:
NVZ.extend([" %5d"%(a+atid) for a in range(len(coarseGrained))])
else:
NCG.extend([" %5d"%(a+atid) for a in range(len(coarseGrained))])
atid += len(coarseGrained)
outNDX = open(options["-n"].value,"w")
outNDX.write("\n[ AA ]\n"+"\n".join([" ".join(NAA[i:i+15]) for i in range(0,len(NAA),15)]))
outNDX.write("\n[ VZ ]\n"+"\n".join([" ".join(NVZ[i:i+15]) for i in range(0,len(NVZ),15)]))
outNDX.write("\n[ CG ]\n"+"\n".join([" ".join(NCG[i:i+15]) for i in range(0,len(NCG),15)]))
outNDX.close()
# Write the index file for mapping AA trajectory if requested
if options["-nmap"].value:
logging.info("Writing trajectory index file.")
atid = 1
outNDX = open(options["-nmap"].value,"w")
# Get all AA atoms as lists of atoms in residues
# First we skip hetatoms and unknowns then iterate over beads
# In DNA the O3' atom is mapped together with atoms from the next residue
# This stores it until we get to the next residue
o3_shift = ''
for i_count, i in enumerate(residues(atoms)):
if i[0][1] in ("SOL","HOH","TIP"):
continue
if not i[0][1] in CoarseGrained.mapping.keys():
continue
nra = 0
names = [j[0] for j in i]
# This gives out a list of atoms in residue, each tuple has other
# stuff in it that's needed elsewhere so we just take the last
# element which is the atom index (in that residue)
for j_count, j in enumerate(mapIndex(i)):
outNDX.write('[ Bead %i of residue %i ]\n'%(j_count+1,i_count+1))
line = ''
for k in j:
if names[k[2]] == "O3'":
line += '%s '%(str(o3_shift))
o3_shift = k[2]+atid
else:
line += '%i '%(k[2]+atid)
line += '\n'
nra += len(j)
outNDX.write(line)
atid += nra
# Everything below here is only needed if we need to write a Topology
if options['-o']:
# Collect the secondary structure stuff and decide what to do with it
# First rearrange by the residue
ssTotal = zip(*ssTotal)
ssAver = []
for i in ssTotal:
si = list(set(i))
if len(si) == 1:
# Only one type -- consensus
ssAver.append(si[0])
else:
# Transitions between secondary structure types
i = list(i)
si = [(1.0*i.count(j)/len(i),j) for j in si]
si.sort()
if si[-1][0] > options["-ssc"].value:
ssAver.append(si[-1][1])
else:
ssAver.append(" ")
ssAver = "".join(ssAver)
logging.info('(Average) Secondary structure has been determined (see head of .itp-file).')
# Divide the secondary structure according to the division in chains
# This will set the secondary structure types to be used for the
# topology.
for chain in chains:
chain.set_ss(ssAver[:len(chain)])
ssAver = ssAver[len(chain):]
# Now the chains are complete, each consisting of a residuelist,
# and a secondary structure designation if the chain is of type 'Protein'.
# There may be mixed chains, there may be HETATM things.
# Water has been discarded. Maybe this has to be changed at some point.
# The order in the coarse grained files matches the order in the set of chains.
#
# If there are no merges to be done, i.e. no global Elnedyn network, no
# disulphide bridges, no links, no distance restraints and no explicit merges,
# then we can write out the topology, which will match the coarse grained file.
#
# If there are merges to be done, the order of things may be changed, in which
# case the coarse grained structure will not match with the topology...
## CYSTINE BRIDGES ##
# Extract the cysteine coordinates (for all frames) and the cysteine identifiers
if options['CystineCheckBonds']:
logging.info("Checking for cystine bridges, based on sulphur (SG) atoms lying closer than %.4f nm"%math.sqrt(options['CystineMaxDist2']/100))
cyscoord = zip(*[[j[4:7] for j in i] for i in cysteines])
cysteines = [i[:4] for i in cysteines[0]]
bl, kb = options['ForceField'].special[(("SC1","CYS"),("SC1","CYS"))]
# Check the distances and add the cysteines to the link list if the
# SG atoms have a distance smaller than the cutoff.
rlc = range(len(cysteines))
for i in rlc[:-1]:
for j in rlc[i+1:]:
# Checking the minimum distance over all frames
# But we could also take the maximum, or the mean
d2 = min([distance2(a,b) for a,b in zip(cyscoord[i],cyscoord[j])])
if d2 <= options['CystineMaxDist2']:
a, b = cysteines[i], cysteines[j]
options['linkListCG'].append((("SC1","CYS",a[2],a[3]),("SC1","CYS",b[2],b[3]),bl,kb))
a,b = (a[0],a[1],a[2]-(32<<20),a[3]),(b[0],b[1],b[2]-(32<<20),b[3])
logging.info("Detected SS bridge between %s and %s (%f nm)"%(a,b,math.sqrt(d2)/10))
## REAL ITP STUFF ##
# Check whether we have identical chains, in which case we
# only write the ITP for one...
# This means making a distinction between chains and
# moleculetypes.
molecules = [tuple([chains[i] for i in j]) for j in merge]
# At this point we should have a list or dictionary of chains
# Each chain should be given a unique name, based on the value
# of options["-o"] combined with the chain identifier and possibly
# a number if there are chains with identical identifiers.
# For each chain we then write an ITP file using the name for
# moleculetype and name + ".itp" for the topology include file.
# In addition we write a master topology file, using the value of
# options["-o"], with an added extension ".top" if not given.
# XXX *NOTE*: This should probably be gathered in a 'Universe' class
itp = 0
moleculeTypes = {}
for mi in range(len(molecules)):
mol = molecules[mi]
# Check if the moleculetype is already listed
# If not, generate the topology from the chain definition
if not mol in moleculeTypes or options['SeparateTop']:
# Name of the moleculetype
# XXX: The naming should be changed; now it becomes Protein_X+Protein_Y+...
name = "+".join([chain.getname(options['-name'].value) for chain in mol])
moleculeTypes[mol] = name
# Write the molecule type topology
top = Topology(mol[0],options=options,name=name)
for m in mol[1:]:
top += Topology(m,options=options)
# Have to add the connections, like the connecting network
# Gather coordinates
mcg, coords = zip(*[(j[:4],j[4:7]) for m in mol for j in m.cg(force=True)])
mcg = list(mcg)
# Run through the link list and add connections (links = cys bridges or hand specified links)
for atomA,atomB,bondlength,forceconst in options['linkListCG']:
if bondlength == -1 and forceconst == -1:
bondlength, forceconst = options['ForceField'].special[(atomA[:2],atomB[:2])]
# Check whether this link applies to this group
atomA = atomA in mcg and mcg.index(atomA)
atomB = atomB in mcg and mcg.index(atomB)
if atomA and atomB:
cat = (mcg[atomA][1] == "CYS" and mcg[atomB][1] == "CYS") and "Cystine" or "Link"
top.bonds.append(Bond((atomA+1,atomB+1),options=options,type=1,parameters=(bondlength,forceconst),category=cat))
# Elastic Network
# The elastic network is added after the topology is constructed, since that
# is where the correct atom list with numbering and the full set of
# coordinates for the merged chains are available.
if options['ElasticNetwork']:
rubberType = options['ForceField'].EBondType
rubberList = rubberBands(
[(i,j) for i,j in zip(top.atoms,coords) if i[4] in options['ElasticBeads']],
options['ElasticLowerBound'],options['ElasticUpperBound'],
options['ElasticDecayFactor'],options['ElasticDecayPower'],
options['ElasticMaximumForce'],options['ElasticMinimumForce'])
top.bonds.extend([Bond(i,options=options,type=rubberType,category="Rubber band") for i in rubberList])
# Write out the MoleculeType topology
destination = options["-o"] and open(moleculeTypes[mol]+".itp",'w') or sys.stdout
destination.write(str(top))
itp += 1
# Check whether other chains are equal to this one
# Skip this step if we are to write all chains to separate moleculetypes
if not options['SeparateTop']:
for j in range(mi+1,len(molecules)):
if not molecules[j] in moleculeTypes and mol == molecules[j]:
# Molecule j is equal to a molecule mi
# Set the name of the moleculetype to the one of that molecule
moleculeTypes[molecules[j]] = moleculeTypes[mol]
logging.info('Written %d ITP file%s'%(itp,itp>1 and "s" or ""))
# WRITING THE MASTER TOPOLOGY
# Output stream
top = options["-o"] and open(options['-o'].value,'w') or sys.stdout
# ITP file listing
itps = '\n'.join(['#include "%s.itp"'%molecule for molecule in set(moleculeTypes.values())])
# Molecule listing
logging.info("Output contains %d molecules:"%len(molecules))
n = 1
for molecule in molecules:
chainInfo = (n, moleculeTypes[molecule], len(molecule)>1 and "s" or " ", " ".join([i.id for i in molecule]))
logging.info(" %2d-> %s (chain%s %s)"%chainInfo)
n += 1
molecules = '\n'.join(['%s \t 1'%moleculeTypes[molecule] for molecule in molecules])
# Set a define if we are to use rubber bands
useRubber = options['ElasticNetwork'] and "#define RUBBER_BANDS" or ""
# XXX Specify a better, version specific base-itp name.
# Do not set a define for position restrains here, as people are more used to do it in mdp file?
top.write(
'''#include "martini.itp"
%s
%s
[ system ]
; name
Martini system from %s
[ molecules ]
; name number
%s''' % (useRubber, itps, options["-f"] and options["-f"].value or "stdin", molecules))
logging.info('Written topology files')
# Maybe there are forcefield specific log messages?
options['ForceField'].messages()
# The following lines are always printed (if no errors occur).
print "\n\tThere you are. One MARTINI. Shaken, not stirred.\n"
Q = martiniq.pop(random.randint(0,len(martiniq)-1))
print "\n", Q[1], "\n%80s"%("--"+Q[0]), "\n"
if __name__ == '__main__':
    # Script entry point: parse the command line and hand control to main().
    import sys,logging
    args = sys.argv[1:]
    # The -cat argument is only given once: when concatenating to one exportable script.
    if '-cat' in args:
        cat('martinize-'+version+'.py')
        sys.exit()
    # Get the possible commandline arguments and help text.
    # NOTE(review): this self-assignment re-binds the module-level 'options'
    # and 'lists' definitions; it appears to be a no-op kept for symmetry with
    # the concatenated/exported script -- confirm before removing.
    options,lists = options,lists
    # Parse commandline options.
    options = option_parser(args,options,lists,version)
    main(options)
| Tsjerk/MartiniTools | martinize.py | Python | gpl-2.0 | 271,798 | [
"ChemPy",
"GROMOS",
"Gromacs",
"PyMOL"
] | 40a790fb7db2ce02fe7c956dad9969a495493e324f482e5ebb3645093004a0ae |
#!/usr/bin/env python
import sys
import BioClasses
import cPickle
import pysam
import scipy
def main():
# load data from the GTF pic
with open( "/home/paul/Resources/H_sapiens/test.hg19.chr.pic" ) as f:
genes = cPickle.load( f )
# load the indexed FASTA file
fastafile = pysam.Fastafile( "/home/paul/Resources/H_sapiens/hg19.ens.chr.fa" )
# Nucl. Acids Res. (2008) 36 (11): 3707-3715. doi: 10.1093/nar/gkn248
matrix = """5 2 0 0 0 0 0 0 1 2 2
33 6 6 25 48 2 25 2 12 21 29
7 16 8 0 0 0 0 0 0 4 11
3 24 34 23 0 46 23 46 35 21 6"""
M = list()
for row in matrix.split( "\n" ):
M.append( map( int, row.split( "\t" )))
MM = scipy.array( M )
P = BioClasses.PSWM( MM )
print MM, MM.shape
print
print P
# iterate over a few Transcript objects
c = 0
for gene_id,G in genes.iteritems():
for transcript_id,T in G.transcripts.iteritems():
if c > 100:
break
# arbitrary flanks
print T
T.TSS_Sequence.set_flank_right( 50 )
T.TSS_Sequence.set_flank_left( 30 )
# get the sequence before you compute TOP scores
T.TSS_Sequence.get_sequence( fastafile )
# compute TOP scores and get the highest
T.TSS_Sequence.compute_TOP_score( P )
print T.TSS_Sequence
c += 1
# close the fastafile
fastafile.close()
if __name__ == "__main__":
main() | polarise/python-bioclasses | test/test_TSS_Sequence.py | Python | gpl-2.0 | 1,311 | [
"pysam"
] | 6982b8268d413982f82ff54c883b92c809c1780fa44f35b9c96e042979d3f359 |
#
# mainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'vasp'
tab.settings['Output file name'] = 'OUTCAR'
#
# SettingsTab
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'average'
#
# 0th Scenario tabs
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'air'
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['Volume fraction'] = 0.5
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Effective medium method'] = 'Maxwell-Garnett'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Legend'] = 'sfrac=0.0'
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.0
tab.settings['ATR material refractive index'] = 4.0
# Add new scenarios
sfracs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6 , 0.7, 0.8, 0.9, 1.0 ]
for sfrac in sfracs:
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['ATR S polarisation fraction'] = sfrac
tab.settings['Legend'] = 'sfrac='+str(sfrac)
#
# Plotting Tab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 0
tab.settings['Maximum frequency'] = 1200
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Plot title'] = 'Vasp Na2(SO4)2 Calculation'
#
# Analysis Tab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 500
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
| JohnKendrick/PDielec | Examples/ATR/Na2SO42/script.py | Python | mit | 1,777 | [
"VASP"
] | dae344448d3903288f20cee20b196a5042fa6e271863139d3a61f0dc331e63a1 |
from voxel_globe.common_tasks import shared_task, VipTask
from voxel_globe.websockets import ws_logger
import random, time
@shared_task(base=VipTask, bind=True)
def success_task(self):
    """Emit one websocket log line per level, publish two state updates,
    and finish successfully with a small payload."""
    for emit, tag in ((ws_logger.debug, "d "),
                      (ws_logger.info, "i "),
                      (ws_logger.warn, "w ")):
        emit(self, tag + str(random.random()))
    ws_logger.message(self, "Important message about task %s!!" % self.request.id)
    for phase in ('Initializing', 'Processing'):
        self.update_state(state=phase, meta={"site_name": "Exciting text"})
    return {"site_name": "Exciting text"}
@shared_task(base=VipTask, bind=True)
def fail_task(self):
    """Emit error and fatal websocket log lines, then raise so the task
    is recorded as failed."""
    for emit, tag in ((ws_logger.error, "e "),
                      (ws_logger.fatal, "f ")):
        emit(self, tag + str(random.random()))
    raise ValueError("Because reasons")
@shared_task(base=VipTask, bind=True)
def long_task(self):
    """Tick through five 5-second work steps, sending a websocket message
    and a PROCESSING progress update (index/total) after each one."""
    total = 5
    self.update_state(state="PROCESSING", meta={"index": 0, "total": total})
    for step in range(1, total + 1):
        time.sleep(5)
        ws_logger.message(self, "Important message %d" % step)
        self.update_state(state="PROCESSING", meta={"index": step, "total": total})
"exciting"
] | 85b06932973c6316cdac26e550020585ae82fc0491e0707243d13f7c4cdb56ef |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
# This is a modification of @bcoca's `svc` module
DOCUMENTATION = '''
---
module: runit
author: "James Sumners (@jsumners)"
version_added: "2.3"
short_description: Manage runit services.
description:
- Controls runit services on remote hosts using the sv utility.
options:
name:
required: true
description:
- Name of the service to manage.
state:
required: false
choices: [ started, stopped, restarted, killed, reloaded, once ]
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
service (sv restart) and C(killed) will always bounce the service (sv force-stop).
C(reloaded) will send a HUP (sv reload).
C(once) will run a normally downed sv once (sv once), not really
an idempotent operation.
enabled:
required: false
choices: [ "yes", "no" ]
description:
- Whether the service is enabled or not; if disabled it also implies stopped.
service_dir:
required: false
default: /var/service
description:
- directory runsv watches for services
service_src:
required: false
default: /etc/sv
description:
- directory where services are defined, the source of symlinks to service_dir.
'''
EXAMPLES = '''
# Example action to start sv dnscache, if not running
- runit:
name: dnscache
state: started
# Example action to stop sv dnscache, if running
- runit:
name: dnscache
state: stopped
# Example action to kill sv dnscache, in all cases
- runit:
name: dnscache
state: killed
# Example action to restart sv dnscache, in all cases
- runit:
name: dnscache
state: restarted
# Example action to reload sv dnscache, in all cases
- runit:
name: dnscache
state: reloaded
# Example using alt sv directory location
- runit:
name: dnscache
state: reloaded
service_dir: /run/service
'''
import os
import re
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Sv(object):
"""
Main class that handles daemontools, can be subclassed and overridden in case
we want to use a 'derivative' like encore, s6, etc
"""
#def __new__(cls, *args, **kwargs):
# return _load_dist_subclass(cls, args, kwargs)
def __init__(self, module):
self.extra_paths = [ ]
self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
self.module = module
self.name = module.params['name']
self.service_dir = module.params['service_dir']
self.service_src = module.params['service_src']
self.enabled = None
self.full_state = None
self.state = None
self.pid = None
self.duration = None
self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
self.svc_full = '/'.join([ self.service_dir, self.name ])
self.src_full = '/'.join([ self.service_src, self.name ])
self.enabled = os.path.lexists(self.svc_full)
if self.enabled:
self.get_status()
else:
self.state = 'stopped'
def enable(self):
if os.path.exists(self.src_full):
try:
os.symlink(self.src_full, self.svc_full)
except OSError as e:
self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
else:
self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
def disable(self):
self.execute_command([self.svc_cmd,'force-stop',self.src_full])
try:
os.unlink(self.svc_full)
except OSError as e:
self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
def get_status(self):
(rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])
if err is not None and err:
self.full_state = self.state = err
else:
self.full_state = out
m = re.search('\(pid (\d+)\)', out)
if m:
self.pid = m.group(1)
m = re.search(' (\d+)s', out)
if m:
self.duration = m.group(1)
if re.search('run:', out):
self.state = 'started'
elif re.search('down:', out):
self.state = 'stopped'
else:
self.state = 'unknown'
return
def started(self):
return self.start()
def start(self):
return self.execute_command([self.svc_cmd, 'start', self.svc_full])
def stopped(self):
return self.stop()
def stop(self):
return self.execute_command([self.svc_cmd, 'stop', self.svc_full])
def once(self):
return self.execute_command([self.svc_cmd, 'once', self.svc_full])
def reloaded(self):
return self.reload()
def reload(self):
return self.execute_command([self.svc_cmd, 'reload', self.svc_full])
def restarted(self):
return self.restart()
def restart(self):
return self.execute_command([self.svc_cmd, 'restart', self.svc_full])
def killed(self):
return self.kill()
def kill(self):
return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])
def execute_command(self, cmd):
try:
(rc, out, err) = self.module.run_command(' '.join(cmd))
except Exception as e:
self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
return (rc, out, err)
def report(self):
self.get_status()
states = {}
for k in self.report_vars:
states[k] = self.__dict__[k]
return states
# ===========================================
# Main control flow

def main():
    """Module entry point: parse options, then converge the service's
    enablement (symlink) and run state to the requested values."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
            enabled=dict(required=False, type='bool'),
            dist=dict(required=False, default='runit'),
            service_dir=dict(required=False, default='/var/service'),
            service_src=dict(required=False, default='/etc/sv'),
        ),
        supports_check_mode=True,
    )

    # Force a C locale so parsing of sv output is not locale-dependent.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    want_state = module.params['state']
    want_enabled = module.params['enabled']

    service = Sv(module)
    changed = False
    service.report()  # refresh the current status before comparing

    # Converge the enabled/disabled symlink first.
    if want_enabled is not None and want_enabled != service.enabled:
        changed = True
        if not module.check_mode:
            try:
                if want_enabled:
                    service.enable()
                else:
                    service.disable()
            except (OSError, IOError) as e:
                module.fail_json(msg="Could not change service link: %s" % to_native(e), exception=traceback.format_exc())

    # Then converge the run state by dispatching to the matching Sv method.
    if want_state is not None and want_state != service.state:
        changed = True
        if not module.check_mode:
            getattr(service, want_state)()

    module.exit_json(changed=changed, sv=service.report())


if __name__ == '__main__':
    main()
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/system/runit.py | Python | bsd-3-clause | 8,577 | [
"Brian"
] | cd6de0943fdffd4277ce48092baf390380d45cd5d7b04fc6066dacc4e3f5972c |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import glance_store
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import six
from six.moves import http_client as http
import webob
from glance.api import policy
from glance.common import exception
from glance.common import timeutils
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.gateway
from glance.i18n import _
import glance.notifier
import glance.schema
LOG = logging.getLogger(__name__)
class ImageMembersController(object):
    """v2 API controller for listing and managing image memberships."""

    def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                 store_api=None):
        # Each collaborator falls back to the process-wide default when not
        # injected explicitly (injection is used by the unit tests).
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.store_api = store_api or glance_store
        self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
                                              self.notifier, self.policy)

    def _get_member_repo(self, req, image):
        """Return the member repository for *image*.

        :raises webob.exc.HTTPForbidden: when the image's visibility does not
            allow members.
        """
        try:
            # For public, private, and community images, a forbidden exception
            # with message "Only shared images have members." is thrown.
            return self.gateway.get_member_repo(image, req.context)
        except exception.Forbidden as e:
            msg = (_("Error fetching members of image %(image_id)s: "
                     "%(inner_msg)s") % {"image_id": image.image_id,
                                         "inner_msg": e.msg})
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)

    def _lookup_image(self, req, image_id):
        """Fetch the image domain object for *image_id*, translating domain
        NotFound/Forbidden errors into HTTP 404/403 responses."""
        image_repo = self.gateway.get_repo(req.context)
        try:
            return image_repo.get(image_id)
        except (exception.NotFound):
            msg = _("Image %s not found.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = _("You are not authorized to lookup image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)

    def _lookup_member(self, req, image, member_id):
        """Fetch a single member record of *image*, translating domain
        NotFound/Forbidden errors into HTTP 404/403 responses."""
        member_repo = self._get_member_repo(req, image)
        try:
            return member_repo.get(member_id)
        except (exception.NotFound):
            msg = (_("%(m_id)s not found in the member list of the image "
                     "%(i_id)s.") % {"m_id": member_id,
                                     "i_id": image.image_id})
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = (_("You are not authorized to lookup the members of the "
                     "image %s.") % image.image_id)
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)

    @utils.mutating
    def create(self, req, image_id, member_id):
        """
        Adds a membership to the image.

        :param req: the Request object coming from the wsgi layer
        :param image_id: the image identifier
        :param member_id: the member identifier
        :returns: The response body is a mapping of the following form

        .. code-block:: json

            {'member_id': <MEMBER>,
             'image_id': <IMAGE>,
             'status': <MEMBER_STATUS>
             'created_at': ..,
             'updated_at': ..}

        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        image_member_factory = self.gateway.get_image_member_factory(
            req.context)
        try:
            new_member = image_member_factory.new_image_member(image,
                                                               member_id)
            member_repo.add(new_member)
            return new_member
        except exception.Forbidden:
            msg = _("Not allowed to create members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.Duplicate:
            # Adding the same member twice is a client error -> 409.
            msg = _("Member %(member_id)s is duplicated for image "
                    "%(image_id)s") % {"member_id": member_id,
                                       "image_id": image_id}
            LOG.warning(msg)
            raise webob.exc.HTTPConflict(explanation=msg)
        except exception.ImageMemberLimitExceeded as e:
            msg = (_("Image member limit exceeded for image %(id)s: %(e)s:")
                   % {"id": image_id,
                      "e": encodeutils.exception_to_unicode(e)})
            LOG.warning(msg)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)

    @utils.mutating
    def update(self, req, image_id, member_id, status):
        """
        Update the status of a membership of the image.

        :param req: the Request object coming from the wsgi layer
        :param image_id: the image identifier
        :param member_id: the member identifier
        :param status: the new status for the membership
        :returns: The response body is a mapping of the following form

        .. code-block:: json

            {'member_id': <MEMBER>,
             'image_id': <IMAGE>,
             'status': <MEMBER_STATUS>,
             'created_at': ..,
             'updated_at': ..}

        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        member = self._lookup_member(req, image, member_id)
        try:
            member.status = status
            member_repo.save(member)
            return member
        except exception.Forbidden:
            msg = _("Not allowed to update members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except ValueError as e:
            # The domain layer raises ValueError for invalid status values.
            msg = (_("Incorrect request: %s")
                   % encodeutils.exception_to_unicode(e))
            LOG.warning(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)

    def index(self, req, image_id):
        """
        Return a list of dictionaries indicating the members of the
        image, i.e., those tenants the image is shared with.

        :param req: the Request object coming from the wsgi layer
        :param image_id: The image identifier
        :returns: The response body is a mapping of the following form

        .. code-block:: json

            {'members': [
                {'member_id': <MEMBER>,
                 'image_id': <IMAGE>,
                 'status': <MEMBER_STATUS>,
                 'created_at': ..,
                 'updated_at': ..}, ..
            ]}

        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        members = []
        try:
            for member in member_repo.list():
                members.append(member)
        except exception.Forbidden:
            msg = _("Not allowed to list members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        return dict(members=members)

    def show(self, req, image_id, member_id):
        """
        Returns the membership of the tenant wrt to the image_id specified.

        :param req: the Request object coming from the wsgi layer
        :param image_id: The image identifier
        :returns: The response body is a mapping of the following form

        .. code-block:: json

            {'member_id': <MEMBER>,
             'image_id': <IMAGE>,
             'status': <MEMBER_STATUS>
             'created_at': ..,
             'updated_at': ..}

        """
        try:
            image = self._lookup_image(req, image_id)
            return self._lookup_member(req, image, member_id)
        except webob.exc.HTTPForbidden as e:
            # Convert Forbidden to NotFound to prevent information
            # leakage.
            raise webob.exc.HTTPNotFound(explanation=e.explanation)

    @utils.mutating
    def delete(self, req, image_id, member_id):
        """
        Removes a membership from the image.
        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        member = self._lookup_member(req, image, member_id)
        try:
            member_repo.remove(member)
            # 204 No Content: a successful deletion has no response body.
            return webob.Response(body='', status=http.NO_CONTENT)
        except exception.Forbidden:
            msg = _("Not allowed to delete members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
class RequestDeserializer(wsgi.JSONRequestDeserializer):
    """Deserializer for member create/update request bodies."""

    # NOTE: the redundant __init__ that only delegated to super() was removed;
    # the inherited constructor is used unchanged.

    def _get_request_body(self, request):
        """Return the parsed JSON request body.

        :raises webob.exc.HTTPBadRequest: if no body was supplied.
        """
        output = super(RequestDeserializer, self).default(request)
        if 'body' not in output:
            msg = _('Body expected in request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return output['body']

    def create(self, request):
        """Extract and validate the member id from a create request.

        :returns: dict(member_id=...) handed to the controller.
        :raises webob.exc.HTTPBadRequest: for a missing, empty or
            wrongly-shaped 'member' entry.
        """
        body = self._get_request_body(request)
        try:
            member_id = body['member']
            if not member_id:
                raise ValueError()
        except KeyError:
            msg = _("Member to be added not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except ValueError:
            msg = _("Member can't be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except TypeError:
            # body['member'] on a non-mapping body raises TypeError.
            msg = _('Expected a member in the form: '
                    '{"member": "image_id"}')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return dict(member_id=member_id)

    def update(self, request):
        """Extract the new membership status from an update request.

        :returns: dict(status=...) handed to the controller.
        :raises webob.exc.HTTPBadRequest: for a missing or wrongly-shaped
            'status' entry.
        """
        body = self._get_request_body(request)
        try:
            status = body['status']
        except KeyError:
            msg = _("Status not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except TypeError:
            msg = _('Expected a status in the form: '
                    '{"status": "status"}')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return dict(status=status)
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serialize image-member controller results into JSON responses.

    ``create``, ``update`` and ``show`` previously carried three identical
    copies of the serialization code; they now share ``_write_json``.
    """

    def __init__(self, schema=None):
        super(ResponseSerializer, self).__init__()
        # Fall back to the canonical member schema when none is injected.
        self.schema = schema or get_schema()

    def _format_image_member(self, member):
        """Build the schema-filtered wire view of a single member."""
        member_view = {}
        attributes = ['member_id', 'image_id', 'status']
        for key in attributes:
            member_view[key] = getattr(member, key)
        member_view['created_at'] = timeutils.isotime(member.created_at)
        member_view['updated_at'] = timeutils.isotime(member.updated_at)
        member_view['schema'] = '/v2/schemas/member'
        member_view = self.schema.filter(member_view)
        return member_view

    def _write_json(self, response, view):
        """Write *view* into *response* as a UTF-8 JSON body."""
        body = jsonutils.dumps(view, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = 'application/json'

    def create(self, response, image_member):
        """Serialize the result of a member-create call."""
        self._write_json(response, self._format_image_member(image_member))

    def update(self, response, image_member):
        """Serialize the result of a member-update call."""
        self._write_json(response, self._format_image_member(image_member))

    def index(self, response, image_members):
        """Serialize a member listing as {'members': [...], 'schema': ...}."""
        image_members = image_members['members']
        image_members_view = []
        for image_member in image_members:
            image_members_view.append(self._format_image_member(image_member))
        totalview = dict(members=image_members_view)
        totalview['schema'] = '/v2/schemas/members'
        self._write_json(response, totalview)

    def show(self, response, image_member):
        """Serialize a single-member lookup."""
        self._write_json(response, self._format_image_member(image_member))
# JSON-schema property definitions for a single image-member record.
# Wrapped into a glance.schema.Schema by get_schema() below.
_MEMBER_SCHEMA = {
    'member_id': {
        'type': 'string',
        'description': _('An identifier for the image member (tenantId)')
    },
    'image_id': {
        'type': 'string',
        'description': _('An identifier for the image'),
        # UUID-shaped: 8-4-4-4-12 hexadecimal groups.
        'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                    '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
    },
    'created_at': {
        'type': 'string',
        'description': _('Date and time of image member creation'),
        # TODO(brian-rosmaita): our jsonschema library doesn't seem to like the
        # format attribute, figure out why (and also fix in images.py)
        # 'format': 'date-time',
    },
    'updated_at': {
        'type': 'string',
        'description': _('Date and time of last modification of image member'),
        # 'format': 'date-time',
    },
    'status': {
        'type': 'string',
        'description': _('The status of this image member'),
        # Closed set of membership states.
        'enum': [
            'pending',
            'accepted',
            'rejected'
        ]
    },
    'schema': {
        'readOnly': True,
        'type': 'string'
    }
}
def get_schema():
    """Return the JSON schema describing a single image member.

    The module-level template is deep-copied so callers may mutate the
    returned schema without affecting other consumers.
    """
    member_properties = copy.deepcopy(_MEMBER_SCHEMA)
    return glance.schema.Schema('member', member_properties)
def get_collection_schema():
    """Return the collection schema wrapping the single-member schema."""
    return glance.schema.CollectionSchema('members', get_schema())
def create_resource():
    """Image Members resource factory method"""
    # Construction order preserved: deserializer, serializer, controller.
    request_deserializer = RequestDeserializer()
    response_serializer = ResponseSerializer()
    members_controller = ImageMembersController()
    return wsgi.Resource(members_controller,
                         request_deserializer,
                         response_serializer)
| rajalokan/glance | glance/api/v2/image_members.py | Python | apache-2.0 | 14,662 | [
"Brian"
] | f8b5af9a7cca99d5e25551d52c7fc55dc0b20a8094a1135dfdf7220ea6564988 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusStDev",
"MultivariateNormalCholesky",
"MultivariateNormalFull",
"MultivariateNormalDiagPlusVDVT",
]
_mvn_prob_note = """
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
```
self.batch_shape + self.event_shape
```
or
```
[M1,...,Mm] + self.batch_shape + self.event_shape
```
"""
class _MultivariateNormalOperatorPD(distribution.Distribution):
  """The multivariate normal distribution on `R^k`.

  This distribution is defined by a 1-D mean `mu` and an instance of
  `OperatorPDBase`, which provides access to a symmetric positive definite
  operator, which defines the covariance.

  #### Mathematical details

  With `C` the covariance matrix represented by the operator, the PDF of this
  distribution is:

  ```
  f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
  ```

  #### Examples

  A single multi-variate Gaussian distribution is defined by a vector of means
  of length `k`, and a covariance matrix of shape `k x k`.

  Extra leading dimensions, if provided, allow for batches.

  ```python
  # Initialize a single 3-variate Gaussian.
  mu = [1, 2, 3]
  chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]]
  cov = tf.contrib.distributions.OperatorPDCholesky(chol)
  dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)

  # Evaluate this on an observation in R^3, returning a scalar.
  dist.pdf([-1, 0, 1.])

  # Initialize a batch of two 3-variate Gaussians.
  mu = [[1, 2, 3], [11, 22, 33.]]
  chol = ...  # shape 2 x 3 x 3, lower triangular, positive diagonal.
  cov = tf.contrib.distributions.OperatorPDCholesky(chol)
  dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)

  # Evaluate this on a two observations, each in R^3, returning a length two
  # tensor.
  x = [[-1, 0, 1], [-11, 0, 11.]]  # Shape 2 x 3.
  dist.pdf(x)
  ```
  """

  def __init__(self,
               mu,
               cov,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalCov"):
    """Multivariate Normal distributions on `R^k`.

    User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
    which determines the covariance.

    Args:
      mu: Floating point tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
      cov: Instance of `OperatorPDBase` with same `dtype` as `mu` and shape
        `[N1,...,Nb, k, k]`.
      validate_args: `Boolean`, default `False`.  Whether to validate input
        with asserts.  If `validate_args` is `False`, and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: If `mu` and `cov` are different dtypes.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name) as ns:
      with ops.name_scope("init", values=[mu] + cov.inputs):
        self._mu = array_ops.identity(mu, name="mu")
        self._cov = cov
        self._validate_args = validate_args  # Needed by _assert_valid_mu.
        self._mu = self._assert_valid_mu(self._mu)
        super(_MultivariateNormalOperatorPD, self).__init__(
            dtype=self._mu.dtype,
            is_reparameterized=True,
            is_continuous=True,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            parameters=parameters,
            graph_parents=[self._mu] + cov.inputs,
            name=ns)

  def _assert_valid_mu(self, mu):
    """Return `mu` after validity checks and possibly with assertations."""
    cov = self._cov
    if mu.dtype != cov.dtype:
      raise TypeError(
          "mu and cov must have the same dtype. Found mu.dtype = %s, "
          "cov.dtype = %s" % (mu.dtype, cov.dtype))
    # Try to validate with static checks.
    mu_shape = mu.get_shape()
    cov_shape = cov.get_shape()
    if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():
      if mu_shape != cov_shape[:-1]:
        raise ValueError(
            "mu.shape and cov.shape[:-1] should match. Found: mu.shape=%s, "
            "cov.shape=%s" % (mu_shape, cov_shape))
      else:
        return mu
    # Static checks could not be run, so possibly do dynamic checks.
    if not self.validate_args:
      return mu
    else:
      assert_same_rank = check_ops.assert_equal(
          array_ops.rank(mu) + 1,
          cov.rank(),
          data=["mu should have rank 1 less than cov. Found: rank(mu) = ",
                array_ops.rank(mu), " rank(cov) = ", cov.rank()],
      )
      with ops.control_dependencies([assert_same_rank]):
        assert_same_shape = check_ops.assert_equal(
            array_ops.shape(mu),
            cov.vector_shape(),
            data=["mu.shape and cov.shape[:-1] should match. "
                  "Found: shape(mu) = "
                  , array_ops.shape(mu), " shape(cov) = ", cov.shape()],
        )
        return control_flow_ops.with_dependencies([assert_same_shape], mu)

  @property
  def mu(self):
    # The (batch) mean tensor, shape [N1,...,Nb, k].
    return self._mu

  @property
  def sigma(self):
    """Dense (batch) covariance matrix, if available."""
    with ops.name_scope(self.name):
      return self._cov.to_dense()

  def log_sigma_det(self, name="log_sigma_det"):
    """Log of determinant of covariance matrix."""
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=self._cov.inputs):
        return self._cov.log_det()

  def sigma_det(self, name="sigma_det"):
    """Determinant of covariance matrix."""
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=self._cov.inputs):
        # exp(log_det) rather than det directly, for numerical stability
        # of the operator implementations.
        return math_ops.exp(self._cov.log_det())

  def _batch_shape(self):
    return self._cov.batch_shape()

  def _get_batch_shape(self):
    return self._cov.get_batch_shape()

  def _event_shape(self):
    # Event shape is [k], the dimension of the underlying vector space.
    return array_ops.stack([self._cov.vector_space_dimension()])

  def _get_event_shape(self):
    return self._cov.get_shape()[-1:]

  def _sample_n(self, n, seed=None):
    # Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
    shape = array_ops.concat_v2([self._cov.vector_shape(), [n]], 0)
    white_samples = random_ops.random_normal(shape=shape,
                                             mean=0.,
                                             stddev=1.,
                                             dtype=self.dtype,
                                             seed=seed)
    correlated_samples = self._cov.sqrt_matmul(white_samples)
    # Move the last dimension to the front
    perm = array_ops.concat_v2(
        (array_ops.stack([array_ops.rank(correlated_samples) - 1]),
         math_ops.range(0, array_ops.rank(correlated_samples) - 1)), 0)
    # TODO(ebrevdo): Once we get a proper tensor contraction op,
    # perform the inner product using that instead of batch_matmul
    # and this slow transpose can go away!
    correlated_samples = array_ops.transpose(correlated_samples, perm)
    samples = correlated_samples + self.mu
    return samples

  @distribution_util.AppendDocstring(_mvn_prob_note)
  def _log_prob(self, x):
    # Q:  Why are shape requirements as stated above?
    # A:  The compatible shapes are precisely the ones that will broadcast to
    #     a shape compatible with self._cov.
    # See Operator base class for notes about shapes compatible with self._cov.
    x = ops.convert_to_tensor(x)
    contrib_tensor_util.assert_same_float_dtype((self._mu, x))
    # _assert_valid_mu asserts that self.mu has same batch shape as self.cov.
    # so batch shape of self.mu = that of self._cov and self, and the
    # batch shape of x_centered is a broadcast version of these.  If this
    # broadcast results in a shape like
    # [M1,...,Mm] + self.batch_shape + self.event_shape
    # OR
    # self.batch_shape + self.event_shape
    # then subsequent operator calls are guaranteed to work.
    x_centered = x - self.mu
    # Compute the term x^{-1} sigma^{-1} x which appears in the exponent of
    # the pdf.
    x_whitened_norm = self._cov.inv_quadratic_form_on_vectors(x_centered)
    k = math_ops.cast(self._cov.vector_space_dimension(), self.dtype)
    log_prob_value = -0.5 * (self.log_sigma_det() +
                             k * math.log(2. * math.pi) +
                             x_whitened_norm)
    output_static_shape = x_centered.get_shape()[:-1]
    log_prob_value.set_shape(output_static_shape)
    return log_prob_value

  @distribution_util.AppendDocstring(_mvn_prob_note)
  def _prob(self, x):
    return math_ops.exp(self.log_prob(x))

  def _entropy(self):
    # Differential entropy: 0.5 * (k * (1 + log(2 pi)) + log det(sigma)).
    log_sigma_det = self.log_sigma_det()
    one_plus_log_two_pi = constant_op.constant(1 + math.log(2 * math.pi),
                                               dtype=self.dtype)
    # Use broadcasting rules to calculate the full broadcast sigma.
    k = math_ops.cast(self._cov.vector_space_dimension(), dtype=self.dtype)
    entropy_value = (k * one_plus_log_two_pi + log_sigma_det) / 2
    entropy_value.set_shape(log_sigma_det.get_shape())
    return entropy_value

  def _mean(self):
    return array_ops.identity(self._mu)

  def _variance(self):
    return self.sigma

  def _mode(self):
    # For a Gaussian the mode coincides with the mean.
    return array_ops.identity(self._mu)
class MultivariateNormalDiag(_MultivariateNormalOperatorPD):
  """The multivariate normal distribution on `R^k`.

  This distribution is defined by a 1-D mean `mu` and a 1-D diagonal
  `diag_stdev`, representing the standard deviations.  This distribution
  assumes the random variables, `(X_1,...,X_k)` are independent, thus no
  non-diagonal terms of the covariance matrix are needed.

  This allows for `O(k)` pdf evaluation, sampling, and storage.

  #### Mathematical details

  The PDF of this distribution is defined in terms of the diagonal covariance
  determined by `diag_stdev`: `C_{ii} = diag_stdev[i]**2`.

  ```
  f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
  ```

  #### Examples

  A single multi-variate Gaussian distribution is defined by a vector of means
  of length `k`, and the square roots of the (independent) random variables.

  Extra leading dimensions, if provided, allow for batches.

  ```python
  # Initialize a single 3-variate Gaussian with diagonal standard deviation.
  mu = [1, 2, 3.]
  diag_stdev = [4, 5, 6.]
  dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)

  # Evaluate this on an observation in R^3, returning a scalar.
  dist.pdf([-1, 0, 1])

  # Initialize a batch of two 3-variate Gaussians.
  mu = [[1, 2, 3], [11, 22, 33]]  # shape 2 x 3
  diag_stdev = ...  # shape 2 x 3, positive.
  dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)

  # Evaluate this on a two observations, each in R^3, returning a length two
  # tensor.
  x = [[-1, 0, 1], [-11, 0, 11]]  # Shape 2 x 3.
  dist.pdf(x)
  ```
  """

  def __init__(
      self,
      mu,
      diag_stdev,
      validate_args=False,
      allow_nan_stats=True,
      name="MultivariateNormalDiag"):
    """Multivariate Normal distributions on `R^k`.

    User must provide means `mu` and standard deviations `diag_stdev`.
    Each batch member represents a random vector `(X_1,...,X_k)` of independent
    random normals.
    The mean of `X_i` is `mu[i]`, and the standard deviation is `diag_stdev[i]`.

    Args:
      mu: Rank `N + 1` floating point tensor with shape `[N1,...,Nb, k]`,
        `b >= 0`.
      diag_stdev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`,
        representing the standard deviations.  Must be positive.
      validate_args: `Boolean`, default `False`.  Whether to validate
        input with asserts.  If `validate_args` is `False`,
        and the inputs are invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: If `mu` and `diag_stdev` are different dtypes.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[diag_stdev]) as ns:
      # OperatorPDSqrtDiag treats diag_stdev as the matrix square root,
      # so the covariance is diag(diag_stdev**2).
      cov = operator_pd_diag.OperatorPDSqrtDiag(diag_stdev,
                                                verify_pd=validate_args)
      super(MultivariateNormalDiag, self).__init__(
          mu, cov,
          allow_nan_stats=allow_nan_stats,
          validate_args=validate_args,
          name=ns)
    self._parameters = parameters
class MultivariateNormalDiagWithSoftplusStDev(MultivariateNormalDiag):
  """MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`.

  Convenience subclass for unconstrained parameterization: the softplus
  transform guarantees the standard deviations passed to the parent class
  are positive.
  """

  def __init__(self,
               mu,
               diag_stdev,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalDiagWithSoftplusStdDev"):
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[diag_stdev]) as ns:
      super(MultivariateNormalDiagWithSoftplusStDev, self).__init__(
          mu=mu,
          diag_stdev=nn.softplus(diag_stdev),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    # Overwrite the parent's parameters so the recorded args reflect the
    # pre-softplus values passed to this constructor.
    self._parameters = parameters
class MultivariateNormalDiagPlusVDVT(_MultivariateNormalOperatorPD):
  """The multivariate normal distribution on `R^k`.

  Every batch member of this distribution is defined by a mean and a lightweight
  covariance matrix `C`.

  #### Mathematical details

  The PDF of this distribution in terms of the mean `mu` and covariance `C` is:

  ```
  f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
  ```

  For every batch member, this distribution represents `k` random variables
  `(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
  `C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`

  The user initializes this class by providing the mean `mu`, and a lightweight
  definition of `C`:

  ```
  C = SS^T = SS = (M + V D V^T) (M + V D V^T)
  M is diagonal (k x k)
  V = is shape (k x r), typically r << k
  D = is diagonal (r x r), optional (defaults to identity).
  ```

  This allows for `O(kr + r^3)` pdf evaluation and determinant, and `O(kr)`
  sampling and storage (per batch member).

  #### Examples

  A single multi-variate Gaussian distribution is defined by a vector of means
  of length `k`, and square root of the covariance `S = M + V D V^T`.  Extra
  leading dimensions, if provided, allow for batches.

  ```python
  # Initialize a single 3-variate Gaussian with covariance square root
  # S = M + V D V^T, where V D V^T is a matrix-rank 2 update.
  mu = [1, 2, 3.]
  diag_large = [1.1, 2.2, 3.3]
  v = ...  # shape 3 x 2
  diag_small = [4., 5.]
  dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
      mu, diag_large, v, diag_small=diag_small)

  # Evaluate this on an observation in R^3, returning a scalar.
  dist.pdf([-1, 0, 1])

  # Initialize a batch of two 3-variate Gaussians.  This time, don't provide
  # diag_small.  This means S = M + V V^T.
  mu = [[1, 2, 3], [11, 22, 33]]  # shape 2 x 3
  diag_large = ...  # shape 2 x 3
  v = ...  # shape 2 x 3 x 1, a matrix-rank 1 update.
  dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
      mu, diag_large, v)

  # Evaluate this on a two observations, each in R^3, returning a length two
  # tensor.
  x = [[-1, 0, 1], [-11, 0, 11]]  # Shape 2 x 3.
  dist.pdf(x)
  ```
  """

  def __init__(
      self,
      mu,
      diag_large,
      v,
      diag_small=None,
      validate_args=False,
      allow_nan_stats=True,
      name="MultivariateNormalDiagPlusVDVT"):
    """Multivariate Normal distributions on `R^k`.

    For every batch member, this distribution represents `k` random variables
    `(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
    `C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`

    The user initializes this class by providing the mean `mu`, and a
    lightweight definition of `C`:

    ```
    C = SS^T = SS = (M + V D V^T) (M + V D V^T)
    M is diagonal (k x k)
    V = is shape (k x r), typically r << k
    D = is diagonal (r x r), optional (defaults to identity).
    ```

    Args:
      mu: Rank `n + 1` floating point tensor with shape `[N1,...,Nn, k]`,
        `n >= 0`.  The means.
      diag_large: Optional rank `n + 1` floating point tensor, shape
        `[N1,...,Nn, k]` `n >= 0`.  Defines the diagonal matrix `M`.
      v: Rank `n + 1` floating point tensor, shape `[N1,...,Nn, k, r]`
        `n >= 0`.  Defines the matrix `V`.
      diag_small: Rank `n + 1` floating point tensor, shape
        `[N1,...,Nn, k]` `n >= 0`.  Defines the diagonal matrix `D`.  Default
        is `None`, which means `D` will be the identity matrix.
      validate_args: `Boolean`, default `False`.  Whether to validate input
        with asserts.  If `validate_args` is `False`,
        and the inputs are invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[diag_large, v, diag_small]) as ns:
      # Build S = M + V D V^T as the covariance square root operator.
      cov = operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
          operator_pd_diag.OperatorPDDiag(
              diag_large, verify_pd=validate_args),
          v,
          diag=diag_small,
          verify_pd=validate_args,
          verify_shapes=validate_args)
      super(MultivariateNormalDiagPlusVDVT, self).__init__(
          mu, cov,
          allow_nan_stats=allow_nan_stats,
          validate_args=validate_args,
          name=ns)
    self._parameters = parameters
class MultivariateNormalCholesky(_MultivariateNormalOperatorPD):
  """The multivariate normal distribution on `R^k`.

  This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`.
  Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling,
  and requires `O(k^2)` storage.

  #### Mathematical details

  The Cholesky factor `chol` defines the covariance matrix: `C = chol chol^T`.

  The PDF of this distribution is then:

  ```
  f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
  ```

  #### Examples

  A single multi-variate Gaussian distribution is defined by a vector of means
  of length `k`, and a covariance matrix of shape `k x k`.

  Extra leading dimensions, if provided, allow for batches.

  ```python
  # Initialize a single 3-variate Gaussian with diagonal covariance.
  # Note, this would be more efficient with MultivariateNormalDiag.
  mu = [1, 2, 3.]
  chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2]]
  dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)

  # Evaluate this on an observation in R^3, returning a scalar.
  dist.pdf([-1, 0, 1])

  # Initialize a batch of two 3-variate Gaussians.
  mu = [[1, 2, 3], [11, 22, 33]]
  chol = ...  # shape 2 x 3 x 3, lower triangular, positive diagonal.
  dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)

  # Evaluate this on a two observations, each in R^3, returning a length two
  # tensor.
  x = [[-1, 0, 1], [-11, 0, 11]]  # Shape 2 x 3.
  dist.pdf(x)
  ```

  Trainable (batch) Cholesky matrices can be created with
  `tf.contrib.distributions.matrix_diag_transform()`
  """

  def __init__(self,
               mu,
               chol,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalCholesky"):
    """Multivariate Normal distributions on `R^k`.

    User must provide means `mu` and `chol` which holds the (batch) Cholesky
    factors, such that the covariance of each batch member is `chol chol^T`.

    Args:
      mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
        `b >= 0`.
      chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
        `[N1,...,Nb, k, k]`.  The upper triangular part is ignored (treated as
        though it is zero), and the diagonal must be positive.
      validate_args: `Boolean`, default `False`.  Whether to validate input
        with asserts.  If `validate_args` is `False`, and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: If `mu` and `chol` are different dtypes.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[chol]) as ns:
      cov = operator_pd_cholesky.OperatorPDCholesky(chol,
                                                    verify_pd=validate_args)
      super(MultivariateNormalCholesky, self).__init__(
          mu, cov,
          allow_nan_stats=allow_nan_stats,
          validate_args=validate_args,
          name=ns)
    self._parameters = parameters
class MultivariateNormalFull(_MultivariateNormalOperatorPD):
  """The multivariate normal distribution on `R^k`.

  This distribution is defined by a 1-D mean `mu` and covariance matrix `sigma`.
  Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations.

  #### Mathematical details

  With `C = sigma`, the PDF of this distribution is:

  ```
  f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
  ```

  #### Examples

  A single multi-variate Gaussian distribution is defined by a vector of means
  of length `k`, and a covariance matrix of shape `k x k`.

  Extra leading dimensions, if provided, allow for batches.

  ```python
  # Initialize a single 3-variate Gaussian with diagonal covariance.
  mu = [1, 2, 3.]
  sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
  dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)

  # Evaluate this on an observation in R^3, returning a scalar.
  dist.pdf([-1, 0, 1])

  # Initialize a batch of two 3-variate Gaussians.
  mu = [[1, 2, 3], [11, 22, 33.]]
  sigma = ...  # shape 2 x 3 x 3, positive definite.
  dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)

  # Evaluate this on a two observations, each in R^3, returning a length two
  # tensor.
  x = [[-1, 0, 1], [-11, 0, 11.]]  # Shape 2 x 3.
  dist.pdf(x)
  ```
  """

  def __init__(self,
               mu,
               sigma,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalFull"):
    """Multivariate Normal distributions on `R^k`.

    User must provide means `mu` and `sigma`, the mean and covariance.

    Args:
      mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
        `b >= 0`.
      sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
        `[N1,...,Nb, k, k]`.  Each batch member must be positive definite.
      validate_args: `Boolean`, default `False`.  Whether to validate input
        with asserts.  If `validate_args` is `False`, and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: If `mu` and `sigma` are different dtypes.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[sigma]) as ns:
      cov = operator_pd_full.OperatorPDFull(sigma, verify_pd=validate_args)
      super(MultivariateNormalFull, self).__init__(
          mu, cov,
          allow_nan_stats=allow_nan_stats,
          validate_args=validate_args,
          name=ns)
    self._parameters = parameters
@kullback_leibler.RegisterKL(
    _MultivariateNormalOperatorPD, _MultivariateNormalOperatorPD)
def _kl_mvn_mvn_brute_force(mvn_a, mvn_b, name=None):
  """Batched KL divergence `KL(mvn_a || mvn_b)` for multivariate normals.

  With `X`, `Y` both multivariate normals in `R^k` with means `mu_x`, `mu_y` and
  covariance `C_x`, `C_y` respectively,

  ```
  KL(X || Y) = 0.5 * ( T + Q - k + L ),
  T := trace(C_b^{-1} C_a),
  Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
  L := Log[Det(C_b)] - Log[Det(C_a)]
  ```

  This `Op` computes the trace by solving `C_b^{-1} C_a`.  Although efficient
  methods for solving systems with `C_b` may be available, a dense version of
  (the square root of) `C_a` is used, so performance is `O(B s k^2)` where `B`
  is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`
  and `y`.

  Args:
    mvn_a: Instance of subclass of `_MultivariateNormalOperatorPD`.
    mvn_b: Instance of subclass of `_MultivariateNormalOperatorPD`.
    name: (optional) name to use for created ops.  Default "kl_mvn_mvn".

  Returns:
    Batchwise `KL(mvn_a || mvn_b)`.
  """
  # Access the "private" OperatorPD that each mvn is built from.
  cov_a = mvn_a._cov  # pylint: disable=protected-access
  cov_b = mvn_b._cov  # pylint: disable=protected-access
  mu_a = mvn_a.mu
  mu_b = mvn_b.mu
  inputs = [mu_a, mu_b] + cov_a.inputs + cov_b.inputs
  with ops.name_scope(name, "kl_mvn_mvn", inputs):
    # If Ca = AA', Cb = BB', then
    # tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
    #                = tr[inv(B) A A' inv(B)']
    #                = tr[(inv(B) A) (inv(B) A)']
    #                = sum_{ik} (inv(B) A)_{ik}^2
    # The second equality follows from the cyclic permutation property.
    b_inv_a = cov_b.sqrt_solve(cov_a.sqrt_to_dense())
    t = math_ops.reduce_sum(
        math_ops.square(b_inv_a),
        reduction_indices=[-1, -2])
    q = cov_b.inv_quadratic_form_on_vectors(mu_b - mu_a)
    k = math_ops.cast(cov_a.vector_space_dimension(), mvn_a.dtype)
    # sqrt_log_det is 0.5 * log(det), so the halves of L are already built in.
    one_half_l = cov_b.sqrt_log_det() - cov_a.sqrt_log_det()
    return 0.5 * (t + q - k) + one_half_l
| AndreasMadsen/tensorflow | tensorflow/contrib/distributions/python/ops/mvn.py | Python | apache-2.0 | 28,686 | [
"Gaussian"
] | f2b84b08b86cc17a502da0855e2c302c62d3e727aa1586843e5837d99f564118 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
from timeit import default_timer as timer
import os
from pyscf.nao import system_vars_c, prod_basis_c, tddft_iter_c
from numpy import allclose

# Regression check: the batched product-basis constructor must produce the
# same data as the original (non-batch) constructor for the same SIESTA run.
# Timings for each stage are printed alongside.

t1 = timer()
sv = system_vars_c().init_siesta_xml(label='siesta', cd='.', force_gamma=True)
t2 = timer(); print('system_vars_c().init_siesta_xml ', t2-t1); t1 = timer()

pbb = prod_basis_c().init_prod_basis_pp_batch(sv)
t2 = timer(); print('prod_basis_c().init_prod_basis_pp_batch(sv) ', t2-t1); t1 = timer()

pba = prod_basis_c().init_prod_basis_pp(sv)
t2 = timer(); print('prod_basis_c().init_prod_basis_pp(sv) ', t2-t1); t1 = timer()

# Pair-by-pair comparison of the bp2info records (presumably bilocal
# atom-pair data: atoms, contributing-center indices, vertex and conversion
# arrays -- TODO confirm against prod_basis_c).
for a,b in zip(pba.bp2info,pbb.bp2info):
  for a1,a2 in zip(a.atoms,b.atoms): assert a1==a2
  for a1,a2 in zip(a.cc2a, b.cc2a): assert a1==a2
  assert allclose(a.vrtx, b.vrtx)
  assert allclose(a.cc, b.cc)

# Print summed absolute differences of the sparse conversion and vertex
# matrices; both should be zero when the constructors agree.
print(abs(pbb.get_da2cc_coo().tocsr()-pba.get_da2cc_coo().tocsr()).sum(), \
    abs(pbb.get_dp_vertex_coo().tocsr()-pba.get_dp_vertex_coo().tocsr()).sum())

""" This is iterative TDDFT with SIESTA starting point """
#td = tddft_iter_c(pb.sv, pb)
#t2 = timer(); print(t2-t1); t1 = timer()
#dn0 = td.apply_rf0(td.moms1[:,0])
#t2 = timer(); print(t2-t1); t1 = timer()
| gkc1000/pyscf | pyscf/nao/test/silver_55/test_pb_batch.py | Python | apache-2.0 | 1,825 | [
"PySCF",
"SIESTA"
] | ef46249da5966ce3bfa7530e7f7ee636d9264df779b09ff628dfe5e7acb39f62 |
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Options for authenticating with the API.
class ModuleDocFragment(object):
    """Shared documentation fragment: options for authenticating with the
    Kubernetes/OpenShift API. Modules pull this in via
    extends_documentation_fragment; DOCUMENTATION is parsed as YAML, so its
    content must stay valid YAML."""

    # Options for authenticating with the API.
    DOCUMENTATION = r'''
options:
  host:
    description:
    - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
    type: str
  api_key:
    description:
    - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
    type: str
  kubeconfig:
    description:
    - Path to an existing Kubernetes config file. If not provided, and no other connection
      options are provided, the openshift client will attempt to load the default
      configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG environment
      variable.
    type: path
  context:
    description:
    - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment variable.
    type: str
  username:
    description:
    - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
      variable.
    - Please note that this only works with clusters configured to use HTTP Basic Auth. If your cluster has a
      different form of authentication (e.g. OAuth2 in OpenShift), this option will not work as expected and you
      should look into the C(k8s_auth) module, as that might do what you need.
    type: str
  password:
    description:
    - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
      variable.
    - Please read the description of the C(username) option for a discussion of when this option is applicable.
    type: str
  client_cert:
    description:
    - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE environment
      variable.
    type: path
    aliases: [ cert_file ]
  client_key:
    description:
    - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment
      variable.
    type: path
    aliases: [ key_file ]
  ca_cert:
    description:
    - Path to a CA certificate used to authenticate with the API. The full certificate chain must be provided to
      avoid certificate validation errors. Can also be specified via K8S_AUTH_SSL_CA_CERT environment variable.
    type: path
    aliases: [ ssl_ca_cert ]
  validate_certs:
    description:
    - Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
      environment variable.
    type: bool
    aliases: [ verify_ssl ]
  proxy:
    description:
    - The URL of an HTTP proxy to use for the connection. Can also be specified via K8S_AUTH_PROXY environment variable.
    - Please note that this module does not pick up typical proxy settings from the environment (e.g. HTTP_PROXY).
    version_added: "2.9"
  persist_config:
    description:
    - Whether or not to save the kube config refresh tokens.
      Can also be specified via K8S_AUTH_PERSIST_CONFIG environment variable.
    - When the k8s context is using a user credentials with refresh tokens (like oidc or gke/gcloud auth),
      the token is refreshed by the k8s python client library but not saved by default. So the old refresh token can
      expire and the next auth might fail. Setting this flag to true will tell the k8s python client to save the
      new refresh token to the kube config file.
    - Default to false.
    - Please note that the current version of the k8s python client library does not support setting this flag to True yet.
    - "The fix for this k8s python library is here: https://github.com/kubernetes-client/python-base/pull/169"
    type: bool
    version_added: "2.10"
notes:
  - "The OpenShift Python client wraps the K8s Python client, providing full access to
    all of the APIS and models available on both platforms. For API version details and
    additional information visit https://github.com/openshift/openshift-restclient-python"
  - "To avoid SSL certificate validation errors when C(validate_certs) is I(True), the full
    certificate chain for the API server must be provided via C(ca_cert) or in the
    kubeconfig file."
'''
| roadmapper/ansible | lib/ansible/plugins/doc_fragments/k8s_auth_options.py | Python | gpl-3.0 | 4,390 | [
"VisIt"
] | 10c50fd8d6a76951e7bd4f7a867186f77245d88777d3892f2f1438de813a820a |
#/**********************************************************************
#** This program is part of 'MOOSE', the
#** Messaging Object Oriented Simulation Environment.
#** Copyright (C) 2003-2014 Upinder S. Bhalla. and NCBS
#** It is made available under the terms of the
#** GNU Lesser General Public License version 2.1
#** See the file COPYING.LIB for the full notice.
#**********************************************************************/
from __future__ import print_function
'''
This LIF network with Ca plasticity is based on:
David Higgins, Michael Graupner, Nicolas Brunel
Memory Maintenance in Synapses with Calcium-Based
Plasticity in the Presence of Background Activity
PLOS Computational Biology, 2014.
Implemented by: Aditya Gilra, NCBS, Bangalore, October, 2014.
This variant has 400 LIF neurons
Upi Bhalla, Nov 2014: Appended single neuron model.
This script is a reduced version of the model that generates the panels
in Figure 6. It takes just a couple of minutes to run 30 seconds of
simulation time. That is, 400 neurons, 1 detailed model with 36
compartments plus 16 spines each having 2 compartments and 34 molecules,
and lots of synapses.
'''
## import modules and functions to be used
import numpy as np
import matplotlib.pyplot as plt
import random
import time
import moose
from numpy import random as nprand
from moose.neuroml.NeuroML import NeuroML
import sys
sys.path.append( "/home/bhalla/moose/trunk/Demos/util" )
import rdesigneur as rd
#cellname = "./cells_channels/CA1_nochans.morph.xml"
cellname = "./cells_channels/ca1_minimal.p" # morphology prototype for the detailed neuron
fname = "reduced" # base name used when dumping xplot files
#############################################
np.random.seed(100) # set seed for reproducibility of simulations
random.seed(100) # set seed for reproducibility of simulations
moose.seed(100) # set seed for reproducibility of simulations
#############################################
# All parameters as per:
# David Higgins, Michael Graupner, Nicolas Brunel
#     Memory Maintenance in Synapses with Calcium-Based
#     Plasticity in the Presence of Background Activity
#     PLOS Computational Biology, 2014.
#############################################
#############################################
# Neuron model
#############################################
# equation: dv/dt = (1/taum)*(-(v-el)) + inp
# with spike when v>vt, reset to vr
PI = 3.14159265358979
useGssa = True          # use Gillespie stochastic solver for the chem model
combineSegments = False
el = -70e-3  #V         # Resting potential
vt = -50e-3  #V         # Spiking threshold
Rm = 20e6    #Ohm       # Only taum is needed, but LIF neuron accepts
Cm = 1e-9    #F         # Rm and Cm and constructs taum=Rm*Cm
taum = Rm*Cm #s         # Membrane time constant is 20 ms
vr = -60e-3  #V         # Reset potential
Iinject = 10e-3/Rm      # constant current injection into LIF neuron
                        # same as setting el=-70+15=-55 mV and inp=0
noiseInj = True         # inject noisy current into each cell: boolean
noiseInjSD = 5e-3/Rm #A # SD of noise added to 'current'
                        # SD*sqrt(taum) is used as noise current SD
#############################################
# Network parameters: numbers
#############################################
N = 400          # Total number of neurons
fexc = 0.8       # Fraction of exc neurons
NE = int(fexc*N) # Number of excitatory cells
NI = N-NE        # Number of inhibitory cells
#############################################
# Simulation parameters
#############################################
simtime = 30 #s          # Simulation time
interTetInterval = 5.0   # sec # spacing between tetanic stimuli
updateDt = 0.2 #s: time to update live display
dt = 1e-3 #s             # time step
#############################################
# Network parameters: synapses (not for ExcInhNetBase)
#############################################
## With each presynaptic spike in exc / inh neuron,
## J / -g*J is added to post-synaptic Vm -- delta-fn synapse.
## Since LIF neuron used below is derived from Compartment class,
## conductance-based synapses (SynChan class) can also be used.
C = 100     # Number of incoming connections on each neuron (exc or inh)
fC = fexc   # fraction fC incoming connections are exc, rest inhibitory
J = 0.2e-3 #V  # exc strength is J (in V as we add to voltage)
            # Critical J is ~ 0.45e-3 V in paper for N = 10000, C = 1000
            # See what happens for J = 0.2e-3 V versus J = 0.8e-3 V
g = 4.0     # -gJ is the inh strength. For exc-inh balance g >~ f(1-f)=4
syndelay = dt  # synaptic delay:
refrT = 0.0 # s # absolute refractory time
#############################################
# Ca Plasticity parameters: synapses (not for ExcInhNetBase)
#############################################
CaPlasticity = True # set it True or False to turn on/off plasticity
tauCa = 22.6936e-3  # s # Ca decay time scale
tauSyn = 346.3615   # s # synaptic plasticity time scale
## in vitro values in Higgins et al 2014, faster plasticity
CaPre = 0.56175     # mM
CaPost = 1.2964     # mM
## in vivo values in Higgins et al 2014, slower plasticity
#CaPre = 0.33705    # mM
#CaPost = 0.74378   # mM
delayD = 4.6098e-3  # s # CaPre is added to Ca after this delay
                    # proxy for rise-time of NMDA
thetaD = 1.0        # mM # depression threshold for Ca
thetaP = 1.3        # mM # potentiation threshold for Ca
gammaD = 331.909    # factor for depression term
gammaP = 725.085    # factor for potentiation term
eqWeight = 0.5      # initial synaptic weight
                    # gammaP/(gammaP+gammaD) = eq weight w/o noise
                    # but see eqn (22), noiseSD also appears
bistable = True     # if bistable is True, use bistable potential for weights
noisy = True        # use noisy weight updates given by noiseSD
noiseSD = 3.3501    # if noisy, use noiseSD (3.3501 from Higgins et al 2014)
#noiseSD = 0.1      # if bistable==False, use a smaller noise than in Higgins et al 2014
#############################################
# Here we set up a single neuron to fit in this network
#############################################
# Clock ticks for the detailed neuron: electrical, diffusion, chemical,
# and the corresponding plotting intervals.
diffDt = 0.005
chemDt = 0.005
ePlotDt = 0.5e-3
cPlotDt = 0.005
#############################################
def buildRdesigneur():
    """Assemble the rdesigneur specification for the detailed CA1 neuron:
    prototypes (cell morphology, channels, spines, chemistry), their spatial
    distributions on the cell, and the adaptors that couple electrical Ca to
    the chemical LTP model. Returns the configured (unbuilt) rdesigneur."""
    ##################################################################
    # Here we define which prototypes are to be loaded in to the system.
    # Each specification has the format
    # source [localName]
    # source can be any of
    # filename.extension,   # Identify type of file by extension, load it.
    # function(),           # func( name ) builds object of specified name
    # file.py:function() ,  # load Python file, run function(name) in it.
    # moose.Classname       # Make obj moose.Classname, assign to name.
    # path                  # Already loaded into library or on path.
    # After loading the prototypes, there should be an object called 'name'
    # in the library.
    ##################################################################
    cellProto = [ [cellname, 'elec'] ]
    chanProto = [
        ['./cells_channels/hd.xml'], \
        ['./cells_channels/kap.xml'], \
        ['./cells_channels/kad.xml'], \
        ['./cells_channels/kdr.xml'], \
        ['./cells_channels/na3.xml'], \
        ['./cells_channels/nax.xml'], \
        ['./cells_channels/CaConc.xml'], \
        ['./cells_channels/Ca.xml'], \
        ['./cells_channels/NMDA.xml'], \
        ['./cells_channels/Glu.xml'], \
        ['./cells_channels/GABA.xml'] \
    ]
    spineProto = [ \
        ['makeSpineProto()', 'spine' ]
    ]
    chemProto = [ \
        [ 'psd53.g', 'ltpModel'] \
    ]
    ##################################################################
    # Here we define what goes where, and any parameters. Each distribution
    # has the format
    # protoName, path, field, expr, [field, expr]...
    # where
    #   protoName identifies the prototype to be placed on the cell
    #   path is a MOOSE wildcard path specifying where to put things
    #   field is the field to assign.
    #   expr is a math expression to define field value. This uses the
    #     muParser. Built-in variables are p, g, L, len, dia.
    #     The muParser provides most math functions, and the Heaviside
    #     function H(x) = 1 for x > 0 is also provided.
    ##################################################################
    passiveDistrib = [
        [ ".", "#", "RM", "2.8", "CM", "0.01", "RA", "1.5", \
            "Em", "-58e-3", "initVm", "-65e-3" ], \
        [ ".", "#axon#", "RA", "0.5" ] \
    ]
    chanDistrib = [ \
        ["hd", "#dend#,#apical#,#user#", "Gbar", "5e-2*(1+(p*3e4))" ], \
        ["kdr", "#", "Gbar", "100" ], \
        ["na3", "#soma#,#dend#,#apical#,#user#", "Gbar", "250" ], \
        ["nax", "#axon#", "Gbar", "1250" ], \
        ["nax", "#soma#", "Gbar", "100" ], \
        ["kap", "#axon#,#soma#", "Gbar", "300" ], \
        ["kap", "#dend#,#apical#,#user#", "Gbar", \
            "300*(H(100-p*1e6)) * (1+(p*1e4))" ], \
        ["Ca_conc", "#soma#,#dend#,#apical#,#user#", "tau", "0.0133" ], \
        ["kad", "#dend#,#apical#,#user#", "Gbar", \
            "300*H(p*1e6-100)*(1+p*1e4)" ], \
        ["Ca", "#soma#", "Gbar", "10e-3" ], \
        ["Ca", "#dend#,#apical#,#user#", "Gbar", "50e-3" ], \
        ["glu", "#dend#,#apical#", "Gbar", "50" ], \
        ["NMDA", "#dend#,#apical#", "Gbar", "20" ], \
        ["GABA", "#dend#,#apical#,#user#", "Gbar", "100*H(250e-6 - p)" ], \
    ]
    # Spines only between 400 and 800 um path distance from the soma.
    spineDistrib = [ \
        ["spine", '#apical#', \
            "spineSpacing", "H(p-400e-6)*H(800e-6-p)*10e-6", \
            "spineSpacingDistrib", "1e-6", \
            "angle", "0", \
            "angleDistrib", str( 2*PI ), \
            "size", "1", \
            "sizeDistrib", "0.5" ] \
    ]
    chemDistrib = [ \
        [ "ltpModel", "#apical#", "install", "1" ] \
    ]
    '''
    '''
    ######################################################################
    # Here we define the mappings across scales. Format:
    # sourceObj sourceField destObj destField couplingExpr [wildcard][spatialExpn]
    # where the coupling expression is anything a muParser can evaluate,
    # using the input variable x. For example: 8e-5 + 300*x
    # For now, let's use existing adaptors which take an offset and scale.
    ######################################################################
    adaptorList = [
        [ 'Ca_conc', 'Ca', 'psd/Ca_input', 'concInit', 8e-5, 1 ],
        [ 'Ca_conc', 'Ca', 'dend/Ca_dend_input', 'concInit', 8e-5, 1 ],
        [ 'psd/tot_PSD_R', 'n', 'glu', 'modulation', 0.5, 0.002 ],
    ]
    ######################################################################
    # Having defined everything, now to create the rdesigneur and proceed
    # with creating the model.
    ######################################################################
    rdes = rd.rdesigneur(
        useGssa = useGssa, \
        combineSegments = combineSegments, \
        stealCellFromLibrary = True, \
        passiveDistrib = passiveDistrib, \
        spineDistrib = spineDistrib, \
        chanDistrib = chanDistrib, \
        chemDistrib = chemDistrib, \
        cellProto = cellProto, \
        spineProto = spineProto, \
        chanProto = chanProto, \
        chemProto = chemProto, \
        adaptorList = adaptorList
    )
    return rdes
#############################################
def makeDetailedNeuron():
    """Construct the detailed multiscale neuron under /model from the
    rdesigneur specification returned by buildRdesigneur()."""
    designer = buildRdesigneur()
    designer.buildModel( '/model' )
def connectDetailedNeuron():
    """Wire the LIF network's spike output into the detailed neuron's glu,
    NMDA and GABA synapses via sparse random connectivity.

    The glu and NMDA passes re-use the same seed sequence (excSeed) so both
    excitatory channels get identical connectivity; GABA uses inhSeed.
    Prints a summary of synapse counts and summed weights at the end.
    """
    excProb = 0.005
    excSeed = 1234
    inhProb = 0.005
    inhSeed = 4567
    numExc = 0
    numNMDA = 0
    numInh = 0
    delayMax = 0.010   # s, uniform random synaptic delays in [delayMin, delayMax)
    delayMin = 0.002
    excWeightMax = 5
    nmdaWeightMax = 2
    inhWeightMax = 50
    # Note we use the same seed for all 3 exc connections, to make sure
    # they are all equivalent.
    seed = excSeed
    totGluWt = 0.0
    totNMDAWt = 0.0
    totGABAWt = 0.0
    for x in moose.wildcardFind( '/model/elec/#/glu/##[ISA=Synapse]' ):
        exc = moose.connect( '/network', 'spikeOut', x, 'addSpike','sparse')
        exc.setRandomConnectivity( excProb, seed )
        seed = seed + 1
        if exc.numEntries > 0:
            numExc += exc.numEntries
            assert( exc.numEntries == x.numField )
            x.vec.delay = delayMin + nprand.rand( exc.numEntries ) * ( delayMax - delayMin )
            x.vec.weight = nprand.rand( exc.numEntries ) * excWeightMax
            #x.parent.tick = 4
            # schedule the channel (grandparent of the synapse) on tick 4
            x.parent.parent.tick = 4
            print('+', end=' ')
            totGluWt += sum(x.vec.weight) * x.parent.parent.Gbar
    # reset the seed so NMDA gets the same connectivity pattern as glu
    seed = excSeed
    for x in moose.wildcardFind( '/model/elec/#/NMDA/##[ISA=Synapse]' ):
        #print " x = ", x
        exc = moose.connect( '/network', 'spikeOut', x, 'addSpike','sparse')
        exc.setRandomConnectivity( excProb, seed )
        seed = seed + 1
        if exc.numEntries > 0:
            numNMDA += exc.numEntries
            assert( exc.numEntries == x.numField )
            x.vec.delay = delayMin + nprand.rand( exc.numEntries ) * ( delayMax - delayMin )
            x.vec.weight = nprand.rand( exc.numEntries ) * nmdaWeightMax
            #x.parent.tick = 4
            x.parent.parent.tick = 4
            print('*', end=' ')
            totNMDAWt += sum(x.vec.weight) * x.parent.parent.Gbar
    seed = inhSeed
    for x in moose.wildcardFind( '/model/elec/#/GABA/##[ISA=Synapse]' ):
        #print x
        inh = moose.connect( '/network', 'spikeOut', x, 'addSpike','sparse')
        inh.setRandomConnectivity( inhProb, seed )
        seed = seed + 1
        if inh.numEntries > 0:
            numInh += inh.numEntries
            x.vec.delay = delayMin + nprand.rand( inh.numEntries ) * ( delayMax - delayMin )
            x.vec.weight = nprand.rand( inh.numEntries ) * inhWeightMax
            #x.parent.tick = 4
            x.parent.parent.tick = 4
            print('-', end=' ')
            totGABAWt += sum(x.vec.weight) * x.parent.parent.Gbar
    print('connectDetailedNeuron: numExc = ', numExc, ', numNMDA=', numNMDA, ', numInh = ', numInh)
    print('connectDetailedNeuron: totWts Glu = ', totGluWt, ', NMDA = ', totNMDAWt, ', GABA = ', totGABAWt)
#############################################
# Exc-Inh network base class without connections
#############################################
class ExcInhNetBase:
    """Simulates and plots LIF neurons (exc and inh separate).
    Author: Aditya Gilra, NCBS, Bangalore, India, October 2014
    """

    def __init__(self,N=N,fexc=fexc,el=el,vt=vt,Rm=Rm,Cm=Cm,vr=vr,\
            refrT=refrT,Iinject=Iinject):
        """ Constructor of the class """
        self.N = N                   # Total number of neurons
        self.fexc = fexc             # Fraction of exc neurons
        self.NmaxExc = int(fexc*N)   # max idx of exc neurons, rest inh
        self.el = el                 # Resting potential
        self.vt = vt                 # Spiking threshold
        self.taum = taum             # Membrane time constant
        self.vr = vr                 # Reset potential
        self.refrT = refrT           # Absolute refractory period
        self.Rm = Rm                 # Membrane resistance
        self.Cm = Cm                 # Membrane capacitance
        self.Iinject = Iinject       # constant input current
        self.noiseInjSD = noiseInjSD # SD of injected noise
        self.simif = False           # whether the simulation is complete
        self._setup_network()

    def __str__(self):
        return "LIF network of %d neurons "\
            "having %d exc." % (self.N,self.NmaxExc)

    def _setup_network(self):
        """Sets up the network (_init_network is enough)"""
        self.network = moose.LIF( 'network', self.N )
        moose.le( '/network' )
        self.network.vec.Em = self.el
        self.network.vec.thresh = self.vt
        self.network.vec.refractoryPeriod = self.refrT
        self.network.vec.Rm = self.Rm
        self.network.vec.vReset = self.vr
        self.network.vec.Cm = self.Cm
        if not noiseInj:
            self.network.vec.inject = self.Iinject
        else:
            ## inject a constant + noisy current
            ## values are set in self.simulate()
            self.noiseTables = moose.StimulusTable('noiseTables',self.N)
            moose.connect( self.noiseTables, 'output', \
                self.network, 'setInject', 'OneToOne')

    def _init_network(self,v0=el):
        """Initialises the network variables before simulation"""
        self.network.vec.initVm = v0

    def simulate(self,simtime=simtime,dt=dt,plotif=False,**kwargs):
        """Prepare the injection waveforms and initialise the network.

        Builds the baseline + tetanic current waveform, adds per-neuron
        Gaussian noise, and initialises Vm (v0 may be passed via kwargs).
        Actual time-stepping is done by the caller via moose.start().
        """
        self.dt = dt
        self.simtime = simtime
        # BUGFIX: np.ceil returns a float; cast to int so it can be used as
        # an array length / size= argument / slice index on Python 3.
        self.T = int(np.ceil(simtime/dt))
        self.trange = np.arange(0,self.simtime,dt)

        # Build in the LTP stimulus: three 0.5 s current steps of amplitude
        # offset on top of the constant injection.
        offset = Iinject * 0.5
        injBaseline = np.repeat( self.Iinject, self.T )
        # NOTE(review): simtime/(interTetInterval*dt) looks like it was meant
        # to be interTetInterval/dt; formula kept as-is, only cast to int.
        start = int(np.ceil( simtime / (interTetInterval * dt) ))
        for i in range( 3 ):
            end = start + int(np.ceil( 0.5 / dt ))
            injBaseline[ start:end ] += offset
            start = start + int(np.ceil( interTetInterval / dt ))

        for i in range(self.N):
            if noiseInj:
                ## Gaussian white noise SD added every dt interval should be
                ## divided by sqrt(dt), as the later numerical integration
                ## will multiply it by dt.
                ## See the Euler-Maruyama method, numerical integration in
                ## http://www.scholarpedia.org/article/Stochastic_dynamical_systems
                self.noiseTables.vec[i].vector = injBaseline + \
                    np.random.normal( \
                        scale=self.noiseInjSD*np.sqrt(self.Rm*self.Cm/self.dt), \
                        size=self.T ) # scale = SD
                self.noiseTables.vec[i].stepSize = 0 # use current time
                                                     # as x value for interpolation
                self.noiseTables.vec[i].stopTime = self.simtime

        self._init_network(**kwargs)
        if plotif:
            self._init_plots()

    def _init_plots(self):
        """Create moose Tables: Vm of a random sample, and spikes of all
        neurons (per-neuron plus pooled exc / inh tables)."""
        ## make a few tables to store a few Vm-s
        numVms = 10
        self.plots = moose.Table( '/plotVms', numVms )
        ## draw numVms out of N neurons
        nrnIdxs = random.sample(list(range(self.N)),numVms)
        for i in range( numVms ):
            moose.connect( self.network.vec[nrnIdxs[i]], 'VmOut', \
                self.plots.vec[i], 'input')
        ## make self.N tables to store spikes of all neurons
        self.spikes = moose.Table( '/plotSpikes', self.N )
        moose.connect( self.network, 'spikeOut', \
            self.spikes, 'input', 'OneToOne' )
        ## make 2 tables to store spikes of all exc and all inh neurons
        self.spikesExc = moose.Table( '/plotSpikesAllExc' )
        for i in range(self.NmaxExc):
            moose.connect( self.network.vec[i], 'spikeOut', \
                self.spikesExc, 'input' )
        self.spikesInh = moose.Table( '/plotSpikesAllInh' )
        for i in range(self.NmaxExc,self.N):
            moose.connect( self.network.vec[i], 'spikeOut', \
                self.spikesInh, 'input' )

    def _plot(self, fig):
        """ plots the spike raster for the simulated net"""
        # `fig` is unused but kept for interface compatibility with callers.
        plt.figure(1)
        ax = plt.subplot(221)
        cleanAx( ax, 'B' )
        plt.ylabel( 'Neuron #', fontsize = 16 )
        for i in range(0,self.NmaxExc):
            if i==0: label = 'Exc. spike trains'
            else: label = ''
            spikes = self.spikes.vec[i].vector
            ax.plot(spikes,[i]*len(spikes),\
                'b.',marker='.', markersize = 2, label=label)
        for i in range(self.NmaxExc,self.N):
            if i==self.NmaxExc: label = 'Inh. spike trains'
            else: label = ''
            spikes = self.spikes.vec[i].vector
            ax.plot(spikes,[i]*len(spikes),\
                'r.',marker='.', markersize = 2, label=label)
#############################################
# Exc-Inh network class with Ca plasticity based connections
# (inherits from ExcInhNetBase)
#############################################
class ExcInhNet(ExcInhNetBase):
    """ Recurrent network simulation """

    def __init__(self,J=J,incC=C,fC=fC,scaleI=g,syndelay=syndelay,**kwargs):
        """Overloads base (parent) class"""
        self.J = J              # exc connection weight
        self.incC = incC        # number of incoming connections per neuron
        self.fC = fC            # fraction of exc incoming connections
        self.excC = int(fC*incC)# number of exc incoming connections
        self.scaleI = scaleI    # inh weight is scaleI*J
        self.syndelay = syndelay# synaptic delay

        # call the parent class constructor (this calls _setup_network())
        ExcInhNetBase.__init__(self,**kwargs)

    def __str__(self):
        return "LIF network of %d neurons "\
            "of which %d are exc." % (self.N,self.NmaxExc)

    def _init_network(self,**args):
        ExcInhNetBase._init_network(self,**args)

    def _init_plots(self):
        """Extends base plots with weight-recording tables for the first
        recN exc neurons' plastic EE synapses."""
        ExcInhNetBase._init_plots(self)
        self.recN = 5 # number of neurons for which to record weights and Ca
        if CaPlasticity:
            ## make tables to store weights of recN exc synapses
            ## for each post-synaptic exc neuron
            self.weights = moose.Table( '/plotWeights', self.excC*self.recN )
            for i in range(self.recN): # range(self.N) is too large
                for j in range(self.excC):
                    moose.connect( self.weights.vec[self.excC*i+j], 'requestOut',
                        self.synsEE.vec[i*self.excC+j].synapse[0], 'getWeight')

    def _setup_network(self):
        """Builds the LIF network and its synaptic wiring: plastic (or
        simple) EE synapses, static IE synapses, and static synapses onto
        all inhibitory neurons."""
        ## Set up the neurons without connections
        ExcInhNetBase._setup_network(self)

        ## Now, add in the connections...
        ## Each pre-synaptic spike cause Vm of post-neuron to rise by
        ## synaptic weight in one time step i.e. delta-fn synapse.
        ## Since LIF neuron is derived from Compartment class,
        ## conductance-based synapses (SynChan class) can also be used.

        ## E to E synapses can be plastic
        ## Two ways to do this:
        ## 1) Each LIF neuron has one incoming postsynaptic SynHandler,
        ##  which collects the activation from all presynaptic neurons,
        ##  but then a common Ca pool is used.
        ## 2) Each LIF neuron has multiple postsyanptic SynHandlers,
        ##  one for each pre-synaptic neuron, i.e. one per synapse,
        ##  then each synapse has a different Ca pool.
        ## Here we go with option 2) as per Higgins et al 2014 (Brunel private email)
        ## separate SynHandler per EE synapse, thus NmaxExc*excC
        if CaPlasticity:
            self.synsEE = moose.GraupnerBrunel2012CaPlasticitySynHandler( \
                '/network/synsEE', self.NmaxExc*self.excC )
        else:
            self.synsEE = moose.SimpleSynHandler( \
                '/network/synsEE', self.NmaxExc*self.excC )
        moose.useClock( 0, '/network/synsEE', 'process' )

        ## I to E synapses are not plastic
        self.synsIE = moose.SimpleSynHandler( '/network/synsIE', self.NmaxExc )
        ## all synapses to I neurons are not plastic
        self.synsI = moose.SimpleSynHandler( '/network/synsI', self.N-self.NmaxExc )
        ## connect all SynHandlers to their respective neurons
        for i in range(self.NmaxExc):
            moose.connect( self.synsIE.vec[i], 'activationOut', \
                self.network.vec[i], 'activation' )
        for i in range(self.NmaxExc,self.N):
            moose.connect( self.synsI.vec[i-self.NmaxExc], 'activationOut', \
                self.network.vec[i], 'activation' )

        ## Connections from some Exc/Inh neurons to each Exc neuron
        for i in range(0,self.NmaxExc):
            self.synsIE.vec[i].numSynapses = self.incC-self.excC

            ## Connections from some Exc neurons to each Exc neuron
            ## draw excC number of neuron indices out of NmaxExc neurons
            preIdxs = random.sample(list(range(self.NmaxExc)),self.excC)
            ## connect these presynaptically to i-th post-synaptic neuron
            for synnum,preIdx in enumerate(preIdxs):
                synidx = i*self.excC+synnum
                synHand = self.synsEE.vec[synidx]

                ## connect each synhandler to the post-synaptic neuron
                moose.connect( synHand, 'activationOut', \
                    self.network.vec[i], 'activation' )
                ## important to set numSynapses = 1 for each synHandler,
                ## doesn't create synapses if you set the full array of SynHandlers
                synHand.numSynapses = 1

                synij = synHand.synapse[0]
                connectExcId = moose.connect( self.network.vec[preIdx], \
                    'spikeOut', synij, 'addSpike')
                synij.delay = syndelay
                if CaPlasticity:
                    ## set parameters for the Ca Plasticity SynHandler
                    ## have to be set for each SynHandler
                    ## doesn't set for full array at a time
                    synHand.CaInit = 0.0
                    synHand.tauCa = tauCa
                    synHand.tauSyn = tauSyn
                    synHand.CaPre = CaPre
                    synHand.CaPost = CaPost
                    synHand.delayD = delayD
                    synHand.thetaD = thetaD
                    synHand.thetaP = thetaP
                    synHand.gammaD = gammaD
                    synHand.gammaP = gammaP
                    synHand.weightMax = 1.0 # bounds on the weight
                    synHand.weightMin = 0.0
                    synHand.weightScale = \
                        self.J*2.0 # 0.2 mV, weight*weightScale is activation
                                   # typically weight <~ 0.5, so activation <~ J
                    synHand.noisy = noisy
                    synHand.noiseSD = noiseSD
                    synHand.bistable = bistable

                    ## postsynaptic spikes also drive the Ca variable
                    moose.connect( self.network.vec[i], \
                        'spikeOut', synHand, 'addPostSpike')
                    synij.weight = eqWeight # activation = weight*weightScale
                                            # weightScale = 2*J
                                            # weight <~ 0.5
                    ## Randomly set 5% of them to be 1.0
                    if np.random.uniform()<0.05:
                        synij.weight = 1.0
                else:
                    synij.weight = self.J # no weightScale here, activation = weight

            ## Connections from some Inh neurons to each Exc neuron
            ## draw inhC=incC-excC number of neuron indices out of inhibitory neurons
            preIdxs = random.sample(list(range(self.NmaxExc,self.N)),self.incC-self.excC)
            ## connect these presynaptically to i-th post-synaptic neuron
            for synnum,preIdx in enumerate(preIdxs):
                synij = self.synsIE.vec[i].synapse[synnum]
                connectInhId = moose.connect( self.network.vec[preIdx], \
                    'spikeOut', synij, 'addSpike')
                synij.delay = syndelay
                synij.weight = -self.scaleI*self.J # activation = weight

        ## Connections from some Exc/Inh neurons to each Inh neuron
        for i in range(self.N-self.NmaxExc):
            ## each neuron has incC number of synapses
            self.synsI.vec[i].numSynapses = self.incC

            ## draw excC number of neuron indices out of NmaxExc neurons
            preIdxs = random.sample(list(range(self.NmaxExc)),self.excC)
            ## connect these presynaptically to i-th post-synaptic neuron
            for synnum,preIdx in enumerate(preIdxs):
                synij = self.synsI.vec[i].synapse[synnum]
                connectExcId = moose.connect( self.network.vec[preIdx], \
                    'spikeOut', synij, 'addSpike')
                synij.delay = syndelay
                synij.weight = self.J # activation = weight

            ## draw inhC=incC-excC number of neuron indices out of inhibitory neurons
            preIdxs = random.sample(list(range(self.NmaxExc,self.N)),self.incC-self.excC)
            ## connect these presynaptically to i-th post-synaptic neuron
            for synnum,preIdx in enumerate(preIdxs):
                synij = self.synsI.vec[i].synapse[ self.excC + synnum ]
                connectInhId = moose.connect( self.network.vec[preIdx], \
                    'spikeOut', synij, 'addSpike')
                synij.delay = syndelay
                synij.weight = -self.scaleI*self.J # activation = weight

        moose.useClock( 0, '/network/synsIE', 'process' )
        moose.useClock( 0, '/network/synsI', 'process' )
#############################################
# Analysis functions
#############################################
def rate_from_spiketrain(spiketimes,fulltime,dt,tau=50e-3):
    """
    Returns a rate series of spiketimes convolved with a Gaussian kernel;
    all times must be in SI units.

    Parameters:
        spiketimes : iterable of float, spike times in seconds
        fulltime   : float, duration of the returned series (s)
        dt         : float, sampling interval (s)
        tau        : float, kernel width parameter; Gaussian SD is tau/2

    Returns:
        numpy array of length int(fulltime/dt), instantaneous rate in Hz.
    """
    sigma = tau/2.
    ## normalized Gaussian kernel, integral with dt is normed to 1
    ## to count as 1 spike smeared over a finite interval
    norm_factor = 1./(np.sqrt(2.*np.pi)*sigma)
    gauss_kernel = np.array([norm_factor*np.exp(-x**2/(2.*sigma**2))\
        for x in np.arange(-5.*sigma,5.*sigma+dt,dt)])
    kernel_len = len(gauss_kernel)
    ## need to accommodate half kernel_len on either side of fulltime
    rate_full = np.zeros(int(fulltime/dt)+kernel_len)
    for spiketime in spiketimes:
        idx = int(spiketime/dt)
        rate_full[idx:idx+kernel_len] += gauss_kernel
    ## only the middle fulltime part of the rate series
    ## This is already in Hz,
    ## since should have multiplied by dt for above convolution
    ## and divided by dt to get a rate, so effectively not doing either.
    ## BUGFIX: use floor division -- this file imports `division`, so
    ## kernel_len/2 is a float and float slice indices raise TypeError.
    return rate_full[kernel_len//2:kernel_len//2+int(fulltime/dt)]
#############################################
# Make plots
#############################################
def extra_plots(net):
    """Plot firing-rate panels for the simulated net (figure created here).

    Top row: smoothed rates of the first num_to_plot exc and inh neurons
    (tau = 1.0 s kernel); bottom row: population-averaged exc and inh rates
    using the default kernel width of rate_from_spiketrain.
    """
    ## extra plots apart from the spike rasters
    timeseries = net.trange
    ## individual neuron firing rates
    fig3 = plt.figure()
    plt.subplot(221)
    num_to_plot = 10
    #rates = []
    for nrni in range(num_to_plot):
        rate = rate_from_spiketrain(\
            net.spikes.vec[nrni].vector,simtime,dt, 1.0 )
        plt.plot(timeseries,rate)
    plt.title("Rates of "+str(num_to_plot)+" exc nrns")
    plt.ylabel("Hz")
    plt.ylim(0,100)
    plt.subplot(222)
    for nrni in range(num_to_plot):
        rate = rate_from_spiketrain(\
            net.spikes.vec[net.NmaxExc+nrni].vector,simtime,dt, 1.0 )
        plt.plot(timeseries,rate)
    plt.title("Rates of "+str(num_to_plot)+" inh nrns")
    plt.ylim(0,100)
    ## population firing rates
    plt.subplot(223)
    rate = rate_from_spiketrain(net.spikesExc.vector,simtime,dt)\
        /float(net.NmaxExc) # per neuron
    plt.plot(timeseries,rate)
    plt.ylim(0,100)
    plt.title("Exc population rate")
    plt.ylabel("Hz")
    plt.xlabel("Time (s)")
    plt.subplot(224)
    rate = rate_from_spiketrain(net.spikesInh.vector,simtime,dt)\
        /float(net.N-net.NmaxExc) # per neuron
    plt.plot(timeseries,rate)
    plt.ylim(0,100)
    plt.title("Inh population rate")
    plt.xlabel("Time (s)")
    fig3.tight_layout()
def makeScatterPlot( m, n, v ):
    """Scatter-plot the values v on an m x n lattice with 100 um spacing.

    Returns (figure, scatter-collection) so the caller can later refresh
    the colours via the collection's set_array.
    """
    fig = plt.figure()
    dx = 100e-6
    dy = 100e-6
    # Lattice coordinates: x varies fastest, matching a row-major layout
    # of v over the grid.
    xgrid, ygrid = np.meshgrid( np.arange( m ) * dx, np.arange( n ) * dy )
    #z = np.arange( m * n )
    hot = plt.get_cmap( 'afmhot' )
    scat = plt.scatter( xgrid.ravel(), ygrid.ravel(), s = 64, c = v,
            vmin = 0.5, vmax = 1.0, cmap = hot )
    plt.xlim( -dx, dx * m )
    plt.ylim( -dy, dy * n )
    return fig, scat
def buildNeuronPlots( rdes ):
    """Create recording tables under /graphs for the detailed neuron:
    soma Vm and Ca, PSD receptor counts, and spine Ca concentrations."""
    if not moose.exists( '/graphs' ):
        moose.Neutral( '/graphs' )
    vtab = moose.Table( '/graphs/vtab' )
    catab = moose.Table( '/graphs/catab' )
    moose.connect( vtab, "requestOut", rdes.soma, "getVm" )
    caSoma = moose.element( rdes.soma.path + "/Ca_conc" )
    moose.connect( catab, "requestOut", caSoma, "getCa" )
    # One Table2 per PSD receptor pool, recording molecule number.
    receptors = moose.wildcardFind( '/model/chem/psd/tot_PSD_R[]' )
    rtab = moose.Table2( '/graphs/rtab', len( receptors ) ).vec
    for pool, tab in zip( receptors, rtab ):
        moose.connect( tab, "requestOut", pool, "getN" )
    # One Table2 per spine Ca pool, recording concentration.
    caPools = moose.wildcardFind( '/model/chem/spine/Ca[]' )
    pcatab = moose.Table2( '/graphs/pcatab', len( caPools ) ).vec
    for pool, tab in zip( caPools, pcatab ):
        moose.connect( tab, "requestOut", pool, "getConc" )
def cleanAx( ax, label, showXlabel = False ):
    """Strip the top/right spines and secondary tick marks from `ax`, hide
    the x tick labels unless showXlabel, and stamp a bold panel label
    (e.g. 'B') just outside the axes' top-left corner."""
    for side in ( 'top', 'right' ):
        ax.spines[side].set_visible( False )
    ax.tick_params( direction = 'out' )
    if not showXlabel:
        ax.set_xticklabels( [] )
    # Disable tick marks on the secondary (top/right) sides of both axes.
    allTicks = list( ax.xaxis.get_major_ticks() ) + list( ax.yaxis.get_major_ticks() )
    for tick in allTicks:
        tick.tick2On = False
    ax.text( -0.18, 1.0, label, fontsize = 18, weight = 'bold', transform=ax.transAxes )
def saveNeuronPlots( fig, rdes ):
    """Draw panels C-E on figure 1: soma Vm, spine [Ca], and inserted GluR
    counts for the detailed neuron, reading from the tables under /graphs.
    NOTE(review): `fig` and `rdes` are currently unused by the body."""
    #fig = plt.figure( figsize=(12, 10), facecolor='white' )
    #fig.subplots_adjust( left = 0.18 )
    plt.figure(1)
    ax = plt.subplot(222)
    cleanAx( ax, 'C' )
    plt.ylabel( 'Vm (mV)', fontsize = 16 )
    vtab = moose.element( '/graphs/vtab' )
    t = np.arange( 0, len( vtab.vector ), 1 ) * vtab.dt
    plt.plot( t, vtab.vector * 1000, label="Vm" )
    #plt.legend()
    ax = plt.subplot(223)
    cleanAx( ax, 'D', showXlabel = True )
    # Plot every other spine-Ca table to reduce clutter; scale to uM.
    pcatab = list( moose.vec( '/graphs/pcatab' ) )[0::2]
    t = np.arange( 0, len( pcatab[0].vector ), 1 ) * pcatab[0].dt
    for i in pcatab:
        plt.plot( t, i.vector * 1000 )
    plt.ylabel( '[Ca] (uM)', fontsize = 16 )
    plt.xlabel( 'Time (s)', fontsize = 16 )
    ax = plt.subplot(224)
    cleanAx( ax, 'E', showXlabel = True )
    # Likewise, every other PSD receptor table.
    rtab = list( moose.vec( '/graphs/rtab' ) )[0::2]
    t = np.arange( 0, len( rtab[0].vector ), 1 ) * rtab[0].dt
    for i in rtab:
        plt.plot( t, i.vector )
    plt.ylabel( '# of inserted GluRs', fontsize = 16 )
    plt.xlabel( 'Time (s)', fontsize = 16 )
    '''
    for i in moose.wildcardFind( '/graphs/#' ):
        i.xplot( fname + '.xplot', i.name )
    '''
if __name__=='__main__':
    # Top-level driver: build the point-neuron network plus one detailed
    # neuron, run the multiscale simulation, and display/save the figures.
    plt.ion()
    ## ExcInhNetBase has unconnected neurons,
    ## ExcInhNet connects them
    ## Instantiate either ExcInhNetBase or ExcInhNet below
    #net = ExcInhNetBase(N=N)
    net = ExcInhNet(N=N)
    print(net)
    moose.le( '/' )
    moose.le( '/network' )
    # Build the detailed neuron model and wire it into the network.
    rdes = buildRdesigneur()
    rdes.buildModel( '/model' )
    buildNeuronPlots( rdes )
    connectDetailedNeuron()
    ## Important to distribute the initial Vm-s
    ## else weak coupling gives periodic synchronous firing
    plotif = True
    net.simulate(simtime,plotif=plotif,\
        v0=np.random.uniform(el-20e-3,vt,size=N))
    # moose simulation
    # Assign process clocks; note clock 3 is shared by Vm, weight and Ca
    # plotting tables, and all clocks tick at the same dt.
    moose.useClock( 1, '/network', 'process' )
    moose.useClock( 2, '/plotSpikes', 'process' )
    moose.useClock( 3, '/plotVms', 'process' )
    if CaPlasticity:
        moose.useClock( 3, '/plotWeights', 'process' )
        moose.useClock( 3, '/plotCa', 'process' )
    moose.setClock( 0, dt )
    moose.setClock( 1, dt )
    moose.setClock( 2, dt )
    moose.setClock( 3, dt )
    moose.setClock( 9, dt )
    if plotif:
        Vm = net.network.vec.Vm
        fig = plt.figure( 1, figsize=(12, 10), facecolor='white' )
        fig.subplots_adjust( left = 0.18 )
        fig2, ret = makeScatterPlot( 20, 20, Vm )
        title = fig2.text( 0.1, 0.95, "Simulation starting..." )
    moose.reinit()
    t1 = time.time()
    print('starting')
    # Run in updateDt chunks, refreshing the scatter plot between chunks.
    # NOTE(review): title/ret/fig2/fig are only defined when plotif is
    # True, but this loop and the calls below use them unconditionally;
    # plotif=False would raise NameError — confirm intended.
    for currTime in np.arange( 0, simtime, updateDt ):
        moose.start(updateDt)
        # lastEventTime presumably holds each cell's last spike time;
        # the exponential maps recency onto the colour scale.
        lastt = net.network.vec.lastEventTime
        lastt = np.exp( 2 * (lastt - currTime ) )
        title.set_text( "t = " + str( currTime ) )
        ret.set_array( lastt )
        fig2.canvas.draw()
    print('runtime, t = ', time.time() - t1)
    if plotif:
        net._plot( fig )
    extra_plots(net)
    saveNeuronPlots( fig, rdes )
    plt.show()
    plt.savefig( fname + '.svg', bbox_inches='tight')
    print( "Hit 'enter' to exit" )
    # raw_input exists only on Python 2; fall back to input() on Python 3.
    try:
        raw_input()
    except NameError as e:
        input( )
| BhallaLab/moose | moose-examples/paper-2015/Fig6_NetMultiscale/ReducedModel.py | Python | gpl-3.0 | 36,806 | [
"Gaussian",
"MOOSE",
"NEURON"
] | 8c758dec3eaf91495218b024ca36abe69f3b5a16f5ef716b16814848ec32d4d9 |
'''
This __init__ file is for a PySCeS "eXtension module" (pysx), the following variables can be set for each module
'''
# Human-readable module name and the OS families it supports
# (values of os.name: 'posix', 'nt').
pysx_name = 'Pysces module template'
pysx_oscompat = ['posix','nt']
# Name of the class this extension module contributes.
pysx_base_class = 'CONTRIB_demo'
# Author / contact metadata for the extension.
pysx_author = 'Brett G. Olivier'
pysx_email = 'bgoli@users.sourceforge.net'
pysx_affiliation = 'Triple-J Group for Molecular Cell Physiology'
pysx_web = 'http://pysces.sourceforge.net'
pysx_notes = ''
# Re-export everything from the sibling demotest module.
# NOTE(review): this is a Python 2 implicit relative import; Python 3
# would require `from .demotest import *` — confirm target interpreter.
from demotest import *
| asttra/pysces | pysces/contrib/demo/__init__.py | Python | bsd-3-clause | 448 | [
"PySCeS"
] | 11d9c1dc535467c7ecacae7749e91bbf54ba68991c2efaf408f6d2372c8f0878 |
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import subprocess
import tempfile
import pwd
import grp
import signal
import socket
# Constants
TIMEOUT_NFS = 10 # seconds
EXPORTS = "/etc/exports"
MOUNT = "/bin/mount"
UMOUNT = "/bin/umount"
SU = "/bin/su"
UID = 36
GUID = 36
USER = "vdsm"
GROUP = "kvm"
TESTFILE = "vdsmTest"
def usage():
    """Print the command-line help text and terminate with status 0."""
    helpLines = [
        "Usage: " + sys.argv[0] + " server:/target",
        "nfs-check is a python script to validate nfs targets to use"
        " with oVirt project.",
        "Some operations includes: mount the nfs target,"
        " create a file as %s:%s and remove it." % (USER, GROUP),
    ]
    print("\n".join(helpLines))
    sys.exit(0)
class Alarm(Exception):
    """Raised by the SIGALRM handler to abort an operation that timed out."""
class Nfs(object):
    """Validate an NFS export: mount it, exercise it as the vdsm user, unmount.

    Every external command is guarded by a SIGALRM-based timeout of
    TIMEOUT_NFS seconds; the handler converts the signal into an Alarm
    exception so a hung mount/umount/su cannot block the script forever.
    NOTE(review): subprocess.communicate() returns bytes on Python 3, so
    the substring checks on errorMsg below assume a Python 2 interpreter —
    confirm the target runtime.
    """
    def handler(self, signum, frame):
        # SIGALRM handler: surface the timeout as an exception.
        raise Alarm()
    def mount(self, server, target, pathName):
        """Mount server:target on pathName; return the mount exit status.

        Exits the process on timeout. When mount fails and the server is
        this very machine, delegates to checkLocalServer for diagnostics.
        """
        cmd = "%s:%s" % (server, target)
        process = subprocess.Popen([MOUNT, "-t", "nfs", cmd, pathName],
                                   shell=False, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        signal.signal(signal.SIGALRM, self.handler)
        signal.alarm(TIMEOUT_NFS)
        print("Current hostname: %s - IP addr %s" % (self.getHostName(),
                                                     self.getLocalIP()))
        print("Trying to %s -t nfs %s..." % (MOUNT, cmd))
        try:
            errorMsg = process.communicate()[1].strip()
            signal.alarm(0)
        except Alarm:
            print("Timeout, cannot mount the nfs! Please check the status "
                  "of NFS service or/and the Firewall settings!")
            self.exitCode(-1)
        # get return from mount cmd
        ret = process.poll()
        # Let's check if the NFS Server is local machine
        localIP = self.getLocalIP()
        serverIP = self.getIP(server)
        localMachine = False
        # check if server (argument) IP address is the same for
        # hostname IP address
        for ip in serverIP:
            if localIP == ip:
                localMachine = True
        if ret != 0 and localMachine:
            ret = self.checkLocalServer(ret, errorMsg, target)
        elif ret != 0:
            print("return = %s error %s" % (ret, errorMsg))
        return ret
    def checkLocalServer(self, ret, errorMsg, target):
        """Diagnose a failed mount of a local export by inspecting errorMsg
        and /etc/exports; returns ret unchanged."""
        print("NFS Server is local machine, looking local configurations..")
        if "access denied" in errorMsg:
            print("return = %s error msg = %s" % (ret, errorMsg))
            print("Access Denied: Cannot mount nfs!")
            if not os.path.isfile(EXPORTS):
                print(EXPORTS + " doesn't exist, please create one"
                      " and start nfs server!")
            else:
                targetFound = False
                with open(EXPORTS, 'r') as f:
                    for line in f.readlines():
                        if target in line.split(" ")[0]:
                            targetFound = True
                # NOTE(review): this advice prints when the target IS
                # already listed in EXPORTS; the wording suggests the
                # condition may have been meant as `not targetFound` —
                # confirm intended behavior.
                if targetFound:
                    print("Please include %s into %s and restart"
                          " nfs server!" % (target, EXPORTS))
        elif "does not exist" in errorMsg:
            print("return = %s error msg = %s" % (ret, errorMsg))
        else:
            print("NFS server down?")
            print("return = %s error msg = %s" % (ret, errorMsg))
        return ret
    def getIP(self, Server):
        """Resolve Server to a list of IP address strings; exits on failure."""
        ip = []
        try:
            addrList = socket.getaddrinfo(Server, None)
        # NOTE(review): bare except also swallows KeyboardInterrupt; the
        # expected failure here is socket.gaierror.
        except:
            print("Cannot get address from %s" % Server)
            self.exitCode(-1)
        for item in addrList:
            # getaddrinfo items are (family, type, proto, canonname,
            # sockaddr); sockaddr[0] is the address string.
            ip.append(item[4][0])
        return ip
    def getHostName(self):
        """Return the local hostname."""
        return socket.gethostname()
    def getLocalIP(self):
        """Return the IP the hostname resolves to, or 0.0.0.0 if unresolvable."""
        addr = "0.0.0.0"
        try:
            addr = socket.gethostbyname(socket.gethostname())
        except socket.gaierror as err:
            print("INFO: Cannot resolve hostname"
                  ": %s %s" % (socket.gethostname(), err))
        return addr
    def exitCode(self, ret):
        # Thin wrapper so callers can terminate with a specific status.
        sys.exit(ret)
    def tests(self, pathName):
        """Check vdsm/kvm identities, then create and remove a test file in
        pathName as the vdsm user. Returns 0 on success, -1 on any failure."""
        ret = 0
        try:
            if pwd.getpwnam(USER).pw_uid != UID:
                print("WARNING: %s user has UID [%s] which is different from "
                      "the required [%s]" %
                      (USER, pwd.getpwnam(USER).pw_uid, UID))
        # NOTE(review): pwd.getpwnam raises KeyError for a missing user;
        # the bare except is broader than necessary.
        except:
            print("Cannot find %s user! You must have %s user created!" %
                  (USER, USER))
            ret = -1
        try:
            if grp.getgrnam(GROUP).gr_gid != GUID:
                print("WARNING: %s group has GUID [%s] which is different "
                      "from the required [%s]" %
                      (GROUP, grp.getgrnam(GROUP).gr_gid, GUID))
        except:
            print("Cannot find %s group! The system must have %s group" %
                  (GROUP, GROUP))
            ret = -1
        if ret != -1:
            # Try to create the test file as the vdsm user.
            fileTest = pathName + "/" + TESTFILE
            cmdTouch = "/bin/touch " + fileTest
            process, errorMsg, ret = self.runCommand(cmdTouch)
            errorMsg = errorMsg.strip()
            if ret != -1:
                # get the return from the command
                ret = process.poll()
                if ret != 0:
                    if "Permission denied" in errorMsg:
                        print("Permission denied: %s user as %s cannot "
                              "create a file into %s" %
                              (USER, GROUP, pathName))
                        print("Suggestions: please verify the permissions of "
                              "target (chmod or/and selinux booleans)")
                        print("return = %s error msg = %s" % (ret, errorMsg))
                        ret = -1
                    elif "Read-only file system" in errorMsg:
                        print("Please make sure the target NFS contain the "
                              "read and WRITE access")
                        print("return = %s error msg = %s" % (ret, errorMsg))
                        ret = -1
                    else:
                        print("return = %s error msg = %s" % (ret, errorMsg))
                        ret = -1
        # remove the file
        if ret != -1:
            print("Removing %s file.." % TESTFILE)
            cmdRemove = "/bin/rm " + fileTest
            process, errorMsg, ret = self.runCommand(cmdRemove)
            errorMsg = errorMsg.strip()
            if ret != -1:
                # get the return from the command
                ret = process.poll()
                if ret != 0:
                    print("Error removing %s file, error = %s " %
                          (TESTFILE, errorMsg))
                    ret = -1
        return ret
    def runCommand(self, cmd):
        """Run cmd as USER via su with a timeout; return
        (Popen, stderr text, 0 or -1 on timeout)."""
        ret = 0
        process = subprocess.Popen([SU, USER, "-c", cmd, "-s", "/bin/bash"],
                                   shell=False, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        signal.signal(signal.SIGALRM, self.handler)
        signal.alarm(TIMEOUT_NFS)
        try:
            errorMsg = process.communicate()[1]
            signal.alarm(0)
        except Alarm:
            print("Timeout, cannot execute: %s" % cmd)
            ret = -1
        return process, errorMsg, ret
    def umount(self, pathName):
        """Unmount pathName; return umount's exit status, exiting on timeout."""
        process = subprocess.Popen([UMOUNT, pathName], shell=False,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        signal.signal(signal.SIGALRM, self.handler)
        signal.alarm(TIMEOUT_NFS)
        try:
            errorMsg = process.communicate()[1].strip()
            signal.alarm(0)
        except Alarm:
            print("Timeout, cannot %s the nfs!" % UMOUNT)
            self.exitCode(-1)
        # get the return from the command
        ret = process.poll()
        if ret != 0:
            print("cannot execute %s!" % UMOUNT)
            print("return = %s error msg = %s" % (ret, errorMsg))
        return ret
if __name__ == "__main__":
    # Entry point: requires root (mount/umount, su); expects exactly one
    # argument of the form server:/target.
    if os.geteuid() != 0:
        print("You must be root to run this script.")
        sys.exit(-1)
    if len(sys.argv) != 2 or ":" not in sys.argv[1]:
        usage()
    # Split only on the first colon boundary: server before, export after.
    nfsData = sys.argv[1].split(":")
    NFS_SERVER = nfsData[0]
    NFS_TARGET = nfsData[1]
    nfs = Nfs()
    # Mount point is a throwaway temp directory, removed in the finally.
    LOCALPATH = tempfile.mkdtemp()
    try:
        ret = nfs.mount(NFS_SERVER, NFS_TARGET, LOCALPATH)
        if ret != 0:
            nfs.exitCode(ret)
        print("Executing NFS tests..")
        ret = nfs.tests(LOCALPATH)
        if ret != 0:
            print("Status of tests [Failed]")
            print("For more troubleshooting tips, visit "
                  "https://www.ovirt.org/documentation/how-to/"
                  "troubleshooting/troubleshooting-nfs-storage-issues")
        else:
            print("Status of tests [OK]")
        print("Disconnecting from NFS Server..")
        ret = nfs.umount(LOCALPATH)
        if ret != 0:
            print("Umount [Failed]\n")
            nfs.exitCode(ret)
    finally:
        os.removedirs(LOCALPATH)
    print("Done!")
| oVirt/vdsm | contrib/nfs-check.py | Python | gpl-2.0 | 10,118 | [
"VisIt"
] | b511c40ca59669eebd07ead9acbdc4ffb222335b22f33fad8c751f06e6702f19 |
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2011-2014 Google, Inc.
# Copyright (c) 2012 Tim Hatch <tim@timhatch.com>
# Copyright (c) 2013-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Rene Zhang <rz99@cornell.edu>
# Copyright (c) 2015 Steven Myint <hg@stevenmyint.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""exceptions handling (raising, catching, exceptions classes) checker
"""
import inspect
import sys
import six
from six.moves import builtins
import astroid
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
is_raising,
check_messages,
inherit_from_std_ex,
EXCEPTIONS_MODULE,
safe_infer,
has_known_bases)
from pylint.interfaces import IAstroidChecker
def _builtin_exceptions():
def predicate(obj):
return isinstance(obj, type) and issubclass(obj, BaseException)
members = inspect.getmembers(six.moves.builtins, predicate)
return {exc.__name__ for (_, exc) in members}
def _annotated_unpack_infer(stmt, context=None):
    """Recursively generate nodes inferred by the given statement.

    If the inferred value is a list or a tuple, recurse on the elements.
    Yields tuples in the format ('original node', 'inferred node'),
    skipping anything astroid could not infer (astroid.YES).
    """
    if isinstance(stmt, (astroid.List, astroid.Tuple)):
        # Yield each element of the container with its own inference.
        for element in stmt.elts:
            guessed = safe_infer(element)
            if guessed and guessed is not astroid.YES:
                yield element, guessed
    else:
        # Plain statement: pair it with every value astroid infers for it.
        for result in stmt.infer(context):
            if result is not astroid.YES:
                yield stmt, result
PY3K = sys.version_info >= (3, 0)
OVERGENERAL_EXCEPTIONS = ('Exception',)
BUILTINS_NAME = builtins.__name__
MSGS = {
'E0701': ('Bad except clauses order (%s)',
'bad-except-order',
'Used when except clauses are not in the correct order (from the '
'more specific to the more generic). If you don\'t fix the order, '
'some exceptions may not be catched by the most specific handler.'),
'E0702': ('Raising %s while only classes or instances are allowed',
'raising-bad-type',
'Used when something which is neither a class, an instance or a \
string is raised (i.e. a `TypeError` will be raised).'),
'E0703': ('Exception context set to something which is not an '
'exception, nor None',
'bad-exception-context',
'Used when using the syntax "raise ... from ...", '
'where the exception context is not an exception, '
'nor None.',
{'minversion': (3, 0)}),
'E0704': ('The raise statement is not inside an except clause',
'misplaced-bare-raise',
'Used when a bare raise is not used inside an except clause. '
'This generates an error, since there are no active exceptions '
'to be reraised. An exception to this rule is represented by '
'a bare raise inside a finally clause, which might work, as long '
'as an exception is raised inside the try block, but it is '
'nevertheless a code smell that must not be relied upon.'),
'E0710': ('Raising a new style class which doesn\'t inherit from BaseException',
'raising-non-exception',
'Used when a new style class which doesn\'t inherit from \
BaseException is raised.'),
'E0711': ('NotImplemented raised - should raise NotImplementedError',
'notimplemented-raised',
'Used when NotImplemented is raised instead of \
NotImplementedError'),
'E0712': ('Catching an exception which doesn\'t inherit from BaseException: %s',
'catching-non-exception',
'Used when a class which doesn\'t inherit from \
BaseException is used as an exception in an except clause.'),
'W0702': ('No exception type(s) specified',
'bare-except',
'Used when an except clause doesn\'t specify exceptions type to \
catch.'),
'W0703': ('Catching too general exception %s',
'broad-except',
'Used when an except catches a too general exception, \
possibly burying unrelated errors.'),
'W0705': ('Catching previously caught exception type %s',
'duplicate-except',
'Used when an except catches a type that was already caught by '
'a previous handler.'),
'W0710': ('Exception doesn\'t inherit from standard "Exception" class',
'nonstandard-exception',
'Used when a custom exception class is raised but doesn\'t \
inherit from the builtin "Exception" class.',
{'maxversion': (3, 0)}),
'W0711': ('Exception to catch is the result of a binary "%s" operation',
'binary-op-exception',
'Used when the exception to catch is of the form \
"except A or B:". If intending to catch multiple, \
rewrite as "except (A, B):"'),
}
class ExceptionsChecker(BaseChecker):
    """checks for
    * excepts without exception filter
    * type of raise argument : string, Exceptions, other values
    """

    __implements__ = IAstroidChecker

    name = 'exceptions'
    msgs = MSGS
    priority = -4
    options = (('overgeneral-exceptions',
                {'default' : OVERGENERAL_EXCEPTIONS,
                 'type' :'csv', 'metavar' : '<comma-separated class names>',
                 'help' : 'Exceptions that will emit a warning '
                          'when being caught. Defaults to "%s"' % (
                              ', '.join(OVERGENERAL_EXCEPTIONS),)}
               ),
              )

    def open(self):
        """Cache the builtin exception names once per checker run."""
        self.builtin_exceptions = _builtin_exceptions()
        super(ExceptionsChecker, self).open()

    @check_messages('nonstandard-exception', 'misplaced-bare-raise',
                    'raising-bad-type', 'raising-non-exception',
                    'notimplemented-raised', 'bad-exception-context')
    def visit_raise(self, node):
        """visit raise possibly inferring value"""
        if node.exc is None:
            # Bare `raise`: only legal inside an except handler.
            self._check_misplaced_bare_raise(node)
            return
        if PY3K and node.cause:
            # `raise X from Y` only exists on Python 3.
            self._check_bad_exception_context(node)

        expr = node.exc
        if self._check_raise_value(node, expr):
            return
        else:
            # The raised expression itself was inconclusive; fall back to
            # astroid inference of its first possible value.
            try:
                value = next(astroid.unpack_infer(expr))
            except astroid.InferenceError:
                return
            self._check_raise_value(node, value)

    def _check_misplaced_bare_raise(self, node):
        """Emit misplaced-bare-raise unless the bare raise sits inside an
        except handler (or __exit__, or a try/finally)."""
        # Filter out if it's present in __exit__.
        scope = node.scope()
        if (isinstance(scope, astroid.FunctionDef)
                and scope.is_method()
                and scope.name == '__exit__'):
            return

        current = node
        # Stop when a new scope is generated or when the raise
        # statement is found inside a TryFinally.
        ignores = (astroid.ExceptHandler, astroid.FunctionDef, astroid.TryFinally)
        while current and not isinstance(current.parent, ignores):
            current = current.parent

        expected = (astroid.ExceptHandler,)
        if (not current
                or not isinstance(current.parent, expected)):
            self.add_message('misplaced-bare-raise', node=node)

    def _check_bad_exception_context(self, node):
        """Verify that the exception context is properly set.

        An exception context can be only `None` or an exception.
        """
        cause = safe_infer(node.cause)
        if cause in (astroid.YES, None):
            return
        if isinstance(cause, astroid.Const):
            # `raise X from None` is fine; any other constant is not.
            if cause.value is not None:
                self.add_message('bad-exception-context',
                                 node=node)
        elif (not isinstance(cause, astroid.ClassDef) and
              not inherit_from_std_ex(cause)):
            self.add_message('bad-exception-context',
                             node=node)

    def _check_raise_value(self, node, expr):
        """check for bad values, string exception and class inheritance

        Returns True when expr was conclusive (a message was emitted or the
        value is a known-good exception shape), False when the caller
        should keep inferring.
        """
        value_found = True
        if isinstance(expr, astroid.Const):
            value = expr.value
            if not isinstance(value, str):
                # raising-string will be emitted from python3 porting checker.
                self.add_message('raising-bad-type', node=node,
                                 args=value.__class__.__name__)
        elif ((isinstance(expr, astroid.Name) and
               expr.name in ('None', 'True', 'False')) or
              isinstance(expr, (astroid.List, astroid.Dict, astroid.Tuple,
                                astroid.Module, astroid.FunctionDef))):
            emit = True
            if not PY3K and isinstance(expr, astroid.Tuple) and expr.elts:
                # On Python 2, using the following is not an error:
                #    raise (ZeroDivisionError, None)
                #    raise (ZeroDivisionError, )
                # What's left to do is to check that the first
                # argument is indeed an exception.
                # Verifying the other arguments is not
                # the scope of this check.
                first = expr.elts[0]
                inferred = safe_infer(first)
                if isinstance(inferred, astroid.Instance):
                    # pylint: disable=protected-access
                    inferred = inferred._proxied
                if (inferred is astroid.YES or
                        isinstance(inferred, astroid.ClassDef)
                        and inherit_from_std_ex(inferred)):
                    emit = False
            if emit:
                self.add_message('raising-bad-type',
                                 node=node,
                                 args=expr.name)
        elif ((isinstance(expr, astroid.Name) and expr.name == 'NotImplemented')
              or (isinstance(expr, astroid.Call) and
                  isinstance(expr.func, astroid.Name) and
                  expr.func.name == 'NotImplemented')):
            self.add_message('notimplemented-raised', node=node)
        elif isinstance(expr, (astroid.Instance, astroid.ClassDef)):
            if isinstance(expr, astroid.Instance):
                # Check the instance's class, not the instance itself.
                # pylint: disable=protected-access
                expr = expr._proxied
            if (isinstance(expr, astroid.ClassDef) and
                    not inherit_from_std_ex(expr) and
                    has_known_bases(expr)):
                if expr.newstyle:
                    self.add_message('raising-non-exception', node=node)
                else:
                    self.add_message('nonstandard-exception', node=node)
            else:
                value_found = False
        else:
            value_found = False
        return value_found

    def _check_catching_non_exception(self, handler, exc, part):
        """Emit catching-non-exception when the handler's type does not
        derive from BaseException (and is not a builtin exception name)."""
        if isinstance(exc, astroid.Tuple):
            # Check if it is a tuple of exceptions.
            inferred = [safe_infer(elt) for elt in exc.elts]
            if any(node is astroid.YES for node in inferred):
                # Don't emit if we don't know every component.
                return
            if all(node and inherit_from_std_ex(node)
                   for node in inferred):
                return

        if not isinstance(exc, astroid.ClassDef):
            # Don't emit the warning if the infered stmt
            # is None, but the exception handler is something else,
            # maybe it was redefined.
            if (isinstance(exc, astroid.Const) and
                    exc.value is None):
                if ((isinstance(handler.type, astroid.Const) and
                     handler.type.value is None) or
                        handler.type.parent_of(exc)):
                    # If the exception handler catches None or
                    # the exception component, which is None, is
                    # defined by the entire exception handler, then
                    # emit a warning.
                    self.add_message('catching-non-exception',
                                     node=handler.type,
                                     args=(part.as_string(), ))
            else:
                self.add_message('catching-non-exception',
                                 node=handler.type,
                                 args=(part.as_string(), ))
            return
        if (not inherit_from_std_ex(exc) and
                exc.name not in self.builtin_exceptions):
            if has_known_bases(exc):
                self.add_message('catching-non-exception',
                                 node=handler.type,
                                 args=(exc.name, ))

    @check_messages('bare-except', 'broad-except',
                    'binary-op-exception', 'bad-except-order',
                    'catching-non-exception', 'duplicate-except')
    def visit_tryexcept(self, node):
        """check for empty except"""
        # exceptions_classes accumulates every class caught so far, so
        # later handlers can be checked for shadowing/duplication.
        exceptions_classes = []
        nb_handlers = len(node.handlers)
        for index, handler in enumerate(node.handlers):
            if handler.type is None:
                if not is_raising(handler.body):
                    self.add_message('bare-except', node=handler)
                # check if a "except:" is followed by some other
                # except
                if index < (nb_handlers - 1):
                    msg = 'empty except clause should always appear last'
                    self.add_message('bad-except-order', node=node, args=msg)

            elif isinstance(handler.type, astroid.BoolOp):
                # `except A or B:` — almost certainly meant `except (A, B):`.
                self.add_message('binary-op-exception',
                                 node=handler, args=handler.type.op)
            else:
                try:
                    excs = list(_annotated_unpack_infer(handler.type))
                except astroid.InferenceError:
                    continue
                for part, exc in excs:
                    if exc is astroid.YES:
                        continue
                    if (isinstance(exc, astroid.Instance)
                            and inherit_from_std_ex(exc)):
                        # pylint: disable=protected-access
                        exc = exc._proxied

                    self._check_catching_non_exception(handler, exc, part)

                    if not isinstance(exc, astroid.ClassDef):
                        continue

                    exc_ancestors = [anc for anc in exc.ancestors()
                                     if isinstance(anc, astroid.ClassDef)]
                    # A handler for a subclass placed after its ancestor
                    # can never trigger.
                    for previous_exc in exceptions_classes:
                        if previous_exc in exc_ancestors:
                            msg = '%s is an ancestor class of %s' % (
                                previous_exc.name, exc.name)
                            self.add_message('bad-except-order',
                                             node=handler.type, args=msg)
                    if (exc.name in self.config.overgeneral_exceptions
                            and exc.root().name == EXCEPTIONS_MODULE
                            and not is_raising(handler.body)):
                        self.add_message('broad-except',
                                         args=exc.name, node=handler.type)

                    if exc in exceptions_classes:
                        self.add_message('duplicate-except',
                                         args=exc.name, node=handler.type)

                exceptions_classes += [exc for _, exc in excs]
def register(linter):
    """Auto-registration hook: attach an ExceptionsChecker to *linter*."""
    checker = ExceptionsChecker(linter)
    linter.register_checker(checker)
| axbaretto/beam | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/checkers/exceptions.py | Python | apache-2.0 | 16,289 | [
"VisIt"
] | 3a009e9c0d53528faabcf6f564e72d83eda76144fe9993baf49a07cec775ee55 |
from __future__ import annotations
import os
import sys
from unittest import mock
import pytest
from dxtbx.model import ExperimentList
from xia2.Handlers.Phil import PhilIndex
from xia2.Modules.Indexer.DialsIndexer import DialsIndexer
from xia2.Schema.XCrystal import XCrystal
from xia2.Schema.XSample import XSample
from xia2.Schema.XSweep import XSweep
from xia2.Schema.XWavelength import XWavelength
def exercise_dials_indexer(dials_data, tmp_dir, nproc=None):
    """Index the insulin regression sweep with DialsIndexer and verify it.

    dials_data: fixture giving access to the dials regression data store.
    tmp_dir: working directory for indexer output.
    nproc: if given, overrides the multiprocessing nproc PHIL setting.
    Checks the known unit cell, solution statistics and beam centre, then
    round-trips the indexer through JSON and checks the copy reproduces
    the same results, including after eliminating the lattice solution.
    """
    if nproc is not None:
        PhilIndex.params.xia2.settings.multiprocessing.nproc = nproc
    template = dials_data("insulin").join("insulin_1_###.img").strpath
    indexer = DialsIndexer()
    indexer.set_working_directory(tmp_dir)
    experiments = ExperimentList.from_templates([template])
    imageset = experiments.imagesets()[0]
    indexer.add_indexer_imageset(imageset)

    # Minimal xia2 data model: crystal -> wavelength -> sample -> sweep.
    cryst = XCrystal("CRYST1", None)
    wav = XWavelength("WAVE1", cryst, imageset.get_beam().get_wavelength())
    samp = XSample("X1", cryst)
    directory, image = os.path.split(imageset.get_path(1))
    sweep = XSweep("SWEEP1", wav, samp, directory=directory, image=image)
    indexer.set_indexer_sweep(sweep)

    indexer.index()

    # Expected cubic cell (lattice cI, see solution check below).
    assert indexer.get_indexer_cell() == pytest.approx(
        (78.14, 78.14, 78.14, 90, 90, 90), rel=1e-3
    )
    solution = indexer.get_solution()
    assert solution["rmsd"] == pytest.approx(0.03545, abs=1e-3)
    assert solution["metric"] == pytest.approx(0.02517, abs=5e-3)
    assert solution["number"] == 22
    assert solution["lattice"] == "cI"

    beam_centre = indexer.get_indexer_beam_centre()
    assert beam_centre == pytest.approx(
        (94.41567208118963, 94.51337522659865), abs=1e-3
    )
    print(indexer.get_indexer_experiment_list()[0].crystal)
    print(indexer.get_indexer_experiment_list()[0].detector)

    # test serialization of indexer
    json_str = indexer.as_json()
    indexer2 = DialsIndexer.from_json(string=json_str)
    indexer2.index()

    # The deserialized indexer must reproduce the original's results.
    assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
    assert indexer.get_indexer_beam_centre() == pytest.approx(
        indexer2.get_indexer_beam_centre()
    )

    # Dropping the cI solution should leave both on the same hR lattice.
    indexer.eliminate()
    indexer2.eliminate()

    assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
    assert indexer.get_indexer_lattice() == "hR"
    assert indexer2.get_indexer_lattice() == "hR"
def test_dials_indexer_serial(regression_test, ccp4, dials_data, run_in_tmpdir):
    """Run the indexer exercise single-process, with sys.argv masked out."""
    argv_patch = mock.patch.object(sys, "argv", [])
    with argv_patch:
        exercise_dials_indexer(dials_data, run_in_tmpdir.strpath, nproc=1)
| xia2/xia2 | tests/Modules/Indexer/test_DIALS_indexer.py | Python | bsd-3-clause | 2,609 | [
"CRYSTAL"
] | 1cdfeec7ecf81cc54356e48da69a74ec66126ee64c837389c343320b680ac45a |
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
from espressomd.interactions import FeneBond
from time import time
from espressomd.accumulators import Correlator
from espressomd.observables import ParticleVelocities, ParticleBodyAngularVelocities
from tests_common import single_component_maxwell
@ut.skipIf(espressomd.has_features("THERMOSTAT_IGNORE_NON_VIRTUAL"),
"Skipped because of THERMOSTAT_IGNORE_NON_VIRTUAL")
class LangevinThermostat(ut.TestCase):
"""Tests the velocity distribution created by the Langevin thermostat against
the single component Maxwell distribution."""
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.cell_system.set_domain_decomposition(use_verlet_lists=True)
system.cell_system.skin = 0
system.seed = range(system.cell_system.get_state()["n_nodes"])
if espressomd.has_features("PARTIAL_PERIODIC"):
system.periodicity = 0, 0, 0
@classmethod
def setUpClass(cls):
np.random.seed(42)
def check_velocity_distribution(self, vel, minmax, n_bins, error_tol, kT):
"""check the recorded particle distributions in vel againsta histogram with n_bins bins. Drop velocities outside minmax. Check individual histogram bins up to an accuracy of error_tol agaisnt the analytical result for kT."""
for i in range(3):
hist = np.histogram(
vel[:, i], range=(-minmax, minmax), bins=n_bins, normed=False)
data = hist[0] / float(vel.shape[0])
bins = hist[1]
for j in range(n_bins):
found = data[j]
expected = single_component_maxwell(
bins[j], bins[j + 1], kT)
self.assertLessEqual(abs(found - expected), error_tol)
def test_aa_verify_single_component_maxwell(self):
"""Verifies the normalization of the analytical expression."""
self.assertLessEqual(
abs(single_component_maxwell(-10, 10, 4.) - 1.), 1E-4)
def test_global_langevin(self):
"""Test for global Langevin parameters."""
N = 200
system = self.system
system.part.clear()
system.time_step = 0.06
# Place particles
system.part.add(pos=np.random.random((N, 3)))
# Enable rotation if compiled in
if espressomd.has_features("ROTATION"):
system.part[:].rotation = 1, 1, 1
kT = 1.1
gamma = 3.5
system.thermostat.set_langevin(kT=kT, gamma=gamma)
# Warmup
system.integrator.run(100)
# Sampling
loops = 400
v_stored = np.zeros((N * loops, 3))
omega_stored = np.zeros((N * loops, 3))
for i in range(loops):
system.integrator.run(1)
v_stored[i * N:(i + 1) * N, :] = system.part[:].v
if espressomd.has_features("ROTATION"):
omega_stored[i * N:(i + 1) * N, :] = system.part[:].omega_body
v_minmax = 5
bins = 4
error_tol = 0.016
self.check_velocity_distribution(
v_stored, v_minmax, bins, error_tol, kT)
if espressomd.has_features("ROTATION"):
self.check_velocity_distribution(
omega_stored, v_minmax, bins, error_tol, kT)
@ut.skipIf(not espressomd.has_features("LANGEVIN_PER_PARTICLE"),
"Test requires LANGEVIN_PER_PARTICLE")
def test_langevin_per_particle(self):
"""Test for Langevin particle. Covers all combinations of
particle specific gamma and temp set or not set.
"""
N = 400
system = self.system
system.part.clear()
system.time_step = 0.06
system.part.add(pos=np.random.random((N, 3)))
if espressomd.has_features("ROTATION"):
system.part[:].rotation = 1, 1, 1
kT = 0.9
gamma = 3.2
gamma2 = 4.3
kT2 = 1.5
system.thermostat.set_langevin(kT=kT, gamma=gamma)
# Set different kT on 2nd half of particles
system.part[int(N / 2):].temp = kT2
# Set different gamma on half of the partiles (overlap over both kTs)
if espressomd.has_features("PARTICLE_ANISOTROPY"):
system.part[
int(N / 4):int(3 * N / 4)].gamma = gamma2, gamma2, gamma2
else:
system.part[int(N / 4):int(3 * N / 4)].gamma = gamma2
system.integrator.run(50)
loops = 600
v_kT = np.zeros((int(N / 2) * loops, 3))
v_kT2 = np.zeros((int(N / 2 * loops), 3))
if espressomd.has_features("ROTATION"):
omega_kT = np.zeros((int(N / 2) * loops, 3))
omega_kT2 = np.zeros((int(N / 2 * loops), 3))
for i in range(loops):
system.integrator.run(1)
v_kT[int(i * N / 2):int((i + 1) * N / 2),
:] = system.part[:int(N / 2)].v
v_kT2[int(i * N / 2):int((i + 1) * N / 2),
:] = system.part[int(N / 2):].v
if espressomd.has_features("ROTATION"):
omega_kT[int(i * N / 2):int((i + 1) * N / 2),
:] = system.part[:int(N / 2)].omega_body
omega_kT2[int(i * N / 2):int((i + 1) * N / 2),
:] = system.part[int(N / 2):].omega_body
v_minmax = 5
bins = 4
error_tol = 0.016
self.check_velocity_distribution(v_kT, v_minmax, bins, error_tol, kT)
self.check_velocity_distribution(v_kT2, v_minmax, bins, error_tol, kT2)
if espressomd.has_features("ROTATION"):
self.check_velocity_distribution(
omega_kT, v_minmax, bins, error_tol, kT)
self.check_velocity_distribution(
omega_kT2, v_minmax, bins, error_tol, kT2)
def setup_diff_mass_rinertia(self, p):
if espressomd.has_features("MASS"):
p.mass = 0.5
if espressomd.has_features("ROTATION"):
p.rotation = 1, 1, 1
# Make sure rinertia does not change diff coeff
if espressomd.has_features("ROTATIONAL_INERTIA"):
p.rinertia = 0.4, 0.4, 0.4
def test_diffusion(self):
"""This tests rotational and translational diffusion coeff via green-kubo"""
system = self.system
system.part.clear()
kT = 1.37
dt = 0.1
system.time_step = dt
# Translational gamma. We cannot test per-component, if rotation is on,
# because body and space frames become different.
gamma = 3.1
# Rotational gamma
gamma_rot_i = 4.7
gamma_rot_a = 4.2, 1, 1.2
# If we have langevin per particle:
# per particle kT
per_part_kT = 1.6
# Translation
per_part_gamma = 1.63
# Rotational
per_part_gamma_rot_i = 2.6
per_part_gamma_rot_a = 2.4, 3.8, 1.1
# Particle with global thermostat params
p_global = system.part.add(pos=(0, 0, 0))
# Make sure, mass doesn't change diff coeff
self.setup_diff_mass_rinertia(p_global)
# particle specific gamma, kT, and both
if espressomd.has_features("LANGEVIN_PER_PARTICLE"):
p_gamma = system.part.add(pos=(0, 0, 0))
self.setup_diff_mass_rinertia(p_gamma)
if espressomd.has_features("PARTICLE_ANISOTROPY"):
p_gamma.gamma = per_part_gamma, per_part_gamma, per_part_gamma
if espressomd.has_features("ROTATION"):
p_gamma.gamma_rot = per_part_gamma_rot_a
else:
p_gamma.gamma = per_part_gamma
if espressomd.has_features("ROTATION"):
p_gamma.gamma_rot = per_part_gamma_rot_i
p_kT = system.part.add(pos=(0, 0, 0))
self.setup_diff_mass_rinertia(p_kT)
p_kT.temp = per_part_kT
p_both = system.part.add(pos=(0, 0, 0))
self.setup_diff_mass_rinertia(p_both)
p_both.temp = per_part_kT
if espressomd.has_features("PARTICLE_ANISOTROPY"):
p_both.gamma = per_part_gamma, per_part_gamma, per_part_gamma
if espressomd.has_features("ROTATION"):
p_both.gamma_rot = per_part_gamma_rot_a
else:
p_both.gamma = per_part_gamma
if espressomd.has_features("ROTATION"):
p_both.gamma_rot = per_part_gamma_rot_i
# Thermostat setup
if espressomd.has_features("ROTATION"):
if espressomd.has_features("PARTICLE_ANISOTROPY"):
# particle anisotropy and rotation
system.thermostat.set_langevin(
kT=kT, gamma=gamma, gamma_rotation=gamma_rot_a)
else:
# Rotation without particle anisotropy
system.thermostat.set_langevin(
kT=kT, gamma=gamma, gamma_rotation=gamma_rot_i)
else:
# No rotation
system.thermostat.set_langevin(kT=kT, gamma=gamma)
system.cell_system.skin = 0.4
system.integrator.run(100)
# Correlators
vel_obs = {}
omega_obs = {}
corr_vel = {}
corr_omega = {}
all_particles = [p_global]
if espressomd.has_features("LANGEVIN_PER_PARTICLE"):
all_particles.append(p_gamma)
all_particles.append(p_kT)
all_particles.append(p_both)
# linear vel
vel_obs = ParticleVelocities(ids=system.part[:].id)
corr_vel = Correlator(obs1=vel_obs, tau_lin=20, tau_max=1.4, delta_N=1,
corr_operation="componentwise_product", compress1="discard1")
system.auto_update_accumulators.add(corr_vel)
# angular vel
if espressomd.has_features("ROTATION"):
omega_obs = ParticleBodyAngularVelocities(ids=system.part[:].id)
corr_omega = Correlator(
obs1=omega_obs, tau_lin=20, tau_max=1.5, delta_N=1,
corr_operation="componentwise_product", compress1="discard1")
system.auto_update_accumulators.add(corr_omega)
system.integrator.run(150000)
system.auto_update_accumulators.remove(corr_vel)
corr_vel.finalize()
if espressomd.has_features("ROTATION"):
system.auto_update_accumulators.remove(corr_omega)
corr_omega.finalize()
# Verify diffusion
# Translation
# Cast gammas to vector, to make checks independent of
# PARTICLE_ANISOTROPY
gamma = np.ones(3) * gamma
per_part_gamma = np.ones(3) * per_part_gamma
self.verify_diffusion(p_global, corr_vel, kT, gamma)
if espressomd.has_features("LANGEVIN_PER_PARTICLE"):
self.verify_diffusion(p_gamma, corr_vel, kT, per_part_gamma)
self.verify_diffusion(p_kT, corr_vel, per_part_kT, gamma)
self.verify_diffusion(
p_both, corr_vel, per_part_kT, per_part_gamma)
# Rotation
if espressomd.has_features("ROTATION"):
# Decide on effective gamma rotation, since for rotation it is
# direction dependent
eff_gamma_rot = None
per_part_eff_gamma_rot = None
if espressomd.has_features("PARTICLE_ANISOTROPY"):
eff_gamma_rot = gamma_rot_a
eff_per_part_gamma_rot = per_part_gamma_rot_a
else:
eff_gamma_rot = gamma_rot_i * np.ones(3)
eff_per_part_gamma_rot = per_part_gamma_rot_i * np.ones(3)
self.verify_diffusion(p_global, corr_omega, kT, eff_gamma_rot)
if espressomd.has_features("LANGEVIN_PER_PARTICLE"):
self.verify_diffusion(
p_gamma, corr_omega, kT, eff_per_part_gamma_rot)
self.verify_diffusion(
p_kT, corr_omega, per_part_kT, eff_gamma_rot)
self.verify_diffusion(p_both, corr_omega,
per_part_kT, eff_per_part_gamma_rot)
    def verify_diffusion(self, p, corr, kT, gamma):
        """Verify the diffusion coefficient of particle *p* via Green-Kubo.

        p: the particle whose autocorrelation is checked
        corr: the finalized velocity (or angular velocity) correlator
              (NOTE(review): the old docstring claimed a dict keyed by
              particle, but the code uses *corr* directly as a correlator)
        kT: temperature the particle was thermalized at
        gamma: friction coefficient as a 3-component vector
        """
        c = corr
        # Integral of vacf via Green-Kubo:
        # D = int_0^infty <v(t_0)v(t_0+t)> dt (no 1/3 factor, since we work
        # componentwise)
        i = p.id
        # column 0 is the time lag; columns 2+3*i .. 2+3*i+2 hold the x/y/z
        # autocorrelation components of particle i
        acf = c.result()[:, [0, 2 + 3 * i, 2 + 3 * i + 1, 2 + 3 * i + 2]]
        np.savetxt("acf.dat", acf)
        # Integrate w. trapez rule
        for coord in 1, 2, 3:
            I = np.trapz(acf[:, coord], acf[:, 0])
            # Einstein relation D = kT / gamma; allow 7% statistical error
            ratio = I / (kT / gamma[coord - 1])
            self.assertAlmostEqual(ratio, 1., delta=0.07)
    def test_00__friction_trans(self):
        """Tests the translational friction-only part of the thermostat.

        With kT=0 the Langevin thermostat reduces to a pure friction force,
        so each velocity component must decay as v0 * exp(-gamma/m * t).
        """
        system = self.system
        # Translation
        gamma_t_i = 2
        gamma_t_a = 0.5, 2, 1.5
        v0 = 5.
        system.time_step = 0.0005
        system.part.clear()
        system.part.add(pos=(0, 0, 0), v=(v0, v0, v0))
        if espressomd.has_features("MASS"):
            system.part[0].mass = 3
        if espressomd.has_features("PARTICLE_ANISOTROPY"):
            system.thermostat.set_langevin(kT=0, gamma=gamma_t_a)
        else:
            system.thermostat.set_langevin(kT=0, gamma=gamma_t_i)
        system.time = 0
        for i in range(100):
            system.integrator.run(10)
            # compare each velocity component with the analytic decay
            # (mass defaults to 1 when MASS is off — presumably; verify)
            for j in range(3):
                if espressomd.has_features("PARTICLE_ANISOTROPY"):
                    self.assertAlmostEqual(
                        system.part[0].v[j], v0 * np.exp(-gamma_t_a[j] / system.part[0].mass * system.time), places=2)
                else:
                    self.assertAlmostEqual(
                        system.part[0].v[j], v0 * np.exp(-gamma_t_i / system.part[0].mass * system.time), places=2)
    @ut.skipIf(not espressomd.has_features("ROTATION"), "Skipped for lack of ROTATION")
    def test_00__friction_rot(self):
        """Tests the rotational friction-only part of the thermostat.

        With kT=0 each angular velocity component must decay as
        omega0 * exp(-gamma_rot / rinertia * t).
        """
        system = self.system
        # Translation
        gamma_t_i = 2
        gamma_t_a = 0.5, 2, 1.5
        # Rotation (isotropic and per-axis anisotropic friction)
        gamma_r_i = 3
        gamma_r_a = 1.5, 0.7, 1.2
        o0 = 5.
        system.time_step = 0.0005
        system.part.clear()
        system.part.add(
            pos=(0, 0, 0), omega_body=(o0, o0, o0), rotation=(1, 1, 1))
        if espressomd.has_features("ROTATIONAL_INERTIA"):
            system.part[0].rinertia = 2, 2, 2
        if espressomd.has_features("PARTICLE_ANISOTROPY"):
            system.thermostat.set_langevin(
                kT=0, gamma=gamma_t_a, gamma_rotation=gamma_r_a)
        else:
            system.thermostat.set_langevin(
                kT=0, gamma=gamma_t_i, gamma_rotation=gamma_r_i)
        system.time = 0
        for i in range(100):
            system.integrator.run(10)
            # fall back to unit inertia when ROTATIONAL_INERTIA is off
            if espressomd.has_features("ROTATIONAL_INERTIA"):
                rinertia = system.part[0].rinertia
            else:
                rinertia = (1, 1, 1)
            for j in range(3):
                # compare each component against the analytic decay
                if espressomd.has_features("PARTICLE_ANISOTROPY"):
                    self.assertAlmostEqual(
                        system.part[0].omega_body[j], o0 * np.exp(-gamma_r_a[j] / rinertia[j] * system.time), places=2)
                else:
                    self.assertAlmostEqual(
                        system.part[0].omega_body[j], o0 * np.exp(-gamma_r_i / rinertia[j] * system.time), places=2)
    @ut.skipIf(not espressomd.has_features("VIRTUAL_SITES"), "Skipped for lack of VIRTUAL_SITES")
    def test_virtual(self):
        """Check that act_on_virtual controls whether Langevin friction is
        applied to virtual particles (kT=0, gamma=1 => friction f = -v)."""
        system = self.system
        system.time_step = 0.01
        system.part.clear()
        virtual = system.part.add(pos=[0, 0, 0], virtual=True, v=[1, 0, 0])
        physical = system.part.add(pos=[0, 0, 0], virtual=False, v=[1, 0, 0])
        # with act_on_virtual=False only the physical particle feels friction
        system.thermostat.set_langevin(
            kT=0, gamma=1, gamma_rotation=1., act_on_virtual=False)
        system.integrator.run(0)
        np.testing.assert_almost_equal(np.copy(virtual.f), [0, 0, 0])
        np.testing.assert_almost_equal(np.copy(physical.f), [-1, 0, 0])
        # with act_on_virtual=True both particles feel friction
        system.thermostat.set_langevin(
            kT=0, gamma=1, gamma_rotation=1., act_on_virtual=True)
        system.integrator.run(0)
        np.testing.assert_almost_equal(np.copy(virtual.f), [-1, 0, 0])
        np.testing.assert_almost_equal(np.copy(physical.f), [-1, 0, 0])
# Run the module's unittest suite when executed as a script.
if __name__ == "__main__":
    ut.main()
| hmenke/espresso | testsuite/python/langevin_thermostat.py | Python | gpl-3.0 | 17,386 | [
"ESPResSo"
] | 834bb973feea5e557e12937a0ded402ad640a25a316b668b02f2f101af3647a6 |
#!/usr/bin/python
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# TO DO: - logging
# ---------------------------------IMPORTS-------------------------------------
import httplib
import json
import os
import sys
import string
import urllib
import urllib2
import shutil
import copy
import xml.etree.ElementTree as ET
import requests
from rdflib import Graph
from config import gen_config as gc
# ----------------------------------GLOBALS------------------------------------
# URLs
# This returns the list of reference proteomes from Uniprot
REF_PROT_LIST_URL = gc.REF_PROT_LIST_URL
# Retrieve the proteome rdf file
PROTEOME_URL = gc.PROTEOME_URL
PROTEOME_XML_URL = gc.PROTEOME_XML_URL
# Retrieve the genome's xml file
ENA_XML_URL = gc.ENA_XML_URL
# ENA url for file download
ENA_DATA_URL = gc.ENA_DATA_URL
ENA_DATA_URL_GZIP = gc.ENA_DATA_URL_GZIP
# ENA url for assembly data retrieval via taxon id
ENA_TAX_URL = gc.ENA_TAX_URL
# ENA GCA report file label
GCA_REP_LABEL = gc.GCA_REP_LBL
# NCBI URL for sequence download
NCBI_SEQ_URL = gc.NCBI_SEQ_URL
# ENA file formats
FORMATS = {"xml": ".xml", "fasta": ".fa"}
# Maximum sequences per file
MAX_SEQS = 100000
# ---------------------------------------------------------------------- #STEP1
def fetch_ref_proteomes():
    """
    Fetch every reference proteome accession available from Uniprot and
    return them as a list of stripped strings.
    """
    response = urllib2.urlopen(REF_PROT_LIST_URL)
    return [line.strip() for line in response]
# -----------------------------------------------------------------------------
def export_gca_accessions(upid_gca):
"""
Retrieves reference proteomes ids and their associated gca accessions
as well as the taxonomic rank/domain (eukaryotes, bacteria etc)
upid_gca: Uniprot's tab separated file (UPID_GCA.tsv)
"""
# need to check if the path provided is a valid file
upid_gca_fp = open(upid_gca, 'r')
prot_gca_pairs = {}
for prot in upid_gca_fp:
prot = prot.strip().split('\t')
if prot[1] != '':
prot_gca_pairs[prot[0]] = prot[1]
else:
prot_gca_pairs[prot[0]] = -1
upid_gca_fp.close()
return prot_gca_pairs
# -----------------------------------------------------------------------------
def extract_genome_acc(prot_rdf):
    """
    Extracts and returns the assembly accession from the proteome rdf
    which provided as input. Returns -1 if not available

    prot_rdf: A Uniprot's proteome rdf url or file path
    """
    g = Graph()
    # probe availability before asking rdflib to parse the resource
    response = requests.get(prot_rdf).status_code
    if response == httplib.OK:
        g.load(prot_rdf)
        # any triple object mentioning "GCA" is taken to be the assembly
        # accession; return its last URI path component
        for s, p, o in g:
            if string.find(o, "GCA") != -1:
                return os.path.split(o)[1]
    # resource unavailable or no GCA accession in the graph
    return -1
# -----------------------------------------------------------------------------
def proteome_rdf_scanner(proteome):
    """
    Scans a Uniprot's reference proteome rdf file and looks for all
    available accessions. Returns a dictionary with GCA and WGS accessions
    where applicable (-1 entries when not found)

    proteome: A Uniprot reference proteome accession (e.g. UP000000532)
    """
    # need to do some http error handling here and if resource is unavailable
    # return None and or http error code
    prot_rdf = PROTEOME_URL % proteome
    accessions = {"GCA": -1, "WGS": -1}
    wgs_flag = False
    g = Graph()
    if requests.get(prot_rdf).status_code == httplib.OK:
        g.load(prot_rdf)
        # scan for accessions
        for s, p, o in g:
            # look for ENA accessions
            if string.find(o, "/embl/") != -1:
                if string.find(o, "GCA") != -1:
                    accessions["GCA"] = os.path.split(o)[1]
                elif wgs_flag is True:
                    # NOTE(review): the WGS accession is assumed to be the
                    # /embl/ triple seen after a "WGS" hit; this relies on
                    # graph iteration order — verify
                    accessions["WGS"] = os.path.split(o)[1]
            # if WGS keyword found, set flag to true
            elif string.find(o, "WGS") != -1:
                wgs_flag = True
            else:
                pass
    return accessions
# ---------------------------------------------------------------------- #STEP2
def proteome_xml_scanner(proteome):
    """
    Scans a Uniprot's reference proteome xml file and looks for all
    available accessions. Returns a dictionary with GCA and WGS accessions
    where applicable (-1 entries when not found)

    proteome: A Uniprot reference proteome accession
    """
    # need to do some http error handling here and if resource is unavailable
    # return None and or http error code
    prot_xml = PROTEOME_XML_URL % proteome
    # all proteome xml elements live in the uniprot namespace
    prefix = "{http://uniprot.org/uniprot}%s"
    accessions = {"GCA": -1, "WGS": -1}
    # NOTE(review): the resource is fetched twice (status probe + content)
    if requests.get(prot_xml).status_code == httplib.OK:
        xml_root = ET.fromstring(requests.get(prot_xml).content)
        proteome = xml_root.find(prefix % "proteome")
        # look for a GCA accession
        gen_assembly = None
        gen_assembly = proteome.find(prefix % "genome_assembly")
        if gen_assembly is not None:
            accessions["GCA"] = gen_assembly.text
        prot_components = proteome.findall(prefix % "component")
        # scan WGS accession
        for component in prot_components:
            component_name = component.get("name")
            if component_name.find("WGS") != -1:
                accessions["WGS"] = component.find(prefix % "genome_accession").text
            else:
                pass
    return accessions
# ---------------------------------------------------------------------- #STEP2
def fetch_genome_acc(prot):
    """
    Returns a proteome's corresponding assembly accession (ENA) in a
    dictionary format {proteome_acc: gca_acc}

    prot: One of (file|list|acc)
    - file: Uniprot's proteome list file
    - list: a list of reference proteome accessions (fetch_ref_proteomes)
    - acc: A single Uniprot ref. proteome accession
    """
    gens = {}
    # normalise all three input forms to a list of proteome accessions;
    # this removes the copy-pasted single-accession branch of the old code
    if isinstance(prot, list):
        ref_prot_list = prot
    elif os.path.isfile(prot):
        with open(prot, 'r') as prot_file:
            ref_prot_list = prot_file.readlines()
    else:
        ref_prot_list = [prot]
    for proteome in ref_prot_list:
        proteome = proteome.strip()
        rdf_url = gc.PROTEOME_URL % (proteome)
        res_handle = urllib.urlopen(rdf_url)
        # BUGFIX: gen_acc is reset each iteration; previously a stale value
        # from the preceding proteome was stored when the rdf was unreachable
        gen_acc = None
        if res_handle.getcode() == httplib.OK:
            gen_acc = extract_genome_acc(rdf_url)
        gens[proteome] = gen_acc
    return gens
# -----------------------------------------------------------------------------
def fetch_ena_file(acc, file_format, dest_dir, compressed=True):
    """
    Retrieves a file given a valid ENA accession and stores it in the
    indicated destination in the selected format

    acc: A valid ENA entry accession
    file_format: A valid ENA file format (xml, fasta, txt)
    dest_dir: A valid path to the destination directory
    compressed: When True fetch the gzip-compressed variant (non-xml only)
    return: True if the file exists after the download, otherwise False
    """
    # build the download url and local file path for the requested format
    if file_format.find("xml") != -1:
        seq_url = ENA_XML_URL % acc
        file_path = os.path.join(dest_dir, acc + FORMATS[file_format])
    elif compressed is True:
        # fetching compressed file
        seq_url = ENA_DATA_URL_GZIP % (acc, file_format)
        file_path = os.path.join(dest_dir, acc + FORMATS[file_format] + ".gz")
    else:
        seq_url = ENA_DATA_URL % (acc, file_format)
        file_path = os.path.join(dest_dir, acc + FORMATS[file_format])
    # probe availability with a streaming request so that the payload is
    # not downloaded twice (the old code fetched the full body here and
    # then again via urlretrieve below)
    response = requests.get(seq_url, stream=True)
    response.close()
    if response.status_code == 200:
        urllib.urlretrieve(seq_url, file_path)
    return os.path.exists(file_path)
# ---------------------------------------------------------------------- #STEP3
def extract_assembly_accs(accession):
    """
    Loads an xml tree from a file or a string (usually an http response),
    and returns a list with the genome assembly's chromosomes

    accession: A valid ENA GCA accession (without the assembly version)
    """
    accessions = []
    root = None
    assembly_link = None
    assembly = None
    assembly_xml = requests.get(ENA_XML_URL % accession).content
    # NOTE(review): assembly_xml holds response *content*, not a path, so
    # this isfile() branch looks unreachable in practice — verify intent
    if os.path.isfile(assembly_xml):
        # parse xml tree and return root node
        root = ET.parse(assembly_xml).getroot()
    else:
        # fromstring returns the xml root directly
        root = ET.fromstring(assembly_xml)
    assembly = root.find("ASSEMBLY")
    if assembly is not None:
        # either parse the assembly report file or get the WGS range
        assembly_link = assembly.find(
            "ASSEMBLY_LINKS")
        if assembly_link is not None:
            # export url link and fetch all relevant assembly accessions
            url_link = assembly_link.find(
                "ASSEMBLY_LINK").find("URL_LINK").find("URL").text
            # need to check for GCA_REP_LABEL
            accessions = assembly_report_parser(url_link, url=True)
        else:
            # no assembly link provided - look for WGS element
            wgs = None
            wgs = assembly.find("WGS_SET")
            # get wgs accession
            wgs_acc = get_wgs_set_accession(
                wgs.find("PREFIX").text, wgs.find("VERSION").text)
            # get wgs range and return as single accession
            accessions.append(get_wgs_range(wgs_acc))
        # move this outside this function to where accessions are requested ??
    else:
        # no ASSEMBLY element: treat the input itself as a WGS accession
        accessions.append(get_wgs_range(accession))
    return accessions
# ---------------------------------------------------------------------- #STEP4
def _download_assembly_fastas(gen_acc, dest_dir):
    """
    Download all fasta files of a single assembly accession into a
    per-accession sub-directory of dest_dir.

    gen_acc: A single ENA genome accession (GC*), optionally versioned
    dest_dir: Parent directory for the per-accession output directory
    return: The number of accessions found for the assembly
    """
    # strip the assembly version (e.g. GCA_000001.2 -> GCA_000001);
    # partition() leaves unversioned accessions untouched
    gen_acc = gen_acc.partition('.')[0]
    gen_dir = os.path.join(dest_dir, gen_acc)
    try:
        os.mkdir(gen_dir)
    except OSError:
        # the directory may already exist; report and carry on
        # (the old code used a bare except and printed the wrong variable)
        sys.stderr.write(
            "Unable to generate directory for accession: %s\n" % gen_acc)
    accessions = extract_assembly_accs(gen_acc)
    for acc in accessions:
        fetch_ena_file(acc, "fasta", gen_dir)
    return len(accessions)


def download_genomes(gen, dest_dir):
    """
    Downloads all chromosome files of a given assembly accession (ENA) in
    dest_dir

    gen: Single accession (string) or a file of genome accessions (GC*)
    dest_dir: The path of the destination directory to export the fasta
    files
    return: True on success; None when a single accession yields no
    sequences (matching the original contract)
    """
    # need to add logging
    if os.path.isfile(gen):
        with open(gen, 'r') as gen_fp:
            for gen_acc in gen_fp:
                _download_assembly_fastas(gen_acc.strip(), dest_dir)
        return True
    # single accession: report failure via None, as before
    if _download_assembly_fastas(gen, dest_dir) == 0:
        return None
    return True
# -----------------------------------------------------------------------------
def fetch_genome(gen, dest_dir):
    """
    Downloads and parses xml file of the given genome accession (gen), and
    downloads all chromosome files in fasta format in destination directory
    (dest_dir). The xml file is deleted after completion.

    gen: ENA assembly accession (GCA*)
    dest_dir: Destination of the output directory
    """
    # one sub-directory per assembly, named after the versionless accession
    gen_dir = os.path.join(dest_dir, gen.partition('.')[0])
    try:
        os.mkdir(gen_dir)
    except:
        pass
    fetch_ena_file(gen, "xml", gen_dir)
    gen_xml = os.path.join(gen_dir, gen + ".xml")
    # NOTE(review): extract_assembly_accs() expects an accession and fetches
    # the xml itself from ENA; passing the local xml path here looks stale —
    # confirm against callers
    chroms = extract_assembly_accs(gen_xml)
    for chrom in chroms:
        fetch_ena_file(chrom, "fasta", gen_dir)
    os.remove(gen_xml)  # remove xml file when done
# -----------------------------------------------------------------------------
def rdf_accession_search(ref_prot_acc, sub_str):
    """
    Parses rdf url and returns a list of ENA accessions

    ref_prot_acc: A Uniprot reference proteome accession
    sub_str: A sub string to look for in the rdf file (e.g. "/embl/")
    return: Last URI path component of every matching triple object;
    empty list when the rdf resource is unavailable
    """
    accessions = []
    rdf_graph = Graph()
    rdf_url = PROTEOME_URL % ref_prot_acc
    response = requests.get(rdf_url).status_code
    if response == httplib.OK:
        rdf_graph.load(rdf_url)
        # collect the trailing path component of every matching object
        for s, p, o in rdf_graph:
            if string.find(o, sub_str) != -1:
                accessions.append(os.path.split(o)[1])
    else:
        # return http status code
        # return response.status_code
        pass
    return accessions
# -----------------------------------------------------------------------------
def assembly_report_parser(assembly_report, url=True):
    """
    Parses an assembly report file and returns a list of all available
    accessions (scaffolds, contigs etc)

    assembly_report: A url provided within an ENA assembly xml file (the
    text of URL under ASSEMBLY/ASSEMBLY_LINKS/ASSEMBLY_LINK; ftp urls are
    converted to http) or, when url=False, a local report file path
    url: True when assembly_report is a url, False for a local file
    return: All first-column values containing a '.' (versioned accessions)
    """
    accessions = []
    if url is True:
        # switch from ftp to http to fetch assembly accessions on the go
        report_url = assembly_report.replace("ftp://", "http://")
        # fetch report contents as a list of lines, omitting the header
        lines = requests.get(report_url).content.split('\n')[1:]
        # drop a trailing empty line, if present
        if lines and lines[-1] == '':
            lines.pop()
    else:
        # BUGFIX: the file handle was previously opened and never closed
        # NOTE(review): the file branch keeps the original behaviour of NOT
        # skipping a header line — confirm this asymmetry is intended
        with open(assembly_report, 'r') as rep_fp:
            lines = rep_fp.read().splitlines()
    # parse the lines and export assembly accessions
    for line in lines:
        fields = line.strip().split('\t')
        if fields[0].find('.') != -1:
            accessions.append(fields[0])
    return accessions
# -----------------------------------------------------------------------------
def get_wgs_set_accession(prefix, version):
    """
    Generates ENA WGS accession using the WGS accession pattern
    (4 letter prefix) and 2-digit build version. The WGS accession is
    generated by appending prefix and version with a postfix of 6-zeros.
    For more information please visit ENA service-news: http://goo.gl/LnIyQ3

    prefix: A 4-char string representing the ENA WGS accession prefix
    version: The WGS build version (1-2 digit string or int)
    return: The WGS set accession, e.g. ("ABCD", "2") -> "ABCD02000000"
    """
    postfix = "000000"
    # Left-pad the version to exactly two digits. The previous
    # "int(version) % 10 != 0" check mis-padded two-digit versions not
    # divisible by 10 (e.g. "12" became "012").
    return prefix + str(version).zfill(2) + postfix
# -----------------------------------------------------------------------------
def get_wgs_range(wgs_acc):
    """
    Fetches the wgs related xml file from ENA and exports the wgs range

    wgs_acc: A valid ENA wgs accession
    return: The WGS range string (e.g. "ABCD01000001-ABCD01000999") or
    None when unavailable
    """
    wgs_range = None
    response = requests.get(ENA_XML_URL % wgs_acc)
    if response.status_code == httplib.OK:
        wgs_xml_str = response.content
        wgs_xml_root = ET.fromstring(wgs_xml_str)
        if wgs_xml_root.find("entry") is not None:
            wgs_xrefs = wgs_xml_root.find("entry").findall("xref")
            # the ENA-WGS xref's id attribute holds the accession range
            for xref_el in wgs_xrefs:
                if xref_el.get("db") == "ENA-WGS":
                    wgs_range = xref_el.get("id")
    return wgs_range
# -----------------------------------------------------------------------------
def lsf_cmd_generator(upid, gca_acc, domain, exec_path, proj_dir):
    """
    Builds the LSF bsub command used to submit a genome download job.

    upid: Uniprot's reference proteome id
    gca_acc: ENA's genome accession (-1 when no id is available)
    domain: Proteome's taxonomic domain
    exec_path: The path to the pipeline executable
    proj_dir: The path to the project directory
    return: A single bsub command string
    """
    # per-proteome output directory: <proj_dir>/<subdir_idx>/<upid>
    prot_dir = os.path.join(proj_dir, upid[8:], upid)
    pieces = [
        "bsub",
        "-M %s" % gc.MEM,
        "-R \"rusage[mem=%s,tmp=%s]\"" % (gc.MEM, gc.TMP_MEM),
        "-o \"%s\"" % os.path.join(prot_dir, "download.out"),
        "-e \"%s\"" % os.path.join(prot_dir, "download.err"),
        "-u \"%s\"" % gc.USER_EMAIL,
        "-n 4",
        "-Ep \"rm -rf luigi\"",
        "-g %s" % gc.LSF_GEN_GROUP,
        "python %s DownloadGenome --upid %s --gca-acc %s "
        "--project-dir %s --domain %s" % (
            exec_path, upid, gca_acc, proj_dir, domain),
    ]
    return ' '.join(pieces)
# -----------------------------------------------------------------------------
def genome_script_generator(upid, domain, gen_size, out_dir):
    """
    Generates a shell script for a proteome with id upid under out_dir.
    Memory is reserved according to genome size

    upid: Uniprot's unique proteome id
    domain: The domain under which a proteome has been classified
    gen_size: The genome's size
    out_dir: Destination directory
    """
    # create shell script for upid within out_dir
    shell_fp = open(os.path.join(out_dir, upid + ".sh"), 'w')
    # mem = gen_size * something might not need these for downloads
    mem_size = 8000
    tmp_size = gen_size * 2
    tmp_dir = "/tmp/%s_$LSB_JOBID" % (upid)
    # generate proteome destination directory
    prot_dest_dir = os.path.join(
        os.path.join(os.path.split(out_dir)[0], domain), upid)
    shell_fp.write("#!/bin/csh\n")
    shell_fp.write("#BSUB -M %s\n" % mem_size)
    shell_fp.write("#BSUB -R \"rusage[mem=%s,tmp=%s]\"\n" % (mem_size, tmp_size))
    # create a directory using the proteomes unique id extended by jobid
    shell_fp.write("#BSUB -E \"mkdir -m 777 -p %s\"\n" % tmp_dir)
    shell_fp.write("#BSUB -o \"%s/%sJ.out\"\n" % (tmp_dir, chr(37)))
    shell_fp.write("#BSUB -e \"%s/%sJ.err\"\n" % (tmp_dir, chr(37)))
    shell_fp.write("#BSUB -u \"%s\"\n" % gc.USER_EMAIL)  # email this user
    # need to write files back to genome dir prot_dest_dir
    shell_fp.write(
        "#BSUB -f \"%s/download.out < /tmp/%sJ/%sJ.out\"\n" % (prot_dest_dir,
                                                               chr(37),
                                                               chr(37)))
    shell_fp.write(
        "#BSUB -f \"%s/download.err < /tmp/%sJ/%sJ.err\"\n" % (prot_dest_dir,
                                                               chr(37),
                                                               chr(37)))
    # delete everything on termination or completion of job
    shell_fp.write("#BSUB -Ep \"rm -rf %s\"\n" % tmp_dir)
    # BUGFIX: the old format string "#BSUB -g %s/%s" received a single
    # argument (gc.LSF_GEN_GROUP % domain) and raised TypeError at runtime;
    # presumably the domain-specific group alone was intended — verify
    shell_fp.write("#BSUB -g %s \n\n" % (gc.LSF_GEN_GROUP % domain))
    # call executable
    shell_fp.write("python %s %s %s \n\n" %
                   (gc.GEN_DWLD_EXEC, os.path.join(prot_dest_dir, upid + ".json"),
                    prot_dest_dir))
    # copy files to destination
    shell_fp.write("cp %s/*.gz %s/.\n" % (tmp_dir, prot_dest_dir))
    # BUGFIX: the handle was previously never closed/flushed
    shell_fp.close()
# -----------------------------------------------------------------------------
def load_upid_gca_file(upid_gca_file):
    """
    Parses Uniprot's upid tsv file and exports all important information

    upid_gca_file: UPID_GCA file provided by trembl
    returns: A dictionary of upid, gca and domain mappings
    {upid: {"GCA": gca_or_-1, "DOM": domain}}
    raises: IOError when the file cannot be opened or a line is malformed
    (preserving the original contract)
    """
    upid_gca_dict = {}
    # "with" guarantees the handle is closed even when parsing fails
    # (the old code leaked it on error)
    with open(upid_gca_file, 'r') as upid_fp:
        try:
            for upid_line in upid_fp:
                fields = upid_line.strip().split('\t')
                # an empty GCA column is recorded as -1 ("not available")
                gca = fields[1] if fields[1] != '' else -1
                upid_gca_dict[fields[0]] = {"GCA": gca, "DOM": fields[2]}
        except Exception:
            # keep the original caller-visible behaviour: any parse
            # problem surfaces as IOError
            raise IOError
    return upid_gca_dict
# -----------------------------------------------------------------------------
def load_upid_gca_pairs():
    """
    This is an alternative version to load_upid_gca_file loading the pairs
    from Uniprot's REST API.

    return: {upid: {"GCA": gca_or_-1, "DOM": kingdom}}; empty dict when the
    REST endpoint is unavailable
    """
    id_pairs = {}
    response = requests.get(gc.REF_PROT_REST_URL)
    if response.status_code == 200:
        content = response.content
        prot_lines = content.split('\n')
        # remove header line
        prot_lines.pop(0)
        for prot_line in prot_lines:
            if prot_line != '':
                prot_line = prot_line.strip().split('\t')
                # first comma-separated token of the taxonomy column,
                # lower-cased, is used as the kingdom/domain label
                tax_str = prot_line[2].split(',')[0].lower().strip()
                gca_acc = prot_line[1]
                if gca_acc != '':
                    id_pairs[prot_line[0]] = {
                        "GCA": prot_line[1], "DOM": tax_str}
                else:
                    # -1 marks a proteome without a GCA accession
                    id_pairs[prot_line[0]] = {"GCA": -1, "DOM": tax_str}
    else:
        # raise an error here
        pass
    return id_pairs
# -----------------------------------------------------------------------------
def fetch_genome_accessions(upid, gca_acc):
    """
    Fetches and returns a list of all accessions for a specific ref. proteome

    upid: Uniprot's ref. proteome id
    gca_acc: An ENA GCA accession associated with the upid (if available or -1)
    """
    gen_accs = []
    gca_acc = str(gca_acc)
    # there's a GCA accession
    if gca_acc != "-1":
        # drop the assembly version before querying ENA
        gca_acc = gca_acc.split('.')[0]
        gen_accs = extract_assembly_accs(gca_acc)
    else:
        # no GCA accession known; inspect the proteome rdf instead
        prot_accs = proteome_rdf_scanner(upid)
        # no GCA or WGS, get any accessions from proteome rdf
        if prot_accs["GCA"] == -1 and prot_accs["WGS"] == -1:
            gen_accs = rdf_accession_search(upid, "/embl/")
        # found a GCA accession in the rdf file
        elif prot_accs["GCA"] != -1 and prot_accs["WGS"] == -1:
            gen_accs = extract_assembly_accs(prot_accs["GCA"])
        # WGS found
        elif prot_accs["GCA"] == -1 and prot_accs["WGS"] != -1:
            # call get_wgs_range directly here
            gen_accs = extract_assembly_accs(prot_accs["WGS"])
    # for all other cases this function will return an empty list
    return gen_accs
# -----------------------------------------------------------------------------
def fetch_wgs_range_accs(wgs_range):
    """
    Splits the WGS range into distinct accessions for metadata retrieval

    wgs_range: A valid ENA-WGS set range (e.g. "ABCD0100001-ABCD0100004")
    return: A list with every accession in the range, endpoints included
    """
    start_acc, end_acc = wgs_range.strip().split('-')
    # NOTE(review): the numeric part is assumed to start at index 5, as in
    # the original implementation — confirm for 6-char WGS prefixes
    prefix = start_acc[0:5]
    # width of the numeric suffix; needed to restore leading zeros
    num_width = len(start_acc) - len(prefix)
    start = int(start_acc[5:])
    end = int(end_acc[5:])
    # BUGFIX: str(n) dropped leading zeros of the numeric suffix
    # (e.g. "000002" -> "2"), producing invalid accessions; zfill restores
    # the fixed width
    wgs_accs = [prefix + str(num).zfill(num_width) for num in range(start, end)]
    # include the last accession verbatim
    wgs_accs.append(end_acc)
    return wgs_accs
# -----------------------------------------------------------------------------
def genome_download_validator(genome_dir):
    """
    Loop over Genome Download output directory and report any upids with
    erroneous output. A genome counts as successful when its download.out
    file contains a line with "Success"; failures are listed per kingdom
    in <genome_dir>/download_report.txt.

    genome_dir: The path to Genome Download output directory
    """
    erroneous_genomes = {}
    # list all kingdom dirs under genome output directory
    project_dirs = os.listdir(genome_dir)
    # filter out items that are not directories
    kingdom_dirs = [x for x in project_dirs if os.path.isdir(os.path.join(genome_dir, x))]
    for kingdom in kingdom_dirs:
        erroneous_genomes[kingdom] = []
        kingdom_dir_loc = os.path.join(genome_dir, kingdom)
        # list all genome directories per kingdom (do not re-use the outer
        # loop's list name as the old code did)
        for genome in os.listdir(kingdom_dir_loc):
            genome_dir_loc = os.path.join(kingdom_dir_loc, genome)
            out_file = os.path.join(genome_dir_loc, "download.out")
            if os.path.exists(out_file):
                # BUGFIX: success is initialised per genome; it was
                # previously read before assignment (NameError) on the
                # first genome lacking a "Success" line
                success = 0
                with open(out_file) as download_fp:
                    for line in download_fp:
                        if line.find("Success") != -1:
                            success = 1
                            break
                if success == 0:
                    erroneous_genomes[kingdom].append(genome)
    # write the erroneous proteomes report, one kingdom per section
    fp_out = open(os.path.join(genome_dir, "download_report.txt"), 'w')
    for kingdom in erroneous_genomes.keys():
        if len(erroneous_genomes[kingdom]) > 0:
            fp_out.write(kingdom + '\n')
            for proteome in erroneous_genomes[kingdom]:
                fp_out.write(proteome + '\n')
            fp_out.write('\n')
    fp_out.close()
# -----------------------------------------------------------------------------
def download_fasta_from_ncbi(accession, dest_dir):
    """
    Download fasta sequences from NCBI. Used for sequences that have become
    obsolete in ENA.

    accession: A genome accession to download
    dest_dir: Destination directory to save the file to
    return: True on success, otherwise False
    """
    target = os.path.join(dest_dir, accession + '.fa')
    urllib.urlretrieve(NCBI_SEQ_URL % (accession), target)
    # success is judged by the presence of the downloaded file
    return os.path.exists(target)
# -----------------------------------------------------------------------------
def download_sequence_report_files(project_dir, upid_gca_file):
    """
    Loads upid_gca_file json file and downloads from ENA all sequence report
    files per GCA accession. Skips if no GCA accession available. Proteomes
    whose report is missing or empty are recorded in
    <project_dir>/err_seq_rep_files.json.

    project_dir: The path to a project directory as generated by Genome
    Download pipeline (genome_downloader.py)
    upid_gca_file: upid_gca file in json format as generated by the Genome
    download pipeline (genome_downloader.py)
    returns: void
    """
    err_seq_rep_files = {}
    with open(upid_gca_file, 'r') as upid_gca_fp:
        acc_pairs = json.load(upid_gca_fp)
    for upid in acc_pairs.keys():
        if acc_pairs[upid]["GCA"] != -1:
            # build <project_dir>/<domain>/<upid> on demand
            domain_dir = os.path.join(project_dir, acc_pairs[upid]["DOM"])
            if not os.path.exists(domain_dir):
                os.mkdir(domain_dir)
            updir = os.path.join(domain_dir, upid)
            if not os.path.exists(updir):
                os.mkdir(updir)
            seq_rep_url = gc.SEQ_REP_URL_TEMPLATE % (acc_pairs[upid]["GCA"][0:7],
                                                     acc_pairs[upid]["GCA"][0:10],
                                                     acc_pairs[upid]["GCA"])
            filename = "%s_sequence_report.txt" % acc_pairs[upid]["GCA"]
            file_path = os.path.join(updir, filename)
            urllib.urlretrieve(seq_rep_url, file_path)
            # BUGFIX: the existence/size checks previously tested the bare
            # filename (relative to the cwd) instead of the downloaded
            # path, flagging every proteome as erroneous
            if not os.path.exists(file_path) or os.path.getsize(file_path) == 0:
                err_seq_rep_files[upid] = {"GCA": acc_pairs[upid]["GCA"],
                                           "DOM": acc_pairs[upid]["DOM"]}
    if len(err_seq_rep_files.keys()) > 0:
        with open(os.path.join(project_dir, "err_seq_rep_files.json"), 'w') as fp_out:
            json.dump(err_seq_rep_files, fp_out)
# -----------------------------------------------------------------------------
def sequence_report_to_json(seq_report_file, dest_dir=None):
    """
    Convert a GCA sequence report file (ENA) from .txt to .json format
    seq_report_file: The path to a valid GCA related sequence report file
    dest_dir: The path to destination directory. If None use the directory
    of the input file
    return: Accession dictionary
    """
    accessions = {}
    with open(seq_report_file, 'r') as seq_rep_fp:
        # the first line is a column header - skip it
        seq_rep_fp.readline()
        for raw_line in seq_rep_fp:
            fields = raw_line.strip().split('\t')
            # NOTE(review): "sequence_name" uses an underscore while the
            # other keys use hyphens; kept exactly as-is since downstream
            # consumers may depend on the literal key names.
            accessions[fields[0]] = {"sequence_name": fields[1],
                                     "sequence-length": fields[2],
                                     "sequence-role": fields[3],
                                     "replicon-name": fields[4],
                                     "replicon-type": fields[5],
                                     "assembly-unit": fields[6]}
    if dest_dir is None:
        dest_dir = os.path.split(seq_report_file)[0]
    json_name = os.path.basename(seq_report_file).partition(".")[0] + ".json"
    fp_out = open(os.path.join(dest_dir, json_name), 'w')
    json.dump(accessions, fp_out)
    fp_out.close()
    return accessions
# -----------------------------------------------------------------------------
def split_and_download(wgs_range, dest_dir):
    """
    Function to split and download smaller segments of large genome assemblies
    wgs_range: A WGS assembly sequence accession range from ENA
    (e.g. CBTL0100000001-CBTL0111673940)
    dest_dir: The path to the destination directory
    returns: void
    """
    # split the range into separate accessions
    accessions = fetch_wgs_range_accs(wgs_range)
    total = len(accessions)
    # Download in inclusive chunks of at most MAX_SEQS accessions.
    # BUG FIX: the previous index bookkeeping (a) built chunks of
    # MAX_SEQS + 1 sequences and indexed one past the end of the list when
    # total was an exact multiple of MAX_SEQS, (b) skipped accessions in the
    # remainder step via a stray `idx1 = idx2 + 1` reassignment, and
    # (c) crashed on the remainder step by indexing the list with an
    # accession string (`idx2 = accessions[-1]`).
    start = 0
    while start < total:
        end = min(start + MAX_SEQS, total) - 1
        accession = accessions[start] + '-' + accessions[end]
        urllib.urlretrieve(ENA_DATA_URL % accession,
                           os.path.join(dest_dir, accession + '.fa'))
        start = end + 1
# -----------------------------------------------------------------------------
def fetch_accessions_from_proteome_xml(proteome):
    """
    Parses Uniprot's proteome xml and extracts all available ENA accessions
    proteome: A valid Uniprot's proteome accession
    returns: A list of genome accessions
    """
    accessions = []
    # all elements in the proteome xml live under the uniprot namespace
    ns = "{http://uniprot.org/uniprot}%s"
    response = requests.get(gc.PROTEOME_XML_URL % proteome)
    if response.status_code == 200:
        # parse the xml payload and locate the proteome node
        root = ET.fromstring(response.content)
        proteome_node = root.find(ns % "proteome")
        # every component may carry one or more genome accessions
        for component in proteome_node.findall(ns % "component"):
            for acc_node in component.findall(ns % "genome_accession"):
                accessions.append(acc_node.text)
    return accessions
# -----------------------------------------------------------------------------
def check_accession_availability(accession):
    """
    Check whether a specific accession is available from ENA
    accession: sequence accession
    return: True if accession is available, False otherwise
    """
    # we can expand this by adding a db option (e.g. ena, uniprot, ncbi)
    response = requests.get(ENA_XML_URL % accession)
    # BUG FIX: the function previously fell through and returned None on a
    # non-OK response; the documented contract is a True/False answer.
    if response.status_code != httplib.OK:
        return False
    xml_root = ET.fromstring(response.content)
    # If the entry exists, there should be an entry node in the xml file
    return xml_root.find("entry") is not None
# -----------------------------------------------------------------------------
def copy_wgs_set_from_ftp(wgs_acc, dest_dir):
    """
    Copy wgs set sequences from physical location on cluster
    wsg_acc: A valid WGS set accession (e.g. AAVU01000000)
    dest_dir: Destination directory where the sequences will be copied to
    return: void; exits the process if the WGS set cannot be found
    """
    wgs_filename = wgs_acc[0:6] + ".fasta.gz"
    # look in the public area first, then fall back to suppressed sequences
    for ftp_root in (gc.ENA_FTP_WGS_PUB, gc.ENA_FTP_WGS_SUP):
        # 2-letter prefix subdir (e.g. "aa" for AAVU...)
        source_path = os.path.join(ftp_root, wgs_acc[0:2].lower(),
                                   wgs_filename)
        if os.path.exists(source_path):
            shutil.copyfile(source_path,
                            os.path.join(dest_dir, wgs_filename))
            return
    sys.exit("WGS set %s requested does not exist." % wgs_acc)
# -----------------------------------------------------------------------------
def proteome_xml_accessions_to_dict(upid):
    """
    Parses a valid proteome xml file and returns all accessions in the form of
    a dictionary. Component names from proteome xml are used as dictionary keys
    upid: A valid Uniprot proteome upid
    returns: A dictionary with all proteome associated accessions
    ("GCA", "WGS", "OTHER" keys; -1 where unavailable).
    """
    proteome_accs = {"GCA": -1, "WGS": -1}
    other = {}
    # namespace prefix # or register a namespace in the ET
    prefix = "{http://uniprot.org/uniprot}%s"
    response = requests.get(gc.PROTEOME_XML_URL % upid)
    if response.status_code == 200:
        # convert from string to xml format
        prot_tree_root = ET.fromstring(response.content)
        # get proteome node
        proteome = prot_tree_root.find(prefix % "proteome")
        # The GCA accession lives in a nested <genomeAssembly> element
        # inside the outer <genomeAssembly> container.
        # BUG FIX: guard the outer node; previously chaining .find() on a
        # missing genomeAssembly element raised AttributeError on None.
        gca_acc = None
        ga_container = proteome.find(prefix % "genomeAssembly")
        if ga_container is not None:
            gca_acc = ga_container.find(prefix % "genomeAssembly")
        if gca_acc is not None:
            proteome_accs["GCA"] = gca_acc.text
        component_nodes = proteome.findall(prefix % "component")
        # loop over all component nodes and extract genome accessions
        for node in component_nodes:
            name = node.get("name")
            if name.find("WGS") != -1:
                accession = node.find(prefix % "genome_accession").text
                proteome_accs["WGS"] = accession
            else:
                accession = node.find(prefix % "genome_accession")
                # if there is an accession available
                if accession is not None:
                    accession = accession.text
                    other[name] = accession
    proteome_accs["OTHER"] = copy.deepcopy(other)
    return proteome_accs
# -----------------------------------------------------------------------------
def copy_gca_report_file_from_ftp(gca_accession, dest_dir):
    """
    Copies the corresponding GCA report file from the ftp
    gca_accession: A valid GCA accession
    dest_dir: Destination directory for the copies
    return: True if the sequence report file was found, False otherwise
    """
    report_name = gca_accession + "_sequence_report.txt"
    regions_name = gca_accession + "_regions.txt"
    # report files live two subdir layers deep: GCA_XXX/GCA_XXXXXX
    gca_dir = os.path.join(gc.ENA_GCA_SEQ_REPORT,
                           gca_accession[0:7], gca_accession[0:10])
    report_src = os.path.join(gca_dir, report_name)
    regions_src = os.path.join(gca_dir, regions_name)
    # sanity check that the sequence report actually exists
    if not os.path.exists(report_src):
        return False
    shutil.copyfile(report_src, os.path.join(dest_dir, report_name))
    # the regions file is optional; copy it only when present
    if os.path.exists(regions_src):
        shutil.copyfile(regions_src, os.path.join(dest_dir, regions_name))
    return True
# -----------------------------------------------------------------------------
def get_genome_unique_accessions(upid, to_file=False, output_dir=None):
    """
    This function will extract all available accessions from the relevant
    proteome xml file and return a list of unique accessions that represent a
    complete genome. This will be a combination of assembly accessions provided
    by ENA and any additional accessions found in the proteome xml file
    upid: A valid Uniprot Proteome id
    to_file: When True, also dump the raw proteome accession dict to
    <output_dir>/<upid>_accessions.json
    output_dir: The path to the output dir (defaults to /tmp when a GCA
    accession is available and no dir was given)
    return: A dict with "GCA", "WGS", "OTHER" (list) and "GCA_NA" keys
    """
    # GCA NA - Set to 1 when GCA accession is available, but GCA report file is not available from ENA
    complete_genome_accs = {"GCA": -1, "WGS": -1, "OTHER": [], "GCA_NA": 0}
    proteome_acc_dict = proteome_xml_accessions_to_dict(upid)
    complete_genome_accs["GCA"] = proteome_acc_dict["GCA"]
    complete_genome_accs["WGS"] = proteome_acc_dict["WGS"]
    if proteome_acc_dict["GCA"] != -1:
        # create a temporary copy of the assembly report file
        if output_dir is None:
            output_dir = "/tmp"
        check_exists = copy_gca_report_file_from_ftp(proteome_acc_dict["GCA"], output_dir)
        # try downloading the files from the URL if unsuccessful
        # NOTE(review): url_check is assigned but never inspected -
        # presumably failures are surfaced by the os.path.exists check below
        if check_exists is False:
            url_check = download_gca_report_file_from_url(proteome_acc_dict["GCA"], output_dir)
        # get assembly report file path
        gca_report_filename = proteome_acc_dict["GCA"] + "_sequence_report.txt"
        if os.path.exists(os.path.join(output_dir, gca_report_filename)):
            gca_accs = assembly_report_parser(os.path.join(output_dir, gca_report_filename),
                                              url=False)
            # strip sequence versions (e.g. "CM000123.1" -> "CM000123")
            accs_no_version = [x.partition('.')[0] for x in gca_accs]
            proteome_set = set(proteome_acc_dict["OTHER"].values())
            gca_set = set(accs_no_version)
            # construct a new set with unique accessions from both sets
            unique_accs = proteome_set.union(gca_set)
            # add unique accessions in dictionary
            complete_genome_accs["OTHER"].extend(unique_accs)
        else:
            # report file unavailable from both the ftp and the URL;
            # fall back to the proteome xml accessions only
            print "Genome Assembly report file for %s is unavailable" % upid
            complete_genome_accs["OTHER"].extend(proteome_acc_dict["OTHER"].values())
            # flag GCA_NA only when a WGS set can still cover the genome
            if complete_genome_accs["WGS"] != -1:
                complete_genome_accs["GCA_NA"] = 1
    else:
        # no GCA accession: rely entirely on the per-component accessions
        complete_genome_accs["OTHER"].extend(proteome_acc_dict["OTHER"].values())
    # write proteome accessions to json file
    if to_file is True:
        fp_out = open(os.path.join(output_dir, upid+"_accessions.json"), 'w')
        json.dump(proteome_acc_dict, fp_out)
        fp_out.close()
    return complete_genome_accs
# -----------------------------------------------------------------------------
def extract_wgs_acc_from_gca_xml(gca_accession):
    """
    Parses ENA's GCA xml file and extracts the WGS set accession if available
    gca_accession: A valid GCA accession
    return: A WGS set accession, None if not found
    """
    wgs_acc = None
    assembly_xml = requests.get(ENA_XML_URL % gca_accession).content
    if os.path.isfile(assembly_xml):
        # parse xml tree and return root node
        xml_root = ET.parse(assembly_xml).getroot()
    else:
        # fromstring returns the xml root directly
        xml_root = ET.fromstring(assembly_xml)
    assembly = xml_root.find("ASSEMBLY")
    if assembly is not None:
        # no assembly link provided - look for WGS element
        wgs_node = assembly.find("WGS_SET")
        # BUG FIX: guard against assemblies without a WGS_SET element;
        # previously wgs_node.find(...) raised AttributeError on None
        # instead of returning None as documented.
        if wgs_node is not None:
            # get wgs accession
            wgs_acc = get_wgs_set_accession(
                wgs_node.find("PREFIX").text, wgs_node.find("VERSION").text)
    return wgs_acc
# -----------------------------------------------------------------------------
def download_gca_report_file_from_url(gca_accession, dest_dir):
    """
    Fetches ENA's assembly xml for the given GCA accession and downloads every
    file referenced by its ASSEMBLY_LINK URL nodes (sequence report, regions,
    etc.) into dest_dir.
    gca_accession: A valid ENA GCA accession
    dest_dir: Destination directory for the downloaded files
    return: True if assembly link URLs were found and fetched, False otherwise
    """
    url_links = []
    assembly_xml = requests.get(ENA_XML_URL % gca_accession).content
    if os.path.isfile(assembly_xml):
        # parse xml tree and return root node
        root = ET.parse(assembly_xml).getroot()
    else:
        # fromstring returns the xml root directly
        root = ET.fromstring(assembly_xml)
    assembly = root.find("ASSEMBLY")
    if assembly is not None:
        assembly_links = assembly.find("ASSEMBLY_LINKS")
        if assembly_links is not None:
            # export url link and fetch all relevant assembly accessions;
            # switch the scheme to http so urlretrieve avoids ftp handling
            for node in assembly_links.findall("ASSEMBLY_LINK"):
                link = node.find("URL_LINK").find("URL").text
                url_links.append(link.replace("ftp:", "http:"))
            for url in url_links:
                filename = url.split('/')[-1]
                urllib.urlretrieve(url, os.path.join(dest_dir, filename))
            return True
    return False
# -----------------------------------------------------------------------------
def get_genome_subdirectory_ranges(genome_acc_list):
    """
    This function generates a list of subdir ranges that can be used to
    organize genome files into multiple subdirs in a way that the location
    of a specific fasta file is easily detectable for the last 3 digits of the
    accession (e.g. JJRO01080032, KK558359). It takes into account LSF cluster
    limitations
    genome_acc_list: A list with all accessions in a particular assembly
    return: A list of integer indexes that will be used as subdirectory names
    """
    max_index = 999  # largest 3 digit number
    subdir_ranges = []
    # construct a list with all the last 3 digits from the assembly accessions
    # BUG FIX: convert the suffixes to int; the old code compared the string
    # suffix to the integer 999, which never behaved as intended (always
    # False under Python 2's mixed-type ordering, TypeError under Python 3)
    # and mixed str/int values in the returned list.
    # NOTE(review): int() drops leading zeros ("032" -> 32); confirm that
    # downstream code formats subdirectory names with zero padding if needed.
    file_indexes = [int(acc[-3:]) for acc in genome_acc_list]
    # sort the list to devise the ranges
    file_idx_sorted = sorted(file_indexes)
    no_files = len(file_idx_sorted)
    index = gc.MAX_ALLOWED_FILES
    while index < no_files:
        subdir_ranges.append(file_idx_sorted.pop(index))
        index = index + gc.MAX_ALLOWED_FILES  # increase by max allowed files
    # append the right most index
    if file_idx_sorted[-1] < max_index:
        subdir_ranges.append(file_idx_sorted[-1])
    else:
        subdir_ranges.append(max_index)
    return subdir_ranges
# -----------------------------------------------------------------------------
# No command line interface: this module is meant to be imported as a library.
if __name__ == '__main__':
    pass
| Rfam/rfam-production | scripts/export/genomes/genome_fetch.py | Python | apache-2.0 | 44,321 | [
"VisIt"
] | 71cc0496f38d0c6fdb8c38009d32a1705f4df8ed4e131fb4fd42fa8333ef0e07 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
This file implements an expression syntax, similar to printf, for adding
ANSI colors to text.
See colorize(), cwrite(), and cprint() for routines that can generate
colored output.
colorize will take a string and replace all color expressions with
ANSI control codes. If the isatty keyword arg is set to False, then
the color expressions will be converted to null strings, and the
returned string will have no color.
cwrite and cprint are equivalent to write() and print() calls in
python, but they colorize their output. If the stream argument is
not supplied, they write to sys.stdout.
Here are some example color expressions:
@r Turn on red coloring
@R Turn on bright red coloring
@*{foo} Bold foo, but don't change text color
@_{bar} Underline bar, but don't change text color
@*b Turn on bold, blue text
@_B Turn on bright blue text with an underline
@. Revert to plain formatting
@*g{green} Print out 'green' in bold, green text, then reset to plain.
@*ggreen@. Print out 'green' in bold, green text, then reset to plain.
The syntax consists of:
color-expr = '@' [style] color-code '{' text '}' | '@.' | '@@'
style = '*' | '_'
color-code = [krgybmcwKRGYBMCW]
text = .*
'@' indicates the start of a color expression. It can be followed
by an optional * or _ that indicates whether the font should be bold or
underlined. If * or _ is not provided, the text will be plain. Then
an optional color code is supplied. This can be [krgybmcw] or [KRGYBMCW],
where the letters map to black(k), red(r), green(g), yellow(y), blue(b),
magenta(m), cyan(c), and white(w). Lowercase letters denote normal ANSI
colors and capital letters denote bright ANSI colors.
Finally, the color expression can be followed by text enclosed in {}. If
braces are present, only the text in braces is colored. If the braces are
NOT present, then just the control codes to enable the color will be output.
The console can be reset later to plain text with '@.'.
To output an @, use '@@'. To output a } inside braces, use '}}'.
"""
import re
import sys
class ColorParseError(Exception):
    """Error raised when a color expression cannot be parsed."""
    def __init__(self, message):
        # delegate message storage to the base Exception
        super(ColorParseError, self).__init__(message)
# Text styles for ansi codes (SGR parameter as a string)
styles = {'*': '1', # bold
          '_': '4', # underline
          None: '0'} # plain
# Dim and bright ansi colors (foreground SGR codes 30-37 / 90-97)
colors = {'k': 30, 'K': 90, # black
          'r': 31, 'R': 91, # red
          'g': 32, 'G': 92, # green
          'y': 33, 'Y': 93, # yellow
          'b': 34, 'B': 94, # blue
          'm': 35, 'M': 95, # magenta
          'c': 36, 'C': 96, # cyan
          'w': 37, 'W': 97} # white
# Regex to be used for color formatting.
# Matches '@@', '@.', or '@' + optional style + optional color letter +
# optional '{text}' body ('}}' escapes a literal '}' inside braces).
color_re = r'@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)'
# Force color even if stdout is not a tty.
_force_color = False
class match_to_ansi(object):
    """Callable that converts a ``color_re`` match into ANSI control codes.

    Instances are intended to be used as the replacement function in
    ``re.sub(color_re, match_to_ansi(color), string)``.
    """
    def __init__(self, color=True):
        # when color is False, escape() emits empty strings so the
        # output is plain text with the color expressions stripped
        self.color = color

    def escape(self, s):
        """Returns a TTY escape sequence for a color"""
        if self.color:
            return "\033[%sm" % s
        else:
            return ''

    def __call__(self, match):
        """Convert a match object generated by color_re into an ansi color code
        This can be used as a handler in re.sub
        """
        style, color, text = match.groups()
        m = match.group(0)

        if m == '@@':
            # escaped '@' -> literal '@'
            return '@'
        elif m == '@.':
            # reset to plain formatting
            return self.escape(0)
        elif m == '@':
            # BUG FIX: this previously *returned* the exception object,
            # silently splicing its repr into the output; raise it instead.
            raise ColorParseError("Incomplete color format: '%s' in '%s'"
                                  % (color, match.string))

        string = styles[style]
        if color:
            if color not in colors:
                raise ColorParseError("invalid color specifier: '%s' in '%s'"
                                      % (color, match.string))
            string += ';' + str(colors[color])

        # when braces were used, color only the enclosed text and reset after
        colored_text = ''
        if text:
            colored_text = text + self.escape(0)

        return self.escape(string) + colored_text
def colorize(string, **kwargs):
    """Expand all color expressions in *string* into ANSI control codes
    and return the result.

    Pass ``color=False`` to strip the expressions instead, producing plain
    text suitable for non-console devices.
    """
    use_color = kwargs.get('color', True)
    handler = match_to_ansi(use_color)
    return re.sub(color_re, handler, string)
def clen(string):
    """Length of *string* with all ANSI escape sequences excluded."""
    visible = re.sub(r'\033[^m]*m', '', string)
    return len(visible)
def cextra(string):
    """Total number of characters taken up by ANSI codes in *string*."""
    codes = re.findall(r'\033[^m]*m', string)
    return sum(len(code) for code in codes)
def cwrite(string, stream=sys.stdout, color=None):
    """Colorize *string* and write it to *stream*.

    color=False writes plain text, color=True always writes colored output,
    and color=None (the default) decides based on ``stream.isatty()`` and
    the module-level force flag.
    """
    use_color = color
    if use_color is None:
        use_color = stream.isatty() or _force_color
    stream.write(colorize(string, color=use_color))
def cprint(string, stream=sys.stdout, color=None):
    """Like cwrite, but appends a trailing newline to the output."""
    cwrite("%s\n" % string, stream, color)
def cescape(string):
    """Double every '@' so the value renders literally under colorize()."""
    text = str(string)
    return text.replace('@', '@@')
class ColorStream(object):
    """File-like wrapper that colorizes everything written through it.

    color=None (the default) decides per-write based on the wrapped
    stream's isatty() and the module force flag; raw=True in a write
    forces color on for that call.
    """
    def __init__(self, stream, color=None):
        self._stream = stream
        self._color = color

    def write(self, string, **kwargs):
        raw = kwargs.get('raw', False)
        raw_write = getattr(self._stream, 'write')

        color = self._color
        if self._color is None:
            if raw:
                color = True
            else:
                color = self._stream.isatty() or _force_color
        raw_write(colorize(string, color=color))

    def writelines(self, sequence, **kwargs):
        raw = kwargs.get('raw', False)
        for string in sequence:
            # BUG FIX: previously called self.write(string, self.color, ...),
            # which failed twice over: write() accepts no positional color
            # argument, and the attribute is named '_color', not 'color'.
            self.write(string, raw=raw)
| jH0ward/psi4 | psi4/driver/util/tty/color.py | Python | lgpl-3.0 | 7,149 | [
"Psi4"
] | 9ba4042cedf537d1a89e0a1bc49058503b1a2fdaaa130b73de3546acbcc95321 |
# Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""File containing the classes that generate parameters and catalogs for shear."""
import galsim
import numpy as np
from . import constants
def makeBuilder(shear_type, obs_type, multiepoch, ps_dir):
    """Return a ShearBuilder appropriate for the given options.

    @param[in] shear_type Shear field type: either "constant" or "variable".
    @param[in] obs_type   Observation type: either "ground" or "space".
    @param[in] multiepoch Multiepoch? True or False
    @param[in] ps_dir     Directory with tabulated iCosmo shear power spectra.
    """
    if shear_type == 'constant':
        return ConstantShearBuilder(obs_type=obs_type, multiepoch=multiepoch)
    if shear_type == 'variable':
        return VariableShearBuilder(ps_dir=ps_dir, obs_type=obs_type,
                                    multiepoch=multiepoch)
    raise ValueError("Invalid shear_type: %s - must be 'constant' or 'variable'" % shear_type)
class ShearBuilder(object):
    """Abstract interface for GREAT3 shear-field generation.

    A concrete ShearBuilder produces the metaparameters that define a shear
    field for a galaxy population and assigns per-galaxy lensing values to a
    catalog.
    """
    def generateFieldParameters(self, rng, field_index):
        """Return a dict of metaparameters for the given field, later passed
        to generateCatalog().

        @param[in] rng         A galsim.BaseDeviate to be used for any random numbers.
        @param[in] field_index Index of the field of images being simulated.
        """
        raise NotImplementedError("ShearBuilder is abstract.")

    def generateSubfieldParameters(self, rng, subfield_index, field_parameters):
        """Return a dict of metaparameters for the given subfield, later
        passed to generateCatalog().

        @param[in] rng              A galsim.BaseDeviate to be used for any random numbers.
        @param[in] subfield_index   Index of patch of sky being simulated.
        @param[in] field_parameters Output from generateFieldParameters().
        """
        raise NotImplementedError("ShearBuilder is abstract.")

    def generateEpochParameters(self, rng, subfield_index, epoch_index):
        """Return a dict of metaparameters for the given epoch, later passed
        to generateCatalog().

        @param[in] rng         A galsim.BaseDeviate to be used for any random numbers.
        @param[in] epoch_index Index of the epoch of a given subfield being simulated.
        """
        raise NotImplementedError("ShearBuilder is abstract.")

    def generateCatalog(self, rng, catalog, parameters, offsets, subfield_index):
        """Fill the g1/g2 (and mu) columns of *catalog* with the lensing
        values to apply at each point.

        @param[in]     rng            A galsim.BaseDeviate to be used for any random numbers.
        @param[in,out] catalog        Structured NumPy array with 'index', 'x', 'y'
                                      already filled; 'g1' and 'g2' are filled here.
        @param[in]     parameters     Metaparameter dict from the generate*Parameters methods.
        @param[in]     offsets        Offset of this subfield w.r.t. the first in the field.
        @param[in]     subfield_index Index of this subfield within the branch, used to
                                      build a unique per-galaxy object ID.
        """
        raise NotImplementedError("ShearBuilder is abstract.")
class ConstantShearBuilder(ShearBuilder):
    """ShearBuilder for constant shear fields.
    We assume that constant shear branches contain a number of fields, each with a randomly oriented
    shear with some minimum and maximum possible value. Shear magnitudes are chosen randomly within
    the unit disk, which emphasizes somewhat larger shear values.
    """
    def __init__(self, obs_type, multiepoch, min_g=1E-2, max_g=5E-2):
        # min_g / max_g bound the shear magnitude |g| drawn for each field
        self.min_g = min_g
        self.max_g = max_g
        self.obs_type = obs_type
        self.multiepoch = multiepoch
    def generateFieldParameters(self, rng, field_index):
        """Generate the constant shear values (two components) for this field."""
        # random position angle; shear is spin-2, hence 2*theta below
        theta = 2.0*rng()*np.pi
        # draw |g| with p(g) proportional to g over [min_g, max_g]
        shear_rng = galsim.DistDeviate(rng, function = lambda g : g,
                                       x_min = self.min_g, x_max = self.max_g)
        g = shear_rng()
        # mu=1: no magnification in constant-shear branches
        return dict(g1=g*np.cos(2.0*theta), g2=g*np.sin(2.0*theta), mu=1.)
    def generateSubfieldParameters(self, rng, subfield_index, field_parameters):
        """Generate the constant shear values (two components) for this subfield. For constant
        shear, these are the same as the shear values for the field to which the subfield
        belongs."""
        return field_parameters
    def generateEpochParameters(self, rng, subfield_index, epoch_index):
        raise NotImplementedError("ConstantShearBuilder makes shear parameters at field level!")
    def generateCatalog(self, rng, catalog, parameters, offsets, subfield_index):
        """Put the shear value for this field into the galaxy catalog.
        For constant shear within the field, the offset of the subfield within the field is not
        relevant, so this argument does nothing.
        """
        xsize = constants.xsize[self.obs_type][self.multiepoch]
        ysize = constants.ysize[self.obs_type][self.multiepoch]
        # map pixel coordinates onto field-grid positions
        # NOTE(review): presumably yields integer grid column/row numbers for
        # galaxies at stamp centers - confirm against the catalog layout.
        catalog["x_field_pos"] = (catalog["x"]+1+0.5*xsize)/xsize-1
        catalog["y_field_pos"] = (catalog["y"]+1+0.5*ysize)/ysize-1
        for record in catalog:
            record["g1"] = parameters["g1"]
            record["g2"] = parameters["g2"]
            record["mu"] = parameters["mu"]
            # unique per-galaxy ID built from subfield index and grid position
            record["ID"] = 1e6*subfield_index + 1e3*record["x_field_pos"] + record["y_field_pos"]
class VariableShearBuilder(ShearBuilder):
"""ShearBuilder for variable shear fields.
For variable shear, we assume that each field has an underlying shear field which is sampled at
a different subset of positions for each subfield. The shears are drawn according to a
cosmological shear power spectrum multiplied by some nuisance function.
"""
# Define some basic parameters related to our grid settings. Note that the real ell_max is not
# as given here, because we use multiple offset grids to access smaller scales / higher ell.
# However, since this is only used to define nuisance functions, it's okay. What it means is
# that the nuisance function will not be very significant at those higher ell, i.e., the power
# spectrum will be closer to cosmological.
ell_min = 360./constants.image_size_deg
ell_max = ell_min*constants.nrows/2.
# We define a pivot/central scale for the nuisance function based on a geometric mean of the
# min, max usable ell values.
ell_piv = np.sqrt(ell_min*ell_max)
# Define some variables we need to construct filenames for tabulated cosmological shear power
# spectra.
infile_pref = 'cosmo-fid.zmed'
zmed_str = ['0.75', '1.00', '1.25']
nzmed = len(zmed_str)
# Choose some values for the shapelets nuisance functions. We go up to order 5 and allow a
# maximum variation in P(k) due to any given shapelets order that is ~10% (max_amp).
max_order = 5
beta = 0.3
max_amp = 0.1
# Set up empty cache for power spectrum parameters and galsim.PowerSpectrum object
cached_ps_tab = None
cached_ps_nuisance = None
cached_ps = None
# Include a multiplicative factor for the shear power spectrum to make the metric more sensitive
# to m. This will be applied to all randomly-constructed shear power spectra.
mult_factor = 2.
    def __init__(self, ps_dir, obs_type, multiepoch):
        """Store the branch configuration.

        @param[in] ps_dir     Directory with tabulated iCosmo shear power spectra.
        @param[in] obs_type   Observation type: "ground" or "space".
        @param[in] multiepoch Multiepoch? True or False.
        """
        self.ps_dir = ps_dir
        self.obs_type = obs_type
        self.multiepoch = multiepoch
    def generateFieldParameters(self, rng, field_index):
        """Generate the parameters that determine the shear power spectrum for this field."""
        # Need to decide on parameters for the cosmological part of the power spectrum first.
        # For this, we just need to choose a single random number to interpolate between our
        # cosmological P(k) that are tabulated from iCosmo.
        #
        # This will just be a random number from a uniform distribution that goes from 0 to N_P-1
        # where N_P is the number that are tabulated.
        # NOTE(review): field_index is unused here; randomness comes solely from rng.
        ps_tab = (self.nzmed-1)*rng()
        # Now, need to choose parameters for the nuisance shapelets functions. It is an array of
        # self.max_order shapelets parameters according to the specified maximum amplitude.
        # Each coefficient is uniform in [-max_amp, +max_amp].
        ps_nuisance = np.zeros(self.max_order)
        for i_order in range(self.max_order):
            ps_nuisance[i_order] = -self.max_amp + 2*self.max_amp*rng()
        return dict(ps_tab=ps_tab, ps_nuisance=ps_nuisance)
    def generateSubfieldParameters(self, rng, subfield_index, field_parameters):
        """Generate the shear power spectrum parameters for this subfield. This is the same as for
        the field, so this function is a no-op."""
        return field_parameters
    def generateEpochParameters(self, rng, subfield_index, epoch_index):
        raise NotImplementedError("VariableShearBuilder makes shear parameters at field level!")
    def generateCatalog(self, rng, catalog, parameters, offsets, subfield_index):
        """For a galaxy catalog with positions included, determine the lensing shear and
        magnification to assign to each galaxy in the catalog.

        :param rng: random deviate passed to buildGrid() when the cached shear
            grid must be (re)built
        :param catalog: galaxy catalog with "x"/"y" positions; the "g1", "g2",
            "mu", "x_field_pos", "y_field_pos" and "ID" entries are written in
            place (assumes a NumPy record array with those fields -- TODO confirm)
        :param parameters: dict with "ps_tab" and "ps_nuisance" as produced by
            the field-level parameter generator
        :param offsets: (x, y) offset of this subfield within the field, as a
            fraction of the galaxy grid separation
        :param subfield_index: subfield index, folded into each galaxy ID
        """
        # We need a cache for a grid of shear values covering the entire field, i.e., including all
        # possible positions in all subfields (modulo sub-pixel offsets from the subfield grid -
        # we're not trying to take those into account). If there is nothing in the cache for this
        # field, then make a new grid and save it in the cache.
        #
        ps_tab = parameters["ps_tab"]
        ps_nuisance = parameters["ps_nuisance"]
        if ps_tab != self.cached_ps_tab:
            # If nothing is cached for this power spectrum, then first we have to define the power
            # spectrum in a way that the galsim lensing engine can use it.
            # Begin by identifying and reading in the proper files for the cosmological part of the
            # power spectrum.  ps_tab is a real number in [0, nzmed-1); its integer
            # part selects the two tabulated spectra to interpolate between.
            file_index = np.floor(ps_tab)
            residual = ps_tab - file_index
            import os
            infile1 = os.path.join(self.ps_dir ,
                                   self.infile_pref + self.zmed_str[int(file_index)]+'.out')
            data1 = np.loadtxt(infile1).transpose()
            ell = data1[0]
            p1 = data1[1]
            infile2 = os.path.join(self.ps_dir ,
                                   self.infile_pref + self.zmed_str[int(file_index)+1]+'.out')
            data2 = np.loadtxt(infile2).transpose()
            p2 = data2[1]
            # Now make a geometric mean to get the cosmological power spectrum.
            # (Weighted by `residual`, so residual=0 gives p1 and residual=1 gives p2.)
            p_cos = (p1**(1.-residual))*(p2**residual)
            p_cos *= self.mult_factor
            # Construct the shapelets nuisance functions, evaluated in
            # log10(ell / pivot scale).
            x = np.log10(ell/self.ell_piv)
            n_ell = len(ell)
            b_values = np.zeros((self.max_order, n_ell))
            for order in range(0, self.max_order):
                b_values[order,:] = self._bn(order, x, self.beta)
            nuisance_func = np.zeros(n_ell)
            for order in range(0, self.max_order):
                nuisance_func += ps_nuisance[order]*b_values[order,:]
            p_use = p_cos*(1.0+nuisance_func)
            # Note: units for ell, p_use are 1/radians and radians^2, respectively.
            # Now, we have arrays we can use to make a power spectrum object with E-mode power
            # only. While we are at it, we cache it and its parameters.
            ps_lookup = galsim.LookupTable(ell, p_use, x_log=True, f_log=True)
            self.cached_ps = galsim.PowerSpectrum(ps_lookup, units = galsim.radians)
            self.cached_ps_tab = ps_tab
            self.cached_ps_nuisance = ps_nuisance
            # Define the grid on which we want to get shears.
            # This is a little tricky: we have a setup for subfield locations within the field that
            # is defined in builder.py function generateSubfieldOffsets(). The first subfield is
            # located at the origin, and to represent it alone, we would need a constants.nrows x
            # constants.ncols grid of shears. But since we subsample by a parameter given as
            # constants.subfield_grid_subsampling, each grid dimension must be larger by that
            # amount.
            if constants.nrows != constants.ncols:
                raise NotImplementedError("Currently variable shear grids require nrows=ncols")
            n_grid = constants.subfield_grid_subsampling * constants.nrows
            grid_spacing = constants.image_size_deg / n_grid
            # Run buildGrid() to get the shears and convergences on this grid. However, we also
            # want to effectively change the value of k_min that is used for the calculation, to get
            # a reasonable shear correlation function on large scales without excessive truncation.
            # We also define a grid center such that the position of the first pixel is (0,0).
            grid_center = 0.5 * (constants.image_size_deg - grid_spacing)
            self.cached_ps.buildGrid(grid_spacing = grid_spacing,
                                     ngrid = n_grid,
                                     units = galsim.degrees,
                                     rng = rng,
                                     center = (grid_center, grid_center),
                                     kmin_factor=3)
            # Now that our cached PS has a grid of shears / convergences, we can use getLensing() to
            # get the quantities we need for a lensing measurement at any position, so this part of
            # the calculation is done.
        # Now get the shears/convergences for each galaxy position in the
        # catalog. This is fastest if done all at once, with one call to getLensing(). And this is
        # actually slightly tricky, because we have to take into account:
        # (1) The position of the galaxy within the subfield.
        # (2) The offset of the subfield with respect to the field.
        # And make sure we've gotten the units right for both of these. We are ignoring centroid
        # shifts of order 1 pixel (max 0.2" for ground data) which can occur within an image.
        #
        # We can define object indices in x, y directions - i.e., make indices that range
        # from 0 to constants.nrows-1.
        xsize = constants.xsize[self.obs_type][self.multiepoch]
        ysize = constants.ysize[self.obs_type][self.multiepoch]
        x_ind = (catalog["x"]+1+0.5*xsize)/xsize-1
        y_ind = (catalog["y"]+1+0.5*ysize)/ysize-1
        # Turn this into (x, y) positions within the subfield, in degrees.
        x_pos = x_ind * constants.image_size_deg / constants.nrows
        y_pos = y_ind * constants.image_size_deg / constants.ncols
        # But now we have to add the subfield offset. These are calculated as a fraction of the
        # separation between galaxies, so we have to convert to degrees.
        x_pos += offsets[0] * constants.image_size_deg / constants.nrows
        y_pos += offsets[1] * constants.image_size_deg / constants.ncols
        catalog["g1"], catalog["g2"], catalog["mu"] = \
            self.cached_ps.getLensing(pos=(x_pos, y_pos), units=galsim.degrees)
        # Previous numbers were in degrees. But now we need to save some numbers for ID generation,
        # which have to be ints. So we will save them in units of subfield grid spacing, i.e.,
        # within a given subfield, galaxies are spaced by constants.subfield_grid_subsampling.
        # Right now x_ind, y_ind are integers (spaced by 1) and offsets[0] and offsets[1] span the
        # range (0, 1/constants.subfield_grid_subsampling), so the line below has
        # constants.subfield_grid_subsampling multiplying both.
        catalog["x_field_pos"] = np.round(
            constants.subfield_grid_subsampling * (offsets[0] + x_ind)).astype(int)
        catalog["y_field_pos"] = np.round(
            constants.subfield_grid_subsampling * (offsets[1] + y_ind)).astype(int)
        # Encode a unique per-galaxy ID from (subfield, x, y) grid coordinates.
        # NOTE(review): 1e6/1e3 are floats, so "ID" is float-valued unless the
        # catalog column truncates it -- confirm against the catalog dtype.
        for record in catalog:
            record["ID"] = 1e6*subfield_index + 1e3*record["x_field_pos"] + record["y_field_pos"]
def _bn(self, n, x, beta):
"""A helper function to compute shapelets functions for a given order n, for specified x and
width beta.
"""
phi_n_x = self._phin(n, x/beta)
return phi_n_x / np.sqrt(beta)
def _hermite(self, n, x):
try:
import scipy.special as spec
# get the H_n function from scipy
hn = spec.hermite(n)
# evaluate it at our x
return hn(x)
except:
# If you don't have scipy, use a simple recursion relation:
if n == 0:
return 1.
else:
hkm1 = 1.
hk = 2.*x
for k in range(1,n):
hk, hkm1 = 2.*x*hk - 2.*k*hkm1, hk
return hk
def _phin(self, n, x):
"""A helper function defining shapelets basis functions at an array of positions x, for
order n."""
import math
hn_x = self._hermite(n,x)
# Put in the exponential factor, and properly normalize it.
phi_n_x = hn_x * np.exp(-(x**2)/2.)
phi_n_x /= np.sqrt((2.**n) * np.sqrt(np.pi) * math.factorial(n))
return phi_n_x
| barnabytprowe/great3-public | great3sims/shear.py | Python | bsd-3-clause | 19,773 | [
"Galaxy"
] | 480e01411f366c3cb1bf71dc2a71e559cdd7fa5ff34f53a3ee8281867037d541 |
#!/usr/bin/env python
# updateStable.py -- merges verified moose-devel revisions into moose-stable
# after checking test results and code coverage.  (Python 2 script.)
import os, sys, string, subprocess, re, socket, getopt
# If hostname equals head_node, this script will run
head_node = 'hpcbuild'
# Moose stable and devel checkout locations
moose_stable = 'https://hpcsc.inl.gov/svn/herd/trunk/moose'
moose_devel = 'https://hpcsc.inl.gov/svn/herd/trunk/devel/moose'
# We exclude these applications:
excluded_applications = set(['r7_moose', 'rattlesnake', 'elk'])
# Comment Syntax Coverage command:
comment_syntax_cmd = [ 'moose/framework/contrib/nsiqcppstyle/nsiqcppstyle', '--quiet', '--basedir=/moose/framework', '-f', 'moose/framework/contrib/nsiqcppstyle/syntax_style', '--output=html', '--url=https://hpcsc.inl.gov/moose/browser/trunk', '-o', 'output.html', 'moose']
# Publish the syntax-coverage report to the Trac server via rsync.
rsync_comment_syntax_cmd = ['/usr/bin/rsync', '-av', '--delete', 'output.html', os.getenv('TRAC_SERVER') + ':/srv/www/ssl/MOOSE/coverage/' ]
_USAGE = """
updateStable.py repo_revision
Where repo_revision is the target merge revision.
"""
def buildList(dir_path):
    """Return the registered application name for `dir_path`.

    If the directory contains a `run_tests` script, extract the value of its
    `app_name = '...'` assignment, keeping only the final path component;
    fall back to the directory's base name when no assignment is found.
    Returns None when there is no `run_tests` script at all (callers filter
    these out with `- set([None])`).
    """
    run_tests_path = os.path.join(dir_path, 'run_tests')
    if os.path.exists(run_tests_path):
        # Context manager guarantees the handle is closed even if read()
        # raises (the original left the file open on error).
        with open(run_tests_path) as run_tests:
            run_tests_contents = run_tests.read()
        try:
            return re.findall(r"app_name\s+=\s+'.*?([^/]*?)'", run_tests_contents, re.M)[0]
        except IndexError:
            return os.path.basename(dir_path)
def buildStatus():
    """Return True when every tested application passed its tests.

    Compares the set of applications found on disk (via buildList) against
    the line-itemed pass list in moose/test_results.log, ignoring the
    excluded applications and the None entries produced by directories
    without a run_tests script.
    """
    tmp_apps = []
    tmp_passed = []
    # Open line itemed list of applications passing their tests
    log_file = open('moose/test_results.log', 'r')
    tmp_passed = string.split(log_file.read(), '\n')
    log_file.close()
    # Remove trailing \n element which creates an empty item
    tmp_passed.pop()
    # Get a list of applications tested, by searching each directory presently containing a run_test application
    for app_dir in os.listdir('.'):
        tmp_apps.append(buildList(os.path.join(os.getcwd(), app_dir)))
    # Now get any applications inside the moose directory (modules, test, unit)
    for app_dir in os.listdir('moose'):
        tmp_apps.append(buildList(os.path.join(os.getcwd(), 'moose', app_dir)))
    # Return boolean if all application tests passed.
    # NOTE: buildList() returns None for directories without run_tests, so
    # set([None]) is subtracted to discard those entries.
    if len(((set(tmp_apps) - excluded_applications) - set(tmp_passed) - set([None]))) != 0:
        print 'Failing tests:', string.join(((set(tmp_apps) - excluded_applications) - set(tmp_passed) - set([None])))
        return False
    else:
        return True
def getCoverage():
    """Generate lcov coverage data for the framework and check the score.

    Captures coverage into raw.info, filters third-party/contrib sources
    into moose.info, then parses the line-coverage percentage from lcov's
    summary output.  Returns True when line coverage exceeds 80%, else False.
    """
    # A list of stuff we don't want to include in code coverage. Add more here if needed (wild cards accepted).
    filter_out = [ 'contrib/mtwist*',
                   '/usr/include*',
                   '*/mpich*/*',
                   '*/libmesh/*',
                   '*/gcc_4.7.2/*',
                   '*/moab/*',
                   '*/tbb/*',
                   '*/petsc*/*',
                   '*/dtk_opt/*',
                   '*/dtk_moab/*'
                   ]
    # Use the same commands from the coverage_html script to generate the raw.info file
    coverage_cmd = [ os.getenv('LCOV_BIN'),
                     '--base-directory', 'moose/framework',
                     '--directory', 'moose/framework/src/',
                     '--capture',
                     '--ignore-errors', 'gcov,source',
                     '--output-file', 'raw.info'
                     ]
    # Put the lcov filtering command together: one '-r raw.info <pattern>'
    # per exclusion pattern, writing the result to moose.info.
    filter_cmd = [os.getenv('LCOV_BIN')]
    for sgl_filter in filter_out:
        filter_cmd.extend(['-r', 'raw.info', sgl_filter])
    filter_cmd.extend(['-o', 'moose.info'])
    # Generate the raw.info
    runCMD(coverage_cmd, True)
    # Generate the moose.info (a filtered list of the actual code coverage we're after)
    coverage_results = runCMD(filter_cmd, True)
    # Extract the percentage from lcov's 'lines......: NN.N% ' summary line.
    coverage_score = coverage_results[(coverage_results.find('lines......: ') + 13):coverage_results.find('% ')]
    # Return the results
    if float(coverage_score) <= 80.0:
        print 'Failed Code Coverage: ' + str(coverage_score)
        return False
    else:
        print 'Succeeded Code Coverage: ' + str(coverage_score)
        return True
def runCMD(cmd_opts, quiet=False):
    """Run `cmd_opts` (argument list) and return its captured stdout.

    Prints the command first.  On a non-zero exit status the captured
    stderr is printed and the whole script exits with status 1.  When
    `quiet` is False the captured stdout is echoed before being returned.
    """
    print 'Running command:', cmd_opts
    a_proc = subprocess.Popen(cmd_opts, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() waits for completion, so poll() below returns the
    # final exit status.
    retstr = a_proc.communicate()
    if not a_proc.poll() == 0:
        print 'Error:', retstr[1]
        sys.exit(1)
    else:
        if not quiet:
            print retstr[0]
        return retstr[0]
def parseLOG(merge_log):
    """Return `merge_log` with Trac ticket-command phrases neutralized.

    Dashed separator lines from `svn log` output are collapsed to ' ----',
    and for each ticket-command phrase (e.g. 'fixes #', 'closes: #') found
    in a line, the character immediately before the '#' is replaced with
    '-' so committing the merged log does not re-trigger ticket actions.
    """
    neutralized = []
    for line in re.split('\n+', merge_log):
        if re.match(r'(^-+)', line) is not None:
            neutralized.append(' ----\n')
            continue
        # Search on a lowercased snapshot of the original line; the snapshot
        # is deliberately not refreshed after a replacement (positions stay
        # valid because each replacement preserves the line length).
        lowered = str.lower(line)
        for phrase in ['close #', 'closed #', 'closes #', 'fix #', 'fixed #',
                       'fixes #', 'references #', 'refs #', 'addresses #',
                       're #', 'see #', 'close: #', 'closed: #', 'closes: #',
                       'fix: #', 'fixed: #', 'fixes: #', 'references: #',
                       'refs: #', 'addresses: #', 're: #', 'see: #']:
            hit = lowered.find(phrase)
            if hit != -1:
                # Overwrite the character just before the '#' with '-'.
                before_hash = hit + len(phrase) - 2
                line = line[:before_hash] + '-' + line[before_hash + 1:]
        neutralized.append(line + '\n')
    return ''.join(neutralized)
def writeLog(message):
    """Write `message` to 'svn_log.log' in the current directory.

    This stages the commit message used for the moose-stable merge.  The
    context manager ensures the file is flushed and closed even if the
    write raises (the original left the handle open on error).
    """
    with open('svn_log.log', 'w') as log_file:
        log_file.write(message)
def clobberRevisions(revision_list):
    """Build a 'svn log' revision-argument string from `revision_list`.

    Each non-empty entry contributes ' -<revision>'; empty strings are
    skipped.  Returns '' for an empty (or all-empty) list.
    """
    return ''.join(' -' + revision for revision in revision_list if revision != '')
def printUsage(message):
    """Print the usage text to stderr and exit.

    Exits via sys.exit with a fatal-error message when `message` is
    non-empty, otherwise exits with status 1.
    """
    sys.stderr.write(_USAGE)
    if not message:
        sys.exit(1)
    sys.exit('\nFATAL ERROR: ' + message)
def process_args():
    """Parse the command line and return the target merge revision.

    Expects one positional argument (the revision to merge up to).  Any
    parse failure reports usage and exits via printUsage().
    """
    try:
        # getopt returns (options, positional_args); this script takes no
        # real options, so the positional list is what we want.
        options, positional = getopt.getopt(sys.argv[1:], '', ['help'])
    except getopt.GetoptError:
        printUsage('Invalid arguments.')
    if not positional:
        printUsage('No options specified')
    # Narrowed from a bare `except:`, which also caught the SystemExit
    # raised by printUsage() inside the try block and printed usage twice.
    try:
        if positional[0] == '':
            printUsage('Invalid arguments.')
    except IndexError:
        printUsage('Invalid arguments.')
    return positional[0]
if __name__ == '__main__':
    # Only run the merge when the STABLE environment variable is set
    # (i.e., on the designated build master).
    if os.getenv('STABLE'):
        # Publish the comment-syntax coverage report first.
        runCMD(comment_syntax_cmd)
        runCMD(rsync_comment_syntax_cmd)
        arg_revision = process_args()
        coverage_status = getCoverage()
        if buildStatus() and coverage_status:
            # Checking out moose-stable
            checkout_moose_stable = [os.getenv('SVN_BIN'), 'co', '--quiet', moose_stable, 'moose-stable']
            runCMD(checkout_moose_stable)
            # Get Merged version numbers
            print 'Get revisions merged...'
            get_merged_revisions = [os.getenv('SVN_BIN'), 'mergeinfo', moose_devel, '--show-revs', 'eligible', 'moose-stable']
            log_versions = runCMD(get_merged_revisions)
            # Group the revisions together and build our 'svn log -r' command.
            # Only revisions at or below the requested target (and not r1)
            # are included.
            get_revision_logs = [os.getenv('SVN_BIN'), 'log' ]
            merged_revisions = string.split(log_versions, '\n')
            if merged_revisions[0] != '':
                for revision in merged_revisions:
                    if revision != '' and int(revision.split('r')[1]) <= int(arg_revision) and int(revision.split('r')[1]) != 1:
                        get_revision_logs.append('-' + revision)
            else:
                print 'I detect no merge information... strange.'
                sys.exit(1)
            # Get each revision log
            print 'Getting each log for revision merged...'
            get_revision_logs.append(moose_devel)
            log_data = runCMD(get_revision_logs)
            # Parse through and write the log file without any command language present
            writeLog(parseLOG(log_data))
            # Merge our local created moose-stable with moose-trunk
            print 'Merging moose-stable from moose-devel only to the revision at which bitten was commanded to checkout'
            merge_moose_trunk = [os.getenv('SVN_BIN'), 'merge', '-r1:' + str(arg_revision), moose_devel, 'moose-stable' ]
            runCMD(merge_moose_trunk)
            # Commit the changes!
            print 'Commiting merged moose-stable'
            commit_moose_stable = [os.getenv('SVN_BIN'), 'ci', '--username', 'moosetest', '-F', 'svn_log.log', 'moose-stable']
            runCMD(commit_moose_stable)
        else:
            # This is the system 'head_node', but buildStatus() returned False... so exit as an error
            sys.exit(1)
    else:
        # This is not one of the systems in 'head_node', so exit normally
        sys.exit(0)
| shanestafford/moose | framework/scripts/updateStable.py | Python | lgpl-2.1 | 8,251 | [
"Elk",
"MOOSE"
] | 2e5328884660932153b74637758ccb6362a6b90b3e1d19b904abb0da8ffd9ea2 |
#!/opt/moose/miniconda/bin/python
# Plot strain-vs-time curves from the cyclic elastic-plastic MOOSE test.
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import pylab
# Read the postprocessor CSV; field names come from the header row.
data = np.genfromtxt('./elas_plas_nl1_cycle_out.csv', delimiter=',', names=True)
fig = plt.figure()
ax1 = fig.add_subplot(111)
mpl.rcParams.update({'font.size': 10})
ax1.set_xlabel("Time")
ax1.set_ylabel("Strain (%)")
# One curve per strain measure, each with its own color.
ax1.plot(data['time'], data['eff_plastic_strain'], label='Effective Plastic Strain', color='k')
ax1.plot(data['time'], data['tot_strain_yy'], label='Total YY Strain', color='b')
ax1.plot(data['time'], data['pl_strain_yy'], label='Plastic YY Strain', color='r')
ax1.plot(data['time'], data['el_strain_yy'], label='Elastic YY Strain', color='g')
# Scientific notation on the y axis for small strain magnitudes.
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
leg = ax1.legend(loc='best')
# Save the figure, then display it interactively.
plt.savefig('plot_cycled_strain.pdf')
plt.show(fig)
| Chuban/moose | modules/combined/test/tests/inelastic_strain/elas_plas/plot_cycled_strain.py | Python | lgpl-2.1 | 866 | [
"MOOSE"
] | ed385570b422f9a91699e606978f6404813ffb403d39f623dedae1c11414d611 |
# Grouped bar chart comparing SOAP vs REST(XML) vs REST(JSON) response
# times (seconds) across three mobile devices, rendered with offline plotly.
import plotly.offline as py
import plotly.graph_objs as go
# One bar series per protocol; x values are the devices under test.
soap = go.Bar(
    x=['iPad 2', 'iPhone 4S', 'Samsung Galaxy S4'],
    y=[5.109, 9.694, 1.465],
    name="SOAP")
rest_xml = go.Bar(
    x=['iPad 2', 'iPhone 4S', 'Samsung Galaxy S4'],
    y=[3.301, 4.973, 1.019],
    name="REST (XML)")
rest_json = go.Bar(
    x=['iPad 2', 'iPhone 4S', 'Samsung Galaxy S4'],
    y=[.187, .320, .158],
    name="REST (JSON)")
data = [soap, rest_xml, rest_json]
# Transparent backgrounds so the chart embeds cleanly in slides/docs.
layout = go.Layout(
    title='SOAP vs REST (XML) vs REST (JSON)',
    yaxis=dict(
        title='segundos',
        titlefont=dict(
            family='Courier New, monospace',
            size=18,
            color='#7f7f7f'
        )
    ),
    paper_bgcolor='rgba(0,0,0,0)',
    plot_bgcolor='rgba(0,0,0,0)',
    width=1000,
    height=642
)
# Writes and opens an HTML file named after `filename`.
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='SOAP-vs-REST')
| zgsolucoes/Treinamento | 06-Microservices/RESTful/fig/source/SOAP-vs-REST.py | Python | apache-2.0 | 883 | [
"Galaxy"
] | 183c3d1b347ac7d16a58320f3918952112e4357ac68154b260920cbb9ebffa2c |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import traceback
import os
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".gui.plug")
#-------------------------------------------------------------------------
#
# GTK modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Pango
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
ngettext = glocale.translation.ngettext # else "nearby" comments are ignored
from ..managedwindow import ManagedWindow
from gramps.gen.errors import UnavailableError, WindowActiveError
from gramps.gen.plug import PluginRegister, PTYPE_STR, load_addon_file
from ..utils import open_file_with_default_application
from ..pluginmanager import GuiPluginManager
from . import tool
from ._guioptions import add_gui_options
from ..dialog import InfoDialog, OkDialog
from ..editors import EditPerson
from ..glade import Glade
from ..listmodel import ListModel, NOSORT, TOGGLE
from gramps.gen.const import URL_WIKISTRING, USER_HOME, WIKI_EXTRAPLUGINS_RAWDATA
from gramps.gen.config import config
from ..widgets.progressdialog import (LongOpStatus, ProgressMonitor,
GtkProgressDialog)
def display_message(message):
    """Default message callback: echo `message` to standard output."""
    print(message)
RELOAD = 777 # A custom Gtk response_type for the Reload button
#-------------------------------------------------------------------------
#
# PluginStatus: overview of all plugins
#
#-------------------------------------------------------------------------
class PluginStatus(ManagedWindow):
"""Displays a dialog showing the status of loaded plugins"""
HIDDEN = '<span color="red">%s</span>' % _('Hidden')
AVAILABLE = '<span weight="bold" color="blue">%s</span>'\
% _('Visible')
    def __init__(self, dbstate, uistate, track=[]):
        """Build the plugin manager dialog with its notebook of plugin views.

        :param dbstate: database state, kept for plugin registration callbacks
        :param uistate: display state; the dialog is transient for its window
        :param track: ManagedWindow tracking list.  NOTE(review): mutable
            default argument is shared across calls; presumably ManagedWindow
            does not mutate it -- confirm before relying on that.
        """
        self.dbstate = dbstate
        self.__uistate = uistate
        self.title = _("Plugin Manager")
        ManagedWindow.__init__(self, uistate, track,
                               self.__class__)
        self.__pmgr = GuiPluginManager.get_instance()
        self.__preg = PluginRegister.get_instance()
        dialog = Gtk.Dialog(title="", transient_for=uistate.window,
                            destroy_with_parent=True)
        dialog.add_button(_('_Close'), Gtk.ResponseType.CLOSE)
        self.set_window(dialog, None, self.title)
        self.setup_configs('interface.pluginstatus', 750, 400)
        self.window.connect('response', self.__on_dialog_button)
        notebook = Gtk.Notebook()
        #first page with all registered plugins
        vbox_reg = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        scrolled_window_reg = Gtk.ScrolledWindow()
        self.list_reg = Gtk.TreeView()
        # model: plugintype, hidden, pluginname, plugindescr, pluginid
        self.model_reg = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING,
                GObject.TYPE_STRING, GObject.TYPE_STRING, GObject.TYPE_STRING)
        self.selection_reg = self.list_reg.get_selection()
        self.list_reg.set_model(self.model_reg)
        self.list_reg.connect('button-press-event', self.button_press_reg)
        col0_reg = Gtk.TreeViewColumn(_('Type'), Gtk.CellRendererText(), text=0)
        col0_reg.set_sort_column_id(0)
        col0_reg.set_resizable(True)
        self.list_reg.append_column(col0_reg)
        # Status column uses markup: colored Hidden/Visible spans.
        col = Gtk.TreeViewColumn(_('Status'), Gtk.CellRendererText(), markup=1)
        col.set_sort_column_id(1)
        self.list_reg.append_column(col)
        col2_reg = Gtk.TreeViewColumn(_('Name'), Gtk.CellRendererText(), text=2)
        col2_reg.set_sort_column_id(2)
        col2_reg.set_resizable(True)
        self.list_reg.append_column(col2_reg)
        col = Gtk.TreeViewColumn(_('Description'), Gtk.CellRendererText(), text=3)
        col.set_sort_column_id(3)
        col.set_resizable(True)
        self.list_reg.append_column(col)
        self.list_reg.set_search_column(2)
        scrolled_window_reg.add(self.list_reg)
        vbox_reg.pack_start(scrolled_window_reg, True, True, 0)
        hbutbox = Gtk.ButtonBox()
        hbutbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
        self.__info_btn = Gtk.Button(label=_("Info"))
        hbutbox.add(self.__info_btn)
        self.__info_btn.connect('clicked', self.__info, self.list_reg, 4) # id_col
        self.__hide_btn = Gtk.Button(label=_("Hide/Unhide"))
        hbutbox.add(self.__hide_btn)
        self.__hide_btn.connect('clicked', self.__hide,
                                self.list_reg, 4, 1) # list, id_col, hide_col
        # Edit/Load buttons are developer tools, only present in debug mode.
        if __debug__:
            self.__edit_btn = Gtk.Button(label=_("Edit"))
            hbutbox.add(self.__edit_btn)
            self.__edit_btn.connect('clicked', self.__edit, self.list_reg, 4) # id_col
            self.__load_btn = Gtk.Button(label=_("Load"))
            hbutbox.add(self.__load_btn)
            self.__load_btn.connect('clicked', self.__load, self.list_reg, 4) # id_col
        vbox_reg.pack_start(hbutbox, False, False, 0)
        notebook.append_page(vbox_reg,
                             tab_label=Gtk.Label(label=_('Registered Plugins')))
        #second page with loaded plugins
        vbox_loaded = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        scrolled_window = Gtk.ScrolledWindow()
        self.list = Gtk.TreeView()
        # model: loaded-markup, filename, message, exception info (or None),
        # plugin id, hidden-markup
        self.model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING,
                                   GObject.TYPE_STRING, object,
                                   GObject.TYPE_STRING, GObject.TYPE_STRING)
        self.selection = self.list.get_selection()
        self.list.set_model(self.model)
        self.list.connect('button-press-event', self.button_press)
        self.list.connect('cursor-changed', self.cursor_changed)
        col = Gtk.TreeViewColumn(_('Loaded'), Gtk.CellRendererText(),
                                 markup=0)
        col.set_sort_column_id(0)
        col.set_resizable(True)
        self.list.append_column(col)
        col1 = Gtk.TreeViewColumn(_('File'), Gtk.CellRendererText(),
                                  text=1)
        col1.set_sort_column_id(1)
        col1.set_resizable(True)
        self.list.append_column(col1)
        col = Gtk.TreeViewColumn(_('Status'), Gtk.CellRendererText(),
                                 markup=5)
        col.set_sort_column_id(5)
        self.list.append_column(col)
        col2 = Gtk.TreeViewColumn(_('Message'), Gtk.CellRendererText(), text=2)
        col2.set_sort_column_id(2)
        col2.set_resizable(True)
        self.list.append_column(col2)
        self.list.set_search_column(1)
        scrolled_window.add(self.list)
        vbox_loaded.pack_start(scrolled_window, True, True, 0)
        hbutbox = Gtk.ButtonBox()
        hbutbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
        self.__info_btn = Gtk.Button(label=_("Info"))
        hbutbox.add(self.__info_btn)
        self.__info_btn.connect('clicked', self.__info, self.list, 4) # id_col
        self.__hide_btn = Gtk.Button(label=_("Hide/Unhide"))
        hbutbox.add(self.__hide_btn)
        self.__hide_btn.connect('clicked', self.__hide,
                                self.list, 4, 5) # list, id_col, hide_col
        if __debug__:
            self.__edit_btn = Gtk.Button(label=_("Edit"))
            hbutbox.add(self.__edit_btn)
            self.__edit_btn.connect('clicked', self.__edit, self.list, 4) # id_col
            # Load starts disabled; cursor_changed() enables it for rows
            # that carry exception data.
            self.__load_btn = Gtk.Button(label=_("Load"))
            self.__load_btn.set_sensitive(False)
            hbutbox.add(self.__load_btn)
            self.__load_btn.connect('clicked', self.__load, self.list, 4) # id_col
        vbox_loaded.pack_start(hbutbox, False, False, 5)
        notebook.append_page(vbox_loaded,
                             tab_label=Gtk.Label(label=_('Loaded Plugins')))
        #third page with method to install plugin
        install_page = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        scrolled_window = Gtk.ScrolledWindow()
        self.addon_list = Gtk.TreeView()
        # model: help_name, name, ptype, image, desc, use, rating, contact, download, url
        self.addon_model = Gtk.ListStore(GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING)
        self.addon_list.set_model(self.addon_model)
        #self.addon_list.connect('button-press-event', self.button_press)
        col = Gtk.TreeViewColumn(_('Addon Name'), Gtk.CellRendererText(),
                                 text=1)
        col.set_sort_column_id(1)
        self.addon_list.append_column(col)
        col = Gtk.TreeViewColumn(_('Type'), Gtk.CellRendererText(),
                                 text=2)
        col.set_sort_column_id(2)
        self.addon_list.append_column(col)
        col = Gtk.TreeViewColumn(_('Description'), Gtk.CellRendererText(),
                                 text=4)
        col.set_sort_column_id(4)
        self.addon_list.append_column(col)
        # Selecting an addon copies its URL into the path entry below.
        self.addon_list.connect('cursor-changed', self.button_press_addon)
        install_row = Gtk.Box()
        install_row.pack_start(Gtk.Label(label=_("Path to Addon:")), False, True, 0)
        self.install_addon_path = Gtk.Entry()
        button = Gtk.Button()
        img = Gtk.Image()
        img.set_from_icon_name('document-open', Gtk.IconSize.BUTTON)
        button.add(img)
        button.connect('clicked', self.__select_file)
        install_row.pack_start(self.install_addon_path, True, True, 0)
        install_row.pack_start(button, False, False, 0)
        scrolled_window.add(self.addon_list)
        install_page.pack_start(scrolled_window, True, True, 0)
        #add some space under the scrollbar
        install_page.pack_start(Gtk.Label(label=''), False, False, 0)
        #path to addon path line
        install_page.pack_start(install_row, False, False, 0)
        hbutbox = Gtk.ButtonBox()
        hbutbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
        self.__add_btn = Gtk.Button(label=_("Install Addon"))
        hbutbox.add(self.__add_btn)
        self.__add_btn.connect('clicked', self.__get_addon_top)
        self.__add_all_btn = Gtk.Button(label=_("Install All Addons"))
        hbutbox.add(self.__add_all_btn)
        self.__add_all_btn.connect('clicked', self.__get_all_addons)
        self.__refresh_btn = Gtk.Button(label=_("Refresh Addon List"))
        hbutbox.add(self.__refresh_btn)
        self.__refresh_btn.connect('clicked', self.__refresh_addon_list)
        install_page.pack_start(hbutbox, False, True, 5)
        # NOTE(review): the install page is built but deliberately not added
        # to the notebook (the append_page call below is commented out).
        # notebook.append_page(install_page,
        #                      tab_label=Gtk.Label(label=_('Install Addons')))
        #add the notebook to the window
        self.window.get_content_area().pack_start(notebook, True, True, 0)
        if __debug__:
            # Only show the "Reload" button when in debug mode
            # (without -O on the command line)
            self.window.add_button(_("Reload"), RELOAD)
        #obtain hidden plugins from the pluginmanager
        self.hidden = self.__pmgr.get_hidden_plugin_ids()
        self.window.show_all()
        self.__populate_lists()
        self.list_reg.columns_autosize()
def __on_dialog_button(self, dialog, response_id):
if response_id == Gtk.ResponseType.CLOSE:
self.close(dialog)
else: # response_id == RELOAD
self.__reload(dialog)
    def __refresh_addon_list(self, obj):
        """
        Reloads the addons from the wiki into the list.

        Fetches the raw wiki-table data, parses it row by row, keeps only
        entries whose download resolves to a .zip/.tar.gz/.tgz URL, and
        rewrites both self.addon_model and the 'plugin.addonplugins' config
        list.
        """
        from urllib.request import urlopen
        from ..utils import ProgressMeter
        URL = "%s%s" % (URL_WIKISTRING, WIKI_EXTRAPLUGINS_RAWDATA)
        # NOTE(review): bare except hides the real network error; also,
        # urlopen() yields bytes in Python 3 while the parsing below compares
        # against str -- confirm this path still works as intended.
        try:
            fp = urlopen(URL)
        except:
            print("Error: cannot open %s" % URL)
            return
        pm = ProgressMeter(_("Refreshing Addon List"),
                           parent=self.uistate.window)
        pm.set_pass(header=_("Reading gramps-project.org..."))
        # Tiny state machine over the wiki-table markup: '|-'/'|}' end a row,
        # '|' lines inside a row are cell values.
        state = "read"
        rows = []
        row = []
        lines = fp.readlines()
        pm.set_pass(total=len(lines), header=_("Reading gramps-project.org..."))
        for line in lines:
            pm.step()
            if line.startswith("|-") or line.startswith("|}"):
                if row != []:
                    rows.append(row)
                state = "row"
                row = []
            elif state == "row":
                if line.startswith("|"):
                    row.append(line[1:].strip())
                else:
                    state = "read"
        fp.close()
        # Sort by (name cell, help cell) for a stable display order.
        rows.sort(key=lambda row: (row[1], row[0]))
        self.addon_model.clear()
        # clear the config list:
        config.get('plugin.addonplugins')[:] = []
        pm.set_pass(total=len(rows), header=_("Checking addon..."))
        for row in rows:
            pm.step()
            try:
                # from wiki: expected 8-cell row; malformed rows are skipped
                help_name, ptype, image, desc, use, rating, contact, download = row
            except:
                continue
            # Extract the plugin name (and help URL when present) from the
            # wiki link syntax: [[page|name]] or [url name].
            help_url = _("Unknown Help URL")
            if help_name.startswith("[[") and help_name.endswith("]]"):
                name = help_name[2:-2]
                if "|" in name:
                    help_url, name = name.split("|", 1)
            elif help_name.startswith("[") and help_name.endswith("]"):
                name = help_name[1:-1]
                if " " in name:
                    help_url, name = name.split(" ", 1)
            else:
                name = help_name
            # Resolve the download cell to a direct archive URL.
            url = _("Unknown URL")
            if download.startswith("[[") and download.endswith("]]"):
                # Not directly possible to get the URL:
                url = download[2:-2]
                if "|" in url:
                    url, text = url.split("|", 1)
                # need to get a page that says where it is:
                fp = urlopen("%s%s%s" % (URL_WIKISTRING, url,
                                         "&action=edit&externaledit=true&mode=file"))
                for line in fp:
                    if line.startswith("URL="):
                        junk, url = line.split("=", 1)
                        break
                fp.close()
            elif download.startswith("[") and download.endswith("]"):
                url = download[1:-1]
                if " " in url:
                    url, text = url.split(" ", 1)
            if (url.endswith(".zip") or
                url.endswith(".ZIP") or
                url.endswith(".tar.gz") or
                url.endswith(".tgz")):
                # Then this is ok:
                self.addon_model.append(row=[help_name, name, ptype, image, desc, use,
                                             rating, contact, download, url])
                config.get('plugin.addonplugins').append([help_name, name, ptype, image, desc, use,
                                                          rating, contact, download, url])
        pm.close()
        config.save()
    def __get_all_addons(self, obj):
        """
        Get all addons from the wiki and install them.

        Iterates the addon model, installs each addon from its URL, then
        re-registers plugins and rebuilds both plugin views.
        """
        from ..utils import ProgressMeter
        pm = ProgressMeter(
            _("Install all Addons"), _("Installing..."), message_area=True,
            parent=self.uistate.window)
        pm.set_pass(total=len(self.addon_model))
        # NOTE(review): `errors` is collected nowhere and never used.
        errors = []
        for row in self.addon_model:
            pm.step()
            (help_name, name, ptype, image, desc, use, rating, contact,
             download, url) = row
            load_addon_file(url, callback=pm.append_message)
        # Re-register all plugins so the new addons become visible.
        self.uistate.viewmanager.do_reg_plugins(self.dbstate, self.uistate)
        pm.message_area_ok.set_sensitive(True)
        self.__rebuild_load_list()
        self.__rebuild_reg_list()
    def __get_addon_top(self, obj):
        """
        Toplevel method to get an addon.

        Wraps __get_addon() in a two-step progress meter whose message area
        collects the installer's status messages.
        """
        from ..utils import ProgressMeter
        pm = ProgressMeter(
            _("Installing Addon"), message_area=True,
            parent=self.uistate.window)
        pm.set_pass(total=2, header=_("Reading gramps-project.org..."))
        pm.step()
        self.__get_addon(obj, callback=pm.append_message)
        pm.step()
        pm.message_area_ok.set_sensitive(True)
    def __get_addon(self, obj, callback=display_message):
        """
        Get an addon from the wiki or file system and install it.

        Installs the archive whose path/URL is in the install entry, then
        re-registers plugins and rebuilds both plugin views.  `callback`
        receives the installer's progress/status messages.
        """
        path = self.install_addon_path.get_text()
        load_addon_file(path, callback)
        self.uistate.viewmanager.do_reg_plugins(self.dbstate, self.uistate)
        self.__rebuild_load_list()
        self.__rebuild_reg_list()
def __select_file(self, obj):
"""
Select a file from the file system.
"""
fcd = Gtk.FileChooserDialog(title=_("Load Addon"),
transient_for=self.__uistate.window)
fcd.add_buttons(_('_Cancel'), Gtk.ResponseType.CANCEL,
_('_Open'), Gtk.ResponseType.OK)
name = self.install_addon_path.get_text()
dir = os.path.dirname(name)
if not os.path.isdir(dir):
dir = USER_HOME
name = ''
elif not os.path.isfile(name):
name = ''
fcd.set_current_folder(dir)
if name:
fcd.set_filename(name)
status = fcd.run()
if status == Gtk.ResponseType.OK:
path = fcd.get_filename()
if path:
self.install_addon_path.set_text(path)
fcd.destroy()
def __populate_lists(self):
""" Build the lists of plugins """
self.__populate_load_list()
self.__populate_reg_list()
self.__populate_addon_list()
def __populate_addon_list(self):
"""
Build the list of addons from the config setting.
"""
self.addon_model.clear()
for row in config.get('plugin.addonplugins'):
try:
help_name, name, ptype, image, desc, use, rating, contact, download, url = row
except:
continue
self.addon_model.append(row=[help_name, name, ptype, image, desc, use,
rating, contact, download, url])
    def __populate_load_list(self):
        """ Build list of loaded plugins.

        Failed plugins come first (Unavailable in blue, Fail in red with the
        exception info stored in the model for the traceback viewer), then
        the successfully loaded plugins sorted by filename/plugin name.
        """
        fail_list = self.__pmgr.get_fail_list()
        for i in fail_list:
            # i = (filename, (exception-type, exception, traceback), pdata)
            err = i[1][0]
            pdata = i[2]
            hidden = pdata.id in self.hidden
            if hidden:
                hiddenstr = self.HIDDEN
            else:
                hiddenstr = self.AVAILABLE
            if err == UnavailableError:
                self.model.append(row=[
                    '<span color="blue">%s</span>' % _('Unavailable'),
                    i[0], str(i[1][1]), None, pdata.id, hiddenstr])
            else:
                self.model.append(row=[
                    '<span weight="bold" color="red">%s</span>' % _('Fail'),
                    i[0], str(i[1][1]), i[1], pdata.id, hiddenstr])
        success_list = sorted(self.__pmgr.get_success_list(),
                              key=lambda x: (x[0], x[2]._get_name()))
        for i in success_list:
            # i = (filename, module, pdata)
            pdata = i[2]
            # NOTE(review): `modname` is assigned but never used.
            modname = i[1].__name__
            hidden = pdata.id in self.hidden
            if hidden:
                hiddenstr = self.HIDDEN
            else:
                hiddenstr = self.AVAILABLE
            self.model.append(row=[
                '<span weight="bold" color="#267726">%s</span>' % _("OK"),
                i[0], pdata.description, None, pdata.id, hiddenstr])
def __populate_reg_list(self):
    """ Build list of registered plugins, grouped by plugin type."""
    for ptype, typestr in PTYPE_STR.items():
        rows = []
        for pdata in self.__preg.type_plugins(ptype):
            # model: plugintype, hidden, pluginname, plugindescr, pluginid
            status = self.HIDDEN if pdata.id in self.hidden else self.AVAILABLE
            rows.append([typestr, status, pdata.name,
                         pdata.description, pdata.id])
        for row in sorted(rows):
            self.model_reg.append(row)
def __rebuild_load_list(self):
    # Refresh the loaded-plugins tab from scratch (e.g. after a reload).
    self.model.clear()
    self.__populate_load_list()
def __rebuild_reg_list(self):
    # Refresh the registered-plugins tab from scratch.
    self.model_reg.clear()
    self.__populate_reg_list()
def cursor_changed(self, obj):
    # Enable the Load button only when the selected row carries failure
    # data (column 3 is non-None for 'Fail' rows).
    # NOTE(review): the whole body is guarded by __debug__, so under
    # "python -O" the button's sensitivity is never updated -- confirm
    # this is intentional.
    if __debug__:
        selection = obj.get_selection()
        if selection:
            model, node = selection.get_selected()
            if node:
                data = model.get_value(node, 3)
                self.__load_btn.set_sensitive(data is not None)
def button_press(self, obj, event):
    """ Callback function from the user clicking on a line """
    # Double left-click: open a PluginTrace window showing the stored
    # exception triple (column 3) for the selected failed plugin.
    if (event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS
        and event.button == 1):
        model, node = self.selection.get_selected()
        data = model.get_value(node, 3)
        name = model.get_value(node, 1)
        if data:
            PluginTrace(self.uistate, [], data, name)
def button_press_reg(self, obj, event):
    """ Callback function from the user clicking on a line in reg plugin
    """
    # Double left-click acts like the "Info" button: column 4 holds the
    # plugin id in the registered-plugins model.
    if (event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS
        and event.button == 1):
        self.__info(obj, self.list_reg, 4)
def button_press_addon(self, obj):
    """ Callback function from the user clicking on a line in the addon
    list: copy the addon's URL into the install-path entry.
    """
    selection = self.addon_list.get_selection()
    if selection:
        model, node = selection.get_selected()
        if node:
            # Column 9 of the addon model holds the download URL.
            url = model.get_value(node, 9)
            self.install_addon_path.set_text(url)
def build_menu_names(self, obj):
    # ManagedWindow hook: (window title, menu label).
    return (self.title, "")
def __reload(self, obj):
    """ Callback function from the "Reload" button """
    # Re-import every plugin, then rebuild both list tabs to match.
    self.__pmgr.reload_plugins()
    self.__rebuild_load_list()
    self.__rebuild_reg_list()
def __info(self, obj, list_obj, id_col):
    """ Callback function from the "Info" button: pop up a dialog with
    the details of the plugin selected in `list_obj` (its id is read
    from column `id_col`).
    """
    selection = list_obj.get_selection()
    model, node = selection.get_selected()
    if not node:
        return
    plugin_id = model.get_value(node, id_col)
    pdata = self.__preg.get_plugin(plugin_id)
    if not pdata:
        # Bail out before touching pdata attributes.  The original code
        # dereferenced pdata (ptype, authors, ...) first and only then
        # tested `if pdata:`, so a missing plugin raised AttributeError.
        return
    typestr = pdata.ptype
    auth = ' - '.join(pdata.authors)
    email = ' - '.join(pdata.authors_email)
    # Truncate long author/email lists for display.
    if len(auth) > 60:
        auth = auth[:60] + '...'
    if len(email) > 60:
        email = email[:60] + '...'
    infotxt = """%(plugnam)s: %(name)s [%(typestr)s]

%(plugdes)s: %(descr)s
%(plugver)s: %(version)s
%(plugaut)s: %(authors)s
%(plugmel)s: %(email)s
%(plugfil)s: %(fname)s
%(plugpat)s: %(fpath)s
""" % {
        'name': pdata.name,
        'typestr': typestr,
        'descr': pdata.description,
        'version': pdata.version,
        'authors': auth,
        'email': email,
        'fname': pdata.fname,
        'fpath': pdata.fpath,
        'plugnam': _("Plugin name"),
        'plugdes': _("Description"),
        'plugver': _("Version"),
        'plugaut': _("Authors"),
        'plugmel': _("Email"),
        'plugfil': _("Filename"),
        'plugpat': _("Location"),
    }
    InfoDialog(_('Detailed Info'), infotxt,
               parent=self.window)
def __hide(self, obj, list_obj, id_col, hide_col):
    """ Callback function from the "Hide" button: toggle the hidden
    state of the selected plugin and update the list cell.
    """
    selection = list_obj.get_selection()
    model, node = selection.get_selected()
    if not node:
        return
    plugin_id = model.get_value(node, id_col)
    currently_hidden = plugin_id in self.hidden
    if currently_hidden:
        # Unhide the plugin again.
        self.hidden.remove(plugin_id)
        model.set_value(node, hide_col, self.AVAILABLE)
        self.__pmgr.unhide_plugin(plugin_id)
    else:
        # Hide the plugin.
        self.hidden.add(plugin_id)
        model.set_value(node, hide_col, self.HIDDEN)
        self.__pmgr.hide_plugin(plugin_id)
def __load(self, obj, list_obj, id_col):
    """ Callback function from the "Load" button: load the selected
    plugin and refresh the loaded-plugins list.
    """
    selection = list_obj.get_selection()
    model, node = selection.get_selected()
    if not node:
        return
    plugin_id = model.get_value(node, id_col)
    self.__pmgr.load_plugin(self.__preg.get_plugin(plugin_id))
    self.__rebuild_load_list()
def __edit(self, obj, list_obj, id_col):
    """ Callback function from the "Edit" button (the original
    docstring said "Load", a copy-paste slip): open the selected
    plugin's source file in the default application.
    """
    selection = list_obj.get_selection()
    model, node = selection.get_selected()
    if not node:
        return
    id = model.get_value(node, id_col)
    pdata = self.__preg.get_plugin(id)
    if pdata.fpath and pdata.fname:
        open_file_with_default_application(
            os.path.join(pdata.fpath, pdata.fname),
            self.uistate)
#-------------------------------------------------------------------------
#
# Details for an individual plugin that failed
#
#-------------------------------------------------------------------------
class PluginTrace(ManagedWindow):
    """Displays a dialog showing the traceback of one plugin that
    failed to load (the original docstring was copied from the status
    window)."""
    def __init__(self, uistate, track, data, name):
        # data is the (exception-type, exception, traceback) triple
        # stored in column 3 of the loaded-plugins model.
        self.name = name
        title = _("%(str1)s: %(str2)s"
                 ) % {'str1': _("Plugin Error"), 'str2': name}
        ManagedWindow.__init__(self, uistate, track, self)
        dlg = Gtk.Dialog(title="", transient_for=uistate.window,
                         destroy_with_parent=True)
        # NOTE(review): stray trailing comma makes this line a 1-tuple
        # expression; harmless but probably unintended.
        dlg.add_button(_('_Close'), Gtk.ResponseType.CLOSE),
        self.set_window(dlg, None, title)
        self.setup_configs('interface.plugintrace', 600, 400)
        self.window.connect('response', self.close)
        scrolled_window = Gtk.ScrolledWindow()
        scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.text = Gtk.TextView()
        scrolled_window.add(self.text)
        # Render the full formatted traceback into the text view.
        self.text.get_buffer().set_text(
            "".join(traceback.format_exception(data[0],data[1],data[2])))
        self.window.get_content_area().add(scrolled_window)
        self.window.show_all()
    def build_menu_names(self, obj):
        # ManagedWindow hook: (window title, menu label).
        return (self.name, None)
#-------------------------------------------------------------------------
#
# Classes for tools
#
#-------------------------------------------------------------------------
class LinkTag(Gtk.TextTag):
    """Blue, underlined text tag used to render clickable links in a
    text buffer; the tag is named after the link it represents."""
    def __init__(self, link, buffer):
        Gtk.TextTag.__init__(self, name=link)
        tag_table = buffer.get_tag_table()
        self.set_property('foreground', "#0000ff")
        self.set_property('underline', Pango.Underline.SINGLE)
        try:
            tag_table.add(self)
        except ValueError:
            pass # already in table
class ToolManagedWindowBase(ManagedWindow):
    """
    Base class for managed tool dialogs: an Execute/Close dialog with an
    options notebook and an optional results page with clickable person
    links.

    Copied from src/ReportBase/_BareReportDialog.py BareReportDialog
    """
    border_pad = 6
    HELP_TOPIC = None
    def __init__(self, dbstate, uistate, option_class, name, callback=None):
        self.name = name
        ManagedWindow.__init__(self, uistate, [], self)
        self.extra_menu = None
        self.widgets = []
        self.frame_names = []   # ordered frame names (notebook page order)
        self.frames = {}        # frame name -> list of (label, widget)
        self.format_menu = None
        self.style_button = None
        window = Gtk.Dialog(title='Tool')
        self.set_window(window, None, self.get_title())
        #self.window.connect('response', self.close)
        self.cancel = self.window.add_button(_('_Close'),
                                             Gtk.ResponseType.CANCEL)
        self.cancel.connect('clicked', self.close)
        self.ok = self.window.add_button(_('_Execute'), Gtk.ResponseType.OK)
        self.ok.connect('clicked', self.on_ok_clicked)
        self.window.set_default_size(600, -1)
        # Set up and run the dialog. These calls are not in top down
        # order when looking at the dialog box as there is some
        # interaction between the various frames.
        self.setup_title()
        self.setup_header()
        # Build the list of widgets that are used to extend the Options
        # frame and to create other frames
        self.add_user_options()
        self.notebook = Gtk.Notebook()
        self.notebook.set_border_width(6)
        self.window.get_content_area().pack_start(self.notebook, True, True, 0)
        self.results_text = Gtk.TextView()
        self.results_text.connect('button-press-event',
                                  self.on_button_press)
        self.results_text.connect('motion-notify-event',
                                  self.on_motion)
        self.tags = []  # (LinkTag, person_handle) pairs in the results view
        self.link_cursor = \
            Gdk.Cursor.new_for_display(Gdk.Display.get_default(),
                                       Gdk.CursorType.LEFT_PTR)
        self.standard_cursor = \
            Gdk.Cursor.new_for_display(Gdk.Display.get_default(),
                                       Gdk.CursorType.XTERM)
        self.setup_other_frames()
        self.set_current_frame(self.initial_frame())
        self.show()
    #------------------------------------------------------------------------
    #
    # Callback functions from the dialog
    #
    #------------------------------------------------------------------------
    def on_cancel(self, *obj):
        pass # cancel just closes
    def on_ok_clicked(self, obj):
        """
        The user is satisfied with the dialog choices. Parse all options
        and run the tool.
        """
        # Save options
        self.options.parse_user_options()
        self.options.handler.save_options()
        self.pre_run()
        self.run() # activate results tab
        self.post_run()
    def initial_frame(self):
        # Subclasses may return the name of a frame to select on startup.
        return None
    def on_motion(self, view, event):
        # Show a link cursor while hovering over tagged (clickable) text.
        buffer_location = view.window_to_buffer_coords(Gtk.TextWindowType.TEXT,
                                                       int(event.x),
                                                       int(event.y))
        _iter = view.get_iter_at_location(*buffer_location)
        if isinstance(_iter, tuple): # Gtk changed api in recent versions
            _iter = _iter[1]
        for (tag, person_handle) in self.tags:
            if _iter.has_tag(tag):
                _window = view.get_window(Gtk.TextWindowType.TEXT)
                _window.set_cursor(self.link_cursor)
                return False # handle event further, if necessary
        view.get_window(Gtk.TextWindowType.TEXT).set_cursor(self.standard_cursor)
        return False # handle event further, if necessary
    def on_button_press(self, view, event):
        # Double left-click on a person link opens the person editor;
        # single left-click makes that person active.
        buffer_location = view.window_to_buffer_coords(Gtk.TextWindowType.TEXT,
                                                       int(event.x),
                                                       int(event.y))
        _iter = view.get_iter_at_location(*buffer_location)
        if isinstance(_iter, tuple): # Gtk changed api in recent versions
            _iter = _iter[1]
        for (tag, person_handle) in self.tags:
            if _iter.has_tag(tag):
                person = self.db.get_person_from_handle(person_handle)
                if event.button == 1:
                    if event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS:
                        try:
                            EditPerson(self.dbstate, self.uistate, [], person)
                        except WindowActiveError:
                            pass
                    else:
                        self.uistate.set_active(person_handle, 'Person')
                    return True # handled event
        return False # did not handle event
    def results_write_link(self, text, person, person_handle):
        """Write `text` to the results view as a clickable link for
        `person_handle`.
        NOTE(review): the `person` argument is unused here, and the
        local `iter` below is assigned but never read.
        """
        self.results_write(" ")
        buffer = self.results_text.get_buffer()
        iter = buffer.get_end_iter()
        offset = buffer.get_char_count()
        self.results_write(text)
        start = buffer.get_iter_at_offset(offset)
        end = buffer.get_end_iter()
        self.tags.append((LinkTag(person_handle, buffer), person_handle))
        buffer.apply_tag(self.tags[-1][0], start, end)
    def results_write(self, text):
        """Append plain text to the results view, scrolling to the end."""
        buffer = self.results_text.get_buffer()
        mark = buffer.create_mark("end", buffer.get_end_iter())
        self.results_text.scroll_to_mark(mark, 0.0, True, 0, 0)
        buffer.insert_at_cursor(text)
        buffer.delete_mark_by_name("end")
    def write_to_page(self, page, text):
        """Append text to an arbitrary TextView page, scrolling to the
        end."""
        buffer = page.get_buffer()
        mark = buffer.create_mark("end", buffer.get_end_iter())
        page.scroll_to_mark(mark, 0.0, True, 0, 0)
        buffer.insert_at_cursor(text)
        buffer.delete_mark_by_name("end")
    def clear(self, text):
        # Remove all tags and clear text
        buffer = text.get_buffer()
        tag_table = buffer.get_tag_table()
        start = buffer.get_start_iter()
        end = buffer.get_end_iter()
        for (tag, handle) in self.tags:
            buffer.remove_tag(tag, start, end)
            tag_table.remove(tag)
        self.tags = []
        buffer.set_text("")
    def results_clear(self):
        # Remove all tags and clear text
        # NOTE(review): duplicates clear(); could simply delegate to
        # self.clear(self.results_text).
        buffer = self.results_text.get_buffer()
        tag_table = buffer.get_tag_table()
        start = buffer.get_start_iter()
        end = buffer.get_end_iter()
        for (tag, handle) in self.tags:
            buffer.remove_tag(tag, start, end)
            tag_table.remove(tag)
        self.tags = []
        buffer.set_text("")
    def pre_run(self):
        # Lazy import avoids a circular dependency at module load time
        # (presumably -- confirm against ..utils).
        from ..utils import ProgressMeter
        self.progress = ProgressMeter(self.get_title(),
                                      parent=self.window)
    def run(self):
        raise NotImplementedError("tool needs to define a run() method")
    def post_run(self):
        self.progress.close()
    #------------------------------------------------------------------------
    #
    # Functions related to setting up the dialog window.
    #
    #------------------------------------------------------------------------
    def get_title(self):
        """The window title for this dialog"""
        return "Tool" # self.title
    def get_header(self, name):
        """The header line to put at the top of the contents of the
        dialog box. By default this will just be the name of the
        selected person. Most subclasses will customize this to give
        some indication of what the report will be, i.e. 'Descendant
        Report for %s'."""
        return self.get_title()
    def setup_title(self):
        """Set up the title bar of the dialog. This function relies
        on the get_title() customization function for what the title
        should be."""
        self.window.set_title(self.get_title())
    def setup_header(self):
        """Set up the header line bar of the dialog. This function
        relies on the get_header() customization function for what the
        header line should read. If no customization function is
        supplied by the subclass, the default is to use the full name
        of the currently selected person."""
        title = self.get_header(self.get_title())
        label = Gtk.Label(label='<span size="larger" weight="bold">%s</span>' % title)
        label.set_use_markup(True)
        self.window.get_content_area().pack_start(label, False, False,
                                                  self.border_pad)
    def add_frame_option(self, frame_name, label_text, widget):
        """Similar to add_option this method takes a frame_name, a
        text string and a Gtk Widget. When the interface is built,
        all widgets with the same frame_name are grouped into a
        GtkFrame. This allows the subclass to create its own sections,
        filling them with its own widgets. The subclass is reponsible for
        all managing of the widgets, including extracting the final value
        before the report executes. This task should only be called in
        the add_user_options task."""
        if frame_name in self.frames:
            self.frames[frame_name].append((label_text, widget))
        else:
            self.frames[frame_name] = [(label_text, widget)]
            self.frame_names.append(frame_name)
    def set_current_frame(self, name):
        # Select the notebook page whose frame is called `name`
        # (None selects the first page).
        if name is None:
            self.notebook.set_current_page(0)
        else:
            for frame_name in self.frame_names:
                if name == frame_name:
                    if len(self.frames[frame_name]) > 0:
                        fname, child = self.frames[frame_name][0]
                        page = self.notebook.page_num(child)
                        self.notebook.set_current_page(page)
                        return
    def add_results_frame(self, frame_name="Results"):
        # Create (or clear) the scrollable results page and return the
        # TextView backing it.
        if frame_name not in self.frames:
            window = Gtk.ScrolledWindow()
            window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
            window.add(self.results_text)
            window.set_shadow_type(Gtk.ShadowType.IN)
            self.frames[frame_name] = [[frame_name, window]]
            self.frame_names.append(frame_name)
            l = Gtk.Label(label="<b>%s</b>" % _(frame_name))
            l.set_use_markup(True)
            self.notebook.append_page(window, l)
            self.notebook.show_all()
        else:
            self.results_clear()
        return self.results_text
    def add_page(self, frame_name="Help"):
        # Create a scrollable word-wrapped text page and return its
        # TextView.
        if frame_name not in self.frames:
            text = Gtk.TextView()
            text.set_wrap_mode(Gtk.WrapMode.WORD)
            window = Gtk.ScrolledWindow()
            window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
            window.add(text)
            window.set_shadow_type(Gtk.ShadowType.IN)
            self.frames[frame_name] = [[frame_name, window]]
            self.frame_names.append(frame_name)
            l = Gtk.Label(label="<b>%s</b>" % _(frame_name))
            l.set_use_markup(True)
            self.notebook.append_page(window, l)
            self.notebook.show_all()
        else:
            # FIXME: get text
            #
            # NOTE(review): `.something` is a placeholder attribute and
            # will raise AttributeError if this branch is ever taken.
            text = self.frames[frame_name][0][1].something
        return text
    def setup_other_frames(self):
        """Create one notebook page (a Gtk.Grid) per registered frame
        name and attach each frame's labelled widgets to it.  (The
        original docstring here was copied from add_frame_option.)"""
        for key in self.frame_names:
            flist = self.frames[key]
            grid = Gtk.Grid()
            grid.set_column_spacing(12)
            grid.set_row_spacing(6)
            grid.set_border_width(6)
            l = Gtk.Label(label="<b>%s</b>" % key)
            l.set_use_markup(True)
            self.notebook.append_page(grid, l)
            row = 0
            for (text, widget) in flist:
                widget.set_hexpand(True)
                if text:
                    text_widget = Gtk.Label(label='%s:' % text)
                    text_widget.set_halign(Gtk.Align.START)
                    grid.attach(text_widget, 1, row, 1, 1)
                    grid.attach(widget, 2, row, 1, 1)
                else:
                    grid.attach(widget, 2, row, 1, 1)
                row += 1
        self.notebook.show_all()
    #------------------------------------------------------------------------
    #
    # Functions related to extending the options
    #
    #------------------------------------------------------------------------
    def add_user_options(self):
        """Called to allow subclasses add widgets to the dialog form.
        It is called immediately before the window is displayed. All
        calls to add_option or add_frame_option should be called in
        this task."""
        add_gui_options(self)
    def build_menu_names(self, obj):
        # ManagedWindow hook: (menu group, window title).
        return (_('Main window'), self.get_title())
class ToolManagedWindowBatch(tool.BatchTool, ToolManagedWindowBase):
    """Managed tool window for batch tools; the window is only built if
    the BatchTool constructor did not abort (self.fail)."""
    def __init__(self, dbstate, user, options_class, name, callback=None):
        uistate = user.uistate
        # This constructor will ask a question, set self.fail:
        self.dbstate = dbstate
        self.uistate = uistate
        tool.BatchTool.__init__(self, dbstate, user, options_class, name)
        if not self.fail:
            ToolManagedWindowBase.__init__(self, dbstate, uistate,
                                           options_class, name, callback)
class ToolManagedWindow(tool.Tool, ToolManagedWindowBase):
    """Managed tool window for ordinary (non-batch) tools."""
    def __init__(self, dbstate, uistate, options_class, name, callback=None):
        self.dbstate = dbstate
        self.uistate = uistate
        tool.Tool.__init__(self, dbstate, options_class, name)
        ToolManagedWindowBase.__init__(self, dbstate, uistate, options_class,
                                       name, callback)
#-------------------------------------------------------------------------
#
# UpdateAddons
#
#-------------------------------------------------------------------------
class UpdateAddons(ManagedWindow):
    """
    Modal dialog listing available addon updates grouped by
    status/category, letting the user select and install them.
    """
    def __init__(self, uistate, track, addon_update_list):
        """
        addon_update_list: list of (status, plugin_url, plugin_dict)
        tuples; plugin_dict uses the short keys "t" (type), "n" (name),
        "v" (version), "d" (description) and "i" (id).
        """
        self.title = _('Available Gramps Updates for Addons')
        ManagedWindow.__init__(self, uistate, track, self, modal=True)
        glade = Glade("updateaddons.glade")
        self.set_window(glade.toplevel, None, None)
        self.window.set_title(self.title)
        self.setup_configs("interface.updateaddons", 750, 400)
        self.rescan = False
        apply_button = glade.get_object('apply')
        cancel_button = glade.get_object('cancel')
        select_all = glade.get_object('select_all')
        select_all.connect("clicked", self.select_all_clicked)
        select_none = glade.get_object('select_none')
        select_none.connect("clicked", self.select_none_clicked)
        apply_button.connect("clicked", self.install_addons)
        cancel_button.connect("clicked", self.close)
        self.list = ListModel(glade.get_object("list"), [
            # name, click?, width, toggle
            {"name": _('Select'),
             "width": 60,
             "type": TOGGLE,
             "visible_col": 6,
             "editable": True},           # 0 selected?
            (_('Type'), 1, 180),          # 1 new gramplet
            (_('Name'), 2, 200),          # 2 name (version)
            (_('Description'), 3, 200),   # 3 description
            ('', NOSORT, 0),              # 4 url
            ('', NOSORT, 0),              # 5 id
            {"name": '', "type": TOGGLE}, # 6 visible? bool
            ], list_mode="tree")
        pos = None
        addon_update_list.sort(key=lambda x: "%s %s" % (x[0], x[2]["t"]))
        last_category = None
        for (status, plugin_url, plugin_dict) in addon_update_list:
            # (The original also computed an unused per-row
            # get_count(...) here; dropped as dead code.)
            # translators: needed for French, ignore otherwise
            category = _("%(str1)s: %(str2)s") % {'str1' : status,
                                                  'str2' : _(plugin_dict["t"])}
            if last_category != category:
                last_category = category
                # Start a new category header row (no visible checkbox).
                node = self.list.add([False, # initially selected?
                                      category,
                                      "",
                                      "",
                                      "",
                                      "",
                                      False]) # checkbox visible?
            row_iter = self.list.add([False, # initially selected?
                                      "%s %s" % (status, _(plugin_dict["t"])),
                                      "%s (%s)" % (plugin_dict["n"],
                                                   plugin_dict["v"]),
                                      plugin_dict["d"],
                                      plugin_url,
                                      plugin_dict["i"],
                                      True], node=node)
            if pos is None:
                pos = row_iter
        if pos:
            self.list.selection.select_iter(pos)
        self.show()
        self.window.run()
    def build_menu_names(self, obj):
        # ManagedWindow hook: (window title, menu label).
        return (self.title, " ")
    def select_all_clicked(self, widget):
        """
        Select all of the addons for download.
        """
        self.list.model.foreach(update_rows, True)
        self.list.tree.expand_all()
    def select_none_clicked(self, widget):
        """
        Select none of the addons for download.
        """
        self.list.model.foreach(update_rows, False)
        self.list.tree.expand_all()
    def install_addons(self, obj):
        """
        Process all of the selected addons: download/install selected
        rows and remember unselected ones as previously seen.
        """
        self.window.hide()
        model = self.list.model
        # Total number of addon rows = children of every category node.
        # BUGFIX: the original advanced the iterator *before* counting,
        # which skipped the first category's children in the total.
        length = 0
        node_iter = model.get_iter_first()
        while node_iter:
            length += model.iter_n_children(node_iter)
            node_iter = model.iter_next(node_iter)
        longop = LongOpStatus(
            _("Downloading and installing selected addons..."),
            length, 1, # total, increment-by
            can_cancel=True)
        pm = ProgressMonitor(GtkProgressDialog,
                             ("Title", self.parent_window, Gtk.DialogFlags.MODAL))
        pm.add_op(longop)
        count = 0
        if not config.get('behavior.do-not-show-previously-seen-addon-updates'):
            # reset list
            config.get('behavior.previously-seen-addon-updates')[:] = []
        node_iter = model.get_iter_first()
        errors = []
        while node_iter:
            for rowcnt in range(model.iter_n_children(node_iter)):
                child = model.iter_nth_child(node_iter, rowcnt)
                row = [model.get_value(child, n) for n in range(6)]
                if longop.should_cancel():
                    break
                elif row[0]: # toggle on
                    ok = load_addon_file(row[4], callback=LOG.debug)
                    if ok:
                        count += 1
                    else:
                        errors.append(row[2])
                else: # add to list of previously seen, but not installed
                    if row[5] not in config.get('behavior.previously-seen-addon-updates'):
                        config.get('behavior.previously-seen-addon-updates').append(row[5])
                longop.heartbeat()
                pm._get_dlg()._process_events()
            node_iter = model.iter_next(node_iter)
        if not longop.was_cancelled():
            longop.end()
        if errors:
            OkDialog(_("Installation Errors"),
                     _("The following addons had errors: ") +
                     # TODO for Arabic, should the next comma be translated?
                     ", ".join(errors),
                     parent=self.parent_window)
        if count:
            self.rescan = True
            OkDialog(_("Done downloading and installing addons"),
                     # translators: leave all/any {...} untranslated
                     "%s %s" % (ngettext("{number_of} addon was installed.",
                                         "{number_of} addons were installed.",
                                         count).format(number_of=count),
                                _("If you have installed a 'Gramps View', you will need to restart Gramps.")),
                     parent=self.parent_window)
        else:
            OkDialog(_("Done downloading and installing addons"),
                     _("No addons were installed."),
                     parent=self.parent_window)
        self.close()
#-------------------------------------------------------------------------
#
# Local Functions
#
#-------------------------------------------------------------------------
def update_rows(model, path, iter, user_data):
    """
    Set the 'selected' toggle (column 0) of a second-level row to
    `user_data`; top-level rows are category headers and are skipped.
    """
    # e.g. path (8,) is a category header; (8, 0) is an addon row.
    if len(path.get_indices()) != 2:
        return
    model[path][0] = user_data
    model.row_changed(path, iter)
def get_count(addon_update_list, category):
    """
    Get the count of items matching `category` that also carry a
    (truthy) download url.
    """
    return sum(1 for status, plugin_url, plugin_dict in addon_update_list
               if plugin_dict["t"] == category and plugin_url)
| Fedik/gramps | gramps/gui/plug/_windows.py | Python | gpl-2.0 | 51,290 | [
"Brian"
] | c01db4deef470410da634c9e818ed7b2bdfee11cb4715c3236f51249f3105b31 |
from __future__ import print_function
import numpy as np
import pandas as pd
import regreg.api as rr
import selection.tests.reports as reports
from selection.tests.flags import SET_SEED, SMALL_SAMPLES
from selection.tests.instance import logistic_instance, gaussian_instance
from selection.tests.decorators import (wait_for_return_value,
set_seed_iftrue,
set_sampling_params_iftrue,
register_report)
import selection.tests.reports as reports
from selection.api import (randomization,
glm_group_lasso,
pairs_bootstrap_glm,
multiple_queries,
glm_group_lasso_parametric)
from selection.randomized.query import (naive_confidence_intervals, naive_pvalues)
from selection.randomized.glm import glm_parametric_covariance, glm_nonparametric_bootstrap, restricted_Mest, set_alpha_matrix
@register_report(['truth', 'covered_clt', 'ci_length_clt',
                  'naive_pvalues','covered_naive', 'ci_length_naive'])
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=10, burnin=10)
@set_seed_iftrue(SET_SEED)
@wait_for_return_value()
def test_without_screening(s=10,
                           n=300,
                           p=100,
                           rho=0.,
                           signal=3.5,
                           lam_frac = 1.,
                           ndraw=10000,
                           burnin=2000,
                           loss='gaussian',
                           randomizer ='laplace',
                           randomizer_scale =1.,
                           scalings=False,
                           subgrad =True,
                           check_screen=False):
    """Run one randomized group-lasso selection and compute selective
    pivots/CIs for the *difference* of targets between the selection
    data set and an independent data set, plus naive counterparts.

    Returns (pivots, covered, ci_length, naive_pvals, covered_naive,
    ci_length_naive), or None when check_screen is True and the true
    support was not screened (the @wait_for_return_value decorator then
    retries).
    """
    if loss=="gaussian":
        X, y, beta, nonzero, sigma = gaussian_instance(n=n, p=p, s=s, rho=rho, signal=signal, sigma=1, random_signs=False)
        # Theoretical lambda scaled by sigma for the Gaussian loss.
        lam = lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.standard_normal((n, 2000)))).max(0)) * sigma
        loss = rr.glm.gaussian(X, y)
        # Second, independent data set for the difference target.
        X_indep, y_indep, _, _, _ = gaussian_instance(n=n, p=p, s=s, rho=rho, signal=signal, sigma=1)
        loss_indep = rr.glm.gaussian(X_indep, y_indep)
    elif loss=="logistic":
        X, y, beta, _ = logistic_instance(n=n, p=p, s=s, rho=rho, signal=signal)
        loss = rr.glm.logistic(X, y)
        lam = lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.binomial(1, 1. / 2, (n, 10000)))).max(0))
        X_indep, y_indep, _, _ = logistic_instance(n=n, p=p, s=s, rho=rho, signal=signal, random_signs=False)
        loss_indep = rr.glm.logistic(X_indep, y_indep)
        nonzero = np.where(beta)[0]
    if randomizer == 'laplace':
        randomizer = randomization.laplace((p,), scale=randomizer_scale)
    elif randomizer == 'gaussian':
        randomizer = randomization.isotropic_gaussian((p,), scale=randomizer_scale)
    epsilon = 1. / np.sqrt(n)
    W = np.ones(p)*lam
    #W[0] = 0 # use at least some unpenalized
    penalty = rr.group_lasso(np.arange(p), weights=dict(zip(np.arange(p), W)), lagrange=1.)
    # Randomized group lasso: the selection event.
    M_est = glm_group_lasso(loss, epsilon, penalty, randomizer)
    M_est.solve()
    active_union = M_est._overall
    nactive = np.sum(active_union)
    print("nactive", nactive)
    active_set = np.nonzero(active_union)[0]
    print("active set", active_set)
    print("true nonzero", np.nonzero(beta)[0])
    views = [M_est]
    queries = multiple_queries(views)
    queries.solve()
    # "Screened" means every truly nonzero coefficient was selected.
    screened = False
    if set(nonzero).issubset(np.nonzero(active_union)[0]):
        screened = True
    if check_screen==False or (check_screen==True and screened==True):
        #if nactive==s:
        #    return None
        if scalings: # try condition on some scalings
            M_est.condition_on_subgradient()
            M_est.condition_on_scalings()
        if subgrad:
            M_est.decompose_subgradient(conditioning_groups=np.zeros(p, dtype=bool), marginalizing_groups=np.ones(p, bool))
        # Bootstrap targets from the selection data and the independent
        # data; the inferential target is their difference (mean zero
        # under the null by construction).
        boot_target1, boot_target_observed1 = pairs_bootstrap_glm(loss, active_union, inactive=~active_union)
        boot_target2, boot_target_observed2 = pairs_bootstrap_glm(loss_indep, active_union, inactive=~active_union)
        target_observed = (boot_target_observed1-boot_target_observed2)[:nactive]
        def _target(indices):
            # Difference of the two bootstrapped targets, active part only.
            return boot_target1(indices)[:nactive]-boot_target2(indices)[:nactive]
        form_covariances = glm_nonparametric_bootstrap(n, n)
        queries.setup_sampler(form_covariances)
        queries.setup_opt_state()
        target_sampler = queries.setup_target(_target,
                                              target_observed,
                                              reference=target_observed)
        target_sample = target_sampler.sample(ndraw=ndraw,
                                              burnin=burnin)
        LU = target_sampler.confidence_intervals(target_observed,
                                                 sample=target_sample,
                                                 level=0.9)
        pivots = target_sampler.coefficient_pvalues(target_observed,
                                                    parameter=np.zeros(nactive),
                                                    sample=target_sample)
        #test_stat = lambda x: np.linalg.norm(x - beta[active_union])
        #observed_test_value = test_stat(target_observed)
        #pivots = target_sampler.hypothesis_test(test_stat,
        #                                        observed_test_value,
        #                                        alternative='twosided',
        #                                        parameter = beta[active_union],
        #                                        ndraw=ndraw,
        #                                        burnin=burnin,
        #                                        stepsize=None)
        true_vec = np.zeros(nactive)
        def coverage(LU):
            # Per-coordinate coverage indicator and CI length against
            # the (zero) true difference vector.
            L, U = LU[:, 0], LU[:, 1]
            covered = np.zeros(nactive)
            ci_length = np.zeros(nactive)
            for j in range(nactive):
                if (L[j] <= true_vec[j]) and (U[j] >= true_vec[j]):
                    covered[j] = 1
                ci_length[j] = U[j] - L[j]
            return covered, ci_length
        covered, ci_length = coverage(LU)
        LU_naive = naive_confidence_intervals(target_sampler, target_observed)
        covered_naive, ci_length_naive = coverage(LU_naive)
        naive_pvals = naive_pvalues(target_sampler, target_observed, true_vec)
        return pivots, covered, ci_length, naive_pvals, covered_naive, ci_length_naive
def report(niter=1, **kwargs):
    """Run `test_without_screening` `niter` times, pickle the collected
    runs and render a pivot plot to PDF.

    Output files are keyed by the 'loss' and 'randomizer' kwargs.
    """
    condition_report = reports.reports['test_without_screening']
    runs = reports.collect_multiple_runs(condition_report['test'],
                                         condition_report['columns'],
                                         niter,
                                         reports.summarize_all,
                                         **kwargs)
    # BUGFIX: the original built the base name from
    # "test_without_screening.pkl", yielding files like
    # "test_without_screening.pkl_gaussian_gaussian.pkl" and a PDF whose
    # name also contained the stray ".pkl".
    base = ''.join(["test_without_screening", "_", kwargs['loss'], "_",
                    kwargs['randomizer']])
    pkl_label = base + ".pkl"
    pdf_label = base + ".pdf"
    runs.to_pickle(pkl_label)
    runs_read = pd.read_pickle(pkl_label)
    fig = reports.pivot_plot_plus_naive(runs_read, color='b', label='no screening')
    fig.suptitle('Testing without screening', fontsize=20)
    fig.savefig(pdf_label)
if __name__ == '__main__':
    # Fixed seed so the randomized selection/sampling run is reproducible.
    np.random.seed(500)
    kwargs = {'s':30, 'n':3000, 'p':1000, 'signal':3.5, 'rho':0, 'loss':'gaussian', 'randomizer':'gaussian',
              'randomizer_scale':1.2, 'lam_frac':1.}
    report(niter=1, **kwargs)
| selective-inference/selective-inference | sandbox/randomized_tests/test_without_screening.py | Python | bsd-3-clause | 7,872 | [
"Gaussian"
] | 1f9f712531c4ed6ffcb511af09a9d24bdec53050153c14f8156f4255da441bbe |
#!/usr/bin/env python
#
# PyCow - Python to JavaScript with MooTools translator
# Copyright 2009 Patrick Schneider <patrick.p2k.schneider@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Some Notes:
#
# PyCow does a limited type inference, so it can distinguish function calls
# from class instantiations. However, some conditions can prevent a correct
# evaluation.
#
# PyCow cannot parse comments but can parse docstrings.
#
# No kwargs.
#
import ast, simplejson, re, random
from StringIO import StringIO
__all__ = ["ParseError", "translate_string", "translate_file"]
class ParseError(Exception):
    """
    Raised when the parser detects a fatal error.

    The offending detail is kept in ``value``; ``str()`` yields its
    repr, matching the original behavior.
    """
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
class PyCowContext(ast.NodeVisitor):
"""
First-pass context parser. Builds an execution context for type inference
and captures docstrings.
"""
def __init__(self, node, parent = None):
"""
Parse the node as a new context. The parent must be another context
object. Only Module, Class, Method and Function nodes are allowed.
"""
self.docstring = ""
self.module_license = ""
self.module_all = None
self.node = node
if node.__class__.__name__ == "FunctionDef":
if parent.type == "Class":
self.type = "Method"
else:
self.type = "Function"
self.name = node.name
self.__get_docstring()
elif node.__class__.__name__ == "ClassDef":
self.type = "Class"
self.name = node.name
self.__get_docstring()
elif node.__class__.__name__ == "Module":
self.type = "Module"
self.name = "(Module)"
self.__get_docstring()
else:
raise ValueError("Only Module, ClassDef and FunctionDef nodes are allowed")
self.parent = parent
self.identifiers = {}
self.variables = [] # Holds declared local variables (filled on second pass)
self.visit_For = self.visit_body
self.visit_While = self.visit_body
self.visit_If = self.visit_body
self.visit_TryExcept = self.visit_body
self.visit_ExceptHandler = self.visit_body
self.visit_ClassDef = self.visit_func_or_class
self.visit_FunctionDef = self.visit_func_or_class
self.visit_body(node)
def visit_func_or_class(self, node):
if self.identifiers.has_key(node.name):
old_ctx = self.identifiers[node.name]
raise ParseError("%s identifier '%s' at line %d is illegaly overwritten on line %d" % (
old_ctx.type,
node.name,
old_ctx.node.lineno,
node.lineno,
))
self.identifiers[node.name] = PyCowContext(node, self)
def visit_body(self, node):
for stmt in node.body:
self.visit(stmt)
for stmt in getattr(node, "orelse", []):
self.visit(stmt)
def visit_Assign(self, stmt):
if not self.type == "Module": return
if len(stmt.targets) == 1 and isinstance(stmt.targets[0], ast.Name):
if stmt.targets[0].id == "__all__":
if not isinstance(stmt.value, ast.List):
raise ParseError("Value of `__all__` must be a list expression (line %d)" % (stmt.lineno))
self.module_all = []
for expr in stmt.value.elts:
if not isinstance(expr, ast.Str):
raise ParseError("All elements of `__all__` must be strings (line %d)" % (expr.lineno))
self.module_all.append(expr.s)
elif stmt.targets[0].id == "__license__":
if not isinstance(stmt.value, ast.Str):
raise ParseError("Value of `__license__` must be a string (line %d)" % (stmt.lineno))
self.module_license = stmt.value.s
def visit_TryFinally(self, node):
    """First-pass handler: walk the guarded body, then the finalizer."""
    for child in node.body + node.finalbody:
        self.visit(child)
def generic_visit(self, node):
    # The first pass only records definitions; all other node types are
    # intentionally ignored.
    pass
def child(self, identifier):
    """
    Get a named child context, or None when no such child exists.
    """
    # dict.get() replaces the deprecated has_key() and avoids a double lookup.
    return self.identifiers.get(identifier)
def lookup(self, identifier):
    """
    Get a context in this or the parents context.
    Jumps over Class contexts (class members are not lexically visible).
    """
    if self.type != "Class":
        if identifier in self.identifiers:  # has_key() is deprecated
            return self.identifiers[identifier]
    if self.parent is not None:  # identity check instead of `!= None`
        return self.parent.lookup(identifier)
    return None
def class_context(self):
    """
    Return the topmost class context (useful to get the context for `self`),
    or None when there is no enclosing class.
    """
    context = self
    while context is not None:
        if context.type == "Class":
            return context
        context = context.parent
    return None
def declare_variable(self, name):
    """
    Record a local variable declaration.
    Returns False if the variable is already declared and True if not.
    """
    if name in self.variables:
        return False
    self.variables.append(name)
    return True
def __get_docstring(self):
    """Record this node's docstring (a string literal as first statement)."""
    if not self.node.body:
        return
    head = self.node.body[0]
    if isinstance(head, ast.Expr) and isinstance(head.value, ast.Str):
        self.docstring = head.value.s
class PyCow(ast.NodeVisitor):
    """
    Second-pass main parser.

    Walks the AST recorded by the first pass (PyCowContext) and writes the
    equivalent JavaScript to the output stream.
    """
    # Operator translation table: JS chars, precedence, associativity.
    # Lower precedence numbers bind tighter (JavaScript-style numbering).
    OP_MAP = {
        "Add": ("+", 6, True), # chars, precedence, associates
        "Sub": ("-", 6, True),
        "Mult": ("*", 5, True),
        "Div": ("/", 5, True),
        "FloorDiv": ("/", 5, True),
        "Mod": ("%", 5, True),
        #"Pow": ?,
        "LShift": ("<<", 7, True),
        "RShift": (">>", 7, True),
        "BitOr": ("|", 12, True),
        "BitXor": ("^", 11, True),
        "BitAnd": ("&", 10, True),
        "USub": ("-", 4, False),
        "UAdd": ("+", 4, False),
        "And": ("&&", 13, True),
        "Or": ("||", 14, True),
        "Not": ("!", 4, False),
        "Eq": ("==", 9, True),
        "NotEq":("!=", 9, True),
        "Lt": ("<", 8, True),
        "LtE": ("<=", 8, True),
        "Gt": (">", 8, True),
        "GtE": (">=", 8, True),
    }
    # Statement node types after which no trailing semicolon is emitted.
    NO_SEMICOLON = [
        "Global",
        "If",
        "While",
        "For",
    ]
    # JavaScript keywords that must not be used as Python identifiers.
    RESERVED_WORDS = [
        "null",
        "undefined",
        "true",
        "false",
        "new",
        "var",
        "switch",
        "case",
        "function",
        "this",
        "default",
        "throw",
        "delete",
        "instanceof",
        "typeof",
    ]
    # Matches strings that are valid (unquoted) JavaScript identifiers.
    IDENTIFIER_RE = re.compile("[A-Za-z_$][0-9A-Za-z_$]*")
def __init__(self, outfile = None, indent = "\t", namespace = "", warnings = True):
if outfile == None:
outfile = StringIO()
self.__out = outfile
self.__ichars = indent
self.__ilevel = 0
self.__mod_context = None
self.__curr_context = None
self.__namespace = namespace
self.__iteratorid = 0
self.__warnings = warnings
def output(self):
if isinstance(self.__out, StringIO):
return self.__out.getvalue()
else:
self.__out.seek(0)
return self.__out.read()
def visit_Module(self, mod):
    """
    Initial node.
    There is and can be only one Module node.
    """
    # Build context
    self.__mod_context = PyCowContext(mod)
    self.__curr_context = self.__mod_context
    # Emit the module license (if any) as a leading block comment.
    if self.__mod_context.module_license != "":
        first = True
        for line in self.__mod_context.module_license.split("\n"):
            if first:
                self.__out.write("/* %s\n" % (line))
                first = False
            else:
                self.__out.write(" * %s\n" % (line))
        self.__out.write(" */\n\n")
    # Parse body
    if self.__namespace != "":
        # Wrap the module in `<namespace> = (function() { ... })();`.
        if "." in self.__namespace:
            self.__build_namespace(self.__namespace)
        if self.__mod_context.docstring != "":
            self.__write_docstring(self.__mod_context.docstring)
        if "." not in self.__namespace:
            self.__write("var ")
        self.__write("%s = (function() {\n" % (self.__namespace))
        self.__indent()
    else:
        if self.__mod_context.docstring != "": self.__write_docstring(self.__mod_context.docstring)
    public_identifiers = self.__mod_context.module_all
    for stmt in mod.body:
        # `__all__`/`__license__` assignments and the module docstring were
        # consumed by the first pass; skip them in the output.
        if isinstance(stmt, ast.Assign) and len(stmt.targets) == 1 and \
                isinstance(stmt.targets[0], ast.Name) and \
                stmt.targets[0].id in ("__all__", "__license__"):
            continue
        if isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Str):
            continue # Module docstring
        self.__do_indent()
        self.visit(stmt)
        self.__semicolon(stmt)
        self.__write("\n") # Extra newline on module layer
    if self.__namespace != "":
        # Export the public identifiers as the closure's return object.
        self.__write_indented("return {")
        self.__indent()
        if public_identifiers == None:
            # No `__all__`: export everything recorded by the first pass.
            public_identifiers = self.__mod_context.identifiers.iterkeys()
        first = True
        for id in public_identifiers:
            if first:
                first = False
                self.__write("\n")
            else:
                self.__write(",\n")
            self.__write_indented("%s: %s" % (id, id))
        self.__indent(False)
        self.__write("\n")
        self.__write_indented("};\n")
        self.__indent(False)
        self.__write_indented("})();\n")
    self.__curr_context = None
def visit_ImportFrom(self, i):
"""
Ignored.
"""
self.__write("/* from %s import " % (i.module))
first = True
for name in i.names:
if first:
first = False
else:
self.__write(", ")
self.__write(name.name)
if name.asname:
self.__write(" as %s" % (name.asname))
self.__write(" */")
def visit_Import(self, i):
"""
Ignored.
"""
self.__write("/* import ")
first = True
for name in i.names:
if first:
first = False
else:
self.__write(", ")
self.__write(name.name)
if name.asname:
self.__write(" as %s" % (name.asname))
self.__write(" */")
def visit_Print(self, p):
"""
Translate `print` to `dbgprint()`.
"""
self.__write("dbgprint(")
first = True
for expr in p.values:
if first:
first = False
else:
self.__write(", ")
self.visit(expr)
self.__write(")")
def visit_Num(self, n):
self.__write(str(n.n))
def visit_Str(self, s):
    """
    Output a quoted string.
    Cleverly uses JSON to convert it ;)
    """
    # A JSON string literal (double-quoted, escaped) is also a valid
    # JavaScript string literal.
    self.__write(simplejson.dumps(s.s))
def visit_Call(self, c):
    """
    Translates a function/method call or class instantiation.
    Uses the first-pass contexts to decide whether `new` must be emitted.
    """
    cls = self.__curr_context.class_context()
    # Check for 'super'
    if cls != None and isinstance(c.func, ast.Name) and c.func.id == "super":
        if len(c.args) != 2:
            raise ParseError("`super` can only be parsed with two arguments (line %d)" % (c.lineno))
        if not isinstance(c.args[0], ast.Name) or not isinstance(c.args[1], ast.Name):
            raise ParseError("Arguments of `super` must be simple names, no other expressions allowed (line %d)" % (c.lineno))
        if c.args[0].id != cls.name:
            raise ParseError("First argument of `super` must be the current class name (line %d)" % (c.lineno))
        if c.args[1].id != "self":
            raise ParseError("Second argument of `super` must be `self` (line %d)" % (c.lineno))
        self.__write("this.parent")
        return
    # NOTE(review): `type` shadows the builtin here; kept as-is.
    type = None
    if isinstance(c.func, ast.Name):
        if c.func.id == "Hash" or c.func.id == "Array": # Some hardcoded classes/functions
            type = "Class"
        elif c.func.id == "len" or c.func.id == "repr":
            type = "Function"
        elif c.func.id == "isinstance": # Translate to instanceof
            if len(c.args) != 2:
                raise ParseError("The isinstance call must have exactly two parameters (line %d)" % (c.lineno))
            self.visit(c.args[0])
            self.__write(" instanceof ")
            if isinstance(c.args[1], ast.Name) and c.args[1].id == "list":
                self.__write("Array")
            else:
                self.visit(c.args[1])
            return
        else:
            # Look in current context
            type = getattr(self.__curr_context.lookup(c.func.id), "type", None)
    elif isinstance(c.func, ast.Attribute):
        if cls != None and isinstance(c.func.value, ast.Call) and isinstance(c.func.value.func, ast.Name) and c.func.value.func.id == "super":
            # A super call
            if self.__curr_context.name == c.func.attr:
                # Super constructor/method
                self.visit(c.func.value) # Checks for errors on the 'super' call
                self.__write("(")
                self.__parse_args(c)
                self.__write(")")
                return
            else:
                raise ParseError("The method name of a `super` call must match the current method's name (line %d)" % (c.lineno))
        elif isinstance(c.func.value, ast.Name) and c.func.value.id == "self":
            # Look in Class context
            if cls != None:
                type = getattr(cls.child(c.func.attr), "type", None)
        else:
            # Create attribute chain
            attrlst = [c.func.attr]
            value = c.func.value
            while isinstance(value, ast.Attribute):
                attrlst.append(value.attr)
                value = value.value
            if isinstance(value, ast.Name): # The last value must be a Name
                ctx = self.__curr_context.lookup(value.id)
                while ctx != None: # Walk up
                    ctx = ctx.child(attrlst.pop())
                    if ctx != None and len(attrlst) == 0: # Win
                        type = ctx.type
                        break
    if type == None and self.__warnings:
        self.__write("/* Warning: Cannot infer type of -> */ ")
    elif type == "Class":
        # Class instantiation needs the `new` keyword in JavaScript.
        self.__write("new ")
    self.visit(c.func)
    self.__write("(")
    self.__parse_args(c)
    self.__write(")")
def visit_Name(self, n):
"""
Translate an identifier. If the context is a method, substitute `self`
with `this`.
Some special keywords:
True -> true
False -> false
None -> null
"""
if self.__curr_context.type == "Method" and n.id == "self":
self.__write("this")
elif n.id == "True" or n.id == "False":
self.__write(n.id.lower())
elif n.id == "None":
self.__write("null")
elif n.id in self.RESERVED_WORDS:
raise ParseError("`%s` is a reserved word and cannot be used as an identifier (line %d)" % (n.id, n.lineno))
else:
self.__write(n.id)
def visit_Expr(self, expr):
    # An expression statement translates to just its value expression.
    self.visit(expr.value)
def visit_BinOp(self, o):
    """
    Translates a binary operator.
    Note: The modulo operator on strings is translated to left.sprintf(right)
    and currently the only spot where tuples are allowed. Operands that bind
    weaker than this operator are parenthesized to preserve evaluation order.
    """
    if isinstance(o.left, ast.Str) and isinstance(o.op, ast.Mod) and isinstance(o.right, ast.Tuple):
        # "fmt" % (a, b, ...)  ->  "fmt".sprintf(a, b, ...)
        self.visit(o.left)
        self.__write(".sprintf(")
        first = True
        for elt in o.right.elts:
            if first:
                first = False
            else:
                self.__write(", ")
            self.visit(elt)
        self.__write(")")
    else:
        chars, prec, assoc = self.__get_op_cpa(o.op)
        # Bug fix: the left operand used to be written unparenthesized, so an
        # expression like (a + b) * c was emitted as `a + b * c`. Wrap the
        # left operand when it binds weaker than this operator.
        lprec, lassoc = self.__get_expr_pa(o.left)
        if lprec > prec: self.__write("(")
        self.visit(o.left)
        if lprec > prec: self.__write(")")
        self.__write(" %s " % (chars))
        eprec, eassoc = self.__get_expr_pa(o.right)
        if eprec >= prec: self.__write("(")
        self.visit(o.right)
        if eprec >= prec: self.__write(")")
def visit_BoolOp(self, o):
    """
    Translates a boolean operator chain (and/or), parenthesizing operands
    that bind weaker than or equal to this operator.
    """
    chars, prec, assoc = self.__get_op_cpa(o.op)
    for pos, expr in enumerate(o.values):
        if pos:
            self.__write(" %s " % (self.__get_op(o.op)))
        eprec, eassoc = self.__get_expr_pa(expr)
        if eprec >= prec:
            self.__write("(")
        self.visit(expr)
        if eprec >= prec:
            self.__write(")")
def visit_UnaryOp(self, o):
    """
    Translates a unary operator.
    """
    self.__write(self.__get_op(o.op))
    prec, assoc = self.__get_expr_pa(o.operand)
    # Numeric literals are forced to precedence 3 so the operand is always
    # parenthesized, e.g. `-(1)` — avoids ambiguous `--1` style output.
    if isinstance(o.operand, ast.Num): prec = 3
    if prec > 2: self.__write("(")
    self.visit(o.operand)
    if prec > 2: self.__write(")")
def visit_Compare(self, c):
    """
    Translate a compare block.
    Chained comparisons (`a < b < c`) have different semantics in
    JavaScript and are therefore rejected.
    """
    self.visit(c.left)
    if len(c.ops) > 1:
        raise ParseError("Comparisons with more than one operator are not supported (line %d)" % (c.lineno))
    op, expr = c.ops[0], c.comparators[0]
    self.__write(" %s " % (self.__get_op(op)))
    prec, assoc = self.__get_expr_pa(expr)
    if prec > 2: self.__write("(")
    self.visit(expr)
    if prec > 2: self.__write(")")
def visit_Global(self, g):
    """
    Declares variables as global.
    """
    # Marking the names as declared prevents a later `var` redeclaration in
    # this scope; the statement itself emits no JavaScript.
    for name in g.names:
        self.__curr_context.declare_variable(name)
def visit_Lambda(self, l):
    """
    Translates a lambda function into an anonymous JavaScript function
    whose body returns the lambda's single expression.
    """
    self.__write("function (")
    self.__parse_args(l.args)
    self.__write(") {return ")
    self.visit(l.body)
    self.__write(";}")
def visit_Yield(self, y):
"""
Translate the yield operator.
"""
self.__write("yield ")
self.visit(l.value)
def visit_Return(self, r):
"""
Translate the return statement.
"""
if r.value:
self.__write("return ")
self.visit(r.value)
else:
self.__write("return")
def visit_List(self, l):
"""
Translate a list expression.
"""
self.__write("[")
first = True
for expr in l.elts:
if first:
first = False
else:
self.__write(", ")
self.visit(expr)
self.__write("]")
def visit_Dict(self, d):
    """
    Translate a dictionary expression into an object literal.
    Only number and string-literal keys are supported.
    """
    self.__write("{")
    self.__indent()
    first = True
    for i in xrange(len(d.keys)):
        key, value = d.keys[i], d.values[i]
        if first:
            first = False
            self.__write("\n")
        else:
            self.__write(",\n")
        if isinstance(key, ast.Num):
            self.__write_indented("%d: " % (key.n))
        elif not isinstance(key, ast.Str):
            raise ParseError("Only numbers and string literals are allowed in dictionary expressions (line %d)" % (key.lineno))
        else:
            # Quote the key only when it is not a valid JS identifier.
            if self.IDENTIFIER_RE.match(key.s):
                self.__write_indented("%s: " % (key.s))
            else:
                self.__write_indented("\"%s\": " % (key.s))
        self.visit(value)
    self.__indent(False)
    if len(d.keys) > 0:
        self.__write("\n")
        self.__do_indent()
    self.__write("}")
def visit_Subscript(self, s):
    """
    Translate a subscript expression.
    String indices that look like identifiers become dot access; slices
    (load context only) become Array.slice() calls.
    """
    self.visit(s.value)
    if isinstance(s.slice, ast.Index):
        if isinstance(s.slice.value, ast.Str):
            if self.IDENTIFIER_RE.match(s.slice.value.s):
                # obj["name"] -> obj.name when the key is a valid identifier
                self.__write(".%s" % (s.slice.value.s))
                return
        self.__write("[")
        self.visit(s.slice.value)
        self.__write("]")
    elif isinstance(s.slice, ast.Slice):
        if s.slice.step != None:
            raise ParseError("Subscript slice stepping '%s' is not supported (line %d)" % (str(s.slice.__class__.__name__), s.lineno))
        if isinstance(s.ctx, ast.Load):
            # a[x:y] -> a.slice(x, y); a missing lower bound defaults to 0.
            self.__write(".slice(")
            if s.slice.lower != None:
                self.visit(s.slice.lower)
            else:
                self.__write("0")
            if s.slice.upper != None:
                self.__write(", ")
                self.visit(s.slice.upper)
            self.__write(")")
        elif isinstance(s.ctx, ast.Delete):
            raise ParseError("Subscript slice deleting is not supported (line %d)" % (s.lineno))
        else:
            raise ParseError("Subscript slice assignment is not supported (line %d)" % (s.lineno))
    else:
        raise ParseError("Subscript slice type '%s' is not supported (line %d)" % (str(s.slice.__class__.__name__), s.lineno))
def visit_Delete(self, d):
    """
    Translate a delete statement (one `delete` per target, `;`-separated).
    """
    for pos, target in enumerate(d.targets):
        if pos:
            self.__write("; ")
        self.__write("delete ")
        self.visit(target)
def visit_Assign(self, a):
"""
Translate an assignment.
Declares a new local variable if applicable.
"""
is_class = self.__curr_context.type == "Class"
if len(a.targets) > 1:
raise ParseError("Cannot handle assignment unpacking (line %d)" % (a.lineno))
if isinstance(a.targets[0], ast.Name):
if self.__curr_context.declare_variable(a.targets[0].id):
if not is_class: self.__write("var ")
elif is_class:
raise ParseError("Only simple variable assignments are allowed on class scope (line %d)" % (a.targets[0].id, a.lineno))
self.visit(a.targets[0])
if is_class:
self.__write(": ")
else:
self.__write(" = ")
self.visit(a.value)
def visit_AugAssign(self, a):
    """
    Translate an augmented assignment; `x += 1` / `x -= 1` become the
    JavaScript ++/-- shorthand.
    """
    self.visit(a.target)
    if isinstance(a.value, ast.Num) and a.value.n == 1:
        if isinstance(a.op, ast.Add):
            self.__write("++")
            return
        if isinstance(a.op, ast.Sub):
            self.__write("--")
            return
    self.__write(" %s= " % (self.__get_op(a.op)))
    self.visit(a.value)
def visit_Pass(self, p):
    """
    Translate the `pass` statement. Places a comment.
    """
    # JavaScript has no equivalent keyword; a marker comment keeps the
    # statement visible in the output.
    self.__write("/* pass */")
def visit_Continue(self, c):
    """
    Translate the `continue` statement (identical keyword in JavaScript).
    """
    self.__write("continue")
def visit_Break(self, c):
    """
    Translate the `break` statement (identical keyword in JavaScript).
    """
    self.__write("break")
def visit_Attribute(self, a):
    """
    Translate an attribute chain: the value expression followed by
    `.attr`.
    """
    self.visit(a.value)
    self.__write(".%s" % (a.attr))
def visit_If(self, i):
    """
    Translate an if-block.
    Single-statement bodies are written without braces, unless that
    statement is itself a compound statement.
    """
    self.__write("if (")
    self.visit(i.test)
    # Parse body
    braces = True
    if len(i.body) == 1 \
            and not isinstance(i.body[0], ast.If) \
            and not isinstance(i.body[0], ast.While) \
            and not isinstance(i.body[0], ast.For):
        braces = False
    if braces:
        self.__write(") {\n")
    else:
        self.__write(")\n")
    self.__indent()
    for stmt in i.body:
        self.__do_indent()
        self.visit(stmt)
        self.__semicolon(stmt)
    self.__indent(False)
    if braces:
        self.__write_indented("}\n")
    # Parse else
    if len(i.orelse) == 0:
        return
    braces = True
    if len(i.orelse) == 1 \
            and not isinstance(i.orelse[0], ast.If) \
            and not isinstance(i.orelse[0], ast.While) \
            and not isinstance(i.orelse[0], ast.For):
        braces = False
    elseif = False
    if len(i.orelse) == 1 and isinstance(i.orelse[0], ast.If):
        # A lone `if` in the else branch is an `elif` chain: render `else if`.
        elseif = True
        self.__write_indented("else ")
    elif braces:
        self.__write_indented("else {\n")
    else:
        self.__write_indented("else\n")
    if elseif:
        self.visit(i.orelse[0])
    else:
        self.__indent()
        for stmt in i.orelse:
            self.__do_indent()
            self.visit(stmt)
            self.__semicolon(stmt)
        self.__indent(False)
        if braces:
            self.__write_indented("}\n")
def visit_IfExp(self, i):
    """
    Translate an if-expression into the JavaScript ternary operator
    (`test ? body : orelse`).
    """
    self.visit(i.test)
    self.__write(" ? ")
    self.visit(i.body)
    self.__write(" : ")
    self.visit(i.orelse)
def visit_While(self, w):
    """
    Translate a while loop.
    `else` branches are not supported (no JavaScript counterpart).
    """
    if len(w.orelse) > 0:
        raise ParseError("`else` branches of the `while` statement are not supported (line %d)" % (w.lineno))
    self.__write("while (")
    self.visit(w.test)
    # Parse body; braces only around multi-statement bodies.
    if len(w.body) == 1:
        self.__write(")\n")
    else:
        self.__write(") {\n")
    self.__indent()
    for stmt in w.body:
        self.__do_indent()
        self.visit(stmt)
        self.__semicolon(stmt)
    self.__indent(False)
    if len(w.body) > 1:
        self.__write_indented("}\n")
def visit_For(self, f):
    """
    Translate a for loop via the runtime helpers XRange/_Iterator.
    `else` branches are not supported.
    """
    if len(f.orelse) > 0:
        raise ParseError("`else` branches of the `for` statement are not supported (line %d)" % (f.lineno))
    # -- This solution is needed to keep all semantics --
    #
    # for (var __iter0_ = new XRange(start, stop, step); __iter0_.hasNext();) {
    #     var value = __iter0_.next();
    #
    # }
    # delete __iter0_;
    #
    # for (var __iter0_ = new _Iterator(expr); __iter0_.hasNext();)) {
    #     var value = __iter0_.next();
    #     var key = __iter0_.key();
    # }
    # delete __iter0_;
    # NOTE(review): the flag below shadows the builtin `xrange`; kept as-is.
    xrange = False
    iterexpr = None
    keyexpr = None
    valexpr = None
    iteritems = False
    # range()/xrange() calls map to the XRange helper.
    if isinstance(f.iter, ast.Call) and isinstance(f.iter.func, ast.Name) \
            and (f.iter.func.id == "xrange" or f.iter.func.id == "range"):
        xrange = True
    # dict.iteritems() loops bind both key and value from the iterator.
    if isinstance(f.iter, ast.Call) and isinstance(f.iter.func, ast.Attribute) \
            and f.iter.func.attr == "iteritems":
        iterexpr = f.iter.func.value
        if not isinstance(f.target, ast.Tuple) or len(f.target.elts) != 2:
            raise ParseError("Only 2-tuples are allowed as target in conjunction with an iteritems() call on the iterable of the `for` statement (line %d)" % (f.lineno))
        iteritems = True
        keyexpr = f.target.elts[0]
        valexpr = f.target.elts[1]
    else:
        iterexpr = f.iter
        valexpr = f.target
    if isinstance(f.target, ast.Tuple) and not iteritems:
        raise ParseError("Tuple targets can only be used in conjunction with an iteritems() call on the iterable of the `for` statement (line %d)" % (f.lineno))
    # Nested loops get distinct helper names via the iterator counter.
    itervar = "__iter%d_" % (self.__iteratorid)
    self.__iteratorid += 1
    if xrange:
        self.__write("for (var %s = new XRange(" % (itervar))
        self.__parse_args(f.iter)
    else:
        self.__write("for (var %s = new _Iterator(" % (itervar))
        self.__indent()
        self.__indent()
        self.visit(iterexpr)
        self.__indent(False)
        self.__indent(False)
    self.__write("); %s.hasNext();) {\n" % (itervar))
    # Parse body
    self.__indent()
    self.__do_indent()
    if isinstance(valexpr, ast.Name) and self.__curr_context.declare_variable(valexpr.id):
        self.__write("var ")
    self.visit(valexpr)
    self.__write(" = %s.next();\n" % (itervar))
    if keyexpr != None:
        self.__do_indent()
        if isinstance(keyexpr, ast.Name) and self.__curr_context.declare_variable(keyexpr.id):
            self.__write("var ")
        self.visit(keyexpr)
        self.__write(" = %s.key();\n" % (itervar))
    for stmt in f.body:
        self.__do_indent()
        self.visit(stmt)
        self.__semicolon(stmt)
    self.__indent(False)
    self.__write_indented("}\n")
    self.__write_indented("delete %s;\n" % (itervar))
    self.__iteratorid -= 1
def visit_ClassDef(self, c):
    """
    Translates a Python class into a MooTools class.
    This inserts a Class context which influences the translation of
    functions and assignments.
    """
    self.__push_context(c.name)
    # Write docstring
    if len(self.__curr_context.docstring) > 0:
        self.__write_docstring(self.__curr_context.docstring)
        self.__do_indent()
    self.__write("var %s = new Class({\n" % (c.name))
    self.__indent()
    # Special decorators
    decorators = self.__get_decorators(c)
    if decorators.has_key("Implements"):
        # @Implements(...) maps to the MooTools `Implements` property.
        self.__write_indented("Implements: ")
        if len(decorators["Implements"]) == 1:
            self.visit(decorators["Implements"][0])
            self.__write(",\n")
        else:
            self.__write("[")
            first = True
            for expr in decorators["Implements"]:
                if first:
                    first = False
                else:
                    self.__write(", ")
                self.visit(expr)
            self.__write("],\n")
    if not decorators.has_key("Class"):
        import sys
        sys.stderr.write("Warning: The class `%s` of line %d in the input file/string does not have the `Class` decorator!\n" % (c.name, c.lineno))
    # Base classes (other than `object`) become the `Extends` property.
    bases = filter(lambda b: not isinstance(b, ast.Name) or b.id != "object", c.bases)
    if len(bases) > 0:
        self.__write_indented("Extends: ")
        if len(bases) == 1:
            self.visit(bases[0])
            self.__write(",\n")
        else:
            self.__write("[")
            first = True
            for expr in bases:
                if first:
                    first = False
                else:
                    self.__write(", ")
                self.visit(expr)
            self.__write("],\n")
    first = True
    first_docstring = True
    statics = []
    for stmt in c.body:
        # Inline docstrings between members; the very first one was already
        # emitted above as the class docstring.
        if isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Str):
            if first_docstring:
                first_docstring = False
            else:
                if not first:
                    self.__write("\n")
                self.__do_indent()
                self.__write_docstring(stmt.value.s)
            if not first:
                self.__do_indent()
            continue
        # Static methods are hoisted out of the literal and attached after
        # the Class() call.
        if isinstance(stmt, ast.FunctionDef):
            if self.__get_decorators(stmt).has_key("staticmethod"):
                statics.append(stmt)
                continue
        if first:
            first = False
        else:
            self.__write(",\n")
            if isinstance(stmt, ast.FunctionDef):
                self.__write("\n")
        self.__do_indent()
        self.visit(stmt)
    self.__write("\n")
    self.__indent(False)
    self.__write_indented("})")
    for stmt in statics:
        self.__write(";\n")
        self.__do_indent()
        self.visit(stmt)
    self.__pop_context()
def visit_FunctionDef(self, f):
    """
    Translate a Python function into a JavaScript function.
    Depending on the context, it is translated to `var name = function (...)`
    or `name: function (...)`.
    """
    self.__push_context(f.name)
    is_method = self.__curr_context.type == "Method"
    # Special decorators
    decorators = self.__get_decorators(f)
    is_static = decorators.has_key("staticmethod")
    # Write docstring
    if len(self.__curr_context.docstring) > 0:
        self.__write_docstring(self.__curr_context.docstring)
        self.__do_indent()
    if is_method:
        if is_static:
            # Static methods are attached directly to the class object.
            self.__write("%s.%s = function (" % (self.__curr_context.class_context().name, f.name))
        elif f.name == "__init__":
            # Python constructors become MooTools `initialize`.
            self.__write("initialize: function (")
        else:
            self.__write("%s: function (" % (f.name))
    else:
        self.__write("var %s = function (" % (f.name))
    # Parse arguments; `self` is dropped for non-static methods.
    self.__parse_args(f.args, is_method and not is_static)
    self.__write(") {\n")
    # Parse defaults
    self.__indent()
    self.__parse_defaults(f.args)
    # Parse body
    for stmt in f.body:
        if isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Str):
            continue # Skip docstring
        if isinstance(stmt, ast.Global): # The `global` statement is invisible
            self.visit(stmt)
            continue
        self.__do_indent()
        self.visit(stmt)
        self.__semicolon(stmt)
    self.__pop_context()
    self.__indent(False)
    self.__write_indented("}")
def generic_visit(self, node):
    # Any node type without an explicit handler cannot be translated.
    raise ParseError("Could not parse node type '%s' (line %d)" % (str(node.__class__.__name__), node.lineno))
def __parse_args(self, args, strip_first = False):
    """
    Translate a list of arguments.
    With `strip_first`, the first plain-name argument (i.e. `self`) is
    omitted from the output.
    """
    first = True
    for arg in args.args:
        if first:
            if strip_first and isinstance(arg, ast.Name):
                strip_first = False
                continue
            first = False
        else:
            self.__write(", ")
        self.visit(arg)
    if getattr(args, "vararg", None) != None:
        raise ParseError("Variable arguments on function definitions are not supported")
def __parse_defaults(self, args):
    """
    Translate the default-argument list into `if (!$defined(x)) x = ...;`
    guards at the top of the function body. Defaults align with the last
    len(defaults) positional arguments.
    """
    offset = len(args.args) - len(args.defaults)
    for i, default in enumerate(args.defaults):
        self.__write_indented("if (!$defined(")
        self.visit(args.args[offset + i])
        self.__write(")) ")
        self.visit(args.args[offset + i])
        self.__write(" = ")
        self.visit(default)
        self.__write(";\n")
def __get_decorators(self, stmt):
    """
    Return a dictionary of decorators and their parameters.
    Functions may only carry @staticmethod; classes may carry @Class and
    @Implements(...) (from pycow.decorators).
    """
    decorators = {}
    if isinstance(stmt, ast.FunctionDef):
        for dec in stmt.decorator_list:
            if isinstance(dec, ast.Name):
                if dec.id == "staticmethod":
                    decorators["staticmethod"] = []
                    continue
            raise ParseError("This function decorator is not supported. Only @staticmethod is supported for now. (line %d)" % (stmt.lineno))
    else:
        for dec in stmt.decorator_list:
            if isinstance(dec, ast.Call) and isinstance(dec.func, ast.Name):
                if dec.func.id == "Implements":
                    decorators["Implements"] = dec.args
                    continue
            if isinstance(dec, ast.Name) and dec.id == "Class":
                decorators["Class"] = []
                continue
            raise ParseError("This class decorator is not supported. Only decorators of pycow.decorators are supported (line %d)" % (stmt.lineno))
    return decorators
def __get_op(self, op):
    """
    Translates an operator.
    Returns only the JavaScript operator characters from OP_MAP.
    """
    return self.OP_MAP[op.__class__.__name__][0]
def __get_op_cpa(self, op):
    """
    Get operator chars, precedence and associativity as a 3-tuple.
    """
    return self.OP_MAP[op.__class__.__name__]
def __get_expr_pa(self, expr):
    """
    Get the precedence and associativity of an expression.
    """
    if isinstance(expr, ast.Expr):
        expr = expr.value
    name = expr.__class__.__name__
    if name in ("BoolOp", "BinOp", "UnaryOp"):
        return self.__get_op_cpa(expr.op)[1:]
    elif name in ("Lambda", "Dict", "List", "Num", "Str", "Name"):
        return (1, False)
    elif name == "IfExp":
        return (15, False)
    elif name in ("Attribute", "Subscript"):
        return (1, True)
    elif name in ("Call", "Repr"):
        return (2, True)
    elif name == "Compare":
        return (8, True)
    # NOTE(review): any other node type falls through and implicitly returns
    # None, which makes callers that unpack the result raise a TypeError.
def __indent(self, updown = True):
if updown:
self.__ilevel += 1
else:
self.__ilevel -= 1
def __write(self, s):
    # Raw write to the output stream, no indentation added.
    self.__out.write(s)
def __write_indented(self, s):
    # Write `s` prefixed with the current indentation.
    self.__out.write(self.__ichars * self.__ilevel + s)
def __write_docstring(self, s):
    """Write a docstring as a JavaDoc-style block comment."""
    self.__out.write("/**\n")
    gotnl = False
    first = True
    for line in s.split("\n"):
        line = line.strip()
        if line == "":
            gotnl = True
        else:
            # Collapse runs of blank lines into a single ` *` separator line.
            if gotnl and not first:
                self.__write_indented(" *\n")
            gotnl = False
            first = False
            self.__write_indented(" * %s\n" % (line))
    self.__write_indented(" */\n")
def __do_indent(self):
    # Emit only the current indentation (start of a fresh output line).
    self.__out.write(self.__ichars * self.__ilevel)
def __push_context(self, identifier):
    """
    Walk context up.
    Enters the child context named `identifier`; raises ParseError when the
    first pass did not record such a child.
    """
    old_context = self.__curr_context
    self.__curr_context = self.__curr_context.child(identifier)
    if self.__curr_context == None:
        raise ParseError("Lost context on accessing '%s' from '%s (%s)'" % (identifier, old_context.name, old_context.type))
def __pop_context(self):
    """
    Walk context down (back to the parent context).
    """
    self.__curr_context = self.__curr_context.parent
def __semicolon(self, stmt, no_newline = False):
"""
Write a semicolon (and newline) for all statements except the ones
in NO_SEMICOLON.
"""
if stmt.__class__.__name__ not in self.NO_SEMICOLON:
if no_newline:
self.__write(";")
else:
self.__write(";\n")
def __build_namespace(self, namespace):
namespace = namespace.split(".")
self.__write("window.%s = $defined(window.%s) ? window.%s : {};\n" % (namespace[0], namespace[0], namespace[0]))
for i in xrange(1, len(namespace) - 1):
self.__write("%s.%s = $defined(%s.%s) ? %s.%s : {};\n" % (namespace[i-1], namespace[0], namespace[i-1], namespace[0], namespace[i-1], namespace[0]))
self.__write("\n")
def translate_string(input, indent = "\t", namespace = "", warnings = True):
    """
    Translate a string of Python code to JavaScript.
    Set the `indent` parameter, if you want an other indentation than tabs.
    Set the `namespace` parameter, if you want to enclose the code in a namespace.
    """
    # NOTE(review): the parameter name `input` shadows the builtin; kept for
    # backward compatibility with keyword callers.
    moo = PyCow(indent=indent, namespace=namespace, warnings=warnings)
    moo.visit(ast.parse(input, "(string)"))
    return moo.output()
def translate_file(in_filename, out_filename = "", indent = "\t", namespace = "", warnings = True):
    """
    Translate a Python file to JavaScript.
    If `out_filename` is not given, it will be set to in_filename + ".js".
    Set the `indent` parameter, if you want an other indentation than tabs.
    Set the `namespace` parameter, if you want to enclose the code in a namespace.
    """
    if out_filename == "":
        out_filename = in_filename + ".js"
    # Read the whole source first; the original leaked the input handle by
    # never closing it.
    infile = open(in_filename, "r")
    try:
        source = infile.read()
    finally:
        infile.close()
    outfile = open(out_filename, "w")
    try:
        outfile.write("/* This file was generated with PyCow - the Python to JavaScript translator */\n\n")
        moo = PyCow(outfile, indent, namespace, warnings)
        moo.visit(ast.parse(source, in_filename))
    finally:
        outfile.close()
| p2k/PyCow | pycow/pycow.py | Python | apache-2.0 | 34,344 | [
"VisIt"
] | 99fd0af697ed14d7ac051fe86515ee383f78e076225be8d982d1578d193a751e |
'''
A tool to annotate and print variants in tabular format
Author: Khalid Mahmood
Contact: khalid.mahmood@unimelb.edu.au
Copyright: 2015
'''
#!/usr/bin/python
from utils import findlist
from annotations import getTabixVal,getTabixValCondel,getTabixBool
from annotations import getfathmm,adjust_scores
import sys
import os
import argparse
import getopt
import vcf
import re
import array
import pysam
#class Error(Exception):
# """Base-class for exceptions in this module."""
#class UsageError(Error):
# def __init__(self, msg):
# self.msg = msg
def getcadd(cadd_tbx, current_chr, current_pos, current_ref, current_alt):
    """
    Look up CADD annotations for one SNV in a tabix-indexed CADD file.

    Returns a 4-tuple (cadd_phred, cadd_priPhCons, cadd_GerpRS, cadd_polysift);
    fields are '' when no row matches ref/alt, and cadd_polysift is "del"
    when the row's PolyPhen/SIFT columns predict a deleterious change.
    """
    # The CADD tabix data is stored without a "chr" prefix. The original
    # used str.translate(None, 'chr'), which deletes the letters c/h/r
    # anywhere in the string and is Python-2-only.
    if current_chr.startswith("chr"):
        current_chr = current_chr[3:]
    data = cadd_tbx.fetch(current_chr, current_pos - 1, current_pos)
    cadd_phred, cadd_priPhCons, cadd_GerpRS, cadd_polysift = '', '', '', ''
    if data is not None:
        for row in data:
            row_info = row.split("\t")
            cadd_ref = row_info[2]
            cadd_alt = row_info[4]
            if cadd_ref == current_ref and cadd_alt == current_alt:
                cadd_phred = row_info[115]
                cadd_priPhCons = row_info[18]
                cadd_GerpRS = row_info[26]
                # Collapse the PolyPhen/SIFT columns into a single flag.
                if "damaging" in row_info[110] or "deleterious" in row_info[112]:
                    cadd_polysift = "del"
                break
    else:
        cadd_phred = 'NA'
    return cadd_phred, cadd_priPhCons, cadd_GerpRS, \
        cadd_polysift
# return allele frequency given the allele count and assuming allele number = (total allele number/2)
def getAF(ac, an):
if(float(an)>0.0):
af_temp = float(ac) / an
#newlist = round(af_temp, 8)
newlist = af_temp
else:
newlist = 0.0
return str(newlist)
# return index of the current alt allele from exac multiallelic data
def getexacallele(exac_tbx, current_chr, current_pos, current_ref, current_alt):
current_chr = current_chr.translate(None, 'chr')
data = exac_tbx.fetch(current_chr, current_pos-1, current_pos)
index = -2
row = 0
found = False
exac_filter_return = ""
if data:
for exac_row in data:
exac_pos = int(exac_row.split("\t")[1])
exac_ref_temp = exac_row.split("\t")[3]
exac_alt_temp = exac_row.split("\t")[4]
exac_filter = exac_row.split("\t")[6]
exac_alt_row = exac_alt_temp.split(",")
exac_ref_row = exac_ref_temp.split(",")
#if(current_pos == exac_pos and current_ref in exac_ref_row and \
if(current_pos == exac_pos and current_alt in exac_alt_row ):
index = exac_alt_row.index(current_alt)
exac_filter_return = exac_filter
row += 1
break
else:
index = -2
#print "Row = " + str(row) + " " + str(found)
return index, exac_filter_return
# MAIN
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--vcf", type=str, dest="vcf", help="Input variant file (vcf)", required=True)
parser.add_argument("-o", "--output", type=str, dest="out", help="Output file (tabular)", required=True)
parser.add_argument("-X", "--exac", type=str, dest="exac_af_threshold", help="ExAC All threshold",
default=100, required=False)
parser.add_argument("-XE", "--exacEUR", type=str, dest="exac_eur_threshold", help="ExAC European threshold",
default=100, required=False)
parser.add_argument("-v", "--verbosity", action="count", default=0)
args = parser.parse_args()
outputfile = open(args.out, "w")
if args.verbosity >= 2:
print "{} to the power {} equals {}".format(args.v, args.o, answer)
elif args.verbosity >= 1:
print "{}^{} == {}".format(args.x, args.y, answer)
#else:
# print "Starting ..."
cadd_tbx = pysam.TabixFile("data/whole_genome_SNVs_inclAnno.tsv.gz")
cadd_indel_tbx = pysam.TabixFile("data/InDels_inclAnno.tsv.gz")
fathmm_tbx = pysam.TabixFile("data/dbNSFP.fathmmW.bed.gz")
exac_tbx = pysam.TabixFile("data/ExAC.r0.3.sites.vep.vcf.gz")
map_tbx = pysam.TabixFile("data/wgEncodeCrgMapabilityAlign100mer.bed.gz")
pfam_tbx = pysam.TabixFile("data/hg19.pfam.sorted.bed.gz")
prom_tbx = pysam.TabixFile("data/hg19.promoter.sorted.bed.gz")
enh_tbx = pysam.TabixFile("data/hg19.enhancer.sorted.bed.gz")
rmsk_tbx = pysam.TabixFile("data/hg19.rmsk.counts.bed.gz")
cpg_tbx = pysam.TabixFile("data/hg19.CpG.bed.gz")
clin_tbx = pysam.TabixFile("data/clinvar_20140303.bed.gz")
gwas_tbx = pysam.TabixFile("data/clinvar_20140303.bed.gz")
condel_tbx = pysam.TabixFile("data/fannsdb.small.bed.gz")
outputfile.write("chr\tpos\tid\tref\talt\tannotation\tgene_name\tlof" \
#"\texon\taa_pos\tpoly/sift\tSIFT\tPOLYPHEN\tAF\tGMAF\t1kgEMAF\tESPEMAF\t" \
"\texon\taa_pos\tSIFT\tPOLYPHEN\tCONDEL\tAF\tGMAF\t1kgEMAF\tESPEMAF\t" \
#"HETEUR\tHOMEUR\t
"ExAC_AF\tExAC_EAS\tExAC_NFE\tExAC_FIN\tExAC_SAS\tExAC_AFR\tExAC_AMR\tExAC_OTH\t" \
"CADD\tmaxCADD\tpriPhCons\tGerpRS\tFATHMM\t" \
"Mapability\tPromoter\tEnhancer\tRepeat\tPfam\t" \
"CPG\tClinVar\tGWAS\tMNP_FLAG\tExAC_FLAG\n")
vcf_reader = vcf.Reader(open(args.vcf, 'r'))
for record in vcf_reader:
current_chr = record.CHROM
current_id = record.ID
current_pos = record.POS
current_ref = record.REF
current_alt = ','.join(str(v) for v in record.ALT)
#current_alt_array = current_alt.split(","
# current_af = ','.join(str(v) for v in record.INFO['AF'])
current_het_nfe = ''
current_hom_nfe = ''
current_exac_af,current_exac_eas,current_exac_nfe = 0.0,0.0,0.0
current_exac_fin,current_exac_sas,current_exac_afr = 0.0,0.0,0.0
current_exac_amr,current_exac_oth = 0.0,0.0
exac_flag = "."
# check if the variant is in ExAC annotated
if any("ExAC" in s for s in record.INFO):
len_ac_adj = len(record.INFO['ExAC_AC_Adj'])
len_ac_eas = len(record.INFO['ExAC_AC_EAS'])
len_ac_nfe = len(record.INFO['ExAC_AC_NFE'])
len_ac_fin = len(record.INFO['ExAC_AC_FIN'])
len_ac_sas = len(record.INFO['ExAC_AC_SAS'])
len_ac_afr = len(record.INFO['ExAC_AC_AFR'])
len_ac_amr = len(record.INFO['ExAC_AC_AMR'])
len_ac_oth = len(record.INFO['ExAC_AC_OTH'])
current_exac_index, exac_filter = getexacallele(exac_tbx, current_chr, current_pos, current_ref, current_alt)
exac_flag = "True" + ":" + exac_filter
#print current_chr + "\t" + str(current_id) + "\t" + current_ref + ":" + current_alt + str(record.INFO['ExAC_AN_Adj']) + "\t" \
# + str(record.INFO['ExAC_AN_Adj'] ) + str(current_exac_index)
if(current_exac_index>-2):
current_het_nfe = ','.join(str(v) for v in record.INFO['ExAC_AC_Het'])
current_hom_nfe = ','.join(str(v) for v in record.INFO['ExAC_AC_Hom'])
if current_exac_index < len_ac_adj:
current_exac_af = getAF(float(record.INFO['ExAC_AC_Adj'][current_exac_index]),float(record.INFO['ExAC_AN_Adj'][-1]))
else:
current_exac_af = getAF(float(record.INFO['ExAC_AC_Adj'][-1]),float(record.INFO['ExAC_AN_Adj'][-1]))
if current_exac_index < len_ac_adj:
current_exac_eas = getAF(float(record.INFO['ExAC_AC_EAS'][current_exac_index]),float(record.INFO['ExAC_AN_EAS'][-1]))
else:
current_exac_eas = getAF(float(record.INFO['ExAC_AC_EAS'][-1]),float(record.INFO['ExAC_AN_EAS'][-1]))
if current_exac_index < len_ac_adj:
current_exac_nfe = getAF(float(record.INFO['ExAC_AC_NFE'][current_exac_index]),float(record.INFO['ExAC_AN_NFE'][-1]))
else:
current_exac_nfe = getAF(float(record.INFO['ExAC_AC_NFE'][-1]),float(record.INFO['ExAC_AN_NFE'][-1]))
if current_exac_index < len_ac_adj:
current_exac_fin = getAF(float(record.INFO['ExAC_AC_FIN'][current_exac_index]),float(record.INFO['ExAC_AN_FIN'][-1]))
else:
current_exac_fin = getAF(float(record.INFO['ExAC_AC_FIN'][-1]),float(record.INFO['ExAC_AN_FIN'][-1]))
if current_exac_index < len_ac_adj:
current_exac_sas = getAF(float(record.INFO['ExAC_AC_SAS'][current_exac_index]),float(record.INFO['ExAC_AN_SAS'][-1]))
else:
current_exac_sas = getAF(float(record.INFO['ExAC_AC_SAS'][-1]),float(record.INFO['ExAC_AN_SAS'][-1]))
if current_exac_index < len_ac_adj:
current_exac_afr = getAF(float(record.INFO['ExAC_AC_AFR'][current_exac_index]),float(record.INFO['ExAC_AN_AFR'][-1]))
else:
current_exac_afr = getAF(float(record.INFO['ExAC_AC_AFR'][-1]),float(record.INFO['ExAC_AN_AFR'][-1]))
if current_exac_index < len_ac_adj:
current_exac_amr = getAF(float(record.INFO['ExAC_AC_AMR'][current_exac_index]),float(record.INFO['ExAC_AN_AMR'][-1]))
else:
current_exac_amr = getAF(float(record.INFO['ExAC_AC_AMR'][-1]),float(record.INFO['ExAC_AN_AMR'][-1]))
if current_exac_index < len_ac_adj:
current_exac_oth = getAF(float(record.INFO['ExAC_AC_OTH'][current_exac_index]),float(record.INFO['ExAC_AN_OTH'][-1]))
else:
current_exac_oth = getAF(float(record.INFO['ExAC_AC_OTH'][-1]),float(record.INFO['ExAC_AN_OTH'][-1]))
else:
current_exac_af,current_exac_eas,current_exac_nfe = 0.0,0.0,0.0
current_exac_fin,current_exac_sas,current_exac_afr = 0.0,0.0,0.0
current_exac_amr,current_exac_oth = 0.0,0.0
exac_flag = "False"
# CHECK INDEL AND MNP
#print current_ref + ":" + current_alt
indel = True if ((len(current_ref) > 1 or len(current_alt) > 1) and \
("," not in current_ref and "," not in current_alt)) else False
# mnp = map(labmda x, len(record.ALT)
mnp = True if len(record.ALT) > 1 else False
mnpflag = "%s" % mnp
# VEP
current_sift, current_polyphen, current_consequence, current_LOF = '','','',''
current_sift_score, current_polyphen_score = 0.9999, 0.0001
current_gmaf, current_eur_maf, current_ea_maf = '','',''
current_feature, current_feature_type = '',''
if "CSQ" in record.INFO:
csq = record.INFO['CSQ'][0].split('|')
current_feature, current_feature_type = csq[2], csq[3]
current_consequence = csq[4]
#print csq[24] + "-" + csq[25]
current_sift = csq[23]
current_polyphen = csq[24]
if ( len(current_sift) > 0):
current_sift_score = re.findall(r'[0-9.]+', current_sift)[0]
if ( len(current_polyphen) > 0):
current_polyphen_score = re.findall(r'[0-9.]+', current_polyphen)[0]
current_gmaf, current_eur_maf, current_ea_maf = csq[31], csq[35], csq[37]
#current_LOF = csq[48]
else:
current_feature, current_feature_type, current_consequence = '','',''
current_sift, current_polyphen, current_eur_maf = '','',''
current_ea_maf, current_LOF, current_gmaf = '','',''
# SnpEff
ann = record.INFO['ANN'][0].split('|')
annotation = ann[1]
# GENE INFORMATION
current_gene, current_exon, current_aa_pos = ann[3], ann[8], ann[10]
#CADD SNP
cadd_phred_temp = ''
cadd_phred = ''
indel_str= ''
mnp_cadds = []
cadd_scores = []
fathmm_score = 0.0
for alt in record.ALT:
if(len(current_ref) == 1 and len(alt) == 1):
(cadd_phred_temp, cadd_priPhCons, cadd_GerpRS, cadd_polysift) = \
getcadd(cadd_tbx, current_chr, current_pos, current_ref, alt)
mnp_cadds.append(str(alt) + ":" + cadd_phred_temp)
cadd_scores.append(cadd_phred_temp)
# GET FATHMM SCORE
fathmm_score = getfathmm(fathmm_tbx, current_chr, current_pos, current_ref, alt)
else: # IF VAR IS AN INDEL
(cadd_phred_temp, cadd_priPhCons, cadd_GerpRS, cadd_polysift) = \
getcadd(cadd_indel_tbx, current_chr, current_pos, current_ref, alt)
mnp_cadds.append(str(alt) + ":" + cadd_phred_temp)
cadd_scores.append(cadd_phred_temp)
cadd_phred = ",".join(mnp_cadds)
# indel_str = "."
# INSERT OTHER TABIX BASED ANNOTATORS BELOW
current_mapability = getTabixVal(map_tbx, current_chr, current_pos, current_ref, current_alt)
current_pfam = getTabixVal(pfam_tbx, current_chr, current_pos, current_ref, current_alt)
current_promoter = getTabixBool(prom_tbx, current_chr, current_pos, current_ref, current_alt)
current_enhancer = getTabixBool(enh_tbx, current_chr, current_pos, current_ref, current_alt)
current_rmsk = getTabixBool(rmsk_tbx, current_chr, current_pos, current_ref, current_alt)
current_cpg = getTabixBool(cpg_tbx, current_chr, current_pos, current_ref, current_alt)
current_clinvar = getTabixVal(clin_tbx, current_chr, current_pos, current_ref, current_alt)
current_gwas = getTabixVal(gwas_tbx, current_chr, current_pos, current_ref, current_alt)
current_condel = getTabixValCondel(condel_tbx, current_chr, current_pos, current_ref, current_alt)
current_AF = record.INFO['AF']
# RESCORE SCORES FOR PROTEIN TRUNCATING MUTATIONS
(current_condel, current_sift, current_polyphen, fathmm_score) = adjust_scores(current_condel, current_sift, \
current_polyphen, fathmm_score, annotation)
out_str = [ current_chr, str(current_pos), str(current_id), current_ref, current_alt,
annotation, current_gene, current_LOF, current_exon,
current_aa_pos, str(current_sift_score), str(current_polyphen_score), str(current_condel), str(current_AF),
current_gmaf, current_eur_maf, current_ea_maf,
#current_het_nfe, current_hom_nfe,
str(current_exac_af), str(current_exac_eas), str(current_exac_nfe), str(current_exac_fin),
str(current_exac_sas), str(current_exac_afr), str(current_exac_amr), str(current_exac_oth),
cadd_phred, str(max(cadd_scores)), cadd_priPhCons, cadd_GerpRS,
str(fathmm_score), str(current_mapability), current_promoter, current_enhancer,
current_rmsk, current_pfam, current_cpg, current_clinvar, current_gwas,
mnpflag, exac_flag]
out_str = [x or '.' for x in out_str]
# filters ExAC ALL
if( 'PASS' in exac_flag ): # IF IT IS A PASS ExAC SITE - FILTER ON AF
if( float(current_exac_af) <= float(args.exac_af_threshold) ):
outputfile.write("\t".join(out_str))
outputfile.write("\n")
#else:
# outputfile.write("- ")
# outputfile.write("\t".join(out_str))
# outputfile.write("\n")
else: # THE EXAC CALL IS NOT RELIABLE THEREFORE CANNNOT FILTER ON AF
outputfile.write("\t".join(out_str))
outputfile.write("\n")
outputfile.close()
# Script entry point: forward the raw command-line argument vector to main().
if __name__ == "__main__":
    main(sys.argv)
| khalidm/VarPub | src/readvcf2.py | Python | gpl-2.0 | 15,746 | [
"pysam"
] | 563d7c6e6ed69b971c21ee9d9d62d968f254cfdb80ec0b26f565a4de02ed1a89 |
from __future__ import annotations
import os
import pytest
from cctbx import uctbx
from dials.tests.algorithms.indexing.test_index import run_indexing
@pytest.mark.xfail
def test_run(dials_regression, tmpdir):
    """
    Check that indexing and refinement give equivalent results for the same
    phi-scan data described by two goniometer models: the legacy one
    ("datablock_old.json", composed rotation stored as the fixed rotation)
    and the current one ("datablock.json", rotation split between the datum
    axis and the setting rotation).
    """
    expected_unit_cell = uctbx.unit_cell(
        (11.624, 13.550, 30.103, 89.964, 93.721, 90.132)
    )
    # Expected positional (X, Y, in mm) and angular (phi) RMSDs.
    expected_rmsds = (0.039, 0.035, 0.002)

    experiments_old = os.path.join(
        dials_regression, "indexing_test_data", "phi_scan", "datablock_old.json"
    )
    experiments_new = os.path.join(
        dials_regression, "indexing_test_data", "phi_scan", "datablock.json"
    )
    strong_pickle = os.path.join(
        dials_regression, "indexing_test_data", "phi_scan", "strong.pickle"
    )

    from dxtbx.serialize import load

    imageset_old = load.experiment_list(
        experiments_old, check_format=False
    ).imagesets()[0]
    imageset_new = load.experiment_list(
        experiments_new, check_format=False
    ).imagesets()[0]

    gonio_old = imageset_old.get_goniometer()
    gonio_new = imageset_new.get_goniometer()

    # Old model: the full composed rotation lives in the fixed rotation, the
    # setting rotation is the identity.
    assert gonio_old.get_rotation_axis() == pytest.approx(
        (0.7497646259807715, -0.5517923303436749, 0.36520984351713554)
    )
    assert gonio_old.get_setting_rotation() == pytest.approx(
        (1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
    )
    assert gonio_old.get_fixed_rotation() == pytest.approx(
        (
            0.7497646259807748,
            -0.20997265900532208,
            -0.6275065641872948,
            -0.5517923303436731,
            0.3250014637526764,
            -0.7680490041218182,
            0.3652098435171313,
            0.9221092836691605,
            0.12781329809272568,
        )
    )

    # New model: same effective rotation axis, but the datum axis is (1, 0, 0)
    # and the composed rotation moved into the setting rotation instead.
    assert gonio_new.get_rotation_axis() == pytest.approx(gonio_old.get_rotation_axis())
    assert gonio_new.get_rotation_axis_datum() == pytest.approx((1, 0, 0))
    assert gonio_new.get_setting_rotation() == pytest.approx(
        (
            0.7497646259807705,
            -0.20997265900532142,
            -0.6275065641873,
            -0.5517923303436786,
            0.3250014637526763,
            -0.768049004121814,
            0.3652098435171315,
            0.9221092836691607,
            0.12781329809272335,
        )
    )
    assert gonio_new.get_fixed_rotation() == pytest.approx(
        (1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
    )

    # Index the same strong spots against both descriptions of the geometry.
    result_old = run_indexing(
        strong_pickle,
        experiments_old,
        tmpdir,
        extra_args=[],
        expected_unit_cell=expected_unit_cell,
        expected_rmsds=expected_rmsds,
        expected_hall_symbol=" P 1",
    )

    result_new = run_indexing(
        strong_pickle,
        experiments_new,
        tmpdir,
        extra_args=[],
        expected_unit_cell=expected_unit_cell,
        expected_rmsds=expected_rmsds,
        expected_hall_symbol=" P 1",
    )

    # Both runs should converge to the same RMSDs and unit cell.
    assert result_old.rmsds == pytest.approx(result_new.rmsds, abs=1e-6)

    assert result_old.experiments[
        0
    ].crystal.get_unit_cell().parameters() == pytest.approx(
        result_new.experiments[0].crystal.get_unit_cell().parameters(), abs=1e-6
    )

    # Now test refinement gradients are correct
    from dxtbx.model.experiment_list import Experiment, ExperimentList

    old_exps = ExperimentList(
        [
            Experiment(
                beam=imageset_old.get_beam(),
                detector=imageset_old.get_detector(),
                goniometer=gonio_old,
                scan=imageset_old.get_scan(),
                crystal=result_old.experiments[0].crystal,
                imageset=None,
            )
        ]
    )
    new_exps = ExperimentList(
        [
            Experiment(
                beam=imageset_new.get_beam(),
                detector=imageset_new.get_detector(),
                goniometer=gonio_new,
                scan=imageset_new.get_scan(),
                crystal=result_new.experiments[0].crystal,
                imageset=None,
            )
        ]
    )

    from libtbx.phil import parse

    from dials.algorithms.refinement.refiner import phil_scope

    # Default refinement parameters.
    params = phil_scope.fetch(source=parse("")).extract()

    from dials.algorithms.refinement.refiner import RefinerFactory

    refiner_old = RefinerFactory.from_parameters_data_experiments(
        params, result_old.indexed_reflections, old_exps
    )
    refiner_new = RefinerFactory.from_parameters_data_experiments(
        params, result_new.indexed_reflections, new_exps
    )

    # Analytical gradients should be approximately the same in either case
    an_grads_old = refiner_old._pred_param.get_gradients(refiner_old.get_matches())
    an_grads_new = refiner_new._pred_param.get_gradients(refiner_new.get_matches())
    for g1, g2 in zip(an_grads_old, an_grads_new):
        assert g1["dX_dp"] == pytest.approx(g2["dX_dp"], abs=1.0e-6)
        assert g1["dY_dp"] == pytest.approx(g2["dY_dp"], abs=1.0e-6)
        assert g1["dphi_dp"] == pytest.approx(g2["dphi_dp"], abs=1.0e-6)

    # Analytical gradients should be approximately equal to finite difference
    # gradients in either case
    fd_grads_old = calc_fd_grads(refiner_old)
    for g1, g2 in zip(fd_grads_old, an_grads_old):
        assert g1["dX_dp"] == pytest.approx(g2["dX_dp"], abs=5.0e-6)
        assert g1["dY_dp"] == pytest.approx(g2["dY_dp"], abs=5.0e-6)
        assert g1["dphi_dp"] == pytest.approx(g2["dphi_dp"], abs=5.0e-6)

    fd_grads_new = calc_fd_grads(refiner_new)
    for g1, g2 in zip(fd_grads_new, an_grads_new):
        assert g1["dX_dp"] == pytest.approx(g2["dX_dp"], abs=5.0e-6)
        assert g1["dY_dp"] == pytest.approx(g2["dY_dp"], abs=5.0e-6)
        assert g1["dphi_dp"] == pytest.approx(g2["dphi_dp"], abs=5.0e-6)
def calc_fd_grads(refiner):
    """
    Compute central finite-difference gradients of the predicted reflection
    positions (X, Y, phi) with respect to every refinement parameter.

    Each parameter in turn is perturbed by -delta/2 and +delta/2, predictions
    are regenerated, and the difference quotient is taken.  The refiner's
    parameter vector is restored before returning.

    Returns a list of dicts with keys "dX_dp", "dY_dp" and "dphi_dp", one dict
    per parameter, matching the layout of the analytical gradients.
    """
    delta = 1.0e-7
    params = refiner._pred_param.get_param_vals()

    def _predicted_positions():
        # Push the current parameter vector into the model, re-predict, and
        # return a snapshot of the predicted (X, Y, phi) positions.
        refiner._pred_param.set_param_vals(params)
        refiner._target.predict()
        return refiner.get_matches()["xyzcal.mm"].deep_copy()

    gradients = []
    for idx, original in enumerate(params):
        # Step half a delta backwards, then a full delta forwards, so the two
        # states straddle the original value symmetrically.
        params[idx] -= delta / 2.0
        minus_state = _predicted_positions()
        params[idx] += delta
        plus_state = _predicted_positions()
        params[idx] = original

        dx, dy, dphi = (plus_state - minus_state).parts()
        dx /= delta
        dy /= delta
        dphi /= delta
        gradients.append({"dX_dp": dx, "dY_dp": dy, "dphi_dp": dphi})

    # return to the initial state
    refiner._pred_param.set_param_vals(params)
    return gradients
| dials/dials | tests/algorithms/indexing/test_phi_scan.py | Python | bsd-3-clause | 6,645 | [
"CRYSTAL"
] | 789664ee9c2ffe88e1695bb696e4303c074e4b21ac88a0e6a6139ca6452cd662 |
from ase.data.g2_1_ref import convert
from ase.data.g2_1_ref import atomization_vasp
from ase.data.g2_1_ref import diatomic
def _columns(table):
    # Extract one results dict from a raw data table: column 0 holds the
    # reference values, column 2 the PBE results, column 4 the PBE0 results.
    return {'reference': convert(table, 0),
            'PBE': convert(table, 2),
            'PBE0': convert(table, 4)}


# Reference and DFT (PBE, PBE0) data for the G2-1 molecule set.
info = {'atomization energy': _columns(atomization_vasp),
        'bondlength': _columns(diatomic)}
| grhawk/ASE | tools/ase/data/g2_1_ref_g03.py | Python | gpl-2.0 | 599 | [
"ASE"
] | 909080e7b14dbd9be1d6b8e03e5052008e21ac20ca61aae2f46d4fc8dc2cfd0d |
# $Id: nodes.py 6011 2009-07-09 10:00:07Z gbrandl $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Docutils document tree element class library.
Classes in CamelCase are abstract base classes or auxiliary classes. The one
exception is `Text`, for a text (PCDATA) node; uppercase is used to
differentiate from element classes. Classes in lower_case_with_underscores
are element classes, matching the XML element generic identifiers in the DTD_.
The position of each node (the level at which it can occur) is significant and
is represented by abstract base classes (`Root`, `Structural`, `Body`,
`Inline`, etc.). Certain transformations will be easier because we can use
``isinstance(node, base_class)`` to determine the position of the node in the
hierarchy.
.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import warnings
import types
import unicodedata
# ==============================
# Functional Node Base Classes
# ==============================
class Node(object):

    """Abstract base class of nodes in a document tree."""

    parent = None
    """Back-reference to the Node immediately containing this Node."""

    document = None
    """The `document` node at the root of the tree containing this Node."""

    source = None
    """Path or description of the input source which generated this Node."""

    line = None
    """The line number (1-based) of the beginning of this Node in `source`."""

    def __nonzero__(self):
        """
        Node instances are always true, even if they're empty.  A node is more
        than a simple container.  Its boolean "truth" does not depend on
        having one or more subnodes in the doctree.

        Use `len()` to check node length.  Use `None` to represent a boolean
        false value.
        """
        return True

    if sys.version_info < (3,):
        # on 2.x, str(node) will be a byte string with Unicode
        # characters > 255 escaped; on 3.x this is no longer necessary
        def __str__(self):
            return unicode(self).encode('raw_unicode_escape')

    def asdom(self, dom=None):
        """Return a DOM **fragment** representation of this Node."""
        if dom is None:
            import xml.dom.minidom as dom
        domroot = dom.Document()
        return self._dom_node(domroot)

    def pformat(self, indent=' ', level=0):
        """
        Return an indented pseudo-XML representation, for test purposes.

        Override in subclasses.
        """
        raise NotImplementedError

    def copy(self):
        """Return a copy of self."""
        raise NotImplementedError

    def deepcopy(self):
        """Return a deep copy of self (also copying children)."""
        raise NotImplementedError

    def setup_child(self, child):
        """
        Link `child` into this tree: set its parent to self and, if this node
        is attached to a document, propagate the document reference and fill
        in missing source/line information from the document's current state.
        """
        child.parent = self
        if self.document:
            child.document = self.document
            if child.source is None:
                child.source = self.document.current_source
            if child.line is None:
                child.line = self.document.current_line

    def walk(self, visitor):
        """
        Traverse a tree of `Node` objects, calling the
        `dispatch_visit()` method of `visitor` when entering each
        node.  (The `walkabout()` method is similar, except it also
        calls the `dispatch_departure()` method before exiting each
        node.)

        This tree traversal supports limited in-place tree
        modifications.  Replacing one node with one or more nodes is
        OK, as is removing an element.  However, if the node removed
        or replaced occurs after the current node, the old node will
        still be traversed, and any new nodes will not.

        Within ``visit`` methods (and ``depart`` methods for
        `walkabout()`), `TreePruningException` subclasses may be raised
        (`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).

        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit`` implementation for each `Node` subclass encountered.

        Return true if we should stop the traversal.
        """
        stop = 0
        visitor.document.reporter.debug(
            'docutils.nodes.Node.walk calling dispatch_visit for %s'
            % self.__class__.__name__)
        try:
            try:
                visitor.dispatch_visit(self)
            except (SkipChildren, SkipNode):
                return stop
            except SkipDeparture: # not applicable; ignore
                pass
            # Iterate over a copy of the child list so that in-place removal
            # or replacement of children does not upset the traversal.
            children = self.children
            try:
                for child in children[:]:
                    if child.walk(visitor):
                        stop = 1
                        break
            except SkipSiblings:
                pass
        except StopTraversal:
            stop = 1
        return stop

    def walkabout(self, visitor):
        """
        Perform a tree traversal similarly to `Node.walk()` (which
        see), except also call the `dispatch_departure()` method
        before exiting each node.

        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit`` and ``depart`` implementation for each `Node`
        subclass encountered.

        Return true if we should stop the traversal.
        """
        call_depart = 1
        stop = 0
        visitor.document.reporter.debug(
            'docutils.nodes.Node.walkabout calling dispatch_visit for %s'
            % self.__class__.__name__)
        try:
            try:
                visitor.dispatch_visit(self)
            except SkipNode:
                return stop
            except SkipDeparture:
                # Skip only the departure call for this node, not its children.
                call_depart = 0
            children = self.children
            try:
                for child in children[:]:
                    if child.walkabout(visitor):
                        stop = 1
                        break
            except SkipSiblings:
                pass
        except SkipChildren:
            pass
        except StopTraversal:
            stop = 1
        if call_depart:
            visitor.document.reporter.debug(
                'docutils.nodes.Node.walkabout calling dispatch_departure '
                'for %s' % self.__class__.__name__)
            visitor.dispatch_departure(self)
        return stop

    def _fast_traverse(self, cls):
        """Specialized traverse() that only supports instance checks."""
        result = []
        if isinstance(self, cls):
            result.append(self)
        for child in self.children:
            result.extend(child._fast_traverse(cls))
        return result

    def _all_traverse(self):
        """Specialized traverse() that doesn't check for a condition."""
        result = []
        result.append(self)
        for child in self.children:
            result.extend(child._all_traverse())
        return result

    def traverse(self, condition=None,
                 include_self=1, descend=1, siblings=0, ascend=0):
        """
        Return an iterable containing

        * self (if include_self is true)
        * all descendants in tree traversal order (if descend is true)
        * all siblings (if siblings is true) and their descendants (if
          also descend is true)
        * the siblings of the parent (if ascend is true) and their
          descendants (if also descend is true), and so on

        If `condition` is not None, the iterable contains only nodes
        for which ``condition(node)`` is true.  If `condition` is a
        node class ``cls``, it is equivalent to a function consisting
        of ``return isinstance(node, cls)``.

        If ascend is true, assume siblings to be true as well.

        For example, given the following tree::

            <paragraph>
                <emphasis>     <--- emphasis.traverse() and
                    <strong>   <--- strong.traverse() are called.
                        Foo
                Bar
                <reference name="Baz" refid="baz">
                    Baz

        Then list(emphasis.traverse()) equals ::

            [<emphasis>, <strong>, <#text: Foo>, <#text: Bar>]

        and list(strong.traverse(ascend=1)) equals ::

            [<strong>, <#text: Foo>, <#text: Bar>, <reference>, <#text: Baz>]
        """
        if ascend:
            siblings=1
        # Check for special argument combinations that allow using an
        # optimized version of traverse()
        if include_self and descend and not siblings:
            if condition is None:
                return self._all_traverse()
            elif isinstance(condition, (types.ClassType, type)):
                return self._fast_traverse(condition)
        # Check if `condition` is a class (check for TypeType for Python
        # implementations that use only new-style classes, like PyPy).
        if isinstance(condition, (types.ClassType, type)):
            node_class = condition
            # Rebind `condition` as an isinstance check; the default argument
            # captures node_class at definition time.
            def condition(node, node_class=node_class):
                return isinstance(node, node_class)
        r = []
        if include_self and (condition is None or condition(self)):
            r.append(self)
        if descend and len(self.children):
            for child in self:
                r.extend(child.traverse(
                    include_self=1, descend=1, siblings=0, ascend=0,
                    condition=condition))
        if siblings or ascend:
            node = self
            while node.parent:
                index = node.parent.index(node)
                # Walk the following siblings at this level; with `ascend`,
                # repeat one level up until the root is reached.
                for sibling in node.parent[index+1:]:
                    r.extend(sibling.traverse(include_self=1, descend=descend,
                                              siblings=0, ascend=0,
                                              condition=condition))
                if not ascend:
                    break
                else:
                    node = node.parent
        return r

    def next_node(self, condition=None,
                  include_self=0, descend=1, siblings=0, ascend=0):
        """
        Return the first node in the iterable returned by traverse(),
        or None if the iterable is empty.

        Parameter list is the same as of traverse.  Note that
        include_self defaults to 0, though.
        """
        iterable = self.traverse(condition=condition,
                                 include_self=include_self, descend=descend,
                                 siblings=siblings, ascend=ascend)
        try:
            return iterable[0]
        except IndexError:
            return None
if sys.version_info < (3,):
    # On Python 2, repr(unicode) carries a leading "u" prefix; strip it so
    # node reprs look the same on both Python versions.
    class reprunicode(unicode):

        """
        A class that removes the initial u from unicode's repr.
        """

        def __repr__(self):
            return unicode.__repr__(self)[1:]
else:
    # On Python 3 the native str repr already has no prefix.
    reprunicode = unicode
class Text(Node, reprunicode):

    """
    Instances are terminal nodes (leaves) containing text only; no child
    nodes or attributes.  Initialize by passing a string to the constructor.
    Access the text itself with the `astext` method.
    """

    tagname = '#text'

    children = ()
    """Text nodes have no children, and cannot have children."""

    if sys.version_info > (3,):
        def __new__(cls, data, rawsource=None):
            """Prevent the rawsource argument from propagating to str."""
            if isinstance(data, bytes):
                raise TypeError('expecting str data, not bytes')
            return reprunicode.__new__(cls, data)
    else:
        def __new__(cls, data, rawsource=None):
            """Prevent the rawsource argument from propagating to str."""
            return reprunicode.__new__(cls, data)

    def __init__(self, data, rawsource=''):

        self.rawsource = rawsource
        """The raw text from which this element was constructed."""

    def __repr__(self):
        # Truncate long text so reprs stay readable.
        data = reprunicode.__repr__(self)
        if len(data) > 70:
            data = reprunicode.__repr__(self[:64] + ' ...')
        return '<%s: %s>' % (self.tagname, data)

    def shortrepr(self):
        """Like `__repr__`, but with a much shorter truncation limit."""
        data = reprunicode.__repr__(self)
        if len(data) > 20:
            data = reprunicode.__repr__(self[:16] + ' ...')
        return '<%s: %s>' % (self.tagname, data)

    def _dom_node(self, domroot):
        """Return this node as a DOM text node created from `domroot`."""
        return domroot.createTextNode(unicode(self))

    def astext(self):
        """Return the text content as a plain (repr-friendly) unicode string."""
        return reprunicode(self)

    # Note about __unicode__: The implementation of __unicode__ here,
    # and the one raising NotImplemented in the superclass Node had
    # to be removed when changing Text to a subclass of unicode instead
    # of UserString, since there is no way to delegate the __unicode__
    # call to the superclass unicode:
    # unicode itself does not have __unicode__ method to delegate to
    # and calling unicode(self) or unicode.__new__ directly creates
    # an infinite loop

    def copy(self):
        return self.__class__(reprunicode(self), rawsource=self.rawsource)

    def deepcopy(self):
        # Text nodes are immutable leaves, so a shallow copy is sufficient.
        return self.copy()

    def pformat(self, indent=' ', level=0):
        """Return the text indented to `level` for pseudo-XML output."""
        result = []
        indent = indent * level
        for line in self.splitlines():
            result.append(indent + line + '\n')
        return ''.join(result)

    # rstrip and lstrip are used by substitution definitions where
    # they are expected to return a Text instance, this was formerly
    # taken care of by UserString. Note that then and now the
    # rawsource member is lost.

    def rstrip(self, chars=None):
        return self.__class__(reprunicode.rstrip(self, chars))

    def lstrip(self, chars=None):
        return self.__class__(reprunicode.lstrip(self, chars))
class Element(Node):
"""
`Element` is the superclass to all specific elements.
Elements contain attributes and child nodes. Elements emulate
dictionaries for attributes, indexing by attribute name (a string). To
set the attribute 'att' to 'value', do::
element['att'] = 'value'
There are two special attributes: 'ids' and 'names'. Both are
lists of unique identifiers, and names serve as human interfaces
to IDs. Names are case- and whitespace-normalized (see the
fully_normalize_name() function), and IDs conform to the regular
expression ``[a-z](-?[a-z0-9]+)*`` (see the make_id() function).
Elements also emulate lists for child nodes (element nodes and/or text
nodes), indexing by integer. To get the first child node, use::
element[0]
Elements may be constructed using the ``+=`` operator. To add one new
child node to element, do::
element += node
This is equivalent to ``element.append(node)``.
To add a list of multiple child nodes at once, use the same ``+=``
operator::
element += [node1, node2]
This is equivalent to ``element.extend([node1, node2])``.
"""
list_attributes = ('ids', 'classes', 'names', 'dupnames', 'backrefs')
"""List attributes, automatically initialized to empty lists for
all nodes."""
tagname = None
"""The element generic identifier. If None, it is set as an instance
attribute to the name of the class."""
child_text_separator = '\n\n'
"""Separator for child nodes, used by `astext()` method."""
def __init__(self, rawsource='', *children, **attributes):
self.rawsource = rawsource
"""The raw text from which this element was constructed."""
self.children = []
"""List of child nodes (elements and/or `Text`)."""
self.extend(children) # maintain parent info
self.attributes = {}
"""Dictionary of attribute {name: value}."""
# Initialize list attributes.
for att in self.list_attributes:
self.attributes[att] = []
for att, value in attributes.items():
att = att.lower()
if att in self.list_attributes:
# mutable list; make a copy for this node
self.attributes[att] = value[:]
else:
self.attributes[att] = value
if self.tagname is None:
self.tagname = self.__class__.__name__
def _dom_node(self, domroot):
element = domroot.createElement(self.tagname)
for attribute, value in self.attlist():
if isinstance(value, list):
value = ' '.join([serial_escape('%s' % v) for v in value])
element.setAttribute(attribute, '%s' % value)
for child in self.children:
element.appendChild(child._dom_node(domroot))
return element
def __repr__(self):
data = ''
for c in self.children:
data += c.shortrepr()
if len(data) > 60:
data = data[:56] + ' ...'
break
if self['names']:
return '<%s "%s": %s>' % (self.__class__.__name__,
'; '.join(self['names']), data)
else:
return '<%s: %s>' % (self.__class__.__name__, data)
def shortrepr(self):
if self['names']:
return '<%s "%s"...>' % (self.__class__.__name__,
'; '.join(self['names']))
else:
return '<%s...>' % self.tagname
def __unicode__(self):
if self.children:
return u'%s%s%s' % (self.starttag(),
''.join([unicode(c) for c in self.children]),
self.endtag())
else:
return self.emptytag()
if sys.version_info > (3,):
# 2to3 doesn't convert __unicode__ to __str__
__str__ = __unicode__
def starttag(self):
parts = [self.tagname]
for name, value in self.attlist():
if value is None: # boolean attribute
parts.append(name)
elif isinstance(value, list):
values = [serial_escape('%s' % v) for v in value]
parts.append('%s="%s"' % (name, ' '.join(values)))
else:
parts.append('%s="%s"' % (name, value))
return '<%s>' % ' '.join(parts)
def endtag(self):
return '</%s>' % self.tagname
def emptytag(self):
return u'<%s/>' % ' '.join([self.tagname] +
['%s="%s"' % (n, v)
for n, v in self.attlist()])
def __len__(self):
return len(self.children)
def __contains__(self, key):
# support both membership test for children and attributes
# (has_key is translated to "in" by 2to3)
if isinstance(key, basestring):
return key in self.attributes
return key in self.children
def __getitem__(self, key):
if isinstance(key, basestring):
return self.attributes[key]
elif isinstance(key, int):
return self.children[key]
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
return self.children[key.start:key.stop]
else:
raise TypeError, ('element index must be an integer, a slice, or '
'an attribute name string')
def __setitem__(self, key, item):
if isinstance(key, basestring):
self.attributes[str(key)] = item
elif isinstance(key, int):
self.setup_child(item)
self.children[key] = item
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
for node in item:
self.setup_child(node)
self.children[key.start:key.stop] = item
else:
raise TypeError, ('element index must be an integer, a slice, or '
'an attribute name string')
def __delitem__(self, key):
if isinstance(key, basestring):
del self.attributes[key]
elif isinstance(key, int):
del self.children[key]
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
del self.children[key.start:key.stop]
else:
raise TypeError, ('element index must be an integer, a simple '
'slice, or an attribute name string')
def __add__(self, other):
return self.children + other
def __radd__(self, other):
return other + self.children
def __iadd__(self, other):
"""Append a node or a list of nodes to `self.children`."""
if isinstance(other, Node):
self.append(other)
elif other is not None:
self.extend(other)
return self
def astext(self):
return self.child_text_separator.join(
[child.astext() for child in self.children])
def non_default_attributes(self):
atts = {}
for key, value in self.attributes.items():
if self.is_not_default(key):
atts[key] = value
return atts
def attlist(self):
attlist = self.non_default_attributes().items()
attlist.sort()
return attlist
def get(self, key, failobj=None):
return self.attributes.get(key, failobj)
def hasattr(self, attr):
return attr in self.attributes
def delattr(self, attr):
if attr in self.attributes:
del self.attributes[attr]
def setdefault(self, key, failobj=None):
return self.attributes.setdefault(key, failobj)
has_key = hasattr
# support operator in
__contains__ = hasattr
def append(self, item):
    """Add `item` as the last child, registering its parent linkage."""
    self.setup_child(item)
    self.children.append(item)

def extend(self, item):
    """Append each node in the iterable `item`."""
    for node in item:
        self.append(node)

def insert(self, index, item):
    """Insert a node, or a list of nodes, before `index`."""
    if isinstance(item, Node):
        self.setup_child(item)
        self.children.insert(index, item)
    elif item is not None:
        # A list of nodes: delegate to __setitem__ slice assignment,
        # which registers each child.
        self[index:index] = item

def pop(self, i=-1):
    """Remove and return the child at index `i` (default: last)."""
    return self.children.pop(i)

def remove(self, item):
    """Remove the first child equal to `item`."""
    self.children.remove(item)

def index(self, item):
    """Return the index of the first child equal to `item`."""
    return self.children.index(item)
def is_not_default(self, key):
    """Return 0 if attribute `key` is an empty list attribute (its
    default value), 1 otherwise."""
    if key in self.list_attributes and self[key] == []:
        return 0
    return 1
def update_basic_atts(self, dict):
    """
    Update basic attributes ('ids', 'names', 'classes',
    'dupnames', but not 'source') from node or dictionary `dict`.
    """
    # NOTE(review): the parameter name shadows the builtin `dict`;
    # kept as-is for backward compatibility with existing callers.
    if isinstance(dict, Node):
        dict = dict.attributes
    for att in ('ids', 'classes', 'names', 'dupnames'):
        for value in dict.get(att, []):
            # Append only values not already present, preserving order.
            if not value in self[att]:
                self[att].append(value)
def clear(self):
    """Discard all children (attributes are retained)."""
    self.children = []

def replace(self, old, new):
    """Replace one child `Node` with another child or children."""
    index = self.index(old)
    if isinstance(new, Node):
        self.setup_child(new)
        self[index] = new
    elif new is not None:
        # A list of nodes: splice it in place of the old child.
        self[index:index+1] = new
def replace_self(self, new):
    """
    Replace `self` node with `new`, where `new` is a node or a
    list of nodes.

    Basic identifying attributes are propagated to the replacement
    element; if no element can receive them, they must be empty.
    """
    update = new
    if not isinstance(new, Node):
        # `new` is a list; update first child.
        try:
            update = new[0]
        except IndexError:
            update = None
    if isinstance(update, Element):
        # Carry over 'ids', 'names', 'classes', 'dupnames'.
        update.update_basic_atts(self)
    else:
        # `update` is a Text node or `new` is an empty list.
        # Assert that we aren't losing any attributes.
        for att in ('ids', 'names', 'classes', 'dupnames'):
            assert not self[att], \
                   'Losing "%s" attribute: %s' % (att, self[att])
    self.parent.replace(self, new)
def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
    """
    Return the index of the first child matching one of the given
    classes, or None if there is no match.

    Parameters:

    - `childclass`: A `Node` subclass to search for, or a tuple of `Node`
      classes. If a tuple, any of the classes may match.
    - `start`: Initial index to check.
    - `end`: Initial index to *not* check.
    """
    if not isinstance(childclass, tuple):
        childclass = (childclass,)
    stop = min(len(self), end)
    for index in range(start, stop):
        # isinstance() accepts a tuple of classes directly.
        if isinstance(self[index], childclass):
            return index
    return None
def first_child_not_matching_class(self, childclass, start=0,
                                   end=sys.maxint):
    """
    Return the index of the first child whose class does *not* match,
    or None if every child matches.

    Parameters:

    - `childclass`: A `Node` subclass to skip, or a tuple of `Node`
      classes. If a tuple, none of the classes may match.
    - `start`: Initial index to check.
    - `end`: Initial index to *not* check.
    """
    if not isinstance(childclass, tuple):
        childclass = (childclass,)
    for index in range(start, min(len(self), end)):
        # isinstance() with a tuple replaces the inner class loop.
        if not isinstance(self.children[index], childclass):
            return index
    return None
def pformat(self, indent=' ', level=0):
    """Pretty-print this element's start tag plus all children,
    one node per line, indented by nesting `level`."""
    return ''.join(['%s%s\n' % (indent * level, self.starttag())] +
                   [child.pformat(indent, level+1)
                    for child in self.children])

def copy(self):
    # Shallow copy: attributes only, no children.
    return self.__class__(**self.attributes)

def deepcopy(self):
    """Deep copy: attributes plus recursively copied children."""
    copy = self.copy()
    copy.extend([child.deepcopy() for child in self.children])
    return copy
def set_class(self, name):
    """Add a new class to the "classes" attribute.

    Deprecated: append to ``Element['classes']`` directly instead.
    """
    warnings.warn('docutils.nodes.Element.set_class deprecated; '
                  "append to Element['classes'] list attribute directly",
                  DeprecationWarning, stacklevel=2)
    # Class names must be single tokens; stored lowercased.
    assert ' ' not in name
    self['classes'].append(name.lower())
def note_referenced_by(self, name=None, id=None):
    """Note that this Element has been referenced by its name
    `name` or id `id`."""
    self.referenced = 1
    # Element.expect_referenced_by_* dictionaries map names or ids
    # to nodes whose ``referenced`` attribute is set to true as
    # soon as this node is referenced by the given name or id.
    # Needed for target propagation.
    by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
    by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
    if by_name:
        assert name is not None
        by_name.referenced = 1
    if by_id:
        assert id is not None
        by_id.referenced = 1
class TextElement(Element):

    """
    An element which directly contains text.

    Its children are all `Text` or `Inline` subclass nodes.  You can check
    whether an element's context is inline simply by checking whether its
    immediate parent is a `TextElement` instance (including subclasses).
    This is handy for nodes like `image` that can appear both inline and
    as standalone body elements.

    If passing children to `__init__()`, make sure to set `text` to ``''``
    or some other suitable value.
    """

    child_text_separator = ''
    """Separator for child nodes, used by `astext()` method."""

    def __init__(self, rawsource='', text='', *children, **attributes):
        # An empty `text` means "no leading Text node".
        if text == '':
            Element.__init__(self, rawsource, *children, **attributes)
        else:
            Element.__init__(self, rawsource, Text(text), *children,
                             **attributes)
class FixedTextElement(TextElement):

    """An element which directly contains preformatted text."""

    def __init__(self, rawsource='', text='', *children, **attributes):
        TextElement.__init__(self, rawsource, text, *children, **attributes)
        # Instruct XML serializers to preserve internal whitespace.
        self.attributes['xml:space'] = 'preserve'
# ========
#  Mixins
# ========

class Resolvable:

    # Set true once a reference has been resolved to its target.
    resolved = 0

class BackLinkable:

    def add_backref(self, refid):
        """Record the id of a node referring back to this one."""
        self['backrefs'].append(refid)


# ====================
#  Element Categories
# ====================
# Marker base classes used for isinstance() dispatch; most carry no
# behaviour of their own.

class Root: pass

class Titular: pass

class PreBibliographic:
    """Category of Node which may occur before Bibliographic Nodes."""

class Bibliographic: pass

class Decorative(PreBibliographic): pass

class Structural: pass

class Body: pass

class General(Body): pass

class Sequential(Body):
    """List-like elements."""

class Admonition(Body): pass

class Special(Body):
    """Special internal body elements."""

class Invisible(PreBibliographic):
    """Internal elements that don't appear in output."""

class Part: pass

class Inline: pass

class Referential(Resolvable): pass

class Targetable(Resolvable):

    referenced = 0

    indirect_reference_name = None
    """Holds the whitespace_normalized_name (contains mixed case) of a target.
    Required for MoinMoin/reST compatibility."""

class Labeled:
    """Contains a `label` as its first element."""


# ==============
#  Root Element
# ==============
class document(Root, Structural, Element):

    """
    The document root element.

    Do not instantiate this class directly; use
    `docutils.utils.new_document()` instead.
    """

    def __init__(self, settings, reporter, *args, **kwargs):
        Element.__init__(self, *args, **kwargs)

        self.current_source = None
        """Path to or description of the input source being processed."""

        self.current_line = None
        """Line number (1-based) of `current_source`."""

        self.settings = settings
        """Runtime settings data record."""

        self.reporter = reporter
        """System message generator."""

        self.indirect_targets = []
        """List of indirect target nodes."""

        self.substitution_defs = {}
        """Mapping of substitution names to substitution_definition nodes."""

        self.substitution_names = {}
        """Mapping of case-normalized substitution names to case-sensitive
        names."""

        self.refnames = {}
        """Mapping of names to lists of referencing nodes."""

        self.refids = {}
        """Mapping of ids to lists of referencing nodes."""

        self.nameids = {}
        """Mapping of names to unique id's."""

        self.nametypes = {}
        """Mapping of names to hyperlink type (boolean: True => explicit,
        False => implicit)."""

        self.ids = {}
        """Mapping of ids to nodes."""

        self.footnote_refs = {}
        """Mapping of footnote labels to lists of footnote_reference nodes."""

        self.citation_refs = {}
        """Mapping of citation labels to lists of citation_reference nodes."""

        self.autofootnotes = []
        """List of auto-numbered footnote nodes."""

        self.autofootnote_refs = []
        """List of auto-numbered footnote_reference nodes."""

        self.symbol_footnotes = []
        """List of symbol footnote nodes."""

        self.symbol_footnote_refs = []
        """List of symbol footnote_reference nodes."""

        self.footnotes = []
        """List of manually-numbered footnote nodes."""

        self.citations = []
        """List of citation nodes."""

        self.autofootnote_start = 1
        """Initial auto-numbered footnote number."""

        self.symbol_footnote_start = 0
        """Initial symbol footnote symbol index."""

        self.id_start = 1
        """Initial ID number."""

        self.parse_messages = []
        """System messages generated while parsing."""

        self.transform_messages = []
        """System messages generated while applying transforms."""

        # Imported here rather than at module level -- presumably to
        # avoid a circular import with docutils.transforms; TODO confirm.
        import docutils.transforms
        self.transformer = docutils.transforms.Transformer(self)
        """Storage for transforms to be applied to this document."""

        self.decoration = None
        """Document's `decoration` node."""

        # A document is its own root: `node.document` lookups end here.
        self.document = self
def __getstate__(self):
"""
Return dict with unpicklable references removed.
"""
state = self.__dict__.copy()
state['reporter'] = None
state['transformer'] = None
return state
def asdom(self, dom=None):
    """Return a DOM representation of this document.

    `dom`: a DOM implementation module; defaults to `xml.dom.minidom`.
    """
    if dom is None:
        import xml.dom.minidom as dom
    domroot = dom.Document()
    domroot.appendChild(self._dom_node(domroot))
    return domroot
def set_id(self, node, msgnode=None):
    """
    Ensure `node` has a registered ID and return it.

    Duplicate explicit IDs are reported via `self.reporter` (the message
    is appended to `msgnode` if given).  If the node carries no usable
    ID, one is derived from its names or auto-generated.
    """
    for id in node['ids']:
        if id in self.ids and self.ids[id] is not node:
            msg = self.reporter.severe('Duplicate ID: "%s".' % id)
            # FIX: compare against None with `is not None` (PEP 8 E711),
            # not `!=`, which would invoke __eq__ on the message node.
            if msgnode is not None:
                msgnode += msg
    if not node['ids']:
        # Prefer an ID derived from one of the node's names.
        for name in node['names']:
            id = self.settings.id_prefix + make_id(name)
            if id and id not in self.ids:
                break
        else:
            # No usable name: generate an auto-numbered ID.
            id = ''
            while not id or id in self.ids:
                id = (self.settings.id_prefix +
                      self.settings.auto_id_prefix + str(self.id_start))
                self.id_start += 1
        node['ids'].append(id)
    self.ids[id] = node
    return id
def set_name_id_map(self, node, id, msgnode=None, explicit=None):
    """
    `self.nameids` maps names to IDs, while `self.nametypes` maps names to
    booleans representing hyperlink type (True==explicit,
    False==implicit).  This method updates the mappings.

    The following state transition table shows how `self.nameids` ("ids")
    and `self.nametypes` ("types") change with new input (a call to this
    method), and what actions are performed ("implicit"-type system
    messages are INFO/1, and "explicit"-type system messages are ERROR/3):

    ====  =====  ========  ========  =======  ====  =====  =====
     Old State    Input          Action        New State   Notes
    -----------  --------  -----------------  -----------  -----
    ids   types  new type  sys.msg.  dupname  ids   types
    ====  =====  ========  ========  =======  ====  =====  =====
    -     -      explicit  -         -        new   True
    -     -      implicit  -         -        new   False
    None  False  explicit  -         -        new   True
    old   False  explicit  implicit  old      new   True
    None  True   explicit  explicit  new      None  True
    old   True   explicit  explicit  new,old  None  True   [#]_
    None  False  implicit  implicit  new      None  False
    old   False  implicit  implicit  new,old  None  False
    None  True   implicit  implicit  new      None  True
    old   True   implicit  implicit  new      old   True
    ====  =====  ========  ========  =======  ====  =====  =====

    .. [#] Do not clear the name-to-id map or invalidate the old target if
       both old and new targets are external and refer to identical URIs.
       The new target is invalidated regardless.
    """
    for name in node['names']:
        # Collisions are resolved by set_duplicate_name_id per the
        # table above; fresh names are simply recorded.
        if name in self.nameids:
            self.set_duplicate_name_id(node, id, name, msgnode, explicit)
        else:
            self.nameids[name] = id
            self.nametypes[name] = explicit
def set_duplicate_name_id(self, node, id, name, msgnode, explicit):
    """
    Resolve a duplicate use of hyperlink name `name` (see the state
    table in `set_name_id_map`): update `self.nameids`/`self.nametypes`,
    mark losers as dupnames, and emit a system message.
    """
    old_id = self.nameids[name]
    old_explicit = self.nametypes[name]
    # Once a name has been used explicitly, it stays "explicit".
    self.nametypes[name] = old_explicit or explicit
    if explicit:
        if old_explicit:
            level = 2
            if old_id is not None:
                old_node = self.ids[old_id]
                if 'refuri' in node:
                    refuri = node['refuri']
                    if old_node['names'] \
                           and 'refuri' in old_node \
                           and old_node['refuri'] == refuri:
                        level = 1   # just inform if refuri's identical
                if level > 1:
                    dupname(old_node, name)
                    self.nameids[name] = None
            msg = self.reporter.system_message(
                level, 'Duplicate explicit target name: "%s".' % name,
                backrefs=[id], base_node=node)
            # FIX: identity comparison with None (`is not None`),
            # not `!=` (PEP 8 E711).
            if msgnode is not None:
                msgnode += msg
            dupname(node, name)
        else:
            # Explicit target supersedes an implicit one.
            self.nameids[name] = id
            if old_id is not None:
                old_node = self.ids[old_id]
                dupname(old_node, name)
    else:
        if old_id is not None and not old_explicit:
            # Two implicit targets: both lose.
            self.nameids[name] = None
            old_node = self.ids[old_id]
            dupname(old_node, name)
        dupname(node, name)
    if not explicit or (not old_explicit and old_id is not None):
        msg = self.reporter.info(
            'Duplicate implicit target name: "%s".' % name,
            backrefs=[id], base_node=node)
        # FIX: same `is not None` idiom as above.
        if msgnode is not None:
            msgnode += msg
def has_name(self, name):
    """True if `name` is a registered hyperlink name in this document."""
    return name in self.nameids

# "note" here is an imperative verb: "take note of".
def note_implicit_target(self, target, msgnode=None):
    """Assign `target` an ID and register it as an implicit name."""
    id = self.set_id(target, msgnode)
    self.set_name_id_map(target, id, msgnode, explicit=None)

def note_explicit_target(self, target, msgnode=None):
    """Assign `target` an ID and register it as an explicit name."""
    id = self.set_id(target, msgnode)
    self.set_name_id_map(target, id, msgnode, explicit=1)
def note_refname(self, node):
    """Record a node referencing the name ``node['refname']``."""
    self.refnames.setdefault(node['refname'], []).append(node)

def note_refid(self, node):
    """Record a node referencing the id ``node['refid']``."""
    self.refids.setdefault(node['refid'], []).append(node)

def note_indirect_target(self, target):
    """Register an indirect target; named targets also get a refname."""
    self.indirect_targets.append(target)
    if target['names']:
        self.note_refname(target)

def note_anonymous_target(self, target):
    # Anonymous targets only need an ID; they have no name.
    self.set_id(target)

def note_autofootnote(self, footnote):
    """Register an auto-numbered footnote."""
    self.set_id(footnote)
    self.autofootnotes.append(footnote)

def note_autofootnote_ref(self, ref):
    """Register an auto-numbered footnote reference."""
    self.set_id(ref)
    self.autofootnote_refs.append(ref)

def note_symbol_footnote(self, footnote):
    """Register a symbol footnote."""
    self.set_id(footnote)
    self.symbol_footnotes.append(footnote)

def note_symbol_footnote_ref(self, ref):
    """Register a symbol footnote reference."""
    self.set_id(ref)
    self.symbol_footnote_refs.append(ref)

def note_footnote(self, footnote):
    """Register a manually-numbered footnote."""
    self.set_id(footnote)
    self.footnotes.append(footnote)

def note_footnote_ref(self, ref):
    """Register a footnote reference, by label and by refname."""
    self.set_id(ref)
    self.footnote_refs.setdefault(ref['refname'], []).append(ref)
    self.note_refname(ref)

def note_citation(self, citation):
    """Register a citation node."""
    self.citations.append(citation)

def note_citation_ref(self, ref):
    """Register a citation reference, by label and by refname."""
    self.set_id(ref)
    self.citation_refs.setdefault(ref['refname'], []).append(ref)
    self.note_refname(ref)
def note_substitution_def(self, subdef, def_name, msgnode=None):
    """
    Register substitution definition `subdef` under `def_name`,
    reporting and superseding any earlier definition of the same name.
    """
    name = whitespace_normalize_name(def_name)
    if name in self.substitution_defs:
        msg = self.reporter.error(
            'Duplicate substitution definition name: "%s".' % name,
            base_node=subdef)
        # FIX: compare against None with `is not None` (PEP 8 E711),
        # not `!=`, which would invoke __eq__ on the message node.
        if msgnode is not None:
            msgnode += msg
        oldnode = self.substitution_defs[name]
        dupname(oldnode, name)
    # keep only the last definition:
    self.substitution_defs[name] = subdef
    # case-insensitive mapping:
    self.substitution_names[fully_normalize_name(name)] = name
def note_substitution_ref(self, subref, refname):
    """Normalize and record the refname of a substitution reference."""
    subref['refname'] = whitespace_normalize_name(refname)

def note_pending(self, pending, priority=None):
    """Hand a `pending` node to the transformer for later processing."""
    self.transformer.add_pending(pending, priority)

def note_parse_message(self, message):
    """Record a system message generated while parsing."""
    self.parse_messages.append(message)

def note_transform_message(self, message):
    """Record a system message generated while transforming."""
    self.transform_messages.append(message)
def note_source(self, source, offset):
    """Record the current source path and (1-based) line number."""
    self.current_source = source
    if offset is None:
        # No line information available.
        self.current_line = None
    else:
        # Convert the 0-based `offset` to a 1-based line number.
        self.current_line = offset + 1
def copy(self):
    # Shallow copy preserving settings and reporter; no children.
    return self.__class__(self.settings, self.reporter,
                          **self.attributes)

def get_decoration(self):
    """Return the document's `decoration` node, creating it if absent."""
    if not self.decoration:
        self.decoration = decoration()
        index = self.first_child_not_matching_class(Titular)
        if index is None:
            self.append(self.decoration)
        else:
            # Insert after any leading title/subtitle elements.
            self.insert(index, self.decoration)
    return self.decoration
# ================
#  Title Elements
# ================

class title(Titular, PreBibliographic, TextElement): pass
class subtitle(Titular, PreBibliographic, TextElement): pass
class rubric(Titular, TextElement): pass


# ========================
#  Bibliographic Elements
# ========================
# Docinfo field elements; `docinfo` is their container.

class docinfo(Bibliographic, Element): pass
class author(Bibliographic, TextElement): pass
class authors(Bibliographic, Element): pass
class organization(Bibliographic, TextElement): pass
class address(Bibliographic, FixedTextElement): pass
class contact(Bibliographic, TextElement): pass
class version(Bibliographic, TextElement): pass
class revision(Bibliographic, TextElement): pass
class status(Bibliographic, TextElement): pass
class date(Bibliographic, TextElement): pass
class copyright(Bibliographic, TextElement): pass
# =====================
#  Decorative Elements
# =====================

class decoration(Decorative, Element):

    def get_header(self):
        """Return the header node, creating an empty one if absent."""
        if not len(self.children) or not isinstance(self.children[0], header):
            self.insert(0, header())
        return self.children[0]

    def get_footer(self):
        """Return the footer node, creating an empty one if absent."""
        if not len(self.children) or not isinstance(self.children[-1], footer):
            self.append(footer())
        return self.children[-1]

class header(Decorative, Element): pass
class footer(Decorative, Element): pass
# =====================
#  Structural Elements
# =====================

class section(Structural, Element): pass


class topic(Structural, Element):

    """
    Topics are terminal, "leaf" mini-sections, like block quotes with titles,
    or textual figures.  A topic is just like a section, except that it has no
    subsections, and it doesn't have to conform to section placement rules.

    Topics are allowed wherever body elements (list, table, etc.) are allowed,
    but only at the top level of a section or document.  Topics cannot nest
    inside topics, sidebars, or body elements; you can't have a topic inside a
    table, list, block quote, etc.
    """


class sidebar(Structural, Element):

    """
    Sidebars are like miniature, parallel documents that occur inside other
    documents, providing related or reference material.  A sidebar is
    typically offset by a border and "floats" to the side of the page; the
    document's main text may flow around it.  Sidebars can also be likened to
    super-footnotes; their content is outside of the flow of the document's
    main text.

    Sidebars are allowed wherever body elements (list, table, etc.) are
    allowed, but only at the top level of a section or document.  Sidebars
    cannot nest inside sidebars, topics, or body elements; you can't have a
    sidebar inside a table, list, block quote, etc.
    """


class transition(Structural, Element): pass
# ===============
#  Body Elements
# ===============

class paragraph(General, TextElement): pass
class compound(General, Element): pass
class container(General, Element): pass

# List elements and their parts.
class bullet_list(Sequential, Element): pass
class enumerated_list(Sequential, Element): pass
class list_item(Part, Element): pass
class definition_list(Sequential, Element): pass
class definition_list_item(Part, Element): pass
class term(Part, TextElement): pass
class classifier(Part, TextElement): pass
class definition(Part, Element): pass
class field_list(Sequential, Element): pass
class field(Part, Element): pass
class field_name(Part, TextElement): pass
class field_body(Part, Element): pass


class option(Part, Element):

    child_text_separator = ''


class option_argument(Part, TextElement):

    def astext(self):
        # Prefix the argument with its delimiter (default: one space).
        return self.get('delimiter', ' ') + TextElement.astext(self)


class option_group(Part, Element):

    child_text_separator = ', '


class option_list(Sequential, Element): pass


class option_list_item(Part, Element):

    child_text_separator = '  '


class option_string(Part, TextElement): pass
class description(Part, Element): pass

# Literal/quote blocks.
class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
class line_block(General, Element): pass


class line(Part, TextElement):

    # Set by the parser to the line's nesting depth within line_blocks.
    indent = None


class block_quote(General, Element): pass
class attribution(Part, TextElement): pass

# Admonitions.
class attention(Admonition, Element): pass
class caution(Admonition, Element): pass
class danger(Admonition, Element): pass
class error(Admonition, Element): pass
class important(Admonition, Element): pass
class note(Admonition, Element): pass
class tip(Admonition, Element): pass
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass

# Invisible/internal elements.
class comment(Special, Invisible, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass

# Footnotes and citations.
class footnote(General, BackLinkable, Element, Labeled, Targetable): pass
class citation(General, BackLinkable, Element, Labeled, Targetable): pass
class label(Part, TextElement): pass

# Figures and tables.
class figure(General, Element): pass
class caption(Part, TextElement): pass
class legend(Part, Element): pass
class table(General, Element): pass
class tgroup(Part, Element): pass
class colspec(Part, Element): pass
class thead(Part, Element): pass
class tbody(Part, Element): pass
class row(Part, Element): pass
class entry(Part, Element): pass
class system_message(Special, BackLinkable, PreBibliographic, Element):

    """
    System message element.

    Do not instantiate this class directly; use
    ``document.reporter.info/warning/error/severe()`` instead.
    """

    def __init__(self, message=None, *children, **attributes):
        # A plain-text `message` becomes a leading paragraph child.
        if message:
            p = paragraph('', message)
            children = (p,) + children
        try:
            Element.__init__(self, '', *children, **attributes)
        except:
            # Debug aid: show the offending children before re-raising.
            print 'system_message: children=%r' % (children,)
            raise

    def astext(self):
        """Render as "source:line: (type/level) text"."""
        line = self.get('line', '')
        return u'%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
                                       self['level'], Element.astext(self))
class pending(Special, Invisible, Element):

    """
    The "pending" element is used to encapsulate a pending operation: the
    operation (transform), the point at which to apply it, and any data it
    requires.  Only the pending operation's location within the document is
    stored in the public document tree (by the "pending" object itself); the
    operation and its data are stored in the "pending" object's internal
    instance attributes.

    For example, say you want a table of contents in your reStructuredText
    document.  The easiest way to specify where to put it is from within the
    document, with a directive::

        .. contents::

    But the "contents" directive can't do its work until the entire document
    has been parsed and possibly transformed to some extent.  So the directive
    code leaves a placeholder behind that will trigger the second phase of its
    processing, something like this::

        <pending ...public attributes...> + internal attributes

    Use `document.note_pending()` so that the
    `docutils.transforms.Transformer` stage of processing can run all pending
    transforms.
    """

    def __init__(self, transform, details=None,
                 rawsource='', *children, **attributes):
        Element.__init__(self, rawsource, *children, **attributes)

        self.transform = transform
        """The `docutils.transforms.Transform` class implementing the pending
        operation."""

        self.details = details or {}
        """Detail data (dictionary) required by the pending operation."""

    def pformat(self, indent=' ', level=0):
        """Pretty-print the element plus its internal attributes."""
        internals = [
              '.. internal attributes:',
              '     .transform: %s.%s' % (self.transform.__module__,
                                          self.transform.__name__),
              '     .details:']
        # Python 2: items() returns a list, sorted in place for a
        # deterministic dump.
        details = self.details.items()
        details.sort()
        for key, value in details:
            if isinstance(value, Node):
                internals.append('%7s%s:' % ('', key))
                internals.extend(['%9s%s' % ('', line)
                                  for line in value.pformat().splitlines()])
            elif value and isinstance(value, list) \
                  and isinstance(value[0], Node):
                internals.append('%7s%s:' % ('', key))
                for v in value:
                    internals.extend(['%9s%s' % ('', line)
                                      for line in v.pformat().splitlines()])
            else:
                internals.append('%7s%s: %r' % ('', key, value))
        return (Element.pformat(self, indent, level)
                + ''.join([('    %s%s\n' % (indent * level, line))
                           for line in internals]))

    def copy(self):
        # Shallow copy: same transform class and details dictionary.
        return self.__class__(self.transform, self.details, self.rawsource,
                              **self.attributes)
class raw(Special, Inline, PreBibliographic, FixedTextElement):

    """
    Raw data that is to be passed untouched to the Writer.
    """

    pass


# =================
#  Inline Elements
# =================

class emphasis(Inline, TextElement): pass
class strong(Inline, TextElement): pass
class literal(Inline, TextElement): pass
class reference(General, Inline, Referential, TextElement): pass
class footnote_reference(Inline, Referential, TextElement): pass
class citation_reference(Inline, Referential, TextElement): pass
class substitution_reference(Inline, TextElement): pass
class title_reference(Inline, TextElement): pass
class abbreviation(Inline, TextElement): pass
class acronym(Inline, TextElement): pass
class superscript(Inline, TextElement): pass
class subscript(Inline, TextElement): pass


class image(General, Inline, Element):

    def astext(self):
        # An image's text is its alternate text, if any.
        return self.get('alt', '')


class inline(Inline, TextElement): pass
class problematic(Inline, TextElement): pass
class generated(Inline, TextElement): pass


# ========================================
#  Auxiliary Classes, Functions, and Data
# ========================================
# Names of all concrete Node subclasses, used to generate visitor
# dispatch methods (see `_add_node_class_names` below).  The string is
# runtime data: whitespace-split into a list of class names.
node_class_names = """
Text
abbreviation acronym address admonition attention attribution author
authors
block_quote bullet_list
caption caution citation citation_reference classifier colspec comment
compound contact container copyright
danger date decoration definition definition_list definition_list_item
description docinfo doctest_block document
emphasis entry enumerated_list error
field field_body field_list field_name figure footer
footnote footnote_reference
generated
header hint
image important inline
label legend line line_block list_item literal literal_block
note
option option_argument option_group option_list option_list_item
option_string organization
paragraph pending problematic
raw reference revision row rubric
section sidebar status strong subscript substitution_definition
substitution_reference subtitle superscript system_message
table target tbody term tgroup thead tip title title_reference topic
transition
version
warning""".split()
"""A list of names of all concrete Node subclasses."""
class NodeVisitor:

    """
    "Visitor" pattern [GoF95]_ abstract superclass implementation for
    document tree traversals.

    Each node class has corresponding methods, doing nothing by
    default; override individual methods for specific and useful
    behaviour.  The `dispatch_visit()` method is called by
    `Node.walk()` upon entering a node.  `Node.walkabout()` also calls
    the `dispatch_departure()` method before exiting a node.

    The dispatch methods call "``visit_`` + node class name" or
    "``depart_`` + node class name", resp.

    This is a base class for visitors whose ``visit_...`` & ``depart_...``
    methods should be implemented for *all* node types encountered (such as
    for `docutils.writers.Writer` subclasses).  Unimplemented methods will
    raise exceptions.

    For sparse traversals, where only certain node types are of interest,
    subclass `SparseNodeVisitor` instead.  When (mostly or entirely) uniform
    processing is desired, subclass `GenericNodeVisitor`.

    .. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
       Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
       1995.
    """

    optional = ()
    """
    Tuple containing node class names (as strings).

    No exception will be raised if writers do not implement visit
    or departure functions for these node classes.

    Used to ensure transitional compatibility with existing 3rd-party writers.
    """

    def __init__(self, document):
        self.document = document

    def dispatch_visit(self, node):
        """
        Call self."``visit_`` + node class name" with `node` as
        parameter.  If the ``visit_...`` method does not exist, call
        self.unknown_visit.
        """
        node_name = node.__class__.__name__
        # Fall back to unknown_visit when no specific method exists.
        method = getattr(self, 'visit_' + node_name, self.unknown_visit)
        self.document.reporter.debug(
            'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
            % (method.__name__, node_name))
        return method(node)

    def dispatch_departure(self, node):
        """
        Call self."``depart_`` + node class name" with `node` as
        parameter.  If the ``depart_...`` method does not exist, call
        self.unknown_departure.
        """
        node_name = node.__class__.__name__
        method = getattr(self, 'depart_' + node_name, self.unknown_departure)
        self.document.reporter.debug(
            'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
            % (method.__name__, node_name))
        return method(node)

    def unknown_visit(self, node):
        """
        Called when entering unknown `Node` types.

        Raise an exception unless overridden.
        """
        # Node types listed in `self.optional` are silently skipped
        # unless strict_visitor is set.
        if  (self.document.settings.strict_visitor
             or node.__class__.__name__ not in self.optional):
            raise NotImplementedError(
                '%s visiting unknown node type: %s'
                % (self.__class__, node.__class__.__name__))

    def unknown_departure(self, node):
        """
        Called before exiting unknown `Node` types.

        Raise exception unless overridden.
        """
        if  (self.document.settings.strict_visitor
             or node.__class__.__name__ not in self.optional):
            raise NotImplementedError(
                '%s departing unknown node type: %s'
                % (self.__class__, node.__class__.__name__))
class SparseNodeVisitor(NodeVisitor):

    """
    Base class for sparse traversals, where only certain node types are of
    interest.  When ``visit_...`` & ``depart_...`` methods should be
    implemented for *all* node types (such as for `docutils.writers.Writer`
    subclasses), subclass `NodeVisitor` instead.
    """
    # No-op visit_*/depart_* methods for every node class are installed
    # by `_add_node_class_names` below.
class GenericNodeVisitor(NodeVisitor):

    """
    Generic "Visitor" abstract superclass, for simple traversals.

    Unless overridden, each ``visit_...`` method calls `default_visit()`, and
    each ``depart_...`` method (when using `Node.walkabout()`) calls
    `default_departure()`. `default_visit()` (and `default_departure()`) must
    be overridden in subclasses.

    Define fully generic visitors by overriding `default_visit()` (and
    `default_departure()`) only.  Define semi-generic visitors by overriding
    individual ``visit_...()`` (and ``depart_...()``) methods also.

    `NodeVisitor.unknown_visit()` (`NodeVisitor.unknown_departure()`) should
    be overridden for default behavior.
    """

    def default_visit(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError

    def default_departure(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError

def _call_default_visit(self, node):
    # Installed on GenericNodeVisitor as every ``visit_<name>`` method.
    self.default_visit(node)

def _call_default_departure(self, node):
    # Installed on GenericNodeVisitor as every ``depart_<name>`` method.
    self.default_departure(node)

def _nop(self, node):
    # Installed on SparseNodeVisitor: silently ignore the node type.
    pass
def _add_node_class_names(names):
    """Save typing with dynamic assignments:"""
    # For every concrete node class name, install dispatch methods:
    # generic visitors delegate to default_visit/default_departure,
    # sparse visitors no-op.
    for _name in names:
        setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
        setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
        setattr(SparseNodeVisitor, 'visit_' + _name, _nop)
        setattr(SparseNodeVisitor, 'depart_' + _name, _nop)

_add_node_class_names(node_class_names)
class TreeCopyVisitor(GenericNodeVisitor):

    """
    Make a complete copy of a tree or branch, including element attributes.
    """

    def __init__(self, document):
        GenericNodeVisitor.__init__(self, document)
        # Stack of previous parents; `self.parent` is the current one.
        self.parent_stack = []
        self.parent = []

    def get_tree_copy(self):
        """Return the root of the copied tree (first node visited)."""
        return self.parent[0]

    def default_visit(self, node):
        """Copy the current node, and make it the new acting parent."""
        newnode = node.copy()
        self.parent.append(newnode)
        self.parent_stack.append(self.parent)
        self.parent = newnode

    def default_departure(self, node):
        """Restore the previous acting parent."""
        self.parent = self.parent_stack.pop()
class TreePruningException(Exception):

    """
    Base class for `NodeVisitor`-related tree pruning exceptions.

    Raise subclasses from within ``visit_...`` or ``depart_...`` methods
    called from `Node.walk()` and `Node.walkabout()` tree traversals to prune
    the tree traversed.
    """

    pass


class SkipChildren(TreePruningException):

    """
    Do not visit any children of the current node.  The current node's
    siblings and ``depart_...`` method are not affected.
    """

    pass


class SkipSiblings(TreePruningException):

    """
    Do not visit any more siblings (to the right) of the current node.  The
    current node's children and its ``depart_...`` method are not affected.
    """

    pass


class SkipNode(TreePruningException):

    """
    Do not visit the current node's children, and do not call the current
    node's ``depart_...`` method.
    """

    pass


class SkipDeparture(TreePruningException):

    """
    Do not call the current node's ``depart_...`` method.  The current node's
    children and siblings are not affected.
    """

    pass


class NodeFound(TreePruningException):

    """
    Raise to indicate that the target of a search has been found.  This
    exception must be caught by the client; it is not caught by the traversal
    code.
    """

    pass


class StopTraversal(TreePruningException):

    """
    Stop the traversal altogether.  The current node's ``depart_...`` method
    is not affected.  The parent nodes' ``depart_...`` methods are also
    called as usual.  No other nodes are visited.  This is an alternative to
    NodeFound that does not cause exception handling to trickle up to the
    caller.
    """

    pass
def make_id(string):
    """
    Convert `string` into an identifier and return it.

    Docutils identifiers will conform to the regular expression
    ``[a-z](-?[a-z0-9]+)*``.  For CSS compatibility, identifiers (the "class"
    and "id" attributes) should have no underscores, colons, or periods.
    Hyphens may be used.

    - The `HTML 4.01 spec`_ defines identifiers based on SGML tokens:
      ID and NAME tokens must begin with a letter ([A-Za-z]) and may be
      followed by any number of letters, digits ([0-9]), hyphens ("-"),
      underscores ("_"), colons (":"), and periods (".").

    - However the `CSS1 spec`_ defines identifiers based on the "name" token,
      a tighter interpretation ("flex" tokenizer notation; "latin1" and
      "escape" 8-bit characters have been replaced with entities)::

          unicode  \\[0-9a-f]{1,4}
          latin1   [¡-ÿ]
          escape   {unicode}|\\[ -~¡-ÿ]
          nmchar   [-a-z0-9]|{latin1}|{escape}
          name     {nmchar}+

    The CSS1 "nmchar" rule does not include underscores ("_"), colons (":"),
    or periods ("."), therefore "class" and "id" attributes should not contain
    these characters. They should be replaced with hyphens ("-"). Combined
    with HTML's requirements (the first character must be a letter; no
    "unicode", "latin1", or "escape" characters), this results in the
    ``[a-z](-?[a-z0-9]+)*`` pattern.

    .. _HTML 4.01 spec: http://www.w3.org/TR/html401
    .. _CSS1 spec: http://www.w3.org/TR/REC-CSS1
    """
    # NOTE: Python 2 code -- `unicode` and the implicit `.decode()` below do
    # not exist on Python 3.
    id = string.lower()
    if not isinstance(id, unicode):
        id = id.decode()  # bytes -> unicode so the dict-based translate works
    # transliterate characters that NFKD normalization cannot decompose
    id = id.translate(_non_id_translate_digraphs)
    id = id.translate(_non_id_translate)
    # get rid of non-ascii characters
    id = unicodedata.normalize('NFKD', id).\
        encode('ASCII', 'ignore').decode('ASCII')
    # shrink runs of whitespace and replace by hyphen
    id = _non_id_chars.sub('-', ' '.join(id.split()))
    # strip leading digits/hyphens and trailing hyphens
    id = _non_id_at_ends.sub('', id)
    return str(id)
# Runs of characters not allowed in an identifier (collapsed to hyphens).
_non_id_chars = re.compile('[^a-z0-9]+')
# Leading digits/hyphens and trailing hyphens (stripped from identifiers).
_non_id_at_ends = re.compile('^[-0-9]+|-+$')
# Single-character transliterations applied before NFKD normalization;
# these characters have no combining-character decomposition.
_non_id_translate = {
    0x00f8: u'o',       # o with stroke
    0x0111: u'd',       # d with stroke
    0x0127: u'h',       # h with stroke
    0x0131: u'i',       # dotless i
    0x0142: u'l',       # l with stroke
    0x0167: u't',       # t with stroke
    0x0180: u'b',       # b with stroke
    0x0183: u'b',       # b with topbar
    0x0188: u'c',       # c with hook
    0x018c: u'd',       # d with topbar
    0x0192: u'f',       # f with hook
    0x0199: u'k',       # k with hook
    0x019a: u'l',       # l with bar
    0x019e: u'n',       # n with long right leg
    0x01a5: u'p',       # p with hook
    0x01ab: u't',       # t with palatal hook
    0x01ad: u't',       # t with hook
    0x01b4: u'y',       # y with hook
    0x01b6: u'z',       # z with stroke
    0x01e5: u'g',       # g with stroke
    0x0225: u'z',       # z with hook
    0x0234: u'l',       # l with curl
    0x0235: u'n',       # n with curl
    0x0236: u't',       # t with curl
    0x0237: u'j',       # dotless j
    0x023c: u'c',       # c with stroke
    0x023f: u's',       # s with swash tail
    0x0240: u'z',       # z with swash tail
    0x0247: u'e',       # e with stroke
    0x0249: u'j',       # j with stroke
    0x024b: u'q',       # q with hook tail
    0x024d: u'r',       # r with stroke
    0x024f: u'y',       # y with stroke
    }
# Two-character transliterations (ligatures and digraphs).
_non_id_translate_digraphs = {
    0x00df: u'sz',      # ligature sz
    0x00e6: u'ae',      # ae
    0x0153: u'oe',      # ligature oe
    0x0238: u'db',      # db digraph
    0x0239: u'qp',      # qp digraph
    }
def dupname(node, name):
    """Move `name` from `node`'s ``names`` attribute into its ``dupnames``."""
    node['dupnames'].append(name)
    node['names'].remove(name)
    # Assume that this method is referenced, even though it isn't; we
    # don't want to throw unnecessary system_messages.
    node.referenced = 1
def fully_normalize_name(name):
    """Lowercase `name` and collapse each whitespace run to a single space."""
    tokens = name.lower().split()
    return ' '.join(tokens)
def whitespace_normalize_name(name):
    """Collapse whitespace in `name` to single spaces, stripping the ends."""
    tokens = name.split()
    return ' '.join(tokens)
def serial_escape(value):
    """Escape string values that are elements of a list, for serialization.

    Backslashes are doubled and spaces become ``\\ `` so list elements can
    later be split on unescaped spaces.
    """
    escaped = value.replace('\\', '\\\\')
    return escaped.replace(' ', '\\ ')
#
#
# Local Variables:
# indent-tabs-mode: nil
# sentence-end-double-space: t
# fill-column: 78
# End:
| edisonlz/fruit | web_project/base/site-packages/docutils/nodes.py | Python | apache-2.0 | 64,905 | [
"VisIt"
] | b2a79f6b47566bb5541cff867568505221fbd1e7bd1360c9214121323b2dc0a2 |
import os
import types
import logging
import string
import unittest
import cStringIO as StringIO
import cherrypy
from cherrypy.process.wspbus import states
from webtest import TestApp
try:
import sqlobject
from sqlobject.inheritance import InheritableSQLObject
except ImportError:
sqlobject = None
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
from gearshift import startup, config, update_config, \
controllers, database, validators
from gearshift.identity import current_provider
#from gearshift.util import get_model
# Remember the working directory at import time.
cwd = os.getcwd()
# For clean tests, remove all compiled Kid templates
for w in os.walk('.'):
    if not os.sep + '.' in w[0]:  # skip hidden directories (e.g. .svn)
        for f in w[2]:
            if f.endswith('.kid'):
                # delete the stale compiled template next to the .kid source
                f = os.path.join(w[0], f[:-3] + 'pyc')
                if os.path.exists(f):
                    os.remove(f)
# Load test configuration
if os.path.exists('test.cfg'):
    # Look for a 'config' package below the current directory.
    for dirpath, dirs, dummy2 in os.walk('.'):
        basename = os.path.basename(dirpath)
        dirname = os.path.normpath(os.path.dirname(dirpath))
        init_py = os.path.join(dirpath, '__init__.py')
        # Accept only a package named 'config' whose parent path starts with
        # a letter or underscore (i.e. looks like a Python package name).
        if basename == 'config' and os.path.exists(init_py) and \
                dirname[0] in string.ascii_letters + '_':
            modulename = "%s.app" % dirpath[2:].replace(os.sep, ".")
            break
    else:
        modulename = None
    update_config(configfile="test.cfg", modulename=modulename)
else:
    # No test.cfg: fall back to an in-memory SQLite database.
    database.set_db_uri("sqlite:///:memory:")

# Test-friendly global settings.
config.update({'global':
               {'autoreload.on': False, 'tg.new_style_logging': True,
                'tools.expose.on': True}})
def start_server():
    """Start the server if it's not already running.

    Brings up the CherryPy engine first, then the TurboGears machinery;
    each step is tracked by its own flag in the configuration.
    """
    if not config.get("cp_started"):
        cherrypy.engine.start()
        config.update({"cp_started": True})
    if not config.get("server_started"):
        startup.startTurboGears()
        config.update({"server_started": True})
def stop_server(tg_only = False):
    """Stop the server and unmount the application. \
    Use tg_only = True to leave CherryPy running (for faster tests).
    """
    unmount()
    if not tg_only:
        if not cherrypy.engine.state in [states.STOPPED]:
            cherrypy.engine.exit()
            config.update({"cp_started" : False})
    # NOTE(review): when the branch above ran, "cp_started" has just been
    # cleared, so this only fires if the engine was already stopped -- the
    # combined intent looks like "stop whatever is still up"; confirm.
    if config.get("cp_started") and not tg_only:
        cherrypy.server.stop()
        config.update({"cp_started" : False})
    if config.get("server_started"):
        startup.stopTurboGears()
        config.update({"server_started" : False})
def make_wsgiapp():
    """Return a WSGI application from cherrypy's root object."""
    # cherrypy.tree is itself a WSGI callable dispatching to mounted apps.
    return cherrypy.tree
def make_app(controller=None, conf=None):
    """Return a webtest.TestApp instance wrapping the CherryPy application.

    If a controller class is provided, it is instantiated and mounted at the
    root level.  If not, the already-mounted root is used.  `conf`, when
    given, is merged into the application configuration first.
    """
    if conf:
        config.app.update(conf)
    wsgiapp = mount(controller(), '/') if controller else make_wsgiapp()
    return TestApp(wsgiapp)
class TGTest(unittest.TestCase):
    """A WebTest enabled unit testing class.

    To use, subclass and set ``root`` to your controller object, or set
    ``app`` to a webtest.TestApp instance.  In your tests, use ``self.app``
    to make WebTest calls.
    """
    root = None
    app = None
    stop_tg_only = False

    def setUp(self):
        """Start the server, mounting ``self.root`` if no app exists yet.

        Override this and make sure you have properly mounted a root for
        your server before calling super, or simply pass a root controller
        to super.  Otherwise the CherryPy filters for TurboGears will not
        be used.
        """
        assert self.root or self.app, "Either self.root or self.app must be set"
        if not self.app:
            self.app = make_app(self.root)
        start_server()

    def tearDown(self):
        """Tear down the WebTest by stopping the server."""
        stop_server(tg_only=self.stop_tg_only)

    def login_user(self, user):
        """Log a specified user object into the system."""
        credentials = dict(user_name=user.user_name,
                           password=user.password,
                           login='Login')
        self.app.post(config.get('tools.identity.failure_url'), credentials)
class BrowsingSession(object):
    """Stateful test browser: carries cookies from one request to the next."""

    def __init__(self):
        self.visit = None
        self.response, self.status = None, None
        self.cookie = {}
        self.app = make_app()

    def goto(self, path, headers=None, **kwargs):
        """Request `path` with the stored cookies and record the response.

        Sets ``response`` (body), ``full_response``, ``status``, ``cookie``
        and, when the Content-Type declares a usable charset,
        ``unicode_response`` (the decoded body).
        """
        if headers is None:
            headers = {}
        if self.cookie:
            headers['Cookie'] = self.cookie_encoded
        response = self.app.get(path, headers=headers, **kwargs)
        # If we were given an encoding in the content type we should use it
        # to decode the response:
        ctype_parts = response.headers['Content-Type'].split(';')
        for parameter in ctype_parts[1:]:
            attribute, value = parameter.strip().split('=')
            try:
                self.unicode_response = response.body.decode(value)
                break
            except (UnicodeDecodeError, LookupError):
                # Wrong or unknown encoding name: leave unicode_response
                # unset.  (Was a bare `except:`, which also swallowed
                # unrelated errors such as KeyboardInterrupt.)
                pass
        self.response = response.body
        self.full_response = response
        self.status = response.status
        self.cookie = response.cookies_set
        self.cookie_encoded = response.headers.get('Set-Cookie', '')
class AbstractDBTest(unittest.TestCase):
    """A database enabled unit testing class.

    Creates and destroys your database before and after each unit test.
    You must set the `model` attribute in order for this class to
    function correctly.
    """
    # The module (or object) holding the database classes; must be provided
    # by subclasses or set before running tests.
    model = None

    def setUp(self):
        # Implemented by ORM-specific subclasses (DBTestSO / DBTestSA).
        raise NotImplementedError()

    def tearDown(self):
        # Implemented by ORM-specific subclasses.
        raise NotImplementedError()
class DBTestSO(AbstractDBTest):
    """SQLObject flavour: create the tables before, drop them after a test."""

    def _get_soClasses(self):
        # Honour the optional module-level ``soClasses`` ordering; fall back
        # to all module attributes when it is absent.
        try:
            return [self.model.__dict__[x] for x in self.model.soClasses]
        except AttributeError:
            return self.model.__dict__.values()

    def setUp(self):
        if not self.model:
            # NOTE(review): get_model is not defined in this module -- its
            # import at the top of the file is commented out, so this path
            # raises NameError.  TODO: confirm and restore the import.
            self.model = get_model()
            if not self.model:
                raise Exception("Unable to run database tests without a model")
        # Create a table for every concrete SQLObject subclass in the model.
        # (types.TypeType is Python 2 only.)
        for item in self._get_soClasses():
            if isinstance(item, types.TypeType) and issubclass(item,
                    sqlobject.SQLObject) and item != sqlobject.SQLObject \
                    and item != InheritableSQLObject:
                item.createTable(ifNotExists=True)

    def tearDown(self):
        database.rollback_all()
        # Drop in reverse creation order so dependent tables go first.
        for item in reversed(self._get_soClasses()):
            if isinstance(item, types.TypeType) and issubclass(item,
                    sqlobject.SQLObject) and item != sqlobject.SQLObject \
                    and item != InheritableSQLObject:
                item.dropTable(ifExists=True, cascade=True)
class DBTestSA(AbstractDBTest):
    """SQLAlchemy flavour: create all tables before, drop all after a test."""

    def setUp(self):
        database.get_engine()
        database.metadata.create_all()

    def tearDown(self):
        database.metadata.drop_all()
# Determine which class to use for "DBTest".  Setup & teardown should behave
# similarly regardless of which ORM you choose.
if config.get("sqlobject.dburi"):
    DBTest = DBTestSO
elif config.get("sqlalchemy.dburi"):
    DBTest = DBTestSA
else:
    raise Exception("Unable to find sqlalchemy or sqlobject dburi")
def unmount():
    """Remove all mounted applications from the object traversal tree."""
    # Iterate over a snapshot of the keys: deleting entries while iterating
    # the live keys() view raises RuntimeError on Python 3.
    for app in list(cherrypy.tree.apps):
        del cherrypy.tree.apps[app]
def mount(controller, path="/"):
    """Mount `controller` at `path` and return the resulting WSGI app."""
    cherrypy.tree.mount(controller, path, config=config.app)
    return make_wsgiapp()
def catch_validation_errors(widget, value):
    """Catch and unpack validation errors (for testing purposes).

    Returns a ``(value, errors)`` tuple: ``errors`` is ``{}`` on success,
    otherwise the unpacked error structure (or the Invalid exception itself
    when it has no ``unpack_errors``).
    """
    # NOTE: Python 2 ``except ExcType, name`` syntax.
    try:
        value = widget.validate(value)
    except validators.Invalid, errors:
        try:
            errors = errors.unpack_errors()
        except AttributeError:
            # older Invalid instances have no unpack_errors()
            pass
    else:
        errors = {}
    return value, errors
class MemoryListHandler(logging.Handler):
    """Logging handler that accumulates formatted records in a list."""

    def __init__(self):
        logging.Handler.__init__(self, level=logging.DEBUG)
        # formatted log messages collected so far
        self.log = []

    def emit(self, record):
        """Format the record and append it to the in-memory list."""
        # NOTE: Python 2 print statements.
        print "Got record: %s" % record
        print "formatted as: %s" % self.format(record)
        self.log.append(self.format(record))

    def print_log(self):
        """Print all collected messages and clear the list."""
        print "\n".join(self.log)
        self.log = []

    def get_log(self):
        """Return the collected messages and clear the list."""
        log = self.log
        self.log = []
        return log
# Shared handler and the category (or categories) currently being captured.
_memhandler = MemoryListHandler()
_currentcat = None
def capture_log(category):
    """Capture log output for one or more categories.

    `category` is either a single category string (like 'foo.bar') or a
    list/tuple of them.  You *must* call print_log() (or get_log()) to
    reset when you're done.
    """
    global _currentcat
    assert not _currentcat, "_currentcat not cleared. Use get_log to reset."
    if not isinstance(category, (list, tuple)):
        category = [category]
    _currentcat = category
    for name in category:
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(_memhandler)
def _reset_logging():
    """Detach the memory handler from every logger set up by capture_log."""
    global _currentcat
    if not _currentcat:
        return
    for name in _currentcat:
        logging.getLogger(name).removeHandler(_memhandler)
    _currentcat = None
def print_log():
    """Print the log captured by capture_log to stdout.

    Resets that log and resets the temporarily added handlers.
    """
    _reset_logging()
    _memhandler.print_log()
def get_log():
    """Return the list of log messages captured by capture_log.

    Resets that log and resets the temporarily added handlers.
    """
    _reset_logging()
    return _memhandler.get_log()
def sqlalchemy_cleanup():
    """Dispose of SQLAlchemy metadata, engine and mappers between tests."""
    database.metadata.clear()
    try:
        database.metadata.dispose()
    except AttributeError:  # not threadlocal
        # non-threadlocal metadata: dispose of the bound engine instead
        if database.metadata.bind:
            database.metadata.bind.dispose()
    database._engine = None
    sqlalchemy.orm.clear_mappers()
# Public API of this test-utility module.
__all__ = ["DBTest", "TGTest",
           "capture_log", "print_log", "get_log", "sqlalchemy_cleanup",
           "make_wsgiapp", "make_app", "start_server",
           "stop_server", "mount", "unmount"]
| dbrattli/python-gearshift | gearshift/testutil.py | Python | mit | 10,478 | [
"VisIt"
] | 41b6d780bc768c9bc90f185d295408a1e81eeac9df916c6b621b9b8c05e93b68 |
"""
Set of programs to degrade/convolve synthetic images/spectra to observational
conditions
"""
from scipy import ndimage, signal
import scipy.interpolate as interp
import numpy as np
import math
def spec_conv(spec, wave, conv_type='IRIS', ww=None, wpts=200, winterp='linear',
              xMm=16.5491, graph=False, lscale=1.):
    ''' Convolves a 2D spectrogram to observational conditions (both on the
        spatial and spectral axes)

    IN:
       spec:  2D array, wavelength on last axis
       wave:  wavelength array, could be irregular
       conv_type: string, type of convolution to make: 'IRIS', 'SUMI', etc.
       ww:    2-element list/tuple containing the first and last
              wavelengths to be used in the wavelength interpolation.
              If unset, will use the min and max of wave.
       wpts:  integer, number of interpolated wavelength points.
              NOTE(review): accepted but currently unused.
       winterp: string, type of wavelength interpolation for interp1d
              ('linear', 'cubic', etc.)
       xMm:   physical size (in Mm) of spatial dimension
       graph: if true, will show images
       lscale: luminance scale for images
    OUT:
       nspec: 2D array of new spectrogram.
       nwave: 1D array with the pixelised wavelength scale.

    --Tiago, 20110819
    '''
    if graph:
        import matplotlib.pyplot as p
        vmin = np.min(spec)
        vmax = np.max(spec) * lscale
    # Convolution parameters. This is a 4-element list with the following:
    # [spatial res. FWHM (arcsec), spatial pixel size (arcsec),
    #  spectral res FWHM (nm), spectral pixel size (nm)]
    cp = {'IRIS': [0.4, 0.167, 0.008, 0.0025],
          'SUMI': [1., 1., 0.0043, 0.0022],
          'SUMI2': [2., 1., 0.0043, 0.0022],
          'SUMI3': [3., 1., 0.0043, 0.0022]}
    if conv_type not in list(cp.keys()):
        raise ValueError('Invalid convolution type'
                         ' %s. Supported values are %s' %
                         (conv_type, str([a for a in list(cp.keys())])))
    pix2asec = xMm / (spec.shape[0] * 696. / 959.9)  # from pixels to arcsec
    # wavelength interpolation onto a regular grid (1/3 of the instrument
    # spectral pixel, so pixelisation below can pick every 3rd point)
    if ww is None:
        ww = (np.min(wave), np.max(wave))
    nwave = np.arange(ww[0], ww[1], cp[conv_type][3] / 3.)
    f = interp.interp1d(wave, spec, kind=winterp)
    nspec = f(nwave)
    # convolution
    wstep = nwave[1] - nwave[0]
    wsigma = cp[conv_type][2] / \
        (wstep * 2 * math.sqrt(2 * math.log(2)))  # from fwhm to sigma
    dstep = pix2asec
    dsigma = cp[conv_type][0] / (dstep * 2 * math.sqrt(2 * math.log(2)))
    nspec = ndimage.gaussian_filter(nspec, [dsigma, wsigma])
    if graph:
        p.subplot(121)
        p.pcolormesh(wave, np.arange(spec.shape[0]) * pix2asec, spec,
                     shading='gouraud', cmap=p.cm.gist_gray, vmin=vmin,
                     vmax=vmax)
        p.xlim(ww[0], ww[1])
        p.ylim(0, spec.shape[0] * pix2asec)
    # pixelisation: resample onto instrument pixel sizes
    coords = np.mgrid[0.: spec.shape[0]:cp[conv_type][1] / pix2asec,
                      0.:nwave.shape[0]:cp[conv_type][3] / wstep]
    nspec = ndimage.map_coordinates(nspec, coords, order=1, mode='nearest')
    nwave = interp.interp1d(np.arange(nwave.shape[0]), nwave)(
        coords[1][0].astype('i'))
    if graph:
        p.subplot(122)
        p.imshow(nspec,
                 extent=(nwave[0], nwave[-1], 0, spec.shape[0] * pix2asec),
                 cmap=p.cm.gist_gray, aspect='auto', interpolation='nearest',
                 vmin=vmin, vmax=vmax)
        p.xlim(ww[0], ww[1])
        p.ylim(0, spec.shape[0] * pix2asec)
    return nspec, nwave
def spec3d_conv(spec, wave, conv_type='IRIS', ww=None, wpts=200,
                winterp='linear', xMm=16.5491):
    ''' Convolves a 3D spectrogram to observational conditions (both on the
        spatial and spectral axes)

    IN:
       spec:  3D array, wavelength on last axis.
              NOTE(review): modified in place by the spatial smoothing below.
       wave:  wavelength array, could be irregular
       conv_type: string, type of convolution to make: 'IRIS', 'SUMI', etc.
       ww:    2-element list/tuple containing the first and last
              wavelengths to be used in the wavelength interpolation.
              If unset, will use the min and max of wave.
       wpts:  integer, number of interpolated wavelength points.
              NOTE(review): accepted but currently unused.
       winterp: string, type of wavelength interpolation for interp1d
              ('linear', 'cubic', etc.)
       xMm:   physical size (in Mm) of spatial dimension
    OUT:
       nspec: 3D array of new spectrogram.
       nwave: 1D array with the pixelised wavelength scale.

    --Tiago, 20110819
    '''
    # Convolution parameters. This is a 4-element list with the following:
    # [spatial res. FWHM (arcsec), spatial pixel size (arcsec),
    #  spectral res FWHM (nm), spectral pixel size (nm)]
    cp = {'IRIS': [0.4, 0.167, 0.008, 0.0025],
          'SUMI': [1., 1., 0.0043, 0.0022],
          'SUMI2': [2., 1., 0.0043, 0.0022],
          'SUMI3': [3., 1., 0.0043, 0.0022]}
    if conv_type not in list(cp.keys()):
        raise ValueError('Invalid convolution type %s. Supported values are %s' %
                         (conv_type, str([a for a in list(cp.keys())])))
    pix2asec = xMm / (spec.shape[0] * 696. / 959.9)  # from pixels to arcsec
    nwave = spec.shape[-1]
    # Spatial convolution (slice by slice, in place)
    dstep = pix2asec
    dsigma = cp[conv_type][0] / (dstep * 2 * math.sqrt(2 * math.log(2)))
    #nspec = ndimage.gaussian_filter(nspec,[dsigma,dsigma,wsigma])
    for w in range(nwave):
        spec[:, :, w] = ndimage.gaussian_filter(spec[:, :, w], dsigma)
    # Spatial pixelisation
    coords = np.mgrid[0.: spec.shape[0]:cp[conv_type][1] / pix2asec,
                      0.: spec.shape[1]:cp[conv_type][1] / pix2asec]
    nspec = np.empty(coords.shape[1:] + (nwave,), dtype='Float32')
    for w in range(nwave):
        nspec[:, :, w] = ndimage.map_coordinates(
            spec[:, :, w], coords, order=1, mode='nearest')
    # Spectral convolution
    # wavelength interpolation to fixed scale (1/3 of the spectral pixel)
    if ww is None:
        ww = (np.min(wave), np.max(wave))
    nwave = np.arange(ww[0], ww[1], cp[conv_type][3] / 3.)
    f = interp.interp1d(wave, nspec, kind=winterp)
    nspec = f(nwave)
    # convolve with Gaussian
    wstep = nwave[1] - nwave[0]
    wsigma = cp[conv_type][2] / \
        (wstep * 2 * math.sqrt(2 * math.log(2)))  # from fwhm to sigma
    nspec = ndimage.gaussian_filter1d(nspec, wsigma, axis=-1, mode='nearest')
    # Spectral pixelisation: keep every 3rd point of the oversampled grid
    nspec = nspec[:, :, ::3]
    return nspec, nwave[::3]
def img_conv(spec, wave, psf, psfx, conv_type='IRIS_MgII_core', xMm=16.5491,
             wfilt=None, graph=False, lscale=1., pixelise=True):
    ''' Convolves a 3D spectrogram to observational slit-jaw conditions
        (does spatial convolution and pixelisation, and spectral filtering)

    IN:
       spec:  3D array, wavelength on last axis
       wave:  wavelength array, could be irregular
       psf:   2D array with the spatial PSF
       psfx:  1D array with the PSF coordinates in arcsec
       conv_type: string, type of convolution to make (see `cp` below)
       xMm:   physical size (in Mm) of first spatial dimension
       wfilt: optional transmission filter defined on the `wave` grid;
              if None, a Gaussian filter from `cp` is used
       graph: if true, will show images
       lscale: luminance scale for images
       pixelise: if true, resample onto instrument pixels
    OUT:
       nspec: 2D array of image.

    --Tiago, 20110820
    '''
    from ..fitting import gaussian
    if graph:
        import matplotlib.pyplot as p
    # some definitions
    asec2Mm = 696. / 959.5  # conversion between arcsec and Mm
    pix2Mm = xMm / spec.shape[0]  # size of simulation's pixels in Mm
    pix2asec = pix2Mm / asec2Mm  # from pixels to arcsec
    # Convolution parameters. This is a 4-element list with the following:
    # [spatial res. FWHM (arcsec), spatial pixel size (arcsec),
    #  spectral central wavelength (nm), spectral FWHM (nm)]
    cp = {'IRIS_MGII_CORE': [0.4, 0.166, 279.518, 0.4],  # 279.6 nm in vac
          'IRIS_MGII_WING': [0.4, 0.166, 283.017, 0.4],  # 283.1 nm in vac
          'IRIS_CII': [0.4, 0.166, 133.279, 4.0],  # 133.5 nm in vac
          'IRIS_SIV': [0.4, 0.166, 139.912, 4.0],  # 140.0 nm in vac
          'IRIS_TEST': [0.4, 0.166, 279.518, 1.5],
          # scaled IRIS resolution to Hinode Ca H
          'IRIS_HINODE_CAH': [0.4 * 397 / 280., 0.166, 396.85, 0.3],
          # scaled IRIS resolution to Hinode red BFI
          'IRIS_HINODE_450': [0.4 * 450 / 280., 0.166, 450.45, 0.4],
          # scaled IRIS resolution to Hinode green BFI
          'IRIS_HINODE_555': [0.4 * 555 / 280., 0.166, 555.05, 0.4],
          # scaled IRIS resolution to Hinode blue BFI
          'IRIS_HINODE_668': [0.4 * 668 / 280., 0.166, 668.40, 0.4]}
    conv_type = conv_type.upper()
    if wfilt is None:
        wcent = cp[conv_type][2]
        wfwhm = cp[conv_type][3]
        # selecting wavelengths within 4 FWHM
        widx = (wave[:] > wcent - 2. * wfwhm) & (wave[:] < wcent + 2. * wfwhm)
        # filtering function, here set to Gaussian
        wfilt = gaussian([wcent, wfwhm / (2 * math.sqrt(2 * math.log(2))),
                          1., 0.], wave[widx])
        wfilt /= np.trapz(wfilt, x=wave[widx])
    else:
        widx = wfilt != 0
        wfilt = wfilt[widx]
    # multiply by filter and integrate -> 2D image
    nspec = np.trapz(spec[:, :, widx] * wfilt, x=wave[widx], axis=-1)
    if graph:
        vmin = np.min(nspec)
        vmax = np.max(nspec) * lscale
        p.subplot(211)
        aa = nspec
        if hasattr(aa, 'mask'):
            aa[aa.mask] = np.mean(nspec)
        p.imshow(np.transpose(aa), extent=(0, spec.shape[0] * pix2asec, 0,
                                           spec.shape[1] * pix2asec),
                 vmin=vmin, vmax=vmax, cmap=p.cm.gist_gray)
        p.title('Filter only')
        p.xlabel('arcsec')
        p.ylabel('arcsec')
    # spatial convolution: resample the PSF onto simulation pixels
    psf_x = psfx * asec2Mm
    sep = np.mean(psf_x[1:] - psf_x[:-1])
    coords = np.mgrid[0: psf_x.shape[0]: pix2Mm / sep,
                      0: psf_x.shape[0]: pix2Mm / sep]
    npsf = ndimage.map_coordinates(psf, coords, order=1, mode='nearest')
    npsf /= np.sum(npsf)
    # pad 50 wrapped pixels on each side to limit FFT edge effects
    im = np.concatenate([nspec[-50:], nspec, nspec[:50]])
    im = np.concatenate([im[:, -50:], im, im[:, :50]], axis=1)
    nspec = signal.fftconvolve(im, npsf, mode='same')[50:-50, 50:-50]
    # pixelisation
    if pixelise:
        coords = np.mgrid[0.:spec.shape[0]:cp[conv_type][1] / pix2asec,
                          0.:spec.shape[1]:cp[conv_type][1] / pix2asec]
        nspec = ndimage.map_coordinates(nspec, coords, order=1, mode='nearest')
    if graph:
        p.subplot(212)
        p.imshow(np.transpose(nspec),
                 extent=(0, spec.shape[0] * pix2asec, 0,
                         spec.shape[1] * pix2asec),
                 vmin=vmin, vmax=vmax,
                 interpolation='nearest', cmap=p.cm.gist_gray)
        p.title('Filter + convolved %s' % (conv_type))
        p.xlabel('arcsec')
        p.ylabel('arcsec')
    return nspec
def get_hinode_psf(wave, psfdir='/Users/tiago/data/Hinode/'):
    """
    Gets the Hinode PSF (from Sven Wedemeyer's work) for a given
    wavelength in nm. Assumes Hinode's ideal PSF is on psfdir.

    Parameters
    ----------
    wave : array_like
        Wavelength(s) in nm; the mean is used as the working wavelength.
    psfdir : str, optional
        Directory containing the ideal-PSF FITS files.

    Returns
    -------
    ix : 1D array
        x scale of the PSF, in arcsec.
    psf : 2D array
        Combined (ideal convolved with non-ideal) PSF.
    """
    from astropy.io import fits as pyfits
    from ..math import voigt
    # Get ideal PSF
    # BUG FIX: the '%s' placeholders were never filled, so the files were
    # looked up literally as '%s/hinode_ideal_psf_555nm.fits'.
    ipsf = pyfits.getdata('%s/hinode_ideal_psf_555nm.fits' % psfdir)
    ix = pyfits.getdata('%s/hinode_ideal_psf_scale_555nm.fits' % psfdir)
    # Scale ideal PSF to our wavelength and simulation pixels
    cwave = np.mean(wave)   # our wavelength
    ix *= cwave / 555.
    sep = np.mean(ix[1:] - ix[:-1])
    # Get non-ideal PSF for our wavelength
    # these are the tabulated values in Wedemeyer-Boem, A&A 487, 399 (2008),
    # for voigt function and Mercury transit
    gamma_data = [4., 5., 6.]
    sigma = 8.
    wave_data = [450.45, 555., 668.4]
    # interpolate gamma for our wavelength
    gamma = interp.interp1d(wave_data, gamma_data, kind='linear')(cwave)
    gamma *= 1e-3
    sigma *= 1e-3
    uu = np.mgrid[-sep * 600: sep * 599: sep, -sep * 600: sep * 599: sep]
    xm = np.arange(-sep * 600, sep * 599, sep)
    r = np.sqrt(uu[0]**2 + uu[1]**2)
    rf = np.ravel(r)
    # BUG FIX: original referenced undefined names (utilsmath, a, b).  Use
    # the imported voigt with damping parameter a = gamma / sigma and width
    # sigma.  NOTE(review): confirm this matches the helita voigt(a, u)
    # convention before relying on absolute PSF shapes.
    a = gamma / sigma
    npsf = np.reshape(voigt(a, rf / sigma) /
                      (sigma * np.sqrt(np.pi)), (xm.shape[0], xm.shape[0]))
    # Convolve ideal PSF with non-ideal PSF
    psf = signal.fftconvolve(ipsf, npsf, mode='same')
    # Recentre from convolution, remove pixels outside non-ideal PSF kernel
    ix = ix[:-1][2000:-2000]
    psf = psf[1:, 1:][2000:-2000, 2000:-2000]
    return ix, psf
def spectral_convolve(w):
    '''
    Spectral convolution worker for imgspec_conv.

    Interpolates the spectrum onto the new wavelength grid and applies a
    Gaussian convolution along the last axis.  The input is a single tuple
    to make it easier to parallelise with multiprocessing.  Requires a
    module-level array called ``result`` to write into.
    '''
    idx, wsigma, wave_old, wave_new, data = w
    interpolator = interp.interp1d(wave_old, data, kind='linear')
    resampled = interpolator(wave_new)
    result[idx] = ndimage.gaussian_filter1d(resampled, wsigma, axis=-1,
                                            mode='nearest')
    return
def spatial_convolve(w):
    '''
    Spatial convolution worker for imgspec_conv.

    Convolves one wavelength slice with the given PSF in Fourier space and
    trims the 50-pixel padding added by the caller.  The input is a single
    tuple to make it easier to parallelise with multiprocessing.  Requires
    a module-level array called ``result`` to write into.
    '''
    idx, image, kernel = w
    convolved = signal.fftconvolve(image, kernel, mode='same')
    result[:, :, idx] = convolved[50:-50, 50:-50]
    return
def atmos_conv(atmosfiles, xMm, psf, psfx, obs='IRIS_NUV', snapshots=None,
               verbose=False, parallel=False):
    """
    Spatially convolves variables from RH input atmospheres with the
    same method as imgspec_conv. Results are written in a new file
    (input name with '_conv' appended before the extension).
    So far, the only variables to be convolved and written are defined
    in conv_var. snapshots can be a list of snapshot indices. If None,
    default is to write all snapshots.
    """
    import os
    import netCDF4
    conv_var = ['temperature', 'velocity_z', 'electron_density', 'B_z']
    copy_var = ['z', 'snapshot_number']
    out_var = {}
    for f in atmosfiles:
        if not os.path.isfile(f):
            print(('File %s not found, skipping.' % f))
            continue
        foutname = os.path.splitext(f)[0] + '_conv' + os.path.splitext(f)[1]
        fin = netCDF4.Dataset(f, 'r')
        fout = netCDF4.Dataset(foutname, 'w')
        if snapshots is None:
            snapshots = list(range(len(fin.dimensions['nt'])))
        for v in conv_var:
            if v not in fin.variables:
                print(('Variable %s not found in input file, skipping.' % v))
                continue
            for s in snapshots:
                buf = fin.variables[v][s].copy()
                # NOTE(review): the `obs` argument is ignored here; the
                # convolution is hardwired to 'iris_nuv'.  Confirm intent.
                buf = var_conv(buf, xMm, psf, psfx, obs='iris_nuv',
                               parallel=parallel)
                try:
                    out_var[v][s, :] = buf
                except KeyError:
                    # first snapshot for this variable: create it (and the
                    # output dimensions on the very first write)
                    try:
                        out_var[v] = fout.createVariable(v, 'f4',
                                                         dimensions=('nt', 'nx',
                                                                     'ny', 'nz'))
                        out_var[v][s, :] = buf
                    except ValueError:
                        fout.createDimension('nt', len(snapshots))
                        fout.createDimension('nx', buf.shape[0])
                        fout.createDimension('ny', buf.shape[1])
                        fout.createDimension('nz', buf.shape[2])
                        out_var[v] = fout.createVariable(v, 'f4',
                                                         dimensions=('nt', 'nx',
                                                                     'ny', 'nz'))
                        out_var[v][s, :] = buf
        # copy the unconvolved bookkeeping variables verbatim
        for v in copy_var:
            try:
                out_var[v] = fout.createVariable(v, fin.variables[v].dtype,
                                                 fin.variables[v].dimensions)
            except:
                out_var[v] = fout.variables[v]
            for s in snapshots:
                out_var[v][s] = fin.variables[v][s]
        # Copy attributes
        fout.description = fin.description + ' Spatially-convolved to %s' % obs
        fout.has_B = fin.has_B
        fout.close()
        fin.close()
        print(('Sucessfully wrote ' + foutname + '.'))
    return
def var_conv(var, xMm, psf, psfx, obs='iris_nuv', parallel=False,
             pixelise=False, mean2=False):
    """
    Spatially convolves a single atmos variable.

    `var` is a 3D array (x, y, depth); the PSF given by `psf`/`psfx`
    (values / arcsec scale) is resampled to the simulation pixel size and
    applied depth-slice by depth-slice.  Writes into the module-level
    ``result`` array shared with the worker functions.
    """
    import multiprocessing
    import ctypes
    global result
    # some definitions
    asec2Mm = 696. / 959.5  # conversion between arcsec and Mm
    pix2Mm = xMm / var.shape[0]  # size of simulation's pixels in Mm
    if obs.lower() == 'hinode_sp':
        obs_pix2Mm = 0.16 * asec2Mm  # size of instrument spatial pixels in Mm
    elif obs.lower() == 'iris_nuv':
        obs_pix2Mm = 0.166 * asec2Mm  # size of instrument spatial pixels in Mm
    nwave = var.shape[-1]  # This is really depth, not wavelength...
    # convert PSF kernel to the spectrogram's pixel scale
    psf_x = psfx * asec2Mm
    sep = np.mean(psf_x[1:] - psf_x[:-1])
    coords = np.mgrid[0: psf_x.shape[0]: pix2Mm / sep,
                      0: psf_x.shape[0]: pix2Mm / sep]
    npsf = ndimage.map_coordinates(psf, coords, order=1, mode='nearest')
    npsf /= np.sum(npsf)
    # pad 50 wrapped pixels on each side to limit FFT edge effects
    im = np.concatenate([var[-50:], var, var[:50]])
    im = np.concatenate([im[:, -50:], im, im[:, :50]], axis=1)
    itr = ((i, im[:, :, i], npsf) for i in range(nwave))
    # Spatial convolution
    if parallel:
        # multiprocessing shared object to collect output
        result_base = multiprocessing.Array(ctypes.c_float, np.prod(var.shape))
        result = np.ctypeslib.as_array(result_base.get_obj())
        result = result.reshape(var.shape)
        pool = multiprocessing.Pool()  # by default use all CPUs
        pool.map(spatial_convolve, itr)
        pool.close()
        pool.join()
    else:
        result = np.empty_like(var)
        for w in itr:
            spatial_convolve(w)
    # Spatial pixelisation
    if pixelise:
        coords = np.mgrid[0.: var.shape[0]: obs_pix2Mm / pix2Mm,
                          0.: var.shape[1]: obs_pix2Mm / pix2Mm]
        nvar = np.empty(coords.shape[1:] + (nwave,), dtype='Float32')
        for w in range(nwave):
            nvar[:, :, w] = ndimage.map_coordinates(result[:, :, w], coords,
                                                    order=1, mode='nearest')
        if mean2:
            # average 2 pixels along second dimension
            # NOTE(review): si[1] / 2 relies on integer division (Python 2);
            # on Python 3 this would need // to keep reshape happy.
            si = nvar.shape
            nvar = np.reshape(nvar, (si[0], si[1] / 2, 2, si[2])).mean(2)
    else:
        nvar = result[:]
    return nvar
def imgspec_conv(spec, wave, xMm, psf, psfx, obs='hinode_sp', verbose=False,
                 pixelise=True, parallel=False, mean2=False):
    '''
    Convolves a 3D spectrogram to observational conditions (does spatial
    convolution, spectral convolution and pixelisation, in that order)

    IN:
       spec:  3D array, wavelength on last axis
       wave:  wavelength array, could be irregular
       xMm:   physical size (in Mm) of first spatial dimension
       psf:   2D array with PSF
       psf_x: 1D array with PSF radial coordinates in arcsec
       obs:   type of observations. Options: 'hinode_sp', 'iris_nuv',
              'iris_fuvcont'.
       parallel: if truthy, will run in parallel (value is passed to
              multiprocessing.Pool as the number of workers for the
              spatial step)
       pixelise: if True, will pixelise into the observational conditions
       mean2: if True and pixelise is True, will average every 2 pixels on
              second dimension (to mimick the size of IRIS's slit width)
    OUT:
       nspec: 3D array of spectrogram.

    --Tiago, 20120105
    '''
    import multiprocessing
    import ctypes
    global result
    # some definitions
    asec2Mm = 696. / 959.5  # conversion between arcsec and Mm
    pix2Mm = xMm / spec.shape[0]  # size of simulation's pixels in Mm
    if obs.lower() == 'hinode_sp':
        obs_pix2Mm = 0.16 * asec2Mm  # size of instrument spatial pixels in Mm
        obs_pix2nm = 0.00215  # size of instrument spectral pixels in nm
        # instrument spectral resolution (Gaussian FWHM in nm)
        obs_spect_res = 0.0025
    elif obs.lower() == 'iris_nuv':
        # Tiago: updated to Iris Technical Note 1
        obs_pix2Mm = 0.166 * asec2Mm  # size of instrument spatial pixels in Mm
        obs_pix2nm = 0.002546  # size of instrument spectral pixels in nm
        # instrument spectral resolution (Gaussian FWHM in nm)
        obs_spect_res = 0.0060
        wavei = 278.1779
        wavef = 283.3067 + obs_pix2nm
    elif obs.lower() == 'iris_fuvcont':
        obs_pix2Mm = 0.166 * asec2Mm  # size of instrument spatial pixels in Mm
        obs_pix2nm = 0.002546 / 2.  # size of instrument spectral pixels in nm
        # instrument spectral resolution (Gaussian FWHM in nm)
        obs_spect_res = 0.0026
        wavei = 132.9673
        wavef = 141.6682 + obs_pix2nm
    else:
        raise ValueError('imgspec_conv: unsupported instrument %s' % obs)
    nwave = spec.shape[-1]
    # Spatial convolution
    if verbose:
        print('Spatial convolution...')
    # convert PSF kernel to the spectrogram's pixel scale
    psf_x = psfx * asec2Mm
    sep = np.mean(psf_x[1:] - psf_x[:-1])
    coords = np.mgrid[0: psf_x.shape[0]: pix2Mm / sep,
                      0: psf_x.shape[0]: pix2Mm / sep]
    npsf = ndimage.map_coordinates(psf, coords, order=1, mode='nearest')
    npsf /= np.sum(npsf)
    # pad 50 wrapped pixels on each side to limit FFT edge effects
    im = np.concatenate([spec[-50:], spec, spec[:50]])
    im = np.concatenate([im[:, -50:], im, im[:, :50]], axis=1)
    itr = ((i, im[:, :, i], npsf) for i in range(nwave))
    if parallel:
        # multiprocessing shared object to collect output
        result_base = multiprocessing.Array(
            ctypes.c_float, np.prod(spec.shape))
        result = np.ctypeslib.as_array(result_base.get_obj())
        result = result.reshape(spec.shape)
        pool = multiprocessing.Pool(parallel)  # by default use all CPUs
        pool.map(spatial_convolve, itr)
        pool.close()
        pool.join()
    else:
        result = np.empty_like(spec)
        for w in itr:
            spatial_convolve(w)
    # Spatial pixelisation
    if pixelise:
        if verbose:
            print('Spatial pixelisation...')
        coords = np.mgrid[0.: spec.shape[0]: obs_pix2Mm / pix2Mm,
                          0.: spec.shape[1]: obs_pix2Mm / pix2Mm]
        nspec = np.empty(coords.shape[1:] + (nwave,), dtype='Float32')
        for w in range(nwave):
            nspec[:, :, w] = ndimage.map_coordinates(result[:, :, w], coords,
                                                     order=1, mode='nearest')
        if mean2:
            # average 2 pixels along second dimension
            # NOTE(review): si[1] / 2 relies on Python 2 integer division.
            si = nspec.shape
            nspec = np.reshape(nspec, (si[0], si[1] / 2, 2, si[2])).mean(2)
    else:
        nspec = result[:]
    # Spectral convolution
    if verbose:
        print('Spectral convolution...')
    # new wavelength grid at 1/3 of the instrument spectral pixel
    if obs.lower() in ['iris_nuv', 'iris_fuvcont']:
        nwave = np.arange(wavei, wavef, obs_pix2nm / 3.)
    else:
        nwave = np.arange(wave[0], wave[-1], obs_pix2nm / 3.)
    wstep = nwave[1] - nwave[0]
    wsigma = obs_spect_res / \
        (wstep * 2 * math.sqrt(2 * math.log(2)))  # fwhm to sigma
    itr = ((i, wsigma, wave, nwave, nspec[i]) for i in range(nspec.shape[0]))
    if parallel:
        result_base = multiprocessing.Array(ctypes.c_float,
                                            np.prod(nspec.shape[:-1]) *
                                            len(nwave))
        result = np.ctypeslib.as_array(result_base.get_obj())
        result = result.reshape(nspec.shape[:-1] + nwave.shape)
        pool = multiprocessing.Pool()  # by default use all CPUs
        pool.map(spectral_convolve, itr)
        pool.close()
        pool.join()
    else:
        result = np.empty(nspec.shape[:-1] + nwave.shape, dtype='f')
        for w in itr:
            spectral_convolve(w)
    # Spectral pixelisation: keep every 3rd point of the oversampled grid
    nspec = result[:, :, ::3]
    return nspec
| M1kol4j/helita | helita/sim/synobs.py | Python | bsd-3-clause | 24,461 | [
"Gaussian"
] | 8e2573f230326082932839b64f5967ca90001046ec230558fd0961a511f16224 |
# Build a Ca-Mn-O structure, replicate it into the neighbouring unit cells,
# decorate it with a momentum, a bond and a face, and emit a Blender script.
import sys
sys.path.append('../../src/')
import itertools

import cryspy
from cryspy.fromstr import fromstr as fs

# Hexagonal cell: a = b = 10.4416, c = 6.3432, gamma = 120 deg.
metric = cryspy.geo.Cellparameters(10.4416, 10.4416, 6.3432, 90, 90, 120).to_Metric()

# Asymmetric unit of the structure.
atomset = cryspy.crystal.Atomset({
    cryspy.crystal.Atom("Ca1", "Ca", fs("p 0 0 0 ")),
    cryspy.crystal.Atom("Mn1", "Mn", fs("p 1/2 0 0 ")),
    cryspy.crystal.Atom("Mn2", "Mn", fs("p 1/2 0 1/2 ")),
    cryspy.crystal.Atom("Mn3", "Mn", fs("p 0 0 1/2 ")),
    cryspy.crystal.Atom("O1", "O", fs("p 0.2226 0.2731 0.0814 ")),
    cryspy.crystal.Atom("O2", "O", fs("p 0.34219 0.5221 0.3410 ")),
})

# Expand the asymmetric unit by space group no. 148 (R-3).
Rm3 = cryspy.tables.spacegroup(148)
atomset = Rm3 ** atomset

# Collect the symmetry-expanded atoms plus every image shifted by +1 cell
# along any combination of axes; shifted atoms are kept only if they lie
# close to the unit cell (fractional coordinates within [-0.05, 1.05]).
menge = set(atomset.menge)
for dx, dy, dz in itertools.product((0, 1), repeat=3):
    if (dx, dy, dz) == (0, 0, 0):
        continue  # the unshifted set was already added above, without bounds check
    shift = fs("x%s, y%s, z%s" % tuple("+1" if d else "" for d in (dx, dy, dz)))
    for atom in (shift ** atomset).menge:
        if -0.05 <= float(atom.pos.x()) <= 1.05 \
                and -0.05 <= float(atom.pos.y()) <= 1.05 \
                and -0.05 <= float(atom.pos.z()) <= 1.05:
            menge.add(atom)

# Decorations: a magnetic momentum, a bond and a face for visualisation.
momentum = cryspy.crystal.Momentum("M", fs("p 1/2 1/2 1/2"), fs("d 1 0 0"))
menge.add(momentum)
bond = cryspy.crystal.Bond("B", fs("p 0 0 0"), fs("p 1/2 1/2 0"))
menge.add(bond)
face = cryspy.crystal.Face("F", [fs("p 0 0 0"), fs("p 1 0 0"), fs("p 0 1 0")])
menge.add(face)

atomset = cryspy.crystal.Atomset(menge)
cryspy.blender.make_blender_script(atomset, metric, "structure", "blenderscript.py")
| tobias-froehlich/cryspy | tests/blendertest/blendertest.py | Python | gpl-3.0 | 1,841 | [
"CRYSTAL"
] | 10b3f4bf1c1a57f57ebcb1d2dcaa0652d4cbc70cd07aefcfeaced75fb758da60 |
# Hop --- a framework to analyze solvation dynamics from MD simulations
# Copyright (c) 2009 Oliver Beckstein <orbeckst@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utility functions --- :mod:`hop.utilities`
==========================================
Random mix of convenience functions that don't fit anywhere else.
Message output currently relies on the simple verbosity helpers defined in
this module; migrating to Python's standard :mod:`logging` module remains a TODO.
"""
from __future__ import absolute_import
import sys
import os
import errno
import cPickle
import warnings
import MDAnalysis.lib.log
from .exceptions import MissingDataWarning
def unlink_f(path):
    """Remove *path* but do not complain if the file does not exist.

    Equivalent to ``rm -f path``: any :exc:`OSError` other than
    ``ENOENT`` (no such file or directory) is re-raised.
    """
    try:
        os.unlink(path)
    # 'except OSError as err' works on Python 2.6+ and 3.x
    # (the old 'except OSError, err' form is Python-2-only syntax)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
def mkdir_p(path):
    """Create a directory *path* with subdirs but do not complain if it exists.

    This is like GNU ``mkdir -p path``: any :exc:`OSError` other than
    ``EEXIST`` (directory already present) is re-raised.
    """
    try:
        os.makedirs(path)
    # 'except OSError as err' works on Python 2.6+ and 3.x
    # (the old 'except OSError, err' form is Python-2-only syntax)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
# unbound methods filename_function(), to be used in other
# classes; the plan is to make all classes that require them
# subclasses of hop.utilities.Saveable and bind them to this super
# class. (load() and save() are already tentatively XXXed out)
# used in many classes for filename handling (not all are Saveable yet)
# filename = hop.utilities.filename_function
# (adds the _filename attribute to the class self!)
# NOTE: filename_function is not developed anymore and deprecated
# Try to derive classes from Saveable.
def filename_function(self,filename=None,ext=None,set_default=False,use_my_ext=False):
    """Return a file name for the object, optionally storing it as the default.

    Without *filename* the stored default name (``self._filename``) is used;
    a :exc:`ValueError` is raised when none has been set.  A given *filename*
    is split into stem and extension; *ext* (or, with ``use_my_ext=True``,
    the filename's own suffix) is appended to the stem.  With
    ``set_default=True`` the stem becomes the new default name.
    """
    if filename is None:
        # fall back on the stored default name
        if not hasattr(self, '_filename'):
            self._filename = None  # make sure the attribute exists
        if not self._filename:
            raise ValueError("A file name is required because no default file name was defined.")
        filename, my_ext = self._filename, None
    else:
        filename, my_ext = os.path.splitext(filename)
        if set_default:
            # remember the stem as the new default file name
            self._filename = filename
    if use_my_ext and my_ext:
        ext = my_ext
    if ext is not None:
        # tolerate a leading dot in the requested extension
        ext = ext[1:] if ext.startswith('.') else ext
        filename = '.'.join((filename, ext))
    return filename
def fileextension(filename,default=None):
    """Return *filename*'s extension without the leading dot, or *default*.

    A trailing bare dot (empty extension) also yields *default*.
    """
    ext = os.path.splitext(filename)[1]
    if len(ext) <= 1:
        # no extension (or just a bare '.'): fall back on the default
        return default
    return ext[1:] if ext.startswith('.') else ext
class Saveable(object):
    """Baseclass that supports save()ing and load()ing via pickle.

    Subclasses declare what gets pickled by overriding the class variables

      _saved_attributes = []    # list attributes to be pickled
      _merge_attributes = []    # list dicts to be UPDATED from the pickled file with load(merge=True)
      _excluded_attributes = [] # list attributes that should never be pickled

    Note:

      _saved_attributes = 'all' # pickles ALL attributes, equivalent to self.__dict__.keys()
                                # (use _excluded_attributes with 'all'!)

    Use _excluded_attributes to filter out some attributes such as
    type('method-wrapper') objects that cannot be pickled (e.g. when using properties).
    """
    _saved_attributes = []     # attributes written by save()
    _merge_attributes = []     # dict-valued attributes merged (dict.update) on load(merge=True)
    _excluded_attributes = []  # attributes never pickled (mainly useful with 'all')
    # TODO: could I use __new__ to load the pickled file?
    def __init__(self,*args,**kwargs):
        # TODO: should initialize _XXX_attributes[] via __init__() and use super(cls,Saveable).__init__()
        # in subclasses
        kwargs.setdefault('filename',None)
        # Multiple Inheritance is probably NOT going to work...
        super(Saveable,self).__init__() # XXX: ... shouldn't this take *args,**kwargs ?? OB-2009-06-10
        if kwargs['filename'] is not None:
            # NOTE(review): 'pickle' is passed as the *merge* argument of load()
            # and is truthy, so this always loads in merge mode; it looks like
            # it was meant to be a file extension -- confirm intent.
            self.load(kwargs['filename'],'pickle') # sets _saved_attributes in __dict__
        else:
            pass # do the initialization from the data
    # basic pickle protocol
    def __getstate__(self):
        # Returns the dict of attributes to pickle, or False when the class
        # declared nothing to save.
        if not self._saved_attributes:
            warnings.warn("No data saved: the class declared empty '_saved_attributes'.")
            return False
        data = {}
        if self._saved_attributes == 'all':
            # HACK: filter out some attributes such as type('method-wrapper') objects that
            # cannot be pickled
            saved_attributes = [x for x in self.__dict__.keys() if x not in self._excluded_attributes]
        else:
            saved_attributes = self._saved_attributes
        for attr in saved_attributes:
            try:
                data[attr] = self.__dict__[attr]
            except KeyError:
                # declared but never computed: warn and skip instead of failing
                warnings.warn("Attribute '"+attr+"' has not been computed and will not be saved.",
                              category=MissingDataWarning)
        return data
    def __setstate__(self,data):
        # Simple:
        self.__dict__.update(data) # could check for _saved_attributes
    def filename(self,filename=None,ext=None,set_default=False,use_my_ext=False):
        """Supply a file name for the object.

        fn = filename()                                 ---> <default_filename>
        fn = filename('name.ext')                       ---> 'name'
        fn = filename(ext='pickle')                     ---> <default_filename>'.pickle'
        fn = filename('name.inp','pdf')                 ---> 'name.pdf'
        fn = filename('foo.pdf',ext='png',use_my_ext=True) ---> 'foo.pdf'

        The returned filename is stripped of the extension (use_my_ext=False) and
        if provided, another extension is appended. Chooses a default if no
        filename is given. Raises a ValueError exception if no default file name
        is known.

        If set_default=True then the default filename is also set.
        use_my_ext=True lets the suffix of a provided filename take priority over a
        default ext(tension).
        """
        if filename is None:
            if not hasattr(self,'_filename'):
                self._filename = None # add attribute to class
            if self._filename:
                filename = self._filename
            else:
                raise ValueError("A file name is required because no default file name was defined.")
            my_ext = None
        else:
            filename, my_ext = os.path.splitext(filename)
        if set_default: # replaces existing default file name
            self._filename = filename
        if my_ext and use_my_ext:
            ext = my_ext
        if ext is not None:
            if ext.startswith('.'):
                ext = ext[1:] # strip a dot to avoid annoying mistakes
            filename = filename + '.' + ext
        return filename
    def save(self,filename=None):
        """Save class to a pickled file."""
        fh = open(self.filename(filename,'pickle',set_default=True),'wb') # 2.5: with open(..) as fh:
        try:
            cPickle.dump(self,fh,cPickle.HIGHEST_PROTOCOL)
        finally:
            fh.close()
    def load(self,filename=None,merge=False):
        """Reinstantiate class from a pickled file (produced with save())."""
        fh = open(self.filename(filename,'pickle',set_default=True),'rb')
        try:
            tmp = cPickle.load(fh)
        finally:
            fh.close()
        try:
            data = tmp.__dict__
        except AttributeError:
            warnings.warn("Loading an old-style save file.",category=DeprecationWarning)
            data = tmp # just hope we got all attributes...
        # backwards-compatibility hacks:
        try:
            data['P'] = data['parameters']
        except KeyError:
            pass
        del tmp
        # restore attributes from the temporary instance
        if not self._saved_attributes:
            warnings.warn("No data loaded: the object declared empty '_saved_attributes'.",
                          category=MissingDataWarning)
            return False
        if self._saved_attributes == 'all':
            saved_attributes = [x for x in data.keys() if x not in self._excluded_attributes]
        else:
            saved_attributes = self._saved_attributes
        for attr in saved_attributes:
            try:
                if merge and attr in self._merge_attributes: # only works for type(attr)==dict
                    self.__dict__[attr].update(data[attr])
                else:
                    self.__dict__[attr] = data[attr]
            except KeyError:
                warnings.warn("Expected attribute '"+attr+"' was not found in saved file '"+filename+"'.",
                              category=MissingDataWarning)
        del data
        return True
def easy_load(names,baseclass,keymethod):
    """Instantiate classes from existing instances or pickled files.

    instance_list = easy_load(names, baseclass, keymethod)

    >>> x = easy_load(<filename>, Xclass, 'my_method_name')
    >>> [x1, x2, ...] = easy_load([<filename1>, <fn2>, ...], Xclass, 'my_method_name')
    >>> [x1, x2, ...] = easy_load([x1, x2, ...], Xclass, 'my_method_name')

    Each entry that already implements *keymethod* is taken as a live
    instance; anything else is treated as a file name and loaded via
    ``baseclass(filename=name)``.  A single (non-iterable) argument yields a
    single instance; an iterable yields a list.
    """
    def _materialize(obj):
        # an object exposing *keymethod* is already a usable instance
        if hasattr(obj, keymethod):
            return obj
        return baseclass(filename=obj)
    if iterable(names):
        return [_materialize(name) for name in names]
    return _materialize(names)
#------------------------------------------------------------
# status message functions
#------------------------------------------------------------
# Module-wide verbosity state shared by set_verbosity()/get_verbosity()/msg().
# Set verbosity = 0 for minimal output; 3 is verbose, >3 is debugging.
# A negative value additionally mirrors messages to 'logfile'.
# I haven't found a way to make this a class -- unless I let every
# other class inherit from the base class (eg 'interactive') and make
# verbosity a class-level variable.
#
# (NOTE: this will be removed once we use logger)
verbosity = 3
logfile = None
LOGFILENAME = None
def set_verbosity(level=None,logfilename=None):
    """set_verbosity([level],logfilename=<filename>)

    Set the module-wide verbosity level and return it; with no arguments,
    just return the current level.

    level < 0 : output with abs(level) verbosity, also appended to logfile
    level == 0: minimum
    level == 3: verbose
    level > 3 : debugging
    """
    global verbosity,logfile,LOGFILENAME
    if level is None:
        return verbosity
    if level < 0:
        # negative level switches on logging to a file
        # (this really should be a class, with the destructor closing the file)
        if logfilename is None:
            logfilename = LOGFILENAME    # reuse the previously supplied name
        else:
            LOGFILENAME = logfilename    # remember for subsequent calls
        try:
            logfile = open(logfilename,'a')
        # narrow except: the old bare 'except:' also swallowed
        # KeyboardInterrupt/SystemExit.  TypeError covers open(None) when no
        # file name was ever supplied.
        except (TypeError, IOError, OSError):
            raise IOError("Failed to open logfile; provide a filename when level<0.")
    if level > 0 and verbosity < 0:
        # logging is being switched off: close the logfile if we can
        try:
            close_log()
        except Exception:
            pass
    verbosity = level
    return verbosity
def close_log():
    """Close open logfile; must be done manually (see :func:`set_verbosity`)."""
    try:
        logfile.close()
    except AttributeError:
        # logfile is still None: logging was never enabled via set_verbosity(level<0)
        raise ValueError("no open logfile; use negative verbosity.")
def get_verbosity():
    """Return the current module-wide verbosity level (see :func:`set_verbosity`)."""
    return verbosity
def msg(level,m=None):
    """msg(level,[m])

    1) Print message string if the level <= verbose. level describes the
       priority with lower = more important.

       Terminate string with \\n if a newline is desired or \\r to overwrite
       the current line (eg for output progress indication)

       Note that if the global verbosity level is < 0 then the message is also
       written to the logfile.

    2) If called without a string then msg(level) returns True if it would
       print a message, and False otherwise.
    """
    if level <= abs(verbosity):
        if m:
            print(m), # terminate string with \n if newline desired
            sys.stdout.flush()
        if verbosity < 0:
            # NOTE(review): reached even when m is None (the query form
            # msg(level)), in which case logfile.write(None) would fail --
            # confirm whether this branch should be guarded by 'if m:'.
            logfile.write(m)
        return True
    return False
def fixedwidth_bins(delta,xmin,xmax):
    """Return bins of width *delta* that cover [*xmin*, *xmax*] (or a larger range).

    dict = fixedwidth_bins(delta, xmin, xmax)

    The range is enlarged symmetrically so that an integral number of bins of
    width *delta* fits.  The dict contains 'Nbins', 'delta', 'min', and 'max'.
    Raises :exc:`ValueError` unless xmin < xmax (elementwise for arrays).
    """
    import numpy
    if not numpy.all(xmin < xmax):
        raise ValueError('Boundaries are not sane: should be xmin < xmax.')
    # numpy.float_ was removed in NumPy 2.0; float64 is the equivalent dtype
    _delta = numpy.asarray(delta,dtype=numpy.float64)
    _xmin = numpy.asarray(xmin,dtype=numpy.float64)
    _xmax = numpy.asarray(xmax,dtype=numpy.float64)
    _length = _xmax - _xmin
    N = numpy.ceil(_length/_delta).astype(numpy.int_)  # number of bins
    dx = 0.5 * (N*_delta - _length)                    # add half of the excess to each end
    return {'Nbins':N, 'delta':_delta,'min':_xmin-dx, 'max':_xmax+dx}
def flatiter(seq):
    """Returns an iterator that flattens a sequence of sequences of sequences...

    Strings are yielded whole rather than character by character; a
    non-iterable argument is yielded as the single element.

    (c) 2005 Peter Otten, at http://www.thescripts.com/forum/thread23631.html
    """
    # 'basestring' only exists on Python 2; fall back to 'str' on Python 3
    try:
        string_types = basestring
    except NameError:
        string_types = str
    # avoid infinite recursion with strings
    if isinstance(seq, string_types):
        yield seq
    else:
        try:
            for i in seq:
                for k in flatiter(i):
                    yield k
        except TypeError:
            # seq is not iterable: yield the scalar itself
            yield seq
def flatten(x):
    """flatten(sequence) -> list

    Returns a single, flat list which contains all elements retrieved
    from the sequence and all recursively contained sub-sequences
    (iterables).  Strings are treated as atoms, not iterated.

    Examples:
    >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7])
    [1, 2, 3, 42, None, 4, 5, 6, 7]

    From http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
    """
    # 'basestring' only exists on Python 2; fall back to 'str' on Python 3
    # (needed because py3 str objects DO have __iter__)
    try:
        string_types = basestring
    except NameError:
        string_types = str
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, string_types):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
def matplotlib_interactive(interactive=False):
    """Switch matplotlib between interactive and non-interactive ('Agg') mode.

    Returns the *interactive* flag.  Must be called before any pyplot use
    for the backend selection to take effect.
    """
    import matplotlib
    if not interactive:
        matplotlib.use('Agg')  # allows running without X11 on compute nodes
    matplotlib.interactive(interactive)
    return interactive
def iterable(obj):
    """Returns True if obj can be iterated over and is NOT a string.

    'Iterable' here means 'has a length'; generators without __len__
    therefore report False, as before.
    """
    # 'basestring' only exists on Python 2; fall back to 'str' on Python 3
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(obj, string_types):
        return False  # avoid iterating over characters of a string
    try:
        len(obj)
    except Exception:
        # narrow from bare 'except:' so KeyboardInterrupt/SystemExit propagate
        return False
    return True
def asiterable(obj):
    """Return *obj* unchanged if it is iterable, otherwise wrapped in a list.

    iterable <-- asiterable(something)

    Strings count as NOT iterable (see :func:`iterable`).
    """
    return obj if iterable(obj) else [obj]
def Pearson_r(x,y):
    """Pearson's r (correlation coefficient).

    r = Pearson_r(x, y)

    *x* and *y* are arrays of the same length.  Computed via
    :func:`numpy.corrcoef` (the off-diagonal element of the 2x2
    correlation matrix; the matrix is symmetric, so either one works).
    """
    import numpy
    return numpy.corrcoef(x, y)[0, 1]
def linfit(x,y,dy=None):
    """Fit a straight line y = a + bx to the data in *x* and *y*.

    result_dict = linfit(x, y[, dy])

    Errors on y should be provided in *dy* (standard deviations of the
    individual y_i) in order to assess the goodness of the fit and derive
    errors on the parameters.  If *dy* is not given, constant errors are
    assumed; in that case Q is set to 1 (meaningless) and chi2 is normalised
    to unit standard deviation on all points.

    Returns a dict with

      intercept, sigma_intercept    a +/- sigma_a
      slope, sigma_slope            b +/- sigma_b
      parameter_correlation         correlation coefficient r_ab between a and b
      chi_square                    chi^2 test statistic
      Q                             goodness-of-fit probability (Q < 1e-2
                                    indicates a bad model)

    Based on 'Numerical Recipes in C', Ch 15.2.
    """
    import math
    import numpy
    import scipy.stats
    n = len(x)
    m = len(y)
    if n != m:
        raise ValueError("lengths of x and y must match: %s != %s" % (n, m))
    # default changed from a mutable [] to None (same observable behaviour)
    if dy is None:
        have_dy = False
    else:
        try:
            have_dy = (len(dy) > 0)
        except TypeError:
            have_dy = False
    if not have_dy:
        # numpy.float was removed (NumPy >= 1.24); use the float64 dtype
        dy = numpy.ones(n, dtype=numpy.float64)
    x = numpy.asarray(x)
    y = numpy.asarray(y)
    dy = numpy.asarray(dy)
    # weighted sums of Numerical Recipes eq. 15.2.4
    s2 = dy*dy
    S = numpy.add.reduce(1/s2)
    Sx = numpy.add.reduce(x/s2)
    Sy = numpy.add.reduce(y/s2)
    Sxx = numpy.add.reduce(x*x/s2)
    Sxy = numpy.add.reduce(x*y/s2)
    # numerically stabler form using t_i (NR eq. 15.2.15)
    t = (x - Sx/S)/dy
    Stt = numpy.add.reduce(t*t)
    b = numpy.add.reduce(t*y/dy)/Stt
    a = (Sy - Sx*b)/S
    sa = math.sqrt((1 + (Sx*Sx)/(S*Stt))/S)
    sb = math.sqrt(1/Stt)
    covab = -Sx/(S*Stt)
    r = covab/(sa*sb)
    chi2 = numpy.add.reduce(((y-a-b*x)/dy)**2)
    if not have_dy:
        # estimate the data error from the scatter if none was provided
        sigmadata = math.sqrt(chi2/(n-2))
        sa *= sigmadata
        sb *= sigmadata
        Q = 1.0
    else:
        # scipy.stats.chisqprob was removed in SciPy 1.0;
        # chi2.sf (survival function) is the documented replacement
        Q = scipy.stats.chi2.sf(chi2, n-2)
    return {"intercept":a,"slope":b,
            "sigma_intercept":sa,"sigma_slope":sb,
            "parameter_correlation":r, "chi_square":chi2, "Q":Q}
def autocorrelation_fft(series,include_mean=False,periodic=False,
                        start=None,stop=None,**kwargs):
    """Calculate the auto correlation function.

    acf = autocorrelation_fft(series, include_mean=False, **kwargs)

    The time series is correlated with itself across its whole length.  It is
    0-padded and the ACF is corrected for the 0-padding (values for larger
    lags are increased) unless mode='valid'.  Only the [0, len(series)[
    interval is returned; the series is normalized to its 0-th element.

    Note that the series for mode='same'|'full' is inaccurate for long times
    and should probably be truncated at 1/2*len(series).  Alternatively, only
    sample a subseries with the *stop* keyword.

    :Arguments:
      series        (time) series, a 1D numpy array
      include_mean  False: subtract mean(series) from series
      periodic      False: corrected for 0-padding; True: return as is
      start,stop    If set, calculate the ACF of series[start:stop] with
                    series; in this case mode='valid' is enforced
      kwargs        keyword arguments for scipy.signal.fftconvolve
                    (mode = 'full' | 'same' | 'valid')
    """
    import numpy
    import scipy.signal
    kwargs.setdefault('mode','full')
    # must copy because de-meaning modifies the array in place
    series = numpy.squeeze(series.astype(float)).copy()
    if not include_mean:
        mean = series.mean()
        series -= mean
    if start or stop:
        kwargs['mode'] = 'valid'  # only return the true unpolluted ACF
        start = start or 0
        stop = stop or len(series)
        if start >= stop:
            raise ValueError('Must be start < stop but start = %(start)d >= stop = %(stop)d.'
                             % locals())
    ac = scipy.signal.fftconvolve(series,series[stop:start:-1,...],**kwargs)
    if kwargs['mode'] == 'valid':
        # origin at start+1
        norm = ac[start+1] or 1.0  # guard against ACFs of zero arrays
        # ac is periodic (and symmetric) over [0, stop-start+1]; fold it so the
        # origin comes first, then average the two symmetric halves
        ac[:] = numpy.concatenate( (ac[start+1:], ac[:start+1]) ) / norm
        ac = numpy.resize(ac,len(ac)+1)  # make space for replicated 0-th element
        ac[-1] = ac[0]
        # '//' (integer division): plain '/' yields a float index on Python 3
        half = len(ac) // 2
        if len(ac) % 2 == 1:
            # orig ac was even
            return 0.5*(ac[:half] + ac[:half:-1])
        else:
            # orig ac was odd: replicate the least important datapoint for second half
            return 0.5*(ac[:half] + ac[:half-1:-1])
    else:
        origin = ac.shape[0] // 2  # works for both odd and even len(series)
        ac = ac[origin:]
        assert len(ac) <= len(series), "Oops: len(ac)=%d len(series)=%d" % (len(ac),len(series))
        if not periodic:
            # correct for 0-padding; scipy.arange was removed from SciPy's
            # namespace (deprecated re-export of numpy), so use numpy.arange
            ac *= len(series)/(len(series) - 1.0*numpy.arange(len(ac)))
        norm = ac[0] or 1.0  # guard against ACFs of zero arrays
        return ac/norm
def averaged_autocorrelation(series,length=None,sliding_window=None,**kwargs):
    """Calculates the averaged ACF of a series.

    mean(acf), std(acf) = averaged_autocorrelation(series, length=None, sliding_window=None)

    Calculate the ACF of a series for only a fraction of the total length,
    <length>, but repeat the calculation by setting the origin progressively
    every <sliding_window> steps and average over all the ACFs.

    :Arguments:
      series          time series (by default, mean will be removed)
      length          length (in frames) of the ACF (default: 1/2*len(series))
      sliding_window  repeat ACF calculation every N frames (default: len(series)/100)
      kwargs          additional arguments to autocorrelation_fft()
    """
    import numpy
    kwargs.pop('start',None)  # must filter these kwargs as we set them ourselves
    kwargs.pop('stop',None)
    nframes = len(series)
    # '//' (integer division): plain '/' yields floats on Python 3, which
    # break the slicing/range arithmetic below
    length = length or nframes // 2
    _length = nframes - length  # _length is the length of the comparison series
    sliding_window = sliding_window or nframes // 100
    # note: do NOT be tempted to change nframes-_length to nframes-_length+1
    # (this will make the last acf 1 step longer, see series[stop:start:-1] !)
    # range() replaces xrange(), which does not exist on Python 3
    acfs = numpy.array([autocorrelation_fft(series,start=start,stop=start+_length,**kwargs)
                        for start in range(0, nframes-_length, sliding_window)])
    return acfs.mean(axis=0), acfs.std(axis=0)
# Compatibility layer for running in python 2.3
# sorted()
# --- (can also use numpy.sort function instead) ---
# NOTE: this shim only triggers on interpreters without the sorted() builtin
# (python < 2.4); on any modern Python the 'try' branch simply re-exports
# the builtin.
try:
    sorted([])
    sorted = sorted
except NameError:
    def sorted(iterable, cmp=None, key=None, reverse=False):
        """sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list

        Naive pre python 2.4 compatibility fudge.

        With key, cmp must make use of triplets (key,int,value).
        It's a fudge after all.
        """
        L = list(iterable)
        args = ()
        if cmp is not None:
            args = (cmp,)
        if key is None:
            L.sort(*args)
        else:
            # decorate-sort-undecorate; the index makes ties stable and
            # prevents comparison of the (possibly incomparable) values
            deco = [(key(x),i,x) for i,x in enumerate(L)]
            deco.sort(*args)
            L[:] = [y[2] for y in deco]
        if reverse:
            L.reverse()
        return L
try:
    import collections
    class Fifo(collections.deque):
        # first-in-first-out queue: pop removes from the LEFT end
        pop = collections.deque.popleft
    class Ringbuffer(Fifo):
        """Ring buffer of size capacity; 'pushes' data from left and discards
        on the right.
        """
        # See http://mail.python.org/pipermail/tutor/2005-March/037149.html.
        def __init__(self,capacity,iterable=None):
            if iterable is None: iterable = []
            super(Ringbuffer,self).__init__(iterable)
            assert capacity > 0
            self.capacity = capacity
            while len(self) > self.capacity:
                super(Ringbuffer,self).pop() # prune initial loading
        def append(self,x):
            # discard oldest entries until there is room, then add the new one
            while len(self) >= self.capacity:
                super(Ringbuffer,self).pop()
            super(Ringbuffer,self).append(x)
        def __repr__(self):
            return "Ringbuffer(capacity="+str(self.capacity)+", "+str(list(self))+")"
except ImportError:
    # fallback for interpreters without the collections module (python < 2.4)
    class Ringbuffer(list):
        """Ringbuffer that can be treated as a list. Note that the real queuing
        order is only obtained with the tolist() method.

        Based on
        http://www.onlamp.com/pub/a/python/excerpt/pythonckbk_chap1/index1.html
        """
        def __init__(self, capacity, iterable=None):
            assert capacity > 0
            self.capacity = capacity
            if iterable is None:
                # NOTE(review): rebinding the local name 'self' has no effect
                # on the instance -- the empty-iterable case silently does
                # nothing; presumably 'self[:] = []' was intended. Confirm.
                self = []
            else:
                self[:] = list(iterable)[-self.capacity:]
        class __Full(list):
            # the class an instance is switched to once it has filled up;
            # from then on appends overwrite in place at the cursor 'cur'
            def append(self, x):
                self[self.cur] = x
                self.cur = (self.cur+1) % self.capacity
            def tolist(self):
                """Return a list of elements from the oldest to the newest."""
                return self[self.cur:] + self[:self.cur]
        def append(self, x):
            super(Ringbuffer,self).append(x)
            if len(self) >= self.capacity:
                self[:] = self[-self.capacity:]
                self.cur = 0
                # Permanently change self's class from non-full to full
                self.__class__ = self.__Full
        def extend(self,iterable):
            # only the last 'capacity' entries can survive anyway
            for x in list(iterable)[-self.capacity:]:
                self.append(x)
        def tolist(self):
            """Return a list of elements from the oldest to the newest."""
            return self
        def __repr__(self):
            return "Ringbuffer(capacity="+str(self.capacity)+", "+str(list(self))+")"
class DefaultDict(dict):
    """Dictionary initialised from defaults and overridden by user keys/values.

    DefaultDict(defaultdict[, userdict], **kwargs) starts from *defaultdict*,
    then applies *userdict* (if given) and finally any keyword arguments.
    """
    def __init__(self,defaultdict,userdict=None,**kwargs):
        # seed with the defaults, then layer the overrides on top in order
        super(DefaultDict, self).__init__(**defaultdict)
        for overrides in (userdict, kwargs):
            if overrides:
                self.update(overrides)
class IntrospectiveDict(dict):
    """A dictionary that contains its keys as attributes for easier introspection.

    Keys that collide with dict methods or attributes are _not_ added as attributes.

    The implementation is simple and certainly not optimized for larger dictionaries
    or ones which are often accessed. Only use it for 'final results' collections
    that you are likely to investigate interactively.

    ARGH: This cannot be pickled safely.
    """
    def __init__(self,*args,**kwargs):
        super(IntrospectiveDict,self).__init__(*args,**kwargs)
        # names defined on dict itself must never be shadowed by keys
        self.__reserved = dict.__dict__.keys() # don't know how to use super here?
        self._update()
    def __getattr__(self,x):
        # only called when normal attribute lookup fails
        self._update() # maybe something changed? Can't be bothered to subclass update,etc
        try:
            return self.__dict__[x]
        except KeyError:
            # NOTE(review): dict defines no __getattr__, so this presumably
            # raises AttributeError for unknown names -- confirm.
            return super(IntrospectiveDict,self).__getattr__(x)
    def _update(self):
        # mirror every (allowed) key as an instance attribute
        for k,v in self.items(): # initialisation of the attributes from the keys
            self._set(k,v)
    def _set(self,k,v):
        if k not in self.__reserved:
            self.__setattr__(k,v)
class CustomProgressMeter(MDAnalysis.lib.log.ProgressMeter):
    """ProgressMeter that uses addition '%(other)s' in format string.

    The extra value is stored on the instance as ``self.other`` before
    delegating to the base class, so a format string containing
    ``%(other)s`` can pick it up.

    .. SeeAlso:: :class:`MDAnalysis.lib.log.ProgressMeter`
    """
    def echo(self, step, other):
        """Output status for *step* with additional information *other*."""
        self.other = other
        return super(CustomProgressMeter, self).echo(step)
| Becksteinlab/hop | hop/utilities.py | Python | gpl-3.0 | 29,424 | [
"MDAnalysis"
] | 19c8fcacd9590d6ba40f2d7e62eb521076e49af3de1b3c9f5801444ca0507474 |
from __future__ import print_function, division
import unittest, numpy as np
from pyscf.nao import tddft_iter
from pyscf.nao.tddft_iter_x_zip import tddft_iter_x_zip as td_c
from os.path import dirname, abspath
class KnowValues(unittest.TestCase):

  def test_x_zip_feature_na20_chain(self):
    """ This is a test for compression of the eigenvectors at higher energies """
    dname = dirname(abspath(__file__))
    siesd = dname+'/sodium_20'
    # TDDFT/RPA driver with eigenvector compression (x_zip) switched on
    x = td_c(label='siesta', cd=siesd,x_zip=True, x_zip_emax=0.25,x_zip_eps=0.05,jcutoff=7,xc_code='RPA',nr=128, fermi_energy=-0.0913346431431985)
    eps = 0.005
    # frequency grid with a small imaginary broadening of eps
    ww = np.arange(0.0, 0.5, eps/2.0)+1j*eps
    # columns: energy in eV (27.2114 eV/Hartree), -Im(polarizability)
    data = np.array([ww.real*27.2114, -x.comp_polariz_inter_ave(ww).imag])
    fname = 'na20_chain.tddft_iter_rpa.omega.inter.ave.x_zip.txt'
    np.savetxt(fname, data.T, fmt=['%f','%f'])
    #print(__file__, fname)
    # compare against the stored reference spectrum with loose tolerances
    data_ref = np.loadtxt(dname+'/'+fname+'-ref')
    #print(' x.rf0_ncalls ', x.rf0_ncalls)
    #print(' x.matvec_ncalls ', x.matvec_ncalls)
    self.assertTrue(np.allclose(data_ref,data.T, rtol=1.0e-1, atol=1e-06))

if __name__ == "__main__": unittest.main()
| gkc1000/pyscf | pyscf/nao/test/test_0091_tddft_x_zip_na20.py | Python | apache-2.0 | 1,134 | [
"PySCF",
"SIESTA"
] | e728b719564bb623ff025f05a6ca9cb9ff0805ce1c4fb8f4d221698d50b7689c |
# -*- coding: utf-8 -*-
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Parser for GAMESS(US) output files"""
from __future__ import print_function
import re
import numpy
from . import logfileparser
from . import utils
class GAMESS(logfileparser.Logfile):
"""A GAMESS/Firefly log file."""
# Used to index self.scftargets[].
SCFRMS, SCFMAX, SCFENERGY = list(range(3))
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(GAMESS, self).__init__(logname="GAMESS", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "GAMESS log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'GAMESS("%s")' % (self.filename)
def normalisesym(self, label):
"""Normalise the symmetries used by GAMESS.
To normalise, two rules need to be applied:
(1) Occurences of U/G in the 2/3 position of the label
must be lower-cased
(2) Two single quotation marks must be replaced by a double
>>> t = GAMESS("dummyfile").normalisesym
>>> labels = ['A', 'A1', 'A1G', "A'", "A''", "AG"]
>>> answers = map(t, labels)
>>> print answers
['A', 'A1', 'A1g', "A'", 'A"', 'Ag']
"""
if label[1:] == "''":
end = '"'
else:
end = label[1:].replace("U", "u").replace("G", "g")
return label[0] + end
def before_parsing(self):
self.firststdorient = True # Used to decide whether to wipe the atomcoords clean
self.cihamtyp = "none" # Type of CI Hamiltonian: saps or dets.
self.scftype = "none" # Type of SCF calculation: BLYP, RHF, ROHF, etc.
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
if line[1:12] == "INPUT CARD>":
return
# We are looking for this line:
# PARAMETERS CONTROLLING GEOMETRY SEARCH ARE
# ...
# OPTTOL = 1.000E-04 RMIN = 1.500E-03
if line[10:18] == "OPTTOL =":
if not hasattr(self, "geotargets"):
opttol = float(line.split()[2])
self.geotargets = numpy.array([opttol, 3. / opttol], "d")
# Has to deal with such lines as:
# FINAL R-B3LYP ENERGY IS -382.0507446475 AFTER 10 ITERATIONS
# FINAL ENERGY IS -379.7594673378 AFTER 9 ITERATIONS
# ...so take the number after the "IS"
if line.find("FINAL") == 1:
if not hasattr(self, "scfenergies"):
self.scfenergies = []
temp = line.split()
self.scfenergies.append(utils.convertor(float(temp[temp.index("IS") + 1]), "hartree", "eV"))
# For total energies after Moller-Plesset corrections, the output looks something like this:
#
# RESULTS OF MOLLER-PLESSET 2ND ORDER CORRECTION ARE
# E(0)= -285.7568061536
# E(1)= 0.0
# E(2)= -0.9679419329
# E(MP2)= -286.7247480864
# where E(MP2) = E(0) + E(2)
#
# With GAMESS-US 12 Jan 2009 (R3), the preceding text is different:
# DIRECT 4-INDEX TRANSFORMATION
# SCHWARZ INEQUALITY TEST SKIPPED 0 INTEGRAL BLOCKS
# E(SCF)= -76.0088477471
# E(2)= -0.1403745370
# E(MP2)= -76.1492222841
#
if line.find("RESULTS OF MOLLER-PLESSET") >= 0 or line[6:37] == "SCHWARZ INEQUALITY TEST SKIPPED":
if not hasattr(self, "mpenergies"):
self.mpenergies = []
# Each iteration has a new print-out
self.mpenergies.append([])
# GAMESS-US presently supports only second order corrections (MP2)
# PC GAMESS also has higher levels (3rd and 4th), with different output
# Only the highest level MP4 energy is gathered (SDQ or SDTQ)
while re.search("DONE WITH MP(\d) ENERGY", line) is None:
line = next(inputfile)
if len(line.split()) > 0:
# Only up to MP2 correction
if line.split()[0] == "E(MP2)=":
mp2energy = float(line.split()[1])
self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))
# MP2 before higher order calculations
if line.split()[0] == "E(MP2)":
mp2energy = float(line.split()[2])
self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))
if line.split()[0] == "E(MP3)":
mp3energy = float(line.split()[2])
self.mpenergies[-1].append(utils.convertor(mp3energy, "hartree", "eV"))
if line.split()[0] in ["E(MP4-SDQ)", "E(MP4-SDTQ)"]:
mp4energy = float(line.split()[2])
self.mpenergies[-1].append(utils.convertor(mp4energy, "hartree", "eV"))
# Total energies after Coupled Cluster calculations
# Only the highest Coupled Cluster level result is gathered
if line[12:23] == "CCD ENERGY:":
if not hasattr(self, "ccenergies"):
self.ccenergies = []
ccenergy = float(line.split()[2])
self.ccenergies.append(utils.convertor(ccenergy, "hartree", "eV"))
if line.find("CCSD") >= 0 and line.split()[0:2] == ["CCSD", "ENERGY:"]:
if not hasattr(self, "ccenergies"):
self.ccenergies = []
ccenergy = float(line.split()[2])
line = next(inputfile)
if line[8:23] == "CCSD[T] ENERGY:":
ccenergy = float(line.split()[2])
line = next(inputfile)
if line[8:23] == "CCSD(T) ENERGY:":
ccenergy = float(line.split()[2])
self.ccenergies.append(utils.convertor(ccenergy, "hartree", "eV"))
# Also collect MP2 energies, which are always calculated before CC
if line[8:23] == "MBPT(2) ENERGY:":
if not hasattr(self, "mpenergies"):
self.mpenergies = []
self.mpenergies.append([])
mp2energy = float(line.split()[2])
self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))
# Extract charge and multiplicity
if line[1:19] == "CHARGE OF MOLECULE":
charge = int(line.split()[-1])
self.set_attribute('charge', charge)
line = next(inputfile)
mult = int(line.split()[-1])
self.set_attribute('mult', mult)
# etenergies (originally used only for CIS runs, but now also TD-DFT)
if "EXCITATION ENERGIES" in line and line.find("DONE WITH") < 0:
if not hasattr(self, "etenergies"):
self.etenergies = []
get_etosc = False
header = next(inputfile).rstrip()
if header.endswith("OSC. STR."):
# water_cis_dets.out does not have the oscillator strength
# in this table...it is extracted from a different section below
get_etosc = True
self.etoscs = []
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
broken = line.split()
while len(broken) > 0:
# Take hartree value with more numbers, and convert.
# Note that the values listed after this are also less exact!
etenergy = float(broken[1])
self.etenergies.append(utils.convertor(etenergy, "hartree", "cm-1"))
if get_etosc:
etosc = float(broken[-1])
self.etoscs.append(etosc)
broken = next(inputfile).split()
# Detect the CI hamiltonian type, if applicable.
# Should always be detected if CIS is done.
if line[8:64] == "RESULTS FROM SPIN-ADAPTED ANTISYMMETRIZED PRODUCT (SAPS)":
self.cihamtyp = "saps"
if line[8:64] == "RESULTS FROM DETERMINANT BASED ATOMIC ORBITAL CI-SINGLES":
self.cihamtyp = "dets"
# etsecs (used only for CIS runs for now)
if line[1:14] == "EXCITED STATE":
if not hasattr(self, 'etsecs'):
self.etsecs = []
if not hasattr(self, 'etsyms'):
self.etsyms = []
statenumber = int(line.split()[2])
spin = int(float(line.split()[7]))
if spin == 0:
sym = "Singlet"
if spin == 1:
sym = "Triplet"
sym += '-' + line.split()[-1]
self.etsyms.append(sym)
# skip 5 lines
for i in range(5):
line = next(inputfile)
line = next(inputfile)
CIScontribs = []
while line.strip()[0] != "-":
MOtype = 0
# alpha/beta are specified for hamtyp=dets
if self.cihamtyp == "dets":
if line.split()[0] == "BETA":
MOtype = 1
fromMO = int(line.split()[-3])-1
toMO = int(line.split()[-2])-1
coeff = float(line.split()[-1])
# With the SAPS hamiltonian, the coefficients are multiplied
# by sqrt(2) so that they normalize to 1.
# With DETS, both alpha and beta excitations are printed.
# if self.cihamtyp == "saps":
# coeff /= numpy.sqrt(2.0)
CIScontribs.append([(fromMO, MOtype), (toMO, MOtype), coeff])
line = next(inputfile)
self.etsecs.append(CIScontribs)
# etoscs (used only for CIS runs now)
if line[1:50] == "TRANSITION FROM THE GROUND STATE TO EXCITED STATE":
if not hasattr(self, "etoscs"):
self.etoscs = []
# This was the suggested as a fix in issue #61, and it does allow
# the parser to finish without crashing. However, it seems that
# etoscs is shorter in this case than the other transition attributes,
# so that should be somehow corrected and tested for.
if "OPTICALLY" in line:
pass
else:
statenumber = int(line.split()[-1])
# skip 7 lines
for i in range(8):
line = next(inputfile)
strength = float(line.split()[3])
self.etoscs.append(strength)
# TD-DFT for GAMESS-US.
# The format for excitations has changed a bit between 2007 and 2012.
# Original format parser was written for:
#
# -------------------
# TRIPLET EXCITATIONS
# -------------------
#
# STATE # 1 ENERGY = 3.027228 EV
# OSCILLATOR STRENGTH = 0.000000
# DRF COEF OCC VIR
# --- ---- --- ---
# 35 -1.105383 35 -> 36
# 69 -0.389181 34 -> 37
# 103 -0.405078 33 -> 38
# 137 0.252485 32 -> 39
# 168 -0.158406 28 -> 40
#
# STATE # 2 ENERGY = 4.227763 EV
# ...
#
# Here is the corresponding 2012 version:
#
# -------------------
# TRIPLET EXCITATIONS
# -------------------
#
# STATE # 1 ENERGY = 3.027297 EV
# OSCILLATOR STRENGTH = 0.000000
# LAMBDA DIAGNOSTIC = 0.925 (RYDBERG/CHARGE TRANSFER CHARACTER)
# SYMMETRY OF STATE = A
# EXCITATION DE-EXCITATION
# OCC VIR AMPLITUDE AMPLITUDE
# I A X(I->A) Y(A->I)
# --- --- -------- --------
# 35 36 -0.929190 -0.176167
# 34 37 -0.279823 -0.109414
# ...
#
# We discern these two by the presence of the arrow in the old version.
#
# The "LET EXCITATIONS" pattern used below catches both
# singlet and triplet excitations output.
if line[14:29] == "LET EXCITATIONS":
self.etenergies = []
self.etoscs = []
self.etsecs = []
etsyms = []
self.skip_lines(inputfile, ['d', 'b'])
# Loop while states are still being printed.
line = next(inputfile)
while line[1:6] == "STATE":
self.updateprogress(inputfile, "Excited States")
etenergy = utils.convertor(float(line.split()[-2]), "eV", "cm-1")
etoscs = float(next(inputfile).split()[-1])
self.etenergies.append(etenergy)
self.etoscs.append(etoscs)
# Symmetry is not always present, especially in old versions.
# Newer versions, on the other hand, can also provide a line
# with lambda diagnostic and some extra headers.
line = next(inputfile)
if "LAMBDA DIAGNOSTIC" in line:
line = next(inputfile)
if "SYMMETRY" in line:
etsyms.append(line.split()[-1])
line = next(inputfile)
if "EXCITATION" in line and "DE-EXCITATION" in line:
line = next(inputfile)
if line.count("AMPLITUDE") == 2:
line = next(inputfile)
self.skip_line(inputfile, 'dashes')
CIScontribs = []
line = next(inputfile)
while line.strip():
cols = line.split()
if "->" in line:
i_occ_vir = [2, 4]
i_coeff = 1
else:
i_occ_vir = [0, 1]
i_coeff = 2
fromMO, toMO = [int(cols[i]) - 1 for i in i_occ_vir]
coeff = float(cols[i_coeff])
CIScontribs.append([(fromMO, 0), (toMO, 0), coeff])
line = next(inputfile)
self.etsecs.append(CIScontribs)
line = next(inputfile)
# The symmetries are not always present.
if etsyms:
self.etsyms = etsyms
# Maximum and RMS gradients.
if "MAXIMUM GRADIENT" in line or "RMS GRADIENT" in line:
parts = line.split()
# Avoid parsing the following...
## YOU SHOULD RESTART "OPTIMIZE" RUNS WITH THE COORDINATES
## WHOSE ENERGY IS LOWEST. RESTART "SADPOINT" RUNS WITH THE
## COORDINATES WHOSE RMS GRADIENT IS SMALLEST. THESE ARE NOT
## ALWAYS THE LAST POINT COMPUTED!
if parts[0] not in ["MAXIMUM", "RMS", "(1)"]:
return
if not hasattr(self, "geovalues"):
self.geovalues = []
# Newer versions (around 2006) have both maximum and RMS on one line:
# MAXIMUM GRADIENT = 0.0531540 RMS GRADIENT = 0.0189223
if len(parts) == 8:
maximum = float(parts[3])
rms = float(parts[7])
# In older versions of GAMESS, this spanned two lines, like this:
# MAXIMUM GRADIENT = 0.057578167
# RMS GRADIENT = 0.027589766
if len(parts) == 4:
maximum = float(parts[3])
line = next(inputfile)
parts = line.split()
rms = float(parts[3])
# FMO also prints two final one- and two-body gradients (see exam37):
# (1) MAXIMUM GRADIENT = 0.0531540 RMS GRADIENT = 0.0189223
if len(parts) == 9:
maximum = float(parts[4])
rms = float(parts[8])
self.geovalues.append([maximum, rms])
# This is the input orientation, which is the only data available for
# SP calcs, but which should be overwritten by the standard orientation
# values, which is the only information available for all geoopt cycles.
if line[11:50] == "ATOMIC COORDINATES":
if not hasattr(self, "atomcoords"):
self.atomcoords = []
line = next(inputfile)
atomcoords = []
atomnos = []
line = next(inputfile)
while line.strip():
temp = line.strip().split()
atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in temp[2:5]])
atomnos.append(int(round(float(temp[1])))) # Don't use the atom name as this is arbitary
line = next(inputfile)
self.set_attribute('atomnos', atomnos)
self.atomcoords.append(atomcoords)
if line[12:40] == "EQUILIBRIUM GEOMETRY LOCATED":
# Prevent extraction of the final geometry twice
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.geovalues) - 1)
        # Make sure we always have optdone for geometry optimization, even if not converged.
if "GEOMETRY SEARCH IS NOT CONVERGED" in line:
if not hasattr(self, 'optdone'):
self.optdone = []
# This is the standard orientation, which is the only coordinate
# information available for all geometry optimisation cycles.
# The input orientation will be overwritten if this is a geometry optimisation
# We assume that a previous Input Orientation has been found and
# used to extract the atomnos
if line[1:29] == "COORDINATES OF ALL ATOMS ARE" and (not hasattr(self, "optdone") or self.optdone == []):
self.updateprogress(inputfile, "Coordinates")
if self.firststdorient:
self.firststdorient = False
# Wipes out the single input coordinate at the start of the file
self.atomcoords = []
self.skip_lines(inputfile, ['line', '-'])
atomcoords = []
line = next(inputfile)
for i in range(self.natom):
temp = line.strip().split()
atomcoords.append(list(map(float, temp[2:5])))
line = next(inputfile)
self.atomcoords.append(atomcoords)
# Section with SCF information.
#
# The space at the start of the search string is to differentiate from MCSCF.
# Everything before the search string is stored as the type of SCF.
# SCF types may include: BLYP, RHF, ROHF, UHF, etc.
#
# For example, in exam17 the section looks like this (note that this is GVB):
# ------------------------
# ROHF-GVB SCF CALCULATION
# ------------------------
# GVB STEP WILL USE 119875 WORDS OF MEMORY.
#
# MAXIT= 30 NPUNCH= 2 SQCDF TOL=1.0000E-05
# NUCLEAR ENERGY= 6.1597411978
# EXTRAP=T DAMP=F SHIFT=F RSTRCT=F DIIS=F SOSCF=F
#
# ITER EX TOTAL ENERGY E CHANGE SQCDF DIIS ERROR
# 0 0 -38.298939963 -38.298939963 0.131784454 0.000000000
# 1 1 -38.332044339 -0.033104376 0.026019716 0.000000000
# ... and will be terminated by a blank line.
if line.rstrip()[-16:] == " SCF CALCULATION":
# Remember the type of SCF.
self.scftype = line.strip()[:-16]
self.skip_line(inputfile, 'dashes')
while line[:5] != " ITER":
self.updateprogress(inputfile, "Attributes")
# GVB uses SQCDF for checking convergence (for example in exam17).
if "GVB" in self.scftype and "SQCDF TOL=" in line:
scftarget = float(line.split("=")[-1])
                # Normally, however, the density is used as the convergence criterion.
# Deal with various versions:
# (GAMESS VERSION = 12 DEC 2003)
# DENSITY MATRIX CONV= 2.00E-05 DFT GRID SWITCH THRESHOLD= 3.00E-04
# (GAMESS VERSION = 22 FEB 2006)
# DENSITY MATRIX CONV= 1.00E-05
# (PC GAMESS version 6.2, Not DFT?)
# DENSITY CONV= 1.00E-05
elif "DENSITY CONV" in line or "DENSITY MATRIX CONV" in line:
scftarget = float(line.split()[-1])
line = next(inputfile)
if not hasattr(self, "scftargets"):
self.scftargets = []
self.scftargets.append([scftarget])
if not hasattr(self, "scfvalues"):
self.scfvalues = []
# Normally the iterations print in 6 columns.
# For ROHF, however, it is 5 columns, thus this extra parameter.
if "ROHF" in self.scftype:
self.scf_valcol = 4
else:
self.scf_valcol = 5
line = next(inputfile)
# SCF iterations are terminated by a blank line.
# The first four characters usually contains the step number.
# However, lines can also contain messages, including:
# * * * INITIATING DIIS PROCEDURE * * *
# CONVERGED TO SWOFF, SO DFT CALCULATION IS NOW SWITCHED ON
# DFT CODE IS SWITCHING BACK TO THE FINER GRID
values = []
while line.strip():
try:
temp = int(line[0:4])
except ValueError:
pass
else:
values.append([float(line.split()[self.scf_valcol])])
line = next(inputfile)
self.scfvalues.append(values)
# Sometimes, only the first SCF cycle has the banner parsed for above,
# so we must identify them from the header before the SCF iterations.
# The example we have for this is the GeoOpt unittest for Firefly8.
if line[1:8] == "ITER EX":
# In this case, the convergence targets are not printed, so we assume
# they do not change.
self.scftargets.append(self.scftargets[-1])
values = []
line = next(inputfile)
while line.strip():
try:
temp = int(line[0:4])
except ValueError:
pass
else:
values.append([float(line.split()[self.scf_valcol])])
line = next(inputfile)
self.scfvalues.append(values)
# Extract normal coordinate analysis, including vibrational frequencies (vibfreq),
# IT intensities (vibirs) and displacements (vibdisps).
#
# This section typically looks like the following in GAMESS-US:
#
# MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
#
# FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2,
# REDUCED MASSES IN AMU.
#
# 1 2 3 4 5
# FREQUENCY: 52.49 41.45 17.61 9.23 10.61
# REDUCED MASS: 3.92418 3.77048 5.43419 6.44636 5.50693
# IR INTENSITY: 0.00013 0.00001 0.00004 0.00000 0.00003
#
# ...or in the case of a numerical Hessian job...
#
# MODES 1 TO 5 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
#
# FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2,
# REDUCED MASSES IN AMU.
#
# 1 2 3 4 5
# FREQUENCY: 0.05 0.03 0.03 30.89 30.94
# REDUCED MASS: 8.50125 8.50137 8.50136 1.06709 1.06709
#
# ...whereas PC-GAMESS has...
#
# MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
#
# FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2
#
# 1 2 3 4 5
# FREQUENCY: 5.89 1.46 0.01 0.01 0.01
# IR INTENSITY: 0.00000 0.00000 0.00000 0.00000 0.00000
#
# If Raman is present we have (for PC-GAMESS)...
#
# MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
#
# FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2
# RAMAN INTENSITIES IN ANGSTROM**4/AMU, DEPOLARIZATIONS ARE DIMENSIONLESS
#
# 1 2 3 4 5
# FREQUENCY: 5.89 1.46 0.04 0.03 0.01
# IR INTENSITY: 0.00000 0.00000 0.00000 0.00000 0.00000
# RAMAN INTENSITY: 12.675 1.828 0.000 0.000 0.000
# DEPOLARIZATION: 0.750 0.750 0.124 0.009 0.750
#
# If GAMESS-US or PC-GAMESS has not reached the stationary point we have
        # an additional warning, repeated twice, like so (see n_water.log for an example):
#
# *******************************************************
# * THIS IS NOT A STATIONARY POINT ON THE MOLECULAR PES *
# * THE VIBRATIONAL ANALYSIS IS NOT VALID !!! *
# *******************************************************
#
# There can also be additional warnings about the selection of modes, for example:
#
# * * * WARNING, MODE 6 HAS BEEN CHOSEN AS A VIBRATION
# WHILE MODE12 IS ASSUMED TO BE A TRANSLATION/ROTATION.
# PLEASE VERIFY THE PROGRAM'S DECISION MANUALLY!
#
if "NORMAL COORDINATE ANALYSIS IN THE HARMONIC APPROXIMATION" in line:
self.vibfreqs = []
self.vibirs = []
self.vibdisps = []
            # Need to get to the modes line, which is often preceded by
            # a list of atomic weights and some possible warnings.
# Pass the warnings to the logger if they are there.
while not "MODES" in line:
self.updateprogress(inputfile, "Frequency Information")
line = next(inputfile)
if "THIS IS NOT A STATIONARY POINT" in line:
msg = "\n This is not a stationary point on the molecular PES"
msg += "\n The vibrational analysis is not valid!!!"
self.logger.warning(msg)
if "* * * WARNING, MODE" in line:
line1 = line.strip()
line2 = next(inputfile).strip()
line3 = next(inputfile).strip()
self.logger.warning("\n " + "\n ".join((line1, line2, line3)))
# In at least one case (regression zolm_dft3a.log) for older version of GAMESS-US,
# the header concerning the range of nodes is formatted wrong and can look like so:
# MODES 9 TO14 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
# ... although it's unclear whether this happens for all two-digit values.
startrot = int(line.split()[1])
if len(line.split()[2]) == 2:
endrot = int(line.split()[3])
else:
endrot = int(line.split()[2][2:])
self.skip_line(inputfile, 'blank')
# Continue down to the first frequencies
line = next(inputfile)
while not line.strip() or not line.startswith(" 1"):
line = next(inputfile)
while not "SAYVETZ" in line:
self.updateprogress(inputfile, "Frequency Information")
# Note: there may be imaginary frequencies like this (which we make negative):
# FREQUENCY: 825.18 I 111.53 12.62 10.70 0.89
#
# A note for debuggers: some of these frequencies will be removed later,
# assumed to be translations or rotations (see startrot/endrot above).
for col in next(inputfile).split()[1:]:
if col == "I":
self.vibfreqs[-1] *= -1
else:
self.vibfreqs.append(float(col))
line = next(inputfile)
# Skip the symmetry (appears in newer versions), fixes bug #3476063.
if line.find("SYMMETRY") >= 0:
line = next(inputfile)
# Skip the reduced mass (not always present).
if line.find("REDUCED") >= 0:
line = next(inputfile)
# Not present in numerical Hessian calculations.
if line.find("IR INTENSITY") >= 0:
irIntensity = map(float, line.strip().split()[2:])
self.vibirs.extend([utils.convertor(x, "Debye^2/amu-Angstrom^2", "km/mol") for x in irIntensity])
line = next(inputfile)
# Read in Raman vibrational intensities if present.
if line.find("RAMAN") >= 0:
if not hasattr(self, "vibramans"):
self.vibramans = []
ramanIntensity = line.strip().split()
self.vibramans.extend(list(map(float, ramanIntensity[2:])))
depolar = next(inputfile)
line = next(inputfile)
# This line seems always to be blank.
assert line.strip() == ''
# Extract the Cartesian displacement vectors.
p = [[], [], [], [], []]
for j in range(self.natom):
q = [[], [], [], [], []]
for coord in "xyz":
line = next(inputfile)[21:]
cols = list(map(float, line.split()))
for i, val in enumerate(cols):
q[i].append(val)
for k in range(len(cols)):
p[k].append(q[k])
self.vibdisps.extend(p[:len(cols)])
# Skip the Sayvetz stuff at the end.
for j in range(10):
line = next(inputfile)
self.skip_line(inputfile, 'blank')
line = next(inputfile)
# Exclude rotations and translations.
self.vibfreqs = numpy.array(self.vibfreqs[:startrot-1]+self.vibfreqs[endrot:], "d")
self.vibirs = numpy.array(self.vibirs[:startrot-1]+self.vibirs[endrot:], "d")
self.vibdisps = numpy.array(self.vibdisps[:startrot-1]+self.vibdisps[endrot:], "d")
if hasattr(self, "vibramans"):
self.vibramans = numpy.array(self.vibramans[:startrot-1]+self.vibramans[endrot:], "d")
if line[5:21] == "ATOMIC BASIS SET":
self.gbasis = []
line = next(inputfile)
while line.find("SHELL") < 0:
line = next(inputfile)
self.skip_lines(inputfile, ['blank', 'atomname'])
# shellcounter stores the shell no of the last shell
# in the previous set of primitives
shellcounter = 1
while line.find("TOTAL NUMBER") < 0:
self.skip_line(inputfile, 'blank')
line = next(inputfile)
shellno = int(line.split()[0])
shellgap = shellno - shellcounter
gbasis = [] # Stores basis sets on one atom
shellsize = 0
while len(line.split()) != 1 and line.find("TOTAL NUMBER") < 0:
shellsize += 1
coeff = {}
# coefficients and symmetries for a block of rows
while line.strip():
temp = line.strip().split()
sym = temp[1]
assert sym in ['S', 'P', 'D', 'F', 'G', 'L']
if sym == "L": # L refers to SP
if len(temp) == 6: # GAMESS US
coeff.setdefault("S", []).append((float(temp[3]), float(temp[4])))
coeff.setdefault("P", []).append((float(temp[3]), float(temp[5])))
else: # PC GAMESS
assert temp[6][-1] == temp[9][-1] == ')'
coeff.setdefault("S", []).append((float(temp[3]), float(temp[6][:-1])))
coeff.setdefault("P", []).append((float(temp[3]), float(temp[9][:-1])))
else:
if len(temp) == 5: # GAMESS US
coeff.setdefault(sym, []).append((float(temp[3]), float(temp[4])))
else: # PC GAMESS
assert temp[6][-1] == ')'
coeff.setdefault(sym, []).append((float(temp[3]), float(temp[6][:-1])))
line = next(inputfile)
# either a blank or a continuation of the block
if sym == "L":
gbasis.append(('S', coeff['S']))
gbasis.append(('P', coeff['P']))
else:
gbasis.append((sym, coeff[sym]))
line = next(inputfile)
# either the start of the next block or the start of a new atom or
# the end of the basis function section
numtoadd = 1 + (shellgap // shellsize)
shellcounter = shellno + shellsize
for x in range(numtoadd):
self.gbasis.append(gbasis)
# The eigenvectors, which also include MO energies and symmetries, follow
# the *final* report of evalues and the last list of symmetries in the log file:
#
# ------------
# EIGENVECTORS
# ------------
#
# 1 2 3 4 5
# -10.0162 -10.0161 -10.0039 -10.0039 -10.0029
# BU AG BU AG AG
# 1 C 1 S 0.699293 0.699290 -0.027566 0.027799 0.002412
# 2 C 1 S 0.031569 0.031361 0.004097 -0.004054 -0.000605
# 3 C 1 X 0.000908 0.000632 -0.004163 0.004132 0.000619
# 4 C 1 Y -0.000019 0.000033 0.000668 -0.000651 0.005256
# 5 C 1 Z 0.000000 0.000000 0.000000 0.000000 0.000000
# 6 C 2 S -0.699293 0.699290 0.027566 0.027799 0.002412
# 7 C 2 S -0.031569 0.031361 -0.004097 -0.004054 -0.000605
# 8 C 2 X 0.000908 -0.000632 -0.004163 -0.004132 -0.000619
# 9 C 2 Y -0.000019 -0.000033 0.000668 0.000651 -0.005256
# 10 C 2 Z 0.000000 0.000000 0.000000 0.000000 0.000000
# 11 C 3 S -0.018967 -0.019439 0.011799 -0.014884 -0.452328
# 12 C 3 S -0.007748 -0.006932 0.000680 -0.000695 -0.024917
# 13 C 3 X 0.002628 0.002997 0.000018 0.000061 -0.003608
# ...
#
# There are blanks lines between each block.
#
        # Warning! There are subtle differences between GAMESS-US and PC-GAMESS
# in the formatting of the first four columns. In particular, for F orbitals,
# PC GAMESS:
# 19 C 1 YZ 0.000000 0.000000 0.000000 0.000000 0.000000
# 20 C XXX 0.000000 0.000000 0.000000 0.000000 0.002249
# 21 C YYY 0.000000 0.000000 -0.025555 0.000000 0.000000
# 22 C ZZZ 0.000000 0.000000 0.000000 0.002249 0.000000
# 23 C XXY 0.000000 0.000000 0.001343 0.000000 0.000000
# GAMESS US
# 55 C 1 XYZ 0.000000 0.000000 0.000000 0.000000 0.000000
# 56 C 1XXXX -0.000014 -0.000067 0.000000 0.000000 0.000000
#
if line.find("EIGENVECTORS") == 10 or line.find("MOLECULAR ORBITALS") == 10:
# This is the stuff that we can read from these blocks.
self.moenergies = [[]]
self.mosyms = [[]]
if not hasattr(self, "nmo"):
self.nmo = self.nbasis
self.mocoeffs = [numpy.zeros((self.nmo, self.nbasis), "d")]
readatombasis = False
if not hasattr(self, "atombasis"):
self.atombasis = []
self.aonames = []
for i in range(self.natom):
self.atombasis.append([])
self.aonames = []
readatombasis = True
self.skip_line(inputfile, 'dashes')
for base in range(0, self.nmo, 5):
self.updateprogress(inputfile, "Coefficients")
line = next(inputfile)
# This makes sure that this section does not end prematurely,
# which happens in regression 2CO.ccsd.aug-cc-pVDZ.out.
if line.strip() != "":
break
numbers = next(inputfile) # Eigenvector numbers.
# Sometimes there are some blank lines here.
while not line.strip():
line = next(inputfile)
# Eigenvalues for these orbitals (in hartrees).
try:
self.moenergies[0].extend([utils.convertor(float(x), "hartree", "eV") for x in line.split()])
except:
self.logger.warning('MO section found but could not be parsed!')
break
# Orbital symmetries.
line = next(inputfile)
if line.strip():
self.mosyms[0].extend(list(map(self.normalisesym, line.split())))
# Now we have nbasis lines. We will use the same method as in normalise_aonames() before.
p = re.compile("(\d+)\s*([A-Z][A-Z]?)\s*(\d+)\s*([A-Z]+)")
oldatom = '0'
i_atom = 0 # counter to keep track of n_atoms > 99
flag_w = True # flag necessary to keep from adding 100's at wrong time
for i in range(self.nbasis):
line = next(inputfile)
# If line is empty, break (ex. for FMO in exam37 which is a regression).
if not line.strip():
break
# Fill atombasis and aonames only first time around
if readatombasis and base == 0:
aonames = []
start = line[:17].strip()
m = p.search(start)
if m:
g = m.groups()
g2 = int(g[2]) # atom index in GAMESS file; changes to 0 after 99
# Check if we have moved to a hundred
# if so, increment the counter and add it to the parsed value
# There will be subsequent 0's as that atoms AO's are parsed
# so wait until the next atom is parsed before resetting flag
if g2 == 0 and flag_w:
i_atom = i_atom + 100
flag_w = False # handle subsequent AO's
if g2 != 0:
flag_w = True # reset flag
g2 = g2 + i_atom
aoname = "%s%i_%s" % (g[1].capitalize(), g2, g[3])
oldatom = str(g2)
atomno = g2-1
orbno = int(g[0])-1
else: # For F orbitals, as shown above
g = [x.strip() for x in line.split()]
aoname = "%s%s_%s" % (g[1].capitalize(), oldatom, g[2])
atomno = int(oldatom)-1
orbno = int(g[0])-1
self.atombasis[atomno].append(orbno)
self.aonames.append(aoname)
coeffs = line[15:] # Strip off the crud at the start.
j = 0
while j*11+4 < len(coeffs):
self.mocoeffs[0][base+j, i] = float(coeffs[j * 11:(j + 1) * 11])
j += 1
line = next(inputfile)
# If it's a restricted calc and no more properties, we have:
#
# ...... END OF RHF/DFT CALCULATION ......
#
# If there are more properties (such as the density matrix):
# --------------
#
# If it's an unrestricted calculation, however, we now get the beta orbitals:
#
# ----- BETA SET -----
#
# ------------
# EIGENVECTORS
# ------------
#
# 1 2 3 4 5
# ...
#
line = next(inputfile)
# This can come in between the alpha and beta orbitals (see #130).
if line.strip() == "LZ VALUE ANALYSIS FOR THE MOS":
while line.strip():
line = next(inputfile)
line = next(inputfile)
if line[2:22] == "----- BETA SET -----":
self.mocoeffs.append(numpy.zeros((self.nmo, self.nbasis), "d"))
self.moenergies.append([])
self.mosyms.append([])
for i in range(4):
line = next(inputfile)
for base in range(0, self.nmo, 5):
self.updateprogress(inputfile, "Coefficients")
blank = next(inputfile)
line = next(inputfile) # Eigenvector no
line = next(inputfile)
self.moenergies[1].extend([utils.convertor(float(x), "hartree", "eV") for x in line.split()])
line = next(inputfile)
self.mosyms[1].extend(list(map(self.normalisesym, line.split())))
for i in range(self.nbasis):
line = next(inputfile)
temp = line[15:] # Strip off the crud at the start
j = 0
while j * 11 + 4 < len(temp):
self.mocoeffs[1][base+j, i] = float(temp[j * 11:(j + 1) * 11])
j += 1
line = next(inputfile)
self.moenergies = [numpy.array(x, "d") for x in self.moenergies]
# Natural orbital coefficients and occupation numbers, presently supported only
# for CIS calculations. Looks the same as eigenvectors, without symmetry labels.
#
# --------------------
# CIS NATURAL ORBITALS
# --------------------
#
# 1 2 3 4 5
#
# 2.0158 2.0036 2.0000 2.0000 1.0000
#
# 1 O 1 S 0.000000 -0.157316 0.999428 0.164938 0.000000
# 2 O 1 S 0.000000 0.754402 0.004472 -0.581970 0.000000
# ...
#
if line[10:30] == "CIS NATURAL ORBITALS":
self.nocoeffs = numpy.zeros((self.nmo, self.nbasis), "d")
self.nooccnos = []
self.skip_line(inputfile, 'dashes')
for base in range(0, self.nmo, 5):
self.skip_lines(inputfile, ['blank', 'numbers'])
# The eigenvalues that go along with these natural orbitals are
# their occupation numbers. Sometimes there are blank lines before them.
line = next(inputfile)
while not line.strip():
line = next(inputfile)
eigenvalues = map(float, line.split())
self.nooccnos.extend(eigenvalues)
                # Orbital symmetry labels are normally here for MO coefficients.
line = next(inputfile)
# Now we have nbasis lines with the coefficients.
for i in range(self.nbasis):
line = next(inputfile)
coeffs = line[15:]
j = 0
while j*11+4 < len(coeffs):
self.nocoeffs[base+j, i] = float(coeffs[j * 11:(j + 1) * 11])
j += 1
# We cannot trust this self.homos until we come to the phrase:
# SYMMETRIES FOR INITAL GUESS ORBITALS FOLLOW
# which either is followed by "ALPHA" or "BOTH" at which point we can say
# for certain that it is an un/restricted calculations.
# Note that MCSCF calcs also print this search string, so make sure
# that self.homos does not exist yet.
if line[1:28] == "NUMBER OF OCCUPIED ORBITALS" and not hasattr(self, 'homos'):
homos = [int(line.split()[-1])-1]
line = next(inputfile)
homos.append(int(line.split()[-1])-1)
self.set_attribute('homos', homos)
if line.find("SYMMETRIES FOR INITIAL GUESS ORBITALS FOLLOW") >= 0:
# Not unrestricted, so lop off the second index.
# In case the search string above was not used (ex. FMO in exam38),
# we can try to use the next line which should also contain the
# number of occupied orbitals.
if line.find("BOTH SET(S)") >= 0:
nextline = next(inputfile)
if "ORBITALS ARE OCCUPIED" in nextline:
homos = int(nextline.split()[0])-1
if hasattr(self, "homos"):
try:
assert self.homos[0] == homos
except AssertionError:
self.logger.warning("Number of occupied orbitals not consistent. This is normal for ECP and FMO jobs.")
else:
self.homos = [homos]
self.homos = numpy.resize(self.homos, [1])
# Set the total number of atoms, only once.
# Normally GAMESS print TOTAL NUMBER OF ATOMS, however in some cases
# this is slightly different (ex. lower case for FMO in exam37).
if not hasattr(self, "natom") and "NUMBER OF ATOMS" in line.upper():
natom = int(line.split()[-1])
self.set_attribute('natom', natom)
# The first is from Julien's Example and the second is from Alexander's
# I think it happens if you use a polar basis function instead of a cartesian one
if line.find("NUMBER OF CARTESIAN GAUSSIAN BASIS") == 1 or line.find("TOTAL NUMBER OF BASIS FUNCTIONS") == 1:
nbasis = int(line.strip().split()[-1])
self.set_attribute('nbasis', nbasis)
elif line.find("TOTAL NUMBER OF CONTAMINANTS DROPPED") >= 0:
nmos_dropped = int(line.split()[-1])
if hasattr(self, "nmo"):
self.set_attribute('nmo', self.nmo - nmos_dropped)
else:
self.set_attribute('nmo', self.nbasis - nmos_dropped)
# Note that this line is present if ISPHER=1, e.g. for C_bigbasis
elif line.find("SPHERICAL HARMONICS KEPT IN THE VARIATION SPACE") >= 0:
nmo = int(line.strip().split()[-1])
self.set_attribute('nmo', nmo)
# Note that this line is not always present, so by default
# NBsUse is set equal to NBasis (see below).
elif line.find("TOTAL NUMBER OF MOS IN VARIATION SPACE") == 1:
nmo = int(line.split()[-1])
self.set_attribute('nmo', nmo)
elif line.find("OVERLAP MATRIX") == 0 or line.find("OVERLAP MATRIX") == 1:
# The first is for PC-GAMESS, the second for GAMESS
# Read 1-electron overlap matrix
if not hasattr(self, "aooverlaps"):
self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
else:
self.logger.info("Reading additional aooverlaps...")
base = 0
while base < self.nbasis:
self.updateprogress(inputfile, "Overlap")
self.skip_lines(inputfile, ['b', 'basis_fn_number', 'b'])
for i in range(self.nbasis - base): # Fewer lines each time
line = next(inputfile)
temp = line.split()
for j in range(4, len(temp)):
self.aooverlaps[base+j-4, i+base] = float(temp[j])
self.aooverlaps[i+base, base+j-4] = float(temp[j])
base += 5
# ECP Pseudopotential information
if "ECP POTENTIALS" in line:
if not hasattr(self, "coreelectrons"):
self.coreelectrons = [0]*self.natom
self.skip_lines(inputfile, ['d', 'b'])
header = next(inputfile)
while header.split()[0] == "PARAMETERS":
name = header[17:25]
atomnum = int(header[34:40])
                # The pseudopotential is given explicitly
if header[40:50] == "WITH ZCORE":
zcore = int(header[50:55])
lmax = int(header[63:67])
self.coreelectrons[atomnum-1] = zcore
                # The pseudopotential is copied from another atom
if header[40:55] == "ARE THE SAME AS":
atomcopy = int(header[60:])
self.coreelectrons[atomnum-1] = self.coreelectrons[atomcopy-1]
line = next(inputfile)
while line.split() != []:
line = next(inputfile)
header = next(inputfile)
# This was used before refactoring the parser, geotargets was set here after parsing.
#if not hasattr(self, "geotargets"):
# opttol = 1e-4
# self.geotargets = numpy.array([opttol, 3. / opttol], "d")
#if hasattr(self,"geovalues"): self.geovalues = numpy.array(self.geovalues, "d")
# This is quite simple to parse, but some files seem to print certain lines twice,
# repeating the populations without charges, but not in proper order.
# The unrestricted calculations are a bit tricky, since GAMESS-US prints populations
# for both alpha and beta orbitals in the same format and with the same title,
# but it still prints the charges only at the very end.
if "TOTAL MULLIKEN AND LOWDIN ATOMIC POPULATIONS" in line:
if not hasattr(self, "atomcharges"):
self.atomcharges = {}
header = next(inputfile)
line = next(inputfile)
# It seems that when population are printed twice (without charges),
# there is a blank line along the way (after the first header),
# so let's get a flag out of that circumstance.
doubles_printed = line.strip() == ""
if doubles_printed:
title = next(inputfile)
header = next(inputfile)
line = next(inputfile)
# Only go further if the header had five columns, which should
# be the case when both populations and charges are printed.
# This is pertinent for both double printing and unrestricted output.
if not len(header.split()) == 5:
return
mulliken, lowdin = [], []
while line.strip():
if line.strip() and doubles_printed:
line = next(inputfile)
mulliken.append(float(line.split()[3]))
lowdin.append(float(line.split()[5]))
line = next(inputfile)
self.atomcharges["mulliken"] = mulliken
self.atomcharges["lowdin"] = lowdin
# ---------------------
# ELECTROSTATIC MOMENTS
# ---------------------
#
# POINT 1 X Y Z (BOHR) CHARGE
# -0.000000 0.000000 0.000000 -0.00 (A.U.)
# DX DY DZ /D/ (DEBYE)
# 0.000000 -0.000000 0.000000 0.000000
#
if line.strip() == "ELECTROSTATIC MOMENTS":
self.skip_lines(inputfile, ['d', 'b'])
line = next(inputfile)
# The old PC-GAMESS prints memory assignment information here.
if "MEMORY ASSIGNMENT" in line:
memory_assignment = next(inputfile)
line = next(inputfile)
# If something else ever comes up, we should get a signal from this assert.
assert line.split()[0] == "POINT"
# We can get the reference point from here, as well as
# check here that the net charge of the molecule is correct.
coords_and_charge = next(inputfile)
assert coords_and_charge.split()[-1] == '(A.U.)'
reference = numpy.array([float(x) for x in coords_and_charge.split()[:3]])
reference = utils.convertor(reference, 'bohr', 'Angstrom')
charge = float(coords_and_charge.split()[-2])
self.set_attribute('charge', charge)
dipoleheader = next(inputfile)
assert dipoleheader.split()[:3] == ['DX', 'DY', 'DZ']
assert dipoleheader.split()[-1] == "(DEBYE)"
dipoleline = next(inputfile)
dipole = [float(d) for d in dipoleline.split()[:3]]
# The dipole is always the first multipole moment to be printed,
# so if it already exists, we will overwrite all moments since we want
# to leave just the last printed value (could change in the future).
if not hasattr(self, 'moments'):
self.moments = [reference, dipole]
else:
try:
assert self.moments[1] == dipole
except AssertionError:
self.logger.warning('Overwriting previous multipole moments with new values')
self.logger.warning('This could be from post-HF properties or geometry optimization')
self.moments = [reference, dipole]
if __name__ == "__main__":
    # Ad-hoc command line driver:
    #   no arguments        -> run the module doctests
    #   <logfile>           -> parse the GAMESS output file
    #   <logfile> <attr>... -> additionally print the requested parsed attributes
    import doctest, gamessparser, sys
    argv = sys.argv
    if len(argv) == 1:
        doctest.testmod(gamessparser, verbose=False)
    if len(argv) >= 2:
        parser = gamessparser.GAMESS(argv[1])
        data = parser.parse()
        for attrname in argv[2:]:
            if hasattr(data, attrname):
                print(getattr(data, attrname))
| ghutchis/cclib | src/cclib/parser/gamessparser.py | Python | lgpl-2.1 | 56,389 | [
"Firefly",
"GAMESS",
"Gaussian",
"cclib"
] | c172506d76738b5baeef2affc915dac3d7f27ccf284caa88b03a6d44a7d94da6 |
'''
Nacker is a tool to circumvent 802.1x Network Access Control (NAC) on
a wired LAN.
Copyright (C) 2013 Carsten Maartmann-Moe
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Created on Aug 29, 2013
@author: Carsten Maartmann-Moe <carsten@carmaa.com> aka ntropy
'''
import threading
import Queue
import time
from scapy.all import *
from pprint import pprint
THREADS = 4
# Default scan targets: the ten most commonly open TCP ports, used by
# synscan() whenever the caller supplies no port queue of their own.
TOPPORTS = [80, # http
            23, # telnet
            22, # ssh
            443, # https
            3389, # ms-term-serv
            445, # microsoft-ds
            139, # netbios-ssn
            21, # ftp
            135, # msrpc
            25] # smtp
def synscan(target, portlist=None):
    """Run a multi-threaded TCP SYN scan against `target`.

    Args:
        target: host name or IP address handed to scapy's IP() layer.
        portlist: optional Queue.Queue of port numbers to probe.  If omitted
            or empty, the TOPPORTS defaults are scanned.

    Returns:
        list of ports that answered with SYN/ACK (filled in by the workers).

    Note: the previous signature used ``portlist = Queue.Queue()`` — a
    mutable default argument — so the very same queue object was shared
    between every call, which is unsafe for concurrent invocations.
    Passing None and creating the queue per call fixes that while keeping
    the call sites (which pass no argument or their own queue) unchanged.
    """
    if portlist is None:
        portlist = Queue.Queue()
    if portlist.empty():
        for p in TOPPORTS:
            portlist.put(p)
    open_ports = []
    started = time.time()
    print('SYN scan of {0} started at {1} {2}'.format(target, time.ctime(started), time.tzname[0]))
    threads = []
    for i in range(1, THREADS + 1):
        #if cfg.verbose:
        #print('Creating Thread {0}'.format(i))
        t = SYNScannerThread(target, portlist, i, open_ports)
        # Daemon threads so a hung probe cannot keep the process alive.
        t.setDaemon(True)
        t.start()
        threads.append(t)
    # Wait until every queued port has been task_done()'d, then reap threads.
    portlist.join()
    for item in threads:
        item.join()
        #if cfg.verbose:
        #print(item.status)
    finished = time.time()
    print('Finished scanning in {0:5f} seconds at {1} {2}'.format((finished-started), time.ctime(finished), time.tzname[0]))
    return open_ports
class SYNScannerThread(threading.Thread):
    """Worker thread that pulls ports from a shared queue and SYN-probes them.

    Open ports are appended to the shared `open_ports` list; list.append is
    atomic under the GIL, so no extra locking is used here.
    """
    def __init__(self, target, portlist, tid, open_ports):
        threading.Thread.__init__(self)
        self.target = target          # host to probe
        self.portlist = portlist      # Queue.Queue of ports, shared with peers
        self.tid = tid                # numeric thread id, used in status text
        self.open_ports = open_ports  # shared result list (append-only)
        self.status = ''              # human-readable summary set on exit
    def run(self):
        # ports scanned by this thread
        totalPorts = 0
        while True:
            port = 0
            try:
                # 1s timeout so the thread exits once the queue drains.
                port = self.portlist.get(timeout = 1)
            except Queue.Empty:
                break
            # scapy sr1: send one SYN and wait for a single reply (or None).
            response = sr1(IP(dst = self.target)/TCP(dport = port, flags = 'S'), verbose = False)
            if response:
                # flags is 18 if SYN,ACK received
                # i.e port is open
                if response[TCP].flags == 18:
                    self.open_ports.append(port)
            totalPorts += 1
            # Must pair with every successful get() so portlist.join() returns.
            self.portlist.task_done()
        # end while block
        self.status = 'Thread {0} scanned {1} ports'.format(self.tid, totalPorts)
| mehulsbhatt/nacker | caravan/tcp.py | Python | gpl-2.0 | 3,089 | [
"MOE"
] | f468e3bf61571e62072cc4669d0c2d4cf329dbaeb558551f1b62b40f838185c0 |
#! /usr/bin/env python3
#
# Copyright 2006-2017 Romain Boman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests vtkTools
# RoBo - juin 2006
import vtk
import os
from vtkTools import *
# Sequential smoke tests for the vtkTools helpers.  Each "test" prints a
# banner, exercises one helper, and (for the visual ones) blocks until the
# user closes the VTK render window.  Requires the sample data under
# ../data/ (Lara CT volumes, OFF/VTK meshes) — TODO confirm layout.
datadir="../data/"
# test 1
print("\nTEST #1: readImageScalarRange")
readImageScalarRange(datadir+"lara/seg.img", extent=(0,255,0,255,0,59), coding='ushort')
# test 2
print("\nTEST #2: printOneLineOfData")
printOneLineOfData(datadir+"lara/seg.img", slice=30, line=128, extent=(0,255,0,255,0,59), coding='ushort')
# test 3
print("\nTEST #3: loadRawImage/extractOneSlice/displayOneSlice")
image = loadRawImage(datadir+"lara/seg.img",(0, 255, 0, 255, 0, 59),(0.98,0.98,1.56),'ushort','little')
image2 = extractOneSlice(image, slice=30)
print("close VTK window to continue...")
displayOneSlice(image2, slice=30, window=1, level=2)
# test 4
print("\nTEST #4: convert16to8bits")
convert16to8bits(datadir+"lara/seg.img", "seg2.img", extent=(0,255,0,255,0,59))
image = loadRawImage("seg2.img",(0, 255, 0, 255, 0, 59),(0.98,0.98,1.56),'uchar','little')
print("close VTK window to continue...")
displayOneSlice(image, slice=30, window=1, level=2)
#os.remove("seg2.img") # does not work (permission denied)
# test 5
print("\nTEST #5: createContourActor/createOutlineActor/display3D")
image = loadRawImage(datadir+"lara/seg.img", (0, 255, 0, 255, 0, 59),(0.9375,0.9375,2.5),'ushort','little')
skin = createContourActor(image)
outline = createOutlineActor(image)
print("close VTK window to continue...")
display3D((skin, outline))
# test 6
print("\nTEST #6: loadGenesisImage")
image = loadGenesisImage(datadir+"lara/006", (1,60))
print("close VTK window to continue...")
displayOneSlice(image, slice=30, window=255, level=127)
# test 7
print("\nTEST #7: loadRawImage (directory)")
image = loadRawImage(datadir+"lara/006",(0, 255, 0, 255, 1, 60),(0.98,0.98,1.56),'ushort','big')
print("close VTK window to continue...")
displayOneSlice(image, slice=30, window=255, level=127)
# test 8
print("\nTEST #8: off2vtk/savePolyData")
polydata = off2vtk(datadir+"tests/ellipse.off")
savePolyData("ellipse.vtk", polydata)
poly = createPolyDataActor(polydata)
outline = createOutlineActor(polydata)
print("close VTK window to continue...")
display3D((poly, outline))
# test 9
print("\nTEST #9: loadPolyData")
polydata = loadPolyData(datadir+"tests/brain.vtk")
poly = createPolyDataActor(polydata)
outline = createOutlineActor(polydata)
print("close VTK window to continue...")
display3D((poly, outline))
# test 10
print("\nTEST #10: createEuclide/createNegative/addImages (brain)")
image = loadRawImage(datadir+"lara/seg.img",(0, 255, 0, 255, 0, 59),(0.98,0.98,1.56),'ushort','little')
image2 = extractOneSlice(image, slice=13)
euclide1 = createEuclide(image2)
negative = createNegative(image2)
euclide2 = createEuclide(negative)
# Summed inside/outside distance maps, rendered as a warped height field.
final = addImages(euclide1, euclide2)
polydata = createWarpPolyData(final, scale=1.0/19480*100)
actor = createPolyDataActor(polydata, showScalar=True, range=(0,2))
outline = createOutlineActor(polydata)
print("close VTK window to continue...")
display3D((actor, outline))
# test 11
print("\nTEST #11: createEuclide/createNegative/addImages (ellipse)")
image=createEllipsoid()
image2 = extractOneSlice(image, slice=80)
euclide1 = createEuclide(image2)
negative = createNegative(image2)
euclide2 = createEuclide(negative)
final = addImages(euclide1, euclide2)
polydata = createWarpPolyData(final, scale=1.0/19480*100)
actor = createPolyDataActor(polydata, showScalar=True, range=(0,2))
outline = createOutlineActor(polydata)
print("close VTK window to continue...")
display3D((actor, outline))
# test 12
print("\nTEST #12: createEllipsoid/saveRawImage/saveVtkImage")
image=createEllipsoid()
saveRawImage("ellipse.raw", image)
saveVtkImage("ellipse.vtk", image)
# test 13
# NOTE(review): the banner below says "TEST #12" but this is test 13;
# the runtime string is left unchanged here.
print("\nTEST #12: loadVtkImage")
image = loadVtkImage(datadir+"tests/ellipse.vtk")
print("close VTK window to continue...")
displayOneSlice(image, slice=80, window=255, level=127)
| rboman/progs | metafor/biomec/vtkToolsTEST.py | Python | apache-2.0 | 4,488 | [
"VTK"
] | 42b11ff671269d10ce31d30433ce11318f1f2739d5a3ee383afc5b57fca3eeff |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Tools/Analysis and Exploration/Compare Individual Events"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import os
from collections import defaultdict
#------------------------------------------------------------------------
#
# GNOME/GTK modules
#
#------------------------------------------------------------------------
from gi.repository import Gtk
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.filters import GenericFilter, rules
from gramps.gui.filters import build_filter_model
from gramps.gen.sort import Sort
from gramps.gui.utils import ProgressMeter
from gramps.gen.utils.docgen import ODSTab
from gramps.gen.const import CUSTOM_FILTERS, URL_MANUAL_PAGE
from gramps.gen.errors import WindowActiveError
from gramps.gen.datehandler import get_date
from gramps.gui.dialog import WarningDialog
from gramps.gui.plug import tool
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gui.display import display_help
from gramps.gui.managedwindow import ManagedWindow
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gui.glade import Glade
from gramps.gui.editors import FilterEditor
from gramps.gen.constfunc import conv_to_unicode, uni_to_gui, get_curr_dir
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Compare_Individual_Events...')
#------------------------------------------------------------------------
#
# EventCmp
#
#------------------------------------------------------------------------
class TableReport(object):
    """
    This class provides an interface for the spreadsheet table
    used to save the data into the file.

    It is a thin wrapper around a tabular document backend (e.g. ODSTab):
    initialize()/finalize() bracket the document lifetime, while
    write_table_head()/write_table_data() each emit a single row.
    """
    def __init__(self, filename, doc):
        self.filename = filename  # output path handed to doc.open()
        self.doc = doc            # document backend (open/start_row/write_cell/...)
    def initialize(self, cols):
        """Open the output document and start its single page.

        `cols` is accepted for interface compatibility with callers but
        is not needed by the backend here.
        """
        self.doc.open(self.filename)
        self.doc.start_page()
    def finalize(self):
        """End the page and close the underlying document."""
        self.doc.end_page()
        self.doc.close()
    def write_table_data(self, data, skip_columns=()):
        """Write one data row, omitting the column indices in skip_columns.

        The previous default was a mutable ``[]``; it was never mutated,
        but an immutable tuple avoids the shared-default-argument pitfall.
        Any iterable of indices (list, tuple, set) still works.
        """
        self.doc.start_row()
        for index, item in enumerate(data):
            if index not in skip_columns:
                self.doc.write_cell(item)
        self.doc.end_row()
    def set_row(self, val):
        """Record the spreadsheet row index (offset by 2 for the header)."""
        self.row = val + 2
    def write_table_head(self, data):
        """Write the header row (every cell, no columns skipped)."""
        self.doc.start_row()
        for item in data:
            self.doc.write_cell(item)
        self.doc.end_row()
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class EventComparison(tool.Tool,ManagedWindow):
    """Filter-selection dialog for the Compare Individual Events tool.

    Lets the user pick a person filter (or open the filter editor); on
    Apply it runs the filter over the database and opens a DisplayChart
    window showing the matching people.
    """
    def __init__(self, dbstate, user, options_class, name, callback=None):
        uistate = user.uistate
        self.dbstate = dbstate
        self.uistate = uistate
        tool.Tool.__init__(self,dbstate, options_class, name)
        ManagedWindow.__init__(self, uistate, [], self)
        self.qual = 0
        # Load the "filters" toplevel from the shared Glade file and hook
        # up its signal handlers.
        self.filterDialog = Glade(toplevel="filters")
        self.filterDialog.connect_signals({
            "on_apply_clicked" : self.on_apply_clicked,
            "on_editor_clicked" : self.filter_editor_clicked,
            "on_help_clicked" : self.on_help_clicked,
            "destroy_passed_object" : self.close,
            "on_write_table" : self.__dummy,
            })
        window = self.filterDialog.toplevel
        window.show()
        self.filters = self.filterDialog.get_object("filter_list")
        self.label = _('Event comparison filter selection')
        self.set_window(window,self.filterDialog.get_object('title'),
                        self.label)
        self.on_filters_changed('Person')
        # Rebuild the combo whenever custom person filters change.
        uistate.connect('filters-changed', self.on_filters_changed)
        self.show()
    def __dummy(self, obj):
        """dummy callback, needed because widget is in same glade file
        as another widget, so callbacks must be defined to avoid warnings.
        """
        pass
    def on_filters_changed(self, name_space):
        """Repopulate the filter combo box for the 'Person' namespace."""
        if name_space == 'Person':
            all_filter = GenericFilter()
            all_filter.set_name(_("Entire Database"))
            all_filter.add_rule(rules.person.Everyone([]))
            self.filter_model = build_filter_model('Person', [all_filter])
            self.filters.set_model(self.filter_model)
            self.filters.set_active(0)
    def on_help_clicked(self, obj):
        """Display the relevant portion of GRAMPS manual"""
        display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)
    def build_menu_names(self, obj):
        """Return (submenu label, window menu label) for ManagedWindow."""
        return (_("Filter selection"),_("Event Comparison tool"))
    def filter_editor_clicked(self, obj):
        """Open the custom filter editor; ignore if one is already open."""
        try:
            FilterEditor('Person',CUSTOM_FILTERS,
                         self.dbstate,self.uistate)
        except WindowActiveError:
            pass
    def on_apply_clicked(self, obj):
        """Run the selected filter and show results (or a warning if none)."""
        cfilter = self.filter_model[self.filters.get_active()][1]
        progress_bar = ProgressMeter(_('Comparing events'),'')
        progress_bar.set_pass(_('Selecting people'),1)
        plist = cfilter.apply(self.db,
                              self.db.iter_person_handles())
        progress_bar.step()
        progress_bar.close()
        self.options.handler.options_dict['filter'] = self.filters.get_active()
        # Save options
        self.options.handler.save_options()
        if len(plist) == 0:
            WarningDialog(_("No matches were found"))
        else:
            DisplayChart(self.dbstate,self.uistate,plist,self.track)
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
##def by_value(first,second):
## return cmp(second[0],first[0])
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
def fix(line):
    """Return `line` stripped of surrounding whitespace with the XML/HTML
    special characters escaped (& -> &amp;, > -> &gt;, < -> &lt;, " -> &quot;).

    The previous implementation chained ``l.replace(l, '<', '&lt;')`` which
    is wrong on two counts: it passes the whole string as the substring to
    replace, and it passes a str where ``str.replace`` expects the integer
    ``count`` argument, raising TypeError at runtime.  Escape each character
    directly instead, handling '&' first so already-escaped output is not
    double-escaped.
    """
    l = line.strip()
    return (l.replace('&', '&amp;')
             .replace('>', '&gt;')
             .replace('<', '&lt;')
             .replace('"', '&quot;'))
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class DisplayChart(ManagedWindow):
    """Results window for the Compare Individual Events tool.

    Builds one spreadsheet-like row per person (with continuation rows
    when a person has several events of the same type), shows it in a
    sortable Gtk.TreeView, and can export it to an ODS file.
    """
    def __init__(self,dbstate,uistate,people_list,track):
        self.dbstate = dbstate
        self.uistate = uistate
        ManagedWindow.__init__(self, uistate, track, self)
        self.db = dbstate.db
        self.my_list = people_list   # person handles selected by the filter
        self.row_data = []           # rows built by build_row_data()
        self.save_form = None
        self.topDialog = Glade()
        self.topDialog.connect_signals({
            "on_write_table" : self.on_write_table,
            "destroy_passed_object" : self.close,
            "on_help_clicked" : self.on_help_clicked,
            "on_apply_clicked" : self.__dummy,
            "on_editor_clicked" : self.__dummy,
            })
        window = self.topDialog.toplevel
        window.show()
        self.set_window(window, self.topDialog.get_object('title'),
                        _('Event Comparison Results'))
        self.eventlist = self.topDialog.get_object('treeview')
        self.sort = Sort(self.db)
        self.my_list.sort(key=self.sort.by_last_name_key)
        self.event_titles = self.make_event_titles()
        # Columns: Person, ID, then per event type a Date column, a hidden
        # 'sort' column (sortable date key) and a Place column.
        self.table_titles = [_("Person"),_("ID")]
        for event_name in self.event_titles:
            self.table_titles.append(_("%(event_name)s Date") %
                                     {'event_name' :event_name}
                                    )
            self.table_titles.append('sort') # This won't be shown in a tree
            self.table_titles.append(_("%(event_name)s Place") %
                                     {'event_name' :event_name}
                                    )
        self.build_row_data()
        self.draw_display()
        self.show()
    def __dummy(self, obj):
        """dummy callback, needed because widget is in same glade file
        as another widget, so callbacks must be defined to avoid warnings.
        """
        pass
    def on_help_clicked(self, obj):
        """Display the relevant portion of GRAMPS manual"""
        display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)
    def build_menu_names(self, obj):
        """Return (menu label, window menu label) for ManagedWindow."""
        return (_("Event Comparison Results"),None)
    def draw_display(self):
        """Create tree columns and fill the ListStore from self.row_data."""
        model_index = 0
        tree_index = 0
        mylist = []
        renderer = Gtk.CellRendererText()
        for title in self.table_titles:
            mylist.append(str)
            if title == 'sort':
                # This will override the previously defined column
                self.eventlist.get_column(
                    tree_index-1).set_sort_column_id(model_index)
            else:
                column = Gtk.TreeViewColumn(title,renderer,text=model_index)
                column.set_sort_column_id(model_index)
                self.eventlist.append_column(column)
                # This one numbers the tree columns: increment on new column
                tree_index += 1
            # This one numbers the model columns: always increment
            model_index += 1
        model = Gtk.ListStore(*mylist)
        self.eventlist.set_model(model)
        self.progress_bar.set_pass(_('Building display'),len(self.row_data))
        for data in self.row_data:
            model.append(row=list(data))
            self.progress_bar.step()
        self.progress_bar.close()
    def build_row_data(self):
        """Fill self.row_data with one (or more) rows per person."""
        self.progress_bar = ProgressMeter(_('Comparing Events'),'')
        self.progress_bar.set_pass(_('Building data'),len(self.my_list))
        for individual_id in self.my_list:
            individual = self.db.get_person_from_handle(individual_id)
            name = individual.get_primary_name().get_name()
            gid = individual.get_gramps_id()
            # Map event-type name -> list of event handles for this person.
            the_map = defaultdict(list)
            for ievent_ref in individual.get_event_ref_list():
                ievent = self.db.get_event_from_handle(ievent_ref.ref)
                event_name = str(ievent.get_type())
                the_map[event_name].append(ievent_ref.ref)
            # Emit rows until every queued event handle has been consumed;
            # continuation rows carry empty name/id cells.
            first = True
            done = False
            while not done:
                added = False
                tlist = [name, gid] if first else ["", ""]
                for ename in self.event_titles:
                    if ename in the_map and len(the_map[ename]) > 0:
                        event_handle = the_map[ename][0]
                        del the_map[ename][0]
                        date = place = ""
                        if event_handle:
                            event = self.db.get_event_from_handle(event_handle)
                            date = get_date(event)
                            # Zero-padded sort key for the hidden 'sort' column.
                            sortdate = "%09d" % (
                                       event.get_date_object().get_sort_value()
                                       )
                            place_handle = event.get_place_handle()
                            if place_handle:
                                place = self.db.get_place_from_handle(
                                    place_handle).get_title()
                        # NOTE(review): if event_handle were ever falsy,
                        # sortdate would be unbound/stale here — appears
                        # unreachable in practice, but worth confirming.
                        tlist += [date, sortdate, place]
                        added = True
                    else:
                        tlist += [""]*3
                if first:
                    first = False
                    self.row_data.append(tlist)
                elif not added:
                    done = True
                else:
                    self.row_data.append(tlist)
            self.progress_bar.step()
    def make_event_titles(self):
        """
        Create the list of unique event types, along with the person's
        name, birth, and death.
        This should be the column titles of the report.
        """
        the_map = defaultdict(int)
        for individual_id in self.my_list:
            individual = self.db.get_person_from_handle(individual_id)
            for event_ref in individual.get_event_ref_list():
                event = self.db.get_event_from_handle(event_ref.ref)
                name = str(event.get_type())
                if not name:
                    break
                the_map[name] += 1
        # Sort event types by descending frequency.
        unsort_list = sorted([(d, k) for k,d in the_map.items()],
                             key=lambda x: x[0], reverse=True)
        sort_list = [ item[1] for item in unsort_list ]
## Presently there's no Birth and Death. Instead there's Birth Date and
## Birth Place, as well as Death Date and Death Place.
##        # Move birth and death to the begining of the list
##        if _("Death") in the_map:
##            sort_list.remove(_("Death"))
##            sort_list = [_("Death")] + sort_list
##        if _("Birth") in the_map:
##            sort_list.remove(_("Birth"))
##            sort_list = [_("Birth")] + sort_list
        return sort_list
    def on_write_table(self, obj):
        """Ask for a filename and export the table to an ODS spreadsheet."""
        f = Gtk.FileChooserDialog(_("Select filename"),
                                  action=Gtk.FileChooserAction.SAVE,
                                  buttons=(Gtk.STOCK_CANCEL,
                                           Gtk.ResponseType.CANCEL,
                                           Gtk.STOCK_SAVE,
                                           Gtk.ResponseType.OK))
        f.set_current_folder(get_curr_dir())
        status = f.run()
        f.hide()
        if status == Gtk.ResponseType.OK:
            name = conv_to_unicode(f.get_filename())
            doc = ODSTab(len(self.row_data))
            doc.creator(self.db.get_researcher().get_name())
            spreadsheet = TableReport(name, doc)
            # Drop the hidden 'sort' columns from the exported sheet.
            new_titles = []
            skip_columns = []
            index = 0
            for title in self.table_titles:
                if title == 'sort':
                    skip_columns.append(index)
                else:
                    new_titles.append(title)
                index += 1
            spreadsheet.initialize(len(new_titles))
            spreadsheet.write_table_head(new_titles)
            index = 0
            for top in self.row_data:
                spreadsheet.set_row(index%2)
                index += 1
                spreadsheet.write_table_data(top,skip_columns)
            spreadsheet.finalize()
        f.destroy()
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class EventComparisonOptions(tool.ToolOptions):
    """
    Defines options and provides handling interface.
    """
    def __init__(self, name,person_id=None):
        tool.ToolOptions.__init__(self, name,person_id)
        # Options specific for this report
        # 'filter' stores the active index of the person-filter combo box.
        self.options_dict = {
            'filter' : 0,
        }
        filters = ReportUtils.get_person_filters(None)
        # Help entry format: (value syntax, description, valid values, use-list).
        self.options_help = {
            'filter' : ("=num","Filter number.",
                        [ filt.get_name() for filt in filters ],
                        True ),
        }
| pmghalvorsen/gramps_branch | gramps/plugins/tool/eventcmp.py | Python | gpl-2.0 | 16,295 | [
"Brian"
] | 88c83d893bd54a02dee79bffa81ec4245a5d901f2d3fc671bd1486d884204650 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import json
from monty.json import MontyDecoder
from pymatgen import Composition
from pymatgen.apps.battery.conversion_battery import ConversionElectrode, \
ConversionVoltagePair
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class ConversionElectrodeTest(unittest.TestCase):
    """Round-trip and property tests for ConversionElectrode built from
    serialized entry sets (<formula>_batt.json files in the test data dir).
    """
    def setUp(self):
        # No shared fixtures; each test loads its own entries from disk.
        pass
    def test_init(self):
        """Check electrode properties, sub-electrodes and dict round-trips
        for Li-ion (LiCoO2, FeF3) and Mg-ion (MnO2) conversion electrodes.
        """
        # both 'LiCoO2' and "FeF3" are using Li+ as working ion; MnO2 is for the multivalent Mg2+ ion
        formulas = ['LiCoO2', "FeF3", "MnO2"]
        # Reference values; compared to 2 decimal places below.
        expected_properties = {}
        expected_properties['LiCoO2'] = {'average_voltage': 2.26940307125,
                                         'capacity_grav': 903.19752911225669,
                                         'capacity_vol': 2903.35804724,
                                         'specific_energy': 2049.7192465127678,
                                         'energy_density': 6588.8896693479574}
        expected_properties['FeF3'] = {'average_voltage': 3.06179925889,
                                       'capacity_grav': 601.54508701578118,
                                       'capacity_vol': 2132.2069115142394,
                                       'specific_energy': 1841.8103016131706,
                                       'energy_density': 6528.38954147}
        expected_properties['MnO2'] = {'average_voltage': 1.7127027687901726,
                                       'capacity_grav': 790.9142070034802,
                                       'capacity_vol': 3543.202003526853,
                                       'specific_energy': 1354.6009522103434,
                                       'energy_density': 6068.451881823329}
        for f in formulas:
            with open(os.path.join(test_dir, f + "_batt.json"), 'r') as fid:
                entries = json.load(fid, cls=MontyDecoder)
            # entries = computed_entries_from_json(fid.read())
            # with open(os.path.join(test_dir, f + "_batt.json"), 'w') as fid:
            #     json.dump(entries, fid, cls=MontyEncoder)
            if f in ['LiCoO2', "FeF3"]:
                working_ion = "Li"
            elif f in ["MnO2"]:
                working_ion = "Mg"
            c = ConversionElectrode.from_composition_and_entries(
                Composition(f), entries, working_ion_symbol=working_ion)
            # Adjacent-pairs vs all-pairs sub-electrode counts.
            self.assertEqual(len(c.get_sub_electrodes(True)), c.num_steps)
            self.assertEqual(len(c.get_sub_electrodes(False)),
                             sum(range(1, c.num_steps + 1)))
            self.assertIsNotNone(str(c))
            p = expected_properties[f]
            for k, v in p.items():
                self.assertAlmostEqual(getattr(c, "get_" + k).__call__(), v, 2)
            self.assertIsNotNone(c.get_summary_dict(True))
            # Test pair to dict
            pair = c.voltage_pairs[0]
            d = pair.as_dict()
            pair2 = ConversionVoltagePair.from_dict(d)
            for prop in ['voltage', 'mass_charge', 'mass_discharge']:
                self.assertEqual(getattr(pair, prop), getattr(pair2, prop), 2)
            # Test
            # Full electrode dict round-trip preserves all key properties.
            d = c.as_dict()
            electrode = ConversionElectrode.from_dict(d)
            for k, v in p.items():
                self.assertAlmostEqual(getattr(electrode,
                                               "get_" + k).__call__(), v, 2)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| mbkumar/pymatgen | pymatgen/apps/battery/tests/test_conversion_battery.py | Python | mit | 3,604 | [
"pymatgen"
] | 02b09086dde0e7afdd9eb62cc1509c424729da8d7059e96e36b24a2fe3f44f86 |
# Copyright 2017 Max Planck Society
# Distributed under the BSD-3 Software license,
# (See accompanying file ./LICENSE.txt or copy at
# https://opensource.org/licenses/BSD-3-Clause)
"""Training AdaGAN on various datasets.
Refer to the arXiv paper 'AdaGAN: Boosting Generative Models'
Coded by Ilya Tolstikhin, Carl-Johann Simon-Gabriel
"""
import os
import argparse
import logging
import tensorflow as tf
import numpy as np
from datahandler import DataHandler
from adagan import AdaGan
from metrics import Metrics
import utils
# Command-line flags (tf.app.flags).  The bracketed values in the help
# strings are earlier defaults kept for reference and may differ from the
# actual defaults declared here.
flags = tf.app.flags
flags.DEFINE_float("g_learning_rate", 0.001,
                   "Learning rate for Generator optimizers [16e-4]")
flags.DEFINE_float("d_learning_rate", 0.0005,
                   "Learning rate for Discriminator optimizers [4e-4]")
flags.DEFINE_float("learning_rate", 0.001,
                   "Learning rate for other optimizers [8e-4]")
flags.DEFINE_float("adam_beta1", 0.5, "Beta1 parameter for Adam optimizer [0.5]")
flags.DEFINE_integer("zdim", 8, "Dimensionality of the latent space [100]")
flags.DEFINE_float("init_std", 0.0099999, "Initial variance for weights [0.02]")
flags.DEFINE_string("workdir", 'results_mnist_vae', "Working directory ['results']")
flags.DEFINE_bool("unrolled", False, "Use unrolled GAN training [True]")
flags.DEFINE_bool("vae", True, "Use VAE instead of GAN")
flags.DEFINE_bool("pot", False, "Use POT instead of GAN")
flags.DEFINE_float("pot_lambda", 10., "POT regularization")
flags.DEFINE_bool("is_bagging", False, "Do we want to use bagging instead of adagan? [False]")
FLAGS = flags.FLAGS
def main():
    """Build the AdaGAN option dictionary, load the dataset, train the
    (boosted) generative model and plot/evaluate intermediate results.

    All hyper-parameters live in the `opts` dict; a handful are taken
    from command-line FLAGS, the rest are hard-coded defaults for the
    MNIST/VAE configuration of the paper's experiments.
    """
    opts = {}
    # Utility
    opts['random_seed'] = 66
    opts['dataset'] = 'mnist' # gmm, circle_gmm, mnist, mnist3 ...
    opts['data_dir'] = 'mnist'
    opts['trained_model_path'] = None #'models'
    opts['mnist_trained_model_file'] = None #'mnist_trainSteps_19999_yhat' # 'mnist_trainSteps_20000'
    opts['work_dir'] = FLAGS.workdir
    opts['ckpt_dir'] = 'checkpoints'
    opts["verbose"] = 1
    opts['tf_run_batch_size'] = 128
    opts["early_stop"] = -1 # set -1 to run normally
    opts["plot_every"] = 200
    opts["save_every_epoch"] = 20
    opts['gmm_max_val'] = 15.
    # Datasets
    opts['toy_dataset_size'] = 10000
    opts['toy_dataset_dim'] = 2
    opts['mnist3_dataset_size'] = 2 * 64 # 64 * 2500
    opts['mnist3_to_channels'] = False # Hide 3 digits of MNIST to channels
    opts['input_normalize_sym'] = False # Normalize data to [-1, 1]
    opts['gmm_modes_num'] = 5
    # AdaGAN parameters
    opts['adagan_steps_total'] = 1
    opts['samples_per_component'] = 1000
    opts['is_bagging'] = FLAGS.is_bagging
    opts['beta_heur'] = 'uniform' # uniform, constant
    opts['weights_heur'] = 'theory_star' # theory_star, theory_dagger, topk
    opts['beta_constant'] = 0.5
    opts['topk_constant'] = 0.5
    opts["mixture_c_epoch_num"] = 5
    opts["eval_points_num"] = 25600
    opts['digit_classification_threshold'] = 0.999
    opts['inverse_metric'] = False # Use metric from the Unrolled GAN paper?
    opts['inverse_num'] = 100 # Number of real points to inverse.
    opts['objective'] = None
    # Generative model parameters
    opts["init_std"] = FLAGS.init_std
    opts["init_bias"] = 0.0
    opts['latent_space_distr'] = 'normal' # uniform, normal
    opts['latent_space_dim'] = FLAGS.zdim
    opts["gan_epoch_num"] = 100
    opts['convolutions'] = True # If False then encoder is MLP of 3 layers
    opts['d_num_filters'] = 1024
    opts['d_num_layers'] = 4
    opts['g_num_filters'] = 1024
    opts['g_num_layers'] = 3
    opts['e_is_random'] = False
    opts['e_pretrain'] = False
    opts['e_add_noise'] = False
    opts['e_pretrain_bsize'] = 1000
    opts['e_num_filters'] = 1024
    opts['e_num_layers'] = 4
    opts['g_arch'] = 'dcgan_mod'
    opts['g_stride1_deconv'] = False
    opts['g_3x3_conv'] = 0
    opts['e_arch'] = 'dcgan'
    opts['e_3x3_conv'] = 0
    opts['conv_filters_dim'] = 4
    # --GAN specific:
    opts['conditional'] = False
    opts['unrolled'] = FLAGS.unrolled # Use Unrolled GAN? (only for images)
    opts['unrolling_steps'] = 5 # Used only if unrolled = True
    # --VAE specific
    opts['vae'] = FLAGS.vae
    opts['vae_sigma'] = 0.01
    # --POT specific
    opts['pot'] = FLAGS.pot
    opts['pot_pz_std'] = 2.
    opts['pot_lambda'] = FLAGS.pot_lambda
    opts['adv_c_loss'] = 'none'
    opts['vgg_layer'] = 'pool2'
    opts['adv_c_patches_size'] = 5
    opts['adv_c_num_units'] = 32
    opts['adv_c_loss_w'] = 1.0
    opts['cross_p_w'] = 0.0
    opts['diag_p_w'] = 0.0
    opts['emb_c_loss_w'] = 1.0
    opts['reconstr_w'] = 1.0
    opts['z_test'] = 'gan'
    opts['gan_p_trick'] = False
    opts['pz_transform'] = False
    opts['z_test_corr_w'] = 1.0
    opts['z_test_proj_dim'] = 10
    # Optimizer parameters
    opts['optimizer'] = 'adam' # sgd, adam
    opts["batch_size"] = 100
    opts["d_steps"] = 1
    opts['d_new_minibatch'] = False
    opts["g_steps"] = 2
    opts['batch_norm'] = True
    opts['dropout'] = False
    opts['dropout_keep_prob'] = 0.5
    opts['recon_loss'] = 'cross_entropy'
    # "manual" or number (float or int) giving the number of epochs to divide
    # the learning rate by 10 (converted into an exp decay per epoch).
    opts['decay_schedule'] = 'manual'
    opts['opt_learning_rate'] = FLAGS.learning_rate
    opts['opt_d_learning_rate'] = FLAGS.d_learning_rate
    opts['opt_g_learning_rate'] = FLAGS.g_learning_rate
    opts["opt_beta1"] = FLAGS.adam_beta1
    opts['batch_norm_eps'] = 1e-05
    opts['batch_norm_decay'] = 0.9
    if opts['e_is_random']:
        assert opts['latent_space_distr'] == 'normal',\
            'Random encoders currently work only with Gaussian Pz'
    # Data augmentation
    opts['data_augm'] = False
    if opts['verbose']:
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(message)s')
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
    # Prepare the work directory and dump all options for reproducibility.
    utils.create_dir(opts['work_dir'])
    utils.create_dir(os.path.join(opts['work_dir'], opts['ckpt_dir']))
    with utils.o_gfile((opts['work_dir'], 'params.txt'), 'w') as text:
        text.write('Parameters:\n')
        for key in opts:
            text.write('%s : %s\n' % (key, opts[key]))
    data = DataHandler(opts)
    assert data.num_points >= opts['batch_size'], 'Training set too small'
    adagan = AdaGan(opts, data)
    metrics = Metrics()
    train_size = data.num_points
    # Plot a random sample of the real data once, before training starts.
    random_idx = np.random.choice(train_size, 4*320, replace=False)
    metrics.make_plots(opts, 0, data.data,
                       data.data[random_idx], adagan._data_weights, prefix='dataset_')
    # Boosting loop: each step trains one mixture component, then samples
    # from the current mixture for plotting/evaluation.
    for step in range(opts["adagan_steps_total"]):
        logging.info('Running step {} of AdaGAN'.format(step + 1))
        adagan.make_step(opts, data)
        num_fake = opts['eval_points_num']
        logging.debug('Sampling fake points')
        fake_points = adagan.sample_mixture(num_fake)
        logging.debug('Sampling more fake points')
        more_fake_points = adagan.sample_mixture(500)
        logging.debug('Plotting results')
        if opts['dataset'] == 'gmm':
            metrics.make_plots(opts, step, data.data[:500],
                               fake_points[0:100], adagan._data_weights[:500])
            logging.debug('Evaluating results')
            (likelihood, C) = metrics.evaluate(
                opts, step, data.data[:500],
                fake_points, more_fake_points, prefix='')
        else:
            metrics.make_plots(opts, step, data.data,
                               fake_points[:320], adagan._data_weights)
            if opts['inverse_metric']:
                logging.debug('Evaluating results')
                # Best (minimum) inversion loss per real point over steps so far.
                l2 = np.min(adagan._invert_losses[:step + 1], axis=0)
                logging.debug('MSE=%.5f, STD=%.5f' % (np.mean(l2), np.std(l2)))
            res = metrics.evaluate(
                opts, step, data.data[:500],
                fake_points, more_fake_points, prefix='')
    logging.debug("AdaGan finished working!")
if __name__ == '__main__':
    # Script entry point: configure, train and evaluate AdaGAN.
    main()
| tolstikhin/adagan | iclr_mnist_vae.py | Python | bsd-3-clause | 8,020 | [
"Gaussian"
] | 3adb69a330e9178c6efbbff44c99e53cbe141933d8924b841455077786f5f188 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kernelized.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend as keras_backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.layers import kernelized as kernel_layers
from tensorflow.python.keras.utils import kernelized_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def _exact_gaussian(stddev):
  """Returns the exact Gaussian kernel function with `stddev` pre-bound."""
  kernel_fn = functools.partial(
      kernelized_utils.exact_gaussian_kernel, stddev=stddev)
  return kernel_fn
def _exact_laplacian(stddev):
  """Returns the exact Laplacian kernel function with `stddev` pre-bound."""
  kernel_fn = functools.partial(
      kernelized_utils.exact_laplacian_kernel, stddev=stddev)
  return kernel_fn
class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
  """Unit tests for the `RandomFourierFeatures` Keras layer."""

  def _assert_all_close(self, expected, actual, atol=0.001):
    """Asserts closeness, initializing variables first when in graph mode."""
    if not context.executing_eagerly():
      with self.cached_session() as sess:
        keras_backend._initialize_variables(sess)
        self.assertAllClose(expected, actual, atol=atol)
    else:
      self.assertAllClose(expected, actual, atol=atol)

  @test_util.run_in_graph_and_eager_modes()
  def test_invalid_output_dim(self):
    """A non-positive output_dim is rejected at construction time."""
    with self.assertRaisesRegexp(
        ValueError, r'`output_dim` should be a positive integer. Given: -3.'):
      _ = kernel_layers.RandomFourierFeatures(output_dim=-3, scale=2.0)

  @test_util.run_in_graph_and_eager_modes()
  def test_unsupported_kernel_type(self):
    """An unknown kernel-initializer string is rejected."""
    with self.assertRaisesRegexp(
        ValueError, r'Unsupported kernel type: \'unsupported_kernel\'.'):
      _ = kernel_layers.RandomFourierFeatures(
          3, 'unsupported_kernel', stddev=2.0)

  @test_util.run_in_graph_and_eager_modes()
  def test_invalid_scale(self):
    """A non-positive scale is rejected when explicitly provided."""
    with self.assertRaisesRegexp(
        ValueError,
        r'When provided, `scale` should be a positive float. Given: 0.0.'):
      _ = kernel_layers.RandomFourierFeatures(output_dim=10, scale=0.0)

  @test_util.run_in_graph_and_eager_modes()
  def test_invalid_input_shape(self):
    """Inputs must be rank-2; a rank-3 tensor raises."""
    inputs = random_ops.random_uniform((3, 2, 4), seed=1)
    rff_layer = kernel_layers.RandomFourierFeatures(output_dim=10, scale=3.0)
    with self.assertRaisesRegexp(
        ValueError,
        r'The rank of the input tensor should be 2. Got 3 instead.'):
      _ = rff_layer(inputs)

  @parameterized.named_parameters(
      ('gaussian', 'gaussian', 10.0, False),
      ('random', init_ops.random_uniform_initializer, 1.0, True))
  @test_util.run_in_graph_and_eager_modes()
  def test_random_features_properties(self, initializer, scale, trainable):
    """Constructor arguments round-trip to the public layer attributes."""
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=10,
        kernel_initializer=initializer,
        scale=scale,
        trainable=trainable)
    self.assertEqual(rff_layer.output_dim, 10)
    self.assertEqual(rff_layer.kernel_initializer, initializer)
    self.assertEqual(rff_layer.scale, scale)
    self.assertEqual(rff_layer.trainable, trainable)

  @parameterized.named_parameters(('gaussian', 'gaussian', False),
                                  ('laplacian', 'laplacian', True),
                                  ('other', init_ops.ones_initializer, True))
  @test_util.run_in_graph_and_eager_modes()
  def test_call(self, initializer, trainable):
    """Calling the layer yields the expected shape and variable counts."""
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=10,
        kernel_initializer=initializer,
        scale=1.0,
        trainable=trainable,
        name='random_fourier_features')
    inputs = random_ops.random_uniform((3, 2), seed=1)
    outputs = rff_layer(inputs)
    self.assertListEqual([3, 10], outputs.shape.as_list())
    # The layer owns 3 variables total; only the scale is ever trainable.
    num_trainable_vars = 1 if trainable else 0
    self.assertLen(rff_layer.non_trainable_variables, 3 - num_trainable_vars)

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def test_no_eager_leak(self):
    # Tests that repeatedly constructing and building a Layer does not leak
    # Python objects.
    inputs = random_ops.random_uniform((5, 4), seed=1)
    kernel_layers.RandomFourierFeatures(output_dim=4, name='rff')(inputs)
    kernel_layers.RandomFourierFeatures(output_dim=10, scale=2.0)(inputs)

  @test_util.run_in_graph_and_eager_modes()
  def test_output_shape(self):
    """Output shape is (batch, output_dim)."""
    inputs = random_ops.random_uniform((3, 2), seed=1)
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=7, name='random_fourier_features', trainable=True)
    outputs = rff_layer(inputs)
    self.assertEqual([3, 7], outputs.shape.as_list())

  @parameterized.named_parameters(
      ('gaussian', 'gaussian'), ('laplacian', 'laplacian'),
      ('other', init_ops.random_uniform_initializer))
  @test_util.run_deprecated_v1
  def test_call_on_placeholder(self, initializer):
    """An undefined innermost input dimension raises; a defined one works."""
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=5,
        kernel_initializer=initializer,
        name='random_fourier_features')
    with self.assertRaisesRegexp(
        ValueError, r'The last dimension of the inputs to '
        '`RandomFourierFeatures` should be defined. Found `None`.'):
      rff_layer(inputs)

    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None])
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=5,
        kernel_initializer=initializer,
        name='random_fourier_features')
    with self.assertRaisesRegexp(
        ValueError, r'The last dimension of the inputs to '
        '`RandomFourierFeatures` should be defined. Found `None`.'):
      rff_layer(inputs)

    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=5, name='random_fourier_features')
    rff_layer(inputs)

  @parameterized.named_parameters(('gaussian', 10, 'gaussian', 2.0),
                                  ('laplacian', 5, 'laplacian', None),
                                  ('other', 10, init_ops.ones_initializer, 1.0))
  @test_util.run_in_graph_and_eager_modes()
  def test_compute_output_shape(self, output_dim, initializer, scale):
    """compute_output_shape validates rank and innermost dimension."""
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim, initializer, scale=scale, name='rff')
    with self.assertRaises(ValueError):
      rff_layer.compute_output_shape(tensor_shape.TensorShape(None))
    with self.assertRaises(ValueError):
      rff_layer.compute_output_shape(tensor_shape.TensorShape([]))
    with self.assertRaises(ValueError):
      rff_layer.compute_output_shape(tensor_shape.TensorShape([3]))
    with self.assertRaises(ValueError):
      rff_layer.compute_output_shape(tensor_shape.TensorShape([3, 2, 3]))
    with self.assertRaisesRegexp(
        ValueError, r'The innermost dimension of input shape must be defined.'):
      rff_layer.compute_output_shape(tensor_shape.TensorShape([3, None]))
    self.assertEqual([None, output_dim],
                     rff_layer.compute_output_shape((None, 3)).as_list())
    self.assertEqual([None, output_dim],
                     rff_layer.compute_output_shape(
                         tensor_shape.TensorShape([None, 2])).as_list())
    self.assertEqual([4, output_dim],
                     rff_layer.compute_output_shape((4, 1)).as_list())

  @parameterized.named_parameters(
      ('gaussian', 10, 'gaussian', 3.0, False),
      ('laplacian', 5, 'laplacian', 5.5, True),
      ('other', 7, init_ops.random_uniform_initializer(), None, True))
  @test_util.run_in_graph_and_eager_modes()
  def test_get_config(self, output_dim, initializer, scale, trainable):
    """get_config serializes every constructor argument."""
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim,
        initializer,
        scale=scale,
        trainable=trainable,
        name='random_fourier_features',
    )
    expected_initializer = initializer
    if isinstance(initializer, init_ops.Initializer):
      expected_initializer = initializers.serialize(initializer)
    expected_dtype = (
        'float32' if base_layer_utils.v2_dtype_behavior_enabled() else None)
    expected_config = {
        'output_dim': output_dim,
        'kernel_initializer': expected_initializer,
        'scale': scale,
        'name': 'random_fourier_features',
        'trainable': trainable,
        'dtype': expected_dtype,
    }
    self.assertLen(expected_config, len(rff_layer.get_config()))
    self.assertSameElements(
        list(expected_config.items()), list(rff_layer.get_config().items()))

  @parameterized.named_parameters(
      ('gaussian', 5, 'gaussian', None, True),
      ('laplacian', 5, 'laplacian', 5.5, False),
      ('other', 7, init_ops.ones_initializer(), 2.0, True))
  @test_util.run_in_graph_and_eager_modes()
  def test_from_config(self, output_dim, initializer, scale, trainable):
    """from_config reconstructs a fully functional layer."""
    model_config = {
        'output_dim': output_dim,
        'kernel_initializer': initializer,
        'scale': scale,
        'trainable': trainable,
        'name': 'random_fourier_features',
    }
    rff_layer = kernel_layers.RandomFourierFeatures.from_config(model_config)
    self.assertEqual(rff_layer.output_dim, output_dim)
    self.assertEqual(rff_layer.kernel_initializer, initializer)
    self.assertEqual(rff_layer.scale, scale)
    self.assertEqual(rff_layer.trainable, trainable)

    inputs = random_ops.random_uniform((3, 2), seed=1)
    outputs = rff_layer(inputs)
    self.assertListEqual([3, output_dim], outputs.shape.as_list())
    num_trainable_vars = 1 if trainable else 0
    self.assertLen(rff_layer.trainable_variables, num_trainable_vars)
    if trainable:
      self.assertEqual('random_fourier_features/random_features_scale:0',
                       rff_layer.trainable_variables[0].name)
    self.assertLen(rff_layer.non_trainable_variables, 3 - num_trainable_vars)

  @parameterized.named_parameters(
      ('gaussian', 10, 'gaussian', 3.0, True),
      ('laplacian', 5, 'laplacian', 5.5, False),
      ('other', 10, init_ops.random_uniform_initializer(), None, True))
  @test_util.run_in_graph_and_eager_modes()
  def test_same_random_features_params_reused(self, output_dim, initializer,
                                              scale, trainable):
    """Applying the layer on the same input twice gives the same output."""
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=output_dim,
        kernel_initializer=initializer,
        scale=scale,
        trainable=trainable,
        name='random_fourier_features')
    inputs = constant_op.constant(
        np.random.uniform(low=-1.0, high=1.0, size=(2, 4)))
    output1 = rff_layer(inputs)
    output2 = rff_layer(inputs)
    self._assert_all_close(output1, output2)

  @parameterized.named_parameters(
      ('gaussian', 'gaussian', 5.0), ('laplacian', 'laplacian', 3.0),
      ('other', init_ops.random_uniform_initializer(), 5.0))
  @test_util.run_in_graph_and_eager_modes()
  def test_different_params_similar_approximation(self, initializer, scale):
    """Two layers with different output dims approximate the same kernel."""
    random_seed.set_random_seed(12345)
    rff_layer1 = kernel_layers.RandomFourierFeatures(
        output_dim=3000,
        kernel_initializer=initializer,
        scale=scale,
        name='rff1')
    rff_layer2 = kernel_layers.RandomFourierFeatures(
        output_dim=2000,
        kernel_initializer=initializer,
        scale=scale,
        name='rff2')
    # Two distinct inputs.
    x = constant_op.constant([[1.0, -1.0, 0.5]])
    y = constant_op.constant([[-1.0, 1.0, 1.0]])

    # Apply both layers to both inputs.
    output_x1 = math.sqrt(2.0 / 3000.0) * rff_layer1(x)
    output_y1 = math.sqrt(2.0 / 3000.0) * rff_layer1(y)
    output_x2 = math.sqrt(2.0 / 2000.0) * rff_layer2(x)
    output_y2 = math.sqrt(2.0 / 2000.0) * rff_layer2(y)

    # Compute the inner products of the outputs (on inputs x and y) for both
    # layers. For any fixed random features layer rff_layer, and inputs x, y,
    # rff_layer(x)^T * rff_layer(y) ~= K(x,y) up to a normalization factor.
    approx_kernel1 = kernelized_utils.inner_product(output_x1, output_y1)
    approx_kernel2 = kernelized_utils.inner_product(output_x2, output_y2)
    self._assert_all_close(approx_kernel1, approx_kernel2, atol=0.08)

  @parameterized.named_parameters(
      ('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)),
      ('laplacian', 'laplacian', 20.0, _exact_laplacian(stddev=20.0)))
  @test_util.run_in_graph_and_eager_modes()
  def test_bad_kernel_approximation(self, initializer, scale, exact_kernel_fn):
    """Approximation is bad when output dimension is small."""
    # Two distinct inputs.
    x = constant_op.constant([[1.0, -1.0, 0.5]])
    y = constant_op.constant([[-1.0, 1.0, 1.0]])

    small_output_dim = 10
    random_seed.set_random_seed(1234)
    # Initialize layer.
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=small_output_dim,
        kernel_initializer=initializer,
        scale=scale,
        name='random_fourier_features')

    # Apply layer to both inputs.
    output_x = math.sqrt(2.0 / small_output_dim) * rff_layer(x)
    output_y = math.sqrt(2.0 / small_output_dim) * rff_layer(y)

    # The inner products of the outputs (on inputs x and y) approximates the
    # real value of the RBF kernel but poorly since the output dimension of the
    # layer is small.
    exact_kernel_value = exact_kernel_fn(x, y)
    approx_kernel_value = kernelized_utils.inner_product(output_x, output_y)
    abs_error = math_ops.abs(exact_kernel_value - approx_kernel_value)
    if not context.executing_eagerly():
      with self.cached_session() as sess:
        keras_backend._initialize_variables(sess)
        abs_error_eval = sess.run([abs_error])
        self.assertGreater(abs_error_eval[0][0], 0.05)
        self.assertLess(abs_error_eval[0][0], 0.5)
    else:
      self.assertGreater(abs_error, 0.05)
      self.assertLess(abs_error, 0.5)

  @parameterized.named_parameters(
      ('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)),
      ('laplacian', 'laplacian', 10.0, _exact_laplacian(stddev=10.0)))
  @test_util.run_in_graph_and_eager_modes()
  def test_good_kernel_approximation_multiple_inputs(self, initializer, scale,
                                                     exact_kernel_fn):
    """A large output dimension gives a good kernel matrix approximation."""
    # Parameters.
    input_dim = 5
    output_dim = 2000
    x_rows = 20
    y_rows = 30

    x = constant_op.constant(
        np.random.uniform(size=(x_rows, input_dim)), dtype=dtypes.float32)
    y = constant_op.constant(
        np.random.uniform(size=(y_rows, input_dim)), dtype=dtypes.float32)

    random_seed.set_random_seed(1234)
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=output_dim,
        kernel_initializer=initializer,
        scale=scale,
        name='random_fourier_features')

    # The shapes of output_x and output_y are (x_rows, output_dim) and
    # (y_rows, output_dim) respectively.
    output_x = math.sqrt(2.0 / output_dim) * rff_layer(x)
    output_y = math.sqrt(2.0 / output_dim) * rff_layer(y)

    approx_kernel_matrix = kernelized_utils.inner_product(output_x, output_y)
    exact_kernel_matrix = exact_kernel_fn(x, y)
    self._assert_all_close(approx_kernel_matrix, exact_kernel_matrix, atol=0.05)
# Run the test suite only when executed as a script.
if __name__ == '__main__':
  test.main()
| arborh/tensorflow | tensorflow/python/keras/layers/kernelized_test.py | Python | apache-2.0 | 16,387 | [
"Gaussian"
] | 9333014d302703427a08e263a82b5dffecd83e5fb72c67bfce71f3cba0761ff5 |
"""
This is the boilerplate default configuration file.
Changes and additions to settings should be done in the config module
located in the application root rather than this config.
"""
# NOTE(review): this file commits real-looking API keys and secrets; they
# should live outside version control (environment/config service) — verify.
config = {
    # webapp2 sessions
    'webapp2_extras.sessions': {'secret_key': '_PUT_KEY_HERE_YOUR_SECRET_KEY_'},

    # webapp2 authentication
    'webapp2_extras.auth': {'user_model': 'boilerplate.models.User',
                            'cookie_name': 'session_name'},

    # jinja2 templates
    'webapp2_extras.jinja2': {'template_path': ['templates', 'boilerplate/templates', 'admin/templates'],
                              'environment_args': {'extensions': ['jinja2.ext.i18n']}},

    # application name
    'app_name': "The Arky",

    # the default language code for the application.
    # should match whatever language the site uses when i18n is disabled
    'app_lang': 'en',

    # Locale code = <language>_<territory> (ie 'en_US')
    # to pick locale codes see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code
    # also see http://www.sil.org/iso639-3/codes.asp
    # Language codes defined under iso 639-1 http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
    # Territory codes defined under iso 3166-1 alpha-2 http://en.wikipedia.org/wiki/ISO_3166-1
    # disable i18n if locales array is empty or None
    'locales': ['en_US', 'es_ES', 'it_IT', 'zh_CN', 'id_ID', 'fr_FR', 'de_DE', 'ru_RU', 'pt_BR', 'cs_CZ'],

    # contact page email settings
    'contact_sender': "PUT_SENDER_EMAIL_HERE",
    'contact_recipient': "tjunhao.90@gmail.com",

    # Password AES Encryption Parameters
    'aes_key': "12_24_32_BYTES_KEY_FOR_PASSWORDS",
    'salt': "_PUT_SALT_HERE_TO_SHA512_PASSWORDS_",

    # get your own consumer key and consumer secret by registering at https://dev.twitter.com/apps
    # callback url must be: http://[YOUR DOMAIN]/login/twitter/complete
    'twitter_consumer_key': 'wBtHqd4a3IqZN89J1TCeog',
    'twitter_consumer_secret': 'aH9isB1ZI2zM8gYMWrEhOI2DQaCXhn59PQCdyheMg',

    # Facebook Login
    # get your own consumer key and consumer secret by registering at https://developers.facebook.com/apps
    # Very Important: set the site_url= your domain in the application settings in the facebook app settings page
    # callback url must be: http://[YOUR DOMAIN]/login/facebook/complete
    'fb_api_key': '136496959884393',
    'fb_secret': '70623320f6537fec08eb4a308ddc54b9',

    # Linkedin Login
    # Get you own api key and secret from https://www.linkedin.com/secure/developer
    'linkedin_api': 'xv9iudz1frb8',
    'linkedin_secret': 'lPdAnwbrlOFViozl',

    # Github login
    # Register apps here: https://github.com/settings/applications/new
    'github_server': 'github.com',
    'github_redirect_uri': 'http://www.example.com/social_login/github/complete',
    'github_client_id': 'bf270aa784452945c2d9',
    'github_client_secret': '9e80bd5c451605437dbffb03e22af4036d8d645f',

    # get your own recaptcha keys by registering at http://www.google.com/recaptcha/
    'captcha_public_key': "6Lf3-uISAAAAAJJmMkUjTP_Pjg7iXCVadduKEbl2",
    'captcha_private_key': "6Lf3-uISAAAAAFxZOHfmSw2Kydxfk6K_vOfXP5rW",

    # Leave blank "google_analytics_domain" if you only want Analytics code
    'google_analytics_domain': "YOUR_PRIMARY_DOMAIN (e.g. google.com)",
    'google_analytics_code': "UA-XXXXX-X",

    # add status codes and templates used to catch and display errors
    # if a status code is not listed here it will use the default app engine
    # stacktrace error page or browser error page
    'error_templates': {
        403: 'errors/default_error.html',
        404: 'errors/default_error.html',
        500: 'errors/default_error.html',
    },

    # Enable Federated login (OpenID and OAuth)
    # Google App Engine Settings must be set to Authentication Options: Federated Login
    'enable_federated_login': True,

    # jinja2 base layout template
    'base_layout': 'base.html',

    # send error emails to developers
    'send_mail_developer': False,

    # developers' list: a tuple of (name, email) pairs.
    # BUG FIX: the original wrote (('name', 'email')) which — without a
    # trailing comma — is just a single 2-string tuple, so consumers doing
    # `for name, email in developers` would unpack single characters.
    'developers': (
        ('Tan Jun Hao', 'bb111189@gmail.com'),
    ),

    # If true, it will write in datastore a log of every email sent
    'log_email': False,

    # If true, it will write in datastore a log of every visit
    'log_visit': False,

    # ----> ADD MORE CONFIGURATION OPTIONS HERE <----
}  # end config
| bb111189/Arky2 | config/production.py | Python | lgpl-3.0 | 4,100 | [
"VisIt"
] | d9c7619b51153385df3bf0951710cf83d09e7503cffd11853770b1b7619e91ba |
#!/usr/bin/python
"""
(C) Copyright 2014-2017 Marc Rosanes
The program is distributed under the terms of the
GNU General Public License (or the Lesser GPL).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from txm2nexuslib import tomonorm
from txm2nexuslib import specnorm
from txm2nexuslib import mosaicnorm
import argparse
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    """Argparse help formatter that both shows argument defaults and keeps
    the description text exactly as written (no re-wrapping)."""
def _build_parser():
    """Builds the command-line parser for the normalization tool."""
    parser = argparse.ArgumentParser(
        description=("Normalization of: \n"
                     "- TOMOGRAPHIES \n"
                     "- SPECTROSCOPIES\n "
                     "Taking into account FF, currents and exposure times"),
        formatter_class=CustomFormatter)
    parser.add_argument('inputfile', type=str, default=None,
                        help=('Enter hdf5 file which contains the information '
                              'of both tomography and flatfield.'))
    parser.add_argument('-s', '--spectroscopy', type=int, default=0,
                        help=('Constant energy tomo normalization (0) '
                              'or spectroscopy normalization (-s=1).'))
    parser.add_argument('-g', '--gaussianblur', type=int, default=0,
                        help=('gaussian filtering for avoiding '
                              'diffraction artifacts. \n'
                              'Default=0 -> gaussian filter not applied. \n'
                              'Integer not 0: Indicate the std as integer. '
                              'Ex: -g=5'))
    parser.add_argument('-f', '--avgff', type=int, default=1,
                        help=('Normalize using avergeFF (-f=1); or using '
                              'single FF (FFimage 0) (-f=0).'))
    parser.add_argument('-m', '--mosaicnorm', type=int, default=0,
                        help='Mosaic normalization using a given FF (-m=1).')
    parser.add_argument('-r', '--ratio', type=int, default=1,
                        help=('ratio = exp_time_mosaic/exp_time_FF.\n'
                              'Exposure times ratio. \n'
                              'This option can be used only when '
                              'normalizing mosaics.'))
    parser.add_argument('-a', '--avgtomnorm', type=int, default=0,
                        help=('Indicate if we want to obtain the average of '
                              'the normalized images (-a=1).\n '
                              'Available only for Tomo normalization.'))
    parser.add_argument('-d',
                        '--diffraction',
                        type=int,
                        default=0,
                        help=('Correct diffraction pattern with external '
                              'given avgFF (-d=1).'))
    return parser


def main():
    """Entry point: parse options and dispatch to the requested normalizer.

    Three mutually exclusive modes: mosaic normalization (-m=1),
    constant-energy tomography normalization (default), or spectroscopy
    normalization (-s=1).
    """
    args = _build_parser().parse_args()
    if args.mosaicnorm == 1:
        print("\nNormalizing Mosaic")
        normalizer = mosaicnorm.MosaicNormalize(args.inputfile,
                                                ratio=args.ratio)
        normalizer.normalizeMosaic()
    elif args.spectroscopy == 0:
        print("\nNormalizing Tomography images")
        # Normalize the tomography using the tomography images, the FF
        # (flatfield) images, the experimental times of FF images, and the
        # machine current for each image.
        normalizer = tomonorm.TomoNormalize(args.inputfile,
                                            args.avgtomnorm,
                                            args.gaussianblur,
                                            args.avgff,
                                            args.diffraction)
        normalizer.normalize_tomo()
    else:
        print("\nNormalizing Spectroscopy images")
        normalizer = specnorm.SpecNormalize(args.inputfile)
        normalizer.normalizeSpec()
# Run the normalization CLI only when executed as a script.
if __name__ == "__main__":
    main()
| sagiss/txrm2nexus | txm2nexuslib/scripts/normalize.py | Python | gpl-3.0 | 4,710 | [
"Gaussian"
] | 92dbe450bd964ed3d3a75cd13b9d5f57fb807edc916b974a35db8272c2eb98f5 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.