text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
import os
from logging import getLogger
from django.conf import settings
from lettuce import step, world
from nose.tools import assert_in, assert_true
from selenium.webdriver.common.keys import Keys
from student import auth
from student.models import get_user
from student.roles import CourseInstructorRole, CourseStaffRole, GlobalStaff
from student.tests.factories import AdminFactory
from terrain.browser import reset_data
logger = getLogger(__name__)
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
@step('I (?:visit|access|open) the Studio homepage$')
def i_visit_the_studio_homepage(_step):
    """Open the Studio home page and verify the sign-in link is shown."""
    # Set LETTUCE_SERVER_PORT = 8001 in settings.py to serve this on
    # port 8001 instead of the default.
    world.visit('/')
    assert world.is_css_present('a.action-signin')
@step('I am logged into Studio$')
def i_am_logged_into_studio(_step):
    # Delegates to the shared helper, which creates/uses the default
    # 'robot' studio account and verifies the dashboard loads.
    log_into_studio()
@step('I confirm the alert$')
def i_confirm_with_ok(_step):
    # Accept (click OK on) the currently-displayed browser alert dialog.
    world.browser.get_alert().accept()
@step(u'I press the "([^"]*)" delete icon$')
def i_press_the_category_delete_icon(_step, category):
    """Click the delete icon for a course section or subsection."""
    selectors = {
        'section': 'a.action.delete-section-button',
        'subsection': 'a.action.delete-subsection-button',
    }
    assert category in selectors, 'Invalid category: %s' % category
    world.css_click(selectors[category])
@step('I have opened a new course in Studio$')
def i_have_opened_a_new_course(_step):
    # Clears existing courses, creates and logs in the default user,
    # then creates and opens the default course.
    open_new_course()
@step('I have populated a new course in Studio$')
def i_have_populated_a_new_course(_step):
    """
    Create a fresh course containing one section with one sequential
    subsection, give a non-staff user author access, log in as that
    user, and open the course outline.
    """
    world.clear_courses()
    course = world.CourseFactory.create()
    world.scenario_dict['COURSE'] = course
    section = world.ItemFactory.create(parent_location=course.location)
    # A 'sequential' child of the section acts as the subsection.
    world.ItemFactory.create(
        parent_location=section.location,
        category='sequential',
        display_name='Subsection One',
    )
    user = create_studio_user(is_staff=False)
    add_course_author(user, course)
    log_into_studio()
    world.css_click('a.course-link')
    world.wait_for_js_to_load()
@step('(I select|s?he selects) the new course')
def select_new_course(_step, whom):
    # `whom` captures the subject phrase from the step text; it is not
    # used because the click is the same regardless of actor.
    course_link_css = 'a.course-link'
    world.css_click(course_link_css)
@step(u'I press the "([^"]*)" notification button$')
def press_the_notification_button(_step, name):
    """Click the named button (e.g. "Save") on the page notification bar."""
    # Because the notification uses a CSS transition,
    # Selenium will always report it as being visible.
    # This makes it very difficult to successfully click
    # the "Save" button at the UI level.
    # Instead, we use JavaScript to reliably click
    # the button.
    # Button class names are lowercase, e.g. "Save" -> "action-save".
    btn_css = 'div#page-notification button.action-%s' % name.lower()
    world.trigger_event(btn_css, event='focus')
    world.browser.execute_script("$('{}').click()".format(btn_css))
    world.wait_for_ajax_complete()
@step('I change the "(.*)" field to "(.*)"$')
def i_change_field_to_value(_step, field, value):
    """Fill the named field and press ENTER to commit the change."""
    # The element id is the lowercased, hyphen-joined field label,
    # e.g. "Course Name" -> "#course-name".
    element = world.css_find('#%s' % '-'.join(field.lower().split())).first
    element.fill(value)
    element._element.send_keys(Keys.ENTER)
@step('I reset the database')
def reset_the_db(_step):
    """
    When running Lettuce tests using examples (i.e. "Confirmation is
    shown on save" in course-settings.feature), the normal hooks
    aren't called between examples. reset_data should run before each
    scenario to flush the test database. When this doesn't happen we
    get errors due to trying to insert a non-unique entry. So instead,
    we delete the database manually. This has the effect of removing
    any users and courses that have been created during the test run.
    """
    # reset_data takes a scenario argument; None is acceptable here.
    reset_data(None)
@step('I see a confirmation that my changes have been saved')
def i_see_a_confirmation(step):
    # The save-confirmation alert element is rendered with this id.
    confirmation_css = '#alert-confirmation'
    assert world.is_css_present(confirmation_css)
def open_new_course():
    """Create the default user, log in, and create/open a fresh course."""
    world.clear_courses()
    create_studio_user()
    log_into_studio()
    create_a_course()
def create_studio_user(
        uname='robot',
        email='robot+studio@edx.org',
        password='test',
        is_staff=False):
    """Create, register, and activate a Studio user account.

    Returns the created user object.
    """
    new_user = world.UserFactory(
        username=uname,
        email=email,
        password=password,
        is_staff=is_staff)
    # Registration must be explicitly activated before login works.
    reg = world.RegistrationFactory(user=new_user)
    reg.register(new_user)
    reg.activate()
    return new_user
def fill_in_course_info(
        name='Robot Super Course',
        org='MITx',
        num='101',
        run='2013_Spring'):
    """Populate the fields of the new-course creation form."""
    for selector, value in (
            ('.new-course-name', name),
            ('.new-course-org', org),
            ('.new-course-number', num),
            ('.new-course-run', run)):
        world.css_fill(selector, value)
def log_into_studio(
        uname='robot',
        email='robot+studio@edx.org',
        password='test',
        name='Robot Studio'):
    """Log in to Studio and verify the dashboard shows the username."""
    world.log_in(username=uname, password=password, email=email, name=name)
    # Navigate to the studio dashboard
    world.visit('/')
    # The username appears in the account menu once logged in.
    assert_in(uname, world.css_text('span.account-username', timeout=10))
def add_course_author(user, course):
    """
    Add the user to the instructor group of the course
    so they will have the permissions to see it in studio
    """
    # Role changes must be performed by a global admin; create one
    # just for the purpose of granting these roles.
    global_admin = AdminFactory()
    for role in (CourseStaffRole, CourseInstructorRole):
        auth.add_users(global_admin, role(course.id), user)
def create_a_course():
    """Create the default course, grant authorship, and open it."""
    new_course = world.CourseFactory.create(
        org='MITx', course='999', display_name='Robot Super Course')
    world.scenario_dict['COURSE'] = new_course
    # Prefer the scenario's current user; fall back to the default
    # studio account created by create_studio_user().
    author = world.scenario_dict.get("USER") or get_user('robot+studio@edx.org')
    add_course_author(author, new_course)
    # Navigate to the studio dashboard and open the new course.
    world.visit('/')
    world.css_click('a.course-link')
    assert_true(world.is_css_present('span.course-title'))
def add_section():
    """Add a new section via the outline and verify it appears."""
    world.css_click('.outline .button-new')
    assert_true(world.is_css_present('.outline-section .xblock-field-value'))
def set_date_and_time(date_css, desired_date, time_css, desired_time, key=None):
    """Fill a date field then a time field, waiting for AJAX after each."""
    for selector, value in ((date_css, desired_date), (time_css, desired_time)):
        set_element_value(selector, value, key)
        world.wait_for_ajax_complete()
def set_element_value(element_css, element_value, key=None):
    """Fill an element, then press `key` (TAB by default) to commit it."""
    target = world.css_find(element_css).first
    target.fill(element_value)
    # Hitting a key after filling triggers the save of the content.
    keystroke = getattr(Keys, key) if key is not None else Keys.TAB
    target._element.send_keys(keystroke)  # pylint: disable=protected-access
@step('I have enabled the (.*) advanced module$')
def i_enabled_the_advanced_module(step, module):
    """Enable an advanced module in the course's advanced settings."""
    # NOTE(review): this step text isn't defined in this file —
    # presumably provided elsewhere; verify the step exists.
    step.given('I have opened a new course section in Studio')
    world.css_click('.nav-course-settings')
    world.css_click('.nav-course-settings-advanced a')
    # The advanced-modules setting is a JSON list in a CodeMirror editor.
    type_in_codemirror(0, '["%s"]' % module)
    press_the_notification_button(step, 'Save')
@world.absorb
def create_unit_from_course_outline():
    """
    Expands the section and clicks on the New Unit link.
    The end result is the page where the user is editing the new unit.
    """
    css_selectors = [
        '.outline-subsection .expand-collapse', '.outline-subsection .button-new'
    ]
    for selector in css_selectors:
        world.css_click(selector)

    # The unit edit page loads MathJax and other assets asynchronously.
    world.wait_for_mathjax()
    world.wait_for_loading()

    assert world.is_css_present('ul.new-component-type')
@world.absorb
def wait_for_loading():
    """
    Waits for the loading indicator to be hidden.
    """
    # The indicator gains the 'is-hidden' class once loading completes.
    world.wait_for(lambda _driver: len(world.browser.find_by_css('div.ui-loading.is-hidden')) > 0)
@step('I have clicked the new unit button$')
@step(u'I am in Studio editing a new unit$')
def edit_new_unit(step):
    # Populate a course first, then drill down to a brand-new unit.
    step.given('I have populated a new course in Studio')
    create_unit_from_course_outline()
@step('the save notification button is disabled')
def save_button_disabled(step):
    """Assert the Save notification button carries the disabled class."""
    assert world.css_has_class('.action-save', 'is-disabled')
@step('the "([^"]*)" button is disabled')
def button_disabled(step, value):
    """Assert the input button with the given value is disabled."""
    selector = 'input[value="%s"]' % value
    assert world.css_has_class(selector, 'is-disabled')
def _do_studio_prompt_action(intent, action):
    """
    Wait for a studio prompt to appear and press the specified action button
    See common/js/components/views/feedback_prompt.js for implementation
    """
    assert intent in [
        'warning',
        'error',
        'confirmation',
        'announcement',
        'step-required',
        'help',
        'mini',
    ]
    assert action in ['primary', 'secondary']

    # Wait for the prompt to be fully shown before interacting with it.
    world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))

    action_css = 'li.nav-item > button.action-{}'.format(action)
    # Click via JavaScript: CSS transitions make Selenium-level clicks
    # unreliable (same workaround as press_the_notification_button).
    world.trigger_event(action_css, event='focus')
    world.browser.execute_script("$('{}').click()".format(action_css))

    world.wait_for_ajax_complete()
    # Wait until the prompt starts hiding so the next step sees a
    # settled page.
    world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))
@world.absorb
def confirm_studio_prompt():
    # Accept the currently shown warning prompt (its primary action).
    _do_studio_prompt_action('warning', 'primary')
@step('I confirm the prompt')
def confirm_the_prompt(step):
    # Step-level wrapper for the absorbed helper above.
    confirm_studio_prompt()
@step(u'I am shown a prompt$')
def i_am_shown_a_notification(step):
    # Despite the function name, this checks for a *prompt* wrapper.
    assert world.is_css_present('.wrapper-prompt')
def type_in_codemirror(index, text, find_prefix="$"):
    """Replace the contents of the index-th CodeMirror editor with `text`.

    `find_prefix` is the jQuery accessor to use (some pages namespace it).
    """
    # Focus, set the value, then blur so CodeMirror fires its change events.
    script = """
    var cm = {find_prefix}('div.CodeMirror:eq({index})').get(0).CodeMirror;
    cm.getInputField().focus();
    cm.setValue(arguments[0]);
    cm.getInputField().blur();""".format(index=index, find_prefix=find_prefix)
    world.browser.driver.execute_script(script, str(text))
    world.wait_for_ajax_complete()
def get_codemirror_value(index=0, find_prefix="$"):
    """Return the current contents of the index-th CodeMirror editor."""
    return world.browser.driver.execute_script(
        """
        return {find_prefix}('div.CodeMirror:eq({index})').get(0).CodeMirror.getValue();
        """.format(index=index, find_prefix=find_prefix)
    )
def attach_file(filename, sub_path):
    """Attach a fixture file (TEST_ROOT/sub_path/filename) to the page's
    file input.

    The existence check is done *before* mutating the DOM, so a missing
    fixture fails fast without leaving the hidden input visible.
    """
    path = os.path.join(TEST_ROOT, sub_path, filename)
    assert_true(os.path.exists(path))
    # The real input is hidden; make it visible so the driver can use it.
    world.browser.execute_script("$('input.file-input').css('display', 'block')")
    world.browser.attach_file('file', os.path.abspath(path))
def upload_file(filename, sub_path=''):
    """Upload a fixture file through the asset-upload modal dialog."""
    # The file upload dialog is a faux modal, a div that takes over the display
    attach_file(filename, sub_path)
    modal_css = 'div.wrapper-modal-window-assetupload'
    button_css = '{} .action-upload'.format(modal_css)
    world.css_click(button_css)

    # Clicking the Upload button triggers an AJAX POST.
    world.wait_for_ajax_complete()

    # The modal stays up with a "File uploaded succeeded" confirmation message, then goes away.
    # It should take under 2 seconds, so wait up to 10.
    # Note that is_css_not_present will return as soon as the element is gone.
    assert world.is_css_not_present(modal_css, wait_time=10)
@step(u'"([^"]*)" logs in$')
def other_user_login(step, name):
    """Log out the current user and log in as `name` (email name@edx.org)."""
    step.given('I log out')
    world.visit('/')

    signin_css = 'a.action-signin'
    world.is_css_present(signin_css)
    world.css_click(signin_css)

    def fill_login_form():
        # Wrapped so retry_on_exception can re-run it if the form is
        # mid-render when first located.
        login_form = world.browser.find_by_css('form#login_form')
        login_form.find_by_name('email').fill(name + '@edx.org')
        login_form.find_by_name('password').fill("test")
        login_form.find_by_name('submit').click()
    world.retry_on_exception(fill_login_form)

    # The dashboard's new-course button confirms a successful login.
    assert_true(world.is_css_present('.new-course-button'))
    world.scenario_dict['USER'] = get_user(name + '@edx.org')
@step(u'the user "([^"]*)" exists( as a course (admin|staff member|is_staff))?$')
def create_other_user(_step, name, has_extra_perms, role_name):
    """Create a user, optionally granting global-staff or course roles."""
    email = name + '@edx.org'
    user = create_studio_user(uname=name, password="test", email=email)
    if has_extra_perms:
        if role_name == "is_staff":
            # Django-level staff flag, not a course-scoped role.
            GlobalStaff().add_users(user)
        else:
            if role_name == "admin":
                # admins get staff privileges, as well
                roles = (CourseStaffRole, CourseInstructorRole)
            else:
                # "staff member" grants only the staff role.
                roles = (CourseStaffRole,)
            course_key = world.scenario_dict["COURSE"].id
            # Role assignment requires a global admin actor.
            global_admin = AdminFactory()
            for role in roles:
                auth.add_users(global_admin, role(course_key), user)
@step('I log out')
def log_out(_step):
    # NOTE(review): relative path ('logout', not '/logout') — resolves
    # against the current URL; confirm this is intentional.
    world.visit('logout')
| BehavioralInsightsTeam/edx-platform | cms/djangoapps/contentstore/features/common.py | Python | agpl-3.0 | 12,703 | [
"VisIt"
] | 0ce7e37eb701f89c8d76a01a0d20efe1b1511f61436441fb7ae9c99d1a65aff3 |
# -*- coding: utf-8 -*-
#
# Firefly documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 16 13:22:41 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# No Sphinx extensions are enabled for this project.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Firefly'
copyright = u'2016, Zachary Priddy'
author = u'Zachary Priddy'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'a0.1'
# The full version, including alpha/beta/rc tags.
release = u'a0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. Uses the Read the Docs theme installed as a
# separate package (imported at the top of this file).
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Fireflydoc'

# -- Options for LaTeX output ---------------------------------------------

# All LaTeX options are left at their defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',

# Latex figure (float) alignment
#'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Firefly.tex', u'Firefly Documentation',
     u'Zachary Priddy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'firefly', u'Firefly Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Firefly', u'Firefly Documentation',
     author, 'Firefly', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| Firefly-Automation/Firefly | docs/conf.py | Python | apache-2.0 | 9,258 | [
"Firefly"
] | a7f85df5e77fd1268b3b587630ca3d0825ae8320b12720d193fc6132c137bd92 |
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
"""
Numbers to bytes or strings and URLs coder/decoders.
"""
# Byte used to right-pad encoded values up to MAXLEN.
padding = b'/'
# RFC 1924 base-85 digit alphabet; index i is the symbol for digit value i.
b85_symbols = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~'
len_b85_symbols = len(b85_symbols)


def to_base_n(num, base):
    """
    Convert `num` number to a string representing this number in base `base`
    where 2 <= base <= 85.
    Use recursion for progressive encoding.
    """
    # ensure that base is within bounds
    assert base >= 2 and base <= len_b85_symbols
    if num == 0:
        return b'0'
    # recurse with a floor division to encode from left to right
    based = to_base_n(num // base, base)
    # remove leading zeroes resulting from floor-based encoding
    stripped = based.lstrip(b'0')
    # pick the symbol in the symbol table using a modulo.
    # Use a one-byte slice rather than indexing: on Python 3, indexing a
    # bytes object yields an int, which cannot be concatenated to bytes.
    digit = num % base
    encoded = b85_symbols[digit:digit + 1]
    return stripped + encoded


# Number of base-85 digits needed for the largest 32-bit value; used as
# the fixed width that to_base85 pads to.
MAXLEN = len(to_base_n(pow(2, 32) - 1, 85))
def to_base85(num):
    """
    Encode `num` as a base-85 bytes string, right-padded with the padding
    byte up to MAXLEN.

    The 85-symbol digit alphabet is the one from RFC 1924
    (http://www.faqs.org/rfcs/rfc1924.html):
    '0'..'9', 'A'..'Z', 'a'..'z', '!', '#', '$', '%', '&', '(',
    ')', '*', '+', '-', ';', '<', '=', '>', '?', '@', '^', '_',
    '`', '{', '|', '}', and '~'.
    See also http://en.wikipedia.org/wiki/Base_85 for the rationale for
    Base 85. Git also uses https://github.com/git/git/blob/master/base85.c
    """
    encoded = to_base_n(num, 85)
    shortfall = MAXLEN - len(encoded)
    if shortfall > 0:
        encoded += padding * shortfall
    return encoded
def to_base10(s, b=36):
    """
    Convert a bytes string `s` representing a number in base `b` back to an
    integer, where 2 <= b <= 85. Padding bytes are ignored.
    """
    assert b <= len(b85_symbols) and b >= 2, 'Base must be in range(2, %d)' % (len(b85_symbols))
    # strip padding
    digits = s.replace(padding, b'')
    # Horner's rule: accumulate left-to-right, multiplying by the base.
    total = 0
    for digit in digits:
        total = total * b + b85_symbols.index(digit)
    return total
def num_to_bin(num):
    """
    Convert a non-negative `num` integer or long to a binary string in
    big-endian byte order: the most significant byte is at the beginning
    of the string. Zero encodes to a single NUL byte.

    NOTE: The code below does not use struct for conversions to handle
    arbitrary long binary strings (such as a SHA512 digest) and convert that
    safely to a long: using structs does not work easily for this.
    """
    # Zero has no significant bytes; encode it as one NUL byte.
    if num == 0:
        return b'\x00'
    buf = bytearray()
    while num > 0:
        # collect the least significant byte value, then shift it out.
        # bytearray.append accepts an int on both Python 2 and 3 (the
        # original chr()-based code broke under Python 3).
        buf.append(num & 0xFF)
        num = num >> 8
    # reverse so the most significant byte leads (big-endian), matching
    # what bin_to_num expects when decoding.
    buf.reverse()
    return bytes(buf)
def bin_to_num(binstr):
    """
    Convert a big-endian byte-ordered binary string (most significant byte
    first) back to an integer or long. Inverse of num_to_bin.
    """
    # this will cast to long as needed
    num = 0
    # Iterate a bytearray: it yields ints on both Python 2 and 3 (the
    # original ord()-based loop broke under Python 3, where iterating
    # bytes already yields ints).
    for byte in bytearray(binstr):
        # the most significant byte is at the start of the string so we
        # multiply the accumulator by 256 (e.g. <<8), add the current
        # byte, then move to the next byte and repeat
        num = (num << 8) + byte
    return num
from base64 import standard_b64decode as stddecode
from base64 import urlsafe_b64encode as b64encode
def urlsafe_b64encode(s):
    """
    Encode the bytes `s` as URL-safe base64 ('-' and '_' replace the
    standard '+' and '/').
    """
    return b64encode(s)
def urlsafe_b64decode(b64):
    """
    Decode a URL-safe base64-encoded bytes string.

    The URL-safe characters are translated back to the standard alphabet
    and decoded with the standard decoder, working around a bug in the
    standard library's url-safe decoder.
    """
    standard = b64.replace(b'-', b'+').replace(b'_', b'/')
    return stddecode(standard)
def _encode(num):
    """
    Encode a number (int or long) in url safe base64.
    """
    # Big-endian bytes of the number, then URL-safe base64 of those bytes.
    return b64encode(num_to_bin(num))
| yashdsaraf/scancode-toolkit | src/commoncode/codec.py | Python | apache-2.0 | 5,524 | [
"VisIt"
] | 2b33272d27c55cee42d2e5cbc1b14c6d9612109dd6d7a8d7966dd57aeebee277 |
import residue_sidechains as res_scs
import read_amber_prmtop as amber
import chirality as chir
import networkx as nx
def res_atom_graph(molecule_graph, residues):
    """Return the subgraph of the molecule's atom graph restricted to the
    atoms belonging to the given residues.

    :param molecule_graph: molecule containing residue and atom graphs
    :type molecule_graph: amber.Molecule
    :param residues: residues whose atoms should be retained
    :type residues: amber.Residue
    :return: graph containing only atoms in residues
    :rtype: nx.Graph
    """
    atoms_in_residues = []
    for residue in residues:
        atoms_in_residues.extend(residue.atoms)
    return molecule_graph.atoms.subgraph(atoms_in_residues)
def find_sidechains(molecule_graph):
    """Locate amino-acid sidechains in the molecule's atom graph.

    Returns a dict mapping each sidechain-bearing alpha carbon (a chiral
    centre) to the subgraph of its sidechain atoms. Ring-containing
    sidechains through the alpha carbon (e.g. proline) are excluded.
    """
    # Identify chiral atoms
    atoms = molecule_graph.atoms
    chiral_centres = chir.get_chiral_sets(atoms)
    # Identify sidechains (Ca-Cb-X), apart from proline and glycine.
    sidechains = {}
    # Detection of sidechains requires the multiple bonds be present in the atom graph.
    chir.multi_bonds(atoms)
    for k, v in chiral_centres.items():
        # Look for a backbone carbonyl carbon among the neighbours: a C
        # bonded to a (ghost) O plus an N or O — i.e. the amide carbon.
        carbons = [atom for atom in v if atom.element == 'C']
        amides = [carbon for carbon in carbons
                  if any([type(nb) == chir.GhostAtom and nb.element == 'O' for nb in nx.neighbors(atoms, carbon)])
                  and any([nb.element == 'N' or nb.element == 'O' for nb in nx.neighbors(atoms, carbon)])]
        nbs_n = [nb for nb in v if nb.element == 'N']
        if amides and nbs_n:
            # k looks like an alpha carbon: record its backbone bonds.
            amide_bond = (k, amides[0])
            n_bond = (k, nbs_n[0])
            h_bond = (k, [h for h in nx.neighbors(atoms, k) if h.element == 'H'][0])
            # Now find sidechains by cutting the Ca-C, Ca-N and Ca-H bonds
            atoms.remove_edges_from([amide_bond, n_bond, h_bond])
            # The connected component still containing Ca is the sidechain;
            # drop ghost atoms used for multiple-bond bookkeeping.
            sidechain_atoms = [atom for atom in [comp for comp in nx.connected_components(atoms) if k in comp][0]
                               if type(atom) != chir.GhostAtom]
            # Restore the backbone bonds before examining the next centre.
            atoms.add_edges_from([amide_bond, n_bond, h_bond])
            # Reject cyclic sidechains through Ca (e.g. proline).
            if not any([k in cycle for cycle in nx.cycle_basis(atoms.subgraph(sidechain_atoms))]):
                sidechains[k] = atoms.subgraph(sidechain_atoms)
    # Undo the ghost atoms added by multi_bonds above.
    chir.remove_ghost_atoms(atoms)
    return sidechains
def residue_from_sidechain(sidechains):
    """ For each sidechain in sidechains, determines the type of residue and generates the mapping from the atom types
    in residue_sidechains to atoms in the molecule.

    :param sidechains: {Ca: sidechain atoms graph} dictionary
    :return: residues: residue type
    :return: mapping: atom mapping dictionary
    """
    residues = {}
    mapping = {}
    for sc in sidechains:
        # Try each reference amino-acid graph until one is isomorphic
        # (matching on element labels) to this sidechain.
        for res in res_scs.amino_acids:
            graph_match = nx.algorithms.isomorphism.GraphMatcher(res_scs.amino_acids[res], sidechains[sc],
                                                                 node_match=(lambda x, y: x['element'] == y['element']))
            if graph_match.is_isomorphic():
                residues[sc.residue] = res
                mapping[sc.residue] = graph_match.mapping
                # First match wins; stop trying other residue templates.
                break
    return residues, mapping
if __name__ == "__main__":
import os.path
topology_data = amber.read_topology(os.path.normpath("/home/khs26/flu.prmtop"))
molecule = amber.create_molecule(topology_data)
cands = chir.tetravalent_atoms(molecule.atoms)
chir.multi_bonds(molecule.atoms)
cands2 = chir.rankable_neighbours(cands)[0]
print len(molecule.atoms), len(cands), len(cands2)
# scs = find_sidechains(molecule, [res for res in molecule.residues.nodes()])
# ress, maps = residue_from_sidechain(scs)
# for k, v in sorted(ress.items()):
# print k, v
# for i, j in res_scs.dihedrals:
# print i, j, chir.chiral_order(molecule.atoms, maps[k][i]), chir.chiral_order(molecule.atoms, maps[k][j]) | khs26/rotamer_library | rotamer/topology/identify_residue.py | Python | mit | 3,770 | [
"Amber"
] | 9dc41c6a45fe9758884fee5bd124aa5b7582f58a258bbf71715d1e7480dce1f2 |
# -*- coding: utf-8 -*-
#
# Ray documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 1 13:19:58 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import shutil
import sys
import os
import urllib
sys.path.insert(0, os.path.abspath('.'))
from custom_directives import CustomGalleryItemDirective
# These lines added to enable Sphinx to work without installing Ray.
import mock
class ChildClassMock(mock.MagicMock):
    """MagicMock whose attribute access returns the Mock *class* itself,
    so mocked modules can be subclassed (e.g. Keras callbacks below)."""
    @classmethod
    def __getattr__(cls, name):
        return mock.Mock
# Modules to stub out so autodoc can import Ray without installing its
# heavyweight optional dependencies.
MOCK_MODULES = [
    "ax",
    "ax.service.ax_client",
    "blist",
    "ConfigSpace",
    "gym",
    "gym.spaces",
    "horovod",
    "horovod.ray",
    "kubernetes",
    "mxnet",
    "mxnet.model",
    "psutil",
    "ray._raylet",
    "ray.core.generated",
    "ray.core.generated.common_pb2",
    "ray.core.generated.gcs_pb2",
    "ray.core.generated.ray.protocol.Task",
    "scipy.signal",
    "scipy.stats",
    "setproctitle",
    "tensorflow_probability",
    "tensorflow",
    "tensorflow.contrib",
    "tensorflow.contrib.all_reduce",
    "tree",
    "tensorflow.contrib.all_reduce.python",
    "tensorflow.contrib.layers",
    "tensorflow.contrib.rnn",
    "tensorflow.contrib.slim",
    "tensorflow.core",
    "tensorflow.core.util",
    "tensorflow.keras",
    "tensorflow.python",
    "tensorflow.python.client",
    "tensorflow.python.util",
    "torch",
    "torch.distributed",
    "torch.nn",
    "torch.nn.parallel",
    "torch.utils.data",
    "torch.utils.data.distributed",
    "wandb",
    "xgboost",
    "zoopt",
]
# NOTE(review): real scipy submodules are imported *before* the mock loop
# replaces their sys.modules entries below — presumably deliberate so this
# module keeps real bindings while later importers get mocks; confirm.
import scipy.stats
import scipy.linalg

for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()

# ray.rllib.models.action_dist.py and
# ray.rllib.models.lstm.py will use tf.VERSION
sys.modules["tensorflow"].VERSION = "9.9.9"

# These are subclassed in the code base, so they need ChildClassMock.
sys.modules["tensorflow.keras.callbacks"] = ChildClassMock()
sys.modules["pytorch_lightning"] = ChildClassMock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../../python/"))
import ray
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx_click.ext',
'sphinx_tabs.tabs',
'sphinx-jsonschema',
'sphinx_gallery.gen_gallery',
'sphinxemoji.sphinxemoji',
'sphinx_copybutton',
'versionwarning.extension',
]
versionwarning_admonition_type = "tip"
versionwarning_messages = {
"master": (
"This document is for the master branch. "
'Visit the <a href="/en/latest/">latest pip release documentation here</a>.'
),
"latest": (
"This document is for the latest pip release. "
'Visit the <a href="/en/master/">master branch documentation here</a>.'
),
}
versionwarning_body_selector = "#main-content"
sphinx_gallery_conf = {
"examples_dirs": ["../examples",
"tune/_tutorials"], # path to example scripts
# path where to save generated examples
"gallery_dirs": ["auto_examples", "tune/tutorials"],
"ignore_pattern": "../examples/doc_code/",
"plot_gallery": "False",
# "filename_pattern": "tutorial.py",
# "backreferences_dir": "False",
# "show_memory': False,
# 'min_reported_time': False
}
for i in range(len(sphinx_gallery_conf["examples_dirs"])):
gallery_dir = sphinx_gallery_conf["gallery_dirs"][i]
source_dir = sphinx_gallery_conf["examples_dirs"][i]
try:
os.mkdir(gallery_dir)
except OSError:
pass
# Copy rst files from source dir to gallery dir.
for f in glob.glob(os.path.join(source_dir, '*.rst')):
shutil.copy(f, gallery_dir)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
from recommonmark.parser import CommonMarkParser
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': CommonMarkParser,
}
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ray'
copyright = u'2019, The Ray Team'
author = u'The Ray Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from ray import __version__ as version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
exclude_patterns += sphinx_gallery_conf['examples_dirs']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_book_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"repository_url": "https://github.com/ray-project/ray",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"path_to_docs": "doc/source",
"home_page_in_toc": True,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f"Ray v{release}"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/ray_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {'**': ['index.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Raydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Ray.tex', u'Ray Documentation', u'The Ray Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'ray', u'Ray Documentation', [author], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Ray', u'Ray Documentation', author, 'Ray',
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# pcmoritz: To make the following work, you have to run
# sudo pip install recommonmark
# Python methods should be presented in source code order
autodoc_member_order = 'bysource'
# Taken from https://github.com/edx/edx-documentation
FEEDBACK_FORM_FMT = "https://github.com/ray-project/ray/issues/new?title={title}&labels=docs&body={body}"
def feedback_form_url(project, page):
"""Create a URL for feedback on a particular page in a project."""
return FEEDBACK_FORM_FMT.format(
title=urllib.parse.quote(
"[docs] Issue on `{page}.rst`".format(page=page)),
body=urllib.parse.quote(
"# Documentation Problem/Question/Comment\n"
"<!-- Describe your issue/question/comment below. -->\n"
"<!-- If there are typos or errors in the docs, feel free to create a pull-request. -->\n"
"\n\n\n\n"
"(Created directly from the docs)\n"))
def update_context(app, pagename, templatename, context, doctree):
"""Update the page rendering context to include ``feedback_form_url``."""
context['feedback_form_url'] = feedback_form_url(app.config.project,
pagename)
# see also http://searchvoidstar.tumblr.com/post/125486358368/making-pdfs-from-markdown-on-readthedocsorg-using
def setup(app):
app.connect('html-page-context', update_context)
app.add_stylesheet('css/custom.css')
# Custom directives
app.add_directive('customgalleryitem', CustomGalleryItemDirective)
| robertnishihara/ray | doc/source/conf.py | Python | apache-2.0 | 14,389 | [
"VisIt"
] | 1f074ae5992e707ffa797aecafd6b9be1e8325a9f618fadd1795c5a4cc99950e |
# $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" unit testing code for the C++ BitVects
"""
import unittest,os,sys
import cPickle
from rdkit.DataStructs import cDataStructs
klass = cDataStructs.SparseBitVect
def feq(n1,n2,tol=1e-4):
return abs(n1-n2)<=tol
def ieq(n1,n2):
return abs(n1-n2)==0
class TestCase(unittest.TestCase):
def setUp(self):
print '\n%s: '%self.shortDescription(),
sys.stdout.flush()
def testSparseIdx(self):
""" test indexing into SparseBitVects
"""
v = klass(10)
ok = 1
v[0] = 1
v[2] = 1
v[9] = 1
try:
v[10] = 1
except IndexError:
ok = 1
except:
assert 0, 'setting high bit should have failed with an IndexError'
else:
assert 0, 'setting high bit should have failed'
assert v[0] == 1, 'bad bit'
assert v[1] == 0, 'bad bit'
assert v[2] == 1, 'bad bit'
assert v[9] == 1, 'bad bit'
assert v[-1] == 1, 'bad bit'
assert v[-2] == 0, 'bad bit'
try:
foo = v[10]
except IndexError:
ok = 1
except:
assert 0, 'getting high bit should have failed with an IndexError'
else:
assert 0, 'getting high bit should have failed'
def testSparseBitGet(self):
""" test operations to get sparse bits
"""
v = klass(10)
v[0] = 1
v[2] = 1
v[6] = 1
assert len(v)==10,'len(SparseBitVect) failed'
assert v.GetNumOnBits()==3,'NumOnBits failed'
assert tuple(v.GetOnBits())==(0,2,6), 'GetOnBits failed'
def testSparseBitOps(self):
""" test bit operations on SparseBitVects
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
assert tuple((v1&v2).GetOnBits()) == (0,6),'binary & failed'
assert tuple((v1&v2).GetOnBits()) == (0,6),'binary & failed'
assert tuple((v1|v2).GetOnBits()) == (0,2,3,6),'binary | failed'
assert tuple((v1^v2).GetOnBits()) == (2,3),'binary ^ failed'
def testTanimotoSim(self):
""" test Tanimoto Similarity measure
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = klass(10)
v3[1] = 1
v3[4] = 1
v3[8] = 1
assert feq(cDataStructs.TanimotoSimilarity(v1,v1),1.0),'bad v1,v1 TanimotoSimilarity'
assert feq(cDataStructs.TanimotoSimilarity(v2,v2),1.0),'bad v2,v2 TanimotoSimilarity'
assert feq(cDataStructs.TanimotoSimilarity(v1,v2),0.5),'bad v1,v2 TanimotoSimilarity'
assert feq(cDataStructs.TanimotoSimilarity(v2,v1),0.5),'bad v2,v1 TanimotoSimilarity'
assert feq(cDataStructs.TanimotoSimilarity(v1,v3),0.0),'bad v1,v3 TanimotoSimilarity'
assert feq(cDataStructs.TanimotoSimilarity(v2,v3),0.0),'bad v2,v3 TanimotoSimilarity'
def testOnBitSim(self):
""" test On Bit Similarity measure
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = klass(10)
v3[1] = 1
v3[4] = 1
v3[8] = 1
assert feq(cDataStructs.OnBitSimilarity(v1,v1),1.0),'bad v1,v1 OnBitSimilarity'
assert feq(cDataStructs.OnBitSimilarity(v2,v2),1.0),'bad v2,v2 OnBitSimilarity'
assert feq(cDataStructs.OnBitSimilarity(v1,v2),0.5),'bad v1,v2 OnBitSimilarity'
assert feq(cDataStructs.OnBitSimilarity(v2,v1),0.5),'bad v2,v1 OnBitSimilarity'
assert feq(cDataStructs.OnBitSimilarity(v1,v3),0.0),'bad v1,v3 OnBitSimilarity'
assert feq(cDataStructs.OnBitSimilarity(v2,v3),0.0),'bad v2,v3 OnBitSimilarity'
def testNumBitsInCommon(self):
""" test calculation of Number of Bits in Common
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = klass(10)
v3[1] = 1
v3[4] = 1
v3[8] = 1
assert ieq(cDataStructs.NumBitsInCommon(v1,v1),10),'bad v1,v1 NumBitsInCommon'
assert ieq(cDataStructs.NumBitsInCommon(v2,v2),10),'bad v2,v2 NumBitsInCommon'
assert ieq(cDataStructs.NumBitsInCommon(v1,v2),8),'bad v1,v2 NumBitsInCommon'
assert ieq(cDataStructs.NumBitsInCommon(v2,v1),8),'bad v2,v1 NumBitsInCommon'
assert ieq(cDataStructs.NumBitsInCommon(v1,v3),4),'bad v1,v3 NumBitsInCommon'
assert ieq(cDataStructs.NumBitsInCommon(v2,v3),4),'bad v2,v3 NumBitsInCommon'
def testAllBitSim(self):
""" test All Bit Similarity measure
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = klass(10)
v3[1] = 1
v3[4] = 1
v3[8] = 1
assert feq(cDataStructs.AllBitSimilarity(v1,v1),1.0),'bad v1,v1 AllBitSimilarity'
assert feq(cDataStructs.AllBitSimilarity(v2,v2),1.0),'bad v2,v2 AllBitSimilarity'
assert feq(cDataStructs.AllBitSimilarity(v1,v2),0.8),'bad v1,v2 AllBitSimilarity'
assert feq(cDataStructs.AllBitSimilarity(v2,v1),0.8),'bad v2,v1 AllBitSimilarity'
assert feq(cDataStructs.AllBitSimilarity(v1,v3),0.4),'bad v1,v3 AllBitSimilarity'
assert feq(cDataStructs.AllBitSimilarity(v2,v3),0.4),'bad v2,v3 AllBitSimilarity'
def testStringOps(self):
""" test serialization operations
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
s = v1.ToBinary()
v2 = klass(s)
assert tuple(v2.GetOnBits())==tuple(v1.GetOnBits()),'To/From string failed'
def testOnBitsInCommon(self):
""" test OnBitsInCommon
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = cDataStructs.OnBitsInCommon(v1,v2)
assert tuple(v3)==(0,6),'bad on bits in common'
def testOffBitsInCommon(self):
""" test OffBitsInCommon
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = cDataStructs.OffBitsInCommon(v1,v2)
assert tuple(v3)==(1,4,5,7,8,9),'bad off bits in common'
def testOnBitProjSimilarity(self):
""" test OnBitProjSimilarity
"""
v1 = klass(10)
v1[1] = 1
v1[2] = 1
v1[3] = 1
v2 = klass(10)
v2[2] = 1
v2[3] = 1
res = cDataStructs.OnBitProjSimilarity(v1,v2)
assert feq(res[0],0.666667),'bad 1st OnBitsProjSimilarity'
assert feq(res[1],1.0),'bad 2nd OnBitsProjSimilarity'
res = cDataStructs.OnBitProjSimilarity(v2,v1)
assert feq(res[1],0.666667),'bad 1st OnBitsProjSimilarity'
assert feq(res[0],1.0),'bad 2nd OnBitsProjSimilarity'
def testOffBitProjSimilarity(self):
""" test OffBitProjSimilarity
"""
v1 = klass(10)
v1[1] = 1
v1[2] = 1
v1[3] = 1
v2 = klass(10)
v2[2] = 1
v2[3] = 1
res = cDataStructs.OffBitProjSimilarity(v1,v2)
assert feq(res[0],1.0),'bad 1st OffBitsProjSimilarity'
assert feq(res[1],0.875),'bad 2nd OffBitsProjSimilarity'
res = cDataStructs.OffBitProjSimilarity(v2,v1)
assert feq(res[1],1.0),'bad 1st OffBitsProjSimilarity'
assert feq(res[0],0.875),'bad 2nd OffBitsProjSimilarity'
def testPkl(self):
""" test pickling
"""
v1 = klass(10)
v1[1] = 1
v1[2] = 1
v1[3] = 1
pklName = 'foo.pkl'
outF = open(pklName,'wb+')
cPickle.dump(v1,outF)
outF.close()
inF = open(pklName,'rb')
v2 = cPickle.load(inF)
inF.close()
os.unlink(pklName)
assert tuple(v1.GetOnBits())==tuple(v2.GetOnBits()),'pkl failed'
def testFingerprints(self):
" test the parsing of daylight fingerprints "
#actual daylight output:
rawD="""
0,Cc1n[nH]c(=O)nc1N,.b+HHa.EgU6+ibEIr89.CpX0g8FZiXH+R0+Ps.mr6tg.2
1,Cc1n[nH]c(=O)[nH]c1=O,.b7HEa..ccc+gWEIr89.8lV8gOF3aXFFR.+Ps.mZ6lg.2
2,Cc1nnc(NN)nc1O,.H+nHq2EcY09y5EIr9e.8p50h0NgiWGNx4+Hm+Gbslw.2
3,Cc1nnc(N)nc1C,.1.HHa..cUI6i5E2rO8.Op10d0NoiWGVx.+Hm.Gb6lo.2
"""
dists="""0,0,1.000000
0,1,0.788991
0,2,0.677165
0,3,0.686957
1,1,1.000000
1,2,0.578125
1,3,0.591304
2,2,1.000000
2,3,0.732759
3,3,1.000000
"""
fps = []
for line in rawD.split('\n'):
if line:
sbv = klass(256)
id,smi,fp=line.split(',')
cDataStructs.InitFromDaylightString(sbv,fp)
fps.append(sbv)
ds = dists.split('\n')
whichd=0
for i in range(len(fps)):
for j in range(i,len(fps)):
idx1,idx2,tgt = ds[whichd].split(',')
whichd += 1
tgt = float(tgt)
dist = cDataStructs.TanimotoSimilarity(fps[i],fps[j])
assert feq(tgt,dist),'tanimoto between fps %d and %d failed'%(int(idx1),int(idx2))
def testFold(self):
""" test folding fingerprints
"""
v1 = klass(16)
v1[1] = 1
v1[12] = 1
v1[9] = 1
try:
v2 = cDataStructs.FoldFingerprint(v1)
except:
assert 0,'Fold with no args failed'
assert v1.GetNumBits()/2==v2.GetNumBits(),'bad num bits post folding'
try:
v2 = cDataStructs.FoldFingerprint(v1,2)
except:
assert 0,'Fold with arg failed'
assert v1.GetNumBits()/2==v2.GetNumBits(),'bad num bits post folding'
v2 = cDataStructs.FoldFingerprint(v1,4)
assert v1.GetNumBits()/4==v2.GetNumBits(),'bad num bits post folding'
def testOtherSims(self):
""" test other similarity measures
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
assert feq(cDataStructs.CosineSimilarity(v1,v2),.6667)
assert feq(cDataStructs.KulczynskiSimilarity(v1,v2),.6667)
assert feq(cDataStructs.DiceSimilarity(v1,v2),.6667)
assert feq(cDataStructs.SokalSimilarity(v1,v2),.3333)
assert feq(cDataStructs.McConnaugheySimilarity(v1,v2),.3333)
assert feq(cDataStructs.AsymmetricSimilarity(v1,v2),.6667)
assert feq(cDataStructs.BraunBlanquetSimilarity(v1,v2),.6667)
assert feq(cDataStructs.RusselSimilarity(v1,v2),.2000)
assert feq(cDataStructs.RogotGoldbergSimilarity(v1,v2),.7619)
def testQuickSims(self):
""" the asymmetric similarity stuff (bv,pkl)
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
pkl = v2.ToBinary()
v2 = pkl
assert feq(cDataStructs.CosineSimilarity(v1,v2),.6667)
assert feq(cDataStructs.KulczynskiSimilarity(v1,v2),.6667)
assert feq(cDataStructs.DiceSimilarity(v1,v2),.6667)
assert feq(cDataStructs.SokalSimilarity(v1,v2),.3333)
assert feq(cDataStructs.McConnaugheySimilarity(v1,v2),.3333)
assert feq(cDataStructs.AsymmetricSimilarity(v1,v2),.6667)
assert feq(cDataStructs.BraunBlanquetSimilarity(v1,v2),.6667)
assert feq(cDataStructs.RusselSimilarity(v1,v2),.2000)
assert feq(cDataStructs.RogotGoldbergSimilarity(v1,v2),.7619)
if __name__ == '__main__':
unittest.main()
| rdkit/rdkit-orig | rdkit/DataStructs/UnitTestcBitVect.py | Python | bsd-3-clause | 10,915 | [
"RDKit"
] | 6269788acdb9d38d87b221a21c9d68226c44c4bfdef00fa14ec5a0ca8a42eef2 |
#!/usr/bin/env python
"""
This is a simple installation script for casual users of pymatgen who simply
plan to use pymatgen as a basic analysis library and is not planning to
develop on it. This script should work on most Linux and Mac systems that
have Python 2.7+ installed and setuptools installed. These are the only
required pre-requisites. Once those are installed, the script should take
care of the remainder of the installation process.
There are only a few options in this script. Please note that you probably
have to *run all commands with sudo* for the installation to proceed correctly.
Simply running:
./pmg_install
will install pymatgen with the basic dependencies.
Running:
./pmg_install -f
will install pymatgen with a few more optional packages and also start an
initial setup process that guides you through basic configuration
for POTCAR and Materials API support.
Report any issues or suggestions for this script to shyuep@gmail.com.
"""
__author__ = "Shyue Ping Ong"
__version__ = "1.0"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 28, 2013"
import sys
import subprocess
import urllib
import os
import shutil
def build_enum(fortran_command="gfortran"):
enumlib_url = "http://downloads.sourceforge.net/project/enum/enum/enum.tar.gz"
currdir = os.getcwd()
state = True
try:
os.makedirs("enumlib")
os.chdir("enumlib")
urllib.urlretrieve(enumlib_url, "enum.tar.gz")
subprocess.call(["tar", "-zxf", "enum.tar.gz"])
os.chdir("celib")
os.chdir("trunk")
os.environ["F90"] = fortran_command
subprocess.call(["make"])
os.chdir(os.path.join("..", ".."))
enumpath = os.path.join("enumlib", "trunk")
os.chdir(enumpath)
subprocess.call(["make"])
for f in ["multienum.x", "makestr.x"]:
subprocess.call(["make", f])
shutil.copy(f, os.path.join("..", "..", ".."))
except Exception as ex:
print(str(ex))
state = False
finally:
os.chdir(currdir)
shutil.rmtree("enumlib")
return state
def build_bader(fortran_command="gfortran"):
bader_url = "http://theory.cm.utexas.edu/henkelman/code/bader/download/bader.tar.gz"
currdir = os.getcwd()
state = True
try:
urllib.urlretrieve(bader_url, "bader.tar.gz")
subprocess.call(["tar", "-zxf", "bader.tar.gz"])
os.chdir("bader")
subprocess.call(["cp", "makefile.osx_"+fortran_command, "makefile"])
subprocess.call(["make"])
shutil.copy("bader", os.path.join("..", "bader_exe"))
os.chdir("..")
shutil.rmtree("bader")
os.remove("bader.tar.gz")
shutil.move("bader_exe", "bader")
except Exception as ex:
print(str(ex))
state = False
finally:
os.chdir(currdir)
return state
try:
py_ver = sys.version_info
print("Detected Python version %s" % ".".join(["%s" % i for i in py_ver]))
if py_ver < (2, 7) or py_ver >= (2, 8):
print("Python version 2.7+ required. Download and install the necessary "
"python version from http://www.python.org/download/.")
sys.exit(-1)
except:
print("Python version 2.7+ required. Download and install the necessary "
"python version from http://www.python.org/download/.")
sys.exit(-1)
try:
import setuptools
print("Detected setuptools version {}".format(setuptools.__version__))
except ImportError:
print("setuptools not detected. Get it from https://pypi.python"
".org/pypi/setuptools and follow the instructions to install first.")
sys.exit(-1)
try:
gcc_ver = subprocess.Popen(["gcc", "--version"], stdout=subprocess.PIPE)\
.communicate()[0]
except:
print("gcc not found in PATH. gcc is needed for installation of numpy "
"and C extensions. For Mac users, please install Xcode and its "
"corresponding command-line tools first.")
sys.exit(-1)
try:
import pip
print("Detected pip version {}".format(pip.__version__))
except ImportError:
print("pip not detected. Installing...")
subprocess.call(["easy_install", "pip"])
try:
import numpy
from numpy.distutils.misc_util import get_numpy_include_dirs
print("Detected numpy version {}".format(numpy.__version__))
except ImportError:
print("numpy.distutils.misc_util cannot be imported. Installing...")
subprocess.call(["pip", "install", "-q", "numpy>=1.8.0"])
from numpy.distutils.misc_util import get_numpy_include_dirs
for pk in ["pyhull>=1.3.6", "pyyaml", "PyCifRW>=3.3", "requests>=1.0",
"pybtex>=0.16"]:
print("Installing {}".format(pk))
ret = subprocess.call(["pip", "install", "-q", pk])
if ret != 0:
ret = subprocess.call(["easy_install", pk])
if ret != 0:
print("Error installing required dependency {}".format(pk))
sys.exit(-1)
print
if subprocess.call(["pip", "install", "pymatgen"]) != 0:
print("Error installing pymatgen")
sys.exit(-1)
print
enum = False
bader = False
if "-f" in sys.argv:
for pk in ["matplotlib>1.1"]:
if subprocess.call(["pip", "install", pk]) != 0:
print("Unable to install {}. Skipping...".format(pk))
if subprocess.call([
"pip", "install", "-Ivq",
"https://wiki.fysik.dtu.dk/ase-files/python-ase-3.6.0.2515.tar.gz"]
) != 0:
print("Unable to install ASE. Skipping...")
print
fortran_command = None
try:
if subprocess.call(["ifort", "--version"]) == 0:
print("Found ifort")
fortran_command = "ifort"
elif subprocess.call(["gfortran", "--version"]) == 0:
print("Found gfortran")
fortran_command = "gfortran"
except:
fortran_command = None
if fortran_command is not None:
print("Building enumlib")
enum = build_enum(fortran_command)
print
print("Building bader")
bader = build_bader(fortran_command)
print
else:
print("No fortran compiler found. Skipping enumlib and bader build.")
print("Performing POTCAR setup. Press Ctrl-C at any prompt to skip this "
"step.")
try:
subprocess.call(["potcar_setup"])
except:
print("Skipping POTCAR setup.")
print
print("------------ Setup complete --------------")
print("You still need to perform a few manual changes.")
print
if enum or bader:
print("Please add {} to your PATH or move the executables multinum.x, "
"makestr.x and bader to a location in your PATH."
.format(os.path.abspath(".")))
print
print("To use the Materials API, get your Materials API key at "
"https://www.materialsproject.org/profile and add it to your "
"environment")
print("export MAPI_KEY=YOUR_API_KEY")
print | migueldiascosta/pymatgen | docs/_static/pmg_install.py | Python | mit | 6,875 | [
"ASE",
"pymatgen"
] | 7b302ec03a2f66aa43c0301e1ce8f4189aee9204d4062889a9d6589a9f3b9800 |
# Language translations
text = {
# Main menu
'Select a mod': {
'en': 'Select a mod',
'fr': 'Sélectionnez un mod',
'es': 'Selecciona un mod',
'ru': 'Выбрать мод',
},
'Miscellaneous Stuff': {
'en': 'Miscellaneous Stuff',
'fr': 'Divers',
'es': 'Opciones Varias',
'ru': 'Опции',
},
'Exit': {
'en': 'Exit',
'fr': 'Quitter',
'es': 'Salir',
'ru': 'Выход',
},
# Jud6s
'Select a ruleset': {
'en': 'Select a ruleset',
'fr': 'Sélectionnez une option',
'es': 'Selecciona un conjunto de reglas',
'ru': 'Выбрать набор правил',
},
# Instant Start
'Choose a Start': {
'en': 'Choose a Start',
'fr': 'Choisissez un Starter',
'es': 'Elige un Starter',
'ru': 'Выбрать Начальный',
},
'Random Start': {
'en': 'Random Start',
'fr': 'Starter Aléatoire',
'es': 'Starter Aleatorio',
'ru': 'Случайный Начальный',
},
'Click a build to play it': {
'en': 'Click a build to play it',
'fr': 'Cliquez sur une option pour la jouer',
'es': 'Clickea una build para jugar',
'ru': 'Кликни на сборке чтобы сыграть ее',
},
'Search': {
'en': 'Search',
'fr': 'Rechercher',
'es': 'Busqueda',
'ru': 'Поиск',
},
'Treasure Room Starts': {
'en': 'Treasure Room Starts',
'fr': 'Starters Treasure Room',
'es': 'Starters de la Treasure Room',
'ru': 'Предмет из Комнаты Сокровищ',
},
'Devil Room Starts': {
'en': 'Devil Room Starts',
'fr': 'Starters Devil Room',
'es': 'Starters de la Devil Room',
'ru': 'Предмет из Комнаты Дьявола',
},
'Angel Room Starts': {
'en': 'Angel Room Starts',
'fr': 'Starters Angel Room',
'es': 'Starters de la Angel Room',
'ru': 'Предмет из Комнаты Ангела',
},
'Custom Starts (with the D6)': {
'en': 'Custom Starts (with the D6)',
'fr': 'Starters Spéciaux (avec D6)',
'es': 'Starters Especiales (Sin el D6)',
'ru': 'Специальные Начальные (с D6)',
},
'Custom Starts (without the D6)': {
'en': 'Custom Starts (without the D6)',
'fr': 'Starters Spéciaux (sans D6)',
'es': 'Starters Especiales (Sin el D6)',
'ru': 'Специальные Начальные (без D6)',
},
# Diversity Mod
'Enter a seed': {
'en': 'Enter a seed',
'fr': 'Entrez une seed',
'es': 'Introduce una semilla',
'ru': 'Введите сид',
},
'Start Diversity Mod': {
'en': 'Start Diversity Mod',
'fr': 'Commencer le Mod Diversity',
'es': 'Arranca el Diversity Mod',
'ru': 'Играть Diversity Mod',
},
# Miscellaneous Stuff
'Select an option': {
'en': 'Select an option',
'fr': 'Sélectionnnez une option',
'es': 'Selecciona una opción',
'ru': 'Выбрать опцию',
},
'Remove boss cutscenes': {
'en': 'Remove boss cutscenes',
'fr': 'Enlever les cinématiques des boss',
'es': 'Quitar las animaciones de los jefes',
'ru': 'Убрать заставки боссов',
},
'Automatically attempt to close Isaac': {
'en': 'Automatically attempt to close Isaac',
'fr': 'Fermeture automatique d\'Isaac',
'es': 'Intento automático de cerrar Isaac',
'ru': 'Автоматически пытаться закрыть Isaac-а',
},
'Open Isaac game directory': {
'en': 'Open Isaac game directory',
'fr': 'Ouvrir le répertoire du jeu Isaac',
'es': 'Abrir el directorio del juego Isaac',
'ru': 'Открыть папку с игрой Isaac',
},
'Open Isaac documents directory': {
'en': 'Open Isaac documents directory',
'fr': 'Ouvrir le répertoire de documents d\'Isaac',
'es': 'Abrir la carpeta de documentos de Isaac',
'ru': 'Открыть папку документов Isaac',
},
'Uninstall all existing mods': {
'en': 'Uninstall all existing mods',
'fr': 'Désinstaller tous les mods',
'es': 'Desinstalar todos los mods',
'ru': 'Удалить все модификации',
},
'Visit the website for': {
'en': 'Visit the website for',
'fr': 'Visiter le site',
'es': 'Visita la web',
'ru': 'Посетить сайт',
},
# Miscellaneous
'Isaac will open': {
'en': 'Isaac will open when you start the mod.',
'fr': 'Isaac va s\'ouvrir automatiquement au lancement d\'un mod.',
'es': 'Isaac se abrirá cuando inicies un mod.',
'ru': 'Isaac откроется автоматически при запуске мода.',
},
'Keep this program open': {
'en': 'Keep this program open while playing.',
'fr': 'Laissez ce programme ouvert pendant que vous jouez.',
'es': 'Manten este programa abierto.',
'ru': 'Держите эту программу открытой во время игры.',
},
'Isaac will return to normal': {
'en': 'Isaac will return to normal when this program is closed.',
'fr': 'Isaac retournera à sa configuration d\'origine à la fermeture de ce programme.',
'es': 'Isaac volverá a la normalidad cuando se cierre el programa.',
'ru': 'Isaac вернется в изначальное состояние после закрытия.',
},
'About these options': {
'en': 'About these options',
'fr': 'A propos des options',
'es': 'Acerca de estas opciones',
'ru': 'Об этих опциях',
},
'About this mod': {
'en': 'About this mod',
'fr': 'A propos du mod',
'es': 'Acerca de este mod',
'ru': 'Об этом моде',
},
'Go Back': {
'en': 'Go Back',
'fr': 'Retour',
'es': 'Volver atrás',
'ru': 'Вернуться назад',
},
# Errors and warnings
'Error': {
'en': 'Error',
'fr': 'Erreur',
'es': 'Error',
'ru': 'Ошибка',
},
'Warning': {
'en': 'Warning',
'fr': 'Attention',
'es': 'Atención',
'ru': 'Предупреждение',
},
'Generic error': {
'en': 'Generic error',
'fr': 'Erreur générique',
'es': 'Error Genérico',
'ru': 'Общая ошибка',
},
'was unable to find your Isaac resources directory.': {
'en': 'was unable to find your Isaac resources directory.',
'fr': 'n\'a pas pu trouver le répertoire des ressources d\'Isaac.',
'es': 'No fue posible encontrar la carpeta "Resources" de Isaac.',
'ru': 'не может найти папку "recources" игры Isaac.',
},
'Navigate to the program "isaac-ng.exe" in your Steam directory.': {
'en': 'Navigate to the program "isaac-ng.exe" in your Steam directory.',
'fr': 'Naviguez vers le programme "isaac-ng.exe" dans votre répertoire Steam.',
'es': 'Navega al programa "isaac-ng.exe" en tu carpeta de Steam.',
'ru': 'Укажите путь к программе "isaac-ng.exe" в твоей папке Steam.',
},
'Navigate to "isaac-ng.exe"': {
'en': 'Navigate to "isaac-ng.exe"',
'fr': 'Naviguer vers "isaac-ng.exe"',
'es': 'Navega a "isaac-ng.exe"',
'ru': 'Перейдите к "isaac-ng.exe"',
},
'Example location': {
'en': 'Example location',
'fr': 'Exemple de location',
'es': 'Ejemplo de localización',
'ru': 'Пример расположения',
},
'The file you selected is not called "isaac-ng.exe".': {
'en': 'The file you selected is not called "isaac-ng.exe".',
'fr': 'Le fichier que vous avez sélectionné ne se nomme pas "isaac-ng.exe".',
'es': 'El archivo seleccionado no se llama "isaac-ng.exe".',
'ru': 'Файл, который вы выбрали, не называется "isaac-ng.exe".',
},
'Success': {
'en': 'Success',
'fr': 'Succés',
'es': 'Conseguido',
'ru': 'Успех',
},
'You have successfully set': {
'en': 'You have successfully set your Isaac resources directory. Click OK to restart ',
'fr': 'Vous avez réussi à rediriger le programme vers le répertoire de ressources d\'Isaac. Cliquez sur OK pour redémarrer l\'',
'es': 'Has conseguido colocar tu carpeta de "Resources" de Isaac. Haz click para reiniciar ',
'ru': 'Вы успешно установили папку "recources" игры Isaac. Нажмите OK, чтобы перезапустить ',
},
# Template
'': {
'en': '',
'fr': '',
'es': '',
'ru': '',
},
}
| Zamiell/isaac-racing-mods | program/languages.py | Python | gpl-3.0 | 9,254 | [
"VisIt"
] | b050f09bdadd177cae076c710772bf0fd56a66d500495d721e1df544156d6bfe |
import numpy
def acg(N):
    """Approximated confined Gaussian window of support N.

    Tabulates the right half of the (symmetric) window on 8192 points over
    [0, N/2] and normalises it to unit integral over the full support.

    https://en.wikipedia.org/wiki/Window_function#Confined_Gaussian_window

    Returns (phi, x): window values phi sampled at positions x.
    """
    # standard deviation of the underlying Gaussian
    s = 1.0
    # centre of the analytic window is at (N - 1) / 2
    A = (N - 1) / 2.0
    x = numpy.linspace(0, N * 0.5, 8192, endpoint=True)
    # evaluate from the window centre outward
    y = x + A
    def G(t):
        return numpy.exp(-0.25 * ((t - A) / s) ** 2)
    # approximate confined Gaussian: Gaussian minus a correction term
    # built from shifted copies (per the Wikipedia formula)
    phi = G(y) - G(-0.5) * (G(y + N) + G(y - N)) / (G(-0.5 + N) + G(-0.5 - N))
    # NumPy 2.0 renamed trapz -> trapezoid; support both versions
    _trapz = getattr(numpy, 'trapezoid', None) or numpy.trapz
    # normalise to unit integral over the full two-sided support
    # ('norm' instead of the original 'sum', which shadowed the builtin)
    norm = 2 * _trapz(phi, x)
    phi /= norm
    print(N, phi[-1])
    return phi, x
def genacg(n):
    """Emit C source for a tabulated ACG window kernel of support *n*.

    Generates a static value table plus two C functions:
    _acg<n>_kernel (linear interpolation into the table) and
    _acg<n>_diff (finite-difference derivative of the table).
    """
    phi, x = acg(n)
    name = 'acg%d' % n
    support = n
    # format table values four per line for readability of the generated C
    vnumbers = ["%.8f, %.8f, %.8f, %.8f" % tuple(a) for a in phi.reshape(-1, 4)]
    # uniform spacing of the table samples
    step = numpy.diff(x).mean()
    template = """
static double _%(funcname)s_vtable[] = %(vtable)s;
static double _%(funcname)s_nativesupport = %(support)g;
static double _%(funcname)s_kernel(double x)
{
x = fabs(x);
double f = x / %(step)e;
int i = f;
if (i < 0) return 0;
if (i >= %(tablesize)d - 1) return 0;
f -= i;
return _%(funcname)s_vtable[i] * (1 - f)
+ _%(funcname)s_vtable[i+1] * f;
}
static double _%(funcname)s_diff(double x)
{
double factor;
if(x >= 0) {
factor = 1;
} else {
factor = -1;
x = -x;
}
int i = x / %(step)e;
if (i < 0) return 0;
if (i >= %(tablesize)d - 1) return 0;
double f = _%(funcname)s_vtable[i+1] - _%(funcname)s_vtable[i];
return factor * f / %(step)e;
}
"""
    # NOTE(review): 'hsupport' is supplied below but unused by the template
    return template % {
        'vtable' : "{\n" + ",\n".join(vnumbers) + "}",
        'hsupport' : support * 0.5,
        'support' : support,
        'funcname' : name,
        'step' : step,
        'tablesize' : len(phi),
    }
# regenerate the pre-tabulated window header used by pmesh
with open('pmesh/_window_acg.h', 'wt') as f:
    # warning banner for the generated C file
    f.write("""
/*
 * do not modify this file
 * generated by makeacg.py
 *
 */
""")
    # emit kernel + derivative lookup tables for supports 2..6
    f.write(genacg(2))
    f.write(genacg(3))
    f.write(genacg(4))
    f.write(genacg(5))
    f.write(genacg(6))
| rainwoodman/pypm | makeacg.py | Python | gpl-3.0 | 2,115 | [
"Gaussian"
] | 7fdb69b83ea533bd783a624037fdb714794eddf85f83f2b33a75fd85c7f4a6d5 |
#!/usr/bin/env python
import sys
sys.path.append( '.' )
from optparse import OptionParser
from sfepy.mesh.mesh_generators import gen_cylinder_mesh
from sfepy.discrete.fem.meshio import MeshIO
usage = """%prog [options]
Cylinder mesh generator.
"""
help = {
'filename' :
'output file name [default: %default]',
'format' : 'output mesh format (overrides output file name extension)',
'axis' :
'axis of the cylinder, one of x, y, z [default: %default]',
'dims' :
'dimensions of the cylinder: inner surface semi-axes a1, b1, outer'\
' surface semi-axes a2, b2, length [default: %default]',
'shape' :
'shape (counts of nodes in radial, circumferential and longitudinal'\
' directions) of the cylinder mesh [default: %default]',
'centre' :
'centre of the cylinder [default: %default]',
'force_hollow' :
'force hollow mesh even if inner radii a1 = b1 = 0',
'is_open' :
'generate an open cylinder segment',
'open_angle' :
'opening angle in radians [default: %default]',
'non_uniform' :
'space the mesh nodes in radial direction so that the element'\
' volumes are (approximately) the same, making thus the elements towards'\
' the outer surface thinner',
}
def main():
    """Parse command line options and write the generated cylinder mesh.

    Options are parsed with optparse; list-valued options are given as
    Python list literals and evaluated into numpy arrays below.
    """
    parser = OptionParser( usage = usage, version = "%prog" )
    parser.add_option( "-o", "", metavar = 'filename',
                       action = "store", dest = "output_filename",
                       default = 'out.vtk', help = help['filename'] )
    parser.add_option('-f', '--format', metavar='format',
                      action='store', type='string', dest='format',
                      default=None, help=help['format'])
    parser.add_option( "-a", "--axis", metavar = 'axis',
                       action = "store", dest = "axis",
                       default = 'x', help = help['axis'] )
    parser.add_option( "-d", "--dims", metavar = 'dims',
                       action = "store", dest = "dims",
                       default = '[1.0, 1.0, 2.0, 2.0, 3.0]',
                       help = help['dims'] )
    parser.add_option( "-s", "--shape", metavar = 'shape',
                       action = "store", dest = "shape",
                       default = '[11, 11, 11]', help = help['shape'] )
    parser.add_option( "-c", "--centre", metavar = 'centre',
                       action = "store", dest = "centre",
                       default = '[0.0, 0.0, 0.0]', help = help['centre'] )
    parser.add_option( "", "--force-hollow",
                       action = "store_true", dest = "force_hollow",
                       default = False, help = help['force_hollow'] )
    parser.add_option( "", "--is-open",
                       action = "store_true", dest = "is_open",
                       default = False, help = help['is_open'] )
    parser.add_option( "", "--open-angle", metavar = 'angle', type='float',
                       action = "store", dest = "open_angle",
                       default = '0.0', help = help['open_angle'] )
    parser.add_option( "", "--non-uniform",
                       action = "store_true", dest = "non_uniform",
                       default = False, help = help['non_uniform'] )
    (options, args) = parser.parse_args()

    import numpy as nm
    # NOTE(review): eval() of user-supplied option strings is unsafe for
    # untrusted input; tolerable for a local mesh-generation script
    dims = eval( "nm.array( %s, dtype = nm.float64 )" % options.dims )
    shape = eval( "nm.array( %s, dtype = nm.int32 )" % options.shape )
    centre = eval( "nm.array( %s, dtype = nm.float64 )" % options.centre )

    # Python 2 print statements (this script predates print())
    print dims
    print shape
    print centre

    mesh = gen_cylinder_mesh(dims, shape, centre,
                             axis=options.axis,
                             force_hollow=options.force_hollow,
                             is_open=options.is_open,
                             open_angle=options.open_angle,
                             non_uniform=options.non_uniform,
                             name=options.output_filename)
    # the writer format is inferred from the file extension unless -f is given
    io = MeshIO.for_format(options.output_filename, format=options.format,
                           writable=True)
    mesh.write(options.output_filename, io=io)

if __name__ == '__main__':
    main()
| RexFuzzle/sfepy | script/cylindergen.py | Python | bsd-3-clause | 4,144 | [
"VTK"
] | 02abf592717e8053098b392442f9b2b405e1cfe8c16179a3d30405a01b5d5bfd |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string, optparse, subprocess
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the Titanium SDK path from *config* with ~ and $VARS expanded."""
    raw_path = config['TITANIUM_SDK']
    home_expanded = os.path.expanduser(raw_path)
    return os.path.expandvars(home_expanded)
def replace_vars(config, token):
    """Expand $(KEY) placeholders in *token* using values from *config*.

    Expansion repeats until no placeholder remains; it stops early at the
    first placeholder whose key is missing from *config* or whose closing
    parenthesis cannot be found, returning the partially expanded token.
    """
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')', idx + 2)
        if idx2 == -1:
            break
        key = token[idx + 2:idx2]
        # dict.has_key() was removed in Python 3; 'in' works on 2 and 3
        if key not in config:
            break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse 'titanium.xcconfig' into a dict, expanding $(KEY) references.

    Lines starting with '//' are comments; other lines are KEY=VALUE
    pairs. Values may reference keys defined on earlier lines via $(KEY).
    """
    contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2]=='//': continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx+1:].strip()
            # expand $(KEY) placeholders against the keys read so far
            config[key] = replace_vars(config,value)
    return config
def generate_doc(config):
    """Render every file in 'documentation/' from markdown to HTML.

    Returns a list of single-entry dicts mapping filename -> HTML string,
    or None when the documentation directory does not exist.
    """
    docdir = os.path.join(cwd,'documentation')
    if not os.path.exists(docdir):
        warn("Couldn't find documentation file at: %s" % docdir)
        return None
    # prefer markdown2 when installed, fall back to the markdown package
    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir,file)).read()
        html = markdown.markdown(md)
        documentation.append({file:html});
    return documentation
def compile_js(manifest,config):
    """Compile the module's CommonJS JavaScript into the iOS assets router.

    Returns silently when the main JS asset is absent. Splices the
    compiled root asset and per-module assets into
    Classes/YyLogcatcherModuleAssets.m and writes the crawled export
    names to metadata.json.
    """
    js_file = os.path.join(cwd,'assets','yy.logcatcher.js')
    if not os.path.exists(js_file): return
    from compiler import Compiler
    try:
        import json
    except:
        import simplejson as json
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()
    # Objective-C snippet returning the data range of the root asset
    root_asset_content = """
%s

return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
    # Objective-C snippet resolving a named module asset via the path map
    module_asset_content = """
%s

NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
    from tools import splice_code
    assets_router = os.path.join(cwd,'Classes','YyLogcatcherModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)
    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
def die(msg):
    # print the error and abort the packager with a non-zero exit status
    print msg
    sys.exit(1)

def info(msg):
    # informational console message
    print "[INFO] %s" % msg

def warn(msg):
    # non-fatal warning; packaging continues
    print "[WARN] %s" % msg
def validate_license():
    """Warn when LICENSE still contains the placeholder license text."""
    c = open(os.path.join(cwd,'LICENSE')).read()
    if c.find(module_license_default)!=-1:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse and validate the module 'manifest' file.

    Returns (manifest, path) where manifest maps keys to values and path
    is the manifest location. Lines starting with '#' are comments; other
    lines are 'key: value' pairs. Dies when the file or a required key is
    missing, warns when a key still holds its default placeholder value.
    """
    path = os.path.join(cwd, 'manifest')
    # check existence before opening; the original opened first, which
    # raised IOError before the friendlier die() message could run
    if not os.path.exists(path): die("missing %s" % path)
    manifest = {}
    f = open(path)
    try:
        for line in f.readlines():
            line = line.strip()
            if line[0:1]=='#': continue
            if line.find(':') < 0: continue
            # split on the first ':' only so values may contain colons
            # (the original split(':') crashed on e.g. URL values)
            key, value = line.split(':', 1)
            manifest[key.strip()] = value.strip()
    finally:
        f.close()  # the original leaked the file handle
    for key in required_module_keys:
        # 'in' instead of the Python-2-only dict.has_key()
        if key not in manifest: die("missing required manifest key '%s'" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest, path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
    """Recursively add directory *dir* to open ZipFile *zf* under *basepath*.

    Entries listed in ignoreDirs/ignoreFiles and files whose extension is
    in *ignoreExt* are skipped. NOTE(review): the mutable default for
    ignoreExt is shared across calls; harmless here as it is never mutated.
    """
    if not os.path.exists(dir): return
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name) # don't visit ignored directories
        for file in files:
            if file in ignoreFiles: continue
            e = os.path.splitext(file)
            if len(e) == 2 and e[1] in ignoreExt: continue
            from_ = os.path.join(root, file)
            # re-root the archive path under basepath/dir
            to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
            zf.write(from_, to_)
def glob_libfiles():
    """Return the Release-configuration static libraries under 'build/'."""
    return [libpath for libpath in glob.glob('build/**/*.a')
            if 'Release-' in libpath]
def build_module(manifest,config):
    """Build the module for device and simulator, then merge with lipo.

    Runs xcodebuild for the iphoneos and iphonesimulator SDKs (Release
    configuration) and combines the resulting static libraries into a
    single universal build/lib<moduleid>.a.
    """
    from tools import ensure_dev_path
    ensure_dev_path()

    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")

    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")

    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile

    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
    """Generate module API docs from the 'apidoc' folder via docgen.py.

    Returns True when documentation was produced in *apidoc_build_path*;
    False when generation was skipped (--skip-docs given, no apidoc
    folder, or the titanium_mobile repo / docgen.py could not be found).
    """
    global options
    if options.skip_docs:
        info("Skipping documentation generation.")
        return False
    else:
        info("Module apidoc generation can be skipped using --skip-docs")

    apidoc_path = os.path.join(cwd, "apidoc")
    if not os.path.exists(apidoc_path):
        warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
        return False
    if not os.path.exists(apidoc_build_path):
        os.makedirs(apidoc_build_path)

    # locate docgen.py inside the titanium_mobile checkout via $TI_ROOT
    ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
    if not len(ti_root) > 0:
        warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
        warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
        return False
    docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
    if not os.path.exists(docgen):
        warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
        return False

    info("Generating documentation from the apidoc folder.")
    rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
    if rc != 0:
        die("docgen failed")
    return True
def package_module(manifest,mf,config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip.

    Packs the manifest, merged static library, rendered documentation,
    generated apidoc, assets, examples, platform files, LICENSE,
    module.xcconfig and (when present) metadata.json under
    modules/iphone/<moduleid>/<version>/ inside the archive.
    """
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs!=None:
        for doc in docs:
            for file, html in doc.iteritems():
                # rendered markdown keeps the source name with .html
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    apidoc_build_path = os.path.join(cwd, "build", "apidoc")
    if generate_apidoc(apidoc_build_path):
        for file in os.listdir(apidoc_build_path):
            if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
                continue
            zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
    # compiled JS is spliced into the binary, so exclude .js from assets
    zip_dir(zf,'assets',modulepath,['.pyc','.js'])
    zip_dir(zf,'example',modulepath,['.pyc'])
    zip_dir(zf,'platform',modulepath,['.pyc','.js'])
    zf.write('LICENSE','%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # packager entry point: validate inputs, build, then zip the module
    # NOTE(review): 'global' at module scope is a no-op; kept so the intent
    # that generate_apidoc() reads 'options' stays visible
    global options
    parser = optparse.OptionParser()
    parser.add_option("-s", "--skip-docs",
                      dest="skip_docs",
                      action="store_true",
                      help="Will skip building documentation in apidoc folder",
                      default=False)
    (options, args) = parser.parse_args()

    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()

    sdk = find_sdk(config)
    # make the Titanium SDK's python helper modules importable
    sys.path.insert(0,os.path.join(sdk,'iphone'))
    sys.path.append(os.path.join(sdk, "common"))

    compile_js(manifest,config)
    build_module(manifest,config)
    package_module(manifest,mf,config)
    sys.exit(0)
| dbankier/TiLogCatcher | ios/build.py | Python | mit | 8,766 | [
"VisIt"
] | 5d79b35623eae140b28fbfd78e0f2655e1bd5272ee91cee86752c5e71a494ab1 |
from ase.constraints import FixAtoms
from ase.io import read
from ase.thermochemistry import HarmonicThermo
from ase.vibrations import Vibrations
from espresso import espresso
from espresso.vibespresso import vibespresso
########################################################################################################
## more information here: https://wiki.fysik.dtu.dk/ase/ase/thermochemistry/thermochemistry.html ##
########################################################################################################
#########################################################################################################
##### YOUR SETTINGS HERE #####
#########################################################################################################
# rename to the name of your trajectory file
# containing the surface with adsorbates
atoms = read('ads_surface.traj')
#########################################################################################################
##### END #####
#########################################################################################################
# indices of the substrate metal atoms (everything that is not adsorbate N/H)
metal_atoms = [atom.index for atom in atoms if atom.symbol not in ['N','H']]
num_atoms = len(metal_atoms)

# choose k-point sampling from the metal atom count:
# 16 atoms -> 4x4x1 mesh, 13 atoms -> gamma point only
# (presumably slab vs. cluster geometries — TODO confirm with the authors)
if num_atoms == 16:
    kpts = (4, 4, 1)
elif num_atoms == 13:
    kpts = 'gamma'
else:
    print "Wrong number of metal atoms! Check your input trajectory!"
    exit()

# Quantum ESPRESSO parameters shared by the energy and vibration calculators
params = {'pw':500,
          'dw':5000,
          'kpts':kpts,
          'nbands':-20,
          'xc':'BEEF-vdW',
          'psppath':'/home/vossj/suncat/psp/gbrv1.5pbe',
          'convergence':{'energy':1e-5, 'mixing':0.1, 'nmix':10, 'maxsteps':500, 'diag':'david'},
          'spinpol':False}

calc = espresso(outdir = 'calcdir', **params) # regular espresso calculator
calcvib = vibespresso(outdirprefix = 'vibdir', **params) # special calculator for the vibration calculations

atoms.set_calculator(calc) # attach calculator to the atoms
energy = atoms.get_potential_energy() # calculate the energy, to be used to determine G

# vibrate N and H atoms
vibrateatoms = [atom.index for atom in atoms if atom.symbol in ['H','N']] # calculate the vibrational modes for all N and H atoms
atoms.set_calculator(calcvib) # attach vibrations calculator to the atoms

# Calculate vibrations
vib = Vibrations(atoms,indices=vibrateatoms,delta=0.03) # define a vibration calculation
vib.run() # run the vibration calculation
vib.summary(method='standard') # summarize the calculated results

for mode in range(len(vibrateatoms)*3): # Make trajectory files to visualize the modes.
    vib.write_mode(mode)
### UNCOMMENT TO CALCULATE FREE get_energies
### YOU CAN ALSO USER get_ads_free_energy.py and get_gas_free_energy.py
### Calculate free energy
# vibenergies=vib.get_energies()
# vibenergies=[vib for vib in vibenergies if not isinstance(vib,complex)] # only take the real modes
# gibbs = HarmonicThermo(vib_energies = vibenergies, electronicenergy = energy)
### At 300K and 101325 Pa
### change for your operating conditions
# freeenergy = gibbs.get_gibbs_energy(300,101325)
# f=open('out.energy','w')
# f.write('Potential energy: '+str(energy)+'\n'+'Free energy: '+str(freeenergy)+'\n')
# f.close | brohr/brohr.github.io | ASE/Transition_States/run_freq.py | Python | gpl-2.0 | 3,757 | [
"ASE",
"ESPResSo"
] | ae5f0e9e19e98341069dae943987bb675dc9eece7a46fe7b06a4404c2aab0b0a |
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
outputDir = '/Volumes/KSW-Data/tonic/mpas-contours-sorted-composite/'
inputFile = '/Volumes/KSW-Data/Data/DataExploration/Data/MPAS/data/xyz_n_primal/X_Y_Z_NLAYER-primal_%d_0.vtu'
earthCore = '/Volumes/KSW-Data/Data/vtk/mpas/earth/earth-ok.vtk'
phi = range(0, 360, 30)
theta = range(-60, 61, 30)
time = range(50, 5151, 50)
dataRanges = {
'bottomDepth': [-9753, 5984],
'salinity': [24.8574, 37.4595],
'temperature': [-1.64296, 28.6918]
}
sections = {
'LookupTables': {
"bottomDepth": { "preset": "earth"},
"temperature": { "preset": "ocean", "range": [5, 30]},
"salinity" : { "preset": "yellow2brown", "range": [34, 38]}
}
}
# -----------------------------------------------------------------------------
from paraview import simple
from tonic.paraview.dataset_builder import *
# -----------------------------------------------------------------------------
# Pipeline creation
# -----------------------------------------------------------------------------
# Earth core: extract the outer surface and add normals for lighting
core = simple.OpenDataFile(earthCore)
coreSurface = simple.ExtractSurface(Input=core)
coreWithNormals = simple.GenerateSurfaceNormals(Input=coreSurface)

# MPAS ocean data: load first timestep, threshold out invalid cells,
# then convert cell data to point data for contouring
reader = simple.OpenDataFile(inputFile % time[0])
reader.CellArrayStatus = ['temperature', 'salinity']
dataCleanUp = simple.Threshold(Input = reader, Scalars = ['CELLS', 'temperature'], ThresholdRange = [-1000.0, 50.0])
dataToPoints = simple.CellDatatoPointData(Input = dataCleanUp)
sceneDescription = {
'size': [500, 500],
'light': [ 'intensity', 'normal' ],
'camera': {
'CameraViewUp': [0.0, 0.0, 1.0],
'CameraPosition': [107823.5, -28000000, -44044.25],
'CameraFocalPoint': [107823.5, -7766.0, -44044.25]
},
'scene': [
{
'name': 'Earth',
'source': coreWithNormals,
'colors': {
'bottomDepth': {'location': 'POINT_DATA', 'range': dataRanges['bottomDepth'] }
}
},{
'parent': 'Temperatures',
'name': '5C',
'source': simple.Contour(
Input = dataToPoints,
PointMergeMethod = "Uniform Binning",
ContourBy = 'temperature',
Isosurfaces = [5.0]),
'colors': {
'temperature': {'constant': 5.0 },
'salinity': {'location': 'POINT_DATA', 'range': dataRanges['salinity'] }
}
},{
'parent': 'Temperatures',
'name': '10C',
'source': simple.Contour(
Input = dataToPoints,
PointMergeMethod = "Uniform Binning",
ContourBy = 'temperature',
Isosurfaces = [10.0]),
'colors': {
'temperature': {'constant': 10.0 },
'salinity': {'location': 'POINT_DATA', 'range': dataRanges['salinity'] }
}
},{
'parent': 'Temperatures',
'name': '15C',
'source': simple.Contour(
Input = dataToPoints,
PointMergeMethod = "Uniform Binning",
ContourBy = 'temperature',
Isosurfaces = [15.0]),
'colors': {
'temperature': {'constant': 15.0 },
'salinity': {'location': 'POINT_DATA', 'range': dataRanges['salinity'] }
}
},{
'parent': 'Temperatures',
'name': '20C',
'source': simple.Contour(
Input = dataToPoints,
PointMergeMethod = "Uniform Binning",
ContourBy = 'temperature',
Isosurfaces = [20.0]),
'colors': {
'temperature': {'constant': 20.0 },
'salinity': {'location': 'POINT_DATA', 'range': dataRanges['salinity'] }
}
},{
'parent': 'Temperatures',
'name': '25C',
'source': simple.Contour(
Input = dataToPoints,
PointMergeMethod = "Uniform Binning",
ContourBy = 'temperature',
Isosurfaces = [25.0]),
'colors': {
'temperature': {'constant': 25.0 },
'salinity': {'location': 'POINT_DATA', 'range': dataRanges['salinity'] }
}
}
]
}
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
# Create Image Builder
dsb = CompositeDataSetBuilder(outputDir, sceneDescription, {'type': 'spherical', 'phi': phi, 'theta': theta}, sections=sections)

# Add time information
dsb.getDataHandler().registerArgument(priority=1, name='time', values=time, ui='slider', loop='modulo')

# write one composite dataset per timestep by repointing the reader
dsb.start()
for t in dsb.getDataHandler().time:
    reader.FileName = inputFile % t
    dsb.writeData()
dsb.stop()
| Kitware/tonic-data-generator | scripts/paraview/mpas/earth-contours-sorted-composite.py | Python | bsd-3-clause | 5,301 | [
"ParaView",
"VTK"
] | ff4eb976361f7a33641d69ad3d3feafa83c1635d57e8d3ea3bfc5820c906f81b |
import cv2
import numpy as np
from matplotlib import pyplot as plt
# inpired by: http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_canny/py_canny.html
def show_colour_transforms():
    """Print the names of all cv2 BGR colour-conversion flags."""
    for flag in [i for i in dir(cv2) if i.startswith('COLOR_')]:
        if flag.startswith('COLOR_BGR'):
            print flag
    return 0
def treshold_test(str_fn):
    """Show the five basic cv2 global thresholding modes side by side."""
    img = cv2.imread(str_fn, 0)  # 0 -> load as grayscale
    low, hig = 96,255
    ret, thresh1 = cv2.threshold(img, low, hig, cv2.THRESH_BINARY)
    ret, thresh2 = cv2.threshold(img, low, hig, cv2.THRESH_BINARY_INV)
    ret, thresh3 = cv2.threshold(img, low, hig, cv2.THRESH_TRUNC)
    ret, thresh4 = cv2.threshold(img, low, hig, cv2.THRESH_TOZERO)
    ret, thresh5 = cv2.threshold(img, low, hig, cv2.THRESH_TOZERO_INV)
    titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']
    images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
    # 2x3 grid: original plus the five thresholded variants
    for i in xrange(6):
        plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
        plt.title(titles[i])
        plt.xticks([]), plt.yticks([])
    plt.show()
def treshold_adaptive_test(str_fn):
    """Compare global, Otsu, and Gaussian-blur + Otsu thresholding.

    Each row shows a source image, its histogram and the threshold result.
    """
    img = cv2.imread(str_fn, 0)
    # global thresholding
    ret1, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    # Otsu's thresholding
    ret2, th2 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Otsu's thresholding after Gaussian filtering
    blur = cv2.GaussianBlur(img, (5, 5), 0)
    ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # plot all the images and their histograms
    images = [img, 0, th1,
              img, 0, th2,
              blur, 0, th3]
    titles = ['Original Noisy Image', 'Histogram', 'Global Thresholding (v=127)',
              'Original Noisy Image', 'Histogram', "Otsu's Thresholding",
              'Gaussian filtered Image', 'Histogram', "Otsu's Thresholding"]
    for i in xrange(3):
        plt.subplot(3, 3, i * 3 + 1), plt.imshow(images[i * 3], 'gray')
        plt.title(titles[i * 3]), plt.xticks([]), plt.yticks([])
        plt.subplot(3, 3, i * 3 + 2), plt.hist(images[i * 3].ravel(), 256)
        plt.title(titles[i * 3 + 1]), plt.xticks([]), plt.yticks([])
        plt.subplot(3, 3, i * 3 + 3), plt.imshow(images[i * 3 + 2], 'gray')
        plt.title(titles[i * 3 + 2]), plt.xticks([]), plt.yticks([])
    plt.show()
def highpass_test(str_fn):
    """Show Laplacian and Sobel (x/y) high-pass filter responses."""
    img = cv2.imread(str_fn, 0)
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    #ret1, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    #ret2, th2 = cv2.threshold(laplacian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # ksize=-1 requests the Scharr kernel variant — see cv2.Sobel docs
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=-1)
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=-1)
    plt.subplot(2, 2, 1), plt.imshow(img, cmap='gray')
    plt.title('Original'), plt.xticks([]), plt.yticks([])
    plt.subplot(2, 2, 2), plt.imshow(laplacian, cmap='gray') # laplacian, cmap='gray')
    plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
    plt.subplot(2, 2, 3), plt.imshow(sobelx, cmap='gray')
    plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
    plt.subplot(2, 2, 4), plt.imshow(sobely, cmap='gray')
    plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
    plt.show()
def canny_test(str_fn):
    """Run Canny edge detection on a grayscale image and display the result."""
    image = cv2.imread(str_fn, 0)
    edge_map = cv2.Canny(image, 100, 200)
    panels = [(image, 'Original Image'), (edge_map, 'Edge Image')]
    for offset, (panel, label) in enumerate(panels):
        plt.subplot(121 + offset), plt.imshow(panel, cmap='gray')
        plt.title(label), plt.xticks([]), plt.yticks([])
    plt.show()
def houghlines_test(str_fn):
    """Detect straight lines with the standard Hough transform and plot them.

    Saves the annotated image as 'h' + str_fn and shows the original next
    to the annotated result.
    """
    img = cv2.imread(str_fn)
    # BUGFIX: the original used 'img_org = img', a mere alias — cv2.line
    # draws in place, so the "Original Image" panel showed the annotated
    # image too. Take a real copy instead.
    img_org = img.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
    for rho, theta in lines[0]:
        # convert (rho, theta) line parameters to two far-apart endpoints
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv2.imwrite('h'+str_fn, img)
    plt.subplot(121), plt.imshow(img_org, cmap='gray')
    plt.title('Original Image'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(img, cmap='gray')
    plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
    plt.show()
def houghlines2_test(str_fn):
    """Detect line segments with the probabilistic Hough transform.

    Draws each detected segment in green and saves 'houghlines2.jpg'.
    """
    source = cv2.imread(str_fn)
    grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    edge_map = cv2.Canny(grayscale, 50, 150, apertureSize=3)
    minLineLength = 100
    maxLineGap = 10
    segments = cv2.HoughLinesP(edge_map, 1, np.pi / 180, 100, minLineLength, maxLineGap)
    for start_x, start_y, end_x, end_y in segments[0]:
        cv2.line(source, (start_x, start_y), (end_x, end_y), (0, 255, 0), 2)
    cv2.imwrite('houghlines2.jpg', source)
if __name__ == "__main__":
## images
str_fn = r"/home/martin/PycharmProjects/sudoku/data/images/pol20171201.jpg" # rather nice
#str_fn = r"/home/martin/PycharmProjects/sudoku/data/images/pol20170825.jpg" # cener fold
#str_fn = r"/home/martin/PycharmProjects/sudoku/data/images/pol20170909.jpg" # mixed light
str_fn = r"/home/martin/PycharmProjects/sudoku/data/images/pol20170917.jpg" # super nice
#str_fn = r"/home/martin/PycharmProjects/sudoku/data/images/gradients01.jpg" # super nice
## routines
#canny_test(str_fn)
#show_colour_transforms()
#treshold_test(str_fn)
#treshold_adaptive_test(str_fn) # Gaussian + Otsu give good results for (no shadow) Sudoku :-)
#highpass_test(str_fn)
#canny_test(str_fn)
#houghlines_test(str_fn)
houghlines2_test(str_fn)
# Later :
# Perspective transform:
# Then transformation matrix can be found by the function cv2.getPerspectiveTransform. Then apply cv2.warpPerspective with this 3x3 transformation matrix.
# Consider Open and Close, before identifying numbers
# Template matching for number recognition
# http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_template_matching/py_template_matching.html
| EC-software/EC_stuff | imagine/opencv_fun.py | Python | gpl-2.0 | 6,098 | [
"Gaussian"
] | ffe2686272d2f5f064e329a75db98b1c5bef5ac14740f172bf44fffc14278dd9 |
#!/usr/bin/env python
########################################################################
# File : dirac-wms-job-parameters
# Author : Stuart Paterson
########################################################################
"""
Retrieve parameters associated to the given DIRAC job
Example:
$ dirac-wms-job-parameters 1
{'CPU(MHz)': '1596.479',
'CPUNormalizationFactor': '6.8',
'CacheSize(kB)': '4096KB',
'GridCEQueue': 'ce.labmc.inf.utfsm.cl:2119/jobmanager-lcgpbs-prod',
'HostName': 'wn05.labmc',
'JobPath': 'JobPath,JobSanity,JobScheduling,TaskQueue',
'JobSanityCheck': 'Job: 1 JDL: OK,InputData: No input LFNs, Input Sandboxes: 0, OK.',
'JobWrapperPID': '599',
'LocalAccount': 'prod006',
'LocalBatchID': '',
'LocalJobID': '277821.ce.labmc.inf.utfsm.cl',
'MatcherServiceTime': '2.27646398544',
'Memory(kB)': '858540kB',
'ModelName': 'Intel(R)Xeon(R)CPU5110@1.60GHz',
'NormCPUTime(s)': '1.02',
'OK': 'True',
'OutputSandboxMissingFiles': 'std.err',
'PayloadPID': '604',
'PilotAgent': 'EELADIRAC v1r1; DIRAC v5r12',
'Pilot_Reference': 'https://lb2.eela.ufrj.br:9000/ktM6WWR1GdkOTm98_hwM9Q',
'ScaledCPUTime': '115.6',
'TotalCPUTime(s)': '0.15'}
"""
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
    """Print the parameters of each requested DIRAC job.

    Exits with status 2 when fetching any job's parameters fails.
    """
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument(["JobID: DIRAC Job ID"])
    _, args = Script.parseCommandLine(ignoreErrors=True)

    from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments

    dirac = Dirac()
    exitCode = 0
    errorList = []

    for job in parseArguments(args):
        result = dirac.getJobParameters(job, printOutput=True)
        if not result["OK"]:
            # remember the failure but keep processing the remaining jobs
            errorList.append((job, result["Message"]))
            exitCode = 2

    for error in errorList:
        print("ERROR %s: %s" % error)

    DIRAC.exit(exitCode)


if __name__ == "__main__":
    main()
| DIRACGrid/DIRAC | src/DIRAC/Interfaces/scripts/dirac_wms_job_parameters.py | Python | gpl-3.0 | 1,972 | [
"DIRAC"
] | a1807d4e070a3e6c7d7ad7442aacf038af7148eadec8cdf3032b7652cb2da19c |
# -*- coding: utf8 -*-
# LaTeX math to Unicode symbols translation dictionaries.
# Generated with ``write_tex2unichar.py`` from the data in
# http://milde.users.sourceforge.net/LUCR/Math/
# Includes commands from: wasysym, stmaryrd, mathdots, mathabx, esint, bbold, amsxtra, amsmath, amssymb, standard LaTeX
# LaTeX accent commands -> Unicode *combining* characters.  A combining
# character is written after the base character it decorates.
mathaccent = {
    'acute': '\u0301', # x́ COMBINING ACUTE ACCENT
    'bar': '\u0304', # x̄ COMBINING MACRON
    'breve': '\u0306', # x̆ COMBINING BREVE
    'check': '\u030c', # x̌ COMBINING CARON
    'ddddot': '\u20dc', # x⃜ COMBINING FOUR DOTS ABOVE
    'dddot': '\u20db', # x⃛ COMBINING THREE DOTS ABOVE
    'ddot': '\u0308', # ẍ COMBINING DIAERESIS
    'dot': '\u0307', # ẋ COMBINING DOT ABOVE
    'grave': '\u0300', # x̀ COMBINING GRAVE ACCENT
    'hat': '\u0302', # x̂ COMBINING CIRCUMFLEX ACCENT
    'mathring': '\u030a', # x̊ COMBINING RING ABOVE
    'not': '\u0338', # x̸ COMBINING LONG SOLIDUS OVERLAY
    'overleftarrow': '\u20d6', # x⃖ COMBINING LEFT ARROW ABOVE
    'overleftrightarrow': '\u20e1', # x⃡ COMBINING LEFT RIGHT ARROW ABOVE
    'overline': '\u0305', # x̅ COMBINING OVERLINE
    'overrightarrow': '\u20d7', # x⃗ COMBINING RIGHT ARROW ABOVE
    'tilde': '\u0303', # x̃ COMBINING TILDE
    'underbar': '\u0331', # x̱ COMBINING MACRON BELOW
    'underleftarrow': '\u20ee', # x⃮ COMBINING LEFT ARROW BELOW
    'underline': '\u0332', # x̲ COMBINING LOW LINE
    'underrightarrow': '\u20ef', # x⃯ COMBINING RIGHT ARROW BELOW
    'vec': '\u20d7', # x⃗ COMBINING RIGHT ARROW ABOVE
    'widehat': '\u0302', # x̂ COMBINING CIRCUMFLEX ACCENT
    'widetilde': '\u0303', # x̃ COMBINING TILDE
    }
mathalpha = {
'Bbbk': '\U0001d55c', # 𝕜 MATHEMATICAL DOUBLE-STRUCK SMALL K
'Delta': '\u0394', # Δ GREEK CAPITAL LETTER DELTA
'Gamma': '\u0393', # Γ GREEK CAPITAL LETTER GAMMA
'Im': '\u2111', # ℑ BLACK-LETTER CAPITAL I
'Lambda': '\u039b', # Λ GREEK CAPITAL LETTER LAMDA
'Omega': '\u03a9', # Ω GREEK CAPITAL LETTER OMEGA
'Phi': '\u03a6', # Φ GREEK CAPITAL LETTER PHI
'Pi': '\u03a0', # Π GREEK CAPITAL LETTER PI
'Psi': '\u03a8', # Ψ GREEK CAPITAL LETTER PSI
'Re': '\u211c', # ℜ BLACK-LETTER CAPITAL R
'Sigma': '\u03a3', # Σ GREEK CAPITAL LETTER SIGMA
'Theta': '\u0398', # Θ GREEK CAPITAL LETTER THETA
'Upsilon': '\u03a5', # Υ GREEK CAPITAL LETTER UPSILON
'Xi': '\u039e', # Ξ GREEK CAPITAL LETTER XI
'aleph': '\u2135', # ℵ ALEF SYMBOL
'alpha': '\u03b1', # α GREEK SMALL LETTER ALPHA
'beta': '\u03b2', # β GREEK SMALL LETTER BETA
'beth': '\u2136', # ℶ BET SYMBOL
'chi': '\u03c7', # χ GREEK SMALL LETTER CHI
'daleth': '\u2138', # ℸ DALET SYMBOL
'delta': '\u03b4', # δ GREEK SMALL LETTER DELTA
'digamma': '\u03dc', # Ϝ GREEK LETTER DIGAMMA
'ell': '\u2113', # ℓ SCRIPT SMALL L
'epsilon': '\u03f5', # ϵ GREEK LUNATE EPSILON SYMBOL
'eta': '\u03b7', # η GREEK SMALL LETTER ETA
'eth': '\xf0', # ð LATIN SMALL LETTER ETH
'gamma': '\u03b3', # γ GREEK SMALL LETTER GAMMA
'gimel': '\u2137', # ℷ GIMEL SYMBOL
'hbar': '\u210f', # ℏ PLANCK CONSTANT OVER TWO PI
'hslash': '\u210f', # ℏ PLANCK CONSTANT OVER TWO PI
'imath': '\u0131', # ı LATIN SMALL LETTER DOTLESS I
'iota': '\u03b9', # ι GREEK SMALL LETTER IOTA
'jmath': '\u0237', # ȷ LATIN SMALL LETTER DOTLESS J
'kappa': '\u03ba', # κ GREEK SMALL LETTER KAPPA
'lambda': '\u03bb', # λ GREEK SMALL LETTER LAMDA
'mu': '\u03bc', # μ GREEK SMALL LETTER MU
'nu': '\u03bd', # ν GREEK SMALL LETTER NU
'omega': '\u03c9', # ω GREEK SMALL LETTER OMEGA
'phi': '\u03d5', # ϕ GREEK PHI SYMBOL
'pi': '\u03c0', # π GREEK SMALL LETTER PI
'psi': '\u03c8', # ψ GREEK SMALL LETTER PSI
'rho': '\u03c1', # ρ GREEK SMALL LETTER RHO
'sigma': '\u03c3', # σ GREEK SMALL LETTER SIGMA
'tau': '\u03c4', # τ GREEK SMALL LETTER TAU
'theta': '\u03b8', # θ GREEK SMALL LETTER THETA
'upsilon': '\u03c5', # υ GREEK SMALL LETTER UPSILON
'varDelta': '\U0001d6e5', # 𝛥 MATHEMATICAL ITALIC CAPITAL DELTA
'varGamma': '\U0001d6e4', # 𝛤 MATHEMATICAL ITALIC CAPITAL GAMMA
'varLambda': '\U0001d6ec', # 𝛬 MATHEMATICAL ITALIC CAPITAL LAMDA
'varOmega': '\U0001d6fa', # 𝛺 MATHEMATICAL ITALIC CAPITAL OMEGA
'varPhi': '\U0001d6f7', # 𝛷 MATHEMATICAL ITALIC CAPITAL PHI
'varPi': '\U0001d6f1', # 𝛱 MATHEMATICAL ITALIC CAPITAL PI
'varPsi': '\U0001d6f9', # 𝛹 MATHEMATICAL ITALIC CAPITAL PSI
'varSigma': '\U0001d6f4', # 𝛴 MATHEMATICAL ITALIC CAPITAL SIGMA
'varTheta': '\U0001d6e9', # 𝛩 MATHEMATICAL ITALIC CAPITAL THETA
'varUpsilon': '\U0001d6f6', # 𝛶 MATHEMATICAL ITALIC CAPITAL UPSILON
'varXi': '\U0001d6ef', # 𝛯 MATHEMATICAL ITALIC CAPITAL XI
'varepsilon': '\u03b5', # ε GREEK SMALL LETTER EPSILON
'varkappa': '\U0001d718', # 𝜘 MATHEMATICAL ITALIC KAPPA SYMBOL
'varphi': '\u03c6', # φ GREEK SMALL LETTER PHI
'varpi': '\u03d6', # ϖ GREEK PI SYMBOL
'varrho': '\u03f1', # ϱ GREEK RHO SYMBOL
'varsigma': '\u03c2', # ς GREEK SMALL LETTER FINAL SIGMA
'vartheta': '\u03d1', # ϑ GREEK THETA SYMBOL
'wp': '\u2118', # ℘ SCRIPT CAPITAL P
'xi': '\u03be', # ξ GREEK SMALL LETTER XI
'zeta': '\u03b6', # ζ GREEK SMALL LETTER ZETA
}
mathbin = {
'Cap': '\u22d2', # ⋒ DOUBLE INTERSECTION
'Circle': '\u25cb', # ○ WHITE CIRCLE
'Cup': '\u22d3', # ⋓ DOUBLE UNION
'LHD': '\u25c0', # ◀ BLACK LEFT-POINTING TRIANGLE
'RHD': '\u25b6', # ▶ BLACK RIGHT-POINTING TRIANGLE
'amalg': '\u2a3f', # ⨿ AMALGAMATION OR COPRODUCT
'ast': '\u2217', # ∗ ASTERISK OPERATOR
'barwedge': '\u22bc', # ⊼ NAND
'bigtriangledown': '\u25bd', # ▽ WHITE DOWN-POINTING TRIANGLE
'bigtriangleup': '\u25b3', # △ WHITE UP-POINTING TRIANGLE
'bindnasrepma': '\u214b', # ⅋ TURNED AMPERSAND
'blacklozenge': '\u29eb', # ⧫ BLACK LOZENGE
'blacktriangledown': '\u25be', # ▾ BLACK DOWN-POINTING SMALL TRIANGLE
'blacktriangleleft': '\u25c2', # ◂ BLACK LEFT-POINTING SMALL TRIANGLE
'blacktriangleright': '\u25b8', # ▸ BLACK RIGHT-POINTING SMALL TRIANGLE
'blacktriangleup': '\u25b4', # ▴ BLACK UP-POINTING SMALL TRIANGLE
'boxast': '\u29c6', # ⧆ SQUARED ASTERISK
'boxbar': '\u25eb', # ◫ WHITE SQUARE WITH VERTICAL BISECTING LINE
'boxbox': '\u29c8', # ⧈ SQUARED SQUARE
'boxbslash': '\u29c5', # ⧅ SQUARED FALLING DIAGONAL SLASH
'boxcircle': '\u29c7', # ⧇ SQUARED SMALL CIRCLE
'boxdot': '\u22a1', # ⊡ SQUARED DOT OPERATOR
'boxminus': '\u229f', # ⊟ SQUARED MINUS
'boxplus': '\u229e', # ⊞ SQUARED PLUS
'boxslash': '\u29c4', # ⧄ SQUARED RISING DIAGONAL SLASH
'boxtimes': '\u22a0', # ⊠ SQUARED TIMES
'bullet': '\u2219', # ∙ BULLET OPERATOR
'cap': '\u2229', # ∩ INTERSECTION
'cdot': '\u22c5', # ⋅ DOT OPERATOR
'circ': '\u2218', # ∘ RING OPERATOR
'circledast': '\u229b', # ⊛ CIRCLED ASTERISK OPERATOR
'circledcirc': '\u229a', # ⊚ CIRCLED RING OPERATOR
'circleddash': '\u229d', # ⊝ CIRCLED DASH
'cup': '\u222a', # ∪ UNION
'curlyvee': '\u22ce', # ⋎ CURLY LOGICAL OR
'curlywedge': '\u22cf', # ⋏ CURLY LOGICAL AND
'dagger': '\u2020', # † DAGGER
'ddagger': '\u2021', # ‡ DOUBLE DAGGER
'diamond': '\u22c4', # ⋄ DIAMOND OPERATOR
'div': '\xf7', # ÷ DIVISION SIGN
'divideontimes': '\u22c7', # ⋇ DIVISION TIMES
'dotplus': '\u2214', # ∔ DOT PLUS
'doublebarwedge': '\u2a5e', # ⩞ LOGICAL AND WITH DOUBLE OVERBAR
'intercal': '\u22ba', # ⊺ INTERCALATE
'interleave': '\u2af4', # ⫴ TRIPLE VERTICAL BAR BINARY RELATION
'land': '\u2227', # ∧ LOGICAL AND
'leftthreetimes': '\u22cb', # ⋋ LEFT SEMIDIRECT PRODUCT
'lhd': '\u25c1', # ◁ WHITE LEFT-POINTING TRIANGLE
'lor': '\u2228', # ∨ LOGICAL OR
'ltimes': '\u22c9', # ⋉ LEFT NORMAL FACTOR SEMIDIRECT PRODUCT
'mp': '\u2213', # ∓ MINUS-OR-PLUS SIGN
'odot': '\u2299', # ⊙ CIRCLED DOT OPERATOR
'ominus': '\u2296', # ⊖ CIRCLED MINUS
'oplus': '\u2295', # ⊕ CIRCLED PLUS
'oslash': '\u2298', # ⊘ CIRCLED DIVISION SLASH
'otimes': '\u2297', # ⊗ CIRCLED TIMES
'pm': '\xb1', # ± PLUS-MINUS SIGN
'rhd': '\u25b7', # ▷ WHITE RIGHT-POINTING TRIANGLE
'rightthreetimes': '\u22cc', # ⋌ RIGHT SEMIDIRECT PRODUCT
'rtimes': '\u22ca', # ⋊ RIGHT NORMAL FACTOR SEMIDIRECT PRODUCT
'setminus': '\u29f5', # ⧵ REVERSE SOLIDUS OPERATOR
'slash': '\u2215', # ∕ DIVISION SLASH
'smallsetminus': '\u2216', # ∖ SET MINUS
'smalltriangledown': '\u25bf', # ▿ WHITE DOWN-POINTING SMALL TRIANGLE
'smalltriangleleft': '\u25c3', # ◃ WHITE LEFT-POINTING SMALL TRIANGLE
'smalltriangleright': '\u25b9', # ▹ WHITE RIGHT-POINTING SMALL TRIANGLE
'smalltriangleup': '\u25b5', # ▵ WHITE UP-POINTING SMALL TRIANGLE
'sqcap': '\u2293', # ⊓ SQUARE CAP
'sqcup': '\u2294', # ⊔ SQUARE CUP
'sslash': '\u2afd', # ⫽ DOUBLE SOLIDUS OPERATOR
'star': '\u22c6', # ⋆ STAR OPERATOR
'talloblong': '\u2afe', # ⫾ WHITE VERTICAL BAR
'times': '\xd7', # × MULTIPLICATION SIGN
'triangle': '\u25b3', # △ WHITE UP-POINTING TRIANGLE
'triangledown': '\u25bf', # ▿ WHITE DOWN-POINTING SMALL TRIANGLE
'triangleleft': '\u25c3', # ◃ WHITE LEFT-POINTING SMALL TRIANGLE
'triangleright': '\u25b9', # ▹ WHITE RIGHT-POINTING SMALL TRIANGLE
'uplus': '\u228e', # ⊎ MULTISET UNION
'vartriangle': '\u25b3', # △ WHITE UP-POINTING TRIANGLE
'vee': '\u2228', # ∨ LOGICAL OR
'veebar': '\u22bb', # ⊻ XOR
'wedge': '\u2227', # ∧ LOGICAL AND
'wr': '\u2240', # ≀ WREATH PRODUCT
}
# LaTeX closing (right-hand) delimiter commands -> Unicode.
mathclose = {
    'Rbag': '\u27c6', # ⟆ RIGHT S-SHAPED BAG DELIMITER
    'lrcorner': '\u231f', # ⌟ BOTTOM RIGHT CORNER
    'rangle': '\u27e9', # ⟩ MATHEMATICAL RIGHT ANGLE BRACKET
    'rbag': '\u27c6', # ⟆ RIGHT S-SHAPED BAG DELIMITER
    'rbrace': '}', # } RIGHT CURLY BRACKET
    'rbrack': ']', # ] RIGHT SQUARE BRACKET
    'rceil': '\u2309', # ⌉ RIGHT CEILING
    'rfloor': '\u230b', # ⌋ RIGHT FLOOR
    'rgroup': '\u27ef', # ⟯ MATHEMATICAL RIGHT FLATTENED PARENTHESIS
    'rrbracket': '\u27e7', # ⟧ MATHEMATICAL RIGHT WHITE SQUARE BRACKET
    'rrparenthesis': '\u2988', # ⦈ Z NOTATION RIGHT IMAGE BRACKET
    'urcorner': '\u231d', # ⌝ TOP RIGHT CORNER
    '}': '}', # } RIGHT CURLY BRACKET
    }
# LaTeX fence commands (delimiters usable on either side) -> Unicode.
# Keys are the command names without backslash; '|' is the \| command.
mathfence = {
    'Vert': '\u2016', # ‖ DOUBLE VERTICAL LINE
    'vert': '|', # | VERTICAL LINE
    '|': '\u2016', # ‖ DOUBLE VERTICAL LINE
    }
# LaTeX big-operator commands (n-ary operators, integrals, sums) -> Unicode.
mathop = {
    'Join': '\u2a1d', # ⨝ JOIN
    'bigcap': '\u22c2', # ⋂ N-ARY INTERSECTION
    'bigcup': '\u22c3', # ⋃ N-ARY UNION
    'biginterleave': '\u2afc', # ⫼ LARGE TRIPLE VERTICAL BAR OPERATOR
    'bigodot': '\u2a00', # ⨀ N-ARY CIRCLED DOT OPERATOR
    'bigoplus': '\u2a01', # ⨁ N-ARY CIRCLED PLUS OPERATOR
    'bigotimes': '\u2a02', # ⨂ N-ARY CIRCLED TIMES OPERATOR
    'bigsqcup': '\u2a06', # ⨆ N-ARY SQUARE UNION OPERATOR
    'biguplus': '\u2a04', # ⨄ N-ARY UNION OPERATOR WITH PLUS
    'bigvee': '\u22c1', # ⋁ N-ARY LOGICAL OR
    'bigwedge': '\u22c0', # ⋀ N-ARY LOGICAL AND
    'coprod': '\u2210', # ∐ N-ARY COPRODUCT
    'fatsemi': '\u2a1f', # ⨟ Z NOTATION SCHEMA COMPOSITION
    'fint': '\u2a0f', # ⨏ INTEGRAL AVERAGE WITH SLASH
    'iiiint': '\u2a0c', # ⨌ QUADRUPLE INTEGRAL OPERATOR
    'iiint': '\u222d', # ∭ TRIPLE INTEGRAL
    'iint': '\u222c', # ∬ DOUBLE INTEGRAL
    'int': '\u222b', # ∫ INTEGRAL
    'oiint': '\u222f', # ∯ SURFACE INTEGRAL
    'oint': '\u222e', # ∮ CONTOUR INTEGRAL
    'ointctrclockwise': '\u2233', # ∳ ANTICLOCKWISE CONTOUR INTEGRAL
    'prod': '\u220f', # ∏ N-ARY PRODUCT
    'sqint': '\u2a16', # ⨖ QUATERNION INTEGRAL OPERATOR
    'sum': '\u2211', # ∑ N-ARY SUMMATION
    'varointclockwise': '\u2232', # ∲ CLOCKWISE CONTOUR INTEGRAL
    }
# LaTeX opening (left-hand) delimiter commands -> Unicode.
mathopen = {
    'Lbag': '\u27c5', # ⟅ LEFT S-SHAPED BAG DELIMITER
    'langle': '\u27e8', # ⟨ MATHEMATICAL LEFT ANGLE BRACKET
    'lbag': '\u27c5', # ⟅ LEFT S-SHAPED BAG DELIMITER
    'lbrace': '{', # { LEFT CURLY BRACKET
    'lbrack': '[', # [ LEFT SQUARE BRACKET
    'lceil': '\u2308', # ⌈ LEFT CEILING
    'lfloor': '\u230a', # ⌊ LEFT FLOOR
    'lgroup': '\u27ee', # ⟮ MATHEMATICAL LEFT FLATTENED PARENTHESIS
    'llbracket': '\u27e6', # ⟦ MATHEMATICAL LEFT WHITE SQUARE BRACKET
    'llcorner': '\u231e', # ⌞ BOTTOM LEFT CORNER
    'llparenthesis': '\u2987', # ⦇ Z NOTATION LEFT IMAGE BRACKET
    'ulcorner': '\u231c', # ⌜ TOP LEFT CORNER
    '{': '{', # { LEFT CURLY BRACKET
    }
mathord = {
'#': '#', # # NUMBER SIGN
'$': '$', # $ DOLLAR SIGN
'%': '%', # % PERCENT SIGN
'&': '&', # & AMPERSAND
'AC': '\u223f', # ∿ SINE WAVE
'APLcomment': '\u235d', # ⍝ APL FUNCTIONAL SYMBOL UP SHOE JOT
'APLdownarrowbox': '\u2357', # ⍗ APL FUNCTIONAL SYMBOL QUAD DOWNWARDS ARROW
'APLinput': '\u235e', # ⍞ APL FUNCTIONAL SYMBOL QUOTE QUAD
'APLinv': '\u2339', # ⌹ APL FUNCTIONAL SYMBOL QUAD DIVIDE
'APLleftarrowbox': '\u2347', # ⍇ APL FUNCTIONAL SYMBOL QUAD LEFTWARDS ARROW
'APLlog': '\u235f', # ⍟ APL FUNCTIONAL SYMBOL CIRCLE STAR
'APLrightarrowbox': '\u2348', # ⍈ APL FUNCTIONAL SYMBOL QUAD RIGHTWARDS ARROW
'APLuparrowbox': '\u2350', # ⍐ APL FUNCTIONAL SYMBOL QUAD UPWARDS ARROW
'Aries': '\u2648', # ♈ ARIES
'CIRCLE': '\u25cf', # ● BLACK CIRCLE
'CheckedBox': '\u2611', # ☑ BALLOT BOX WITH CHECK
'Diamond': '\u25c7', # ◇ WHITE DIAMOND
'Finv': '\u2132', # Ⅎ TURNED CAPITAL F
'Game': '\u2141', # ⅁ TURNED SANS-SERIF CAPITAL G
'Gemini': '\u264a', # ♊ GEMINI
'Jupiter': '\u2643', # ♃ JUPITER
'LEFTCIRCLE': '\u25d6', # ◖ LEFT HALF BLACK CIRCLE
'LEFTcircle': '\u25d0', # ◐ CIRCLE WITH LEFT HALF BLACK
'Leo': '\u264c', # ♌ LEO
'Libra': '\u264e', # ♎ LIBRA
'Mars': '\u2642', # ♂ MALE SIGN
'Mercury': '\u263f', # ☿ MERCURY
'Neptune': '\u2646', # ♆ NEPTUNE
'Pluto': '\u2647', # ♇ PLUTO
'RIGHTCIRCLE': '\u25d7', # ◗ RIGHT HALF BLACK CIRCLE
'RIGHTcircle': '\u25d1', # ◑ CIRCLE WITH RIGHT HALF BLACK
'Saturn': '\u2644', # ♄ SATURN
'Scorpio': '\u264f', # ♏ SCORPIUS
'Square': '\u2610', # ☐ BALLOT BOX
'Sun': '\u2609', # ☉ SUN
'Taurus': '\u2649', # ♉ TAURUS
'Uranus': '\u2645', # ♅ URANUS
'Venus': '\u2640', # ♀ FEMALE SIGN
'XBox': '\u2612', # ☒ BALLOT BOX WITH X
'Yup': '\u2144', # ⅄ TURNED SANS-SERIF CAPITAL Y
'_': '_', # _ LOW LINE
'angle': '\u2220', # ∠ ANGLE
'aquarius': '\u2652', # ♒ AQUARIUS
'aries': '\u2648', # ♈ ARIES
'ast': '*', # * ASTERISK
'backepsilon': '\u03f6', # ϶ GREEK REVERSED LUNATE EPSILON SYMBOL
'backprime': '\u2035', # ‵ REVERSED PRIME
'backslash': '\\', # \ REVERSE SOLIDUS
'because': '\u2235', # ∵ BECAUSE
'bigstar': '\u2605', # ★ BLACK STAR
'binampersand': '&', # & AMPERSAND
'blacklozenge': '\u2b27', # ⬧ BLACK MEDIUM LOZENGE
'blacksmiley': '\u263b', # ☻ BLACK SMILING FACE
'blacksquare': '\u25fc', # ◼ BLACK MEDIUM SQUARE
'bot': '\u22a5', # ⊥ UP TACK
'boy': '\u2642', # ♂ MALE SIGN
'cancer': '\u264b', # ♋ CANCER
'capricornus': '\u2651', # ♑ CAPRICORN
'cdots': '\u22ef', # ⋯ MIDLINE HORIZONTAL ELLIPSIS
'cent': '\xa2', # ¢ CENT SIGN
'centerdot': '\u2b1d', # ⬝ BLACK VERY SMALL SQUARE
'checkmark': '\u2713', # ✓ CHECK MARK
'circlearrowleft': '\u21ba', # ↺ ANTICLOCKWISE OPEN CIRCLE ARROW
'circlearrowright': '\u21bb', # ↻ CLOCKWISE OPEN CIRCLE ARROW
'circledR': '\xae', # ® REGISTERED SIGN
'circledcirc': '\u25ce', # ◎ BULLSEYE
'clubsuit': '\u2663', # ♣ BLACK CLUB SUIT
'complement': '\u2201', # ∁ COMPLEMENT
'dasharrow': '\u21e2', # ⇢ RIGHTWARDS DASHED ARROW
'dashleftarrow': '\u21e0', # ⇠ LEFTWARDS DASHED ARROW
'dashrightarrow': '\u21e2', # ⇢ RIGHTWARDS DASHED ARROW
'diameter': '\u2300', # ⌀ DIAMETER SIGN
'diamondsuit': '\u2662', # ♢ WHITE DIAMOND SUIT
'earth': '\u2641', # ♁ EARTH
'exists': '\u2203', # ∃ THERE EXISTS
'female': '\u2640', # ♀ FEMALE SIGN
'flat': '\u266d', # ♭ MUSIC FLAT SIGN
'forall': '\u2200', # ∀ FOR ALL
'fourth': '\u2057', # ⁗ QUADRUPLE PRIME
'frownie': '\u2639', # ☹ WHITE FROWNING FACE
'gemini': '\u264a', # ♊ GEMINI
'girl': '\u2640', # ♀ FEMALE SIGN
'heartsuit': '\u2661', # ♡ WHITE HEART SUIT
'infty': '\u221e', # ∞ INFINITY
'invneg': '\u2310', # ⌐ REVERSED NOT SIGN
'jupiter': '\u2643', # ♃ JUPITER
'ldots': '\u2026', # … HORIZONTAL ELLIPSIS
'leftmoon': '\u263e', # ☾ LAST QUARTER MOON
'leftturn': '\u21ba', # ↺ ANTICLOCKWISE OPEN CIRCLE ARROW
'leo': '\u264c', # ♌ LEO
'libra': '\u264e', # ♎ LIBRA
'lnot': '\xac', # ¬ NOT SIGN
'lozenge': '\u25ca', # ◊ LOZENGE
'male': '\u2642', # ♂ MALE SIGN
'maltese': '\u2720', # ✠ MALTESE CROSS
'mathdollar': '$', # $ DOLLAR SIGN
'measuredangle': '\u2221', # ∡ MEASURED ANGLE
'mercury': '\u263f', # ☿ MERCURY
'mho': '\u2127', # ℧ INVERTED OHM SIGN
'nabla': '\u2207', # ∇ NABLA
'natural': '\u266e', # ♮ MUSIC NATURAL SIGN
'neg': '\xac', # ¬ NOT SIGN
'neptune': '\u2646', # ♆ NEPTUNE
'nexists': '\u2204', # ∄ THERE DOES NOT EXIST
'notbackslash': '\u2340', # ⍀ APL FUNCTIONAL SYMBOL BACKSLASH BAR
'partial': '\u2202', # ∂ PARTIAL DIFFERENTIAL
'pisces': '\u2653', # ♓ PISCES
'pluto': '\u2647', # ♇ PLUTO
'pounds': '\xa3', # £ POUND SIGN
'prime': '\u2032', # ′ PRIME
'quarternote': '\u2669', # ♩ QUARTER NOTE
'rightmoon': '\u263d', # ☽ FIRST QUARTER MOON
'rightturn': '\u21bb', # ↻ CLOCKWISE OPEN CIRCLE ARROW
'sagittarius': '\u2650', # ♐ SAGITTARIUS
'saturn': '\u2644', # ♄ SATURN
'scorpio': '\u264f', # ♏ SCORPIUS
'second': '\u2033', # ″ DOUBLE PRIME
'sharp': '\u266f', # ♯ MUSIC SHARP SIGN
'sim': '~', # ~ TILDE
'slash': '/', # / SOLIDUS
'smiley': '\u263a', # ☺ WHITE SMILING FACE
'spadesuit': '\u2660', # ♠ BLACK SPADE SUIT
'spddot': '\xa8', # ¨ DIAERESIS
'sphat': '^', # ^ CIRCUMFLEX ACCENT
'sphericalangle': '\u2222', # ∢ SPHERICAL ANGLE
'sptilde': '~', # ~ TILDE
'square': '\u25fb', # ◻ WHITE MEDIUM SQUARE
'sun': '\u263c', # ☼ WHITE SUN WITH RAYS
'taurus': '\u2649', # ♉ TAURUS
'therefore': '\u2234', # ∴ THEREFORE
'third': '\u2034', # ‴ TRIPLE PRIME
'top': '\u22a4', # ⊤ DOWN TACK
'triangleleft': '\u25c5', # ◅ WHITE LEFT-POINTING POINTER
'triangleright': '\u25bb', # ▻ WHITE RIGHT-POINTING POINTER
'twonotes': '\u266b', # ♫ BEAMED EIGHTH NOTES
'uranus': '\u2645', # ♅ URANUS
'varEarth': '\u2641', # ♁ EARTH
'varnothing': '\u2205', # ∅ EMPTY SET
'virgo': '\u264d', # ♍ VIRGO
'wasylozenge': '\u2311', # ⌑ SQUARE LOZENGE
'wasytherefore': '\u2234', # ∴ THEREFORE
'yen': '\xa5', # ¥ YEN SIGN
}
# LaTeX commands that draw a stretchy symbol *over* their argument -> Unicode.
mathover = {
    'overbrace': '\u23de', # ⏞ TOP CURLY BRACKET
    'wideparen': '\u23dc', # ⏜ TOP PARENTHESIS
    }
# LaTeX radical commands (including indexed roots) -> Unicode root signs.
mathradical = {
    'sqrt': '\u221a', # √ SQUARE ROOT
    'sqrt[3]': '\u221b', # ∛ CUBE ROOT
    'sqrt[4]': '\u221c', # ∜ FOURTH ROOT
    }
mathrel = {
'Bumpeq': '\u224e', # ≎ GEOMETRICALLY EQUIVALENT TO
'Doteq': '\u2251', # ≑ GEOMETRICALLY EQUAL TO
'Downarrow': '\u21d3', # ⇓ DOWNWARDS DOUBLE ARROW
'Leftarrow': '\u21d0', # ⇐ LEFTWARDS DOUBLE ARROW
'Leftrightarrow': '\u21d4', # ⇔ LEFT RIGHT DOUBLE ARROW
'Lleftarrow': '\u21da', # ⇚ LEFTWARDS TRIPLE ARROW
'Longleftarrow': '\u27f8', # ⟸ LONG LEFTWARDS DOUBLE ARROW
'Longleftrightarrow': '\u27fa', # ⟺ LONG LEFT RIGHT DOUBLE ARROW
'Longmapsfrom': '\u27fd', # ⟽ LONG LEFTWARDS DOUBLE ARROW FROM BAR
'Longmapsto': '\u27fe', # ⟾ LONG RIGHTWARDS DOUBLE ARROW FROM BAR
'Longrightarrow': '\u27f9', # ⟹ LONG RIGHTWARDS DOUBLE ARROW
'Lsh': '\u21b0', # ↰ UPWARDS ARROW WITH TIP LEFTWARDS
'Mapsfrom': '\u2906', # ⤆ LEFTWARDS DOUBLE ARROW FROM BAR
'Mapsto': '\u2907', # ⤇ RIGHTWARDS DOUBLE ARROW FROM BAR
'Rightarrow': '\u21d2', # ⇒ RIGHTWARDS DOUBLE ARROW
'Rrightarrow': '\u21db', # ⇛ RIGHTWARDS TRIPLE ARROW
'Rsh': '\u21b1', # ↱ UPWARDS ARROW WITH TIP RIGHTWARDS
'Subset': '\u22d0', # ⋐ DOUBLE SUBSET
'Supset': '\u22d1', # ⋑ DOUBLE SUPERSET
'Uparrow': '\u21d1', # ⇑ UPWARDS DOUBLE ARROW
'Updownarrow': '\u21d5', # ⇕ UP DOWN DOUBLE ARROW
'VDash': '\u22ab', # ⊫ DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE
'Vdash': '\u22a9', # ⊩ FORCES
'Vvdash': '\u22aa', # ⊪ TRIPLE VERTICAL BAR RIGHT TURNSTILE
'apprge': '\u2273', # ≳ GREATER-THAN OR EQUIVALENT TO
'apprle': '\u2272', # ≲ LESS-THAN OR EQUIVALENT TO
'approx': '\u2248', # ≈ ALMOST EQUAL TO
'approxeq': '\u224a', # ≊ ALMOST EQUAL OR EQUAL TO
'asymp': '\u224d', # ≍ EQUIVALENT TO
'backsim': '\u223d', # ∽ REVERSED TILDE
'backsimeq': '\u22cd', # ⋍ REVERSED TILDE EQUALS
'barin': '\u22f6', # ⋶ ELEMENT OF WITH OVERBAR
'barleftharpoon': '\u296b', # ⥫ LEFTWARDS HARPOON WITH BARB DOWN BELOW LONG DASH
'barrightharpoon': '\u296d', # ⥭ RIGHTWARDS HARPOON WITH BARB DOWN BELOW LONG DASH
'between': '\u226c', # ≬ BETWEEN
'bowtie': '\u22c8', # ⋈ BOWTIE
'bumpeq': '\u224f', # ≏ DIFFERENCE BETWEEN
'circeq': '\u2257', # ≗ RING EQUAL TO
'coloneq': '\u2254', # ≔ COLON EQUALS
'cong': '\u2245', # ≅ APPROXIMATELY EQUAL TO
'corresponds': '\u2259', # ≙ ESTIMATES
'curlyeqprec': '\u22de', # ⋞ EQUAL TO OR PRECEDES
'curlyeqsucc': '\u22df', # ⋟ EQUAL TO OR SUCCEEDS
'curvearrowleft': '\u21b6', # ↶ ANTICLOCKWISE TOP SEMICIRCLE ARROW
'curvearrowright': '\u21b7', # ↷ CLOCKWISE TOP SEMICIRCLE ARROW
'dashv': '\u22a3', # ⊣ LEFT TACK
'ddots': '\u22f1', # ⋱ DOWN RIGHT DIAGONAL ELLIPSIS
'dlsh': '\u21b2', # ↲ DOWNWARDS ARROW WITH TIP LEFTWARDS
'doteq': '\u2250', # ≐ APPROACHES THE LIMIT
'doteqdot': '\u2251', # ≑ GEOMETRICALLY EQUAL TO
'downarrow': '\u2193', # ↓ DOWNWARDS ARROW
'downdownarrows': '\u21ca', # ⇊ DOWNWARDS PAIRED ARROWS
'downdownharpoons': '\u2965', # ⥥ DOWNWARDS HARPOON WITH BARB LEFT BESIDE DOWNWARDS HARPOON WITH BARB RIGHT
'downharpoonleft': '\u21c3', # ⇃ DOWNWARDS HARPOON WITH BARB LEFTWARDS
'downharpoonright': '\u21c2', # ⇂ DOWNWARDS HARPOON WITH BARB RIGHTWARDS
'downuparrows': '\u21f5', # ⇵ DOWNWARDS ARROW LEFTWARDS OF UPWARDS ARROW
'downupharpoons': '\u296f', # ⥯ DOWNWARDS HARPOON WITH BARB LEFT BESIDE UPWARDS HARPOON WITH BARB RIGHT
'drsh': '\u21b3', # ↳ DOWNWARDS ARROW WITH TIP RIGHTWARDS
'eqcirc': '\u2256', # ≖ RING IN EQUAL TO
'eqcolon': '\u2255', # ≕ EQUALS COLON
'eqsim': '\u2242', # ≂ MINUS TILDE
'eqslantgtr': '\u2a96', # ⪖ SLANTED EQUAL TO OR GREATER-THAN
'eqslantless': '\u2a95', # ⪕ SLANTED EQUAL TO OR LESS-THAN
'equiv': '\u2261', # ≡ IDENTICAL TO
'fallingdotseq': '\u2252', # ≒ APPROXIMATELY EQUAL TO OR THE IMAGE OF
'frown': '\u2322', # ⌢ FROWN
'ge': '\u2265', # ≥ GREATER-THAN OR EQUAL TO
'geq': '\u2265', # ≥ GREATER-THAN OR EQUAL TO
'geqq': '\u2267', # ≧ GREATER-THAN OVER EQUAL TO
'geqslant': '\u2a7e', # ⩾ GREATER-THAN OR SLANTED EQUAL TO
'gets': '\u2190', # ← LEFTWARDS ARROW
'gg': '\u226b', # ≫ MUCH GREATER-THAN
'ggcurly': '\u2abc', # ⪼ DOUBLE SUCCEEDS
'ggg': '\u22d9', # ⋙ VERY MUCH GREATER-THAN
'gnapprox': '\u2a8a', # ⪊ GREATER-THAN AND NOT APPROXIMATE
'gneq': '\u2a88', # ⪈ GREATER-THAN AND SINGLE-LINE NOT EQUAL TO
'gneqq': '\u2269', # ≩ GREATER-THAN BUT NOT EQUAL TO
'gnsim': '\u22e7', # ⋧ GREATER-THAN BUT NOT EQUIVALENT TO
'gtrapprox': '\u2a86', # ⪆ GREATER-THAN OR APPROXIMATE
'gtrdot': '\u22d7', # ⋗ GREATER-THAN WITH DOT
'gtreqless': '\u22db', # ⋛ GREATER-THAN EQUAL TO OR LESS-THAN
'gtreqqless': '\u2a8c', # ⪌ GREATER-THAN ABOVE DOUBLE-LINE EQUAL ABOVE LESS-THAN
'gtrless': '\u2277', # ≷ GREATER-THAN OR LESS-THAN
'gtrsim': '\u2273', # ≳ GREATER-THAN OR EQUIVALENT TO
'hash': '\u22d5', # ⋕ EQUAL AND PARALLEL TO
'hookleftarrow': '\u21a9', # ↩ LEFTWARDS ARROW WITH HOOK
'hookrightarrow': '\u21aa', # ↪ RIGHTWARDS ARROW WITH HOOK
'iddots': '\u22f0', # ⋰ UP RIGHT DIAGONAL ELLIPSIS
'impliedby': '\u27f8', # ⟸ LONG LEFTWARDS DOUBLE ARROW
'implies': '\u27f9', # ⟹ LONG RIGHTWARDS DOUBLE ARROW
'in': '\u2208', # ∈ ELEMENT OF
'le': '\u2264', # ≤ LESS-THAN OR EQUAL TO
'leftarrow': '\u2190', # ← LEFTWARDS ARROW
'leftarrowtail': '\u21a2', # ↢ LEFTWARDS ARROW WITH TAIL
'leftarrowtriangle': '\u21fd', # ⇽ LEFTWARDS OPEN-HEADED ARROW
'leftbarharpoon': '\u296a', # ⥪ LEFTWARDS HARPOON WITH BARB UP ABOVE LONG DASH
'leftharpoondown': '\u21bd', # ↽ LEFTWARDS HARPOON WITH BARB DOWNWARDS
'leftharpoonup': '\u21bc', # ↼ LEFTWARDS HARPOON WITH BARB UPWARDS
'leftleftarrows': '\u21c7', # ⇇ LEFTWARDS PAIRED ARROWS
'leftleftharpoons': '\u2962', # ⥢ LEFTWARDS HARPOON WITH BARB UP ABOVE LEFTWARDS HARPOON WITH BARB DOWN
'leftrightarrow': '\u2194', # ↔ LEFT RIGHT ARROW
'leftrightarrows': '\u21c6', # ⇆ LEFTWARDS ARROW OVER RIGHTWARDS ARROW
'leftrightarrowtriangle': '\u21ff', # ⇿ LEFT RIGHT OPEN-HEADED ARROW
'leftrightharpoon': '\u294a', # ⥊ LEFT BARB UP RIGHT BARB DOWN HARPOON
'leftrightharpoons': '\u21cb', # ⇋ LEFTWARDS HARPOON OVER RIGHTWARDS HARPOON
'leftrightsquigarrow': '\u21ad', # ↭ LEFT RIGHT WAVE ARROW
'leftslice': '\u2aa6', # ⪦ LESS-THAN CLOSED BY CURVE
'leftsquigarrow': '\u21dc', # ⇜ LEFTWARDS SQUIGGLE ARROW
'leq': '\u2264', # ≤ LESS-THAN OR EQUAL TO
'leqq': '\u2266', # ≦ LESS-THAN OVER EQUAL TO
'leqslant': '\u2a7d', # ⩽ LESS-THAN OR SLANTED EQUAL TO
'lessapprox': '\u2a85', # ⪅ LESS-THAN OR APPROXIMATE
'lessdot': '\u22d6', # ⋖ LESS-THAN WITH DOT
'lesseqgtr': '\u22da', # ⋚ LESS-THAN EQUAL TO OR GREATER-THAN
'lesseqqgtr': '\u2a8b', # ⪋ LESS-THAN ABOVE DOUBLE-LINE EQUAL ABOVE GREATER-THAN
'lessgtr': '\u2276', # ≶ LESS-THAN OR GREATER-THAN
'lesssim': '\u2272', # ≲ LESS-THAN OR EQUIVALENT TO
'lightning': '\u21af', # ↯ DOWNWARDS ZIGZAG ARROW
'll': '\u226a', # ≪ MUCH LESS-THAN
'llcurly': '\u2abb', # ⪻ DOUBLE PRECEDES
'lll': '\u22d8', # ⋘ VERY MUCH LESS-THAN
'lnapprox': '\u2a89', # ⪉ LESS-THAN AND NOT APPROXIMATE
'lneq': '\u2a87', # ⪇ LESS-THAN AND SINGLE-LINE NOT EQUAL TO
'lneqq': '\u2268', # ≨ LESS-THAN BUT NOT EQUAL TO
'lnsim': '\u22e6', # ⋦ LESS-THAN BUT NOT EQUIVALENT TO
'longleftarrow': '\u27f5', # ⟵ LONG LEFTWARDS ARROW
'longleftrightarrow': '\u27f7', # ⟷ LONG LEFT RIGHT ARROW
'longmapsfrom': '\u27fb', # ⟻ LONG LEFTWARDS ARROW FROM BAR
'longmapsto': '\u27fc', # ⟼ LONG RIGHTWARDS ARROW FROM BAR
'longrightarrow': '\u27f6', # ⟶ LONG RIGHTWARDS ARROW
'looparrowleft': '\u21ab', # ↫ LEFTWARDS ARROW WITH LOOP
'looparrowright': '\u21ac', # ↬ RIGHTWARDS ARROW WITH LOOP
'mapsfrom': '\u21a4', # ↤ LEFTWARDS ARROW FROM BAR
'mapsto': '\u21a6', # ↦ RIGHTWARDS ARROW FROM BAR
'mid': '\u2223', # ∣ DIVIDES
'models': '\u22a7', # ⊧ MODELS
'multimap': '\u22b8', # ⊸ MULTIMAP
'nLeftarrow': '\u21cd', # ⇍ LEFTWARDS DOUBLE ARROW WITH STROKE
'nLeftrightarrow': '\u21ce', # ⇎ LEFT RIGHT DOUBLE ARROW WITH STROKE
'nRightarrow': '\u21cf', # ⇏ RIGHTWARDS DOUBLE ARROW WITH STROKE
'nVDash': '\u22af', # ⊯ NEGATED DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE
'nVdash': '\u22ae', # ⊮ DOES NOT FORCE
'ncong': '\u2247', # ≇ NEITHER APPROXIMATELY NOR ACTUALLY EQUAL TO
'ne': '\u2260', # ≠ NOT EQUAL TO
'nearrow': '\u2197', # ↗ NORTH EAST ARROW
'neq': '\u2260', # ≠ NOT EQUAL TO
'ngeq': '\u2271', # ≱ NEITHER GREATER-THAN NOR EQUAL TO
'ngtr': '\u226f', # ≯ NOT GREATER-THAN
'ni': '\u220b', # ∋ CONTAINS AS MEMBER
'nleftarrow': '\u219a', # ↚ LEFTWARDS ARROW WITH STROKE
'nleftrightarrow': '\u21ae', # ↮ LEFT RIGHT ARROW WITH STROKE
'nleq': '\u2270', # ≰ NEITHER LESS-THAN NOR EQUAL TO
'nless': '\u226e', # ≮ NOT LESS-THAN
'nmid': '\u2224', # ∤ DOES NOT DIVIDE
'notasymp': '\u226d', # ≭ NOT EQUIVALENT TO
'notin': '\u2209', # ∉ NOT AN ELEMENT OF
'notowner': '\u220c', # ∌ DOES NOT CONTAIN AS MEMBER
'notslash': '\u233f', # ⌿ APL FUNCTIONAL SYMBOL SLASH BAR
'nparallel': '\u2226', # ∦ NOT PARALLEL TO
'nprec': '\u2280', # ⊀ DOES NOT PRECEDE
'npreceq': '\u22e0', # ⋠ DOES NOT PRECEDE OR EQUAL
'nrightarrow': '\u219b', # ↛ RIGHTWARDS ARROW WITH STROKE
'nsim': '\u2241', # ≁ NOT TILDE
'nsubseteq': '\u2288', # ⊈ NEITHER A SUBSET OF NOR EQUAL TO
'nsucc': '\u2281', # ⊁ DOES NOT SUCCEED
'nsucceq': '\u22e1', # ⋡ DOES NOT SUCCEED OR EQUAL
'nsupseteq': '\u2289', # ⊉ NEITHER A SUPERSET OF NOR EQUAL TO
'ntriangleleft': '\u22ea', # ⋪ NOT NORMAL SUBGROUP OF
'ntrianglelefteq': '\u22ec', # ⋬ NOT NORMAL SUBGROUP OF OR EQUAL TO
'ntriangleright': '\u22eb', # ⋫ DOES NOT CONTAIN AS NORMAL SUBGROUP
'ntrianglerighteq': '\u22ed', # ⋭ DOES NOT CONTAIN AS NORMAL SUBGROUP OR EQUAL
'nvDash': '\u22ad', # ⊭ NOT TRUE
'nvdash': '\u22ac', # ⊬ DOES NOT PROVE
'nwarrow': '\u2196', # ↖ NORTH WEST ARROW
'owns': '\u220b', # ∋ CONTAINS AS MEMBER
'parallel': '\u2225', # ∥ PARALLEL TO
'perp': '\u27c2', # ⟂ PERPENDICULAR
'pitchfork': '\u22d4', # ⋔ PITCHFORK
'prec': '\u227a', # ≺ PRECEDES
'precapprox': '\u2ab7', # ⪷ PRECEDES ABOVE ALMOST EQUAL TO
'preccurlyeq': '\u227c', # ≼ PRECEDES OR EQUAL TO
'preceq': '\u2aaf', # ⪯ PRECEDES ABOVE SINGLE-LINE EQUALS SIGN
'precnapprox': '\u2ab9', # ⪹ PRECEDES ABOVE NOT ALMOST EQUAL TO
'precnsim': '\u22e8', # ⋨ PRECEDES BUT NOT EQUIVALENT TO
'precsim': '\u227e', # ≾ PRECEDES OR EQUIVALENT TO
'propto': '\u221d', # ∝ PROPORTIONAL TO
'restriction': '\u21be', # ↾ UPWARDS HARPOON WITH BARB RIGHTWARDS
'rightarrow': '\u2192', # → RIGHTWARDS ARROW
'rightarrowtail': '\u21a3', # ↣ RIGHTWARDS ARROW WITH TAIL
'rightarrowtriangle': '\u21fe', # ⇾ RIGHTWARDS OPEN-HEADED ARROW
'rightbarharpoon': '\u296c', # ⥬ RIGHTWARDS HARPOON WITH BARB UP ABOVE LONG DASH
'rightharpoondown': '\u21c1', # ⇁ RIGHTWARDS HARPOON WITH BARB DOWNWARDS
'rightharpoonup': '\u21c0', # ⇀ RIGHTWARDS HARPOON WITH BARB UPWARDS
'rightleftarrows': '\u21c4', # ⇄ RIGHTWARDS ARROW OVER LEFTWARDS ARROW
'rightleftharpoon': '\u294b', # ⥋ LEFT BARB DOWN RIGHT BARB UP HARPOON
'rightleftharpoons': '\u21cc', # ⇌ RIGHTWARDS HARPOON OVER LEFTWARDS HARPOON
'rightrightarrows': '\u21c9', # ⇉ RIGHTWARDS PAIRED ARROWS
'rightrightharpoons': '\u2964', # ⥤ RIGHTWARDS HARPOON WITH BARB UP ABOVE RIGHTWARDS HARPOON WITH BARB DOWN
'rightslice': '\u2aa7', # ⪧ GREATER-THAN CLOSED BY CURVE
'rightsquigarrow': '\u21dd', # ⇝ RIGHTWARDS SQUIGGLE ARROW
'risingdotseq': '\u2253', # ≓ IMAGE OF OR APPROXIMATELY EQUAL TO
'searrow': '\u2198', # ↘ SOUTH EAST ARROW
'sim': '\u223c', # ∼ TILDE OPERATOR
'simeq': '\u2243', # ≃ ASYMPTOTICALLY EQUAL TO
'smallfrown': '\u2322', # ⌢ FROWN
'smallsmile': '\u2323', # ⌣ SMILE
'smile': '\u2323', # ⌣ SMILE
'sqsubset': '\u228f', # ⊏ SQUARE IMAGE OF
'sqsubseteq': '\u2291', # ⊑ SQUARE IMAGE OF OR EQUAL TO
'sqsupset': '\u2290', # ⊐ SQUARE ORIGINAL OF
'sqsupseteq': '\u2292', # ⊒ SQUARE ORIGINAL OF OR EQUAL TO
'subset': '\u2282', # ⊂ SUBSET OF
'subseteq': '\u2286', # ⊆ SUBSET OF OR EQUAL TO
'subseteqq': '\u2ac5', # ⫅ SUBSET OF ABOVE EQUALS SIGN
'subsetneq': '\u228a', # ⊊ SUBSET OF WITH NOT EQUAL TO
'subsetneqq': '\u2acb', # ⫋ SUBSET OF ABOVE NOT EQUAL TO
'succ': '\u227b', # ≻ SUCCEEDS
'succapprox': '\u2ab8', # ⪸ SUCCEEDS ABOVE ALMOST EQUAL TO
'succcurlyeq': '\u227d', # ≽ SUCCEEDS OR EQUAL TO
'succeq': '\u2ab0', # ⪰ SUCCEEDS ABOVE SINGLE-LINE EQUALS SIGN
'succnapprox': '\u2aba', # ⪺ SUCCEEDS ABOVE NOT ALMOST EQUAL TO
'succnsim': '\u22e9', # ⋩ SUCCEEDS BUT NOT EQUIVALENT TO
'succsim': '\u227f', # ≿ SUCCEEDS OR EQUIVALENT TO
'supset': '\u2283', # ⊃ SUPERSET OF
'supseteq': '\u2287', # ⊇ SUPERSET OF OR EQUAL TO
'supseteqq': '\u2ac6', # ⫆ SUPERSET OF ABOVE EQUALS SIGN
'supsetneq': '\u228b', # ⊋ SUPERSET OF WITH NOT EQUAL TO
'supsetneqq': '\u2acc', # ⫌ SUPERSET OF ABOVE NOT EQUAL TO
'swarrow': '\u2199', # ↙ SOUTH WEST ARROW
'to': '\u2192', # → RIGHTWARDS ARROW
'trianglelefteq': '\u22b4', # ⊴ NORMAL SUBGROUP OF OR EQUAL TO
'triangleq': '\u225c', # ≜ DELTA EQUAL TO
'trianglerighteq': '\u22b5', # ⊵ CONTAINS AS NORMAL SUBGROUP OR EQUAL TO
'twoheadleftarrow': '\u219e', # ↞ LEFTWARDS TWO HEADED ARROW
'twoheadrightarrow': '\u21a0', # ↠ RIGHTWARDS TWO HEADED ARROW
'uparrow': '\u2191', # ↑ UPWARDS ARROW
'updownarrow': '\u2195', # ↕ UP DOWN ARROW
'updownarrows': '\u21c5', # ⇅ UPWARDS ARROW LEFTWARDS OF DOWNWARDS ARROW
'updownharpoons': '\u296e', # ⥮ UPWARDS HARPOON WITH BARB LEFT BESIDE DOWNWARDS HARPOON WITH BARB RIGHT
'upharpoonleft': '\u21bf', # ↿ UPWARDS HARPOON WITH BARB LEFTWARDS
'upharpoonright': '\u21be', # ↾ UPWARDS HARPOON WITH BARB RIGHTWARDS
'upuparrows': '\u21c8', # ⇈ UPWARDS PAIRED ARROWS
'upupharpoons': '\u2963', # ⥣ UPWARDS HARPOON WITH BARB LEFT BESIDE UPWARDS HARPOON WITH BARB RIGHT
'vDash': '\u22a8', # ⊨ TRUE
'varpropto': '\u221d', # ∝ PROPORTIONAL TO
'vartriangleleft': '\u22b2', # ⊲ NORMAL SUBGROUP OF
'vartriangleright': '\u22b3', # ⊳ CONTAINS AS NORMAL SUBGROUP
'vdash': '\u22a2', # ⊢ RIGHT TACK
'vdots': '\u22ee', # ⋮ VERTICAL ELLIPSIS
}
# LaTeX commands that draw a stretchy symbol *under* their argument -> Unicode.
mathunder = {
    'underbrace': '\u23df', # ⏟ BOTTOM CURLY BRACKET
    }
# LaTeX spacing commands -> Unicode space characters.
# ':' is the \: (medium math space) command.
space = {
    ':': '\u205f', # MEDIUM MATHEMATICAL SPACE
    'medspace': '\u205f', # MEDIUM MATHEMATICAL SPACE
    'quad': '\u2001', # EM QUAD
    }
| superdesk/Live-Blog | documentor/libraries/docutils-0.9.1-py3.2/docutils/math/tex2unichar.py | Python | agpl-3.0 | 34,480 | [
"Bowtie"
] | 804a16f261c0f9a63d8839405cdd299bc1fe792ecdd20dcf2a0d93a247bf63e2 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10_input
# Command-line flags shared by the CIFAR-10 train/eval scripts.
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
                           """Path to the CIFAR-10 data directory.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.
# If a model is trained with multiple GPU's prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
# Source archive for the CIFAR-10 binary data set.
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
  """Attach a histogram and a sparsity summary to an activation tensor.

  Any 'tower_[0-9]*/' prefix (added when training on multiple GPUs) is
  stripped from the op name so summaries from all towers share one name
  on TensorBoard.

  Args:
    x: Tensor
  Returns:
    nothing
  """
  display_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.histogram_summary(display_name + '/activations', x)
  tf.scalar_summary(display_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
  """Create (or reuse) a variable pinned to host (CPU) memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable
  Returns:
    Variable Tensor
  """
  # Pin the variable to the CPU so multi-GPU towers can share it.
  with tf.device('/cpu:0'):
    return tf.get_variable(name, shape, initializer=initializer)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Create a truncated-normal-initialized variable, optionally with L2 decay.

  The variable is placed on CPU memory (see _variable_on_cpu). When *wd* is
  truthy, an L2 penalty scaled by *wd* is registered in the 'losses'
  collection; a falsy *wd* (None or 0.0) adds no penalty term.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.
  Returns:
    Variable Tensor
  """
  initializer = tf.truncated_normal_initializer(stddev=stddev)
  var = _variable_on_cpu(name, shape, initializer)
  if wd:
    tf.add_to_collection('losses',
                         tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss'))
  return var
def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  binary_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  return cifar10_input.distorted_inputs(data_dir=binary_dir,
                                        batch_size=FLAGS.batch_size)
def inputs(eval_data):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  binary_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  return cifar10_input.inputs(eval_data=eval_data,
                              data_dir=binary_dir,
                              batch_size=FLAGS.batch_size)
def inference(images):
  """Build the CIFAR-10 model.

  Architecture: conv1 -> pool1 -> norm1 -> conv2 -> norm2 -> pool2 ->
  local3 (fully connected + ReLU) -> local4 (fully connected + ReLU) ->
  linear softmax layer (logits; the softmax itself is applied in loss()).

  Args:
    images: Images returned from distorted_inputs() or inputs().
  Returns:
    Logits.
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training runs.
  # If we only ran this model on a single GPU, we could simplify this function
  # by replacing all instances of tf.get_variable() with tf.Variable().
  #
  # conv1: 5x5 convolution, 3 input channels -> 64 feature maps, no decay.
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                         stddev=1e-4, wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope.name)
    _activation_summary(conv1)
  # pool1: 3x3 max pooling with stride 2 (overlapping pooling).
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  # norm1: local response normalization (AlexNet-style).
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')
  # conv2: 5x5 convolution, 64 -> 64 feature maps.
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64],
                                         stddev=1e-4, wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope.name)
    _activation_summary(conv2)
  # norm2: note normalization precedes pooling here, unlike stage 1.
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')
  # local3: first fully connected layer (384 units, with weight decay).
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    dim = 1
    for d in pool2.get_shape()[1:].as_list():
      dim *= d
    reshape = tf.reshape(pool2, [FLAGS.batch_size, dim])
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    _activation_summary(local3)
  # local4: second fully connected layer (192 units).
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
    _activation_summary(local4)
  # softmax, i.e. softmax(WX + b) -- linear layer only; returns raw logits.
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1/192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
    _activation_summary(softmax_linear)
  return softmax_linear
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Adds a summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]
  Returns:
    Loss tensor of type float.
  """
  # Average cross-entropy over the batch, then register it in the 'losses'
  # collection so it is combined with the weight-decay terms already there.
  xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits, tf.cast(labels, tf.int64), name='cross_entropy_per_example')
  tf.add_to_collection('losses', tf.reduce_mean(xent, name='cross_entropy'))
  # Total loss = cross entropy + all L2 weight-decay terms.
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving averages for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Maintain an exponential moving average of every individual loss plus
  # the total loss.
  ema = tf.train.ExponentialMovingAverage(0.9, name='avg')
  tracked_losses = tf.get_collection('losses') + [total_loss]
  loss_averages_op = ema.apply(tracked_losses)
  # Summarize both the raw value (tagged '(raw)') and its moving average
  # (tagged with the original loss name).
  for loss_tensor in tracked_losses:
    tf.scalar_summary(loss_tensor.op.name + ' (raw)', loss_tensor)
    tf.scalar_summary(loss_tensor.op.name, ema.average(loss_tensor))
  return loss_averages_op
def train(total_loss, global_step):
  """Train CIFAR-10 model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.
  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  tf.scalar_summary('learning_rate', lr)
  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)
  # Compute gradients only after the loss averages have been updated.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)
  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.histogram_summary(var.op.name, var)
  # Add histograms for gradients.
  for grad, var in grads:
    # BUG FIX: was 'if grad:', which evaluates a Tensor in boolean context
    # (a TypeError); compute_gradients() yields None for variables with no
    # gradient, and the intent is simply to skip those.
    if grad is not None:
      tf.histogram_summary(var.op.name + '/gradients', grad)
  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())
  # The train op completes once gradients are applied and averages updated.
  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')
  return train_op
def maybe_download_and_extract():
  """Download and extract the tarball from Alex's website.

  The archive is fetched into FLAGS.data_dir (created if missing); the
  download is skipped when the file is already present. The tarball is
  then extracted into the same directory.
  """
  dest_directory = FLAGS.data_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      # Rewrite a single terminal line with the download percentage.
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
          float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,
                                             reporthook=_progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  # BUG FIX: close the tar file instead of leaking the open handle.
  tar = tarfile.open(filepath, 'r:gz')
  try:
    tar.extractall(dest_directory)
  finally:
    tar.close()
| 4Quant/tensorflow | tensorflow/models/image/cifar10/cifar10.py | Python | apache-2.0 | 13,637 | [
"Gaussian"
] | dec283074eb411598ddd81ec8a4bca6fec0c2c18871a84224224311579a6293e |
import logging
import httplib
import httplib2
import os
import random
import sys
import time
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL)
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../gittalk/assets/secrets/client_secrets.json')
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the {{ Cloud Console }}
{{ https://cloud.google.com/console }}
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
def get_authenticated_service(args):
  """Return a YouTube API client authorized with cached OAuth2 credentials."""
  oauth_flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
                                       scope=YOUTUBE_UPLOAD_SCOPE,
                                       message=MISSING_CLIENT_SECRETS_MESSAGE)
  # Credentials are cached per-script under ~/.gittalk so the browser-based
  # authorization flow only runs when they are absent or invalid.
  credential_store = Storage(os.path.join(os.environ['HOME'], '.gittalk',
                                          "%s-oauth2.json" % sys.argv[0]))
  credentials = credential_store.get()
  if credentials is None or credentials.invalid:
    credentials = run_flow(oauth_flow, credential_store, args)
  authorized_http = credentials.authorize(httplib2.Http())
  return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
               http=authorized_http)
def initialize_upload(youtube, options):
  """Build the video resource body and start the resumable upload."""
  keyword_tags = options.keywords.split(",") if options.keywords else None
  request_body = {
    'snippet': {
      'title': options.title,
      'description': options.description,
      'tags': keyword_tags,
      'categoryId': options.category,
    },
    'status': {
      'privacyStatus': options.privacyStatus,
    },
  }
  # chunksize=-1 uploads the whole file in a single HTTP request (still
  # resumable if it fails). On App Engine or Python older than 2.6 a small
  # chunk size such as 1024 * 1024 (1 megabyte) is recommended instead.
  media = MediaFileUpload(options.file, chunksize=-1, resumable=True)
  # Call the API's videos.insert method to create and upload the video.
  insert_request = youtube.videos().insert(
    part=",".join(request_body.keys()),
    body=request_body,
    media_body=media)
  resumable_upload(insert_request)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
# print "Uploading file..."
status, response = insert_request.next_chunk()
if response is not None:
if 'id' in response:
# print "Video id '%s' was successfully uploaded." % response['id']
print "Video: https://www.youtube.com/watch?v={}".format(response['id'])
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
print error
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print "Sleeping %f seconds and then retrying..." % sleep_seconds
time.sleep(sleep_seconds)
def upload_to_youtube(file_path, title, description):
argparser.add_argument("--file", required=True, help="Video file to upload")
argparser.add_argument("--title", help="Video title", default="Test Title")
argparser.add_argument("--description", help="Video description",
default="Test Description")
argparser.add_argument("--category", default="22",
help="Numeric video category. " +
"See https://developers.google.com/youtube/v3/docs/videoCategories/list")
argparser.add_argument("--keywords", help="Video keywords, comma separated",
default="")
argparser.add_argument("--privacyStatus", choices=VALID_PRIVACY_STATUSES,
default=VALID_PRIVACY_STATUSES[2], help="Video privacy status.")
args = argparser.parse_args(['--file', file_path, '--title', title, '--description', description])
if not os.path.exists(args.file):
exit("Please specify a valid file using the --file= parameter.")
youtube = get_authenticated_service(args)
try:
initialize_upload(youtube, args)
except HttpError, e:
print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content) | sandeepraju/git-talk | gittalk/upload.py | Python | bsd-3-clause | 7,092 | [
"VisIt"
] | 05b27898be6a93f68829c6a4b55c11f5af077367ca0fcf34c09441d1d5b67625 |
#generic python modules
import argparse
import operator
from operator import itemgetter
import sys, os, shutil
import os.path
##########################################################################################
# RETRIEVE USER INPUTS
##########################################################################################
#=========================================================================================
# create parser
#=========================================================================================
version_nb="0.0.2"
parser = argparse.ArgumentParser(prog='ff_contacts_sizes', usage='', add_help=False, formatter_class=argparse.RawDescriptionHelpFormatter, description=\
'''
************************************************
v''' + version_nb + '''
author: Jean Helie (jean.helie@bioch.ox.ac.uk)
git: https://github.com/jhelie/ff_contacts_sizes
DOI:
************************************************
[ DESCRIPTION ]
This script identities the size of the TM clusters each flip-flopping lipids has been
in contact with.
The identified protein clusters are considered to be transmembrane only if the closest
lipid headgroup neighbours to the cluster particles are all within the same leaflet.
A file listing the flip-flopping lipids must be supplied with the --flipflops option.
Each line of this file should follow the format (time in ns):
-> 'resname,resid,starting_leaflet,z_bead,t_start,t_end'
where starting_leaflet is either 'upper' or 'lower' - e.g. 'POPC,145,lower,PO4,150,500'.
The 'z_bead' particle is used to track the position of the lipid.
[ REQUIREMENTS ]
The following python modules are needed :
- MDAnalysis
- numpy
- scipy
- networkX
[ NOTES ]
1. It's a good idea to pre-process the trajectory first and to only output the relevant
particles (e.g. no water and no cholesterol).
2. Identification of the bilayer leaflets can be controlled via two options.
(a) beads
By default, the particles taken into account to define leaflet depend on the
forcefield (which can be set via the --forcefield option) and are as follows:
-> Martini: 'name PO4 or name PO3 or name B1A'
Note that only lipids which contain one of the beads mentioned in the selection string
will be taken into account. If you wish to specify your own selection string (e.g. to
choose different beads or add a bead not in the default list in order to take into
account a particular lipid specie) you can do so by supplying a file via the --beads
option. This file should contain a single line that can be passed as the argument
to MDAnalysis selectAtoms() routine and should not contain any quotation marks, e.g.:
-> name PO4 or name PO3 or name B1A or name AM1
(b) leaflet finding method
By default leaflets are identified using the MDAnalysis LeafletFinder routine and the
the optimum cutoff to identify 2 lipids groups is determined using the optimize_cutoff
routine.
This optimisation process can take time in large systems and you can specify your own
cutoff value to skip this step. For instance to use a 15 Angstrom cutoff value:
-> '--leaflet 15'
In very large systems (more then ~50,000 phospholipids) LeafletFinder (or rather the
networkX module that it relies on) can fail. To avoid this you can choose not to use
this routine by specifying:
-> '--leaflet large'
In this case lipids whose headgroups z value is above the average lipids z value will
be considered to make up the upper leaflet and those whose headgroups z value is below
the average will be considered to be in the lower leaflet.
This means that the bilayer should be as flat as possible in the gro file supplied in
order to get a meaningful outcome.
3. Proteins are detected automatically but you can specify an input file to define your
own selection with the --proteins option.
In this case the supplied file should contain on each line a protein selection string
that can be passed as the argument of the MDAnalysis selectAtoms() routine - for
instance 'bynum 1:344'.
[ USAGE ]
Option Default Description
-----------------------------------------------------
-f : structure file [.gro] (required)
-x : trajectory file [.xtc] (required)
-o : name of output folder
-b : beginning time (ns) (the bilayer must exist by then!)
-e : ending time (ns)
-t 1 : process every t-frames
Lipids identification
-----------------------------------------------------
--flipflops : input file with flipflopping lipids, see note 4
--beads : leaflet identification technique, see note 2(a)
--leaflets optimise: leaflet identification technique, see note 2(b)
Protein clusters identification and contacts
-----------------------------------------------------
--proteins : protein selection file, (optional, see note 6)
--pp_cutoff 6 : cutoff distance for protein-protein contact (Angstrom)
--pl_cutoff 6 : cutoff distance for protein-lipid contact (Angstrom)
Other options
-----------------------------------------------------
--version : show version number and exit
-h, --help : show this menu and exit
''')
#data options
parser.add_argument('-f', nargs=1, dest='grofilename', default=['no'], help=argparse.SUPPRESS, required=True)
parser.add_argument('-x', nargs=1, dest='xtcfilename', default=['no'], help=argparse.SUPPRESS)
parser.add_argument('-o', nargs=1, dest='output_folder', default=['no'], help=argparse.SUPPRESS)
parser.add_argument('-b', nargs=1, dest='t_start', default=[-1], type=int, help=argparse.SUPPRESS)
parser.add_argument('-e', nargs=1, dest='t_end', default=[10000000000000], type=int, help=argparse.SUPPRESS)
parser.add_argument('-t', nargs=1, dest='frames_dt', default=[1], type=int, help=argparse.SUPPRESS)
#lipids identification options
parser.add_argument('--beads', nargs=1, dest='beadsfilename', default=['no'], help=argparse.SUPPRESS)
parser.add_argument('--flipflops', nargs=1, dest='selection_file_ff', default=['no'], help=argparse.SUPPRESS)
parser.add_argument('--leaflets', nargs=1, dest='cutoff_leaflet', default=['optimise'], help=argparse.SUPPRESS)
#protein options
parser.add_argument('--algorithm', dest='m_algorithm', choices=['min'], default='min', help=argparse.SUPPRESS)
parser.add_argument('--proteins', nargs=1, dest='selection_file_prot', default=['auto'], help=argparse.SUPPRESS)
parser.add_argument('--pp_cutoff', nargs=1, dest='cutoff_pp', default=[6], type=float, help=argparse.SUPPRESS)
parser.add_argument('--pl_cutoff', nargs=1, dest='cutoff_pl', default=[6], type=float, help=argparse.SUPPRESS)
#other options
parser.add_argument('--version', action='version', version='%(prog)s v' + version_nb, help=argparse.SUPPRESS)
parser.add_argument('-h','--help', action='help', help=argparse.SUPPRESS)
#=========================================================================================
# store inputs
#=========================================================================================
#parse user inputs
#-----------------
args = parser.parse_args()
#data options
args.grofilename = args.grofilename[0]
args.xtcfilename = args.xtcfilename[0]
args.output_folder = args.output_folder[0]
args.t_start = args.t_start[0]
args.t_end = args.t_end[0]
args.frames_dt = args.frames_dt[0]
#lipids identification options
args.beadsfilename = args.beadsfilename[0]
args.cutoff_leaflet = args.cutoff_leaflet[0]
args.selection_file_ff = args.selection_file_ff[0]
#radial and protein clusters options
args.selection_file_prot = args.selection_file_prot[0]
args.cutoff_pp = args.cutoff_pp[0]
args.cutoff_pl = args.cutoff_pl[0]
#process options
#---------------
global lipids_ff_nb
lipids_ff_nb = 0
#leaflet identification
if args.cutoff_leaflet != "large" and args.cutoff_leaflet != "optimise":
try:
args.cutoff_leaflet = float(args.cutoff_leaflet)
except:
print "Error: the argument of the --leaflets option should be a number or 'large', see note 2"
sys.exit(1)
#=========================================================================================
# import modules (doing it now otherwise might crash before we can display the help menu!)
#=========================================================================================
#generic science modules
try:
import math
except:
print "Error: you need to install the maths module."
sys.exit(1)
try:
import numpy as np
except:
print "Error: you need to install the numpy module."
sys.exit(1)
try:
import scipy as sp
import scipy.stats
except:
print "Error: you need to install the scipy module."
sys.exit(1)
try:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.colors as mcolors
mcolorconv = mcolors.ColorConverter()
import matplotlib.cm as cm #colours library
import matplotlib.ticker
from matplotlib.ticker import MaxNLocator
from matplotlib.font_manager import FontProperties
fontP=FontProperties()
except:
print "Error: you need to install the matplotlib module."
sys.exit(1)
try:
import pylab as plt
except:
print "Error: you need to install the pylab module."
sys.exit(1)
#MDAnalysis module
try:
import MDAnalysis
from MDAnalysis import *
import MDAnalysis.analysis
import MDAnalysis.analysis.leaflet
import MDAnalysis.analysis.distances
#set MDAnalysis to use periodic boundary conditions
MDAnalysis.core.flags['use_periodic_selections'] = True
MDAnalysis.core.flags['use_KDTree_routines'] = False
except:
print "Error: you need to install the MDAnalysis module first. See http://mdanalysis.googlecode.com"
sys.exit(1)
#=========================================================================================
# sanity check
#=========================================================================================
if not os.path.isfile(args.grofilename):
print "Error: file " + str(args.grofilename) + " not found."
sys.exit(1)
if args.selection_file_ff != "no" and not os.path.isfile(args.selection_file_ff):
print "Error: file " + str(args.selection_file_ff) + " not found."
sys.exit(1)
if args.selection_file_prot != "auto" and not os.path.isfile(args.selection_file_prot):
print "Error: file " + str(args.selection_file_prot) + " not found."
sys.exit(1)
if args.beadsfilename != "no" and not os.path.isfile(args.beadsfilename):
print "Error: file " + str(args.beadsfilename) + " not found."
sys.exit(1)
if args.t_end < args.t_start:
print "Error: the starting time (" + str(args.t_start) + "ns) for analysis is later than the ending time (" + str(args.t_end) + "ns)."
sys.exit(1)
if args.xtcfilename == "no":
if '-t' in sys.argv:
print "Error: -t option specified but no xtc file specified."
sys.exit(1)
elif '-b' in sys.argv:
print "Error: -b option specified but no xtc file specified."
sys.exit(1)
elif '-e' in sys.argv:
print "Error: -e option specified but no xtc file specified."
sys.exit(1)
elif '--smooth' in sys.argv:
print "Error: --smooth option specified but no xtc file specified."
sys.exit(1)
elif not os.path.isfile(args.xtcfilename):
print "Error: file " + str(args.xtcfilename) + " not found."
sys.exit(1)
#=========================================================================================
# create folders and log file
#=========================================================================================
if args.output_folder == "no":
args.output_folder = "ff_ctct_size_" + args.xtcfilename[:-4]
if os.path.isdir(args.output_folder):
print "Error: folder " + str(args.output_folder) + " already exists, choose a different output name via -o."
sys.exit(1)
else:
os.mkdir(args.output_folder)
#create log
#----------
filename_log=os.getcwd() + '/' + str(args.output_folder) + '/ff_contacts_sizes.log'
output_log=open(filename_log, 'w')
output_log.write("[ff_contacts_sizes v" + str(version_nb) + "]\n")
output_log.write("\nThis folder and its content were created using the following command:\n\n")
tmp_log="python ff_contacts_sizes.py"
for c in sys.argv[1:]:
tmp_log+=" " + c
output_log.write(tmp_log + "\n")
output_log.close()
#copy input files
#----------------
if args.selection_file_ff != "no":
shutil.copy2(args.selection_file_ff,args.output_folder + "/")
if args.selection_file_prot != "no" and args.selection_file_prot != "auto":
shutil.copy2(args.selection_file_prot,args.output_folder + "/")
if args.beadsfilename != "no":
shutil.copy2(args.beadsfilename,args.output_folder + "/")
##########################################################################################
# FUNCTIONS DEFINITIONS
##########################################################################################
#=========================================================================================
# data loading
#=========================================================================================
def set_lipids_beads():
global leaflet_sele_string
#set default beads
leaflet_beads = {}
leaflet_beads['martini'] = "name PO4 or name PO3 or name B1A"
leaflet_sele_string = leaflet_beads['martini']
#use users input
if args.beadsfilename != "no":
with open(args.beadsfilename) as f:
lines = f.readlines()
if len(lines) > 1:
print "Error: the file " + str(args.beadsfilename) + " should conly ontain 1 line (" + str(len(lines)) + " found), see note 2(a)."
sys.exit(1)
else:
if lines[0][-1] == "\n":
lines[0] = lines[0][:-1]
leaflet_sele_string = lines[0]
return
def load_MDA_universe(): #DONE
global U
global all_atoms
global nb_atoms
global nb_frames_xtc
global frames_to_process
global frames_to_write
global nb_frames_to_process
global f_start
global radial_bins
global radial_bin_max
global radial_radius_max
f_start = 0
if args.xtcfilename == "no":
print "\nLoading file..."
U = Universe(args.grofilename)
all_atoms = U.selectAtoms("all")
nb_atoms = all_atoms.numberOfAtoms()
nb_frames_xtc = 1
frames_to_process = [0]
frames_to_write = [True]
nb_frames_to_process = 1
else:
print "\nLoading trajectory..."
U = Universe(args.grofilename, args.xtcfilename)
all_atoms = U.selectAtoms("all")
nb_atoms = all_atoms.numberOfAtoms()
nb_frames_xtc = U.trajectory.numframes
U.trajectory.rewind()
#sanity check
if U.trajectory[nb_frames_xtc-1].time/float(1000) < args.t_start:
print "Error: the trajectory duration (" + str(U.trajectory.time/float(1000)) + "ns) is shorted than the starting stime specified (" + str(args.t_start) + "ns)."
sys.exit(1)
if U.trajectory.numframes < args.frames_dt:
print "Warning: the trajectory contains fewer frames (" + str(nb_frames_xtc) + ") than the frame step specified (" + str(args.frames_dt) + ")."
#create list of index of frames to process
if args.t_start > 0:
for ts in U.trajectory:
progress = '\r -skipping frame ' + str(ts.frame) + '/' + str(nb_frames_xtc) + ' '
sys.stdout.flush()
sys.stdout.write(progress)
if ts.time/float(1000) > args.t_start:
f_start = ts.frame-1
break
print ''
if (nb_frames_xtc - f_start)%args.frames_dt == 0:
tmp_offset = 0
else:
tmp_offset = 1
frames_to_process = map(lambda f:f_start + args.frames_dt*f, range(0,(nb_frames_xtc - f_start)//args.frames_dt+tmp_offset))
nb_frames_to_process = len(frames_to_process)
#check the leaflet selection string is valid
test_beads = U.selectAtoms(leaflet_sele_string)
if test_beads.numberOfAtoms() == 0:
print "Error: invalid selection string '" + str(leaflet_sele_string) + "'"
print "-> no particles selected."
sys.exit(1)
return
def identify_ff():
 """Parse the flip-flopping lipids selection file (--selection_file_ff).

 Each line must hold 6 comma-separated fields:
 resname,resnum,starting leaflet (upper|lower),bead name,t_start,t_end (ns).
 Populates the lipids_ff_* globals (per-lipid info, per-direction index
 lists, MDAnalysis selections) and appends an exclusion clause to
 leaflet_sele_string so flip-flopping lipids are not counted as leaflet
 lipids. Exits on any malformed line.
 """
 print "\nReading selection file for flipflopping lipids..."
 #declare variables
 global lipids_ff_nb
 global lipids_ff_info
 global lipids_ff_resnames
 global lipids_ff_leaflet
 global lipids_ff_u2l_index
 global lipids_ff_l2u_index
 global lipids_sele_ff
 global lipids_sele_ff_bead
 global lipids_sele_ff_bonds
 global lipids_sele_ff_VMD_string
 global leaflet_sele_string
 lipids_ff_nb = 0
 lipids_ff_info = {}
 lipids_ff_resnames = []
 lipids_ff_leaflet = []
 lipids_ff_u2l_index = []
 lipids_ff_l2u_index = []
 lipids_sele_ff = {}
 lipids_sele_ff_bead = {}
 lipids_sele_ff_bonds = {}
 lipids_sele_ff_VMD_string={}
 with open(args.selection_file_ff) as f:
  lines = f.readlines()
 lipids_ff_nb = len(lines)
 print " -found " + str(lipids_ff_nb) + " flipflopping lipids"
 #open an exclusion clause; closed with ")" after the loop
 leaflet_sele_string = leaflet_sele_string + " and not ("
 for l_index in range(0,lipids_ff_nb):
  line = lines[l_index]
  #strip the trailing newline, if any
  if line[-1] == "\n":
   line = line[:-1]
  try:
   line_content = line.split(',')
   if len(line_content) != 6:
    print "Error: wrong format for line " + str(l_index+1) + " in " + str(args.selection_file_ff) + ", see note 4 in bilayer_perturbations --help."
    print " ->", line
    sys.exit(1)
   #read current lipid details
   lip_resname = line_content[0]
   lip_resnum = int(line_content[1])
   lip_leaflet = line_content[2]
   lip_bead = line_content[3]
   lip_tstart = float(line_content[4])
   lip_tend = float(line_content[5])
   if lip_tend == 0:
    lip_tend = U.trajectory.totaltime/float(1000) #this is to handle lipids which haven't finished flip-flopping
   lipids_ff_info[l_index] = [lip_resname,lip_resnum,lip_leaflet,lip_bead,lip_tstart,lip_tend]
   #update: starting leaflets
   if lip_leaflet not in lipids_ff_leaflet:
    lipids_ff_leaflet.append(lip_leaflet)
   #update: index in directional lists
   if lip_leaflet == "upper":
    lipids_ff_u2l_index.append(l_index)
   elif lip_leaflet == "lower":
    lipids_ff_l2u_index.append(l_index)
   else:
    print "->unknown starting leaflet '" + str(lip_leaflet) + "'."
    sys.exit(1)
   #update: resnames
   if lip_resname not in lipids_ff_resnames:
    lipids_ff_resnames.append(lip_resname)
   #update: leaflet selection string
   if l_index==0:
    leaflet_sele_string+="(resname " + str(lip_resname) + " and resnum " + str(lip_resnum) + ")"
   else:
    leaflet_sele_string+=" or (resname " + str(lip_resname) + " and resnum " + str(lip_resnum) + ")"
   #create selections
   lipids_sele_ff[l_index] = U.selectAtoms("resname " + str(lip_resname) + " and resnum " + str(lip_resnum) + " and name " + str(lipids_ff_info[l_index][3]))
   lipids_sele_ff_bead[l_index] = lipids_sele_ff[l_index].selectAtoms("name " + str(lip_bead))
   lipids_sele_ff_VMD_string[l_index]="resname " + str(lipids_ff_info[l_index][0]) + " and resid " + str(lipids_ff_info[l_index][1])
   if lipids_sele_ff[l_index].numberOfAtoms() == 0:
    print "Error:"
    print line
    print "-> no such lipid found."
    sys.exit(1)
  #NOTE(review): this bare except also catches the SystemExit raised by the
  #sys.exit(1) calls above, so those specific errors get re-reported with
  #this generic message before exiting -- consider "except Exception:"
  except:
   print "Error: invalid flipflopping lipid selection string on line " + str(l_index+1) + ": '" + line + "'"
   sys.exit(1)
 #close the exclusion clause opened before the loop
 leaflet_sele_string+=")"
 return
def identify_proteins():
print "\nIdentifying proteins..."
#import modules
global nx
import networkx as nx
#declare variables
global proteins_nb
global proteins_sele
global proteins_sele_string
global proteins_sele_string_VMD
global proteins_boundaries
global proteins_nb_atoms
global nb_atom_per_protein
proteins_nb = 0
proteins_sele = {}
proteins_sele_string = {}
proteins_sele_string_VMD = {}
proteins_boundaries = {}
#check for protein presence
if U.selectAtoms("protein").numberOfAtoms() == 0:
print "Error: no protein detected."
sys.exit(1)
#case: selection file provided
if args.selection_file_prot != "auto":
print " -reading protein selection file..."
with open(args.selection_file_prot) as f:
lines = f.readlines()
proteins_nb=len(lines)
proteins_sele["all"] = MDAnalysis.core.AtomGroup.AtomGroup([])
for p_index in range(0,proteins_nb):
line = lines[p_index]
if line[-1] == "\n":
line = line[:-1]
progress='\r -creating proteins selections: ' + str(p_index+1) + '/' + str(proteins_nb) + ' '
sys.stdout.flush()
sys.stdout.write(progress)
try:
print " p[" + str(p_index) + "]=U.selectAtoms(" + line + ")"
proteins_sele[p_index] = U.selectAtoms(line[1:-2])
proteins_sele["all"] += proteins_sele[p_index]
proteins_boundaries[p_index] = [proteins_sele[p_index].indices()[0] + 1, proteins_sele[p_index].indices()[proteins_sele[p_index].numberOfAtoms()]+1]
proteins_sele_string[p_index] = "bynum " + str(proteins_boundaries[p_index][0]) + ":" + str(proteins_boundaries[p_index][1])
proteins_sele_string_VMD[p_index] = "serial " + str(proteins_boundaries[p_index][0]) + " to " + str(proteins_boundaries[p_index][1])
except:
print "Error:"
print line
print "->invalid selection string."
sys.exit(1)
proteins_nb_atoms = proteins_sele["all"].numberOfAtoms()
#case: automatic detection
else:
#declare local variables
proteins_ca_nb = {}
proteins_ca_nmax = 0
proteins_ca_group = {}
proteins_boundaries = {}
#retrieve 1st atom info
proteins_sele["all"] = U.selectAtoms("protein")
proteins_nb_atoms = proteins_sele["all"].numberOfAtoms()
prec_resnum = proteins_sele["all"][0].resnum
prec_segid = proteins_sele["all"][0].segid
prec_atnum = proteins_sele["all"][0].number+1
prev_atnum = proteins_sele["all"][0].number+1 #atom corresponding to the beginning of the current protein
#browse following atoms
for a in proteins_sele["all"][1:]:
delta_res = a.resnum-prec_resnum
delta_atm = a.number+1-prec_atnum
if delta_res < 0 or a.segid != prec_segid or delta_atm > 1:
proteins_boundaries[proteins_nb] = [prev_atnum,prec_atnum]
proteins_nb += 1
prev_atnum = a.number + 1
prec_resnum = a.resnum
prec_atnum = a.number + 1
prec_segid = a.segid
#add last protein section
if prev_atnum < proteins_sele["all"][proteins_nb_atoms-1].number:
proteins_boundaries[proteins_nb] = [prev_atnum,proteins_sele["all"][proteins_nb_atoms-1].number+1]
proteins_nb += 1
#display results
print " -protein found:", proteins_nb
print " -protein boundaries (atom numbers): see protein.sele file"
#create protein selections and save into a txt file
filename_sele=os.getcwd() + '/' + str(args.output_folder) + '/proteins.sele'
output_stat = open(filename_sele, 'w')
output_stat.write("#This file was generated by the script bilayer_perturbations v" + str(version_nb) +"\n")
output_stat.write("#The lines below correspond to MDAnalysis section string, e.g. U.selectAtoms(LINE)\n")
output_stat.write("\n")
for p_index in range(0, proteins_nb):
progress='\r -creating proteins selections: ' + str(p_index+1) + '/' + str(proteins_nb) + ' '
sys.stdout.flush()
sys.stdout.write(progress)
proteins_sele_string[p_index] = "bynum " + str(proteins_boundaries[p_index][0]) + ":" + str(proteins_boundaries[p_index][1])
proteins_sele_string_VMD[p_index] = "serial " + str(proteins_boundaries[p_index][0]) + " to " + str(proteins_boundaries[p_index][1])
proteins_sele[p_index] = U.selectAtoms(proteins_sele_string[p_index])
output_stat.write(proteins_sele_string[p_index] + "\n")
output_stat.close()
nb_atom_per_protein = proteins_sele[0].numberOfAtoms()
print ""
return
def identify_leaflets():
print "\nIdentifying leaflets..."
#declare variables
global leaflet_sele
global leaflet_sele_atoms
leaflet_sele = {}
leaflet_sele_atoms = {}
for l in ["lower","upper","both"]:
leaflet_sele[l] = {}
leaflet_sele_atoms[l] = {}
#check the leaflet selection string is valid
test_beads = U.selectAtoms(leaflet_sele_string)
if test_beads.numberOfAtoms() == 0:
print "Error: invalid selection string '" + str(leaflet_sele_string) + "'"
print "-> no particles selected."
sys.exit(1)
#use LeafletFinder:
if args.cutoff_leaflet != 'large':
if args.cutoff_leaflet == 'optimise':
print " -optimising cutoff..."
cutoff_value = MDAnalysis.analysis.leaflet.optimize_cutoff(U, leaflet_sele_string)
L = MDAnalysis.analysis.leaflet.LeafletFinder(U, leaflet_sele_string, cutoff_value[0])
else:
L = MDAnalysis.analysis.leaflet.LeafletFinder(U, leaflet_sele_string, args.cutoff_leaflet)
if np.shape(L.groups())[0]<2:
print "Error: imposssible to identify 2 leaflets."
sys.exit(1)
if L.group(0).centerOfGeometry()[2] > L.group(1).centerOfGeometry()[2]:
leaflet_sele["upper"]["all species"] = L.group(0)
leaflet_sele["lower"]["all species"] = L.group(1)
else:
leaflet_sele["upper"]["all species"] = L.group(1)
leaflet_sele["lower"]["all species"] = L.group(0)
leaflet_sele["both"]["all species"] = leaflet_sele["lower"]["all species"] + leaflet_sele["upper"]["all species"]
if np.shape(L.groups())[0] == 2:
print " -found 2 leaflets: ", leaflet_sele["upper"]["all species"].numberOfResidues(), '(upper) and ', leaflet_sele["lower"]["all species"].numberOfResidues(), '(lower) lipids'
else:
other_lipids=0
for g in range(2, np.shape(L.groups())[0]):
other_lipids += L.group(g).numberOfResidues()
print " -found " + str(np.shape(L.groups())[0]) + " groups: " + str(leaflet_sele["upper"]["all species"].numberOfResidues()) + "(upper), " + str(leaflet_sele["lower"]["all species"].numberOfResidues()) + "(lower) and " + str(other_lipids) + " (others) lipids respectively"
#use cof:
else:
leaflet_sele["both"]["all species"] = U.selectAtoms(leaflet_sele_string)
tmp_lipids_avg_z = leaflet_sele["both"]["all species"].centerOfGeometry()[2]
leaflet_sele["upper"]["all species"] = leaflet_sele["both"]["all species"].selectAtoms("prop z > " + str(tmp_lipids_avg_z))
leaflet_sele["lower"]["all species"] = leaflet_sele["both"]["all species"].selectAtoms("prop z < " + str(tmp_lipids_avg_z))
print " -found 2 leaflets: ", leaflet_sele["upper"]["all species"].numberOfResidues(), '(upper) and ', leaflet_sele["lower"]["all species"].numberOfResidues(), '(lower) lipids'
#store full selections
for l in ["lower","upper","both"]:
leaflet_sele_atoms[l]["all species"] = leaflet_sele[l]["all species"].residues.atoms
return
#=========================================================================================
# data structures
#=========================================================================================
def data_struct_time():
    """Allocate the per-frame bookkeeping arrays (frame number and time stamp)."""
    global frames_nb, frames_time
    frames_nb = np.zeros(nb_frames_to_process)
    frames_time = np.zeros(nb_frames_to_process)
    return
def data_ff_contacts():
    """Allocate the per-lipid contact counters.

    For each flip-flopping lipid, one array of length proteins_nb per
    distribution: raw counts (nb) and percentages (pc), each split into
    contacts during vs outside the flip-flop time window.
    """
    global lipids_ff_contacts_during_nb
    global lipids_ff_contacts_outside_nb
    global lipids_ff_contacts_during_pc
    global lipids_ff_contacts_outside_pc
    lipids_ff_contacts_during_nb = {l_index: np.zeros(proteins_nb) for l_index in range(0, lipids_ff_nb)}
    lipids_ff_contacts_outside_nb = {l_index: np.zeros(proteins_nb) for l_index in range(0, lipids_ff_nb)}
    lipids_ff_contacts_during_pc = {l_index: np.zeros(proteins_nb) for l_index in range(0, lipids_ff_nb)}
    lipids_ff_contacts_outside_pc = {l_index: np.zeros(proteins_nb) for l_index in range(0, lipids_ff_nb)}
    return
#=========================================================================================
# core functions
#=========================================================================================
def get_z_coords(f_index):
 """Store z positions of the leaflets and of each flip-flopping lipid bead
 for frame f_index, all relative to the bilayer midplane.

 NOTE(review): writes into z_upper/z_lower/z_ff, which are not allocated
 anywhere in this file and the function is never called here -- confirm
 before using.
 """
 #leaflet z = z of the centre of geometry of that leaflet's beads
 tmp_zu = leaflet_sele["upper"]["all species"].centerOfGeometry()[2]
 tmp_zl = leaflet_sele["lower"]["all species"].centerOfGeometry()[2]
 #midplane halfway between the two leaflets
 tmp_zm = tmp_zl + (tmp_zu - tmp_zl)/float(2)
 z_upper[f_index] = tmp_zu - tmp_zm
 z_lower[f_index] = tmp_zl - tmp_zm
 for l in range(0,lipids_ff_nb):
  z_ff[l][f_index] = lipids_sele_ff_bead[l].centerOfGeometry()[2] - tmp_zm
 return
def get_distances(box_dim):
 """Return the proteins_nb x proteins_nb matrix of inter-protein distances.

 Depending on --m_algorithm the entry (i, j) is either the minimum
 inter-atomic distance between proteins i and j ("min") or the distance
 between their pbc-aware centres of geometry; both use periodic boundaries
 via MDAnalysis.analysis.distances.distance_array.
 """
 #method: use minimum distance between proteins
 #---------------------------------------------
 if args.m_algorithm == "min":
  #pre-process: get protein coordinates
  tmp_proteins_coords = np.zeros((proteins_nb, nb_atom_per_protein, 3))
  for p_index in range(0, proteins_nb):
   tmp_proteins_coords[p_index,:] = proteins_sele[p_index].coordinates()
  #store min distance between each proteins
  #(initialised to a large value so the diagonal never forms a cluster link)
  dist_matrix = 100000 * np.ones((proteins_nb,proteins_nb))
  #fill the upper triangle row by row, then mirror it (matrix is symmetric)
  for n in range(proteins_nb,1,-1):
   dist_matrix[proteins_nb-n,proteins_nb-n+1:proteins_nb] = map(lambda pp: np.min(MDAnalysis.analysis.distances.distance_array(np.float32(tmp_proteins_coords[proteins_nb-n,:]), np.float32(tmp_proteins_coords[pp,:]), box_dim)), range(proteins_nb-n+1,proteins_nb))
   dist_matrix[proteins_nb-n+1:proteins_nb,proteins_nb-n] = dist_matrix[proteins_nb-n,proteins_nb-n+1:proteins_nb]
 #method: use distance between cog
 #--------------------------------
 else:
  tmp_proteins_cogs = np.asarray(map(lambda p_index: calculate_cog(proteins_sele[p_index].coordinates(), box_dim), range(0,proteins_nb)))
  dist_matrix = MDAnalysis.analysis.distances.distance_array(np.float32(tmp_proteins_cogs), np.float32(tmp_proteins_cogs), box_dim)
 return dist_matrix
def calculate_cog(tmp_coords, box_dim):
    """Return the centre of geometry of a set of coordinates under periodic
    boundary conditions.

    Each axis is mapped onto a circle, the angles are averaged and mapped
    back, so that a cluster split across a periodic boundary still yields a
    meaningful centre.
    see: http://en.wikipedia.org/wiki/Center_of_mass#Systems_with_periodic_boundary_conditions

    tmp_coords -- (N, 3) array of positions
    box_dim    -- box dimensions; only the first 3 entries (x, y, z) are used
    """
    #fix: dropped the unused local tmp_nb_atoms
    cog_coord = np.zeros(3)
    for n in range(0, 3):
        #map coordinates onto angles on the unit circle
        tet = tmp_coords[:, n] * 2 * math.pi / float(box_dim[n])
        xsi = np.cos(tet)
        zet = np.sin(tet)
        #circular mean, shifted back into [0, 2*pi)
        tet_avg = math.atan2(-np.average(zet), -np.average(xsi)) + math.pi
        cog_coord[n] = tet_avg * box_dim[n] / float(2 * math.pi)
    return cog_coord
def detect_clusters_connectivity(dist, box_dim):
    """Group proteins into clusters by distance connectivity.

    dist    -- matrix of inter-protein distances
    box_dim -- unused here (kept for interface symmetry)

    Two proteins closer than --cutoff_pp are connected; each connected
    component of the resulting graph is one cluster.
    """
    adjacency = (dist < args.cutoff_pp)
    return nx.connected_components(nx.Graph(adjacency))
def identify_ff_contacts(box_dim, f_time):
 """Update the contact counters between flip-flopping lipids and TM clusters.

 For the current frame: detects protein clusters, classifies each cluster as
 transmembrane or not, then counts the atoms of TM clusters within
 --cutoff_pl of each flip-flopping lipid, binned by cluster size and by
 whether f_time falls inside that lipid's flip-flop time window.
 """
 global lipids_ff_contacts_during_nb
 global lipids_ff_contacts_outside_nb
 #initialise dictionary allowing to retrieve cluster size
 dict_protatoms_2_clustersize = {}
 #retrieve coordinates arrays (pre-processing saves time as MDAnalysis functions are quite slow and we need to make such calls a few times)
 tmp_lip_coords = {l: leaflet_sele[l]["all species"].coordinates() for l in ["lower","upper"]}
 #identify clusters
 #=================
 clusters = detect_clusters_connectivity(get_distances(box_dim), box_dim)
 #process each cluster
 #====================
 c_sele_all = MDAnalysis.core.AtomGroup.AtomGroup([])
 for cluster in clusters:
  #create selection for current cluster and only process it if it's TM (find closest PO4 particles for each particles of clusters, if all are in the same leaflet then it's surfacic [NB: this is done at the CLUSTER level (the same criteria at the protein level would probably fail)])
  c_sele = MDAnalysis.core.AtomGroup.AtomGroup([])
  for p_index in cluster:
   c_sele += proteins_sele[p_index]
  c_sele_all += c_sele
  tmp_c_sele_coordinates = c_sele.coordinates()
  #per cluster atom: distance to the nearest bead of each leaflet
  dist_min_lower = np.min(MDAnalysis.analysis.distances.distance_array(tmp_c_sele_coordinates, tmp_lip_coords["lower"], box_dim), axis = 1)
  dist_min_upper = np.min(MDAnalysis.analysis.distances.distance_array(tmp_c_sele_coordinates, tmp_lip_coords["upper"], box_dim), axis = 1)
  dist = dist_min_upper - dist_min_lower
  #store size of TM cluster
  #(mixed signs in dist => atoms on both sides => transmembrane)
  if np.size(dist[dist>0]) != np.size(dist) and np.size(dist[dist>0]) !=0:
   c_size = np.size(cluster)
   for a in c_sele.atoms:
    dict_protatoms_2_clustersize[a.number] = c_size
  #store -1 for non TM cluster
  else:
   for a in c_sele.atoms:
    dict_protatoms_2_clustersize[a.number] = -1
 #process each ff lipid
 #=====================
 for l_index in range(0,lipids_ff_nb):
  #detect contacts
  ff_lip_and_prot_TM = lipids_sele_ff[l_index] + c_sele_all
  around_lip_prot_TM = ff_lip_and_prot_TM.selectAtoms("around " + str(args.cutoff_pl) + " (resname " + str(lipids_ff_info[l_index][0]) + " and resid " + str(lipids_ff_info[l_index][1]) + ")")
  #get size of cluster in contact if any
  if around_lip_prot_TM.numberOfAtoms() > 0:
   #all atoms within the cutoff belong to one cluster, so the first atom's
   #cluster size is representative
   tmp_size = dict_protatoms_2_clustersize[around_lip_prot_TM.atoms[0].number]
   tmp_nbct = around_lip_prot_TM.numberOfAtoms()
   #store it if TM
   if tmp_size > 0:
    #outside vs during the lipid's flip-flop window [t_start, t_end]
    if f_time < lipids_ff_info[l_index][4] or f_time > lipids_ff_info[l_index][5]:
     lipids_ff_contacts_outside_nb[l_index][tmp_size - 1] += tmp_nbct
    else:
     lipids_ff_contacts_during_nb[l_index][tmp_size - 1] += tmp_nbct
 return
#=========================================================================================
# outputs
#=========================================================================================
def _write_ff_contacts_xvg(l_index, filename_xvg):
    """Write the contact size distributions of one flip-flopping lipid to an
    xvg file.

    Rows (first column is a label): 1st/2nd = % of contacts outside/during
    the flip-flop, 3rd/4th = raw contact counts outside/during the flip-flop;
    the remaining 9 columns are cluster sizes 1..9.
    """
    output_xvg = open(filename_xvg, 'w')
    output_xvg.write("# [ff_contacts_sizes v" + str(version_nb) + "]\n")
    output_xvg.write("# The data is organised as follows:\n")
    output_xvg.write("# -1st line: distribution (%) of contacts before/after flipflop\n")
    output_xvg.write("# -2nd line: distribution (%) of contacts during flipflop\n")
    output_xvg.write("# -3rd line: distribution (nb) of contacts before/after flipflop\n")
    output_xvg.write("# -4th line: distribution (nb) of contacts during flipflop\n")
    #NOTE(review): title looks copy-pasted from another script -- confirm
    output_xvg.write("@ title \"Evolution of bilayer thickness by lipid specie\"\n")
    output_xvg.write("@ xaxis label \"cluster size\"\n")
    output_xvg.write("@ yaxis label \"% of contacts\"\n")
    output_xvg.write("@ autoscale ONREAD xaxes\n")
    output_xvg.write("@ TYPE XY\n")
    output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
    output_xvg.write("@ legend on\n")
    output_xvg.write("@ legend box on\n")
    output_xvg.write("@ legend loctype view\n")
    output_xvg.write("@ legend 0.98, 0.8\n")
    output_xvg.write("@ legend length 9\n")
    #one legend entry per cluster size column
    for c in range(0, 9):
        output_xvg.write("@ s" + str(c) + " legend \"" + str(c + 1) + "\"\n")
    #data rows: label "0" = before/after flipflop, "1" = during flipflop
    for row_label, tmp_data in [("0", lipids_ff_contacts_outside_pc), ("1", lipids_ff_contacts_during_pc), ("0", lipids_ff_contacts_outside_nb), ("1", lipids_ff_contacts_during_nb)]:
        results = row_label
        for c in range(0, 9):
            results += " " + str(round(tmp_data[l_index][c], 2))
        output_xvg.write(results + "\n")
    output_xvg.close()
    return
def write_xvg():
    """Write one xvg file per flip-flopping lipid (u2l then l2u direction).

    fix: the upper->lower and lower->upper branches previously duplicated
    ~50 lines each and had drifted apart (the l2u branch wrote the @ title
    line before the # comment lines); both directions now share one helper
    and emit identical headers.
    """
    for l_index in lipids_ff_u2l_index:
        _write_ff_contacts_xvg(l_index, os.getcwd() + '/' + str(args.output_folder) + '/u2l_' + str(lipids_ff_info[l_index][0]) + "_" + str(lipids_ff_info[l_index][1]) + '.xvg')
    for l_index in lipids_ff_l2u_index:
        _write_ff_contacts_xvg(l_index, os.getcwd() + '/' + str(args.output_folder) + '/l2u_' + str(lipids_ff_info[l_index][0]) + "_" + str(lipids_ff_info[l_index][1]) + '.xvg')
    return
##########################################################################################
# ALGORITHM
##########################################################################################
#=========================================================================================
#process inputs
#=========================================================================================
#data loading
set_lipids_beads()
load_MDA_universe()
if args.selection_file_ff != "no":
 identify_ff()
identify_proteins()
identify_leaflets()
#create data structures
print "\nInitialising data structures..."
data_struct_time()
data_ff_contacts()
#=========================================================================================
# generate data
#=========================================================================================
print "\nCalculating sizes sampled by flip-flopping lipids..."
for f_index in range(0,nb_frames_to_process):
 ts = U.trajectory[frames_to_process[f_index]]
 #stop once the requested end time is reached
 #(entries of frames_nb/frames_time beyond this point stay zero)
 if ts.time/float(1000) > args.t_end:
  break
 progress = '\r -processing frame ' + str(ts.frame) + '/' + str(nb_frames_xtc) + '        '
 sys.stdout.flush()
 sys.stdout.write(progress)
 #frame properties
 f_time = ts.time/float(1000)
 f_nb = ts.frame
 frames_nb[f_index] = f_nb
 frames_time[f_index] = f_time
 box_dim = U.trajectory.ts.dimensions
 #process ff lipids
 identify_ff_contacts(box_dim, f_time)
print ''
#=========================================================================================
# process data
#=========================================================================================
print "\nCalculating statistics..."
#convert raw contact counts into percentages
#NOTE(review): if a lipid registered no contacts at all, np.sum(...) is 0 and
#the division yields inf/nan values (numpy warning, no exception) -- confirm
#downstream handling
for l_index in range(0,lipids_ff_nb):
 lipids_ff_contacts_during_pc[l_index] = lipids_ff_contacts_during_nb[l_index] *100 / float(np.sum(lipids_ff_contacts_during_nb[l_index]))
 lipids_ff_contacts_outside_pc[l_index] = lipids_ff_contacts_outside_nb[l_index] *100 / float(np.sum(lipids_ff_contacts_outside_nb[l_index]))
#=========================================================================================
# produce outputs
#=========================================================================================
print "\nWriting outputs..."
write_xvg()
#=========================================================================================
# exit
#=========================================================================================
print "\nFinished successfully! Check output in ./" + args.output_folder + "/"
print ""
sys.exit(0)
| jhelie/ff_contacts_sizes | ff_contacts_sizes.py | Python | gpl-2.0 | 39,696 | [
"MDAnalysis"
] | e4a88dc93f889b69150c14aca90665c9fb5ac56c8969bfdd9de6ebde22f9d66c |
#! /usr/bin/env python3
from vasppy.summary import potcar_spec
import argparse
def parse_command_line_arguments(argv=None):
    """Parse command-line options for the POTCAR specification script.

    Args:
        argv (list(str), optional): argument strings to parse. Defaults to
            None, in which case argparse falls back to ``sys.argv[1:]``,
            preserving the previous no-argument behaviour. Passing an
            explicit list makes the function testable.

    Returns:
        argparse.Namespace: parsed arguments with attributes ``potcar``
            (filename, default ``'POTCAR'``) and ``hash`` (bool).
    """
    parser = argparse.ArgumentParser( description='Generate POTCAR specification based on hashing individual pseudopotential strings' )
    parser.add_argument('potcar', help="filename of the VASP POTCAR to be processed", nargs='?', default='POTCAR' )
    parser.add_argument('--hash', help="return the md5 hashes of the individual pseudopotential strings", action='store_true')
    args = parser.parse_args(argv)
    return args
def main():
    """Print one line per POTCAR entry, ``<label> <spec>``, optionally
    suffixed with the md5 hash of the pseudopotential string (``--hash``)."""
    options = parse_command_line_arguments()
    hashes = {label: md5hash for label, md5hash in potcar_spec(options.potcar, return_hashes=True).items()} if options.hash else {}
    for label, spec in potcar_spec(options.potcar).items():
        suffix = f' md5:{hashes[label]}' if options.hash else ''
        print(f'{label} {spec}{suffix}')
# entry point: run only when executed as a script, not on import
if __name__ == '__main__':
    main()
| bjmorgan/vasppy | vasppy/scripts/potcar_spec.py | Python | mit | 953 | [
"VASP"
] | 425fb62750c867384a122e41586c52d9d73c197b313fa7f6adbb4c2a81be24a4 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Module for Latent Semantic Analysis (aka Latent Semantic Indexing) in Python.
Implements fast truncated SVD (Singular Value Decomposition). The SVD
decomposition can be updated with new observations at any time, for an online,
incremental, memory-efficient training.
This module actually contains several algorithms for decomposition of large corpora, a
combination of which effectively and transparently allows building LSI models for:
* corpora much larger than RAM: only constant memory is needed, independent of
the corpus size
* corpora that are streamed: documents are only accessed sequentially, no
random access
* corpora that cannot be even temporarily stored: each document can only be
seen once and must be processed immediately (one-pass algorithm)
* distributed computing for very large corpora, making use of a cluster of
machines
Wall-clock `performance on the English Wikipedia <http://radimrehurek.com/gensim/wiki.html>`_
(2G corpus positions, 3.2M documents, 100K features, 0.5G non-zero entries in the final TF-IDF matrix),
requesting the top 400 LSI factors:
====================================================== ============ ==================
algorithm serial distributed
====================================================== ============ ==================
one-pass merge algorithm 5h14m 1h41m
multi-pass stochastic algo (with 2 power iterations) 5h39m N/A [1]_
====================================================== ============ ==================
*serial* = Core 2 Duo MacBook Pro 2.53Ghz, 4GB RAM, libVec
*distributed* = cluster of four logical nodes on three physical machines, each
with dual core Xeon 2.0GHz, 4GB RAM, ATLAS
.. [1] The stochastic algo could be distributed too, but most time is already spent
reading/decompressing the input from disk in its 4 passes. The extra network
traffic due to data distribution across cluster nodes would likely make it
*slower*.
"""
import logging
import sys
import numpy
import scipy.linalg
import scipy.sparse
from scipy.sparse import sparsetools
from gensim import interfaces, matutils, utils
from six import iterkeys
from six.moves import xrange
# module-level logger; handlers/levels are configured by the host application
logger = logging.getLogger(__name__)
# accuracy defaults for the multi-pass stochastic algo
P2_EXTRA_DIMS = 100 # set to `None` for dynamic P2_EXTRA_DIMS=k
# number of power iterations (higher = more accurate, slower)
P2_EXTRA_ITERS = 2
def clip_spectrum(s, k, discard=0.001):
    """
    Decide how many of the requested `k` factors are actually worth keeping,
    given the eigenvalue spectrum `s`.

    The tail of the spectrum whose relative combined mass falls below
    min(`discard`, 1/k) is treated as numerically spurious and dropped; the
    returned count never exceeds `k`.
    """
    # relative mass of the spectrum remaining after each successive eigenvalue
    remaining = numpy.abs(1.0 - numpy.cumsum(s / numpy.sum(s)))
    threshold = min(discard, 1.0 / k)
    # keep one factor more than the number of positions still above threshold
    significant = 1 + len(numpy.where(remaining > threshold)[0])
    kept = min(k, significant)
    logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)",
                kept, 100 * remaining[kept - 1])
    return kept
def asfarray(a, name=''):
    """Return `a` in FORTRAN (column-major) order, converting only if needed;
    already F-contiguous arrays are returned unchanged."""
    if a.flags.f_contiguous:
        return a
    logger.debug("converting %s array %s to FORTRAN order", a.shape, name)
    return numpy.asfortranarray(a)
def ascarray(a, name=''):
    """Return `a` in C (row-major) contiguous order, converting only if
    needed; already contiguous arrays are returned unchanged."""
    if a.flags.contiguous:
        return a
    logger.debug("converting %s array %s to C order", a.shape, name)
    return numpy.ascontiguousarray(a)
class Projection(utils.SaveLoad):
    """
    An in-core truncated SVD projection: holds the left singular vectors `u`
    and singular values `s` of a decomposed corpus, and knows how to merge
    another Projection into itself (the basis for incremental/distributed LSI).
    """
    def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS, extra_dims=P2_EXTRA_DIMS):
        """
        Construct the (U, S) projection from a corpus `docs`. The projection can
        be later updated by merging it with another Projection via `self.merge()`.

        This is the class taking care of the 'core math'; interfacing with corpora,
        splitting large corpora into chunks and merging them etc. is done through
        the higher-level `LsiModel` class.

        `m` is the number of features (terms), `k` the number of requested
        factors. If `docs` is None, an empty projection is created (to be
        filled via `merge()`). `power_iters` and `extra_dims` tune the
        accuracy of the stochastic SVD.
        """
        self.m, self.k = m, k
        self.power_iters = power_iters
        self.extra_dims = extra_dims
        if docs is not None:
            # base case decomposition: given a job `docs`, compute its decomposition,
            # *in-core*.
            if not use_svdlibc:
                # stochastic multi-pass SVD; chunksize=maxsize => one in-core chunk
                u, s = stochastic_svd(
                    docs, k, chunksize=sys.maxsize,
                    num_terms=m, power_iters=self.power_iters,
                    extra_dims=self.extra_dims)
            else:
                try:
                    import sparsesvd
                except ImportError:
                    raise ImportError("`sparsesvd` module requested but not found; run `easy_install sparsesvd`")
                logger.info("computing sparse SVD of %s matrix", str(docs.shape))
                if not scipy.sparse.issparse(docs):
                    docs = matutils.corpus2csc(docs)
                ut, s, vt = sparsesvd.sparsesvd(docs, k + 30) # ask for extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
                u = ut.T
                del ut, vt
            # drop the numerically-spurious tail of the spectrum
            k = clip_spectrum(s**2, self.k)
            self.u = u[:, :k].copy()
            self.s = s[:k].copy()
        else:
            # empty projection; to be populated later through merge()
            self.u, self.s = None, None
    def empty_like(self):
        """Return a new, empty Projection with the same shape and accuracy settings."""
        return Projection(self.m, self.k, power_iters=self.power_iters, extra_dims=self.extra_dims)
    def merge(self, other, decay=1.0):
        """
        Merge this Projection with another.

        The content of `other` is destroyed in the process, so pass this function a
        copy of `other` if you need it further.

        `decay` < 1.0 down-weights this projection's singular values, so that
        older observations are gradually forgotten.
        """
        if other.u is None:
            # the other projection is empty => do nothing
            return
        if self.u is None:
            # we are empty => result of merge is the other projection, whatever it is
            self.u = other.u.copy()
            self.s = other.s.copy()
            return
        if self.m != other.m:
            raise ValueError("vector space mismatch: update is using %s features, expected %s" %
                             (other.m, self.m))
        logger.info("merging projections: %s + %s", str(self.u.shape), str(other.u.shape))
        m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
        # TODO Maybe keep the bases as elementary reflectors, without
        # forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis ond basis*component.
        # But how to do that in scipy? And is it fast(er)?
        # find component of u2 orthogonal to u1
        logger.debug("constructing orthogonal component")
        self.u = asfarray(self.u, 'self.u')
        c = numpy.dot(self.u.T, other.u)
        self.u = ascarray(self.u, 'self.u')
        other.u -= numpy.dot(self.u, c)
        other.u = [other.u] # do some reference magic and call qr_destroy, to save RAM
        q, r = matutils.qr_destroy(other.u) # q, r = QR(component)
        assert not other.u
        # find the rotation that diagonalizes r
        k = numpy.bmat([[numpy.diag(decay * self.s), numpy.multiply(c, other.s)],
                        [matutils.pad(numpy.array([]).reshape(0, 0), min(m, n2), n1), numpy.multiply(r, other.s)]])
        logger.debug("computing SVD of %s dense matrix", k.shape)
        try:
            # in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge'.
            # for these early versions of numpy, catch the error and try to compute
            # SVD again, but over k*k^T.
            # see http://www.mail-archive.com/numpy-discussion@scipy.org/msg07224.html and
            # bug ticket http://projects.scipy.org/numpy/ticket/706
            # sdoering: replaced numpy's linalg.svd with scipy's linalg.svd:
            u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False) # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper for partial svd/eigendecomp in numpy :( //sdoering: maybe there is one in scipy?
        except scipy.linalg.LinAlgError:
            logger.error("SVD(A) failed; trying SVD(A * A^T)")
            u_k, s_k, _ = scipy.linalg.svd(numpy.dot(k, k.T), full_matrices=False) # if this fails too, give up with an exception
            s_k = numpy.sqrt(s_k) # go back from eigen values to singular values
        k = clip_spectrum(s_k**2, self.k)
        u1_k, u2_k, s_k = numpy.array(u_k[:n1, :k]), numpy.array(u_k[n1:, :k]), s_k[:k]
        # update & rotate current basis U = [U, U']*[U1_k, U2_k]
        logger.debug("updating orthonormal basis U")
        self.s = s_k
        self.u = ascarray(self.u, 'self.u')
        self.u = numpy.dot(self.u, u1_k)
        q = ascarray(q, 'q')
        q = numpy.dot(q, u2_k)
        self.u += q
        # make each column of U start with a non-negative number (to force canonical decomposition)
        if self.u.shape[0] > 0:
            for i in xrange(self.u.shape[1]):
                if self.u[0, i] < 0.0:
                    self.u[:, i] *= -1.0
        # diff = numpy.dot(self.u.T, self.u) - numpy.eye(self.u.shape[1])
        # logger.info('orth error after=%f' % numpy.sum(diff * diff))
#endclass Projection
class LsiModel(interfaces.TransformationABC):
    """
    Objects of this class allow building and maintaining a model for Latent
    Semantic Indexing (also known as Latent Semantic Analysis).
    The main methods are:
    1. constructor, which initializes the projection into latent topics space,
    2. the ``[]`` method, which returns representation of any input document in the
    latent space,
    3. `add_documents()` for incrementally updating the model with new documents.
    The left singular vectors are stored in `lsi.projection.u`, singular values
    in `lsi.projection.s`. Right singular vectors can be reconstructed from the output
    of `lsi[training_corpus]`, if needed. See also FAQ [2]_.
    Model persistency is achieved via its load/save methods.
    .. [2] https://github.com/piskvorky/gensim/wiki/Recipes-&-FAQ#q4-how-do-you-output-the-u-s-vt-matrices-of-lsi
    """
    def __init__(self, corpus=None, num_topics=200, id2word=None, chunksize=20000,
                 decay=1.0, distributed=False, onepass=True,
                 power_iters=P2_EXTRA_ITERS, extra_samples=P2_EXTRA_DIMS):
        """
        `num_topics` is the number of requested factors (latent dimensions).
        After the model has been trained, you can estimate topics for an
        arbitrary, unseen document, using the ``topics = self[document]`` dictionary
        notation. You can also add new training documents, with ``self.add_documents``,
        so that training can be stopped and resumed at any time, and the
        LSI transformation is available at any point.
        If you specify a `corpus`, it will be used to train the model. See the
        method `add_documents` for a description of the `chunksize` and `decay` parameters.
        Turn `onepass` off to force a multi-pass stochastic algorithm.
        `power_iters` and `extra_samples` affect the accuracy of the stochastic
        multi-pass algorithm, which is used either internally (`onepass=True`) or
        as the front-end algorithm (`onepass=False`). Increasing the number of
        power iterations improves accuracy, but lowers performance. See [3]_ for
        some hard numbers.
        Turn on `distributed` to enable distributed computing.
        Example:
        >>> lsi = LsiModel(corpus, num_topics=10)
        >>> print(lsi[doc_tfidf]) # project some document into LSI space
        >>> lsi.add_documents(corpus2) # update LSI on additional documents
        >>> print(lsi[doc_tfidf])
        .. [3] http://nlp.fi.muni.cz/~xrehurek/nips/rehurek_nips.pdf
        """
        self.id2word = id2word
        self.num_topics = int(num_topics)
        self.chunksize = int(chunksize)
        self.decay = float(decay)
        if distributed:
            # distributed stochastic (multi-pass) LSA is not implemented; fall back
            if not onepass:
                logger.warning("forcing the one-pass algorithm for distributed LSA")
                onepass = True
        self.onepass = onepass
        self.extra_samples, self.power_iters = extra_samples, power_iters
        if corpus is None and self.id2word is None:
            raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')
        if self.id2word is None:
            logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.num_terms = len(self.id2word)
        else:
            # the `[-1]` guard makes max() well-defined for an empty mapping => num_terms == 0
            self.num_terms = 1 + max([-1] + self.id2word.keys())
        # NOTE(review): docs_processed is initialized here but never updated in
        # this part of the file -- confirm it is maintained elsewhere.
        self.docs_processed = 0
        self.projection = Projection(self.num_terms, self.num_topics, power_iters=self.power_iters, extra_dims=self.extra_samples)
        self.numworkers = 1
        if not distributed:
            logger.info("using serial LSI version on this node")
            self.dispatcher = None
        else:
            if not onepass:
                raise NotImplementedError("distributed stochastic LSA not implemented yet; "
                                          "run either distributed one-pass, or serial randomized.")
            try:
                import Pyro4
                dispatcher = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')
                logger.debug("looking for dispatcher at %s", str(dispatcher._pyroUri))
                # note: the dispatcher itself is initialized as a *serial* (non-distributed)
                # worker front-end, to avoid infinite recursion of dispatchers
                dispatcher.initialize(id2word=self.id2word, num_topics=num_topics,
                                      chunksize=chunksize, decay=decay,
                                      power_iters=self.power_iters, extra_samples=self.extra_samples,
                                      distributed=False, onepass=onepass)
                self.dispatcher = dispatcher
                self.numworkers = len(dispatcher.getworkers())
                logger.info("using distributed version with %i workers", self.numworkers)
            except Exception as err:
                # distributed version was specifically requested, so this is an error state
                logger.error("failed to initialize distributed LSI (%s)", err)
                raise RuntimeError("failed to initialize distributed LSI (%s)" % err)
        if corpus is not None:
            self.add_documents(corpus)
    def add_documents(self, corpus, chunksize=None, decay=None):
        """
        Update singular value decomposition to take into account a new
        corpus of documents.
        Training proceeds in chunks of `chunksize` documents at a time. The size of
        `chunksize` is a tradeoff between increased speed (bigger `chunksize`)
        vs. lower memory footprint (smaller `chunksize`). If the distributed mode
        is on, each chunk is sent to a different worker/computer.
        Setting `decay` < 1.0 causes re-orientation towards new data trends in the
        input document stream, by giving less emphasis to old observations. This allows
        LSA to gradually "forget" old observations (documents) and give more
        preference to new ones.
        """
        logger.info("updating model with new documents")
        # get computation parameters; if not specified, use the ones from constructor
        if chunksize is None:
            chunksize = self.chunksize
        if decay is None:
            decay = self.decay
        if not scipy.sparse.issparse(corpus):
            if not self.onepass:
                # we are allowed multiple passes over the input => use a faster, randomized two-pass algo
                update = Projection(self.num_terms, self.num_topics, None)
                update.u, update.s = stochastic_svd(
                    corpus, self.num_topics,
                    num_terms=self.num_terms, chunksize=chunksize,
                    extra_dims=self.extra_samples, power_iters=self.power_iters)
                self.projection.merge(update, decay=decay)
            else:
                # the one-pass algo
                doc_no = 0
                if self.dispatcher:
                    logger.info('initializing %s workers', self.numworkers)
                    self.dispatcher.reset()
                for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
                    logger.info("preparing a new chunk of documents")
                    nnz = sum(len(doc) for doc in chunk)
                    # construct the job as a sparse matrix, to minimize memory overhead
                    # definitely avoid materializing it as a dense matrix!
                    logger.debug("converting corpus to csc format")
                    job = matutils.corpus2csc(chunk, num_docs=len(chunk), num_terms=self.num_terms, num_nnz=nnz)
                    del chunk
                    doc_no += job.shape[1]
                    if self.dispatcher:
                        # distributed version: add this job to the job queue, so workers can work on it
                        logger.debug("creating job #%i", chunk_no)
                        self.dispatcher.putjob(job) # put job into queue; this will eventually block, because the queue has a small finite size
                        del job
                        logger.info("dispatched documents up to #%s", doc_no)
                    else:
                        # serial version, there is only one "worker" (myself) => process the job directly
                        update = Projection(self.num_terms, self.num_topics, job, extra_dims=self.extra_samples, power_iters=self.power_iters)
                        del job
                        self.projection.merge(update, decay=decay)
                        del update
                        logger.info("processed documents up to #%s", doc_no)
                        self.print_topics(5)
                # wait for all workers to finish (distributed version only)
                if self.dispatcher:
                    logger.info("reached the end of input; now waiting for all remaining jobs to finish")
                    self.projection = self.dispatcher.getstate()
                # logger.info("top topics after adding %i documents" % doc_no)
                # self.print_debug(10)
        else:
            # the input is a scipy.sparse matrix that fits in RAM: process it in one go
            assert not self.dispatcher, "must be in serial mode to receive jobs"
            assert self.onepass, "distributed two-pass algo not supported yet"
            update = Projection(self.num_terms, self.num_topics, corpus.tocsc(), extra_dims=self.extra_samples, power_iters=self.power_iters)
            self.projection.merge(update, decay=decay)
            logger.info("processed sparse job of %i documents", corpus.shape[1])
    def __str__(self):
        return "LsiModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)" % (
            self.num_terms, self.num_topics, self.decay, self.chunksize)
    def __getitem__(self, bow, scaled=False, chunksize=512):
        """
        Return latent representation, as a list of (topic_id, topic_value) 2-tuples.
        This is done by folding input document into the latent topic space.
        If `scaled` is set, scale topics by the inverse of singular values (default: no scaling).
        """
        assert self.projection.u is not None, "decomposition not initialized yet"
        # if the input vector is in fact a corpus, return a transformed corpus as a result
        is_corpus, bow = utils.is_corpus(bow)
        if is_corpus and chunksize:
            # by default, transform `chunksize` documents at once, when called as `lsi[corpus]`.
            # this chunking is completely transparent to the user, but it speeds
            # up internal computations (one mat * mat multiplication, instead of
            # `chunksize` smaller mat * vec multiplications).
            return self._apply(bow, chunksize=chunksize)
        if not is_corpus:
            bow = [bow]
        # convert input to scipy.sparse CSC, then do "sparse * dense = dense" multiplication
        vec = matutils.corpus2csc(bow, num_terms=self.num_terms, dtype=self.projection.u.dtype)
        topic_dist = (vec.T * self.projection.u[:, :self.num_topics]).T # (x^T * u).T = u^-1 * x
        # # convert input to dense, then do dense * dense multiplication
        # # ± same performance as above (BLAS dense * dense is better optimized than scipy.sparse), but consumes more memory
        # vec = matutils.corpus2dense(bow, num_terms=self.num_terms, num_docs=len(bow))
        # topic_dist = numpy.dot(self.projection.u[:, :self.num_topics].T, vec)
        # # use numpy's advanced indexing to simulate sparse * dense
        # # ± same speed again
        # u = self.projection.u[:, :self.num_topics]
        # topic_dist = numpy.empty((u.shape[1], len(bow)), dtype=u.dtype)
        # for vecno, vec in enumerate(bow):
        #     indices, data = zip(*vec) if vec else ([], [])
        #     topic_dist[:, vecno] = numpy.dot(u.take(indices, axis=0).T, numpy.array(data, dtype=u.dtype))
        if not is_corpus:
            # convert back from matrix into a 1d vec
            topic_dist = topic_dist.reshape(-1)
        if scaled:
            topic_dist = (1.0 / self.projection.s[:self.num_topics]) * topic_dist # s^-1 * u^-1 * x
        # convert a numpy array to gensim sparse vector = tuples of (feature_id, feature_weight),
        # with no zero weights.
        if not is_corpus:
            # lsi[single_document]
            result = matutils.full2sparse(topic_dist)
        else:
            # lsi[chunk of documents]
            result = matutils.Dense2Corpus(topic_dist)
        return result
    def show_topic(self, topicno, topn=10):
        """
        Return a specified topic (=left singular vector), 0 <= `topicno` < `self.num_topics`,
        as a string.
        Return only the `topn` words which contribute the most to the direction
        of the topic (both negative and positive).
        >>> lsimodel.show_topic(10, topn=5)
        [("category", -0.340), ("$M$", 0.298), ("algebra", 0.183), ("functor", -0.174), ("operator", -0.168)]
        """
        # size of the projection matrix can actually be smaller than `self.num_topics`,
        # if there were not enough factors (real rank of input matrix smaller than
        # `self.num_topics`). in that case, return an empty string
        if topicno >= len(self.projection.u.T):
            return ''
        c = numpy.asarray(self.projection.u.T[topicno, :]).flatten()
        norm = numpy.sqrt(numpy.sum(numpy.dot(c, c)))
        most = matutils.argsort(numpy.abs(c), topn, reverse=True)
        return [(self.id2word[val], 1.0 * c[val] / norm) for val in most]
    def print_topic(self, topicno, topn=10):
        """
        Return a single topic as a formatted string. See `show_topic()` for parameters.
        >>> lsimodel.print_topic(10, topn=5)
        '-0.340 * "category" + 0.298 * "$M$" + 0.183 * "algebra" + -0.174 * "functor" + -0.168 * "operator"'
        """
        return ' + '.join(['%.3f*"%s"' % (v, k) for k, v in self.show_topic(topicno, topn)])
    def show_topics(self, num_topics=-1, num_words=10, log=False, formatted=True):
        """
        Return `num_topics` most significant topics (return all by default).
        For each topic, show `num_words` most significant words (10 words by default).
        The topics are returned as a list -- a list of strings if `formatted` is
        True, or a list of `(word, probability)` 2-tuples if False.
        If `log` is True, also output this result to log.
        """
        shown = []
        if num_topics < 0:
            num_topics = self.num_topics
        for i in xrange(min(num_topics, self.num_topics)):
            if i < len(self.projection.s):
                if formatted:
                    topic = self.print_topic(i, topn=num_words)
                else:
                    topic = self.show_topic(i, topn=num_words)
                shown.append((i, topic))
                if log:
                    logger.info("topic #%i(%.3f): %s", i, self.projection.s[i], topic)
        return shown
    def print_topics(self, num_topics=5, num_words=10):
        """Alias for `show_topics()` which prints the top 5 topics to log."""
        return self.show_topics(num_topics=num_topics, num_words=num_words, log=True)
    def print_debug(self, num_topics=5, num_words=10):
        """
        Print (to log) the most salient words of the first `num_topics` topics.
        Unlike `print_topics()`, this looks for words that are significant for a
        particular topic *and* not for others. This *should* result in a more
        human-interpretable description of topics.
        """
        # only wrap the module-level fnc
        print_debug(
            self.id2word, self.projection.u, self.projection.s,
            range(min(num_topics, len(self.projection.u.T))),
            num_words=num_words
        )
    def save(self, fname, *args, **kwargs):
        """
        Save the model to file.
        Large internal arrays may be stored into separate files, with `fname` as prefix.
        Note: do not save as a compressed file if you intend to load the file back with `mmap`.
        """
        # the projection is stored separately so it can be mmap'ed back on load;
        # the dispatcher (a Pyro proxy) is never picklable, so it is ignored too
        if self.projection is not None:
            self.projection.save(utils.smart_extension(fname, '.projection'), *args, **kwargs)
        super(LsiModel, self).save(fname, *args, ignore=['projection', 'dispatcher'], **kwargs)
    @classmethod
    def load(cls, fname, *args, **kwargs):
        """
        Load a previously saved object from file (also see `save`).
        Large arrays can be memmap'ed back as read-only (shared memory) by setting `mmap='r'`:
        >>> LsiModel.load(fname, mmap='r')
        """
        kwargs['mmap'] = kwargs.get('mmap', None)
        result = super(LsiModel, cls).load(fname, *args, **kwargs)
        projection_fname = utils.smart_extension(fname, '.projection')
        try:
            result.projection = super(LsiModel, cls).load(projection_fname, *args, **kwargs)
        except Exception as e:
            # a missing projection file is tolerated (best-effort load);
            # use the module-level `logger` with lazy %-args, consistent with
            # every other log call in this module
            logger.warning("failed to load projection from %s: %s", projection_fname, e)
        return result
#endclass LsiModel
def print_debug(id2token, u, s, topics, num_words=10, num_neg=None):
    """
    Log, for every topic index in `topics`, the `num_words` most salient
    positively-weighted words and the `num_neg` most salient negatively-weighted
    words, skipping near-zero entries of `u`.
    Salience of a word for a topic is its weight in that topic's left singular
    vector, normalized by the word's overall vector norm.
    """
    if num_neg is None:
        # by default, print half as many salient negative words as positive
        num_neg = num_words / 2
    logger.info('computing word-topic salience for %i topics', len(topics))
    topics, result = set(topics), {}
    # TODO speed up by block computation
    for tokenid, wordvec in enumerate(u):
        wordvec = numpy.abs(numpy.asarray(wordvec).flatten())
        salience = wordvec / numpy.sqrt(numpy.sum(numpy.dot(wordvec, wordvec)))
        for topic in topics:
            result.setdefault(topic, []).append((salience[topic], tokenid))
    logger.debug("printing %i+%i salient words", num_words, num_neg)
    for topic in sorted(iterkeys(result)):
        weights = sorted(result[topic], key=lambda x: -abs(x[0]))
        _, most = weights[0]
        # flip the whole topic's sign if its single most salient word is
        # negative, so the printed topic always leads with a positive weight
        normalize = -1.0 if u[most, topic] < 0.0 else 1.0
        # order features according to salience; ignore near-zero entries in u
        pos, neg = [], []
        for _, tokenid in weights:
            if normalize * u[tokenid, topic] > 0.0001:
                pos.append('%s(%.3f)' % (id2token[tokenid], u[tokenid, topic]))
                if len(pos) >= num_words:
                    break
        for _, tokenid in weights:
            if normalize * u[tokenid, topic] < -0.0001:
                neg.append('%s(%.3f)' % (id2token[tokenid], u[tokenid, topic]))
                if len(neg) >= num_neg:
                    break
        logger.info('topic #%s(%.3f): %s, ..., %s', topic, s[topic], ', '.join(pos), ', '.join(neg))
def stochastic_svd(corpus, rank, num_terms, chunksize=20000, extra_dims=None,
                   power_iters=0, dtype=numpy.float64, eps=1e-6):
    """
    Run truncated Singular Value Decomposition (SVD) on a sparse input.
    Return (U, S): the left singular vectors and the singular values of the input
    data stream `corpus` [4]_. The corpus may be larger than RAM (iterator of vectors).
    This may return less than the requested number of top `rank` factors, in case
    the input itself is of lower rank. The `extra_dims` (oversampling) and especially
    `power_iters` (power iterations) parameters affect accuracy of the decomposition.
    This algorithm uses `2+power_iters` passes over the input data. In case you can only
    afford a single pass, set `onepass=True` in :class:`LsiModel` and avoid using
    this function directly.
    The decomposition algorithm is based on
    **Halko, Martinsson, Tropp. Finding structure with randomness, 2009.**
    .. [4] If `corpus` is a scipy.sparse matrix instead, it is assumed the whole
    corpus fits into core memory and a different (more efficient) code path is chosen.
    """
    rank = int(rank)
    # `samples` = width of the random projection: requested rank + oversampling
    if extra_dims is None:
        samples = max(10, 2 * rank) # use more samples than requested factors, to improve accuracy
    else:
        samples = rank + int(extra_dims)
    logger.info("using %i extra samples and %i power iterations", samples - rank, power_iters)
    num_terms = int(num_terms)
    # first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
    # build Y in blocks of `chunksize` documents (much faster than going one-by-one
    # and more memory friendly than processing all documents at once)
    y = numpy.zeros(dtype=dtype, shape=(num_terms, samples))
    logger.info("1st phase: constructing %s action matrix", str(y.shape))
    if scipy.sparse.issparse(corpus):
        # in-memory path: the whole corpus is a single sparse matrix
        m, n = corpus.shape
        assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
        o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype) # draw a random gaussian matrix
        # accumulate corpus * o directly into y's buffer, without an intermediate dense product
        sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
                                corpus.data, o.ravel(), y.ravel()) # y = corpus * o
        del o
        # unlike numpy, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
        # so check for equal dtype explicitly, to avoid the extra memory footprint if possible
        if y.dtype != dtype:
            y = y.astype(dtype)
        logger.info("orthonormalizing %s action matrix", str(y.shape))
        # qr_destroy consumes the list-wrapped array in place, to save RAM
        y = [y]
        q, _ = matutils.qr_destroy(y) # orthonormalize the range
        logger.debug("running %i power iterations", power_iters)
        for power_iter in xrange(power_iters):
            q = corpus.T * q
            q = [corpus * q]
            q, _ = matutils.qr_destroy(q) # orthonormalize the range after each power iteration step
    else:
        # streaming path: iterate the corpus chunk by chunk
        num_docs = 0
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i', (chunk_no * chunksize))
            # construct the chunk as a sparse matrix, to minimize memory overhead
            # definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
            s = sum(len(doc) for doc in chunk)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
            m, n = chunk.shape
            assert m == num_terms
            assert n <= chunksize # the very last chunk of A is allowed to be smaller in size
            num_docs += n
            logger.debug("multiplying chunk * gauss")
            o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(dtype) # draw a random gaussian matrix
            sparsetools.csc_matvecs(m, n, samples, chunk.indptr, chunk.indices, # y = y + chunk * o
                                    chunk.data, o.ravel(), y.ravel())
            del chunk, o
        y = [y]
        q, _ = matutils.qr_destroy(y) # orthonormalize the range
        for power_iter in xrange(power_iters):
            logger.info("running power iteration #%i", power_iter + 1)
            yold = q.copy()
            q[:] = 0.0
            # q = A * A.T * yold, accumulated chunk by chunk over a fresh pass of the corpus
            for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
                logger.info('PROGRESS: at document #%i/%i', chunk_no * chunksize, num_docs)
                chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
                tmp = chunk.T * yold
                tmp = chunk * tmp
                del chunk
                q += tmp
            del yold
            q = [q]
            q, _ = matutils.qr_destroy(q) # orthonormalize the range
    # keep only the first `samples` columns of the orthonormal basis; work with its transpose
    qt = q[:, :samples].T.copy()
    del q
    if scipy.sparse.issparse(corpus):
        b = qt * corpus
        logger.info("2nd phase: running dense svd on %s matrix" % str(b.shape))
        u, s, vt = scipy.linalg.svd(b, full_matrices=False)
        del b, vt
    else:
        # second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
        # again, construct X incrementally, in chunks of `chunksize` documents from the streaming
        # input corpus A, to avoid using O(number of documents) memory
        x = numpy.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=numpy.float64)
        logger.info("2nd phase: constructing %s covariance matrix", str(x.shape))
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i/%i', chunk_no * chunksize, num_docs)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
            b = qt * chunk # dense * sparse matrix multiply
            del chunk
            x += numpy.dot(b, b.T) # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
            del b
        # now we're ready to compute decomposition of the small matrix X
        logger.info("running dense decomposition on %s covariance matrix", str(x.shape))
        u, s, vt = scipy.linalg.svd(x) # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
        s = numpy.sqrt(s) # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
    q = qt.T.copy()
    del qt
    logger.info("computing the final decomposition")
    # drop factors with relative magnitude below `eps` (and anything beyond `rank`)
    keep = clip_spectrum(s**2, rank, discard=eps)
    u = u[:, :keep].copy()
    s = s[:keep]
    # rotate the small factors back into the original `num_terms`-dimensional space
    u = numpy.dot(q, u)
    return u.astype(dtype), s.astype(dtype)
| zachmayer/gensim | gensim/models/lsimodel.py | Python | lgpl-2.1 | 34,983 | [
"Gaussian"
] | 24dd8507419b6384b2d49840fbee588d5168f0ac2c0f17727778d0fc58da360b |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# NOTE: this module was autogenerated by Cheetah from movietags.tmpl --
# do not edit by hand; regenerate from the template instead.
# Short aliases for Cheetah's NameMapper lookup helpers, used by the
# generated respond() method below.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Metadata recorded by the Cheetah compiler at generation time.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979192.447714
__CHEETAH_genTimestamp__ = 'Wed Apr  3 17:53:12 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/web/movietags.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr  3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run if compiled against an incompatible (older) Cheetah runtime.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
# NOTE: autogenerated by Cheetah -- do not edit by hand; regenerate from the
# movietags.tmpl template. Python 2 only (uses dict.has_key).
class movietags(Template):

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        # Forward only the keyword arguments Cheetah's Template understands.
        super(movietags, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        # Render the template: emits an <e2tags> XML document with one <e2tag>
        # element per entry in the searchList variable "tags".
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        # Temporarily switch to the WebSafe filter (XML-escapes interpolated values).
        _orig_filter_67585048 = _filter
        filterName = u'WebSafe'
        if self._CHEETAH__filters.has_key("WebSafe"):
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                      self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2tags>
''')
        for tag in VFFSL(SL,"tags",True): # generated from line 4, col 2
            write(u'''\t\t<e2tag>''')
            _v = VFFSL(SL,"tag",True) # u'$tag' on line 5, col 10
            if _v is not None: write(_filter(_v, rawExpr=u'$tag')) # from line 5, col 10.
            write(u'''</e2tag>
''')
        write(u'''</e2tags>
''')
        # Restore the filter that was active before this method ran.
        _filter = self._CHEETAH__currentFilter = _orig_filter_67585048
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_movietags= 'respond'
## END CLASS DEFINITION

# Attach Cheetah's runtime plumbing (caching, compile-on-the-fly support, etc.)
# to the generated class, once.
if not hasattr(movietags, '_initCheetahAttributes'):
    templateAPIClass = getattr(movietags, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(movietags)


# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
# Allow rendering the template directly from the shell for debugging.
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=movietags()).run()
| pli3/Openwebif | plugin/controllers/views/web/movietags.py | Python | gpl-2.0 | 5,108 | [
"VisIt"
] | a92b4f99e0c1a419e70419480bad5aabc5556bc519e0659ed3b34bcc2b6dfefe |
import libtcodpy as libtcod
import random
class RoomAddition:
'''
What I'm calling the Room Addition algorithm is an attempt to
recreate the dungeon generation algorithm used in Brogue, as
discussed at https://www.rockpapershotgun.com/2015/07/28/how-do-roguelikes-generate-levels/
I don't think Brian Walker has ever given a name to his
dungeon generation algorithm, so I've taken to calling it the
Room Addition Algorithm, after the way in which it builds the
dungeon by adding rooms one at a time to the existing dungeon.
This isn't a perfect recreation of Brian Walker's algorithm,
but I think it's good enough to demonstrait the concept.
'''
	def __init__(self):
		'''
		Configure the generator's tuning knobs. All sizes are in tiles;
		chances are probabilities in [0, 1].
		'''
		self.ROOM_MAX_SIZE = 18 # max height and width for cellular automata rooms
		self.ROOM_MIN_SIZE = 16 # min size in number of floor tiles, not height and width
		self.MAX_NUM_ROOMS = 30 # stop adding rooms once this many have been placed
		self.SQUARE_ROOM_MAX_SIZE = 12
		self.SQUARE_ROOM_MIN_SIZE = 6
		self.CROSS_ROOM_MAX_SIZE = 12
		self.CROSS_ROOM_MIN_SIZE = 6
		self.cavernChance = 0.40 # probability that the first room will be a cavern
		self.CAVERN_MAX_SIZE = 35 # max height and width
		self.wallProbability = 0.45 # cellular automata: chance a seeded cell stays wall
		self.neighbors = 4 # cellular automata: wall-neighbor threshold for smoothing
		self.squareRoomChance = 0.2 # shape probabilities for rooms after the first
		self.crossRoomChance = 0.15
		self.buildRoomAttempts = 500 # how many rooms generateLevel will try to build
		self.placeRoomAttempts = 20 # placement retries per generated room
		self.maxTunnelLength = 12 # presumably caps tunnel length in addTunnel -- confirm
		self.includeShortcuts = True # whether to run addShortcuts after room placement
		self.shortcutAttempts = 500 # used by addShortcuts (not visible here) -- confirm
		self.shortcutLength = 5
		self.minPathfindingDistance = 50
	def generateLevel(self,level,mapWidth,mapHeight,seed):
		'''
		Fill `level` with a freshly generated dungeon of mapWidth x mapHeight
		tiles: start from solid walls, place one room in the center, then keep
		attaching further rooms via tunnels until MAX_NUM_ROOMS rooms exist or
		buildRoomAttempts is exhausted. Optionally adds shortcuts, then places
		staircases.
		NOTE: Python 2 code (xrange, integer "/" division).
		'''
		# set the seed
		if seed != None:
			random.seed(seed)
		self.rooms = []
		self.level = level
		# Generate an array of solid walls
		self.level.terrain = [[0
			for y in range(mapHeight)]
				for x in range(mapWidth)]
		for y in xrange(mapHeight):
			for x in xrange(mapWidth):
				self.level.setBlocksMovementTrue(x,y)
				self.level.setBlocksSightTrue(x,y)
				self.level.setHasBeenExploredFalse(x,y)
		# generate the first room, centered on the map
		room = self.generateRoom()
		roomWidth,roomHeight = self.getRoomDimensions(room)
		roomX = (mapWidth/2 - roomWidth/2)-1
		roomY = (mapHeight/2 - roomHeight/2)-1
		self.addRoom(roomX,roomY,room)
		# generate other rooms
		for i in range(self.buildRoomAttempts):
			room = self.generateRoom()
			# try to position the room, get roomX and roomY
			roomX,roomY,wallTile,direction, tunnelLength = self.placeRoom(room,mapWidth,mapHeight)
			# NOTE(review): a roomX or roomY of 0 is treated the same as a
			# failed (None) placement by this truthiness test -- confirm
			# placeRoom can never legally return coordinate 0.
			if roomX and roomY:
				self.addRoom(roomX,roomY,room)
				self.addTunnel(wallTile,direction,tunnelLength)
				if len(self.rooms) >= self.MAX_NUM_ROOMS:
					break
		if self.includeShortcuts == True:
			self.addShortcuts(mapWidth,mapHeight)
		self.placeStaircases(mapWidth,mapHeight)
def generateRoom(self):
# select a room type to generate
# generate and return that room
if self.rooms:
#There is at least one room already
choice = random.random()
if choice <self.squareRoomChance:
room = self.generateRoomSquare()
elif self.squareRoomChance <= choice < (self.squareRoomChance+self.crossRoomChance):
room = self.generateRoomCross()
else:
room = self.generateRoomCellularAutomata()
else: #it's the first room
choice = random.random()
if choice < self.cavernChance:
room = self.generateRoomCavern()
else:
room = self.generateRoomSquare()
return room
def generateRoomCross(self):
roomHorWidth = (random.randint(self.CROSS_ROOM_MIN_SIZE+2,self.CROSS_ROOM_MAX_SIZE))/2*2
roomVirHeight = (random.randint(self.CROSS_ROOM_MIN_SIZE+2,self.CROSS_ROOM_MAX_SIZE))/2*2
roomHorHeight = (random.randint(self.CROSS_ROOM_MIN_SIZE,roomVirHeight-2))/2*2
roomVirWidth = (random.randint(self.CROSS_ROOM_MIN_SIZE,roomHorWidth-2))/2*2
room = [[1
for y in xrange(roomVirHeight)]
for x in xrange(roomHorWidth)]
# Fill in horizontal space
virOffset = roomVirHeight/2 - roomHorHeight/2
for y in xrange(virOffset,roomHorHeight+virOffset):
for x in xrange(0,roomHorWidth):
room[x][y] = 0
# Fill in virtical space
horOffset = roomHorWidth/2 - roomVirWidth/2
for y in xrange(0,roomVirHeight):
for x in xrange(horOffset,roomVirWidth+horOffset):
room[x][y] = 0
return room
def generateRoomSquare(self):
roomWidth = random.randint(self.SQUARE_ROOM_MIN_SIZE,self.SQUARE_ROOM_MAX_SIZE)
roomHeight = random.randint(max(int(roomWidth*0.5),self.SQUARE_ROOM_MIN_SIZE),min(int(roomWidth*1.5),self.SQUARE_ROOM_MAX_SIZE))
room = [[1
for y in range(roomHeight)]
for x in range(roomWidth)]
room = [[0
for y in range(1,roomHeight-1)]
for x in range(1,roomWidth-1)]
return room
def generateRoomCellularAutomata(self):
while True:
# if a room is too small, generate another
room = [[1
for y in range(self.ROOM_MAX_SIZE)]
for x in range(self.ROOM_MAX_SIZE)]
# random fill map
for y in range (2,self.ROOM_MAX_SIZE-2):
for x in range (2,self.ROOM_MAX_SIZE-2):
if random.random() >= self.wallProbability:
room[x][y] = 0
# create distinctive regions
for i in range(4):
for y in range (1,self.ROOM_MAX_SIZE-1):
for x in range (1,self.ROOM_MAX_SIZE-1):
# if the cell's neighboring walls > self.neighbors, set it to 1
if self.getAdjacentWalls(x,y,room) > self.neighbors:
room[x][y] = 1
# otherwise, set it to 0
elif self.getAdjacentWalls(x,y,room) < self.neighbors:
room[x][y] = 0
# floodfill to remove small caverns
room = self.floodFill(room)
# start over if the room is completely filled in
roomWidth,roomHeight = self.getRoomDimensions(room)
for x in range (roomWidth):
for y in range (roomHeight):
if room[x][y] == 0:
return room
def generateRoomCavern(self):
while True:
# if a room is too small, generate another
room = [[1
for y in range(self.CAVERN_MAX_SIZE)]
for x in range(self.CAVERN_MAX_SIZE)]
# random fill map
for y in range (2,self.CAVERN_MAX_SIZE-2):
for x in range (2,self.CAVERN_MAX_SIZE-2):
if random.random() >= self.wallProbability:
room[x][y] = 0
# create distinctive regions
for i in range(4):
for y in range (1,self.CAVERN_MAX_SIZE-1):
for x in range (1,self.CAVERN_MAX_SIZE-1):
# if the cell's neighboring walls > self.neighbors, set it to 1
if self.getAdjacentWalls(x,y,room) > self.neighbors:
room[x][y] = 1
# otherwise, set it to 0
elif self.getAdjacentWalls(x,y,room) < self.neighbors:
room[x][y] = 0
# floodfill to remove small caverns
room = self.floodFill(room)
# start over if the room is completely filled in
roomWidth,roomHeight = self.getRoomDimensions(room)
for x in range (roomWidth):
for y in range (roomHeight):
if room[x][y] == 0:
return room
def floodFill(self,room):
    '''
    Find the largest region. Fill in all other regions.

    Scans the grid; each floor tile (0) seeds a flood fill that collects
    one connected region and marks it as wall (1) while visiting.  The
    largest region meeting ROOM_MIN_SIZE is remembered and re-carved to
    floor at the end, so everything smaller ends up filled in.
    '''
    roomWidth,roomHeight = self.getRoomDimensions(room)
    largestRegion = set()
    for x in range (roomWidth):
        for y in range (roomHeight):
            if room[x][y] == 0:
                newRegion = set()
                tile = (x,y)
                toBeFilled = set([tile])
                while toBeFilled:
                    tile = toBeFilled.pop()
                    if tile not in newRegion:
                        newRegion.add(tile)
                        # mark visited by turning the tile to wall;
                        # survivors are carved back out below
                        room[tile[0]][tile[1]] = 1
                        # check adjacent cells
                        # NOTE(review): x/y are rebound here, shadowing the
                        # outer scan variables; the scan still proceeds
                        # correctly because `for` iterates its range object.
                        x = tile[0]
                        y = tile[1]
                        north = (x,y-1)
                        south = (x,y+1)
                        east = (x+1,y)
                        west = (x-1,y)
                        for direction in [north,south,east,west]:
                            if room[direction[0]][direction[1]] == 0:
                                if direction not in toBeFilled and direction not in newRegion:
                                    toBeFilled.add(direction)
                # keep only regions big enough, and of those the largest
                if len(newRegion) >= self.ROOM_MIN_SIZE:
                    if len(newRegion) > len(largestRegion):
                        largestRegion.clear()
                        largestRegion.update(newRegion)
    # re-carve the winning region back to floor
    for tile in largestRegion:
        room[tile[0]][tile[1]] = 0
    return room
def placeRoom(self,room, mapWidth, mapHeight): #(self,room,direction,)
    """Find a position for `room` on the level, anchored to an existing
    wall and possibly connected by a tunnel.

    Returns (roomX, roomY, wallTile, direction, tunnelLength) on success,
    or (None, None, None, None, None) if no placement was found within
    placeRoomAttempts tries."""
    roomX = None
    roomY = None
    roomWidth, roomHeight = self.getRoomDimensions(room)
    # try n times to find a wall that lets you build room in that direction
    for i in xrange(self.placeRoomAttempts):
        # try to place the room against the tile, else connected by a tunnel of length i
        wallTile = None
        direction = self.getDirection()
        while not wallTile:
            '''
            randomly select tiles until you find
            a wall that has another wall in the
            chosen direction and has a floor in the
            opposite direction.
            '''
            #direction == tuple(dx,dy)
            tileX = random.randint(1,mapWidth-2)
            tileY = random.randint(1,mapHeight-2)
            if ((self.level.getBlocksMovement(tileX,tileY) == True) and
                (self.level.getBlocksMovement((tileX+direction[0]),(tileY+direction[1])) == True) and
                (self.level.getBlocksMovement((tileX-direction[0]),(tileY-direction[1])) == False)):
                wallTile = (tileX,tileY)
        #spawn the room touching wallTile
        startRoomX = None
        startRoomY = None
        # NOTE(review): this loop exits once either coordinate is truthy;
        # a legitimate 0 offset for one coordinate relies on the other
        # being non-zero -- confirm intended.
        while not startRoomX and not startRoomY:
            x = random.randint(0,roomWidth-1)
            y = random.randint(0,roomHeight-1)
            if room[x][y] == 0:
                startRoomX = wallTile[0] - x
                startRoomY = wallTile[1] - y
        #then slide it until it doesn't touch anything
        for tunnelLength in xrange(self.maxTunnelLength):
            possibleRoomX = startRoomX + direction[0]*tunnelLength
            possibleRoomY = startRoomY + direction[1]*tunnelLength
            enoughRoom = self.getOverlap(room,possibleRoomX,possibleRoomY,mapWidth,mapHeight)
            if enoughRoom:
                roomX = possibleRoomX
                roomY = possibleRoomY
                # moved tunnel code into self.generateLevel()
                return roomX,roomY, wallTile, direction, tunnelLength
    return None, None, None, None, None
def addRoom(self,roomX,roomY,room):
    """Stamp `room` onto the level at (roomX, roomY): every floor tile
    (0) is carved into the level, then the room is populated and
    recorded in self.rooms."""
    roomWidth,roomHeight = self.getRoomDimensions(room)
    for x in range (roomWidth):
        for y in range (roomHeight):
            if room[x][y] == 0:
                self.level.setBlocksMovementFalse(roomX+x, roomY+y)
                self.level.setBlocksSightFalse(roomX+x, roomY+y)
    # Fill the room with monsters and objects
    self.level.populateRoom(roomX,roomY,roomWidth,roomHeight,room)
    self.rooms.append(room)
def addTunnel(self,wallTile,direction,tunnelLength):
    """Carve a straight tunnel of up to maxTunnelLength tiles from the
    placed room back toward the wall tile used during placement,
    stopping once the tile adjacent to wallTile has been carved."""
    # carve a tunnel from a point in the room back to
    # the wall tile that was used in its original placement
    startX = wallTile[0] + direction[0]*tunnelLength
    startY = wallTile[1] + direction[1]*tunnelLength
    #self.level[startX][startY] = 1
    for i in range(self.maxTunnelLength):
        x = startX - direction[0]*i
        y = startY - direction[1]*i
        self.level.setBlocksMovementFalse(x,y)
        self.level.setBlocksSightFalse(x,y)
        # If you want doors, this is where the code should go
        if ((x+direction[0]) == wallTile[0] and
            (y+direction[1]) == wallTile[1]):
            break
    # Populate Tunnel
def getRoomDimensions(self,room):
    """Return (width, height) of a room grid; (0, 0) for an empty or
    missing room."""
    if not room:
        return 0, 0
    return len(room), len(room[0])
def getAdjacentWalls(self, tileX, tileY, room): # finds the walls in 8 directions
    """Count wall cells (value 1) among the 8 tiles surrounding
    (tileX, tileY); the center tile itself is excluded."""
    walls = 0
    for nx in range(tileX - 1, tileX + 2):
        for ny in range(tileY - 1, tileY + 2):
            if (nx, ny) != (tileX, tileY) and room[nx][ny] == 1:
                walls += 1
    return walls
def getDirection(self):
    """Return a uniformly random cardinal direction as a (dx, dy) tuple:
    north (0,-1), south (0,1), east (1,0) or west (-1,0)."""
    cardinal = [(0, -1), (0, 1), (1, 0), (-1, 0)]
    return random.choice(cardinal)
def getOverlap(self,room,roomX,roomY,mapWidth,mapHeight):
    '''
    For each floor tile (0) in room, check the corresponding tile in
    self.level.terrain and the eight tiles around it. Though slow,
    that should insure that there is a wall between each of
    the rooms created in this way.
    <> check for overlap with self.level.terrain
    <> check for out of bounds (a one-tile map border is enforced)
    Returns True when the room fits, False on any overlap or
    out-of-bounds floor tile.
    '''
    roomWidth, roomHeight = self.getRoomDimensions(room)
    for x in range(roomWidth):
        for y in range(roomHeight):
            if room[x][y] == 0:
                # Check to see if the room is out of bounds
                if not ((1 <= (x+roomX) < mapWidth-1) and
                        (1 <= (y+roomY) < mapHeight-1)):
                    return False
                # Check for overlap with a one tile buffer: the tile and
                # all 8 neighbors must currently be blocked (wall).
                # Replaces the previous nine duplicated if-statements.
                for dx in (-1, 0, 1):
                    for dy in (-1, 0, 1):
                        if not self.level.getBlocksMovement(x+roomX+dx, y+roomY+dy):
                            return False
    return True
def addShortcuts(self,mapWidth,mapHeight):
    '''
    I use libtcodpy's built in pathfinding here, since I'm
    already using libtcodpy for the iu. At the moment,
    the way I find the distance between
    two points to see if I should put a shortcut there
    is horrible, and its easily the slowest part of this
    algorithm. If I think of a better way to do this in
    the future, I'll implement it.
    '''
    #initialize the libtcodpy map
    libtcodMap = libtcod.map_new(mapWidth,mapHeight)
    self.recomputePathMap(mapWidth,mapHeight,libtcodMap)
    for i in xrange(self.shortcutAttempts):
        # check i times for places where shortcuts can be made
        while True:
            #Pick a random floor tile that touches at least one wall
            floorX = random.randint(self.shortcutLength+1,(mapWidth-self.shortcutLength-1))
            floorY = random.randint(self.shortcutLength+1,(mapHeight-self.shortcutLength-1))
            if self.level.getBlocksMovement(floorX,floorY) == False:
                if (self.level.getBlocksMovement(floorX-1,floorY) == True or
                    self.level.getBlocksMovement(floorX+1,floorY) == True or
                    self.level.getBlocksMovement(floorX,floorY-1) == True or
                    self.level.getBlocksMovement(floorX,floorY+1) == True):
                    break
        # look around the tile for other floor tiles shortcutLength away
        for x in xrange(-1,2):
            for y in xrange(-1,2):
                if x != 0 or y != 0: # Exclude the center tile
                    newX = floorX + (x*self.shortcutLength)
                    newY = floorY + (y*self.shortcutLength)
                    if self.level.getBlocksMovement(newX,newY) == False:
                        # run pathfinding algorithm between the two points
                        #back to the libtcodpy nonesense
                        pathMap = libtcod.path_new_using_map(libtcodMap)
                        libtcod.path_compute(pathMap,floorX,floorY,newX,newY)
                        distance = libtcod.path_size(pathMap)
                        if distance > self.minPathfindingDistance:
                            # the walk is long but the points are close:
                            # make shortcut, then refresh the path map
                            self.carveShortcut(floorX,floorY,newX,newY)
                            self.recomputePathMap(mapWidth,mapHeight,libtcodMap)
    # destroy the path object
    # NOTE(review): only the last pathMap is deleted here; earlier path
    # objects from the loop are leaked -- confirm against libtcod docs.
    libtcod.path_delete(pathMap)
def placeStaircases(self,mapWidth,mapHeight):
    """Place the down/up staircases at the most distant pair of floor
    tiles found by random sampling (20 x 20 candidate pairs), where
    distance is libtcod pathfinding length."""
    downStairsX = None
    downStairsY = None
    upStairsX = None
    upStairsY = None
    longestDistance = 0
    #initialize the libtcodpy map
    libtcodMap = libtcod.map_new(mapWidth,mapHeight)
    self.recomputePathMap(mapWidth,mapHeight,libtcodMap)
    # try n times
    for i in xrange(20):
        # pick a random floor point, away from the map edge
        while True:
            tempX1 = random.randint(8,mapWidth-8)
            tempY1 = random.randint(8,mapHeight-8)
            if self.level.getBlocksMovement(tempX1,tempY1) == False:
                break
        # try n times
        # NOTE(review): the inner loop reuses the name `i`, shadowing the
        # outer counter; harmless for range loops but worth renaming.
        for i in xrange(20):
            # pick another random floor point
            while True:
                tempX2 = random.randint(8,mapWidth-8)
                tempY2 = random.randint(8,mapHeight-8)
                if self.level.getBlocksMovement(tempX2,tempY2) == False:
                    break
            # if pathingDistance between the two points is greater
            # than the longest pathing distance, set the stair coordinates
            # to the two points, else continue
            pathMap = libtcod.path_new_using_map(libtcodMap)
            libtcod.path_compute(pathMap,tempX1,tempY1,tempX2,tempY2)
            distance = libtcod.path_size(pathMap)
            if distance > longestDistance:
                downStairsX = tempX1
                downStairsY = tempY1
                upStairsX = tempX2
                upStairsY = tempY2
                longestDistance = distance
    self.level.placeStairs(downStairsX,downStairsY,upStairsX,upStairsY)
    # destroy the path object
    libtcod.path_delete(pathMap)
def recomputePathMap(self,mapWidth,mapHeight,libtcodMap):
    """Sync the libtcod map with level walkability: blocked tiles become
    opaque/impassable, open tiles transparent/walkable."""
    for col in xrange(mapWidth):
        for row in xrange(mapHeight):
            passable = not self.level.getBlocksMovement(col,row)
            libtcod.map_set_properties(libtcodMap,col,row,passable,passable)
def carveShortcut(self,x1,y1,x2,y2):
    """Carve a straight corridor between two points sharing a row,
    column, or exact +/-1 diagonal.

    NOTE(review): the slope tests use `/`; under Python 3 true division
    a non-diagonal pair could match (e.g. 2/2 == 1.0) -- behavior was
    written for exact diagonals, confirm callers only pass those.
    """
    if x1-x2 == 0:
        # Carve vertical tunnel
        for y in xrange(min(y1,y2),max(y1,y2)+1):
            self.level.setBlocksMovementFalse(x1,y)
            self.level.setBlocksSightFalse(x1,y)
            #self.level[x1][y] = 0
    elif y1-y2 == 0:
        # Carve Horizontal tunnel
        for x in xrange(min(x1,x2),max(x1,x2)+1):
            self.level.setBlocksMovementFalse(x,y1)
            self.level.setBlocksSightFalse(x,y1)
            #self.level[x][y1] = 0
    elif (y1-y2)/(x1-x2) == 1:
        # Carve NW to SE Tunnel (slope +1); carve two tiles per step so
        # the diagonal is passable
        x = min(x1,x2)
        y = min(y1,y2)
        while x != max(x1,x2):
            x+=1
            self.level.setBlocksMovementFalse(x,y)
            self.level.setBlocksSightFalse(x,y)
            #self.level[x][y] = 0
            y+=1
            self.level.setBlocksMovementFalse(x,y)
            self.level.setBlocksSightFalse(x,y)
            #self.level[x][y] = 0
    elif (y1-y2)/(x1-x2) == -1:
        # Carve NE to SW Tunnel (slope -1)
        x = min(x1,x2)
        y = max(y1,y2)
        while x != max(x1,x2):
            x += 1
            self.level.setBlocksMovementFalse(x,y)
            self.level.setBlocksSightFalse(x,y)
            #self.level[x][y] = 0
            y -= 1
            self.level.setBlocksMovementFalse(x,y)
            self.level.setBlocksSightFalse(x,y)
            #self.level[x][y] = 0
def checkRoomExists(self,room):
    """Return True if the room contains at least one floor tile (0)."""
    roomWidth, roomHeight = self.getRoomDimensions(room)
    return any(
        room[col][row] == 0
        for col in range(roomWidth)
        for row in range(roomHeight)
    )
"Brian"
] | 01b9d69e0dd59d57b9d69391c2d988a561ffd2d5fac9aa0114c4741c48ae7767 |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes that deal with macromolecular crystal structures.
Includes: PDB and mmCIF parsers, a Structure class, a module to keep a local
copy of the PDB up-to-date, selective IO of PDB files, etc.
Author: Thomas Hamelryck. Additional code by Kristian Rother.
"""
# Get a Structure object from a PDB file
from PDBParser import PDBParser
try:
    # Get a Structure object from an mmCIF file
    from MMCIFParser import MMCIFParser
except ImportError:
    # The mmCIF parser depends on a compiled extension module; skip it
    # when it is unavailable.  (Was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit; narrowed to ImportError to match the
    # NeighborSearch guard at the bottom of this module.)
    pass
# Download from the PDB
from PDBList import PDBList
# Parse PDB header directly
from parse_pdb_header import parse_pdb_header
# Find connected polypeptides in a Structure
from Polypeptide import PPBuilder, CaPPBuilder, is_aa, standard_aa_names
# This is also useful :-)
from Bio.SCOP.Raf import to_one_letter_code
# IO of PDB files (including flexible selective output)
from PDBIO import PDBIO, Select
# Some methods to eg. get a list of Residues
# from a list of Atoms.
import Selection
# Superimpose atom sets
from Superimposer import Superimposer
# 3D vector class
from Vector import Vector, calc_angle, calc_dihedral, refmat, rotmat, rotaxis,\
vector_to_axis, m2rotaxis, rotaxis2m
# Alignment module
from StructureAlignment import StructureAlignment
# DSSP handle
# (secondary structure and solvent accessible area calculation)
from DSSP import DSSP, make_dssp_dict
# Residue depth:
# distance of residue atoms from solvent accessible surface
from ResidueDepth import ResidueDepth, get_surface
# Calculation of Half Sphere Solvent Exposure
from HSExposure import HSExposureCA, HSExposureCB, ExposureCN
# Kolodny et al.'s backbone libraries
from FragmentMapper import FragmentMapper
# Write out chain(start-end) to PDB file
from Dice import extract
# Fast atom neighbor search
# Depends on KDTree C++ module
try:
from NeighborSearch import NeighborSearch
except ImportError:
pass
| BlogomaticProject/Blogomatic | opt/blog-o-matic/usr/lib/python/Bio/PDB/__init__.py | Python | gpl-2.0 | 2,126 | [
"Biopython",
"CRYSTAL"
] | 230f8be6943c511ef85009d60b2d2c51eb14f4c6ab6cf6687a1a7cca7d7e0a52 |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.mcscf import mc1step
from pyscf.mcscf import mc2step
from pyscf.mcscf import casci_symm
from pyscf.mcscf import addons
from pyscf import fci
from pyscf.soscf.newton_ah import _force_SO3_degeneracy_, _force_Ex_Ey_degeneracy_
class SymAdaptedCASSCF(mc1step.CASSCF):
    __doc__ = mc1step.CASSCF.__doc__

    def __init__(self, mf_or_mol, ncas, nelecas, ncore=None, frozen=None):
        mc1step.CASSCF.__init__(self, mf_or_mol, ncas, nelecas, ncore, frozen)
        # symmetry-adapted CASSCF requires a symmetry-enabled molecule
        assert(self.mol.symmetry)
        # Swap in the symmetry-adapted FCI solver for the matching spin
        # case, carrying over any options set on the plain solver.
        fcisolver = self.fcisolver
        if isinstance(fcisolver, fci.direct_spin0.FCISolver):
            self.fcisolver = fci.direct_spin0_symm.FCISolver(self.mol)
        else:
            self.fcisolver = fci.direct_spin1_symm.FCISolver(self.mol)
        self.fcisolver.__dict__.update(fcisolver.__dict__)

    @property
    def wfnsym(self):
        # wavefunction symmetry is delegated to the FCI solver
        return self.fcisolver.wfnsym
    @wfnsym.setter
    def wfnsym(self, wfnsym):
        self.fcisolver.wfnsym = wfnsym

    def mc1step(self, mo_coeff=None, ci0=None, callback=None):
        # one-step (coupled) orbital/CI optimization
        return self.kernel(mo_coeff, ci0, callback, mc1step.kernel)

    def mc2step(self, mo_coeff=None, ci0=None, callback=None):
        # two-step (alternating) orbital/CI optimization
        return self.kernel(mo_coeff, ci0, callback, mc2step.kernel)

    def kernel(self, mo_coeff=None, ci0=None, callback=None, _kern=None):
        """Run the CASSCF optimization.

        Returns (e_tot, e_cas, ci, mo_coeff, mo_energy)."""
        if mo_coeff is None:
            mo_coeff = self.mo_coeff
        if callback is None: callback = self.callback
        if _kern is None: _kern = mc1step.kernel
        self.check_sanity()
        self.dump_flags()
        log = logger.Logger(self.stdout, self.verbose)
        # Initialize/overwrite self.fcisolver.orbsym and self.fcisolver.wfnsym
        mo_coeff = self.mo_coeff = casci_symm.label_symmetry_(self, mo_coeff, ci0)
        self.converged, self.e_tot, self.e_cas, self.ci, \
                self.mo_coeff, self.mo_energy = \
                _kern(self, mo_coeff,
                      tol=self.conv_tol, conv_tol_grad=self.conv_tol_grad,
                      ci0=ci0, callback=callback, verbose=self.verbose)
        log.note('CASSCF energy = %.15g', self.e_tot)
        self._finalize()
        return self.e_tot, self.e_cas, self.ci, self.mo_coeff, self.mo_energy

    def uniq_var_indices(self, nmo, ncore, ncas, frozen):
        mask = mc1step.CASSCF.uniq_var_indices(self, nmo, ncore, ncas, frozen)
        # Call _symmetrize function to remove the symmetry forbidden matrix elements
        # (by setting their mask value to 0 in _symmetrize). Then pack_uniq_var and
        # unpack_uniq_var function only operates on those symmetry allowed matrix
        # elements.
        # self.mo_coeff.orbsym is initialized in kernel function
        return _symmetrize(mask, self.mo_coeff.orbsym, self.mol.groupname)

    def _eig(self, mat, b0, b1, orbsym=None):
        # symmetry-blocked diagonalization
        # self.mo_coeff.orbsym is initialized in kernel function
        if orbsym is None:
            orbsym = self.mo_coeff.orbsym[b0:b1]
        return casci_symm.eig(mat, orbsym)

    def rotate_mo(self, mo, u, log=None):
        '''Rotate orbitals with the given unitary matrix'''
        mo = mc1step.CASSCF.rotate_mo(self, mo, u, log)
        # re-attach the orbital symmetry labels lost in the rotation
        mo = lib.tag_array(mo, orbsym=self.mo_coeff.orbsym)
        return mo

    def sort_mo_by_irrep(self, cas_irrep_nocc,
                         cas_irrep_ncore=None, mo_coeff=None, s=None):
        '''Select active space based on symmetry information.
        See also :func:`pyscf.mcscf.addons.sort_mo_by_irrep`
        '''
        if mo_coeff is None: mo_coeff = self.mo_coeff
        return addons.sort_mo_by_irrep(self, mo_coeff, cas_irrep_nocc,
                                       cas_irrep_ncore, s)

    def newton(self):
        """Return a second-order (Newton) CASSCF object with the same
        settings; state-average solvers are preserved."""
        from pyscf.mcscf import newton_casscf_symm
        mc1 = newton_casscf_symm.CASSCF(self._scf, self.ncas, self.nelecas)
        mc1.__dict__.update(self.__dict__)
        mc1.max_cycle_micro = 10
        # MRH, 04/08/2019: enable state-average CASSCF second-order algorithm
        from pyscf.mcscf.addons import StateAverageMCSCFSolver
        if isinstance (self, StateAverageMCSCFSolver):
            mc1 = mc1.state_average_(self.weights)
        return mc1
CASSCF = SymAdaptedCASSCF
def _symmetrize(mat, orbsym, groupname):
    """Zero out the symmetry-forbidden elements of `mat`.

    Elements connecting orbitals of different irreps are set to zero;
    for the degenerate groups (SO3, Dooh, Coov) the degeneracy is then
    enforced in place."""
    orbsym = numpy.asarray(orbsym)
    same_irrep = orbsym[:, None] == orbsym
    out = numpy.zeros_like(mat)
    out[same_irrep] = mat[same_irrep]
    if groupname == 'SO3':
        _force_SO3_degeneracy_(out, orbsym)
    elif groupname in ('Dooh', 'Coov'):
        _force_Ex_Ey_degeneracy_(out, orbsym)
    return out
# Attach the symmetry-adapted CASSCF constructor as a method on the
# symmetry-aware SCF classes (mf.CASSCF(ncas, nelecas) convenience API).
from pyscf import scf
scf.hf_symm.RHF.CASSCF = scf.hf_symm.ROHF.CASSCF = lib.class_as_method(SymAdaptedCASSCF)
# UHF references have no symmetry-adapted CASSCF here
scf.uhf_symm.UHF.CASSCF = None
if __name__ == '__main__':
    # Self-test: water/cc-pVDZ CASSCF(6,4) runs with symmetry, comparing
    # against reference energies printed below.
    from pyscf import gto
    from pyscf import scf
    import pyscf.fci

    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None

    mol.atom = [
        ['O', ( 0., 0.    , 0.   )],
        ['H', ( 0., -0.757, 0.587)],
        ['H', ( 0., 0.757 , 0.587)],]
    mol.basis = {'H': 'cc-pvdz',
                 'O': 'cc-pvdz',}
    mol.symmetry = 1
    mol.build()

    m = scf.RHF(mol)
    ehf = m.scf()

    # singlet CAS(6,4) with a hand-picked active space
    mc = CASSCF(m, 6, 4)
    mc.fcisolver = pyscf.fci.solver(mol)
    mc.verbose = 4
    mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)
    emc = mc.mc1step(mo)[0]
    print(ehf, emc, emc-ehf)
    #-76.0267656731 -76.0873922924 -0.0606266193028
    print(emc - -76.0873923174, emc - -76.0926176464)

    # triplet-type occupancy (3 alpha, 1 beta)
    mc = CASSCF(m, 6, (3,1))
    #mc.fcisolver = pyscf.fci.direct_spin1
    mc.fcisolver = pyscf.fci.solver(mol, False)
    mc.verbose = 4
    emc = mc.mc1step(mo)[0]
    print(emc - -75.7155632535814)

    # same occupancy constrained to the B1 irrep
    mc = CASSCF(m, 6, (3,1))
    mc.fcisolver.wfnsym = 'B1'
    mc.verbose = 4
    emc = mc.mc1step(mo)[0]
    print(emc - -75.6406597705231)
| sunqm/pyscf | pyscf/mcscf/mc1step_symm.py | Python | apache-2.0 | 6,498 | [
"PySCF"
] | ec4c7d098057d05836e81ed1c477d092ead54266b6223cb767d5d02535de8cf9 |
# -*- coding:utf-8 -*-
# @author xupingmao
# @since 2016/12
# @modified 2022/02/07 13:04:50
import profile
import math
import re
import web
import xauth
import xutils
import xconfig
import xtables
import xtemplate
import xmanager
import os
from web import HTTPError
from xconfig import Storage
from xutils import History
from xutils import dbutil
from xutils import fsutil
from xtemplate import T
from .constant import CREATE_BTN_TEXT_DICT
PAGE_SIZE = xconfig.PAGE_SIZE
NOTE_DAO = xutils.DAO("note")
@xmanager.listen("note.view")
def visit_by_id(ctx):
    """Event listener for "note.view": record a visit for the note.

    ctx carries the note `id` and the `user_name` of the viewer."""
    note_id = ctx.id
    user_name = ctx.user_name
    NOTE_DAO.visit(user_name, note_id)
def check_auth(file, user_name):
    """Verify that `user_name` may read `file`.

    Allowed: admin, the creator, public notes, anyone the note or its
    parent notebook was shared with.  Anonymous users are redirected to
    login; everyone else is sent to /unauthorized."""
    if user_name == "admin":
        return
    if user_name == file.creator:
        return
    if file.is_public == 1:
        return
    if user_name is None:
        xauth.redirect_to_login();
    # share granted on the note itself
    if NOTE_DAO.get_share_to(user_name, file.id) != None:
        return
    # share granted on the parent notebook
    if NOTE_DAO.get_share_to(user_name, file.parent_id) != None:
        return
    raise web.seeother("/unauthorized")
def handle_left_dir(kw, user_name, file, op):
    """Adjust the left sidebar state for the given note type.

    html/csv notes render full width, so the aside panel is hidden.
    (The previous version also read the is_iframe/dir_type/tags request
    arguments into locals that were never used; those dead reads were
    removed.)"""
    if file.type in ("html", "csv"):
        kw.show_aside = False
def handle_note_recommend(kw, file, user_name):
    """Fill kw with recommendation data for the note view: fires the
    "note.recommend" event to collect related notes, plus the previous
    and next sibling notes for navigation."""
    ctx = Storage(id=file.id, name = file.name, creator = file.creator,
        content = file.content,
        parent_id = file.parent_id,
        result = [])
    xmanager.fire("note.recommend", ctx)
    kw.recommended_notes = ctx.result
    kw.next_note = NOTE_DAO.find_next_note(file, user_name)
    kw.prev_note = NOTE_DAO.find_prev_note(file, user_name)
def view_gallery_func(file, kw):
    """Prepare template data for a gallery note: resolve its upload
    directory and list the files in it (as web paths).

    (The original first built fpath with os.path.join and immediately
    overwrote it via NOTE_DAO.get_gallery_path; the dead assignment was
    removed.)"""
    filelist = []
    fpath = NOTE_DAO.get_gallery_path(file)
    if fpath != None:
        filelist = fsutil.list_files(fpath, webpath = True)
    file.path = fpath
    kw.show_aside = False
    kw.path = fpath
    kw.filelist = filelist
def view_html_func(file, kw):
    """Prepare an html/post style note for display.

    Soft hyphens (U+00AD) are normalized to newlines and newlines are
    converted to ``<br/>`` tags.  When ``file.data`` is empty or None it
    falls back to the converted ``file.content``.

    Bug fix: the original called ``file.data.replace`` before the None
    check, raising AttributeError whenever data was None; the guard now
    comes first."""
    def _to_html(text):
        # soft hyphen -> newline, then newline -> <br/>
        return text.replace(u'\xad', '\n').replace(u'\n', '<br/>')

    content = _to_html(file.content)
    if file.data:
        file.data = _to_html(file.data)
    else:
        file.data = content
    kw.show_recommend = True
    kw.show_pagination = False
def view_md_func(file, kw):
    """Prepare rendering state for markdown-like notes (md/text/memo/log);
    also the fallback for unknown note types."""
    device = xutils.get_argument("device", "desktop")
    kw.content = file.content
    kw.show_recommend = True
    kw.show_pagination = False
    if kw.op == "edit":
        kw.show_recommend = False
        kw.template_name = "note/editor/markdown_edit.html"
    if kw.op == "edit" and device == "mobile":
        # force the mobile editor on mobile devices
        kw.template_name = "note/editor/markdown_edit.mobile.html"
def view_group_timeline_func(note, kw):
    """Notebook (group) notes redirect to the timeline view of their
    children instead of rendering inline."""
    raise web.found("/note/timeline?type=default&parent_id=%s" % note.id)
def view_group_detail_func(file, kw):
    """Prepare the notebook (group) detail page: child listing,
    pagination and share info.  (Per the original comment, this code
    path is currently unused.)"""
    orderby = kw.orderby
    user_name = kw.user_name
    page = kw.page
    # pagesize = kw.pagesize
    pagesize = 1000
    dialog = xutils.get_argument("dialog", "false")
    if kw.op == "edit":
        # editing the notebook description uses the markdown editor
        kw.show_recommend = False
        kw.template_name = "note/editor/markdown_edit.html"
        return
    if orderby == None or orderby == "":
        orderby = file.orderby
    offset = max(page-1, 0) * pagesize
    files = NOTE_DAO.list_by_parent(file.creator, file.id,
        offset, pagesize, orderby)
    amount = file.size
    kw.content = file.content
    kw.show_search_div = True
    kw.show_add_file = True
    kw.show_aside = False
    kw.show_pagination = True
    kw.files = files
    kw.show_parent_link = False
    kw.page_max = math.ceil(amount/pagesize)
    kw.parent_id = file.id
    if dialog == "true":
        # dialog-style rendering
        kw.template_name = "note/ajax/group_detail_dialog.html"
    else:
        kw.template_name = "note/page/detail/group_detail.html"
    share_from_info = NOTE_DAO.get_share_from(user_name, file.id)
    if share_from_info != None:
        kw.share_to_list = share_from_info.share_to_list
    else:
        kw.share_to_list = []
def view_list_func(note, kw):
    """Prepare a checklist note: items are shown through the comment
    widget, so the comment title is set accordingly."""
    kw.show_aside = False
    kw.show_pagination = False
    kw.show_comment_title = True
    kw.comment_title = T("清单项")
    kw.op = "view"
def view_table_func(note, kw):
    """Table (csv) notes render full width; hide the aside panel."""
    setattr(kw, "show_aside", False)
def view_form_func(note, kw):
    """Form notes use a dedicated detail template keyed by the note id;
    forms can be processed by scripts to build standardized tools."""
    kw.file_id = note.id
    kw.template_name = "note/page/detail/form_detail.html"
# Dispatch table: note type -> per-type view preparation function.
# Types not listed fall back to view_md_func (see ViewHandler.GET).
VIEW_FUNC_DICT = {
    "group": view_group_detail_func,
    "md" : view_md_func,
    "text": view_md_func,
    "memo": view_md_func,
    "log" : view_md_func,
    "list": view_list_func,
    "csv" : view_table_func,
    "gallery": view_gallery_func,
    "html": view_html_func,
    "post": view_html_func,
    "form": view_form_func,
}
def find_note_for_view0(token, id, name):
    """Resolve a note by share token, id, or (current user, name), in
    that priority order.  Raises HTTP 504 when no locator is given."""
    if token != "":
        return NOTE_DAO.get_by_token(token)
    if id != "":
        return NOTE_DAO.get_by_id(id)
    if name != "":
        return NOTE_DAO.get_by_name(xauth.current_name(), name)
    raise HTTPError(504)
def find_note_for_view(token, id, name):
    """Resolve a note like find_note_for_view0 and derive the date-only
    fields (mdate/cdate/adate) from the "YYYY-MM-DD HH:MM:SS" style
    timestamp strings."""
    note = find_note_for_view0(token, id, name)
    if note != None:
        note.mdate = note.mtime.split(" ")[0]
        note.cdate = note.ctime.split(" ")[0]
        note.adate = note.atime.split(" ")[0]
    return note
def create_view_kw():
    """Build the default template keyword set for the note view page."""
    view_kw = Storage()
    # default template and search context
    view_kw.template_name = "note/page/view.html"
    view_kw.search_type = "note"
    view_kw.comment_source_class = "hide"
    # layout defaults
    view_kw.show_left = False
    view_kw.show_groups = False
    view_kw.show_aside = True
    view_kw.show_mdate = False
    view_kw.show_add_file = False
    # list placeholders filled in later by the per-type view funcs
    view_kw.groups = []
    view_kw.files = []
    view_kw.recommended_notes = []
    return view_kw
class ViewHandler:
    """Render the note view/edit page (/note/view, /note/edit)."""

    def handle_contents_btn(self, kw):
        # The contents button shows only for editable, non-group,
        # non-top-level notes.
        file = kw.file
        can_edit = kw.can_edit
        is_valid_type = (file.type != "group") and (file.parent_id != "0")
        kw.show_contents_btn = is_valid_type and can_edit

    @xutils.timeit(name = "Note.View", logfile = True)
    def GET(self, op, id = None):
        """Handle a note page request.

        op is "view" or "edit"; the note may be addressed by path id,
        ?id=, ?name=, ?token= (shared link) or ?skey= (get-or-create)."""
        if id is None:
            id = xutils.get_argument("id", "")
        name = xutils.get_argument("name", "")
        page = xutils.get_argument("page", 1, type=int)
        pagesize = xutils.get_argument("pagesize", xconfig.PAGE_SIZE, type=int)
        # NOTE(review): show_menu/show_search locals are read but unused;
        # templates rely on the kw copies set below -- confirm.
        show_menu = xutils.get_argument("show_menu", "true") != "false"
        show_search = xutils.get_argument("show_search", "true") != "false"
        orderby = xutils.get_argument("orderby", "")
        is_iframe = xutils.get_argument("is_iframe", "false")
        token = xutils.get_argument("token", "")
        user_name = xauth.current_name()
        skey = xutils.get_argument("skey")

        kw = create_view_kw()
        kw.op = op
        kw.user_name = user_name
        kw.page = page
        kw.orderby = orderby
        kw.pagesize = pagesize
        kw.page_url = "/note/view?id=%s&orderby=%s&page=" % (id, orderby)

        if id == "0":
            # id 0 is the virtual root -> go home
            raise web.found("/")

        if skey != None and skey != "":
            # skey addresses a note that is created on first access
            try:
                file = NOTE_DAO.get_or_create(skey, user_name)
            except Exception as e:
                return xtemplate.render("error.html", error = e)
        else:
            # notes in the recycle bin are still viewable
            file = find_note_for_view(token, id, name)
            if file is None:
                raise web.notfound()

        if token == "":
            # token links carry their own authorization
            check_auth(file, user_name)

        pathlist = NOTE_DAO.list_path(file)
        can_edit = (file.creator == user_name) or (user_name == "admin")
        role = xauth.get_current_role()

        # template defaults
        recent_created = []
        show_recommend = False
        next_note = None
        prev_note = None

        # fire the view event (records the visit; see visit_by_id)
        event_ctx = Storage(id = file.id, user_name = user_name)
        xmanager.fire("note.view", event_ctx)

        # dispatch to the per-type preparation function
        view_func = VIEW_FUNC_DICT.get(file.type, view_md_func)
        view_func(file, kw)

        if show_recommend and user_name is not None:
            # NOTE(review): show_recommend is never set True locally (the
            # view funcs set kw.show_recommend instead), so this branch
            # looks dead -- confirm before relying on recommendations.
            handle_note_recommend(kw, file, user_name)

        if op == "edit":
            kw.show_aside = False
            kw.show_search = False
            kw.show_comment = False

        if is_iframe == "true":
            kw.show_menu = False
            kw.show_search = False

        template_name = kw['template_name']
        del kw['template_name']

        kw.file = file
        kw.can_edit = can_edit

        # adjust the left sidebar for this note type
        handle_left_dir(kw, user_name, file, op)
        # decide whether the contents button is shown
        self.handle_contents_btn(kw)

        return xtemplate.render_by_ua(template_name,
            html_title = file.name,
            note_id = id,
            pathlist = pathlist,
            recent_created = recent_created,
            CREATE_BTN_TEXT_DICT = CREATE_BTN_TEXT_DICT,
            is_iframe = is_iframe, **kw)
class ViewByIdHandler(ViewHandler):
    """Handler for the /note/<id> style URLs; fixes op to "view"."""

    def GET(self, id):
        return ViewHandler.GET(self, "view", id)

    def POST(self, id):
        # NOTE(review): ViewHandler defines no POST method, so this call
        # raises AttributeError if a POST ever arrives -- confirm whether
        # POST routing is actually used for these URLs.
        return ViewHandler.POST(self, "view", id)
class PrintHandler:
    """Render a note in a print-friendly page (menu hidden)."""

    @xauth.login_required()
    def GET(self):
        id = xutils.get_argument("id")
        file = xutils.call("note.get_by_id", id)
        user_name = xauth.current_name()
        # same read-permission rules as the normal view page
        check_auth(file, user_name)
        return xtemplate.render("note/page/print.html", show_menu = False, note = file)
def sqlite_escape(text):
    """Quote a Python value for inclusion in a SQLite statement.

    None maps to NULL, non-strings are rendered via repr(), and single
    quotes inside strings are doubled per SQL quoting rules."""
    if text is None:
        return "NULL"
    if not isinstance(text, str):
        return repr(text)
    escaped = text.replace("'", "''")
    return "'{}'".format(escaped)
def result(success = True, msg=None):
    """Build the standard ajax response payload (result is always None)."""
    payload = dict(success=success, msg=msg)
    payload["result"] = None
    return payload
def get_link(filename, webpath):
    """Build a markdown link for an uploaded file.

    Image files get markdown image syntax so they render inline; other
    files get a plain markdown link.

    Bug fix: the image branch formatted an empty string literal
    (`"" % (filename, webpath)` raises TypeError); the markdown image
    template was restored."""
    if xutils.is_img_file(filename):
        return "![%s](%s)" % (filename, webpath)
    return "[%s](%s)" % (filename, webpath)
class MarkHandler:
    """Star a note (set is_marked=1) and bounce back to its view page."""

    @xauth.login_required()
    def GET(self):
        note_id = xutils.get_argument("id")
        table = xtables.get_file_table()
        table.update(is_marked=1, where={"id": note_id})
        raise web.seeother("/note/view?id=%s" % note_id)
class UnmarkHandler:
    """Unstar a note (set is_marked=0) and bounce back to its view page."""

    @xauth.login_required()
    def GET(self):
        note_id = xutils.get_argument("id")
        table = xtables.get_file_table()
        table.update(is_marked=0, where={"id": note_id})
        raise web.seeother("/note/view?id=%s" % note_id)
class NoteHistoryHandler:
    """List the edit history of a note owned by the current user."""

    @xauth.login_required()
    def GET(self):
        note_id = xutils.get_argument("id")
        creator = xauth.current_name()
        # only the creator may browse history; others get an empty list
        note = NOTE_DAO.get_by_id_creator(note_id, creator)
        if note is None:
            history_list = []
        else:
            history_list = NOTE_DAO.list_history(note_id)
        return xtemplate.render("note/page/history_list.html",
            current_note = note,
            history_list = history_list,
            show_aside = True)
class HistoryViewHandler:
    """Return the content of one historical version of a note as JSON."""

    @xauth.login_required()
    def GET(self):
        note_id = xutils.get_argument("id")
        version = xutils.get_argument("version")

        creator = xauth.current_name()
        # ownership check first; non-owners get empty content
        note = NOTE_DAO.get_by_id_creator(note_id, creator)
        content = ""
        if note != None:
            # rebind: fetch the requested historical version
            note = xutils.call("note.get_history", note_id, version)
            if note != None:
                content = note.content
        return dict(code = "success", data = content)
class NoticeHandler:
    """TODO: reimplement the reminder feature on top of the database."""

    @xauth.login_required()
    def GET(self):
        # refresh reminders; the event carries no context
        user_name = xauth.current_name()
        offset = 0
        limit = 200
        orderby = "ctime_desc"
        xmanager.fire("notice.update")
        # files = NOTE_DAO.list_by_type(user_name, "list", offset, limit, orderby)
        return xtemplate.render("note/page/notice.html")
class QueryHandler:
    """Admin-only JSON lookup of notes by id or by name
    (/note/query/<action>)."""

    @xauth.login_required("admin")
    def GET(self, action = ""):
        if action == "get_by_id":
            id = xutils.get_argument("id")
            return dict(code = "success", data = NOTE_DAO.get_by_id(id))
        if action == "get_by_name":
            name = xutils.get_argument("name")
            return dict(code = "success", data = NOTE_DAO.get_by_name(xauth.current_name(), name))
        return dict(code="fail", message = "unknown action")
class GetDialogHandler:
    """Serve the ajax dialog fragments under /note/ajax/<name>."""

    def get_group_option_dialog(self, kw):
        # options dialog for a notebook
        note_id = xutils.get_argument("note_id")
        file = NOTE_DAO.get_by_id(note_id)
        kw.file = file

    def get_share_group_dialog(self, kw):
        # share dialog for a notebook, including its current share targets
        user_name = xauth.current_name()
        note_id = xutils.get_argument("note_id")
        file = NOTE_DAO.get_by_id(note_id)
        kw.file = file
        kw.share_to_list = []
        if file != None:
            share_from_info = NOTE_DAO.get_share_from(user_name, file.id)
            if share_from_info != None:
                kw.share_to_list = share_from_info.share_to_list

    def get_share_note_dialog(self, kw):
        # share dialog for a single note
        # NOTE(review): user_name is read but unused here -- confirm.
        user_name = xauth.current_name()
        note_id = xutils.get_argument("note_id")
        file = NOTE_DAO.get_by_id(note_id)
        kw.file = file

    @xauth.login_required()
    def GET(self, name = ""):
        kw = Storage()
        if name == "group_option_dialog":
            self.get_group_option_dialog(kw)
        if name == "share_group_dialog":
            self.get_share_group_dialog(kw)
        if name == "share_note_dialog":
            self.get_share_note_dialog(kw)
        return xtemplate.render("note/ajax/%s.html" % name, **kw)
# URL routing table consumed by the xnote framework:
# (pattern, handler class) pairs, flattened into one tuple.
xurls = (
    r"/note/(edit|view)"   , ViewHandler,
    r"/note/print"         , PrintHandler,
    r"/note/(\d+)"         , ViewByIdHandler,
    r"/note/view/([\w\-]+)", ViewByIdHandler,
    r"/note/history"       , NoteHistoryHandler,
    r"/note/history_view"  , HistoryViewHandler,
    r"/note/notice"        , NoticeHandler,
    r"/note/query/(\w+)"   , QueryHandler,
    r"/note/ajax/(.+)"     , GetDialogHandler,

    r"/file/mark"          , MarkHandler,
    r"/file/unmark"        , UnmarkHandler,
    r"/file/markdown"      , ViewHandler
)
| xupingmao/xnote | handlers/note/note_view.py | Python | gpl-3.0 | 14,848 | [
"VisIt"
] | dfc66b763524feb67060f35d4c07ea946de137e8b02811a7cc4400210808f810 |
"""The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import fftpack, linalg, special
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
    """Return a boxcar or rectangular window.

    Included for completeness, this is equivalent to no window at all.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        Whether the window is symmetric. (Has no effect for boxcar.)

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1.

    Examples
    --------
    >>> from scipy import signal
    >>> signal.boxcar(4)
    array([ 1.,  1.,  1.,  1.])
    """
    # every sample has unit weight; `sym` is irrelevant for a flat window
    window = np.ones(M, float)
    return window
def triang(M, sym=True):
    """Return a triangular window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # A periodic window of even length is built as the symmetric window
    # of length M + 1 with the final sample dropped.
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    half = np.arange(1, (M + 1) // 2 + 1)
    if M % 2:
        # Odd length: peak value 1 at the centre; mirror all but the peak.
        ramp = 2 * half / (M + 1.0)
        w = np.concatenate((ramp, ramp[-2::-1]))
    else:
        # Even length: two equal centre samples, neither reaching 1.
        ramp = (2 * half - 1.0) / M
        w = np.concatenate((ramp, ramp[::-1]))
    return w[:-1] if trim else w
def parzen(M, sym=True):
    """Return a Parzen window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    # Sample positions centred on zero: -(M-1)/2 ... (M-1)/2.
    pos = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
    # The Parzen window is piecewise cubic: one polynomial on the outer
    # quarters, another on the inner half.
    outer = pos[pos < -(M - 1) / 4.0]
    inner = pos[np.abs(pos) <= (M - 1) / 4.0]
    w_outer = 2 * (1 - np.abs(outer) / (M / 2.0)) ** 3.0
    frac = np.abs(inner) / (M / 2.0)
    w_inner = 1 - 6 * frac ** 2.0 + 6 * frac ** 3.0
    w = np.concatenate((w_outer, w_inner, w_outer[::-1]))
    return w[:-1] if trim else w
def bohman(M, sym=True):
    """Return a Bohman window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    # |x| over (-1, 1), excluding the endpoints, which are pinned to 0.
    x = np.abs(np.linspace(-1, 1, M)[1:-1])
    core = (1 - x) * np.cos(np.pi * x) + 1.0 / np.pi * np.sin(np.pi * x)
    w = np.concatenate(([0.0], core, [0.0]))
    return w[:-1] if trim else w
def blackman(M, sym=True):
    r"""Return a Blackman window.

    A taper formed from the first three terms of a summation of cosines,

    .. math::  w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)

    It was designed to have close to the minimal leakage possible, only
    slightly worse than a Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    .. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal
           Processing. Prentice-Hall, 1999, pp. 468-471.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    samples = np.arange(0, M)
    w = (0.42 - 0.5 * np.cos(2.0 * np.pi * samples / (M - 1)) +
         0.08 * np.cos(4.0 * np.pi * samples / (M - 1)))
    return w[:-1] if trim else w
def nuttall(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window according to Nuttall.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    # Nuttall's 4-term cosine-sum coefficients; terms alternate in sign.
    coeffs = (0.3635819, 0.4891775, 0.1365995, 0.0106411)
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, a_k in enumerate(coeffs):
        w += (-1) ** k * a_k * np.cos(k * theta)
    return w[:-1] if trim else w
def blackmanharris(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    # 4-term cosine-sum coefficients; terms alternate in sign.
    coeffs = (0.35875, 0.48829, 0.14128, 0.01168)
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, a_k in enumerate(coeffs):
        w += (-1) ** k * a_k * np.cos(k * theta)
    return w[:-1] if trim else w
def flattop(M, sym=True):
    """Return a flat top window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    # 5-term cosine-sum coefficients; terms alternate in sign.
    coeffs = (0.2156, 0.4160, 0.2781, 0.0836, 0.0069)
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, a_k in enumerate(coeffs):
        w += (-1) ** k * a_k * np.cos(k * theta)
    return w[:-1] if trim else w
def bartlett(M, sym=True):
    r"""Return a Bartlett window.

    Very similar to a triangular window, except that the end points are
    at zero:

    .. math:: w(n) = \frac{2}{M-1} \left(
              \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right| \right)

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The triangular window, with the first and last samples equal to
        zero and the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    n = np.arange(0, M)
    # Rising ramp up to the midpoint, mirrored falling ramp after it.
    rising = 2.0 * n / (M - 1)
    w = np.where(n <= (M - 1) / 2.0, rising, 2.0 - rising)
    return w[:-1] if trim else w
def hann(M, sym=True):
    r"""Return a Hann window.

    A taper formed by using a raised cosine with ends that touch zero:

    .. math::  w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    Named for Julius von Hann; also known as the Cosine Bell, and
    sometimes erroneously called the "Hanning" window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    samples = np.arange(0, M)
    w = 0.5 - 0.5 * np.cos(2.0 * np.pi * samples / (M - 1))
    return w[:-1] if trim else w
hanning = hann  # Alias under the historical (mis)name; see hann's docstring.
def tukey(M, alpha=0.5, sym=True):
    r"""Return a Tukey window, also known as a tapered cosine window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    alpha : float, optional
        Shape parameter of the Tukey window, representing the fraction
        of the window inside the cosine tapered region.  If zero, the
        Tukey window is equivalent to a rectangular window.  If one, it
        is equivalent to a Hann window.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for
           Harmonic Analysis with the Discrete Fourier Transform".
           Proceedings of the IEEE 66 (1): 51-83.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function#Tukey_window
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Degenerate shapes reduce to known windows.
    if alpha <= 0:
        return np.ones(M, 'd')
    elif alpha >= 1.0:
        return hann(M, sym=sym)
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    idx = np.arange(0, M)
    # Number of samples in each cosine-tapered edge.
    taper = int(np.floor(alpha * (M - 1) / 2.0))
    rising = idx[0:taper + 1]
    falling = idx[M - taper - 1:]
    # Flat top of ones, with the two edges overwritten by the tapers.
    w = np.ones(M)
    w[:taper + 1] = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0 * rising / alpha / (M - 1))))
    w[M - taper - 1:] = 0.5 * (1 + np.cos(np.pi * (-2.0 / alpha + 1 +
                                                   2.0 * falling / alpha / (M - 1))))
    return w[:-1] if trim else w
def barthann(M, sym=True):
    """Return a modified Bartlett-Hann window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    # Distance of each sample from the window centre, in [0, 0.5].
    dist = np.abs(np.arange(0, M) / (M - 1.0) - 0.5)
    w = 0.62 - 0.48 * dist + 0.38 * np.cos(2 * np.pi * dist)
    return w[:-1] if trim else w
def hamming(M, sym=True):
    r"""Return a Hamming window.

    A taper formed by using a raised cosine with non-zero endpoints,
    optimized to minimize the nearest side lobe:

    .. math::  w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    Named for R. W. Hamming, an associate of J. W. Tukey; described in
    Blackman and Tukey.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    samples = np.arange(0, M)
    w = 0.54 - 0.46 * np.cos(2.0 * np.pi * samples / (M - 1))
    return w[:-1] if trim else w
def kaiser(M, beta, sym=True):
    r"""Return a Kaiser window.

    A taper formed by using a Bessel function:

    .. math::  w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
               \right)/I_0(\beta)

    with :math:`-\frac{M-1}{2} \leq n \leq \frac{M-1}{2}`, where
    :math:`I_0` is the modified zeroth-order Bessel function.

    The Kaiser window is a very good approximation to the Digital
    Prolate Spheroidal Sequence (Slepian window).  Varying `beta` trades
    main-lobe width against side-lobe level: 0 gives a rectangular
    window, ~5 resembles a Hamming, ~6 a Hann, ~8.6 a Blackman.  As
    `beta` gets large the window narrows, so `M` must be large enough to
    sample the spike, otherwise NaNs are returned.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    beta : float
        Shape parameter, determines trade-off between main-lobe width
        and side lobe level.  As beta gets large, the window narrows.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis
           by digital computer", John Wiley and Sons, New York, (1966).
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    samples = np.arange(0, M)
    center = (M - 1) / 2.0
    # Normalized distance from the centre, in [-1, 1].
    ratio = (samples - center) / center
    w = special.i0(beta * np.sqrt(1 - ratio ** 2.0)) / special.i0(beta)
    return w[:-1] if trim else w
def gaussian(M, std, sym=True):
    r"""Return a Gaussian window.

    .. math::  w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    std : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    # Sample offsets from the window centre.
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    two_var = 2 * std * std
    w = np.exp(-offsets ** 2 / two_var)
    return w[:-1] if trim else w
def general_gaussian(M, p, sig, sym=True):
    r"""Return a window with a generalized Gaussian shape.

    .. math::  w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }

    The half-power point is at :math:`(2 \log(2))^{1/(2 p)} \sigma`.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    p : float
        Shape parameter.  p = 1 is identical to `gaussian`, p = 0.5 is
        the same shape as the Laplace distribution.
    sig : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trim = not sym and M % 2 == 0
    if trim:
        M += 1
    # Sample offsets from the window centre.
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    w = np.exp(-0.5 * np.abs(offsets / sig) ** (2 * p))
    return w[:-1] if trim else w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
    r"""Return a Dolph-Chebyshev window.

    The window minimizes the main-lobe width for a given length `M` and
    equiripple sidelobe attenuation `at`.  Unlike most windows it is defined
    through its frequency response, using Chebyshev polynomials:

    .. math:: W(k) = \frac
              {\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
              {\cosh[M \cosh^{-1}(\beta)]}

    where

    .. math:: \beta = \cosh \left [\frac{1}{M}
              \cosh^{-1}(10^\frac{A}{20}) \right ]

    and 0 <= abs(k) <= M-1.  A is the attenuation in decibels (`at`).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    at : float
        Attenuation (in dB).
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1

    Notes
    -----
    The time-domain window is obtained by an inverse FFT of the sampled
    frequency response, so power-of-two `M` are the fastest to generate and
    prime `M` are the slowest.  The equiripple condition in the frequency
    domain creates impulses in the time domain, which appear at the ends of
    the window.

    References
    ----------
    .. [1] C. Dolph, "A current distribution for broadside arrays which
           optimizes the relationship between beam width and side-lobe level",
           Proceedings of the IEEE, Vol. 34, Issue 6
    .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
           American Meteorological Society (April 1997)
           http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
    .. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
           discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
           No. 1, January 1978
    """
    if np.abs(at) < 45:
        warnings.warn("This window is not suitable for spectral analysis "
                      "for attenuation values lower than about 45dB because "
                      "the equivalent noise bandwidth of a Chebyshev window "
                      "does not grow monotonically with increasing sidelobe "
                      "attenuation when the attenuation is smaller than "
                      "about 45 dB.")
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Periodic even-length windows: build length M + 1, drop the last sample.
    trim_last = not sym and M % 2 == 0
    if trim_last:
        M += 1

    # compute the parameter beta from the requested attenuation
    order = M - 1.0
    beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
    k = np.r_[0:M] * 1.0
    x = beta * np.cos(np.pi * k / M)

    # Sample the window's DFT coefficients.  The Chebyshev polynomial is
    # evaluated piecewise from its analytic (cosh/cos) definition; the
    # expansion in scipy.special leads to errors.
    p = np.zeros(x.shape)
    above = x > 1
    below = x < -1
    inside = np.abs(x) <= 1
    p[above] = np.cosh(order * np.arccosh(x[above]))
    p[below] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[below]))
    p[inside] = np.cos(order * np.arccos(x[inside]))

    # Appropriate IDFT and mirroring, which differ for even/odd M; the even
    # case needs a half-sample phase shift before the FFT.
    if M % 2:
        w = np.real(fftpack.fft(p))
        n = (M + 1) // 2
        w = w[:n]
        w = np.concatenate((w[n - 1:0:-1], w))
    else:
        p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
        w = np.real(fftpack.fft(p))
        n = M // 2 + 1
        w = np.concatenate((w[n - 1:0:-1], w[1:n]))
    # Normalize the peak to exactly 1.
    w = w / max(w)
    return w[:-1] if trim_last else w
def slepian(M, width, sym=True):
    """Return a digital Slepian (DPSS) window.

    Used to maximize the energy concentration in the main lobe.  Also called
    the digital prolate spheroidal sequence (DPSS).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    width : float
        Bandwidth
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Periodic even-length windows: build length M + 1, drop the last sample.
    trim_last = not sym and M % 2 == 0
    if trim_last:
        M += 1
    # `width` is given as the full bandwidth; an additional halving matches
    # the scaling of the old implementation (overall factor of 4).
    half_bw = width / 4
    idx = np.arange(M, dtype='d')
    # Symmetric tridiagonal eigenproblem in banded (upper) storage; the
    # top eigenvector is the maximally-concentrated DPSS.
    bands = np.zeros((2, M))
    bands[0, 1:] = idx[1:] * (M - idx[1:]) / 2
    bands[1, :] = ((M - 1 - 2 * idx) / 2) ** 2 * np.cos(2 * np.pi * half_bw)
    _, vec = linalg.eig_banded(bands, select='i',
                               select_range=(M - 1, M - 1))
    win = vec.ravel()
    win /= win.max()
    return win[:-1] if trim_last else win
def cosine(M, sym=True):
    """Return a window with a simple cosine shape.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    Notes
    -----
    .. versionadded:: 0.13.0
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Periodic even-length windows: build length M + 1, drop the last sample.
    trim_last = not sym and M % 2 == 0
    if trim_last:
        M += 1
    # Half period of a sine, sampled at the midpoints of M equal intervals.
    win = np.sin(np.pi / M * (np.arange(0, M) + .5))
    return win[:-1] if trim_last else win
def exponential(M, center=None, tau=1., sym=True):
    r"""Return an exponential (or Poisson) window.

    The window is defined as

    .. math::  w(n) = e^{-|n-center| / \tau}

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    center : float, optional
        Parameter defining the center location of the window function.
        The default value if not given is ``center = (M-1) / 2``.  This
        parameter must take its default value for symmetric windows.
    tau : float, optional
        Parameter defining the decay.  For ``center = 0`` use
        ``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
        remaining at the end.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    Raises
    ------
    ValueError
        If `sym` is True and a non-default `center` is given.

    References
    ----------
    S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
    Technical Review 3, Bruel & Kjaer, 1987.
    """
    # A symmetric window is only meaningful when centered at (M-1)/2.
    if sym and center is not None:
        raise ValueError("If sym==True, center must be None.")
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Periodic even-length windows: build length M + 1, drop the last sample.
    trim_last = not sym and M % 2 == 0
    if trim_last:
        M += 1
    if center is None:
        center = (M-1) / 2
    samples = np.arange(0, M)
    win = np.exp(-np.abs(samples - center) / tau)
    return win[:-1] if trim_last else win
# Maps each tuple of accepted aliases to (window function, needs-parameters?).
# The bool marks windows that require extra shape parameters beyond the length.
_win_equiv_raw = {
    ('barthann', 'brthan', 'bth'): (barthann, False),
    ('bartlett', 'bart', 'brt'): (bartlett, False),
    ('blackman', 'black', 'blk'): (blackman, False),
    ('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
    ('bohman', 'bman', 'bmn'): (bohman, False),
    ('boxcar', 'box', 'ones',
        'rect', 'rectangular'): (boxcar, False),
    ('chebwin', 'cheb'): (chebwin, True),
    ('cosine', 'halfcosine'): (cosine, False),
    ('exponential', 'poisson'): (exponential, True),
    ('flattop', 'flat', 'flt'): (flattop, False),
    ('gaussian', 'gauss', 'gss'): (gaussian, True),
    ('general gaussian', 'general_gaussian',
        'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
    ('hamming', 'hamm', 'ham'): (hamming, False),
    ('hanning', 'hann', 'han'): (hann, False),
    ('kaiser', 'ksr'): (kaiser, True),
    ('nuttall', 'nutl', 'nut'): (nuttall, False),
    ('parzen', 'parz', 'par'): (parzen, False),
    ('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
    ('triangle', 'triang', 'tri'): (triang, False),
    ('tukey', 'tuk'): (tukey, True),
}
# Fill dict with all valid window name strings
# (flattened alias -> function map used by get_window)
_win_equiv = {}
for k, v in _win_equiv_raw.items():
    for key in k:
        _win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
# (get_window rejects a bare string for these; a tuple must be passed)
_needs_param = set()
for k, v in _win_equiv_raw.items():
    if v[1]:
        _needs_param.update(k)
def get_window(window, Nx, fftbins=True):
    """
    Return a window of length `Nx` and type `window`.

    Parameters
    ----------
    window : string, float, or tuple
        The type of window to create. See below for more details.
    Nx : int
        The number of samples in the window.
    fftbins : bool, optional
        If True (default), create a "periodic" window, ready to use with
        `ifftshift` and be multiplied by the result of an FFT (see also
        `fftpack.fftfreq`).
        If False, create a "symmetric" window, for use in filter design.

    Returns
    -------
    get_window : ndarray
        Returns a window of length `Nx` and type `window`

    Notes
    -----
    Window types:

        `boxcar`, `triang`, `blackman`, `hamming`, `hann`, `bartlett`,
        `flattop`, `parzen`, `bohman`, `blackmanharris`, `nuttall`,
        `barthann`, `kaiser` (needs beta), `gaussian` (needs standard
        deviation), `general_gaussian` (needs power, width), `slepian`
        (needs width), `chebwin` (needs attenuation), `exponential`
        (needs decay scale), `tukey` (needs taper fraction)

    A window requiring no parameters may be given as a plain string; a
    window requiring parameters is given as a tuple of the window name
    followed by its parameters.  A plain floating point number is
    interpreted as the beta parameter of a `kaiser` window.  Each window
    type listed above is also the name of a function that can be called
    directly to create a window of that type.

    Examples
    --------
    >>> from scipy import signal
    >>> signal.get_window('triang', 7)
    array([ 0.25,  0.5 ,  0.75,  1.  ,  0.75,  0.5 ,  0.25])
    >>> signal.get_window(('kaiser', 4.0), 9)
    array([ 0.08848053,  0.32578323,  0.63343178,  0.89640418,  1.        ,
            0.89640418,  0.63343178,  0.32578323,  0.08848053])
    >>> signal.get_window(4.0, 9)
    array([ 0.08848053,  0.32578323,  0.63343178,  0.89640418,  1.        ,
            0.89640418,  0.63343178,  0.32578323,  0.08848053])
    """
    sym = not fftbins
    try:
        beta = float(window)
    except (TypeError, ValueError):
        # Not a bare number: resolve a window name plus optional parameters.
        extra_args = ()
        if isinstance(window, tuple):
            winstr = window[0]
            if len(window) > 1:
                extra_args = window[1:]
        elif isinstance(window, string_types):
            if window in _needs_param:
                raise ValueError("The '" + window + "' window needs one or "
                                 "more parameters -- pass a tuple.")
            winstr = window
        else:
            raise ValueError("%s as window type is not supported." %
                             str(type(window)))
        try:
            winfunc = _win_equiv[winstr]
        except KeyError:
            raise ValueError("Unknown window type.")
        return winfunc(Nx, *(extra_args + (sym,)))
    else:
        # A bare number selects a Kaiser window with that beta.
        return kaiser(Nx, beta, sym)
| yuanagain/seniorthesis | venv/lib/python2.7/site-packages/scipy/signal/windows.py | Python | mit | 54,134 | [
"Gaussian"
] | c11d8ff6d985498cea08d72975a356642cd2238052536833b0ecc02d522dcfaa |
"""
Records who we trust to sign feeds.
Trust is divided up into domains, so that it is possible to trust a key
in some cases and not others.
@var trust_db: Singleton trust database instance.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, SafeException
import os
from zeroinstall.support import basedir
from .namespaces import config_site, config_prog, XMLNS_TRUST
class TrustDB(object):
    """A database of trusted keys.
    @ivar keys: maps trusted key fingerprints to the set of domains in which each is trusted
    @type keys: {str: set(str)}
    @ivar watchers: callbacks invoked by L{notify}
    @see: L{trust_db} - the singleton instance of this class"""
    __slots__ = ['keys', 'watchers', '_dry_run']

    def __init__(self):
        self.keys = None	# lazily (re)loaded by ensure_uptodate()
        self.watchers = []
        self._dry_run = False

    def is_trusted(self, fingerprint, domain = None):
        """Check whether a key is trusted, optionally within a single domain.
        @type fingerprint: str
        @type domain: str | None
        @rtype: bool"""
        self.ensure_uptodate()
        domains = self.keys.get(fingerprint, None)
        if not domains: return False	# Unknown key
        if domain is None:
            return True	# Deprecated
        # '*' means the key is trusted in every domain
        return domain in domains or '*' in domains

    def get_trust_domains(self, fingerprint):
        """Return the set of domains in which this key is trusted.
        If the list includes '*' then the key is trusted everywhere.
        @type fingerprint: str
        @rtype: {str}
        @since: 0.27"""
        self.ensure_uptodate()
        return self.keys.get(fingerprint, set())

    def get_keys_for_domain(self, domain):
        """Return the set of keys trusted for this domain.
        @type domain: str
        @rtype: {str}
        @since: 0.27"""
        self.ensure_uptodate()
        return set([fp for fp in self.keys
                    if domain in self.keys[fp]])

    def ensure_uptodate(self):
        """(Re)load L{keys} from the on-disk database.
        In dry-run mode no file access is performed; an empty database is
        used unless keys were set explicitly."""
        if self._dry_run:
            if self.keys is None: self.keys = {}
            return
        from xml.dom import minidom
        # This is a bit inefficient... (could cache things)
        self.keys = {}
        trust = basedir.load_first_config(config_site, config_prog, 'trustdb.xml')
        if trust:
            keys = minidom.parse(trust).documentElement
            for key in keys.getElementsByTagNameNS(XMLNS_TRUST, 'key'):
                domains = set()
                self.keys[key.getAttribute('fingerprint')] = domains
                for domain in key.getElementsByTagNameNS(XMLNS_TRUST, 'domain'):
                    domains.add(domain.getAttribute('value'))
        else:
            # Convert old database to XML format
            trust = basedir.load_first_config(config_site, config_prog, 'trust')
            if trust:
                with open(trust, 'rt') as stream:
                    for key in stream:
                        # Fix: iterating a file yields lines *including* the
                        # trailing newline; without stripping, the stored
                        # fingerprint could never match is_trusted() queries.
                        key = key.strip()
                        if key:
                            self.keys[key] = set(['*'])
def domain_from_url(url):
    """Extract the trust domain for a URL.
    The domain is the network-location component of the URL.
    @param url: the feed's URL
    @type url: str
    @return: the trust domain
    @rtype: str
    @since: 0.27
    @raise SafeException: the URL can't be parsed"""
    try:
        import urlparse			# Python 2
    except ImportError:
        from urllib import parse as urlparse	# Python 3
    # Local paths have no domain at all -- reject them up front.
    if os.path.isabs(url):
        raise SafeException(_("Can't get domain from a local path: '%s'") % url)
    host = urlparse.urlparse(url).netloc
    if not host or host == '*':
        raise SafeException(_("Can't extract domain from URL '%s'") % url)
    return host
# Singleton trust database instance shared by all users of this module.
trust_db = TrustDB()
| afb/0install | zeroinstall/injector/trust.py | Python | lgpl-2.1 | 3,217 | [
"VisIt"
] | cdc356ac7fe65cecb5570035e93cce28389723600c1f75a71f5d4bd864349ae5 |
""" Tools for using spherical harmonic models to fit diffusion data
References
----------
Aganj, I., et. al. 2009. ODF Reconstruction in Q-Ball Imaging With Solid
Angle Consideration.
Descoteaux, M., et. al. 2007. Regularized, fast, and robust analytical
Q-ball imaging.
Tristan-Vega, A., et. al. 2010. A new methodology for estimation of fiber
populations in white matter of the brain with Funk-Radon transform.
Tristan-Vega, A., et. al. 2009. Estimation of fiber orientation probability
density functions in high angular resolution diffusion imaging.
"""
"""
Note about the Transpose:
In the literature the matrix representation of these methods is often written
as Y = Bx where B is some design matrix and Y and x are column vectors. In our
case the input data, a dwi stored as a nifti file for example, is stored as row
vectors (ndarrays) of the form (x, y, z, n), where n is the number of diffusion
directions. We could transpose and reshape the data to be (n, x*y*z), so that
we could directly plug it into the above equation. However, I have chosen to
keep the data as is and implement the relevant equations rewritten in the
following form: Y.T = x.T B.T, or in python syntax data = np.dot(sh_coef, B.T)
where data is Y.T and sh_coef is x.T.
"""
import numpy as np
from numpy import concatenate, diag, diff, empty, eye, sqrt, unique, dot
from numpy.linalg import pinv, svd
from numpy.random import randint
from dipy.reconst.odf import OdfModel, OdfFit
from scipy.special import sph_harm, lpn, lpmv, gammaln
from dipy.core.sphere import Sphere
import dipy.core.gradients as grad
from dipy.sims.voxel import single_tensor, all_tensor_evecs
from dipy.core.geometry import cart2sphere
from dipy.core.onetime import auto_attr
from dipy.reconst.cache import Cache
from distutils.version import StrictVersion
import scipy
# Flag: scipy >= 0.15.0 ships a sph_harm fast enough to use directly; older
# versions take the lpmv-based fallback path in `spherical_harmonics` below.
if StrictVersion(scipy.version.short_version) >= StrictVersion('0.15.0'):
    SCIPY_15_PLUS = True
else:
    SCIPY_15_PLUS = False
def _copydoc(obj):
def bandit(f):
f.__doc__ = obj.__doc__
return f
return bandit
def forward_sdeconv_mat(r_rh, n):
    """Build the forward spherical deconvolution matrix.

    Parameters
    ----------
    r_rh : ndarray
        Rotational harmonics coefficients for the single fiber response
        function.  Each element ``rh[i]`` is associated with spherical
        harmonics of degree ``2*i``.
    n : ndarray
        The degree of the spherical harmonic function associated with each
        row of the deconvolution matrix.  Only even degrees are allowed.

    Returns
    -------
    R : ndarray (N, N)
        Diagonal deconvolution matrix with shape (N, N).

    Raises
    ------
    ValueError
        If `n` contains any odd degrees.
    """
    if np.any(n % 2):
        raise ValueError("n has odd degrees, expecting only even degrees")
    # Degrees advance in steps of two, so degree 2*i lives at index i of r_rh.
    return np.diag(r_rh[n // 2])
def sh_to_rh(r_sh, m, n):
    """Spherical harmonics (SH) to rotational harmonics (RH)

    Calculate the rotational harmonic decomposition up to harmonic order `m`,
    degree `n` for an axially and antipodally symmetric function.  All
    ``m != 0`` coefficients are ignored as axial symmetry is assumed, so
    ``(sh_order/2 + 1)`` non-zero coefficients are returned.

    Parameters
    ----------
    r_sh : ndarray (N,)
        ndarray of SH coefficients for the single fiber response function.
        These coefficients must correspond to the real spherical harmonic
        functions produced by `shm.real_sph_harm`.
    m : ndarray (N,)
        The order of the spherical harmonic function associated with each
        coefficient.
    n : ndarray (N,)
        The degree of the spherical harmonic function associated with each
        coefficient.

    Returns
    -------
    r_rh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,)
        Rotational harmonics coefficients representing the input `r_sh`

    See Also
    --------
    shm.real_sph_harm, shm.real_sym_sh_basis

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination of the
           fibre orientation distribution in diffusion MRI: Non-negativity
           constrained super-resolved spherical deconvolution
    """
    zonal = m == 0
    # The delta function at theta = phi = 0 is known to have zero coefficients
    # where m != 0, so only the zonal (m == 0) part contributes.
    dirac_coef = gen_dirac(0, n[zonal], 0, 0)
    return r_sh[zonal] / dirac_coef
def gen_dirac(m, n, theta, phi):
    """Generate a Dirac delta function oriented at (theta, phi) on the sphere.

    The delta is returned as coefficients of the real spherical harmonic
    basis produced by `shm.real_sph_harm`.

    Parameters
    ----------
    m : ndarray (N,)
        The order of the spherical harmonic function associated with each
        coefficient.
    n : ndarray (N,)
        The degree of the spherical harmonic function associated with each
        coefficient.
    theta : float [0, 2*pi]
        The azimuthal (longitudinal) coordinate.
    phi : float [0, pi]
        The polar (colatitudinal) coordinate.

    Returns
    -------
    dirac : ndarray
        SH coefficients representing the Dirac function.  The shape of this
        is `(m + 2) * (m + 1) / 2`.

    See Also
    --------
    shm.real_sph_harm, shm.real_sym_sh_basis
    """
    # Evaluating the basis at the delta's direction yields its expansion
    # coefficients.
    return real_sph_harm(m, n, theta, phi)
def spherical_harmonics(m, n, theta, phi):
    r"""Compute the spherical harmonic $Y^m_n$ at `theta`, `phi`.

    Scalar or array arguments are accepted and broadcast against each other.

    Parameters
    ----------
    m : int ``|m| <= n``
        The order of the harmonic.
    n : int ``>= 0``
        The degree of the harmonic.
    theta : float [0, 2*pi]
        The azimuthal (longitudinal) coordinate.
    phi : float [0, pi]
        The polar (colatitudinal) coordinate.

    Returns
    -------
    y_mn : complex float
        The harmonic $Y^m_n$ sampled at `theta` and `phi`.

    Notes
    -----
    For scipy < 0.15.0 the harmonic is evaluated directly from associated
    Legendre functions, which is faster than scipy.special.sph_harm of those
    versions.
    """
    if SCIPY_15_PLUS:
        # Recent scipy provides a fast implementation; just delegate.
        return sph_harm(m, n, theta, phi)
    cos_phi = np.cos(phi)
    harm = lpmv(m, n, cos_phi).astype(complex)
    # Orthonormalization factor, computed through log-gammas for stability.
    harm *= np.sqrt((2 * n + 1) / 4.0 / np.pi)
    harm *= np.exp(0.5 * (gammaln(n - m + 1) - gammaln(n + m + 1)))
    # Complex azimuthal phase term.
    return harm * np.exp(1j * m * theta)
def real_sph_harm(m, n, theta, phi):
    r"""Compute real spherical harmonics.

    The real harmonic $Y^m_n$ is defined to be:

        Real($Y^m_n$) * sqrt(2)     if m > 0
        $Y^m_n$                     if m == 0
        Imag($Y^m_n$) * sqrt(2)     if m < 0

    Scalar or array arguments are accepted and broadcast against each other.

    Parameters
    ----------
    m : int ``|m| <= n``
        The order of the harmonic.
    n : int ``>= 0``
        The degree of the harmonic.
    theta : float [0, 2*pi]
        The azimuthal (longitudinal) coordinate.
    phi : float [0, pi]
        The polar (colatitudinal) coordinate.

    Returns
    -------
    y_mn : real float
        The real harmonic $Y^m_n$ sampled at `theta` and `phi`.

    See Also
    --------
    scipy.special.sph_harm
    """
    # dipy's (theta, phi) convention is reversed with respect to the
    # signature of scipy.special.sph_harm, hence the swapped argument order.
    cplx = spherical_harmonics(np.abs(m), n, phi, theta)
    out = np.where(m > 0, cplx.imag, cplx.real)
    # sqrt(2) restores unit norm for the m != 0 terms.
    out *= np.where(m == 0, 1., np.sqrt(2))
    return out
def real_sym_sh_mrtrix(sh_order, theta, phi):
    """
    Compute real spherical harmonics in the mrtrix convention, where the real
    harmonic $Y^m_n$ is defined to be::

        Real($Y^m_n$)       if m > 0
        $Y^m_n$             if m == 0
        Imag($Y^|m|_n$)     if m < 0

    Scalar or array arguments are accepted and broadcast against each other.

    Parameters
    ----------
    sh_order : int
        The maximum degree or the spherical harmonic basis.
    theta : float [0, pi]
        The polar (colatitudinal) coordinate.
    phi : float [0, 2*pi]
        The azimuthal (longitudinal) coordinate.

    Returns
    -------
    y_mn : real float
        The real harmonic $Y^m_n$ sampled at `theta` and `phi` as implemented
        in mrtrix.  Warning: the basis of Tournier et al 2004 and 2007 is
        slightly different.
    m : array
        The order of the harmonics.
    n : array
        The degree of the harmonics.
    """
    m, n = sph_harm_ind_list(sh_order)
    # Column vectors so every (m, n) pair is sampled at every point.
    phi = np.reshape(phi, [-1, 1])
    theta = np.reshape(theta, [-1, 1])
    m = -m
    basis = real_sph_harm(m, n, theta, phi)
    # mrtrix omits the sqrt(2) factor on the m != 0 terms.
    basis /= np.where(m == 0, 1., np.sqrt(2))
    return basis, m, n
def real_sym_sh_basis(sh_order, theta, phi):
    """Samples a real symmetric spherical harmonic basis at points on the sphere

    Samples the basis functions up to degree `sh_order` at the points given by
    `theta` and `phi`.  The basis functions are defined the same way as in
    fibernavigator [1]_, where the real harmonic $Y^m_n$ is defined to be:

        Imag($Y^m_n$) * sqrt(2)     if m > 0
        $Y^m_n$                     if m == 0
        Real($Y^|m|_n$) * sqrt(2)   if m < 0

    Scalar or array arguments are accepted and broadcast against each other.

    Parameters
    ----------
    sh_order : int
        even int > 0, max spherical harmonic degree
    theta : float [0, 2*pi]
        The azimuthal (longitudinal) coordinate.
    phi : float [0, pi]
        The polar (colatitudinal) coordinate.

    Returns
    -------
    y_mn : real float
        The real harmonic $Y^m_n$ sampled at `theta` and `phi`
    m : array
        The order of the harmonics.
    n : array
        The degree of the harmonics.

    References
    ----------
    .. [1] http://code.google.com/p/fibernavigator/
    """
    m, n = sph_harm_ind_list(sh_order)
    # Column vectors so every (m, n) pair is sampled at every point.
    phi = np.reshape(phi, [-1, 1])
    theta = np.reshape(theta, [-1, 1])
    return real_sph_harm(m, n, theta, phi), m, n
# Maps a basis name to its sampling function; the `None` key selects the
# default (fibernavigator-style) basis.
sph_harm_lookup = {None: real_sym_sh_basis,
                   "mrtrix": real_sym_sh_mrtrix,
                   "fibernav": real_sym_sh_basis}
def sph_harm_ind_list(sh_order):
    """
    Returns the degree (n) and order (m) of all the symmetric spherical
    harmonics of degree less then or equal to `sh_order`. The results,
    `m_list` and `n_list` are kx1 arrays, where k depends on sh_order. They
    can be passed to :func:`real_sph_harm`.

    Parameters
    ----------
    sh_order : int
        even int > 0, max degree to return

    Returns
    -------
    m_list : array
        orders of even spherical harmonics
    n_list : array
        degrees of even spherical harmonics

    Raises
    ------
    ValueError
        If `sh_order` is odd.

    See also
    --------
    real_sph_harm
    """
    if sh_order % 2 != 0:
        raise ValueError('sh_order must be an even integer >= 0')
    n_range = np.arange(0, sh_order + 1, 2, dtype=int)
    # Each degree n contributes 2*n + 1 orders (m = -n, ..., n).
    n_list = np.repeat(n_range, n_range * 2 + 1)
    # Use floor division so `ncoef` stays an int on Python 3; true division
    # would yield a float and make np.empty() fail.
    ncoef = (sh_order + 2) * (sh_order + 1) // 2
    offset = 0
    m_list = empty(ncoef, 'int')
    for ii in n_range:
        m_list[offset:offset + 2 * ii + 1] = np.arange(-ii, ii + 1)
        offset = offset + 2 * ii + 1
    # makes the arrays ncoef by 1, allows for easy broadcasting later in code
    return (m_list, n_list)
def order_from_ncoef(ncoef):
    """
    Given a number `ncoef` of coefficients, calculate back the sh_order.

    Inverts ``ncoef = (sh_order + 2) * (sh_order + 1) / 2`` by solving the
    quadratic equation for sh_order.

    Parameters
    ----------
    ncoef : int
        Number of symmetric spherical harmonic coefficients.

    Returns
    -------
    int
        The corresponding spherical harmonic order.
    """
    # Positive root of sh_order**2 + 3*sh_order + (2 - 2*ncoef) = 0.
    # Floor division keeps the result an int on Python 3; plain `/` would
    # silently return a float.
    return int(-3 + np.sqrt(9 - 4 * (2 - 2 * ncoef))) // 2
def smooth_pinv(B, L):
    """Regularized pseudo-inverse

    Computes a regularized least square inverse of B

    Parameters
    ----------
    B : array_like (n, m)
        Matrix to be inverted
    L : array_like (m,)
        Regularization weights, one per column of `B`.

    Returns
    -------
    inv : ndarray (m, n)
        regularized least square inverse of B

    Notes
    -----
    In the literature this inverse is often written $(B^{T}B+L^{2})^{-1}B^{T}$.
    Stacking ``[B; diag(L)]`` and taking its pseudo-inverse computes the same
    quantity with better numerical stability than forming the matrix product
    directly.
    """
    augmented = concatenate((B, diag(L)))
    # Keep only the columns matching the rows of B; the rest correspond to
    # the (implicitly zero) regularization targets.
    return pinv(augmented)[:, :len(B)]
def lazy_index(index):
    """Produces a lazy index

    Returns a slice that can be used for indexing an array, if no slice can be
    made index is returned as is.

    Parameters
    ----------
    index : array_like
        A 1d boolean mask or 1d array of integer indices.

    Returns
    -------
    slice or ndarray
        An equivalent slice when the indices are evenly spaced, otherwise the
        integer index array itself.

    Raises
    ------
    ValueError
        If `index` is not one-dimensional.
    """
    index = np.array(index)
    # Raise instead of assert so the check survives `python -O`.
    if index.ndim != 1:
        raise ValueError("index must be 1d")
    if index.dtype.kind == 'b':
        # Convert a boolean mask to integer positions.
        index = index.nonzero()[0]
    if len(index) == 1:
        return slice(index[0], index[0] + 1)
    step = unique(diff(index))
    if len(step) != 1 or step[0] == 0:
        # Unevenly spaced (or repeated) indices cannot be expressed as a slice.
        return index
    else:
        return slice(index[0], index[-1] + 1, step[0])
def _gfa_sh(coef, sh0_index=0):
"""The gfa of the odf, computed from the spherical harmonic coefficients
This is a private function because it only works for coefficients of
normalized sh bases.
Parameters
----------
coef : array
The coefficients, using a normalized sh basis, that represent each odf.
sh0_index : int
The index of the coefficient associated with the 0th order sh harmonic.
Returns
-------
gfa_values : array
The gfa of each odf.
"""
coef_sq = coef**2
numer = coef_sq[..., sh0_index]
denom = (coef_sq).sum(-1)
# The sum of the square of the coefficients being zero is the same as all
# the coefficients being zero
allzero = denom == 0
# By adding 1 to numer and denom where both and are 0, we prevent 0/0
numer = numer + allzero
denom = denom + allzero
return np.sqrt(1. - (numer / denom))
class SphHarmModel(OdfModel, Cache):
    """The base class to be sub-classed by specific spherical harmonic models
    of diffusion data"""
    def __init__(self, gtab, sh_order, smooth=0.006, min_signal=1.,
                 assume_normed=False):
        """Creates a model that can be used to fit or sample diffusion data

        Arguments
        ---------
        gtab : GradientTable
            Diffusion gradients used to acquire data
        sh_order : even int >= 0
            the spherical harmonic order of the model
        smooth : float between 0 and 1, optional
            The regularization parameter of the model
        min_signal : float, > 0, optional
            During fitting, all signal values less than `min_signal` are
            clipped to `min_signal`. This is done primarily to avoid values
            less than or equal to zero when taking logs.
        assume_normed : bool, optional
            If True, clipping and normalization of the data with respect to the
            mean B0 signal are skipped during mode fitting. This is an advanced
            feature and should be used with care.

        See Also
        --------
        normalize_data
        """
        OdfModel.__init__(self, gtab)
        # Lazy (slice-based where possible) indices into the b0 and dwi
        # volumes of the data's last axis.
        self._where_b0s = lazy_index(gtab.b0s_mask)
        self._where_dwi = lazy_index(~gtab.b0s_mask)
        self.assume_normed = assume_normed
        self.min_signal = min_signal
        # Design matrix B: the real symmetric SH basis sampled at the
        # (non-b0) gradient directions.
        x, y, z = gtab.gradients[self._where_dwi].T
        r, theta, phi = cart2sphere(x, y, z)
        B, m, n = real_sym_sh_basis(sh_order, theta[:, None], phi[:, None])
        # Eigenvalues of the Laplace-Beltrami operator for each basis
        # function; used by subclasses for smoothing/regularization.
        L = -n * (n + 1)
        # Legendre polynomial values at 0 for each degree n
        # (presumably Funk-Radon transform weights -- see subclasses'
        # _set_fit_matrix implementations).
        legendre0 = lpn(sh_order, 0)[0]
        F = legendre0[n]
        self.sh_order = sh_order
        self.B = B
        self.m = m
        self.n = n
        # Subclasses build their model-specific fit matrix here.
        self._set_fit_matrix(B, L, F, smooth)
    def _set_fit_matrix(self, *args):
        """Should be set in a subclass and is called by __init__"""
        msg = "User must implement this method in a subclass"
        raise NotImplementedError(msg)
    def fit(self, data, mask=None):
        """Fits the model to diffusion data and returns the model fit"""
        # Normalize the data and fit coefficients
        if not self.assume_normed:
            data = normalize_data(data, self._where_b0s, self.min_signal)
        # Compute coefficients using abstract method
        coef = self._get_shm_coef(data)
        # Apply the mask to the coefficients
        if mask is not None:
            mask = np.asarray(mask, dtype=bool)
            # Zero out the coefficients of voxels outside the mask
            # (broadcast over the coefficient axis).
            coef *= mask[..., None]
        return SphHarmFit(self, coef, mask)
class SphHarmFit(OdfFit):
    """Diffusion data fit to a spherical harmonic model"""
    def __init__(self, model, shm_coef, mask):
        self.model = model  # the SphHarmModel that produced this fit
        self._shm_coef = shm_coef  # SH coefficients; last axis indexes them
        self.mask = mask  # boolean array of fitted voxels, or None
    @property
    def shape(self):
        # Spatial shape of the fit (the coefficient axis is dropped).
        return self._shm_coef.shape[:-1]
    def __getitem__(self, index):
        """Allowing indexing into fit"""
        # Index shm_coefficients
        if isinstance(index, tuple):
            # Keep the coefficient axis intact when a multi-axis index is used.
            coef_index = index + (Ellipsis,)
        else:
            coef_index = index
        new_coef = self._shm_coef[coef_index]
        # Index mask
        if self.mask is not None:
            new_mask = self.mask[index]
            assert new_mask.shape == new_coef.shape[:-1]
        else:
            new_mask = None
        return SphHarmFit(self.model, new_coef, new_mask)
    def odf(self, sphere):
        """Samples the odf function on the points of a sphere

        Parameters
        ----------
        sphere : Sphere
            The points on which to sample the odf.

        Returns
        -------
        values : ndarray
            The value of the odf on each point of `sphere`.
        """
        # The sampling matrix depends only on the sphere and the model's
        # sh_order, so it is cached on the model and shared between fits.
        sampling_matrix = self.model.cache_get("sampling_matrix", sphere)
        if sampling_matrix is None:
            phi = sphere.phi.reshape((-1, 1))
            theta = sphere.theta.reshape((-1, 1))
            sh_order = self.model.sh_order
            sampling_matrix, m, n = real_sym_sh_basis(sh_order, theta, phi)
            self.model.cache_set("sampling_matrix", sphere, sampling_matrix)
        return dot(self._shm_coef, sampling_matrix.T)
    @auto_attr
    def gfa(self):
        # Generalized fractional anisotropy from the SH coefficients;
        # _gfa_sh assumes a normalized basis.
        return _gfa_sh(self._shm_coef, 0)
    @property
    def shm_coeff(self):
        """The spherical harmonic coefficients of the odf

        Make this a property for now, if there is a usecase for modifying
        the coefficients we can add a setter or expose the coefficients more
        directly
        """
        return self._shm_coef
    def predict(self, gtab=None, S0=1.0):
        """
        Predict the diffusion signal from the model coefficients.

        Parameters
        ----------
        gtab : a GradientTable class instance
            The directions and bvalues on which prediction is desired
        S0 : float array
            The mean non-diffusion-weighted signal in each voxel. Default: 1 in
            all voxels
        """
        # Delegates to the model; not every SphHarmModel subclass implements
        # prediction.
        if not hasattr(self.model, 'predict'):
            msg = "This model does not have prediction implemented yet"
            raise NotImplementedError(msg)
        return self.model.predict(self.shm_coeff, gtab, S0)
class CsaOdfModel(SphHarmModel):
    """Implementation of Constant Solid Angle reconstruction method.

    References
    ----------
    .. [1] Aganj, I., et. al. 2009. ODF Reconstruction in Q-Ball Imaging With
           Solid Angle Consideration.
    """
    # Signal values are clipped into (min, max) so log(-log(x)) is defined.
    min = .001
    max = .999
    # Value of the order-0 coefficient, fixed by the CSA formulation.
    _n0_const = .5 / np.sqrt(np.pi)

    def _set_fit_matrix(self, B, L, F, smooth):
        """The fit matrix, is used by fit_coefficients to return the
        coefficients of the odf"""
        invB = smooth_pinv(B, sqrt(smooth) * L)
        self._fit_matrix = (F[:, None] * L[:, None]) / (8 * np.pi) * invB

    def _get_shm_coef(self, data, mask=None):
        """Returns the coefficients of the model"""
        clipped = data[..., self._where_dwi].clip(self.min, self.max)
        loglog_data = np.log(-np.log(clipped))
        sh_coef = dot(loglog_data, self._fit_matrix.T)
        # The order-0 coefficient is constant in CSA.
        sh_coef[..., 0] = self._n0_const
        return sh_coef
class OpdtModel(SphHarmModel):
    """Implementation of Orientation Probability Density Transform
    reconstruction method.

    References
    ----------
    .. [1] Tristan-Vega, A., et. al. 2010. A new methodology for estimation of
           fiber populations in white matter of the brain with Funk-Radon
           transform.
    .. [2] Tristan-Vega, A., et. al. 2009. Estimation of fiber orientation
           probability density functions in high angular resolution diffusion
           imaging.
    """

    def _set_fit_matrix(self, B, L, F, smooth):
        # Precompute the two matrices used by _slowadc_formula.
        invB = smooth_pinv(B, sqrt(smooth) * L)
        L_col = L[:, None]
        F_col = F[:, None]
        self._fit_matrix = (F_col * L_col * invB, 4 * F_col * invB)

    def _get_shm_coef(self, data, mask=None):
        """Returns the coefficients of the model"""
        delta_b, delta_q = self._fit_matrix
        return _slowadc_formula(data[..., self._where_dwi], delta_b, delta_q)
def _slowadc_formula(data, delta_b, delta_q):
"""formula used in SlowAdcOpdfModel"""
logd = -np.log(data)
return dot(logd * (1.5 - logd) * data, delta_q.T) - dot(data, delta_b.T)
class QballModel(SphHarmModel):
    """Implementation of regularized Qball reconstruction method.

    References
    ----------
    .. [1] Descoteaux, M., et. al. 2007. Regularized, fast, and robust
           analytical Q-ball imaging.
    """

    def _set_fit_matrix(self, B, L, F, smooth):
        # Regularized pseudo-inverse of the design matrix, scaled by F.
        invB = smooth_pinv(B, sqrt(smooth) * L)
        self._fit_matrix = F[:, None] * invB

    def _get_shm_coef(self, data, mask=None):
        """Returns the coefficients of the model"""
        return dot(data[..., self._where_dwi], self._fit_matrix.T)
def normalize_data(data, where_b0, min_signal=1., out=None):
    """Normalizes the data with respect to the mean b0
    """
    if out is None:
        normed = np.array(data, dtype='float32', copy=True)
    else:
        if out.dtype.kind != 'f':
            raise ValueError("out must be floating point")
        out[:] = data
        normed = out
    # Floor the signal so the division below cannot blow up on zeros.
    normed.clip(min_signal, out=normed)
    mean_b0 = normed[..., where_b0].mean(-1)
    normed /= mean_b0[..., None]
    return normed
def hat(B):
    """Returns the hat matrix for the design matrix B
    """
    # H = U U.T, where U spans the column space of B (thin SVD).
    U = svd(B, False)[0]
    return dot(U, U.T)
def lcr_matrix(H):
    """Returns a matrix for computing leveraged, centered residuals from data

    If r = (d-Hd), the leveraged centered residuals are lcr = (r/l)-mean(r/l).
    Returns the matrix R, such that lcr = Rd.
    """
    if H.ndim != 2 or H.shape[0] != H.shape[1]:
        raise ValueError('H should be a square matrix')
    # Leverage of each observation, from the diagonal of the hat matrix.
    lev = sqrt(1 - H.diagonal())[:, None]
    R = (eye(len(H)) - H) / lev
    # Center the rows so the residuals have zero column mean.
    return R - R.mean(0)
def bootstrap_data_array(data, H, R, permute=None):
    """Applies the Residual Bootstraps to the data given H and R

    data must be normalized, ie 0 < data <= 1

    This function, and the bootstrap_data_voxel function, calculate
    residual-bootstrap samples given a Hat matrix and a Residual matrix.
    These samples can be used for non-parametric statistics or for bootstrap
    probabilistic tractography:

    References
    ----------
    .. [1] J. I. Berman, et al., "Probabilistic streamline q-ball tractography
           using the residual bootstrap" 2008.
    .. [2] HA Haroon, et al., "Using the model-based residual bootstrap to
           quantify uncertainty in fiber orientations from Q-ball analysis"
           2009.
    .. [3] B. Jeurissen, et al., "Probabilistic Fiber Tracking Using the
           Residual Bootstrap with Constrained Spherical Deconvolution" 2011.
    """
    # Default to a random resampling of the residual rows.
    if permute is None:
        permute = randint(data.shape[-1], size=data.shape[-1])
    assert R.shape == H.shape
    assert len(permute) == R.shape[-1]
    # fitted + permuted residuals, computed in one projection.
    return dot(data, (H + R[permute]).T)
def bootstrap_data_voxel(data, H, R, permute=None):
    """Like bootstrap_data_array but faster when for a single voxel

    data must be 1d and normalized
    """
    if permute is None:
        permute = randint(data.shape[-1], size=data.shape[-1])
    # fitted signal plus a permuted draw of the residuals.
    residuals = dot(data, R.T)
    return dot(data, H.T) + residuals[permute]
class ResidualBootstrapWrapper(object):
    """Returns a residual bootstrap sample of the signal_object when indexed

    Wraps a signal_object, which can be an interpolator. When indexed, the
    wrapper indexes the signal_object to get the signal, then samples the
    residual bootstrap distribution of that signal and returns the sample.
    """

    def __init__(self, signal_object, B, where_dwi, min_signal=1.):
        """Builds a ResidualBootstrapWrapper

        Given some linear model described by B, the design matrix, and a
        signal_object, returns an object which can sample the residual
        bootstrap distribution of the signal. We assume that the signals are
        normalized so we clip the bootstrap samples to be between `min_signal`
        and 1.

        Parameters
        ----------
        signal_object : some object that can be indexed
            This object should return diffusion weighted signals when indexed.
        B : ndarray, ndim=2
            The design matrix of the spherical harmonics model used to fit the
            data. This is the model that will be used to compute the residuals
            and sample the residual bootstrap distribution
        where_dwi :
            indexing object to find diffusion weighted signals from signal
        min_signal : float
            The lowest allowable signal.
        """
        self._signal_object = signal_object
        # Hat and residual matrices are fixed by B, so build them once.
        self._H = hat(B)
        self._R = lcr_matrix(self._H)
        self._min_signal = min_signal
        self._where_dwi = where_dwi
        self.data = signal_object.data
        self.voxel_size = signal_object.voxel_size

    def __getitem__(self, index):
        """Indexes self._signal_object and bootstraps the result"""
        voxel_signal = self._signal_object[index].copy()
        dwi = voxel_signal[self._where_dwi]
        boot = bootstrap_data_voxel(dwi, self._H, self._R)
        # Keep the bootstrapped sample in the valid normalized range.
        boot.clip(self._min_signal, 1., out=boot)
        voxel_signal[self._where_dwi] = boot
        return voxel_signal
def sf_to_sh(sf, sphere, sh_order=4, basis_type=None, smooth=0.0):
    """Spherical function to spherical harmonics (SH).

    Parameters
    ----------
    sf : ndarray
        Values of a function on the given `sphere`.
    sphere : Sphere
        The points on which the sf is defined.
    sh_order : int, optional
        Maximum SH order in the SH fit. For `sh_order`, there will be
        ``(sh_order + 1) * (sh_order + 2) / 2`` SH coefficients (default 4).
    basis_type : {None, 'mrtrix', 'fibernav'}
        ``None`` for the default dipy basis,
        ``mrtrix`` for the MRtrix basis, and
        ``fibernav`` for the FiberNavigator basis
        (default ``None``).
    smooth : float, optional
        Lambda-regularization in the SH fit (default 0.0).

    Returns
    -------
    sh : ndarray
        SH coefficients representing the input function.
    """
    basis = sph_harm_lookup.get(basis_type)
    if basis is None:
        raise ValueError("Invalid basis name.")
    B, m, n = basis(sh_order, sphere.theta, sphere.phi)
    # Laplace-Beltrami eigenvalues, used for the smoothing term.
    invB = smooth_pinv(B, sqrt(smooth) * (-n * (n + 1)))
    return np.dot(sf, invB.T)
def sh_to_sf(sh, sphere, sh_order, basis_type=None):
    """Spherical harmonics (SH) to spherical function (SF).

    Parameters
    ----------
    sh : ndarray
        SH coefficients representing a spherical function.
    sphere : Sphere
        The points on which to sample the spherical function.
    sh_order : int
        Maximum SH order in the SH fit. For `sh_order`, there will be
        ``(sh_order + 1) * (sh_order + 2) / 2`` SH coefficients.
    basis_type : {None, 'mrtrix', 'fibernav'}
        ``None`` for the default dipy basis,
        ``mrtrix`` for the MRtrix basis, and
        ``fibernav`` for the FiberNavigator basis
        (default ``None``).

    Returns
    -------
    sf : ndarray
        Spherical function values on the `sphere`.
    """
    basis = sph_harm_lookup.get(basis_type)
    if basis is None:
        raise ValueError("Invalid basis name.")
    # Only the design matrix is needed to evaluate the coefficients.
    B = basis(sh_order, sphere.theta, sphere.phi)[0]
    return np.dot(sh, B.T)
def sh_to_sf_matrix(sphere, sh_order, basis_type=None, return_inv=True, smooth=0):
    """ Matrix that transforms Spherical harmonics (SH) to spherical
    function (SF).

    Parameters
    ----------
    sphere : Sphere
        The points on which to sample the spherical function.
    sh_order : int
        Maximum SH order in the SH fit. For `sh_order`, there will be
        ``(sh_order + 1) * (sh_order + 2) / 2`` SH coefficients.
    basis_type : {None, 'mrtrix', 'fibernav'}
        ``None`` for the default dipy basis,
        ``mrtrix`` for the MRtrix basis, and
        ``fibernav`` for the FiberNavigator basis
        (default ``None``).
    return_inv : bool
        If True then the inverse of the matrix is also returned
    smooth : float, optional
        Lambda-regularization in the SH fit (default 0.0).

    Returns
    -------
    B : ndarray
        Matrix that transforms spherical harmonics to spherical function
        ``sf = np.dot(sh, B)``.
    invB : ndarray
        Inverse of B.
    """
    basis = sph_harm_lookup.get(basis_type)
    if basis is None:
        raise ValueError("Invalid basis name.")
    B, m, n = basis(sh_order, sphere.theta, sphere.phi)
    if not return_inv:
        return B.T
    # Regularized pseudo-inverse, weighted by Laplace-Beltrami eigenvalues.
    invB = smooth_pinv(B, np.sqrt(smooth) * (-n * (n + 1)))
    return B.T, invB.T
| samuelstjean/dipy | dipy/reconst/shm.py | Python | bsd-3-clause | 29,921 | [
"DIRAC"
] | b8e6544456dbbd32d7718ac408d6316d72c8aad021bd98e0ea8395c802a28060 |
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
  provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when then
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances are populated attribute-by-attribute by
    ``get_config_from_root`` (VCS, style, versionfile_source,
    versionfile_build, tag_prefix, parentdir_prefix, verbose).
    """
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
    """

    def _looks_like_root(candidate):
        # The root is recognized by the presence of setup.py or versioneer.py.
        has_setup = os.path.exists(os.path.join(candidate, "setup.py"))
        has_versioneer = os.path.exists(os.path.join(candidate, "versioneer.py"))
        return has_setup or has_versioneer

    root = os.path.realpath(os.path.abspath(os.getcwd()))
    if not _looks_like_root(root):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        if not _looks_like_root(root):
            err = (
                "Versioneer was unable to run the project root directory. "
                "Versioneer requires setup.py to be executed from "
                "its immediate directory (like 'python setup.py COMMAND'), "
                "or in a way that lets it use sys.argv[0] to find the root "
                "(like 'python path/to/setup.py COMMAND')."
            )
            raise VersioneerBadRootError(err)
    versioneer_py = os.path.join(root, "versioneer.py")
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(me)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in {} is using versioneer.py from {}".format(os.path.dirname(me), versioneer_py))
    except NameError:
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config."""
    # This might raise EnvironmentError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    # NOTE: SafeConfigParser/readfp are kept (despite py3 deprecation)
    # because this file explicitly supports python2.
    parser = configparser.SafeConfigParser()
    with open(os.path.join(root, "setup.cfg"), "r") as cfg_file:
        parser.readfp(cfg_file)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def _option(name):
        # Optional settings come back as None when absent.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None

    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = _option("style") or ""
    cfg.versionfile_source = _option("versionfile_source")
    cfg.versionfile_build = _option("versionfile_build")
    cfg.tag_prefix = _option("tag_prefix")
    # An explicitly-quoted empty prefix in setup.cfg means "no prefix".
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = _option("parentdir_prefix")
    cfg.verbose = _option("verbose")
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Raised by the version-discovery strategies to signal "try the next
    strategy" rather than a fatal error.
    """
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}  # maps VCS name -> _version.py template source text
HANDLERS = {}  # maps VCS name -> {method name -> handler}, via register_vcs_handler
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # setdefault creates the per-VCS table on first registration.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s).

    Tries each name in ``commands`` until one launches; returns
    ``(stdout, returncode)`` on success, ``(None, returncode)`` on a
    nonzero exit, and ``(None, None)`` if no command could be started.
    """
    assert isinstance(commands, list)
    proc = None
    for cmd in commands:
        dispcmd = str([cmd] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            proc = subprocess.Popen(
                [cmd] + args, cwd=cwd, env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
            break
        except OSError:
            e = sys.exc_info()[1]
            # Missing executable: fall through to the next candidate name.
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried {}".format(commands))
        return None, None
    stdout = proc.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if proc.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, proc.returncode
    return stdout, proc.returncode
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(full_tag, tag_prefix)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution, then 'git add'-ing the touched
    files (MANIFEST.in, the version file, the package __init__.py if given,
    this versioneer.py itself, and .gitattributes when newly written).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        # Resolve this versioneer.py file so it gets 'git add'-ed too;
        # map compiled .pyc/.pyo names back to the .py source.
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # 'with' guarantees the handle is closed even if parsing raises
        # (the original open()/close() pair leaked it on non-OSError
        # failures).
        with open(".gitattributes", "r") as f:
            for line in f:
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except OSError:
        # no .gitattributes yet; the append below will create it
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  *root* and up to two of
    its parent directories are checked for a basename starting with
    *parentdir_prefix*; the remainder of that basename is the version.
    Raises NotThisMethod when no candidate matches.
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {
                "version": basename[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s"
              % (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as f:
            contents = f.read()
    except OSError:
        raise NotThisMethod("unable to read _version.py")
    # The generated file may carry LF or CRLF line endings; try both.
    mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S)
    if mo is None:
        mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S)
    if mo is None:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
    """Write the given versions dict into the given _version.py file."""
    json_blob = json.dumps(versions, sort_keys=True, indent=1,
                           separators=(",", ": "))
    # remove the old file first, then write a fresh SHORT_VERSION_PY copy
    # with the JSON blob substituted in
    os.unlink(filename)
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % json_blob)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return "." if the closest tag already contains a "+", else "+".

    PEP 440 local version segments start with "+"; later fields within the
    segment are separated by ".".  A missing or None "closest-tag" is
    treated as containing no "+" (the original `pieces.get("closest-tag", "")`
    returned None — not the default — when the key was present but None,
    making `"+" in None` raise TypeError).
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Render TAG[+DISTANCE.gHEX[.dirty]] (PEP 440 local version identifier).

    A tagged build that is then dirtied renders as TAG+0.gHEX.dirty.
    With no tag at all (git describe gave bare HEX) the result is
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += plus_or_dot(pieces)
            version += "{}.g{}".format(pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                version += ".dirty"
    else:
        # no tag at all: synthesize a 0+untagged local version
        version = "0+untagged.{}.g{}".format(pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE] — carries no -dirty marker.

    With no tag at all the result is 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        return "0.post.dev{}".format(pieces["distance"])
    if pieces["distance"]:
        return "{}.post.dev{}".format(tag, pieces["distance"])
    return tag
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" marks a dirty tree; .dev0 sorts backwards, so a dirty tree
    appears "older" than the matching clean build — but you shouldn't be
    releasing software with -dirty anyway.  With no tag at all:
    0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if not tag:
        version = "0.post{}".format(pieces["distance"])
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g{}".format(pieces["short"])
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post{}".format(pieces["distance"])
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g{}".format(pieces["short"])
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    # (fixed docstring typo: "Eexceptions" -> "Exceptions")
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1: no tag; distance is the total commit count
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """Render TAG[-DISTANCE-gHEX][-dirty], mimicking
    'git describe --tags --dirty --always'.

    With no tag at all the result is just HEX[-dirty] (no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-{}-g{}".format(pieces["distance"], pieces["short"])
    else:
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty], mimicking
    'git describe --tags --dirty --always --long'.

    Unlike render_git_describe(), the distance and hash are included even
    at distance zero.  With no tag at all the result is just HEX[-dirty]
    (no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "{}-{}-g{}".format(tag, pieces["distance"], pieces["short"])
    else:
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style.

    An error recorded in *pieces* short-circuits to a "version unknown"
    dict; otherwise the style name (default "pep440") selects one of the
    render_* helpers.  Raises ValueError for an unrecognized style.
    """
    if pieces["error"]:
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    render_fn = renderers.get(style)
    if render_fn is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = render_fn(pieces)
    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
# NOTE(review): the raiser is not in this chunk — presumably the root/setup.py
# discovery code (get_root); confirm against the full file.
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Returns dict with two keys: 'version' and 'full'.

    Tries, in order: expanded git-archive keywords, a previously written
    _version.py, the VCS itself ('git describe'), and finally the parent
    directory name; falls back to a "0+unknown" dict when all fail.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    # 1) expanded keywords (git-archive tarballs)
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # 2) a previously generated _version.py (sdist tarballs)
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file {} {}".format(versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    # 3) ask the VCS directly (developer checkouts)
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # 4) guess from the unpacked directory name
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None}
def get_version():
    """Get the short version string for this project."""
    versions = get_versions()
    return versions["version"]
def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer.

    Returns a dict mapping command names ("version", "build_py", "sdist",
    plus "build_exe"/"py2exe" when those freezers are loaded) to command
    classes that report or bake the computed version into build products.
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52
    cmds = {}
    # we add "version" to both distutils and setuptools
    from distutils.core import Command

    class cmd_version(Command):
        # "python setup.py version": print the computed version and exit
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])

    cmds["version"] = cmd_version
    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?
    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

    cmds["build_py"] = cmd_build_py
    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe

        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        #   "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
        #   "product_version": versioneer.get_version(),
        #   ...
        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                # temporarily replace the source _version.py with the
                # frozen short form, build, then restore the long form
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(
                        LONG
                        % {
                            "DOLLAR": "$",
                            "STYLE": cfg.style,
                            "TAG_PREFIX": cfg.tag_prefix,
                            "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                            "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        }
                    )

        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]
    if "py2exe" in sys.modules:  # py2exe enabled?
        try:
            from py2exe.distutils_buildexe import py2exe as _py2exe  # py3
        except ImportError:
            from py2exe.build_exe import py2exe as _py2exe  # py2

        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                # same swap-build-restore dance as cmd_build_exe above
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(
                        LONG
                        % {
                            "DOLLAR": "$",
                            "STYLE": cfg.style,
                            "TAG_PREFIX": cfg.tag_prefix,
                            "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                            "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        }
                    )

        cmds["py2exe"] = cmd_py2exe
    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile, self._versioneer_generated_versions)

    cmds["sdist"] = cmd_sdist
    return cmds
# Help text printed to stderr by do_setup() when setup.cfg lacks the
# required [versioneer] section or options.
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""

# Commented-out template appended to setup.cfg by do_setup() when no
# [versioneer] section exists, so the user has something to fill in.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""

# Snippet appended to the package __init__.py by do_setup() so that
# PKG.__version__ is populated from the generated _version.py.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Main VCS-independent setup function for installing Versioneer.

    Reads the [versioneer] section from setup.cfg, writes the generated
    _version.py, wires up the package __init__.py, ensures MANIFEST.in
    covers both files, and performs VCS-specific setup.

    Returns 0 on success, 1 when setup.cfg needs editing first.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
        # missing setup.cfg or missing [versioneer] section: append the
        # sample config so the user has a template to edit, then bail out
        if isinstance(e, (OSError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1

    # write the VCS-scanning _version.py from the per-VCS template
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(
            LONG
            % {
                "DOLLAR": "$",
                "STYLE": cfg.style,
                "TAG_PREFIX": cfg.tag_prefix,
                "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                "VERSIONFILE_SOURCE": cfg.versionfile_source,
            }
        )

    # add the __version__ snippet to the package __init__.py (once)
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except OSError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None

    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except OSError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")

    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Checks that setup.py imports versioneer and uses get_version() /
    get_cmdclass(), and that the obsolete module-attribute configuration
    style is gone.  Returns the number of problems found (0 when clean).
    """
    # substring -> token recorded when the substring appears in setup.py
    markers = {
        "import versioneer": "import",
        "versioneer.get_cmdclass()": "cmdclass",
        "versioneer.get_version()": "get_version",
    }
    found = set()
    setters = False
    errors = 0
    with open("setup.py", "r") as setup_file:
        for line in setup_file:
            for needle, token in markers.items():
                if needle in line:
                    found.add(token)
            # old-style configuration via module attributes is obsolete
            if "versioneer.VCS" in line or "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        for msg in (
            "",
            "Your setup.py appears to be missing some important items",
            "(but I might be wrong). Please make sure it has something",
            "roughly like the following:",
            "",
            " import versioneer",
            " setup( version=versioneer.get_version(),",
            " cmdclass=versioneer.get_cmdclass(), ...)",
            "",
        ):
            print(msg)
        errors += 1
    if setters:
        for msg in (
            "You should remove lines like 'versioneer.VCS = ' and",
            "'versioneer.versionfile_source = ' . This configuration",
            "now lives in setup.cfg, and should be removed from setup.py",
            "",
        ):
            print(msg)
        errors += 1
    return errors
if __name__ == "__main__":
    # CLI entry point: 'python versioneer.py setup' installs versioneer
    # into the current project.  Unknown commands are silently ignored;
    # any reported problem exits non-zero.
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
| olof/svtplay-dl | versioneer.py | Python | mit | 67,775 | [
"Brian"
] | 17dddbb4d1f5149fbc3b552bb5bd2328e9cf8661e04154d8ed37e0b48a2002af |
'''
python module for ASE2-free and Numeric-free dacapo
U{John Kitchin<mailto:jkitchin@andrew.cmu.edu>} December 25, 2008
This module supports numpy directly.
* ScientificPython2.8 is required
- this is the first version to use numpy by default.
see https://wiki.fysik.dtu.dk/stuff/nc/ for dacapo netcdf variable
documentation
'''
__docformat__ = 'restructuredtext'
import sys
import exceptions, glob, os, pickle, string
from Scientific.IO.NetCDF import NetCDFFile as netCDF
import numpy as np
import subprocess as sp
import validate
import changed
try:
    from uuid import uuid1
except ImportError:  # probably an old python before 2.5
    import random, time

    def uuid1():
        """Fallback pseudo-unique id: host + timestamp + random number.

        Returns a string with spaces replaced by dashes.  Not a real
        RFC 4122 uuid, but unique enough for tagging nc files.
        """
        t = time.asctime()
        # BUGFIX: HOSTNAME is not always set (cron jobs, some shells);
        # the original os.environ['HOSTNAME'] raised KeyError there.
        host = os.environ.get('HOSTNAME', 'localhost')
        random.seed(host + str(t))
        s = host + '-' + t + '-' + str(random.random())
        return s.replace(' ', '-')
import logging
# module-level logger shared by the whole Jacapo calculator
log = logging.getLogger('Jacapo')
handler = logging.StreamHandler()
# python 2.4's logging lacks the %(funcName)s record attribute, so use a
# reduced format string on interpreters older than 2.5
if sys.version_info < (2, 5):  # no funcName in python 2.4
    formatstring = ('%(levelname)-10s '
                    'lineno: %(lineno)-4d %(message)s')
else:
    formatstring = ('%(levelname)-10s function: %(funcName)s '
                    'lineno: %(lineno)-4d %(message)s')
formatter = logging.Formatter(formatstring)
handler.setFormatter(formatter)
log.addHandler(handler)
from ase.calculators.jacapo.validate import get_dacapopath
class DacapoRunning(exceptions.Exception):
    """Raised when ncfile.status = 'running'."""
    pass
class DacapoAborted(exceptions.Exception):
    """Raised when ncfile.status = 'aborted'."""
    pass
class DacapoInput(exceptions.Exception):
    """Raised for bad input variables."""
    pass
class DacapoAbnormalTermination(exceptions.Exception):
    """Raised when text file does not end correctly"""
    pass
class DacapoDryrun(exceptions.Exception):
    """Raised for a dacapo dry run.

    NOTE(review): the original docstring ("Raised when text file does not
    end correctly") was copied verbatim from DacapoAbnormalTermination and
    does not match this class's name; the exact trigger should be
    confirmed against the raise sites.
    """
    pass
def read(ncfile):
    '''return atoms and calculator from ncfile

    >>> atoms, calc = read('co.nc')
    '''
    calculator = Jacapo(ncfile)
    # get_atoms() hands back a copy of the atoms with this calculator
    # already attached
    atoms_copy = calculator.get_atoms()
    return (atoms_copy, calculator)
class Jacapo:
'''
Python interface to the Fortran DACAPO code
'''
__name__ = 'Jacapo'
__version__ = '0.4'

# dictionary of valid input variables and default settings.
# BUGFIX: the original literal listed 'fftgrid' twice (once as None and
# once as the soft/hard dict); in a dict literal the later entry silently
# wins, so the shadowed 'fftgrid': None entry has been removed.  The
# effective contents are unchanged.
default_input = {'atoms': None,
                 'pw': 350,
                 'dw': 350,
                 'xc': 'PW91',
                 'nbands': None,
                 'ft': 0.1,
                 'kpts': (1, 1, 1),
                 'spinpol': False,
                 'fixmagmom': None,
                 'symmetry': False,
                 'calculate_stress': False,
                 'dipole': {'status': False,
                            'mixpar': 0.2,
                            'initval': 0.0,
                            'adddipfield': 0.0,
                            'position': None},
                 'status': 'new',
                 'pseudopotentials': None,
                 'extracharge': None,
                 'extpot': None,
                 'ascii_debug': 'Off',
                 'ncoutput': {'wf': 'Yes',
                              'cd': 'Yes',
                              'efp': 'Yes',
                              'esp': 'Yes'},
                 'ados': None,
                 'decoupling': None,
                 'external_dipole': None,
                 'convergence': {'energy': 0.00001,
                                 'density': 0.0001,
                                 'occupation': 0.001,
                                 'maxsteps': None,
                                 'maxtime': None},
                 'charge_mixing': {'method': 'Pulay',
                                   'mixinghistory': 10,
                                   'mixingcoeff': 0.1,
                                   'precondition': 'No',
                                   'updatecharge': 'Yes'},
                 'electronic_minimization': {'method': 'eigsolve',
                                             'diagsperband': 2},
                 'occupationstatistics': 'FermiDirac',
                 'fftgrid': {'soft': None,
                             'hard': None},
                 'mdos': None,
                 'psp': None
                 }
def __init__(self,
             nc='out.nc',
             outnc=None,
             debug=logging.WARN,
             stay_alive=False,
             **kwargs):
    '''
    Initialize the Jacapo calculator

    :Parameters:

      nc : string
       output netcdf file, or input file if nc already exists

      outnc : string
       output file. by default equal to nc

      debug : integer
       logging debug level.

      stay_alive : boolean
       keep the dacapo process alive between calculations

    Valid kwargs:

      atoms : ASE.Atoms instance
        atoms is an ase.Atoms object that will be attached
        to this calculator.

      pw : integer
        sets planewave cutoff

      dw : integer
        sets density cutoff

      kpts : iterable
        set chadi-cohen, monkhorst-pack kpt grid,
        e.g. kpts = (2,2,1) or explicit list of kpts

      spinpol : Boolean
        sets whether spin-polarization is used or not.

      fixmagmom : float
        set the magnetic moment of the unit cell. only used
        in spin polarize calculations

      ft : float
        set the Fermi temperature used in occupation smearing

      xc : string
        set the exchange-correlation functional.
        one of ['PZ','VWN','PW91','PBE','RPBE','revPBE'],

      dipole
        boolean
        turn the dipole correction on (True) or off (False)

        or:
        dictionary of parameters to fine-tune behavior
        {'status':False,
        'mixpar':0.2,
        'initval':0.0,
        'adddipfield':0.0,
        'position':None}

      nbands : integer
        set the number of bands

      symmetry : Boolean
        Turn symmetry reduction on (True) or off (False)

      stress : Boolean
        Turn stress calculation on (True) or off (False)

      debug : level for logging
        could be something like
        logging.DEBUG or an integer 0-50. The higher the integer,
        the less information you see set debug level (0 = off, 10 =
        extreme)

    Modification of the nc file only occurs at calculate time if needed

    >>> calc = Jacapo('CO.nc')

    reads the calculator from CO.nc if it exists or
    minimally initializes CO.nc with dimensions if it does not exist.

    >>> calc = Jacapo('CO.nc', pw=300)

    reads the calculator from CO.nc or initializes it if
    it does not exist and changes the planewave cutoff energy to
    300eV

    >>> atoms = Jacapo.read_atoms('CO.nc')

    returns the atoms in the netcdffile CO.nc, with the calculator
    attached to it.

    >>> atoms, calc = read('CO.nc')
    '''
    self.debug = debug
    log.setLevel(debug)
    # start from a copy of the class-level defaults so instances do not
    # share mutable state
    self.pars = Jacapo.default_input.copy()
    self.pars_uptodate = {}
    log.debug(self.pars)
    # every parameter starts dirty; write_input() clears the flags
    for key in self.pars:
        self.pars_uptodate[key] = False
    self.kwargs = kwargs
    self.set_psp_database()
    self.set_nc(nc)
    #assume not ready at init, rely on code later to change this
    self.ready = False
    # need to set a default value for stay_alive
    self.stay_alive = stay_alive
    # for correct updating, we need to set the correct frame number
    # before setting atoms or calculator
    self._set_frame_number()
    if os.path.exists(nc):
        self.atoms = self.read_only_atoms(nc)
        #if atoms object is passed to
        #__init__ we assume the user wants the atoms object
        # updated to the current state in the file.
        if 'atoms' in kwargs:
            log.debug('Updating the atoms in kwargs')
            atoms = kwargs['atoms']
            atoms.set_cell(self.atoms.get_cell())
            atoms.set_positions(self.atoms.get_positions())
            atoms.calc = self
        #update the parameter list from the ncfile
        self.update_input_parameters()
        self.ready = True
    #change output file if needed
    if outnc:
        self.set_nc(outnc)
    if len(kwargs) > 0:
        if 'stress' in kwargs:
            raise DacapoInput, '''\
stress keyword is deprecated.
you must use calculate_stress instead'''
        #make sure to set calculator on atoms if it was in kwargs
        #and do this first, since some parameters need info from atoms
        if 'atoms' in kwargs:
            #we need to set_atoms here so the atoms are written to
            #the ncfile
            self.set_atoms(kwargs['atoms'])
            kwargs['atoms'].calc = self
            del kwargs['atoms'] #so we don't call it in the next
                                #line. we don't want to do that
                                #because it will update the _frame
                                #counter, and that should not be
                                #done here.
        self.set(**kwargs) #if nothing changes, nothing will be done
def set(self, **kwargs):
'''set a parameter
parameter is stored in dictionary that is processed later if a
calculation is need.
'''
if 'DACAPO_NOSET' in os.environ:
#it is probably a bug that this is detected so we raise an exception
raise Exception, 'DACAPO_NOSET detected, nothing is being set'
for key in kwargs:
if key not in self.default_input:
raise DacapoInput, '%s is not valid input' % key
if kwargs[key] is None:
continue
#now check for valid input
validf = 'validate.valid_%s' % key
valid = eval('%s(kwargs[key])' % validf)
if not valid:
s = 'Warning invalid input detected for key "%s" %s'
log.warn(s % (key,
kwargs[key]))
raise DacapoInput, s % (key, kwargs[key])
#now see if key has changed
if key in self.pars:
changef = 'changed.%s_changed' % key
if os.path.exists(self.get_nc()):
notchanged = not eval('%s(self,kwargs[key])' % changef)
else:
notchanged = False
log.debug('%s notchanged = %s' % (key, notchanged))
if notchanged:
continue
log.debug('setting: %s. self.ready = False ' % key)
# psp's are not stored in self.pars, everything else is
if key == 'psp':
self.psp[kwargs[key]['sym']] = kwargs[key]['psp']
else:
self.pars[key] = kwargs[key]
self.pars_uptodate[key] = False
self.ready = False
log.debug('exiting set function')
def write_input(self):
'''write out input parameters as needed
you must define a self._set_keyword function that does all the
actual writing.
'''
log.debug('Writing input variables out')
log.debug(self.pars)
if 'DACAPO_READONLY' in os.environ:
raise Exception, 'DACAPO_READONLY set and you tried to write!'
if self.ready:
log.debug('self.ready = %s' % self.ready)
log.debug('detected everything is ready, not writing input out')
return
# Only write out changed parameters. this function does not do
# the writing, that is done for each variable in private
# functions.
for key in self.pars:
if self.pars_uptodate[key] is False:
setf = 'set_%s' % key
if self.pars[key] is None:
continue
log.debug('trying to call: %s' % setf)
log.debug('self.%s(self.pars[key])' % setf)
log.debug('key = %s' % str(self.pars[key]))
if isinstance(self.pars[key], dict):
eval('self.%s(**self.pars[key])' % setf)
else:
eval('self.%s(self.pars[key])' % setf)
self.pars_uptodate[key] = True #update the changed flag
log.debug('wrote %s: %s' % (key, str(self.pars[key])))
#set Jacapo version
ncf = netCDF(self.get_nc(), 'a')
ncf.Jacapo_version = Jacapo.__version__
ncf.sync()
ncf.close()
def update_input_parameters(self):
    '''read in all the input parameters from the netcdfile

    Calls self.get_<key>() for every key in default_input, stores the
    result in self.pars, and marks everything clean.  Returns self.pars.
    '''
    log.debug('Updating parameters')
    for key in self.default_input:
        log.debug('getting key: %s' % key)
        # resolve the getter by name; getattr replaces the original
        # string-built eval of 'self.get_<key>()'
        self.pars[key] = getattr(self, 'get_%s' % key)()
        self.pars_uptodate[key] = True
    return self.pars
def write(self, new=False):
    '''write out everything to the ncfile : self.get_nc()

    new determines whether to delete any existing ncfile, and rewrite it.
    '''
    ncfile = self.get_nc()
    if new and os.path.exists(ncfile):
        # wipe the old file and mark every parameter dirty so the whole
        # input section is regenerated
        os.unlink(ncfile)
        self.ready = False
        for key in self.pars_uptodate:
            self.pars_uptodate[key] = False
    if not os.path.exists(ncfile):
        self.initnc()
    self.write_input()
    self.write_nc()
def initnc(self, ncfile=None):
    '''create an ncfile with minimal dimensions in it

    this makes sure the dimensions needed for other set functions
    exist when needed.

    :Parameters:

      ncfile : string or None
        file to initialize; defaults to self.get_nc()
    '''
    if ncfile is None:
        ncfile = self.get_nc()
    else:
        self.set_nc(ncfile)
    log.debug('initializing %s' % ncfile)
    base = os.path.split(ncfile)[0]
    # BUGFIX: the original used "base is not ''", an identity test that
    # is not guaranteed to equal "base != ''"; compare by value instead
    if base != '' and not os.path.isdir(base):
        os.makedirs(base)
    ncf = netCDF(ncfile, 'w')
    #first, we define some dimensions we always need
    #unlimited
    ncf.createDimension('number_ionic_steps', None)
    ncf.createDimension('dim1', 1)
    ncf.createDimension('dim2', 2)
    ncf.createDimension('dim3', 3)
    ncf.createDimension('dim4', 4)
    ncf.createDimension('dim5', 5)
    ncf.createDimension('dim6', 6)
    ncf.createDimension('dim7', 7)
    ncf.createDimension('dim20', 20) #for longer strings
    ncf.status = 'new'
    ncf.history = 'Dacapo'
    ncf.uuid = str(uuid1())
    ncf.Jacapo_version = Jacapo.__version__
    ncf.close()
    self.ready = False
    self._frame = 0
def __del__(self):
    '''If calculator is deleted try to stop dacapo program
    '''
    if hasattr(self, '_dacapo'):
        # poll() returns None while the subprocess is still running;
        # compare with 'is' since None is a singleton (the original used
        # '== None')
        if self._dacapo.poll() is None:
            self.execute_external_dynamics(stopprogram=True)
    #and clean up after Dacapo
    if os.path.exists('stop'):
        os.remove('stop')
    #remove slave files
    txt = self.get_txt()
    if txt is not None:
        slv = txt + '.slave*'
        for slvf in glob.glob(slv):
            os.remove(slvf)
def __str__(self):
    '''
    pretty-print the calculator and atoms.

    we read everything directly from the ncfile to prevent
    triggering any calculations
    '''
    s = []
    if self.nc is None:
        return 'No netcdf file attached to this calculator'
    if not os.path.exists(self.nc):
        return 'ncfile (%s) does not exist yet' % self.nc
    nc = netCDF(self.nc, 'r')
    s.append(' ---------------------------------')
    s.append(' Dacapo calculation from %s' % self.nc)
    # these file-level attributes are all optional
    if hasattr(nc, 'uuid'):
        s.append(' uuid = %s' % nc.uuid)
    if hasattr(nc, 'status'):
        s.append(' status = %s' % nc.status)
    if hasattr(nc, 'version'):
        s.append(' version = %s' % nc.version)
    if hasattr(nc, 'Jacapo_version'):
        s.append(' Jacapo version = %s' % nc.Jacapo_version[0])
    energy = nc.variables.get('TotalEnergy', None)
    if energy and energy[:][-1] < 1E36:  # missing values get
                                         # returned at 9.3E36
        s.append(' Energy = %1.6f eV' % energy[:][-1])
    else:
        s.append(' Energy = None')
    s.append('')
    atoms = self.get_atoms()
    if atoms is None:
        s.append(' no atoms defined')
    else:
        uc = atoms.get_cell()
        #a, b, c = uc
        s.append(" Unit Cell vectors (angstroms)")
        s.append(" x y z length")
        for i, v in enumerate(uc):
            L = (np.sum(v**2))**0.5 #vector length
            s.append(" a%i [% 1.4f % 1.4f % 1.4f] %1.2f" % (i,
                                                            v[0],
                                                            v[1],
                                                            v[2],
                                                            L))
        stress = nc.variables.get('TotalStress', None)
        if stress is not None:
            # reorder the flattened 3x3 tensor into Voigt-style
            # xx, yy, zz, yz, xz, xy components
            stress = np.take(stress[:].ravel(), [0, 4, 8, 5, 2, 1])
            s.append(' Stress: xx, yy, zz, yz, xz, xy')
            s1 = ' % 1.3f % 1.3f % 1.3f % 1.3f % 1.3f % 1.3f'
            s.append(s1 % tuple(stress))
        else:
            s.append(' No stress calculated.')
        s.append(' Volume = %1.2f A^3' % atoms.get_volume())
        s.append('')
        z = " Atom, sym, position (in x,y,z), tag, rmsForce and psp"
        s.append(z)
        #this is just the ncvariable
        forces = nc.variables.get('DynamicAtomForces', None)
        for i, atom in enumerate(atoms):
            sym = atom.symbol
            pos = atom.position
            tag = atom.tag
            # only report a force if the last frame holds real values
            # (missing entries come back around 9.3E36)
            if forces is not None and (forces[:][-1][i] < 1E36).all():
                f = forces[:][-1][i]
                # Lars Grabow: this seems to work right for some
                # reason, but I would expect this to be the right
                # index order f=forces[-1][i][:]
                # frame,atom,direction
                rmsforce = (np.sum(f**2))**0.5
            else:
                rmsforce = None
            st = " %2i %3.12s " % (i, sym)
            st += "[% 7.3f%7.3f% 7.3f] " % tuple(pos)
            st += " %2s " % tag
            if rmsforce is not None:
                st += " %4.3f " % rmsforce
            else:
                st += ' None '
            st += " %s" % (self.get_psp(sym))
            s.append(st)
    s.append('')
    s.append(' Details:')
    xc = self.get_xc()
    if xc is not None:
        s.append(' XCfunctional = %s' % self.get_xc())
    else:
        s.append(' XCfunctional = Not defined')
    pw = self.get_pw()
    if pw is None:
        pw = 'default (350eV)'
    s.append(' Planewavecutoff = %s eV' % pw)
    dw = self.get_dw()
    if dw:
        s.append(' Densitywavecutoff = %i eV' % int(self.get_dw()))
    else:
        s.append(' Densitywavecutoff = None')
    ft = self.get_ft()
    if ft is not None:
        s.append(' FermiTemperature = %f kT' % ft)
    else:
        s.append(' FermiTemperature = not defined')
    # NOTE(review): bare except kept to preserve behavior; get_valence
    # may legitimately fail when no psp files are available
    try:
        nelectrons = self.get_valence()
    except:
        nelectrons = None
    if nelectrons is not None:
        s.append(' Number of electrons = %1.1f' % nelectrons)
    else:
        s.append(' Number of electrons = N/A')
    s.append(' Number of bands = %s' % self.get_nbands())
    s.append(' Kpoint grid = %s' % str(self.get_kpts_type()))
    s.append(' Spin-polarized = %s' % self.get_spin_polarized())
    # if self.get_spin_polarized():
    #     s.append(' Unit cell magnetic moment = %1.2f bohr-magnetons' % \
    #              self.get_magnetic_moment())
    s.append(' Dipole correction = %s' % self.get_dipole())
    s.append(' Symmetry = %s' % self.get_symmetry())
    s.append(' Constraints = %s' % str(atoms._get_constraints()))
    s.append(' ---------------------------------')
    nc.close()
    return string.join(s, '\n')
#todo figure out other xc psp databases
def set_psp_database(self, xc=None):
    '''
    get the xc-dependent psp database

    :Parameters:

     xc : string
       one of 'PW91', 'PBE', 'revPBE', 'RPBE', 'PZ'

    not all the databases are complete, and that means
    some psp do not exist.

    note: this function is not supported fully. only pw91 is
    imported now. Changing the xc at this point results in loading
    a nearly empty database, and I have not thought about how to
    resolve that
    '''
    # only the PW91 table exists so far; anything else falls back to it
    # with a warning
    if xc not in (None, 'PW91'):
        log.warn('PW91 pseudopotentials are being used!')
    #todo build other xc psp databases
    from pw91_psp import defaultpseudopotentials
    self.psp = defaultpseudopotentials
def _set_frame_number(self, frame=None):
    '''set framenumber in the netcdf file

    this is equal to the number of ionic steps dimension

    :Parameters:

      frame : integer or None
        1-based frame count; when None it is derived from the shape of
        TotalEnergy (or DynamicAtomPositions) in the ncfile, or from
        whether atoms are already attached when no ncfile exists.
    '''
    if frame is None:
        if os.path.exists(self.nc):
            nc = netCDF(self.nc, 'r')
            # nc.dimensions['number_ionic_steps'] is None
            if 'TotalEnergy' in nc.variables:
                number_ionic_steps = nc.variables['TotalEnergy'].shape[0]
            else:
                number_ionic_steps = nc.variables['DynamicAtomPositions'].shape[0]
            frame = number_ionic_steps - 1
            nc.close()
        else:
            if hasattr(self, 'atoms'):
                frame = 1
            else:
                #when atoms are set, the frame will be incremented
                frame = 0
        ## if 'TotalEnergy' in nc.variables:
        ##     frame = nc.variables['TotalEnergy'].shape[0]
        ##     # make sure the last energy is reasonable. Sometime
        ##     # the field is empty if the calculation ran out of
        ##     # walltime for example. Empty values get returned as
        ##     # 9.6E36. Dacapos energies should always be negative,
        ##     # so if the energy is > 1E36, there is definitely
        ##     # something wrong and a restart is required.
        ##     if nc.variables.get('TotalEnergy', None)[-1] > 1E36:
        ##         log.warn("Total energy > 1E36. NC file is incomplete. \
        ##         calc.restart may be required")
        ##         #self.restart()
    log.info("Current frame number is: %i" % (frame - 1))
    self._frame = frame - 1  #netCDF starts counting with 1
def _increment_frame(self):
    'increment the framenumber'
    log.debug('incrementing frame')
    self._frame = self._frame + 1
def set_pw(self, pw):
    '''set the planewave cutoff.

    :Parameters:

     pw : integer
       the planewave cutoff in eV

    this function checks to make sure the density wave cutoff is
    greater than or equal to the planewave cutoff.'''
    nc = netCDF(self.nc, 'a')
    if 'PlaneWaveCutoff' in nc.variables:
        vpw = nc.variables['PlaneWaveCutoff']
        vpw.assignValue(pw)
    else:
        vpw = nc.createVariable('PlaneWaveCutoff', 'd', ('dim1',))
        vpw.assignValue(pw)
    # the density cutoff must never fall below the planewave cutoff;
    # raise it to pw if necessary
    if 'Density_WaveCutoff' in nc.variables:
        vdw = nc.variables['Density_WaveCutoff']
        dw = vdw.getValue()
        if pw > dw:
            vdw.assignValue(pw) #make them equal
    else:
        vdw = nc.createVariable('Density_WaveCutoff', 'd', ('dim1',))
        vdw.assignValue(pw)
    nc.close()
    self.restart() #nc dimension change for number_plane_Wave dimension
    self.set_status('new')
    self.ready = False
def set_dw(self, dw):
    # raw docstring: the original non-raw string contained '\v' (a
    # vertical-tab escape) inside '$\vert G\vert^2$'
    r'''set the density wave cutoff energy.

    :Parameters:

      dw : integer
        the density wave cutoff

    The function checks to make sure it is not less than the
    planewave cutoff.

    Density_WaveCutoff describes the kinetic energy neccesary to
    represent a wavefunction associated with the total density,
    i.e. G-vectors for which $\vert G\vert^2$ $<$
    4*Density_WaveCutoff will be used to describe the total
    density (including augmentation charge and partial core
    density). If Density_WaveCutoff is equal to PlaneWaveCutoff
    this implies that the total density is as soft as the
    wavefunctions described by the kinetic energy cutoff
    PlaneWaveCutoff. If a value of Density_WaveCutoff is specified
    (must be larger than or equal to PlaneWaveCutoff) the program
    will run using two grids, one for representing the
    wavefunction density (softgrid_dim) and one representing the
    total density (hardgrid_dim). If the density can be
    reprensented on the same grid as the wavefunction density
    Density_WaveCutoff can be chosen equal to PlaneWaveCutoff
    (default).
    '''
    pw = self.get_pw()
    # only warn (do not raise) when the invariant dw >= pw is violated
    if pw > dw:
        log.warn('Planewave cutoff %i is greater \
than density cutoff %i' % (pw, dw))
    ncf = netCDF(self.nc, 'a')
    if 'Density_WaveCutoff' in ncf.variables:
        vdw = ncf.variables['Density_WaveCutoff']
        vdw.assignValue(dw)
    else:
        vdw = ncf.createVariable('Density_WaveCutoff', 'i', ('dim1',))
        vdw.assignValue(dw)
    ncf.close()
    self.restart() #nc dimension change
    self.set_status('new')
    self.ready = False
def set_xc(self, xc):
    '''Set the self-consistent exchange-correlation functional

    :Parameters:

     xc : string
       Must be one of 'PZ', 'VWN', 'PW91', 'PBE', 'revPBE', 'RPBE'

    Selects which density functional to use for
    exchange-correlation when performing electronic minimization
    (the electronic energy is minimized with respect to this
    selected functional) Notice that the electronic energy is also
    evaluated non-selfconsistently by DACAPO for other
    exchange-correlation functionals Recognized options :

    * "PZ" (Perdew Zunger LDA-parametrization)
    * "VWN" (Vosko Wilk Nusair LDA-parametrization)
    * "PW91" (Perdew Wang 91 GGA-parametrization)
    * "PBE" (Perdew Burke Ernzerhof GGA-parametrization)
    * "revPBE" (revised PBE/1 GGA-parametrization)
    * "RPBE" (revised PBE/2 GGA-parametrization)

    option "PZ" is not allowed for spin polarized
    calculation; use "VWN" instead.
    '''
    # the ncfile stores the functional as a fixed-width (7 char)
    # character array
    padded = np.array('%7s' % xc, 'c')
    nc = netCDF(self.nc, 'a')
    if 'ExcFunctional' in nc.variables:
        nc.variables['ExcFunctional'][:] = padded
    else:
        vxc = nc.createVariable('ExcFunctional', 'c', ('dim7',))
        vxc[:] = padded
    nc.close()
    self.set_status('new')
    self.ready = False
def set_nbands(self, nbands=None):
    '''Set the number of bands. a few unoccupied bands are
    recommended.

    :Parameters:

      nbands : integer
        the number of bands.

    if nbands = None the function returns with nothing done. At
    calculate time, if there are still no bands, they will be set
    by:

    the number of bands is calculated as
    $nbands=nvalence*0.65 + 4$
    '''
    if nbands is None:
        return
    # the number_of_bands dimension depends on this value, so it must be
    # removed before the new value is written
    self.delete_ncattdimvar(self.nc,
                            ncdims=['number_of_bands'],
                            ncvars=[])
    nc = netCDF(self.nc, 'a')
    v = 'ElectronicBands'
    if v in nc.variables:
        vnb = nc.variables[v]
    else:
        vnb = nc.createVariable('ElectronicBands', 'c', ('dim1',))
    # nbands is stored as an attribute of the ElectronicBands variable
    vnb.NumberOfBands = nbands
    nc.sync()
    nc.close()
    self.set_status('new')
    self.ready = False
def set_kpts(self, kpts):
    '''
    set the kpt grid.

    Parameters:

    kpts: (n1,n2,n3) or [k1,k2,k3,...] or one of these
    chadi-cohen sets:

    * cc6_1x1
    * cc12_2x3
    * cc18_sq3xsq3
    * cc18_1x1
    * cc54_sq3xsq3
    * cc54_1x1
    * cc162_sq3xsq3
    * cc162_1x1

    (n1,n2,n3) creates an n1 x n2 x n3 monkhorst-pack grid,
    [k1,k2,k3,...] creates a kpt-grid based on the kpoints
    defined in k1,k2,k3,...

    There is also a possibility to have Dacapo (fortran) create
    the Kpoints in chadi-cohen or monkhorst-pack form. To do this
    you need to set the KpointSetup.gridtype attribute, and
    KpointSetup.

    KpointSetup = [3,0,0]
    KpointSetup.gridtype = 'ChadiCohen'

    KpointSetup(1) Chadi-Cohen k-point set
    1 6 k-points 1x1
    2 18-kpoints sqrt(3)*sqrt(3)
    3 18-kpoints 1x1
    4 54-kpoints sqrt(3)*sqrt(3)
    5 54-kpoints 1x1
    6 162-kpoints 1x1
    7 12-kpoints 2x3
    8 162-kpoints 3xsqrt 3

    or

    KpointSetup = [4,4,4]
    KpointSetup.gridtype = 'MonkhorstPack'
    we do not use this functionality.
    '''
    #chadi-cohen
    if isinstance(kpts, str):
        # look the named set up on the kpoints module; replaces the
        # original exec('from ase.dft.kpoints import %s')/eval(kpts)
        # pair, which built and executed code from a string
        import ase.dft.kpoints
        listofkpts = getattr(ase.dft.kpoints, kpts)
        gridtype = kpts #stored in ncfile
        #uc = self.get_atoms().get_cell()
        #listofkpts = np.dot(ccgrid,np.linalg.inv(uc.T))
    #monkhorst-pack grid
    if np.array(kpts).shape == (3,):
        from ase.dft.kpoints import monkhorst_pack
        N1, N2, N3 = kpts
        listofkpts = monkhorst_pack((N1, N2, N3))
        gridtype = 'Monkhorst-Pack %s' % str(tuple(kpts))
    #user-defined list is provided
    if len(np.array(kpts).shape) == 2:
        listofkpts = kpts
        gridtype = 'user_defined_%i_kpts' % len(kpts) #stored in ncfile

    nbzkpts = len(listofkpts)

    #we need to get dimensions stored temporarily so
    #we can delete all dimensions and variables associated with
    #kpoints before we save them back out.
    nc2 = netCDF(self.nc, 'r')
    ncdims = nc2.dimensions
    nc2.close()

    if 'number_BZ_kpoints' in ncdims:
        self.delete_ncattdimvar(self.nc,
                                ncdims=['number_plane_waves',
                                        'number_BZ_kpoints',
                                        'number_IBZ_kpoints'])

    # now define dim and var
    nc = netCDF(self.nc, 'a')
    nc.createDimension('number_BZ_kpoints', nbzkpts)
    bv = nc.createVariable('BZKpoints', 'd', ('number_BZ_kpoints',
                                              'dim3'))
    bv[:] = listofkpts
    bv.gridtype = gridtype
    nc.sync()
    nc.close()
    log.debug('kpts = %s' % str(self.get_kpts()))
    self.set_status('new')
    self.ready = False
def atoms_are_equal(self, atoms):
    '''
    comparison of atoms to self.atoms using tolerances to account
    for float/double differences and float math.
    '''
    TOL = 1.0e-6 #angstroms
    # a length mismatch would make the array comparisons below fail, so
    # bail out early
    if len(atoms) != len(self.atoms):
        return False
    mine = self.atoms.arrays
    theirs = atoms.arrays
    #match positions in cell
    positions_close = (abs(mine['positions'] - theirs['positions']) <= TOL).all()
    #match cell
    cells_close = (abs(self.atoms.get_cell()
                       - atoms.get_cell()) <= TOL).all()
    return True if (positions_close and cells_close) else False
def set_atoms(self, atoms):
    '''attach an atoms to the calculator and update the ncfile

    :Parameters:

      atoms
        ASE.Atoms instance
    '''
    log.debug('setting atoms to: %s' % str(atoms))
    have_previous = hasattr(self, 'atoms') and self.atoms is not None
    if have_previous:
        # identical atoms (within tolerance): nothing to do
        if self.atoms_are_equal(atoms):
            log.debug('No change to atoms in set_atoms, returning')
            return
        # atoms differ from the stored ones: start a new frame
        # (this exact-equality check is redundant with the tolerance
        # comparison above, but kept for identical behavior)
        if atoms != self.atoms:
            log.debug('atoms != self.atoms, incrementing')
            self._increment_frame()
    self.atoms = atoms.copy()
    self.ready = False
    log.debug('self.atoms = %s' % str(self.atoms))
def set_ft(self, ft):
    '''set the Fermi temperature for occupation smearing

    :Parameters:

      ft : float
        Fermi temperature in kT (eV)

    Electronic temperature, corresponding to gaussian occupation
    statistics. Device used to stabilize the convergence towards
    the electronic ground state. Higher values stabilizes the
    convergence. Values in the range 0.1-1.0 eV are recommended,
    depending on the complexity of the Fermi surface (low values
    for d-metals and narrow gap semiconducters, higher for free
    electron-like metals).
    '''
    nc = netCDF(self.nc, 'a')
    v = 'ElectronicBands'
    if v in nc.variables:
        vnb = nc.variables[v]
    else:
        vnb = nc.createVariable('ElectronicBands', 'c', ('dim1',))
    # ft is stored as an attribute of the ElectronicBands variable
    vnb.OccupationStatistics_FermiTemperature = ft
    nc.sync()
    nc.close()
    self.set_status('new')
    self.ready = False
def set_status(self, status):
    '''set the status flag in the netcdf file

    :Parameters:

      status : string
        status flag, e.g. 'new', 'finished'
    '''
    handle = netCDF(self.nc, 'a')
    handle.status = status
    handle.sync()
    handle.close()
    log.debug('set status to %s' % status)
def get_spinpol(self):
    'Returns the spin polarization setting, either True or False'
    nc = netCDF(self.nc, 'r')
    # 1 means unpolarized; treat a missing variable/attribute the same
    spinpol = 1
    if 'ElectronicBands' in nc.variables:
        bands = nc.variables['ElectronicBands']
        if hasattr(bands, 'SpinPolarization'):
            spinpol = bands.SpinPolarization
    nc.close()
    if spinpol == 1:
        return False
    return True
def set_spinpol(self, spinpol=False):
    '''set Spin polarization.

    :Parameters:

     spinpol : Boolean
       set_spinpol(True)  spin-polarized.
       set_spinpol(False) no spin polarization, default

    Specify whether to perform a spin polarized or unpolarized
    calculation.
    '''
    nc = netCDF(self.nc, 'a')
    v = 'ElectronicBands'
    if v in nc.variables:
        vnb = nc.variables[v]
    else:
        vnb = nc.createVariable('ElectronicBands', 'c', ('dim1',))
    # the ncfile encodes polarization as 2 (polarized) or 1 (not)
    if spinpol is True:
        vnb.SpinPolarization = 2
    else:
        vnb.SpinPolarization = 1
    nc.sync()
    nc.close()
    self.set_status('new')
    self.ready = False
def set_fixmagmom(self, fixmagmom=None):
    '''set a fixed magnetic moment for a spin polarized calculation

    :Parameters:

      fixmagmom : float
        the magnetic moment of the cell in Bohr magnetons

    No-op when fixmagmom is None.
    '''
    if fixmagmom is None:
        return
    nc = netCDF(self.nc, 'a')
    v = 'ElectronicBands'
    if v in nc.variables:
        vnb = nc.variables[v]
    else:
        vnb = nc.createVariable('ElectronicBands', 'c', ('dim1',))
    # fixing the moment only makes sense in a spin-polarized run, so
    # polarization is forced on here
    vnb.SpinPolarization = 2 #You must want spin-polarized
    vnb.FixedMagneticMoment = fixmagmom
    nc.sync()
    nc.close()
    self.set_status('new')
    self.ready = False
def get_fixmagmom(self):
    'returns the value of FixedMagneticMoment'
    ncf = netCDF(self.nc, 'r')
    # None when the variable or the attribute is absent
    fixmagmom = None
    if 'ElectronicBands' in ncf.variables:
        fixmagmom = getattr(ncf.variables['ElectronicBands'],
                            'FixedMagneticMoment', None)
    ncf.close()
    return fixmagmom
def set_calculate_stress(self, stress=True):
    '''Turn on stress calculation

    :Parameters:

      stress : boolean
        set_calculate_stress(True) calculates stress
        set_calculate_stress(False) do not calculate stress
    '''
    ncf = netCDF(self.get_nc(), 'a')
    name = 'NetCDFOutputControl'
    if name in ncf.variables:
        out = ncf.variables[name]
    else:
        out = ncf.createVariable('NetCDFOutputControl', 'c', ('dim1',))
    #dacapo expects the literal strings 'Yes'/'No'
    out.PrintTotalStress = 'Yes' if stress is True else 'No'
    ncf.sync()
    ncf.close()
    self.set_status('new')
    self.ready = False
def set_nc(self, nc='out.nc'):
    '''
    set filename for the netcdf and text output for this calculation

    :Parameters:

      nc : string
        filename for netcdf file

    if the ncfile attached to the calculator is changing, the old
    file will be copied to the new file if it does not exist so
    that all the calculator details are preserved. Otherwise, if
    the ncfile does not exist, it will get initialized.

    the text file will have the same basename as the ncfile, but
    with a .txt extension.
    '''
    #the first time this is called, there may be no self.nc defined
    if not hasattr(self, 'nc'):
        self.nc = nc

    #check if the name is changing and if so, copy the old ncfile
    #to the new one. This is necessary to ensure all the
    #calculator details are copied over. if the file already
    #exists we use the contents of the existing file
    if nc != self.nc and not os.path.exists(nc):
        log.debug('copying %s to %s' % (self.nc, nc))
        base = os.path.split(nc)[0]
        #bugfix: compare string contents with !=, not the identity
        #operator 'is not', which is unreliable for str values
        if not os.path.isdir(base) and base != '':
            os.makedirs(base)
        status = os.system("cp '%s' '%s'" % (self.nc, nc))
        if status != 0:
            #py3-compatible raise (also valid in python 2)
            raise Exception('Copying ncfile failed.')
        self.nc = nc

    elif os.path.exists(nc):
        self._set_frame_number()
        self.set_psp_database()
        self.atoms = self.read_only_atoms(nc)
        self.nc = nc
        self.update_input_parameters()

    #I always want the text file set based on the ncfile
    #and I never want to set this myself.
    base = os.path.splitext(self.nc)[0]
    self.txt = "%s.txt" % base
def set_pseudopotentials(self, pspdict):
    '''Set all the pseudopotentials from a dictionary.

    The dictionary should have this form::

        {symbol1: path1,
         symbol2: path2}
    '''
    #delegate each entry to set_psp
    for symbol, path in pspdict.items():
        self.set_psp(sym=symbol, psp=path)
def set_psp(self,
            sym=None,
            z=None,
            psp=None):
    '''
    set the pseudopotential file for a species or an atomic number.

    :Parameters:

     sym : string
       chemical symbol of the species

      z : integer
        the atomic number of the species

      psp : string
        filename of the pseudopotential

    you can only set sym or z.

    examples::

      set_psp('N',psp='pspfile')
      set_psp(z=6,psp='pspfile')
    '''
    log.debug(str([sym, z, psp]))
    if (sym, z, psp) == (None, None, None):
        return
    if (sym is None and z is not None):
        from ase.data import chemical_symbols
        sym = chemical_symbols[z]
    elif (sym is not None and z is None):
        pass
    else:
        #py3-compatible raise (also valid in python 2)
        raise Exception('You can only specify Z or sym!')

    if not hasattr(self, 'psp'):
        self.set_psp_database()

    #only make change if needed. the two original branches
    #(key missing / value different) performed the identical
    #update, so they are merged into one condition here.
    if sym not in self.psp or self.psp[sym] != psp:
        self.psp[sym] = psp
        self.ready = False
        self.set_status('new')

    if not self.ready:
        #now we update the netcdf file
        ncf = netCDF(self.nc, 'a')
        vn = 'AtomProperty_%s' % sym
        if vn not in ncf.variables:
            if 'dim20' not in ncf.dimensions:
                ncf.createDimension('dim20', 20)
            p = ncf.createVariable(vn, 'c', ('dim20',))
        else:
            p = ncf.variables[vn]
        ppath = self.get_psp(sym=sym)
        p.PspotFile = ppath
        ncf.close()
def get_pseudopotentials(self):
    'get pseudopotentials set for atoms attached to calculator'
    if self.atoms is None:
        return None
    #map each symbol present in the atoms to its psp filename
    mapping = dict((atom.symbol, self.psp[atom.symbol])
                   for atom in self.atoms)
    return {'pspdict': mapping}
def get_symmetry(self):
    '''return the type of symmetry used

    returns True for 'Maximum', False for 'Off' or unset; raises
    Exception on an unrecognized value.
    '''
    nc = netCDF(self.nc, 'r')
    if 'UseSymmetry' in nc.variables:
        #join the character array into a plain string; ''.join is
        #py3-compatible, unlike the removed string.join function
        sym = ''.join(nc.variables['UseSymmetry'][:]).strip()
    else:
        sym = None
    nc.close()
    if sym in ['Off', None]:
        return False
    elif sym == 'Maximum':
        return True
    else:
        #py3-compatible raise (also valid in python 2)
        raise Exception('Type of symmetry not recognized: %s' % sym)
def set_symmetry(self, val=False):
    '''set how symmetry is used to reduce k-points

    :Parameters:

     val : Boolean
       set_sym(True) Maximum symmetry is used
       set_sym(False) No symmetry is used

    This variable controls the if and how DACAPO should attempt
    using symmetry in the calculation. Imposing symmetry generally
    speeds up the calculation and reduces numerical noise to some
    extent. Symmetry should always be applied to the maximum
    extent, when ions are not moved. When relaxing ions, however,
    the symmetry of the equilibrium state may be lower than the
    initial state. Such an equilibrium state with lower symmetry
    is missed, if symmetry is imposed. Molecular dynamics-like
    algorithms for ionic propagation will generally not break the
    symmetry of the initial state, but some algorithms, like the
    BFGS may break the symmetry of the initial state. Recognized
    options:

    "Off": No symmetry will be imposed, apart from time inversion
    symmetry in recipical space. This is utilized to reduce the
    k-point sampling set for Brillouin zone integration and has no
    influence on the ionic forces/motion.

    "Maximum": DACAPO will look for symmetry in the supplied
    atomic structure and extract the highest possible symmetry
    group. During the calculation, DACAPO will impose the found
    spatial symmetry on ionic forces and electronic structure,
    i.e. the symmetry will be conserved during the calculation.
    '''
    if val:
        symval = 'Maximum'
    else:
        symval = 'Off'
    ncf = netCDF(self.get_nc(), 'a')
    if 'UseSymmetry' not in ncf.variables:
        sym = ncf.createVariable('UseSymmetry', 'c', ('dim7',))
    else:
        sym = ncf.variables['UseSymmetry']
    #pad to 7 characters to fill the character variable (dim7)
    sym[:] = np.array('%7s' % symval, 'c')
    ncf.sync()
    ncf.close()
    self.set_status('new')
    self.ready = False
def set_extracharge(self, val):
    '''add extra charge to unit cell

    :Parameters:

      val : float
        extra electrons to add or subtract from the unit cell

    Fixed extra charge in the unit cell (i.e. deviation from
    charge neutrality). This assumes a compensating, positive
    constant backgound charge (jellium) to forge overall charge
    neutrality.
    '''
    nc = netCDF(self.get_nc(), 'a')
    if 'ExtraCharge' in nc.variables:
        v = nc.variables['ExtraCharge']
    else:
        v = nc.createVariable('ExtraCharge', 'd', ('dim1',))
    v.assignValue(val)
    nc.sync()
    nc.close()
    #NOTE(review): unlike the other setters this does not call
    #set_status('new') or clear self.ready -- confirm whether
    #that is intentional
def get_extracharge(self):
    'Return the extra charge set in the calculator'
    nc = netCDF(self.get_nc(), 'r')
    if 'ExtraCharge' in nc.variables:
        v = nc.variables['ExtraCharge']
        exchg = v.getValue()
    else:
        #no extra charge has been set
        exchg = None
    nc.close()
    return exchg
def get_extpot(self):
    'return the external potential set in the calculator'
    ncf = netCDF(self.get_nc(), 'r')
    #None when no external potential has been stored
    extpot = None
    if 'ExternalPotential' in ncf.variables:
        extpot = ncf.variables['ExternalPotential'][:]
    ncf.close()
    return extpot
def set_extpot(self, potgrid):
    '''add external potential of value

    see this link before using this
    https://listserv.fysik.dtu.dk/pipermail/campos/2003-August/000657.html

    :Parameters:

      potgrid : np.array with shape (nx,ny,nz)
        the shape must be the same as the fft soft grid
        the value of the potential to add

    you have to know both of the fft grid dimensions ahead of time!

    if you know what you are doing, you can set the fft_grid you want
    before hand with:
    calc.set_fftgrid((n1,n2,n3))
    '''
    nc = netCDF(self.get_nc(), 'a')
    if 'ExternalPotential' in nc.variables:
        v = nc.variables['ExternalPotential']
    else:
        # I assume here you have the dimensions of potgrid correct
        # and that the soft and hard grids are the same.
        # if softgrid is defined, Dacapo requires hardgrid to be
        # defined too.
        s1, s2, s3 = potgrid.shape
        if 'softgrid_dim1' not in nc.dimensions:
            nc.createDimension('softgrid_dim1', s1)
            nc.createDimension('softgrid_dim2', s2)
            nc.createDimension('softgrid_dim3', s3)
            nc.createDimension('hardgrid_dim1', s1)
            nc.createDimension('hardgrid_dim2', s2)
            nc.createDimension('hardgrid_dim3', s3)
        v = nc.createVariable('ExternalPotential',
                              'd',
                              ('softgrid_dim1',
                               'softgrid_dim2',
                               'softgrid_dim3',))
    v[:] = potgrid
    nc.sync()
    nc.close()
    self.set_status('new')
    self.ready = False
def set_fftgrid(self, soft=None, hard=None):
    '''
    sets the dimensions of the FFT grid to be used

    :Parameters:

      soft : (n1,n2,n3) integers
        make a n1 x n2 x n3 grid

      hard : (n1,n2,n3) integers
        make a n1 x n2 x n3 grid

    >>> calc.set_fftgrid(soft=[42,44,46])
    sets the soft and hard grid dimensions to 42,44,46

    >>> calc.set_fftgrid(soft=[42,44,46],hard=[80,84,88])
    sets the soft grid dimensions to 42,44,46 and the hard
    grid dimensions to 80,84,88

    These are the fast FFt grid numbers listed in fftdimensions.F

    data list_of_fft /2, 4, 6, 8, 10, 12, 14, 16, 18, 20, &
    22,24, 28, 30,32, 36, 40, 42, 44, 48, &
    56,60, 64, 66, 70, 72, 80, 84, 88, 90, &
    96,108,110,112,120,126,128,132,140,144,154, &
    160,168,176,180,192,198,200, &
    216,240,264,270,280,288,324,352,360,378,384,400,432, &
    450,480,540,576,640/

    otherwise you will get some errors from mis-dimensioned variables.

    this is usually automatically set by Dacapo.
    '''
    if soft is not None:
        #remove any existing soft dimensions before re-creating them
        self.delete_ncattdimvar(self.nc,
                                ncdims=['softgrid_dim1',
                                        'softgrid_dim2',
                                        'softgrid_dim3'
                                        ],
                                ncvars=[])
        nc = netCDF(self.get_nc(), 'a')
        nc.createDimension('softgrid_dim1', soft[0])
        nc.createDimension('softgrid_dim2', soft[1])
        nc.createDimension('softgrid_dim3', soft[2])
        nc.sync()
        nc.close()
        #dacapo requires a hard grid whenever a soft grid is set
        if hard is None:
            hard = soft
    if hard is not None:
        self.delete_ncattdimvar(self.nc,
                                ncdims=['hardgrid_dim1',
                                        'hardgrid_dim2',
                                        'hardgrid_dim3'
                                        ],
                                ncvars=[])
        nc = netCDF(self.get_nc(),'a')
        nc.createDimension('hardgrid_dim1', hard[0])
        nc.createDimension('hardgrid_dim2', hard[1])
        nc.createDimension('hardgrid_dim3', hard[2])
        nc.sync()
        nc.close()
    self.set_status('new')
    self.ready = False
def get_ascii_debug(self):
    'Return the debug settings in Dacapo'
    nc = netCDF(self.get_nc(), 'r')
    if 'PrintDebugInfo' in nc.variables:
        v = nc.variables['PrintDebugInfo']
        #''.join is py3-compatible; the original string.join(v[:], '')
        #relied on the python-2-only string module function
        debug = ''.join(v[:])
    else:
        debug = None
    nc.close()
    return debug
def set_ascii_debug(self, level):
    '''set the debug level for Dacapo

    :Parameters:

      level : string
        one of 'Off', 'MediumLevel', 'HighLevel'
    '''
    nc = netCDF(self.get_nc(), 'a')
    if 'PrintDebugInfo' in nc.variables:
        v = nc.variables['PrintDebugInfo']
    else:
        if 'dim20' not in nc.dimensions:
            nc.createDimension('dim20', 20)
        v = nc.createVariable('PrintDebugInfo', 'c', ('dim20',))
    #pad to 20 characters to fill the character variable (dim20)
    v[:] = np.array('%20s' % level, dtype='c')
    nc.sync()
    nc.close()
    self.set_status('new')
    self.ready = False
def get_ncoutput(self):
    'returns the control variables for the ncfile'
    #(netcdf attribute, result key) pairs
    attr_map = (('PrintWaveFunction', 'wf'),
                ('PrintChargeDensity', 'cd'),
                ('PrintEffPotential', 'efp'),
                ('PrintElsPotential', 'esp'))
    ncf = netCDF(self.get_nc(), 'r')
    if 'NetCDFOutputControl' in ncf.variables:
        ctrl = ncf.variables['NetCDFOutputControl']
        ncoutput = {}
        for attr, key in attr_map:
            if hasattr(ctrl, attr):
                ncoutput[key] = getattr(ctrl, attr)
    else:
        ncoutput = None
    ncf.close()
    return ncoutput
def set_ncoutput(self,
                 wf=None,
                 cd=None,
                 efp=None,
                 esp=None):
    '''set the output of large variables in the netcdf output file

    :Parameters:

      wf : string
        controls output of wavefunction. values can
        be 'Yes' or 'No'

      cd : string
        controls output of charge density. values can
        be 'Yes' or 'No'

      efp : string
        controls output of effective potential. values can
        be 'Yes' or 'No'

      esp : string
        controls output of electrostatic potential. values can
        be 'Yes' or 'No'
    '''
    ncf = netCDF(self.get_nc(), 'a')
    if 'NetCDFOutputControl' in ncf.variables:
        ctrl = ncf.variables['NetCDFOutputControl']
    else:
        ctrl = ncf.createVariable('NetCDFOutputControl', 'c', ())
    #only attributes whose argument was supplied are written
    for attr, value in (('PrintWaveFunction', wf),
                        ('PrintChargeDensity', cd),
                        ('PrintEffPotential', efp),
                        ('PrintElsPotential', esp)):
        if value is not None:
            setattr(ctrl, attr, value)
    ncf.sync()
    ncf.close()
    self.set_status('new')
    self.ready = False
def get_ados(self, **kwargs):
    '''
    attempt at maintaining backward compatibility with get_ados
    returning data

    Now when we call calc.get_ados() it will return settings,

    and calc.get_ados(atoms=[],...) should return data
    '''
    #any keyword arguments mean the caller wants DOS data, not settings
    if len(kwargs) != 0:
        return self.get_ados_data(**kwargs)
    nc = netCDF(self.get_nc(),'r')
    if 'PrintAtomProjectedDOS' in nc.variables:
        v = nc.variables['PrintAtomProjectedDOS']
        ados = {}
        if hasattr(v, 'EnergyWindow'):
            ados['energywindow'] = v.EnergyWindow
        #the [0] indexing suggests these are stored as length-1
        #arrays -- TODO confirm against the file writer
        if hasattr(v, 'EnergyWidth'):
            ados['energywidth'] = v.EnergyWidth[0]
        if hasattr(v, 'NumberEnergyPoints'):
            ados['npoints'] = v.NumberEnergyPoints[0]
        if hasattr(v, 'CutoffRadius'):
            ados['cutoff'] = v.CutoffRadius[0]
    else:
        ados = None
    nc.close()
    return ados
def set_ados(self,
             energywindow=(-15,5),
             energywidth=0.2,
             npoints=250,
             cutoff=1.0):
    '''
    setup calculation of atom-projected density of states

    :Parameters:

      energywindow : (float, float)
        sets (emin,emax) in eV referenced to the Fermi level

      energywidth : float
        the gaussian used in smearing

      npoints : integer
        the number of points to sample the DOS at

      cutoff : float
        the cutoff radius in angstroms for the integration.
    '''
    nc = netCDF(self.get_nc(), 'a')
    if 'PrintAtomProjectedDOS' in nc.variables:
        v = nc.variables['PrintAtomProjectedDOS']
    else:
        v = nc.createVariable('PrintAtomProjectedDOS', 'c', ())
    v.EnergyWindow = energywindow
    v.EnergyWidth = energywidth
    v.NumberEnergyPoints = npoints
    v.CutoffRadius = cutoff
    nc.sync()
    nc.close()
    self.set_status('new')
    self.ready = False
def get_mdos(self):
    'return multicentered projected dos parameters'
    nc = netCDF(self.get_nc(),'r')
    #empty dict when MultiCenterProjectedDOS was never set up
    mdos = {}
    if 'MultiCenterProjectedDOS' in nc.variables:
        v = nc.variables['MultiCenterProjectedDOS']
        mdos['energywindow'] = v.EnergyWindow
        mdos['energywidth'] = v.EnergyWidth
        mdos['numberenergypoints'] = v.NumberEnergyPoints
        mdos['cutoffradius'] = v.CutoffRadius
        #NOTE(review): eval of a string stored in the ncfile; fine
        #for trusted files, unsafe if the ncfile is untrusted input
        mdos['mcenters'] = eval(v.mcenters)
    nc.close()
    return mdos
def get_mdos_data(self,
                  spin=0,
                  cutoffradius='infinite'):
    '''returns data from multicentered projection

    returns (mdos, rotmat)

    the rotation matrices are retrieved from the text file. I am
    not sure what you would do with these, but there was a note
    about them in the old documentation so I put the code to
    retrieve them here. the syntax for the return value is:
    rotmat[atom#][label] returns the rotation matrix for the
    center on the atom# for label. I do not not know what the
    label refers to.
    '''
    if self.calculation_required():
        self.calculate()
    nc = netCDF(self.get_nc(),'r')
    #index into the cutoff axis: 0 = infinite cutoff, 1 = short
    icut = 1 #short
    if cutoffradius == "infinite":
        icut = 0
    #var = nc.variables['MultiCenterProjectedDOS']
    integrated = nc.variables['MultiCenterProjectedDOS_IntegratedDOS'][:]
    tz = 'MultiCenterProjectedDOS_EnergyResolvedDOS'
    energyresolved = nc.variables[tz][:]
    energygrid = nc.variables['MultiCenterProjectedDOS_EnergyGrid'][:]
    number_of_multicenters = integrated.shape[0]
    #number_of_cutoff = integrated.shape[1]
    #number_of_spin = integrated.shape[2]
    multicenterprojections = []
    #collect (energygrid, dos) for every multicenter at the
    #requested spin channel and cutoff
    for multicenter in range(number_of_multicenters):
        #orbitals = var[multicenter]
        energyresolveddata = energyresolved[multicenter, icut, spin, :]
        #integrateddata = integrated[multicenter, icut, spin]
        multicenterprojections.append([energygrid, energyresolveddata])
    log.info('Found %d multicenters' % len(multicenterprojections))
    nc.close()
    #now parse the text file for the rotation matrices
    rot_mat_lines = []
    txt = self.get_txt()
    if os.path.exists(txt):
        f = open(txt,'r')
        for line in f:
            if 'MUL: Rmatrix' in line:
                rot_mat_lines.append(line)
        f.close()
        rotmat = []
        #grow the nested [overlap][center] list structure on demand;
        #fields[2] and fields[3] are 1-based indices from the text file
        for line in rot_mat_lines:
            fields = line.split()
            novl = int(fields[2])
            ncen = int(fields[3])
            row = [float(x) for x in fields[4:]]
            try:
                rotmat[novl-1][ncen-1].append(row)
            except IndexError:
                try:
                    rotmat[novl-1].append([])
                    rotmat[novl-1][ncen-1].append(row)
                except IndexError:
                    rotmat.append([])
                    rotmat[novl-1].append([])
                    rotmat[novl-1][ncen-1].append(row)
    else:
        rotmat = None
    return (multicenterprojections, rotmat)
def set_mdos(self,
             mcenters=None,
             energywindow=(-15,5),
             energywidth=0.2,
             numberenergypoints=250,
             cutoffradius=1.0):
    '''Setup multicentered projected DOS.

    mcenters
       a list of tuples containing (atom#,l,m,weight)
       (0,0,0,1.0) specifies (atom 0, l=0, m=0, weight=1.0) an s orbital
       on atom 0

       (1,1,1,1.0) specifies (atom 1, l=1, m=1, weight=1.0) a p orbital
       with m = +1 on atom 0

       l=0 s-orbital
       l=1 p-orbital
       l=2 d-orbital

       m in range of ( -l ... 0 ... +l )

       The direction cosines for which the spherical harmonics are
       set up are using the next different atom in the list
       (cyclic) as direction pointer, so the z-direction is chosen
       along the direction to this next atom. At the moment the
       rotation matrices is only given in the text file, you can
       use grep'MUL: Rmatrix' out_o2.txt to get this information.

    adapated from old MultiCenterProjectedDOS.py
    '''
    if mcenters is None:
        return
    nc = netCDF(self.get_nc(), 'a')
    _listofmcenters_ = mcenters
    # get number of multi centers
    ncenters = len(_listofmcenters_)
    # get max number of orbitals any center
    max_orbitals = max(map(len, _listofmcenters_))
    # bugfix: use the builtin float; np.float was a deprecated
    # alias removed in numpy >= 1.24
    mmatrix = np.zeros([ncenters, max_orbitals, 4], float)
    ncenter = 0
    for multicenter in _listofmcenters_:
        norbital = 0
        for orbital in multicenter:
            mmatrix[ncenter, norbital] = orbital
            norbital = norbital + 1
        # signal that this multicenter contains less than
        # max_orbital orbitals
        if len(multicenter) < max_orbitals:
            mmatrix[ncenter, len(multicenter):max_orbitals] = (-1.0, 0,
                                                               0, 0)
        ncenter = ncenter + 1
    nc.createDimension('max_orbitals', max_orbitals)
    nc.createDimension('number_of_multicenters', ncenters)
    if 'MultiCenterProjectedDOS' in nc.variables:
        v = nc.variables['MultiCenterProjectedDOS']
    else:
        v = nc.createVariable('MultiCenterProjectedDOS',
                              'd',
                              ('number_of_multicenters',
                               'max_orbitals',
                               'dim4'))
    v.EnergyWindow = energywindow
    v.EnergyWidth = energywidth
    v.NumberEnergyPoints = numberenergypoints
    v.CutoffRadius = cutoffradius
    #this is kind of hacky, but it is needed for get_mdos so you
    #can tell if the input is changed.
    v.mcenters = str(mcenters)
    v[:] = mmatrix
    nc.sync()
    nc.close()
def set_debug(self, debug):
    '''
    set debug level for python logging

    debug should be an integer from 0-100 or one of the logging
    constants like logging.DEBUG, logging.WARN, etc...
    '''
    self.debug = debug
    #apply the level to the module logger immediately
    log.setLevel(debug)
def get_debug(self):
    '''Return the python logging level stored by set_debug.'''
    return self.debug
def get_decoupling(self):
    'return the electrostatic decoupling parameters'
    ncf = netCDF(self.get_nc(), 'r')
    if 'Decoupling' in ncf.variables:
        dec = ncf.variables['Decoupling']
        decoupling = {}
        #only keys whose attribute exists in the file are filled in
        for attr, key in (('NumberOfGaussians', 'ngaussians'),
                          ('ECutoff', 'ecutoff'),
                          ('WidthOfGaussian', 'gausswidth')):
            if hasattr(dec, attr):
                decoupling[key] = getattr(dec, attr)
    else:
        decoupling = None
    ncf.close()
    return decoupling
def set_decoupling(self,
                   ngaussians=3,
                   ecutoff=100,
                   gausswidth=0.35):
    '''
    Decoupling activates the three dimensional electrostatic
    decoupling. Based on paper by Peter E. Bloechl: JCP 103
    page7422 (1995).

    :Parameters:

      ngaussians : int
        The number of gaussian functions per atom
        used for constructing the model charge of the system

      ecutoff : int
        The cut off energy (eV) of system charge density in
        g-space used when mapping constructing the model change of
        the system, i.e. only charge density components below
        ECutoff enters when constructing the model change.

      gausswidth : float
        The width of the Gaussians defined by
        $widthofgaussian*1.5^(n-1)$ $n$=(1 to numberofgaussians)
    '''
    nc = netCDF(self.get_nc(), 'a')
    if 'Decoupling' in nc.variables:
        v = nc.variables['Decoupling']
    else:
        v = nc.createVariable('Decoupling', 'c', ())
    v.NumberOfGaussians = ngaussians
    v.ECutoff = ecutoff
    v.WidthOfGaussian = gausswidth
    nc.sync()
    nc.close()
    self.set_status('new')
    self.ready = False
def set_external_dipole(self,
                        value,
                        position=None):
    '''
    Externally imposed dipole potential. This option overwrites
    DipoleCorrection if set.

    :Parameters:

      value : float
        units of volts

      position : float
        scaled coordinates along third unit cell direction.
        if None, the compensation dipole layer plane in the
        vacuum position farthest from any other atoms on both
        sides of the slab. Do not set to 0.0.
    '''
    var = 'ExternalDipolePotential'
    nc = netCDF(self.get_nc(), 'a')
    if var in nc.variables:
        v = nc.variables[var]
    else:
        v = nc.createVariable('ExternalDipolePotential', 'd', ())
    v.assignValue(value)
    #position is only written when explicitly provided
    if position is not None:
        v.DipoleLayerPosition = position
    nc.sync()
    nc.close()
    self.set_status('new')
    self.ready = False
def get_external_dipole(self):
    'return the External dipole settings'
    var = 'ExternalDipolePotential'
    nc = netCDF(self.get_nc(),'r')
    if var in nc.variables:
        v = nc.variables[var]
        value = v.getValue()
        if hasattr(v, 'DipoleLayerPosition'):
            position = v.DipoleLayerPosition
        else:
            position = None
        ed = {'value':value, 'position':position}
    else:
        #no external dipole potential has been set
        ed = None
    nc.close()
    return ed
def set_dipole(self,
               status=True,
               mixpar=0.2,
               initval=0.0,
               adddipfield=0.0,
               position=None):
    '''turn on and set dipole correction scheme

    :Parameters:

      status : Boolean
        True turns dipole on. False turns Dipole off

      mixpar : float
        Mixing Parameter for the the dipole correction field
        during the electronic minimization process. If instabilities
        occur during electronic minimization, this value may be
        decreased.

      initval : float
        initial value to start at

      adddipfield : float
        additional dipole field to add
        units : V/ang
        External additive, constant electrostatic field along
        third unit cell vector, corresponding to an external
        dipole layer. The field discontinuity follows the position
        of the dynamical dipole correction, i.e. if
        DipoleCorrection:DipoleLayerPosition is set, the field
        discontinuity is at this value, otherwise it is at the
        vacuum position farthest from any other atoms on both
        sides of the slab.

      position : float
        scaled coordinates along third unit cell direction.
        If this attribute is set, DACAPO will position the
        compensation dipole layer plane in at the provided value.
        If this attribute is not set, DACAPO will put the compensation
        dipole layer plane in the vacuum position farthest from any
        other atoms on both sides of the slab. Do not set this to
        0.0

    calling set_dipole() sets all default values.
    '''
    #NOTE(review): '== False' (not 'is False') also matches 0;
    #confirm that is intended before tightening the comparison
    if status == False:
        self.delete_ncattdimvar(self.nc, ncvars=['DipoleCorrection'])
        return
    ncf = netCDF(self.get_nc(), 'a')
    if 'DipoleCorrection' not in ncf.variables:
        dip = ncf.createVariable('DipoleCorrection', 'c', ())
    else:
        dip = ncf.variables['DipoleCorrection']
    dip.MixingParameter = mixpar
    dip.InitialValue = initval
    dip.AdditiveDipoleField = adddipfield
    if position is not None:
        dip.DipoleLayerPosition = position
    ncf.sync()
    ncf.close()
    self.set_status('new')
    self.ready = False
def set_stay_alive(self, value):
    'set the stay alive setting'
    #changing stay_alive invalidates any stored Dynamics settings
    self.delete_ncattdimvar(self.nc,
                            ncvars=['Dynamics'])
    #parent/child calculators cannot keep dacapo alive
    if (hasattr(self,'parent') or hasattr(self,'children')) and value == True:
        log.debug("This is a parent/child calculator and stay_alive must be false.")
        value = False
    #only accept strict booleans; anything else leaves the old value
    if value in [True, False]:
        self.stay_alive = value
        #self._dacapo_is_running = False
    else:
        log.debug("stay_alive must be boolean. Value was not changed.")
def get_stay_alive(self):
    '''Return the stay alive setting stored by set_stay_alive.'''
    return self.stay_alive
def get_fftgrid(self):
    'return soft and hard fft grids'
    ncf = netCDF(self.nc, 'r')
    soft, hard = [], []
    for axis in (1, 2, 3):
        soft_name = 'softgrid_dim%i' % axis
        hard_name = 'hardgrid_dim%i' % axis
        #hard dims are only read when the soft dim exists
        if soft_name in ncf.dimensions:
            soft.append(ncf.dimensions[soft_name])
            hard.append(ncf.dimensions[hard_name])
    ncf.close()
    #empty lists are reported as None
    return {'soft': soft or None,
            'hard': hard or None}
def get_kpts_type(self):
    'return the kpt grid type as a descriptive string'
    nc = netCDF(self.nc, 'r')
    if 'BZKpoints' in nc.variables:
        bv = nc.variables['BZKpoints']
        if hasattr(bv, 'gridtype'):
            kpts_type = bv.gridtype #string saved in jacapo
        else:
            #no grid attribute, this ncfile was created pre-jacapo
            kpts_type = '%i kpts' % len(bv[:])
    else:
        kpts_type = 'BZKpoints not defined. [[0,0,0]] used by default.'
    nc.close()
    return kpts_type
def get_kpts(self):
    'return the BZ kpts'
    nc = netCDF(self.nc, 'r')
    if 'BZKpoints' in nc.variables:
        bv = nc.variables['BZKpoints']
        kpts = bv[:]
    else:
        kpts = np.array(([0, 0, 0])) #default Gamma point used in Dacapo when
                                     #BZKpoints not defined
    nc.close()
    return kpts
def get_nbands(self):
    'return the number of bands used in the calculation'
    ncf = netCDF(self.nc, 'r')
    #None when the band count was never recorded
    nbands = None
    if 'ElectronicBands' in ncf.variables:
        bands = ncf.variables['ElectronicBands']
        if hasattr(bands, 'NumberOfBands'):
            nbands = int(bands.NumberOfBands[0])
    ncf.close()
    return nbands
def get_ft(self):
    'return the FermiTemperature used in the calculation'
    ncf = netCDF(self.nc, 'r')
    #None when the temperature was never recorded
    ft = None
    if 'ElectronicBands' in ncf.variables:
        ft = getattr(ncf.variables['ElectronicBands'],
                     'OccupationStatistics_FermiTemperature', None)
    ncf.close()
    return ft
def get_dipole(self):
    'return dictionary of parameters if the DipoleCorrection was used'
    ncf = netCDF(self.get_nc(), 'r')
    if 'DipoleCorrection' in ncf.variables:
        dip = ncf.variables['DipoleCorrection']
        pars = {'status': True}
        #optional attributes are copied only when present
        for attr, key in (('MixingParameter', 'mixpar'),
                          ('InitialValue', 'initval'),
                          ('AdditiveDipoleField', 'adddipfield'),
                          ('DipoleLayerPosition', 'position')):
            if hasattr(dip, attr):
                pars[key] = getattr(dip, attr)
    else:
        #no correction stored: report False rather than a dict
        pars = False
    ncf.close()
    return pars
def get_pw(self):
    'return the planewave cutoff used'
    ncf = netCDF(self.nc, 'r')
    pw = None
    if 'PlaneWaveCutoff' in ncf.variables:
        pw = ncf.variables['PlaneWaveCutoff'].getValue()
    ncf.close()
    if pw is None:
        return None
    #some old calculations apparently store scalars, while newer
    #ones store length-1 arrays
    if isinstance(pw, (int, float, np.int32)):
        return pw
    return pw[0]
def get_dw(self):
    'return the density wave cutoff'
    ncf = netCDF(self.nc, 'r')
    dw = None
    if 'Density_WaveCutoff' in ncf.variables:
        dw = ncf.variables['Density_WaveCutoff'].getValue()
    ncf.close()
    if dw is None:
        return None
    #some old calculations apparently store ints, while newer ones
    #are lists
    if isinstance(dw, (int, float, np.int32)):
        return dw
    return dw[0]
def get_xc(self):
    '''return the self-consistent exchange-correlation functional used

    returns a string'''
    nc = netCDF(self.nc, 'r')
    v = 'ExcFunctional'
    if v in nc.variables:
        #NOTE(review): ndarray.tostring() is deprecated in favor of
        #tobytes() in modern numpy, and returns bytes (not str) on
        #python 3 -- confirm before changing
        xc = nc.variables[v][:].tostring().strip()
    else:
        xc = None
    nc.close()
    return xc
def get_number_of_iterations(self):
    '''return the number of SCF iterations parsed from the text
    output, or None if the line is not found'''
    niter = None
    if self.calculation_required():
        self.calculate()
    txt = self.get_txt()
    if os.path.exists(txt):
        f = open(txt, 'r')
        #try/finally guarantees the file is closed even if int()
        #raises on a malformed line (the original leaked the handle
        #in that case)
        try:
            for line in f:
                if 'Number of iterations =' in line:
                    niter = int(line.split('=')[1])
                    break
        finally:
            f.close()
    return niter
def get_potential_energy(self,
                         atoms=None,
                         force_consistent=False):
    '''
    return the potential energy.

    :Parameters:

      atoms : atoms object or None
        passed to calculation_required to decide whether dacapo
        must be re-run

      force_consistent : boolean
        if True, return the last TotalFreeEnergy; otherwise the
        last TotalEnergy
    '''
    if self.calculation_required(atoms):
        log.debug('calculation required for energy')
        self.calculate()
    else:
        log.debug('no calculation required for energy')
    nc = netCDF(self.get_nc(), 'r')
    try:
        #[-1] selects the most recent frame
        if force_consistent:
            e = nc.variables['TotalFreeEnergy'][-1]
        else:
            e = nc.variables['TotalEnergy'][-1]
        nc.close()
        return e
    except (TypeError, KeyError):
        raise RuntimeError('Error in calculating the total energy\n'
                           + 'check %s for error messages'
                           % self.get_txt())
def get_forces(self, atoms=None):
    """Calculate atomic forces"""
    if atoms is None:
        atoms = self.atoms
    #re-run dacapo if the atoms or inputs changed
    if self.calculation_required(atoms):
        self.calculate()
    nc = netCDF(self.get_nc(), 'r')
    #[-1] selects the forces of the most recent frame
    forces = nc.variables['DynamicAtomForces'][-1]
    nc.close()
    return forces
def get_atoms(self):
    'return the atoms attached to a calculator()'
    #guard clauses: nothing attached yet
    if not hasattr(self, 'atoms'):
        return None
    if self.atoms is None:
        return None
    atoms = self.atoms.copy()
    #it is not obvious the copy of atoms should have the same
    #calculator, but it is attached here anyway
    atoms.set_calculator(self)
    return atoms
def get_nc(self):
    '''Return the filename of the netcdf file used for output.'''
    return self.nc
def get_txt(self):
    '''Return the txt file used for output, or None when it has
    not been set yet.'''
    return getattr(self, 'txt', None)
def get_psp(self, sym=None, z=None):
    '''get the pseudopotential filename from the psp database

    :Parameters:

      sym : string
        the chemical symbol of the species

      z : integer
        the atomic number of the species

    you can only specify sym or z. Returns the pseudopotential
    filename, not the full path. Returns None when neither sym
    nor z is given.
    '''
    if sym is None and z is None:
        return None
    if sym is None and z is not None:
        from ase.data import chemical_symbols
        sym = chemical_symbols[z]
    elif sym is not None and z is not None:
        #py3-compatible raise (also valid in python 2)
        raise Exception('You can only specify Z or sym!')
    return self.psp[sym]
def get_spin_polarized(self):
    '''Return True if calculate is spin-polarized or False if not

    Returns the string 'Not defined' when the file does not record
    the spin polarization.
    '''
    #self.calculate() #causes recursion error with get_magnetic_moments
    nc = netCDF(self.nc, 'r')
    #bugfix: define the default up front; the original raised
    #UnboundLocalError when 'ElectronicBands' was missing entirely
    spinpol = 'Not defined'
    if 'ElectronicBands' in nc.variables:
        v = nc.variables['ElectronicBands']
        if hasattr(v, 'SpinPolarization'):
            if v.SpinPolarization == 1:
                spinpol = False
            elif v.SpinPolarization == 2:
                spinpol = True
            else:
                spinpol = False
    nc.close()
    return spinpol
def get_magnetic_moments(self, atoms=None):
    '''return magnetic moments on each atom after the calculation is
    run'''
    if self.calculation_required(atoms):
        self.calculate()
    nc = netCDF(self.nc, 'r')
    if 'InitialAtomicMagneticMoment' in nc.variables:
        mom = nc.variables['InitialAtomicMagneticMoment'][:]
    else:
        #no moments recorded; report zero for every atom
        mom = [0.0]*len(self.atoms)
    nc.close()
    return mom
def get_status(self):
    '''get status of calculation from ncfile. usually one of:
    'new',
    'aborted'
    'running'
    'finished'
    None
    '''
    ncf = netCDF(self.nc, 'r')
    #None when the file carries no status attribute
    status = getattr(ncf, 'status', None)
    ncf.close()
    return status
def get_calculate_stress(self):
    'return whether stress is calculated or not'
    ncf = netCDF(self.get_nc(), 'r')
    #presence of the TotalStress variable is the flag
    calcstress = 'TotalStress' in ncf.variables
    ncf.close()
    return calcstress
def get_stress(self, atoms=None):
    '''get stress on the atoms.

    you should have set up the calculation
    to calculate stress first.

    returns [sxx, syy, szz, syz, sxz, sxy]'''
    if self.calculation_required(atoms):
        self.calculate()
    nc = netCDF(self.get_nc(), 'r')
    if 'TotalStress' in nc.variables:
        stress = nc.variables['TotalStress'][:]
        #ase expects the 6-element form; pick those components out
        #of the flattened 3x3 tensor
        stress = np.take(stress.ravel(), [0, 4, 8, 5, 2, 1])
    else:
        #stress will not be here if you did not set it up by
        #calling set_stress() or in the __init__
        stress = None
    nc.close()
    return stress
def get_psp_valence(self, psp):
    '''
    get the psp valence charge on an atom from the pspfile.

    :Parameters:

      psp : string
        pseudopotential filename; may be absolute, relative, or
        relative to the default psp path

    returns the number of valence electrons as a float.
    '''
    from struct import unpack
    if os.path.exists(psp):
        #the pspfile may be in the current directory
        #or defined by an absolute path
        fullpsp = psp
    else:
        #or, it is in the default psp path. resolved lazily so a
        #direct path works without a configured dacapo path
        fullpsp = os.path.join(get_dacapopath(), psp)
    if os.path.exists(fullpsp.strip()):
        #binary mode: the file holds packed big-endian doubles
        #(text mode broke the read under python 3)
        f = open(fullpsp, 'rb')
        try:
            # read past version numbers and text information
            f.read(64)
            # read number valence electrons
            nvalence = unpack(">d", f.read(8))[0]
        finally:
            f.close()
    else:
        #py3-compatible raise (also valid in python 2)
        raise Exception("%s does not exist" % fullpsp)
    return nvalence
def get_psp_nuclear_charge(self, psp):
    '''
    get the nuclear charge of the atom from the psp-file.

    This is not the same as the atomic number, nor is it
    necessarily the negative of the number of valence electrons,
    since a psp may be an ion. this function is needed to compute
    centers of ion charge for the dipole moment calculation.

    We read in the valence ion configuration from the psp file and
    add up the charges in each shell.
    '''
    from struct import unpack
    if os.path.exists(psp):
        #the pspfile may be in the current directory
        #or defined by an absolute path
        fullpsp = psp
    else:
        #or, it is in the default psp path. resolved lazily so a
        #direct path works without a configured dacapo path
        fullpsp = os.path.join(get_dacapopath(), psp)
    if os.path.exists(fullpsp.strip()):
        #binary mode: packed big-endian ints/doubles (text mode
        #broke the read under python 3)
        f = open(fullpsp, 'rb')
        try:
            #skip the header fields we do not need
            unpack('>i', f.read(4))[0]
            for i in range(3):
                f.read(4)
            for i in range(3):
                f.read(4)
            f.read(8)
            f.read(20)
            f.read(8)
            f.read(8)
            f.read(8)
            #number of valence states
            nvalps = unpack('>i', f.read(4))[0]
            f.read(4)
            f.read(8)
            f.read(8)
            wwnlps = []
            #each state record: 4 skipped bytes, the shell charge
            #as a big-endian double, then 8 skipped bytes
            for i in range(nvalps):
                f.read(4)
                wwnlps.append(unpack('>d', f.read(8))[0])
                f.read(8)
        finally:
            f.close()
    else:
        #py3-compatible raise (also valid in python 2)
        raise Exception("%s does not exist" % fullpsp)
    return np.array(wwnlps).sum()
def get_valence(self, atoms=None):
    '''return the total number of valence electrons for the
    atoms. valence electrons are read directly from the
    pseudopotentials.

    the psp filenames are stored in the ncfile. They may be just
    the name of the file, in which case the psp may exist in the
    same directory as the ncfile, or in $DACAPOPATH, or the psp
    may be defined by an absolute or relative path. This function
    deals with all these possibilities.

    :Parameters:

      atoms : ASE.Atoms
        atoms to count valence electrons for; defaults to the
        attached atoms.

    returns None when no atoms are available or a psp file
    cannot be found.
    '''
    from struct import unpack

    # do not use get_atoms() or recursion occurs
    if atoms is None:
        if hasattr(self, 'atoms'):
            atoms = self.atoms
        else:
            return None

    dacapopath = get_dacapopath()
    totval = 0.0
    for sym in atoms.get_chemical_symbols():
        psp = self.get_psp(sym)
        if os.path.exists(psp):
            # the pspfile may be in the current directory
            # or defined by an absolute path
            fullpsp = psp

            # let's also see if we can construct an absolute path to a
            # local or relative path psp.
            abs_path_to_nc = os.path.abspath(self.get_nc())
            base = os.path.split(abs_path_to_nc)[0]
            possible_path_to_psp = os.path.join(base, psp)
            if os.path.exists(possible_path_to_psp):
                fullpsp = possible_path_to_psp
        else:
            # or, it is in the default psp path
            fullpsp = os.path.join(dacapopath, psp)

        if not os.path.exists(fullpsp.strip()):
            # bail out immediately: the original kept looping with
            # totval = None, which raised TypeError as soon as a
            # later psp was found and added to None
            print('%s does not exist' % fullpsp)
            return None

        # binary mode so the struct offsets are byte-exact
        f = open(fullpsp, 'rb')
        try:
            # read past version numbers and text information
            f.read(64)
            # number of valence electrons, big-endian double
            nvalence = unpack('>d', f.read(8))[0]
        finally:
            f.close()
        totval += float(nvalence)

    return totval
def calculation_required(self, atoms=None, quantities=None):
    '''
    determines if a calculation is needed.

    return True if a calculation is needed to get up to date data.
    return False if no calculation is needed.

    quantities is here because of the ase interface.

    :Raises:
      Exception when no output ncfile has been specified.
      DacapoRunning when the ncfile status says dacapo is running.
      DacapoAborted when the ncfile status says dacapo aborted.
    '''
    # first, compare if the atoms is the same as the stored atoms
    # if anything has changed, we need to run a calculation
    log.debug('running calculation_required')

    if self.nc is None:
        raise Exception('No output ncfile specified!')

    if atoms is not None:
        if not self.atoms_are_equal(atoms):
            log.debug('found that atoms != self.atoms')
            tol = 1.0e-6  # tolerance that the unit cell is the same
            new = atoms.get_cell()
            old = self.atoms.get_cell()
            # float comparison of equality
            if not np.all(abs(old - new) < tol):
                # this often changes the number of planewaves
                # which requires a complete restart
                log.debug('restart required! because cell changed')
                self.restart()
            else:
                log.debug('Unitcells apparently the same')

            self.set_atoms(atoms)  # we have to update the atoms in any case
            return True

    # if we make it past the atoms check, we look in the
    # nc file. if parameters have been changed the status
    # will tell us if a calculation is needed

    # past this point, atoms was None or equal, so there is nothing to
    # update in the calculator
    log.debug('atoms tested equal')
    if os.path.exists(self.nc):
        nc = netCDF(self.nc, 'r')
        if hasattr(nc, 'status'):
            if nc.status == 'finished' and self.ready:
                nc.close()
                return False
            elif nc.status == 'running':
                nc.close()
                raise DacapoRunning('Dacapo is Running')
            elif nc.status == 'aborted':
                nc.close()
                raise DacapoAborted('Dacapo aborted. see txt file!')
            else:
                log.debug('ncfile exists, but is not ready')
                nc.close()
                return True
        else:
            # legacy calculations do not have a status flag in them.
            # let us guess that if the TotalEnergy is there
            # no calculation needs to be run?
            runflag = 'TotalEnergy' not in nc.variables
            nc.close()
            log.debug('Legacy calculation')
            return runflag  # if no status, run calculation
        # note: every branch above returns or raises.  The original
        # code had a trailing nc.close() after this if-block that
        # could only be reached when the ncfile did not exist, where
        # `nc` was unbound and it raised NameError.

    # default, a calculation is required
    return True
def get_scratch(self):
    '''finds an appropriate scratch directory for the calculation

    Candidates are tried in order: $SCRATCH, $SCR,
    /scratch/<username>, /scratch/, and finally the current
    directory.  The first writable one is returned.

    :Raises: IOError when no candidate directory is writable.
    '''
    import getpass
    username = getpass.getuser()

    # candidate directories, most specific first
    scratch_dirs = []
    if 'SCRATCH' in os.environ:
        scratch_dirs.append(os.environ['SCRATCH'])
    if 'SCR' in os.environ:
        scratch_dirs.append(os.environ['SCR'])
    scratch_dirs.append('/scratch/' + username)
    scratch_dirs.append('/scratch/')
    scratch_dirs.append(os.curdir)

    for scratch_dir in scratch_dirs:
        if os.access(scratch_dir, os.W_OK):
            return scratch_dir

    raise IOError("No suitable scratch directory and no write access "
                  "to current dir.")
def set_parent(self, parent):
    '''attach a parent calculator to this one (used for parallel
    NEB images).

    :Raises: RuntimeError if this calculator already has children,
      since only one level of parent/child nesting is supported.
    '''
    if hasattr(self, 'children'):
        raise RuntimeError("Cannot create grandparents.")
    self.parent = parent
def attach_child(self, child):
    '''register a child calculator on this one (used for parallel
    NEB images).  Creates the children list on first use and sets
    this calculator as the child's parent.

    :Raises: RuntimeError if this calculator itself has a parent,
      since only one level of parent/child nesting is supported.
    '''
    if hasattr(self, 'parent'):
        raise RuntimeError("Cannot create grandchildren!")
    if not hasattr(self, 'children'):
        self.children = []
    self.children.append(child)
    child.set_parent(self)
def calculate(self):
    '''run a calculation.

    you have to be a little careful with code in here. Use the
    calculation_required function to tell if a calculation is
    required. It is assumed here that if you call this, you mean
    it.

    :Raises:
      DacapoDryrun when $DACAPO_DRYRUN is set in the environment.
      DacapoAbnormalTermination when the output txt file does not
      end with the expected serial or parallel closing lines.
    '''
    # provide a way to make no calculation get run
    if os.environ.get('DACAPO_DRYRUN', None) is not None:
        raise DacapoDryrun('$DACAPO_DRYRUN detected, and a calculation '
                           'attempted')

    if hasattr(self, 'children'):
        # We are a parent and call execute_parent_calculation
        self.execute_parent_calculation()
        return

    if hasattr(self, 'parent'):  # we're a child and call the parent
        log.debug("I'm a child. Calling parent instead.")
        self.parent.calculate()  # the parent calculates all images
        return

    # hack: use the default psp path (see validate.get_dacapopath)
    # export DACAPOPATH to the environment
    env = os.environ
    env['DACAPOPATH'] = get_dacapopath()

    if not self.ready:
        log.debug('Calculator is not ready.')
        if not os.path.exists(self.get_nc()):
            self.initnc()

        log.debug('writing atoms out')
        log.debug(self.atoms)
        self.write_nc()  # write atoms to ncfile

        log.debug('writing input out')
        self.write_input()  # make sure input is uptodate

        # check that the bands get set
        if self.get_nbands() is None:
            nelectrons = self.get_valence()
            nbands = int(nelectrons * 0.65 + 4)
            self.set_nbands(nbands)

    log.debug('running a calculation')
    nc = self.get_nc()
    txt = self.get_txt()
    scratch = self.get_scratch()

    if self.stay_alive:
        self.execute_external_dynamics(nc, txt)
        self.ready = True
        self.set_status('finished')
    else:
        # if Dynamics:ExternalIonMotion_script is set in the .nc
        # file from a previous run and stay_alive is false for the
        # continuation run, the Fortran executable continues taking
        # steps of size 0 and ends in an infinite loop.
        # Solution: remove the Dynamics variable if present when not
        # running with stay_alive
        self.delete_ncattdimvar(self.nc, ncvars=['Dynamics'])

        cmd = "dacapo.run '%(innc)s' -out '%(txt)s' -scratch %(scratch)s"
        cmd = cmd % {'innc': nc,
                     'txt': txt,
                     'scratch': scratch}

        log.debug(cmd)
        # using subprocess instead of commands; subprocess is more
        # flexible and works better for stay_alive
        self._dacapo = sp.Popen(cmd,
                                stdout=sp.PIPE,
                                stderr=sp.PIPE,
                                shell=True)
        status = self._dacapo.wait()
        [stdout, stderr] = self._dacapo.communicate()
        output = stdout + stderr

        # use ==, not 'is': identity comparison of ints relies on
        # CPython small-int caching
        if status == 0:  # that means it ended fine!
            self.ready = True
            self.set_status('finished')
        else:
            log.debug('Status was not 0')
            log.debug(output)
            self.ready = False
        # directory cleanup has been moved to self.__del__()
        del self._dacapo

        # Sometimes dacapo dies or is killed abnormally, and in this
        # case an exception should be raised to prevent a geometry
        # optimization from continuing for example. The best way to
        # detect this right now is actually to check the end of the
        # text file to make sure it ends with the right line. The
        # line differs if the job was run in parallel or in serial.
        f = open(txt, 'r')
        lines = f.readlines()
        f.close()

        if 'PAR: msexit halting Master' in lines[-1]:
            pass  # standard parallel end
        elif ('TIM' in lines[-2]
              and 'clexit: exiting the program' in lines[-1]):
            pass  # standard serial end
        else:
            # text file does not end as expected, log the last
            # 10 lines and raise exception
            log.debug(''.join(lines[-10:-1]))
            s = 'Dacapo output txtfile (%s) did not end normally.\n'
            s += ''.join(lines[-10:-1])
            raise DacapoAbnormalTermination(s % txt)
def execute_parent_calculation(self):
    '''
    Implementation of an extra level of parallelization, where one
    jacapo calculator spawns several dacapo.run processes. This is
    used for NEBs parallelized over images.

    One dacapo.run process is started per child (JACAPO_NIMAGES and
    JACAPO_IMAGE are exported so each process knows its image);
    afterwards all processes are waited for and each child's status
    is recorded.
    '''
    # hack: use the default psp path (see validate.get_dacapopath)
    # export DACAPOPATH to the environment
    env = os.environ
    env['DACAPOPATH'] = get_dacapopath()

    nchildren = len(self.children)
    # the original passed extra positional args with no format
    # placeholders, which breaks logging's %-formatting
    log.debug("I'm a parent and start a calculation for %s children.",
              nchildren)

    self._dacapo = nchildren * [None]
    # export the number of children to the environment
    env['JACAPO_NIMAGES'] = str(nchildren)

    # start a dacapo.run instance for each child
    for i, child in enumerate(self.children):
        nc = child.get_nc()
        txt = child.get_txt()
        scratch = child.get_scratch()

        if not os.path.exists(nc):
            child.initnc()
        child.write_nc()     # write atoms to ncfile
        child.write_input()  # make sure input is uptodate

        # check that the bands get set
        if child.get_nbands() is None:
            nelectrons = child.get_valence()
            nbands = int(nelectrons * 0.65 + 4)
            child.set_nbands(nbands)

        env['JACAPO_IMAGE'] = str(i)
        cmd = "dacapo.run '%(innc)s' -out '%(txt)s' -scratch %(scratch)s"
        cmd = cmd % {'innc': nc,
                     'txt': txt,
                     'scratch': scratch}
        log.debug(cmd)
        self._dacapo[i] = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE,
                                   shell=True, env=env)

    print('now waiting for all children to finish')
    # now wait for all processes to finish
    for i, child in enumerate(self.children):
        status = self._dacapo[i].wait()
        [stdout, stderr] = self._dacapo[i].communicate()
        output = stdout + stderr
        # use ==, not 'is', for int comparison
        if status == 0:  # that means it ended fine!
            child.ready = True
            child.set_status('finished')
        else:
            log.debug('Status was not 0')
            log.debug(output)
            child.ready = False
        # could also check the end of the output .txt file to make
        # sure everything was fine.

    del self._dacapo
def execute_external_dynamics(self,
                              nc=None,
                              txt=None,
                              stoppfile='stop',
                              stopprogram=None):
    '''
    Implementation of the stay alive functionality with socket
    communication between dacapo and python. Known limitations:
    It is not possible to start 2 independent Dacapo calculators
    from the same python process, since the python PID is used as
    identifier for the script[PID].py file.

    :Parameters:

      nc : string
        ncfile to use; defaults to self.get_nc()

      txt : string
        txt output file; defaults to self.get_txt()

      stoppfile : string
        name of the file whose presence tells dacapo to stop

      stopprogram : bool-like
        when true, shut the running dacapo process down instead of
        stepping it
    '''
    from socket import socket, AF_INET, SOCK_STREAM, timeout
    import tempfile

    if hasattr(self, "_dacapo"):
        msg = "Starting External Dynamics while Dacapo is runnning: %s"
        msg = msg % str(self._dacapo.poll())
        log.debug(msg)
    else:
        log.debug("No dacapo instance has been started yet")
    log.debug("Stopprogram: %s" % stopprogram)

    if not nc:
        nc = self.get_nc()
    if not txt:
        txt = self.get_txt()
    tempfile.tempdir = os.curdir

    if stopprogram:
        # shut-down path: tell dacapo to stop, wait for it to die,
        # then clean up the socket, helper script and stop file.

        # write stop file
        stfile = open(stoppfile, 'w')
        stfile.write('1 \n')
        stfile.close()

        # signal to dacapo that positions are ready
        # let dacapo continue, it is up to the python mainloop
        # to allow dacapo enough time to finish properly.
        # NOTE: the message text is part of the protocol; the helper
        # script does s.recv(14), which matches its 14 bytes.
        self._client.send('ok too proceed')

        # Wait for dacapo to acknowledge that netcdf file has
        # been updated, and analysis part of the code has been
        # terminated. Dacapo sends a signal at the end of call
        # clexit().
        log.info("waiting for dacapo to exit...")
        self.s.settimeout(1200.0)  # if dacapo exits with an
                                   # error, self.s.accept()
                                   # should time out,
                                   # but we need to give it
                                   # enough time to write the
                                   # wave function to the nc
                                   # file.
        try:
            self._client, self._addr = self.s.accept()  # Last
                                                        # mumble
                                                        # before
                                                        # Dacapo
                                                        # dies.
            os.system("sleep 5")  # 5 seconds of silence
                                  # mourning
                                  # dacapo.
        except timeout:
            print '''Socket connection timed out.'''
            print '''This usually means Dacapo crashed.'''

        # close the socket s
        self.s.close()
        self._client.close()

        # remove the script???? file
        ncfile = netCDF(nc, 'r')
        vdyn = ncfile.variables['Dynamics']
        os.system("rm -f '"+vdyn.ExternalIonMotion_script+"'")
        ncfile.close()
        os.system('rm -f '+stoppfile)

        if self._dacapo.poll()==None:  # dacapo is still not dead!
            # but this should do it!
            sp.Popen("kill -9 "+str(self._dacapo.pid), shell=True)
            # if Dacapo dies for example because of too few
            # bands, subprocess never returns an exitcode.
            # very strange, but at least the program is
            # terminated.  print self._dacapo.returncode
        del self._dacapo
        return

    if hasattr(self, '_dacapo') and self._dacapo.poll()==None:
        # returns None if dacapo is running self._dacapo_is_running:
        # calculation_required already updated the positions in
        # the nc file
        self._client.send('ok too proceed')
    else:
        # first step: start a fresh dacapo process and the socket
        # it will report back on.

        # get process pid that will be used as communication
        # channel
        pid = os.getpid()

        # setup communication channel to dacapo
        from sys import version
        from string import split
        effpid = (pid)%(2**16-1025)+1025  # This translate pid
                                          # [0;99999] to a number
                                          # in [1025;65535] (the
                                          # allowed socket
                                          # numbers)

        self.s = socket(AF_INET, SOCK_STREAM)
        foundafreesocket = 0
        while not foundafreesocket:
            try:
                if split(version)[0] > "2":  # new interface
                    self.s.bind(("", effpid))
                else:  # old interface
                    self.s.bind("", effpid)
                foundafreesocket = 1
            except:
                # port taken; try the next one
                effpid = effpid + 1

        # write script file that will be used by dacapo to phone home
        scriptname = 'script%s.py' % str(pid)
        scriptfile = open(scriptname, 'w')
        scriptfile.write(
"""#!/usr/bin/env python
from socket import *
from sys import version
from string import split
s = socket(AF_INET,SOCK_STREAM)
# tell python that dacapo has finished
if split(version)[0] > "2": # new interface
    s.connect(("",%(effpid)s))
else: # old interface
    s.connect("",%(effpid)s)
# wait for python main loop
s.recv(14)
""" % {'effpid':str(effpid)})
        scriptfile.close()
        os.system('chmod +x ' + scriptname)

        # hack: use the default psp path (see validate.get_dacapopath)
        # export DACAPOPATH to the environment
        env = os.environ
        env['DACAPOPATH'] = get_dacapopath()

        # setup dynamics as external and set the script name
        ncfile = netCDF(nc, 'a')
        if 'Dynamics' not in ncfile.variables:
            vdyn = ncfile.createVariable('Dynamics', 'c', ())
        else:
            vdyn = ncfile.variables['Dynamics']
        vdyn.Type = "ExternalIonMotion"
        vdyn.ExternalIonMotion_script = './'+ scriptname
        ncfile.close()

        # dacapo is not running, start dacapo non blocking
        scratch_in_nc = tempfile.mktemp()
        os.system('mv '+nc+' '+scratch_in_nc)
        os.system('rm -f '+stoppfile)
        scratch = self.get_scratch()
        cmd = "dacapo.run"
        cmd += " '%(innc)s' '%(outnc)s' -out '%(txt)s' -scratch %(scratch)s"
        cmd = cmd % {'innc':scratch_in_nc,
                     'outnc':nc,
                     'txt':txt,
                     'scratch':scratch}

        log.debug(cmd)
        self._dacapo = sp.Popen(cmd,
                                stdout=sp.PIPE,
                                stderr=sp.PIPE,
                                shell=True)

        self.s.listen(1)

    # wait for dacapo to connect (signalling the step is done)
    self._client, self._addr = self.s.accept()
def write_nc(self, nc=None, atoms=None):
    '''
    write out atoms to a netcdffile.

    This does not write out the calculation parameters!

    :Parameters:

      nc : string
        ncfilename to write to. this file will get clobbered
        if it already exists.

      atoms : ASE.Atoms
        atoms to write. if None use the attached atoms
        if no atoms are attached only the calculator is
        written out.

    the ncfile is always opened in 'a' mode.

    note: it is good practice to use the atoms argument to make
    sure that the geometry you mean gets written! Otherwise, the
    atoms in the calculator is used, which may be different than
    the external copy of the atoms.
    '''
    log.debug('writing atoms to ncfile with write_nc')
    # no filename was provided to function, use the current ncfile
    if nc is None:
        nc = self.get_nc()

    if nc != self.nc:
        # this means we are writing a new file, and we should copy
        # the old file to it first.  this makes sure the old
        # calculator settings are preserved
        new = nc
        old = self.nc
        log.debug('Copying old ncfile to new ncfile')
        log.debug("cp '%s' '%s'" % (old, new))
        os.system("cp '%s' '%s'" % (old, new))

    if atoms is None:
        atoms = self.get_atoms()

    log.debug('self.atoms = %s' % str(self.atoms))
    log.debug('atoms = %s' % str(atoms))

    if atoms is not None:  # there may still be no atoms attached
        log.debug('about to write to %s' % nc)
        ncf = netCDF(nc, 'a')

        if 'number_of_dynamic_atoms' not in ncf.dimensions:
            ncf.createDimension('number_of_dynamic_atoms',
                                len(atoms))
        else:
            # number of atoms is already a dimension, but we might
            # be setting new atoms here
            # check for same atom symbols (implicitly includes
            # a length check)
            symbols = np.array(['%2s' % s for s in
                                atoms.get_chemical_symbols()], dtype='c')
            ncsym = ncf.variables['DynamicAtomSpecies'][:]
            if (symbols.size != ncsym.size) or (np.any(ncsym != symbols)):
                # the number of atoms or their order has changed.
                # Treat this as a new calculation and reset
                # number_of_ionic_steps and
                # number_of_dynamic_atoms.
                ncf.close()  # nc file must be closed for
                             # delete_ncattdimvar to work correctly
                self.delete_ncattdimvar(nc, ncattrs=[],
                                        ncdims=['number_of_dynamic_atoms',
                                                'number_ionic_steps'])
                ncf = netCDF(nc, 'a')
                ncf.createDimension('number_of_dynamic_atoms',
                                    len(atoms))
                ncf.createDimension('number_ionic_steps', None)
                self._set_frame_number(0)
                ncf.close()  # nc file must be closed for restart to
                             # work correctly
                self.restart()
                ncf = netCDF(nc, 'a')

        # now, create variables
        if 'DynamicAtomSpecies' not in ncf.variables:
            sym = ncf.createVariable('DynamicAtomSpecies',
                                     'c',
                                     ('number_of_dynamic_atoms',
                                      'dim2',))
        else:
            sym = ncf.variables['DynamicAtomSpecies']

        # note explicit array casting was required here
        symbols = atoms.get_chemical_symbols()
        sym[:] = np.array(['%2s' % s for s in symbols], dtype='c')

        if 'DynamicAtomPositions' not in ncf.variables:
            pos = ncf.createVariable('DynamicAtomPositions',
                                     'd',
                                     ('number_ionic_steps',
                                      'number_of_dynamic_atoms',
                                      'dim3'))
        else:
            pos = ncf.variables['DynamicAtomPositions']

        # positions are stored scaled (fractional); cast down when
        # the existing variable is single precision
        spos = atoms.get_scaled_positions()
        if pos.typecode() == 'f':
            spos = np.array(spos, dtype=np.float32)
        pos[self._frame, :] = spos

        if 'UnitCell' not in ncf.variables:
            uc = ncf.createVariable('UnitCell', 'd',
                                    ('number_ionic_steps',
                                     'dim3', 'dim3'))
        else:
            uc = ncf.variables['UnitCell']

        cell = atoms.get_cell()
        if uc.typecode() == 'f':
            cell = np.array(cell, dtype=np.float32)
        uc[self._frame, :] = cell

        if 'AtomTags' not in ncf.variables:
            tags = ncf.createVariable('AtomTags', 'i',
                                      ('number_of_dynamic_atoms',))
        else:
            tags = ncf.variables['AtomTags']
        tags[:] = np.array(atoms.get_tags(), np.int32)

        if 'InitialAtomicMagneticMoment' not in ncf.variables:
            mom = ncf.createVariable('InitialAtomicMagneticMoment',
                                     'd',
                                     ('number_of_dynamic_atoms',))
        else:
            mom = ncf.variables['InitialAtomicMagneticMoment']

        # explain why we have to use get_initial_magnetic_moments()
        moms = atoms.get_initial_magnetic_moments()
        if mom.typecode() == 'f':
            moms = np.array(moms, dtype=np.float32)
        mom[:] = moms

        # finally the atom pseudopotentials
        for sym in atoms.get_chemical_symbols():
            vn = 'AtomProperty_%s' % sym
            if vn not in ncf.variables:
                p = ncf.createVariable(vn, 'c', ('dim20',))
            else:
                p = ncf.variables[vn]
            ppath = self.get_psp(sym=sym)
            p.PspotFile = ppath

        ncf.sync()
        ncf.close()

        # store constraints if they exist
        constraints = atoms._get_constraints()
        if constraints != []:
            nc = netCDF(self.get_nc(), 'a')
            if 'constraints' not in nc.variables:
                if 'dim1' not in nc.dimensions:
                    nc.createDimension('dim1', 1)
                c = nc.createVariable('constraints', 'c', ('dim1',))
            else:
                c = nc.variables['constraints']
            # we store the pickle string as an attribute of a
            # netcdf variable because that way we do not have to
            # know how long the string is.  with a character
            # variable you have to specify the dimension of the
            # string ahead of time.
            c.data = pickle.dumps(constraints)
            nc.close()
        else:
            # getting here means there where no constraints on the
            # atoms just written we should check if there are any
            # old constraints left in the ncfile
            # from a previous atoms, and delete them if so
            delete_constraints = False
            nc = netCDF(self.get_nc())
            if 'constraints' in nc.variables:
                delete_constraints = True
            nc.close()

            if delete_constraints:
                log.debug('deleting old constraints')
                self.delete_ncattdimvar(self.nc,
                                        ncvars=['constraints'])
@staticmethod
def read_atoms(filename):
    '''read atoms and calculator from an existing netcdf file.

    :Parameters:

      filename : string
        name of file to read from.

    static method

    example::

      >>> atoms = Jacapo.read_atoms(ncfile)
      >>> calc = atoms.get_calculator()

    this method is here for legacy purposes. I used to use it a lot.
    '''
    calc = Jacapo(filename)
    atoms = calc.get_atoms()
    return atoms
def read_only_atoms(self, ncfile):
    '''read only the atoms from an existing netcdf file. Used to
    initialize a calculator from a ncfilename.

    :Parameters:

      ncfile : string
        name of file to read from.

    return ASE.Atoms with no calculator attached or None if no
    atoms found
    '''
    from ase import Atoms

    nc = netCDF(ncfile, 'r')

    # some ncfiles do not have atoms in them
    if 'UnitCell' not in nc.variables:
        log.debug('no unit cell found in ncfile')
        nc.close()
        return None

    # take the last ionic step stored in the file
    cell = nc.variables['UnitCell'][:][-1]
    sym = nc.variables['DynamicAtomSpecies'][:]
    symbols = [x.tostring().strip() for x in sym]
    spos = nc.variables['DynamicAtomPositions'][:][-1]

    # positions are stored scaled; convert to cartesian
    pos = np.dot(spos, cell)

    atoms = Atoms(symbols=symbols,
                  positions=pos,
                  cell=cell)

    if 'AtomTags' in nc.variables:
        tags = nc.variables['AtomTags'][:]
        atoms.set_tags(tags)

    if 'InitialAtomicMagneticMoment' in nc.variables:
        mom = nc.variables['InitialAtomicMagneticMoment'][:]
        atoms.set_initial_magnetic_moments(mom)

    # update psp database
    for sym in symbols:
        vn = 'AtomProperty_%s' % sym
        if vn in nc.variables:
            var = nc.variables[vn]
            pspfile = var.PspotFile
            self.psp[sym] = pspfile

    # get constraints if they exist; they are stored as a pickled
    # string attribute on the 'constraints' variable
    c = nc.variables.get('constraints', None)
    if c is not None:
        constraints = pickle.loads(c.data)
        atoms.set_constraint(constraints)

    nc.close()

    return atoms
def delete_ncattdimvar(self, ncf, ncattrs=None, ncdims=None, ncvars=None):
    '''
    helper function to delete attributes,
    dimensions and variables in a netcdffile

    this functionality is not implemented for some reason in
    netcdf, so the only way to do this is to copy all the
    attributes, dimensions, and variables to a new file, excluding
    the ones you want to delete and then rename the new file.

    if you delete a dimension, all variables with that dimension
    are also deleted.

    :Parameters:

      ncf : string
        name of the netcdf file to modify

      ncattrs : list of strings
        global attributes to delete

      ncdims : list of strings
        dimensions to delete (variables using them are deleted too)

      ncvars : list of strings
        variables to delete
    '''
    if ncattrs is None:
        ncattrs = []
    if ncdims is None:
        ncdims = []
    # work on a copy: this function appends dimension-dependent
    # variable names below and must not mutate the caller's list
    ncvars = [] if ncvars is None else list(ncvars)

    log.debug('beginning: going to delete dims: %s' % str(ncdims))
    log.debug('beginning: going to delete vars: %s' % str(ncvars))

    oldnc = netCDF(ncf, 'r')

    tempnc = ncf + '.temp'

    newnc = netCDF(tempnc, 'w')

    # copy global attributes, skipping the file-object methods and
    # the attributes scheduled for deletion
    for attr in dir(oldnc):
        if attr in ['close', 'createDimension',
                    'createVariable', 'flush', 'sync']:
            continue
        if attr in ncattrs:
            continue  # do not copy this attribute
        setattr(newnc, attr, getattr(oldnc, attr))

    # copy dimensions
    for dim in oldnc.dimensions:
        if dim in ncdims:
            log.debug('deleting %s of %s' % (dim, str(ncdims)))
            continue  # do not copy this dimension
        size = oldnc.dimensions[dim]
        newnc.createDimension(dim, size)

    # we need to delete all variables that depended on a deleted dimension
    for v in oldnc.variables:
        dims1 = oldnc.variables[v].dimensions
        for dim in ncdims:
            if dim in dims1:
                s = 'deleting "%s" because it depends on dim "%s"'
                log.debug(s % (v, dim))
                ncvars.append(v)

    # copy variables, except the ones to delete
    for v in oldnc.variables:
        if v in ncvars:
            log.debug('vars to delete: %s ' % ncvars)
            log.debug('deleting ncvar: %s' % v)
            continue  # we do not copy this v over

        ncvar = oldnc.variables[v]
        tcode = ncvar.typecode()
        # char typecodes do not come out right apparently
        if tcode == " ":
            tcode = 'c'

        ncvar2 = newnc.createVariable(v, tcode, ncvar.dimensions)
        try:
            ncvar2[:] = ncvar[:]
        except TypeError:
            # this exception occurs for scalar variables
            # use getValue and assignValue instead
            ncvar2.assignValue(ncvar.getValue())

        # and variable attributes, skipping the variable-object
        # methods
        for att in dir(ncvar):
            if att in ['assignValue', 'getValue', 'typecode']:
                continue
            setattr(ncvar2, att, getattr(ncvar, att))

    oldnc.close()
    newnc.close()

    s = 'looking for .nfs files before copying: %s'
    log.debug(s % glob.glob('.nfs*'))

    # shutil.move and os.close of a mkstemp handle were found to
    # leave .nfsxxx files behind; cp + rm via the shell seems to
    # avoid making them
    os.system("cp '%s' '%s'" % (tempnc, ncf))
    os.system("rm '%s'" % tempnc)

    s = 'looking for .nfs files after copying: %s'
    log.debug(s % glob.glob('.nfs*'))
def restart(self):
    '''
    Restart the calculator by deleting nc dimensions that will
    be rewritten on the next calculation. This is sometimes required
    when certain dimensions change related to unitcell size changes
    planewave/densitywave cutoffs and kpt changes. These can cause
    fortran netcdf errors if the data does not match the pre-defined
    dimension sizes.

    also delete all the output from previous calculation.
    '''
    log.debug('restarting!')

    # dimensions whose sizes depend on the cell, cutoffs or kpts
    # and must be recreated by the next run
    stale_dims = ['number_plane_waves', 'number_IBZ_kpoints',
                  'softgrid_dim1', 'softgrid_dim2', 'softgrid_dim3',
                  'hardgrid_dim1', 'hardgrid_dim2', 'hardgrid_dim3',
                  'max_projectors_per_atom', 'atomdos_energygrid_size',
                  'atomdos_angular_channels', 'atomdos_radial_orbs']

    # output variables from the previous calculation
    stale_vars = ['TotalEnergy', 'TotalFreeEnergy',
                  'EvaluateTotalEnergy', 'DynamicAtomForces',
                  'FermiLevel', 'EnsembleXCEnergies',
                  'AtomProjectedDOS_IntegratedDOS',
                  'AtomProjectedDOS_OrdinalMap',
                  'NumberPlaneWavesKpoint',
                  'AtomProjectedDOS_EnergyResolvedDOS',
                  'AtomProjectedDOS_EnergyGrid',
                  'EvaluateCorrelationEnergy', 'DynamicAtomVelocities',
                  'KpointWeight', 'EvaluateExchangeEnergy',
                  'EffectivePotential', 'TotalStress',
                  'ChargeDensity', 'WaveFunction',
                  'WaveFunctionFFTindex', 'NumberOfNLProjectors',
                  'NLProjectorPsi', 'TypeNLProjector1',
                  'NumberofNLProjectors', 'PartialCoreDensity',
                  'ChargeDensity', 'ElectrostaticPotential',
                  'StructureFactor', 'EigenValues',
                  'OccupationNumbers']

    self.delete_ncattdimvar(self.nc,
                            ncattrs=[],
                            ncdims=stale_dims,
                            ncvars=stale_vars)

    self.set_status('new')
    self.ready = False
def get_convergence(self):
    'return convergence settings for Dacapo'
    # (ncvar attribute, dictionary key) pairs; values are stored as
    # one-element arrays, hence the [0]
    attr_map = [('AbsoluteEnergyConvergence', 'energy'),
                ('DensityConvergence', 'density'),
                ('OccupationConvergence', 'occupation'),
                ('MaxNumberOfSteps', 'maxsteps'),
                ('CPUTimeLimit', 'cputime')]

    ncf = netCDF(self.get_nc(), 'r')
    if 'ConvergenceControl' in ncf.variables:
        var = ncf.variables['ConvergenceControl']
        convergence = {}
        for attr, key in attr_map:
            if hasattr(var, attr):
                convergence[key] = getattr(var, attr)[0]
    else:
        convergence = None
    ncf.close()
    return convergence
def set_convergence(self,
                    energy=0.00001,
                    density=0.0001,
                    occupation=0.001,
                    maxsteps=None,
                    maxtime=None
                    ):
    '''set convergence criteria for stopping the dacapo calculator.

    :Parameters:

      energy : float
        set total energy change (eV) required for stopping

      density : float
        set density change required for stopping

      occupation : float
        set occupation change required for stopping

      maxsteps : integer
        specify maximum number of steps to take

      maxtime : integer
        specify maximum number of hours to run.

    Autopilot not supported here.
    '''
    ncf = netCDF(self.get_nc(), 'a')
    if 'ConvergenceControl' in ncf.variables:
        var = ncf.variables['ConvergenceControl']
    else:
        var = ncf.createVariable('ConvergenceControl', 'c', ('dim1',))

    # only store the criteria that were actually given
    for attr, value in (('AbsoluteEnergyConvergence', energy),
                        ('DensityConvergence', density),
                        ('OccupationConvergence', occupation),
                        ('MaxNumberOfSteps', maxsteps),
                        ('CPUTimeLimit', maxtime)):
        if value is not None:
            setattr(var, attr, value)

    ncf.sync()
    ncf.close()
def get_charge_mixing(self):
    'return charge mixing parameters'
    # (ncvar attribute, dict key, stored-as-one-element-array?)
    fields = [('Method', 'method', False),
              ('UpdateCharge', 'updatecharge', False),
              ('Pulay_MixingHistory', 'mixinghistory', True),
              ('Pulay_DensityMixingCoeff', 'mixingcoeff', True),
              ('Pulay_KerkerPrecondition', 'precondition', False)]

    ncf = netCDF(self.get_nc(), 'r')
    if 'ChargeMixing' in ncf.variables:
        var = ncf.variables['ChargeMixing']
        charge_mixing = {}
        for attr, key, indexed in fields:
            if hasattr(var, attr):
                value = getattr(var, attr)
                charge_mixing[key] = value[0] if indexed else value
    else:
        charge_mixing = None
    ncf.close()
    return charge_mixing
def set_charge_mixing(self,
                      method='Pulay',
                      mixinghistory=10,
                      mixingcoeff=0.1,
                      precondition='No',
                      updatecharge='Yes'):
    '''set density mixing method and parameters

    :Parameters:

      method : string
        'Pulay' for Pulay mixing. only one supported now; other
        values store nothing.

      mixinghistory : integer
        number of iterations to mix.
        Number of charge residual vectors stored for generating
        the Pulay estimate on the self-consistent charge density,
        see Sec. 4.2 in Kresse/Furthmuller:
        Comp. Mat. Sci. 6 (1996) p34ff

      mixingcoeff : float
        Mixing coefficient for Pulay charge mixing, corresponding
        to A in G$^1$ in Sec. 4.2 in Kresse/Furthmuller:
        Comp. Mat. Sci. 6 (1996) p34ff

      precondition : string
        'Yes' or 'No'

        * "Yes" : Kerker preconditiong is used, i.e. q$_0$ is
          different from zero, see eq. 82 in Kresse/Furthmuller:
          Comp. Mat. Sci. 6 (1996). The value of q$_0$ is fix to
          give a damping of 20 of the lowest q vector.

        * "No" : q$_0$ is zero and mixing is linear (default).

      updatecharge : string
        'Yes' or 'No'

        * "Yes" : Perform charge mixing according to
          ChargeMixing:Method setting

        * "No" : Freeze charge to initial value.
          This setting is useful when evaluating the
          Harris-Foulkes density functional
    '''
    if method == 'Pulay':
        ncf = netCDF(self.get_nc(), 'a')
        if 'ChargeMixing' in ncf.variables:
            var = ncf.variables['ChargeMixing']
        else:
            var = ncf.createVariable('ChargeMixing', 'c', ('dim1',))

        var.Method = 'Pulay'
        var.UpdateCharge = updatecharge
        var.Pulay_MixingHistory = mixinghistory
        var.Pulay_DensityMixingCoeff = mixingcoeff
        var.Pulay_KerkerPrecondition = precondition

        ncf.sync()
        ncf.close()

    self.ready = False
def set_electronic_minimization(self,
                                method='eigsolve',
                                diagsperband=2):
    '''set the eigensolver method

    Selector for which subroutine to use for electronic
    minimization

    :Parameters:

      method : string
        one of:

        * "resmin"   : Power method (Lennart Bengtson), can only
          handle k-point parallization.
        * "eigsolve" : Block Davidson algorithm
          (Claus Bendtsen et al).
        * "rmm-diis" : Residual minimization method (RMM), using
          DIIS (direct inversion in the iterate subspace).  The
          implementaion follows closely the algorithm outlined in
          Kresse and Furthmuller, Comp. Mat. Sci, III.G/III.H

      diagsperband : int
        The number of diagonalizations per band for electronic
        minimization algorithms (maps onto internal variable
        ndiapb). Applies for both "resmin" and "eigsolve".
        default value = 2
    '''
    ncf = netCDF(self.get_nc(), 'a')
    vname = 'ElectronicMinimization'
    if vname in ncf.variables:
        var = ncf.variables[vname]
    else:
        log.debug('Creating ElectronicMinimization')
        var = ncf.createVariable(vname, 'c', ('dim1',))

    log.debug('setting method for ElectronicMinimization: % s' % method)
    var.Method = method
    log.debug('setting DiagonalizationsBand for ElectronicMinimization')
    if diagsperband is not None:
        var.DiagonalizationsPerBand = diagsperband

    log.debug('synchronizing ncfile')
    ncf.sync()
    ncf.close()
def get_electronic_minimization(self):
    '''get method and diagonalizations per band for electronic
    minimization algorithms

    returns a dictionary with keys 'method' and 'diagsperband';
    both values are None when nothing is stored in the ncfile.
    '''
    log.debug('getting electronic minimization parameters')

    method, diagsperband = None, None
    ncf = netCDF(self.get_nc(), 'r')
    var = ncf.variables.get('ElectronicMinimization', None)
    if var is not None:
        method = var.Method
        if hasattr(var, 'DiagonalizationsPerBand'):
            diagsperband = var.DiagonalizationsPerBand[0]
    ncf.close()
    return {'method': method,
            'diagsperband': diagsperband}
def get_occupationstatistics(self):
    'return occupation statistics method'
    ncf = netCDF(self.get_nc(), 'r')
    var = ncf.variables.get('ElectronicBands', None)
    if var is None:
        # no bands variable stored at all
        occstat = None
    else:
        # the attribute may be absent on older files
        occstat = getattr(var, 'OccupationStatistics', None)
    ncf.close()
    return occstat
def set_occupationstatistics(self, method):
    '''
    set the method used for smearing the occupations.

    :Parameters:

      method : string
        one of 'FermiDirac' or 'MethfesselPaxton'
        Currently, the Methfessel-Paxton scheme (PRB 40, 3616 (1989).)
        is implemented to 1th order (which is recommemded by most
        authors). 'FermiDirac' is the default

    Silently does nothing when no ElectronicBands variable exists
    in the ncfile yet.
    '''
    ncf = netCDF(self.get_nc(), 'a')
    if 'ElectronicBands' in ncf.variables:
        ncf.variables['ElectronicBands'].OccupationStatistics = method
    ncf.sync()
    ncf.close()
def get_fermi_level(self):
    """Return the Fermi level, running the calculation first if needed."""
    if self.calculation_required():
        self.calculate()
    handle = netCDF(self.get_nc(), 'r')
    fermi = handle.variables['FermiLevel'][-1]
    handle.close()
    return fermi
def get_occupation_numbers(self, kpt=0, spin=0):
    """Return occupancies of the eigenstates for one IBZ k-point and spin.

    :Parameters:
      kpt : integer
        index of the IBZ k-point you want the occupation of
      spin : integer
        0 or 1
    """
    if self.calculation_required():
        self.calculate()
    handle = netCDF(self.get_nc(), 'r')
    # last entry along the first axis is the most recent iteration
    occupations = handle.variables['OccupationNumbers'][:][-1][kpt, spin]
    handle.close()
    return occupations
def get_xc_energies(self, *functional):
    """Return total energies evaluated with different XC functionals,
    self-consistent and non-self-consistent.

    :Parameters:
      functional : strings
        some set of 'PZ', 'VWN', 'PW91', 'PBE', 'revPBE', 'RPBE'

    With no arguments all stored energies are returned, in file order;
    otherwise a list of energies in the order asked for.  The different
    energies may be useful for calculating improved adsorption energies
    as in B. Hammer, L.B. Hansen and J.K. Norskov, Phys. Rev. B 59, 7413.

    Examples:
      get_xc_energies()                       # all energies
      get_xc_energies('PBE')                  # the PBE total energy
      get_xc_energies('PW91', 'PBE', 'revPBE')
    """
    if self.calculation_required():
        self.calculate()
    handle = netCDF(self.get_nc(), 'r')
    funcenergies = handle.variables['EvaluateTotalEnergy'][:][-1]
    funcnames = handle.variables['EvalFunctionalOfDensity_XC'][:]
    handle.close()
    names = [name.tostring().strip() for name in funcnames]
    energy_of = dict(zip(names, funcenergies))
    if not functional:
        # all energies by default
        functional = names
    return [energy_of[name] for name in functional]
# break of compatibility
def get_ados_data(self,
                  atoms,
                  orbitals,
                  cutoff,
                  spin):
    '''get atom-projected DOS data

    :Parameters:
      atoms
        list of atom indices (integers)
      orbitals
        list of strings, e.g.
        ['s', 'p', 'd'],
        ['p_x', 'p_y', 'p_z'],
        ['d_zz', 'dxx-yy', 'd_xy', 'd_xz', 'd_yz']
      cutoff : string
        cutoff radius you want the results for, 'short' or 'infinite'
      spin : list of integers
        spin you want the results for: [0] or [1] or [0, 1] for both

    returns (egrid, ados); egrid has the Fermi level at 0 eV.
    '''
    if self.calculation_required():
        self.calculate()
    nc = netCDF(self.get_nc(), 'r')
    omapvar = nc.variables['AtomProjectedDOS_OrdinalMap']
    omap = omapvar[:]  # indices into the projected-DOS array
    # channel names, e.g. 's', 'p_x', ...
    channels = [x.strip() for x in omapvar.AngularChannels.split(',')]
    # this has dimensions (nprojections, nspins, npoints)
    ados = nc.variables['AtomProjectedDOS_EnergyResolvedDOS'][:]
    # energy grid shared by all the atoms; the data is already for Ef = 0,
    # so no normalization to the Fermi level is necessary
    egrid = nc.variables['AtomProjectedDOS_EnergyGrid'][:]
    nc.close()
    # expand the shorthands 'p' and 'd' into their individual channels
    orbs = []
    for o in orbitals:
        if o == 'p':
            orbs += ['p_x', 'p_y', 'p_z']
        elif o == 'd':
            orbs += ['d_zz', 'dxx-yy', 'd_xy', 'd_xz', 'd_yz']
        else:
            orbs += [o]
    orbinds = [channels.index(x) for x in orbs]
    cutdict = {'infinite': 0,
               'short': 1}
    icut = cutdict[cutoff]
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented equivalent
    ydata = np.zeros(len(egrid), float)
    # accumulate the requested projections
    for atomind in atoms:
        for oi in orbinds:
            ind = omap[atomind, icut, oi]
            for si in spin:
                ydata += ados[ind, si]
    return (egrid, ydata)
def get_all_eigenvalues(self, spin=0):
    """Return the eigenvalues at every IBZ k-point for one spin.

    :Parameters:
      spin : integer
        which spin the eigenvalues are for
    """
    if self.calculation_required():
        self.calculate()
    handle = netCDF(self.get_nc(), 'r')
    eigenvalues = handle.variables['EigenValues'][:][-1][:, spin]
    handle.close()
    return eigenvalues
def get_eigenvalues(self, kpt=0, spin=0):
    """Return the eigenvalues for one IBZ k-point and spin.

    :Parameters:
      kpt : integer
        index of the IBZ k-point
      spin : integer
        which spin the eigenvalues are for
    """
    if self.calculation_required():
        self.calculate()
    handle = netCDF(self.get_nc(), 'r')
    eigenvalues = handle.variables['EigenValues'][:][-1][kpt, spin]
    handle.close()
    return eigenvalues
def get_k_point_weights(self):
    """Return the weights of the IBZ k-points."""
    if self.calculation_required():
        self.calculate()
    handle = netCDF(self.get_nc(), 'r')
    weights = handle.variables['KpointWeight'][:]
    handle.close()
    return weights
def get_magnetic_moment(self, atoms=None):
    """Return the magnetic moment (Bohr magnetons) of the supercell.

    Returns None for a non-spin-polarized calculation.  The ``atoms``
    argument is accepted for ASE interface compatibility but is not
    used by the computation.
    """
    if not self.get_spin_polarized():
        return None
    if self.calculation_required():
        self.calculate()
    nkpts = len(self.get_ibz_kpoints())
    weights = self.get_k_point_weights()
    spin_up = 0.0
    spin_dn = 0.0
    # weighted sum of occupations over the irreducible k-points
    for k in range(nkpts):
        spin_up += self.get_occupation_numbers(k, 0).sum() * weights[k]
        spin_dn += self.get_occupation_numbers(k, 1).sum() * weights[k]
    return spin_up - spin_dn
def get_number_of_spins(self):
    """Return 2 for a spin-polarized calculation, otherwise 1.

    BUG FIX: the original read ``spv.SpinPolarization`` *after*
    ``nc.close()``; accessing a variable of a closed netCDF file is
    undefined behavior in some netCDF bindings.  The attribute is now
    read while the file is still open.
    """
    if self.calculation_required():
        self.calculate()
    nc = netCDF(self.get_nc(), 'r')
    bands = nc.variables['ElectronicBands']
    # default of 1 matches the original fallback when the attribute is absent
    nspins = getattr(bands, 'SpinPolarization', 1)
    nc.close()
    return nspins
def get_ibz_kpoints(self):
    """Return the k-points of the irreducible Brillouin zone."""
    if self.calculation_required():
        self.calculate()
    handle = netCDF(self.get_nc(), 'r')
    kpoints = handle.variables['IBZKpoints'][:]
    handle.close()
    return kpoints

# ASE-style alias
get_ibz_k_points = get_ibz_kpoints
def get_bz_k_points(self):
    """Return the k-points of the full Brillouin zone, or None if absent."""
    handle = netCDF(self.get_nc(), 'r')
    if 'BZKpoints' in handle.variables:
        kpoints = handle.variables['BZKpoints'][:]
    else:
        kpoints = None
    handle.close()
    return kpoints
def get_effective_potential(self, spin=1):
    """Return (x, y, z, potential): the realspace local effective
    potential for one spin channel, in eV, on the hard FFT grid.

    :Parameters:
      spin : integer
        specify which spin you want, 0 or 1.
        NOTE(review): the default of 1 differs from the spin=0 default
        used by the other getters in this class -- confirm intended.
    """
    if self.calculation_required():
        self.calculate()
    handle = netCDF(self.get_nc(), 'r')
    potential = np.transpose(handle.variables['EffectivePotential'][:][spin])
    handle.close()
    hardgrid = self.get_fftgrid()['hard']
    x, y, z = self.get_ucgrid(hardgrid)
    return (x, y, z, potential)
def get_electrostatic_potential(self, spin=0):
    """Return (x, y, z, esp): the realspace electrostatic potential
    for one spin channel, sampled on the hard FFT grid.

    Netcdf documentation::

      double ElectrostaticPotential(number_of_spin,
                                    hardgrid_dim3,
                                    hardgrid_dim2,
                                    hardgrid_dim1) ;
        ElectrostaticPotential:
          Description = "realspace local effective potential" ;
          unit = "eV" ;
    """
    if self.calculation_required():
        self.calculate()
    handle = netCDF(self.get_nc(), 'r')
    esp = np.transpose(handle.variables['ElectrostaticPotential'][:][spin])
    handle.close()
    x, y, z = self.get_ucgrid(self.get_fftgrid()['hard'])
    return (x, y, z, esp)
def get_charge_density(self, spin=0):
    """Return x, y, z, cd: the realspace charge density.

    x, y, z are grids sampling the unit cell; cd is the density data.

    netcdf documentation::

      ChargeDensity(number_of_spin,
                    hardgrid_dim3,
                    hardgrid_dim2,
                    hardgrid_dim1)
        ChargeDensity:Description = "realspace charge density" ;
        ChargeDensity:unit = "-e/A^3" ;
    """
    if self.calculation_required():
        self.calculate()
    handle = netCDF(self.get_nc(), 'r')
    density = np.transpose(handle.variables['ChargeDensity'][:][spin])
    handle.close()
    # dividing by the cell volume gives units of electrons/Ang**3
    density /= self.get_atoms().get_volume()
    x, y, z = self.get_ucgrid(self.get_fftgrid()['hard'])
    return x, y, z, density
def get_ucgrid(self, dims):
    """Return (X, Y, Z) cartesian grids uniformly sampling the unit cell.

    dims = (n0, n1, n2):
      n0 points along unit-cell vector 0,
      n1 points along unit-cell vector 1,
      n2 points along unit-cell vector 2.
    """
    n0, n1, n2 = dims
    # fractional coordinates of all grid points
    X, Y, Z = np.mgrid[0.0:1.0:1.0 / n0,
                       0.0:1.0:1.0 / n1,
                       0.0:1.0:1.0 / n2]
    fracs = np.column_stack([X.ravel(),
                             Y.ravel(),
                             Z.ravel()])
    cell = self.get_atoms().get_cell()
    # fractional -> cartesian
    cart = np.dot(fracs, cell)
    shape = (n0, n1, n2)
    return (np.reshape(cart[:, 0], shape),
            np.reshape(cart[:, 1], shape),
            np.reshape(cart[:, 2], shape))
def get_number_of_grid_points(self):
    """Return the soft FFT grid as an array (needed by ase.dft.wannier)."""
    return np.array(self.get_fftgrid()['soft'])
def get_wannier_localization_matrix(self, nbands, dirG, kpoint,
                                    nextkpoint, G_I, spin):
    'return wannier localization matrix'
    # make sure results exist before they are read below
    if self.calculation_required():
        self.calculate()
    # lazily create and cache a Wannier helper on this instance
    if not hasattr(self, 'wannier'):
        from utils.wannier import Wannier
        self.wannier = Wannier(self)
        self.wannier.set_bands(nbands)
        self.wannier.set_spin(spin)
    # NOTE(review): nbands/spin are only applied when the cached helper
    # is first created; later calls with different values silently reuse
    # the old settings -- confirm this is intended.
    locmat = self.wannier.get_zi_bloch_matrix(dirG,
                                              kpoint,
                                              nextkpoint,
                                              G_I)
    return locmat
def initial_wannier(self,
                    initialwannier,
                    kpointgrid,
                    fixedstates,
                    edf,
                    spin):
    'return initial wannier'
    if self.calculation_required():
        self.calculate()
    # lazily create and cache the Wannier helper on this instance
    if not hasattr(self, 'wannier'):
        from utils.wannier import Wannier
        self.wannier = Wannier(self)
    self.wannier.set_data(initialwannier)
    self.wannier.set_k_point_grid(kpointgrid)
    self.wannier.set_spin(spin)
    # reciprocal Bloch functions for every band at every IBZ k-point
    waves = [[self.get_reciprocal_bloch_function(band=band,
                                                 kpt=kpt,
                                                 spin=spin)
              for band in range(self.get_nbands())]
             for kpt in range(len(self.get_ibz_k_points()))]
    self.wannier.setup_m_matrix(waves, self.get_bz_k_points())
    # lfn keeps the line length below 78 characters
    lfn = self.wannier.get_list_of_coefficients_and_rotation_matrices
    c, U = lfn((self.get_nbands(), fixedstates, edf))
    U = np.array(U)
    # convert each coefficient block to an ndarray as well
    for k in range(len(c)):
        c[k] = np.array(c[k])
    return c, U
def get_dipole_moment(self,atoms=None):
    '''
    return dipole moment of unit cell

    Defined by the vector connecting the center of electron charge
    density to the center of nuclear charge density.

    Units = eV*angstrom

    1 Debye = 0.208194 eV*angstrom

    :Parameters:
      atoms : optional
        defaults to the calculator's own atoms when None.
    '''
    if self.calculation_required():
        self.calculate()
    if atoms is None:
        atoms = self.get_atoms()
    #center of electron charge density
    x, y, z, cd = self.get_charge_density()
    n1, n2, n3 = cd.shape
    nelements = n1*n2*n3
    voxel_volume = atoms.get_volume()/nelements
    # cd is in -e/Ang^3, so the integral over voxels is negative
    total_electron_charge = -cd.sum()*voxel_volume
    # density-weighted average position = center of electron density
    electron_density_center = np.array([(cd*x).sum(),
                                        (cd*y).sum(),
                                        (cd*z).sum()])
    electron_density_center *= voxel_volume
    electron_density_center /= total_electron_charge
    electron_dipole_moment = electron_density_center*total_electron_charge
    electron_dipole_moment *= -1.0 #we need the - here so the two
                                   #negatives don't cancel
    # now the ion charge center
    psps = self.get_pseudopotentials()['pspdict']
    ion_charge_center = np.array([0.0, 0.0, 0.0])
    total_ion_charge = 0.0
    for atom in atoms:
        # valence charge of this species from its pseudopotential
        Z = self.get_psp_nuclear_charge(psps[atom.symbol])
        total_ion_charge += Z
        pos = atom.position
        ion_charge_center += Z*pos
    ion_charge_center /= total_ion_charge
    ion_dipole_moment = ion_charge_center*total_ion_charge
    dipole_vector = (ion_dipole_moment + electron_dipole_moment)
    return dipole_vector
def get_reciprocal_bloch_function(self, band=0, kpt=0, spin=0):
    '''return the reciprocal bloch function. Needed for the Jacapo
    Wannier class.

    :Parameters:
      band, kpt, spin : integers
        which state to return
    '''
    if self.calculation_required():
        self.calculate()
    nc = netCDF(self.get_nc(), 'r')
    # read reciprocal bloch function
    npw = nc.variables['NumberPlaneWavesKpoint'][:]
    bf = nc.variables['WaveFunction'][kpt, spin, band]
    # BUG FIX: np.complex was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin complex is the documented equivalent
    wflist = np.zeros(npw[kpt], complex)
    # the file stores (imag, real) pairs in columns 0 and 1
    wflist.real = bf[0:npw[kpt], 1]
    wflist.imag = bf[0:npw[kpt], 0]
    nc.close()
    return wflist
def get_reciprocal_fft_index(self, kpt=0):
    """Return the wave-function FFT index for one k-point."""
    handle = netCDF(self.get_nc(), 'r')
    fft_index = handle.variables['WaveFunctionFFTindex'][kpt, :, :]
    handle.close()
    return fft_index
def get_ensemble_coefficients(self):
    '''returns exchange-correlation ensemble coefficients

    Adapted from the old ASE dacapo interface
    (GetEnsembleCoefficients).  Combines the self-consistent total
    energy with the EnsembleXCEnergies stored in the nc file.
    '''
    if self.calculation_required():
        self.calculate()
    E = self.get_potential_energy()
    nc = netCDF(self.get_nc(), 'r')
    if 'EnsembleXCEnergies' in nc.variables:
        xc = nc.variables['EnsembleXCEnergies'][:]
        EXC = xc[0]
    if 'EvaluateCorrelationEnergy' in nc.variables:
        exc_c = nc.variables['EvaluateCorrelationEnergy'][:]
    if 'EvaluateExchangeEnergy' in nc.variables:
        exc_e = nc.variables['EvaluateExchangeEnergy'][:]
    # BUG FIX: the read-only handle was never closed in the original
    nc.close()
    exc = exc_c + exc_e
    # BUG FIX: the original tested "self.get_xc == 'RPBE'", which
    # compares the bound method object itself to a string and is
    # therefore always False; the method must be *called*.
    if self.get_xc() == 'RPBE':
        EXC = exc[-1][-1]
    E0 = xc[1]     # Fx = 0
    diff0 = xc[2]  # - Exc
    diff1 = xc[3]  # - Exc
    diff2 = xc[4]  # - Exc
    coefs = (E + E0 - EXC, diff0 - E0, diff1 - E0, diff2 - E0)
    log.info('ensemble: (%.9f, %.9f, %.9f, %.9f)' % coefs)
    return np.array(coefs)
def get_pseudo_wave_function(self, band=0, kpt=0, spin=0, pad=True):
    '''return the pseudo wavefunction on the soft FFT grid.

    :Parameters:
      band, kpt, spin : integers
        which state to return
      pad : boolean
        accepted for ASE interface compatibility; it has no effect here.

    BUG FIX: replaced ``xrange``, ``np.complex`` and
    ``np.array(map(...))`` with equivalents that also work on
    Python 3 and NumPy >= 1.24, where the originals are gone.
    '''
    if self.calculation_required():
        self.calculate()
    ibz = self.get_ibz_kpoints()
    # get the reciprocal bloch function coefficients
    wflist = self.get_reciprocal_bloch_function(band=band,
                                                kpt=kpt,
                                                spin=spin)
    recind = self.get_reciprocal_fft_index(kpt)
    grids = self.get_fftgrid()
    softgrid = grids['soft']
    # scatter plane-wave coefficients onto the reciprocal grid
    # (indices in the file are 1-based)
    wfrec = np.zeros(softgrid, complex)
    for i in range(len(wflist)):
        wfrec[recind[0, i] - 1,
              recind[1, i] - 1,
              recind[2, i] - 1] = wflist[i]
    # transform to real space, one axis at a time
    wf = wfrec.copy()
    dim = wf.shape
    for i in range(len(dim)):
        wf = np.fft.fft(wf, dim[i], axis=i)
    # now the phase function to get the bloch phase
    basis = self.get_atoms().get_cell()
    # coordinates of the relevant kpoint in cartesian coordinates
    kpoint = np.dot(ibz[kpt], basis)

    def phasefunction(coor):
        'Bloch phase factor exp(i k.r) at cartesian position coor'
        return np.exp(1.0j * np.dot(kpoint, coor))

    # Bloch phase at the origin (0, 0, 0) of the grid
    origin = np.array([0., 0., 0.])
    blochphase = phasefunction(origin)
    spatialshape = wf.shape[-len(basis):]
    # vector between neighboring grid points along each cell direction
    gridunitvectors = np.array([unitvector / shape
                                for unitvector, shape
                                in zip(basis, spatialshape)])
    for dim in range(len(spatialshape)):
        # phase step between neighboring points along this axis
        deltaphase = phasefunction(gridunitvectors[dim])
        # phase progression across the axis
        newphase = np.fromfunction(lambda i, phase=deltaphase: phase ** i,
                                   (spatialshape[dim],))
        blochphase = np.multiply.outer(blochphase, newphase)
    return blochphase * wf
def get_wave_function(self, band=0, kpt=0, spin=0):
    """Return (x, y, z, wf): the pseudo wave function divided by
    the square root of the cell volume, on the soft FFT grid."""
    psi = self.get_pseudo_wave_function(band=band,
                                        kpt=kpt,
                                        spin=spin,
                                        pad=True)
    volume = self.get_atoms().get_volume()
    soft = self.get_fftgrid()['soft']
    x, y, z = self.get_ucgrid(soft)
    return x, y, z, psi / np.sqrt(volume)
def strip(self):
    '''remove all large memory nc variables not needed for
    anything I use very often.
    '''
    # drop the bulky wavefunction, density and projector arrays; the
    # remaining file still holds the geometry and the summary results
    self.delete_ncattdimvar(self.nc,
                            ncdims=['max_projectors_per_atom'],
                            ncvars=['WaveFunction',
                                    'WaveFunctionFFTindex',
                                    'NumberOfNLProjectors',
                                    'NLProjectorPsi',
                                    'TypeNLProjector1',
                                    'NumberofNLProjectors',
                                    'PartialCoreDensity',
                                    'ChargeDensity',
                                    'ElectrostaticPotential',
                                    'StructureFactor'])
# Shortcut names: bind short ASE-style aliases to the canonical
# Jacapo getters so existing scripts keep working.
Jacapo.get_cd = Jacapo.get_charge_density
Jacapo.get_wf = Jacapo.get_wave_function
Jacapo.get_esp = Jacapo.get_electrostatic_potential
Jacapo.get_occ = Jacapo.get_occupation_numbers
Jacapo.get_ef = Jacapo.get_fermi_level
Jacapo.get_number_of_bands = Jacapo.get_nbands
Jacapo.get_electronic_temperature = Jacapo.get_ft
Jacapo.get_number_of_electrons = Jacapo.get_valence
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/calculators/jacapo/jacapo.py | Python | gpl-2.0 | 152,002 | [
"ASE",
"Gaussian",
"NetCDF"
] | 0d3fdbd26f988fda2d553befb65a95bcbbfe06a74dc45f5fe75e84f533aeaf4d |
"""Cluster based on RMSD between conformations
{{header}}
Meta
----
depends:
- meta.pandas.pickl
- trajs
- top.pdb
"""
import mdtraj as md
from msmbuilder.io import load_meta, itertrajs, save_trajs, preload_top
## Load
meta = load_meta()
centroids = md.load("centroids.xtc", top=preload_top(meta))
## Kernel
SIGMA = 0.3 # nm
from msmbuilder.featurizer import RMSDFeaturizer
import numpy as np
featurizer = RMSDFeaturizer(centroids)
lfeats = {}
for i, traj in itertrajs(meta):
lfeat = featurizer.partial_transform(traj)
lfeat = np.exp(-lfeat ** 2 / (2 * (SIGMA ** 2)))
lfeats[i] = lfeat
save_trajs(lfeats, 'ftrajs', meta)
| mpharrigan/mixtape | msmbuilder/project_templates/landmarks/featurize.py | Python | lgpl-2.1 | 646 | [
"MDTraj"
] | 4ab328bf627a061ce95799a94a1bbe1387cd4327eac0c6d499e29f4028cdc386 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from inspire_schemas.utils import load_schema, validate
from inspire_schemas.builders.references import (
ReferenceBuilder,
_split_refextract_authors_str,
)
def test_set_label():
    """set_label stores the label under 'reference'."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.set_label('Abe et al, 2008')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'label': 'Abe et al, 2008'}}]
def test_set_record():
    """set_record stores the $ref and marks the relation as uncurated."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    ref = {'$ref': 'http://localhost:5000/api/literature/1226234'}
    builder.set_record(ref)
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'curated_relation': False,
        'record': {'$ref': 'http://localhost:5000/api/literature/1226234'},
    }]
def test_curate():
    """curate flags the relation as curated."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.curate()
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'curated_relation': True}]
def test_set_texkey():
    """set_texkey stores the texkey under 'reference'."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.set_texkey('Aaij:2016qlz')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'texkey': 'Aaij:2016qlz'}}]
def test_add_title():
    """add_title nests the title in the expected structure."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_title('The CMS experiment at the CERN LHC')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'title': {'title': 'The CMS experiment at the CERN LHC'},
        },
    }]
def test_add_parent_title():
    """add_parent_title stores the title in publication_info."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_parent_title('Geom. Funct. Anal., GAFA 2000')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'publication_info': {
                'parent_title': 'Geom. Funct. Anal., GAFA 2000',
            },
        },
    }]
def test_add_misc():
    """add_misc appends to the misc list."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_misc('[Erratum:')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'misc': ['[Erratum:']}}]
def test_add_misc_with_dupes():
    """add_misc keeps duplicate entries."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_misc('[Erratum:')
    builder.add_misc('[Erratum:')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'misc': ['[Erratum:', '[Erratum:']}}]
def test_add_raw_reference_no_source():
    """add_raw_reference without a source records only schema and value."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_raw_reference('Phys. Rev. C 80 (doi:10.1103/'
                              'PhysRevC.80.044313)')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'raw_refs': [{
            'schema': 'text',
            'value': 'Phys. Rev. C 80 (doi:10.1103/PhysRevC.80.044313)',
        }],
    }]
def test_add_raw_reference_with_source():
    """add_raw_reference records the provided source."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_raw_reference('Phys. Rev. C 80 (doi:10.1103/'
                              'PhysRevC.80.044313)', 'arXiv')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'raw_refs': [{
            'schema': 'text',
            'source': 'arXiv',
            'value': 'Phys. Rev. C 80 (doi:10.1103/PhysRevC.80.044313)',
        }],
    }]
def test_set_year():
    """set_year accepts a plausible year."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.set_year(2017)
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'publication_info': {'year': 2017}}}]
def test_set_year_rejects_malformed_years():
    """A non-numeric year is silently ignored."""
    builder = ReferenceBuilder()
    builder.set_year('foobar')
    assert [builder.obj] == [{}]
def test_set_year_rejects_invalid_years():
    """Years outside the plausible range are silently ignored."""
    builder = ReferenceBuilder()
    builder.set_year(666)
    assert [builder.obj] == [{}]
    builder.set_year(2112)
    assert [builder.obj] == [{}]
def test_add_url():
    """add_url stores a well-formed URL as-is."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_url('http://www.muonsinc.com')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {'urls': [{'value': 'http://www.muonsinc.com'}]},
    }]
def test_add_url_uses_fix_url():
    """add_url prepends a scheme to schemeless URLs."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_url('www.muonsinc.com')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {'urls': [{'value': 'http://www.muonsinc.com'}]},
    }]
def test_add_url_adds_uid():
    """add_url recognizes DOIs (plain and as URL) and deduplicates them."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_url('10.1109/NSSMIC.2005.1596597')
    builder.add_url('https://doi.org/10.1109/NSSMIC.2005.1596597')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {'dois': ['10.1109/NSSMIC.2005.1596597']},
    }]
def test_add_refextract_author_str():
    """A refextract author string is split into individual authors."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_refextract_authors_str('S. Frixione, P. Nason, and C. Oleari')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'authors': [
                {'full_name': 'Frixione, S.'},
                {'full_name': 'Nason, P.'},
                {'full_name': 'Oleari, C.'},
            ],
        },
    }]
def test_add_refextract_authors_str_noninitials():
    """Full first names are preserved when splitting authors."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_refextract_authors_str(
        'Igor R. Klebanov and Juan Martin Maldacena'
    )
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'authors': [
                {'full_name': 'Klebanov, Igor R.'},
                {'full_name': 'Maldacena, Juan Martin'},
            ],
        },
    }]
def test_add_refextract_authors_str_discards_et_al():
    """A trailing 'et al.' is not treated as an author."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_refextract_authors_str(
        'S. B. Cenko, M. M. Kasliwal, D. A. Perley et al.'
    )
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'authors': [
                {'full_name': 'Cenko, S.B.'},
                {'full_name': 'Kasliwal, M.M.'},
                {'full_name': 'Perley, D.A.'},
            ],
        },
    }]
def test_add_refextract_authors_str_unicode():
    """Non-ASCII author names survive the split."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_refextract_authors_str(u'Kätlne, J.')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {'authors': [{'full_name': u'Kätlne, J.'}]},
    }]
def test_add_author():
    """add_author stores the full name."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_author('Cox, Brian')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {'authors': [{'full_name': 'Cox, Brian'}]},
    }]
def test_add_author_handles_inspire_role():
    """The 'ed.' role marker is mapped to inspire_role 'editor'."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_author("O'Brian, Dara", 'ed.')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'authors': [{
                'full_name': "O'Brian, Dara",
                'inspire_role': 'editor',
            }],
        },
    }]
def test_set_pubnote():
    """A valid pubnote is parsed into publication_info fields."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.set_pubnote('Nucl.Phys.,B360,362')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'publication_info': {
                'artid': '362',
                'journal_title': 'Nucl.Phys.B',
                'journal_volume': '360',
                'page_start': '362',
            },
        },
    }]
def test_set_pubnote_falls_back_to_misc():
    """An unparsable pubnote is kept verbatim in misc."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.set_pubnote('not-a-valid-pubnote')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'misc': ['not-a-valid-pubnote']}}]
def test_set_pubnote_does_not_overwrite_pubnote():
    """A second pubnote is diverted to misc instead of overwriting."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.set_pubnote('Phys.Rev.,D43,124-156')
    builder.set_pubnote(',12,18')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'publication_info': {
                'journal_title': 'Phys.Rev.D',
                'journal_volume': '43',
                'page_start': '124',
                'page_end': '156',
            },
            'misc': ['Additional pubnote: ,12,18'],
        },
    }]
def test_set_pubnote_puts_incomplete_pubnote_in_misc():
    """An incomplete pubnote is kept verbatim in misc."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.set_pubnote('Phys.Rev.,D43,')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'misc': ['Phys.Rev.,D43,']}}]
def test_set_publisher():
    """set_publisher stores the publisher under imprint."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.set_publisher('Elsevier')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'imprint': {'publisher': 'Elsevier'}}}]
def test_set_imprint_date():
    """set_imprint_date normalizes the date to ISO format."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.set_imprint_date('23/12/2015')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'imprint': {'date': '2015-12-23'}}}]
def test_set_imprint_place():
    """set_imprint_place stores the place under imprint."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.set_imprint_place('New York')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'imprint': {'place': 'New York'}}}]
def test_add_report_number_handles_several_report_numbers():
    """Multiple report numbers accumulate in order."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_report_number('CMS-B2G-17-001')
    builder.add_report_number('CERN-EP-2017-184')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'report_numbers': ['CMS-B2G-17-001', 'CERN-EP-2017-184'],
        },
    }]
def test_add_report_number_handles_arxiv_ids():
    """An arXiv identifier is stored as arxiv_eprint, not a report number."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_report_number('hep-th/0603001')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'arxiv_eprint': 'hep-th/0603001'}}]
def test_add_uid_handles_arxiv_ids():
    """add_uid recognizes arXiv identifiers."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_uid('hep-th/0603001')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'arxiv_eprint': 'hep-th/0603001'}}]
def test_add_uid_handles_dois():
    """add_uid extracts the DOI from a dx.doi.org URL."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_uid('http://dx.doi.org/10.3972/water973.0145.db')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {'dois': ['10.3972/water973.0145.db']},
    }]
def test_add_uid_handles_handles():
    """add_uid stores hdl: identifiers as HDL persistent identifiers."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_uid('hdl:10443/1646')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'persistent_identifiers': [
                {'schema': 'HDL', 'value': '10443/1646'},
            ],
        },
    }]
def test_add_uid_handles_cnums():
    """add_uid recognizes conference numbers (cnums)."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_uid('C87-11-11')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {'publication_info': {'cnum': 'C87-11-11'}},
    }]
def test_add_url_handles_ads_ids():
    """An ADS abstract URL becomes an ADS external system identifier."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_url('http://adsabs.harvard.edu/abs/2018ApJ...853...70U')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'external_system_identifiers': [
                {'schema': 'ADS', 'value': '2018ApJ...853...70U'},
            ],
        },
    }]
def test_add_url_handles_cds_ids():
    """CDS record URLs are deduplicated into one CDS identifier."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_url('http://cds.cern.ch/record/2310556?ln=en')
    builder.add_url('http://cds.cern.ch/record/2310556?ln=fr')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{
        'reference': {
            'external_system_identifiers': [
                {'schema': 'CDS', 'value': '2310556'},
            ],
        },
    }]
def test_add_uid_falls_back_to_isbn():
    """An ISBN-10 is normalized to its ISBN-13 form."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_uid('1449344852')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'isbn': '9781449344856'}}]
def test_add_uid_rejects_invalid_isbns():
    """An unrecognizable uid ends up in misc."""
    subschema = load_schema('hep')['properties']['references']
    builder = ReferenceBuilder()
    builder.add_uid('123456789')
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert result == [{'reference': {'misc': ['123456789']}}]
def test_add_collaboration():
    """add_collaboration() stores the name under reference.collaborations."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.add_collaboration('ALICE')
    expected = [
        {
            'reference': {
                'collaborations': [
                    'ALICE',
                ],
            },
        },
    ]
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert expected == result
def test_set_journal_title():
    """set_journal_title() fills publication_info.journal_title."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.set_journal_title('Phys. Rev. D')
    expected = [
        {
            'reference': {
                'publication_info': {
                    'journal_title': 'Phys. Rev. D'
                },
            },
        },
    ]
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert expected == result
def test_set_journal_issue():
    """set_journal_issue() fills publication_info.journal_issue."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.set_journal_issue('12')
    expected = [
        {
            'reference': {
                'publication_info': {
                    'journal_issue': '12'
                },
            },
        },
    ]
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert expected == result
def test_set_journal_volume():
    """set_journal_volume() fills publication_info.journal_volume."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.set_journal_volume('2016')
    expected = [
        {
            'reference': {
                'publication_info': {
                    'journal_volume': '2016'
                },
            },
        },
    ]
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert expected == result
def test_set_page_artid():
    """set_page_artid() fills page_start, page_end and artid together."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.set_page_artid('12', '13', '014568')
    expected = [
        {
            'reference': {
                'publication_info': {
                    'page_start': '12',
                    'page_end': '13',
                    'artid': '014568',
                },
            },
        },
    ]
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert expected == result
def test_set_page_artid_none():
    """None page bounds are omitted entirely; only the artid is recorded."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.set_page_artid(None, None, '014568')
    expected = [
        {
            'reference': {
                'publication_info': {
                    'artid': '014568',
                },
            },
        },
    ]
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert expected == result
def test_is_arxiv_matches_valid_categories():
    """Only uids with a valid arXiv category become arxiv_eprint; look-alike
    handles (e.g. 'BF/...') are stored as HDL persistent identifiers."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.add_uid('BF/0234502')
    builder.add_uid('math/0311149')
    expected = [
        {
            'reference': {
                'persistent_identifiers': [{
                    'value': 'BF/0234502',
                    'schema': 'HDL',
                }],
                'arxiv_eprint': 'math/0311149'
            },
        },
    ]
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert expected == result
def test_add_url_rejects_empty_cds_id():
    """A CDS URL with no record id is kept as a plain url, not an identifier."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.add_url('https://cds.cern.ch/record/')
    expected = [
        {
            'reference': {
                'urls': [{
                    'value': 'https://cds.cern.ch/record/'
                }],
            },
        },
    ]
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert expected == result
def test_add_url_rejects_empty_ads_id():
    """An ADS URL with no bibcode is kept as a plain url, not an identifier."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.add_url('http://adsabs.harvard.edu/abs/')
    expected = [
        {
            'reference': {
                'urls': [{
                    'value': 'http://adsabs.harvard.edu/abs/'
                }],
            },
        },
    ]
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert expected == result
def test_add_report_number_rejects_duplicates():
    """Adding the same report number twice stores it only once."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.add_report_number('ATL-TDR-19')
    builder.add_report_number('ATL-TDR-19')
    expected = [
        {
            'reference': {
                'report_numbers': [
                    'ATL-TDR-19',
                ],
            },
        },
    ]
    result = [builder.obj]
    assert validate(result, subschema) is None
    assert expected == result
def test_pop_additional_pubnotes_no_misc():
    """With no misc at all, pop_additional_pubnotes() yields nothing."""
    builder = ReferenceBuilder()
    expected = []
    result = list(builder.pop_additional_pubnotes())
    assert expected == result
def test_pop_additional_pubnotes_no_additional_pubnote():
    """Misc entries without the 'Additional pubnote' marker are left alone."""
    builder = ReferenceBuilder()
    builder.add_misc("No additional pubnote")
    expected = []
    result = list(builder.pop_additional_pubnotes())
    assert expected == result
def test_pop_additional_pubnotes_single_pubnote():
    """A single 'Additional pubnote' misc is split into its own reference and
    removed from the builder's misc."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.add_misc("Additional pubnote: J.Testing,42,R477")
    expected = [
        {
            'reference': {
                'publication_info': {
                    'journal_title': 'J.Testing',
                    'journal_volume': '42',
                    'page_start': 'R477',
                    'artid': 'R477'
                },
                'misc': [
                    'Additional pubnote split from previous reference',
                ],
            },
        },
    ]
    result = list(builder.pop_additional_pubnotes())
    assert validate(result, subschema) is None
    assert expected == result
    assert 'misc' not in builder.obj['reference']
def test_pop_additional_pubnotes_several_pubnotes():
    """Several pubnote miscs split into several references; non-pubnote text
    ('some other stuff') stays behind in the builder's misc."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.add_misc("Additional pubnote: J.Improbable Testing,453,42-47 / some other stuff")
    builder.add_misc("Additional pubnote: J.Testing,42,R477")
    expected = [
        {
            'reference': {
                'publication_info': {
                    'journal_title': 'J.Improbable Testing',
                    'journal_volume': '453',
                    'page_start': '42',
                    'page_end': '47'
                },
                'misc': [
                    'Additional pubnote split from previous reference',
                ],
            },
        },
        {
            'reference': {
                'publication_info': {
                    'journal_title': 'J.Testing',
                    'journal_volume': '42',
                    'page_start': 'R477',
                    'artid': 'R477'
                },
                'misc': [
                    'Additional pubnote split from previous reference',
                ],
            },
        },
    ]
    result = list(builder.pop_additional_pubnotes())
    assert validate(result, subschema) is None
    assert expected == result
    assert builder.obj['reference']['misc'] == ['some other stuff']
def test_pop_additional_pubnotes_several_pubnotes_without_remaining_misc():
    """When every misc entry is a pubnote, no misc remains on the builder."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.add_misc("Additional pubnote: J.Improbable Testing,453,42-47")
    builder.add_misc("Additional pubnote: J.Testing,42,R477")
    expected = [
        {
            'reference': {
                'publication_info': {
                    'journal_title': 'J.Improbable Testing',
                    'journal_volume': '453',
                    'page_start': '42',
                    'page_end': '47'
                },
                'misc': [
                    'Additional pubnote split from previous reference',
                ],
            },
        },
        {
            'reference': {
                'publication_info': {
                    'journal_title': 'J.Testing',
                    'journal_volume': '42',
                    'page_start': 'R477',
                    'artid': 'R477'
                },
                'misc': [
                    'Additional pubnote split from previous reference',
                ],
            },
        },
    ]
    result = list(builder.pop_additional_pubnotes())
    assert validate(result, subschema) is None
    assert expected == result
    assert 'misc' not in builder.obj['reference']
def test_pop_additional_pubnotes_includes_label():
    """Split-off references inherit the original reference's label."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.add_misc("Additional pubnote: J.Testing,42,R477")
    builder.set_label('Hello')
    expected = [
        {
            'reference': {
                'publication_info': {
                    'journal_title': 'J.Testing',
                    'journal_volume': '42',
                    'page_start': 'R477',
                    'artid': 'R477'
                },
                'misc': [
                    'Additional pubnote split from previous reference',
                ],
                'label': 'Hello',
            },
        },
    ]
    result = list(builder.pop_additional_pubnotes())
    assert validate(result, subschema) is None
    assert expected == result
    assert 'misc' not in builder.obj['reference']
    assert builder.obj['reference']['label'] == 'Hello'
def test_pop_additional_pubnotes_includes_raw_ref():
    """Split-off references carry a copy of the original raw_refs."""
    schema = load_schema('hep')
    subschema = schema['properties']['references']
    builder = ReferenceBuilder()
    builder.add_misc("Additional pubnote: J.Testing,42,R477")
    builder.add_raw_reference("A raw ref")
    expected_raw_refs = [
        {
            'schema': 'text',
            'value': 'A raw ref'
        },
    ]
    expected = [
        {
            'reference': {
                'publication_info': {
                    'journal_title': 'J.Testing',
                    'journal_volume': '42',
                    'page_start': 'R477',
                    'artid': 'R477'
                },
                'misc': [
                    'Additional pubnote split from previous reference',
                ],
            },
            'raw_refs': expected_raw_refs,
        },
    ]
    result = list(builder.pop_additional_pubnotes())
    assert validate(result, subschema) is None
    assert expected == result
    assert 'misc' not in builder.obj['reference']
    assert builder.obj['raw_refs'] == expected_raw_refs
def test_reference_builder_skip_authors_without_full_names():
    """Empty author chunks in a refextract author string are skipped, so only
    the two real authors survive (in normalized 'last, first' form)."""
    rb = ReferenceBuilder()
    rb.add_refextract_authors_str("Author 1, ,Author 2")
    # Fixed: a stray trailing comma used to wrap this statement in a tuple.
    rb.add_raw_reference("Author 1, , Author 2, Some Title")
    rb.add_title("Some title")
    expected_authors = [{'full_name': '1, Author'}, {'full_name': '2, Author'}]
    # Assert the exact count rather than mere non-emptiness.
    assert len(rb.obj['reference']['authors']) == 2
    assert rb.obj['reference']['authors'] == expected_authors
def test_reference_builder_is_not_creating_author_empty_list_when_authors_missing():
    """Whitespace-only author names never create a reference or an authors key."""
    rb = ReferenceBuilder()
    rb.add_author("  ")
    rb.add_author("  ")
    assert 'reference' not in rb.obj
    rb.add_title("Title")
    rb.add_author("  ")
    assert 'authors' not in rb.obj['reference']
def test_reference_builder_is_not_adding_doi_when_already_present():
    """add_uid() must not duplicate a DOI already extracted from a URL."""
    rb = ReferenceBuilder()
    rb.add_url('https://doi.org/10.1088/1009-0630/7/4/022')
    rb.add_uid('10.1088/1009-0630/7/4/022')
    assert rb.obj['reference']['dois'] == ['10.1088/1009-0630/7/4/022']
| inspirehep/inspire-schemas | tests/unit/test_reference_builder.py | Python | gpl-2.0 | 33,261 | [
"Brian"
] | 7974e9035281a32631caf6ea446f4d2d1efdd783b119fc607df13ba0a41a45f5 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
from numbers import Number
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.phasediagram.pdmaker import PhaseDiagram
from pymatgen.phasediagram.pdanalyzer import PDAnalyzer
from pymatgen.phasediagram.entries import PDEntryIO, PDEntry
class PDAnalyzerTest(unittest.TestCase):
    """Tests for PDAnalyzer, driven by the Li-Fe-O phase diagram built from
    the bundled pdentries_test.csv."""
    def setUp(self):
        # Build the phase diagram once per test from the CSV next to this
        # module.
        module_dir = os.path.dirname(os.path.abspath(__file__))
        (elements, entries) = PDEntryIO.from_csv(os.path.join(module_dir,
                                                              "pdentries_test.csv"))
        self.pd = PhaseDiagram(entries)
        self.analyzer = PDAnalyzer(self.pd)
    def test_get_e_above_hull(self):
        """Stable entries sit on the hull (~0); unstable entries are a
        non-negative number above it."""
        for entry in self.pd.stable_entries:
            self.assertLess(self.analyzer.get_e_above_hull(entry), 1e-11,
                            "Stable entries should have e above hull of zero!")
        for entry in self.pd.all_entries:
            if entry not in self.pd.stable_entries:
                e_ah = self.analyzer.get_e_above_hull(entry)
                self.assertGreaterEqual(e_ah, 0)
                self.assertTrue(isinstance(e_ah, Number))
    def test_get_equilibrium_reaction_energy(self):
        """Equilibrium reaction energies of stable entries are <= 0."""
        for entry in self.pd.stable_entries:
            self.assertLessEqual(
                self.analyzer.get_equilibrium_reaction_energy(entry), 0,
                "Stable entries should have negative equilibrium reaction energy!")
    def test_get_decomposition(self):
        """A stable composition decomposes to itself; any composition into at
        most as many phases as there are components."""
        for entry in self.pd.stable_entries:
            self.assertEqual(len(self.analyzer.get_decomposition(entry.composition)), 1,
                             "Stable composition should have only 1 decomposition!")
        dim = len(self.pd.elements)
        for entry in self.pd.all_entries:
            ndecomp = len(self.analyzer.get_decomposition(entry.composition))
            self.assertTrue(ndecomp > 0 and ndecomp <= dim,
                            "The number of decomposition phases can at most be equal to the number of components.")
        # Also test decomposition amounts for a fictitious composition.
        ansdict = {entry.composition.formula: amt
                   for entry, amt in
                   self.analyzer.get_decomposition(Composition("Li3Fe7O11")).items()}
        expected_ans = {"Fe2 O2": 0.0952380952380949,
                        "Li1 Fe1 O2": 0.5714285714285714,
                        "Fe6 O8": 0.33333333333333393}
        for k, v in expected_ans.items():
            self.assertAlmostEqual(ansdict[k], v)
    def test_get_transition_chempots(self):
        """There can be no more transition chemical potentials than facets."""
        for el in self.pd.elements:
            self.assertLessEqual(len(self.analyzer.get_transition_chempots(el)),
                                 len(self.pd.facets))
    def test_get_element_profile(self):
        """Element profiles of non-elemental stable phases have at most one
        segment per facet."""
        for el in self.pd.elements:
            for entry in self.pd.stable_entries:
                if not (entry.composition.is_element):
                    self.assertLessEqual(len(self.analyzer.get_element_profile(el, entry.composition)),
                                         len(self.pd.facets))
    def test_get_get_chempot_range_map(self):
        """Chempot range map for the Li-O subspace has the expected 10 domains."""
        elements = [el for el in self.pd.elements if el.symbol != "Fe"]
        self.assertEqual(len(self.analyzer.get_chempot_range_map(elements)), 10)
    def test_getmu_vertices_stability_phase(self):
        """LiFeO2 stability domain w.r.t. mu(O) has 6 vertices including one
        specific known vertex."""
        results = self.analyzer.getmu_vertices_stability_phase(Composition("LiFeO2"), Element("O"))
        self.assertAlmostEqual(len(results), 6)
        test_equality = False
        for c in results:
            if abs(c[Element("O")]+7.115) < 1e-2 and abs(c[Element("Fe")]+6.596) < 1e-2 and \
                    abs(c[Element("Li")]+3.931) < 1e-2:
                test_equality = True
        self.assertTrue(test_equality,"there is an expected vertex missing in the list")
    def test_getmu_range_stability_phase(self):
        """Known chempot stability bounds for LiFeO2 w.r.t. O."""
        results = self.analyzer.get_chempot_range_stability_phase(
            Composition("LiFeO2"), Element("O"))
        self.assertAlmostEqual(results[Element("O")][1], -4.4501812249999997)
        self.assertAlmostEqual(results[Element("Fe")][0], -6.5961470999999996)
        self.assertAlmostEqual(results[Element("Li")][0], -3.6250022625000007)
    def test_get_hull_energy(self):
        """Hull energy of a stable composition equals that entry's energy
        (per atom for a fractional composition)."""
        for entry in self.pd.stable_entries:
            h_e = self.analyzer.get_hull_energy(entry.composition)
            self.assertAlmostEqual(h_e, entry.energy)
            n_h_e = self.analyzer.get_hull_energy(entry.composition.fractional_composition)
            self.assertAlmostEqual(n_h_e, entry.energy_per_atom)
    def test_1d_pd(self):
        """A one-element phase diagram still supports decomposition queries."""
        entry = PDEntry('H', 0)
        pd = PhaseDiagram([entry])
        pda = PDAnalyzer(pd)
        decomp, e = pda.get_decomp_and_e_above_hull(PDEntry('H', 1))
        self.assertAlmostEqual(e, 1)
        self.assertAlmostEqual(decomp[entry], 1.0)
# Allow running this test module directly with `python test_pdanalyzer.py`.
if __name__ == '__main__':
    unittest.main()
| migueldiascosta/pymatgen | pymatgen/phasediagram/tests/test_pdanalyzer.py | Python | mit | 5,104 | [
"pymatgen"
] | 4640f8da089df7bbb9c4c7d7e157620ba31a079e6ca3d94421127a3c2c270ab7 |
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" lazy generator of 2D pharmacophore signature data
"""
from rdkit.Chem.Pharm2D import SigFactory, Matcher
# This module is an unfinished stub: importing it fails here on purpose so
# nothing downstream accidentally relies on the incomplete Generator below.
raise NotImplementedError('not finished yet')
class Generator(object):
  """ Lazily evaluates 2D pharmacophore signature bits on demand.

    Important attributes:

     - mol: the molecules whose signature is being worked with

     - sigFactory : the SigFactory object with signature parameters
       NOTE: no preprocessing is carried out for _sigFactory_.
       It *must* be pre-initialized.

    **Notes**

      - the module raises NotImplementedError at import time; this class is
        known to be unfinished (see NOTE(review) comments below).

  """

  def __init__(self, sigFactory, mol, dMat=None, bitCache=True):
    """ constructor

      **Arguments**

        - sigFactory: a signature factory, see class docs

        - mol: a molecule, see class docs

        - dMat: (optional) a distance matrix for the molecule. If this
          is not provided, one will be calculated

        - bitCache: (optional) if nonzero, a local cache of which bits
          have been queried will be maintained. Otherwise things must
          be recalculate each time a bit is queried.

    """
    if not isinstance(sigFactory, SigFactory.SigFactory):
      raise ValueError('bad factory')
    self.sigFactory = sigFactory
    self.mol = mol
    if dMat is None:
      useBO = sigFactory.includeBondOrder
      # NOTE(review): Chem is not imported at module level, so this branch
      # would raise NameError even if the module could be imported.
      dMat = Chem.GetDistanceMatrix(mol, useBO)
    self.dMat = dMat
    # self.bits caches bit values by index when bitCache is truthy;
    # None disables caching.
    if bitCache:
      self.bits = {}
    else:
      self.bits = None
    featFamilies = [fam for fam in sigFactory.featFactory.GetFeatureFamilies()
                    if fam not in sigFactory.skipFeats]
    nFeats = len(featFamilies)
    featMatches = {}
    for fam in featFamilies:
      featMatches[fam] = []
    feats = sigFactory.featFactory.GetFeaturesForMol(mol)
    for feat in feats:
      if feat.GetFamily() not in sigFactory.skipFeats:
        featMatches[feat.GetFamily()].append(feat.GetAtomIds())
    # NOTE(review): the dict built above is immediately clobbered by the list
    # below, and GetMolFeature() is called with no arguments -- this section
    # is clearly unfinished.
    featMatches = [None] * nFeats
    for i in range(nFeats):
      featMatches[i] = sigFactory.featFactory.GetMolFeature()
    # NOTE(review): pattMatches is never defined; presumably this should be
    # featMatches. As written this raises NameError at runtime.
    self.pattMatches = pattMatches

  def GetBit(self, idx):
    """ returns a bool indicating whether or not the bit is set

    """
    # NOTE(review): self.sig is never assigned in __init__ -- confirm what it
    # was meant to hold before using this class.
    if idx < 0 or idx >= self.sig.GetSize():
      raise IndexError('Index %d invalid' % (idx))
    # Serve from the cache when enabled and already computed.
    if self.bits is not None and idx in self.bits:
      return self.bits[idx]
    tmp = Matcher.GetAtomsMatchingBit(self.sig, idx, self.mol, dMat=self.dMat, justOne=1,
                                      matchingAtoms=self.pattMatches)
    if not tmp or len(tmp) == 0:
      res = 0
    else:
      res = 1
    if self.bits is not None:
      self.bits[idx] = res
    return res

  def __len__(self):
    """ allows class to support len()

    """
    return self.sig.GetSize()

  def __getitem__(self, itm):
    """ allows class to support random access.
      Calls self.GetBit()

    """
    return self.GetBit(itm)
# Simple benchmark: compares lazy per-bit generation against generating the
# full fingerprint up front, over the first molecules of an NCI SMILES file.
if __name__ == '__main__':
  import time
  from rdkit import RDConfig, Chem
  from rdkit.Chem.Pharm2D import Gobbi_Pharm2D, Generate
  import random
  factory = Gobbi_Pharm2D.factory
  nToDo = 100
  inD = open(RDConfig.RDDataDir + "/NCI/first_5K.smi", 'r').readlines()[:nToDo]
  mols = [None] * len(inD)
  for i in range(len(inD)):
    smi = inD[i].split('\t')[0]
    # NOTE(review): str.strip() returns a new string; the result is discarded
    # here, so trailing whitespace stays on smi.
    smi.strip()
    mols[i] = Chem.MolFromSmiles(smi)
  sig = factory.GetSignature()
  nBits = 300
  # Fixed seed so both timing runs query the same random bit indices.
  random.seed(23)
  bits = [random.randint(0, sig.GetSize() - 1) for x in range(nBits)]
  print('Using the Lazy Generator')
  t1 = time.time()
  for i in range(len(mols)):
    if not i % 10:
      print('done mol %d of %d' % (i, len(mols)))
    gen = Generator(factory, mols[i])
    for bit in bits:
      v = gen[bit]
  t2 = time.time()
  print('\tthat took %4.2f seconds' % (t2 - t1))
  print('Generating and checking signatures')
  t1 = time.time()
  for i in range(len(mols)):
    if not i % 10:
      print('done mol %d of %d' % (i, len(mols)))
    sig = Generate.Gen2DFingerprint(mols[i], factory)
    for bit in bits:
      v = sig[bit]
  t2 = time.time()
  print('\tthat took %4.2f seconds' % (t2 - t1))
| greglandrum/rdkit | rdkit/Chem/Pharm2D/LazyGenerator.py | Python | bsd-3-clause | 4,270 | [
"RDKit"
] | 720d6987d92a094702d73334115e251617aeaefb1990591c8e3a013bd172d59a |
# This class provides access to selected data from a trajectory
# via an interface that can be accessed remotely by Pyro. It
# can be used by programs that wish to inspect a trajectory that
# resides on a different machine.
#
# Written by Konrad Hinsen
# last revision: 2000-8-3
#
from Scientific.IO.NetCDF import NetCDFFile
import Numeric
class TrajectoryInspector:
    """Read-only access to selected data in an MMTK netCDF trajectory.

    Transparently handles both block-structured trajectories (those with a
    'minor_step_number' dimension, storing several steps per record) and
    plain per-step trajectories.
    """
    def __init__(self, filename):
        # filename: path to the netCDF trajectory, opened read-only.
        self.filename = filename
        self.file = NetCDFFile(self.filename, 'r')
        try:
            # Present only for block-structured trajectories.
            self.block_size = self.file.dimensions['minor_step_number']
        except KeyError:
            self.block_size = 1
        self._countSteps()
    def close(self):
        """Close the underlying netCDF file."""
        self.file.close()
    def reopen(self):
        """Close and reopen the file, picking up data appended meanwhile."""
        self.file.close()
        self.file = NetCDFFile(self.filename, 'r')
        self._countSteps()
    def _countSteps(self):
        # Count valid steps. In block mode the last block may be partially
        # filled; unused slots hold the netCDF integer fill value
        # -2147483647 and are subtracted from the total.
        if self.block_size == 1:
            self.nsteps = self.file.variables['step'].shape[0]
        else:
            blocks = self.file.variables['step'].shape[0]
            last_block = self.file.variables['step'][blocks-1]
            unused = Numeric.sum(Numeric.equal(last_block, -2147483647))
            self.nsteps = blocks*self.block_size-unused
    def comment(self):
        """Return the trajectory's comment attribute, or '' if absent."""
        try:
            return self.file.comment
        except AttributeError:
            return ''
    def history(self):
        """Return the trajectory's history attribute, or '' if absent."""
        try:
            return self.file.history
        except AttributeError:
            return ''
    def description(self):
        """Return the system description as a string."""
        return self.file.variables['description'][:].tostring()
    def numberOfAtoms(self):
        """Return the number of atoms in the system."""
        return self.file.dimensions['atom_number']
    def numberOfSteps(self):
        """Return the number of valid trajectory steps."""
        return self.nsteps
    def variableNames(self):
        """Return the names of all variables stored in the trajectory."""
        return self.file.variables.keys()
    def readScalarVariable(self, name, first=0, last=None, step=1):
        """Return values of scalar variable |name| for the step range
        first:last:step (last defaults to the total number of steps)."""
        if last is None:
            last = self.nsteps
        variable = self.file.variables[name]
        if self.block_size > 1:
            # Flatten (block, minor_step) layout into a linear step sequence.
            variable = Numeric.ravel(variable[:, :])
        return variable[first:last:step]
    def readConfiguration(self, index):
        """Return (cell, configuration) for step |index|; cell is None when
        the trajectory has no 'box_size' variable (non-periodic system)."""
        if self.block_size == 1:
            try:
                cell = self.file.variables['box_size'][index]
            except KeyError:
                cell = None
            return cell, self.file.variables['configuration'][index]
        else:
            # Map the linear step index onto (block, minor_step).
            # NOTE(review): relies on Python 2 integer division ('/');
            # under Python 3 this would need '//'.
            i1 = index / self.block_size
            i2 = index % self.block_size
            try:
                cell = self.file.variables['box_size'][i1, :, i2]
            except KeyError:
                cell = None
            return cell, self.file.variables['configuration'][i1, :, :, i2]
| fxia22/ASM_xf | PythonD/site_python/MMTK/Tools/TrajectoryViewer/TrajectoryInspector.py | Python | gpl-2.0 | 2,663 | [
"NetCDF"
] | 2ac8a06037fb749ab5c155b837ff58f2b6d21e83d958e36422f0466a1e4bd33b |
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
targets: list of targets to search for. The target names are unqualified.
The following is output:
error: only supplied if there is an error.
targets: the set of targets passed in via targets that either directly or
indirectly depend upon the set of paths supplied in files.
build_targets: minimal set of targets that directly depend on the changed
files and need to be built. The expectation is this set of targets is passed
into a build step.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case targets and build_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets thare were not found.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
# Set to True for verbose tracing of the matching process on stdout.
debug = False
# Strings emitted to summarize the outcome of the analysis.
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'
# MatchStatus is used indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
# This generator produces no build output, so point every directory-like
# generator variable at a recognizable placeholder ('!!!')...
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  generator_default_variables[dirname] = '!!!'
# ...and every rule/name-like variable at the empty string.
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
  """Extracts valid sources from |sources| and adds them to |result|. Each
  source file is relative to |base_path|, but may contain '..'. To make
  resolving '..' easier |base_path_components| contains each of the
  directories in |base_path|. Additionally each source may contain variables.
  Such sources are ignored as it is assumed dependencies on them are expressed
  and tracked in some other means."""
  # NOTE: gyp paths are always posix style.
  for source in sources:
    # Skip empty entries, placeholder directories ('!!!', see
    # generator_default_variables) and sources containing variables ('$').
    if not len(source) or source.startswith('!!!') or source.startswith('$'):
      continue
    # variable expansion may lead to //.
    org_source = source
    source = source[0] + source[1:].replace('//', '/')
    if source.startswith('../'):
      source = _ResolveParent(source, base_path_components)
      # _ResolveParent returns '' for paths escaping the source tree;
      # those are dropped.
      if len(source):
        result.append(source)
      continue
    result.append(base_path + source)
    if debug:
      print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
                              results):
  """Append the input files of |action| (if it declares any) to |results|."""
  inputs = action.get('inputs', None)
  if inputs is not None:
    _AddSources(inputs, base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
  """Returns the list of source files of |target|, including action and rule
  inputs, as posix paths relative to |toplevel_dir|."""
  # |target| is either absolute or relative and in the format of the OS. Gyp
  # source paths are always posix. Convert |target| to a posix path relative to
  # |toplevel_dir_|. This is done to make it easy to build source paths.
  base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
  base_path_components = base_path.split('/')
  # Add a trailing '/' so that _AddSources() can easily build paths.
  if len(base_path):
    base_path += '/'
  if debug:
    print 'ExtractSources', target, base_path
  results = []
  if 'sources' in target_dict:
    _AddSources(target_dict['sources'], base_path, base_path_components,
                results)
  # Include the inputs from any actions. Any changes to these affect the
  # resulting output.
  if 'actions' in target_dict:
    for action in target_dict['actions']:
      _ExtractSourcesFromAction(action, base_path, base_path_components,
                                results)
  if 'rules' in target_dict:
    for rule in target_dict['rules']:
      _ExtractSourcesFromAction(rule, base_path, base_path_components, results)
  return results
class Target(object):
  """Bookkeeping record for a single gyp target.

  deps: direct dependencies, as a set of Targets (not transitive).
  match_status: one of the MATCH_STATUS_* values.
  back_deps: set of Targets that directly depend on this one.
  visited: iteration marker, used both while building the Target graph and
    again in _GetBuildTargets().
  name: fully qualified name of the target.
  requires_build: whether the target's type means it must be built. See
    _DoesTargetTypeRequireBuild for details.
  added_to_compile_targets: whether this target was already added to the set
    of targets that needs to be built.
  in_roots: whether this target is a descendant of one of the root nodes.
  is_executable: whether the target's type is 'executable'.
  is_static_library: whether the target's type is 'static_library'.
  is_or_has_linked_ancestor: whether this target links (e.g. an executable),
    or some target in back_deps does."""

  def __init__(self, name):
    self.name = name
    # Graph edges; filled in by _GenerateTargets.
    self.deps = set()
    self.back_deps = set()
    self.match_status = MATCH_STATUS_TBD
    # TODO(sky): I don't like hanging this off Target. This state is specific
    # to certain functions and should be isolated there.
    self.visited = False
    self.requires_build = False
    self.added_to_compile_targets = False
    self.in_roots = False
    # Type flags; filled in by _GenerateTargets from the target dict.
    self.is_executable = False
    self.is_static_library = False
    self.is_or_has_linked_ancestor = False
class Config(object):
  """Details what we're looking for
  files: set of files to search for
  targets: see file description for details."""
  def __init__(self):
    self.files = []
    self.targets = set()

  def Init(self, params):
    """Initializes Config. This is a separate method as it raises an exception
    if there is a parse error.

    Reads the json file named by the 'config_path' generator flag (no-op if
    the flag is absent) and populates |files| and |targets| from it. Raises
    Exception when the file cannot be read or parsed, or is not a dict."""
    generator_flags = params.get('generator_flags', {})
    config_path = generator_flags.get('config_path', None)
    if not config_path:
      return
    try:
      # Use a context manager so the file handle is released even when
      # json.load raises (previously the handle leaked on a parse error).
      with open(config_path, 'r') as f:
        config = json.load(f)
    except IOError:
      raise Exception('Unable to open file ' + config_path)
    except ValueError as e:
      raise Exception('Unable to parse config file ' + config_path + str(e))
    if not isinstance(config, dict):
      raise Exception('config_path must be a JSON file containing a dictionary')
    self.files = config.get('files', [])
    self.targets = set(config.get('targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
  """Returns true if the build file |build_file| is either in |files| or
  one of the files included by |build_file| is in |files|. |toplevel_dir| is
  the root of the source tree."""
  if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
    if debug:
      print 'gyp file modified', build_file
    return True
  # First element of included_files is the file itself.
  if len(data[build_file]['included_files']) <= 1:
    return False
  for include_file in data[build_file]['included_files'][1:]:
    # |included_files| are relative to the directory of the |build_file|.
    rel_include_file = \
        _ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
    if _ToLocalPath(toplevel_dir, rel_include_file) in files:
      if debug:
        print 'included gyp file modified, gyp_file=', build_file, \
            'included file=', rel_include_file
      return True
  return False
def _GetOrCreateTargetByName(targets, target_name):
  """Look up |target_name| in the |targets| dict, creating a Target on a
  miss. Returns (created, target) where |created| says whether a new Target
  was just added."""
  existing = targets.get(target_name)
  if existing is not None:
    return False, existing
  new_target = Target(target_name)
  targets[target_name] = new_target
  return True, new_target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return bool(target_dict['type'] != 'none' or
target_dict.get('actions') or target_dict.get('rules'))
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
                     build_files):
  """Returns a tuple of the following:
  . A dictionary mapping from fully qualified name to Target.
  . A list of the targets that have a source file in |files|.
  . Set of root Targets reachable from the the files |build_files|.
  This sets the |match_status| of the targets that contain any of the source
  files in |files| to MATCH_STATUS_MATCHES.
  |toplevel_dir| is the root of the source tree."""
  # Maps from target name to Target.
  targets = {}
  # Targets that matched.
  matching_targets = []
  # Queue of targets to visit.
  targets_to_visit = target_list[:]
  # Maps from build file to a boolean indicating whether the build file is in
  # |files|.
  build_file_in_files = {}
  # Root targets across all files.
  roots = set()
  # Set of Targets in |build_files|.
  build_file_targets = set()
  while len(targets_to_visit) > 0:
    target_name = targets_to_visit.pop()
    created_target, target = _GetOrCreateTargetByName(targets, target_name)
    # A target is a root candidate until something is seen depending on it.
    if created_target:
      roots.add(target)
    elif target.visited:
      continue
    target.visited = True
    target.requires_build = _DoesTargetTypeRequireBuild(
        target_dicts[target_name])
    target_type = target_dicts[target_name]['type']
    target.is_executable = target_type == 'executable'
    target.is_static_library = target_type == 'static_library'
    target.is_or_has_linked_ancestor = (target_type == 'executable' or
                                        target_type == 'shared_library')
    build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
    # Cache the modified-check per build file; many targets share one file.
    if not build_file in build_file_in_files:
      build_file_in_files[build_file] = \
          _WasBuildFileModified(build_file, data, files, toplevel_dir)
    if build_file in build_files:
      build_file_targets.add(target)
    # If a build file (or any of its included files) is modified we assume all
    # targets in the file are modified.
    if build_file_in_files[build_file]:
      print 'matching target from modified build file', target_name
      target.match_status = MATCH_STATUS_MATCHES
      matching_targets.append(target)
    else:
      sources = _ExtractSources(target_name, target_dicts[target_name],
                                toplevel_dir)
      for source in sources:
        if _ToGypPath(os.path.normpath(source)) in files:
          print 'target', target_name, 'matches', source
          target.match_status = MATCH_STATUS_MATCHES
          matching_targets.append(target)
          break
    # Add dependencies to visit as well as updating back pointers for deps.
    for dep in target_dicts[target_name].get('dependencies', []):
      targets_to_visit.append(dep)
      created_dep_target, dep_target = _GetOrCreateTargetByName(targets, dep)
      if not created_dep_target:
        roots.discard(dep_target)
      target.deps.add(dep_target)
      dep_target.back_deps.add(target)
  # Only roots that live in one of |build_files| are interesting to callers.
  return targets, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
  """Builds a dict mapping each unqualified name in |to_find| to its Target.

  Only names present in |all_targets| appear in the result; scanning stops
  early once every requested name has been resolved."""
  mapping = {}
  remaining = set(to_find)
  if not remaining:
    return mapping
  for qualified_name, target in all_targets.items():
    parts = gyp.common.ParseQualifiedTarget(qualified_name)
    if len(parts) <= 1:
      continue
    short_name = parts[1]
    if short_name in remaining:
      remaining.discard(short_name)
      mapping[short_name] = target
      if not remaining:
        break
  return mapping
def _DoesTargetDependOn(target):
"""Returns true if |target| or any of its dependencies matches the supplied
set of paths. This updates |matches| of the Targets as it recurses.
target: the Target to look for."""
if target.match_status == MATCH_STATUS_DOESNT_MATCH:
return False
if target.match_status == MATCH_STATUS_MATCHES or \
target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
return True
for dep in target.deps:
if _DoesTargetDependOn(dep):
target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
print '\t', target.name, 'matches by dep', dep.name
return True
target.match_status = MATCH_STATUS_DOESNT_MATCH
return False
def _GetTargetsDependingOn(possible_targets):
"""Returns the list of Targets in |possible_targets| that depend (either
directly on indirectly) on the matched targets.
possible_targets: targets to search from."""
found = []
print 'Targets that matched by dependency:'
for target in possible_targets:
if _DoesTargetDependOn(target):
found.append(target)
return found
def _AddBuildTargets(target, roots, add_if_no_ancestor, result):
  """Recurses through all targets that depend on |target|, adding all targets
  that need to be built (and are in |roots|) to |result|.
  roots: set of root targets.
  add_if_no_ancestor: If true and there are no ancestors of |target| then add
  |target| to |result|. |target| must still be in |roots|.
  result: targets that need to be built are added here."""
  # Each node is processed at most once; callers must reset |visited| before
  # reusing the graph for another traversal.
  if target.visited:
    return
  target.visited = True
  target.in_roots = not target.back_deps and target in roots
  # Visit everything that depends on this target first, so this target can
  # inherit their in_roots / added_to_compile_targets / linked-ancestor flags.
  for back_dep_target in target.back_deps:
    _AddBuildTargets(back_dep_target, roots, False, result)
    target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
    target.in_roots |= back_dep_target.in_roots
    target.is_or_has_linked_ancestor |= (
      back_dep_target.is_or_has_linked_ancestor)
  # Always add 'executable' targets. Even though they may be built by other
  # targets that depend upon them it makes detection of what is going to be
  # built easier.
  # And always add static_libraries that have no dependencies on them from
  # linkables. This is necessary as the other dependencies on them may be
  # static libraries themselves, which are not compile time dependencies.
  if target.in_roots and \
     (target.is_executable or
      (not target.added_to_compile_targets and
       (add_if_no_ancestor or target.requires_build)) or
      (target.is_static_library and add_if_no_ancestor and
       not target.is_or_has_linked_ancestor)):
    print '\t\tadding to build targets', target.name, 'executable', \
          target.is_executable, 'added_to_compile_targets', \
          target.added_to_compile_targets, 'add_if_no_ancestor', \
          add_if_no_ancestor, 'requires_build', target.requires_build, \
          'is_static_library', target.is_static_library, \
          'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
    result.add(target)
    target.added_to_compile_targets = True
def _GetBuildTargets(matching_targets, roots):
"""Returns the set of Targets that require a build.
matching_targets: targets that changed and need to be built.
roots: set of root targets in the build files to search from."""
result = set()
for target in matching_targets:
print '\tfinding build targets for match', target.name
_AddBuildTargets(target, roots, True, result)
return result
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print 'Error:', values['error']
if 'status' in values:
print values['status']
if 'targets' in values:
values['targets'].sort()
print 'Supplied targets that depend on changed files:'
for target in values['targets']:
print '\t', target
if 'invalid_targets' in values:
values['invalid_targets'].sort()
print 'The following targets were not found:'
for target in values['invalid_targets']:
print '\t', target
if 'build_targets' in values:
values['build_targets'].sort()
print 'Targets that require a build:'
for target in values['build_targets']:
print '\t', target
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print json.dumps(values)
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
"""Returns true if one of the files in |files| is in the set of included
files."""
if params['options'].includes:
for include in params['options'].includes:
if _ToGypPath(os.path.normpath(include)) in files:
print 'Include file modified, assuming all changed', include
return True
return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Sets default_variables['OS'] from the detected flavor; on Windows it also
  pulls in the MSVS generator's shared configuration."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
  elif flavor == 'win':
    default_variables.setdefault('OS', 'win')
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): these two assignments create locals that are never read
    # here; they look like module-level generator_* hooks -- confirm intent.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
def GenerateOutput(target_list, target_dicts, data, params):
  """Called by gyp as the final stage. Outputs results.

  Any exception is caught and reported through _WriteOutput so the caller
  always receives a well-formed result."""
  config = Config()
  try:
    config.Init(params)
    if not config.files:
      raise Exception('Must specify files to analyze via config_path generator '
                      'flag')
    toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
    if debug:
      print 'toplevel_dir', toplevel_dir
    # A modified gyp include affects every target: short-circuit the search.
    if _WasGypIncludeFileModified(params, config.files):
      result_dict = { 'status': all_changed_string,
                      'targets': list(config.targets) }
      _WriteOutput(params, **result_dict)
      return
    all_targets, matching_targets, roots = _GenerateTargets(
      data, target_list, target_dicts, toplevel_dir, frozenset(config.files),
      params['build_files'])
    print 'roots:'
    for root in roots:
      print '\t', root.name
    unqualified_mapping = _GetUnqualifiedToTargetMapping(all_targets,
                                                         config.targets)
    invalid_targets = None
    if len(unqualified_mapping) != len(config.targets):
      invalid_targets = _NamesNotIn(config.targets, unqualified_mapping)
    if matching_targets:
      search_targets = _LookupTargets(config.targets, unqualified_mapping)
      print 'supplied targets'
      for target in config.targets:
        print '\t', target
      print 'expanded supplied targets'
      for target in search_targets:
        print '\t', target.name
      matched_search_targets = _GetTargetsDependingOn(search_targets)
      print 'raw matched search targets:'
      for target in matched_search_targets:
        print '\t', target.name
      # Reset the visited status for _GetBuildTargets.
      for target in all_targets.itervalues():
        target.visited = False
      print 'Finding build targets'
      build_targets = _GetBuildTargets(matching_targets, roots)
      # Report unqualified names to the caller.
      matched_search_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
                                for target in matched_search_targets]
      build_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
                       for target in build_targets]
    else:
      matched_search_targets = []
      build_targets = []
    result_dict = { 'targets': matched_search_targets,
                    'status': found_dependency_string if matching_targets else
                              no_dependency_string,
                    'build_targets': build_targets}
    if invalid_targets:
      result_dict['invalid_targets'] = invalid_targets
    _WriteOutput(params, **result_dict)
  except Exception as e:
    _WriteOutput(params, error=str(e))
| dreamllq/node | tools/gyp/pylib/gyp/generator/analyzer.py | Python | apache-2.0 | 23,439 | [
"VisIt"
] | 188d528ce5318e5d039f235b76dccc1f42ac5125301b2dadfcb7a047beb073de |
import os
import copy
import cPickle
import numpy as np
# Draw a random seed, then deliberately pin it so runs are reproducible.
# (The randint draw is kept so the "random seed" workflow can be restored
# by deleting the fixed assignment below.)
seed = np.random.randint(2**16)
# seed = 2958
seed = 60017
print "Seed: ", seed
# Use a non-interactive matplotlib backend on headless machines.
if "DISPLAY" not in os.environ:
    import matplotlib
    matplotlib.use('Agg')
import matplotlib.pyplot as plt
from optofit.cneuron.compartment import Compartment, SquidCompartment
from optofit.cneuron.channels import LeakChannel, NaChannel, KdrChannel
from optofit.cneuron.simulate import forward_euler
from optofit.cneuron.gpchannel import GPChannel, sigma
from hips.inference.particle_mcmc import *
from optofit.cinference.pmcmc import *
import kayak
import scipy
# Set the random seed for reproducibility
np.random.seed(seed)
# Make a simple compartment
# Passive membrane / leak parameters (Hodgkin-Huxley conventions:
# capacitance, mV reversal potentials, conductances).
hypers = {
    'C'      : 1.0,
    'V0'     : -60.0,
    'g_leak' : 0.03,
    'E_leak' : -65.0}
# GP channel 1: Na-like (E_gp matches squid E_na below).
gp1_hypers = {'D': 2,
              'sig' : 1,
              'g_gp' : 12.0,
              'E_gp' : 50.0,
              'alpha_0': 1.0,
              'beta_0' : 2.0,
              'sigma_kernel': 1.0}
# GP channel 2: delayed-rectifier-K-like (E_gp matches squid E_kdr below).
gp2_hypers = {'D' : 1,
              'sig' : 1,
              'g_gp' : 3.60,
              # 'g_gp' : 0,
              'E_gp' : -77.0,
              'alpha_0': 1.0,
              'beta_0' : 2.0,
              'sigma_kernel': 1.0}
# Ground-truth Hodgkin-Huxley parameters for the squid compartment.
squid_hypers = {
    'C'      : 1.0,
    'V0'     : -60.0,
    'g_leak' : 0.03,
    'E_leak' : -65.0,
    'g_na'   : 12.0,
    # 'g_na'   : 0.0,
    'E_na'   : 50.0,
    'g_kdr'  : 3.60,
    'E_kdr'  : -77.0
}
def create_gp_model():
    """Builds a single-compartment neuron with a leak channel plus two
    GP-parameterized channels (an Na-like 'gpna' and a K-like 'gpk').

    Returns (body, gp_na, gp_k, D, I): the compartment, the two GP channels,
    and the latent-state / input dimensions from initialize_offsets()."""
    body = Compartment(name='body', hypers=hypers)
    gp_na = GPChannel(name='gpna', hypers=gp1_hypers)
    gp_k = GPChannel(name='gpk', hypers=gp2_hypers)
    # Attach the channels in the same order as before: leak, then the GPs.
    for channel in (LeakChannel(name='leak', hypers=hypers), gp_na, gp_k):
        body.add_child(channel)
    D, I = body.initialize_offsets()
    return body, gp_na, gp_k, D, I
def sample_squid_model(start = 20, stop = 80, intensity = 7.):
    """Simulates a Hodgkin-Huxley squid compartment driven by three current
    steps, observes the voltage with Gaussian noise, and plots the result.

    Returns (td, zd, xd, inptd, st_axs): downsampled time grid, latent
    states, observations, input trace and the state plot axes.

    NOTE(review): the start/stop/intensity parameters are unused -- the
    stimulus windows and amplitudes are hard-coded below; confirm before
    relying on them.
    """
    squid_body = SquidCompartment(name='body', hypers=squid_hypers)
    # squid_body = Compartment(name='body', hypers=squid_hypers)
    # leak = LeakChannel(name='leak', hypers=squid_hypers)
    # na = NaChannel(name='na', hypers=squid_hypers)
    # kdr = KdrChannel(name='kdr', hypers=squid_hypers)
    # squid_body.add_child(leak)
    # body.add_child(na)
    # squid_body.add_child(kdr)
    # Initialize the model
    D, I = squid_body.initialize_offsets()
    # Set the recording duration
    t_start = 0
    t_stop = 300.
    dt = 0.1
    t = np.arange(t_start, t_stop, dt)
    T = len(t)
    # Make input with injected current steps at 20-40, 120-160 and 220-280 ms
    # (of increasing amplitude), plus unit white noise.
    inpt = np.zeros((T, I))
    inpt[20/dt:40/dt,:] = 3.
    inpt[120/dt:160/dt,:] = 5.
    inpt[220/dt:280/dt,:] = 7.
    inpt += np.random.randn(T, I)
    # Set the initial distribution to be Gaussian around the steady state
    z0 = np.zeros(D)
    squid_body.steady_state(z0)
    init = GaussianInitialDistribution(z0, 0.1**2 * np.eye(D))
    # Set the proposal distribution using Hodgkin Huxley dynamics
    # TODO: Fix the hack which requires us to know the number of particles
    N = 100
    sigmas = 0.0001*np.ones(D)
    # Set the voltage transition dynamics to be a bit noisier
    sigmas[squid_body.x_offset] = 0.25
    prop = HodgkinHuxleyProposal(T, N, D, squid_body, sigmas, t, inpt)
    # Set the observation model to observe only the voltage
    etas = np.ones(1)
    observed_dims = np.array([squid_body.x_offset]).astype(np.int32)
    lkhd = PartialGaussianLikelihood(observed_dims, etas)
    # Initialize the latent state matrix to sample N=1 particle
    z = np.zeros((T,N,D))
    z[0,0,:] = init.sample()
    # Initialize the output matrix
    x = np.zeros((T,D))
    # Sample the latent state sequence
    for i in np.arange(0,T-1):
        # The interface kinda sucks. We have to tell it that
        # the first particle is always its ancestor
        prop.sample_next(z, i, np.zeros((N,), dtype=np.int32))
    # Sample observations
    for i in np.arange(0,T):
        lkhd.sample(z,x,i,0)
    # Extract the first (and in this case only) particle
    z = z[:,0,:].copy(order='C')
    # Downsample (t_ds == dt, so intvl is 1 and this is currently a no-op
    # copy; raise t_ds to actually thin the traces).
    t_ds = 0.1
    intvl = int(t_ds / dt)
    td = t[::intvl].copy('C')
    zd = z[::intvl, :].copy('C')
    xd = x[::intvl, :].copy('C')
    inptd = inpt[::intvl].copy('C')
    # Plot the first particle trajectory
    plt.ion()
    st_axs, _ = squid_body.plot(td, zd, color='k')
    # Plot the observed voltage
    st_axs[0].plot(td, xd[:,0], 'r')
    # plt.plot(t, x[:,0], 'r')
    plt.show()
    plt.pause(0.01)
    return td, zd, xd, inptd, st_axs
def sample_gp_model():
    """Simulates the GP-channel neuron (see create_gp_model) with a single
    current step, observes the voltage, and plots the trajectory.

    Returns (t, z, x, inpt, st_axs): time grid, latent states, observations,
    input trace and the state plot axes.
    """
    body, gp1, gp2, D, I = create_gp_model()
    # Set the recording duration
    t_start = 0
    t_stop = 100.
    dt = 1.0
    t = np.arange(t_start, t_stop, dt)
    T = len(t)
    # Make input with an injected current step at 50-60 ms plus unit noise.
    inpt = np.zeros((T, I))
    inpt[50/dt:60/dt,:] = 7.
    inpt += np.random.randn(T, I)
    # Set the initial distribution to be Gaussian around the steady state
    z0 = np.zeros(D)
    body.steady_state(z0)
    init = GaussianInitialDistribution(z0, 0.1**2 * np.eye(D))
    # Set the proposal distribution using Hodgkin Huxley dynamics
    sigmas = 0.0001*np.ones(D)
    # Set the voltage transition dynamics to be a bit noisier
    sigmas[body.x_offset] = 0.25
    prop = HodgkinHuxleyProposal(T, 1, D, body, sigmas, t, inpt)
    # Set the observation model to observe only the voltage
    etas = np.ones(1)
    observed_dims = np.array([body.x_offset]).astype(np.int32)
    lkhd = PartialGaussianLikelihood(observed_dims, etas)
    # Initialize the latent state matrix to sample N=1 particle
    z = np.zeros((T,1,D))
    z[0,0,:] = init.sample()
    # Initialize the output matrix
    x = np.zeros((T,D))
    # Sample the latent state sequence
    for i in np.arange(0,T-1):
        # The interface kinda sucks. We have to tell it that
        # the first particle is always its ancestor
        prop.sample_next(z, i, np.array([0], dtype=np.int32))
    # Sample observations
    for i in np.arange(0,T):
        lkhd.sample(z,x,i,0)
    # Extract the first (and in this case only) particle
    z = z[:,0,:].copy(order='C')
    # Plot the first particle trajectory
    st_axs, _ = body.plot(t, z, color='k')
    # Plot the observed voltage
    st_axs[0].plot(t, x[:,0], 'r')
    # Plot the GP channel dynamics
    # gp1_fig = plt.figure()
    # gp1_ax1 = gp1_fig.add_subplot(121)
    # gp1.plot(ax=gp1_ax1)
    # gp1_ax2 = gp1_fig.add_subplot(122)
    #
    # gp2_fig = plt.figure()
    # gp2_ax1 = gp2_fig.add_subplot(121)
    # gp2.plot(ax=gp2_ax1)
    # gp2_ax2 = gp2_fig.add_subplot(122)
    plt.ion()
    plt.show()
    plt.pause(0.01)
    return t, z, x, inpt, st_axs
# Now run the pMCMC inference
def sample_z_given_x(t, x, inpt,
z0=None,
initialize='constant',
N_particles=1000,
N_samples=100,
axs=None, gp1_ax=None, gp2_ax=None):
dt = np.diff(t)
T,O = x.shape
# Make a model
body, gp1, gp2, D, I = create_gp_model()
# Set the initial distribution to be Gaussian around the steady state
ss = np.zeros(D)
body.steady_state(ss)
init = GaussianInitialDistribution(ss, 0.1**2 * np.eye(D))
# Set the proposal distribution using Hodgkin Huxley dynamics
# sigmas = np.ones(D)
sigmas = 0.2*np.ones(D)
# Set the voltage transition dynamics to be a bit noisier
# sigmas[body.x_offset] = 0.25
prop = HodgkinHuxleyProposal(T, N_particles, D, body, sigmas, t, inpt)
# Set the observation model to observe only the voltage
etas = np.ones(1)
observed_dims = np.array([body.x_offset]).astype(np.int32)
lkhd = PartialGaussianLikelihood(observed_dims, etas)
# Initialize the latent state matrix to sample N=1 particle
z = np.ones((T,N_particles,D)) * ss[None, None, :] + np.random.randn(T,N_particles,D) * sigmas[None, None, :]
if z0 is not None:
if initialize == 'ground_truth':
logit = lambda zz: np.log(zz/(1-zz))
# Fix the observed voltage
z[:, 0, body.x_offset] = z0[:, 0]
# Fix the Na latent state
m = z0[:,1]
h = z0[:,2]
z[:,0, gp1.x_offset] = logit(np.clip(m**3 *h, 1e-4,1-1e-4))
# Fix the Kdr latent state
n = z0[:,3]
z[:,0, gp2.x_offset] = logit(np.clip(n**4, 1e-4, 1-1e-4))
else:
z[:,0,:] = z0
elif initialize == 'from_model':
# Sample the latent state sequence with the given initial condition
for i in np.arange(0,T-1):
# The interface kinda sucks. We have to tell it that
# the first particle is always its ancestor
prop.sample_next(z, i, np.array([0], dtype=np.int32))
# Fix the observed voltage
z[i+1, 0, body.x_offset] = x[i+1, body.x_offset]
elif initialize == 'optimize':
# By default, optimize the latent state
# Set the voltage...
z[:, 0, body.x_offset] = x[:, body.x_offset]
# Set the initial latent trace
z[1:, 0, 1:] = initial_latent_trace(body, inpt, x[:, 0], t).transpose()
# Set the initial voltage
z[0, 0, 1:] = np.array([0, 0, 0])
else:
# Constant initialization
pass
# Initialize conductance values with MCMC to match the observed voltage...
# body.resample(t, z[:,0,:])
# resample_body(body, t, z[:,0,:], sigmas[0])
#
# if z0 is None:
# # Sample the latent state sequence with the given initial condition
# for i in np.arange(0,T-1):
# # The interface kinda sucks. We have to tell it that
# # the first particle is always its ancestor
# prop.sample_next(z, i, np.array([0], dtype=np.int32))
# Resample the Gaussian processes
# gp1.resample(z[:,0,:], dt)
# gp2.resample(z[:,0,:], dt)
# Prepare the particle Gibbs sampler with the first particle
pf = ParticleGibbsAncestorSampling(T, N_particles, D)
pf.initialize(init, prop, lkhd, x, z[:,0,:].copy('C'))
# Plot the initial state
gp1_ax, im1, l_gp1 = gp1.plot(ax=gp1_ax, data=z[:,0,:])
gp2_ax, im2, l_gp2 = gp2.plot(ax=gp2_ax, data=z[:,0,:])
axs, lines = body.plot(t, z[:,0,:], color='b', axs=axs)
axs[0].plot(t, x[:,0], 'r')
# Update figures
for i in range(1,4):
plt.figure(i)
plt.pause(0.001)
# Initialize sample outputs
z_smpls = np.zeros((N_samples,T,D))
z_smpls[0,:,:] = z[:,0,:]
gp1_smpls = []
gp2_smpls = []
# Resample observation noise
# eta_sqs = resample_observation_noise(z_smpls[0,:,:], x)
# lkhd.set_etasq(eta_sqs)
for s in range(1,N_samples):
print "Iteration %d" % s
# raw_input("Press enter to continue\n")
# Reinitialize with the previous particle
pf.initialize(init, prop, lkhd, x, z_smpls[s-1,:,:])
# Sample a new trajectory given the updated kinetics and the previous sample
z_smpls[s,:,:] = pf.sample()
# z_smpls[s,:,:] = z_smpls[s-1,:,:]
# print "dz: ", (z_smpls[s,:,:] - z_smpls[s-1,:,:]).sum(0)
# Resample the GP
gp1.resample(z_smpls[s,:,:], dt)
gp2.resample(z_smpls[s,:,:], dt)
# Resample the noise levels
sigmasq = resample_transition_noise(body, z_smpls[s,:,:], inpt, t)
# HACK: Fix the voltage transition noise
# sigmasq[0] = 0.5
print "Sigmasq: ", sigmasq
# prop.set_sigmasq(sigmasq)
gp1.set_sigmas(sigmasq)
gp2.set_sigmas(sigmasq)
# gp1.resample_transition_noise(z_smpls[s, :, :], t)
# gp2.resample_transition_noise(z_smpls[s, :, :], t)
# eta_sqs = resample_observation_noise(z_smpls[s,:,:], x)
# lkhd.set_etasq(eta_sqs)
# Resample the conductances
# resample_body(body, t, z_smpls[s,:,:], sigmas[0])
# Plot the sample
body.plot(t, z_smpls[s,:,:], lines=lines)
gp1.plot(im=im1, l=l_gp1, data=z_smpls[s,:,:])
gp2.plot(im=im2, l=l_gp2, data=z_smpls[s,:,:])
# Update figures
for i in range(1,4):
plt.figure(i)
plt.pause(0.001)
gp1_smpls.append(gp1.gps)
gp2_smpls.append(gp2.gps)
z_mean = z_smpls.mean(axis=0)
z_std = z_smpls.std(axis=0)
z_env = np.zeros((T*2,2))
z_env[:,0] = np.concatenate((t, t[::-1]))
z_env[:,1] = np.concatenate((z_mean[:,0] + z_std[:,0], z_mean[::-1,0] - z_std[::-1,0]))
plt.ioff()
plt.show()
return z_smpls, gp1_smpls, gp2_smpls
def resample_transition_noise(body, data, inpt, t,
                              alpha0=100, beta0=100):
    """
    Resample sigmasq, the per-dimension transition noise variance, under an
    inverse-gamma IG(alpha0, beta0) prior.

    body: compartment whose kinetics() evaluates the deterministic drift.
    data: (T, D) latent-state trajectory.
    inpt: input/current trace passed through to kinetics().
    t:    (T,) time grid (may be non-uniform).
    Returns a length-D array of sampled variances.
    """
    T, D = data.shape
    # Evaluate the deterministic dynamics dx/dt at every sample.
    dxdt = np.zeros((T, 1, D))
    x = np.zeros((T, 1, D))
    x[:, 0, :] = data
    body.kinetics(dxdt, x, inpt, np.arange(T - 1).astype(np.int32))
    # Residuals between the predicted and the empirical (finite-difference)
    # dynamics. (Removed the unused Xs/X_preds accumulators and the
    # redundant list-init/np.array round-trip of X_diffs.)
    dt = np.diff(t)
    dX_pred = dxdt[:-1, 0, :]
    dX_data = (data[1:, :] - data[:-1, :]) / dt[:, None]
    X_diffs = dX_pred - dX_data
    # Conjugate inverse-gamma update, sampled independently per dimension:
    # 1/Gamma(alpha0 + n/2, rate = beta0 + sum(resid^2)/2).
    n = X_diffs.shape[0]
    sigmasq = np.zeros(D)
    for d in range(D):
        alpha = alpha0 + n / 2.0
        beta = beta0 + np.sum(X_diffs[:, d] ** 2) / 2.0
        sigmasq[d] = 1.0 / np.random.gamma(alpha, 1.0 / beta)
    return sigmasq
def resample_observation_noise(z, x,
alpha0=1.0, beta0=1.0):
"""
Resample sigma, the transition noise variance, under an inverse gamma prior
"""
# TODO: Iterate over obs dimensions. For now assume 1d
V_pred = z[:,0]
V_data = x[:,0]
V_diff = V_pred - V_data
# Resample transition noise.
n = V_diff.shape[0]
alpha = alpha0 + n / 2.0
beta = beta0 + np.sum(V_diff ** 2) / 2.0
etasq = 1.0 / np.random.gamma(alpha, 1.0/beta)
print "eta V: %.3f" % (etasq)
return np.array([etasq])
from hips.inference.mh import mh
def resample_body(body, ts=[], datas=[], sigma=1.0):
"""
Resample the conductances of this neuron.
"""
assert isinstance(datas, list) or isinstance(datas, np.ndarray)
if isinstance(datas, np.ndarray):
datas = [datas]
if isinstance(ts, np.ndarray):
ts = [ts]
Is = []
dV_dts = []
# Compute I and dV_dt for each dataset
for t,data in zip(ts, datas):
# Compute dV dt
T = data.shape[0]
V = data[:,body.x_offset]
dV_dt = (V[1:] - V[:-1])/(t[1:] - t[:-1])
dV_dts.append(dV_dt[:,None])
# Compute the (unscaled) currents through each channel
I = np.empty((T-1, len(body.children)))
for m,c in enumerate(body.children):
for i in range(T-1):
I[i,m] = c.current(data[:,None,:].copy('C'), V[i], i, 0)
Is.append(I)
# Concatenate values from all datasets
dV_dt = np.vstack(dV_dts)
I = np.vstack(Is)
# Now do a nonnegative regression of dVdt onto I
gs = 0.1 * np.ones(len(body.children))
perm = np.random.permutation(len(body.children))
# Define a helper function to compute the log likelihood and make MH proposals
def _logp(m, gm):
gtmp = gs.copy()
gtmp[m] = gm
dV_dt_pred = I.dot(gtmp)
return (-0.5/sigma * (dV_dt_pred - dV_dt)**2).sum()
# Define a metropolis hastings proposal
def _q(x0, xf):
lx0, lxf = np.log(x0), np.log(xf)
return -0.5 * (lx0-lxf)**2
def _sample_q(x0):
lx0 = np.log(x0)
xf = np.exp(lx0 + np.random.randn())
return xf
# Sample each channel in turn
for m in perm:
gs[m] = mh(gs[m], lambda g: _logp(m, g), _q, _sample_q, steps=10)[-1]
for c,g in zip(body.children, gs):
c.g = g
print "Gs: ", gs
def initial_latent_trace(body, inpt, voltage, t):
    """Finds an initial latent (gating) trace for the channels by gradient
    descent with kayak: minimize the squared error between the predicted
    ionic current and the current implied by the observed voltage, plus a
    GP smoothness penalty on the latent trace and an L2 penalty on the
    per-channel currents.

    Returns the (D, T) sigmoid-transformed latent trace.
    """
    # Current implied by the voltage derivative (I = C * dV/dt).
    I_true = np.diff(voltage) * body.C
    T = I_true.shape[0]
    gs = np.diag([c.g for c in body.children])
    D = int(sum([c.D for c in body.children]))
    driving_voltage = np.dot(np.ones((len(body.children), 1)), np.array([voltage]))[:, :T]
    child_i = 0
    for i in range(D):
        # NOTE(review): child_i is never advanced, so every row is offset by
        # children[0].E -- confirm whether this is intended.
        driving_voltage[i, :] = voltage[:T] - body.children[child_i].E
    # Squared-exponential kernel on time indices: K[i,j] = exp(-(i-j)^2 / 2);
    # the max/transpose trick builds |i-j| without abs.
    K = np.array([[max(i-j, 0) for i in range(T)] for j in range(T)])
    K = K.T + K
    K = -1*(K ** 2)
    K = np.exp(K / 2)
    # Jittered Cholesky; Kinv below is the GP smoothness precision.
    L = np.linalg.cholesky(K + (1e-7) * np.eye(K.shape[0]))
    Linv = scipy.linalg.solve_triangular(L.transpose(), np.identity(K.shape[0]))
    N = 1
    batch_size = 500
    learn = .0000001
    batcher = kayak.Batcher(batch_size, N)
    inputs = kayak.Parameter(driving_voltage)
    targets = kayak.Targets(np.array([I_true]), batcher)
    g_params = kayak.Parameter(gs)
    I_input = kayak.Parameter(inpt.T[:, :T])
    Kinv = kayak.Parameter(np.dot(Linv.transpose(), Linv))
    initial_latent = np.random.randn(D, T)
    latent_trace = kayak.Parameter(initial_latent)
    sigmoid = kayak.Logistic(latent_trace)
    # The selector matrices below combine the 3 sigmoid rows into per-channel
    # open fractions: rows 1*2 multiplied (Na-like product of two gates)...
    quadratic = kayak.ElemMult(
        sigmoid,
        kayak.MatMult(
            kayak.Parameter(np.array([[0, 1, 0],
                                      [0, 0, 0],
                                      [0, 0, 0]])),
            sigmoid
        )
    )
    three_quadratic = kayak.MatMult(
        kayak.Parameter(np.array([[0, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 0]])),
        quadratic
    )
    # ...and row 3 used linearly (K-like single gate).
    linear = kayak.MatMult(
        kayak.Parameter(np.array([[0, 0, 0],
                                  [0, 0, 0],
                                  [0, 0, 1]])),
        sigmoid
    )
    leak_open = kayak.Parameter(np.vstack((np.ones((1, T)), np.ones((2, T)))))
    open_fractions = kayak.ElemAdd(leak_open, kayak.ElemAdd(three_quadratic, linear))
    # Per-channel currents: g * (V - E) * open_fraction; summed plus the
    # injected current gives the predicted total current.
    I_channels = kayak.ElemMult(
        kayak.MatMult(g_params, inputs),
        open_fractions
    )
    I_ionic = kayak.MatMult(
        kayak.Parameter(np.array([[1, 1, 1]])),
        I_channels
    )
    predicted = kayak.MatAdd(I_ionic, I_input)
    nll = kayak.ElemPower(predicted - targets, 2)
    # hack_vec picks the diagonal of the 3x3 matrix L' Kinv L, i.e. the GP
    # smoothness penalty of each latent row.
    hack_vec = kayak.Parameter(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]))
    kyk_loss = kayak.MatSum(nll) + kayak.MatMult(
        kayak.Reshape(
            kayak.MatMult(
                kayak.MatMult(latent_trace, Kinv),
                kayak.Transpose(latent_trace)
            ),
            (9,)
        ),
        hack_vec
    ) + kayak.MatSum(kayak.ElemPower(I_channels, 2))
    grad = kyk_loss.grad(latent_trace)
    for ii in xrange(5000):
        for batch in batcher:
            loss = kyk_loss.value
            if ii % 100 == 0:
                print loss, np.sum(np.power(predicted.value - I_true, 2)) / T
            # Momentum-style update: fresh gradient plus half the previous one.
            grad = kyk_loss.grad(latent_trace) + .5 * grad
            latent_trace.value -= learn * grad
    return sigmoid.value
# Sample data from either a GP model or a squid compartment
# t, z, x, inpt, st_axs = sample_gp_model()
t, z, x, inpt, st_axs = sample_squid_model()
"""
raw_input("Press enter to being sampling...\n")
sample_z_given_x(t, x, inpt, axs=st_axs, initialize='optimize')
"""
# raw_input("Press enter to being sampling...\n")
# sample_z_given_x(t, x, inpt, z0=z, axs=st_axs)
# Run the particle MCMC inference and pickle the resulting samples.
z_smpls, gp1_smpls, gp2_smpls = sample_z_given_x(t, x, inpt, N_samples=100, axs=st_axs, initialize='constant')
# sample_z_given_x(t, x, inpt, axs=st_axs, z0=z, initialize='ground_truth')
# sample_z_given_x(t, x, inpt, axs=st_axs, initialize='optimize')
with open('squid_results.pkl', 'w') as f:
    cPickle.dump((z_smpls, gp1_smpls, gp2_smpls), f, protocol=-1)
| HIPS/optofit | examples/my_two_gp_demo.py | Python | gpl-2.0 | 20,290 | [
"Gaussian",
"NEURON"
] | fcc489bf07abffe31a4702fcd67f934d622051188462cb505d92ff0220933440 |
#!/usr/bin/env python
""" Enable using one or more Storage Elements
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
read = False
write = False
check = False
remove = False
site = ''
mute = False
Script.setUsageMessage("""
Enable using one or more Storage Elements
Usage:
%s SE1 [SE2 ...]
""" % Script.scriptName)
Script.registerSwitch("r", "AllowRead", " Allow only reading from the storage element")
Script.registerSwitch("w", "AllowWrite", " Allow only writing to the storage element")
Script.registerSwitch("k", "AllowCheck", " Allow only check access to the storage element")
Script.registerSwitch("v", "AllowRemove", " Allow only remove access to the storage element")
Script.registerSwitch("a", "All", " Allow all access to the storage element")
Script.registerSwitch("m", "Mute", " Do not send email")
Script.registerSwitch("S:", "Site=", " Allow all SEs associated to site")
Script.parseCommandLine(ignoreErrors=True)
ses = Script.getPositionalArgs()
for switch in Script.getUnprocessedSwitches():
if switch[0].lower() in ("r", "allowread"):
read = True
if switch[0].lower() in ("w", "allowwrite"):
write = True
if switch[0].lower() in ("k", "allowcheck"):
check = True
if switch[0].lower() in ("v", "allowremove"):
remove = True
if switch[0].lower() in ("a", "all"):
read = True
write = True
check = True
remove = True
if switch[0].lower() in ("m", "mute"):
mute = True
if switch[0].lower() in ("s", "site"):
site = switch[1]
# from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC import gConfig, gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import resolveSEGroup
if not (read or write or check or remove):
# No switch was specified, means we need all of them
gLogger.notice("No option given, all accesses will be allowed if they were not")
read = True
write = True
check = True
remove = True
ses = resolveSEGroup(ses)
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
setup = gConfig.getValue('/DIRAC/Setup', '')
if not setup:
print 'ERROR: Could not contact Configuration Service'
exitCode = 2
DIRAC.exit(exitCode)
res = getProxyInfo()
if not res['OK']:
gLogger.error('Failed to get proxy information', res['Message'])
DIRAC.exit(2)
userName = res['Value'].get('username')
if not userName:
gLogger.error('Failed to get username for proxy')
DIRAC.exit(2)
if site:
res = gConfig.getOptionsDict('/Resources/Sites/LCG/%s' % site)
if not res['OK']:
gLogger.error('The provided site (%s) is not known.' % site)
DIRAC.exit(-1)
ses.extend(res['Value']['SE'].replace(' ', '').split(','))
if not ses:
gLogger.error('There were no SEs provided')
DIRAC.exit()
STATUS_TYPES = ["ReadAccess", "WriteAccess", "CheckAccess", "RemoveAccess"]
ALLOWED_STATUSES = ["Unknown", "InActive", "Banned", "Probing", "Degraded"]
statusAllowedDict = {}
for statusType in STATUS_TYPES:
statusAllowedDict[statusType] = []
statusFlagDict = {}
statusFlagDict['ReadAccess'] = read
statusFlagDict['WriteAccess'] = write
statusFlagDict['CheckAccess'] = check
statusFlagDict['RemoveAccess'] = remove
resourceStatus = ResourceStatus()
res = resourceStatus.getElementStatus(ses, "StorageElement")
if not res['OK']:
gLogger.error('Storage Element %s does not exist' % ses)
DIRAC.exit(-1)
reason = 'Forced with dirac-admin-allow-se by %s' % userName
for se, seOptions in res['Value'].iteritems():
# InActive is used on the CS model, Banned is the equivalent in RSS
for statusType in STATUS_TYPES:
if statusFlagDict[statusType]:
if seOptions.get(statusType) == "Active":
gLogger.notice('%s status of %s is already Active' % (statusType, se))
continue
if statusType in seOptions:
if not seOptions[statusType] in ALLOWED_STATUSES:
gLogger.notice('%s option for %s is %s, instead of %s' %
(statusType, se, seOptions['ReadAccess'], ALLOWED_STATUSES))
gLogger.notice('Try specifying the command switches')
else:
resR = resourceStatus.setElementStatus(se, "StorageElement", statusType, 'Active', reason, userName)
if not resR['OK']:
gLogger.fatal("Failed to update %s %s to Active, exit -" % (se, statusType), resR['Message'])
DIRAC.exit(-1)
else:
gLogger.notice("Successfully updated %s %s to Active" % (se, statusType))
statusAllowedDict[statusType].append(se)
totalAllowed = 0
totalAllowedSEs = []
for statusType in STATUS_TYPES:
totalAllowed += len(statusAllowedDict[statusType])
totalAllowedSEs += statusAllowedDict[statusType]
totalAllowedSEs = list(set(totalAllowedSEs))
if not totalAllowed:
gLogger.info("No storage elements were allowed")
DIRAC.exit(-1)
if mute:
gLogger.notice('Email is muted by script switch')
DIRAC.exit(0)
subject = '%s storage elements allowed for use' % len(totalAllowedSEs)
addressPath = 'EMail/Production'
address = Operations().getValue(addressPath, '')
body = ''
if read:
body = "%s\n\nThe following storage elements were allowed for reading:" % body
for se in statusAllowedDict['ReadAccess']:
body = "%s\n%s" % (body, se)
if write:
body = "%s\n\nThe following storage elements were allowed for writing:" % body
for se in statusAllowedDict['WriteAccess']:
body = "%s\n%s" % (body, se)
if check:
body = "%s\n\nThe following storage elements were allowed for checking:" % body
for se in statusAllowedDict['CheckAccess']:
body = "%s\n%s" % (body, se)
if remove:
body = "%s\n\nThe following storage elements were allowed for removing:" % body
for se in statusAllowedDict['RemoveAccess']:
body = "%s\n%s" % (body, se)
if not address:
gLogger.notice("'%s' not defined in Operations, can not send Mail\n" % addressPath, body)
DIRAC.exit(0)
res = diracAdmin.sendMail(address, subject, body)
gLogger.notice('Notifying %s' % address)
if res['OK']:
gLogger.notice(res['Value'])
else:
gLogger.notice(res['Message'])
DIRAC.exit(0)
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| andresailer/DIRAC | DataManagementSystem/scripts/dirac-admin-allow-se.py | Python | gpl-3.0 | 6,499 | [
"DIRAC"
] | 037e8fb63c03c24e80fb62eb11c8dafa8cb7050ae558e22a5639ac7ad512cf0e |
from __future__ import division
import numpy as np
from pyhsmm.internals.hmm_states import HMMStatesPython, HMMStatesEigen
from pyhsmm.internals.hsmm_states import HSMMStatesPython, HSMMStatesEigen, \
GeoHSMMStates
from autoregressive.util import AR_striding
from pylds.lds_messages_interface import filter_and_sample
class _SLDSStatesMixin(object):
    """Mixin adding conditionally-linear-Gaussian state resampling on top of
    a discrete HMM/HSMM state-sequence class (switching LDS states).

    Expects the companion base class to provide resample(), generate_states(),
    stateseq, num_states and T, and the owning model to provide the
    dynamics/emission/init distributions.
    """

    def resample(self,niter=1):
        # Block-Gibbs sweep: alternate discrete and continuous state updates.
        for itr in xrange(niter):
            self.resample_discrete_states()
            self.resample_gaussian_states()

    ## resampling discrete states

    def resample_discrete_states(self):
        # Delegate to the discrete-state sampler of the companion base class;
        # it consumes the aBl likelihoods computed below.
        super(_SLDSStatesMixin,self).resample()

    @property
    def aBl(self):
        """Cached (T, num_states) log-likelihood table of the current
        Gaussian state sequence under each discrete state's dynamics."""
        if self._aBl is None:
            aBl = self._aBl = np.empty(
                (self.gaussian_states.shape[0],self.num_states))
            # Row 0 is scored under the initial dynamics distributions.
            for idx, distn in enumerate(self.init_dynamics_distns):
                aBl[0,idx] = distn.log_likelihood(self.gaussian_states[0])
            # Remaining rows are scored under the AR dynamics on lag-1 pairs.
            for idx, distn in enumerate(self.dynamics_distns):
                aBl[1:,idx] = distn.log_likelihood(
                    self.strided_gaussian_states)
            # Zero out any row containing NaNs so it is uninformative.
            aBl[np.isnan(aBl).any(1)] = 0.
        return self._aBl

    ## resampling conditionally Gaussian dynamics

    def resample_gaussian_states(self):
        """Sample the continuous states given the discrete sequence via
        Kalman filtering and backward sampling (pylds)."""
        self._aBl = None # clear any caching
        self._normalizer, self.gaussian_states = filter_and_sample(
            self.mu_init, self.sigma_init,
            self.As, self.BBTs, self.Cs, self.DDTs,
            self.data)

    @property
    def strided_gaussian_states(self):
        # Lag-1 stacked states [x_t, x_{t+1}] for the AR dynamics likelihoods.
        return AR_striding(self.gaussian_states,1)

    ## generation

    def generate_states(self):
        # Generate the discrete sequence first, then the continuous states.
        super(_SLDSStatesMixin,self).generate_states()
        self.generate_gaussian_states()

    def generate_gaussian_states(self):
        # TODO this is dumb, but generating from the prior will be unstable
        self.gaussian_states = np.random.normal(size=(self.T,self.D_latent))

    def generate_obs(self):
        raise NotImplementedError

    ## convenience properties

    @property
    def D_latent(self):
        # Dimension of the continuous latent state.
        return self.dynamics_distns[0].D

    @property
    def D_emission(self):
        # Dimension of the observations.
        return self.emission_distns[0].D

    @property
    def dynamics_distns(self):
        return self.model.dynamics_distns

    @property
    def emission_distns(self):
        return self.model.emission_distns

    @property
    def init_dynamics_distns(self):
        return self.model.init_dynamics_distns

    @property
    def mu_init(self):
        # Initial mean chosen by the first discrete state.
        return self.init_dynamics_distns[self.stateseq[0]].mu

    @property
    def sigma_init(self):
        # Initial covariance chosen by the first discrete state.
        return self.init_dynamics_distns[self.stateseq[0]].sigma

    @property
    def As(self):
        # Per-timestep dynamics matrices selected by the discrete sequence.
        Aset = np.concatenate([d.A[None,...] for d in self.dynamics_distns])
        return Aset[self.stateseq]

    @property
    def BBTs(self):
        # Per-timestep dynamics noise covariances.
        Bset = np.concatenate([d.sigma[None,...] for d in self.dynamics_distns])
        return Bset[self.stateseq]

    @property
    def Cs(self):
        # Per-timestep emission matrices.
        Cset = np.concatenate([d.A[None,...] for d in self.emission_distns])
        return Cset[self.stateseq]

    @property
    def DDTs(self):
        # Per-timestep emission noise covariances.
        Dset = np.concatenate([d.sigma[None,...] for d in self.emission_distns])
        return Dset[self.stateseq]
class HMMSLDSStatesPython(_SLDSStatesMixin,HMMStatesPython):
    """SLDS states combining _SLDSStatesMixin with HMMStatesPython."""
    pass


class HMMSLDSStatesEigen(_SLDSStatesMixin,HMMStatesEigen):
    """SLDS states combining _SLDSStatesMixin with HMMStatesEigen."""
    pass


class HSMMSLDSStatesPython(_SLDSStatesMixin,HSMMStatesPython):
    """SLDS states combining _SLDSStatesMixin with HSMMStatesPython."""
    pass


class HSMMSLDSStatesEigen(_SLDSStatesMixin,HSMMStatesEigen):
    """SLDS states combining _SLDSStatesMixin with HSMMStatesEigen."""
    pass


class GeoHSMMSLDSStates(_SLDSStatesMixin,GeoHSMMStates):
    """SLDS states combining _SLDSStatesMixin with GeoHSMMStates."""
    pass
| fivejjs/pyhsmm-slds | pyslds/states.py | Python | mit | 3,637 | [
"Gaussian"
] | 27e7319ac33d7e32743d002de316708839e349e5d44658bd3e67006874fc1f1d |
########################################################################
# $Id$
# File : TorqueComputingElement.py
# Author : Stuart Paterson, Paul Szczypka
########################################################################
""" The simplest Computing Element instance that submits jobs locally.
"""
__RCSID__ = "$Id$"
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC import S_OK, S_ERROR
from DIRAC import systemCall, rootPath
from DIRAC import gConfig
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
import os, sys, time, re, socket
import string, shutil, bz2, base64, tempfile
# Type name of this computing element.
CE_NAME = 'Torque'
# Optional configuration parameters recognised by this CE.
UsedParameters = [ 'ExecQueue', 'SharedArea', 'BatchOutput', 'BatchError', 'UserName' ]
# Configuration parameters that must be present.
MandatoryParameters = [ 'Queue' ]
class TorqueComputingElement( ComputingElement ):
  """ Computing element for direct job submission to a Torque/PBS batch
      system via the local qsub/qstat commands.
  """
  mandatoryParameters = MandatoryParameters

  #############################################################################
  def __init__( self, ceUniqueID ):
    """ Standard constructor.

        :param ceUniqueID: unique name of this CE instance
    """
    ComputingElement.__init__( self, ceUniqueID )

    self.submittedJobs = 0
    # Submission queue; ExecQueue may differ on sites with routing queues.
    self.queue = self.ceConfigDict['Queue']
    self.execQueue = self.ceConfigDict['ExecQueue']
    self.log.info( "Using queue: ", self.queue )

    self.hostname = socket.gethostname()
    self.sharedArea = self.ceConfigDict['SharedArea']
    self.batchOutput = self.ceConfigDict['BatchOutput']
    self.batchError = self.ceConfigDict['BatchError']
    self.userName = self.ceConfigDict['UserName']

    # By default batch stdout/stderr files are deleted after retrieval;
    # can be disabled with RemoveOutput = no/false/0.
    self.removeOutput = True
    if 'RemoveOutput' in self.ceParameters:
      if self.ceParameters['RemoveOutput'].lower() in ['no', 'false', '0']:
        self.removeOutput = False

  #############################################################################
  def _addCEConfigDefaults( self ):
    """Method to make sure all necessary Configuration Parameters are defined
    """
    # First assure that any global parameters are loaded
    ComputingElement._addCEConfigDefaults( self )
    # Now Torque specific ones
    if 'ExecQueue' not in self.ceConfigDict:
      self.ceConfigDict['ExecQueue'] = self.ceConfigDict['Queue']
    if 'SharedArea' not in self.ceConfigDict:
      self.ceConfigDict['SharedArea'] = ''
    if 'UserName' not in self.ceConfigDict:
      self.ceConfigDict['UserName'] = ''
    if 'BatchOutput' not in self.ceConfigDict:
      self.ceConfigDict['BatchOutput'] = os.path.join( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), 'data' )
    if 'BatchError' not in self.ceConfigDict:
      self.ceConfigDict['BatchError'] = os.path.join( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), 'data' )

  #############################################################################
  def makeProxyExecutableFile( self, executableFile, proxy ):
    """ Make a single self-extracting wrapper script bundling together
        executableFile and proxy, to be submitted to the batch system.

        :param executableFile: path of the payload executable
        :param proxy: proxy string to ship to the worker node
        :return: path of the generated wrapper script
    """
    compressedAndEncodedProxy = base64.encodestring( bz2.compress( proxy ) ).replace( '\n', '' )
    compressedAndEncodedExecutable = base64.encodestring( bz2.compress( open( executableFile, "rb" ).read(), 9 ) ).replace( '\n', '' )

    # Bugfix: the generated wrapper used shutil.rmtree without importing
    # shutil, so the working directory cleanup always failed with a
    # NameError; shutil is now imported in the wrapper as well.
    wrapperContent = """#!/usr/bin/env python
# Wrapper script for executable and proxy
import os, shutil, tempfile, sys, base64, bz2
try:
  workingDirectory = tempfile.mkdtemp( suffix = '_wrapper', prefix= 'TORQUE_' )
  os.chdir( workingDirectory )
  open( 'proxy', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedProxy)s" ) ) )
  open( '%(executable)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedExecutable)s" ) ) )
  os.chmod('proxy',0600)
  os.chmod('%(executable)s',0700)
  os.environ["X509_USER_PROXY"]=os.path.join(workingDirectory, 'proxy')
except Exception, x:
  print >> sys.stderr, x
  sys.exit(-1)
cmd = "%(executable)s"
print 'Executing: ', cmd
sys.stdout.flush()
os.system( cmd )
shutil.rmtree( workingDirectory )
""" % { 'compressedAndEncodedProxy': compressedAndEncodedProxy, \
        'compressedAndEncodedExecutable': compressedAndEncodedExecutable, \
        'executable': os.path.basename( executableFile ) }

    fd, name = tempfile.mkstemp( suffix = '_wrapper.py', prefix = 'TORQUE_', dir = os.getcwd() )
    wrapper = os.fdopen( fd, 'w' )
    wrapper.write( wrapperContent )
    wrapper.close()

    return name

  #############################################################################
  def submitJob( self, executableFile, proxy, numberOfJobs = 1 ):
    """ Submit numberOfJobs copies of executableFile via qsub.

        :param executableFile: path of the payload executable
        :param proxy: proxy string; if set, a wrapper script is generated
        :param numberOfJobs: number of identical submissions to perform
        :return: S_OK with the list of batch job IDs, or S_ERROR
    """
    self.log.info( "Executable file path: %s" % executableFile )
    # Make sure the payload is readable and executable (5 = R_OK | X_OK).
    if not os.access( executableFile, 5 ):
      os.chmod( executableFile, 0o755 )

    # Perform any other actions from the site admin
    if 'AdminCommands' in self.ceParameters:
      commands = self.ceParameters['AdminCommands'].split( ';' )
      for command in commands:
        self.log.verbose( 'Executing site admin command: %s' % command )
        result = shellCall( 30, command, callbackFunction = self.sendOutput )
        if not result['OK'] or result['Value'][0]:
          self.log.error( 'Error during "%s":' % command, result )
          return S_ERROR( 'Error executing %s CE AdminCommands' % CE_NAME )

    # if no proxy is supplied, the executable can be submitted directly
    # otherwise a wrapper script is needed to get the proxy to the execution node
    # The wrapper script makes debugging more complicated and thus it is
    # recommended to transfer a proxy inside the executable if possible.
    if proxy:
      self.log.verbose( 'Setting up proxy for payload' )
      submitFile = self.makeProxyExecutableFile( executableFile, proxy )
    else: # no proxy
      submitFile = executableFile

    # submit submitFile to the batch system
    cmd = "qsub -o %(output)s -e %(error)s -q %(queue)s -N DIRACPilot %(executable)s" % \
          {'output': self.batchOutput, \
           'error': self.batchError, \
           'queue': self.queue, \
           'executable': os.path.abspath( submitFile ) }

    self.log.verbose( 'CE submission command: %s' % ( cmd ) )

    batchIDList = []
    for i in range( numberOfJobs ):
      result = shellCall( 30, cmd )
      if not result['OK'] or result['Value'][0]:
        self.log.warn( '===========>Torque CE result NOT OK' )
        self.log.debug( result )
        return S_ERROR( result['Value'] )
      else:
        self.log.debug( 'Torque CE result OK' )
        # qsub prints the batch job ID on stdout.
        batchID = result['Value'][1].strip()
        batchIDList.append( batchID )
        self.submittedJobs += 1

    return S_OK( batchIDList )

  #############################################################################
  def getCEStatus( self ):
    """ Method to return information on running and pending jobs.

        :return: S_OK dict with SubmittedJobs, WaitingJobs and RunningJobs
    """
    result = S_OK()
    result['SubmittedJobs'] = self.submittedJobs

    cmd = ["qstat", "-Q" , self.execQueue ]
    if self.userName:
      # With a user name we can count this user's jobs individually.
      cmd = [ "qstat", "-u", self.userName, self.execQueue ]

    ret = systemCall( 10, cmd )
    if not ret['OK']:
      self.log.error( 'Timeout', ret['Message'] )
      return ret

    status = ret['Value'][0]
    stdout = ret['Value'][1]
    stderr = ret['Value'][2]

    self.log.debug( "status:", status )
    self.log.debug( "stdout:", stdout )
    self.log.debug( "stderr:", stderr )

    if status:
      self.log.error( 'Failed qstat execution:', stderr )
      return S_ERROR( stderr )

    if self.userName:
      # Parse qstat -u userName queueName
      runningJobs = 0
      waitingJobs = 0
      lines = stdout.replace( '\r', '' ).split( '\n' )
      for line in lines:
        if not line:
          continue
        if line.find( self.userName ) != -1:
          if 'R' == line.split( ' ' )[-2]:
            runningJobs += 1
          else:
            # every other status to assimilate to Waiting
            waitingJobs += 1
    else:
      # parse qstat -Q queueName
      matched = re.search( self.queue + "\D+(\d+)\D+(\d+)\W+(\w+)\W+(\w+)\D+(\d+)\D+(\d+)\D+(\d+)\D+(\d+)\D+(\d+)\D+(\d+)\W+(\w+)", stdout )
      # Bugfix: the old check 'matched.groups < 6' compared the bound method
      # object to an int and was never a useful test; it also crashed with an
      # AttributeError when re.search found no match (matched is None).
      if not matched or len( matched.groups() ) < 6:
        return S_ERROR( "Error retrieving information from qstat:" + stdout + stderr )
      try:
        waitingJobs = int( matched.group( 5 ) )
        runningJobs = int( matched.group( 6 ) )
      except ValueError:
        return S_ERROR( "Error retrieving information from qstat:" + stdout + stderr )

    result['WaitingJobs'] = waitingJobs
    result['RunningJobs'] = runningJobs

    self.log.verbose( 'Waiting Jobs: ', waitingJobs )
    self.log.verbose( 'Running Jobs: ', runningJobs )

    return result

  #############################################################################
  def getJobStatus( self, jobIDList ):
    """ Get the status information for the given list of jobs.

        :param jobIDList: list of full batch job IDs
        :return: S_OK dict mapping job ID to Done/Running/Waiting/Unknown
    """
    jobDict = {}
    for job in jobIDList:
      if not job:
        continue
      # Key by the numeric part of the batch ID, which is what qstat prints.
      jobNumber = job.split( '.' )[0]
      jobDict[jobNumber] = job

    cmd = [ 'qstat' ] + jobIDList
    result = systemCall( 10, cmd )
    if not result['OK']:
      return result

    resultDict = {}
    output = result['Value'][1].replace( '\r', '' )
    lines = output.split( '\n' )
    for job in jobDict:
      resultDict[jobDict[job]] = 'Unknown'
      for line in lines:
        if line.find( job ) != -1:
          if line.find( 'Unknown' ) != -1:
            resultDict[jobDict[job]] = 'Unknown'
          else:
            # Map Torque single-letter states onto DIRAC pilot states.
            torqueStatus = line.split()[4]
            if torqueStatus in ['E', 'C']:
              resultDict[jobDict[job]] = 'Done'
            elif torqueStatus in ['R']:
              resultDict[jobDict[job]] = 'Running'
            elif torqueStatus in ['S', 'W', 'Q', 'H', 'T']:
              resultDict[jobDict[job]] = 'Waiting'

    return S_OK( resultDict )

  #############################################################################
  def getJobOutput( self, jobID, localDir = None ):
    """ Get the specified job standard output and error files. If the localDir is provided,
        the output is returned as file in this directory. Otherwise, the output is returned
        as strings.
    """
    jobNumber = jobID.split( '.' )[0]

    # Find the output files by the job number embedded in their names.
    outFile = ''
    outNames = os.listdir( self.batchOutput )
    for outName in outNames:
      if outName.find( jobNumber ) != -1:
        outFile = os.path.join( self.batchOutput, outName )
        break

    errFile = ''
    errNames = os.listdir( self.batchError )
    for errName in errNames:
      if errName.find( jobNumber ) != -1:
        errFile = os.path.join( self.batchError, errName )
        break

    if localDir:
      if outFile:
        doutFile = os.path.join( localDir, os.path.basename( outFile ) )
        shutil.copyfile( outFile, doutFile )
      if errFile:
        derrFile = os.path.join( localDir, os.path.basename( errFile ) )
        shutil.copyfile( errFile, derrFile )
    else:
      # Return the output as a string
      output = ''
      error = ''
      if outFile:
        outputFile = open( outFile, 'r' )
        output = outputFile.read()
        outputFile.close()
      if errFile:
        outputFile = open( errFile, 'r' )
        error = outputFile.read()
        outputFile.close()

    # Bugfix: the removal used to happen before the string-return branch read
    # the files, deleting them prior to the open(); remove only after the
    # content has been copied or read.
    if self.removeOutput:
      os.system( 'rm -f %s/*%s* %s/*%s*' % ( self.batchOutput, jobNumber, self.batchError, jobNumber ) )

    if localDir:
      if outFile and errFile:
        return S_OK( ( doutFile, derrFile ) )
      else:
        return S_ERROR( 'Output files not found' )
    else:
      return S_OK( ( output, error ) )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| calancha/DIRAC | Resources/Computing/TorqueComputingElement.py | Python | gpl-3.0 | 11,733 | [
"DIRAC"
] | 1a2fda22a4cfdd7404e28c7b3ae62e2f676822c56689919dee9ec7959fd73fdb |
from ..utils import floor_threshold, Timing
from ..utils import shuffle as shuffle_data
from multiprocessing import cpu_count
import pandas as pd
import numpy as np
from msmbuilder.featurizer import (AlphaAngleFeaturizer, ContactFeaturizer,
DihedralFeaturizer)
class BaseMetric(object):
    """Base metric object.

    Subclasses implement _extract_data (featurize a trajectory into a
    DataFrame with a two-level column index) and _exec (compute the raw
    scoring matrix from self.shuffled_data).
    """

    def _shuffle(self):
        # Re-permute the working copy of the data for a new null sample.
        self.shuffled_data = shuffle_data(self.shuffled_data)

    def _extract_data(self, traj):
        # Implemented by subclasses: featurize traj into a DataFrame.
        pass

    def _before_exec(self, traj):
        # Cache the featurized data and the first-level column labels
        # (residue ids in the concrete metrics) before scoring.
        self.data = self._extract_data(traj)
        self.shuffled_data = self.data
        self.labels = np.unique(self.data.columns.levels[0])

    def _exec(self):
        # Implemented by subclasses: compute the raw scoring matrix.
        pass

    def _floored_exec(self):
        # Clamp the raw scores with the project's floor_threshold helper.
        return floor_threshold(self._exec())

    def partial_transform(self, traj, shuffle=0, verbose=False):
        """Transform a single mdtraj.Trajectory into an array of metric scores.

        Parameters
        ----------
        traj : mdtraj.Trajectory
            Trajectory to transform
        shuffle : int
            Number of shuffle iterations (default: 0)
        verbose : bool
            Whether to display performance

        Returns
        -------
        result : np.ndarray
            Scoring matrix
        """
        self._before_exec(traj)
        result = self._floored_exec()

        # Null-model correction: average the score over `shuffle` random
        # permutations of the data and subtract it from the raw score.
        correction = np.zeros_like(result)
        for i in range(shuffle):
            with Timing(i, verbose=verbose):
                self._shuffle()
                correction += self._floored_exec()

        # Bugfix: with shuffle=0 (the default) the old code computed
        # correction / 0, raising elementwise 0/0 RuntimeWarnings that
        # nan_to_num then silently masked; the result is identical to the
        # uncorrected score, so return that directly.
        if not shuffle:
            return floor_threshold(result)
        return floor_threshold(result - np.nan_to_num(correction / shuffle))

    def transform(self, trajs, shuffle=0, verbose=False):
        """Invokes partial_transform over a list of mdtraj.Trajectory objects

        Parameters
        ----------
        trajs : list
            List of trajectories to transform
        shuffle : int
            Number of shuffle iterations (default: 0)
        verbose : bool
            Whether to display performance

        Returns
        -------
        result : array_like
            Lazy generator of scoring matrices, one per trajectory
        """
        for traj in trajs:
            yield self.partial_transform(traj, shuffle=shuffle,
                                         verbose=verbose)

    def __init__(self, n_bins=3, rng=None, method='knn',
                 threads=None):
        """
        Parameters
        ----------
        n_bins : int
            Number of histogram bins used by the entropy estimators
        rng : list or None
            Value range for binning (subclasses may supply a default)
        method : str
            Entropy estimation method name (default: 'knn')
        threads : int or None
            Worker count; defaults to half the available CPUs
        """
        self.data = None
        self.shuffled_data = None
        self.labels = None
        self.n_bins = n_bins
        self.rng = rng
        self.method = method
        self.n_threads = threads or int(cpu_count() / 2)
class DihedralBaseMetric(BaseMetric):
    """Base dihedral metric object.

    Featurizes a trajectory into raw dihedral angles, one DataFrame column
    group per angle type, labeled by residue index.
    """

    def _featurizer(self, **kwargs):
        # Raw angles (sincos=False) so the values can be binned directly.
        return DihedralFeaturizer(sincos=False, **kwargs)

    def _extract_data(self, traj):
        data = []
        for tp in self.types:
            featurizer = self._featurizer(types=[tp])
            angles = featurizer.partial_transform(traj)
            summary = featurizer.describe_features(traj)
            # Label each dihedral by the residue index of its second atom.
            idx = [[traj.topology.atom(ati).residue.index
                    for ati in item['atominds']][1] for item in summary]
            # Shift angles from (-pi, pi] into [0, 2*pi) for histogramming.
            data.append(pd.DataFrame((angles + np.pi) % (2. * np.pi),
                                     columns=[idx, len(idx) * [tp]]))
        return pd.concat(data, axis=1)

    def __init__(self, types=None, rng=None, **kwargs):
        self.types = types or ['phi', 'psi']
        # Bugfix: self.rng used to be assigned here and then immediately
        # overwritten with None by BaseMetric.__init__ (whose rng parameter
        # defaults to None); forward the resolved range to the parent so the
        # [0, 2*pi) default actually survives construction.
        super(DihedralBaseMetric, self).__init__(rng=rng or [0., 2 * np.pi],
                                                 **kwargs)
class AlphaAngleBaseMetric(DihedralBaseMetric):
    """Base alpha angle metric object.

    Restricts the dihedral machinery to the 'alpha' angle type.
    """

    def _featurizer(self, **kwargs):
        # AlphaAngleFeaturizer takes no type selection; the types=[...] kwarg
        # passed by DihedralBaseMetric._extract_data is deliberately ignored.
        return AlphaAngleFeaturizer(sincos=False)

    def __init__(self, **kwargs):
        # Bugfix: assigning self.types = ['alpha'] before calling the parent
        # constructor had no effect, because DihedralBaseMetric.__init__
        # re-assigns self.types from its 'types' argument (defaulting to
        # ['phi', 'psi']). Pass the type through the constructor instead,
        # still letting an explicit caller-supplied 'types' win.
        kwargs.setdefault('types', ['alpha'])
        super(AlphaAngleBaseMetric, self).__init__(**kwargs)
class ContactBaseMetric(BaseMetric):
    """Base contact metric object"""

    def _extract_data(self, traj):
        # Featurize residue-residue distances, then regroup the flat pair
        # list into a two-level column index of (partner residue, residue)
        # so each residue gets one column block of its contact distances.
        contact = ContactFeaturizer(contacts=self.contacts, scheme=self.scheme,
                                    ignore_nonprotein=self.ignore_nonprotein)
        distances = contact.partial_transform(traj)
        summary = contact.describe_features(traj)
        pairs = [item['resids'] for item in summary]
        resids = np.unique(pairs)
        data = []
        for resid in resids:
            # Partner residue of each pair that involves `resid`.
            idx = list(list(set(pair) - {resid})[0]
                       for pair in pairs if resid in pair)
            # Boolean mask selecting the distance columns for those pairs.
            mapping = np.array([True if resid in pair else False
                                for pair in pairs])
            data.append(pd.DataFrame(distances[:, mapping],
                                     columns=[idx, len(idx) * [resid]]))
        return pd.concat(data, axis=1)

    def __init__(self, contacts='all', scheme='closest-heavy',
                 ignore_nonprotein=True, **kwargs):
        # contacts/scheme/ignore_nonprotein are forwarded verbatim to
        # msmbuilder's ContactFeaturizer.
        self.contacts = contacts
        self.scheme = scheme
        self.ignore_nonprotein = ignore_nonprotein
        super(ContactBaseMetric, self).__init__(**kwargs)
| msmbuilder/mdentropy | mdentropy/metrics/base.py | Python | mit | 5,007 | [
"MDTraj"
] | 58bb85d06caf1f85a5987ca179a0b228015b6ead78bdd1fab0fcaf2f88a2ccce |
#!/usr/bin/python
# Copyright (C) 2012-2015 ASTRON (Netherlands Institute for Radio Astronomy)
# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
# $Id$
# TODO: add comments to methods
# TODO: code cleanup
# TODO: scraper should be able to process each directory more than once. Requires changes in store.py
import subprocess
import logging
import time
import datetime
import sys
import os
import os.path
import threading
import multiprocessing
from ltastorageoverview import store
from ltastorageoverview.utils import humanreadablesize
from random import random
# Root logger configuration: DEBUG to stdout/stderr; the commented variant
# logs to a dated file instead.
#logging.basicConfig(filename='scraper.' + time.strftime("%Y-%m-%d") + '.log', level=logging.DEBUG, format="%(asctime)-15s %(levelname)s %(message)s")
logging.basicConfig(level=logging.DEBUG, format="%(asctime)-15s %(levelname)s %(message)s")
logger = logging.getLogger()
class FileInfo:
    '''Value object describing one remote file: name, size and creation time.'''

    def __init__(self, filename, size, created_at):
        '''
        Parameters
        ----------
        filename : string
        size : int
        created_at : datetime
        '''
        self.filename = filename
        self.size = size
        self.created_at = created_at

    def __str__(self):
        parts = [self.filename,
                 humanreadablesize(self.size),
                 str(self.created_at)]
        return " ".join(parts)
class SrmlsException(Exception):
    '''Raised when an srmls invocation fails; carries the command line,
    its exit code and the captured stdout/stderr.'''

    def __init__(self, command, exitcode, stdout, stderr):
        self.command = command
        self.exitcode = exitcode
        self.stdout = stdout
        self.stderr = stderr

    def __str__(self):
        details = (self.command, self.exitcode, self.stdout, self.stderr)
        return "{0} failed with code {1:d}.\nstdout: {2}\nstderr: {3}".format(*details)
class ParseException(Exception):
    '''Raised when the output of an srmls call cannot be interpreted.'''

    def __init__(self, message):
        '''message: human-readable description of the parse failure.'''
        self.message = message

    def __str__(self):
        return self.message
class Location:
    '''A Location is a directory at a storage site which can be queried with getResult()'''

    def __init__(self, srmurl, directory):
        '''
        Parameters
        ----------
        srmurl : string
            the srm url of the storage site. for example: srm://srm.grid.sara.nl:8443
        directory : string
            a directory at the storage site. for example: /pnfs/grid.sara.nl/data/lofar/storage
        '''
        self.srmurl = srmurl
        self.directory = directory

    def path(self):
        '''returns the full path srmurl + directory'''
        return self.srmurl + self.directory

    def isRoot(self):
        '''is this a root directory?'''
        return self.directory == '/'

    def parentDir(self):
        '''returns parent directory path ('/' when already at the root)'''
        if self.isRoot():
            return '/'
        stripped = self.directory.rstrip('/')
        ridx = stripped.rindex('/')
        if ridx == 0:
            return '/'
        return stripped[:ridx]

    def parentLocation(self):
        '''returns a Location object for the parent directory'''
        return Location(self.srmurl, self.parentDir())

    def __str__(self):
        '''returns the full path'''
        return self.path()

    def getResult(self, offset=0):
        '''Returns LocationResult with the subdirectries and files in at this location

        offset : int
            srmls paging offset; used internally when recursing past the
            900-entry-per-call limit of srmls.

        Raises SrmlsException when srmls fails and ParseException when its
        output cannot be interpreted.
        '''
        foundFiles = []
        foundDirectories = []

        logger.info("Scanning %s", self.path())

        # the core command: do an srmls call and parse the results
        # srmls can only yield max 900 items in a result, hence we can recurse for the next 900 by using the offset
        # NOTE(review): the grid environment script path is hard-coded; the
        # open('/dev/null') handle is never closed explicitly.
        cmd = ["bash", "-c", "source %s;srmls -l -count=900 -offset=%d %s%s" % ('/globalhome/ingest/service/bin/init.sh', offset, self.srmurl, self.directory)]
        # logger.debug(' '.join(cmd))
        p = subprocess.Popen(cmd, stdin=open('/dev/null'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        logs = p.communicate()
        # logger.debug('Shell command for %s exited with code %s' % (self.path(), p.returncode))
        loglines = logs[0].split('\n')

        # parse logs from succesfull command
        if p.returncode == 0 and len(loglines) > 1:
            # Each srmls entry spans several lines and ends with a 'Type:' line;
            # cut the output into per-entry line groups.
            entries = []
            entry = []
            for line in loglines:
                entry.append(line)
                if 'Type:' in line:
                    entries.append(entry)
                    entry = []

            for lines in entries:
                if len(lines) < 2:
                    continue
                # First line holds '<size> <path>'; last line holds the type.
                pathLine = lines[0].strip()
                pathLineItems = [x.strip() for x in pathLine.split()]
                entryType = lines[-1].strip().split('Type:')[-1].strip()

                if len(pathLineItems) < 2:
                    raise ParseException("path line shorter than expected: %s" % pathLine)

                if entryType.lower() == 'directory':
                    dirname = pathLineItems[1]

                    if dirname.rstrip('/') == self.directory.rstrip('/'):
                        # skip current directory
                        continue

                    if len(dirname) < 1 or not dirname[0] == '/':
                        raise ParseException("Could not parse dirname from line: %s\nloglines:\n%s"
                                             % (pathLineItems[1], logs[0]))

                    foundDirectories.append(Location(self.srmurl, dirname))
                elif entryType.lower() == 'file':
                    try:
                        filesize = int(pathLineItems[0])
                        filename = pathLineItems[1]

                        # Prefer the 'created at:' timestamp line; fall back to
                        # the last '...ed at:' line seen (e.g. 'modified at:').
                        timestamplines = [x for x in lines if 'ed at:' in x]
                        timestampline = None
                        for line in timestamplines:
                            if 'created' in line:
                                timestampline = line
                                break
                            timestampline = line
                        timestamppart = timestampline.split('at:')[1].strip()
                        timestamp = datetime.datetime.strptime(timestamppart + ' UTC', '%Y/%m/%d %H:%M:%S %Z')

                        foundFiles.append(FileInfo(filename, filesize, timestamp))
                    except Exception as e:
                        raise ParseException("Could not parse fileproperies:\n%s\nloglines:\n%s"
                                             % (str(e), logs[0]))
                else:
                    logger.error("Unknown type: %s" % entryType)

            # recurse and ask for more files if we hit the 900 line limit
            if len(entries) >= 900:
                logger.debug('There are more than 900 lines in the results')
                extraResult = self.getResult(offset + 900)
                logger.debug('extraResult %s' % str(extraResult))
                foundDirectories += extraResult.subDirectories
                foundFiles += extraResult.files
        else:
            raise SrmlsException(' '.join(cmd), p.returncode, logs[0], logs[1])

        return LocationResult(self, foundDirectories, foundFiles)
class LocationResult:
    '''Query result for one Location: the subdirectories and files found there.'''

    def __init__(self, location, subDirectories = None, files = None):
        '''
        Parameters
        ----------
        location : Location
            For which location this result was generated. (i.e. it is the parent of the subdirectories)
        subDirectories : [Location]
            A list of subdirectories
        files : [FileInfo]
            A list of files in this location
        '''
        self.location = location
        self.subDirectories = [] if not subDirectories else subDirectories
        self.files = [] if not files else files

    def __str__(self):
        summary = "LocationResult: path=%s # subdirs=%d # files=%d totalFileSizeOfDir=%s" % (
            self.location.path(),
            self.nrOfSubDirs(),
            self.nrOfFiles(),
            humanreadablesize(self.totalFileSizeOfDir()))
        return summary

    def nrOfSubDirs(self):
        '''Number of subdirectories found at this location.'''
        return len(self.subDirectories)

    def nrOfFiles(self):
        '''Number of files found at this location.'''
        return len(self.files)

    def totalFileSizeOfDir(self):
        '''Summed size in bytes of the files directly in this location.'''
        total = 0
        for fileinfo in self.files:
            total += fileinfo.size
        return total
# Module-level lock serialising access to the shared database handle, used
# both by the main loop and by the ResultGetterThreads running in parallel.
lock = threading.Lock()
class ResultGetterThread(threading.Thread):
    '''Helper class to query Locations asynchronously for results.
    Scans one directory (by database id), stores its files in the database and
    inserts its subdirectories for later visits.'''

    def __init__(self, db, dir_id):
        threading.Thread.__init__(self)
        # Daemon thread: do not block interpreter shutdown.
        self.daemon = True
        self.db = db
        self.dir_id = dir_id

    def run(self):
        '''Look up the directory, scan it via srmls, store files and queue
        subdirectories; on failure re-schedule the directory for a new visit.'''
        try:
            with lock:
                # Claim the directory and stamp its visit time so no other
                # getter picks it up concurrently.
                dir = self.db.directory(self.dir_id)

                if not dir:
                    return

                dir_id = dir[0]
                dir_name = dir[1]

                self.db.updateDirectoryLastVisitTime(dir_id, datetime.datetime.utcnow())

                site_id = dir[2]
                site = self.db.site(site_id)
                srm_url = site[2]

            location = Location(srm_url, dir_name)

            try:
                # get results... long blocking
                result = location.getResult()
                logger.info(result)

                with lock:
                    self.db.insertFileInfos([(file.filename, file.size, file.created_at, dir_id) for file in result.files])

                    # skip empty nikhef dirs
                    filteredSubDirectories = [loc for loc in result.subDirectories
                                              if not ('nikhef' in loc.srmurl and 'generated' in loc.directory) ]

                    # filteredSubDirectories = [loc for loc in filteredSubDirectories
                    #                           if not 'lc3_007' in loc.directory ]

                    subDirectoryNames = [loc.directory for loc in filteredSubDirectories]

                    if subDirectoryNames:
                        # New subdirectories start "long unvisited" so the
                        # scheduler picks them up soon.
                        self.db.insertSubDirectories(subDirectoryNames, dir_id,
                                                     datetime.datetime.utcnow() - datetime.timedelta(days=1000))

            except (SrmlsException, ParseException) as e:
                logger.error('Error while scanning %s\n%s' % (location.path(), str(e)))
                logger.info('Rescheduling %s for new visit.' % (location.path(),))
                self.db.updateDirectoryLastVisitTime(self.dir_id, datetime.datetime.utcnow() - datetime.timedelta(days=1000))

        except Exception as e:
            logger.error(str(e))
            logger.info('Rescheduling dir_id %d for new visit.' % (self.dir_id,))
            self.db.updateDirectoryLastVisitTime(self.dir_id, datetime.datetime.utcnow() - datetime.timedelta(days=1000))
def main(argv):
    '''the main function scanning all locations and gathering the results'''
    # NOTE(review): the database path is hard-coded; argv is currently unused.
    db = store.LTAStorageDb('/data2/ltastorageoverview.sqlite')

    # Bootstrap the known LTA sites and root directories on the first run.
    if not db.sites():
        db.insertSite('target', 'srm://srm.target.rug.nl:8444')
        db.insertSite('nikhef', 'srm://tbn18.nikhef.nl:8446')
        db.insertSite('sara', 'srm://srm.grid.sara.nl:8443')
        db.insertSite('juelich', 'srm://lofar-srm.fz-juelich.de:8443')

        db.insertRootDirectory('target', '/lofar/ops')
        db.insertRootDirectory('target', '/lofar/ops/disk')
        db.insertRootDirectory('nikhef', '/dpm/nikhef.nl/home/lofar')
        db.insertRootDirectory('sara', '/pnfs/grid.sara.nl/data/lofar/ops')
        db.insertRootDirectory('sara', '/pnfs/grid.sara.nl/data/lofar/user')
        db.insertRootDirectory('sara', '/pnfs/grid.sara.nl/data/lofar/software')
        db.insertRootDirectory('sara', '/pnfs/grid.sara.nl/data/lofar/storage')
        db.insertRootDirectory('sara', '/pnfs/grid.sara.nl/data/lofar/pulsar')
        db.insertRootDirectory('juelich', '/pnfs/fz-juelich.de/data/lofar/ops')

        # Mark the root dirs as long-unvisited so they are scanned first.
        for dir_id in [x[0] for x in db.rootDirectories()]:
            db.updateDirectoryLastVisitTime(dir_id, datetime.datetime.utcnow() - datetime.timedelta(days=1000))

    # for each site we want one or more ResultGetterThreads
    # so make a dict with a list per site based on the locations
    getters = dict([(site[1],[]) for site in db.sites()])

    # some helper functions
    def numLocationsInQueues():
        '''returns the total number of locations in the queues'''
        return db.numDirectoriesNotVisitedSince(datetime.datetime.utcnow() - datetime.timedelta(days=1))

    def totalNumGetters():
        '''returns the total number of parallel running ResultGetterThreads'''
        return sum([len(v) for v in getters.values()])

    # only enter main loop if there is anything to process
    if numLocationsInQueues() > 0:
        # the main loop
        # loop over the locations and spawn ResultGetterThreads to get the results parallel
        # use load balancing over the different sites and with respect to queue lengths
        # do not overload this host system
        while numLocationsInQueues() > 0 or totalNumGetters() > 0:

            # get rid of old finished ResultGetterThreads
            finishedGetters = dict([(site_name, [getter for getter in getterList if not getter.isAlive()]) for site_name, getterList in getters.items()])
            for site_name,finishedGetterList in finishedGetters.items():
                for finishedGetter in finishedGetterList:
                    getters[site_name].remove(finishedGetter)

            # spawn new ResultGetterThreads
            # do not overload this host system
            while numLocationsInQueues() > 0 and (totalNumGetters() <= 4 or
                                                  (os.getloadavg()[0] < 3*multiprocessing.cpu_count() and
                                                   totalNumGetters() < 2.5*multiprocessing.cpu_count())):
                with lock:
                    sitesStats = db.visitStats(datetime.datetime.utcnow() - datetime.timedelta(days=1))

                # Weight each site by its backlog, discounted by the number of
                # getters it already has; sites with a backlog but no getter at
                # all get a huge weight so every site keeps flowing.
                for site_name, site_stats in sitesStats.items():
                    numGetters = len(getters[site_name])
                    queue_length = site_stats['queue_length']
                    weight = float(queue_length) / float(20 * (numGetters + 1))
                    if numGetters == 0 and queue_length > 0:
                        weight = 1e6 # make getterless sites extra important, so each site keeps flowing
                    site_stats['# get'] = numGetters
                    site_stats['weight'] = weight

                totalWeight = sum([site_stats['weight'] for site_stats in sitesStats.values()])
                #logger.debug("siteStats:\n%s" % str('\n'.join([str((k, v)) for k, v in sitesStats.items()])))

                # now pick a random site using the weights
                chosen_site_name = None
                cumul = 0.0
                r = random()
                for site_name,site_stats in sitesStats.items():
                    ratio = site_stats['weight']/totalWeight
                    cumul += ratio
                    if r <= cumul and site_stats['queue_length'] > 0:
                        chosen_site_name = site_name
                        break

                if not chosen_site_name:
                    break

                chosen_dir_id = sitesStats[chosen_site_name]['least_recent_visited_dir_id']

                # make and start a new ResultGetterThread the location deque of the chosen site
                newGetter = ResultGetterThread(db, chosen_dir_id)
                newGetter.start()
                getters[chosen_site_name].append(newGetter)

                logger.info('numLocationsInQueues=%d totalNumGetters=%d' % (numLocationsInQueues(), totalNumGetters()))

                # small sleep between starting multiple getters
                time.sleep(0.25)

            # sleep before main loop next iteration
            # to wait for some results
            # and some getters to finis
            time.sleep(1)

    # all locations were processed
# Script entry point: run the scraper with the command line arguments.
if __name__ == "__main__":
    main(sys.argv[1:])
| jjdmol/LOFAR | LTA/ltastorageoverview/lib/scraper.py | Python | gpl-3.0 | 16,943 | [
"VisIt"
] | afa8e14cde7e1b5aec3457a1bd807c707f00042e2b91cb73fe134b795e5eb182 |
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import espressomd.lb
import espressomd.lbboundaries
import espressomd.shapes
import unittest as ut
import numpy as np
@ut.skipIf(not espressomd.has_features(["LB", "LB_BOUNDARIES"]),
           "Features not available, skipping test.")
class LBBoundaryVelocityTest(ut.TestCase):

    """Test slip velocity of boundaries.

    In this simple test a wall with a slip velocity is
    added and we check that the fluid obtains the same velocity.
    """

    # Shared simulation state, set up once for the whole TestCase.
    system = espressomd.System(box_l=[10.0, 10.0, 10.0])
    system.time_step = .5
    system.cell_system.skin = 0.1

    def test(self):
        """Drive the LB fluid with a moving wall and compare velocities."""
        system = self.system

        lb_fluid = espressomd.lb.LBFluid(
            agrid=2.0, dens=1.0, visc=1.0, fric=1.0, tau=0.03)
        system.actors.add(lb_fluid)

        v_boundary = [0.03, 0.02, 0.01]

        # Oblique wall with the prescribed slip velocity.
        wall_shape = espressomd.shapes.Wall(normal=[1, 2, 3], dist=0.5)
        wall = espressomd.lbboundaries.LBBoundary(
            shape=wall_shape, velocity=v_boundary)
        system.lbboundaries.add(wall)

        # Integrate long enough for the flow to relax toward steady state.
        system.integrator.run(10000)

        # Probe one fluid node and compare each velocity component.
        v_fluid = lb_fluid[1, 0, 0].velocity
        self.assertAlmostEqual(v_fluid[0], v_boundary[0], places=3)
        self.assertAlmostEqual(v_fluid[1], v_boundary[1], places=3)
        self.assertAlmostEqual(v_fluid[2], v_boundary[2], places=3)
if __name__ == "__main__":
    # Run this test file stand-alone with the unittest runner.
    ut.main()
| hmenke/espresso | testsuite/python/lb_boundary_velocity.py | Python | gpl-3.0 | 2,102 | [
"ESPResSo"
] | 9e2a6bccaea12ae1465ee7a874d1e0d93caa1ca92cfe7d90a0fbf5704174d474 |
# -*- coding: utf-8 -*-
"""testowyl -- some tests for owyl.
Copyright 2008 David Eyk. All rights reserved.
$Author$\n
$Rev$\n
$Date$
"""
__author__ = "$Author$"[9:-2]
__revision__ = "$Rev$"[6:-2]
__date__ = "$Date$"[7:-2]
import unittest
import owyl
from owyl import blackboard
class OwylTests(unittest.TestCase):
    """Tests for Owyl.

    Note: tests should run the tree twice to make sure that the
    constructed tree is re-usable.

    NOTE(review): this suite is Python 2 only -- it relies on the
    generator ``.next()`` method and ``xrange``, both removed in
    Python 3.
    """

    def testSucceed(self):
        """Can we succeed?
        """
        s = owyl.succeed()
        t = s()
        self.assertEqual(t.next(), True)
        self.assertRaises(StopIteration, t.next)
        # Second run from the same task proves the task is re-usable.
        t = s()
        self.assertEqual(t.next(), True)
        self.assertRaises(StopIteration, t.next)

    def testFail(self):
        """Can we fail?
        """
        s = owyl.fail()
        t = s()
        self.assertEqual(t.next(), False)
        self.assertRaises(StopIteration, t.next)
        t = s()
        self.assertEqual(t.next(), False)
        self.assertRaises(StopIteration, t.next)

    def testVisitSequenceSuccess(self):
        """Can we visit a successful sequence?
        """
        tree = owyl.sequence(owyl.succeed(),
                             owyl.succeed(),
                             owyl.succeed())
        v = owyl.visit(tree)
        # Filter out the None "still running" ticks; the final True is
        # the sequence's own result after its three children.
        results = [x for x in v if x is not None]
        self.assertEqual(results, [True, True, True, True])
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [True, True, True, True])

    def testVisitSequenceFailure(self):
        """Can we visit a failing sequence?
        """
        tree = owyl.sequence(owyl.succeed(),
                             owyl.succeed(),
                             owyl.fail(),
                             owyl.succeed())
        v = owyl.visit(tree)
        # The sequence aborts at the first failure; the last child
        # never runs and the final False is the sequence's result.
        results = [x for x in v if x is not None]
        self.assertEqual(results, [True, True, False, False])
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [True, True, False, False])

    def testVisitSelectorSuccess(self):
        """Can we visit a successful selector?
        """
        tree = owyl.selector(owyl.fail(),
                             owyl.fail(),
                             owyl.succeed(),
                             owyl.fail())
        v = owyl.visit(tree)
        # The selector stops at the first success.
        results = [x for x in v if x is not None]
        self.assertEqual(results, [False, False, True, True])
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [False, False, True, True])

    def testVisitSelectorFailure(self):
        """Can we visit a failing selector?
        """
        tree = owyl.selector(owyl.fail(),
                             owyl.fail(),
                             owyl.fail())
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [False, False, False, False])
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [False, False, False, False])

    def testParallel_AllSucceed_Success(self):
        """Can we visit a suceeding parallel (all succeed)?
        """
        tree = owyl.parallel(owyl.sequence(owyl.succeed(),
                                           owyl.succeed()),
                             owyl.sequence(owyl.succeed(),
                                           owyl.succeed()),
                             policy=owyl.PARALLEL_SUCCESS.REQUIRE_ALL)
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [True])
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [True])

    def testParallel_OneSucceeds_Success(self):
        """Can we visit a suceeding parallel (one succeeds)?
        """
        tree = owyl.parallel(owyl.sequence(owyl.succeed(),
                                           owyl.succeed()),
                             owyl.sequence(owyl.succeed(),
                                           owyl.fail()),
                             policy=owyl.PARALLEL_SUCCESS.REQUIRE_ONE)
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [True])
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [True])

    def testParallel_AllSucceed_Failure(self):
        """Can we visit a failing parallel (all succeed)?
        """
        tree = owyl.parallel(owyl.sequence(owyl.succeed(),
                                           owyl.fail()),
                             owyl.sequence(owyl.succeed(),
                                           owyl.succeed()),
                             policy=owyl.PARALLEL_SUCCESS.REQUIRE_ALL)
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [False])
        # NOTE(review): unlike the sibling tests, this one only visits
        # the tree once -- the re-use pass is missing.

    def testParallel_OneSucceeds_Failure(self):
        """Can we visit a failing parallel (one succeeds)?
        """
        tree = owyl.parallel(owyl.sequence(owyl.fail(),
                                           owyl.fail()),
                             owyl.sequence(owyl.fail(),
                                           owyl.fail()),
                             policy=owyl.PARALLEL_SUCCESS.REQUIRE_ONE)
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [False])
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [False])

    def testParallel_DelayedFailure(self):
        """Can parallel fail if child fails later (all succeed)?
        """
        # Fail after 5 iterations.
        after = 5
        tree = owyl.parallel(owyl.succeed(),
                             owyl.failAfter(after=after),
                             policy=owyl.PARALLEL_SUCCESS.REQUIRE_ALL)
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [False])
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [False])

    def testParallel_DelayedSuccess(self):
        """Can parallel succeed if child succeeds later (one succeeds)?
        """
        # Succeed after 5 iterations.
        after = 5
        tree = owyl.parallel(owyl.fail(),
                             owyl.succeedAfter(after=after),
                             policy=owyl.PARALLEL_SUCCESS.REQUIRE_ONE)
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [True])
        v = owyl.visit(tree)
        results = [x for x in v if x is not None]
        self.assertEqual(results, [True])

    def testThrow(self):
        """Can we throw an exception within the tree?
        """
        tree = owyl.sequence(owyl.succeed(),
                             owyl.succeed(),
                             owyl.throw(throws=ValueError,
                                        throws_message="AUGH!!"),
                             )
        v = owyl.visit(tree)
        self.assertEqual(v.next(), True)
        self.assertEqual(v.next(), True)
        # The exception propagates out of the visitor.
        self.assertRaises(ValueError, v.next)
        v = owyl.visit(tree)
        self.assertEqual(v.next(), True)
        self.assertEqual(v.next(), True)
        self.assertRaises(ValueError, v.next)

    def testCatch(self):
        """Can we catch an exception thrown within the tree?
        """
        tree = owyl.sequence(owyl.succeed(),
                             owyl.succeed(),
                             owyl.catch(owyl.throw(throws=ValueError,
                                                   throws_message="AUGH!!"),
                                        caught=ValueError,
                                        branch=owyl.succeed())
                             )
        v = owyl.visit(tree)
        self.assertEqual(v.next(), True)
        self.assertEqual(v.next(), True)
        # The catch branch runs instead of the exception escaping.
        self.assertEqual(v.next(), True)
        v = owyl.visit(tree)
        self.assertEqual(v.next(), True)
        self.assertEqual(v.next(), True)
        self.assertEqual(v.next(), True)

    def testCatchIgnoresOthers(self):
        """Does catch ignore other exceptions thrown within the tree?
        """
        tree = owyl.sequence(owyl.succeed(),
                             owyl.succeed(),
                             owyl.catch(owyl.throw(throws=ValueError,
                                                   throws_message="AUGH!!"),
                                        caught=IndexError,
                                        branch=owyl.succeed())
                             )
        v = owyl.visit(tree)
        self.assertEqual(v.next(), True)
        self.assertEqual(v.next(), True)
        # catch() only handles IndexError, so the ValueError escapes.
        self.assertRaises(ValueError, v.next)
        v = owyl.visit(tree)
        self.assertEqual(v.next(), True)
        self.assertEqual(v.next(), True)
        self.assertRaises(ValueError, v.next)

    def testIdentity(self):
        """Does identity pass on return values unchanged?
        """
        # Succeed after 5 iterations.
        after = 5
        tree = owyl.identity(owyl.succeedAfter(after=after))
        v = owyl.visit(tree)
        for x in xrange(after):
            self.assertEqual(v.next(), None)
        self.assertEqual(v.next(), True)
        v = owyl.visit(tree)
        for x in xrange(after):
            self.assertEqual(v.next(), None)
        self.assertEqual(v.next(), True)

        tree = owyl.identity(owyl.failAfter(after=after))
        v = owyl.visit(tree)
        for x in xrange(after):
            self.assertEqual(v.next(), None)
        self.assertEqual(v.next(), False)
        v = owyl.visit(tree)
        for x in xrange(after):
            self.assertEqual(v.next(), None)
        self.assertEqual(v.next(), False)

    def testCheckBB(self):
        """Can we check a value on a blackboard?
        """
        value = "foo"
        checker = lambda x: x == value
        bb = blackboard.Blackboard('test', value=value)
        tree = blackboard.checkBB(key='value',
                                  check=checker)
        # Note that we can pass in the blackboard at run-time.
        v = owyl.visit(tree, blackboard=bb)
        # Check should succeed.
        self.assertEqual(v.next(), True)
        v = owyl.visit(tree, blackboard=bb)
        self.assertEqual(v.next(), True)

        bb['value'] = 'bar'
        # Check should now fail.
        v = owyl.visit(tree, blackboard=bb)
        self.assertEqual(v.next(), False)
        v = owyl.visit(tree, blackboard=bb)
        self.assertEqual(v.next(), False)

    def testSetBB(self):
        """Can we set a value on a blackboard?
        """
        value = 'foo'
        checker = lambda x: x == value
        bb = blackboard.Blackboard('test', value='bar')
        tree = owyl.sequence(blackboard.setBB(key="value",
                                              value=value),
                             blackboard.checkBB(key='value',
                                                check=checker)
                             )
        # Note that we can pass in the blackboard at run-time.
        v = owyl.visit(tree, blackboard=bb)
        # Sequence will succeed if the check succeeds.
        result = [x for x in v][-1]
        self.assertEqual(result, True)
        v = owyl.visit(tree, blackboard=bb)
        result = [x for x in v][-1]
        self.assertEqual(result, True)

    def testRepeatUntilSucceed(self):
        """Can we repeat a behavior until it succeeds?
        """
        bb = blackboard.Blackboard('test', )  # 'value' defaults to None.
        checker = lambda x: x is not None
        # Local aliases keep the tree construction below readable.
        parallel = owyl.parallel
        repeat = owyl.repeatUntilSucceed
        checkBB = blackboard.checkBB
        setBB = blackboard.setBB
        tree = parallel(repeat(checkBB(key='value',
                                       check=checker),
                               final_value=True),
                        # That should fail until this sets the value:
                        owyl.selector(owyl.fail(),
                                      owyl.fail(),
                                      setBB(key='value',
                                            value='foo')),
                        policy=owyl.PARALLEL_SUCCESS.REQUIRE_ALL)
        v = owyl.visit(tree, blackboard=bb)
        results = [x for x in v]
        result = results[-1]
        self.assertEqual(result, True)

        # Need to reset the blackboard to get the same results.
        bb = blackboard.Blackboard('test', )  # 'value' defaults to None.
        v = owyl.visit(tree, blackboard=bb)
        results = [x for x in v]
        result = results[-1]
        self.assertEqual(result, True)

    def testRepeatUntilFail(self):
        """Can we repeat a behavior until it fails?
        """
        bb = blackboard.Blackboard('test', value="foo")
        checker = lambda x: x and True or False  # must eval to True
        parallel = owyl.parallel
        repeat = owyl.repeatUntilFail
        checkBB = blackboard.checkBB
        setBB = blackboard.setBB
        tree = parallel(repeat(checkBB(key='value',
                                       check=checker),
                               final_value=True),
                        # That should succeed until this sets the value:
                        owyl.selector(owyl.fail(),
                                      owyl.fail(),
                                      setBB(key='value',
                                            value=None)),
                        policy=owyl.PARALLEL_SUCCESS.REQUIRE_ALL)
        v = owyl.visit(tree, blackboard=bb)
        results = [x for x in v]
        result = results[-1]
        self.assertEqual(result, True)

        # Need to reset the blackboard to get the same results.
        bb = blackboard.Blackboard('test', value="foo")
        v = owyl.visit(tree, blackboard=bb)
        results = [x for x in v]
        result = results[-1]
        self.assertEqual(result, True)

    def testRepeatUntilSucceed_Count(self):
        """Does repeatUntilSucceed execute its child with every tick?
        """
        # How many times to repeat the behavior?
        ticks = 100
        bb = blackboard.Blackboard('test', count=0)

        # Child task that counts its own executions and never succeeds.
        @owyl.task
        def increment(**kwargs):
            bb, key = kwargs['blackboard'], kwargs['key']
            bb[key] += 1
            yield False

        tree = owyl.repeatUntilSucceed(increment(key='count'))
        v = owyl.visit(tree, blackboard=bb)
        for i in xrange(ticks):
            v.next()
        self.assertEqual(bb['count'], ticks)

        # Need to reset the blackboard to get the same results.
        bb = blackboard.Blackboard('test', count=0)
        v = owyl.visit(tree, blackboard=bb)
        for i in xrange(ticks):
            v.next()
        self.assertEqual(bb['count'], ticks)

    def testRepeatUntilFail_Count(self):
        """Does repeatUntilFail execute its child with every tick?
        """
        # How many times to repeat the behavior?
        ticks = 100
        bb = blackboard.Blackboard('test', count=0)

        # Child task that counts its own executions and never fails.
        @owyl.task
        def increment(**kwargs):
            bb, key = kwargs['blackboard'], kwargs['key']
            bb[key] += 1
            yield True

        tree = owyl.repeatUntilFail(increment(key='count'))
        v = owyl.visit(tree, blackboard=bb)
        for i in xrange(ticks):
            v.next()
        self.assertEqual(bb['count'], ticks)

        # Need to reset the blackboard to get the same results.
        bb = blackboard.Blackboard('test', count=0)
        v = owyl.visit(tree, blackboard=bb)
        for i in xrange(ticks):
            v.next()
        self.assertEqual(bb['count'], ticks)

    def testRepeatAlways_Count(self):
        """Does repeatAlways execute its child with every tick?
        """
        # How many times to repeat the behavior?
        ticks = 100
        bb = blackboard.Blackboard('test', count=0)

        @owyl.task
        def increment(**kwargs):
            bb, key = kwargs['blackboard'], kwargs['key']
            bb[key] += 1
            yield True

        tree = owyl.repeatAlways(increment(key='count'))
        v = owyl.visit(tree, blackboard=bb)
        for i in xrange(ticks):
            v.next()
        self.assertEqual(bb['count'], ticks)

        # Need to reset the blackboard to get the same results.
        bb = blackboard.Blackboard('test', count=0)
        v = owyl.visit(tree, blackboard=bb)
        for i in xrange(ticks):
            v.next()
        self.assertEqual(bb['count'], ticks)
if __name__ == "__main__":
    # Prefer the testoob runner when installed; otherwise fall back to
    # the stdlib unittest runner. Both expose a compatible main().
    runner = unittest
    try:
        import testoob
        runner = testoob
    except ImportError:
        pass
    runner.main()
| Gaboose/owyl | tests/testowyl.py | Python | bsd-3-clause | 17,101 | [
"VisIt"
] | e2f1a3b0311e45804652d202bc7f251dff2b6592b2cd0d85356d051883546ec1 |
import numpy as np
import unittest
import datetime
import pytest
from laserchicken import utils, test_tools, keys
from laserchicken.utils import fit_plane
from time import time
class TestUtils(unittest.TestCase):
    """Unit tests for point-cloud helpers in laserchicken.utils."""

    def test_GetPointCloudPoint(self):
        """ Should not raise exception. """
        pc = test_tools.generate_tiny_test_point_cloud()
        x, y, z = utils.get_point(pc, 1)
        self.assertEqual(2, x)
        self.assertEqual(3, y)
        self.assertEqual(4, z)

    def test_GetPointCloudPointFeature(self):
        """ Should not raise exception. """
        pc = test_tools.generate_tiny_test_point_cloud()
        # Derive a feature from the coordinates so its value is checkable.
        cols = 0.5 * (pc[keys.point]["x"]["data"] + pc[keys.point]["y"]["data"])
        pc[keys.point]["color"] = {"type": "double", "data": cols}
        x, y, z = utils.get_point(pc, 1)
        c = utils.get_attribute_value(pc, 1, "color")
        self.assertEqual(c, 0.5 * (x + y))

    def test_GetPointCloudPointFeatures(self):
        """ Should not raise exception. """
        pc = test_tools.generate_tiny_test_point_cloud()
        cols = 0.5 * (pc[keys.point]["x"]["data"] + pc[keys.point]["y"]["data"])
        flavs = 0.5 * (pc[keys.point]["x"]["data"] - pc[keys.point]["y"]["data"])
        pc[keys.point]["color"] = {"type": "double", "data": cols}
        pc[keys.point]["flavor"] = {"type": "double", "data": flavs}
        x, y, z = utils.get_point(pc, 2)
        c, f = utils.get_features(pc, ("color", "flavor"), 2)
        self.assertEqual(c, 0.5 * (x + y))
        self.assertEqual(f, 0.5 * (x - y))

    def test_CopyEmptyPointCloud(self):
        """ Should not raise exception. """
        pc = test_tools.generate_tiny_test_point_cloud()
        pc[keys.point]["x"]["data"] = np.array([])
        pc[keys.point]["y"]["data"] = np.array([])
        pc[keys.point]["z"]["data"] = np.array([])
        copypc = utils.copy_point_cloud(pc)
        self.assertEqual(0, len(copypc[keys.point]["x"]["data"]))

    def test_CopyNonEmptyPointCloud(self):
        """ Test whether coordinates are copied """
        pc = test_tools.generate_tiny_test_point_cloud()
        x = pc[keys.point]["x"]["data"]
        y = pc[keys.point]["y"]["data"]
        z = pc[keys.point]["z"]["data"]
        copypc = utils.copy_point_cloud(pc)
        self.assertTrue(all(x == copypc[keys.point]["x"]["data"]))
        self.assertTrue(all(y == copypc[keys.point]["y"]["data"]))
        self.assertTrue(all(z == copypc[keys.point]["z"]["data"]))

    def test_CopyPointCloudMetaData(self):
        """ Test whether metadata are copied """
        pc = test_tools.generate_tiny_test_point_cloud()
        pc["log"] = [
            {"time": datetime.datetime(2018, 1, 23, 12, 15, 59), "module": "filter", "parameters": [("z", "gt", 0.5)]}]
        copypc = utils.copy_point_cloud(pc)
        self.assertEqual(datetime.datetime(2018, 1, 23, 12, 15, 59), copypc["log"][0]["time"])
        self.assertEqual("filter", copypc["log"][0]["module"])
        self.assertEqual([("z", "gt", 0.5)], copypc["log"][0]["parameters"])

    def test_CopyNonEmptyPointCloudBoolMask(self):
        """ Test whether coordinates are copied with boolean mask """
        pc = test_tools.generate_tiny_test_point_cloud()
        x = pc[keys.point]["x"]["data"][2]
        y = pc[keys.point]["y"]["data"][2]
        z = pc[keys.point]["z"]["data"][2]
        # Only the third point survives the mask.
        copypc = utils.copy_point_cloud(pc, array_mask=np.array([False, False, True]))
        self.assertTrue(all(np.array([x]) == copypc[keys.point]["x"]["data"]))
        self.assertTrue(all(np.array([y]) == copypc[keys.point]["y"]["data"]))
        self.assertTrue(all(np.array([z]) == copypc[keys.point]["z"]["data"]))

    def test_CopyNonEmptyPointCloudIntMask(self):
        """ Test whether coordinates are copied with array indexing """
        pc = test_tools.generate_tiny_test_point_cloud()
        x0, x1 = pc[keys.point]["x"]["data"][0], pc[keys.point]["x"]["data"][1]
        y0, y1 = pc[keys.point]["y"]["data"][0], pc[keys.point]["y"]["data"][1]
        z0, z1 = pc[keys.point]["z"]["data"][0], pc[keys.point]["z"]["data"][1]
        # Integer mask also reorders: point 1 comes before point 0.
        copypc = utils.copy_point_cloud(pc, array_mask=np.array([1, 0]))
        self.assertTrue(all(np.array([x1, x0]) == copypc[keys.point]["x"]["data"]))
        self.assertTrue(all(np.array([y1, y0]) == copypc[keys.point]["y"]["data"]))
        self.assertTrue(all(np.array([z1, z0]) == copypc[keys.point]["z"]["data"]))

    def test_AddMetaDataToPointCloud(self):
        """ Test adding info to the point cloud for test module """
        pc = test_tools.generate_tiny_test_point_cloud()
        from laserchicken import filter as somemodule
        utils.add_metadata(pc,somemodule,params = (0.5,"cylinder",4))
        self.assertEqual(len(pc[keys.provenance]),1)

    def test_AddToPointCloudEmpty(self):
        # Adding to an empty cloud should copy all attributes verbatim.
        pc_1 = utils.create_point_cloud([],[],[])
        pc_2 = test_tools.generate_tiny_test_point_cloud()
        utils.add_to_point_cloud(pc_1, pc_2)
        for attr in pc_2[keys.point].keys():
            self.assertIn(attr, pc_1[keys.point])
            self.assertEqual(pc_1[keys.point][attr]['type'],
                             pc_2[keys.point][attr]['type'])
            self.assertTrue(all(pc_1[keys.point][attr]['data'] == pc_2[keys.point][attr]['data']))

    def test_AddToPointCloudInvalid(self):
        pc_1 = test_tools.SimpleTestData.get_point_cloud()
        # invalid format
        pc_2 = {}
        with pytest.raises(TypeError):
            utils.add_to_point_cloud(pc_1, pc_2)
        with pytest.raises(AttributeError):
            utils.add_to_point_cloud(pc_2, pc_1)
        # non-matching attributes
        test_data = test_tools.ComplexTestData()
        pc_2 = test_data.get_point_cloud()
        with pytest.raises(AttributeError):
            utils.add_to_point_cloud(pc_1, pc_2)
        # different structure
        pc_2 = {'vertex':{'x':1, 'y':2, 'z':3}}
        with pytest.raises(TypeError):
            utils.add_to_point_cloud(pc_1, pc_2)
        # different data types
        pc_2 = {'vertex': {'x': {'data': np.zeros(3, dtype=int), 'type': 'int'},
                           'y': {'data': np.zeros(3, dtype=int), 'type': 'int'},
                           'z': {'data': np.zeros(3, dtype=int), 'type': 'int'}}}
        with pytest.raises(ValueError):
            utils.add_to_point_cloud(pc_1, pc_2)

    def test_AddToPointCloud(self):
        test_data = test_tools.ComplexTestData()
        pc_source = test_data.get_point_cloud()
        pc_dest = utils.copy_point_cloud(pc_source)
        # Appending a cloud to its own copy doubles every attribute array.
        utils.add_to_point_cloud(pc_dest, pc_source)
        for key in pc_source.keys():
            self.assertIn(key, pc_dest)
        for attr in pc_source[keys.point].keys():
            self.assertEqual(len(pc_dest[keys.point][attr]['data']),
                             2*len(pc_source[keys.point][attr]['data']))
        self.assertEqual(pc_dest[keys.provenance][-1]['module'],
                         'laserchicken.utils')

    def test_AddFeatureArray(self):
        test_data = test_tools.ComplexTestData()
        pc = test_data.get_point_cloud()
        feature_add = np.array([1, 1, 1, 1, 1], dtype=int)
        utils.update_feature(pc, 'test_feature', feature_add)
        self.assertIn('test_feature', pc[keys.point])
        self.assertTrue(all(pc[keys.point]['test_feature']['data'] == feature_add))

    def test_AddFeatureArrayInvalid(self):
        test_data = test_tools.ComplexTestData()
        pc = test_data.get_point_cloud()
        # Six values for a five-point cloud must be rejected.
        feature_add = np.array([1, 1, 1, 1, 1, 2], dtype=int)
        with pytest.raises(AssertionError):
            utils.update_feature(pc, 'test_feature', feature_add)

    def test_AddFeatureArrayMask(self):
        test_data = test_tools.ComplexTestData()
        pc = test_data.get_point_cloud()
        feature_add = np.array([1, 2, 3, 4], dtype=int)
        mask = np.array([1, 1, 0, 1, 1], dtype=bool)
        # Masked-out positions keep the feature's fill value (0).
        utils.update_feature(pc, 'test_feature', feature_add, array_mask=mask)
        self.assertIn('test_feature', pc[keys.point])
        self.assertTrue(all(pc[keys.point]['test_feature']['data'] == [1, 2, 0, 3, 4]))

    def test_AddFeatureArrayMaskInvalid(self):
        test_data = test_tools.ComplexTestData()
        pc = test_data.get_point_cloud()
        # Mask selects 5 points but only 4 values are supplied.
        feature_add = np.array([1, 2, 3, 4], dtype=int)
        mask = np.array([1, 1, 1, 1, 1], dtype=bool)
        with pytest.raises(AssertionError):
            utils.update_feature(pc, 'test_feature', feature_add, array_mask=mask)

    def test_AddFeatureValueMask(self):
        test_data = test_tools.ComplexTestData()
        pc = test_data.get_point_cloud()
        # A scalar value is broadcast to every masked-in point.
        feature_add = 1.1
        mask = np.array([1, 1, 0, 1, 1], dtype=bool)
        utils.update_feature(pc, 'test_feature', feature_add, array_mask=mask)
        self.assertIn('test_feature', pc[keys.point])
        self.assertTrue(all(pc[keys.point]['test_feature']['data'] == [1.1, 1.1, 0.0, 1.1, 1.1]))

    def test_AddFeatureValueMaskInvalid(self):
        test_data = test_tools.ComplexTestData()
        pc = test_data.get_point_cloud()
        # Mask longer than the cloud must be rejected.
        feature_add = 1.1
        mask = np.array([1, 1, 0, 1, 1, 1], dtype=bool)
        with pytest.raises(AssertionError):
            utils.update_feature(pc, 'test_feature', feature_add, array_mask=mask)
class TestPlaneFit(unittest.TestCase):
    """Tests for the plane-fitting helpers in laserchicken.utils."""

    def test_leastsqr(self):
        """fit_plane should reproduce the z values of in-plane points."""
        f = fit_plane(self.points[:, 0], self.points[:, 1], self.points[:, 2])
        estimates = f(self.points[:, 0], self.points[:, 1])
        np.testing.assert_allclose(estimates, self.points[:, 2])

    def test_FitPlaneSVD(self):
        """Test the normal vector extraction with SVD."""
        nfit = utils.fit_plane_svd(self.points[:,0], self.points[:,1], self.points[:,2])
        self.assertTrue(np.allclose(nfit, self.n))

    def generate_random_points_inplane(self,nvect, dparam=0, npts=100, eps=0.0):
        """
        Generate a series of point all belonging to a plane.

        :param nvect: normal vector of the plane
        :param dparam: zero point value of the plane
        :param npts: number of points
        :param eps: std of the gaussian noise added to the z values of the planes
        :return: x,y,z coordinate of the points
        """
        a, b, c = nvect / np.linalg.norm(nvect)
        x, y = np.random.rand(npts), np.random.rand(npts)
        # Solve the plane equation a*x + b*y + c*z = dparam for z.
        z = (dparam - a * x - b * y) / c
        if eps > 0:
            z += np.random.normal(loc=0., scale=eps, size=npts)
        return np.column_stack((x, y, z))

    def setUp(self):
        """Set up the data points."""
        self.n = np.array([1., 2., 3.])
        self.n /= np.linalg.norm(self.n)
        # Noise-free points so both fits can be checked exactly.
        self.points = self.generate_random_points_inplane(self.n,eps=0)

    def tearDown(self):
        """Tear it down."""
        pass
if __name__ == '__main__':
    # Run this test file stand-alone.
    unittest.main()
| eEcoLiDAR/eEcoLiDAR | laserchicken/test_utils.py | Python | apache-2.0 | 11,259 | [
"Gaussian"
] | 314efbbcb5042e5166a28dd742f07293c43dffc9734ddbae0a79e6c12146ad88 |
"""Test PAC methods."""
import numpy as np
from tensorpac.methods.meth_switch import get_pac_fcn, pacstr
from tensorpac.methods.meth_surrogates import compute_surrogates, normalize
from tensorpac.methods.meth_pp import preferred_phase
from tensorpac.methods.meth_erpac import erpac, ergcpac, _ergcpac_perm
# Identifier ranges accepted by the PAC implementations under test.
n_pac_range = range(1, 7)
n_sur_range = range(4)
n_norm_range = range(1, 5)
# Dimensions of the synthetic data.
n_epochs = 5
n_times = 1000
n_pha_freqs = 2
n_amp_freqs = 3
n_bins = 18
pval = .05
n_perm = 2
# Random phases in [-pi, pi) and random amplitudes in [0, 1).
pha = np.random.uniform(-np.pi, np.pi, (n_pha_freqs, n_epochs, n_times))
amp = np.random.rand(n_amp_freqs, n_epochs, n_times)
class TestMethods(object):
    """Test individual pac methods."""

    @staticmethod
    def _get_methods(implementation='tensor'):
        # Collect PAC callables by probing successive method ids until
        # get_pac_fcn raises a KeyError for an unknown id.
        meths = []
        q = 1
        while q is not None:
            try:
                meths += [get_pac_fcn(q, n_bins, pval, implementation)]
                q += 1
            except KeyError as e:  # noqa
                q = None
        return meths

    def test_pacstr(self):
        """Test getting pac string name."""
        for p in n_pac_range:
            for s in n_sur_range:
                for n in n_norm_range:
                    names = pacstr((p, s, n))
                    assert all([isinstance(k, str) for k in names])
                    # test assert error
                    # NOTE(review): out-of-range ids only actually raise
                    # at the upper end of each range; otherwise the
                    # try blocks are silent no-ops.
                    try:
                        pacstr((p + 1, s, n))
                    except ValueError as e:  # noqa
                        pass
                    try:
                        pacstr((p, s + 1, n))
                    except ValueError as e:  # noqa
                        pass
                    try:
                        pacstr((p, s, n + 1))
                    except ValueError as e:  # noqa
                        pass

    def test_pac_methods(self):
        """Test individual pac methods."""
        for imp in ['tensor']:  # 'numba' = FAIL (core dumped)
            for n, meth in enumerate(self._get_methods(imp)):
                # print(meth.func.__name__)
                if n + 1 == 6:  # gc pac need additional multivariate axis
                    _pha = np.stack([np.sin(pha), np.cos(pha)], axis=-2)
                    _amp = amp[..., np.newaxis, :]
                    pac = meth(_pha, _amp)
                elif n + 1 == 4:  # Try with different values of p for coverage
                    pac = meth(pha, amp, p=0.5)
                    pac = meth(pha, amp, p=1)
                    pac = meth(pha, amp, p=None)
                else:
                    pac = meth(pha, amp)
                assert pac.shape == (n_amp_freqs, n_pha_freqs, n_epochs)

    def test_surrogates(self):
        """Test computing surrogates."""
        fcn = get_pac_fcn(1, n_bins, pval)
        s_shape = (n_perm, n_amp_freqs, n_pha_freqs, n_epochs)
        for s in n_sur_range:
            surro = compute_surrogates(pha, amp, s, fcn, n_perm, 1, 0)
            # Surrogate method 0 returns None (no surrogates).
            assert (surro is None) or (surro.shape == s_shape)

    def test_normalize(self):
        """Test normalization."""
        # Smoke test: every normalization id must run without error.
        for k in n_norm_range:
            true_pac = np.random.rand(n_amp_freqs, n_pha_freqs)
            perm_pac = np.random.rand(n_perm, n_amp_freqs, n_pha_freqs)
            normalize(k, true_pac, perm_pac)

    def test_erpac(self):
        """Test event-related PAC."""
        # erpac expects (..., n_times, n_epochs); swap the last two axes.
        er_pha, er_amp = np.moveaxis(pha, -2, -1), np.moveaxis(amp, -2, -1)
        # circular
        er_circ, pv_circ = erpac(er_pha, er_amp)
        assert er_circ.shape == pv_circ.shape
        assert er_circ.shape == (n_amp_freqs, n_pha_freqs, n_times)
        # gaussian copula
        _pha = np.stack([np.sin(er_pha), np.cos(er_pha)], axis=-2)
        _amp = er_amp[..., np.newaxis, :]
        ergc_circ = ergcpac(_pha, _amp, smooth=None)
        assert ergc_circ.shape == (n_amp_freqs, n_pha_freqs, n_times)
        ergcpac(_pha, _amp, smooth=5)
        # test erpac permutations
        ergc_perm = _ergcpac_perm(_pha, _amp, smooth=None, n_perm=n_perm)
        assert ergc_perm.shape == (n_perm, n_amp_freqs, n_pha_freqs, n_times)

    def test_preferred_phase(self):
        """Test preferred phase method."""
        bin_amp, pp, fvec = preferred_phase(pha, amp, n_bins)
        assert bin_amp.shape == (n_bins, n_amp_freqs, n_pha_freqs, n_epochs)
        assert pp.shape == (n_amp_freqs, n_pha_freqs, n_epochs)
        assert fvec.shape == (n_bins,)
| EtienneCmb/tensorpac | tensorpac/methods/tests/test_pac_methods.py | Python | bsd-3-clause | 4,223 | [
"Gaussian"
] | 494b145dd13a8409482439a3baffa1cc3d1ec9d5b12019b2cc21c9012bb93134 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Editors implementation for open/close operation on till operation"""
from datetime import timedelta
from kiwi.currency import currency
from kiwi.datatypes import ValidationError
from kiwi.python import Settable
from kiwi.ui.objectlist import Column, ColoredColumn, SummaryLabel
from stoqdrivers.exceptions import DriverError
from stoqlib.api import api
from stoqlib.database.expr import TransactionTimestamp
from stoqlib.domain.account import AccountTransaction
from stoqlib.domain.events import (TillOpenEvent, TillCloseEvent,
TillAddTillEntryEvent,
TillAddCashEvent, TillRemoveCashEvent)
from stoqlib.domain.person import Employee
from stoqlib.domain.till import Till
from stoqlib.exceptions import DeviceError, TillError
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.gui.slaves.tillslave import RemoveCashSlave, BaseCashSlave
from stoqlib.lib.dateutils import localnow
from stoqlib.lib.message import warning
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
def _create_transaction(store, till_entry):
    """Mirror a till entry as an AccountTransaction.

    A positive till entry is recorded as money flowing from the
    imbalance account into the tills account; a non-positive one is
    recorded as the reverse flow.

    :param store: store in which the transaction is created
    :param till_entry: the till entry being mirrored
    """
    tills_account = sysparam.get_object_id('TILLS_ACCOUNT')
    imbalance_account = sysparam.get_object_id('IMBALANCE_ACCOUNT')
    if till_entry.value > 0:
        operation_type = AccountTransaction.TYPE_IN
        source_account, dest_account = imbalance_account, tills_account
    else:
        operation_type = AccountTransaction.TYPE_OUT
        source_account, dest_account = tills_account, imbalance_account

    AccountTransaction(description=till_entry.description,
                       source_account_id=source_account,
                       account_id=dest_account,
                       value=abs(till_entry.value),
                       code=unicode(till_entry.identifier),
                       date=TransactionTimestamp(),
                       store=store,
                       payment=till_entry.payment,
                       operation_type=operation_type)
class _TillOpeningModel(object):
    # Lightweight view model backing TillOpeningEditor's proxy widgets.
    def __init__(self, till, value):
        self.till = till
        self.value = value

    def get_balance(self):
        """Return the till balance including the cash being added."""
        return currency(self.till.get_balance() + self.value)
class _TillClosingModel(object):
    # Lightweight view model backing TillClosingEditor's proxy widgets.
    # ``value`` is the cash amount being removed on closing.
    def __init__(self, till, value):
        self.till = till
        self.value = value

    def get_opening_date(self):
        # self.till is None only in the special case that the user added the ECF
        # to Stoq with a pending reduce Z, so we need to close the till on the
        # ECF, but not on Stoq.
        # Return a date in the past
        if not self.till:
            return localnow() - timedelta(1)
        return self.till.opening_date

    def get_cash_amount(self):
        """Cash amount remaining after removing self.value."""
        if not self.till:
            return currency(0)
        return currency(self.till.get_cash_amount() - self.value)

    def get_balance(self):
        """Till balance remaining after removing self.value."""
        if not self.till:
            return currency(0)
        return currency(self.till.get_balance() - self.value)
class TillOpeningEditor(BaseEditor):
    """An editor to open a till.

    You can add cash to the till in the editor and it also shows
    the balance of the till, after the cash has been added.

    Callers of this editor are responsible for sending in a valid Till object,
    which the method open_till() can be called.
    """
    title = _(u'Till Opening')
    model_type = _TillOpeningModel
    gladefile = 'TillOpening'
    confirm_widgets = ['value']
    proxy_widgets = ('value',
                     'balance')
    help_section = 'till-open'

    #
    # BaseEditorSlave
    #

    def create_model(self, store):
        # The till is opened right away; confirmation below only records
        # the initial cash and emits the open event.
        till = Till(store=store, station=api.get_current_station(store))
        till.open_till()
        return _TillOpeningModel(till=till, value=currency(0))

    def setup_proxies(self):
        self.proxy = self.add_proxy(self.model, TillOpeningEditor.proxy_widgets)

    def on_confirm(self):
        till = self.model.till
        # Using api.get_default_store instead of self.store
        # or it will return self.model.till
        last_opened = Till.get_last_opened(api.get_default_store())
        if (last_opened and
                last_opened.opening_date.date() == till.opening_date.date()):
            warning(_("A till was opened earlier this day."))
            self.retval = False
            return
        try:
            TillOpenEvent.emit(till=till)
        except (TillError, DeviceError) as e:
            warning(str(e))
            self.retval = False
            return
        value = self.proxy.model.value
        if value:
            # Record the initial cash both as a till entry and as an
            # account transaction.
            TillAddCashEvent.emit(till=till, value=value)
            till_entry = till.add_credit_entry(value, _(u'Initial Cash amount'))
            _create_transaction(self.store, till_entry)
        # The callsite is responsible for interacting with
        # the fiscal printer

    #
    # Kiwi callbacks
    #

    def on_value__validate(self, entry, data):
        # Reject negative amounts and reset the displayed balance.
        if data < currency(0):
            self.proxy.update('balance', currency(0))
            return ValidationError(
                _("You cannot add a negative amount when opening the till."))

    def after_value__content_changed(self, entry):
        self.proxy.update('balance')
class TillClosingEditor(BaseEditor):
    """Editor used to close the currently opened till.

    Shows the day history of till entries, lets the user remove the
    remaining cash, and closes the till in the database and/or on the
    fiscal printer (ECF), depending on the constructor flags.
    """
    size = (500, 440)
    title = _(u'Closing Opened Till')
    model_type = _TillClosingModel
    gladefile = 'TillClosing'
    confirm_widgets = ['value']
    proxy_widgets = ('value',
                     'balance',
                     'opening_date',
                     'observations')
    help_section = 'till-close'
    def __init__(self, store, model=None, previous_day=False, close_db=True,
                 close_ecf=True):
        """
        Create a new TillClosingEditor object.
        :param store: a database store
        :param model: a _TillClosingModel, created on demand when None
        :param previous_day: If the till wasn't closed previously
        :param close_db: if True, close the till in the database
        :param close_ecf: if True, emit TillCloseEvent (fiscal printer)
        """
        self._previous_day = previous_day
        self.till = Till.get_last(store)
        if close_db:
            # Closing in the database only makes sense with an open till.
            assert self.till
        self._close_db = close_db
        self._close_ecf = close_ecf
        BaseEditor.__init__(self, store, model)
        self._setup_widgets()
    def _setup_widgets(self):
        # We cant remove cash if closing till from a previous day
        self.value.set_sensitive(not self._previous_day)
        if self._previous_day:
            value = 0
        else:
            value = self.model.get_balance()
        self.value.update(value)
        self.day_history.set_columns(self._get_columns())
        self.day_history.connect('row-activated', lambda olist, row: self.confirm())
        self.day_history.add_list(self._get_day_history())
        # Summary row showing the total balance below the history list.
        summary_day_history = SummaryLabel(
            klist=self.day_history,
            column='value',
            label='<b>%s</b>' % api.escape(_(u'Total balance:')))
        summary_day_history.show()
        self.day_history_box.pack_start(summary_day_history, False)
    def _get_day_history(self):
        """Yield Settable(description, value) rows aggregating the till
        entries by payment method (or generic cash in/out)."""
        if not self.till:
            # Only the ECF-only flow may run without an open till.
            assert self._close_ecf and not self._close_db
            return
        day_history = {}
        day_history[_(u'Initial Amount')] = self.till.initial_cash_amount
        for entry in self.till.get_entries():
            payment = entry.payment
            if payment is not None:
                desc = payment.method.get_description()
            else:
                if entry.value > 0:
                    desc = _(u'Cash In')
                else:
                    desc = _(u'Cash Out')
            if desc in day_history.keys():
                day_history[desc] += entry.value
            else:
                day_history[desc] = entry.value
        for description, value in day_history.items():
            yield Settable(description=description, value=value)
    def _get_columns(self):
        # Columns for the day-history list; negative amounts render in red.
        return [Column('description', title=_('Description'), data_type=str,
                       width=300, sorted=True),
                ColoredColumn('value', title=_('Amount'), data_type=currency,
                              color='red', data_func=lambda x: x < 0)]
    #
    # BaseEditorSlave
    #
    def create_model(self, trans):
        # NOTE(review): the 'trans' argument is unused; the model wraps the
        # till fetched in __init__ -- confirm this is intentional.
        return _TillClosingModel(till=self.till, value=currency(0))
    def setup_proxies(self):
        # Nothing can be removed when the till has no balance.
        if self.till and not self.till.get_balance():
            self.value.set_sensitive(False)
        self.proxy = self.add_proxy(self.model,
                                    TillClosingEditor.proxy_widgets)
    def validate_confirm(self):
        """Refuse to confirm when removing more than the till balance."""
        till = self.model.till
        removed = abs(self.model.value)
        if removed and removed > till.get_balance():
            warning(_("The amount that you want to remove is "
                      "greater than the current balance."))
            return False
        return True
    def on_confirm(self):
        """Remove the requested cash, then close the ECF and/or the till.

        Returns the model on success; False/None when closing was aborted
        by the fiscal printer or by the till itself.
        """
        till = self.model.till
        removed = abs(self.model.value)
        if removed:
            # We need to do this inside a new transaction, because if the
            # till closing fails further on, this still needs to be recorded
            # in the database
            store = api.new_store()
            t_till = store.fetch(till)
            TillRemoveCashEvent.emit(till=t_till, value=removed)
            reason = _('Amount removed from Till by %s') % (
                api.get_current_user(self.store).get_description(), )
            till_entry = t_till.add_debit_entry(removed, reason)
            # Financial transaction
            _create_transaction(store, till_entry)
            # DB transaction
            store.confirm(True)
            store.close()
        if self._close_ecf:
            try:
                retval = TillCloseEvent.emit(till=till,
                                             previous_day=self._previous_day)
            except (TillError, DeviceError) as e:
                warning(str(e))
                return None
            # If the event was captured and its return value is False, then we
            # should not close the till.
            if retval is False:
                return False
        if self._close_db:
            try:
                till.close_till(observations=self.model.observations)
            except ValueError as err:
                warning(str(err))
                return
        # The callsite is responsible for interacting with
        # the fiscal printer
        return self.model
    #
    # Kiwi handlers
    #
    def after_value__validate(self, widget, value):
        """Keep the removed amount within [0, till balance]."""
        if not hasattr(self, 'proxy'):
            # Validation may fire before setup_proxies() has run.
            return
        if value < currency(0):
            self.proxy.update('balance', currency(0))
            return ValidationError(_("Value cannot be less than zero"))
        if value > self.till.get_balance():
            self.proxy.update('balance', currency(0))
            return ValidationError(_("You can not specify an amount "
                                     "removed greater than the "
                                     "till balance."))
    def after_value__content_changed(self, entry):
        # Refresh the displayed balance as the user types.
        self.proxy.update('balance')
class TillVerifyEditor(TillClosingEditor):
    """Till closing dialog reused for verification only.

    Behaves exactly like TillClosingEditor, but must never really close
    the till in the database nor on the fiscal printer (ECF).
    """
    title = _('Till verification')
    help_section = 'till-verify'

    def __init__(self, store, model=None, previous_day=False,
                 close_db=False, close_ecf=False):
        # Verification must never actually close anything.
        assert not (close_db or close_ecf)
        super(TillVerifyEditor, self).__init__(store, model=model,
                                               previous_day=previous_day,
                                               close_db=close_db,
                                               close_ecf=close_ecf)
        message = _("Use this to adjust the till for the next user.\n"
                    "Note that this will not really close the till or ecf.")
        self.set_message(message)
class CashAdvanceEditor(BaseEditor):
    """Editor used to register a cash advance paid to an employee.

    It embeds a RemoveCashSlave and adds an employee combobox so the
    resulting debit entry records who received the money.
    """
    model_name = _(u'Cash Advance')
    model_type = Settable
    gladefile = 'CashAdvanceEditor'
    def _get_employee(self):
        # Currently selected Employee object in the combo.
        return self.employee_combo.get_selected_data()
    def _get_employee_name(self):
        # Display label (person name) of the selected employee.
        return self.employee_combo.get_selected_label()
    def _setup_widgets(self):
        employees = self.store.find(Employee)
        self.employee_combo.prefill(api.for_person_combo(employees))
        self.employee_combo.set_active(0)
    #
    # BaseEditorSlave
    #
    def create_model(self, store):
        # NOTE(review): the 'store' parameter is shadowed by self.store here,
        # unlike the sibling editors' create_model() -- confirm intentional.
        till = Till.get_current(self.store)
        return Settable(employee=None,
                        payment=None,
                        # FIXME: should send in consts.now()
                        open_date=None,
                        till=till,
                        balance=till.get_balance(),
                        value=currency(0))
    def setup_slaves(self):
        self.cash_slave = RemoveCashSlave(self.store,
                                          self.model)
        self.cash_slave.value.connect('content-changed',
                                      self._on_cash_slave__value_changed)
        self.attach_slave("base_cash_holder", self.cash_slave)
        self._setup_widgets()
    def on_confirm(self):
        """Emit the remove-cash event and record the advance as a debit."""
        till = self.model.till
        value = abs(self.model.value)
        assert till
        try:
            TillRemoveCashEvent.emit(till=till, value=value)
        except (TillError, DeviceError, DriverError) as e:
            warning(str(e))
            self.retval = False
            return
        till_entry = till.add_debit_entry(
            value, (_(u'Cash advance paid to employee: %s') % (
                self._get_employee_name(), )))
        TillAddTillEntryEvent.emit(till_entry, self.store)
        _create_transaction(self.store, till_entry)
    #
    # Callbacks
    #
    def _on_cash_slave__value_changed(self, entry):
        # Advances always remove money, so force the value to be negative.
        self.cash_slave.model.value = -abs(self.cash_slave.model.value)
class BaseCashEditor(BaseEditor):
    """Base editor for adding or removing plain cash from the till.

    Subclasses provide ``cash_slave_class`` (the slave handling the value
    widget), ``event`` (emitted with the till and value on confirm) and
    implement ``create_entry(till, value, reason)``.
    """
    model_type = Settable
    gladefile = 'BaseCashEditor'
    def __init__(self, store):
        BaseEditor.__init__(self, store)
        # NOTE(review): set_confirm_widget() is called twice -- confirm that
        # kiwi registers both widgets rather than the second call replacing
        # the first.
        self.set_confirm_widget(self.reason)
        self.set_confirm_widget(self.cash_slave.value)
    #
    # BaseEditorSlave
    #
    def create_model(self, store):
        # The model is a plain Settable holding the form state.
        till = Till.get_current(store)
        return Settable(value=currency(0),
                        reason=u'',
                        till=till,
                        balance=till.get_balance())
    def setup_proxies(self):
        self.proxy = self.add_proxy(self.model, [u'reason'])
    def setup_slaves(self):
        self.cash_slave = self.cash_slave_class(self.store, self.model)
        self.attach_slave("base_cash_holder", self.cash_slave)
    def on_confirm(self):
        """Emit the subclass event and record the till entry/transaction."""
        value = abs(self.model.value)
        till = self.model.till
        assert till
        try:
            self.event.emit(till=till, value=value)
        except (TillError, DeviceError, DriverError) as e:
            warning(str(e))
            self.retval = False
            return
        till_entry = self.create_entry(till, value, self.model.reason)
        TillAddTillEntryEvent.emit(till_entry, self.store)
        _create_transaction(self.store, till_entry)
class CashOutEditor(BaseCashEditor):
    """Editor used to remove (withdraw) cash from the till."""
    model_name = _(u'Cash Out')
    title = _(u'Reverse Payment')
    cash_slave_class = RemoveCashSlave
    event = TillRemoveCashEvent
    help_section = 'till-remove-money'

    def create_entry(self, till, value, reason):
        """Record the withdrawal as a debit entry on the till."""
        description = _(u'Cash out: %s') % (reason, )
        return till.add_debit_entry(value, description)
class CashInEditor(BaseCashEditor):
    """Editor used to add (deposit) cash to the till."""
    model_name = _(u'Cash In')
    cash_slave_class = BaseCashSlave
    event = TillAddCashEvent
    help_section = 'till-add-money'

    def create_entry(self, till, value, reason):
        """Record the deposit as a credit entry on the till."""
        description = _(u'Cash in: %s') % (reason, )
        return till.add_credit_entry(value, description)
| tiagocardosos/stoq | stoqlib/gui/editors/tilleditor.py | Python | gpl-2.0 | 16,970 | [
"VisIt"
] | 17726217b4ada8c65e98badba0b053269f7f85ff048992808fe344f69755f7f3 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from joblib import Parallel, delayed, effective_n_jobs
from ..utils.validation import _num_samples
from ..utils.validation import check_non_negative
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches, get_chunk_n_rows
from ..utils import is_scalar_nan
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..utils._mask import _get_mask
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import sp_version, parse_version
from ._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
from ..exceptions import DataConversionWarning
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = float
return X, Y, dtype
@_deprecate_positional_args
def check_pairwise_arrays(X, Y, *, precomputed=False, dtype=None,
                          accept_sparse='csr', force_all_finite=True,
                          copy=False):
    """ Set X and Y appropriately and checks inputs
    If Y is None, it is set as a pointer to X (i.e. not a copy).
    If Y is given, this does not happen.
    All distance metrics should use this function first to assert that the
    given parameters are correct and safe to use.
    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats (or dtype if provided). Finally, the function
    checks that the size of the second dimension of the two arrays is equal, or
    the equivalent check for a precomputed distance matrix.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
    precomputed : bool, default=False
        True if X is to be treated as precomputed distances to the samples in
        Y.
    dtype : str, type, list of types, default=None
        Data type required for X and Y. If None, the dtype will be an
        appropriate float type selected by _return_float_dtype.
        .. versionadded:: 0.18
    accept_sparse : str, bool or list/tuple of strings, default='csr'
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.
    force_all_finite : bool or 'allow-nan', default=True
        Whether to raise an error on np.inf, np.nan, pd.NA in array. The
        possibilities are:
        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array. Values
          cannot be infinite.
        .. versionadded:: 0.22
           ``force_all_finite`` accepts the string ``'allow-nan'``.
        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`
    copy : bool, default=False
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
        .. versionadded:: 0.22
    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.
    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.
    """
    X, Y, dtype_float = _return_float_dtype(X, Y)
    estimator = 'check_pairwise_arrays'
    if dtype is None:
        dtype = dtype_float
    if Y is X or Y is None:
        # Validate once and alias: this preserves the documented
        # "Y is a pointer to X, not a copy" behavior.
        X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
                            copy=copy, force_all_finite=force_all_finite,
                            estimator=estimator)
    else:
        X = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
                        copy=copy, force_all_finite=force_all_finite,
                        estimator=estimator)
        Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype,
                        copy=copy, force_all_finite=force_all_finite,
                        estimator=estimator)
    if precomputed:
        # Columns of a precomputed distance matrix must match the number
        # of indexed samples (rows of Y).
        if X.shape[1] != Y.shape[0]:
            raise ValueError("Precomputed metric requires shape "
                             "(n_queries, n_indexed). Got (%d, %d) "
                             "for %d indexed." %
                             (X.shape[0], X.shape[1], Y.shape[0]))
    elif X.shape[1] != Y.shape[1]:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             X.shape[1], Y.shape[1]))
    return X, Y
def check_paired_arrays(X, Y):
    """Validate X and Y for paired-distance computation.

    Runs the generic pairwise validation and additionally enforces that
    both arrays have exactly the same shape, as required by all paired
    distance metrics.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.
    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy
        array. If Y was None, safe_Y will be a pointer to X.
    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape == Y.shape:
        return X, Y
    raise ValueError("X and Y should be of same shape. They were "
                     "respectively %r and %r long." % (X.shape, Y.shape))
# Pairwise distances
@_deprecate_positional_args
def euclidean_distances(X, Y=None, *, Y_norm_squared=None, squared=False,
                        X_norm_squared=None):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.
    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::
        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
    This formulation has two advantages over other ways of computing distances.
    First, it is computationally efficient when dealing with sparse data.
    Second, if one argument varies but the other remains unchanged, then
    `dot(x, x)` and/or `dot(y, y)` can be pre-computed.
    However, this is not the most precise way of doing this computation,
    because this equation potentially suffers from "catastrophic cancellation".
    Also, the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
    Read more in the :ref:`User Guide <metrics>`.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
    Y_norm_squared : array-like of shape (n_samples_2,), default=None
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)
        May be ignored in some cases, see the note below.
    squared : bool, default=False
        Return squared Euclidean distances.
    X_norm_squared : array-like of shape (n_samples,), default=None
        Pre-computed dot-products of vectors in X (e.g.,
        ``(X**2).sum(axis=1)``)
        May be ignored in some cases, see the note below.
    Notes
    -----
    To achieve better accuracy, `X_norm_squared` and `Y_norm_squared` may be
    unused if they are passed as ``float32``.
    Returns
    -------
    distances : array, shape (n_samples_1, n_samples_2)
    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[0., 1.],
           [1., 0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[1.        ],
           [1.41421356]])
    See also
    --------
    paired_distances : distances betweens pairs of elements of X and Y.
    """
    X, Y = check_pairwise_arrays(X, Y)
    # If norms are passed as float32, they are unused. If arrays are passed as
    # float32, norms needs to be recomputed on upcast chunks.
    # TODO: use a float64 accumulator in row_norms to avoid the latter.
    if X_norm_squared is not None:
        XX = check_array(X_norm_squared)
        if XX.shape == (1, X.shape[0]):
            XX = XX.T
        elif XX.shape != (X.shape[0], 1):
            raise ValueError(
                "Incompatible dimensions for X and X_norm_squared")
        if XX.dtype == np.float32:
            # float32 norms are too imprecise; None forces recomputation
            # in the upcast path below.
            XX = None
    elif X.dtype == np.float32:
        XX = None
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]
    if X is Y and XX is not None:
        # shortcut in the common case euclidean_distances(X, X)
        YY = XX.T
    elif Y_norm_squared is not None:
        YY = np.atleast_2d(Y_norm_squared)
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")
        if YY.dtype == np.float32:
            YY = None
    elif Y.dtype == np.float32:
        YY = None
    else:
        YY = row_norms(Y, squared=True)[np.newaxis, :]
    if X.dtype == np.float32:
        # To minimize precision issues with float32, we compute the distance
        # matrix on chunks of X and Y upcast to float64
        distances = _euclidean_distances_upcast(X, XX, Y, YY)
    else:
        # if dtype is already float64, no need to chunk and upcast
        distances = - 2 * safe_sparse_dot(X, Y.T, dense_output=True)
        distances += XX
        distances += YY
    # Clamp tiny negative values produced by cancellation before sqrt.
    np.maximum(distances, 0, out=distances)
    # Ensure that distances between vectors and themselves are set to 0.0.
    # This may not be the case due to floating point rounding errors.
    if X is Y:
        np.fill_diagonal(distances, 0)
    return distances if squared else np.sqrt(distances, out=distances)
@_deprecate_positional_args
def nan_euclidean_distances(X, Y=None, *, squared=False,
                            missing_values=np.nan, copy=True):
    """Calculate the euclidean distances in the presence of missing values.
    Compute the euclidean distance between each pair of samples in X and Y,
    where Y=X is assumed if Y=None. When calculating the distance between a
    pair of samples, this formulation ignores feature coordinates with a
    missing value in either sample and scales up the weight of the remaining
    coordinates:
        dist(x,y) = sqrt(weight * sq. distance from present coordinates)
        where,
        weight = Total # of coordinates / # of present coordinates
    For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``
    is:
        .. math::
            \\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}
    If all the coordinates are missing or if there are no common present
    coordinates then NaN is returned for that pair.
    Read more in the :ref:`User Guide <metrics>`.
    .. versionadded:: 0.22
    Parameters
    ----------
    X : array-like, shape=(n_samples_1, n_features)
    Y : array-like, shape=(n_samples_2, n_features)
    squared : bool, default=False
        Return squared Euclidean distances.
    missing_values : np.nan or int, default=np.nan
        Representation of missing value
    copy : boolean, default=True
        Make and use a deep copy of X and Y (if Y exists)
    Returns
    -------
    distances : array, shape (n_samples_1, n_samples_2)
    Examples
    --------
    >>> from sklearn.metrics.pairwise import nan_euclidean_distances
    >>> nan = float("NaN")
    >>> X = [[0, 1], [1, nan]]
    >>> nan_euclidean_distances(X, X) # distance between rows of X
    array([[0.        , 1.41421356],
           [1.41421356, 0.        ]])
    >>> # get distance to origin
    >>> nan_euclidean_distances(X, [[0, 0]])
    array([[1.        ],
           [1.41421356]])
    References
    ----------
    * John K. Dixon, "Pattern Recognition with Partly Missing Data",
      IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
      10, pp. 617 - 621, Oct. 1979.
      http://ieeexplore.ieee.org/abstract/document/4310090/
    See also
    --------
    paired_distances : distances between pairs of elements of X and Y.
    """
    force_all_finite = 'allow-nan' if is_scalar_nan(missing_values) else True
    X, Y = check_pairwise_arrays(X, Y, accept_sparse=False,
                                 force_all_finite=force_all_finite, copy=copy)
    # Get missing mask for X
    missing_X = _get_mask(X, missing_values)
    # Get missing mask for Y
    missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)
    # set missing values to zero
    X[missing_X] = 0
    Y[missing_Y] = 0
    distances = euclidean_distances(X, Y, squared=True)
    # Adjust distances for missing values
    XX = X * X
    YY = Y * Y
    # Remove the spurious contribution added by the zeroed-out (missing)
    # coordinates of one sample against the present values of the other.
    distances -= np.dot(XX, missing_Y.T)
    distances -= np.dot(missing_X, YY.T)
    np.clip(distances, 0, None, out=distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        np.fill_diagonal(distances, 0.0)
    present_X = 1 - missing_X
    present_Y = present_X if Y is X else ~missing_Y
    present_count = np.dot(present_X, present_Y.T)
    distances[present_count == 0] = np.nan
    # avoid divide by zero
    np.maximum(1, present_count, out=present_count)
    # Scale by (total coordinates / present coordinates).
    distances /= present_count
    distances *= X.shape[1]
    if not squared:
        np.sqrt(distances, out=distances)
    return distances
def _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None):
    """Euclidean distances between X and Y
    Assumes X and Y have float32 dtype.
    Assumes XX and YY have float64 dtype or are None.
    X and Y are upcast to float64 by chunks, which size is chosen to limit
    memory increase by approximately 10% (at least 10MiB).
    """
    n_samples_X = X.shape[0]
    n_samples_Y = Y.shape[0]
    n_features = X.shape[1]
    distances = np.empty((n_samples_X, n_samples_Y), dtype=np.float32)
    if batch_size is None:
        x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1
        y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1
        # Allow 10% more memory than X, Y and the distance matrix take (at
        # least 10MiB)
        maxmem = max(
            ((x_density * n_samples_X + y_density * n_samples_Y) * n_features
             + (x_density * n_samples_X * y_density * n_samples_Y)) / 10,
            10 * 2 ** 17)
        # The increase amount of memory in 8-byte blocks is:
        # - x_density * batch_size * n_features (copy of chunk of X)
        # - y_density * batch_size * n_features (copy of chunk of Y)
        # - batch_size * batch_size (chunk of distance matrix)
        # Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem
        #                                 xd=x_density and yd=y_density
        tmp = (x_density + y_density) * n_features
        # Positive root of the quadratic above.
        batch_size = (-tmp + np.sqrt(tmp ** 2 + 4 * maxmem)) / 2
        batch_size = max(int(batch_size), 1)
    x_batches = gen_batches(n_samples_X, batch_size)
    for i, x_slice in enumerate(x_batches):
        X_chunk = X[x_slice].astype(np.float64)
        if XX is None:
            XX_chunk = row_norms(X_chunk, squared=True)[:, np.newaxis]
        else:
            XX_chunk = XX[x_slice]
        y_batches = gen_batches(n_samples_Y, batch_size)
        for j, y_slice in enumerate(y_batches):
            if X is Y and j < i:
                # when X is Y the distance matrix is symmetric so we only need
                # to compute half of it.
                d = distances[y_slice, x_slice].T
            else:
                Y_chunk = Y[y_slice].astype(np.float64)
                if YY is None:
                    YY_chunk = row_norms(Y_chunk, squared=True)[np.newaxis, :]
                else:
                    YY_chunk = YY[:, y_slice]
                # ||x||² - 2 x.y + ||y||², accumulated in float64.
                d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True)
                d += XX_chunk
                d += YY_chunk
            distances[x_slice, y_slice] = d.astype(np.float32, copy=False)
    return distances
def _argmin_min_reduce(dist, start):
indices = dist.argmin(axis=1)
values = dist[np.arange(dist.shape[0]), indices]
return indices, values
@_deprecate_positional_args
def pairwise_distances_argmin_min(X, Y, *, axis=1, metric="euclidean",
                                  metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.
    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance). The minimal distances are
    also returned.
    This is mostly equivalent to calling:
        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
    but uses much less memory, and is faster for large arrays.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples1, n_features)
        Array containing points.
    Y : {array-like, sparse matrix}, shape (n_samples2, n_features)
        Arrays containing points.
    axis : int, default=1
        Axis along which the argmin and distances are to be computed.
    metric : str or callable, default='euclidean'
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.
        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.
        Distance matrices are not supported.
        Valid values for metric are:
        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
          'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
          'yule']
        See the documentation for scipy.spatial.distance for details on these
        metrics.
    metric_kwargs : dict, default=None
        Keyword arguments to pass to specified metric function.
    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].
    distances : numpy.ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.
    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    X, Y = check_pairwise_arrays(X, Y)
    if metric_kwargs is None:
        metric_kwargs = {}
    if axis == 0:
        # axis=0 asks for the argmin over X for every row of Y; swapping
        # the operands lets the chunked reduction always work row-wise.
        X, Y = Y, X
    indices, values = zip(*pairwise_distances_chunked(
        X, Y, reduce_func=_argmin_min_reduce, metric=metric,
        **metric_kwargs))
    # Each chunk yields one (indices, values) pair; stitch them together.
    indices = np.concatenate(indices)
    values = np.concatenate(values)
    return indices, values
@_deprecate_positional_args
def pairwise_distances_argmin(X, Y, *, axis=1, metric="euclidean",
                              metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.
    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance).
    This is mostly equivalent to calling:
        pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
    but uses much less memory, and is faster for large arrays.
    This function works with dense 2D arrays only.
    Parameters
    ----------
    X : array-like
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)
    Y : array-like
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)
    axis : int, default=1
        Axis along which the argmin and distances are to be computed.
    metric : str or callable, default="euclidean"
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.
        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.
        Distance matrices are not supported.
        Valid values for metric are:
        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
          'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
          'yule']
        See the documentation for scipy.spatial.distance for details on these
        metrics.
    metric_kwargs : dict, default=None
        keyword arguments to pass to specified metric function.
    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].
    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    if metric_kwargs is None:
        metric_kwargs = {}
    # Delegate to the *_min variant and discard the distances.
    return pairwise_distances_argmin_min(X, Y, axis=axis, metric=metric,
                                         metric_kwargs=metric_kwargs)[0]
def haversine_distances(X, Y=None):
    """Compute the Haversine distance between samples in X and Y
    The Haversine (or great circle) distance is the angular distance between
    two points on the surface of a sphere. The first coordinate of each point
    is assumed to be the latitude, the second is the longitude, given
    in radians. The dimension of the data must be 2.
    .. math::
       D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x1 - y1) / 2)
                                + \\cos(x1)\\cos(y1)\\sin^2((x2 - y2) / 2)}]
    Parameters
    ----------
    X : array-like, shape (n_samples_1, 2)
    Y : array-like of shape (n_samples_2, 2), default=None
    Returns
    -------
    distance : {array}, shape (n_samples_1, n_samples_2)
    Notes
    -----
    As the Earth is nearly spherical, the haversine formula provides a good
    approximation of the distance between two points of the Earth surface, with
    a less than 1% error on average.
    Examples
    --------
    We want to calculate the distance between the Ezeiza Airport
    (Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris, France)
    >>> from sklearn.metrics.pairwise import haversine_distances
    >>> from math import radians
    >>> bsas = [-34.83333, -58.5166646]
    >>> paris = [49.0083899664, 2.53844117956]
    >>> bsas_in_radians = [radians(_) for _ in bsas]
    >>> paris_in_radians = [radians(_) for _ in paris]
    >>> result = haversine_distances([bsas_in_radians, paris_in_radians])
    >>> result * 6371000/1000  # multiply by Earth radius to get kilometers
    array([[    0.        , 11099.54035582],
           [11099.54035582,     0.        ]])
    """
    from sklearn.neighbors import DistanceMetric
    # NOTE(review): the import is function-local, presumably to avoid a
    # circular dependency between metrics and neighbors -- confirm before
    # moving it to module level.
    return DistanceMetric.get_metric('haversine').pairwise(X, Y)
@_deprecate_positional_args
def manhattan_distances(X, Y=None, *, sum_over_features=True):
    """ Compute the L1 distances between the vectors in X and Y.
    With sum_over_features equal to False it returns the componentwise
    distances.
    Read more in the :ref:`User Guide <metrics>`.
    Parameters
    ----------
    X : array-like
        An array with shape (n_samples_X, n_features).
    Y : array-like, optional
        An array with shape (n_samples_Y, n_features).
    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.
    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.
    Notes
    --------
    When X and/or Y are CSR sparse matrices and they are not already
    in canonical format, this function modifies them in-place to
    make them canonical.
    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances([[3]], [[3]])
    array([[0.]])
    >>> manhattan_distances([[3]], [[2]])
    array([[1.]])
    >>> manhattan_distances([[2]], [[3]])
    array([[1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])
    array([[0., 2.],
           [4., 4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = np.full((2, 2), 2.)
    >>> manhattan_distances(X, y, sum_over_features=False)
    array([[1., 1.],
           [1., 1.]])
    """
    X, Y = check_pairwise_arrays(X, Y)
    if issparse(X) or issparse(Y):
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        X.sum_duplicates()  # this also sorts indices in-place
        Y.sum_duplicates()
        D = np.zeros((X.shape[0], Y.shape[0]))
        # Cython kernel fills D in place.
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          D)
        return D
    if sum_over_features:
        # Dense pairwise case: delegate to scipy's C implementation.
        return distance.cdist(X, Y, 'cityblock')
    # Componentwise case: broadcast to (n_X, n_Y, n_features), take |.| in
    # place, then flatten the first two axes.
    D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    D = np.abs(D, D)
    return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute the cosine distance between all samples in X and Y.

    Cosine distance is defined as ``1.0 - cosine_similarity``.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        Matrix `X`.

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
            default=None
        Matrix `Y`.

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine : dense matrices only
    """
    # Compute 1.0 - cosine_similarity(X, Y) with in-place ops to avoid
    # allocating a second matrix.
    dist = cosine_similarity(X, Y)
    dist *= -1
    dist += 1
    np.clip(dist, 0, 2, out=dist)
    if X is Y or Y is None:
        # Floating point rounding can leave tiny nonzero self-distances;
        # force the diagonal to exactly zero.
        dist[np.diag_indices_from(dist)] = 0.0
    return dist
# Paired distances
def paired_euclidean_distances(X, Y):
    """Compute the row-wise (paired) euclidean distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
        ``distances[i]`` is the euclidean distance between ``X[i]`` and
        ``Y[i]``.
    """
    X, Y = check_paired_arrays(X, Y)
    difference = X - Y
    # row_norms computes the L2 norm of each row, i.e. the paired distance.
    return row_norms(difference)
def paired_manhattan_distances(X, Y):
    """Compute the paired L1 (manhattan) distances between rows of X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
        ``distances[i]`` is the sum of absolute differences between
        ``X[i]`` and ``Y[i]``.
    """
    X, Y = check_paired_arrays(X, Y)
    delta = X - Y
    if not issparse(delta):
        return np.abs(delta).sum(axis=-1)
    # Sparse path: take |.| on the stored values, then reduce per row.
    delta.data = np.abs(delta.data)
    return np.squeeze(np.array(delta.sum(axis=1)))
def paired_cosine_distances(X, Y):
    """Compute the paired cosine distances between rows of X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray, shape (n_samples, )

    Notes
    -----
    The cosine distance is equivalent to half the squared euclidean
    distance if each sample is normalized to unit norm.
    """
    X, Y = check_paired_arrays(X, Y)
    # After L2-normalization, cosine distance equals half the squared
    # euclidean distance between the normalized rows.
    normalized_delta = normalize(X) - normalize(Y)
    return .5 * row_norms(normalized_delta, squared=True)
# Mapping from metric name to the paired-distance implementation used by
# paired_distances().  'l2'/'euclidean' and 'l1'/'manhattan'/'cityblock'
# are aliases of each other.
PAIRED_DISTANCES = {
    'cosine': paired_cosine_distances,
    'euclidean': paired_euclidean_distances,
    'l2': paired_euclidean_distances,
    'l1': paired_manhattan_distances,
    'manhattan': paired_manhattan_distances,
    'cityblock': paired_manhattan_distances}
@_deprecate_positional_args
def paired_distances(X, Y, *, metric="euclidean", **kwds):
    """Compute the paired distances between X and Y.

    Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : ndarray (n_samples, n_features)
        Array 1 for distance computation.

    Y : ndarray (n_samples, n_features)
        Array 2 for distance computation.

    metric : str or callable, default="euclidean"
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        specified in PAIRED_DISTANCES, including "euclidean",
        "manhattan", or "cosine".
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.

    Returns
    -------
    distances : ndarray (n_samples, )

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([0., 1.])

    See also
    --------
    pairwise_distances : Computes the distance between every pair of samples
    """
    # Named metrics dispatch through the PAIRED_DISTANCES registry.
    if metric in PAIRED_DISTANCES:
        return PAIRED_DISTANCES[metric](X, Y)
    if callable(metric):
        # Check the matrix first (it is usually done by the metric).
        X, Y = check_paired_arrays(X, Y)
        n_pairs = len(X)
        distances = np.zeros(n_pairs)
        for idx in range(n_pairs):
            distances[idx] = metric(X[idx], Y[idx])
        return distances
    raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None, dense_output=True):
    """Compute the linear kernel between X and Y.

    Read more in the :ref:`User Guide <linear_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)

    Y : array of shape (n_samples_2, n_features), default=None

    dense_output : bool, default=True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

        .. versionadded:: 0.20

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    # The linear kernel is simply the Gram matrix X @ Y.T.
    gram = safe_sparse_dot(X, Y.T, dense_output=dense_output)
    return gram
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Read more in the :ref:`User Guide <polynomial_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)

    Y : ndarray of shape (n_samples_2, n_features), default=None

    degree : int, default=3

    gamma : float, default=None
        If None, defaults to 1.0 / n_features.

    coef0 : float, default=1

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # Build the kernel from the Gram matrix with in-place updates to avoid
    # intermediate allocations.
    kernel = safe_sparse_dot(X, Y.T, dense_output=True)
    kernel *= gamma
    kernel += coef0
    kernel **= degree
    return kernel
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Read more in the :ref:`User Guide <sigmoid_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)

    Y : ndarray of shape (n_samples_2, n_features), default=None

    gamma : float, default=None
        If None, defaults to 1.0 / n_features.

    coef0 : float, default=1

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # tanh(gamma * <X, Y> + coef0), evaluated fully in-place.
    kernel = safe_sparse_dot(X, Y.T, dense_output=True)
    kernel *= gamma
    kernel += coef0
    np.tanh(kernel, out=kernel)
    return kernel
def rbf_kernel(X, Y=None, gamma=None):
    """Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <rbf_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features), default=None

    gamma : float, default=None
        If None, defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # exp(-gamma * squared euclidean distance), computed in-place.
    kernel = euclidean_distances(X, Y, squared=True)
    kernel *= -gamma
    np.exp(kernel, out=kernel)
    return kernel
def laplacian_kernel(X, Y=None, gamma=None):
    """Compute the laplacian kernel between X and Y.

    The laplacian kernel is defined as::

        K(x, y) = exp(-gamma ||x-y||_1)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <laplacian_kernel>`.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features), default=None

    gamma : float, default=None
        If None, defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # exp(-gamma * L1 distance), with the exponentiation done in-place.
    kernel = manhattan_distances(X, Y)
    kernel *= -gamma
    np.exp(kernel, out=kernel)
    return kernel
def cosine_similarity(X, Y=None, dense_output=True):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Read more in the :ref:`User Guide <cosine_similarity>`.

    Parameters
    ----------
    X : ndarray or sparse array, shape: (n_samples_X, n_features)
        Input data.

    Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features), \
            default=None
        Input data. If ``None``, the output will be the pairwise
        similarities between all samples in ``X``.

    dense_output : bool, default=True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

        .. versionadded:: 0.17
           parameter ``dense_output`` for dense output.

    Returns
    -------
    kernel matrix : array
        An array with shape (n_samples_X, n_samples_Y).
    """
    # to avoid recursive import
    X, Y = check_pairwise_arrays(X, Y)

    X_normalized = normalize(X, copy=True)
    # Reuse the normalized X when computing similarities of X with itself.
    Y_normalized = X_normalized if X is Y else normalize(Y, copy=True)

    return safe_sparse_dot(X_normalized, Y_normalized.T,
                           dense_output=dense_output)
def additive_chi2_kernel(X, Y=None):
    """Compute the additive chi-squared kernel between observations in X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features), default=None

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf

    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    # The Cython helper only handles dense arrays; reject sparse input early.
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    # The kernel is only defined for non-negative inputs (histograms).
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")

    kernel = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    # Fill `kernel` in-place via the compiled helper.
    _chi2_kernel_fast(X, Y, kernel)
    return kernel
def chi2_kernel(X, Y=None, gamma=1.):
    """Compute the exponential chi-squared kernel between X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features), default=None

    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # exp(gamma * additive_chi2), computed in-place on the additive kernel.
    kernel = additive_chi2_kernel(X, Y)
    kernel *= gamma
    np.exp(kernel, out=kernel)
    return kernel
# Helper functions - distance
# Mapping from metric name to the pairwise-distance implementation used by
# pairwise_distances().  'l1'/'manhattan'/'cityblock' and 'l2'/'euclidean'
# are aliases of each other.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'haversine': haversine_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances,
    'precomputed': None,  # HACK: precomputed is always allowed, never called
    'nan_euclidean': nan_euclidean_distances,
}
def distance_metrics():
    """Valid metrics for pairwise_distances.

    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.

    The valid distance metrics, and the function they map to, are:

    =============== ========================================
    metric          Function
    =============== ========================================
    'cityblock'     metrics.pairwise.manhattan_distances
    'cosine'        metrics.pairwise.cosine_distances
    'euclidean'     metrics.pairwise.euclidean_distances
    'haversine'     metrics.pairwise.haversine_distances
    'l1'            metrics.pairwise.manhattan_distances
    'l2'            metrics.pairwise.euclidean_distances
    'manhattan'     metrics.pairwise.manhattan_distances
    'nan_euclidean' metrics.pairwise.nan_euclidean_distances
    =============== ========================================

    Read more in the :ref:`User Guide <metrics>`.

    Returns
    -------
    distance_metrics : dict
        Mapping from metric name (str) to the implementing function.
    """
    return PAIRWISE_DISTANCE_FUNCTIONS
def _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):
    """Write in-place to a slice of a distance matrix.

    Parameters
    ----------
    dist_func : callable
        Function computing one vertical slice of the distance matrix.
    dist_matrix : ndarray
        Preallocated output; only the columns selected by ``slice_`` are
        written.
    slice_ : slice
        Column slice of ``dist_matrix`` to fill.
    *args, **kwargs
        Forwarded verbatim to ``dist_func``.
    """
    dist_matrix[:, slice_] = dist_func(*args, **kwargs)
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Break the pairwise matrix in n_jobs even slices
    and compute them in parallel.

    ``func`` is applied to ``(X, Y[s])`` for each column slice ``s``; each
    worker thread writes its slice of the output matrix in place via
    ``_dist_wrapper``.
    """
    if Y is None:
        Y = X
    X, Y, dtype = _return_float_dtype(X, Y)

    if effective_n_jobs(n_jobs) == 1:
        # Single job: no need for the slicing/threading machinery.
        return func(X, Y, **kwds)

    # enforce a threading backend to prevent data communication overhead
    # (threads share the output array `ret`, so no pickling is needed)
    fd = delayed(_dist_wrapper)
    ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order='F')
    Parallel(backend="threading", n_jobs=n_jobs)(
        fd(func, ret, s, X, Y[s], **kwds)
        for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs)))

    if (X is Y or Y is None) and func is euclidean_distances:
        # zeroing diagonal for euclidean norm.
        # TODO: do it also for other norms.
        np.fill_diagonal(ret, 0)

    return ret
def _pairwise_callable(X, Y, metric, force_all_finite=True, **kwds):
    """Handle the callable case for pairwise_{distances,kernels}.

    Evaluates ``metric`` on every pair of rows, exploiting symmetry when
    ``X is Y``.
    """
    X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)

    n_x, n_y = X.shape[0], Y.shape[0]
    if X is Y:
        # Only calculate metric for upper triangle
        out = np.zeros((n_x, n_y), dtype='float')
        for i, j in itertools.combinations(range(n_x), 2):
            out[i, j] = metric(X[i], Y[j], **kwds)
        # Make symmetric
        # NB: out += out.T will produce incorrect results
        out = out + out.T
        # Calculate diagonal
        # NB: nonzero diagonals are allowed for both metrics and kernels
        for i in range(n_x):
            row = X[i]
            out[i, i] = metric(row, row, **kwds)
    else:
        # Calculate all cells
        out = np.empty((n_x, n_y), dtype='float')
        for i, j in itertools.product(range(n_x), range(n_y)):
            out[i, j] = metric(X[i], Y[j], **kwds)

    return out
# Metric names accepted by pairwise_distances: the scikit-learn
# implementations plus everything scipy.spatial.distance.cdist/pdist accepts.
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
                  'braycurtis', 'canberra', 'chebyshev', 'correlation',
                  'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
                  'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
                  'russellrao', 'seuclidean', 'sokalmichener',
                  'sokalsneath', 'sqeuclidean', 'yule', "wminkowski",
                  'nan_euclidean', 'haversine']

# Metrics that support missing values (NaN) in the input.
_NAN_METRICS = ['nan_euclidean']
def _check_chunk_size(reduced, chunk_size):
    """Check that reduce_func returned sequence(s) of the expected length.

    ``reduced`` may be a single sequence or a tuple of sequences; each one
    must have exactly ``chunk_size`` samples.  ``None`` (in-place
    reductions) is accepted as-is.
    """
    if reduced is None:
        return
    is_tuple = isinstance(reduced, tuple)
    # Normalize to a tuple of results for uniform validation below.
    chunks = reduced if is_tuple else (reduced,)
    if any(isinstance(r, tuple) or not hasattr(r, '__iter__')
           for r in chunks):
        raise TypeError('reduce_func returned %r. '
                        'Expected sequence(s) of length %d.' %
                        (reduced, chunk_size))
    if any(_num_samples(r) != chunk_size for r in chunks):
        actual_size = tuple(_num_samples(r) for r in chunks)
        raise ValueError('reduce_func returned object of length %s. '
                         'Expected same length as input: %d.' %
                         (actual_size if is_tuple else actual_size[0],
                          chunk_size))
def _precompute_metric_params(X, Y, metric=None, **kwds):
    """Precompute data-derived metric parameters if not provided.

    For 'seuclidean' the per-feature variances ``V`` and for 'mahalanobis'
    the inverse covariance ``VI`` are derived from the data when the caller
    did not supply them.  Returns a (possibly empty) dict of keyword
    arguments to merge into the metric kwds.
    """
    if metric == "seuclidean" and 'V' not in kwds:
        # There is a bug in scipy < 1.5 that will cause a crash if
        # X.dtype != np.double (float64). See PR #15730
        dtype = np.float64 if sp_version < parse_version('1.5') else None
        if X is Y:
            V = np.var(X, axis=0, ddof=1, dtype=dtype)
        else:
            # Deriving V from X and Y jointly is deprecated behaviour.
            warnings.warn("from version 0.25, pairwise_distances for "
                          "metric='seuclidean' will require V to be "
                          "specified if Y is passed.", FutureWarning)
            V = np.var(np.vstack([X, Y]), axis=0, ddof=1, dtype=dtype)
        return {'V': V}
    if metric == "mahalanobis" and 'VI' not in kwds:
        if X is Y:
            VI = np.linalg.inv(np.cov(X.T)).T
        else:
            # Deriving VI from X and Y jointly is deprecated behaviour.
            warnings.warn("from version 0.25, pairwise_distances for "
                          "metric='mahalanobis' will require VI to be "
                          "specified if Y is passed.", FutureWarning)
            VI = np.linalg.inv(np.cov(np.vstack([X, Y]).T)).T
        return {'VI': VI}
    return {}
@_deprecate_positional_args
def pairwise_distances_chunked(X, Y=None, *, reduce_func=None,
                               metric='euclidean', n_jobs=None,
                               working_memory=None, **kwds):
    """Generate a distance matrix chunk by chunk with optional reduction.

    In cases where not all of a pairwise distance matrix needs to be stored
    at once, this is used to calculate pairwise distances in
    ``working_memory``-sized chunks. If ``reduce_func`` is given, it is run
    on each chunk and its return values are concatenated into lists, arrays
    or sparse matrices.

    Parameters
    ----------
    X : array of shape (n_samples_a, n_samples_a) if metric == \
            "precomputed", or of shape (n_samples_a, n_features) otherwise
        Array of pairwise distances between samples, or a feature array.

    Y : array of shape (n_samples_b, n_features), default=None
        An optional second feature array. Only allowed if
        metric != "precomputed".

    reduce_func : callable, default=None
        The function which is applied on each chunk of the distance matrix,
        reducing it to needed values. ``reduce_func(D_chunk, start)``
        is called repeatedly, where ``D_chunk`` is a contiguous vertical
        slice of the pairwise distance matrix, starting at row ``start``.
        It should return one of: None; an array, a list, or a sparse matrix
        of length ``D_chunk.shape[0]``; or a tuple of such objects.
        Returning None is useful for in-place operations, rather than
        reductions.

        If None, pairwise_distances_chunked returns a generator of vertical
        chunks of the distance matrix.

    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter,
        or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on
        each pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.

    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by
        breaking down the pairwise matrix into n_jobs even slices and
        computing them in parallel.

        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    working_memory : int, default=None
        The sought maximum memory for temporary distance matrix chunks.
        When None (default), the value of
        ``sklearn.get_config()['working_memory']`` is used.

    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Yields
    ------
    D_chunk : array or sparse matrix
        A contiguous slice of distance matrix, optionally processed by
        ``reduce_func``.

    Examples
    --------
    Without reduce_func:

    >>> import numpy as np
    >>> from sklearn.metrics import pairwise_distances_chunked
    >>> X = np.random.RandomState(0).rand(5, 3)
    >>> D_chunk = next(pairwise_distances_chunked(X))
    >>> D_chunk
    array([[0.  ..., 0.29..., 0.41..., 0.19..., 0.57...],
           [0.29..., 0.  ..., 0.57..., 0.41..., 0.76...],
           [0.41..., 0.57..., 0.  ..., 0.44..., 0.90...],
           [0.19..., 0.41..., 0.44..., 0.  ..., 0.51...],
           [0.57..., 0.76..., 0.90..., 0.51..., 0.  ...]])

    Retrieve all neighbors and average distance within radius r:

    >>> r = .2
    >>> def reduce_func(D_chunk, start):
    ...     neigh = [np.flatnonzero(d < r) for d in D_chunk]
    ...     avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1)
    ...     return neigh, avg_dist
    >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func)
    >>> neigh, avg_dist = next(gen)
    >>> neigh
    [array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])]
    >>> avg_dist
    array([0.039..., 0.        , 0.        , 0.039..., 0.        ])

    Where r is defined per sample, we need to make use of ``start``:

    >>> r = [.2, .4, .4, .3, .1]
    >>> def reduce_func(D_chunk, start):
    ...     neigh = [np.flatnonzero(d < r[i])
    ...              for i, d in enumerate(D_chunk, start)]
    ...     return neigh
    >>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func))
    >>> neigh
    [array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])]

    Force row-by-row generation by reducing ``working_memory``:

    >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func,
    ...                                  working_memory=0)
    >>> next(gen)
    [array([0, 3])]
    >>> next(gen)
    [array([0, 1])]
    """
    n_samples_X = _num_samples(X)
    if metric == 'precomputed':
        # X already is the distance matrix: emit it as a single chunk.
        slices = (slice(0, n_samples_X),)
    else:
        if Y is None:
            Y = X
        # We get as many rows as possible within our working_memory budget to
        # store len(Y) distances in each row of output.
        #
        # Note:
        #  - this will get at least 1 row, even if 1 row of distances will
        #    exceed working_memory.
        #  - this does not account for any temporary memory usage while
        #    calculating distances (e.g. difference of vectors in manhattan
        #    distance.
        chunk_n_rows = get_chunk_n_rows(row_bytes=8 * _num_samples(Y),
                                        max_n_rows=n_samples_X,
                                        working_memory=working_memory)
        slices = gen_batches(n_samples_X, chunk_n_rows)

    # precompute data-derived metric params
    params = _precompute_metric_params(X, Y, metric=metric, **kwds)
    kwds.update(**params)

    for sl in slices:
        if sl.start == 0 and sl.stop == n_samples_X:
            X_chunk = X  # enable optimised paths for X is Y
        else:
            X_chunk = X[sl]
        D_chunk = pairwise_distances(X_chunk, Y, metric=metric,
                                     n_jobs=n_jobs, **kwds)
        if ((X is Y or Y is None)
                and PAIRWISE_DISTANCE_FUNCTIONS.get(metric, None)
                is euclidean_distances):
            # zeroing diagonal, taking care of aliases of "euclidean",
            # i.e. "l2"
            D_chunk.flat[sl.start::_num_samples(X) + 1] = 0
        if reduce_func is not None:
            # Validate that the user's reduce_func preserved the chunk's
            # number of rows before yielding its output.
            chunk_size = D_chunk.shape[0]
            D_chunk = reduce_func(D_chunk, sl.start)
            _check_chunk_size(D_chunk, chunk_size)
        yield D_chunk
@_deprecate_positional_args
def pairwise_distances(X, Y=None, metric="euclidean", *, n_jobs=None,
                       force_all_finite=True, **kwds):
    """ Compute the distance matrix from a vector array X and optional Y.

    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.

    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.

    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.

    Valid values for metric are:

    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix
      inputs.
      ['nan_euclidean'] but it does not yet support sparse matrices.

    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
      'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
      'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      See the documentation for scipy.spatial.distance for details on these
      metrics. These metrics do not support sparse matrix inputs.

    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
            [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.

    Y : array of shape (n_samples_b, n_features), default=None
        An optional second feature array. Only allowed if
        metric != "precomputed".

    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on
        each pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.

    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by
        breaking down the pairwise matrix into n_jobs even slices and
        computing them in parallel.

        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    force_all_finite : bool or 'allow-nan', default=True
        Whether to raise an error on np.inf, np.nan, pd.NA in array. The
        possibilities are:

        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array. Values
          cannot be infinite.

        .. versionadded:: 0.22
           ``force_all_finite`` accepts the string ``'allow-nan'``.

        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`

    **kwds : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith
        array from X and the jth array from Y.

    See also
    --------
    pairwise_distances_chunked : performs the same calculation as this
        function, but returns a generator of chunks of the distance matrix,
        in order to limit memory usage.
    paired_distances : Computes the distances between corresponding
        elements of two arrays
    """
    if (metric not in _VALID_METRICS and
            not callable(metric) and metric != "precomputed"):
        raise ValueError("Unknown metric %s. "
                         "Valid metrics are %s, or 'precomputed', or a "
                         "callable" % (metric, _VALID_METRICS))

    if metric == "precomputed":
        # X is already a distance matrix: validate and return it unchanged.
        X, _ = check_pairwise_arrays(X, Y, precomputed=True,
                                     force_all_finite=force_all_finite)

        whom = ("`pairwise_distances`. Precomputed distance "
                " need to have non-negative values.")
        check_non_negative(X, whom=whom)
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        # Fast scikit-learn implementation (supports sparse input).
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric,
                       force_all_finite=force_all_finite, **kwds)
    else:
        # Fall back to scipy.spatial.distance (dense input only).
        if issparse(X) or issparse(Y):
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")

        dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None

        if (dtype == bool and
                (X.dtype != bool or (Y is not None and Y.dtype != bool))):
            msg = "Data was converted to boolean for metric %s" % metric
            warnings.warn(msg, DataConversionWarning)

        X, Y = check_pairwise_arrays(X, Y, dtype=dtype,
                                     force_all_finite=force_all_finite)

        # precompute data-derived metric params
        params = _precompute_metric_params(X, Y, metric=metric, **kwds)
        kwds.update(**params)

        if effective_n_jobs(n_jobs) == 1 and X is Y:
            # pdist exploits symmetry and is cheaper than cdist here.
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        func = partial(distance.cdist, metric=metric, **kwds)

    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# These distances require boolean arrays, when using scipy.spatial.distance.
# pairwise_distances converts the input to bool (with a warning) before
# dispatching to scipy for any metric in this list.
PAIRWISE_BOOLEAN_FUNCTIONS = [
    'dice',
    'jaccard',
    'kulsinski',
    'matching',
    'rogerstanimoto',
    'russellrao',
    'sokalmichener',
    'sokalsneath',
    'yule',
]
# Helper functions - kernels
# Mapping from kernel name to the pairwise-kernel implementation used by
# pairwise_kernels().  'poly'/'polynomial' are aliases.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,
    'rbf': rbf_kernel,
    'laplacian': laplacian_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """ Valid metrics for pairwise_kernels.

    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping
    for each of the valid strings.

    The valid kernel metrics, and the function they map to, are:

    =============== ========================================
    metric          Function
    =============== ========================================
    'additive_chi2' sklearn.pairwise.additive_chi2_kernel
    'chi2'          sklearn.pairwise.chi2_kernel
    'linear'        sklearn.pairwise.linear_kernel
    'poly'          sklearn.pairwise.polynomial_kernel
    'polynomial'    sklearn.pairwise.polynomial_kernel
    'rbf'           sklearn.pairwise.rbf_kernel
    'laplacian'     sklearn.pairwise.laplacian_kernel
    'sigmoid'       sklearn.pairwise.sigmoid_kernel
    'cosine'        sklearn.pairwise.cosine_similarity
    =============== ========================================

    Read more in the :ref:`User Guide <metrics>`.

    Returns
    -------
    kernel_metrics : dict
        Mapping from kernel name (str) to the implementing function.
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# Keyword parameters accepted by each named kernel.  Used by
# pairwise_kernels(filter_params=True) to drop unsupported kwargs before
# dispatching to the kernel function.
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": frozenset(["gamma"]),
    "cosine": (),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "laplacian": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
@_deprecate_positional_args
def pairwise_kernels(X, Y=None, metric="linear", *, filter_params=False,
                     n_jobs=None, **kwds):
    """Compute the kernel between arrays X and optional array Y.

    This method takes either a vector array or a kernel matrix, and returns
    a kernel matrix. If the input is a vector array, the kernels are
    computed. If the input is a kernel matrix, it is returned instead.

    This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.

    If Y is given (default is None), then the returned matrix is the
    pairwise kernel between the arrays from both X and Y.

    Valid values for metric are:
        ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
        'laplacian', 'sigmoid', 'cosine']

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
            [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.

    Y : array of shape (n_samples_b, n_features), default=None
        A second feature array only if X has shape [n_samples_a,
        n_features].

    metric : str or callable, default="linear"
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on
        each pair of instances (rows) and the resulting value recorded. The
        callable should take two rows from X as input and return the
        corresponding kernel value as a single number. This means that
        callables from :mod:`sklearn.metrics.pairwise` are not allowed, as
        they operate on matrices, not single samples. Use the string
        identifying the kernel instead.

    filter_params : bool, default=False
        Whether to filter invalid parameters or not.

    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by
        breaking down the pairwise matrix into n_jobs even slices and
        computing them in parallel.

        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    **kwds : optional keyword parameters
        Any further parameters are passed directly to the kernel function.

    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A kernel matrix K such that K_{i, j} is the kernel between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then K_{i, j} is the kernel between the ith array
        from X and the jth array from Y.

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    # import GPKernel locally to prevent circular imports
    from ..gaussian_process.kernels import Kernel as GPKernel

    if metric == "precomputed":
        # X already is the kernel matrix: validate and return unchanged.
        X, _ = check_pairwise_arrays(X, Y, precomputed=True)
        return X
    elif isinstance(metric, GPKernel):
        # Gaussian-process kernel objects are directly callable.
        func = metric.__call__
    elif metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # Keep only the kwargs this kernel actually accepts.
            kwds = {k: kwds[k] for k in kwds
                    if k in KERNEL_PARAMS[metric]}
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        raise ValueError("Unknown kernel %r" % metric)

    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bnaul/scikit-learn | sklearn/metrics/pairwise.py | Python | bsd-3-clause | 68,070 | [
"Gaussian"
] | ff09781c14ce5e0be22a237253f674d6b29524211c733b9450c602492b3a34b3 |
#!/usr/bin/env python
from setuptools import setup, find_packages
def get_install_reqs():
    """Return the list of requirement strings read from requirements.txt.

    Each line of the file becomes one entry, which is the flat
    ``list[str]`` shape setuptools expects for ``install_requires``.
    """
    with open('requirements.txt') as f:
        # splitlines() already yields one requirement per line; the previous
        # implementation append()ed that list into another list, producing a
        # nested [[...]] that setuptools does not accept.
        return f.read().splitlines()
def get_long_description():
    """Read and return the contents of README.rst for use as the
    package's long description."""
    with open('README.rst') as readme:
        return readme.read()
def setup_package():
    """Configure and run the setuptools installation for rBCM."""
    # Collect all metadata first, then hand it to setup() in one call.
    metadata = dict(
        name="rBCM",
        description="A robust Bayesian Committee Machine Regressor.",
        author="Lucas J. Kolstad",
        author_email="lkolstad@uw.edu",
        version="0.2.0",
        license="MIT",
        packages=find_packages(),
        install_requires=get_install_reqs(),
        keywords="statistics gaussian process bayesian regression committee",
        long_description=get_long_description(),
        classifiers=[
            "Development Status :: 3 - Alpha",
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: MIT License",
            "Natural Language :: English",
            "Operating System :: Unix",
            "Programming Language :: Python",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
            "Topic :: Scientific/Engineering :: Information Analysis",
            "Topic :: Scientific/Engineering :: Mathematics",
        ],
    )
    setup(**metadata)
# Run the installation only when executed as a script (python setup.py ...).
if __name__ == '__main__':
    setup_package()
| lucaskolstad/rBCM | setup.py | Python | bsd-3-clause | 1,370 | [
"Gaussian"
] | c92845ad1e56ffbbc33f5c6a10499e17c05767b5cc8d832639d2810d1630dd9f |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import unittest
import pbtest
class ServiceWorkersTest(pbtest.PBSeleniumTest):
    """Verifies interaction with sites that use Service Worker caches."""

    def get_tab_data_domains(self):
        """Return the top-level frame host recorded in the extension's
        per-tab state (badger.tabData) for every open tab."""
        # Executed inside the extension context via selenium's JS bridge.
        domains = self.js(
            "let tabData = chrome.extension.getBackgroundPage().badger.tabData;"
            "return (Object.keys(tabData).map(tab_id => {"
            " return tabData[tab_id].frames[0].host;"
            "}));"
        )
        return domains

    def test_returning_to_sw_cached_page(self):
        """A page restored from a Service Worker cache (via the back button)
        must still be attributed to its real host in tabData."""
        FIXTURE_URL = (
            "https://efforg.github.io/privacybadger-test-fixtures/html/"
            "service_workers.html"
        )
        # visit the Service Worker page to activate the worker
        self.load_url(FIXTURE_URL)
        # Service Workers are off by default in Firefox 60 ESR
        if not self.js("return 'serviceWorker' in navigator;"):
            self.skipTest("Service Workers are disabled")
        # wait for the worker to initialize its cache
        self.wait_for_script("return window.WORKER_READY;")
        # visit a different site (doesn't matter what it is,
        # just needs to be an http site with a different domain)
        self.load_url("https://dnt-test.trackersimulator.org/")
        # return to the SW page
        self.driver.back()
        # now open a new window (to avoid clearing badger.tabData)
        # and verify results
        self.open_window()
        self.load_url(self.options_url)
        domains = self.get_tab_data_domains()
        self.assertIn("efforg.github.io", domains,
                      "SW page URL was not correctly attributed")
        self.assertEqual(len(domains), 1,
                         "tabData contains an unexpected number of entries")
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| EFForg/privacybadgerchrome | tests/selenium/service_workers_test.py | Python | gpl-3.0 | 1,827 | [
"VisIt"
] | 2794caa15d4140e8cd3d1cf2e7b041302eb4ce95ad03c2ac8cd38c0a01b3b09d |
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2014 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from __future__ import division, print_function, absolute_import
from .version import __version__
import os
import math, operator
import numpy
from .InfixParser import MyInfixParser
# Module-level infix expression parser shared by all Core2 classes below.
# It is built once here; each class re-targets it with setNameStr() (and
# SymbolReplacements) immediately before parsing its own formula strings.
InfixParser = MyInfixParser()
InfixParser.buildlexer()
InfixParser.buildparser(debug=0, debugfile='infix.dbg', tabmodule='infix_tabmodule')
InfixParser.setNameStr('self.', '()')
class MapList(list):
    """A list subclass used throughout Core2 to report collections of
    model-element names, with a convenience conversion to a set."""

    def __init__(self, *args):
        list.__init__(self, *args)

    def asSet(self):
        """Return the elements of this list as a set.

        The previous implementation used ``self.__getslice__``, a method
        removed in Python 3, so asSet() raised AttributeError there;
        ``set(self)`` is the portable equivalent.
        """
        return set(self)
class NewCoreBase(object):
    """Common base for all Core2 model objects: a name plus an optional,
    lazily created annotation dictionary."""
    __DEBUG__ = False
    name = None
    annotations = None

    def getName(self):
        """Return this object's name."""
        return self.name

    def setName(self, name):
        """Set this object's name."""
        self.name = name

    def get(self, attr):
        """Return an attribute whose name is str(attr)."""
        return getattr(self, attr)

    def getAnnotation(self):
        """Return a shallow copy of the annotation dictionary."""
        if self.annotations is None:
            self.annotations = {}
        return self.annotations.copy()

    def setAnnotation(self, key, value):
        """Store one key:value annotation pair."""
        if self.annotations is None:
            self.annotations = {}
        self.annotations[key] = value
class NumberBase(NewCoreBase):
    """A named object wrapping a numeric value; calling the object
    returns its current value."""
    value = None
    value_initial = None

    def __call__(self):
        """Shorthand for getValue()."""
        return self.value

    def getValue(self):
        """Return the current value."""
        return self.value

    def setValue(self, v):
        """Replace the current value with *v*."""
        self.value = v
class Compartment(NewCoreBase):
    """A spatial compartment (point, length, area or volume) that holds
    species and reactions; may be nested in a parent compartment."""
    size = None
    dimensions = None
    Compartment = None  # optional parent compartment object
    reactions = None
    species = None
    area = None

    def __init__(self, name, compartment=None):
        self.name = name
        self.Compartment = compartment
        self.reactions = []
        self.species = []

    def __call__(self):
        """Return the compartment size."""
        return self.size

    def setSize(self, size, dim):
        """Set the compartment size and its spatial dimension (0-3)."""
        self.size = size
        assert dim in [0, 1, 2, 3], '\nOkeee! {0:d} dimensions?'.format(dim)
        self.dimensions = dim

    def setArea(self, area=None):
        """Set the reactive area: explicit *area* if given, otherwise
        derived from size for 2D (area == size) and 3D (sphere surface
        area assumed) compartments.

        BUGFIX: the trailing ``self.area = area`` previously ran
        unconditionally, overwriting the derived 2D/3D value with None;
        it now only applies when no derivation took place.
        """
        if area is None and self.dimensions == 2:
            self.area = self.size
            if self.__DEBUG__: print('Setting reactive area to size for 2D compartment {0:s}'.format(self.name))
        elif area is None and self.dimensions == 3:
            # surface area of a sphere with volume == size
            self.area = (113.09733552923255*self.size**2.0)**(0.33333333333333331)
            if self.__DEBUG__: print('Setting reactive area to surface area for 3D compartment {0:s} (assuming a sphere geometry)'.format(self.name))
        else:
            self.area = area

    def hasReactions(self):
        """Names of reactions registered in this compartment."""
        return MapList([r.name for r in self.reactions])

    def hasSpecies(self):
        """Names of species registered in this compartment."""
        return MapList([s.name for s in self.species])

    def addReaction(self, reaction):
        """Register *reaction* (once) and expose it as an attribute."""
        if reaction.name not in self.hasReactions():
            self.reactions.append(reaction)
            self.__setattr__(reaction.name, reaction)
            if self.__DEBUG__: print('Adding reaction {0:s}'.format(reaction.name))

    def addSpecies(self, species):
        """Register *species* (once) and expose it as an attribute."""
        if species.name not in self.hasSpecies():
            self.species.append(species)
            self.__setattr__(species.name, species)
            if self.__DEBUG__: print('Adding species {0:s}'.format(species.name))
        else:
            if self.__DEBUG__: print('Species {0:s} already added'.format(species.name))

    def getDimensions(self):
        """Return the spatial dimension (0-3)."""
        return self.dimensions

    def getCompartment(self):
        """Return the parent compartment (or None)."""
        return self.Compartment

    def hasCompartment(self):
        """True if nested inside a parent compartment."""
        return self.Compartment is not None

    def isVolume(self):
        """True for a 3-dimensional compartment."""
        return self.dimensions == 3

    def isArea(self):
        """True for a 2-dimensional compartment."""
        return self.dimensions == 2

    def isLength(self):
        """True for a 1-dimensional compartment."""
        return self.dimensions == 1

    def isPoint(self):
        """True for a 0-dimensional compartment."""
        return self.dimensions == 0
class BaseUnit(NewCoreBase):
    '''Base Unit can be of type: time, substance, volume'''
    _types = ('time', 'substance', 'volume', 'area', 'length')
    value = 1.0
    type = None

    def __init__(self, name, type):
        self.name = name
        # reject anything outside the closed set of base-unit kinds
        assert type in self._types, '\nType must be one of: {0:s}'.format(str(self._types))
        self.type = type

    def __call__(self):
        """Return the unit's numeric value (always 1.0 for a base unit)."""
        return self.value

    def getType(self):
        """Return the base-unit kind (e.g. 'time')."""
        return self.type
class SimpleUnit(NewCoreBase):
    """A scaled unit: (multiplier * baseunit * 10**scale) ** exponent."""
    exponent = 1.0
    scale = 0.0
    multiplier = 1.0
    baseunit = None
    type = None

    def __init__(self, baseunit, name, exp=1.0, scale=0.0, mult=1.0):
        self.baseunit = baseunit
        self.exponent = exp
        self.scale = scale
        self.multiplier = mult
        self.name = name
        # a simple unit inherits its kind from its base unit
        self.type = baseunit.type

    def __call__(self):
        """Evaluate the unit numerically from its base unit."""
        return (self.multiplier * self.baseunit() * 10**self.scale) ** self.exponent

    def getType(self):
        """Return the unit kind inherited from the base unit."""
        return self.type
class CompoundUnit(NewCoreBase):
    """A product of simple units; evaluates to the product of all its
    component units' values."""
    units = None
    _HAS_USERNAME = False

    def __init__(self, name=None):
        self.units = []
        if name is not None:
            # an explicit name is kept as-is
            self.name = name
            self._HAS_USERNAME = True
        else:
            # otherwise the name accumulates from added unit names
            self.name = ''

    def __call__(self):
        """Return the product of all component unit values."""
        product = 1.0
        for unit in self.units:
            product *= unit()
        return product

    def addUnit(self, unit):
        """Append *unit* to the product; extend the auto-generated name."""
        self.units.append(unit)
        if not self._HAS_USERNAME:
            self.name = '{0:s}{1}'.format(self.name, unit.getName())

    def getUnits(self):
        """Return the list of component unit objects."""
        return self.units

    def hasUnits(self):
        """Return the names of the component units."""
        return MapList([u.getName() for u in self.units])
class Species(NumberBase):
    """A chemical species: a value plus its roles (substrate, product,
    modifier) in the model's reactions."""
    subs = None
    prods = None
    mods = None
    fixed = False
    Compartment = None
    __amount__ = False

    def __init__(self, name, value):
        self.setName(name)
        self.value = value
        self.value_initial = value
        self.subs = []
        self.prods = []
        self.mods = []

    def getCompartment(self):
        """Return the containing compartment (or None)."""
        return self.Compartment

    def setCompartment(self, c):
        """Attach this species to compartment *c*."""
        self.Compartment = c

    def hasCompartment(self):
        """True if the species belongs to a compartment."""
        return self.Compartment is not None

    def setSubstrate(self, reaction):
        """Record that this species is a substrate of *reaction*."""
        setattr(self, reaction.name, reaction)
        self.subs.append(reaction)

    def setProduct(self, reaction):
        """Record that this species is a product of *reaction*."""
        setattr(self, reaction.name, reaction)
        self.prods.append(reaction)

    def setModifier(self, reaction):
        """Record that this species modifies *reaction*."""
        setattr(self, reaction.name, reaction)
        self.mods.append(reaction)

    def isSubstrateOf(self):
        """Names of reactions consuming this species."""
        return MapList([r.name for r in self.subs])

    def isProductOf(self):
        """Names of reactions producing this species."""
        return MapList([r.name for r in self.prods])

    def isModifierOf(self):
        """Names of reactions modified by this species."""
        return MapList([r.name for r in self.mods])

    def isReagentOf(self):
        """Names of all reactions in which this species is a reagent."""
        return MapList(self.isSubstrateOf() + self.isProductOf())

    def setAmount(self, b):
        """Flag whether the value represents an amount (vs concentration)."""
        self.__amount__ = bool(b)

    def isAmount(self):
        """True if the value is an amount rather than a concentration."""
        return self.__amount__
class SpeciesAssignmentRule(Species):
    """A species whose value is set by an assignment-rule formula;
    calling the object evaluates the compiled formula and returns value."""
    formula = None        # original infix formula text
    code_string = None    # generated 'self.value=...' source
    _names = None         # names referenced by the formula
    _functions = None     # function names referenced by the formula
    type = 'assignment'
    _TIME_ = None         # simulation clock, attached by the model

    def __init__(self, name, value):
        Species.__init__(self, name, value)

    def __call__(self):
        # exec assigns self.value from the compiled formula
        exec(self.xcode)
        return self.value

    def addFormula(self, formula):
        """Parse *formula* with the shared InfixParser and compile it to
        a code object that assigns self.value."""
        formula = formula.replace('self.','')
        self.formula = formula
        InfixParser.setNameStr('self.', '()')
        InfixParser.parse(formula)
        self.code_string = 'self.value={0}'.format(InfixParser.output)
        self._names = InfixParser.names
        self._functions = InfixParser.functions
        self.xcode = compile(self.code_string, '<string>', 'exec')

    def addModelAttr(self, obj):
        """Expose a model object referenced by the formula as an attribute."""
        self.__setattr__(obj.name, obj)
class Function(NewCoreBase):
    """A user-defined function: named arguments plus a compiled formula.
    Calling the object binds the arguments and evaluates the formula."""
    formula = None       # original infix formula text
    code_string = None   # generated 'self.value=...' source
    xcode = None         # compiled code object
    value = None
    _names = None        # names referenced by the formula
    args = None          # ordered argument names
    _TIME_ = None        # simulation clock, attached by the model

    def __init__(self, name):
        self.setName(name)
        self.args = []

    def __call__(self, *args):
        # bind positional args to the declared argument names, then evaluate
        for ar in range(len(args)):
            self.__setattr__(self.args[ar], args[ar])
        exec(self.xcode)
        return self.value

    def setArg(self, var, value=None):
        """Declare argument *var* (optionally with an initial value)."""
        self.__setattr__(var, value)
        self.args.append(var)

    def addFormula(self, formula):
        """Parse and compile *formula*; '_TIME_' is rewritten to a call
        on the clock object."""
        formula = formula.replace('self.','')
        self.formula = formula
        InfixParser.setNameStr('self.', '')
        InfixParser.SymbolReplacements = {'_TIME_':'_TIME_()'}
        InfixParser.parse(formula)
        self._names = InfixParser.names
        self.code_string = 'self.value={0}'.format(InfixParser.output)
        self.xcode = compile(self.code_string, '<string>', 'exec')
class Reaction(NewCoreBase):
    """A reaction: reagents, stoichiometry and a compiled rate equation.
    Calling the object evaluates the rate equation and returns the rate."""
    modifiers = None
    substrates = None
    products = None
    stoichiometry = None
    multistoich = None
    multistoich_enabled = False
    parameters = None
    functions = None
    reversible = True
    formula = None       # original infix rate-equation text
    code_string = None   # generated 'self.rate=...' source
    rate = None
    xcode = None         # compiled code object
    _names = None        # names referenced by the rate equation
    _functions = None    # function names referenced by the rate equation
    _TIME_ = None        # simulation clock, attached by the model
    Compartment = None

    def __call__(self):
        # exec assigns self.rate from the compiled rate equation
        exec(self.xcode)
        return self.rate

    def __init__(self, name):
        self.setName(name)
        self.modifiers = []
        self.substrates = []
        self.products = []
        self.stoichiometry = {}
        self.parameters = []
        self.functions = []
        self.multistoich = []

    def addSubstrate(self, species):
        """Register *species* as a substrate and expose it as an attribute."""
        self.__setattr__(species.name, species)
        self.substrates.append(species)

    def addProduct(self, species):
        """Register *species* as a product and expose it as an attribute."""
        self.__setattr__(species.name, species)
        self.products.append(species)

    def addModifier(self, species):
        """Register *species* as a modifier and expose it as an attribute."""
        self.__setattr__(species.name, species)
        self.modifiers.append(species)

    def addFormula(self, formula):
        """Parse the rate equation and compile it to assign self.rate."""
        formula = formula.replace('self.','')
        self.formula = formula
        InfixParser.setNameStr('self.', '()')
        InfixParser.parse(formula)
        self._names = InfixParser.names
        self._functions = InfixParser.functions
        self.code_string = 'self.rate={0}'.format(InfixParser.output)
        self.xcode = compile(self.code_string, '<string>', 'exec')

    def addParameter(self, par):
        """Register a parameter used by the rate equation."""
        self.__setattr__(par.name, par)
        self.parameters.append(par)

    def addFunction(self, func):
        """Register a function used by the rate equation."""
        self.__setattr__(func.name, func)
        self.functions.append(func)

    # NOTE(review): the 't=type' parameter is unused and shadows the builtin
    # 'type'; confirm no caller passes it before removing.
    def hasProducts(self, t=type):
        """Names of product species."""
        return MapList([p.name for p in self.products])

    def hasSubstrates(self):
        """Names of substrate species."""
        return MapList([s.name for s in self.substrates])

    def hasModifiers(self):
        """Names of modifier species."""
        return MapList([m.name for m in self.modifiers])

    def hasParameters(self):
        """Names of registered parameters."""
        return MapList([p.name for p in self.parameters])

    def hasReagents(self):
        """Names of all substrates and products."""
        return MapList(self.hasSubstrates() + self.hasProducts())

    def setCompartment(self, compartment):
        """Attach this reaction to *compartment*."""
        self.Compartment = compartment

    def getCompartment(self):
        """Return the containing compartment (or None)."""
        return self.Compartment

    def hasCompartment(self):
        """True if the reaction belongs to a compartment."""
        if self.Compartment != None:
            return True
        else:
            return False
class Parameter(NumberBase):
    """A model parameter tracking the reactions it participates in."""
    association = None

    def __init__(self, name, value):
        self.name = name
        self.value = value
        self.value_initial = value
        self.association = []

    def setAssociation(self, reac):
        """Associate this parameter with reaction *reac* and expose the
        reaction as an attribute."""
        self.association.append(reac)
        setattr(self, reac.name, reac)

    def isParameterOf(self):
        """Return the names of all associated reactions."""
        return MapList([reac.name for reac in self.association])
class AssignmentRule(Parameter):
    """A parameter whose value is computed by an assignment-rule formula;
    calling the object evaluates the compiled formula and returns value."""
    formula = None       # original infix formula text
    code_string = None   # generated 'self.value=...' source
    _names = None        # names referenced by the formula
    _functions = None    # function names referenced by the formula
    type = 'assignment'
    _TIME_ = None        # simulation clock, attached by the model
    fixed = False # added so that assignment rules can modify fixed species

    def __init__(self, name, value):
        Parameter.__init__(self, name, value)

    def __call__(self):
        # exec assigns self.value from the compiled formula
        exec(self.xcode)
        return self.value

    def addFormula(self, formula):
        """Parse *formula* with the shared InfixParser and compile it to
        a code object that assigns self.value."""
        formula = formula.replace('self.','')
        self.formula = formula
        InfixParser.setNameStr('self.', '()')
        InfixParser.parse(formula)
        self.code_string = 'self.value={0}'.format(InfixParser.output)
        self._names = InfixParser.names
        self._functions = InfixParser.functions
        self.xcode = compile(self.code_string, '<string>', 'exec')

    def addModelAttr(self, obj):
        """Expose a model object referenced by the formula as an attribute."""
        self.__setattr__(obj.name, obj)
class RateRule(NewCoreBase):
    """An ODE-style rate rule: calling the object evaluates the compiled
    formula and returns the rate of change it defines."""
    formula = None       # original infix formula text (with '()' stripped)
    rate = None
    xcode = None         # compiled code object
    code_string = None   # generated 'self.rate=...' source
    _names = None        # names referenced by the formula
    _functions = None    # function names referenced by the formula
    compartment = None

    def __init__(self, name, formula):
        self.name = name
        self.addFormula(formula)

    def __call__(self):
        # exec assigns self.rate from the compiled formula
        exec(self.xcode)
        return self.rate

    def addFormula(self, formula):
        """Parse *formula* and compile it to a code object assigning
        self.rate; the stored formula text has call parentheses removed."""
        formula = formula.replace('self.','')
        self.formula = formula.replace('()','')
        InfixParser.setNameStr('self.', '()')
        InfixParser.parse(self.formula)
        self.code_string = 'self.rate={0}'.format(InfixParser.output)
        self._names = InfixParser.names
        self._functions = InfixParser.functions
        self.xcode = compile(self.code_string, 'RateRule: {0:s}'.format(self.name), 'exec')

    def getFormula(self):
        """Return the stored formula text."""
        return self.formula

    def addModelAttr(self, obj):
        """Expose a model object referenced by the formula as an attribute."""
        self.__setattr__(obj.name, obj)
class ODE(NewCoreBase):
    """The ordinary differential equation for one species, assembled as a
    signed sum of reaction-rate terms; calling the object evaluates it."""
    sdot = None          # the Species this ODE integrates
    value = None
    coefficients = None  # stoichiometric coefficients, parallel to reactions
    reactions = None
    independent = None   # True if this ODE is in the independent set
    ode_terms = None
    formula = ''         # human-readable form using reaction names
    formula_alt = ''     # human-readable form with rate equations inlined
    code_string = 'self.value='       # executable form using reaction objects
    code_string_alt = 'sdot='         # executable form with rates inlined

    def __init__(self, species, independent=True):
        self.sdot = species
        self.name = 'ODE_'+species.name
        self.reactions = []
        self.coefficients = []
        self.ode_terms = []
        self.independent = independent

    def __call__(self):
        # NOTE(review): this execs the source string (not a precompiled
        # object), recompiling on every call — confirm before changing.
        exec(self.code_string)
        return self.value

    def addReaction(self, reaction, coefficient):
        """Append *reaction* scaled by its stoichiometric *coefficient*,
        extending all four term representations consistently."""
        self.reactions.append(reaction)
        self.coefficients.append(coefficient)
        # build the signed term in each of the four representations;
        # +/-1 coefficients omit the explicit multiplier
        if coefficient > 0.0:
            if coefficient == 1.0:
                term = '+self.{0:s}() '.format(reaction.name)
                aterm = '+({0}) '.format(reaction.code_string.replace('self.rate=',''))
                fterm = '+{0:s} '.format(reaction.name)
                afterm = '+ ({0}) '.format(reaction.formula)
            else:
                term = '+{0}*self.{1}() '.format(abs(coefficient), reaction.name)
                aterm = '+{0}*({1}) '.format(abs(coefficient), reaction.code_string.replace('self.rate=',''))
                fterm = '+{0}*{1}'.format(abs(coefficient), reaction.name)
                afterm = '+ {0}*({1}) '.format(abs(coefficient), reaction.formula)
        else:
            if coefficient == -1.0:
                term = '-self.{0:s}() '.format(reaction.name)
                aterm = '-({0}) '.format(reaction.code_string.replace('self.rate=',''))
                fterm = '-{0:s} '.format(reaction.name)
                afterm = '- ({0}) '.format(reaction.formula)
            else:
                term = '-{0}*self.{1}() '.format(abs(coefficient), reaction.name)
                aterm = '-{0}*({1}) '.format(abs(coefficient), reaction.code_string.replace('self.rate=',''))
                fterm = '-{0}*{1}'.format(abs(coefficient), reaction.name)
                afterm = '- {0}*({1}) '.format(abs(coefficient), reaction.formula)
        self.ode_terms.append(term)
        self.code_string += term
        self.code_string_alt += aterm
        self.formula += fterm
        self.formula_alt += afterm
        self.__setattr__(reaction.name, reaction)

    def hasReactions(self):
        """Names of the contributing reactions."""
        return MapList([r.name for r in self.reactions])

    def getFormula(self):
        """Return the executable (object-based) form of the ODE."""
        return self.code_string

    def getGlobalFormula(self):
        """Return the executable form with rate equations inlined."""
        return self.code_string_alt
class StructMatrix(NewCoreBase):
    """
    This class is specifically designed to store structural matrix information
    give it an array and row/col index permutations it can generate its own
    row/col labels given the label src.
    """
    array = None   # the underlying 2-D numpy array
    ridx = None    # row index permutation
    cidx = None    # column index permutation
    row = None     # optional row label list
    col = None     # optional column label list

    def __init__(self, array, ridx, cidx, row=None, col=None):
        """
        Instantiate with array and matching row/col index arrays, optional label arrays
        """
        self.array = array
        self.ridx = ridx
        self.cidx = cidx
        self.row = row
        self.col = col
        # NOTE: this instance attribute permanently shadowed the former
        # shape() method, which was therefore unreachable dead code and has
        # been removed.
        self.shape = array.shape

    def __call__(self):
        return self.array

    def getRowsByIdx(self, *args):
        """Return the rows referenced by index (1,3,5)"""
        return self.array.take(args, axis=0)

    def getColsByIdx(self, *args):
        """Return the columns referenced by index (1,3,5)"""
        return self.array.take(args, axis=1)

    def setRow(self, src):
        """
        Assuming that the row index array is a permutation (full/subset)
        of a source label array by supplying that source to setRow it
        maps the row labels to ridx and creates self.row (row label list)
        """
        self.row = [src[r] for r in self.ridx]

    def setCol(self, src):
        """
        Assuming that the col index array is a permutation (full/subset)
        of a source label array by supplying that src to setCol
        maps the row labels to cidx and creates self.col (col label list)
        """
        self.col = [src[c] for c in self.cidx]

    def getRowsByName(self, *args):
        """Return the rows referenced by label ('s','x','d')"""
        assert self.row is not None, "\nI need row labels"
        try:
            return self.array.take([self.row.index(l) for l in args], axis=0)
        except Exception as ex:
            print(ex)
            print("\nValid row labels are: {0}".format(self.row))
            return None

    def getColsByName(self, *args):
        """Return the columns referenced by label ('s','x','d')"""
        assert self.col is not None, "\nI need column labels"
        try:
            return self.array.take([self.col.index(l) for l in args], axis=1)
        except Exception as ex:
            print(ex)
            print("Valid column labels are: {0}".format(self.col))
            return None

    def getLabels(self, axis='all'):
        """Return the matrix labels ([rows],[cols]) where axis='row'/'col'/'all'"""
        if axis == 'row': return self.row
        elif axis == 'col': return self.col
        else: return self.row, self.col

    def getIndexes(self, axis='all'):
        """Return the matrix indexes ([rows],[cols]) where axis='row'/'col'/'all'"""
        if axis == 'row': return self.ridx
        elif axis == 'col': return self.cidx
        else: return self.ridx, self.cidx

    def getByIdx(self, row, col):
        """Return the element at integer position (row, col)."""
        assert row in self.ridx, '\n{0} is an invalid index'.format(row)
        assert col in self.cidx, '\n{0} is an invalid index'.format(col)
        return self.array[row, col]

    def getByName(self, row, col):
        """Return the element addressed by labels (row, col)."""
        assert row in self.row, '\n{0} is an invalid name'.format(row)
        assert col in self.col, '\n{0} is an invalid name'.format(col)
        return self.array[self.row.index(row), self.col.index(col)]

    def setByIdx(self, row, col, val):
        """Set the element at integer position (row, col) to *val*."""
        assert row in self.ridx, '\n{0} is an invalid index'.format(row)
        assert col in self.cidx, '\n{0} is an invalid index'.format(col)
        self.array[row, col] = val

    def setByName(self, row, col, val):
        """Set the element addressed by labels (row, col) to *val*."""
        assert row in self.row, '\n{0} is an invalid name'.format(row)
        assert col in self.col, '\n{0} is an invalid name'.format(col)
        self.array[self.row.index(row), self.col.index(col)] = val
class EventAssignment(NumberBase):
    """One variable assignment belonging to an Event: a compiled formula
    (evaluateAssignment) and the act of writing the result (call)."""
    variable = None      # the model object whose .value is assigned
    _names = None        # names referenced by the formula
    formula = None
    code_string = None   # generated 'self.value=...' source
    xcode = None         # compiled code object

    def __call__(self):
        # write the previously evaluated value into the target variable
        self.variable.value = self.value
        if self.__DEBUG__: print('\tAssigning {0:s} = {1}'.format(self.variable.name, self.value))
        return True

    def __init__(self, name='None'):
        self.setName(name)

    def setVariable(self, var):
        """Set the model object this assignment writes to."""
        self.variable = var

    def setFormula(self, formula):
        """Parse and compile the assignment formula."""
        self.formula = formula
        InfixParser.setNameStr('self.', '()')
        ## InfixParser.SymbolReplacements = {'_TIME_':'_TIME_()'}
        InfixParser.parse(formula)
        self._names = InfixParser.names
        self.code_string = 'self.value={0}'.format(InfixParser.output)
        self.xcode = compile(self.code_string, '<string>', 'exec')
        # BUGFIX: this debug line was a bare no-op tuple expression (a
        # Python-2 'print' statement lost in conversion); restored as print().
        if self.__DEBUG__: print('\t', self.name, self.code_string)

    def evaluateAssignment(self):
        """Evaluate the compiled formula into self.value (no write yet)."""
        exec(self.xcode)
class Event(NewCoreBase):
    """A model event: a compiled trigger expression plus a list of
    EventAssignments fired (after an optional delay) on the trigger's
    False->True transition."""
    trigger = None
    delay = 0.0
    formula = None       # trigger formula text
    code_string = None   # generated 'self.state=...' source
    xcode = None         # compiled trigger code
    state0 = False       # trigger state at the previous evaluation
    state = False        # trigger state at the current evaluation
    assignments = None
    _TIME_ = None        # simulation clock, attached by the model
    _ASS_TIME_ = 0.0     # absolute time at which assignments are due
    _need_action = False # True while a delayed assignment is pending
    _names = None
    _time_symbol = None  # model's own symbol for time, if any

    def __init__(self, name):
        self.setName(name)
        self.assignments = []

    def __call__(self, time):
        """Evaluate the trigger at *time*; on a rising edge, evaluate the
        assignment formulas and schedule their application after delay."""
        self._TIME_.set(time)
        exec(self.xcode)
        if self.state0 and not self.state:
            # falling edge: just remember the new state
            self.state0 = self.state
        if not self.state0 and self.state:
            # rising edge: capture assignment values now, apply later
            for ass in self.assignments:
                ass.evaluateAssignment()
            self.state0 = self.state
            self._need_action = True
            self._ASS_TIME_ = self._TIME_() + self.delay
            if self.__DEBUG__: print('event {0:s} is evaluating at {1}'.format(self.name, time))
        if self._need_action and self._TIME_() >= self._ASS_TIME_:
            for ass in self.assignments:
                ass()
            if self.__DEBUG__: print('event {0:s} is assigning at {1} (delay={2})'.format(self.name, time, self.delay))
            self._need_action = False

    def setTrigger(self, formula, delay=0.0):
        """Parse and compile the trigger *formula*; an optional model time
        symbol is rewritten to the internal '_TIME_' clock."""
        self.formula = formula
        self.delay = delay
        InfixParser.setNameStr('self.', '()')
        ## print self._time_symbol
        if self._time_symbol != None:
            InfixParser.SymbolReplacements = {self._time_symbol : '_TIME_'}
            ## self.formula = formula.replace(self._time_symbol, '_TIME_')
        InfixParser.parse(formula)
        self._names = InfixParser.names
        self.code_string = 'self.state={0}'.format(InfixParser.output)
        if self._time_symbol != None:
            # re-parse without the 'self.' prefix to get a clean display form
            InfixParser.setNameStr('', '')
            InfixParser.SymbolReplacements = {self._time_symbol : '_TIME_'}
            InfixParser.parse(formula)
            self.formula = InfixParser.output
        self.xcode = compile(self.code_string, '<string>', 'exec')
        # BUGFIX: this debug line was a bare no-op tuple expression (a
        # Python-2 'print' statement lost in conversion); restored as print().
        if self.__DEBUG__: print(self.name, self.code_string)

    def setTriggerAttributes(self, core):
        # TODO: experimental
        for n in self._names:
            self.__setattr__(n, core.__getattribute__(n))

    def setAssignment(self, var, formula):
        """Create and register an EventAssignment writing *formula* to *var*."""
        ass = EventAssignment(var.name)
        ass.setVariable(var)
        ass.setFormula(formula)
        self.assignments.append(ass)
        self.__setattr__('_'+var.name, ass)
class PieceWise(NewCoreBase):
    """
    Generic piecewise class written by me!
    - *args* a dictionary of piecewise information generated by the InfixParser
    """
    name = None
    value = None
    formula = None       # display form of the generated if/elif/else code
    code_string = None   # executable source built from the piece dictionary
    xcode = None         # compiled code object
    _names = None        # names referenced by any piece's condition/value
    _TIME_ = None        # simulation clock, attached by the model

    def __init__(self, pwd):
        # pwd maps integer keys -> (condition, value) pairs, plus an
        # 'other' key holding the fallback value (or None)
        pwd = pwd.copy()
        if pwd['other'] != None:
            other = 'self.value = {0}'.format( pwd.pop('other') )
        else:
            other = 'pass'
            pwd.pop('other')
        InfixParser.setNameStr('self.', '')
        InfixParser.SymbolReplacements = {'_TIME_':'_TIME_()'}
        self._names = []
        if len(list(pwd)) == 1:
            # single piece: a simple if/else
            formula = pwd[0][0]
            InfixParser.parse(formula)
            for n in InfixParser.names:
                if n not in self._names and n != '_TIME_()':
                    self._names.append(n)
            formula = InfixParser.output
            self.code_string = 'if {0}:\n self.value = {1}\nelse:\n {2}'.format(formula, pwd[0][1], other)
            self.formula = self.code_string.replace('self.','')
        else:
            # multiple pieces: if/elif.../else chain, built piece by piece
            formula = pwd[0][0]
            InfixParser.parse(formula)
            for n in InfixParser.names:
                if n not in self._names and n != '_TIME_()':
                    self._names.append(n)
            formula = InfixParser.output
            self.code_string = 'if {0}:\n self.value = {1}\n'.format(formula, pwd[0][1])
            pwd.pop(0)
            for p in pwd:
                formula = pwd[p][0]
                # SymbolReplacements must be reset before every parse
                InfixParser.SymbolReplacements = {'_TIME_':'_TIME_()'}
                InfixParser.parse(formula)
                for n in InfixParser.names:
                    if n not in self._names and n != '_TIME_()':
                        self._names.append(n)
                formula = InfixParser.output
                self.code_string += 'elif {0}:\n self.value = {1}\n'.format(formula, pwd[p][1])
            self.code_string += 'else:\n {0}'.format(other)
            self.formula = self.code_string.replace('self.','')
        self.xcode = compile(self.code_string, 'PieceWise','exec')

    def __call__(self):
        # exec assigns self.value via the generated if/elif/else chain
        exec(self.xcode)
        return self.value
class Time(object):
    """The shared simulation clock; calling the object returns the
    current time."""
    value = None
    name = '__TIME__'

    def __init__(self, t=0):
        self.value = t

    def __call__(self):
        """Return the current time."""
        return self.value

    def set(self, t):
        """Advance (or rewind) the clock to *t*."""
        self.value = t
## def delay(*args):
## print 'delay() ignored'
## return 1.0
class NewCore(NewCoreBase):
    """The Core2 model container: builds compartments, species, functions,
    reactions, rules and events from a parsed PySCeS model object."""
    __nDict__ = None            # network (reaction) dictionary from the model
    reactions = None
    species = None
    species_variable = None     # species not flagged as fixed
    __model__ = None            # the source PySCeS model object
    __InitDict__ = None         # initial values keyed by name
    __not_inited__ = None       # names left without an initial value
    global_parameters = None
    __parameter_store__ = None
    forcing_functions = None
    __rules__ = None
    __events__ = None
    # new
    __compartments__ = None
    compartments = None
    rate_rules = None
    description = "Pysces Core2"
    __uDict__ = None            # global unit definitions
    stoichiometric_matrix = None
    struct = None
    ODEs = None
    functions = None
    _TIME_ = None               # shared simulation clock (Time instance)
    events = None
    __sDict__ = None            # species dictionary from the model
    __KeyWords__ = None
    __piecewises__ = None
    piecewise_functions = None
    netStoich = None
def __init__(self, model, iValues=True, netStoich=True):
    """Build the full Core2 object graph from *model*.

    model     : a parsed PySCeS model object
    iValues   : if False, refresh __InitDict__ and compartment sizes from
                the model's current attribute values instead of its
                declared initial values
    netStoich : whether net stoichiometry is used downstream

    NOTE: the construction order below is significant — compartments,
    the clock, piecewise functions and species must exist before
    functions, reactions, rules and events are built.
    """
    # setup core dictionaries
    self.__nDict__ = model.__nDict__
    self.__sDict__ = model.__sDict__
    self.__KeyWords__ = model.__KeyWords__
    if self.__KeyWords__['Modelname'] != None:
        self.setName(self.__KeyWords__['Modelname'])
    else:
        self.setName('PySCeSModel')
    if self.__KeyWords__['Description'] != None:
        self.setDescription(self.__KeyWords__['Description'])
    else:
        self.setDescription('PySCeSModel')
    self.__model__ = model
    self.__InitDict__ = model.__InitDict__
    if not iValues:
        # use the model's current values rather than declared initial values
        if self.__DEBUG__: print(self.__InitDict__)
        for k in self.__InitDict__.keys():
            self.__InitDict__[k] = getattr(self.__model__, k)
        for c in model.__compartments__:
            model.__compartments__[c]['size'] = getattr(self.__model__, c)
    self.netStoich = netStoich
    self.global_parameters = []
    self.__parameter_store__ = []
    self.__not_inited__ = []
    self.forcing_functions = []
    self.__rules__ = model.__rules__
    self.__uDict__ = model.__uDict__
    self.__piecewises__ = model.__piecewises__
    InfixParser.__pwcntr__ = 0
    # start building objects
    self.__compartments__ = model.__compartments__
    self.addCompartments()
    self._TIME_ = Time()
    self.addPieceWiseFunctions() # this adds any piecewise functions
    self.addSpecies()
    # the order is important from here as eg functions can occur in rate equations
    try:
        self.__functions__ = model.__functions__
    except:
        self.__functions__ = {}
        if self.__DEBUG__: print('No functions')
    self.functions = []
    self.addFunctions()
    self.addReactions()
    self.generateMappings()
    self.setAssignmentRules()
    self.setRateRules()
    # add event support
    self.__events__ = self.__model__.__eDict__
    self.events = []
    self.addEvents()
    self.addPieceWiseFunctions(update=True) # this updates their attributes
    ## # get rid of _TIME_ in not intited
    ## if '_TIME_' in self.__not_inited__:
    ##     self.__not_inited__.pop(self.__not_inited__.index('_TIME_'))
    assert len(self.__not_inited__) < 1, "\nERROR: Uninitialised parameters: {0}".format(self.__not_inited__)
def __cleanString__(self, s):
    """Return *s* with leading and trailing whitespace removed."""
    return s.strip()
def setDescription(self, txt):
    """Set the model description (coerced to str)."""
    self.description = str(txt)
def getDescription(self):
    """Return the model description as a string."""
    return str(self.description)
def setGlobalUnits(self, **kwargs):
    """Override global units; each keyword maps a unit kind to a
    (multiplier, scale) pair stored in __uDict__."""
    for un in kwargs:
        self.__uDict__[un] = (kwargs[un][0], kwargs[un][1])
        if self.__DEBUG__:
            print("Modified \"{0}\" to be {1}*{0}*10**{2}".format(un, kwargs[un][0], kwargs[un][1]))
def getGlobalUnits(self):
    """Return the global unit dictionary."""
    return self.__uDict__
def addPieceWiseFunctions(self, update=False):
    """Create PieceWise objects from __piecewises__ (update=False), or
    re-bind the attributes each existing PieceWise references to the
    now-built model objects (update=True)."""
    if not update:
        self.piecewise_functions = []
        for pw in self.__piecewises__.keys():
            if self.__DEBUG__: print('Info: adding piecewise function:{0}'.format(pw))
            P = PieceWise(self.__piecewises__[pw])
            P.setName(pw)
            P.__setattr__('_TIME_', self.__getattribute__('_TIME_'))
            self.piecewise_functions.append(P)
            self.__setattr__(pw, P)
    else:
        # second pass: the referenced model attributes now exist
        for pw in self.piecewise_functions:
            for a in pw._names:
                pw.__setattr__(a, self.__getattribute__(a))
def addOneCompartment(self, name, size, dimensions, compartment=None, area=None):
    """Create a Compartment, register it and expose it as an attribute.
    (The *area* argument is currently accepted but not applied.)"""
    comp = Compartment(name, compartment)
    comp.setSize(size, dimensions)
    ## C.setArea(area)
    self.compartments.append(comp)
    setattr(self, name, comp)
def addCompartments(self):
    """Instantiate a Compartment for every entry in __compartments__."""
    self.compartments = []
    for cdef in self.__compartments__.values():
        if self.__DEBUG__:
            print('Adding compartment {0}'.format(cdef['name']))
        self.addOneCompartment(cdef['name'], cdef['size'], cdef['dimensions'],
                               compartment=cdef['compartment'], area=None)
def addOneSpecies(self, species, value, fix=False, comp=None, amount=False, fullName=None):
    """Create one Species, register it (and, unless fixed, track it as a
    variable species), and attach it to its compartment if any."""
    sp = Species(species, value)
    sp.setCompartment(comp)
    sp.setAmount(amount)
    sp.setAnnotation('sbml_name', fullName)
    if fix:
        sp.fixed = True
    setattr(self, species, sp)
    self.species.append(sp)
    if not fix:
        self.species_variable.append(sp)
    if comp is not None:
        comp.addSpecies(sp)
def addSpecies(self):
    """Build a Species object for every entry in __sDict__."""
    self.species = []
    self.species_variable = []
    for key in self.__sDict__:
        sdef = self.__sDict__[key]
        name = sdef['name']
        # fall back to 0.0 when no initial value was supplied
        val = self.__InitDict__.get(key, 0.0)
        fix = sdef['fixed']
        if sdef['compartment'] is not None:
            comp = getattr(self, sdef['compartment'])
        else:
            comp = None
        amount = sdef['isamount']
        fullName = sdef.get('fullName')
        self.addOneSpecies(name, val, fix=fix, comp=comp, amount=amount, fullName=fullName)
def addOneFunction(self, name, args, formula):
    """Create a Function object with the given args and formula, bind the
    shared clock, and expose the function as an attribute."""
    fobj = Function(name)
    # TODO: make better
    setattr(fobj, '_TIME_', self._TIME_)
    for arg in args:
        fobj.setArg(arg)
    fobj.addFormula(formula)
    self.functions.append(fobj)
    setattr(self, name, fobj)
def addFunctions(self):
    """Instantiate every function defined in __functions__."""
    for fname in self.__functions__.keys():
        fdef = self.__functions__[fname]
        self.addOneFunction(fname, fdef['args'], fdef['formula'])
def addOneReaction(self, rDict):
    """Build one Reaction from its network-dictionary entry *rDict*:
    compartment, rate equation, referenced functions, and all parameters
    (declared locally in 'Params' or discovered in the rate equation).

    NOTE: the branch ordering below is load-bearing — locally declared
    parameters are processed before names extracted from the rate
    equation, and each name is resolved at most once via
    processed_parameter.
    """
    r = Reaction(rDict['name'])
    if rDict['compartment'] != None:
        C = self.__getattribute__(rDict['compartment'])
        r.setCompartment(C)
        C.addReaction(r)
    fullName = None
    if 'fullName' in rDict:
        r.setAnnotation('sbml_name', rDict['fullName'])
    # TODO: make better
    setattr(r, '_TIME_', self._TIME_)
    r.addFormula(rDict['RateEq'].replace('self.',''))
    if rDict['Type'] == 'Irrev': r.reversible = False
    # now we can add formulas that occured in the rate equation
    if len(r._functions) > 0:
        for func in r._functions:
            try:
                r.addFunction(self.__getattribute__(func))
            except Exception as ex:
                print(ex)
                print('\nHave you added the function objects yet (addFunctions())')
    #fxnames = self.hasFixedSpecies()
    processed_parameter = []
    # where parameters are defined `locally' per reaction
    for p in rDict['Params']:
        p = p.replace('self.','')
        if p not in self.hasGlobalParameters() and not (p in self.hasFixedSpecies() or p in self.__compartments__):
            # brand-new parameter: create it globally, then associate
            if self.__DEBUG__: print("Adding parameter {0} from networkdict".format(p))
            self.addParameter(p)
            par = self.__getattribute__(p)
            par.setAssociation(r)
            r.addParameter(par)
            processed_parameter.append(p)
        elif not (p in self.hasFixedSpecies() or p in self.__compartments__):
            # existing global parameter: just add this reaction's association
            if self.__DEBUG__: print("Updating parameter {0} from networkdict".format(p))
            pidx = self.hasGlobalParameters().index(p)
            self.global_parameters[pidx].setAssociation(r)
            r.addParameter(self.global_parameters[pidx])
            processed_parameter.append(p)
    #print self.hasGlobalParameters()
    # where parameters are not `locally' defined and are extracted from Req (ie from SBML)
    for p in r._names:
        p = p.replace('self.','')
        if p == '_TIME_':
            pass
        elif p in [pw.name for pw in self.piecewise_functions]:
            pass
        elif p in self.hasCompartments() and p not in processed_parameter:
            C = self.__getattribute__(p)
            C.addReaction(r)
            # TODO: this will work until isParameterOf is called on a compartment object
            r.addParameter(C)
            # dirty alternative
            #setattr(r, C.name, C)
            processed_parameter.append(p)
        elif p not in processed_parameter and p not in self.hasGlobalParameters() and p not in self.hasSpecies():
            if self.__DEBUG__: print("Adding parameter {0} from global".format(p))
            self.addParameter(p)
            par = self.__getattribute__(p)
            par.setAssociation(r)
            r.addParameter(par)
            processed_parameter.append(p)
        elif p not in processed_parameter and p not in self.hasSpecies():
            if self.__DEBUG__: print("Updating parameter {0} from global".format(p))
            pidx = self.hasGlobalParameters().index(p)
            self.global_parameters[pidx].setAssociation(r)
            r.addParameter(self.global_parameters[pidx])
            processed_parameter.append(p)
    self.__setattr__(rDict['name'], r)
    self.reactions.append(r)
def addParameter(self, name):
if not name in self.__piecewises__:
if name in self.__InitDict__:
par = Parameter(name, self.__InitDict__[name])
else:
par = Parameter(name, 0.0)
if name not in self.__not_inited__: self.__not_inited__.append(name)
self.global_parameters.append(par)
self.__setattr__(name, par)
def addReactions(self):
self.reactions = []
for r in self.__model__.reactions:
self.addOneReaction(self.__nDict__[r])
non_parameters = self.hasGlobalParameters()+self.hasSpecies()+self.hasFixedSpecies()
for k in self.__InitDict__.keys():
if k not in non_parameters:
if self.__DEBUG__: print( 'Adding new parameter:', k)
self.addParameter(k)
def replaceParameterWithRule(self, ar):
par = self.__getattribute__(ar.name)
for r in par.association:
ar.setAssociation(r)
setattr(r, ar.name, ar)
r.parameters[r.hasParameters().index(ar.name)] = ar
self.global_parameters[self.hasGlobalParameters().index(ar.name)] = ar
self.__setattr__(ar.name, ar)
def replaceFixedSpeciesWithRule(self, ar):
fs = self.__getattribute__(ar.name)
ar.fixed = fs.fixed
for r in fs.subs:
ar.setSubstrate(r)
setattr(r, ar.name, ar)
r.substrates[r.hasSubstrates().index(ar.name)] = ar
for r in fs.prods:
ar.setProduct(r)
setattr(r, ar.name, ar)
r.products[r.hasProducts().index(ar.name)] = ar
for r in fs.mods:
ar.setModifier(r)
setattr(r, ar.name, ar)
r.modifiers[r.hasModifiers().index(ar.name)] = ar
self.species[self.hasSpecies().index(ar.name)] = ar
self.__setattr__(ar.name, ar)
def replaceSpeciesWithRule(self, ar):
fs = self.__getattribute__(ar.name)
for r in fs.subs:
ar.setSubstrate(r)
setattr(r, ar.name, ar)
r.substrates[r.hasSubstrates().index(ar.name)] = ar
for r in fs.prods:
ar.setProduct(r)
setattr(r, ar.name, ar)
r.products[r.hasProducts().index(ar.name)] = ar
for r in fs.mods:
ar.setModifier(r)
setattr(r, ar.name, ar)
r.modifiers[r.hasModifiers().index(ar.name)] = ar
self.species[self.hasSpecies().index(ar.name)] = ar
self.species_variable[self.hasVariableSpecies().index(ar.name)] = ar
self.__setattr__(ar.name, ar)
    def setAssignmentRules(self):
        """Convert every parameter/species governed by an 'assignment' rule in
        self.__rules__ into an (Species)AssignmentRule object and splice it into
        the model in place of the original object.

        Each rule object gets the rule formula plus references to every model
        attribute the formula mentions. A final pass links assignment rules that
        reference other assignment rules (one level deep only, see trailing TODO).
        """
        # names of all quantities that have an assignment rule
        aps = [self.__rules__[ar]['name'] for ar in self.__rules__ if self.__rules__[ar]['type'] == 'assignment']
##        for p in self.global_parameters + [self.get(fs) for fs in self.hasFixedSpecies()]:
        for p in self.global_parameters + self.species:
            #print p.name
            if p.name in aps:
                if self.__DEBUG__: print('Assigning: {0:s} = {1}'.format(p.name, self.__rules__[p.name]['formula']))
                p2 = None
                # TODO: make better
                # choose the replacement type by what p currently is
                if p.name in self.hasGlobalParameters():
                    p2 = AssignmentRule(p.name, self.__InitDict__[p.name])
                    setattr(p2, '_TIME_', self._TIME_)
                    self.replaceParameterWithRule(p2)
                elif p.name in self.hasFixedSpecies():
                    p2 = SpeciesAssignmentRule(p.name, self.__InitDict__[p.name])
                    p2.setCompartment(p.getCompartment())
                    setattr(p2, '_TIME_', self._TIME_)
                    self.replaceFixedSpeciesWithRule(p2)
                elif p.name in self.hasVariableSpecies():
                    p2 = SpeciesAssignmentRule(p.name, self.__InitDict__[p.name])
                    p2.setCompartment(p.getCompartment())
                    setattr(p2, '_TIME_', self._TIME_)
                    self.replaceSpeciesWithRule(p2)
                assert isinstance(p2, AssignmentRule) or isinstance(p2, SpeciesAssignmentRule), "\nHappy assertion error"
                #print type(p2)
                p2.addFormula(self.__rules__[p.name]['formula'])
##                print p2._names
                # bind every symbol/function the formula references
                for n in p2._names+p2._functions:
                    p2.addModelAttr(self.__getattribute__(n))
##                # setup initial values
##                p2.value_initial = self.p2()
                # a rule-governed quantity now has a defined value
                if p2.name in self.__not_inited__:
                    self.__not_inited__.pop(self.__not_inited__.index(p.name))
        for p in self.global_parameters:
            if p.name in self.hasAssignmentRules():
                # TODO assignment rules need a list of properties
                for ar in p._names:
                    if ar in self.hasAssignmentRules():
                        setattr(p, ar, self.__getattribute__(ar))
        #TODO this is where things will go wrong if fs --> ar contains nested ar's
def setRateRules(self):
# TODO mayvbe split into two methods for now read from self.__rules__
# TODO add functions to rules
ars = [self.__rules__[ar]['name'] for ar in self.__rules__ if self.__rules__[ar]['type'] == 'rate']
self.rate_rules = []
for rr in ars:
rrobj = RateRule(self.__rules__[rr]['name'], self.__rules__[rr]['formula'])
## print 'RR:', rrobj.name, rrobj._names, rrobj._functions
for symb in rrobj._names+rrobj._functions:
rrobj.addModelAttr(self.__getattribute__(symb))
self.rate_rules.append(rrobj)
# TODO investgiate this as it is problematic, the rate rule
# is not a model property as such more an ODE property
## self.__setattr__(rrobj.name, rrobj)
if self.__DEBUG__: print( 'Adding RateRule {0} with formula: {1}'.format(rrobj.name, rrobj.formula))
    def addOneEvent(self, e):
        """Add a single event from an event dictionary *e*.

        Expected keys: 'name', 'tsymb' (time symbol), 'trigger', 'delay' and
        'assignments' (mapping of target attribute name -> assignment formula).
        The Event is appended to self.events and bound as a model attribute.
        """
        # translate self.__events__[e] to e
        ev = Event(e['name'])
        ev._time_symbol = e['tsymb']
        ev.setTrigger(e['trigger'], e['delay'])
        # associate model attributes with event
        # TODO: check that this still works
        ev.setTriggerAttributes(self)
##        for n in ev._names:
##            setattr(ev, n, self.__getattribute__(n))
        # for each assignment
        for ass in e['assignments']:
            ev.setAssignment(self.__getattribute__(ass), e['assignments'][ass])
            assref = getattr(ev, '_'+ass) # don\t like this at all :-(
            # associate model attributes with assignment
            for n in assref._names:
                setattr(assref, n, self.__getattribute__(n))
        self.events.append(ev)
        self.__setattr__(ev.name, ev)
        setattr(ev, '_TIME_', self._TIME_)
def addEvents(self):
# TODO: check that you can change the trigger on the fly (might need a setAttr thing in event obj)
self.events = []
# for each event
for e in self.__events__:
self.addOneEvent(self.__events__[e])
    def generateMappings(self):
        """Wire species/compartments to reactions as substrates, products and
        modifiers, and fill in each reaction's stoichiometry.

        With self.netStoich the net 'Reagents' coefficients are used; otherwise
        'AllReagents' is used, which also records multiple-entry stoichiometry
        (reac.multistoich) and flags reactions where a reagent appears more
        than once (reac.multistoich_enabled).
        """
##        self.netStoich = False
        for reac in self.reactions:
            if self.netStoich:
                for reag in self.__nDict__[reac.name]['Reagents']:
                    # negative coefficient -> substrate, otherwise product
                    if self.__nDict__[reac.name]['Reagents'][reag] < 0.0:
                        reac.addSubstrate(self.__getattribute__(reag.replace('self.','')))
                        self.__getattribute__(reag.replace('self.','')).setSubstrate(self.__getattribute__(reac.name))
                    else:
                        reac.addProduct(self.__getattribute__(reag.replace('self.','')))
                        self.__getattribute__(reag.replace('self.','')).setProduct(self.__getattribute__(reac.name))
                    reac.stoichiometry.setdefault(reag.replace('self.',''), self.__nDict__[reac.name]['Reagents'][reag])
            else:
                for reag in self.__nDict__[reac.name]['AllReagents']:
                    # reag is a (name, coefficient) pair here
                    if reag[1] < 0.0:
                        reac.addSubstrate(self.__getattribute__(reag[0].replace('self.','')))
                        self.__getattribute__(reag[0].replace('self.','')).setSubstrate(self.__getattribute__(reac.name))
                    else:
                        reac.addProduct(self.__getattribute__(reag[0].replace('self.','')))
                        self.__getattribute__(reag[0].replace('self.','')).setProduct(self.__getattribute__(reac.name))
                    reac.multistoich.append((reag[0].replace('self.',''), reag[1]))
                    # a repeated reagent means simple stoichiometry is not enough
                    if reag[0].replace('self.','') in reac.stoichiometry:
                        reac.multistoich_enabled = True
                    reac.stoichiometry.setdefault(reag[0].replace('self.',''), reag[1])
            for mod in self.__nDict__[reac.name]['Modifiers']:
                reac.addModifier(self.__getattribute__(mod.replace('self.','')))
                self.__getattribute__(mod.replace('self.','')).setModifier(self.__getattribute__(reac.name))
##            print 'I AM LEGEND'
##            print reac.stoichiometry
##            print reac.multistoich
##            print 'reac.multistoich_enabled', reac.multistoich_enabled
##            print self.__nDict__[reac.name]['Reagents']
##            print self.__nDict__[reac.name]['AllReagents']
    def setStoichiometricMatrix(self):
        """Build self.stoichiometric_matrix (a StructMatrix) with one row per
        variable species and one column per reaction, filled with the signed
        stoichiometric coefficients."""
        vspec = self.hasVariableSpecies()
        react = self.hasReactions()
        # double precision zero matrix, species x reactions
        nm = numpy.zeros((len(vspec), len(react)),'d')
        for sp in vspec:
            for r in self.get(sp).isReagentOf():
                nm[vspec.index(sp)][react.index(r)] = self.get(r).stoichiometry[sp]
        # this is if absolute stoichiometry value is used
##        for r in self.get(sp).isSubstrateOf():
##            nm[vspec.index(sp)][react.index(r)] = abs(self.get(r).stoichiometry[sp])
##        for r in self.get(sp).isProductOf():
##            nm[vspec.index(sp)][react.index(r)] = -abs(self.get(r).stoichiometry[sp])
        self.stoichiometric_matrix = StructMatrix(nm, range(len(vspec)), range(len(react)))
        self.stoichiometric_matrix.setRow(vspec)
        self.stoichiometric_matrix.setCol(react)
def addODEs(self):
self.ODEs = []
for varspec in self.stoichiometric_matrix.row:
if self.struct != None:
if varspec not in self.struct.Nr.row:
if self.__DEBUG__: print('Creating dependent ODE_{0}'.format(varspec) )
ode = ODE(self.get(varspec), independent=False)
else:
if self.__DEBUG__: print('Creating independent ODE_{0}'.format(varspec) )
ode = ODE(self.get(varspec), independent=True)
else:
if self.__DEBUG__: print( 'Creating independent* ODE_{0} (*assumed - no structural information available)'.format(varspec) )
ode = ODE(self.get(varspec), independent=True)
mrow = self.stoichiometric_matrix.getRowsByName(varspec)
for e in range(len(mrow[0])):
if mrow[0,e] != 0.0:
print('Adding term: {0}*{1}'.format(mrow[0,e], self.stoichiometric_matrix.col[e]))
ode.addReaction(self.get(self.stoichiometric_matrix.col[e]), mrow[0,e])
self.__setattr__(ode.name, ode)
self.ODEs.append(ode)
self.__setattr__('xcode_'+ode.name, compile(ode.getGlobalFormula(), '<string>', 'exec'))
def hasODEs(self):
return MapList([o.name for o in self.ODEs])
def evalODEs(self, odes):
return [v() for v in odes]
    def evalXcode(self, ode):
        """Execute the pre-compiled ODE code object ('xcode_<name>', created in
        addODEs) and return the resulting ``sdot`` value.

        NOTE(review): under Python 3, names assigned inside ``exec`` in a
        function body do not become local variables, so ``return sdot`` may
        raise NameError -- confirm which Python versions exercise this path.
        """
        exec(self.__getattribute__('xcode_'+ode.name))
        return sdot
def hasFunctions(self):
return MapList([f.name for f in self.functions])
def hasReactions(self):
return MapList([r.name for r in self.reactions])
def hasSpecies(self):
return MapList([s.name for s in self.species])
def hasFixedSpecies(self):
return MapList([s.name for s in self.species if s.fixed])
def hasVariableSpecies(self):
return MapList([s.name for s in self.species if not s.fixed])
def findReactionsThatIncludeAllSpecifiedReagents(self, *args):
assert len(args) > 1, '\nNeed two or more species for this one!'
setlist = [self.__getattribute__(s).isReagentOf().asSet() for s in args]
isect = setlist[0]
for s in setlist:
isect.intersection_update(s)
return MapList(isect)
def hasGlobalParameters(self):
return MapList(p.name for p in self.global_parameters)
def hasAssignmentRules(self):
return MapList([ar.name for ar in self.global_parameters+self.species if hasattr(ar, 'type')=='assignemnt'])
def hasAssignmentRules(self):
return MapList([ar.name for ar in self.global_parameters+self.species if hasattr(ar, 'type')=='rate'])
def hasEvents(self):
return MapList(e.name for e in self.events)
def hasCompartments(self):
return MapList(c.name for c in self.compartments)
| SystemsBioinformatics/stochpy | stochpy/core2/PyscesCore2.py | Python | gpl-3.0 | 50,226 | [
"PySCeS"
] | 2b951edbe9ea3ca160a051123d03d6f820d9edc4055b2103c7d6fedd79807b36 |
#!/usr/bin/python
import os,random
from math import pi,sqrt
def crossprod(vec1, vec2):
    """Return the cross product vec1 x vec2 of two 3-vectors."""
    return [vec1[1] * vec2[2] - vec1[2] * vec2[1],
            vec1[2] * vec2[0] - vec1[0] * vec2[2],
            vec1[0] * vec2[1] - vec1[1] * vec2[0]]
def dotprod(vec1, vec2):
    """Return the dot product of two 3-vectors."""
    return sum(vec1[i] * vec2[i] for i in range(3))
def magnitude(vector):
    """Return the Euclidean length of a 3-vector."""
    return sqrt(sum(vector[i] * vector[i] for i in range(3)))
def magnitude2d(vector):
    """Return the Euclidean length of a 2-vector."""
    return sqrt(sum(vector[i] * vector[i] for i in range(2)))
def recip(latvecs):
    """Return the reciprocal cell (without the 2*pi factor) of three lattice
    vectors: b_i = (a_j x a_k) / (a_i . (a_j x a_k)) with cyclic i,j,k."""
    va, vb, vc = latvecs[0], latvecs[1], latvecs[2]
    def _cross(u, v):
        return [u[1] * v[2] - u[2] * v[1],
                u[2] * v[0] - u[0] * v[2],
                u[0] * v[1] - u[1] * v[0]]
    def _dot(u, v):
        return u[0] * v[0] + u[1] * v[1] + u[2] * v[2]
    pairs = [(va, _cross(vb, vc)), (vb, _cross(vc, va)), (vc, _cross(va, vb))]
    return [[comp / _dot(base, cp) for comp in cp] for base, cp in pairs]
def kmesh(recipunitcell, resolution):
    """Divide the three reciprocal-cell vectors by *resolution* and return them
    as whitespace-separated strings (one per vector)."""
    mesh = []
    for x in range(3):
        scaled = [recipunitcell[x][y] / resolution for y in range(3)]
        mesh.append('%s %s %s' % (scaled[0], scaled[1], scaled[2]))
    return mesh
def findenergy():
    """Scan OUTCAR in the cwd for TOTEN lines and return the final total
    energy (column 5) as a float; returns the string '????????' when no
    OUTCAR is present or no TOTEN entry is found."""
    if os.path.isfile('OUTCAR') == True:
        outcarlines = readfile('OUTCAR')
    else:
        outcarlines = []
    count = 0
    energylines = [];
    # record the index of every line whose tokens contain TOTEN
    for i in outcarlines:
        list = i.split()
        if 'TOTEN' in list:
            energylines.append(count)
        count = count + 1
    if len(energylines) > 0:
        # the last TOTEN line holds the final (converged) energy
        last = energylines[-1]
        energyline = outcarlines[last]
        energy = float(energyline.split()[4])
    else:
        energy = '????????'
    return energy
def formationenthalpy(mixture, a, b, aatoms, batoms, totalatoms):
    """Return the per-atom formation enthalpy of a binary structure relative
    to the composition-weighted pure endpoint energies *a* and *b*."""
    totatoms = aatoms + batoms
    endpoint_a = a / totalatoms * aatoms / totatoms
    endpoint_b = b / totalatoms * batoms / totatoms
    return mixture / totatoms - (endpoint_a + endpoint_b)
def updatestructure(structurenumber, formationenthalpy):
    """Replace the 'formation enthalpy<N>' placeholder line in structures.in
    with the computed formation enthalpy value.

    NOTE(review): because of the ``number != 0`` guard, a placeholder sitting
    on the very first line (index 0) would never be updated -- confirm the
    file format guarantees this cannot happen.
    """
    structuresread = open('structures.in','r')
    number = 0
    count = 0
    # find the line index of the matching placeholder
    for i in structuresread:
        if i == ('formation enthalpy' + str(structurenumber) + '\n'):
            number = count
        count = count + 1
    structuresread.close()
    readlines = readfile('structures.in')
    if number != 0:
        readlines[number] = str(formationenthalpy) + '\n'
    writefile('structures.in',readlines)
def vegardslaw(constone, consttwo, largelatpar, smalllatpar):
    """Linearly interpolate a lattice parameter between two endmembers
    (Vegard's law), weighted by the two concentrations."""
    fraction = float(constone) / (float(constone) + float(consttwo))
    return largelatpar - (largelatpar - smalllatpar) * fraction
def getscore():
    """Return the final cross-validation score parsed from finalcvs.out in the
    cwd; returns the string '???????????' when the file or the marker line
    ' Final Cross Validation Score:' is missing.
    """
    if os.path.isfile('finalcvs.out')==True:
        output = open('finalcvs.out','r')
    else:
        output = []
    count = 0
    found = 0
    # the score sits on the line directly after the marker
    for i in output:
        if i == " Final Cross Validation Score:\n":
            found = count + 1
        count = count + 1
    if os.path.isfile('finalcvs.out') == True:
        output.close()
        lines = readfile('finalcvs.out')
    if found != 0:
        score = float(lines[found])
    else:
        score = '???????????'
    return score
def conv(file, keyword):
    """Return every line of *file* whose whitespace-separated tokens include
    *keyword*, in file order.

    Bug fix: the original opened the file twice, closed the first handle twice
    (``test.close()`` instead of ``test1.close()``) and leaked the second
    handle. A single pass over one handle yields identical results.
    """
    with open(file, 'r') as handle:
        lines = handle.readlines()
    return [line for line in lines if keyword in line.split()]
def randomize(numofstructs, path):
    """Shuffle the structure entries of <path>/structures.orig and write them
    in random order to <path>/structures.in, keeping the header ('peratom'
    plus the first constituent block) and the trailing block in place.

    NOTE(review): Python 2 only -- uses print statements and relies on
    ``range`` returning a list (``data.remove`` is called on it).
    """
    print numofstructs
    # candidate separator indices 2..numofstructs+1 (0 and 1 are the header)
    data = range(2,numofstructs + 2)
    ranlist = []
    print data
    # draw a random permutation of the structure indices
    for i in range(1,numofstructs + 1):
        print i
        ran = random.choice(data)
        ranlist.append(ran)
        data.remove(ran)
    structlines = readfile(path + '/structures.orig')
    list = structuresindices(structlines)
    structlist = ['peratom\n']
    # everything between line 1 and the second separator is the first block
    constonelines = ''.join(structlines[1:list[1]])
    structlist.append(constonelines)
    for i in ranlist:
        lines = ''.join(structlines[list[i-1]:list[i]])
        structlist.append(lines)
        print structlist
        print '\n'
    # the tail after the last separator is kept at the end
    consttwolines = ''.join(structlines[list[-1]:])
    structlist.append(consttwolines)
    writefile(path +'/structures.in',structlist)
def readfile(file):
    """Read *file* and return its contents as a list of lines."""
    with open(file, 'r') as handle:
        return handle.readlines()
def writefile(file, lines):
    """Write the sequence of strings *lines* to *file*, replacing its contents."""
    with open(file, 'w') as handle:
        handle.writelines(lines)
def structuresindices(structlines):
    """Return the indices of all lines containing the '#-----' separator."""
    return [pos for pos, line in enumerate(structlines) if '#-----' in line]
def getindices(file, whattolookfor):
    """Return the indices of the lines of *file* that contain the substring
    *whattolookfor*."""
    with open(file, 'r') as handle:
        lines = handle.readlines()
    return [pos for pos, line in enumerate(lines) if whattolookfor in line]
def placeunclefiles(homedir):
    """Copy every recognised UNCLE input/output file that exists in *homedir*
    into the current working directory.

    Refactor: the original repeated the isfile-test/``cp`` pair sixteen times;
    a single loop over the filename list is equivalent and easier to extend.
    """
    filenames = ['GApar.in', 'lat.in', 'fitpar.in', 'control.in', 'bulkpar.in',
                 'groundstatesearch.in', 'MCpar.in', 'finalecis.out',
                 'figures.out', 'finalcvs.out', 'struct_enum.out',
                 'genalgsummary.dat', 'listchildren.dat', 'listgeneration.dat',
                 'population.out', 'gss.out']
    for name in filenames:
        source = homedir + '/' + name
        if os.path.isfile(source) == True:
            os.system('cp ' + source + ' .')
def concentrations(list):
    """Group the input lines by their second whitespace token (the
    concentration) and write each group, sorted, to
    concentrations/concentration<value>; any existing 'concentrations'
    directory is wiped first. Leaves the cwd where it started.
    """
    uniqueconclist = uniquelist(list,1)
    # recreate the output directory from scratch
    if os.path.isdir('concentrations') == False:
        os.mkdir('concentrations')
    else:
        os.system('rm -rf concentrations')
        os.mkdir('concentrations')
    os.chdir('concentrations')
    for i in uniqueconclist:
        specificconcs = []
        # keep only complete lines (more than two tokens) at this concentration
        for j in list:
            if j.split()[1] == i and len(j.split()) > 2:
                specificconcs.append(j)
        specificconcs = sorted(specificconcs)
        writefile('concentration' + str(i),specificconcs)
    os.chdir('../')
def numberofstructures(list):
    """For every concentration file produced by concentrations(), count how
    often each structure number occurs and write a 'plot<conc>' summary file
    (structure id, count, and a grand total) into the concentrations dir.

    NOTE(review): os.chdir('concentrations') is entered but never left --
    callers end up one directory deeper; confirm that is intended.
    """
    uniqueconclist = uniquelist(list,1)
    os.chdir('concentrations')
    for l in uniqueconclist:
        total = 0
        struct = ['structure #    # of occcurences\n--------------------------------------------\n']
        plot = []
        lines = readfile('concentration' + l)
        uniquestructlist = uniquelist(lines,0)
        # first token of each line is the structure number
        for j in lines:
            struct.append(j.split()[0])
        for i in uniquestructlist:
            number = struct.count(i)
            total = total + number
            plot.append(i.rjust(5) + '     ' + str(number).rjust(3) + '\n')
        plot.append('total --> ' + str(total) + '\n')
        writefile('plot' + l,plot)
def uniquelist(list, index):
    """Return the distinct values of whitespace token *index* across the given
    lines, in first-seen order."""
    seen = []
    for entry in list:
        token = entry.split()[index]
        if token not in seen:
            seen.append(token)
    return seen
def getdirs(path, tag, line):
    """For each entry of directory *path* whose name contains *tag*, return the
    portion of the name following '<line>_'."""
    suffix_offset = len(line) + 1
    matches = []
    for entry in os.listdir(path):
        if tag in entry:
            start = entry.find(line + '_') + suffix_offset
            matches.append(entry[start:])
    return matches
def mkposcars(file):
    """Generate VASP POSCAR files (vasp.NNNNNN) for the superstructures listed
    in the input *file*, interpolating each lattice parameter via Vegard's law
    from the two pure-element lattice parameters, and write the pure-element
    reference POSCARs (vasp.<element>).

    The first input line selects structures: 'all', a 'lo - hi' range, or an
    explicit list. The rest of the file holds the two element blocks.
    """
    input = readfile(file)
    if input[0].split()[0] == 'all':
        superstruct = getdirs('vaspruns','str','str')
    elif '-' in input[0].split():
        superstruct = []
        for i in range(int(input[0].split()[0]),int(input[0].split()[2])+1):
            superstruct.append(str(i))
    else:
        superstruct = input[0].split()
    parentatoms = int(input[6])
    element2 = 8 + parentatoms
    elementone = input[1].split()[0]
    elementtwo = input[element2].split()[0]
    totatoms = int(input[6]) #find total number of atoms in parent unit cell
    latparindex=9+totatoms #find location of lattice parameter
    firstlatpar = float(input[2])
    secondlatpar = float(input[latparindex])
    # order the two endpoint lattice parameters; n/p select the matching
    # atom-count columns for the Vegard interpolation below
    if firstlatpar > secondlatpar:
        largelatpar=firstlatpar
        smalllatpar=secondlatpar
        n=1
        p=0
    else:
        largelatpar=secondlatpar
        smalllatpar=firstlatpar
        n=0
        p=1
    for i in superstruct:
        # makestr.new emits a zero-padded vasp.NNNNNN file for structure i
        os.system('makestr.new struct_enum.out ' + i)
        if int(i) > 9999:
            poscarread = open('vasp.0' + i, 'r')
        elif int(i) > 999:
            poscarread = open('vasp.00' + i, 'r')
        elif int(i) > 99:
            poscarread = open('vasp.000' + i, 'r')
        elif int(i) > 9:
            poscarread = open('vasp.0000' + i, 'r')
        else:
            poscarread = open('vasp.00000' + i, 'r')
        lines = poscarread.readlines()
        poscarread.close()
        # line 6 holds the per-element atom counts; line 2 the lattice parameter
        atoms = lines[5].split()
        lines[1] = str(vegardslaw(atoms[n],atoms[p],largelatpar,smalllatpar)) + '\n'
        totalatoms = int(atoms[0]) + int(atoms[1])
        if int(i) > 9999:
            poscarwrite = open('vasp.0' + i, 'w')
        elif int(i) > 999:
            poscarwrite = open('vasp.00' + i, 'w')
        elif int(i) > 99:
            poscarwrite = open('vasp.000' + i, 'w')
        elif int(i) > 9:
            poscarwrite = open('vasp.0000' + i, 'w')
        else:
            poscarwrite = open('vasp.00000' + i, 'w')
        poscarwrite.writelines(lines)
        poscarwrite.close()
    indexone = 8+totatoms
    indextwo = input.index('kpoints XxXxX\n')
    writefile('vasp.' + elementone,input[1:indexone])
    writefile('vasp.' + elementtwo,input[indexone:indextwo])
def strtocartesian(basisvecs, latticevecs):
    """Convert fractional basis vectors (whitespace-separated strings) to
    cartesian coordinates using three lattice-vector strings; each component
    is rounded to 8 decimal places."""
    lattice = [[float(x) for x in vec.split()] for vec in latticevecs]
    cartesianbasisvecs = []
    for entry in basisvecs:
        frac = [float(x) for x in entry.split()]
        cart = [round(sum(frac[i] * lattice[i][axis] for i in range(3)), 8)
                for axis in range(3)]
        cartesianbasisvecs.append(cart)
    return cartesianbasisvecs
def strtocartesian2d(basisvecs, latticevecs):
    """Convert 2D fractional basis vectors (whitespace-separated strings) to
    cartesian coordinates using two lattice-vector strings; each component is
    rounded to 2 decimal places."""
    lattice = [[float(x) for x in vec.split()] for vec in latticevecs]
    cartesianbasisvecs = []
    for entry in basisvecs:
        frac = [float(x) for x in entry.split()]
        cart = [round(sum(frac[i] * lattice[i][axis] for i in range(2)), 2)
                for axis in range(2)]
        cartesianbasisvecs.append(cart)
    return cartesianbasisvecs
def xcombinations(items, n):
    """Yield every ordered selection of *n* distinct elements of *items*
    (i.e. all n-permutations), each as a list."""
    if n == 0:
        yield []
        return
    for pos in range(len(items)):
        remainder = items[:pos] + items[pos + 1:]
        for tail in xcombinations(remainder, n - 1):
            yield [items[pos]] + tail
def xuniqueCombinations(items, n):
    """Yield every unordered combination of *n* elements of *items*,
    preserving the original element order, each as a list."""
    if n == 0:
        yield []
        return
    for pos in range(len(items)):
        for tail in xuniqueCombinations(items[pos + 1:], n - 1):
            yield [items[pos]] + tail
def xselections(items, n):
    """Yield every length-*n* selection of *items* with repetition allowed
    (the n-fold cartesian power), each as a list."""
    if n == 0:
        yield []
        return
    for pos in range(len(items)):
        for tail in xselections(items, n - 1):
            yield [items[pos]] + tail
def xpermutations(items):
    """Yield every permutation of *items* (delegates to xcombinations with the
    full length)."""
    full_length = len(items)
    return xcombinations(items, full_length)
def cubicsymops():
    """Return the 48 point-symmetry operations of the cubic lattice as signed
    3x3 permutation matrices (row-lists)."""
    sign_tuples = []
    for signs in xselections([1, -1], 3):
        sign_tuples.append(signs)
    # the three distinct axis rows, then all their orderings
    axis_rows = []
    for row in xpermutations([1, 0, 0]):
        if row not in axis_rows:
            axis_rows.append(row)
    row_perms = []
    for perm in xpermutations(axis_rows):
        if perm not in row_perms:
            row_perms.append(perm)
    ops = []
    for signs in sign_tuples:
        for perm in row_perms:
            op = [[perm[r][0] * signs[r], perm[r][1] * signs[r], perm[r][2] * signs[r]]
                  for r in range(0, 3)]
            if op not in ops:
                ops.append(op)
    return ops
def squaresymops():
    """Return the 8 point-symmetry operations of the square lattice as signed
    2x2 permutation matrices (row-lists)."""
    sign_tuples = []
    for signs in xselections([1, -1], 2):
        sign_tuples.append(signs)
    # the two distinct axis rows, then both of their orderings
    axis_rows = []
    for row in xpermutations([1, 0]):
        if row not in axis_rows:
            axis_rows.append(row)
    row_perms = []
    for perm in xpermutations(axis_rows):
        if perm not in row_perms:
            row_perms.append(perm)
    ops = []
    for signs in sign_tuples:
        for perm in row_perms:
            op = [[perm[r][0] * signs[r], perm[r][1] * signs[r]]
                  for r in range(0, 2)]
            if op not in ops:
                ops.append(op)
    return ops
def matrixmultiply(vec, matrix):
    """Return the matrix-vector product of a 3x3 matrix and a 3-vector."""
    return [sum(matrix[row][col] * vec[col] for col in range(3))
            for row in range(3)]
def matrixmultiply2d(vec, matrix):
    """Return the matrix-vector product of a 2x2 matrix and a 2-vector, with
    each component rounded (as the original did)."""
    return [round(matrix[row][0] * vec[0] + matrix[row][1] * vec[1])
            for row in range(2)]
def mapback(latvecs, vec, basisvecs):
    """Translate *vec* by every integer combination (-3..2 per axis) of the
    lattice vectors and return the basis-vector entry FOLLOWING the one that
    matches a translate; returns '????????' when nothing matches.

    NOTE(review): ``basisvecs[index + 1]`` raises IndexError when the match is
    the last basis vector, and the exact float equality test (``l == m``) is
    fragile -- confirm both against the callers. Python 2 only (print
    statements).
    """
    vecs = []
    # all lattice translates of vec with coefficients in range(-3, 3)
    for i in range(-3,3):
        for j in range(-3,3):
            for k in range(-3,3):
                vecone = [i*float(latvecs[0].split()[0]),i * float(latvecs[0].split()[1]),i * float(latvecs[0].split()[2])]
                vectwo = [j*float(latvecs[1].split()[0]),j * float(latvecs[1].split()[1]),j * float(latvecs[1].split()[2])]
                vecthree = [k*float(latvecs[2].split()[0]),k * float(latvecs[2].split()[1]),k * float(latvecs[2].split()[2])]
                newvector = [vecone[0] + vectwo[0] + vecthree[0] + vec[0],vecone[1] + vectwo[1] + vecthree[1] + vec[1],vecone[2] + vectwo[2] + vecthree[2] + vec[2]]
                vecs.append(newvector)
    index = 0
    returnvar = '????????'
    for l in basisvecs:
        for m in vecs:
            if l == m:
                print basisvecs
                print 'basisvecs'
                print index + 1
                print 'INDEX'
                returnvar = basisvecs[index + 1]
                print returnvar
        index = index + 1
    return returnvar
def mapback2d(latvecs, vec, basisvecs, upperlimit):
    """Translate *vec* by integer combinations of the two lattice vectors (with
    coefficients from -upperlimit/2 to +upperlimit/2, floor semantics) and
    return the translate that equals an entry of *basisvecs*; returns the
    string '????????' when nothing matches.

    Bug fix: ``-upperlimit/2`` relied on Python 2 integer division -- under
    Python 3 it yields a float and ``range`` raises TypeError. Floor division
    (``//``) reproduces the Python 2 bounds exactly on both versions.
    """
    lat_a = [float(x) for x in latvecs[0].split()]
    lat_b = [float(x) for x in latvecs[1].split()]
    candidates = []
    for j in range(-upperlimit // 2, upperlimit // 2 + 1):
        for k in range(-upperlimit // 2, upperlimit // 2 + 1):
            candidates.append([j * lat_a[0] + k * lat_b[0] + vec[0],
                               j * lat_a[1] + k * lat_b[1] + vec[1]])
    returnvar = '????????'
    for basis in basisvecs:
        for cand in candidates:
            if basis == cand:
                returnvar = cand
    return returnvar
def twovectoradd(vectorone, vectortwo):
    """Return the component-wise sum of two 3-vectors."""
    return [vectorone[i] + vectortwo[i] for i in range(3)]
def twovectoradd2d(vectorone, vectortwo):
    """Return the component-wise sum of two 2-vectors."""
    return [vectorone[i] + vectortwo[i] for i in range(2)]
def threevectoradd(vectorone, vectortwo, vectorthree):
    """Return the component-wise sum of three 3-vectors."""
    return [vectorone[i] + vectortwo[i] + vectorthree[i] for i in range(3)]
def scalartimesvector(scalar, vector):
    """Scale a whitespace-separated 3-vector string by *scalar*; returns a
    list of floats."""
    parts = vector.split()
    return [scalar * float(parts[i]) for i in range(3)]
def scalartimesvector2d(scalar, vector):
    """Scale a whitespace-separated 2-vector string by *scalar*; returns a
    list of floats."""
    parts = vector.split()
    return [scalar * float(parts[i]) for i in range(2)]
def eliminateduplicates2d(figurelist, uniquefigs, parent, symops, figtype):
    """Append to *uniquefigs* each figure from *figurelist* that is not
    equivalent to an already-kept figure under a parent-lattice translation
    followed by a 2D point-symmetry operation; returns uniquefigs (which is
    also modified in place).

    figurelist : candidate figures, each a list of *figtype* 2D vertices
    uniquefigs : figures already known to be unique (extended in place)
    parent     : translation vectors of the parent lattice
    symops     : 2x2 symmetry matrices
    figtype    : number of vertices per figure (2-8 supported, as before)

    Bug fixes: a figure already present in uniquefigs was appended AGAIN
    (the membership test skipped the symmetry check but not the append); the
    seven copy-pasted figtype branches are collapsed into one vertex test.
    """
    for fig in figurelist:
        if fig in uniquefigs:
            continue
        duplicate = False
        for shift in parent:
            for op in symops:
                # translate then rotate/reflect each vertex (rounded, matching
                # the original matrixmultiply2d helper)
                rot = []
                for v in range(0, figtype):
                    tx = fig[v][0] + shift[0]
                    ty = fig[v][1] + shift[1]
                    rot.append([round(op[0][0] * tx + op[0][1] * ty),
                                round(op[1][0] * tx + op[1][1] * ty)])
                if 2 <= figtype <= 8:
                    for kept in uniquefigs:
                        if all(vertex in kept for vertex in rot):
                            duplicate = True
        if not duplicate:
            uniquefigs.append(fig)
    return uniquefigs
def eliminateduplicates(figurelist, uniquefigs, parent, symops, figtype):
    """Append to *uniquefigs* each 3D figure from *figurelist* that is not
    equivalent to an already-kept figure under a parent-lattice translation
    followed by a 3D point-symmetry operation; returns uniquefigs (which is
    also modified in place).

    Same contract as eliminateduplicates2d but with 3D vertices and 3x3
    symmetry matrices (no rounding, matching the original matrixmultiply).

    Bug fixes: a figure already present in uniquefigs was appended AGAIN; the
    seven copy-pasted figtype branches are collapsed into one vertex test.
    """
    for fig in figurelist:
        if fig in uniquefigs:
            continue
        duplicate = False
        for shift in parent:
            for op in symops:
                rot = []
                for v in range(0, figtype):
                    tx = fig[v][0] + shift[0]
                    ty = fig[v][1] + shift[1]
                    tz = fig[v][2] + shift[2]
                    rot.append([op[0][0] * tx + op[0][1] * ty + op[0][2] * tz,
                                op[1][0] * tx + op[1][1] * ty + op[1][2] * tz,
                                op[2][0] * tx + op[2][1] * ty + op[2][2] * tz])
                if 2 <= figtype <= 8:
                    for kept in uniquefigs:
                        if all(vertex in kept for vertex in rot):
                            duplicate = True
        if not duplicate:
            uniquefigs.append(fig)
    return uniquefigs
def findfigures2d(figurelist, structurefigs, parent, symops, figtype):
    """Return the figures from *figurelist* that are equivalent to some figure
    in *structurefigs* under a parent-lattice translation followed by a 2D
    point-symmetry operation (each figure returned at most once).

    Refactor: the seven copy-pasted figtype branches (2..8) are collapsed into
    a single vertex-membership test and the translation/rotation helpers are
    inlined; behavior is unchanged.
    """
    templist = []
    for fig in structurefigs:
        for shift in parent:
            for op in symops:
                # translate then rotate/reflect each vertex (rounded, matching
                # the original matrixmultiply2d helper)
                rot = []
                for v in range(0, figtype):
                    tx = fig[v][0] + shift[0]
                    ty = fig[v][1] + shift[1]
                    rot.append([round(op[0][0] * tx + op[0][1] * ty),
                                round(op[1][0] * tx + op[1][1] * ty)])
                if 2 <= figtype <= 8:
                    for candidate in figurelist:
                        if candidate not in templist and all(vertex in candidate for vertex in rot):
                            templist.append(candidate)
    return templist
def findfigures(figurelist, structurefigs, parent, symops, figtype):
    """Return the figures from *figurelist* that are equivalent to some figure
    in *structurefigs* under a parent-lattice translation followed by a 3D
    point-symmetry operation (each figure returned at most once).

    Refactor: the seven copy-pasted figtype branches (2..8) are collapsed into
    a single vertex-membership test and the translation/rotation helpers are
    inlined (no rounding, matching the original matrixmultiply); behavior is
    unchanged.
    """
    templist = []
    for fig in structurefigs:
        for shift in parent:
            for op in symops:
                rot = []
                for v in range(0, figtype):
                    tx = fig[v][0] + shift[0]
                    ty = fig[v][1] + shift[1]
                    tz = fig[v][2] + shift[2]
                    rot.append([op[0][0] * tx + op[0][1] * ty + op[0][2] * tz,
                                op[1][0] * tx + op[1][1] * ty + op[1][2] * tz,
                                op[2][0] * tx + op[2][1] * ty + op[2][2] * tz])
                if 2 <= figtype <= 8:
                    for candidate in figurelist:
                        if candidate not in templist and all(vertex in candidate for vertex in rot):
                            templist.append(candidate)
    return templist
def surfacedistancecheck(l):
    """Read the zero-padded POSCAR file for structure number *l* and decide
    whether any two (in-plane) basis atoms sit closer than 0.5: returns
    'dont make' if so, otherwise 'make it so'.

    NOTE(review): the inner loop reuses the name ``l`` for a cartesian vector,
    shadowing the structure-number parameter -- harmless here because *l* is
    not used afterwards, but worth renaming.
    """
    # POSCAR filenames are zero-padded to six digits
    if int(l) > 9999:
        poscar = readfile('vasp.0' + l)
    elif int(l) > 999:
        poscar = readfile('vasp.00' + l)
    elif int(l) > 99:
        poscar = readfile('vasp.000' + l)
    elif int(l) > 9:
        poscar = readfile('vasp.0000' + l)
    else:
        poscar = readfile('vasp.00000' + l)
    # in-plane components (columns 2 and 3) of lattice vectors 2 and 3
    lvtwo = poscar[3].split()[1] + ' ' + poscar[3].split()[2]
    lvthree = poscar[4].split()[1] + ' ' + poscar[4].split()[2]
    lvecs = [lvtwo,lvthree]
    numofbasis = int(poscar[5].split()[0])
    basisvecs = []
    for k in range(7,7+numofbasis):
        basisvecs.append(poscar[k].split()[1] + ' ' + poscar[k].split()[2])
    cartesianvecs=strtocartesian2d(basisvecs,lvecs)
    # pairwise distinct in-plane distances between basis atoms
    lengths = []
    for m in cartesianvecs:
        for l in cartesianvecs:
            diff = [float(m[0])-float(l[0]),float(m[1])-float(l[1])]
            distance = magnitude2d(diff)
            if distance not in lengths and l != m:
                lengths.append(distance)
    for r in lengths:
        if float(r) <= 0.5:
            status = 'dont make'
            break
    else:
        status = 'make it so'
    if len(lengths) == 0:
        status = 'make it so'
    return status
| msg-byu/enumlib | support/unclefuncs.py | Python | mit | 26,160 | [
"VASP"
] | 9a83b0179acf704cc88cceec2353d31db19d0393aff2290e820640d27b2d17b4 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema: creates the ProductPurchase and ServicePurchase tables.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ProductPurchase',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('amount', models.PositiveSmallIntegerField(default=1, verbose_name='Amount')),
            ],
            options={
                # NOTE(review): ordering references 'visit' and 'product'
                # fields that are not created in this migration — presumably
                # added by a follow-up migration; verify.
                'ordering': ('-visit', 'product'),
                'verbose_name': 'Product Purchase',
                'verbose_name_plural': 'Product Purchases',
            },
        ),
        migrations.CreateModel(
            name='ServicePurchase',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
            ],
            options={
                # NOTE(review): same as above for 'visit' and 'service'.
                'ordering': ('-visit', 'service'),
                'verbose_name': 'Service Purchase',
                'verbose_name_plural': 'Service Purchases',
            },
        ),
    ]
| dogukantufekci/supersalon | supersalon/purchases/migrations/0001_initial.py | Python | bsd-3-clause | 1,157 | [
"VisIt"
] | 89ffa7cba3784f3f73083c6972c1a9c3c30f2a08be603704393b9244060a18a7 |
"""rbf - Radial basis functions for interpolation/smoothing scattered Nd data.
Written by John Travers <jtravs@gmail.com>, February 2007
Based closely on Matlab code by Alex Chirokov
Additional, large, improvements by Robert Hetland
Some additional alterations by Travis Oliphant
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu>
Copyright (c) 2007, John Travers <jtravs@gmail.com>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Robert Hetland nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
from numpy import (sqrt, log, asarray, newaxis, all, dot, exp, eye,
float_)
from scipy import linalg
__all__ = ['Rbf']
class Rbf(object):
    """
    Rbf(*args)

    A class for radial basis function approximation/interpolation of
    n-dimensional scattered data.

    Parameters
    ----------
    *args : arrays
        x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
        and d is the array of values at the nodes
    function : str or callable, optional
        The radial basis function, based on the radius, r, given by the norm
        (default is Euclidean distance); the default is 'multiquadric'::

            'multiquadric': sqrt((r/self.epsilon)**2 + 1)
            'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
            'gaussian': exp(-(r/self.epsilon)**2)
            'linear': r
            'cubic': r**3
            'quintic': r**5
            'thin_plate': r**2 * log(r)

        If callable, then it must take 2 arguments (self, r). The epsilon
        parameter will be available as self.epsilon. Other keyword
        arguments passed in will be available as well.
    epsilon : float, optional
        Adjustable constant for gaussian or multiquadrics functions
        - defaults to approximate average distance between nodes (which is
        a good start).
    smooth : float, optional
        Values greater than zero increase the smoothness of the
        approximation. 0 is for interpolation (default), the function will
        always go through the nodal points in this case.
    norm : callable, optional
        A function that returns the 'distance' between two points, with
        inputs as arrays of positions (x, y, z, ...), and an output as an
        array of distance.

    Examples
    --------
    >>> rbfi = Rbf(x, y, z, d)  # radial basis function interpolator instance
    >>> di = rbfi(xi, yi, zi)   # interpolated values
    """

    def _euclidean_norm(self, x1, x2):
        # Default metric: Euclidean distance, summed over the leading
        # (dimension) axis.
        return sqrt(((x1 - x2)**2).sum(axis=0))

    # --- basis functions: each maps a distance array r to phi(r) ---

    def _h_multiquadric(self, r):
        return sqrt((1.0/self.epsilon*r)**2 + 1)

    def _h_inverse_multiquadric(self, r):
        return 1.0/sqrt((1.0/self.epsilon*r)**2 + 1)

    def _h_gaussian(self, r):
        return exp(-(1.0/self.epsilon*r)**2)

    def _h_linear(self, r):
        return r

    def _h_cubic(self, r):
        return r**3

    def _h_quintic(self, r):
        return r**5

    def _h_thin_plate(self, r):
        result = r**2 * log(r)
        result[r == 0] = 0  # the spline is zero at zero (avoid log(0)*0 = nan)
        return result

    # Setup self._function and do smoke test on initial r
    def _init_function(self, r):
        """Resolve ``self.function`` (name or callable) into a bound
        evaluator ``self._function`` and smoke-test it on ``r``."""
        if isinstance(self.function, str):
            self.function = self.function.lower()
            _mapped = {'inverse': 'inverse_multiquadric',
                       'inverse multiquadric': 'inverse_multiquadric',
                       'thin-plate': 'thin_plate'}
            if self.function in _mapped:
                self.function = _mapped[self.function]

            func_name = "_h_" + self.function
            if hasattr(self, func_name):
                self._function = getattr(self, func_name)
            else:
                functionlist = [x[3:] for x in dir(self)
                                if x.startswith('_h_')]
                raise ValueError("function must be a callable or one of " +
                                 ", ".join(functionlist))
            # BUGFIX: removed a redundant second
            # `self._function = getattr(self, "_h_"+self.function)` here;
            # the attribute was already set (or an error raised) above.
        elif callable(self.function):
            allow_one = False
            if hasattr(self.function, 'func_code') or \
                    hasattr(self.function, '__code__'):
                val = self.function
                allow_one = True
            elif hasattr(self.function, "im_func"):
                val = self.function.im_func
            elif hasattr(self.function, "__call__"):
                # NOTE(review): `im_func` is Python-2 only; callable
                # instances on Python 3 still fail here — verify whether
                # that path is needed.
                val = self.function.__call__.im_func
            else:
                raise ValueError("Cannot determine number of arguments to function")

            # BUGFIX: use __code__ (valid on Python 2.6+ and 3.x); the old
            # `val.func_code` raised AttributeError on Python 3.
            argcount = val.__code__.co_argcount
            if allow_one and argcount == 1:
                self._function = self.function
            elif argcount == 2:
                if sys.version_info[0] >= 3:
                    # Bind the two-argument callable as a method of self.
                    self._function = self.function.__get__(self, Rbf)
                else:
                    import new
                    self._function = new.instancemethod(self.function, self,
                                                        Rbf)
            else:
                raise ValueError("Function argument must take 1 or 2 arguments.")

        a0 = self._function(r)
        if a0.shape != r.shape:
            raise ValueError("Callable must take array and return array of the same shape")
        return a0

    def __init__(self, *args, **kwargs):
        # All leading args are coordinate arrays; the last is the data.
        # NOTE: dtype=float (float64) replaces the deprecated numpy `float_`
        # alias, which was removed in NumPy 2.0; behavior is identical.
        self.xi = asarray([asarray(a, dtype=float).flatten()
                           for a in args[:-1]])
        self.N = self.xi.shape[-1]
        self.di = asarray(args[-1]).flatten()

        if not all([x.size == self.di.size for x in self.xi]):
            raise ValueError("All arrays must be equal length.")

        self.norm = kwargs.pop('norm', self._euclidean_norm)
        r = self._call_norm(self.xi, self.xi)
        self.epsilon = kwargs.pop('epsilon', None)
        if self.epsilon is None:
            # Default shape parameter: mean pairwise node distance.
            self.epsilon = r.mean()
        self.smooth = kwargs.pop('smooth', 0.0)
        self.function = kwargs.pop('function', 'multiquadric')

        # attach anything left in kwargs to self for use by any
        # user-supplied basis function.
        for item, value in kwargs.items():
            setattr(self, item, value)

        # Solve the (optionally regularized) interpolation system for the
        # node weights.
        self.A = self._init_function(r) - eye(self.N)*self.smooth
        self.nodes = linalg.solve(self.A, self.di)

    def _call_norm(self, x1, x2):
        # Broadcast so the norm yields the full pairwise distance matrix.
        if len(x1.shape) == 1:
            x1 = x1[newaxis, :]
        if len(x2.shape) == 1:
            x2 = x2[newaxis, :]
        x1 = x1[..., :, newaxis]
        x2 = x2[..., newaxis, :]
        return self.norm(x1, x2)

    def __call__(self, *args):
        """Evaluate the interpolant at the given coordinate arrays."""
        args = [asarray(x) for x in args]
        if not all([x.shape == y.shape for x in args for y in args]):
            raise ValueError("Array lengths must be equal")
        shp = args[0].shape
        self.xa = asarray([a.flatten() for a in args], dtype=float)
        r = self._call_norm(self.xa, self.xi)
        return dot(self._function(r), self.nodes).reshape(shp)
| teoliphant/scipy | scipy/interpolate/rbf.py | Python | bsd-3-clause | 8,919 | [
"Gaussian"
] | c5ab68b7f9b25e063a8f123cd0bbe93a880a9d504a0bc7b0bcef41965b802319 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Tools/Database Processing/Edit Database Owner Information"""
#-------------------------------------------------------------------------
#
# gnome/gtk
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import URL_MANUAL_PAGE
from gramps.gen.config import config
from gramps.gen.utils.config import get_researcher
from gramps.gui.display import display_help
from gramps.gui.widgets import MonitoredEntry
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.plug import tool
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gui.glade import Glade
from gramps.gui.utils import is_right_click
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Edit_Database_Owner_Information')
#-------------------------------------------------------------------------
#
# constants
#
#-------------------------------------------------------------------------
# Preference keys for each field of the database owner (researcher) record.
# The tuple order must stay aligned with the owner's get() sequence, since
# OwnerEditor.on_menu_activate copies values across by index.
config_keys = (
    'researcher.researcher-name',
    'researcher.researcher-addr',
    'researcher.researcher-locality',
    'researcher.researcher-city',
    'researcher.researcher-state',
    'researcher.researcher-country',
    'researcher.researcher-postal',
    'researcher.researcher-phone',
    'researcher.researcher-email',
)
#-------------------------------------------------------------------------
#
# OwnerEditor
#
#-------------------------------------------------------------------------
class OwnerEditor(tool.Tool, ManagedWindow):
    """
    Allow editing database owner information.

    Provides a possibility to directly verify and edit the owner data of the
    current database. It also allows copying data from/to the preferences.
    """
    def __init__(self, dbstate, user, options_class, name, callback=None):
        uistate = user.uistate
        ManagedWindow.__init__(self, uistate, [], self.__class__)
        tool.Tool.__init__(self, dbstate, options_class, name)
        # Build and show the editor window immediately.
        self.display()

    def display(self):
        """Build the editor window from its glade definition and wire it up."""
        # get the main window from glade
        topDialog = Glade()

        # set gramps style title for the window
        window = topDialog.toplevel
        self.set_window(window,
                        topDialog.get_object("title"),
                        _("Database Owner Editor"))
        self.setup_configs('interface.ownereditor', 500, 400)

        # move help button to the left side
        action_area = topDialog.get_object("action_area")
        help_button = topDialog.get_object("help_button")
        action_area.set_child_secondary(help_button, True)

        # connect signals
        topDialog.connect_signals({
            "on_ok_button_clicked": self.on_ok_button_clicked,
            "on_cancel_button_clicked": self.close,
            "on_help_button_clicked": self.on_help_button_clicked,
            "on_eventbox_button_press_event": self.on_button_press_event,
            "on_menu_activate": self.on_menu_activate,
            "on_delete_event" : self.close,
        })

        # fetch the popup menu (shown on right-click; see
        # on_button_press_event)
        self.menu = topDialog.get_object("popup_menu")
        #topDialog.connect_signals({"on_menu_activate": self.on_menu_activate})

        # get current db owner and attach it to the entries of the window
        self.owner = self.db.get_researcher()
        self.entries = []
        # (widget name, setter, getter) triples for each owner field.
        entry = [
            ("name", self.owner.set_name, self.owner.get_name),
            ("address", self.owner.set_address, self.owner.get_address),
            ("locality", self.owner.set_locality, self.owner.get_locality),
            ("city", self.owner.set_city, self.owner.get_city),
            ("state", self.owner.set_state, self.owner.get_state),
            ("country", self.owner.set_country, self.owner.get_country),
            ("zip", self.owner.set_postal_code, self.owner.get_postal_code),
            ("phone", self.owner.set_phone, self.owner.get_phone),
            ("email", self.owner.set_email, self.owner.get_email),
        ]
        for (name,set_fn,get_fn) in entry:
            self.entries.append(MonitoredEntry(topDialog.get_object(name),
                                               set_fn,
                                               get_fn,
                                               self.db.readonly))
        # ok, let's see what we've done
        self.show()

    def on_ok_button_clicked(self, obj):
        """Update the current db's owner information from editor"""
        self.db.set_researcher(self.owner)
        self.close()

    def on_help_button_clicked(self, obj):
        """Display the relevant portion of GRAMPS manual"""
        display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)

    def on_button_press_event(self, obj, event):
        """Shows popup-menu for db <-> preferences copying"""
        if is_right_click(event):
            self.menu.popup(None, None, None, None, 0, 0)

    def build_menu_names(self, obj):
        # (menu label, window title) pair used by ManagedWindow.
        return (_('Main window'), _("Edit database owner information"))

    def on_menu_activate(self, menuitem):
        """Copies the owner information from/to the preferences"""
        if menuitem.props.name == 'copy_from_preferences_to_db':
            self.owner.set_from(get_researcher())
            for entry in self.entries:
                entry.update()
        elif menuitem.props.name == 'copy_from_db_to_preferences':
            # config_keys is index-parallel with owner.get().
            for i in range(len(config_keys)):
                config.set(config_keys[i], self.owner.get()[i])
#-------------------------------------------------------------------------
#
# OwnerEditorOptions (None at the moment)
#
#-------------------------------------------------------------------------
class OwnerEditorOptions(tool.ToolOptions):
    """Options holder for the owner editor tool; defines no extra options."""

    def __init__(self, name, person_id=None):
        # Nothing to configure — delegate straight to the base class.
        tool.ToolOptions.__init__(self, name, person_id)
| beernarrd/gramps | gramps/plugins/tool/ownereditor.py | Python | gpl-2.0 | 7,196 | [
"Brian"
] | 502b85647ff8def3dadb88a9fe3afa644f68a94e48381bae8fbeae24f714ab7b |
'''
Compiler.py
Translate AST to Mara Bytecode.
'''
import node
import scope
import special
import constant
from util.dispatch import method_store, multimethod
from util.functions import unique_id
from util.reflection import deriving
class CompileError(Exception, deriving('eq', 'show')):
    """Raised when the compiler cannot translate part of the AST.

    The message is a format string expanded with the given arguments.
    """

    def __init__(self, msg, *largs, **kwargs):
        formatted = msg.format(*largs, **kwargs)
        self.message = formatted
        super(CompileError, self).__init__(formatted)
class Registry(deriving('show')):
    """Allocates monotonically increasing virtual-register numbers.

    Calling the registry with a key returns a stable register number for
    that key, allocating a fresh one on first use.
    """

    def __init__(self):
        self.counter = -1
        self.regs = {}

    def __call__(self, i):
        # First sighting of a key allocates the next register number;
        # later calls return the same one.
        if i not in self.regs:
            self.counter += 1
            self.regs[i] = self.counter
        return self.regs[i]

    def next(self):
        """Allocate and return a fresh register number unconditionally."""
        self.counter += 1
        return self.counter

    def frame(self):
        """Create a per-function frame that draws registers from this registry."""
        return RegistryFrame(parent=self)
class RegistryFrame(deriving('show', 'eq')):
    """A local view onto a parent Registry.

    Keys are scoped to this frame, but the register numbers themselves are
    drawn from the shared parent so they never collide across frames.
    """

    def __init__(self, parent):
        self.regs = {}
        self.parent = parent
        self._unique_name = None

    def __call__(self, i):
        # Allocate from the parent on first use of a key; reuse thereafter.
        if i not in self.regs:
            self.regs[i] = self.parent.next()
        return self.regs[i]

    @property
    def unique_name(self):
        # Lazily allocate a stable unique prefix for this frame's labels.
        if self._unique_name is None:
            self._unique_name = unique_id('f')
        return self._unique_name

    def label(self, name):
        """Return a label string namespaced to this frame."""
        return '{f}_{n}'.format(f=self.unique_name, n=name)
class Compiler(object):
    '''
    Compilation Visitor

    Walks the AST via a multimethod `visit` and appends bytecode tuples to
    self.block.  Each visitor returns the register holding its result.
    (Python 2 code: note the print statements and list-returning range.)
    '''
    _store = method_store()

    # AST operator symbol -> bytecode mnemonic for built-in binary ops.
    _builtins = {
        '+': 'add',
        '-': 'sub',
        '*': 'mul',
        '/': 'div',
        '<': 'lt',
        '<=': 'lte',
        '>': 'gt',
        '>=': 'gte',
        '==': 'eq',
        '!=': 'neq',
    }

    def __init__(self):
        self.root = scope.Root()
        self.scope = self.root
        self.block = []            # emitted bytecode instructions, in order
        self.registry = Registry() # global virtual-register allocator
        self.functions = {}
        self.pool = None           # constant pool, set by compile()
        self._result = None        # register of the most recent result

    def result(self, reg=None):
        """With no argument, return the last result register; with an
        argument, record it as the last result and return it."""
        if reg is None:
            return self._result
        self._result = reg
        return reg

    def compile(self, ast, pool):
        """Compile `ast` to a bytecode list terminated by ('halt',).

        On CompileError, dumps the instructions emitted so far (for
        debugging) and re-raises.
        """
        self.pool = pool
        try:
            bytecodes = self.visit(ast)
        except CompileError:
            for i, code in enumerate(self.block):
                print i, ':', code
            raise
        bytecodes.append(('halt',))
        return bytecodes

    def emit(self, *instructions):
        """Append one or more instruction tuples to the current block."""
        self.block += instructions

    def hole(self):
        """Reserve a slot to be filled in later via patch(); returns its index."""
        index = len(self.block)
        self.block.append(None)
        return index

    def patch(self, index, instruction):
        """Fill a previously reserved hole; refuses to overwrite real code."""
        if self.block[index] != None:
            raise ValueError('Must patch a hole, not ' + str(self.block[index]))
        self.block[index] = instruction

    # Fallback visitor: any node type without a registered handler below.
    @multimethod(_store)
    def visit(self, n):
        raise TypeError('Node type {n} not yet supported for compilation'.format(n=n.__class__))

    @visit.d(node.NoOp)
    def _(self, n):
        pass

    # --- literals: load the constant-pool entry into a fresh register ---

    @visit.d(node.Int)
    def _(self, n):
        r = self.registry.frame()
        self.emit(
            ('load_c', r(0), n['constant']),
        )
        return r(0)

    @visit.d(node.Bool)
    def _(self, n):
        r = self.registry.frame()
        self.emit(
            ('load_c', r(0), n['constant'])
        )
        return r(0)

    @visit.d(node.Real)
    def _(self, n):
        r = self.registry.frame()
        self.emit(
            ('load_c', r(0), n['constant']),
        )
        return r(0)

    # --- declarations: evaluate the initializer, store into the slot ---

    @visit.d(node.Val)
    def _(self, n):
        result = self.visit(n.value)
        index = n['index']
        self.emit(
            ('store_p', result, index),
        )
        return result

    @visit.d(node.Var)
    def _(self, n):
        result = self.visit(n.value)
        index = n['index']
        self.emit(
            ('store_p', result, index)
        )
        return result

    @visit.d(node.ValueId)
    def _(self, n):
        # Identifier reference: load from the slot recorded at declaration.
        r = self.registry.frame()
        identifier = n.value
        declaration = n['namespace'][identifier]
        index = declaration['index']
        self.emit(
            ('load_p', r(0), index)
        )
        return self.result(r(0))

    @visit.d(node.Assign)
    def _(self, n):
        identifier = n.name.value
        declaration = n['namespace'][identifier]
        index = declaration['index']
        result = self.visit(n.value)
        self.emit(
            ('store_p', result, index)
        )
        return self.result(result)

    @visit.d(node.Param)
    def _(self, n):
        r = self.registry.frame()
        index = n['index']
        self.emit(
            ('load_p', r(0), index),
        )
        return self.result(r(0))

    @visit.d(node.Def)
    def _(self, n):
        # Function definition.  Emits the body inline, guarded by a jump so
        # straight-line execution skips over it; the function's "value" is
        # its entry address.
        r = self.registry.frame()
        l = r.label
        local_variables = n['locals']
        # +1 for address load, +1 for hole, +1 for label
        address = len(self.block) + 3
        # store the address of the function as the result.
        self.emit(
            ('load_v', r(0), address),
            ('label', l('skip_label')),
            ('jump', l('end')),
        )
        # set attributes
        n['address'] = address
        n['result'] = r(0)
        # reserve space for local variables
        self.emit(
            ('reserve', len(local_variables)),
        )
        save = self.hole()
        # track what registers we use in the function,
        # +1 because register numbers start at 1 (0 is special)
        reg_begin_index = self.registry.counter + 1
        # generate the function body
        ret = self.visit(n.body)
        # record the registers we used
        reg_end_index = self.registry.counter + 1
        local_registers = range(reg_begin_index, reg_end_index)
        # generate the return of the result
        self.emit(
            ('copy', 0, ret),
            tuple(['restore'] + local_registers),
            ('ret',),
            ('label', l('end')),
        )
        # patch in the save (we only know the used registers after the body)
        self.patch(save, tuple(['save'] + local_registers))
        return self.result(r(0))

    @visit.d(node.While)
    def _(self, n):
        r = self.registry.frame()
        l = r.label
        # capture the "top" of the loop
        self.emit(('label', l('begin')))
        # compute the predicate
        pred_result = self.visit(n.pred)
        # generate the skip
        self.emit(('branch_zero', pred_result, l('end')))
        # generate the body
        loop_result = self.visit(n.body)
        # loop
        self.emit(('jump', l('begin')))
        # end of loop
        self.emit(('label', l('end')))
        return self.result(loop_result)

    @visit.d(node.Call)
    def _(self, n):
        r = self.registry.frame()
        identifier = n.func.value
        # lookup the function's declaration
        declaration = n['namespace'][identifier]
        # lookup the address of the function body
        address = declaration['address']
        # generate evaluations of all the arguments
        arg_registers = [
            self.visit(value)
            for value in n.arg.values
        ]
        # generate the call; register 0 carries the return value by
        # convention, copy it somewhere stable.
        self.emit(
            tuple(['call', address] + arg_registers),
            ('copy', r(0), 0),
        )
        return self.result(r(0))

    @visit.d(node.BinOp)
    def _(self, n):
        r = self.registry.frame()
        func = n.func.value
        left_expr = n.args[0]
        right_expr = n.args[1]
        # Only the operators in _builtins are supported as bytecodes.
        op = self._builtins.get(func, None)
        if op is None:
            raise CompileError('BinOp {func} is not supported.', func=func)
        left = self.visit(left_expr)
        right = self.visit(right_expr)
        self.emit(
            (op, r(0), left, right),
        )
        return self.result(r(0))

    @visit.d(node.If)
    def _(self, n):
        # Two-armed conditional; result registers of both arms are merged
        # with a phi instruction.
        r = self.registry.frame()
        l = r.label
        pred_expr = n.pred
        if_body_expr = n.if_body
        else_body_expr = n.else_body
        pred = self.visit(pred_expr)
        self.emit(('branch_zero', pred, l('else_body')))
        body_result = self.visit(if_body_expr)
        self.emit(
            ('jump', l('if_end')),
            ('label', l('else_body')),
        )
        else_result = self.visit(else_body_expr)
        self.emit(('label', l('if_end')))
        self.emit(
            ('phi', r(0), body_result, else_result)
        )
        return self.result(r(0))

    @visit.d(node.Unit)
    def _(self, n):
        # Unit literal: load the NULL sentinel value.
        r = self.registry.frame()
        self.emit(
            ('load_v', r(0), special.NULL)
        )
        return self.result(r(0))

    @visit.d(node.Module)
    def _(self, n):
        # Top-level: compile each expression in order and return the block.
        exprs = n.exprs
        for expr in exprs:
            self.visit(expr)
        return self.block

    @visit.d(node.Block)
    def _(self, n):
        # A block's result is the result of its last expression.
        exprs = n.exprs
        if len(exprs) == 0:
            raise CompileError('Empty Blocks not yet supported.')
        for expr in exprs:
            result = self.visit(expr)
        return self.result(result)
| dacjames/mara-lang | bootstrap/mara/compiler.py | Python | mit | 9,149 | [
"VisIt"
] | 8688f5cecc43cac5375ff46f873671d9db461f1c6f22cbaaef9bc8ab98b912e4 |
#!/usr/bin/env jython
# copyright 2002 the Brothers Wilcox
# <mailto:zooko@zooko.com>
#
# This file is part of OvP.
#
# OvP is open source software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OvP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OvP; if not, write to zooko.com:
# <a mailto:zooko@zooko.com>
#
# See the file COPYING or visit http://www.gnu.org/ for details.
# CVS:
__cvsid = '$Id: CAvCAPieces.py,v 1.1 2002/02/09 22:46:13 zooko Exp $'
import path_fix
# standard Java modules
import java
from java.lang import *
from java.awt import *
from java.awt.event import *
from java.awt.geom import *
from javax.swing import *
from javax.swing.text import *
from java.awt.image import ImageObserver # debug
# standard Python modules
import math
import operator
import time
import traceback
# OvP modules
import Game
import HexBoard
import Images
from util import *
# Jython 2.x predates the bool builtins used here; C-style truth constants.
true = 1
false = 0
class Item:
    """Base class for anything that lives on a hex of the board.

    On construction the item registers itself with its hex and triggers a
    repaint; destroy() undoes both.
    """

    def __init__(self, game, hex):
        """
        @precondition `hex' must not be None.: hex is not None
        """
        assert hex is not None, "precondition: `hex' must not be None."
        self.game = game
        self.hb = self.game.hb
        self.hex = hex
        # Register on the hex's item stack and make the change visible.
        self.hex.items.append(self)
        self.hex.repaint()

    def mouse_pressed(self):
        """Plain items ignore clicks (event not consumed)."""
        return false

    def __repr__(self):
        return "%s <%x> at %s" % (self.__class__.__name__, id(self), self.hex,)

    def repaint(self):
        # Delegate to the hex so the whole tile is redrawn.
        self.hex.repaint()

    def destroy(self):
        """Remove this item from its hex and repaint."""
        self.hex.items.remove(self)
        self.repaint()
class OvPImageObserver(ImageObserver):
    """Forwards AWT image-loading notifications to the hex board."""

    def __init__(self, hb):
        self.hb = hb

    def imageUpdate(self, img, infoflags, x, y, width, height):
        # Delegate to the board so it can redraw as image data arrives;
        # its return value tells AWT whether to keep notifying us.
        return self.hb.imageUpdate(img, infoflags, x, y, width, height)
class Graphical (Item):
    """An Item with an image and a color, drawn scaled into its hex."""

    IMAGEDEFAULT = None   # subclasses may pin a fixed image here
    IMAGEPADDING = 10     # A hackish kludge until scaling works better.

    def __init__(self, game, hex, color=Color.black, image=None):
        """
        @precondition `hex' must not be None.: hex is not None
        """
        assert hex is not None, "precondition: `hex' must not be None."
        # Resolve the image lazily: explicit argument, then the class
        # default, then the cache keyed by class name.
        chosen = image or self.IMAGEDEFAULT
        if not chosen:
            chosen = Images.getImageCache().get(self.__class__.__name__)
        self.image = chosen
        self.color = color
        Item.__init__(self, game, hex)
        self.imageobserver = OvPImageObserver(self.hb)

    def paint(self, g):
        if self.image is None:
            return
        # Destination rect comes from the hex; source rect is the whole image.
        dx, dy, dw, dh = unpack_rect(self.hex.boundingrect)
        sx, sy = 0, 0
        sw = self.image.getWidth(self.imageobserver)
        sh = self.image.getHeight(self.imageobserver)
        pad = self.IMAGEPADDING
        g.drawImage(self.image,
                    pad, pad, dw-pad, dh-pad,
                    sx, sy, sx+sw, sy+sh,
                    Color(0, 0, 0, 0),  # An alpha of 0; does not seem to work.
                    self.imageobserver)
class Cell(Graphical):
    """A stationary colored cell that ages and eventually spawns a ScoutCell."""

    def __init__(self, cellman, creatureman, hex, color):
        Graphical.__init__(self, cellman.game, hex, color=color)
        self.cellman = cellman
        self.creatureman = creatureman
        self.cellman.cells.append(self)
        self.age = 0

    def __repr__(self):
        return "%s %s <%x> at %s" % (self.color, self.__class__.__name__, id(self), self.hex,)

    def paint(self, g):
        gfx = g.create()
        # Fill the hex with the cell's color, then overlay its age.
        gfx.setColor(self.color)
        gfx.fill(self.hb.hexinnerpoly)
        age_text = str(self.age)
        (font, ox, oy,) = self.hb.find_fitting_font_bottom_half(age_text, gfx)
        gfx.setColor(Color.gray)
        gfx.setFont(font)
        gfx.drawString(age_text, ox, oy)

    def get_older(self):
        """Advance the cell's age; past 99 it turns into a ScoutCell."""
        self.age += 1
        if self.age > 99:
            ScoutCell(self.cellman, self.creatureman, self.hex, self.color)
            self.destroy()
        self.repaint()

    def destroy(self):
        assert self in self.hex.items
        Item.destroy(self)
        self.cellman.cells.remove(self)
class Active(Graphical):
def __init__(self):
# print "Active.__init__(%s@%s)" % (self.__class__.__name__, id(self),)
pass
def is_selected(self):
return self.game.selectedcreature is self
def paint(self, g):
assert self in self.hex.items
mg = g.create()
if self.is_selected():
mg.setColor(Color.yellow)
mg.draw(self.hb.hexinnerpoly)
def mouse_pressed(self):
"""
Active items are selectable as actors.
@return `true' if the event was consumed
"""
# Select this item.
self.select()
return true
def select(self):
# print "%s.select()" % self
assert self.actpleft > 0, "self: %s, self.actpleft: %s, traceback.extract_stack(): %s" % (self, self.actpleft, traceback.extract_stack(),)
self.game.selectedcreature = self
self.hex.repaint()
def unselect(self):
# print "%s.unselect(): st: %s" % (self, traceback.extract_stack(),)
assert self.game.selectedcreature is self
self.game.selectedcreature = None
self.hex.repaint()
def handle_cant_act(self):
"""
Ring the bell.
"""
print "I can't do that!"
Toolkit.getDefaultToolkit().beep()
def user_act(self, hex):
"""
The user has selected you, and then selected `hex'. Try to do something to it!
(The default behavior is to call `handle_cant_act()'. Subclasses should override to implement acts.)
"""
return self.handle_cant_act()
class Creature(Active):
def __init__(self, creatureman, hp, actpoints):
"""
@param creatureman the creatureman for this creature
@param hex the hex the creature inhabits
@param actpoints action points (per turn)
"""
Active.__init__(self)
# print "Creature.__init__(%s@%s)" % (self.__class__.__name__, id(self),)
self.creatureman = creatureman
self.actp = actpoints
self.actpleft = 0 # invalid value
creatureman.creatures.append(self)
creatureman.turnman.register_regular_bot_event(self.handle_new_turn, priority="first")
def __repr__(self):
return Item.__repr__(self) + ", actpleft: %s" % self.actpleft
def paint(self, g):
Graphical.paint(self, g)
Active.paint(self, g)
def is_foe(self, item):
return isinstance(item, Creature) and (item.__class__ is not self.__class__)
def handle_new_turn(self):
self.actpleft = self.actp
assert self.actpleft > 0, "self: %s, self.actpleft: %s, traceback.extract_stack(): %s" % (self, self.actpleft, traceback.extract_stack(),)
def pass(self):
"""
Throw away your remaining action points.
@precondition you must be selected
"""
assert self.is_selected()
self.actpleft = 0
self.unselect()
self.creatureman.turnman.select_next_creature_or_end_turn()
def user_act(self, hex):
"""
The user has selected you, and then selected `hex'. Try to do something to it!
"""
if hex is None:
return
# If it is ourself, then ignore this mousepress.
if hex is self.hex:
return
if not hex.is_adjacent(self.hex):
# path algorithm for creatures with > 1 actp
# print "can't leap that far! self: %s, hex: %s, st: %s" % (self, hex, traceback.extract_stack(),)
print "can't leap that far!"
return self.handle_cant_act()
assert self.actpleft >= 0, "self: %s, self.actpleft: %s, traceback.extract_stack(): %s" % (self, self.actpleft, traceback.extract_stack(),)
if self.actpleft <= 0:
print "can't do any more acts this turn"
return self.handle_cant_act()
assert self.actpleft > 0, "self: %s, self.actpleft: %s, traceback.extract_stack(): %s" % (self, self.actpleft, traceback.extract_stack(),)
if hex.is_empty():
self.act(self.move, kwargs={'hex': hex})
return
assert self.actpleft > 0, "self: %s, self.actpleft: %s, traceback.extract_stack(): %s" % (self, self.actpleft, traceback.extract_stack(),)
# Else, you can't do anything to that hex.
assert self in self.hex.items
self.handle_cant_act()
assert self in self.hex.items
def act(self, act, args=(), kwargs={}):
assert self in self.hex.items
assert self.actpleft > 0, "self: %s, self.actpleft: %s, traceback.extract_stack(): %s" % (self, self.actpleft, traceback.extract_stack(),)
self.actpleft -= 1
assert self.actpleft >= 0, "self: %s, self.actpleft: %s, traceback.extract_stack(): %s" % (self, self.actpleft, traceback.extract_stack(),)
apply(act, args, kwargs)
assert self.actpleft >= 0, "self: %s, self.actpleft: %s, traceback.extract_stack(): %s" % (self, self.actpleft, traceback.extract_stack(),)
if self.actpleft <= 0:
self.creatureman.turnman.select_next_creature_or_end_turn()
assert self in self.hex.items
def move(self, hex):
"""
exit the old hex, move onto the top of the stack of the new one, repaint
"""
assert self in self.hex.items
self.hex.items.remove(self)
hex.items.append(self)
self.repaint() # repaint the old
self.hex = hex
self.repaint() # repaint the new
def destroy(self):
self.creatureman.creatures.remove(self)
class ScoutCell(Cell, Creature):
    # A freshly spawned mobile cell; after one turn it settles into a
    # plain (stationary) Cell on its current hex.
    def __init__(self, cellman, creatureman, hex, color):
        # NOTE(review): Cell.__init__'s second parameter is named
        # `creatureman' but receives `creatureman.turnman' here.  The
        # attribute it sets is immediately overwritten by
        # Creature.__init__ below, so this appears harmless — but verify
        # against Cell's expectations.
        Cell.__init__(self, cellman, creatureman.turnman, hex, color)
        Creature.__init__(self, creatureman, hp=1, actpoints=1)
    def paint(self, g):
        assert self in self.hex.items
        mg = g.create()
        # Fill with the cell color and overlay the age in white (a plain
        # Cell uses gray), then let Creature draw the selection highlight.
        mg.setColor(self.color)
        mg.fill(self.hb.hexinnerpoly)
        strage = str(self.age)
        # (font, ox, oy,) = self.hb.find_fitting_font_nw_vertex(strage, mg)
        (font, ox, oy,) = self.hb.find_fitting_font_bottom_half(strage, mg)
        mg.setColor(Color.white)
        mg.setFont(font)
        mg.drawString(strage, ox, oy)
        Creature.paint(self, mg)
    def get_older(self):
        # After a single aging step, replace self with a stationary Cell.
        assert self in self.hex.items
        self.age += 1
        if self.age >= 1:
            Cell(self.cellman, self.creatureman, self.hex, self.color)
            self.destroy()
        self.repaint()
    def destroy(self):
        # Deregister from both parent bookkeeping lists.
        assert self in self.hex.items
        Cell.destroy(self)
        Creature.destroy(self)
| zooko/ogresvpixies | CAvCAPieces.py | Python | gpl-2.0 | 10,331 | [
"VisIt"
] | 5bb386c5de09ad16c95002502840fdf8b8e7a77e0edb154e9ab5c8bcbbe4a8a8 |
""" InfoGetter
Module used to map the policies with the CS.
"""
import copy
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.ResourceStatusSystem.Utilities import RssConfiguration, Utils
__RCSID__ = '$Id: $'
def getPoliciesThatApply( decisionParams ):
  """
  Method that sanitizes the input parameters and returns the policies that
  match them. Matches the input dictionary with the policies configuration in
  the CS. It returns a list of policy dictionaries that matched.

  :param decisionParams: dict describing the element under evaluation
    ( keys among: element, name, elementType, statusType, status, reason,
    tokenOwner, active -- everything else is dropped by the sanitizer )
  :return: S_OK( list of policy dicts with 'name', 'type', 'args' plus the
    metadata from POLICIESMETA ) or the S_ERROR coming from the CS access
  """
  # InfoGetter is being called from SiteInspector Agent
  decisionParams = _sanitizedecisionParams( decisionParams )
  gLogger.debug("Sanitized decisionParams: %s" % str(decisionParams))

  policiesThatApply = []

  # Get policies configuration metadata from CS.
  policiesConfig = RssConfiguration.getPolicies()
  if not policiesConfig[ 'OK' ]:
    return policiesConfig
  policiesConfig = policiesConfig[ 'Value' ]
  gLogger.debug("All policies: %s" %str(policiesConfig))

  # Each policy, has the following format
  # <policyName>
  # \
  #  policyType = <policyType>
  #  matchParams
  #  \
  #   ...
  #  configParams
  #  \
  #   ...

  # Get policies that match the given decisionParameters
  for policyName, policySetup in policiesConfig.items():

    # The parameter policyType replaces policyName, so if it is not present,
    # we pick policyName
    try:
      policyType = policySetup[ 'policyType' ][ 0 ]
    except KeyError:
      policyType = policyName
      #continue

    # The section matchParams is not mandatory, so we set {} as default.
    policyMatchParams = policySetup.get( 'matchParams', {} )
    gLogger.debug("matchParams of %s: %s" %(policyName, str(policyMatchParams)))

    # FIXME: make sure the values in the policyConfigParams dictionary are typed !!
    policyConfigParams = {}
    #policyConfigParams = policySetup.get( 'configParams', {} )

    policyMatch = Utils.configMatch( decisionParams, policyMatchParams )
    gLogger.debug("PolicyMatch for decisionParams %s: %s" %(decisionParams, str(policyMatch)))
    policyFilter = _filterPolicies( decisionParams, policyMatchParams )

    #WARNING: we need an additional filtering function when the matching
    #is not straightforward (e.g. when the policy specify a 'domain', while
    #the decisionParams has only the name of the element)
    if policyMatch and policyFilter:
      policiesThatApply.append( ( policyName, policyType, policyConfigParams ) )

  gLogger.debug("policies that apply: %s" %str(policiesThatApply))

  policiesToBeLoaded = []
  # Gets policies parameters from code.
  for policyName, policyType, _policyConfigParams in policiesThatApply:

    try:
      configModule = Utils.voimport( 'DIRAC.ResourceStatusSystem.Policy.Configurations' )
      policies = copy.deepcopy( configModule.POLICIESMETA )
      policyMeta = policies[ policyType ]
    except KeyError:
      # A policy type without entry in POLICIESMETA is silently skipped.
      continue

    # We are not going to use name / type anymore, but we keep them for debugging
    # and future usage.
    policyDict = { 'name' : policyName,
                   'type' : policyType,
                   'args' : {}
                 }

    # args is one of the parameters we are going to use on the policies. We copy
    # the defaults and then we update if with whatever comes from the CS.
    policyDict.update( policyMeta )

    policiesToBeLoaded.append( policyDict )

  return S_OK( policiesToBeLoaded )
def getPolicyActionsThatApply( decisionParams, singlePolicyResults, policyCombinedResults ):
  """
  Method that sanitizes the input parameters and returns the policies actions
  that match them. Matches the input dictionary with the policy actions
  configuration in the CS. It returns a list of policy actions names that
  matched.

  :param decisionParams: dict describing the element under evaluation
  :param singlePolicyResults: list of per-policy result dicts; each is
    expected to carry [ 'Policy' ][ 'name' ] and [ 'Status' ]
  :param policyCombinedResults: dict with the combined outcome ( Status,
    Reason, ... ), matched against the CS 'combinedResult' section
  :return: S_OK( [ ( actionName, actionType ), ... ] ) or S_ERROR
  """
  decisionParams = _sanitizedecisionParams( decisionParams )

  policyActionsThatApply = []

  # Get policies configuration metadata from CS.
  policyActionsConfig = RssConfiguration.getPolicyActions()
  if not policyActionsConfig[ 'OK' ]:
    return policyActionsConfig
  policyActionsConfig = policyActionsConfig[ 'Value' ]

  # Let's create a dictionary to use it with configMatch
  policyResults = {}
  for policyResult in singlePolicyResults:
    try:
      policyResults[ policyResult[ 'Policy' ][ 'name' ] ] = policyResult[ 'Status' ]
    except KeyError:
      # Malformed single-policy results are skipped silently.
      continue

  # Get policies that match the given decissionParameters
  for policyActionName, policyActionConfig in policyActionsConfig.items():

    # The parameter policyType is mandatory. If not present, we pick policyActionName
    try:
      policyActionType = policyActionConfig[ 'actionType' ][ 0 ]
    except KeyError:
      policyActionType = policyActionName
      #continue

    # We get matchParams to be compared against decisionParams
    policyActionMatchParams = policyActionConfig.get( 'matchParams', {} )
    policyMatch = Utils.configMatch( decisionParams, policyActionMatchParams )
    # policyMatch = Utils.configMatch( decisionParams, policyActionConfig )
    if not policyMatch:
      continue

    # Let's check single policy results
    # Assumed structure:
    # ...
    # policyResults
    # <PolicyName> = <PolicyResult1>,<PolicyResult2>...
    policyActionPolicyResults = policyActionConfig.get( 'policyResults', {} )
    policyResultsMatch = Utils.configMatch( policyResults, policyActionPolicyResults )
    if not policyResultsMatch:
      continue

    # combinedResult
    # \Status = X,Y
    # \Reason = asdasd,asdsa
    policyActionCombinedResult = policyActionConfig.get( 'combinedResult', {} )
    policyCombinedMatch = Utils.configMatch( policyCombinedResults, policyActionCombinedResult )
    if not policyCombinedMatch:
      continue

    #policyActionsThatApply.append( policyActionName )
    # They may not be necessarily the same
    policyActionsThatApply.append( ( policyActionName, policyActionType ) )

  return S_OK( policyActionsThatApply )
######### Utilities ###########
def _sanitizedecisionParams( decisionParams ):
""" Function that filters the input parameters. If the input parameter keys
are no present on the "params" tuple, are not taken into account.
"""
# active is a hook to disable the policy / action if needed
params = ( 'element', 'name', 'elementType', 'statusType', 'status', 'reason', 'tokenOwner', 'active' )
sanitizedParams = {}
for key in params:
if key in decisionParams:
# We can get rid of this now
# In CS names are with upper case, capitalize them here
# sanitizedParams[ key[0].upper() + key[1:] ] = decisionParams[ key ]
sanitizedParams[ key ] = decisionParams[ key ]
return sanitizedParams
def _getComputingElementsByDomainName( targetDomain = None ):
  """
  WARNING: TO ADD TO CSHelpers
  Gets all computing elements from /Resources/Sites/<domain>/<site>/CEs,
  restricted to the domains listed in `targetDomain`.

  :param targetDomain: iterable of CS domain names to restrict the search to
    ( None is treated as an empty selection )
  :return: S_OK( [ ceName, ... ] ) with duplicates removed, or S_ERROR
  """
  _basePath = 'Resources/Sites'

  ces = []

  # FIX: the advertised default ( None ) used to crash below on
  # set( targetDomain ); normalise it to an empty selection instead.
  if targetDomain is None:
    targetDomain = []

  domainNames = gConfig.getSections( _basePath )
  if not domainNames[ 'OK' ]:
    return S_ERROR("No domain names have been specified on the CS")
  domainNames = domainNames[ 'Value' ]

  # Warn about requested domains the CS does not know about.
  unknownDomains = list( set(targetDomain) - set(domainNames) )
  if unknownDomains:
    gLogger.warn( "Domains %s belong to the policy parameters but not to the CS domains" % unknownDomains )

  knownDomains = list( set(domainNames) & set(targetDomain) )
  if not knownDomains:
    gLogger.warn("Policy parameters domain names do not match with any CS domain names")
    return S_OK([])

  for domainName in knownDomains:
    gLogger.info( "Fetching the list of Computing Elements belonging to domain %s" % domainName )
    domainSites = gConfig.getSections( '%s/%s' % ( _basePath, domainName ) )
    if not domainSites[ 'OK' ]:
      return domainSites
    domainSites = domainSites[ 'Value' ]

    for site in domainSites:
      siteCEs = gConfig.getSections( '%s/%s/%s/CEs' % ( _basePath, domainName, site ) )
      if not siteCEs[ 'OK' ]:
        #return siteCEs
        # Best-effort: log the failure and keep scanning remaining sites.
        gLogger.error( siteCEs[ 'Message' ] )
        continue
      siteCEs = siteCEs[ 'Value' ]
      ces.extend( siteCEs )

  # Remove duplicated ( just in case )
  ces = list( set ( ces ) )
  gLogger.info( "List of CEs: %s" % str( ces ) )

  return S_OK( ces )
def _filterPolicies( decisionParams, policyMatchParams ):
  """
  Method that checks if the given policy doesn't meet certain conditions.
  Returns False when the policy has to be excluded, True otherwise.
  """
  # Some policies may apply or not also depending on the VO's domain;
  # e.g. 'CEAvailabilityPolicy' can be applied only if the CE is inside LCG.
  # The check only makes sense when we know which element is evaluated.
  if 'elementType' not in decisionParams or 'name' not in decisionParams:
    return True

  elementType = decisionParams['elementType']
  name = decisionParams['name']

  # Only CE elements restricted by a 'domain' in the policy need filtering.
  if not elementType or elementType.upper() != 'CE' or 'domain' not in policyMatchParams:
    return True

  #WARNING: policyMatchParams['domain'] is a list of domains
  domains = policyMatchParams['domain']
  result = _getComputingElementsByDomainName( targetDomain = domains )

  if not result['OK']:
    gLogger.warn( "unable to verify if ComputingElement %s is in domains %s" % ( name, domains ) )
    return False

  # Verify that the given CE belongs to one of the requested domains.
  if name not in result['Value']:
    gLogger.info( "ComputingElement %s NOT found in domains %s" % ( name, domains ) )
    return False

  gLogger.info( "ComputingElement %s found in domains %s" % ( name, domains ) )
  return True
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| Andrew-McNab-UK/DIRAC | ResourceStatusSystem/Utilities/InfoGetter.py | Python | gpl-3.0 | 9,530 | [
"DIRAC"
] | a002041374ca5ab9c8bb038abd7561552bd7e369b832abadccaab6e27c34ba71 |
__author__ = 'AndriiLab'
""" Script calculates ratio between two channels ch1/ch2 from image series.
Next it compares two group of series (e.g. two proteins) and
out xls file with results of comparison and graphs with plotted data"""
import cv2
import glob
from matplotlib import pyplot
import numpy
from openpyxl import load_workbook
from openpyxl import Workbook
import os
from PIL import Image
def xlsFileWriter(filename, sheetname, dataname, x, y, yerr):
    """stands for xls file writing:
    filename - name of your xls file (without the .xlsx extension)
    sheetname - name of your xls sheetname
    dataname - name of grouped data (e.g. Protein 1 Experiment 1)
    x - list with X'es
    y - list with Y'es
    yerr - list with Y-errors
    Output: xls file with results; the global column cursor `col` is
    advanced by 4 so consecutive calls write their triples side by side."""
    global col
    #try to open existing workbook
    try:
        wb = load_workbook("%s.xlsx" %filename)
    except Exception:
        # FIX: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.  If the workbook cannot be
        # opened for any ordinary reason, start a fresh one.
        wb = Workbook()
    if sheetname in wb.get_sheet_names():
        ws = wb.get_sheet_by_name(sheetname)
    else:
        ws = wb.create_sheet(title=sheetname)
    # Header rows: dataset name on row 1, column labels on row 2.
    ws.cell(row=1, column=col, value="Folder name")
    ws.cell(row=1, column=col+1, value=dataname)
    #enter here label of X's values, Y's values
    ws.cell(row=2, column=col, value="X's name")
    ws.cell(row=2, column=col+1, value="Y's name")
    ws.cell(row=2, column=col+2, value="SD")
    # Data triples start at row 3: x, y, and y-error in adjacent columns.
    for r, d1 in enumerate(x, 3):
        ws.cell(row=r, column=col, value=d1)
    for r, d2 in enumerate(y, 3):
        ws.cell(row=r, column=col+1, value=d2)
    for r, d3 in enumerate(yerr, 3):
        ws.cell(row=r, column=col+2, value=d3)
    wb.save("%s.xlsx" %filename)
    col+=4
    return True
def calcRatio(imgch1, imgch2, prot):
    """calculates ratio between two images
    imgch1 - path of the image with channel 1
    imgch2 - path of the image with channel 2
    prot - switcher between two formulas for convetion from intensity to desired dimension
    Output: appends mean and stddev of ratio ch2/ch1 to the module-level
    `mean` and `stddev` lists and returns them.
    NOTE(review): despite the docstring saying ch1/ch2, the code below
    computes ch2/ch1 -- confirm which is intended."""
    global mean
    global stddev
    #converting channel 1 and 2 arrays to float data type
    ch1 = numpy.array(Image.open(imgch1)).astype(float)
    ch2 = numpy.array(Image.open(imgch2)).astype(float)
    #uncommit if you want to make lens normalization (removing dust, different intensity in regions etc)
    #you need to provide blank images made on your system in YOUR_PATH
    #imgch1corr = "YOUR_PATH"
    #imgch2corr = "YOUR_PATH"
    #ch1corr = numpy.array(Image.open(imgch1corr))/numpy.amax(numpy.array(Image.open(imgch1corr)))
    #ch2corr = numpy.array(Image.open(imgch2corr))/numpy.amax(numpy.array(Image.open(imgch2corr)))
    #ch1 = ch1/ch1corr
    #ch2 = ch2/ch2corr
    #filtering noise with Gaussian blur and Otsu thresholding
    # The binary mask (0/1) zeroes out background pixels in both channels.
    mask = cv2.imread(imgch1, 0)
    blur = cv2.GaussianBlur(mask,(5,5),0)
    thresh, mask = cv2.threshold(blur,0,1,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    ch1 = ch1*mask
    ch2 = ch2*mask
    #removing 0es and max values (12bit=4095max)
    # Saturated (4095) and masked-out (0) pixels become NaN so they drop
    # out of the mean/SD via the masked array below.
    if 4095 in ch1:
        ch1[ch1==4095] = numpy.nan
    if 4095 in ch2:
        ch2[ch2==4095] = numpy.nan
    if 0 in ch1:
        ch1[ch1==0] = numpy.nan
    if 0 in ch2:
        ch2[ch2==0] = numpy.nan
    #calculating ratio 405/488 (ch2/ch1)
    ratio = ch2/ch1
    #uncommit if you want additionaly filter your data manually
    #mincutoff = 0.1
    #maxcutoff = 2.0
    #ratio[ratio < mincutoff] = numpy.nan
    #ratio[ratio > maxcutoff] = numpy.nan
    #for convetion from intensity to desired dimension enter formulae
    if prot =="sbb":
        ratio = ratio*1 #enter here formula for protein 1
    else:
        ratio = ratio*2 #enter here formula for protein 2
    #calculating mean and SD of the ratio
    ratio_wo_nan = numpy.ma.masked_array(ratio,numpy.isnan(ratio))
    mean.append(numpy.mean(ratio_wo_nan))
    stddev.append(numpy.std(ratio_wo_nan))
    return mean, stddev
def parseFolder(imgfolder):
"""parses your folder for folders with images and sorts them as images for Protein 1
and Protein 2. Next, calculates ratio between ch1 and ch2 for image series for this proteins.
Plots data and saves xls file with results
imgfolder - folder for parsing images
Output: xls file with results and graph"""
global mean
global stddev
global col
datalistsbb = []
datalistrd = []
#retriving list of subdirs with data
dirlist = next(os.walk(imgfolder))[1]
#parsing for files with data
for folder in dirlist:
os.chdir(os.path.join(imgfolder+folder))
imgch1 = []
imgch2 = []
mean = []
stddev = []
for file in glob.glob("C1*"): #you can change "C1" to desired channel 1 file name
imgch1.append(file)
for file in glob.glob("C[2-3]*"): #you can change "C[2-3]" to desired channel 2 file name
imgch2.append(file)
#calculating ratio for selected folder
for i in imgch1:
if "SbB" in folder: #you can change "SbB" to desired Protein 1 folder name
calcRatio(i, imgch2[imgch1.index(i)],"sbb")
elif "Rd" in folder: #you can change "Rd" to desired Protein 2 folder name
calcRatio(i, imgch2[imgch1.index(i)],"rd")
else:
print "Protein not identified. Skipping folder."
if "SbB" in folder:
datalistsbb.append([folder, mean, stddev]) #writing data file for protein 1
else:
datalistrd.append([folder, mean, stddev]) #writing data file for protein 2
#showing results as graph
graprhheight = 2
step = 1
#set ymin and ymax
ymin = 5.0
ymax = 7.5
col = 1
os.chdir(imgfolder)
for i in datalistrd:
startpoint = float(i[0][-3:])
pyplot.subplot(graprhheight,1,1), \
pyplot.errorbar(numpy.linspace(startpoint, len(i[1])*step+startpoint-step,len(i[1])), i[1], yerr=i[2]),
pyplot.ylim(ymin, ymax)
pyplot.title("Protein 1")
xlsFileWriter("results", "Protein 1", i[0],
numpy.linspace(startpoint, len(i[1])*step+startpoint-step,len(i[1])), i[1], i[2])
col = 1
for i in datalistsbb:
startpoint = float(i[0][-3:])
pyplot.subplot(graprhheight,1,2), \
pyplot.errorbar(numpy.linspace(startpoint, len(i[1])*step+startpoint-step,len(i[1])), i[1], yerr=i[2]),
pyplot.ylim(ymin, ymax)
pyplot.title("Protein 2")
xlsFileWriter("results", "Protein 1", i[0],
numpy.linspace(startpoint, len(i[1])*step+startpoint-step,len(i[1])), i[1], i[2])
pyplot.xlabel("X's name")
pyplot.ylabel("Y's name")
pyplot.show()
return True
if __name__ == "__main__":
    # Entry point: point YOUR_PATH at the folder containing the per-series
    # image subfolders (with a trailing path separator).
    parseFolder("YOUR_PATH") #change YOUR_PATH with desired folder with subfolders with images
| AndriiLab/ImageAnalysisScripts | ratioBetweenTwoChannelsBetweenTwoProteins.py | Python | gpl-2.0 | 6,715 | [
"Gaussian"
] | cde94eb394d05305f490d3d5e890b850c70c4d19e387eaaa11826c85aa9bcff8 |
import random
import string
from django.db import transaction
from django.shortcuts import render, redirect
import haikunator
from .models import Room
def about(request):
    """Render the static 'about' page for the chat app."""
    return render(request, "chat/about.html")
def new_room(request):
    """
    Randomly create a new room, and redirect to it.
    """
    # Keep drawing haikunator labels until one is free; the whole
    # check-then-create runs inside a transaction to narrow the race window.
    room = None
    while room is None:
        with transaction.atomic():
            label = haikunator.haikunate()
            if not Room.objects.filter(label=label).exists():
                room = Room.objects.create(label=label)
    return redirect(chat_room, label=label)
def chat_room(request, label):
    """
    Room view - show the room, with latest messages.
    The template for this view has the WebSocket business to send and stream
    messages, so see the template for where the magic happens.
    """
    # If the room with the given label doesn't exist, automatically create it
    # upon first visit (a la etherpad).
    room, created = Room.objects.get_or_create(label=label)
    # Show the 10 most recent messages, newest first.
    # NOTE(review): the original comment claimed "last 50 ... most-recent-last",
    # but the query takes 10 and never reverses -- confirm what the template expects.
    messages = (room.messages.order_by('-timestamp')[:10])
    return render(request, "chat/room.html", {
        'room': room,
        'messages': messages,
    })
| cloud-fire/signalnews | chat/views.py | Python | bsd-3-clause | 1,271 | [
"VisIt"
] | 8209cc99bb920d64c51cd57640c5984cd2b942ddd2e2f744b6fa3150b6937fed |
#!/usr/bin/env python
'''
CREATED:2014-01-17 16:30:07 by Brian McFee <brm2132@columbia.edu>
Compute hierarchical segmentation evaluation metrics
Usage:
./segment_hier_eval.py -r TRUTH_LEVEL1.TXT [TRUTH_LEVEL2.TXT ...] \
-e PREDICTION_LEVEL1.TXT [PREDICTION_LEVEL2.TXT ...] \
[-o output.json] \
[-w WINDOW_SIZE]
'''
from __future__ import print_function
import argparse
import sys
import eval_utilities
import mir_eval
from os.path import basename
from mir_eval.io import load_labeled_intervals
def process_arguments():
    '''Argparse function to get the program parameters.

    Returns a dict with keys:
      window         - float window length for t-measures (default 15.0)
      output_file    - str path for json results, or None
      reference_file - list of reference .lab paths, top to bottom
      estimated_file - list of estimated .lab paths, top to bottom
    '''

    parser = argparse.ArgumentParser(description='mir_eval hierarchical '
                                                 'segmentation evaluation')

    parser.add_argument('-w',
                        '--window',
                        dest='window',
                        # FIX: was the string "15.0"; a float literal gives the
                        # same result (argparse runs string defaults through
                        # `type`) without relying on that conversion.
                        default=15.0,
                        type=float,
                        help='Window length for t-measures')

    parser.add_argument('-o',
                        dest='output_file',
                        default=None,
                        type=str,
                        action='store',
                        help='Store results in json format')

    parser.add_argument('-r',
                        '--reference',
                        dest='reference_file',
                        nargs='+',
                        type=str,
                        action='store',
                        help='path to the reference annotation(s) in '
                             '.lab format, ordered from top to bottom of '
                             'the hierarchy')

    parser.add_argument('-e',
                        '--estimate',
                        dest='estimated_file',
                        nargs='+',
                        type=str,
                        action='store',
                        help='path to the estimated annotation(s) in '
                             '.lab format, ordered from top to bottom of '
                             'the hierarchy')

    return vars(parser.parse_args(sys.argv[1:]))
if __name__ == '__main__':
    # Get the parameters
    parameters = process_arguments()

    # load the data
    ref_files = parameters['reference_file']
    est_files = parameters['estimated_file']

    # Each level of the hierarchy is a (intervals, labels) pair.
    ref = [load_labeled_intervals(_) for _ in ref_files]
    est = [load_labeled_intervals(_) for _ in est_files]

    ref_intervals = [seg[0] for seg in ref]
    ref_labels = [seg[1] for seg in ref]
    est_intervals = [seg[0] for seg in est]
    est_labels = [seg[1] for seg in est]

    # Compute all the scores
    scores = mir_eval.hierarchy.evaluate(ref_intervals, ref_labels,
                                         est_intervals, est_labels,
                                         window=parameters['window'])
    # Only the top-level file of each hierarchy is shown in the banner.
    print("{} [...] vs. {} [...]".format(
        basename(parameters['reference_file'][0]),
        basename(parameters['estimated_file'][0])))
    eval_utilities.print_evaluation(scores)

    if parameters['output_file']:
        print('Saving results to: ', parameters['output_file'])
        eval_utilities.save_results(scores, parameters['output_file'])
| bmcfee/mir_eval | evaluators/segment_hier_eval.py | Python | mit | 3,231 | [
"Brian"
] | 8a469f67db325651acc2d085c9f4c33d616b33119efd6d2433d97e6006364fba |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `visits` app: drops the
    per-method payment breakdown columns and re-declares the total with
    an explicit verbose name."""

    dependencies = [
        ('visits', '0002_auto_20151112_1103'),
    ]

    operations = [
        # Card/cash breakdown is removed; only the total amount remains.
        migrations.RemoveField(
            model_name='visit',
            name='card_payment_amount',
        ),
        migrations.RemoveField(
            model_name='visit',
            name='cash_payment_amount',
        ),
        migrations.AlterField(
            model_name='visit',
            name='total_payment_amount',
            field=models.DecimalField(max_digits=8, decimal_places=2, verbose_name='Total Payment Amount'),
        ),
    ]
| dogukantufekci/supersalon | supersalon/visits/migrations/0003_auto_20151112_1121.py | Python | bsd-3-clause | 701 | [
"VisIt"
] | 7d9e2d5d8ac8da7a152bfbfb2aaab6a2fac246f1cec79ed69f806484fe7b02a4 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
import random
from flask import Flask, request, redirect
import combos
app = Flask(__name__)
def valid_link_from(dirty_link):
    """Return a normalized, reachable URL or None.

    Percent-decodes the input, prepends 'http://' when no scheme is
    present, then issues an HTTP HEAD request; links answering with a
    status >= 400 or failing to resolve are rejected.
    """
    link = urllib2.unquote(dirty_link)
    # FIX: was `scheme is ''` -- identity comparison with a string literal
    # only works by accident of CPython string interning; compare by value.
    if urllib2.urlparse.urlparse(link).scheme == '':
        link = 'http://' + link
    try:
        req = urllib2.Request(link)
        req.get_method = lambda:'HEAD'  # HEAD avoids downloading the body
        response = urllib2.urlopen(req)
        if response.getcode() >= 400:
            return None
    except urllib2.URLError:
        return None
    return link
@app.route('/shorten', methods=['POST'])
def shorten():
    """Create a short combo for the URL given in the `link` query arg.

    Returns the combo as text/plain with 201 on success, 400 otherwise.
    """
    link = valid_link_from(request.args['link'])
    if link:
        combo = combos.create_unique()
        combos.add_link(link, combo)
        return combo, 201, {'Content-Type' : 'text/plain'}
    else:
        # NOTE(review): also reached when a link *was* given but failed
        # validation, yet the message claims none was given -- confirm intent.
        return "No link given.", 400
@app.route('/<combo>', methods=['GET'])
def visit(combo):
    """Resolve a combo: 301-redirect to its URL, or 404 when unknown."""
    if not combos.has(combo):
        return "No url for the given combo.", 404
    return redirect(combos.link_for(combo), 301)
if __name__ == "__main__":
    # Development entry point: run Flask's built-in server.
    app.run()
| weeezes/ComboURL | main.py | Python | mit | 1,139 | [
"VisIt"
] | 3dec046da5ed8a5e4a7705df5cdd667959a77840c09022ddeb43b4d1239e46b0 |
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2011, Regents of the University of California
# All rights reserved.
"""
lttree_check.py
The original template file format supports any variable types or file names.
However if you plan to process template files using lttree.py to create
LAMMPS-readable input/data files, then variables and file names obey certain
naming conventions. This code attempts to insure these conventions are obeyed
and to make sure that necessary variables are defined.
-- This code checks static variables (@) and basic LAMMPS syntax --
This program makes an attempt to check that the variables and file names
which appear in an "lttree" file are not mispelled (or miscapitlised).
It also attempts to check that LAMMPS syntax conventions are obeyed.
(It checks that the appropriate type of variable is located in each column).
It also attempts to check that all of the needed coeffs are defined.
-- This code does NOT check instance variables ($) --
This code does not check to make sure that all references to instance variables
(such as $atom, $bond, $angle, $dihedral, $improper or $mol variables) are valid
This means a user's input script command (like the "group" command) could refer
to an $atom or $mol which was never defined, and this code would not detect it.
(Why: Checking for instance variables requires building the entire instance tree
and checking references uses up additional memory after that. I do not do this
because memory is often very scarce after building the instance tree.)
Instead, we could check for these kinds of errors when post-processing of
the files generated by lttree.py or moltemplate.sh.
-- This is not the pretiest code I've ever written. --
"""
import sys
#from ttree import *
from lttree_styles import *
from lttree import *
from ttree_lex import InputError
# Refuse to run on interpreters older than 2.7.
# NOTE(review): this is a lexicographic string comparison, not a real
# version compare -- it works for 2.x/3.x but is fragile in general.
if sys.version < '2.7':
    raise InputError('Error: Alas, you must upgrade to a newever version of python.')

#g_no_check_msg = \
#    "(If this error message is wrong, and/or you would like to continue anyway,\n"+\
#    "try running moltemplate again using the \"-nocheck\" command-line-argument.)\n"

# Hint appended to error messages telling the user how to bypass checking.
g_no_check_msg = \
    '(To continue anyway, run moltemplate using the \"-nocheck\" argument.)\n'
def CheckCommonVarNames(prefix, descr_str, suffix, srcloc):
    """ Check the name of variables in a lttree-file to confirm
    that they follow the conventions used by lttree.
    Almost any variable/category name is permitted, except for
    names which closely match those reserved by lttree.

    prefix    - variable prefix (unused here; kept for the caller's API)
    descr_str - the variable descriptor string to be split into category/leaf
    suffix    - variable suffix (unused here; kept for the caller's API)
    srcloc    - source location (infile, lineno) used in error messages

    Raises InputError when a category name differs from a reserved one
    only in capitalization/spelling; emits a warning for unknown names.
    """
    cat_name, cat_ptkns, leaf_ptkns = \
        DescrToCatLeafPtkns(descr_str,
                            srcloc)
    # Each branch catches a near-miss of one reserved category name
    # (case-insensitive match that is not an exact match => probable typo).
    if (cat_name.lower()=='mol'):
        if (cat_name != 'mol'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Perhaps you meant \"mol\"?')
    elif (cat_name.lower()=='group'):
        if (cat_name != 'group'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Perhaps you meant \"group\"?')
    elif (cat_name.lower()=='fix'):
        if (cat_name != 'fix'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Use \"fix\" instead.')
    elif (cat_name.lower()=='atom'):
        if (cat_name != 'atom'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Illegal lttree variable category: \"'+cat_name+'\"\n'+
                             'Use \"atom\" instead.')
    elif (cat_name.lower()=='bond'):
        if (cat_name != 'bond'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Use \"bond\" instead.')
    elif (cat_name.lower()=='angle'):
        if (cat_name != 'angle'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Use \"angle\" instead.')
    elif (cat_name.lower()=='dihedral'):
        if (cat_name != 'dihedral'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Use \"dihedral\" instead.')
    elif (cat_name.lower()=='improper'):
        if (cat_name != 'improper'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Use \"improper\" instead.')
    else:
        # Unknown categories are allowed; just warn the user.
        sys.stderr.write('-----------------------------------------------------\n'+
                         'WARNING: in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                         '         Unrecognised template variable category: \"'+cat_name+'\"\n'+
                         '-----------------------------------------------------\n')
def CheckDataFileNames(filename,
                       srcloc,
                       write_command,
                       fnames_found):
    """
    Check a file name beginning with data_prefix (eg "Data ") which appeared
    as the target of a write() or write_once() command, against the data-file
    names reserved by lttree (data_atoms, data_masses, data_bonds, ...).

    Raises InputError when:
      * the name overlaps closely with a reserved name but does not match it
        exactly (probably a typo or a capitalization mistake), or
      * a reserved file is written with the wrong command
        (write() where write_once() is expected, or vice versa).

    Arguments:
      filename      -- the file name appearing in the write command
      srcloc        -- source location (has .infile and .lineno) for messages
      write_command -- either 'write' or 'write_once'
      fnames_found  -- set of file names seen so far (kept for interface
                       compatibility; the caller, CheckCommonFileNames,
                       records the name before delegating here)
    """
    section_name = filename[len(data_prefix):]
    sec = section_name.lower()

    # One entry per branch of the original if/elif chain (order preserved,
    # because earlier patterns take precedence).  Each entry is:
    #   (match_mode, patterns, reserved_name, expected_write_command)
    # match_mode 'exact'  -> sec must equal one of the patterns
    # match_mode 'prefix' -> sec must start with one of the patterns
    # (the 'torision' spellings below guard against a common typo)
    reserved_sections = [
        ('exact', ('atom', 'atoms'), data_atoms, 'write'),
        ('exact', ('velocities', 'velocity'), data_velocities, 'write'),
        ('exact', ('mass', 'masses'), data_masses, 'write_once'),
        ('exact', ('ellipsoids', 'ellipsoid', 'elipsoids', 'elipsoid'),
         data_ellipsoids, 'write'),
        ('exact', ('triangle', 'triangles'), data_triangles, 'write'),
        ('exact', ('line', 'lines'), data_lines, 'write'),
        ('prefix', ('pair coef', 'pair_coef', 'paircoef',
                    'pair by type', 'pair bytype', 'pair_by_type',
                    'pair_bytype', 'pairbytype'),
         data_pair_coeffs, 'write_once'),
        ('prefix', ('bond coef', 'bond_coef', 'bondcoef'),
         data_bond_coeffs, 'write_once'),
        ('prefix', ('angle coef', 'angle_coef', 'anglecoef'),
         data_angle_coeffs, 'write_once'),
        ('prefix', ('dihedral coef', 'dihedral_coef', 'dihedralcoef'),
         data_dihedral_coeffs, 'write_once'),
        ('prefix', ('improper coef', 'improper_coef', 'impropercoef'),
         data_improper_coeffs, 'write_once'),
        # -- class2 data sections --
        ('prefix', ('bondbond coef', 'bondbond_coef', 'bondbondcoef'),
         data_bondbond_coeffs, 'write_once'),
        ('prefix', ('bondangle coef', 'bondangle_coef', 'bondanglecoef'),
         data_bondangle_coeffs, 'write_once'),
        ('prefix', ('middlebondtorsion coef', 'middlebondtorsion_coef',
                    'middlebondtorsioncoef', 'middlebondtorision coef',
                    'middlebondtorision_coef', 'middlebondtorisioncoef'),
         data_middlebondtorsion_coeffs, 'write_once'),
        ('prefix', ('endbondtorsion coef', 'endbondtorsion_coef',
                    'endbondtorsioncoef', 'endbondtorision coef',
                    'endbondtorision_coef', 'endbondtorisioncoef'),
         data_endbondtorsion_coeffs, 'write_once'),
        ('prefix', ('angletorsion coef', 'angletorsion_coef',
                    'angletorsioncoef', 'angletorision coef',
                    'angletorision_coef', 'angletorisioncoef'),
         data_angletorsion_coeffs, 'write_once'),
        ('prefix', ('angleangletorsion coef', 'angleangletorsion_coef',
                    'angleangletorsioncoef', 'angleangletorision coef',
                    'angleangletorision_coef', 'angleangletorisioncoef'),
         data_angleangletorsion_coeffs, 'write_once'),
        ('prefix', ('bondbond13 coef', 'bondbond13_coef', 'bondbond13coef'),
         data_bondbond13_coeffs, 'write_once'),
        ('prefix', ('angleangle coef', 'angleangle_coef', 'angleanglecoef'),
         data_angleangle_coeffs, 'write_once'),
        ('exact', ('angles by type', 'angles bytype', 'angles_by_type',
                   'angles_bytype', 'anglesbytype', 'angle by type',
                   'angle bytype', 'angle_by_type', 'angle_bytype',
                   'anglebytype'),
         data_angles_by_type, 'write_once'),
        ('exact', ('dihedrals by type', 'dihedrals bytype',
                   'dihedrals_by_type', 'dihedrals_bytype',
                   'dihedralsbytype', 'dihedral by type',
                   'dihedral bytype', 'dihedral_by_type',
                   'dihedral_bytype', 'dihedralbytype'),
         data_dihedrals_by_type, 'write_once'),
        ('exact', ('impropers by type', 'impropers bytype',
                   'impropers_by_type', 'impropers_bytype',
                   'impropersbytype', 'improper by type',
                   'improper bytype', 'improper_by_type',
                   'improper_bytype', 'improperbytype'),
         data_impropers_by_type, 'write_once'),
        ('exact', ('bonds', 'bond'), data_bonds, 'write'),
        ('exact', ('angles', 'angle'), data_angles, 'write'),
        ('exact', ('dihedrals', 'dihedral'), data_dihedrals, 'write'),
        ('exact', ('impropers', 'improper'), data_impropers, 'write'),
    ]

    for match_mode, patterns, reserved, expected_cmd in reserved_sections:
        if match_mode == 'exact':
            matched = (sec in patterns)
        else:
            matched = sec.startswith(patterns)  # tuple of allowed prefixes
        if not matched:
            continue
        if filename != reserved:
            err_msg = ('Probable typo in ' +
                       ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
                       'Output file name (\"'+filename+'\") does not match,\n'
                       'yet overlaps closely with reserved lttree-file name.\n'
                       'Perhaps you meant \"'+reserved+'\"?')
            if ((reserved == data_pair_coeffs) and
                ((sec.find('by type') != -1) or
                 (sec.find('by_type') != -1) or
                 (sec.find('bytype') != -1))):
                # "pair" interactions have no per-instance variant
                err_msg += ('\n (Note: "pair" parameters are always assigned by type.\n' +
                            ' There\'s no need to specify \"by type\")')
            raise InputError(err_msg)
        if expected_cmd == 'write_once':
            wrong_cmd = (write_command != 'write_once')
        else:
            # NOTE: only an explicit 'write_once' is flagged for the
            # per-instance sections (matching the original behavior)
            wrong_cmd = (write_command == 'write_once')
        if wrong_cmd:
            raise InputError('Probable typo in ' +
                             ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
                             'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
                             'want to use the '+write_command+'() command with \"'+filename+'\".\n'
                             'You should probably use '+expected_cmd+'(\"'+filename+'\") instead.\n')
        return

    # Periodic-boundary section: two legal spellings (data_boundary is
    # preferred; data_pbc still works but triggers a deprecation warning).
    if sec in ('box boundaries', 'box boundary', 'boundaries', 'boundary',
               'boundary conditions', 'periodic boundaries',
               'periodic boundary conditions', 'periodic_boundaries',
               'periodic_boundary_conditions', 'pbc'):
        if ((filename != data_boundary) and
            (filename != data_pbc)):
            raise InputError('Probable typo in ' +
                             ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
                             'Output file name (\"'+filename+'\") does not match,\n'
                             'yet overlaps closely with reserved lttree-file name.\n'
                             'Perhaps you meant \"'+data_boundary+'\"?\n'
                             '(Specify periodic boundary conditions this way.)')
        elif write_command != 'write_once':
            raise InputError('Probable typo in ' +
                             ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
                             'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
                             'want to use the '+write_command+'() command with \"'+filename+'\".\n'
                             'You should probably use write_once(\"'+filename+'\") instead.\n')
        elif filename == data_pbc:
            # (fixed spelling: "depreciated" -> "deprecated")
            sys.stderr.write('WARNING: write_once(\"'+data_pbc+'\") is deprecated.\n'
                             ' Use write_once(\"'+data_boundary+'\") instead.\n')
def CheckCommonFileNames(filename,
                         srcloc,
                         write_command,
                         filenames_found):
    """
    Check one write()/write_once() target file name against the file
    names reserved by lttree, and record it in the filenames_found set.
    Almost any file name is permitted; an InputError is raised only when
    the name nearly (but not exactly) matches a reserved lttree name.
    Names beginning with data_prefix are delegated to CheckDataFileNames().
    """
    filenames_found.add(filename)
    n_full = len(data_prefix)
    #data_prefix_no_space = data_prefix.rstrip()
    n_trim = len(data_prefix_no_space)
    head_full = filename[:n_full]
    lowname = filename.lower()

    # Same prefix with wrong capitalization (eg "data " instead of "Data ")?
    if ((head_full != data_prefix) and
        (head_full.lower() == data_prefix.lower())):
        raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                         'The beginning of output file (\"'+filename+'\")\n'
                         'does not match yet overlaps closely with a reserved lttree-file name prefix.\n'
                         '(\"'+data_prefix+'\"). Perhaps you meant \"'+data_prefix+filename[n_full:]+'\"?')

    # Did they forget the space after the prefix?
    if filename[:n_trim] == data_prefix_no_space:
        if head_full == data_prefix:
            # A well-formed data-file name: apply the data-file checks.
            CheckDataFileNames(filename,
                               srcloc,
                               write_command,
                               filenames_found)
        else:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'The beginning of output file (\"'+filename+'\")\n'
                             'does not match yet overlaps closely with a reserved lttree-file name prefix.\n'
                             '(\"'+data_prefix+'\"). Perhaps you meant \"'+data_prefix+filename[n_trim:]+'\"?')
    elif lowname in ('box boundaries',
                     'box boundary',
                     'boundaries',
                     'boundary',
                     'boundary conditions',
                     'periodic boundaries',
                     'periodic boundary conditions',
                     'periodic_boundaries',
                     'periodic_boundary_conditions',
                     'pbc'):
        # In that case (for one thing) they forgot the data_prefix
        raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                         'Output file name (\"'+filename+'\") does not match,\n'
                         'yet overlaps closely with reserved lttree-file name.\n'
                         'Perhaps you meant \"'+data_boundary+'\"?\n'
                         '(Specify periodic boundary conditions this way.)')
    elif lowname in ('init', 'in init', 'ininit',
                     'initialize', 'in initialize', 'ininitialize'):
        if filename != in_init:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Output file name (\"'+filename+'\") does not match,\n'
                             'yet overlaps closely with reserved lttree-file name.\n'
                             'Perhaps you meant \"'+in_init+'\"?')
        elif write_command != 'write_once':
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
                             'want to use the '+write_command+'() command with \"'+filename+'\".\n'
                             'You should probably use write_once(\"'+filename+'\") instead.\n')
    elif lowname in ('settings', 'in settings', 'insettings'):
        if filename != in_settings:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Output file name (\"'+filename+'\") does not match,\n'
                             'yet overlaps closely with reserved lttree-file name.\n'
                             'Perhaps you meant \"'+in_settings+'\"?')
    elif lowname in ('set_coords', 'set coords', 'setcoords',
                     'in set_coords', 'in set coords', 'in setcoords'):
        if filename != in_set_coords:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Output file name (\"'+filename+'\") does not match,\n'
                             'yet overlaps closely with reserved lttree-file name.\n'
                             'Perhaps you meant \"'+in_set_coords+'\"?')
def CheckSyntaxCheap(lex):
    """
    Cheap one-pass syntax check of a moltemplate/lttree file.

    Scans the token stream from "lex" (a TemplateLexer-wrapped file or input
    stream) looking only for write() and write_once() commands.  For each one
    it validates the command syntax, checks the target file name against the
    names reserved by lttree (CheckCommonFileNames), and checks every template
    variable inside the written template (CheckCommonVarNames).
    Unlike Parse(), no tree of StaticObjs is built.

    At the end, a warning is printed to stderr for each of the most commonly
    needed files ("Data Atoms", "Data Masses", "In Init", "In Settings")
    which was never written to.
    """
    fnames_found = set()
    prematurely_read_token = None
    while True:
        if prematurely_read_token is None:
            command = lex.get_token()
        else:
            command = prematurely_read_token
            prematurely_read_token = None
        #print('Parse(): token = \"'+command+'\", '+lex.error_leader())
        if command == lex.eof:
            #print('Parse(): EOF encountered\n')
            break
        if (command == 'write') or (command == 'write_once'):
            open_paren = lex.get_token()
            #print('Parse(): open_paren=\"'+open_paren+'\"')
            if open_paren == '{':
                # ..then the user neglected to specify the "filename" file-name
                # argument. In that case, supply the default, ''.
                # (which is shorthand for the standard out in this case)
                open_curly = open_paren[0]
                open_paren = ''
                close_paren = ''
                filename = ''
                srcloc = lex.GetSrcLoc()
            else:
                filename = lex.get_token()
                if filename == ')':
                    # "write()" with no argument: use the empty file name.
                    # (BUGFIX: this was previously "filename == ''",
                    #  a no-op comparison instead of an assignment.)
                    filename = ''
                    close_paren = ')'
                else:
                    close_paren = lex.get_token()
                open_curly = lex.get_token()
                srcloc = lex.GetSrcLoc()
            # Parentheses must be balanced ("(" with ")", or both omitted)
            # and the template body must open with "{".
            if ((open_curly != '{') or
                ((open_paren == '') and (close_paren != '')) or
                ((open_paren == '(') and (close_paren != ')'))):
                raise InputError('Error: in '+lex.error_leader()+'\n\n'
                                 'Syntax error at beginning of '+command+' command.')
            filename = RemoveOuterQuotes(filename, lex.quotes)
            # The previous line is similar to:
            #filename = filename.strip(lex.quotes)
            CheckCommonFileNames(filename, lex.GetSrcLoc(), command, fnames_found)
            tmpl_contents = lex.ReadTemplate()
            StaticObj.CleanupReadTemplate(tmpl_contents, lex)
            for entry in tmpl_contents:
                if type(entry) is VarRef:
                    CheckCommonVarNames(entry.prefix,
                                        entry.descr_str,
                                        entry.suffix,
                                        entry.srcloc)
    # Warn about missing files which are almost always needed.
    # (Warnings for the optional files -- "Data Velocities", "Data Bonds",
    #  the various coeff files, etc. -- were deliberately disabled.)
    if data_atoms not in fnames_found:
        sys.stderr.write('WARNING: \"'+data_atoms+'\" file not found\n')
    if data_masses not in fnames_found:
        sys.stderr.write('WARNING: \"'+data_masses+'\" file not found\n')
    if in_init not in fnames_found:
        sys.stderr.write('WARNING: \"'+in_init+'\" file not found\n')
    if in_settings not in fnames_found:
        sys.stderr.write('WARNING: \"'+in_settings+'\" file not found\n')
def CheckSyntaxStatic(context_node,
                      root_node,
                      atom_column_names,
                      data_pair_coeffs_defined,
                      data_bond_coeffs_defined,
                      data_angle_coeffs_defined,
                      data_dihedral_coeffs_defined,
                      data_improper_coeffs_defined,
                      in_pair_coeffs_defined,
                      in_bond_coeffs_defined,
                      in_angle_coeffs_defined,
                      in_dihedral_coeffs_defined,
                      in_improper_coeffs_defined,
                      search_instance_commands):
    """Recursively check the syntax of every table written by the
    WriteFileCommands stored in context_node (and in its children).

    For each command that writes to a "Data ..." section ("Data Atoms",
    "Data Bonds", "Data Angles", ...), verify that every non-blank,
    non-comment row has the expected column layout ($-style instance
    variables and @-style type variables in the right positions), and
    raise InputError otherwise.  Commands writing to "In ..." files are
    delegated to CheckInFileSyntax().

    The various *_coeffs_defined sets are mutated in place: every coeff
    type (or '*' wildcard) whose parameters are defined gets recorded, so
    a later pass can detect references to undefined types.

    If search_instance_commands is True, context_node must be a StaticObj
    and its instance_commands are scanned; otherwise the node's .commands
    are scanned (leaf nodes, which have no .commands attribute, are
    skipped).
    """
    if search_instance_commands:
        assert(isinstance(context_node, StaticObj))
        commands = context_node.instance_commands
    else:
        # Note: Leaf nodes contain no commands, so skip them
        if (not hasattr(context_node, 'commands')):
            return
        # Otherwise process their commands
        commands = context_node.commands
    for command in commands:
        if isinstance(command, WriteFileCommand):
            # Dispatch on the name of the file/section this command writes to:
            filename = command.filename
            if filename == None: # (The "create_var" command causes this)
                pass
            elif (filename.find(in_prefix) == 0): #if filename begins with "In "
                CheckInFileSyntax(command.tmpl_list,
                                  root_node,
                                  in_pair_coeffs_defined,
                                  in_bond_coeffs_defined,
                                  in_angle_coeffs_defined,
                                  in_dihedral_coeffs_defined,
                                  in_improper_coeffs_defined)
            elif filename == 'Data Atoms':
                # Split the template into rows and columns (whitespace-separated
                # columns, newline-separated rows).
                table = TableFromTemplate(command.tmpl_list,
                                          [[' ', '\t', '\r'], '\n'],
                                          [True, False])
                for i in range(0, len(table)):
                    assert(hasattr(table[i], '__len__'))
                    if len(table[i]) == 0:
                        pass # skip blank lines
                    elif ((len(table[i]) > 1) and
                          isinstance(table[i][0], TextBlock) and
                          (len(table[i][0].text) > 0) and
                          (table[i][0].text == '#')):
                        pass # skip comment lines
                    else:
                        # Each column named in atom_column_names must hold the
                        # right kind of variable reference:
                        #   'atom-id'     -> $atom:...  (instance variable)
                        #   'molecule-id' -> $mol:...   (instance variable)
                        #   'atom-type'   -> @atom:...  (type variable)
                        syntax_err = False
                        if len(table[i]) < len(atom_column_names):
                            syntax_err = True
                        else:
                            syntax_err = False
                            for j in range(0, len(atom_column_names)):
                                if ((atom_column_names[j].lower() == 'atom-id') and
                                    (not ((j < len(table[i])) and
                                          isinstance(table[i][j], VarRef) and
                                          (table[i][j].prefix in ('$','${')) and
                                          (ExtractCatName(table[i][j].descr_str) == 'atom')))):
                                    syntax_err = True
                                elif ((atom_column_names[j].lower() == 'molecule-id') and
                                      (not ((j < len(table[i])) and
                                            isinstance(table[i][j], VarRef) and
                                            (table[i][j].prefix in ('$','${')) and
                                            (ExtractCatName(table[i][j].descr_str) == 'mol')))):
                                    syntax_err = True
                                elif ((atom_column_names[j].lower() == 'atom-type') and
                                      (not ((j < len(table[i])) and
                                            (isinstance(table[i][j], VarRef)) and
                                            (table[i][j].prefix in ('@', '@{')) and
                                            (table[i][j].nptr.cat_name == 'atom') and
                                            (table[i][j].nptr.cat_node == root_node)))):
                                    syntax_err = True
                        if syntax_err:
                            # Build a human-readable example of the expected row
                            # format from the column names before complaining.
                            correct_rows_list = [s for s in atom_column_names]
                            for j in range(0, len(correct_rows_list)):
                                if correct_rows_list[j].lower() == 'atom-id':
                                    correct_rows_list[j] = '$atom:id'
                                elif correct_rows_list[j].lower() == 'atom-type':
                                    correct_rows_list[j] = '@atom:type'
                                elif correct_rows_list[j].lower() == 'molecule-id':
                                    correct_rows_list[j] = '$mol:id'
                            correct_rows_msg = ' '.join(correct_rows_list)
                            raise InputError('----------------------------------------------------\n'+
                                             ' Syntax error near '+
                                             ErrorLeader(table[i][0].srcloc.infile,
                                                         table[i][0].srcloc.lineno)+'\n'
                                             ' Invalid "Data Atoms" syntax.\n'+
                                             'Each line of the \"Data Atoms\" section should have this format:\n\n'
                                             ' '+correct_rows_msg+'\n\n'
                                             'You may have forgotten to specify the LAMMPS atom_style.\n'+
                                             '(You can do this running moltemplate with the -atom-style _style_ argument.)\n'+
                                             '----------------------------------------------------\n'+
                                             g_no_check_msg)
            elif filename == 'Data Bonds':
                # Expected row: $bond:id @bond:type $atom:id1 $atom:id2
                table = TableFromTemplate(command.tmpl_list,
                                          [[' ', '\t', '\r'], '\n'],
                                          [True, False])
                for i in range(0, len(table)):
                    syntax_err = False
                    assert(hasattr(table[i], '__len__'))
                    if len(table[i]) > 0:
                        if ((len(table[i]) > 1) and
                            isinstance(table[i][0], TextBlock) and
                            (len(table[i][0].text) > 0) and
                            (table[i][0].text == '#')):
                            pass
                        else:
                            if len(table[i]) < 4:
                                syntax_err = True
                            table_entry = table[i][0]
                            if (not ((isinstance(table_entry, VarRef)) and
                                     (table_entry.prefix in ('$','${')) and
                                     (ExtractCatName(table_entry.descr_str) == 'bond'))):
                                syntax_err = True
                            if len(table[i]) > 1:
                                table_entry = table[i][1]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('@', '@{')) and
                                         (table_entry.nptr.cat_name == 'bond') and
                                         (table_entry.nptr.cat_node == root_node))):
                                    syntax_err = True
                            if len(table[i]) > 2:
                                table_entry = table[i][2]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if len(table[i]) > 3:
                                table_entry = table[i][3]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if syntax_err:
                                raise InputError('----------------------------------------------------\n'+
                                                 ' Syntax error near '+
                                                 ErrorLeader(table[i][0].srcloc.infile,
                                                             table[i][0].srcloc.lineno)+'\n'
                                                 ' Incorrect "Data Bonds" syntax.\n'+
                                                 'Each line of the \"Data Bonds\" section should have this format:\n\n'
                                                 ' $bond:id @bond:type $atom:id1 $atom:id2\n'+
                                                 '----------------------------------------------------\n'+
                                                 g_no_check_msg)
            elif filename == 'Data Angles':
                # Expected row: $angle:id @angle:type $atom:id1 $atom:id2 $atom:id3
                table = TableFromTemplate(command.tmpl_list,
                                          [[' ', '\t', '\r'], '\n'],
                                          [True, False])
                for i in range(0, len(table)):
                    syntax_err = False
                    assert(hasattr(table[i], '__len__'))
                    if len(table[i]) > 0:
                        if ((len(table[i]) > 1) and
                            isinstance(table[i][0], TextBlock) and
                            (len(table[i][0].text) > 0) and
                            (table[i][0].text == '#')):
                            pass
                        else:
                            if len(table[i]) < 5:
                                syntax_err = True
                            table_entry = table[i][0]
                            if (not ((isinstance(table_entry, VarRef)) and
                                     (table_entry.prefix in ('$','${')) and
                                     (ExtractCatName(table_entry.descr_str) == 'angle'))):
                                syntax_err = True
                            if len(table[i]) > 1:
                                table_entry = table[i][1]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('@', '@{')) and
                                         (table_entry.nptr.cat_name == 'angle') and
                                         (table_entry.nptr.cat_node == root_node))):
                                    syntax_err = True
                            if len(table[i]) > 2:
                                table_entry = table[i][2]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if len(table[i]) > 3:
                                table_entry = table[i][3]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if len(table[i]) > 4:
                                table_entry = table[i][4]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if syntax_err:
                                raise InputError('----------------------------------------------------\n'+
                                                 ' Syntax error near '+
                                                 ErrorLeader(table[i][0].srcloc.infile,
                                                             table[i][0].srcloc.lineno)+'\n'
                                                 ' Incorrect "Data Angles" syntax.\n'+
                                                 'Each line of the \"Data Angles\" section should have this format:\n\n'
                                                 ' $angle:id @angle:type $atom:id1 $atom:id2 $atom:id3\n'+
                                                 '----------------------------------------------------\n\n'+
                                                 g_no_check_msg)
            elif filename == 'Data Dihedrals':
                # Expected row: $dihedral:id @dihedral:type $atom:id1..$atom:id4
                table = TableFromTemplate(command.tmpl_list,
                                          [[' ', '\t', '\r'], '\n'],
                                          [True, False])
                for i in range(0, len(table)):
                    syntax_err = False
                    assert(hasattr(table[i], '__len__'))
                    if len(table[i]) > 0:
                        if ((len(table[i]) > 1) and
                            isinstance(table[i][0], TextBlock) and
                            (len(table[i][0].text) > 0) and
                            (table[i][0].text == '#')):
                            pass
                        else:
                            if len(table[i]) < 6:
                                syntax_err = True
                            table_entry = table[i][0]
                            if (not ((isinstance(table_entry, VarRef)) and
                                     (table_entry.prefix in ('$','${')) and
                                     (ExtractCatName(table_entry.descr_str) == 'dihedral'))):
                                syntax_err = True
                            if len(table[i]) > 1:
                                table_entry = table[i][1]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('@', '@{')) and
                                         (table_entry.nptr.cat_name == 'dihedral') and
                                         (table_entry.nptr.cat_node == root_node))):
                                    syntax_err = True
                            if len(table[i]) > 2:
                                table_entry = table[i][2]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if len(table[i]) > 3:
                                table_entry = table[i][3]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if len(table[i]) > 4:
                                table_entry = table[i][4]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if len(table[i]) > 5:
                                table_entry = table[i][5]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if syntax_err:
                                raise InputError('----------------------------------------------------\n'+
                                                 ' Syntax error near '+
                                                 ErrorLeader(table[i][0].srcloc.infile,
                                                             table[i][0].srcloc.lineno)+'\n'
                                                 ' Incorrect "Data Dihedrals" syntax.\n'+
                                                 'Each line of the \"Data Dihedrals\" section should have this format:\n\n'
                                                 ' $dihedral:id @dihedral:type $atom:id1 $atom:id2 $atom:id3 $atom:id4\n'+
                                                 '----------------------------------------------------\n'+
                                                 g_no_check_msg)
            elif filename == 'Data Impropers':
                # Expected row: $improper:id @improper:type $atom:id1..$atom:id4
                table = TableFromTemplate(command.tmpl_list,
                                          [[' ', '\t', '\r'], '\n'],
                                          [True, False])
                for i in range(0, len(table)):
                    syntax_err = False
                    assert(hasattr(table[i], '__len__'))
                    if len(table[i]) > 0:
                        if ((len(table[i]) > 1) and
                            isinstance(table[i][0], TextBlock) and
                            (len(table[i][0].text) > 0) and
                            (table[i][0].text == '#')):
                            pass
                        else:
                            if len(table[i]) < 6:
                                syntax_err = True
                            table_entry = table[i][0]
                            if (not ((isinstance(table_entry, VarRef)) and
                                     (table_entry.prefix in ('$','${')) and
                                     (ExtractCatName(table_entry.descr_str) == 'improper'))):
                                syntax_err = True
                            if len(table[i]) > 1:
                                table_entry = table[i][1]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('@', '@{')) and
                                         (table_entry.nptr.cat_name == 'improper') and
                                         (table_entry.nptr.cat_node == root_node))):
                                    syntax_err = True
                            if len(table[i]) > 2:
                                table_entry = table[i][2]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if len(table[i]) > 3:
                                table_entry = table[i][3]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if len(table[i]) > 4:
                                table_entry = table[i][4]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if len(table[i]) > 5:
                                table_entry = table[i][5]
                                if (not ((isinstance(table_entry, VarRef)) and
                                         (table_entry.prefix in ('$','${')) and
                                         (ExtractCatName(table_entry.descr_str) == 'atom'))):
                                    syntax_err = True
                            if syntax_err:
                                raise InputError('----------------------------------------------------\n'+
                                                 ' Syntax error near '+
                                                 ErrorLeader(table[i][0].srcloc.infile,
                                                             table[i][0].srcloc.lineno)+'\n'
                                                 ' Incorrect "Data Impropers" syntax.\n'+
                                                 'Each line of the \"Data Impropers\" section should have this format:\n\n'
                                                 ' $improper:id @improper:type $atom:id1 $atom:id2 $atom:id3 $atom:id4\n'+
                                                 '----------------------------------------------------\n'+
                                                 g_no_check_msg)
            # A simple wildcard is the character "*" on its own.
            # These are okay.
            # A "compound" wildcard expression is something like
            # 5*7 or
            # 5* or
            # *7 or
            # @{bond:A}*@bond:B or
            # @{bond:A}* or
            # *@bond:B
            # LAMMPS allows this but in moltemplate this causes
            # unintended side-effects. Check for these now.
            if filename in set(['Data Bond Coeffs',
                                'Data Angle Coeffs',
                                'Data Dihedral Coeffs',
                                'Data Improper Coeffs',
                                'Data Pair Coeffs']):
                table = TableFromTemplate(command.tmpl_list,
                                          [[' ','\t','\r'], '\n'],
                                          [True, False])
                # First pass: record rows whose first column is a lone "*"
                # wildcard, and reject compound wildcard expressions.
                for i in range(0, len(table)):
                    assert(hasattr(table[i], '__len__'))
                    if len(table[i]) > 0:
                        if (isinstance(table[i][0], TextBlock) and
                            table[i][0].text == '*'):
                            if filename == 'Data Bond Coeffs':
                                data_bond_coeffs_defined.add('*')
                            elif filename == 'Data Angle Coeffs':
                                data_angle_coeffs_defined.add('*')
                            elif filename == 'Data Dihedral Coeffs':
                                data_dihedral_coeffs_defined.add('*')
                            elif filename == 'Data Improper Coeffs':
                                data_improper_coeffs_defined.add('*')
                            elif filename == 'Data Pair Coeffs':
                                data_pair_coeffs_defined.add(('*','*'))
                        else:
                            compound_wildcard = False
                            if (len(table[i]) > 1):
                                if hasattr(table[i][0],'__len__'):
                                    ltmpl = table[i][0]
                                else:
                                    ltmpl = [table[i][0]]
                                for entry in ltmpl:
                                    if (isinstance(entry, TextBlock) and
                                        ('*' in entry.text)):
                                        compound_wildcard = True
                                    elif (isinstance(entry, VarRef) and
                                          ('*' in entry.descr_str)):
                                        compound_wildcard = True
                            if compound_wildcard:
                                raise InputError('--- Paranoid checking: ---\n'
                                                 ' Possible error near '+
                                                 ErrorLeader(entry.srcloc.infile,
                                                             entry.srcloc.lineno)+'\n'
                                                 'The wildcard symbol, \"*\", is not recommended within \"'+filename+'\".\n'
                                                 'It is safer to specify the parameters for each type explicitly.\n'
                                                 'You CAN use \"*\" wildcards, but you must disable syntax checking. To get\n'
                                                 'past this error message, run moltemplate.sh using the \"-nocheck\" option.\n')
                # Second pass: verify the first column of every remaining row
                # refers to the correct @-style type variable, and record it.
                if filename == 'Data Bond Coeffs':
                    # Commenting the next line out. We did this already:
                    #table = TableFromTemplate(command.tmpl_list,
                    #                          [[' ','\t','\r'], '\n'],
                    #                          [True, False])
                    for i in range(0, len(table)):
                        if len(table[i]) == 0:
                            pass
                        elif ((len(table[i]) > 1) and
                              isinstance(table[i][0], TextBlock) and
                              (table[i][0].text == '*')):
                            pass # we dealt with this case earlier
                        elif ((len(table[i]) > 1) and
                              isinstance(table[i][0], TextBlock) and
                              (len(table[i][0].text) > 0) and
                              (table[i][0].text == '#')):
                            pass #Ignore comment lines (postprocessing removes them)
                        elif (not (isinstance(table[i][0], VarRef) and
                                   (table[i][0].prefix in ('@', '@{')) and
                                   (table[i][0].nptr.cat_name == 'bond') and
                                   (table[i][0].nptr.cat_node == root_node))):
                            raise InputError('----------------------------------------------------\n'+
                                             ' Syntax error near '+
                                             ErrorLeader(table[i][0].srcloc.infile,
                                                         table[i][0].srcloc.lineno)+'\n'
                                             ' Incorrect "Data Bond Coeffs" syntax.\n'
                                             ' Each line of the \"Data Bond Coeffs\" section\n'
                                             ' should have the following syntax:\n\n'+
                                             ' @bond:type list-of-parameters...\n'+
                                             '----------------------------------------------------\n'+
                                             g_no_check_msg)
                        else:
                            data_bond_coeffs_defined.add(table[i][0].binding)
                elif filename == 'Data Angle Coeffs':
                    # Commenting the next line out. We did this already:
                    #table = TableFromTemplate(command.tmpl_list,
                    #                          [[' ','\t','\r'], '\n'],
                    #                          [True, False])
                    for i in range(0, len(table)):
                        if len(table[i]) == 0:
                            pass
                        elif ((len(table[i]) > 1) and
                              isinstance(table[i][0], TextBlock) and
                              (table[i][0].text == '*')):
                            pass # we dealt with this case earlier
                        elif ((len(table[i]) > 1) and
                              isinstance(table[i][0], TextBlock) and
                              (len(table[i][0].text) > 0) and
                              (table[i][0].text == '#')):
                            pass #Ignore comment lines (postprocessing removes them)
                        elif (not (isinstance(table[i][0], VarRef) and
                                   (table[i][0].prefix in ('@', '@{')) and
                                   (table[i][0].nptr.cat_name == 'angle') and
                                   (table[i][0].nptr.cat_node == root_node))):
                            raise InputError('----------------------------------------------------\n'+
                                             ' Syntax error near '+
                                             ErrorLeader(table[i][0].srcloc.infile,
                                                         table[i][0].srcloc.lineno)+'\n'
                                             ' Incorrect "Data Angle Coeffs" syntax.\n'
                                             ' Each line of the \"Data Angle Coeffs\" section\n'
                                             ' should have the following syntax:\n\n'+
                                             ' @angle:type list-of-parameters...\n'+
                                             '----------------------------------------------------\n'+
                                             g_no_check_msg)
                        else:
                            data_angle_coeffs_defined.add(table[i][0].binding)
                elif filename == 'Data Dihedral Coeffs':
                    # Commenting the next line out. We did this already:
                    #table = TableFromTemplate(command.tmpl_list,
                    #                          [[' ','\t','\r'], '\n'],
                    #                          [True, False])
                    for i in range(0, len(table)):
                        if len(table[i]) == 0:
                            pass
                        elif ((len(table[i]) > 1) and
                              isinstance(table[i][0], TextBlock) and
                              (table[i][0].text == '*')):
                            pass # we dealt with this case earlier
                        elif ((len(table[i]) > 1) and
                              isinstance(table[i][0], TextBlock) and
                              (len(table[i][0].text) > 0) and
                              (table[i][0].text == '#')):
                            pass #Ignore comment lines (postprocessing removes them)
                        elif (not (isinstance(table[i][0], VarRef) and
                                   (table[i][0].prefix in ('@', '@{')) and
                                   (table[i][0].nptr.cat_name == 'dihedral') and
                                   (table[i][0].nptr.cat_node == root_node))):
                            raise InputError('----------------------------------------------------\n'+
                                             ' Syntax error near '+
                                             ErrorLeader(table[i][0].srcloc.infile,
                                                         table[i][0].srcloc.lineno)+'\n'
                                             ' Incorrect "Data Dihedral Coeffs" syntax.\n'
                                             ' Each line of the \"Data Dihedral Coeffs\" section\n'
                                             ' should have the following syntax:\n\n'+
                                             ' @dihedral:type list-of-parameters...\n'+
                                             '----------------------------------------------------\n'+
                                             g_no_check_msg)
                        else:
                            data_dihedral_coeffs_defined.add(table[i][0].binding)
                elif filename == 'Data Improper Coeffs':
                    # Commenting the next line out. We did this already:
                    #table = TableFromTemplate(command.tmpl_list,
                    #                          [[' ','\t','\r'], '\n'],
                    #                          [True, False])
                    for i in range(0, len(table)):
                        if len(table[i]) == 0:
                            pass
                        elif ((len(table[i]) > 1) and
                              isinstance(table[i][0], TextBlock) and
                              (table[i][0].text == '*')):
                            pass # we dealt with this case earlier
                        elif ((len(table[i]) > 1) and
                              isinstance(table[i][0], TextBlock) and
                              (len(table[i][0].text) > 0) and
                              (table[i][0].text == '#')):
                            pass #Ignore comment lines (postprocessing removes them)
                        elif (not (isinstance(table[i][0], VarRef) and
                                   (table[i][0].prefix in ('@', '@{')) and
                                   (table[i][0].nptr.cat_name == 'improper') and
                                   (table[i][0].nptr.cat_node == root_node))):
                            raise InputError('----------------------------------------------------\n'+
                                             ' Syntax error near '+
                                             ErrorLeader(table[i][0].srcloc.infile,
                                                         table[i][0].srcloc.lineno)+'\n'
                                             ' Incorrect "Data Improper Coeffs" syntax.\n'
                                             ' Each line of the \"Data Improper Coeffs\" section\n'
                                             ' should have the following syntax:\n\n'+
                                             ' @improper:type list-of-parameters...\n'+
                                             '----------------------------------------------------\n'+
                                             g_no_check_msg)
                        else:
                            data_improper_coeffs_defined.add(table[i][0].binding)
                elif filename == 'Data Pair Coeffs':
                    # Commenting the next line out. We did this already:
                    #table = TableFromTemplate(command.tmpl_list,
                    #                          [[' ','\t','\r'], '\n'],
                    #                          [True, False])
                    # NOTE(review): the guards below use len(table[i]) > 0 where
                    # the analogous Bond/Angle/... sections use > 1 — confirm
                    # this difference is intentional.
                    for i in range(0, len(table)):
                        if len(table[i]) == 0:
                            pass
                        elif ((len(table[i]) > 0) and
                              isinstance(table[i][0], TextBlock) and
                              (table[i][0].text == '*')):
                            pass # we dealt with this case earlier
                        elif ((len(table[i]) > 0) and
                              isinstance(table[i][0], TextBlock) and
                              (len(table[i][0].text) > 0) and
                              (table[i][0].text == '#')):
                            pass #Ignore comment lines (postprocessing removes them)
                        elif (not ((len(table[i]) > 0) and
                                   isinstance(table[i][0], VarRef) and
                                   (table[i][0].prefix in ('@', '@{')) and
                                   (table[i][0].nptr.cat_name == 'atom') and
                                   (table[i][0].nptr.cat_node == root_node))):
                            raise InputError('----------------------------------------------------\n'+
                                             ' Syntax error near '+
                                             ErrorLeader(table[i][0].srcloc.infile,
                                                         table[i][0].srcloc.lineno)+'\n'
                                             ' Incorrect "Data Pair Coeffs" syntax.\n'
                                             ' Each line of the \"Data Pair Coeffs\" section\n'
                                             ' should have the following syntax:\n\n'+
                                             ' @atom:type list-of-parameters...\n'+
                                             '----------------------------------------------------\n'+
                                             g_no_check_msg)
                        else:
                            # Pair coeffs are keyed by an (i,j) type pair; a
                            # single-type row defines the diagonal entry.
                            data_pair_coeffs_defined.add((table[i][0].binding,
                                                          table[i][0].binding))
            elif filename == 'Data Angles By Type':
                # Row must start with @angle:type followed by at least 3 columns.
                table = TableFromTemplate(command.tmpl_list,
                                          [[' ','\t','\r'], '\n'],
                                          [True, False])
                for i in range(0, len(table)):
                    if len(table[i]) == 0:
                        pass
                    elif ((len(table[i]) > 1) and
                          isinstance(table[i][0], TextBlock) and
                          (len(table[i][0].text) > 0) and
                          (table[i][0].text == '#')):
                        pass #Ignore comment lines (postprocessing removes them)
                    elif (not ((len(table[i]) >= 4) and
                               isinstance(table[i][0], VarRef) and
                               (table[i][0].prefix in ('@', '@{')) and
                               (table[i][0].nptr.cat_name == 'angle') and
                               (table[i][0].nptr.cat_node == root_node))):
                        raise InputError('----------------------------------------------------\n'+
                                         ' Syntax error near '+
                                         ErrorLeader(table[i][0].srcloc.infile,
                                                     table[i][0].srcloc.lineno)+'\n'
                                         ' Incorrect \"Data Angles By Type\" syntax.\n'
                                         ' Each line of the \"Data Angles By Type\" section should begin with an\n'
                                         ' @angle:type variable followed by 3 atom types (and 2 optional bond types).\n'+
                                         '----------------------------------------------------\n'+
                                         g_no_check_msg)
            elif filename == 'Data Dihedrals By Type':
                # Row must start with @dihedral:type followed by at least 4 columns.
                table = TableFromTemplate(command.tmpl_list,
                                          [[' ','\t','\r'], '\n'],
                                          [True, False])
                for i in range(0, len(table)):
                    if len(table[i]) == 0:
                        pass
                    elif ((len(table[i]) > 1) and
                          isinstance(table[i][0], TextBlock) and
                          (len(table[i][0].text) > 0) and
                          (table[i][0].text == '#')):
                        pass #Ignore comment lines (postprocessing removes them)
                    elif (not ((len(table[i]) >= 5) and
                               isinstance(table[i][0], VarRef) and
                               (table[i][0].prefix in ('@', '@{')) and
                               (table[i][0].nptr.cat_name == 'dihedral') and
                               (table[i][0].nptr.cat_node == root_node))):
                        raise InputError('----------------------------------------------------\n'+
                                         ' Syntax error near '+
                                         ErrorLeader(table[i][0].srcloc.infile,
                                                     table[i][0].srcloc.lineno)+'\n'
                                         ' Incorrect \"Data Dihedrals By Type\" syntax.\n'
                                         ' Each line of the \"Data Dihedrals By Type\" section should begin with a\n\n'
                                         ' @dihedral:type variable followed by 4 atom types (and 3 optional bond types).\n'+
                                         '----------------------------------------------------\n'+
                                         g_no_check_msg)
            elif filename == 'Data Impropers By Type':
                # Row must start with @improper:type followed by at least 4 columns.
                table = TableFromTemplate(command.tmpl_list,
                                          [[' ','\t','\r'], '\n'],
                                          [True, False])
                for i in range(0, len(table)):
                    if len(table[i]) == 0:
                        pass
                    elif ((len(table[i]) > 1) and
                          isinstance(table[i][0], TextBlock) and
                          (len(table[i][0].text) > 0) and
                          (table[i][0].text == '#')):
                        pass #Ignore comment lines (postprocessing removes them)
                    elif (not ((len(table[i]) >= 5) and
                               isinstance(table[i][0], VarRef) and
                               (table[i][0].prefix in ('@', '@{')) and
                               (table[i][0].nptr.cat_name == 'improper') and
                               (table[i][0].nptr.cat_node == root_node))):
                        raise InputError('----------------------------------------------------\n'+
                                         ' Syntax error near '+
                                         ErrorLeader(table[i][0].srcloc.infile,
                                                     table[i][0].srcloc.lineno)+'\n'
                                         ' Incorrect \"Data Impropers By Type\" syntax.\n'
                                         ' Each line of the \"Data Impropers By Type\" section should begin with an\n\n'
                                         ' @improper:type variable followed by 4 atom types (and 3 optional bond types).\n'+
                                         '----------------------------------------------------\n'+
                                         g_no_check_msg)
    # Recursively invoke CheckSyntaxStatic() on all (non-leaf) child nodes:
    for child in context_node.children:
        CheckSyntaxStatic(child,
                          root_node,
                          atom_column_names,
                          data_pair_coeffs_defined,
                          data_bond_coeffs_defined,
                          data_angle_coeffs_defined,
                          data_dihedral_coeffs_defined,
                          data_improper_coeffs_defined,
                          in_pair_coeffs_defined,
                          in_bond_coeffs_defined,
                          in_angle_coeffs_defined,
                          in_dihedral_coeffs_defined,
                          in_improper_coeffs_defined,
                          search_instance_commands)
def CheckInFileSyntax(tmpl_list,
                      root_node,
                      pair_coeffs_defined,
                      bond_coeffs_defined,
                      angle_coeffs_defined,
                      dihedral_coeffs_defined,
                      improper_coeffs_defined):
    """Check the syntax of the LAMMPS input-script commands in tmpl_list.

    Validates bond_coeff / angle_coeff / dihedral_coeff / improper_coeff /
    pair_coeff commands: the type argument(s) must be either a lone "*"
    wildcard or @-style type variables belonging to the correct category
    at root_node.  Compound wildcard expressions (e.g. "5*7", "*@bond:B")
    are rejected, and common capitalization mistakes ("bondcoeff", ...)
    raise a spelling error.  Every coeff type whose parameters are defined
    is added to the corresponding *_coeffs_defined set (pair coeffs as an
    (i,j) tuple), so the caller can later detect undefined types.
    Raises InputError on any malformed command.
    """
    table = TableFromTemplate(tmpl_list,
                              [[' ','\t','\r'], '\n'],
                              [True, False])
    for i in range(0, len(table)):
        assert(hasattr(table[i], '__len__'))
        if len(table[i]) > 0:
            # --- wildcard pre-check for the single-type-argument commands ---
            if ((isinstance(table[i][0], TextBlock)) and
                (table[i][0].text in set(['bond_coeff',
                                          'angle_coeff',
                                          'dihedral_coeff',
                                          'improper_coeff']))):
                if len(table[i]) > 1: # if not deal with error later
                    if (isinstance(table[i][1], TextBlock) and
                        table[i][1].text == '*'):
                        # A lone "*" defines the coeffs for every type.
                        if table[i][0].text == 'bond_coeff':
                            bond_coeffs_defined.add('*')
                        elif table[i][0].text == 'angle_coeff':
                            angle_coeffs_defined.add('*')
                        elif table[i][0].text == 'dihedral_coeff':
                            dihedral_coeffs_defined.add('*')
                        elif table[i][0].text == 'improper_coeff':
                            improper_coeffs_defined.add('*')
                    else:
                        # Reject compound wildcard expressions (e.g. "5*7").
                        compound_wildcard = False
                        if (len(table[i]) > 1):
                            if hasattr(table[i][1], '__len__'):
                                ltmpl = table[i][1]
                            else:
                                ltmpl = [table[i][1]]
                            for entry in ltmpl:
                                if (isinstance(entry, TextBlock) and
                                    ('*' in entry.text)):
                                    compound_wildcard = True
                                elif (isinstance(entry, VarRef) and
                                      ('*' in entry.descr_str)):
                                    compound_wildcard = True
                        if compound_wildcard:
                            raise InputError('---- Paranoid checking: ---\n'
                                             ' Possible error near '+
                                             ErrorLeader(entry.srcloc.infile,
                                                         entry.srcloc.lineno)+'\n'
                                             'The wildcard symbol, \"*\", is not recommended within a \"'+table[i][0].text+'\".\n'
                                             'command. It is safer to specify the parameters for each bond type explicitly.\n'
                                             'You CAN use \"*\" wildcards, but you must disable syntax checking. To get\n'
                                             'past this error message, run moltemplate.sh using the \"-nocheck\" option.\n')
            # --- bond_coeff ---
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'bondcoeff') or
                 (table[i][0].text.lower() == 'bond_coeff'))):
                if table[i][0].text != 'bond_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"bond_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'bond') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"bond_coeff\" command.\n\n'+
                                     ' Each \"bond_coeff\" command should have the following syntax:\n\n'+
                                     ' bond_coeff @bond:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    bond_coeffs_defined.add(table[i][1].binding)
            # --- angle_coeff ---
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'anglecoeff') or
                 (table[i][0].text.lower() == 'angle_coeff'))):
                if table[i][0].text != 'angle_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"angle_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'angle') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"angle_coeff\" command.\n\n'+
                                     ' Each \"angle_coeff\" command should have the following syntax:\n\n'+
                                     ' angle_coeff @angle:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    angle_coeffs_defined.add(table[i][1].binding)
            # --- dihedral_coeff ---
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'dihedralcoeff') or
                 (table[i][0].text.lower() == 'dihedral_coeff'))):
                if table[i][0].text != 'dihedral_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"dihedral_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'dihedral') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"dihedral_coeff\" command.\n\n'+
                                     ' Each \"dihedral_coeff\" command should have the following syntax:\n\n'+
                                     ' dihedral_coeff @dihedral:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    dihedral_coeffs_defined.add(table[i][1].binding)
            # --- improper_coeff ---
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'impropercoeff') or
                 (table[i][0].text.lower() == 'improper_coeff'))):
                if table[i][0].text != 'improper_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"improper_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'improper') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"improper_coeff\" command.\n\n'+
                                     ' Each \"improper_coeff\" command should have the following syntax:\n\n'+
                                     ' improper_coeff @improper:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    improper_coeffs_defined.add(table[i][1].binding)
            # --- pair_coeff (takes TWO type arguments) ---
            elif ((isinstance(table[i][0], TextBlock)) and
                  ((table[i][0].text.lower() == 'paircoeff') or
                   (table[i][0].text.lower() == 'pair_coeff'))):
                if table[i][0].text != 'pair_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"pair_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if len(table[i]) > 2: # if not, deal with error later
                    # BUGFIX: the original tested table[i][1] twice here, so
                    # "pair_coeff * X" was recorded as ('*','*') regardless of
                    # the second argument.  Both type arguments must be "*"
                    # (mirrors the check further below).
                    if ((isinstance(table[i][1], TextBlock) and
                         (table[i][1].text == '*')) and
                        (isinstance(table[i][2], TextBlock) and
                         (table[i][2].text == '*'))):
                        pair_coeffs_defined.add(('*','*'))
                    else:
                        # Reject compound wildcards in either type argument.
                        compound_wildcard = False
                        assert(len(table[i]) > 1)
                        if hasattr(table[i][1], '__len__'):
                            ltmpl = table[i][1]
                        else:
                            ltmpl = [table[i][1]]
                        for entry in ltmpl:
                            if (isinstance(entry, TextBlock) and
                                ('*' in entry.text)):
                                compound_wildcard = True
                            elif (isinstance(entry, VarRef) and
                                  ('*' in entry.descr_str)):
                                compound_wildcard = True
                        if hasattr(table[i][2], '__len__'):
                            ltmpl = table[i][2]
                        else:
                            ltmpl = [table[i][2]]
                        for entry in ltmpl:
                            if (isinstance(entry, TextBlock) and
                                ('*' in entry.text)):
                                compound_wildcard = True
                            elif (isinstance(entry, VarRef) and
                                  ('*' in entry.descr_str)):
                                compound_wildcard = True
                        if compound_wildcard:
                            raise InputError('---- Paranoid checking: ---\n'
                                             ' Possible error near '+
                                             ErrorLeader(entry.srcloc.infile,
                                                         entry.srcloc.lineno)+'\n'
                                             'The wildcard symbol, \"*\", is not recommended within a \"pair_coeff\" command.\n'
                                             'It is safer to specify the parameters for each bond type explicitly.\n'
                                             'You CAN use \"*\" wildcards, but you must disable syntax checking. To get\n'
                                             'past this error message, run moltemplate.sh using the \"-nocheck\" option.\n')
                if ((len(table[i]) > 2) and
                    (isinstance(table[i][1], TextBlock) and
                     (table[i][1].text == '*')) and
                    (isinstance(table[i][2], TextBlock) and
                     (table[i][2].text == '*'))):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 2) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'atom') and
                           (table[i][1].nptr.cat_node == root_node) and
                           (isinstance(table[i][2], VarRef)) and
                           (table[i][2].prefix in ('@', '@{')) and
                           (table[i][2].nptr.cat_name == 'atom') and
                           (table[i][2].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"pair_coeff\" command.\n\n'+
                                     ' Each \"pair_coeff\" command should have the following syntax:\n\n'+
                                     ' pair_coeff @atom:typeI @atom:typeJ [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    pair_coeffs_defined.add((table[i][1].binding, table[i][2].binding))
def LttreeCheckParseArgs(argv, settings):
    """Parse the command-line arguments and, when run as a script, open the
    single remaining (unprocessed) argument as the ttree template file and
    attach a TemplateLexer for it to ``settings.lex``.

    Raises InputError when no file argument remains or when more than one
    unprocessed argument is left (this program parses a single source file).
    """
    # Delegate the shared option parsing first; it consumes recognized flags.
    LttreeParseArgs(argv, settings)
    if __name__ == "__main__":
        # Instantiate the lexer we will be using.
        # (The lexer's __init__() function requires an openned file.
        # Assuming __name__ == "__main__", then the name of that file should
        # be the last remaining (unprocessed) argument in the argument list.)
        if len(argv) == 1:
            raise InputError('Error: This program requires at least one argument\n'
                             '       the name of a file containing ttree template commands\n')
        elif len(argv) == 2:
            settings.infile = argv[1]
            try:
                settings.lex = TemplateLexer(open(settings.infile, 'r'),
                                             settings.infile)  # Parse text from file
            except IOError:
                # Report unreadable input files to the user and bail out.
                sys.stderr.write('Error: unable to open file\n'
                                 '       \"'+settings.infile+'\"\n'
                                 '       for reading.\n')
                sys.exit(1)
            # Remove the consumed file name from the argument list.
            del(argv[1:2])
        else:
            # if there are more than 2 remaining arguments,
            problem_args = ['\"'+arg+'\"' for arg in argv[1:]]
            raise InputError('Syntax Error('+g_program_name+'):\n\n'
                             '       Unrecognized argument.\n'
                             '       (That or there is some other problem with the argument list.)\n'
                             '       The problem begins with these arguments:\n'
                             '       '+(' '.join(problem_args))+'\n\n'
                             '       (The actual problem may be earlier in the argument list.\n'
                             '       If these arguments are source files, then keep in mind\n'
                             '       that this program can not parse multiple source files.)\n'
                             '       Check the syntax of the entire argument list.\n')
####### control flow begins here: #######
# Top-level driver: parse the template file twice (a cheap pass and a full
# static parse), then verify that every referenced bond/angle/dihedral/
# improper/atom type has matching force-field coefficients defined.
if __name__ == "__main__":
    g_program_name = __file__.split('/')[-1]  # = 'lttree_check.py'
    g_version_str = '0.73'
    g_date_str = '2013-2-15'
    sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+'\n')
    try:
        # Parse the argument list and instantiate the lexer we will be using:
        #settings = BasicUISettings()
        #BasicUIParseArgs(sys.argv, settings)
        settings = LttreeSettings()
        LttreeCheckParseArgs(sys.argv, settings)
        # Invoke syntax checker pass:
        # This first check only checks for very simple mistakes
        # (mispelled versions of standard files or variable names).
        CheckSyntaxCheap(settings.lex)
        settings.lex.instream.close()
        # Now read the file again.
        # This time parse it using StaticObj.ReadTemplate().
        # (This will allow us to check for deeper problems.)
        del settings.lex
        settings.lex = TemplateLexer(open(settings.infile, 'r'),
                                     settings.infile)
        static_tree_root = StaticObj('', None)  # The root of the static tree
                                                # has name '' (equivalent to '/')
        sys.stderr.write(g_program_name+': parsing the class definitions...')
        static_tree_root.Parse(settings.lex)
        sys.stderr.write(' done\n'+g_program_name+': looking up classes...')
        static_tree_root.LookupStaticRefs()
        sys.stderr.write(' done\n'+g_program_name+': looking up @variables...')
        # Resolve @variable pointers in both write_once() and write() bodies.
        AssignVarPtrs(static_tree_root,
                      search_instance_commands=False)
        AssignVarPtrs(static_tree_root,
                      search_instance_commands=True)
        sys.stderr.write(' done\n')
        #sys.stderr.write(' done\n\nclass_def_tree = ' + str(static_tree_root) + '\n\n')
        # Coefficients may come either from a "Data ... Coeffs" section or
        # from "..._coeff" commands in the input script; track both per type.
        data_pair_coeffs_defined = set([])
        data_bond_coeffs_defined = set([])
        data_angle_coeffs_defined = set([])
        data_dihedral_coeffs_defined = set([])
        data_improper_coeffs_defined = set([])
        in_pair_coeffs_defined = set([])
        in_bond_coeffs_defined = set([])
        in_angle_coeffs_defined = set([])
        in_dihedral_coeffs_defined = set([])
        in_improper_coeffs_defined = set([])
        # Now check the static syntax
        # Here we check the contents of the the "write_once()" commands:
        CheckSyntaxStatic(static_tree_root,
                          static_tree_root,
                          settings.column_names,
                          data_pair_coeffs_defined,
                          data_bond_coeffs_defined,
                          data_angle_coeffs_defined,
                          data_dihedral_coeffs_defined,
                          data_improper_coeffs_defined,
                          in_pair_coeffs_defined,
                          in_bond_coeffs_defined,
                          in_angle_coeffs_defined,
                          in_dihedral_coeffs_defined,
                          in_improper_coeffs_defined,
                          search_instance_commands=False)
        # Here we check the contents of the the "write()" commands:
        CheckSyntaxStatic(static_tree_root,
                          static_tree_root,
                          settings.column_names,
                          data_pair_coeffs_defined,
                          data_bond_coeffs_defined,
                          data_angle_coeffs_defined,
                          data_dihedral_coeffs_defined,
                          data_improper_coeffs_defined,
                          in_pair_coeffs_defined,
                          in_bond_coeffs_defined,
                          in_angle_coeffs_defined,
                          in_dihedral_coeffs_defined,
                          in_improper_coeffs_defined,
                          search_instance_commands=True)
        # --- bond coeff check: every referenced bond type needs coeffs ---
        if 'bond' in static_tree_root.categories:
            if ((len(data_bond_coeffs_defined) > 0) and
                (len(in_bond_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"bond_coeff\" commands\n'+
                                 ' OR you can have a \"Data Bond Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_bond_coeffs_defined) > 0:
                bond_coeffs_defined = data_bond_coeffs_defined
            else:
                bond_coeffs_defined = in_bond_coeffs_defined
            bond_bindings = static_tree_root.categories['bond'].bindings
            for nd,bond_binding in bond_bindings.items():
                if not nd.IsDeleted():
                    # A type is OK when covered explicitly, via a wildcard in
                    # its own name, or via a '*' wildcard coeff definition.
                    if ((not (bond_binding in bond_coeffs_defined)) and
                        (not HasWildCard(bond_binding.full_name)) and
                        (not ('*' in bond_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing bond coeff.\n\n'+
                                         ' No coeffs for the \"'+bond_binding.full_name+'\" bond type have been\n'+
                                         'defined, but a reference to that bond type was discovered\n'+
                                         'near '+ErrorLeader(bond_binding.refs[0].srcloc.infile,
                                                             bond_binding.refs[0].srcloc.lineno)+'. Check this file and also check\n'
                                         'your \"bond_coeff\" commands or your \"Data Bond Coeffs" section.\n'
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        # --- angle coeff check (same structure as the bond check above) ---
        if 'angle' in static_tree_root.categories:
            if ((len(data_angle_coeffs_defined) > 0) and
                (len(in_angle_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"angle_coeff\" commands\n'+
                                 ' OR you can have a \"Data Angle Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_angle_coeffs_defined) > 0:
                angle_coeffs_defined = data_angle_coeffs_defined
            else:
                angle_coeffs_defined = in_angle_coeffs_defined
            angle_bindings = static_tree_root.categories['angle'].bindings
            for nd,angle_binding in angle_bindings.items():
                if not nd.IsDeleted():
                    if ((not (angle_binding in angle_coeffs_defined)) and
                        #(not HasWildCard(angle_binding.full_name)) and
                        (not ('*' in angle_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing angle coeff.\n\n'+
                                         ' No coeffs for the \"'+angle_binding.full_name+'\" angle type have been\n'+
                                         'defined, but a reference to that angle type was discovered\n'+
                                         'near '+ErrorLeader(angle_binding.refs[0].srcloc.infile,
                                                             angle_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"angle_coeff\" commands or your \"Data Angle Coeffs" section.\n'+
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        # --- dihedral coeff check ---
        if 'dihedral' in static_tree_root.categories:
            if ((len(data_dihedral_coeffs_defined) > 0) and
                (len(in_dihedral_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"dihedral_coeff\" commands\n'+
                                 ' OR you can have a \"Data Dihedral Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_dihedral_coeffs_defined) > 0:
                dihedral_coeffs_defined = data_dihedral_coeffs_defined
            else:
                dihedral_coeffs_defined = in_dihedral_coeffs_defined
            dihedral_bindings = static_tree_root.categories['dihedral'].bindings
            for nd,dihedral_binding in dihedral_bindings.items():
                if not nd.IsDeleted():
                    if ((not (dihedral_binding in dihedral_coeffs_defined)) and
                        #(not HasWildCard(dihedral_binding.full_name)) and
                        (not ('*' in dihedral_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing dihedral coeff.\n\n'+
                                         ' No coeffs for the \"'+dihedral_binding.full_name+'\" dihedral type have been\n'+
                                         'defined, but a reference to that dihedral type was discovered\n'+
                                         'near '+ErrorLeader(dihedral_binding.refs[0].srcloc.infile,
                                                             dihedral_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"dihedral_coeff\" commands or your \"Data Dihedral Coeffs" section.\n'+
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        # --- improper coeff check ---
        if 'improper' in static_tree_root.categories:
            if ((len(data_improper_coeffs_defined) > 0) and
                (len(in_improper_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"improper_coeff\" commands\n'+
                                 ' OR you can have a \"Data Improper Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_improper_coeffs_defined) > 0:
                improper_coeffs_defined = data_improper_coeffs_defined
            else:
                improper_coeffs_defined = in_improper_coeffs_defined
            improper_bindings = static_tree_root.categories['improper'].bindings
            for nd,improper_binding in improper_bindings.items():
                if not nd.IsDeleted():
                    if ((not (improper_binding in improper_coeffs_defined)) and
                        #(not HasWildCard(improper_binding.full_name)) and
                        (not ('*' in improper_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing improper coeff.\n\n'+
                                         ' No coeffs for the \"'+improper_binding.full_name+'\" improper type have been\n'+
                                         'defined, but a reference to that improper type was discovered\n'+
                                         'near '+ErrorLeader(improper_binding.refs[0].srcloc.infile,
                                                             improper_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"improper_coeff\" commands or your \"Data Improper Coeffs" section.\n'+
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        # --- pair coeff check (keys are (typeI, typeJ) pairs, so the
        # "covered" test uses (atom, atom) and ('*', '*')) ---
        if 'atom' in static_tree_root.categories:
            if ((len(data_pair_coeffs_defined) > 0) and
                (len(in_pair_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"pair_coeff\" commands\n'+
                                 ' OR you can have a \"Data Pair Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_pair_coeffs_defined) > 0:
                pair_coeffs_defined = data_pair_coeffs_defined
            else:
                pair_coeffs_defined = in_pair_coeffs_defined
            atom_bindings = static_tree_root.categories['atom'].bindings
            for nd,atom_binding in atom_bindings.items():
                if not nd.IsDeleted():
                    if ((not ((atom_binding,atom_binding)
                              in
                              pair_coeffs_defined)) and
                        (not HasWildCard(atom_binding.full_name)) and
                        (not (('*','*') in pair_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing pair coeff.\n\n'+
                                         ' No pair coeffs for the \"'+atom_binding.full_name+'\" atom type have been\n'+
                                         'defined, but a reference to that atom type was discovered\n'+
                                         'near '+ErrorLeader(atom_binding.refs[0].srcloc.infile,
                                                             atom_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"pair_coeff\" commands or your \"Data Pair Coeffs" section.\n\n'+
                                         g_no_check_msg)
        #else:
        #    raise InputError('Error: No atom types (@atom) have been defined.\n')
        sys.stderr.write(g_program_name+': -- No errors detected. --\n')
        exit(0)
    except (ValueError, InputError) as err:
        # All anticipated failures surface here; print and exit non-zero.
        sys.stderr.write('\n'+str(err)+'\n')
        sys.exit(1)
| CFDEMproject/LAMMPS | tools/moltemplate/src/lttree_check.py | Python | gpl-2.0 | 126,637 | [
"LAMMPS"
] | 29710d144a53c6745f48ff59a04697ee14fee5628c12b24659d2927fa8608867 |
"""
Core visualization operations based on PyVista.
Actual implementation of _Renderer and _Projection classes.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# Joan Massich <mailsik@gmail.com>
#
# License: Simplified BSD
from contextlib import contextmanager
from distutils.version import LooseVersion
import os
import sys
import warnings
import numpy as np
import vtk
from .base_renderer import _BaseRenderer
from ._utils import _get_colormap_from_array
from ...utils import copy_base_doc_to_subclass_doc
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvista
from pyvista import Plotter, PolyData, Line, close_all, UnstructuredGrid
try:
from pyvistaqt import BackgroundPlotter # noqa
except ImportError:
from pyvista import BackgroundPlotter
from pyvista.utilities import try_callback
from pyvista.plotting.plotting import _ALL_PLOTTERS
VTK9 = LooseVersion(vtk.VTK_VERSION) >= LooseVersion('9.0')
_FIGURES = dict()
class _Figure(object):
def __init__(self, plotter=None,
plotter_class=None,
display=None,
show=False,
title='PyVista Scene',
size=(600, 600),
shape=(1, 1),
background_color='black',
smooth_shading=True,
off_screen=False,
notebook=False):
self.plotter = plotter
self.plotter_class = plotter_class
self.display = display
self.background_color = background_color
self.smooth_shading = smooth_shading
self.notebook = notebook
self.store = dict()
self.store['show'] = show
self.store['title'] = title
self.store['window_size'] = size
self.store['shape'] = shape
self.store['off_screen'] = off_screen
self.store['border'] = False
self.store['auto_update'] = False
def build(self):
if self.plotter_class is None:
self.plotter_class = BackgroundPlotter
if self.notebook:
self.plotter_class = Plotter
if self.plotter_class == Plotter:
self.store.pop('show', None)
self.store.pop('title', None)
self.store.pop('auto_update', None)
if self.plotter is None:
plotter = self.plotter_class(**self.store)
plotter.background_color = self.background_color
self.plotter = plotter
_process_events(self.plotter)
_process_events(self.plotter)
return self.plotter
def is_active(self):
if self.plotter is None:
return False
return hasattr(self.plotter, 'ren_win')
class _Projection(object):
    """Class storing projection information.

    Attributes
    ----------
    xy : array
        Result of 2d projection of 3d data.
    pts : None
        Scene sensors handle.
    """

    def __init__(self, xy=None, pts=None):
        """Store input projection information into attributes."""
        self.xy = xy
        self.pts = pts

    def visible(self, state):
        """Modify visibility attribute of the sensors."""
        # ``pts`` is expected to expose the VTK actor API (SetVisibility);
        # a truthy ``state`` shows the sensors, falsy hides them.
        self.pts.SetVisibility(state)
def _enable_aa(figure, plotter):
"""Enable it everywhere except Azure."""
# XXX for some reason doing this on Azure causes access violations:
# ##[error]Cmd.exe exited with code '-1073741819'
# So for now don't use it there. Maybe has to do with setting these
# before the window has actually been made "active"...?
# For Mayavi we have an "on activated" event or so, we should look into
# using this for Azure at some point, too.
if os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true':
return
if figure.is_active():
if sys.platform != 'darwin':
plotter.enable_anti_aliasing()
plotter.ren_win.LineSmoothingOn()
@copy_base_doc_to_subclass_doc
class _Renderer(_BaseRenderer):
    """Class managing rendering scene.

    Attributes
    ----------
    plotter: Plotter
        Main PyVista access point.
    name: str
        Name of the window.
    """

    def __init__(self, fig=None, size=(600, 600), bgcolor='black',
                 name="PyVista Scene", show=False, shape=(1, 1),
                 notebook=None, smooth_shading=True):
        from .renderer import MNE_3D_BACKEND_TESTING
        from .._3d import _get_3d_option
        figure = _Figure(show=show, title=name, size=size, shape=shape,
                         background_color=bgcolor, notebook=notebook,
                         smooth_shading=smooth_shading)
        self.font_family = "arial"
        self.tube_n_sides = 20
        self.shape = shape
        antialias = _get_3d_option('antialias')
        self.antialias = antialias and not MNE_3D_BACKEND_TESTING
        # An integer ``fig`` is treated as a figure id for reuse of an
        # existing (still-active) plotter stored in the _FIGURES registry.
        if isinstance(fig, int):
            saved_fig = _FIGURES.get(fig)
            # Restore only active plotter
            if saved_fig is not None and saved_fig.is_active():
                self.figure = saved_fig
            else:
                self.figure = figure
                _FIGURES[fig] = self.figure
        elif fig is None:
            self.figure = figure
        else:
            self.figure = fig
        # Enable off_screen if sphinx-gallery or testing
        if pyvista.OFF_SCREEN:
            self.figure.store['off_screen'] = True
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            if MNE_3D_BACKEND_TESTING:
                self.tube_n_sides = 3
            with _disabled_depth_peeling():
                self.plotter = self.figure.build()
            self.plotter.hide_axes()
            # pyvistaqt-specific toolbars are not wanted; close if present.
            if hasattr(self.plotter, "default_camera_tool_bar"):
                self.plotter.default_camera_tool_bar.close()
            if hasattr(self.plotter, "saved_cameras_tool_bar"):
                self.plotter.saved_cameras_tool_bar.close()
            if self.antialias:
                _enable_aa(self.figure, self.plotter)

    @contextmanager
    def ensure_minimum_sizes(self):
        """Temporarily force the interactor to the requested window size."""
        sz = self.figure.store['window_size']
        # plotter:        pyvista.plotting.qt_plotting.BackgroundPlotter
        # plotter.interactor: vtk.qt.QVTKRenderWindowInteractor.QVTKRenderWindowInteractor -> QWidget  # noqa
        # plotter.app_window: pyvista.plotting.qt_plotting.MainWindow -> QMainWindow  # noqa
        # plotter.frame:  QFrame with QVBoxLayout with plotter.interactor as centralWidget  # noqa
        # plotter.ren_win: vtkXOpenGLRenderWindow
        self.plotter.interactor.setMinimumSize(*sz)
        try:
            yield
        finally:
            # Process events so the resize takes effect, then relax the
            # minimum so the user can shrink the window afterwards.
            for _ in range(2):
                self.plotter.app.processEvents()
            self.plotter.interactor.setMinimumSize(0, 0)

    def subplot(self, x, y):
        # Clamp (x, y) into the valid subplot grid.
        x = np.max([0, np.min([x, self.shape[0] - 1])])
        y = np.max([0, np.min([y, self.shape[1] - 1])])
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            self.plotter.subplot(x, y)
        if self.antialias:
            _enable_aa(self.figure, self.plotter)

    def scene(self):
        return self.figure

    def set_interactive(self):
        self.plotter.enable_terrain_style()

    def polydata(self, mesh, color=None, opacity=1.0, normals=None,
                 backface_culling=False, scalars=None, colormap=None,
                 vmin=None, vmax=None, interpolate_before_map=True,
                 representation='surface', line_width=1., **kwargs):
        """Add an existing PolyData mesh to the scene; return (actor, mesh)."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            rgba = False
            # A per-vertex color array is converted to ubyte RGBA scalars.
            if color is not None and len(color) == mesh.n_points:
                if color.shape[1] == 3:
                    scalars = np.c_[color, np.ones(mesh.n_points)]
                else:
                    scalars = color
                scalars = (scalars * 255).astype('ubyte')
                color = None
                rgba = True
            # An ndarray colormap is wrapped into a matplotlib colormap.
            if isinstance(colormap, np.ndarray):
                if colormap.dtype == np.uint8:
                    colormap = colormap.astype(np.float64) / 255.
                from matplotlib.colors import ListedColormap
                colormap = ListedColormap(colormap)
            if normals is not None:
                mesh.point_arrays["Normals"] = normals
                mesh.GetPointData().SetActiveNormals("Normals")
            else:
                _compute_normals(mesh)
            actor = _add_mesh(
                plotter=self.plotter,
                mesh=mesh, color=color, scalars=scalars,
                rgba=rgba, opacity=opacity, cmap=colormap,
                backface_culling=backface_culling,
                rng=[vmin, vmax], show_scalar_bar=False,
                smooth_shading=self.figure.smooth_shading,
                interpolate_before_map=interpolate_before_map,
                style=representation, line_width=line_width, **kwargs,
            )
            return actor, mesh

    def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
             backface_culling=False, scalars=None, colormap=None,
             vmin=None, vmax=None, interpolate_before_map=True,
             representation='surface', line_width=1., normals=None, **kwargs):
        """Build a PolyData from vertices/triangles and delegate to polydata()."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            vertices = np.c_[x, y, z]
            # VTK face format: each triangle prefixed by its vertex count (3).
            triangles = np.c_[np.full(len(triangles), 3), triangles]
            mesh = PolyData(vertices, triangles)
        return self.polydata(
            mesh=mesh,
            color=color,
            opacity=opacity,
            normals=normals,
            backface_culling=backface_culling,
            scalars=scalars,
            colormap=colormap,
            vmin=vmin,
            vmax=vmax,
            interpolate_before_map=interpolate_before_map,
            representation=representation,
            line_width=line_width,
            **kwargs,
        )

    def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
                vmin=None, vmax=None, colormap=None,
                normalized_colormap=False, kind='line', color=None):
        """Draw isosurface contours of ``scalars`` on ``surface``."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            if colormap is not None:
                colormap = _get_colormap_from_array(colormap,
                                                    normalized_colormap)
            vertices = np.array(surface['rr'])
            triangles = np.array(surface['tris'])
            n_triangles = len(triangles)
            triangles = np.c_[np.full(n_triangles, 3), triangles]
            mesh = PolyData(vertices, triangles)
            mesh.point_arrays['scalars'] = scalars
            contour = mesh.contour(isosurfaces=contours, rng=(vmin, vmax))
            line_width = width
            # 'tube' renders the contour lines as 3D tubes of radius ``width``.
            if kind == 'tube':
                contour = contour.tube(radius=width, n_sides=self.tube_n_sides)
                line_width = 1.0
            actor = _add_mesh(
                plotter=self.plotter,
                mesh=contour,
                show_scalar_bar=False,
                line_width=line_width,
                color=color,
                cmap=colormap,
                opacity=opacity,
                smooth_shading=self.figure.smooth_shading
            )
            return actor, contour

    def surface(self, surface, color=None, opacity=1.0,
                vmin=None, vmax=None, colormap=None,
                normalized_colormap=False, scalars=None,
                backface_culling=False):
        """Render a dict-style surface ({'rr', 'tris', optional 'nn'})."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            normals = surface.get('nn', None)
            vertices = np.array(surface['rr'])
            triangles = np.array(surface['tris'])
            triangles = np.c_[np.full(len(triangles), 3), triangles]
            mesh = PolyData(vertices, triangles)
            colormap = _get_colormap_from_array(colormap, normalized_colormap)
            if scalars is not None:
                mesh.point_arrays['scalars'] = scalars
        return self.polydata(
            mesh=mesh,
            color=color,
            opacity=opacity,
            normals=normals,
            backface_culling=backface_culling,
            scalars=scalars,
            colormap=colormap,
            vmin=vmin,
            vmax=vmax,
        )

    def sphere(self, center, color, scale, opacity=1.0,
               resolution=8, backface_culling=False,
               radius=None):
        """Render spheres (glyphs) at ``center``; return (actor, glyph)."""
        # When an explicit radius is given the glyph factor is not scaled.
        factor = 1.0 if radius is not None else scale
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            sphere = vtk.vtkSphereSource()
            sphere.SetThetaResolution(resolution)
            sphere.SetPhiResolution(resolution)
            if radius is not None:
                sphere.SetRadius(radius)
            sphere.Update()
            geom = sphere.GetOutput()
            mesh = PolyData(np.array(center))
            glyph = mesh.glyph(orient=False, scale=False,
                               factor=factor, geom=geom)
            actor = _add_mesh(
                self.plotter,
                mesh=glyph, color=color, opacity=opacity,
                backface_culling=backface_culling,
                smooth_shading=self.figure.smooth_shading
            )
            return actor, glyph

    def tube(self, origin, destination, radius=0.001, color='white',
             scalars=None, vmin=None, vmax=None, colormap='RdBu',
             normalized_colormap=False, reverse_lut=False):
        """Render tubes between paired origin/destination points.

        NOTE(review): only the tube from the final pair is returned.
        """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            cmap = _get_colormap_from_array(colormap, normalized_colormap)
            for (pointa, pointb) in zip(origin, destination):
                line = Line(pointa, pointb)
                if scalars is not None:
                    line.point_arrays['scalars'] = scalars[0, :]
                    scalars = 'scalars'
                    color = None
                else:
                    scalars = None
                tube = line.tube(radius, n_sides=self.tube_n_sides)
                _add_mesh(
                    plotter=self.plotter,
                    mesh=tube,
                    scalars=scalars,
                    flip_scalars=reverse_lut,
                    rng=[vmin, vmax],
                    color=color,
                    show_scalar_bar=False,
                    cmap=cmap,
                    smooth_shading=self.figure.smooth_shading,
                )
        return tube

    def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
                 glyph_height=None, glyph_center=None, glyph_resolution=None,
                 opacity=1.0, scale_mode='none', scalars=None,
                 backface_culling=False, line_width=2., name=None):
        """Render vector glyphs (arrows/cones/cylinders) at (x, y, z)."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            factor = scale
            vectors = np.c_[u, v, w]
            points = np.vstack(np.c_[x, y, z])
            n_points = len(points)
            cell_type = np.full(n_points, vtk.VTK_VERTEX)
            cells = np.c_[np.full(n_points, 1), range(n_points)]
            args = (cells, cell_type, points)
            # Pre-VTK9 UnstructuredGrid also requires cell offsets.
            if not VTK9:
                args = (np.arange(n_points) * 3,) + args
            grid = UnstructuredGrid(*args)
            grid.point_arrays['vec'] = vectors
            if scale_mode == 'scalar':
                grid.point_arrays['mag'] = np.array(scalars)
                scale = 'mag'
            else:
                scale = False
            if mode == '2darrow':
                return _arrow_glyph(grid, factor)
            elif mode == 'arrow' or mode == '3darrow':
                _add_mesh(
                    self.plotter,
                    mesh=grid.glyph(orient='vec',
                                    scale=scale,
                                    factor=factor),
                    color=color,
                    opacity=opacity,
                    backface_culling=backface_culling
                )
            elif mode == 'cone':
                cone = vtk.vtkConeSource()
                if glyph_height is not None:
                    cone.SetHeight(glyph_height)
                if glyph_center is not None:
                    cone.SetCenter(glyph_center)
                if glyph_resolution is not None:
                    cone.SetResolution(glyph_resolution)
                cone.Update()
                geom = cone.GetOutput()
                _add_mesh(
                    self.plotter,
                    mesh=grid.glyph(orient='vec',
                                    scale=scale,
                                    factor=factor,
                                    geom=geom),
                    color=color,
                    opacity=opacity,
                    backface_culling=backface_culling
                )
            elif mode == 'cylinder':
                cylinder = vtk.vtkCylinderSource()
                cylinder.SetHeight(glyph_height)
                cylinder.SetRadius(0.15)
                cylinder.SetCenter(glyph_center)
                cylinder.SetResolution(glyph_resolution)
                cylinder.Update()
                # fix orientation
                tr = vtk.vtkTransform()
                tr.RotateWXYZ(90, 0, 0, 1)
                trp = vtk.vtkTransformPolyDataFilter()
                trp.SetInputData(cylinder.GetOutput())
                trp.SetTransform(tr)
                trp.Update()
                geom = trp.GetOutput()
                _add_mesh(
                    self.plotter,
                    mesh=grid.glyph(orient='vec',
                                    scale=scale,
                                    factor=factor,
                                    geom=geom),
                    color=color,
                    opacity=opacity,
                    backface_culling=backface_culling
                )

    def text2d(self, x_window, y_window, text, size=14, color='white',
               justification=None):
        """Add 2D text at normalized window coordinates; return the actor."""
        size = 14 if size is None else size
        position = (x_window, y_window)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            actor = self.plotter.add_text(text, position=position,
                                          font_size=size,
                                          font=self.font_family,
                                          color=color,
                                          viewport=True)
            if isinstance(justification, str):
                if justification == 'left':
                    actor.GetTextProperty().SetJustificationToLeft()
                elif justification == 'center':
                    actor.GetTextProperty().SetJustificationToCentered()
                elif justification == 'right':
                    actor.GetTextProperty().SetJustificationToRight()
                else:
                    raise ValueError('Expected values for `justification`'
                                     'are `left`, `center` or `right` but '
                                     'got {} instead.'.format(justification))
        return actor

    def text3d(self, x, y, z, text, scale, color='white'):
        """Add a 3D point label at (x, y, z)."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            self.plotter.add_point_labels(points=[x, y, z],
                                          labels=[text],
                                          point_size=scale,
                                          text_color=color,
                                          font_family=self.font_family,
                                          name=text,
                                          shape_opacity=0)

    def scalarbar(self, source, color="white", title=None, n_labels=4,
                  bgcolor=None):
        """Add a scalar bar to the scene."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            self.plotter.add_scalar_bar(color=color, title=title,
                                        n_labels=n_labels,
                                        use_opacity=False, n_colors=256,
                                        position_x=0.15,
                                        position_y=0.05, width=0.7,
                                        shadow=False, bold=True,
                                        label_font_size=22,
                                        font_family=self.font_family,
                                        background_color=bgcolor)

    def show(self):
        """Show the window (Qt path enforces the minimum window size)."""
        self.figure.display = self.plotter.show()
        if hasattr(self.plotter, "app_window"):
            with self.ensure_minimum_sizes():
                self.plotter.app_window.show()
                _process_events(self.plotter, show=True)
        return self.scene()

    def close(self):
        _close_3d_figure(figure=self.figure)

    def set_camera(self, azimuth=None, elevation=None, distance=None,
                   focalpoint=None):
        _set_3d_view(self.figure, azimuth=azimuth, elevation=elevation,
                     distance=distance, focalpoint=focalpoint)

    def reset_camera(self):
        self.plotter.reset_camera()

    def screenshot(self, mode='rgb', filename=None):
        return _take_3d_screenshot(figure=self.figure, mode=mode,
                                   filename=filename)

    def project(self, xyz, ch_names):
        """Project 3D sensor positions to 2D; return a _Projection."""
        xy = _3d_to_2d(self.plotter, xyz)
        xy = dict(zip(ch_names, xy))
        # pts = self.fig.children[-1]
        pts = self.plotter.renderer.GetActors().GetLastItem()
        return _Projection(xy=xy, pts=pts)

    def enable_depth_peeling(self):
        # Depth peeling is only meaningful for on-screen rendering here.
        if not self.figure.store['off_screen']:
            for renderer in self.plotter.renderers:
                renderer.enable_depth_peeling()

    def remove_mesh(self, mesh_data):
        """Remove a previously added (actor, mesh) pair from the scene."""
        actor, _ = mesh_data
        self.plotter.renderer.remove_actor(actor)
def _compute_normals(mesh):
    """Patch PyVista compute_normals."""
    # Skip if normals are already attached; otherwise compute them in place.
    if 'Normals' in mesh.point_arrays:
        return
    mesh.compute_normals(
        cell_normals=False,
        consistent_normals=False,
        non_manifold_traversal=False,
        inplace=True,
    )
def _add_mesh(plotter, *args, **kwargs):
    """Patch PyVista add_mesh."""
    _process_events(plotter)
    mesh = kwargs.get('mesh')
    # Default to smooth (Phong) shading unless explicitly disabled.
    smooth_shading = kwargs.pop('smooth_shading', True)
    actor = plotter.add_mesh(*args, **kwargs)
    if smooth_shading and 'Normals' in mesh.point_arrays:
        actor_prop = actor.GetProperty()
        actor_prop.SetInterpolationToPhong()
    return actor
def _deg2rad(deg):
return deg * np.pi / 180.
def _rad2deg(rad):
return rad * 180. / np.pi
def _mat_to_array(vtk_mat):
e = [vtk_mat.GetElement(i, j) for i in range(4) for j in range(4)]
arr = np.array(e, dtype=float)
arr.shape = (4, 4)
return arr
def _3d_to_2d(plotter, xyz):
    """Project 3D scene coordinates to 2D display coordinates."""
    size = plotter.window_size
    # Append the homogeneous coordinate.
    xyz = np.column_stack([xyz, np.ones(xyz.shape[0])])
    # Transform points into 'unnormalized' view coordinates.
    world_to_view = _get_world_to_view_matrix(plotter)
    view_coords = np.dot(world_to_view, xyz.T).T
    # Divide through by the fourth element for normalized view coords.
    norm_view_coords = view_coords / view_coords[:, 3].reshape(-1, 1)
    # Transform from normalized view coordinates to display coordinates.
    view_to_disp = _get_view_to_display_matrix(size)
    disp_coords = np.dot(view_to_disp, norm_view_coords.T).T
    # Only the first two columns are meaningful for 2D plotting.
    return disp_coords[:, :2]
def _get_world_to_view_matrix(plotter):
    """Return the camera's composite world-to-view transform as an ndarray."""
    cam = plotter.renderer.camera
    scene_size = plotter.window_size
    near, far = plotter.renderer.camera.GetClippingRange() if False else cam.GetClippingRange()
    aspect_ratio = float(scene_size[0]) / scene_size[1]
    # Composite = projection * view, for the current aspect/clipping setup.
    composite = cam.GetCompositeProjectionTransformMatrix(
        aspect_ratio, near, far)
    return _mat_to_array(composite)
def _get_view_to_display_matrix(size):
x, y = size
view_to_disp_mat = np.array([[x / 2.0, 0., 0., x / 2.0],
[0., -y / 2.0, 0., y / 2.0],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
return view_to_disp_mat
def _close_all():
    """Close every open PyVista plotter, silencing deprecation warnings."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        close_all()
def _get_camera_direction(focalpoint, position):
x, y, z = position - focalpoint
r = np.sqrt(x * x + y * y + z * z)
theta = np.arccos(z / r)
phi = np.arctan2(y, x)
return r, theta, phi, focalpoint
def _set_3d_view(figure, azimuth, elevation, focalpoint, distance):
    """Position the camera from spherical-coordinate view parameters.

    Any of azimuth/elevation/distance/focalpoint may be None, in which case
    the current camera value (or a bounds-derived default) is kept.
    """
    position = np.array(figure.plotter.camera_position[0])
    focalpoint = np.array(figure.plotter.camera_position[1])
    r, theta, phi, fp = _get_camera_direction(focalpoint, position)
    if azimuth is not None:
        phi = _deg2rad(azimuth)
    if elevation is not None:
        theta = _deg2rad(elevation)
    renderer = figure.plotter.renderer
    bounds = np.array(renderer.ComputeVisiblePropBounds())
    # Default distance: twice the largest visible extent.
    if distance is not None:
        r = distance
    else:
        r = max(bounds[1::2] - bounds[::2]) * 2.0
        distance = r
    # Default focal point: center of the visible bounds.
    if focalpoint is not None:
        cen = np.asarray(focalpoint)
    else:
        cen = (bounds[1::2] + bounds[::2]) * 0.5
        focalpoint = cen
    # Now calculate the view_up vector of the camera.  If the view up is
    # close to the 'z' axis, the view plane normal is parallel to the
    # camera which is unacceptable, so we use a different view up.
    if elevation is None or 5. <= abs(elevation) <= 175.:
        view_up = [0, 0, 1]
    else:
        view_up = [np.sin(phi), np.cos(phi), 0]
    # Spherical -> Cartesian camera position relative to the origin.
    position = [
        r * np.cos(phi) * np.sin(theta),
        r * np.sin(phi) * np.sin(theta),
        r * np.cos(theta)]
    figure.plotter.camera_position = [
        position, cen, view_up]
    # Cache the requested view parameters on the renderer for later reuse.
    figure.plotter.renderer._azimuth = azimuth
    figure.plotter.renderer._elevation = elevation
    figure.plotter.renderer._distance = distance
def _set_3d_title(figure, title, size=16):
    """Overlay *title* on the figure as white text of font size *size*."""
    with warnings.catch_warnings():
        # add_text currently emits a FutureWarning in some pyvista versions
        warnings.simplefilter("ignore", FutureWarning)
        figure.plotter.add_text(title, font_size=size, color='white')
def _check_3d_figure(figure):
    """Raise TypeError unless *figure* is a backend ``_Figure`` instance."""
    if isinstance(figure, _Figure):
        return
    raise TypeError('figure must be an instance of _Figure.')
def _close_3d_figure(figure):
    """Close the figure's window and release its plotter resources."""
    plotter = figure.plotter
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FutureWarning)
        # close the window
        plotter.close()
        _process_events(plotter)
        # free memory and deregister from the scraper
        plotter.deep_clean()
        del _ALL_PLOTTERS[plotter._id_name]
        _process_events(plotter)
def _take_3d_screenshot(figure, mode='rgb', filename=None):
    """Render the figure to an image array, optionally writing *filename*.

    ``mode='rgba'`` requests a transparent background.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FutureWarning)
        _process_events(figure.plotter)
        want_alpha = (mode == 'rgba')
        return figure.plotter.screenshot(
            transparent_background=want_alpha,
            filename=filename)
def _process_events(plotter, show=False):
if hasattr(plotter, 'app'):
plotter.app.processEvents()
if show:
plotter.app_window.show()
def _set_colormap_range(actor, ctable, scalar_bar, rng=None):
    """Install color table *ctable* on the actor's lookup table.

    When *rng* is given, the mapper and lookup table ranges are updated to
    ``(rng[0], rng[1])``; the scalar bar (if any) is pointed at the table.
    """
    from vtk.util.numpy_support import numpy_to_vtk
    mapper = actor.GetMapper()
    lut = mapper.GetLookupTable()
    # Catch: FutureWarning: Conversion of the second argument of
    # issubdtype from `complex` to `np.complexfloating` is deprecated.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FutureWarning)
        lut.SetTable(numpy_to_vtk(ctable))
    if rng is not None:
        low, high = rng[0], rng[1]
        mapper.SetScalarRange(low, high)
        lut.SetRange(low, high)
    if scalar_bar is not None:
        scalar_bar.SetLookupTable(actor.GetMapper().GetLookupTable())
def _set_mesh_scalars(mesh, scalars, name):
# Catch: FutureWarning: Conversion of the second argument of
# issubdtype from `complex` to `np.complexfloating` is deprecated.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
mesh.point_arrays[name] = scalars
def _update_slider_callback(slider, callback, event_type):
    """(Re)attach *callback* to *slider* for the given interaction phase.

    ``event_type`` is one of ``'start'``, ``'end'`` or ``'always'``.  The
    callback receives the slider's current value.
    """
    def _forward_value(widget, event):
        current = widget.GetRepresentation().GetValue()
        if hasattr(callback, '__call__'):
            try_callback(callback, current)
        return

    if event_type == 'start':
        event = vtk.vtkCommand.StartInteractionEvent
    elif event_type == 'end':
        event = vtk.vtkCommand.EndInteractionEvent
    elif event_type == 'always':
        event = vtk.vtkCommand.InteractionEvent

    # replace any previous observer for this event before adding ours
    slider.RemoveObserver(event)
    slider.AddObserver(event, _forward_value)
def _add_camera_callback(camera, callback):
    """Invoke *callback* whenever the camera is modified."""
    camera.AddObserver(vtk.vtkCommand.ModifiedEvent, callback)
def _update_picking_callback(plotter,
                             on_mouse_move,
                             on_button_press,
                             on_button_release,
                             on_pick):
    """Wire mouse/pick callbacks into the plotter's interactor.

    A fresh ``vtkCellPicker`` is installed on the plotter so that
    ``on_pick`` fires at the end of every pick.
    """
    interactor = plotter.iren
    handlers = (
        (vtk.vtkCommand.RenderEvent, on_mouse_move),
        (vtk.vtkCommand.LeftButtonPressEvent, on_button_press),
        (vtk.vtkCommand.EndInteractionEvent, on_button_release),
    )
    for event, handler in handlers:
        interactor.AddObserver(event, handler)

    picker = vtk.vtkCellPicker()
    picker.AddObserver(vtk.vtkCommand.EndPickEvent, on_pick)
    plotter.picker = picker
def _arrow_glyph(grid, factor):
    """Build arrow glyphs for the ``'vec'`` vectors stored on *grid*.

    Returns a pyvista-wrapped polydata with one arrow per point, scaled by
    vector magnitude times *factor*.
    """
    source = vtk.vtkGlyphSource2D()
    source.SetGlyphTypeToArrow()
    source.FilledOff()
    source.Update()

    # fix position: shift the arrow so it starts at the anchor point
    transform = vtk.vtkTransform()
    transform.Translate(0.5, 0., 0.)
    transform_filter = vtk.vtkTransformPolyDataFilter()
    transform_filter.SetInputData(source.GetOutput())
    transform_filter.SetTransform(transform)
    transform_filter.Update()

    polydata = _glyph(
        grid,
        scale_mode='vector',
        scalars=False,
        orient='vec',
        factor=factor,
        geom=transform_filter.GetOutput(),
    )
    return pyvista.wrap(polydata)
def _glyph(dataset, scale_mode='scalar', orient=True, scalars=True, factor=1.0,
           geom=None, tolerance=0.0, absolute=False, clamping=False, rng=None):
    """Run ``vtkGlyph3D`` over *dataset* and return the resulting polydata.

    *geom* defaults to an arrow source.  *scalars*/*orient* may be array
    names to activate on the dataset first.  Note: *tolerance* and
    *absolute* are accepted for API compatibility but are not used here.
    """
    if geom is None:
        default_source = vtk.vtkArrowSource()
        default_source.Update()
        geom = default_source.GetOutput()

    glyph_filter = vtk.vtkGlyph3D()
    glyph_filter.SetSourceData(geom)

    # Activate named arrays if the caller passed strings instead of flags.
    if isinstance(scalars, str):
        dataset.active_scalars_name = scalars
    if isinstance(orient, str):
        dataset.active_vectors_name = orient
        orient = True

    if scale_mode == 'scalar':
        glyph_filter.SetScaleModeToScaleByScalar()
    elif scale_mode == 'vector':
        glyph_filter.SetScaleModeToScaleByVector()
    else:
        glyph_filter.SetScaleModeToDataScalingOff()

    if rng is not None:
        glyph_filter.SetRange(rng)
    glyph_filter.SetOrient(orient)
    glyph_filter.SetInputData(dataset)
    glyph_filter.SetScaleFactor(factor)
    glyph_filter.SetClamping(clamping)
    glyph_filter.Update()
    return glyph_filter.GetOutput()
def _sphere(plotter, center, color, radius):
    """Add a low-resolution (8x8) sphere mesh to *plotter*.

    Returns the ``(actor, mesh)`` pair created by ``_add_mesh``.
    """
    source = vtk.vtkSphereSource()
    source.SetThetaResolution(8)
    source.SetPhiResolution(8)
    source.SetRadius(radius)
    source.SetCenter(center)
    source.Update()

    mesh = pyvista.wrap(source.GetOutput())
    actor = _add_mesh(plotter, mesh=mesh, color=color)
    return actor, mesh
def _require_minimum_version(version_required):
    """Raise ImportError if the installed pyvista is older than required."""
    from distutils.version import LooseVersion
    found = LooseVersion(pyvista.__version__)
    if found < version_required:
        raise ImportError('pyvista>={} is required for this module but the '
                          'version found is {}'.format(version_required,
                                                       found))
@contextmanager
def _testing_context(interactive):
    """Temporarily toggle off-screen rendering for (interactive) tests.

    Saves and restores ``pyvista.OFF_SCREEN`` and the renderer module's
    ``MNE_3D_BACKEND_TESTING`` flag.
    """
    from . import renderer
    orig_offscreen = pyvista.OFF_SCREEN
    orig_testing = renderer.MNE_3D_BACKEND_TESTING
    if interactive:
        pyvista.OFF_SCREEN = False
        renderer.MNE_3D_BACKEND_TESTING = False
    else:
        # NOTE(review): only OFF_SCREEN is forced here; MNE_3D_BACKEND_TESTING
        # keeps its current value — confirm it is already True on the
        # non-interactive path.
        pyvista.OFF_SCREEN = True
    try:
        yield
    finally:
        pyvista.OFF_SCREEN = orig_offscreen
        renderer.MNE_3D_BACKEND_TESTING = orig_testing
@contextmanager
def _disabled_depth_peeling():
    """Context manager that disables pyvista depth peeling, then restores it."""
    from pyvista import rcParams
    saved = rcParams["depth_peeling"]["enabled"]
    rcParams["depth_peeling"]["enabled"] = False
    try:
        yield
    finally:
        rcParams["depth_peeling"]["enabled"] = saved
| Teekuningas/mne-python | mne/viz/backends/_pyvista.py | Python | bsd-3-clause | 33,533 | [
"Mayavi",
"VTK"
] | d8b55e2c19afe5932d3187bbb0c20f53d152bb01fa65acb8f80d11e884f86339 |
#!/usr/bin/env python
"""
manipulate meteadata in the FileCatalog
"""
import os
import DIRAC
from DIRAC import S_OK, S_ERROR
from COMDIRAC.Interfaces import critical
from COMDIRAC.Interfaces import DSession
from COMDIRAC.Interfaces import DCatalog
from COMDIRAC.Interfaces import pathFromArgument
class DMetaCommand( object ):
  """Abstract base class for dmeta sub-commands (add/rm/ls).

  Subclasses must implement run().
  """
  def run( self, lfn, metas ):
    """Execute the command for *lfn* with metadata arguments *metas*."""
    raise NotImplementedError
class DMetaAdd( DMetaCommand ):
  """Attach metadata key=value pairs to an LFN via the FileCatalog client."""
  def __init__( self, fcClient ):
    self.fcClient = fcClient

  def run( self, lfn, metas ):
    # Each entry of *metas* is a "name=value" string; collect them into a
    # dict and set them all in a single FileCatalog call.
    metadict = {}
    for meta in metas:
      name, value = meta.split( "=" )
      metadict[name] = value
    result = self.fcClient.fc.setMetadata( lfn, metadict )
    if not result[ "OK" ]:
      print "Error:", result
class DMetaRm( DMetaCommand ):
  """Remove metadata entries from an LFN through the FileCatalog CLI."""
  def __init__( self, fcClient ):
    self.fcClient = fcClient

  def run( self, lfn, metas ):
    # One CLI "meta remove" invocation per metadata name.
    for name in metas:
      self.fcClient.do_meta( "remove %s %s" % ( lfn, name ))
class DMetaList( DMetaCommand ):
  """Print metadata of an LFN; all entries, or only the requested names."""
  def __init__( self, catalog ):
    self.catalog = catalog

  def run( self, lfn, metas ):
    retVal = self.catalog.getMeta( lfn )
    if not retVal[ "OK" ]:
      print "Error:", retVal[ "Message" ]
      DIRAC.exit( -1 )
    metadict = retVal[ "Value" ]
    if not metas:
      # No names given: dump every metadata entry.
      for k, v in metadict.items( ):
        print k+"="+str( v )
    else:
      # Print only the requested names; unknown names are silently skipped.
      for meta in metas:
        if meta in metadict.keys( ):
          print meta+"="+metadict[ meta ]
if __name__ == "__main__":
  import sys
  from DIRAC.Core.Base import Script

  # Holds the state of the command-line switches (-i / -I).
  class Params:
    def __init__ ( self ):
      self.index = False
      self.listIndex = False

    def setIndex( self, arg ):
      print "index", arg
      self.index = arg
      return S_OK( )

    def getIndex( self ):
      return self.index

    def setListIndex( self, arg ):
      # NOTE(review): unlike setIndex, this does not return S_OK( ) —
      # confirm whether the switch machinery requires a return value here.
      self.listIndex = True

    def getListIndex( self ):
      return self.listIndex

  params = Params( )

  Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                       'Usage:',
                                       ' %s -I' % Script.scriptName,
                                       ' - list metadata indices',
                                       '++ OR ++',
                                       ' %s -i r meta...' % Script.scriptName,
                                       ' - delete metadata index',
                                       'Arguments:',
                                       ' meta: metadata index to be deleted',
                                       '++ OR ++',
                                       ' %s -i f|d meta=(int|float|string|date)' % Script.scriptName,
                                       ' - add metadata index for files or directories',
                                       'Arguments:',
                                       ' meta=type: metadata index to be added',
                                       '++ OR ++',
                                       ' %s add|rm|ls lfn meta[=value]...' % Script.scriptName,
                                       ' - manipulate metadata for lfn',
                                       'Arguments:',
                                       ' lfn: path',
                                       ' meta: metadata (with value for add)',
                                       '', 'Examples:',
                                       ' $ dmeta add ./some_lfn_file some_meta="some_value"',
                                       ' $ dmeta ls ./some_lfn_file',
                                       ' $ dmeta rm ./some_lfn_file some_meta',
                                       ' $ dmeta ls ./some_lfn_file',
                                       ] )
                          )
  Script.registerSwitch( "i:", "index=", "set or remove metadata indices", params.setIndex )
  Script.registerSwitch( "I", "list-index", "list defined metadata indices", params.setListIndex )

  Script.parseCommandLine( ignoreErrors = True )
  args = Script.getPositionalArgs()

  session = DSession( )
  catalog = DCatalog( )

  from DIRAC.DataManagementSystem.Client.FileCatalogClientCLI import FileCatalogClientCLI
  fccli = FileCatalogClientCLI( catalog.catalog )

  # -i: add ("f"/"d" for file/directory) or remove ("r") metadata indices,
  # then exit immediately.
  if params.getIndex( ):
    if params.getIndex( ) == "r":
      for meta in args:
        cmdline = "index -r %s" % meta
        #print cmdline
        fccli.do_meta( cmdline )
    else:
      for arg in args:
        meta, type = arg.split( "=" )
        cmdline = "index -%s %s %s" % ( params.getIndex( ), meta, type )
        #print cmdline
        fccli.do_meta( cmdline )
    DIRAC.exit( 0 )

  # -I: list the defined metadata indices, then exit.
  if params.getListIndex( ):
    fccli.do_meta( "show" )
    DIRAC.exit( 0 )

  # Dispatch table mapping sub-command names to their handlers.
  meta_commands = {
    "add" : DMetaAdd( fccli ),
    "rm" : DMetaRm( fccli ),
    "ls" : DMetaList( catalog )
  }

  if len( args ) < 2:
    print "Error: Not enough arguments provided\n%s:" % Script.scriptName
    Script.showHelp( )
    DIRAC.exit( -1 )

  command = args[ 0 ]

  if command not in meta_commands.keys( ):
    print "Error: Unknown dmeta command \"%s\"" % command
    print "%s:" % Script.scriptName
    Script.showHelp( )
    DIRAC.exit( -1 )

  command = meta_commands[ command ]
  # First positional argument is the LFN (resolved relative to the session
  # working directory); the rest are metadata arguments.
  lfn = pathFromArgument( session, args[ 1 ] )
  metas = args[ 2: ]
  command.run( lfn, metas )
| pigay/COMDIRAC | Interfaces/scripts/dmeta.py | Python | gpl-3.0 | 5,307 | [
"DIRAC"
] | f0d320941e86a282c2f7a35e5a5b5728f4ffbc1d10adf263371cde1db315f827 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import logging
import os
import re
import tarfile
from io import TextIOWrapper
import pysam
import paleomix.common.yaml as yaml
from paleomix.common.formats.fasta import FASTA
from paleomix.pipelines.zonkey.common import contig_name_to_plink_name, get_sample_names
# Keys that must be present in the database's settings.yaml; all values are
# non-negative integers except "Plink", which is a string (validated in
# ZonkeyDB._read_settings).
_SETTINGS_KEYS = (
    "Format",
    "Revision",
    "Plink",
    "NChroms",
    "MitoPadding",
    "SNPDistance",
)
class BAMInfo:
    """Mutable record describing what a validated sample BAM contains.

    Filled in by the ``_validate_*_bam`` helpers: nuclear contig name
    mapping and/or mitochondrial contig name, length, and padding.
    """

    def __init__(self):
        # Mapping of reference-panel contig name -> BAM contig name.
        self.nuclear_contigs = {}
        # Mitochondrial contig name / length / padding (None until found).
        self.mt_contig = None
        self.mt_length = None
        self.mt_padding = None

    @property
    def is_nuclear(self):
        """True if any nuclear contigs were matched."""
        return bool(self.nuclear_contigs)

    @property
    def is_mitochondrial(self):
        """True if a mitochondrial contig was identified."""
        return bool(self.mt_contig)

    def __repr__(self):
        template = "BAMInfo(nuclear=%r, mt_contig=%r, mt_length=%r, mt_padding=%r)"
        return template % (
            self.nuclear_contigs,
            self.mt_contig,
            self.mt_length,
            self.mt_padding,
        )
# Format number for database file; is incremented when the format is changed.
# The 'revision' field specifies updates to the table that do not change the
# format of the database (see below).
_SUPPORTED_DB_FORMAT_MAJOR = 1
_SUPPORTED_DB_FORMAT_MINOR = 20160112

# Required columns in the 'contigs.txt' table; additional columns are ignored
_CONTIGS_TABLE_COLUMNS = frozenset(("ID", "Size", "Checksum"))

# Required columns in the 'samples.txt' table; additional non-group columns are ignored
_SAMPLES_TABLE_COLUMNS = frozenset(("ID", "Species", "Sex", "SampleID", "Publication"))

# Regular expression for parsing Group(K) columns in samples.txt,
# where K is the number of ancestral groups (captured as group "K").
_SAMPLES_TABLE_GROUP = re.compile(r"^Group\((?P<K>.+)\)$")
class ZonkeyDBError(RuntimeError):
    """Raised when a Zonkey database file is missing, malformed, or invalid."""
class ZonkeyDB:
    """Reader and validator for Zonkey reference databases.

    A database is an uncompressed tar archive containing a settings file,
    contig/sample tables, genotypes, optional mitochondrial sequences and
    optional simulation results.  All content is validated on load, and
    ZonkeyDBError is raised for any inconsistency.
    """

    def __init__(self, filename):
        self.filename = filename

        log = logging.getLogger(__name__)
        log.info("Reading Zonkey database from %r" % (filename,))

        try:
            # Require that the file is not gzip / bzip2 compressed
            _check_file_compression(filename)

            with tarfile.open(filename, "r:") as tar_handle:
                log.info("Reading settings")
                self.settings = self._read_settings(tar_handle, "settings.yaml")

                log.info("Reading list of contigs")
                self.contigs = self._read_contigs_table(tar_handle, "contigs.txt")

                log.info("Reading list of samples")
                self.samples, self.groups = self._read_samples_table(
                    tar_handle, "samples.txt"
                )

                log.info("Reading mitochondrial sequences")
                self.mitochondria = self._read_mitochondria(
                    tar_handle, "mitochondria.fasta"
                )

                log.info("Reading emperical admixture distribution")
                self.simulations = self._read_simulations(tar_handle, "simulations.txt")

                log.info("Determining sample order")
                self.sample_order = self._read_sample_order(tar_handle, "genotypes.txt")
        except (OSError, tarfile.TarError) as error:
            raise ZonkeyDBError(str(error))

        self._cross_validate()

    def validate_bam(self, filename):
        """Validates a sample BAM file, checking that it is either a valid
        mitochondrial BAM (aligned against one of the referenc mt sequences),
        or that it is a valid nuclear BAM (aligned against the reference).

        Returns one of INVALID_BAMFILE, NUC_BAMFILE, and MITO_BAMFILE.
        """
        log = logging.getLogger(__name__)
        log.info("Validating BAM file %r ", filename)

        try:
            handle = pysam.AlignmentFile(filename)
        except (ValueError, IOError) as error:
            log.error("Error reading BAM: %s", error)
            return

        return self.validate_bam_handle(handle)

    def validate_bam_handle(self, handle):
        """Validate an open pysam AlignmentFile; returns a BAMInfo or None."""
        if len(get_sample_names(handle)) > 1:
            log = logging.getLogger(__name__)
            log.warning(
                "BAM read-groups specify more than one sample, "
                "but this tool treats BAMs as a single sample"
            )

        info = BAMInfo()
        if not _validate_mito_bam(self, handle, info):
            return

        if not _validate_nuclear_bam(self, handle, info):
            return

        return info

    def _cross_validate(self):
        """Cross validates tables to ensure consistency."""
        genotypes = set(self.sample_order)
        samples = set(self.samples)
        differences = (genotypes | samples) - (genotypes & samples)

        if differences:
            raise ZonkeyDBError(
                "Mismatch between samples in sample-list and "
                "genotypes table; some samples not found in "
                "both tables: %s" % (",".join(differences),)
            )

        if self.mitochondria is None:
            return

        for name, record in self.mitochondria.items():
            if name not in self.samples:
                # Ignore extra reference sequences
                meta = record.meta.upper()
                if "EXCLUDE" not in list(map(str.strip, meta.split(";"))):
                    raise ZonkeyDBError(
                        "Unexpected mitochondrial sequence: %r" % (name,)
                    )

    @classmethod
    def _read_contigs_table(cls, tar_handle, filename):
        """Read contigs.txt; validates that sizes are positive integers."""
        cls._check_required_file(tar_handle, filename)

        table = cls._read_table(tar_handle, filename, _CONTIGS_TABLE_COLUMNS)
        for key, row in table.items():
            try:
                row["Size"] = int(row["Size"])
            except ValueError as error:
                raise ZonkeyDBError(
                    "Invalid size specified for sample %r in "
                    "%r: %r" % (key, filename, error)
                )

            if row["Size"] <= 0:
                raise ZonkeyDBError(
                    "Contig size must be >= 0 for %r in %r, "
                    "not %r" % (key, filename, row["Size"])
                )
        return table

    @classmethod
    def _read_samples_table(cls, tar_handle, filename):
        """Read samples.txt; returns (samples, groups-keyed-by-K)."""
        cls._check_required_file(tar_handle, filename)

        samples = cls._read_table(tar_handle, "samples.txt", _SAMPLES_TABLE_COLUMNS)
        if not samples:
            raise ZonkeyDBError("No samples found in genotypes table!")

        for row in samples.values():
            if row["Sex"].upper() not in ("MALE", "FEMALE", "NA"):
                raise ZonkeyDBError(
                    "Unexpected sample sex (%r); "
                    "expected 'MALE', 'FEMALE', or 'NA'" % (row["Sex"],)
                )

        # Collect the Group(K) columns present in the table header.
        group_keys = []
        for key in next(iter(samples.values())):
            match = _SAMPLES_TABLE_GROUP.match(key)
            if match is not None:
                k_value = match.groupdict()["K"]
                if not k_value.isdigit():
                    raise ZonkeyDBError(
                        "Malformed Group column name; K is " "not a number: %r" % (key,)
                    )
                elif not (2 <= int(k_value) <= 7):
                    raise ZonkeyDBError(
                        "K must be between 2 and 7, but found %r" % (key,)
                    )

                group_keys.append((key, int(k_value)))

        groups = {}
        for key, k_value in group_keys:
            group = {}
            for sample_key, sample in samples.items():
                group[sample_key] = sample.pop(key)

            group_labels = frozenset(group.values())
            if group_labels == frozenset("-"):
                continue  # Allowed for backwards compatibility
            elif "-" in group_labels:
                raise ZonkeyDBError(
                    "Not all samples column %r assignd a group" % (key,)
                )
            elif len(group_labels) != k_value:
                raise ZonkeyDBError(
                    "Expected %i groups in column %r, found %i"
                    % (k_value, key, len(group_labels))
                )

            groups[k_value] = group

        if not groups:
            raise ZonkeyDBError("No valid groups in samples.txt")

        return samples, groups

    @classmethod
    def _read_sample_order(cls, tar_handle, filename):
        """Read the ';'-separated sample names from the genotypes header."""
        cls._check_required_file(tar_handle, filename)

        handle = TextIOWrapper(tar_handle.extractfile(filename))
        header = handle.readline().rstrip("\r\n").split("\t")
        sample_order = tuple(header[-1].split(";"))

        if len(sample_order) != len(set(sample_order)):
            raise ZonkeyDBError("Duplicate sample names in %r" % (filename,))

        return sample_order

    def _read_mitochondria(self, tar_handle, filename):
        """Read mitochondrial FASTA records; returns None if absent."""
        try:
            tar_handle.getmember(filename)
        except KeyError:
            # Missing MT file is allowed
            return None

        handle = TextIOWrapper(tar_handle.extractfile(filename))

        results = {}
        for record in FASTA.from_lines(handle):
            record = FASTA(
                name=record.name, meta=record.meta, sequence=record.sequence.upper()
            )

            unexpected = set(record.sequence) - set("ACGTN-")
            if unexpected:
                unexpected = ", ".join(map(repr, sorted(unexpected)))
                # BUGFIX: format args were swapped (unexpected, filename),
                # producing a garbled error message.
                raise ZonkeyDBError(
                    "Unexpected nucleotide in %s; only A, C, "
                    "G, T, N, and - are allowed, not %s" % (filename, unexpected)
                )
            elif record.name in results:
                raise ZonkeyDBError(
                    "Duplicate sequence name in %s: %r" % (filename, record.name)
                )

            results[record.name] = record

        lengths = frozenset(len(record.sequence) for record in results.values())

        if not lengths:
            raise ZonkeyDBError("No mitochondrial sequences found in %r" % (filename,))
        elif len(lengths) > 2:
            lengths = tuple(sorted(lengths))
            lengths_s = "%s, and %s" % (", ".join(map(str, lengths[:-1])), lengths[-1])

            raise ZonkeyDBError(
                "At most two different sequence lengths "
                "expected for mitochondrial sequences, but "
                "found %i different lengths in %r: %s"
                % (len(lengths), filename, lengths_s)
            )
        elif len(lengths) != 1:
            # Unpadded sequences are allowed
            delta_len = max(lengths) - min(lengths)
            mito_padding = self.settings["MitoPadding"]

            if delta_len != mito_padding:
                raise ZonkeyDBError(
                    "Length difference between mitochondrial "
                    "sequences in %r does not match the "
                    "padding; expected a difference of %i bp, "
                    "but found a %i bp difference."
                    % (filename, mito_padding, delta_len)
                )

        return results

    @classmethod
    def _read_settings(cls, tar_handle, filename):
        """Read and validate settings.yaml; checks types and version bounds."""
        cls._check_required_file(tar_handle, filename)

        handle = TextIOWrapper(tar_handle.extractfile(filename))

        try:
            result = yaml.safe_load(handle)
        except yaml.YAMLError as error:
            raise ZonkeyDBError(
                "Error reading settings file %r; %s" % (filename, error)
            )

        for key in _SETTINGS_KEYS:
            if key != "Plink":
                if not isinstance(result[key], int) or result[key] < 0:
                    raise ZonkeyDBError(
                        "Value for %r in %s must be an non-"
                        "negative integer, not %r" % (key, filename, result[key])
                    )
            elif not isinstance(result[key], str):
                raise ZonkeyDBError(
                    "Value for %r in %s must be a string, "
                    "not %r" % (key, filename, result[key])
                )

        if result["Format"] > _SUPPORTED_DB_FORMAT_MAJOR:
            raise ZonkeyDBError(
                "Database version is too old; this version of "
                "PALEOMIX supports the Zonkey DB v%i, but the "
                "database is v%i; download an updated "
                "database to continue." % (_SUPPORTED_DB_FORMAT_MAJOR, result["Format"])
            )
        elif result["Format"] < _SUPPORTED_DB_FORMAT_MAJOR:
            raise ZonkeyDBError(
                "Database version is too new; this version of "
                "PALEOMIX supports the Zonkey DB v%i, but the "
                "database is v%i; upgrade PALEOMIX to "
                "continue." % (_SUPPORTED_DB_FORMAT_MAJOR, result["Format"])
            )
        elif result["Revision"] < _SUPPORTED_DB_FORMAT_MINOR:
            raise ZonkeyDBError(
                "Database version is too old; this version of "
                "PALEOMIX supports the Zonkey DB v%i, rev. %i "
                "or newer, but the database is v%i rev. %i; "
                "please download an updated database to "
                "continue."
                % (
                    _SUPPORTED_DB_FORMAT_MAJOR,
                    _SUPPORTED_DB_FORMAT_MINOR,
                    result["Format"],
                    result["Revision"],
                )
            )

        return result

    def _read_simulations(self, tar_handle, filename):
        """Read simulations.txt rows; returns None if the file is absent."""
        try:
            handle = TextIOWrapper(tar_handle.extractfile(filename))
        except KeyError:
            # Missing simulations file is allowed
            return None

        header = handle.readline().rstrip().split("\t")

        required_keys = set(
            ("NReads", "K", "Sample1", "Sample2", "HasTS", "Percentile", "Value")
        )
        missing_keys = required_keys - set(header)
        if missing_keys:
            missing_keys = ", ".join(map(repr, missing_keys))
            raise ZonkeyDBError(
                "Simulations table %r does not contain all "
                "required columns; columns %r are missing!" % (filename, missing_keys)
            )

        result = []
        for linenum, line in enumerate(handle, start=2):
            fields = line.strip().split("\t")
            if len(fields) != len(header):
                raise ZonkeyDBError(
                    "Line %i in simulations table %r, does "
                    "not contain the expected number of "
                    "columns; expected %i, but found %i!"
                    % (linenum, filename, len(header), len(fields))
                )

            row = dict(zip(header, fields))

            # BUGFIX: malformed HasTS values were silently ignored (pass),
            # coercing anything other than "TRUE" to False below.
            if row["HasTS"] not in ("TRUE", "FALSE"):
                raise ZonkeyDBError(
                    "Malformed value for column 'HasTS' at "
                    "line %i in simulations table %r; "
                    "expected 'TRUE' or 'FALSE', found %r"
                    % (linenum, filename, row["HasTS"])
                )
            row["HasTS"] = row["HasTS"] == "TRUE"

            for key in ("NReads", "K"):
                try:
                    row[key] = int(row[key])
                except ValueError:
                    raise ZonkeyDBError(
                        "Malformed value for column %r at "
                        "line %i in simulations table %r; "
                        "expected int, found %r" % (key, linenum, filename, row[key])
                    )

            for key in ("Percentile", "Value"):
                try:
                    row[key] = float(row[key])
                except ValueError:
                    raise ZonkeyDBError(
                        "Malformed value for column %r at "
                        "line %i in simulations table %r; "
                        "expected float, found %r" % (key, linenum, filename, row[key])
                    )

            for key in ("Sample1", "Sample2"):
                groups = frozenset(self.groups[int(row["K"])].values())
                if row[key] not in groups and row[key] != "-":
                    raise ZonkeyDBError(
                        "Invalid group in column %r in "
                        "simulations table %r: %r" % (key, filename, row[key])
                    )

            result.append(row)

        return result

    @classmethod
    def _check_required_file(cls, tar_handle, filename):
        """Raise ZonkeyDBError unless *filename* exists in the tar as a file."""
        try:
            obj = tar_handle.getmember(filename)
        except KeyError:
            raise ZonkeyDBError(
                "Database does not contain required file %r; "
                "please ensure that this is a valid Zonkey "
                "database file!" % (filename,)
            )

        if not obj.isfile():
            raise ZonkeyDBError(
                "Object %r in Zonkey database is not a "
                "file; please ensure that this is a valid "
                "Zonkey database file!" % (filename,)
            )

    @classmethod
    def _read_table(cls, tar_handle, filename, requied_columns=()):
        """Read a tab-separated table keyed by its 'ID' column."""
        requied_columns = frozenset(requied_columns) | frozenset(("ID",))
        handle = TextIOWrapper(tar_handle.extractfile(filename))
        result = {}

        try:
            header = handle.readline().rstrip("\r\n").split("\t")
            if len(header) != len(set(header)):
                raise ZonkeyDBError(
                    "Table %r does contains duplicate columns!" % (filename,)
                )

            # BUGFIX: the error message previously called ", ".join() with no
            # argument, raising a TypeError instead of reporting the columns.
            missing_columns = requied_columns - set(header)
            if missing_columns:
                raise ZonkeyDBError(
                    "Required columns are missing in table "
                    "%r: %s" % (filename, ", ".join(sorted(missing_columns)))
                )

            for linenum, line in enumerate(handle):
                fields = line.rstrip("\r\n").split("\t")

                if len(fields) != len(header):
                    raise ZonkeyDBError(
                        "Error reading %r at line %i; "
                        "expected %i columns, found %i "
                        "columns!" % (filename, linenum, len(header), len(fields))
                    )

                row = dict(zip(header, fields))
                if row["ID"] in result:
                    raise ZonkeyDBError(
                        "Duplicate IDs in %r: %s" % (filename, row["ID"])
                    )

                result[row["ID"]] = row
        finally:
            handle.close()

        return result
def _validate_mito_bam(data, handle, info):
    """Check *handle* against the database's mitochondrial sequences.

    Returns False only when a matching contig has the wrong length; on a
    match, fills in info.mt_contig / mt_length / mt_padding.  Returns True
    when the database has no MT data or no contig matches.
    """
    if data.mitochondria is None:
        # No mitochondrial data .. skip phylogeny
        return True

    references = handle.references
    min_length = min((len(record.sequence)) for record in data.mitochondria.values())

    log = logging.getLogger(__name__)
    for bam_contig, bam_length in zip(references, handle.lengths):
        if bam_contig not in data.mitochondria:
            continue

        # Compare against the database sequence length without alignment gaps.
        db_sequence = data.mitochondria[bam_contig].sequence
        db_length = len(db_sequence) - db_sequence.count("-")

        if bam_length != db_length:
            log.error(
                "Length of mitochondrial contig %r (%i bp) "
                "does not match the length of the corresponding "
                "sequence in the database (%i bp)" % (bam_contig, bam_length, db_length)
            )
            return False

        # An index is required by idxstats below; build it if missing.
        filename = handle.filename.decode("utf-8")
        if not os.path.exists(filename + ".bai"):
            log.info("Indexing BAM file %r" % (filename,))
            pysam.index(filename)

        # Workaround for pysam < 0.9 returning list, >= 0.9 returning str
        for line in "".join(pysam.idxstats(filename)).split("\n"):
            line = line.strip()
            if not line:
                continue

            # idxstats columns: name, length, mapped reads, unmapped reads.
            name, _, hits, _ = line.split("\t")
            if (name == bam_contig) and not int(hits):
                log.warning(
                    "Mitochondrial BAM (%r) does not contain "
                    "any reads aligned to contig %r; inferring an "
                    "phylogeny is not possible." % (filename, name)
                )
                return True

        info.mt_contig = bam_contig
        info.mt_length = bam_length
        info.mt_padding = len(db_sequence) - min_length

        return True
    return True
def _validate_nuclear_bam(data, handle, info):
    """Match the BAM's contigs against the reference panel by size.

    On a complete match, fills info.nuclear_contigs and returns True; on a
    partial match, logs the missing contigs and returns False; with no
    matches at all, returns True (the BAM is treated as non-nuclear).
    """
    # Match reference panel contigs with BAM contigs; identification is done
    # by size since different repositories use different naming schemes.
    bam_contigs = collections.defaultdict(list)
    for name, length in zip(handle.references, handle.lengths):
        bam_contigs[length].append(name)

    log = logging.getLogger(__name__)
    panel_names_to_bam = {}
    for name, stats in sorted(data.contigs.items()):
        bam_contig_names = bam_contigs.get(stats["Size"], ())
        if len(bam_contig_names) == 1:
            panel_names_to_bam[name] = bam_contig_names[0]
        elif len(bam_contig_names) > 1:
            # Several contigs share this size; disambiguate by plink name.
            candidates = []
            for bam_name in bam_contig_names:
                if contig_name_to_plink_name(bam_name) == name:
                    candidates.append(bam_name)

            if len(candidates) == 1:
                panel_names_to_bam[name] = candidates[0]
            else:
                log.error(
                    "Multiple candidates for chr%s with size %i: %s",
                    name,
                    stats["Size"],
                    ", ".join(bam_contig_names),
                )

    if len(panel_names_to_bam) == len(data.contigs):
        info.nuclear_contigs = panel_names_to_bam
        return True
    elif panel_names_to_bam:
        log.error("Not all nuclear chromosomes found in BAM:")
        for (name, stats) in sorted(data.contigs.items()):
            is_found = "OK" if name in panel_names_to_bam else "Not found!"
            log.error(" - %s: %s" % (name, is_found))

        return False
    else:
        return True
def _check_file_compression(filename):
with open(filename, "rb") as handle:
header = handle.read(2)
if header == b"\x1f\x8b":
raise ZonkeyDBError(
"Zonkey database is gzip compressed; please decompress to continue:\n"
" $ gunzip %r" % (filename,)
)
elif header == b"BZ":
raise ZonkeyDBError(
"Zonkey database is bzip2 compressed; please decompress to continue:\n"
" $ bunzip2 %r" % (filename,)
)
| MikkelSchubert/paleomix | paleomix/pipelines/zonkey/database.py | Python | mit | 23,270 | [
"pysam"
] | 3dd42581e396a987de384202bed36725cb385c8439e7560dcc666016500ffd6e |
# coding=utf-8
from ._commandbase import RadianceCommand
from ..parameters.gendaymtx import GendaymtxParameters
import os
class Gendaymtx(RadianceCommand):
    u"""Radiance gendaymtx command: annual Perez sky matrix from a weather tape.

    Attributes:
        output_name: An optional name for output file name. If None the name of
            .epw file will be used.
        wea_file: Full path to input wea file (Default: None).
        gendaymtx_parameters: Radiance parameters for gendaymtx. If None Default
            parameters will be set. You can use self.gendaymtx_parameters to view,
            add or remove the parameters before executing the command.

    Usage:

        from honeybee_plus.radiance.parameters.gendaymtx import GendaymtxParameters
        from honeybee_plus.radiance.command.gendaymtx import Gendaymtx

        # ask only for direct sun
        gmtx = GendaymtxParameters()
        gmtx.only_direct = True

        # create and run gendaymtx
        dmtx = Gendaymtx(wea_file="C:/IZMIR_TUR.wea", gendaymtx_parameters=gmtx)
        dmtx.execute()
    """

    def __init__(self, output_name=None, wea_file=None, gendaymtx_parameters=None):
        """Init command."""
        RadianceCommand.__init__(self)
        self.output_name = output_name
        self.wea_file = wea_file
        self.gendaymtx_parameters = gendaymtx_parameters

    @property
    def gendaymtx_parameters(self):
        """Get and set gendaymtx_parameters."""
        return self.__gendaymtx_parameters

    @gendaymtx_parameters.setter
    def gendaymtx_parameters(self, mtx):
        # Fall back to default parameters when nothing is provided.
        if mtx is None:
            mtx = GendaymtxParameters()
        self.__gendaymtx_parameters = mtx
        assert hasattr(self.gendaymtx_parameters, "isRadianceParameters"), \
            "input gendaymtx_parameters is not a valid parameters type."

    @property
    def output_file(self):
        """Output file address."""
        if self.output_name is None and self.wea_file.normpath is not None:
            # Derive the matrix name from the wea file name.
            return os.path.splitext(str(self.wea_file))[0] + ".mtx"
        return self.output_name

    def to_rad_string(self, relative_path=False):
        """Return full command as a string."""
        executable = self.normspace(os.path.join(self.radbin_path, 'gendaymtx'))
        rad_string = "%s %s %s > %s" % (
            executable,
            self.gendaymtx_parameters.to_rad_string(),
            self.normspace(self.wea_file),
            self.normspace(self.output_file)
        )
        # make sure input files are set by user
        self.check_input_files(rad_string)
        return rad_string

    @property
    def input_files(self):
        """Input files for this command."""
        return self.wea_file,
| ladybug-analysis-tools/honeybee | honeybee_plus/radiance/command/gendaymtx.py | Python | gpl-3.0 | 4,211 | [
"EPW"
] | 5724472c8469d4d455c805316fb2387c14ef8f0aaab5fff47447ba162a613cee |
#
# QAPI command marshaller generator
#
# Copyright IBM, Corp. 2011
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Michael Roth <mdroth@linux.vnet.ibm.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from qapi import *
import re
def gen_command_decl(name, arg_type, ret_type):
    """Generate the extern C prototype for a command's qmp_FOO() handler.

    Commands without a return type are declared void.
    """
    return mcgen('''
%(c_type)s qmp_%(c_name)s(%(params)s);
''',
                 c_type=(ret_type and ret_type.c_type()) or 'void',
                 c_name=c_name(name),
                 params=gen_params(arg_type, 'Error **errp'))
def gen_call(name, arg_type, ret_type):
    """Generate C code that invokes qmp_NAME() and marshals its result.

    Optional members contribute an extra has_NAME boolean argument ahead of
    the value argument.
    """
    ret = ''

    argstr = ''
    if arg_type:
        for memb in arg_type.members:
            if memb.optional:
                argstr += 'has_%s, ' % c_name(memb.name)
            argstr += '%s, ' % c_name(memb.name)

    lhs = ''
    if ret_type:
        lhs = 'retval = '

    ret = mcgen('''
    %(lhs)sqmp_%(c_name)s(%(args)s&err);
''',
                c_name=c_name(name), args=argstr, lhs=lhs)
    if ret_type:
        # Only convert the return value when the handler succeeded.
        ret += gen_err_check()
        ret += mcgen('''
    qmp_marshal_output_%(c_name)s(retval, ret, &err);
''',
                     c_name=ret_type.c_name())
    return ret
def gen_marshal_vars(arg_type, ret_type):
    """Generate the local variable declarations for a marshal function.

    Always declares 'err'; adds 'retval' when there is a return type, and
    an input visitor plus one local per argument member when there are
    arguments.  With no arguments, 'args' is explicitly voided to silence
    unused-parameter warnings.
    """
    ret = mcgen('''
    Error *err = NULL;
''')
    if ret_type:
        ret += mcgen('''
    %(c_type)s retval;
''',
                     c_type=ret_type.c_type())
    if arg_type:
        ret += mcgen('''
    QmpInputVisitor *qiv = qmp_input_visitor_new_strict(QOBJECT(args));
    QapiDeallocVisitor *qdv;
    Visitor *v;
''')
        for memb in arg_type.members:
            # Optional members get a has_FOO flag alongside the value.
            if memb.optional:
                ret += mcgen('''
    bool has_%(c_name)s = false;
''',
                             c_name=c_name(memb.name))
            ret += mcgen('''
    %(c_type)s %(c_name)s = %(c_null)s;
''',
                         c_name=c_name(memb.name),
                         c_type=memb.type.c_type(),
                         c_null=memb.type.c_null())
        ret += '\n'
    else:
        ret += mcgen('''
    (void)args;
''')
    return ret
def gen_marshal_input_visit(arg_type, dealloc=False):
    """Generate the visit of the command's arguments.

    With dealloc=False this produces the input visit that fills the local
    argument variables; with dealloc=True it produces the cleanup pass
    that tears down the input visitor and frees what was allocated.
    """
    ret = ''
    if not arg_type:
        return ret
    if dealloc:
        ret += mcgen('''
    qmp_input_visitor_cleanup(qiv);
    qdv = qapi_dealloc_visitor_new();
    v = qapi_dealloc_get_visitor(qdv);
''')
    else:
        ret += mcgen('''
    v = qmp_input_get_visitor(qiv);
''')
    # Errors are ignored during the dealloc pass (skiperr=True).
    ret += gen_visit_fields(arg_type.members, skiperr=dealloc)
    if dealloc:
        ret += mcgen('''
    qapi_dealloc_visitor_cleanup(qdv);
''')
    return ret
def gen_marshal_output(ret_type):
    """Generate the static helper converting a handler's return value to QObject.

    The generated function visits ret_in with an output visitor to build the
    QObject, then unconditionally visits it again with a dealloc visitor so
    the C return value is freed regardless of success.
    """
    return mcgen('''
static void qmp_marshal_output_%(c_name)s(%(c_type)s ret_in, QObject **ret_out, Error **errp)
{
    Error *err = NULL;
    QmpOutputVisitor *qov = qmp_output_visitor_new();
    QapiDeallocVisitor *qdv;
    Visitor *v;
    v = qmp_output_get_visitor(qov);
    visit_type_%(c_name)s(v, &ret_in, "unused", &err);
    if (err) {
        goto out;
    }
    *ret_out = qmp_output_get_qobject(qov);
out:
    error_propagate(errp, err);
    qmp_output_visitor_cleanup(qov);
    qdv = qapi_dealloc_visitor_new();
    v = qapi_dealloc_get_visitor(qdv);
    visit_type_%(c_name)s(v, &ret_in, "unused", NULL);
    qapi_dealloc_visitor_cleanup(qdv);
}
''',
                 c_type=ret_type.c_type(), c_name=ret_type.c_name())
def gen_marshal_proto(name):
    """Return the C prototype (without trailing ';') of a command's marshaller."""
    proto = 'void qmp_marshal_%s(QDict *args, QObject **ret, Error **errp)' % c_name(name)
    # In middle mode the marshaller is a public symbol; otherwise file-local.
    if middle_mode:
        return proto
    return 'static ' + proto
def gen_marshal_decl(name):
    """Generate a forward declaration of the command's marshal function."""
    return mcgen('''
%(proto)s;
''',
                 proto=gen_marshal_proto(name))
def gen_marshal(name, arg_type, ret_type):
    """Generate the marshal function body for one command.

    Sequence: declare locals, visit the input arguments, call the handler,
    then (via the shared 'out:' label) propagate any error and free the
    argument allocations.
    """
    ret = mcgen('''
%(proto)s
{
''',
                proto=gen_marshal_proto(name))
    ret += gen_marshal_vars(arg_type, ret_type)
    ret += gen_marshal_input_visit(arg_type)
    ret += gen_call(name, arg_type, ret_type)
    # 'goto out' produced by gen_marshal_input_visit->gen_visit_fields()
    # for each arg_type member, and by gen_call() for ret_type
    if (arg_type and arg_type.members) or ret_type:
        ret += mcgen('''
out:
''')
    ret += mcgen('''
    error_propagate(errp, err);
''')
    ret += gen_marshal_input_visit(arg_type, dealloc=True)
    ret += mcgen('''
}
''')
    return ret
def gen_register_command(name, success_response):
    """Generate the qmp_register_command() call for one command.

    Commands declared without a success response get QCO_NO_SUCCESS_RESP
    instead of the default QCO_NO_OPTIONS.
    """
    options = 'QCO_NO_OPTIONS'
    if not success_response:
        options = 'QCO_NO_SUCCESS_RESP'
    ret = mcgen('''
    qmp_register_command("%(name)s", qmp_marshal_%(c_name)s, %(opts)s);
''',
                name=name, c_name=c_name(name),
                opts=options)
    return ret
def gen_registry(registry):
    """Wrap the accumulated registration calls in qmp_init_marshal().

    The function is hooked into startup via qapi_init().
    """
    ret = mcgen('''
static void qmp_init_marshal(void)
{
''')
    ret += registry
    ret += mcgen('''
}
qapi_init(qmp_init_marshal);
''')
    return ret
class QAPISchemaGenCommandVisitor(QAPISchemaVisitor):
    """Schema visitor that accumulates the generated C declarations/definitions.

    After a schema walk, 'decl' holds the header content and 'defn' the
    source-file content; '_regy' collects the registration calls emitted
    into qmp_init_marshal() (skipped in middle mode).
    """
    def __init__(self):
        self.decl = None
        self.defn = None
        self._regy = None
        self._visited_ret_types = None
    def visit_begin(self, schema):
        self.decl = ''
        self.defn = ''
        self._regy = ''
        self._visited_ret_types = set()
    def visit_end(self):
        if not middle_mode:
            self.defn += gen_registry(self._regy)
        self._regy = None
        self._visited_ret_types = None
    def visit_command(self, name, info, arg_type, ret_type,
                      gen, success_response):
        # Commands marked 'gen': false are implemented by hand; skip them.
        if not gen:
            return
        self.decl += gen_command_decl(name, arg_type, ret_type)
        # Emit each output-marshal helper only once per return type, even
        # when several commands share it.
        if ret_type and ret_type not in self._visited_ret_types:
            self._visited_ret_types.add(ret_type)
            self.defn += gen_marshal_output(ret_type)
        if middle_mode:
            self.decl += gen_marshal_decl(name)
        self.defn += gen_marshal(name, arg_type, ret_type)
        if not middle_mode:
            self._regy += gen_register_command(name, success_response)
# ---- script entry point: parse options, walk the schema, write outputs ----
middle_mode = False
(input_file, output_dir, do_c, do_h, prefix, opts) = \
    parse_command_line("m", ["middle"])
# -m/--middle: generate public marshallers and no automatic registration.
for o, a in opts:
    if o in ("-m", "--middle"):
        middle_mode = True
c_comment = '''
/*
 * schema-defined QMP->QAPI command dispatch
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
'''
h_comment = '''
/*
 * schema-defined QAPI function prototypes
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
'''
(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
                            'qmp-marshal.c', 'qmp-commands.h',
                            c_comment, h_comment)
fdef.write(mcgen('''
#include "qemu-common.h"
#include "qemu/module.h"
#include "qapi/qmp/types.h"
#include "qapi/qmp/dispatch.h"
#include "qapi/visitor.h"
#include "qapi/qmp-output-visitor.h"
#include "qapi/qmp-input-visitor.h"
#include "qapi/dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
#include "%(prefix)sqmp-commands.h"
''',
                 prefix=prefix))
fdecl.write(mcgen('''
#include "%(prefix)sqapi-types.h"
#include "qapi/qmp/qdict.h"
#include "qapi/error.h"
''',
                  prefix=prefix))
# Walk the schema once; the visitor accumulates everything to be written.
schema = QAPISchema(input_file)
gen = QAPISchemaGenCommandVisitor()
schema.visit(gen)
fdef.write(gen.defn)
fdecl.write(gen.decl)
close_output(fdef, fdecl)
| mtottenh/QEMU-TPM | scripts/qapi-commands.py | Python | gpl-2.0 | 7,801 | [
"VisIt"
] | 921cebd0b42f664a5800c927e4131fe76cdd4b79d1094c5cf74854c40a31115d |
"""Experimental code for cleaner support of IPython syntax with unittest.
In IPython up until 0.10, we've used very hacked up nose machinery for running
tests with IPython special syntax, and this has proved to be extremely slow.
This module provides decorators to try a different approach, stemming from a
conversation Brian and I (FP) had about this problem Sept/09.
The goal is to be able to easily write simple functions that can be seen by
unittest as tests, and ultimately for these to support doctests with full
IPython syntax. Nose already offers this based on naming conventions and our
hackish plugins, but we are seeking to move away from nose dependencies if
possible.
This module follows a different approach, based on decorators.
- A decorator called @ipdoctest can mark any function as having a docstring
that should be viewed as a doctest, but after syntax conversion.
Authors
-------
- Fernando Perez <Fernando.Perez@berkeley.edu>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import re
import unittest
from doctest import DocTestFinder, DocTestRunner, TestResults
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def count_failures(runner):
    """Count number of failures in a doctest runner.

    Code modeled after the summarize() method in doctest: returns one
    TestResults tuple per test name that had at least one failure.
    """
    failed = []
    for failures, tries in runner._name2ft.values():
        if failures > 0:
            failed.append(TestResults(failures, tries))
    return failed
class IPython2PythonConverter(object):
    """Convert IPython 'syntax' to valid Python.

    Eventually this code may grow to be the full IPython syntax conversion
    implementation, but for now it only does prompt conversion."""

    def __init__(self):
        # IPython-style prompts to be rewritten into doctest prompts.
        self.rps1 = re.compile(r'In\ \[\d+\]: ')
        self.rps2 = re.compile(r'\ \ \ \.\.\.+: ')
        self.rout = re.compile(r'Out\[\d+\]: \s*?\n?')
        # Standard Python doctest prompts.
        self.pyps1 = '>>> '
        self.pyps2 = '... '
        # Fix: use raw strings so '\s' is a regex escape rather than a
        # deprecated string escape (an error in future Python versions).
        # NOTE(review): the dots from pyps2 are unescaped, so they match any
        # three characters -- appears intentional, confirm.
        self.rpyps1 = re.compile(r'(\s*%s)(.*)$' % self.pyps1)
        self.rpyps2 = re.compile(r'(\s*%s)(.*)$' % self.pyps2)

    def __call__(self, ds):
        """Convert IPython prompts to python ones in a string."""
        from . import globalipapp

        pyps1 = '>>> '
        pyps2 = '... '
        pyout = ''
        # Replace the In/Out prompts with doctest-style ones; Out prompts
        # are stripped entirely.
        dnew = ds
        dnew = self.rps1.sub(pyps1, dnew)
        dnew = self.rps2.sub(pyps2, dnew)
        dnew = self.rout.sub(pyout, dnew)
        ip = globalipapp.get_ipython()
        # Convert input IPython source into valid Python.
        out = []
        newline = out.append
        for line in dnew.splitlines():
            mps1 = self.rpyps1.match(line)
            if mps1 is not None:
                prompt, text = mps1.groups()
                # prefilter(text, False): line is not a continuation.
                newline(prompt+ip.prefilter(text, False))
                continue
            mps2 = self.rpyps2.match(line)
            if mps2 is not None:
                prompt, text = mps2.groups()
                # prefilter(text, True): continuation of a previous line.
                newline(prompt+ip.prefilter(text, True))
                continue
            newline(line)
        newline('')  # ensure a closing newline, needed by doctest
        return '\n'.join(out)
class Doc2UnitTester(object):
    """Class whose instances act as a decorator for docstring testing.

    In practice we're only likely to need one instance ever, made below (though
    no attempt is made at turning it into a singleton, there is no need for
    that).
    """
    def __init__(self, verbose=False):
        """New decorator.

        Parameters
        ----------
        verbose : boolean, optional (False)
          Passed to the doctest finder and runner to control verbosity.
        """
        self.verbose = verbose
        # We can reuse the same finder for all instances
        self.finder = DocTestFinder(verbose=verbose, recurse=False)

    def __call__(self, func):
        """Use as a decorator: doctest a function's docstring as a unittest.

        This version runs normal doctests, but the idea is to make it later run
        ipython syntax instead."""
        # Capture the enclosing instance with a different name, so the new
        # class below can see it without confusion regarding its own 'self'
        # that will point to the test instance at runtime
        d2u = self
        # Rewrite the function's docstring to have python syntax
        if func.__doc__ is not None:
            func.__doc__ = ip2py(func.__doc__)
        # Now, create a tester object that is a real unittest instance, so
        # normal unittest machinery (or Nose, or Trial) can find it.
        class Tester(unittest.TestCase):
            def test(self):
                # Make a new runner per function to be tested
                runner = DocTestRunner(verbose=d2u.verbose)
                # Fix: iterate explicitly -- under Python 3 map() is lazy,
                # so the original map(runner.run, ...) never ran the tests.
                for dt in d2u.finder.find(func, func.__name__):
                    runner.run(dt)
                failed = count_failures(runner)
                if failed:
                    # Since we only looked at a single function's docstring,
                    # failed should contain at most one item. More than that
                    # is a case we can't handle and should error out on
                    if len(failed) > 1:
                        # Fix: the format string was missing its %s
                        # placeholder, raising TypeError instead of the
                        # intended ValueError.
                        err = "Invalid number of test results: %s" % failed
                        raise ValueError(err)
                    # Report a normal failure.
                    self.fail('failed doctests: %s' % str(failed[0]))
        # Rename it so test reports have the original signature.
        Tester.__name__ = func.__name__
        return Tester
def ipdocstring(func):
    """Change the function docstring via ip2py.

    Returns the same function object, with its docstring (if any)
    converted from IPython to plain doctest syntax.
    """
    doc = func.__doc__
    if doc is not None:
        func.__doc__ = ip2py(doc)
    return func
# Make an instance of the classes for public use
# ipdoctest decorates functions whose docstrings are doctests; ip2py is the
# prompt converter, also used internally by ipdocstring() above.
ipdoctest = Doc2UnitTester()
ip2py = IPython2PythonConverter()
| Foxfanmedium/python_training | OnlineCoursera/mail_ru/Python_1/env/Lib/site-packages/IPython/testing/ipunittest.py | Python | apache-2.0 | 6,429 | [
"Brian"
] | 26cd16be2358f0bff5e24ee07e4b564e656670e6582b0d9518164c6cd9581d8f |
# see http://blog.sigfpe.com/2012/03/overloading-python-list-comprehension.html
from ast import *
import sys
class RewriteComp(NodeTransformer):
    """AST transformer wrapping each string literal in a FreeString() call."""
    def visit_func(self, node):
        # NOTE(review): NodeTransformer dispatches on node class names
        # (visit_FunctionDef etc.); 'visit_func' matches no AST node type,
        # so this method appears to be dead code -- confirm.
        print dump(node)
        return node
    def visit_Str(self, node):
        # Invoked for every string literal in the tree.
        print dump(node)
        # Build FreeString(<original literal>); position info is copied from
        # the original node so compile() accepts the rewritten tree.
        namenode = Name(id="FreeString", lineno=node.lineno, col_offset=node.col_offset, ctx=Load())
        newnode = Call(func=namenode, keywords=[], args=[node],
                       lineno=node.lineno, col_offset=node.col_offset)
        return newnode
# Read the target script, parse it to an AST (Python 2 syntax), rewrite its
# string literals, then compile and execute the transformed module here.
source = open(sys.argv[1]).read()
e = compile(source, "<string>", "exec", PyCF_ONLY_AST)
print dump(e)
#print e
e = RewriteComp().visit(e)
f = compile(e, sys.argv[1], "exec")
#print f
exec f
print "Done"
| pipcet/py-antistring | antistring.py | Python | gpl-2.0 | 746 | [
"VisIt"
] | b248d3518887abbd72e7ac5e89ebab4a5264c16efb6d4910432011b6197e41c4 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the gating feature.
"""
from __future__ import absolute_import
from textwrap import dedent
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage as StudioCourseOutlinePage
from common.test.acceptance.tests.helpers import UniqueCourseTest
class GatingTest(UniqueCourseTest):
    """
    Test gating feature in LMS.
    """
    STAFF_USERNAME = "STAFF_TESTER"
    STAFF_EMAIL = "staff101@example.com"
    STUDENT_USERNAME = "STUDENT_TESTER"
    STUDENT_EMAIL = "student101@example.com"
    # Bucket used by the parallel test-sharding infrastructure.
    shard = 23
    def setUp(self):
        super(GatingTest, self).setUp()
        self.logout_page = LogoutPage(self.browser)
        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Multiple-choice problem used as the gating prerequisite; the
        # second choice (choice_1) is the correct answer.
        xml = dedent("""
        <problem>
        <p>What is height of eiffel tower without the antenna?.</p>
        <multiplechoiceresponse>
          <choicegroup label="What is height of eiffel tower without the antenna?" type="MultipleChoice">
            <choice correct="false">324 meters<choicehint>Antenna is 24 meters high</choicehint></choice>
            <choice correct="true">300 meters</choice>
            <choice correct="false">224 meters</choice>
            <choice correct="false">400 meters</choice>
          </choicegroup>
        </multiplechoiceresponse>
        </problem>
        """)
        self.problem1 = XBlockFixtureDesc('problem', 'HEIGHT OF EIFFEL TOWER', data=xml)
        # Install a course with sections/problems
        course_fixture = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        course_fixture.add_advanced_settings({
            "enable_subsection_gating": {"value": "true"}, 'enable_proctored_exams': {"value": "true"}
        })
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    self.problem1
                ),
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 2')
                ),
                XBlockFixtureDesc('sequential', 'Test Subsection 3').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 3')
                ),
            )
        ).install()
    def _auto_auth(self, username, email, staff):
        """
        Logout and login with given credentials.
        """
        self.logout_page.visit()
        AutoAuthPage(self.browser, username=username, email=email,
                     course_id=self.course_id, staff=staff).visit()
    def _setup_prereq(self):
        """
        Make the first subsection a prerequisite
        """
        # Login as staff
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
        # Make the first subsection a prerequisite
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog(0)
        self.studio_course_outline.select_advanced_tab(desired_item='gated_content')
        self.studio_course_outline.make_gating_prerequisite()
    def _setup_gated_subsection(self, subsection_index=1):
        """
        Gate the given indexed subsection on the first subsection
        """
        # Login as staff
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
        # Gate the second subsection based on the score achieved in the first subsection
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog(subsection_index)
        self.studio_course_outline.select_advanced_tab(desired_item='gated_content')
        self.studio_course_outline.add_prerequisite_to_subsection("80", "")
    def _fulfill_prerequisite(self):
        """
        Fulfill the prerequisite needed to see gated content
        """
        # choice_1 is the correct answer in the problem XML built in setUp().
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.wait_for_page().problem_name, 'HEIGHT OF EIFFEL TOWER')
        problem_page.click_choice('choice_1')
        problem_page.click_submit()
    def test_subsection_gating_in_studio(self):
        """
        Given that I am a staff member
        When I visit the course outline page in studio.
        And open the subsection edit dialog
        Then I can view all settings related to Gating
        And update those settings to gate a subsection
        """
        self._setup_prereq()
        # Assert settings are displayed correctly for a prerequisite subsection
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog(0)
        self.studio_course_outline.select_advanced_tab(desired_item='gated_content')
        self.assertTrue(self.studio_course_outline.gating_prerequisite_checkbox_is_visible())
        self.assertTrue(self.studio_course_outline.gating_prerequisite_checkbox_is_checked())
        self.assertFalse(self.studio_course_outline.gating_prerequisites_dropdown_is_visible())
        self.assertFalse(self.studio_course_outline.gating_prerequisite_min_score_is_visible())
        self._setup_gated_subsection()
        # Assert settings are displayed correctly for a gated subsection
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog(1)
        self.studio_course_outline.select_advanced_tab(desired_item='gated_content')
        self.assertTrue(self.studio_course_outline.gating_prerequisite_checkbox_is_visible())
        self.assertTrue(self.studio_course_outline.gating_prerequisites_dropdown_is_visible())
        self.assertTrue(self.studio_course_outline.gating_prerequisite_min_score_is_visible())
    def test_gated_subsection_in_lms_for_student(self):
        """
        Given that I am a student
        When I visit the LMS Courseware
        Then I can see a gated subsection
        The gated subsection should have a lock icon
        and be in the format: "<Subsection Title> (Prerequisite Required)"
        When I fulfill the gating Prerequisite
        Then I can see the gated subsection
        Now the gated subsection should have an unlock icon
        and screen readers should read the section as: "<Subsection Title> Unlocked"
        """
        self._setup_prereq()
        self._setup_gated_subsection()
        self._auto_auth(self.STUDENT_USERNAME, self.STUDENT_EMAIL, False)
        self.course_home_page.visit()
        self.assertEqual(self.course_home_page.outline.num_subsections, 3)
        # Fulfill prerequisite and verify that gated subsection is shown
        self.courseware_page.visit()
        self._fulfill_prerequisite()
        self.course_home_page.visit()
        self.assertEqual(self.course_home_page.outline.num_subsections, 3)
    def test_gated_subsection_in_lms_for_staff(self):
        """
        Given that I am a staff member
        When I visit the LMS Courseware
        Then I can see all gated subsections
        Displayed along with notification banners
        Then if I masquerade as a student
        Then I can see a gated subsection
        The gated subsection should have a lock icon
        and be in the format: "<Subsection Title> (Prerequisite Required)"
        """
        self._setup_prereq()
        self._setup_gated_subsection()
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
        self.course_home_page.visit()
        self.assertEqual(self.course_home_page.preview.staff_view_mode, 'Staff')
        self.assertEqual(self.course_home_page.outline.num_subsections, 3)
        # Click on gated section and check for banner
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 2')
        self.courseware_page.wait_for_page()
        self.assertTrue(self.courseware_page.has_banner())
        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 1')
        self.courseware_page.wait_for_page()
        # Masquerade as a learner; the gated content stays visible but the
        # prerequisite banner is shown instead of the content.
        self.course_home_page.visit()
        self.course_home_page.preview.set_staff_view_mode('Learner')
        self.course_home_page.wait_for_page()
        self.assertEqual(self.course_home_page.outline.num_subsections, 3)
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 1')
        self.courseware_page.wait_for_page()
        # banner displayed informing section is a prereq
        self.assertTrue(self.courseware_page.has_banner())
    def test_gated_banner_before_special_exam(self):
        """
        When a subsection with a prereq is a special
        exam, show the gating banner before starting
        the special exam.
        Setup the course with a subsection having pre-req
        Subsection with pre-req is a special exam
        Go the LMS course outline page
        Click the special exam subsection
        The gated banner asking for completing
        prereqs should be visible
        Go to the required subsection
        Fulfill the requirements
        Visit the special exam subsection again
        The gated banner is not visible anymore
        and user can start the special exam
        """
        self._setup_prereq()
        # Gating subsection 1 and making it a timed exam
        self._setup_gated_subsection()
        self.studio_course_outline.open_subsection_settings_dialog(1)
        self.studio_course_outline.select_advanced_tab()
        self.studio_course_outline.make_exam_timed()
        # Gating subsection 2 and making it a proctored exam
        self._setup_gated_subsection(2)
        self.studio_course_outline.open_subsection_settings_dialog(2)
        self.studio_course_outline.select_advanced_tab()
        self.studio_course_outline.make_exam_proctored()
        self._auto_auth(self.STUDENT_USERNAME, self.STUDENT_EMAIL, False)
        self.course_home_page.visit()
        # Test gating banner before starting timed exam
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 2')
        self.assertTrue(self.courseware_page.is_gating_banner_visible())
        # Test gating banner before proctored exams
        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 3')
        self.assertTrue(self.courseware_page.is_gating_banner_visible())
        # Fulfill requirements
        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 1')
        self._fulfill_prerequisite()
        # Banner is not visible anymore on timed exam sub-section
        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 2')
        self.assertFalse(self.courseware_page.is_gating_banner_visible())
        # Banner is not visible on proctored exam subsection
        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 3')
        self.assertFalse(self.courseware_page.is_gating_banner_visible())
| jolyonb/edx-platform | common/test/acceptance/tests/lms/test_lms_gating.py | Python | agpl-3.0 | 12,079 | [
"VisIt"
] | 445e914ef3b2e3c38f7ba94f47f490368a66673ef97667a00b1354a373f8309d |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import pickle
import collections
from pymatgen.core.periodic_table import Element, Specie, DummySpecie, \
PeriodicTable, get_el_sp
from pymatgen.core.composition import Composition
from copy import deepcopy
class ElementTestCase(unittest.TestCase):
    """Unit tests for pymatgen's Element class."""
    def test_init(self):
        self.assertEqual("Fe", Element("Fe").symbol, "Fe test failed")
        # Unknown symbols must raise rather than silently construct.
        fictional_symbols = ["D", "T", "Zebra"]
        for sym in fictional_symbols:
            self.assertRaises(KeyError, Element, sym)
        #Test caching
        self.assertEqual(id(Element("Fe")), id(Element("Fe")))
    def test_dict(self):
        fe = Element("Fe")
        d = fe.as_dict()
        self.assertEqual(fe, Element.from_dict(d))
    def test_block(self):
        testsets = {"O": "p", "Fe": "d", "Li": "s", "U": "f"}
        for k, v in testsets.items():
            self.assertEqual(Element(k).block, v)
    def test_full_electronic_structure(self):
        # Expected (n, subshell, occupancy) tuples in filling order.
        testsets = {"O": [(1, "s", 2), (2, "s", 2), (2, "p", 4)],
                    "Fe": [(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2),
                           (3, "p", 6), (3, "d", 6), (4, "s", 2)],
                    "Li": [(1, "s", 2), (2, "s", 1)],
                    "U": [(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2),
                          (3, "p", 6), (3, "d", 10), (4, "s", 2), (4, "p", 6),
                          (4, "d", 10), (5, "s", 2), (5, "p", 6), (4, "f", 14),
                          (5, "d", 10), (6, "s", 2), (6, "p", 6), (5, "f", 3),
                          (6, "d", 1), (7, "s", 2)]}
        for k, v in testsets.items():
            self.assertEqual(Element(k).full_electronic_structure, v)
    def test_attributes(self):
        is_true = {("Xe", "Kr"): "is_noble_gas",
                   ("Fe", "Ni"): "is_transition_metal",
                   ("Li", "Cs"): "is_alkali",
                   ("Ca", "Mg"): "is_alkaline",
                   ("F", "Br", "I"): "is_halogen",
                   ("La",): "is_lanthanoid",
                   ("U", "Pu"): "is_actinoid",
                   ("Si", "Ge"): "is_metalloid",
                   ("O", "Te"): "is_chalcogen"}
        for k, v in is_true.items():
            for sym in k:
                self.assertTrue(getattr(Element(sym), v), sym + " is false")
        keys = ["name", "mendeleev_no", "atomic_mass",
                "electronic_structure", "X", "atomic_radius",
                "min_oxidation_state", "max_oxidation_state",
                "electrical_resistivity", "velocity_of_sound", "reflectivity",
                "refractive_index", "poissons_ratio", "molar_volume",
                "thermal_conductivity", "melting_point", "boiling_point",
                "liquid_range", "critical_temperature",
                "superconduction_temperature",
                "bulk_modulus", "youngs_modulus", "brinell_hardness",
                "rigidity_modulus", "mineral_hardness",
                "vickers_hardness", "density_of_solid",
                "coefficient_of_linear_thermal_expansion", "oxidation_states",
                "common_oxidation_states", "average_ionic_radius",
                "ionic_radii"]
        #Test all elements up to Uranium
        for i in range(1, 93):
            el = Element.from_Z(i)
            d = el.data
            # Only check attributes for which the data table actually has a
            # value (entries starting with "no data" are skipped).
            for k in keys:
                k_str = k.capitalize().replace("_", " ")
                if k_str in d and (not str(d[k_str]).startswith("no data")):
                    self.assertIsNotNone(getattr(el, k))
            el = Element.from_Z(i)
            if len(el.oxidation_states) > 0:
                self.assertEqual(max(el.oxidation_states),
                                 el.max_oxidation_state)
                self.assertEqual(min(el.oxidation_states),
                                 el.min_oxidation_state)
    def test_oxidation_states(self):
        el = Element("Fe")
        self.assertEqual(el.oxidation_states, (-2, -1, 1, 2, 3, 4, 5, 6))
        self.assertEqual(el.common_oxidation_states, (2, 3))
    def test_deepcopy(self):
        el1 = Element("Fe")
        el2 = Element("Na")
        ellist = [el1, el2]
        self.assertEqual(ellist, deepcopy(ellist),
                         "Deepcopy operation doesn't produce exact copy")
    def test_radii(self):
        el = Element("Pd")
        self.assertEqual(el.atomic_radius, 1.40)
        self.assertEqual(el.atomic_radius_calculated, 1.69)
        self.assertEqual(el.van_der_waals_radius, 1.63)
    def test_data(self):
        self.assertEqual(Element("Pd").data["Atomic radius"], 1.4)
    def test_sort(self):
        # Elements sort by electronegativity (X), not alphabetically.
        els = [Element("Se"), Element("C")]
        self.assertEqual(sorted(els), [Element("C"), Element("Se")])
    def test_pickle(self):
        el1 = Element("Fe")
        o = pickle.dumps(el1)
        self.assertEqual(el1, pickle.loads(o))
class SpecieTestCase(unittest.TestCase):
    """Unit tests for pymatgen's Specie (element + oxidation state)."""
    def setUp(self):
        self.specie1 = Specie.from_string("Fe2+")
        self.specie2 = Specie("Fe", 3)
        self.specie3 = Specie("Fe", 2)
        self.specie4 = Specie("Fe", 2, {"spin": 5})
    def test_init(self):
        # Only whitelisted properties (e.g. "spin") are accepted.
        self.assertRaises(ValueError, Specie, "Fe", 2, {"magmom": 5})
    def test_cached(self):
        specie5 = Specie("Fe", 2)
        self.assertEqual(id(specie5), id(self.specie3))
    def test_ionic_radius(self):
        self.assertEqual(self.specie2.ionic_radius, 78.5 / 100)
        self.assertEqual(self.specie3.ionic_radius, 92 / 100)
    def test_eq(self):
        self.assertEqual(self.specie1, self.specie3,
                         "Static and actual constructor gives unequal result!")
        self.assertNotEqual(self.specie1, self.specie2,
                            "Fe2+ should not be equal to Fe3+")
        self.assertNotEqual(self.specie4, self.specie3)
        # A Specie is never equal to its bare Element, in either direction.
        self.assertFalse(self.specie1 == Element("Fe"))
        self.assertFalse(Element("Fe") == self.specie1)
    def test_cmp(self):
        self.assertLess(self.specie1, self.specie2, "Fe2+ should be < Fe3+")
        self.assertLess(Specie("C", 1), Specie("Se", 1))
    def test_attr(self):
        self.assertEqual(self.specie1.Z, 26,
                         "Z attribute for Fe2+ should be = Element Fe.")
        self.assertEqual(self.specie4.spin, 5)
    def test_deepcopy(self):
        el1 = Specie("Fe", 4)
        el2 = Specie("Na", 1)
        ellist = [el1, el2]
        self.assertEqual(ellist, deepcopy(ellist),
                         "Deepcopy operation doesn't produce exact copy.")
    def test_pickle(self):
        self.assertEqual(self.specie1, pickle.loads(pickle.dumps(self.specie1)))
    def test_get_crystal_field_spin(self):
        self.assertEqual(Specie("Fe", 2).get_crystal_field_spin(), 4)
        self.assertEqual(Specie("Fe", 3).get_crystal_field_spin(), 5)
        self.assertEqual(Specie("Fe", 4).get_crystal_field_spin(), 4)
        self.assertEqual(Specie("Co", 3).get_crystal_field_spin(
            spin_config="low"), 0)
        self.assertEqual(Specie("Co", 4).get_crystal_field_spin(
            spin_config="low"), 1)
        self.assertEqual(Specie("Ni", 3).get_crystal_field_spin(
            spin_config="low"), 1)
        self.assertEqual(Specie("Ni", 4).get_crystal_field_spin(
            spin_config="low"), 0)
        # Non-transition metals and invalid oxidation states must raise.
        self.assertRaises(AttributeError,
                          Specie("Li", 1).get_crystal_field_spin)
        self.assertRaises(AttributeError,
                          Specie("Ge", 4).get_crystal_field_spin)
        self.assertRaises(AttributeError,
                          Specie("H", 1).get_crystal_field_spin)
        self.assertRaises(AttributeError,
                          Specie("Fe", 10).get_crystal_field_spin)
        self.assertRaises(ValueError, Specie("Fe", 2).get_crystal_field_spin,
                          "hex")
    def test_sort(self):
        els = map(get_el_sp, ["N3-", "Si4+", "Si3+"])
        self.assertEqual(sorted(els), [Specie("Si", 3), Specie("Si", 4),
                                       Specie("N", -3)])
    def test_to_from_string(self):
        fe3 = Specie("Fe", 3, {"spin": 5})
        self.assertEqual(str(fe3), "Fe3+spin=5")
        fe = Specie.from_string("Fe3+spin=5")
        self.assertEqual(fe.spin, 5)
        mo0 = Specie("Mo", 0, {"spin": 5})
        self.assertEqual(str(mo0), "Mo0+spin=5")
        mo = Specie.from_string("Mo0+spin=4")
        self.assertEqual(mo.spin, 4)
class DummySpecieTestCase(unittest.TestCase):
    """Unit tests for DummySpecie, a placeholder species with no real element."""
    def test_init(self):
        self.specie1 = DummySpecie("X")
        # Symbols that could be confused with real elements are rejected.
        self.assertRaises(ValueError, DummySpecie, "Xe")
        self.assertRaises(ValueError, DummySpecie, "Xec")
        self.assertRaises(ValueError, DummySpecie, "Vac")
        self.specie2 = DummySpecie("X", 2, {"spin": 3})
        self.assertEqual(self.specie2.spin, 3)
    def test_eq(self):
        self.assertFalse(DummySpecie("Xg") == DummySpecie("Xh"))
        self.assertFalse(DummySpecie("Xg") == DummySpecie("Xg", 3))
        self.assertTrue(DummySpecie("Xg", 3) == DummySpecie("Xg", 3))
    def test_from_string(self):
        sp = DummySpecie.from_string("X")
        self.assertEqual(sp.oxi_state, 0)
        sp = DummySpecie.from_string("X2+")
        self.assertEqual(sp.oxi_state, 2)
        sp = DummySpecie.from_string("X2+spin=5")
        self.assertEqual(sp.oxi_state, 2)
        self.assertEqual(sp.spin, 5)
    def test_pickle(self):
        el1 = DummySpecie("X", 3)
        o = pickle.dumps(el1)
        self.assertEqual(el1, pickle.loads(o))
    def test_sort(self):
        # Dummy species sort before any real element.
        r = sorted([Element('Fe'), DummySpecie("X")])
        self.assertEqual(r, [DummySpecie("X"), Element('Fe')])
    def test_safe_from_composition(self):
        # safe_from_composition picks a dummy symbol not already present.
        c = Composition({'Xa': 1, 'Fe': 1})
        self.assertEqual(DummySpecie.safe_from_composition(c).symbol, 'Xb')
        self.assertEqual(DummySpecie.safe_from_composition(c, 1).symbol, 'Xb')
class PeriodicTableTestCase(unittest.TestCase):
    """Unit tests for the PeriodicTable container and whole-table invariants."""
    def test_element(self):
        """Walk Z=1..101 and sanity-check each element's data."""
        symbols = list()
        for i in range(1, 102):
            el = Element.from_Z(i)
            self.assertGreater(el.atomic_mass, 0,
                               "Atomic mass cannot be negative!")
            self.assertNotIn(el.symbol, symbols,
                             "Duplicate symbol for " + el.symbol)
            # Bug fix: previously a garbled literal string was appended
            # instead of the symbol, which made the duplicate-symbol check
            # above vacuous.  Record the actual symbol so repeats are caught.
            symbols.append(el.symbol)
            self.assertIsNotNone(el.group,
                                 "Group cannot be none for Z=" + str(i))
            self.assertIsNotNone(el.row, "Row cannot be none for Z=" + str(i))
        #Test all properties
        all_attr = ["Z", "symbol", "X", "name", "atomic_mass",
                    "atomic_radius", "max_oxidation_state",
                    "min_oxidation_state", "mendeleev_no",
                    "electrical_resistivity", "velocity_of_sound",
                    "reflectivity", "refractive_index", "poissons_ratio",
                    "molar_volume", "electronic_structure",
                    "thermal_conductivity", "boiling_point",
                    "melting_point", "critical_temperature",
                    "superconduction_temperature", "liquid_range",
                    "bulk_modulus", "youngs_modulus", "brinell_hardness",
                    "rigidity_modulus", "mineral_hardness",
                    "vickers_hardness", "density_of_solid",
                    "coefficient_of_linear_thermal_expansion"]
        for a in all_attr:
            # NOTE(review): this asserts the element itself is not None with
            # 'a' as the failure message; it does not exercise the attribute.
            # Kept as-is to preserve existing behavior -- confirm intent.
            self.assertIsNotNone(el, a)
    def test_print_periodic_table(self):
        PeriodicTable().print_periodic_table()
    def test_iterable(self):
        """Test whether PeriodicTable supports the iteration protocol"""
        table = PeriodicTable()
        self.assertTrue(isinstance(table, collections.Iterable))
        # Indexing and slicing are by atomic number Z.
        self.assertEqual(table[14].Z, 14)
        self.assertEqual([e.Z for e in table[1:4:2]], [1, 3])
        for (idx, element) in enumerate(table):
            self.assertEqual(idx+1, element.Z)
class FuncTest(unittest.TestCase):
    """Unit tests for the module-level get_el_sp() convenience function."""
    def test_get_el_sp(self):
        # Strings resolve to Specie, Element or DummySpecie as appropriate;
        # bare numbers (or numeric strings) are treated as atomic numbers.
        self.assertEqual(get_el_sp("Fe2+"), Specie("Fe", 2))
        self.assertEqual(get_el_sp("3"), Element("Li"))
        self.assertEqual(get_el_sp("3.0"), Element("Li"))
        self.assertEqual(get_el_sp("U"), Element("U"))
        self.assertEqual(get_el_sp("X2+"), DummySpecie("X", 2))
        self.assertEqual(get_el_sp("Mn3+"), Specie("Mn", 3))
# Allow the test module to be run directly from the command line.
if __name__ == "__main__":
    unittest.main()
| sonium0/pymatgen | pymatgen/core/tests/test_periodic_table.py | Python | mit | 12,650 | [
"pymatgen"
] | 5ee556c63ac24c99f480b173abc61fad6954ef69c94a0df0a69c74998000f563 |
# -*- coding: utf-8 -*-
"""
Models used to implement SAML SSO support in third_party_auth
(including Shibboleth support)
"""
from __future__ import absolute_import
import json
import logging
import re
import os
from config_models.models import ConfigurationModel, cache
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from provider.oauth2.models import Client
from provider.utils import long_token
from six import text_type
from social_core.backends.base import BaseAuth
from social_core.backends.oauth import OAuthAuth
from social_core.backends.saml import SAMLAuth
from social_core.exceptions import SocialAuthBaseException
from social_core.utils import module_member
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming.helpers import get_current_request
from .lti import LTI_PARAMS_KEY, LTIAuthBackend
from .saml import STANDARD_SAML_PROVIDER_KEY, get_saml_idp_choices, get_saml_idp_class
log = logging.getLogger(__name__)
# Provider-supplied detail keys that must never be copied verbatim into the
# registration form; ProviderConfig.get_register_form_data gives each of
# these its own special handling instead.
REGISTRATION_FORM_FIELD_BLACKLIST = [
    'name',
    'username'
]
# A dictionary of {name: class} entries for each python-social-auth backend available.
# Because this setting can specify arbitrary code to load and execute, it is set via
# normal Django settings only and cannot be changed at runtime:
def _load_backend_classes(base_class=BaseAuth):
    """ Load the list of python-social-auth backend classes from Django settings """
    # Resolve each dotted path, then keep only subclasses of base_class.
    candidates = (module_member(path) for path in settings.AUTHENTICATION_BACKENDS)
    for candidate in candidates:
        if issubclass(candidate, base_class):
            yield candidate
# NOTE(review): when running under the PyCharm debugger the backend
# registries are left empty — presumably because importing/introspecting
# the real backends disturbs the debug session; confirm before relying on
# this behavior.
if os.environ.get('PYCHARM_DEBUG') == '1':
    _PSA_BACKENDS = {}
    _PSA_OAUTH2_BACKENDS = []
    _PSA_SAML_BACKENDS = []
    _LTI_BACKENDS = []
else:
    # Registries of available python-social-auth backends, keyed/filtered by
    # backend flavor (any, OAuth2, SAML, LTI). Built once at import time.
    _PSA_BACKENDS = {backend_class.name: backend_class for backend_class in _load_backend_classes()}
    _PSA_OAUTH2_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(OAuthAuth)]
    _PSA_SAML_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(SAMLAuth)]
    _LTI_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(LTIAuthBackend)]
def clean_json(value, of_type):
    """ Simple helper method to parse and clean JSON """
    # Blank input normalizes to the JSON encoding of an empty instance.
    stripped = value.strip()
    if not stripped:
        return json.dumps(of_type())
    try:
        parsed = json.loads(value)
    except ValueError as err:
        raise ValidationError("Invalid JSON: {}".format(text_type(err)))
    # Re-serialize with consistent indentation once the type checks out.
    if isinstance(parsed, of_type):
        return json.dumps(parsed, indent=4)
    raise ValidationError("Expected a JSON {}".format(of_type))
def clean_username(username=''):
    """ Simple helper method to ensure a username is compatible with our system requirements. """
    # Collapse each run of characters outside [-, word chars] into a single
    # underscore, then cap the result at 30 characters.
    sanitized = re.sub(r'[^-\w]+', '_', username)
    return sanitized[:30]
class AuthNotConfigured(SocialAuthBaseException):
    """ Exception when SAMLProviderData or other required info is missing """

    def __init__(self, provider_name):
        # Remember which provider failed so __str__ can name it.
        self.provider_name = provider_name
        super(AuthNotConfigured, self).__init__()

    def __str__(self):
        message = _('Authentication with {} is currently unavailable.')
        return message.format(self.provider_name)
class ProviderConfig(ConfigurationModel):
    """
    Abstract Base Class for configuring a third_party_auth provider

    Concrete subclasses (OAuth2, SAML, LTI) set ``prefix`` and
    ``backend_name`` and add any provider-specific fields.
    """
    # Each provider configuration row is keyed/versioned by its slug.
    KEY_FIELDS = ('slug',)
    icon_class = models.CharField(
        max_length=50,
        blank=True,
        default='fa-sign-in',
        help_text=(
            'The Font Awesome (or custom) icon class to use on the login button for this provider. '
            'Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university'
        ),
    )
    # We use a FileField instead of an ImageField here because ImageField
    # doesn't support SVG. This means we don't get any image validation, but
    # that should be fine because only trusted users should be uploading these
    # anyway.
    icon_image = models.FileField(
        blank=True,
        help_text=(
            'If there is no Font Awesome icon available for this provider, upload a custom image. '
            'SVG images are recommended as they can scale to any size.'
        ),
    )
    name = models.CharField(max_length=50, blank=False, help_text="Name of this provider (shown to users)")
    slug = models.SlugField(
        max_length=30, db_index=True, default='default',
        help_text=(
            'A short string uniquely identifying this provider. '
            'Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"'
        ))
    secondary = models.BooleanField(
        default=False,
        help_text=_(
            'Secondary providers are displayed less prominently, '
            'in a separate list of "Institution" login providers.'
        ),
    )
    site = models.ForeignKey(
        Site,
        default=settings.SITE_ID,
        related_name='%(class)ss',
        help_text=_(
            'The Site that this provider configuration belongs to.'
        ),
        on_delete=models.CASCADE,
    )
    # Behavior toggles controlling login/registration flow for this provider.
    skip_hinted_login_dialog = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is enabled, users that visit a \"TPA hinted\" URL for this provider "
            "(e.g. a URL ending with `?tpa_hint=[provider_name]`) will be forwarded directly to "
            "the login URL of the provider instead of being first prompted with a login dialog."
        ),
    )
    skip_registration_form = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is enabled, users will not be asked to confirm their details "
            "(name, email, etc.) during the registration process. Only select this option "
            "for trusted providers that are known to provide accurate user information."
        ),
    )
    skip_email_verification = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is selected, users will not be required to confirm their "
            "email, and their account will be activated immediately upon registration."
        ),
    )
    send_welcome_email = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is selected, users will be sent a welcome email upon registration."
        ),
    )
    visible = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is not selected, users will not be presented with the provider "
            "as an option to authenticate with on the login screen, but manual "
            "authentication using the correct link is still possible."
        ),
    )
    max_session_length = models.PositiveIntegerField(
        null=True,
        blank=True,
        default=None,
        verbose_name='Max session length (seconds)',
        help_text=_(
            "If this option is set, then users logging in using this SSO provider will have "
            "their session length limited to no longer than this value. If set to 0 (zero), "
            "the session will expire upon the user closing their browser. If left blank, the "
            "Django platform session default length will be used."
        )
    )
    send_to_registration_first = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is selected, users will be directed to the registration page "
            "immediately after authenticating with the third party instead of the login page."
        ),
    )
    sync_learner_profile_data = models.BooleanField(
        default=False,
        help_text=_(
            "Synchronize user profile data received from the identity provider with the edX user "
            "account on each SSO login. The user will be notified if the email address associated "
            "with their account is changed as a part of this synchronization."
        )
    )
    enable_sso_id_verification = models.BooleanField(
        default=False,
        help_text="Use the presence of a profile from a trusted third party as proof of identity verification.",
    )
    # Class-level attributes that concrete subclasses override.
    prefix = None  # used for provider_id. Set to a string value in subclass
    backend_name = None  # Set to a field or fixed value in subclass
    accepts_logins = True  # Whether to display a sign-in button when the provider is enabled
    # "enabled" field is inherited from ConfigurationModel

    class Meta(object):
        app_label = "third_party_auth"
        abstract = True

    def clean(self):
        """ Ensure that either `icon_class` or `icon_image` is set """
        super(ProviderConfig, self).clean()
        # Exactly one of the two icon fields must be provided.
        if bool(self.icon_class) == bool(self.icon_image):
            raise ValidationError('Either an icon class or an icon image must be given (but not both)')

    @property
    def provider_id(self):
        """ Unique string key identifying this provider. Must be URL and css class friendly. """
        assert self.prefix is not None
        return "-".join((self.prefix, ) + tuple(getattr(self, field) for field in self.KEY_FIELDS))

    @property
    def backend_class(self):
        """ Get the python-social-auth backend class used for this provider """
        return _PSA_BACKENDS[self.backend_name]

    @property
    def full_class_name(self):
        """ Get the fully qualified class name of this provider. """
        return '{}.{}'.format(self.__module__, self.__class__.__name__)

    def get_url_params(self):
        """ Get a dict of GET parameters to append to login links for this provider """
        return {}

    def is_active_for_pipeline(self, pipeline):
        """ Is this provider being used for the specified pipeline? """
        return self.backend_name == pipeline['backend']

    def match_social_auth(self, social_auth):
        """ Is this provider being used for this UserSocialAuth entry? """
        return self.backend_name == social_auth.provider

    def get_remote_id_from_social_auth(self, social_auth):
        """ Given a UserSocialAuth object, return the remote ID used by this provider. """
        # This is generally the same thing as the UID, expect when one backend is used for multiple providers
        assert self.match_social_auth(social_auth)
        return social_auth.uid

    def get_social_auth_uid(self, remote_id):
        """
        Return the uid in social auth.
        This is default implementation. Subclass may override with a different one.
        """
        return remote_id

    @classmethod
    def get_register_form_data(cls, pipeline_kwargs):
        """Gets dict of data to display on the register form.

        common.djangoapps.student.views.register_user uses this to populate the
        new account creation form with values supplied by the user's chosen
        provider, preventing duplicate data entry.

        Args:
            pipeline_kwargs: dict of string -> object. Keyword arguments
                accumulated by the pipeline thus far.

        Returns:
            Dict of string -> string. Keys are names of form fields; values are
            values for that field. Where there is no value, the empty string
            must be used.
        """
        registration_form_data = {}
        # Details about the user sent back from the provider.
        details = pipeline_kwargs.get('details').copy()
        # Set the registration form to use the `fullname` detail for the `name` field.
        registration_form_data['name'] = details.get('fullname', '')
        # Get the username separately to take advantage of the de-duping logic
        # built into the pipeline. The provider cannot de-dupe because it can't
        # check the state of taken usernames in our system. Note that there is
        # technically a data race between the creation of this value and the
        # creation of the user object, so it is still possible for users to get
        # an error on submit.
        registration_form_data['username'] = clean_username(pipeline_kwargs.get('username') or '')
        # Any other values that are present in the details dict should be copied
        # into the registration form details. This may include details that do
        # not map to a value that exists in the registration form. However,
        # because the fields that are actually rendered are not based on this
        # list, only those values that map to a valid registration form field
        # will actually be sent to the form as default values.
        for blacklisted_field in REGISTRATION_FORM_FIELD_BLACKLIST:
            details.pop(blacklisted_field, None)
        registration_form_data.update(details)
        return registration_form_data

    def get_authentication_backend(self):
        """Gets associated Django settings.AUTHENTICATION_BACKEND string."""
        return '{}.{}'.format(self.backend_class.__module__, self.backend_class.__name__)

    @property
    def display_for_login(self):
        """
        Determines whether the provider ought to be shown as an option with
        which to authenticate on the login screen, registration screen, and elsewhere.
        """
        return bool(self.enabled_for_current_site and self.accepts_logins and self.visible)

    @property
    def enabled_for_current_site(self):
        """
        Determines if the provider is able to be used with the current site.
        """
        return self.enabled and self.site == Site.objects.get_current(get_current_request())
class OAuth2ProviderConfig(ProviderConfig):
    """
    Configuration Entry for an OAuth2 based provider.
    Also works for OAuth1 providers.
    """
    prefix = 'oa2'
    backend_name = models.CharField(
        max_length=50, blank=False, db_index=True,
        help_text=(
            "Which python-social-auth OAuth2 provider backend to use. "
            "The list of backend choices is determined by the THIRD_PARTY_AUTH_BACKENDS setting."
            # To be precise, it's set by AUTHENTICATION_BACKENDS - which aws.py sets from THIRD_PARTY_AUTH_BACKENDS
        )
    )
    key = models.TextField(blank=True, verbose_name="Client ID")
    secret = models.TextField(
        blank=True,
        verbose_name="Client Secret",
        help_text=(
            'For increased security, you can avoid storing this in your database by leaving '
            ' this field blank and setting '
            'SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} '
            'in your instance\'s Django settings (or lms.auth.json)'
        )
    )
    # Free-form JSON blob of extra backend settings; validated in clean().
    other_settings = models.TextField(blank=True, help_text="Optional JSON object with advanced settings, if any.")

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = "Provider Configuration (OAuth)"
        verbose_name_plural = verbose_name

    def clean(self):
        """ Standardize and validate fields """
        super(OAuth2ProviderConfig, self).clean()
        self.other_settings = clean_json(self.other_settings, dict)

    def get_setting(self, name):
        """ Get the value of a setting, or raise KeyError

        Lookup order: the dedicated KEY/SECRET fields first (with the secret
        optionally sourced from Django settings), then the other_settings
        JSON blob.
        """
        if name == "KEY":
            return self.key
        if name == "SECRET":
            if self.secret:
                return self.secret
            # To allow instances to avoid storing secrets in the DB, the secret can also be set via Django:
            return getattr(settings, 'SOCIAL_AUTH_OAUTH_SECRETS', {}).get(self.backend_name, '')
        if self.other_settings:
            other_settings = json.loads(self.other_settings)
            assert isinstance(other_settings, dict), "other_settings should be a JSON object (dictionary)"
            return other_settings[name]
        raise KeyError
class SAMLConfiguration(ConfigurationModel):
    """
    General configuration required for this edX instance to act as a SAML
    Service Provider and allow users to authenticate via third party SAML
    Identity Providers (IdPs)
    """
    # Configurations are versioned per (site, slug) pair.
    KEY_FIELDS = ('site_id', 'slug')
    site = models.ForeignKey(
        Site,
        default=settings.SITE_ID,
        related_name='%(class)ss',
        help_text=_(
            'The Site that this SAML configuration belongs to.'
        ),
        on_delete=models.CASCADE,
    )
    slug = models.SlugField(
        max_length=30,
        default='default',
        help_text=(
            'A short string uniquely identifying this configuration. '
            'Cannot contain spaces. Examples: "ubc", "mit-staging"'
        ),
    )
    private_key = models.TextField(
        help_text=(
            'To generate a key pair as two files, run '
            '"openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". '
            'Paste the contents of saml.key here. '
            'For increased security, you can avoid storing this in your database by leaving '
            'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting '
            'in your instance\'s Django settings (or lms.auth.json).'
        ),
        blank=True,
    )
    public_key = models.TextField(
        help_text=(
            'Public key certificate. '
            'For increased security, you can avoid storing this in your database by leaving '
            'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting '
            'in your instance\'s Django settings (or lms.auth.json).'
        ),
        blank=True,
    )
    separate_settings_per_microsite = models.BooleanField(default=False, verbose_name='Separate Settings per Microsite')
    entity_id = models.CharField(max_length=255, default="http://saml.example.com", verbose_name="Entity ID")
    org_info_str = models.TextField(
        verbose_name="Organization Info",
        default='{"en-US": {"url": "http://www.example.com", "displayname": "Example Inc.", "name": "example"}}',
        help_text="JSON dictionary of 'url', 'displayname', and 'name' for each language",
    )
    other_config_str = models.TextField(
        default='{\n"SECURITY_CONFIG": {"metadataCacheDuration": 604800, "signMetadata": false}\n}',
        help_text=(
            "JSON object defining advanced settings that are passed on to python-saml. "
            "Valid keys that can be set here include: SECURITY_CONFIG and SP_EXTRA"
        ),
    )

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = "SAML Configuration"
        verbose_name_plural = verbose_name

    def __str__(self):
        """
        Return human-readable string representation.
        """
        return "SAMLConfiguration {site}: {slug} on {date:%Y-%m-%d %H:%M:%S}".format(
            site=self.site.name,
            slug=self.slug,
            date=self.change_date,
        )

    def clean(self):
        """ Standardize and validate fields """
        super(SAMLConfiguration, self).clean()
        self.org_info_str = clean_json(self.org_info_str, dict)
        self.other_config_str = clean_json(self.other_config_str, dict)
        # Strip the PEM armor lines so only the base64 body is stored; the
        # SAML library is handed the bare key/cert material.
        self.private_key = (
            self.private_key
            .replace("-----BEGIN RSA PRIVATE KEY-----", "")
            .replace("-----BEGIN PRIVATE KEY-----", "")
            .replace("-----END RSA PRIVATE KEY-----", "")
            .replace("-----END PRIVATE KEY-----", "")
            .strip()
        )
        self.public_key = (
            self.public_key
            .replace("-----BEGIN CERTIFICATE-----", "")
            .replace("-----END CERTIFICATE-----", "")
            .strip()
        )

    def get_setting(self, name):
        """ Get the value of a setting, or raise KeyError

        Well-known names (ORG_INFO, SP_ENTITY_ID, SP_PUBLIC_CERT,
        SP_PRIVATE_KEY) are served from model fields or Django settings;
        anything else is looked up in the other_config_str JSON blob on top
        of built-in defaults.
        """
        default_saml_contact = {
            # Default contact information to put into the SAML metadata that gets generated by python-saml.
            "givenName": _("{platform_name} Support").format(
                platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
            ),
            "emailAddress": configuration_helpers.get_value('TECH_SUPPORT_EMAIL', settings.TECH_SUPPORT_EMAIL),
        }
        if name == "ORG_INFO":
            return json.loads(self.org_info_str)
        if name == "SP_ENTITY_ID":
            return self.entity_id
        if name == "SP_PUBLIC_CERT":
            if self.public_key:
                return self.public_key
            # To allow instances to avoid storing keys in the DB, the key pair can also be set via Django:
            if self.slug == 'default':
                return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', '')
            else:
                # Non-default configurations look their cert up by slug.
                public_certs = getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT', {})
                return public_certs.get(self.slug, '')
        if name == "SP_PRIVATE_KEY":
            if self.private_key:
                return self.private_key
            # To allow instances to avoid storing keys in the DB, the private key can also be set via Django:
            if self.slug == 'default':
                return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', '')
            else:
                private_keys = getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT', {})
                return private_keys.get(self.slug, '')
        other_config = {
            # These defaults can be overriden by self.other_config_str
            "GET_ALL_EXTRA_DATA": True,  # Save all attribute values the IdP sends into the UserSocialAuth table
            "TECHNICAL_CONTACT": default_saml_contact,
            "SUPPORT_CONTACT": default_saml_contact,
        }
        other_config.update(json.loads(self.other_config_str))
        return other_config[name]  # SECURITY_CONFIG, SP_EXTRA, or similar extra settings
def active_saml_configurations_filter():
    """
    Returns a mapping to be used for the SAMLProviderConfig to limit the SAMLConfiguration choices to the current set.
    """
    # Restrict the FK choices to the ids of currently-active configurations.
    current_ids = SAMLConfiguration.objects.current_set().values_list('id', flat=True)
    return {'id__in': current_ids}
class SAMLProviderConfig(ProviderConfig):
    """
    Configuration Entry for a SAML/Shibboleth provider.
    """
    prefix = 'saml'
    backend_name = models.CharField(
        max_length=50, default='tpa-saml', blank=False,
        help_text="Which python-social-auth provider backend to use. 'tpa-saml' is the standard edX SAML backend.")
    entity_id = models.CharField(
        max_length=255, verbose_name="Entity ID", help_text="Example: https://idp.testshib.org/idp/shibboleth")
    metadata_source = models.CharField(
        max_length=255,
        help_text=(
            "URL to this provider's XML metadata. Should be an HTTPS URL. "
            "Example: https://www.testshib.org/metadata/testshib-providers.xml"
        ))
    # SAML attribute URN overrides; when blank, backend defaults are used.
    attr_user_permanent_id = models.CharField(
        max_length=128, blank=True, verbose_name="User ID Attribute",
        help_text="URN of the SAML attribute that we can use as a unique, persistent user ID. Leave blank for default.")
    attr_full_name = models.CharField(
        max_length=128, blank=True, verbose_name="Full Name Attribute",
        help_text="URN of SAML attribute containing the user's full name. Leave blank for default.")
    attr_first_name = models.CharField(
        max_length=128, blank=True, verbose_name="First Name Attribute",
        help_text="URN of SAML attribute containing the user's first name. Leave blank for default.")
    attr_last_name = models.CharField(
        max_length=128, blank=True, verbose_name="Last Name Attribute",
        help_text="URN of SAML attribute containing the user's last name. Leave blank for default.")
    attr_username = models.CharField(
        max_length=128, blank=True, verbose_name="Username Hint Attribute",
        help_text="URN of SAML attribute to use as a suggested username for this user. Leave blank for default.")
    attr_email = models.CharField(
        max_length=128, blank=True, verbose_name="Email Attribute",
        help_text="URN of SAML attribute containing the user's email address[es]. Leave blank for default.")
    automatic_refresh_enabled = models.BooleanField(
        default=True, verbose_name="Enable automatic metadata refresh",
        help_text="When checked, the SAML provider's metadata will be included "
                  "in the automatic refresh job, if configured."
    )
    identity_provider_type = models.CharField(
        max_length=128, blank=False, verbose_name="Identity Provider Type", default=STANDARD_SAML_PROVIDER_KEY,
        choices=get_saml_idp_choices(), help_text=(
            "Some SAML providers require special behavior. For example, SAP SuccessFactors SAML providers require an "
            "additional API call to retrieve user metadata not provided in the SAML response. Select the provider type "
            "which best matches your use case. If in doubt, choose the Standard SAML Provider type."
        )
    )
    debug_mode = models.BooleanField(
        default=False, verbose_name="Debug Mode",
        help_text=(
            "In debug mode, all SAML XML requests and responses will be logged. "
            "This is helpful for testing/setup but should always be disabled before users start using this provider."
        ),
    )
    other_settings = models.TextField(
        verbose_name="Advanced settings", blank=True,
        help_text=(
            'For advanced use cases, enter a JSON object with addtional configuration. '
            'The tpa-saml backend supports {"requiredEntitlements": ["urn:..."]}, '
            'which can be used to require the presence of a specific eduPersonEntitlement, '
            'and {"extra_field_definitions": [{"name": "...", "urn": "..."},...]}, which can be '
            'used to define registration form fields and the URNs that can be used to retrieve '
            'the relevant values from the SAML response. Custom provider types, as selected '
            'in the "Identity Provider Type" field, may make use of the information stored '
            'in this field for additional configuration.'
        ))
    archived = models.BooleanField(default=False)
    saml_configuration = models.ForeignKey(
        SAMLConfiguration,
        on_delete=models.SET_NULL,
        limit_choices_to=active_saml_configurations_filter,
        null=True,
        blank=True,
    )

    def clean(self):
        """ Standardize and validate fields """
        super(SAMLProviderConfig, self).clean()
        self.other_settings = clean_json(self.other_settings, dict)

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = "Provider Configuration (SAML IdP)"
        verbose_name_plural = "Provider Configuration (SAML IdPs)"

    def get_url_params(self):
        """ Get a dict of GET parameters to append to login links for this provider """
        return {'idp': self.slug}

    def is_active_for_pipeline(self, pipeline):
        """ Is this provider being used for the specified pipeline? """
        # The 'tpa-saml' backend is shared by all SAML providers, so the
        # slug must also match the pipeline's idp_name.
        return self.backend_name == pipeline['backend'] and self.slug == pipeline['kwargs']['response']['idp_name']

    def match_social_auth(self, social_auth):
        """ Is this provider being used for this UserSocialAuth entry? """
        prefix = self.slug + ":"
        return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)

    def get_remote_id_from_social_auth(self, social_auth):
        """ Given a UserSocialAuth object, return the remote ID used by this provider. """
        assert self.match_social_auth(social_auth)
        # Remove the prefix from the UID
        return social_auth.uid[len(self.slug) + 1:]

    def get_social_auth_uid(self, remote_id):
        """ Get social auth uid from remote id by prepending idp_slug to the remote id """
        return '{}:{}'.format(self.slug, remote_id)

    def get_config(self):
        """
        Return a SAMLIdentityProvider instance for use by SAMLAuthBackend.

        Essentially this just returns the values of this object and its
        associated 'SAMLProviderData' entry.

        Raises:
            AuthNotConfigured: if no valid SAMLProviderData has been fetched
                for this provider's entity_id.
        """
        if self.other_settings:
            conf = json.loads(self.other_settings)
        else:
            conf = {}
        # Copy the non-blank attribute overrides into the backend config.
        attrs = (
            'attr_user_permanent_id', 'attr_full_name', 'attr_first_name',
            'attr_last_name', 'attr_username', 'attr_email', 'entity_id')
        for field in attrs:
            val = getattr(self, field)
            if val:
                conf[field] = val
        # Now get the data fetched automatically from the metadata.xml:
        data = SAMLProviderData.current(self.entity_id)
        if not data or not data.is_valid():
            log.error(
                'No SAMLProviderData found for provider "%s" with entity id "%s" and IdP slug "%s". '
                'Run "manage.py saml pull" to fix or debug.',
                self.name, self.entity_id, self.slug
            )
            raise AuthNotConfigured(provider_name=self.name)
        conf['x509cert'] = data.public_key
        conf['url'] = data.sso_url
        # Add SAMLConfiguration appropriate for this IdP
        conf['saml_sp_configuration'] = (
            self.saml_configuration or
            SAMLConfiguration.current(self.site.id, 'default')
        )
        idp_class = get_saml_idp_class(self.identity_provider_type)
        return idp_class(self.slug, **conf)
class SAMLProviderData(models.Model):
    """
    Data about a SAML IdP that is fetched automatically by 'manage.py saml pull'

    This data is only required during the actual authentication process.
    """
    # Seconds to cache the "current" entry per entity_id.
    cache_timeout = 600
    fetched_at = models.DateTimeField(db_index=True, null=False)
    expires_at = models.DateTimeField(db_index=True, null=True)
    entity_id = models.CharField(max_length=255, db_index=True)  # This is the key for lookups in this table
    sso_url = models.URLField(verbose_name="SSO URL")
    public_key = models.TextField()

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = "SAML Provider Data"
        verbose_name_plural = verbose_name
        ordering = ('-fetched_at', )

    def is_valid(self):
        """ Is this data valid? """
        # Expired entries are invalid; otherwise all three fields must be set.
        if self.expires_at and timezone.now() > self.expires_at:
            return False
        return bool(self.entity_id and self.sso_url and self.public_key)
    is_valid.boolean = True

    @classmethod
    def cache_key_name(cls, entity_id):
        """ Return the name of the key to use to cache the current data """
        return 'configuration/{}/current/{}'.format(cls.__name__, entity_id)

    @classmethod
    def current(cls, entity_id):
        """
        Return the active data entry, if any, otherwise None

        The most recently fetched row for entity_id wins; the result
        (including None) is cached for cache_timeout seconds.
        """
        cached = cache.get(cls.cache_key_name(entity_id))
        if cached is not None:
            return cached
        try:
            current = cls.objects.filter(entity_id=entity_id).order_by('-fetched_at')[0]
        except IndexError:
            current = None
        cache.set(cls.cache_key_name(entity_id), current, cls.cache_timeout)
        return current
class LTIProviderConfig(ProviderConfig):
    """
    Configuration required for this edX instance to act as a LTI
    Tool Provider and allow users to authenticate and be enrolled in a
    course via third party LTI Tool Consumers.
    """
    prefix = 'lti'
    backend_name = 'lti'
    # This provider is not visible to users
    icon_class = None
    icon_image = None
    secondary = False
    # LTI login cannot be initiated by the tool provider
    accepts_logins = False
    # Each configuration row is keyed/versioned by its consumer key.
    KEY_FIELDS = ('lti_consumer_key', )
    lti_consumer_key = models.CharField(
        max_length=255,
        help_text=(
            'The name that the LTI Tool Consumer will use to identify itself'
        )
    )
    lti_hostname = models.CharField(
        default='localhost',
        max_length=255,
        help_text=(
            'The domain that will be acting as the LTI consumer.'
        ),
        db_index=True
    )
    lti_consumer_secret = models.CharField(
        default=long_token,
        max_length=255,
        help_text=(
            'The shared secret that the LTI Tool Consumer will use to '
            'authenticate requests. Only this edX instance and this '
            'tool consumer instance should know this value. '
            'For increased security, you can avoid storing this in '
            'your database by leaving this field blank and setting '
            'SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} '
            # BUG FIX: help text previously read "Django setttigs".
            'in your instance\'s Django settings (or lms.auth.json)'
        ),
        blank=True,
    )
    lti_max_timestamp_age = models.IntegerField(
        default=10,
        help_text=(
            'The maximum age of oauth_timestamp values, in seconds.'
        )
    )

    def match_social_auth(self, social_auth):
        """ Is this provider being used for this UserSocialAuth entry? """
        # The 'lti' backend is shared, so the UID must carry our consumer key.
        prefix = self.lti_consumer_key + ":"
        return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)

    def get_remote_id_from_social_auth(self, social_auth):
        """ Given a UserSocialAuth object, return the remote ID used by this provider. """
        assert self.match_social_auth(social_auth)
        # Remove the prefix from the UID
        return social_auth.uid[len(self.lti_consumer_key) + 1:]

    def is_active_for_pipeline(self, pipeline):
        """ Is this provider being used for the specified pipeline? """
        try:
            return (
                self.backend_name == pipeline['backend'] and
                self.lti_consumer_key == pipeline['kwargs']['response'][LTI_PARAMS_KEY]['oauth_consumer_key']
            )
        except KeyError:
            # Pipelines from other backends lack the LTI params structure.
            return False

    def get_lti_consumer_secret(self):
        """ If the LTI consumer secret is not stored in the database, check Django settings instead """
        if self.lti_consumer_secret:
            return self.lti_consumer_secret
        return getattr(settings, 'SOCIAL_AUTH_LTI_CONSUMER_SECRETS', {}).get(self.lti_consumer_key, '')

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = "Provider Configuration (LTI)"
        verbose_name_plural = verbose_name
class ProviderApiPermissions(models.Model):
    """
    This model links OAuth2 client with provider Id.

    It gives permission for a OAuth2 client to access the information under certain IdPs.
    """
    client = models.ForeignKey(Client, on_delete=models.CASCADE)
    # Matches ProviderConfig.provider_id (prefix + key fields), not backend_name.
    provider_id = models.CharField(
        max_length=255,
        help_text=(
            'Uniquely identify a provider. This is different from backend_name.'
        )
    )

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = "Provider API Permission"
        verbose_name_plural = verbose_name + 's'
class SAMLConfigurationPerMicrosite(models.Model):
    """
    Additional SAML configuration per microsite
    """
    domain = models.CharField(max_length=255, verbose_name='Microsite Domain Name', unique=True)
    entity_id = models.CharField(max_length=255, default="http://saml.example.com", verbose_name="Entity ID")
    org_info_str = models.TextField(
        verbose_name="Organization Info",
        default='{"en-US": {"url": "http://www.example.com", "displayname": "Example Inc.", "name": "example"}}',
        help_text="JSON dictionary of 'url', 'displayname', and 'name' for each language",
    )
    other_config_str = models.TextField(
        default='{\n"SECURITY_CONFIG": {"metadataCacheDuration": 604800, "signMetadata": false}\n}',
        help_text=(
            "JSON object defining advanced settings that are passed on to python-saml. "
            "Valid keys that can be set here include: SECURITY_CONFIG and SP_EXTRA"
        ),
    )

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = "SAML Configuration per Microsite"
        verbose_name_plural = verbose_name

    def clean(self):
        """ Standardize and validate fields """
        super(SAMLConfigurationPerMicrosite, self).clean()
        self.org_info_str = clean_json(self.org_info_str, dict)
        self.other_config_str = clean_json(self.other_config_str, dict)

    def get_setting(self, name):
        """ Get the value of a setting, or raise KeyError

        ORG_INFO and SP_ENTITY_ID come from dedicated fields; contact
        settings are merged over platform defaults; everything else comes
        from the other_config_str JSON blob (or None if absent).
        """
        if name == "ORG_INFO":
            return json.loads(self.org_info_str) if self.org_info_str else None
        if name == "SP_ENTITY_ID":
            return self.entity_id
        other_config = json.loads(self.other_config_str) if self.other_config_str else {}
        if other_config and name in ("TECHNICAL_CONTACT", "SUPPORT_CONTACT"):
            # Start from the platform defaults and let the stored JSON
            # override individual contact fields.
            contact = {
                "givenName": "{} Support".format(settings.PLATFORM_NAME),
                "emailAddress": settings.TECH_SUPPORT_EMAIL
            }
            contact.update(other_config.get(name, {}))
            return contact
        return other_config.get(name, None)  # SECURITY_CONFIG, SP_EXTRA, or similar extra settings
| CredoReference/edx-platform | common/djangoapps/third_party_auth/models.py | Python | agpl-3.0 | 37,332 | [
"VisIt"
] | ac4d60a3eaa4e833391008aefa41feb67267dc3c15a3b3e9812d8324b064dd31 |
# This script will use the output from find_raster_path.py
# to generate a mosaic dataset.
# To run the gdal commands you first have to set the path and GDAL_DATA variable in environments.
# C:\Python27\Lib\site-packages\osgeo
# GDAL_DATA C:\Python27\Lib\site-packages\osgeo\data\gdal
from __future__ import print_function
import arcpy
import os
import csv
import subprocess
import string
import numpy
from collections import defaultdict
from operator import itemgetter
from osgeo import ogr
from osgeo import gdal
from osgeo import gdal_merge
# Columns of the tracking CSV; the *_col constants index into each row.
header = ["STATUS", "NAME", "PATH", "PROJECT", "YEAR", "NAMECLEAN",
          "NAMENEW", "TYPE", "PATH_OUT"]
(status_col, name_col, path_col, proj_col, year_col,
 clean_col, new_name_col, type_col, pathout_col) = range(9)

# Input tracking CSV (earlier runs kept for reference):
#csvfile = r"\\DEQWQNAS01\Lidar01\OR_INVENTORY\Willamette_new2_BE_rasters.csv"
#csvfile = r"\\DEQWQNAS01\Lidar01\OR_INVENTORY\Willamette_new2_HH_rasters.csv"
csvfile = r"\\DEQWQNAS01\Lidar01\OR_INVENTORY\Willamette_new2_VH_rasters.csv"

# Output / warp settings.
outpath_warp = r"F:\LiDAR\VH"
proj_final = 'EPSG:2992'    # target spatial reference for gdalwarp
infile_type = "\\hdr.adf"   # ESRI grid header appended to each input path
out_format = "HFA"          # GDAL output driver name
out_ext = ".img"            # extension matching the HFA driver
buildpyramids = True
buildmosaic = True
mdname = r"F:\LiDAR\MOSAICS.gdb\VH"

# Map each UNC share (first 20 characters of an input path) to the local
# drive letter it is mounted as.
pathletter_dict = {
    r'\\DEQWQNAS01\Lidar0%d' % number: '%s:' % letter
    for number, letter in zip(range(1, 9), "GHIJKLMN")
}
def read_csv(csvfile, skipheader = False):
    """Read a CSV file and return its rows as a list of lists.

    skipheader: when True, the first row (the header) is discarded.
    """
    # Fix: the original used the Python-2-only reader.next() and always
    # opened in binary mode, which breaks the csv module on Python 3.
    if bytes is str:  # Python 2: csv wants binary mode
        open_kwargs = {"mode": "rb"}
    else:             # Python 3: text mode, newline handling left to csv
        open_kwargs = {"mode": "r", "newline": ""}
    with open(csvfile, **open_kwargs) as f:
        reader = csv.reader(f)
        if skipheader:
            next(reader)  # next() works on both Python 2 and 3
        csvlist = [row for row in reader]
    return(csvlist)
def write_csv(csvlist, csvfile):
    """Write rows (a list of lists) to a CSV file, overwriting it."""
    # Fix: dropped the redundant function-local `import csv` (already imported
    # at module level) and made the open() mode portable -- binary mode breaks
    # the csv module on Python 3.
    if bytes is str:  # Python 2: csv wants binary mode
        open_kwargs = {"mode": "wb"}
    else:             # Python 3: text mode without newline translation
        open_kwargs = {"mode": "w", "newline": ""}
    with open(csvfile, **open_kwargs) as f:
        linewriter = csv.writer(f)
        linewriter.writerows(csvlist)
def is_number(s):
    """Return True if *s* parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def get_nodata_value(raster_path, band, status):
    """Return (nodata_value, status) for one band of a raster.

    On any failure to open or read the raster this returns (None, "E") so
    the caller can record the error in the tracking CSV.
    """
    try:
        raster = gdal.Open(raster_path)
        r_band = raster.GetRasterBand(band)
        nodatavalue = r_band.GetNoDataValue()
        raster = None  # drop the reference to close the GDAL dataset
    # Fix: narrowed the bare `except:` -- it also swallowed SystemExit and
    # KeyboardInterrupt, making the script impossible to interrupt here.
    except Exception:
        nodatavalue = None
        status = "E"
    return(nodatavalue,status)
def get_format(raster_list, new_name_col, type_col):
    """Fill the type column of every row with the format (per arcpy.Describe)
    of its warped output raster."""
    for idx, record in enumerate(raster_list):
        target = outpath_warp + "\\" + record[new_name_col] + out_ext
        raster_list[idx][type_col] = arcpy.Describe(target).format
    print("done with getting format")
    return raster_list
def execute_cmd(cmd_list):
    """Executes commands to the command prompt using subprocess module.
    Commands must be a string in a list.

    Returns "X" on success or "E" on failure.
    NOTE(review): the function returns from inside the loop on both
    branches, so only the FIRST command in cmd_list is ever executed.
    Callers in this script always pass a one-element list, so this is
    currently harmless -- confirm before reusing with longer lists.
    """
    for cmd in cmd_list:
        print(cmd)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, shell=True)
        stdout, stderr = proc.communicate()
        exit_code=proc.wait()
        if exit_code:
            # Something went wrong
            status = "E"
            # Try to delete the temp drive for next iteration
            # NOTE(review): cmd[3] is a single CHARACTER of the command
            # string, not a command -- this looks like a bug (perhaps a
            # cleanup command or cmd_list[3] was intended). Confirm.
            proc = subprocess.Popen(cmd[3], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, shell=True)
            print(stderr)
            return (status)
        else:
            print(stdout)
            # Yay! we've reached the end without errors
            status = "X"
            return (status)
# -- Build a list of rasters to iterate through -----------------------
raster_list = read_csv(csvfile, skipheader = False)
tot = len(raster_list)

# -- Warp/reproject the rasters ----------------------------------------
# Row 0 is the CSV header, so processing starts at n == 1. Progress is
# written back to the tracking CSV after every raster so the script can
# be stopped and resumed safely.
for n, row in enumerate(raster_list):
    if n > 0:
        status = row[status_col]
        inpath = row[path_col]
        # Swap the UNC server prefix (first 20 chars) for its mapped drive letter.
        inpath_server = inpath[:20]
        inpath_letter = pathletter_dict[inpath_server]
        inpath_temp = inpath.replace(inpath[:20], inpath_letter) + infile_type
        out_raster = outpath_warp + "\\" + row[new_name_col] + out_ext
        raster_list[n][pathout_col] = out_raster
        # check to see if the output file already exists
        raster_exists = os.path.isfile(out_raster)
        if raster_exists and status in ["X", "#"]:
            # Already warped on a previous run: just record success.
            raster_list[n][status_col] = "X"
            write_csv(raster_list, csvfile)
        else:
            inraster_exists = os.path.isfile(inpath_temp)
            if inraster_exists:
                # Get the no data value.
                # NOTE(review): this reads the UNC path, not the mapped
                # drive path used for warping -- confirm this is intended.
                nodatavalue , status = get_nodata_value(raster_path=inpath, band=1, status=status)
                cmd_list = ['gdalwarp -t_srs {0} -q -r bilinear -srcnodata {1} -dstnodata {2} -of {3} -overwrite {4} {5}'.format(proj_final, nodatavalue, nodatavalue, out_format, inpath_temp, out_raster)]
                print("warping "+str(n)+" of "+str(tot)+" "+inpath_temp)
                # Fix: compare strings with != / ==, not "is" -- identity
                # comparison with literals only works by CPython interning
                # and raises a SyntaxWarning on Python 3.8+.
                if status != "E":
                    status = execute_cmd(cmd_list)
                raster_list[n][status_col] = status
                write_csv(raster_list, csvfile)
            else:
                print("Error: " + inpath_temp + " does not exist")
                status = "E"
        if status == "E":
            print("gdalwarp Error")
            write_csv(raster_list, csvfile)
        # -- Build Pyramids/Mosaic ----------------------------------------
        if buildpyramids:
            print("Building pyramids")
            arcpy.BuildPyramidsandStatistics_management(out_raster, 'NONE', 'BUILD_PYRAMIDS', 'CALCULATE_STATISTICS',
                                  '#', '#', '#', '#', '#', '#', '-1', '#', 'NEAREST', '#', '#', 'SKIP_EXISTING')
        if buildmosaic:
            # Register the warped raster in the target mosaic dataset.
            rastype = "Raster Dataset"
            updatecs = "UPDATE_CELL_SIZES"
            updatebnd = "NO_BOUNDARY"
            updateovr = "NO_OVERVIEWS"
            maxlevel = "-1"
            maxcs = "#"
            maxdim = "#"
            spatialref = "#"
            inputdatafilter = "#"
            subfolder = "NO_SUBFOLDERS"
            duplicate = "EXCLUDE_DUPLICATES"
            buildpy = "NO_PYRAMIDS"
            calcstats = "CALCULATE_STATISTICS"
            buildthumb = "NO_THUMBNAILS"
            comments = "#"
            forcesr = "#"
            arcpy.AddRastersToMosaicDataset_management(
                mdname, rastype, out_raster, updatecs, updatebnd, updateovr,
                maxlevel, maxcs, maxdim, spatialref, inputdatafilter,
                subfolder, duplicate, buildpy, calcstats,
                buildthumb, comments, forcesr)

print("done warping")
"ADF"
] | bd296d945c7055d02daeec1d8c68764dbd526d59e56a916b45283b82bd839576 |
print("""
FEM simulation using getfem++ and siconos.
""")
import siconos.kernel as kernel
import getfemtosiconos as gts
import numpy as np
import getfem as gf
from matplotlib.pyplot import *
# Simulation parameters.
t0 = 0.0       # start time
T = 10.0       # end time
h = 0.005      # time step
g = 9.81       # gravity
e = 0.1        # restitution coefficient for the impact law
mu=0.3         # friction coefficient (declared but not used in this script)
theta = 0.5    # theta-scheme parameter for the Moreau integrator
with_friction = False

# Build the FEM data (mass/stiffness matrices, RHS, initial state) via getfem.
sico = gts.SiconosFem()
fem_model = gts.import_fem(sico)

# =======================================
# Create the siconos Dynamical System
# =======================================
# Initial position and velocity
v0 = np.zeros(sico.nbdof)
block = kernel.LagrangianLinearTIDS(sico.q0,v0,sico.Mass.full())
F = sico.RHS
block.setFExtPtr(F)
block.setKPtr(sico.Stiff.full())
position_init = sico.pos
# =======================================
# The interaction
# =======================================
# One unilateral contact (dimension 1) is created for every third row of
# the observation matrix H, i.e. for the vertical DOF of selected nodes.
dist = 0.5      # gap between the body and the obstacle
diminter = 1    # dimension of each interaction
b = np.repeat([dist], diminter)
nslaw = kernel.NewtonImpactNSL(e)
k=0
pos0 = np.dot(sico.H,sico.pos)
dimH = sico.H.shape[0]
relation=[]
inter=[]
hh = np.zeros((diminter,sico.nbdof))
for i in range(0,dimH,3):
    # NOTE(review): hh is overwritten on every iteration; if
    # LagrangianLinearTIR keeps a reference rather than a copy, all
    # relations would alias the last row -- confirm against siconos.
    hh[0,:] = sico.H[i,:]
    b2 = b + pos0[i]
    relation.append(kernel.LagrangianLinearTIR(hh,b2))
    inter.append(kernel.Interaction(diminter, nslaw, relation[k]))
    k+=1
nbInter=len(inter)
# =======================================
# The Model
# =======================================
blockModel = kernel.Model(t0,T)
# add the dynamical system to the non smooth dynamical system
blockModel.nonSmoothDynamicalSystem().insertDynamicalSystem(block)
# link the interaction and the dynamical system
for i in range(nbInter):
    blockModel.nonSmoothDynamicalSystem().link(inter[i],block);
# The Simulation
# =======================================
# (1) OneStepIntegrators
OSI = kernel.Moreau(theta)
# (2) Time discretisation --
t = kernel.TimeDiscretisation(t0,h)
# (3) one step non smooth problem
if(with_friction):
osnspb = kernel.FrictionContact(3)
osnspb.numericsSolverOptions().iparam[0]=100
osnspb.numericsSolverOptions().iparam[1]=20
osnspb.numericsSolverOptions().iparam[4]=2
osnspb.numericsSolverOptions().dparam[0]=1e-6
osnspb.numericsSolverOptions().dparam[2]=1e-8
osnspb.setMaxSize(1000)
osnspb.setMStorageType(1)
osnspb.setNumericsVerboseMode(0)
osnspb.setKeepLambdaAndYState(true)
else:
osnspb = kernel.LCP()
# (4) Simulation setup with (1) (2) (3)
s = kernel.TimeStepping(t)
s.insertIntegrator(OSI)
s.insertNonSmoothProblem(osnspb)
# simulation initialization
blockModel.initialize(s)
# The number of time steps.
# Fix: (T-t0)/h is a float; modern NumPy rejects float array shapes, so
# round to an integer before building the result matrix.
N = int(round((T-t0)/h))
# Result matrix: one row per step; column 0 is time, columns 1-8 sample
# every third DOF of q (the vertical coordinate of 8 nodes).
dataPlot = np.empty((N+1,9))
dataPlot[0, 0] = t0
dataPlot[0, 1] = block.q()[2]
#dataPlot[0, 2] = block.velocity()[2]
dataPlot[0, 2] = block.q()[5]
dataPlot[0, 3] = block.q()[8]
dataPlot[0, 4] = block.q()[11]
dataPlot[0, 5] = block.q()[14]
dataPlot[0, 6] = block.q()[17]
dataPlot[0, 7] = block.q()[20]
dataPlot[0, 8] = block.q()[23]
nbNodes = sico.mesh.pts().shape[1]
k = 1
# time loop: advance the simulation one step at a time, record absolute
# node positions into dataPlot, and export a VTK slice per step.
while(s.hasNextEvent()):
    s.computeOneStep()
    name = 'titi'+str(k)+'.vtk'   # per-step VTK output file
    dataPlot[k,0]=s.nextTime()
    #dataPlot[k,1]=block.q()[2]
    #dataPlot[k,2]=block.velocity()[2]
    # dataPlot[k, 2] = block.q()[5]
    #dataPlot[k, 3] = block.q()[8]
    #dataPlot[k, 4] = block.q()[11]
    #dataPlot[k, 5] = block.q()[14]
    #dataPlot[k, 6] = block.q()[17]
    #dataPlot[k, 7] = block.q()[20]
    #dataPlot[k, 8] = block.q()[23]
    # Absolute position = displacement q() + reference configuration.
    # NOTE(review): siz is only used by the commented-out code below.
    siz = sico.pos.size
    reference_pos = sico.pos
    current_pos = block.q() + reference_pos
    dataPlot[k,1]=current_pos[2]
    #dataPlot[k,2]=block.velocity()[2]
    dataPlot[k, 2] = current_pos[5]
    dataPlot[k, 3] = current_pos[8]
    dataPlot[k, 4] = current_pos[11]
    dataPlot[k, 5] = current_pos[14]
    dataPlot[k, 6] = current_pos[17]
    dataPlot[k, 7] = current_pos[20]
    dataPlot[k, 8] = current_pos[23]
    # The commented block below was an experiment in re-referencing the
    # configuration each step; kept verbatim for future work.
    #bottom_pos = current_pos[0:12]
    #new_ref = reference_pos
    #translatX = reference_pos[0] - current_pos[0]
    #translatY = reference_pos[1] - current_pos[1]
    #translatZ = reference_pos[2] - current_pos[2]
    #trans = np.repeat([translatX],nbNodes)
    #new_ref[0:siz:3] = reference_pos[0:siz:3] - trans
    #trans = np.repeat([translatY],nbNodes)
    #new_ref[1:siz:3] = reference_pos[1:siz:3] - trans
    #trans = np.repeat([translatZ],nbNodes)
    #new_ref[2:siz:3] = reference_pos[2:siz:3] - trans
    #depl = current_pos - new_ref
    #block.setQPtr(depl)
    #correction = np.dot(sico.H,new_ref)
    #bnew = correction[0:dimH:3]
    #for i in range(nbInter):
    #    bb = b + bnew[i]
    #    relation[i].setEPtr(bb)
    #print "RESUME"
    #relation[0].computeOutput(s.nextTime(),0)
    #inter[0].y(0).display()
    #print current_pos[0:3]
    #print new_ref[0:3]
    #print depl[0:3]
    # Push the displacement into the getfem model and export the boundary.
    fem_model.to_variables(block.q())
    U = fem_model.variable('u')
    #POS = current_pos
    sl = gf.Slice(('boundary',),sico.mfu,1)
    #sl.export_to_vtk(name, sico.mfu, U,'Displacement', sico.mfu, POS, 'Position')
    sl.export_to_vtk(name, sico.mfu, U,'Displacement')
    #print s.nextTime()
    k += 1
    s.nextStep()
# Plot the recorded trajectories (matplotlib names come from the
# `from matplotlib.pyplot import *` at the top of the file).
subplot(211)
title('position')
plot(dataPlot[:,0], dataPlot[:,1])
plot(dataPlot[:,0], dataPlot[:,2])
plot(dataPlot[:,0], dataPlot[:,3])
plot(dataPlot[:,0], dataPlot[:,4])
plot(dataPlot[:,0], dataPlot[:,5])
plot(dataPlot[:,0], dataPlot[:,6])
plot(dataPlot[:,0], dataPlot[:,7])
plot(dataPlot[:,0], dataPlot[:,8])
grid()
subplot(212)
title('velocity')
# NOTE(review): column 2 now holds a position sample (see the time loop),
# so despite the title this panel replots a position, not a velocity.
plot(dataPlot[:,0], dataPlot[:,2])
grid()
show()
| siconos/siconos-deb | examples/Mechanics/FEM/python/block_multi_inter.py | Python | apache-2.0 | 5,665 | [
"VTK"
] | a0672db77d9bfbcc17fb4a10386a320af04a5557e7e46d38f492e25641411e9f |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import (
assert_,
)
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import (
PRM, # ache.prmtop
PRM12, # anti.top
PRM7, # tz2.truncoct.parm7.bz2
PRMpbc,
)
class TOPBase(ParserBase):
    """Shared checks for Amber prmtop/parm7 topology parser tests."""
    parser = mda.topology.TOPParser.TOPParser
    expected_attrs = ["names", "types", "type_indices", "charges", "masses",
                      "resnames"]
    expected_n_segments = 1

    def test_attr_size(self):
        # Per-atom attributes must all have one entry per atom.
        for attr_name in ("names", "types", "type_indices", "charges",
                          "masses"):
            assert_(len(getattr(self.top, attr_name)) == self.expected_n_atoms)
        # Residue names are per-residue.
        assert_(len(self.top.resnames) == self.expected_n_residues)
class TestPRMParser(TOPBase):
    """Parse the PRM fixture (ache.prmtop)."""
    filename = PRM
    expected_n_atoms = 252
    expected_n_residues = 14
    # Elements are not stored in prmtop files, so the parser guesses them.
    guessed_attrs = ['elements']
class TestPRM12Parser(TOPBase):
    """Parse the PRM12 fixture (anti.top, Amber 12 format)."""
    # NOTE: identical to TOPBase.expected_attrs; restated here, possibly
    # to opt out of the guessed 'elements' attribute of the siblings.
    expected_attrs = ["names", "types", "type_indices", "charges", "masses",
                      "resnames"]
    filename = PRM12
    expected_n_atoms = 8923
    expected_n_residues = 2861
    ref_proteinatoms = 0
class TestParm7Parser(TOPBase):
    """Parse the PRM7 fixture (tz2.truncoct.parm7.bz2)."""
    filename = PRM7
    expected_n_atoms = 5827
    expected_n_residues = 1882
    # Elements are not stored in parm7 files, so the parser guesses them.
    guessed_attrs = ['elements']
class TestPRM2(TOPBase):
    """Parse the PRMpbc fixture (periodic system)."""
    filename = PRMpbc
    expected_n_atoms = 5071
    expected_n_residues = 1686
    ref_proteinatoms = 22
    # Elements are not stored in prmtop files, so the parser guesses them.
    guessed_attrs = ['elements']
| alejob/mdanalysis | testsuite/MDAnalysisTests/topology/test_top.py | Python | gpl-2.0 | 2,633 | [
"MDAnalysis"
] | 2cfd846056ffe5dcc8ddbd9ceac917ca95ee15f1083f218a3b4e708b8d22418a |
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''ESP cost functions for estimating and testing charges'''
import numpy as np
from horton.log import biblio
from horton.units import angstrom
from horton.grid.cext import UniformGrid
from horton.espfit.cext import setup_esp_cost_cube, multiply_dens_mask, \
multiply_near_mask, multiply_far_mask
from horton.utils import typecheck_geo
__all__ = ['ESPCost', 'setup_weights']
class ESPCost(object):
    '''Quadratic cost function for fitting point charges to ESP data.

    The cost has the form ``x^T A x - 2 B^T x + C`` where the first
    ``natom`` entries of ``x`` are atomic charges and any remaining
    entries are auxiliary fit parameters (e.g. a constant potential
    offset in the periodic case).
    '''
    def __init__(self, A, B, C, natom):
        '''Store the quadratic (A), linear (B) and constant (C)
        coefficients and the number of atoms.'''
        self._A = A
        self._B = B
        self._C = C
        self.natom = natom

    @classmethod
    def from_hdf5(cls, grp):
        '''Reconstruct an ESPCost from an HDF5 group written by to_hdf5.'''
        return cls(
            grp['A'][:],
            grp['B'][:],
            grp['C'][()],
            grp['natom'][()],
        )

    def to_hdf5(self, grp):
        '''Write the cost coefficients to an HDF5 group.'''
        grp.attrs['class'] = self.__class__.__name__
        grp['A'] = self._A
        grp['B'] = self._B
        grp['C'] = self._C
        grp['natom'] = self.natom

    @classmethod
    def from_grid_data(cls, coordinates, ugrid, vref, weights, rcut=20.0, alpha=None, gcut=None):
        '''Build the cost from reference ESP values on a uniform grid.

        coordinates: (N, 3) array of atomic positions.
        ugrid: a UniformGrid instance describing the ESP grid.
        vref / weights: reference potential and weight per grid point.
        rcut, alpha, gcut: Ewald-style cutoff parameters for the
        periodic case (defaults derived from rcut when omitted).
        '''
        if len(coordinates.shape) != 2 or coordinates.shape[1] != 3:
            raise TypeError('The argument coordinates must be an array with three columns.')
        natom = coordinates.shape[0]
        if alpha is None:
            alpha = 3.0 / rcut
        if gcut is None:
            gcut = 1.1 * alpha
        if isinstance(ugrid, UniformGrid):
            # (The redundant recomputation of natom that used to sit here
            # was removed; it duplicated the assignment above.)
            if (ugrid.pbc == [1, 1, 1]).all():
                # Fully periodic: one extra unknown for the constant
                # potential offset, hence natom+1.
                A = np.zeros((natom+1, natom+1), float)
                B = np.zeros(natom+1, float)
                C = np.zeros((), float)
                setup_esp_cost_cube(ugrid, vref, weights, coordinates, A, B, C, rcut, alpha, gcut)
                return cls(A, B, C, natom)
            else:
                # Aperiodic: no offset parameter and no Ewald screening.
                A = np.zeros((natom, natom), float)
                B = np.zeros(natom, float)
                C = np.zeros((), float)
                setup_esp_cost_cube(ugrid, vref, weights, coordinates, A, B, C, 0.0, 0.0, 0.0)
                return cls(A, B, C, natom)
        else:
            raise NotImplementedError

    def value(self, x):
        '''Evaluate the cost at the full parameter vector x.'''
        return np.dot(x, np.dot(self._A, x) - 2*self._B) + self._C

    def value_charges(self, charges):
        '''Evaluate the cost for given charges, solving for any
        remaining (non-charge) parameters.'''
        if self.natom < len(self._A):
            # Set up a system of equations where all charges are fixed and the
            # remaining parameters are solved for.
            A = self._A[self.natom:,self.natom:]
            B = self._B[self.natom:] - np.dot(charges, self._A[:self.natom,self.natom:])
            C = self._C \
                + np.dot(np.dot(charges, self._A[:self.natom,:self.natom]), charges) \
                - 2*np.dot(self._B[:self.natom], charges)
            x = np.linalg.solve(A, B)
            return C - np.dot(B, x)
        else:
            return self.value(charges)

    def gradient(self, x):
        '''Gradient of the cost with respect to x.'''
        return 2*(np.dot(self._A, x) - self._B)

    def worst(self, qtot=0.0):
        '''Return a worst-case value for the cost function

        **Optional arguments:**

        qtot
            The total charge of the molecule/crystal

        Higher values for the cost function are still possible but if that
        happens, it is better not to use charges at all.
        '''
        charges = np.zeros(self.natom)
        charges[:] = qtot/self.natom
        return self.value_charges(charges)

    def solve(self, qtot=None, ridge=0.0):
        '''Minimize the cost, optionally constraining the total charge.

        qtot: when given, a Lagrange multiplier enforces that the atomic
        charges sum to qtot.
        ridge: relative ridge-regularization strength applied to the
        atomic degrees of freedom only.
        '''
        # apply regularization to atomic degrees of freedom
        A = self._A.copy()
        A.ravel()[::len(A)+1][:self.natom] += ridge*np.diag(A)[:self.natom].mean()
        # construct preconditioned matrices (unit diagonal) for stability
        norms = np.diag(A)**0.5
        A = A/norms/norms.reshape(-1,1)
        B = self._B/norms
        x = np.linalg.solve(A, B)
        if qtot is not None:
            # Fix the total charge with a lagrange multiplier
            d = np.zeros(len(A))
            d[:self.natom] = 1/norms[:self.natom]
            d[self.natom:] = 0.0
            aid = np.linalg.solve(A, d)
            lagrange = (np.dot(aid, B) - qtot)/np.dot(aid, d)
            x -= aid*lagrange
        # undo the preconditioning
        x /= norms
        return x
def setup_weights(coordinates, numbers, grid, dens=None, near=None, far=None):
    '''Define a weight function for the ESPCost

       **Arguments:**

       coordinates
            An array with shape (N, 3) containing atomic coordinates.

       numbers
            A vector with shape (N,) containing atomic numbers.

       grid
            A UniformGrid object.

       **Optional arguments:**

       dens
            The density-based criterion. This is a three-tuple with rho, lnrho0
            and sigma. rho is the atomic or the pro-atomic electron density on
            the same grid as the ESP data. lnrho0 and sigma are parameters
            defined in JCTC, 3, 1004 (2007), DOI:10.1021/ct600295n. The weight
            function takes the form::

                exp(-sigma*(ln(rho) - lnrho0)**2)

            Note that the density, rho, should not contain depletions in the
            atomic cores, as is often encountered with pseudo-potential
            computations. In that case it is recommended to construct a
            promolecular density as input for this option.

       near
            Exclude points near the nuclei. This is a dictionary with as items
            (number, (R0, gamma)).

       far
            Exclude points far away. This is a two-tuple: (R0, gamma).
    '''
    natom, coordinates, numbers = typecheck_geo(coordinates, numbers, need_pseudo_numbers=False)
    weights = np.ones(grid.shape)

    # combine three possible mask functions
    if dens is not None:
        biblio.cite('hu2007', 'for the ESP fitting weight function')
        rho, lnrho0, sigma = dens
        assert (rho.shape == grid.shape).all()
        multiply_dens_mask(rho, lnrho0, sigma, weights)
    if near is not None:
        # Fix: range() instead of the Python-2-only xrange(); iteration
        # semantics are identical.
        for i in range(natom):
            # Per-element radius/steepness, falling back to the entry for
            # atomic number 0 when the element has no specific entry.
            pair = near.get(numbers[i])
            if pair is None:
                pair = near.get(0)
            if pair is None:
                continue
            r0, gamma = pair
            if r0 > 5*angstrom:
                raise ValueError('The wnear radius is excessive. Please keep it below 5 angstrom.')
            multiply_near_mask(coordinates[i], grid, r0, gamma, weights)
    if far is not None:
        r0, gamma = far
        multiply_far_mask(coordinates, grid, r0, gamma, weights)

    # TODO: double-check that the weight goes to zero at non-periodic edges
    return weights
| FarnazH/horton | horton/espfit/cost.py | Python | gpl-3.0 | 7,399 | [
"CRYSTAL"
] | e060b27ae802ed528f6af6f19a10eb60dc0acbfb4f6d5fc6a5736efa6e156440 |
nomes_list = [
"Aaren"
,
"Aarika"
,
"Abagael"
,
"Abagail"
,
"Abbe"
,
"Abbey"
,
"Abbi"
,
"Abbie"
,
"Abby"
,
"Abbye"
,
"Abigael"
,
"Abigail"
,
"Abigale"
,
"Abra"
,
"Ada"
,
"Adah"
,
"Adaline"
,
"Adan"
,
"Adara"
,
"Adda"
,
"Addi"
,
"Addia"
,
"Addie"
,
"Addy"
,
"Adel"
,
"Adela"
,
"Adelaida"
,
"Adelaide"
,
"Adele"
,
"Adelheid"
,
"Adelice"
,
"Adelina"
,
"Adelind"
,
"Adeline"
,
"Adella"
,
"Adelle"
,
"Adena"
,
"Adey"
,
"Adi"
,
"Adiana"
,
"Adina"
,
"Adora"
,
"Adore"
,
"Adoree"
,
"Adorne"
,
"Adrea"
,
"Adria"
,
"Adriaens"
,
"Adrian"
,
"Adriana"
,
"Adriane"
,
"Adrianna"
,
"Adrianne"
,
"Adriena"
,
"Adrienne"
,
"Aeriel"
,
"Aeriela"
,
"Aeriell"
,
"Afton"
,
"Ag"
,
"Agace"
,
"Agata"
,
"Agatha"
,
"Agathe"
,
"Aggi"
,
"Aggie"
,
"Aggy"
,
"Agna"
,
"Agnella"
,
"Agnes"
,
"Agnese"
,
"Agnesse"
,
"Agneta"
,
"Agnola"
,
"Agretha"
,
"Aida"
,
"Aidan"
,
"Aigneis"
,
"Aila"
,
"Aile"
,
"Ailee"
,
"Aileen"
,
"Ailene"
,
"Ailey"
,
"Aili"
,
"Ailina"
,
"Ailis"
,
"Ailsun"
,
"Ailyn"
,
"Aime"
,
"Aimee"
,
"Aimil"
,
"Aindrea"
,
"Ainslee"
,
"Ainsley"
,
"Ainslie"
,
"Ajay"
,
"Alaine"
,
"Alameda"
,
"Alana"
,
"Alanah"
,
"Alane"
,
"Alanna"
,
"Alayne"
,
"Alberta"
,
"Albertina"
,
"Albertine"
,
"Albina"
,
"Alecia"
,
"Aleda"
,
"Aleece"
,
"Aleen"
,
"Alejandra"
,
"Alejandrina"
,
"Alena"
,
"Alene"
,
"Alessandra"
,
"Aleta"
,
"Alethea"
,
"Alex"
,
"Alexa"
,
"Alexandra"
,
"Alexandrina"
,
"Alexi"
,
"Alexia"
,
"Alexina"
,
"Alexine"
,
"Alexis"
,
"Alfi"
,
"Alfie"
,
"Alfreda"
,
"Alfy"
,
"Ali"
,
"Alia"
,
"Alica"
,
"Alice"
,
"Alicea"
,
"Alicia"
,
"Alida"
,
"Alidia"
,
"Alie"
,
"Alika"
,
"Alikee"
,
"Alina"
,
"Aline"
,
"Alis"
,
"Alisa"
,
"Alisha"
,
"Alison"
,
"Alissa"
,
"Alisun"
,
"Alix"
,
"Aliza"
,
"Alla"
,
"Alleen"
,
"Allegra"
,
"Allene"
,
"Alli"
,
"Allianora"
,
"Allie"
,
"Allina"
,
"Allis"
,
"Allison"
,
"Allissa"
,
"Allix"
,
"Allsun"
,
"Allx"
,
"Ally"
,
"Allyce"
,
"Allyn"
,
"Allys"
,
"Allyson"
,
"Alma"
,
"Almeda"
,
"Almeria"
,
"Almeta"
,
"Almira"
,
"Almire"
,
"Aloise"
,
"Aloisia"
,
"Aloysia"
,
"Alta"
,
"Althea"
,
"Alvera"
,
"Alverta"
,
"Alvina"
,
"Alvinia"
,
"Alvira"
,
"Alyce"
,
"Alyda"
,
"Alys"
,
"Alysa"
,
"Alyse"
,
"Alysia"
,
"Alyson"
,
"Alyss"
,
"Alyssa"
,
"Amabel"
,
"Amabelle"
,
"Amalea"
,
"Amalee"
,
"Amaleta"
,
"Amalia"
,
"Amalie"
,
"Amalita"
,
"Amalle"
,
"Amanda"
,
"Amandi"
,
"Amandie"
,
"Amandy"
,
"Amara"
,
"Amargo"
,
"Amata"
,
"Amber"
,
"Amberly"
,
"Ambur"
,
"Ame"
,
"Amelia"
,
"Amelie"
,
"Amelina"
,
"Ameline"
,
"Amelita"
,
"Ami"
,
"Amie"
,
"Amii"
,
"Amil"
,
"Amitie"
,
"Amity"
,
"Ammamaria"
,
"Amy"
,
"Amye"
,
"Ana"
,
"Anabal"
,
"Anabel"
,
"Anabella"
,
"Anabelle"
,
"Analiese"
,
"Analise"
,
"Anallese"
,
"Anallise"
,
"Anastasia"
,
"Anastasie"
,
"Anastassia"
,
"Anatola"
,
"Andee"
,
"Andeee"
,
"Anderea"
,
"Andi"
,
"Andie"
,
"Andra"
,
"Andrea"
,
"Andreana"
,
"Andree"
,
"Andrei"
,
"Andria"
,
"Andriana"
,
"Andriette"
,
"Andromache"
,
"Andy"
,
"Anestassia"
,
"Anet"
,
"Anett"
,
"Anetta"
,
"Anette"
,
"Ange"
,
"Angel"
,
"Angela"
,
"Angele"
,
"Angelia"
,
"Angelica"
,
"Angelika"
,
"Angelina"
,
"Angeline"
,
"Angelique"
,
"Angelita"
,
"Angelle"
,
"Angie"
,
"Angil"
,
"Angy"
,
"Ania"
,
"Anica"
,
"Anissa"
,
"Anita"
,
"Anitra"
,
"Anjanette"
,
"Anjela"
,
"Ann"
,
"Ann-Marie"
,
"Anna"
,
"Anna-Diana"
,
"Anna-Diane"
,
"Anna-Maria"
,
"Annabal"
,
"Annabel"
,
"Annabela"
,
"Annabell"
,
"Annabella"
,
"Annabelle"
,
"Annadiana"
,
"Annadiane"
,
"Annalee"
,
"Annaliese"
,
"Annalise"
,
"Annamaria"
,
"Annamarie"
,
"Anne"
,
"Anne-Corinne"
,
"Anne-Marie"
,
"Annecorinne"
,
"Anneliese"
,
"Annelise"
,
"Annemarie"
,
"Annetta"
,
"Annette"
,
"Anni"
,
"Annice"
,
"Annie"
,
"Annis"
,
"Annissa"
,
"Annmaria"
,
"Annmarie"
,
"Annnora"
,
"Annora"
,
"Anny"
,
"Anselma"
,
"Ansley"
,
"Anstice"
,
"Anthe"
,
"Anthea"
,
"Anthia"
,
"Anthiathia"
,
"Antoinette"
,
"Antonella"
,
"Antonetta"
,
"Antonia"
,
"Antonie"
,
"Antonietta"
,
"Antonina"
,
"Anya"
,
"Appolonia"
,
"April"
,
"Aprilette"
,
"Ara"
,
"Arabel"
,
"Arabela"
,
"Arabele"
,
"Arabella"
,
"Arabelle"
,
"Arda"
,
"Ardath"
,
"Ardeen"
,
"Ardelia"
,
"Ardelis"
,
"Ardella"
,
"Ardelle"
,
"Arden"
,
"Ardene"
,
"Ardenia"
,
"Ardine"
,
"Ardis"
,
"Ardisj"
,
"Ardith"
,
"Ardra"
,
"Ardyce"
,
"Ardys"
,
"Ardyth"
,
"Aretha"
,
"Ariadne"
,
"Ariana"
,
"Aridatha"
,
"Ariel"
,
"Ariela"
,
"Ariella"
,
"Arielle"
,
"Arlana"
,
"Arlee"
,
"Arleen"
,
"Arlen"
,
"Arlena"
,
"Arlene"
,
"Arleta"
,
"Arlette"
,
"Arleyne"
,
"Arlie"
,
"Arliene"
,
"Arlina"
,
"Arlinda"
,
"Arline"
,
"Arluene"
,
"Arly"
,
"Arlyn"
,
"Arlyne"
,
"Aryn"
,
"Ashely"
,
"Ashia"
,
"Ashien"
,
"Ashil"
,
"Ashla"
,
"Ashlan"
,
"Ashlee"
,
"Ashleigh"
,
"Ashlen"
,
"Ashley"
,
"Ashli"
,
"Ashlie"
,
"Ashly"
,
"Asia"
,
"Astra"
,
"Astrid"
,
"Astrix"
,
"Atalanta"
,
"Athena"
,
"Athene"
,
"Atlanta"
,
"Atlante"
,
"Auberta"
,
"Aubine"
,
"Aubree"
,
"Aubrette"
,
"Aubrey"
,
"Aubrie"
,
"Aubry"
,
"Audi"
,
"Audie"
,
"Audra"
,
"Audre"
,
"Audrey"
,
"Audrie"
,
"Audry"
,
"Audrye"
,
"Audy"
,
"Augusta"
,
"Auguste"
,
"Augustina"
,
"Augustine"
,
"Aundrea"
,
"Aura"
,
"Aurea"
,
"Aurel"
,
"Aurelea"
,
"Aurelia"
,
"Aurelie"
,
"Auria"
,
"Aurie"
,
"Aurilia"
,
"Aurlie"
,
"Auroora"
,
"Aurora"
,
"Aurore"
,
"Austin"
,
"Austina"
,
"Austine"
,
"Ava"
,
"Aveline"
,
"Averil"
,
"Averyl"
,
"Avie"
,
"Avis"
,
"Aviva"
,
"Avivah"
,
"Avril"
,
"Avrit"
,
"Ayn"
,
"Bab"
,
"Babara"
,
"Babb"
,
"Babbette"
,
"Babbie"
,
"Babette"
,
"Babita"
,
"Babs"
,
"Bambi"
,
"Bambie"
,
"Bamby"
,
"Barb"
,
"Barbabra"
,
"Barbara"
,
"Barbara-Anne"
,
"Barbaraanne"
,
"Barbe"
,
"Barbee"
,
"Barbette"
,
"Barbey"
,
"Barbi"
,
"Barbie"
,
"Barbra"
,
"Barby"
,
"Bari"
,
"Barrie"
,
"Barry"
,
"Basia"
,
"Bathsheba"
,
"Batsheva"
,
"Bea"
,
"Beatrice"
,
"Beatrisa"
,
"Beatrix"
,
"Beatriz"
,
"Bebe"
,
"Becca"
,
"Becka"
,
"Becki"
,
"Beckie"
,
"Becky"
,
"Bee"
,
"Beilul"
,
"Beitris"
,
"Bekki"
,
"Bel"
,
"Belia"
,
"Belicia"
,
"Belinda"
,
"Belita"
,
"Bell"
,
"Bella"
,
"Bellanca"
,
"Belle"
,
"Bellina"
,
"Belva"
,
"Belvia"
,
"Bendite"
,
"Benedetta"
,
"Benedicta"
,
"Benedikta"
,
"Benetta"
,
"Benita"
,
"Benni"
,
"Bennie"
,
"Benny"
,
"Benoite"
,
"Berenice"
,
"Beret"
,
"Berget"
,
"Berna"
,
"Bernadene"
,
"Bernadette"
,
"Bernadina"
,
"Bernadine"
,
"Bernardina"
,
"Bernardine"
,
"Bernelle"
,
"Bernete"
,
"Bernetta"
,
"Bernette"
,
"Berni"
,
"Bernice"
,
"Bernie"
,
"Bernita"
,
"Berny"
,
"Berri"
,
"Berrie"
,
"Berry"
,
"Bert"
,
"Berta"
,
"Berte"
,
"Bertha"
,
"Berthe"
,
"Berti"
,
"Bertie"
,
"Bertina"
,
"Bertine"
,
"Berty"
,
"Beryl"
,
"Beryle"
,
"Bess"
,
"Bessie"
,
"Bessy"
,
"Beth"
,
"Bethanne"
,
"Bethany"
,
"Bethena"
,
"Bethina"
,
"Betsey"
,
"Betsy"
,
"Betta"
,
"Bette"
,
"Bette-Ann"
,
"Betteann"
,
"Betteanne"
,
"Betti"
,
"Bettina"
,
"Bettine"
,
"Betty"
,
"Bettye"
,
"Beulah"
,
"Bev"
,
"Beverie"
,
"Beverlee"
,
"Beverley"
,
"Beverlie"
,
"Beverly"
,
"Bevvy"
,
"Bianca"
,
"Bianka"
,
"Bibbie"
,
"Bibby"
,
"Bibbye"
,
"Bibi"
,
"Biddie"
,
"Biddy"
,
"Bidget"
,
"Bili"
,
"Bill"
,
"Billi"
,
"Billie"
,
"Billy"
,
"Billye"
,
"Binni"
,
"Binnie"
,
"Binny"
,
"Bird"
,
"Birdie"
,
"Birgit"
,
"Birgitta"
,
"Blair"
,
"Blaire"
,
"Blake"
,
"Blakelee"
,
"Blakeley"
,
"Blanca"
,
"Blanch"
,
"Blancha"
,
"Blanche"
,
"Blinni"
,
"Blinnie"
,
"Blinny"
,
"Bliss"
,
"Blisse"
,
"Blithe"
,
"Blondell"
,
"Blondelle"
,
"Blondie"
,
"Blondy"
,
"Blythe"
,
"Bobbe"
,
"Bobbee"
,
"Bobbette"
,
"Bobbi"
,
"Bobbie"
,
"Bobby"
,
"Bobbye"
,
"Bobette"
,
"Bobina"
,
"Bobine"
,
"Bobinette"
,
"Bonita"
,
"Bonnee"
,
"Bonni"
,
"Bonnibelle"
,
"Bonnie"
,
"Bonny"
,
"Brana"
,
"Brandais"
,
"Brande"
,
"Brandea"
,
"Brandi"
,
"Brandice"
,
"Brandie"
,
"Brandise"
,
"Brandy"
,
"Breanne"
,
"Brear"
,
"Bree"
,
"Breena"
,
"Bren"
,
"Brena"
,
"Brenda"
,
"Brenn"
,
"Brenna"
,
"Brett"
,
"Bria"
,
"Briana"
,
"Brianna"
,
"Brianne"
,
"Bride"
,
"Bridget"
,
"Bridgette"
,
"Bridie"
,
"Brier"
,
"Brietta"
,
"Brigid"
,
"Brigida"
,
"Brigit"
,
"Brigitta"
,
"Brigitte"
,
"Brina"
,
"Briney"
,
"Brinn"
,
"Brinna"
,
"Briny"
,
"Brit"
,
"Brita"
,
"Britney"
,
"Britni"
,
"Britt"
,
"Britta"
,
"Brittan"
,
"Brittaney"
,
"Brittani"
,
"Brittany"
,
"Britte"
,
"Britteny"
,
"Brittne"
,
"Brittney"
,
"Brittni"
,
"Brook"
,
"Brooke"
,
"Brooks"
,
"Brunhilda"
,
"Brunhilde"
,
"Bryana"
,
"Bryn"
,
"Bryna"
,
"Brynn"
,
"Brynna"
,
"Brynne"
,
"Buffy"
,
"Bunni"
,
"Bunnie"
,
"Bunny"
,
"Cacilia"
,
"Cacilie"
,
"Cahra"
,
"Cairistiona"
,
"Caitlin"
,
"Caitrin"
,
"Cal"
,
"Calida"
,
"Calla"
,
"Calley"
,
"Calli"
,
"Callida"
,
"Callie"
,
"Cally"
,
"Calypso"
,
"Cam"
,
"Camala"
,
"Camel"
,
"Camella"
,
"Camellia"
,
"Cami"
,
"Camila"
,
"Camile"
,
"Camilla"
,
"Camille"
,
"Cammi"
,
"Cammie"
,
"Cammy"
,
"Candace"
,
"Candi"
,
"Candice"
,
"Candida"
,
"Candide"
,
"Candie"
,
"Candis"
,
"Candra"
,
"Candy"
,
"Caprice"
,
"Cara"
,
"Caralie"
,
"Caren"
,
"Carena"
,
"Caresa"
,
"Caressa"
,
"Caresse"
,
"Carey"
,
"Cari"
,
"Caria"
,
"Carie"
,
"Caril"
,
"Carilyn"
,
"Carin"
,
"Carina"
,
"Carine"
,
"Cariotta"
,
"Carissa"
,
"Carita"
,
"Caritta"
,
"Carla"
,
"Carlee"
,
"Carleen"
,
"Carlen"
,
"Carlene"
,
"Carley"
,
"Carlie"
,
"Carlin"
,
"Carlina"
,
"Carline"
,
"Carlita"
,
"Carlota"
,
"Carlotta"
,
"Carly"
,
"Carlye"
,
"Carlyn"
,
"Carlynn"
,
"Carlynne"
,
"Carma"
,
"Carmel"
,
"Carmela"
,
"Carmelia"
,
"Carmelina"
,
"Carmelita"
,
"Carmella"
,
"Carmelle"
,
"Carmen"
,
"Carmencita"
,
"Carmina"
,
"Carmine"
,
"Carmita"
,
"Carmon"
,
"Caro"
,
"Carol"
,
"Carol-Jean"
,
"Carola"
,
"Carolan"
,
"Carolann"
,
"Carole"
,
"Carolee"
,
"Carolin"
,
"Carolina"
,
"Caroline"
,
"Caroljean"
,
"Carolyn"
,
"Carolyne"
,
"Carolynn"
,
"Caron"
,
"Carree"
,
"Carri"
,
"Carrie"
,
"Carrissa"
,
"Carroll"
,
"Carry"
,
"Cary"
,
"Caryl"
,
"Caryn"
,
"Casandra"
,
"Casey"
,
"Casi"
,
"Casie"
,
"Cass"
,
"Cassandra"
,
"Cassandre"
,
"Cassandry"
,
"Cassaundra"
,
"Cassey"
,
"Cassi"
,
"Cassie"
,
"Cassondra"
,
"Cassy"
,
"Catarina"
,
"Cate"
,
"Caterina"
,
"Catha"
,
"Catharina"
,
"Catharine"
,
"Cathe"
,
"Cathee"
,
"Catherin"
,
"Catherina"
,
"Catherine"
,
"Cathi"
,
"Cathie"
,
"Cathleen"
,
"Cathlene"
,
"Cathrin"
,
"Cathrine"
,
"Cathryn"
,
"Cathy"
,
"Cathyleen"
,
"Cati"
,
"Catie"
,
"Catina"
,
"Catlaina"
,
"Catlee"
,
"Catlin"
,
"Catrina"
,
"Catriona"
,
"Caty"
,
"Caye"
,
"Cayla"
,
"Cecelia"
,
"Cecil"
,
"Cecile"
,
"Ceciley"
,
"Cecilia"
,
"Cecilla"
,
"Cecily"
,
"Ceil"
,
"Cele"
,
"Celene"
,
"Celesta"
,
"Celeste"
,
"Celestia"
,
"Celestina"
,
"Celestine"
,
"Celestyn"
,
"Celestyna"
,
"Celia"
,
"Celie"
,
"Celina"
,
"Celinda"
,
"Celine"
,
"Celinka"
,
"Celisse"
,
"Celka"
,
"Celle"
,
"Cesya"
,
"Chad"
,
"Chanda"
,
"Chandal"
,
"Chandra"
,
"Channa"
,
"Chantal"
,
"Chantalle"
,
"Charil"
,
"Charin"
,
"Charis"
,
"Charissa"
,
"Charisse"
,
"Charita"
,
"Charity"
,
"Charla"
,
"Charlean"
,
"Charleen"
,
"Charlena"
,
"Charlene"
,
"Charline"
,
"Charlot"
,
"Charlotta"
,
"Charlotte"
,
"Charmain"
,
"Charmaine"
,
"Charmane"
,
"Charmian"
,
"Charmine"
,
"Charmion"
,
"Charo"
,
"Charyl"
,
"Chastity"
,
"Chelsae"
,
"Chelsea"
,
"Chelsey"
,
"Chelsie"
,
"Chelsy"
,
"Cher"
,
"Chere"
,
"Cherey"
,
"Cheri"
,
"Cherianne"
,
"Cherice"
,
"Cherida"
,
"Cherie"
,
"Cherilyn"
,
"Cherilynn"
,
"Cherin"
,
"Cherise"
,
"Cherish"
,
"Cherlyn"
,
"Cherri"
,
"Cherrita"
,
"Cherry"
,
"Chery"
,
"Cherye"
,
"Cheryl"
,
"Cheslie"
,
"Chiarra"
,
"Chickie"
,
"Chicky"
,
"Chiquia"
,
"Chiquita"
,
"Chlo"
,
"Chloe"
,
"Chloette"
,
"Chloris"
,
"Chris"
,
"Chrissie"
,
"Chrissy"
,
"Christa"
,
"Christabel"
,
"Christabella"
,
"Christal"
,
"Christalle"
,
"Christan"
,
"Christean"
,
"Christel"
,
"Christen"
,
"Christi"
,
"Christian"
,
"Christiana"
,
"Christiane"
,
"Christie"
,
"Christin"
,
"Christina"
,
"Christine"
,
"Christy"
,
"Christye"
,
"Christyna"
,
"Chrysa"
,
"Chrysler"
,
"Chrystal"
,
"Chryste"
,
"Chrystel"
,
"Cicely"
,
"Cicily"
,
"Ciel"
,
"Cilka"
,
"Cinda"
,
"Cindee"
,
"Cindelyn"
,
"Cinderella"
,
"Cindi"
,
"Cindie"
,
"Cindra"
,
"Cindy"
,
"Cinnamon"
,
"Cissiee"
,
"Cissy"
,
"Clair"
,
"Claire"
,
"Clara"
,
"Clarabelle"
,
"Clare"
,
"Claresta"
,
"Clareta"
,
"Claretta"
,
"Clarette"
,
"Clarey"
,
"Clari"
,
"Claribel"
,
"Clarice"
,
"Clarie"
,
"Clarinda"
,
"Clarine"
,
"Clarissa"
,
"Clarisse"
,
"Clarita"
,
"Clary"
,
"Claude"
,
"Claudelle"
,
"Claudetta"
,
"Claudette"
,
"Claudia"
,
"Claudie"
,
"Claudina"
,
"Claudine"
,
"Clea"
,
"Clem"
,
"Clemence"
,
"Clementia"
,
"Clementina"
,
"Clementine"
,
"Clemmie"
,
"Clemmy"
,
"Cleo"
,
"Cleopatra"
,
"Clerissa"
,
"Clio"
,
"Clo"
,
"Cloe"
,
"Cloris"
,
"Clotilda"
,
"Clovis"
,
"Codee"
,
"Codi"
,
"Codie"
,
"Cody"
,
"Coleen"
,
"Colene"
,
"Coletta"
,
"Colette"
,
"Colleen"
,
"Collen"
,
"Collete"
,
"Collette"
,
"Collie"
,
"Colline"
,
"Colly"
,
"Con"
,
"Concettina"
,
"Conchita"
,
"Concordia"
,
"Conni"
,
"Connie"
,
"Conny"
,
"Consolata"
,
"Constance"
,
"Constancia"
,
"Constancy"
,
"Constanta"
,
"Constantia"
,
"Constantina"
,
"Constantine"
,
"Consuela"
,
"Consuelo"
,
"Cookie"
,
"Cora"
,
"Corabel"
,
"Corabella"
,
"Corabelle"
,
"Coral"
,
"Coralie"
,
"Coraline"
,
"Coralyn"
,
"Cordelia"
,
"Cordelie"
,
"Cordey"
,
"Cordi"
,
"Cordie"
,
"Cordula"
,
"Cordy"
,
"Coreen"
,
"Corella"
,
"Corenda"
,
"Corene"
,
"Coretta"
,
"Corette"
,
"Corey"
,
"Cori"
,
"Corie"
,
"Corilla"
,
"Corina"
,
"Corine"
,
"Corinna"
,
"Corinne"
,
"Coriss"
,
"Corissa"
,
"Corliss"
,
"Corly"
,
"Cornela"
,
"Cornelia"
,
"Cornelle"
,
"Cornie"
,
"Corny"
,
"Correna"
,
"Correy"
,
"Corri"
,
"Corrianne"
,
"Corrie"
,
"Corrina"
,
"Corrine"
,
"Corrinne"
,
"Corry"
,
"Cortney"
,
"Cory"
,
"Cosetta"
,
"Cosette"
,
"Costanza"
,
"Courtenay"
,
"Courtnay"
,
"Courtney"
,
"Crin"
,
"Cris"
,
"Crissie"
,
"Crissy"
,
"Crista"
,
"Cristabel"
,
"Cristal"
,
"Cristen"
,
"Cristi"
,
"Cristie"
,
"Cristin"
,
"Cristina"
,
"Cristine"
,
"Cristionna"
,
"Cristy"
,
"Crysta"
,
"Crystal"
,
"Crystie"
,
"Cthrine"
,
"Cyb"
,
"Cybil"
,
"Cybill"
,
"Cymbre"
,
"Cynde"
,
"Cyndi"
,
"Cyndia"
,
"Cyndie"
,
"Cyndy"
,
"Cynthea"
,
"Cynthia"
,
"Cynthie"
,
"Cynthy"
,
"Dacey"
,
"Dacia"
,
"Dacie"
,
"Dacy"
,
"Dael"
,
"Daffi"
,
"Daffie"
,
"Daffy"
,
"Dagmar"
,
"Dahlia"
,
"Daile"
,
"Daisey"
,
"Daisi"
,
"Daisie"
,
"Daisy"
,
"Dale"
,
"Dalenna"
,
"Dalia"
,
"Dalila"
,
"Dallas"
,
"Daloris"
,
"Damara"
,
"Damaris"
,
"Damita"
,
"Dana"
,
"Danell"
,
"Danella"
,
"Danette"
,
"Dani"
,
"Dania"
,
"Danica"
,
"Danice"
,
"Daniela"
,
"Daniele"
,
"Daniella"
,
"Danielle"
,
"Danika"
,
"Danila"
,
"Danit"
,
"Danita"
,
"Danna"
,
"Danni"
,
"Dannie"
,
"Danny"
,
"Dannye"
,
"Danya"
,
"Danyelle"
,
"Danyette"
,
"Daphene"
,
"Daphna"
,
"Daphne"
,
"Dara"
,
"Darb"
,
"Darbie"
,
"Darby"
,
"Darcee"
,
"Darcey"
,
"Darci"
,
"Darcie"
,
"Darcy"
,
"Darda"
,
"Dareen"
,
"Darell"
,
"Darelle"
,
"Dari"
,
"Daria"
,
"Darice"
,
"Darla"
,
"Darleen"
,
"Darlene"
,
"Darline"
,
"Darlleen"
,
"Daron"
,
"Darrelle"
,
"Darryl"
,
"Darsey"
,
"Darsie"
,
"Darya"
,
"Daryl"
,
"Daryn"
,
"Dasha"
,
"Dasi"
,
"Dasie"
,
"Dasya"
,
"Datha"
,
"Daune"
,
"Daveen"
,
"Daveta"
,
"Davida"
,
"Davina"
,
"Davine"
,
"Davita"
,
"Dawn"
,
"Dawna"
,
"Dayle"
,
"Dayna"
,
"Ddene"
,
"De"
,
"Deana"
,
"Deane"
,
"Deanna"
,
"Deanne"
,
"Deb"
,
"Debbi"
,
"Debbie"
,
"Debby"
,
"Debee"
,
"Debera"
,
"Debi"
,
"Debor"
,
"Debora"
,
"Deborah"
,
"Debra"
,
"Dede"
,
"Dedie"
,
"Dedra"
,
"Dee"
,
"Dee Dee"
,
"Deeann"
,
"Deeanne"
,
"Deedee"
,
"Deena"
,
"Deerdre"
,
"Deeyn"
,
"Dehlia"
,
"Deidre"
,
"Deina"
,
"Deirdre"
,
"Del"
,
"Dela"
,
"Delcina"
,
"Delcine"
,
"Delia"
,
"Delila"
,
"Delilah"
,
"Delinda"
,
"Dell"
,
"Della"
,
"Delly"
,
"Delora"
,
"Delores"
,
"Deloria"
,
"Deloris"
,
"Delphine"
,
"Delphinia"
,
"Demeter"
,
"Demetra"
,
"Demetria"
,
"Demetris"
,
"Dena"
,
"Deni"
,
"Denice"
,
"Denise"
,
"Denna"
,
"Denni"
,
"Dennie"
,
"Denny"
,
"Deny"
,
"Denys"
,
"Denyse"
,
"Deonne"
,
"Desdemona"
,
"Desirae"
,
"Desiree"
,
"Desiri"
,
"Deva"
,
"Devan"
,
"Devi"
,
"Devin"
,
"Devina"
,
"Devinne"
,
"Devon"
,
"Devondra"
,
"Devonna"
,
"Devonne"
,
"Devora"
,
"Di"
,
"Diahann"
,
"Dian"
,
"Diana"
,
"Diandra"
,
"Diane"
,
"Diane-Marie"
,
"Dianemarie"
,
"Diann"
,
"Dianna"
,
"Dianne"
,
"Diannne"
,
"Didi"
,
"Dido"
,
"Diena"
,
"Dierdre"
,
"Dina"
,
"Dinah"
,
"Dinnie"
,
"Dinny"
,
"Dion"
,
"Dione"
,
"Dionis"
,
"Dionne"
,
"Dita"
,
"Dix"
,
"Dixie"
,
"Dniren"
,
"Dode"
,
"Dodi"
,
"Dodie"
,
"Dody"
,
"Doe"
,
"Doll"
,
"Dolley"
,
"Dolli"
,
"Dollie"
,
"Dolly"
,
"Dolores"
,
"Dolorita"
,
"Doloritas"
,
"Domeniga"
,
"Dominga"
,
"Domini"
,
"Dominica"
,
"Dominique"
,
"Dona"
,
"Donella"
,
"Donelle"
,
"Donetta"
,
"Donia"
,
"Donica"
,
"Donielle"
,
"Donna"
,
"Donnamarie"
,
"Donni"
,
"Donnie"
,
"Donny"
,
"Dora"
,
"Doralia"
,
"Doralin"
,
"Doralyn"
,
"Doralynn"
,
"Doralynne"
,
"Dore"
,
"Doreen"
,
"Dorelia"
,
"Dorella"
,
"Dorelle"
,
"Dorena"
,
"Dorene"
,
"Doretta"
,
"Dorette"
,
"Dorey"
,
"Dori"
,
"Doria"
,
"Dorian"
,
"Dorice"
,
"Dorie"
,
"Dorine"
,
"Doris"
,
"Dorisa"
,
"Dorise"
,
"Dorita"
,
"Doro"
,
"Dorolice"
,
"Dorolisa"
,
"Dorotea"
,
"Doroteya"
,
"Dorothea"
,
"Dorothee"
,
"Dorothy"
,
"Dorree"
,
"Dorri"
,
"Dorrie"
,
"Dorris"
,
"Dorry"
,
"Dorthea"
,
"Dorthy"
,
"Dory"
,
"Dosi"
,
"Dot"
,
"Doti"
,
"Dotti"
,
"Dottie"
,
"Dotty"
,
"Dre"
,
"Dreddy"
,
"Dredi"
,
"Drona"
,
"Dru"
,
"Druci"
,
"Drucie"
,
"Drucill"
,
"Drucy"
,
"Drusi"
,
"Drusie"
,
"Drusilla"
,
"Drusy"
,
"Dulce"
,
"Dulcea"
,
"Dulci"
,
"Dulcia"
,
"Dulciana"
,
"Dulcie"
,
"Dulcine"
,
"Dulcinea"
,
"Dulcy"
,
"Dulsea"
,
"Dusty"
,
"Dyan"
,
"Dyana"
,
"Dyane"
,
"Dyann"
,
"Dyanna"
,
"Dyanne"
,
"Dyna"
,
"Dynah"
,
"Eachelle"
,
"Eada"
,
"Eadie"
,
"Eadith"
,
"Ealasaid"
,
"Eartha"
,
"Easter"
,
"Eba"
,
"Ebba"
,
"Ebonee"
,
"Ebony"
,
"Eda"
,
"Eddi"
,
"Eddie"
,
"Eddy"
,
"Ede"
,
"Edee"
,
"Edeline"
,
"Eden"
,
"Edi"
,
"Edie"
,
"Edin"
,
"Edita"
,
"Edith"
,
"Editha"
,
"Edithe"
,
"Ediva"
,
"Edna"
,
"Edwina"
,
"Edy"
,
"Edyth"
,
"Edythe"
,
"Effie"
,
"Eileen"
,
"Eilis"
,
"Eimile"
,
"Eirena"
,
"Ekaterina"
,
"Elaina"
,
"Elaine"
,
"Elana"
,
"Elane"
,
"Elayne"
,
"Elberta"
,
"Elbertina"
,
"Elbertine"
,
"Eleanor"
,
"Eleanora"
,
"Eleanore"
,
"Electra"
,
"Eleen"
,
"Elena"
,
"Elene"
,
"Eleni"
,
"Elenore"
,
"Eleonora"
,
"Eleonore"
,
"Elfie"
,
"Elfreda"
,
"Elfrida"
,
"Elfrieda"
,
"Elga"
,
"Elianora"
,
"Elianore"
,
"Elicia"
,
"Elie"
,
"Elinor"
,
"Elinore"
,
"Elisa"
,
"Elisabet"
,
"Elisabeth"
,
"Elisabetta"
,
"Elise"
,
"Elisha"
,
"Elissa"
,
"Elita"
,
"Eliza"
,
"Elizabet"
,
"Elizabeth"
,
"Elka"
,
"Elke"
,
"Ella"
,
"Elladine"
,
"Elle"
,
"Ellen"
,
"Ellene"
,
"Ellette"
,
"Elli"
,
"Ellie"
,
"Ellissa"
,
"Elly"
,
"Ellyn"
,
"Ellynn"
,
"Elmira"
,
"Elna"
,
"Elnora"
,
"Elnore"
,
"Eloisa"
,
"Eloise"
,
"Elonore"
,
"Elora"
,
"Elsa"
,
"Elsbeth"
,
"Else"
,
"Elset"
,
"Elsey"
,
"Elsi"
,
"Elsie"
,
"Elsinore"
,
"Elspeth"
,
"Elsy"
,
"Elva"
,
"Elvera"
,
"Elvina"
,
"Elvira"
,
"Elwira"
,
"Elyn"
,
"Elyse"
,
"Elysee"
,
"Elysha"
,
"Elysia"
,
"Elyssa"
,
"Em"
,
"Ema"
,
"Emalee"
,
"Emalia"
,
"Emelda"
,
"Emelia"
,
"Emelina"
,
"Emeline"
,
"Emelita"
,
"Emelyne"
,
"Emera"
,
"Emilee"
,
"Emili"
,
"Emilia"
,
"Emilie"
,
"Emiline"
,
"Emily"
,
"Emlyn"
,
"Emlynn"
,
"Emlynne"
,
"Emma"
,
"Emmalee"
,
"Emmaline"
,
"Emmalyn"
,
"Emmalynn"
,
"Emmalynne"
,
"Emmeline"
,
"Emmey"
,
"Emmi"
,
"Emmie"
,
"Emmy"
,
"Emmye"
,
"Emogene"
,
"Emyle"
,
"Emylee"
,
"Engracia"
,
"Enid"
,
"Enrica"
,
"Enrichetta"
,
"Enrika"
,
"Enriqueta"
,
"Eolanda"
,
"Eolande"
,
"Eran"
,
"Erda"
,
"Erena"
,
"Erica"
,
"Ericha"
,
"Ericka"
,
"Erika"
,
"Erin"
,
"Erina"
,
"Erinn"
,
"Erinna"
,
"Erma"
,
"Ermengarde"
,
"Ermentrude"
,
"Ermina"
,
"Erminia"
,
"Erminie"
,
"Erna"
,
"Ernaline"
,
"Ernesta"
,
"Ernestine"
,
"Ertha"
,
"Eryn"
,
"Esma"
,
"Esmaria"
,
"Esme"
,
"Esmeralda"
,
"Essa"
,
"Essie"
,
"Essy"
,
"Esta"
,
"Estel"
,
"Estele"
,
"Estell"
,
"Estella"
,
"Estelle"
,
"Ester"
,
"Esther"
,
"Estrella"
,
"Estrellita"
,
"Ethel"
,
"Ethelda"
,
"Ethelin"
,
"Ethelind"
,
"Etheline"
,
"Ethelyn"
,
"Ethyl"
,
"Etta"
,
"Etti"
,
"Ettie"
,
"Etty"
,
"Eudora"
,
"Eugenia"
,
"Eugenie"
,
"Eugine"
,
"Eula"
,
"Eulalie"
,
"Eunice"
,
"Euphemia"
,
"Eustacia"
,
"Eva"
,
"Evaleen"
,
"Evangelia"
,
"Evangelin"
,
"Evangelina"
,
"Evangeline"
,
"Evania"
,
"Evanne"
,
"Eve"
,
"Eveleen"
,
"Evelina"
,
"Eveline"
,
"Evelyn"
,
"Evey"
,
"Evie"
,
"Evita"
,
"Evonne"
,
"Evvie"
,
"Evvy"
,
"Evy"
,
"Eyde"
,
"Eydie"
,
"Ezmeralda"
,
"Fae"
,
"Faina"
,
"Faith"
,
"Fallon"
,
"Fan"
,
"Fanchette"
,
"Fanchon"
,
"Fancie"
,
"Fancy"
,
"Fanechka"
,
"Fania"
,
"Fanni"
,
"Fannie"
,
"Fanny"
,
"Fanya"
,
"Fara"
,
"Farah"
,
"Farand"
,
"Farica"
,
"Farra"
,
"Farrah"
,
"Farrand"
,
"Faun"
,
"Faunie"
,
"Faustina"
,
"Faustine"
,
"Fawn"
,
"Fawne"
,
"Fawnia"
,
"Fay"
,
"Faydra"
,
"Faye"
,
"Fayette"
,
"Fayina"
,
"Fayre"
,
"Fayth"
,
"Faythe"
,
"Federica"
,
"Fedora"
,
"Felecia"
,
"Felicdad"
,
"Felice"
,
"Felicia"
,
"Felicity"
,
"Felicle"
,
"Felipa"
,
"Felisha"
,
"Felita"
,
"Feliza"
,
"Fenelia"
,
"Feodora"
,
"Ferdinanda"
,
"Ferdinande"
,
"Fern"
,
"Fernanda"
,
"Fernande"
,
"Fernandina"
,
"Ferne"
,
"Fey"
,
"Fiann"
,
"Fianna"
,
"Fidela"
,
"Fidelia"
,
"Fidelity"
,
"Fifi"
,
"Fifine"
,
"Filia"
,
"Filide"
,
"Filippa"
,
"Fina"
,
"Fiona"
,
"Fionna"
,
"Fionnula"
,
"Fiorenze"
,
"Fleur"
,
"Fleurette"
,
"Flo"
,
"Flor"
,
"Flora"
,
"Florance"
,
"Flore"
,
"Florella"
,
"Florence"
,
"Florencia"
,
"Florentia"
,
"Florenza"
,
"Florette"
,
"Flori"
,
"Floria"
,
"Florida"
,
"Florie"
,
"Florina"
,
"Florinda"
,
"Floris"
,
"Florri"
,
"Florrie"
,
"Florry"
,
"Flory"
,
"Flossi"
,
"Flossie"
,
"Flossy"
,
"Flss"
,
"Fran"
,
"Francene"
,
"Frances"
,
"Francesca"
,
"Francine"
,
"Francisca"
,
"Franciska"
,
"Francoise"
,
"Francyne"
,
"Frank"
,
"Frankie"
,
"Franky"
,
"Franni"
,
"Frannie"
,
"Franny"
,
"Frayda"
,
"Fred"
,
"Freda"
,
"Freddi"
,
"Freddie"
,
"Freddy"
,
"Fredelia"
,
"Frederica"
,
"Fredericka"
,
"Frederique"
,
"Fredi"
,
"Fredia"
,
"Fredra"
,
"Fredrika"
,
"Freida"
,
"Frieda"
,
"Friederike"
,
"Fulvia"
,
"Gabbey"
,
"Gabbi"
,
"Gabbie"
,
"Gabey"
,
"Gabi"
,
"Gabie"
,
"Gabriel"
,
"Gabriela"
,
"Gabriell"
,
"Gabriella"
,
"Gabrielle"
,
"Gabriellia"
,
"Gabrila"
,
"Gaby"
,
"Gae"
,
"Gael"
,
"Gail"
,
"Gale"
,
"Gale"
,
"Galina"
,
"Garland"
,
"Garnet"
,
"Garnette"
,
"Gates"
,
"Gavra"
,
"Gavrielle"
,
"Gay"
,
"Gaye"
,
"Gayel"
,
"Gayla"
,
"Gayle"
,
"Gayleen"
,
"Gaylene"
,
"Gaynor"
,
"Gelya"
,
"Gena"
,
"Gene"
,
"Geneva"
,
"Genevieve"
,
"Genevra"
,
"Genia"
,
"Genna"
,
"Genni"
,
"Gennie"
,
"Gennifer"
,
"Genny"
,
"Genovera"
,
"Genvieve"
,
"George"
,
"Georgeanna"
,
"Georgeanne"
,
"Georgena"
,
"Georgeta"
,
"Georgetta"
,
"Georgette"
,
"Georgia"
,
"Georgiana"
,
"Georgianna"
,
"Georgianne"
,
"Georgie"
,
"Georgina"
,
"Georgine"
,
"Geralda"
,
"Geraldine"
,
"Gerda"
,
"Gerhardine"
,
"Geri"
,
"Gerianna"
,
"Gerianne"
,
"Gerladina"
,
"Germain"
,
"Germaine"
,
"Germana"
,
"Gerri"
,
"Gerrie"
,
"Gerrilee"
,
"Gerry"
,
"Gert"
,
"Gerta"
,
"Gerti"
,
"Gertie"
,
"Gertrud"
,
"Gertruda"
,
"Gertrude"
,
"Gertrudis"
,
"Gerty"
,
"Giacinta"
,
"Giana"
,
"Gianina"
,
"Gianna"
,
"Gigi"
,
"Gilberta"
,
"Gilberte"
,
"Gilbertina"
,
"Gilbertine"
,
"Gilda"
,
"Gilemette"
,
"Gill"
,
"Gillan"
,
"Gilli"
,
"Gillian"
,
"Gillie"
,
"Gilligan"
,
"Gilly"
,
"Gina"
,
"Ginelle"
,
"Ginevra"
,
"Ginger"
,
"Ginni"
,
"Ginnie"
,
"Ginnifer"
,
"Ginny"
,
"Giorgia"
,
"Giovanna"
,
"Gipsy"
,
"Giralda"
,
"Gisela"
,
"Gisele"
,
"Gisella"
,
"Giselle"
,
"Giuditta"
,
"Giulia"
,
"Giulietta"
,
"Giustina"
,
"Gizela"
,
"Glad"
,
"Gladi"
,
"Gladys"
,
"Gleda"
,
"Glen"
,
"Glenda"
,
"Glenine"
,
"Glenn"
,
"Glenna"
,
"Glennie"
,
"Glennis"
,
"Glori"
,
"Gloria"
,
"Gloriana"
,
"Gloriane"
,
"Glory"
,
"Glyn"
,
"Glynda"
,
"Glynis"
,
"Glynnis"
,
"Gnni"
,
"Godiva"
,
"Golda"
,
"Goldarina"
,
"Goldi"
,
"Goldia"
,
"Goldie"
,
"Goldina"
,
"Goldy"
,
"Grace"
,
"Gracia"
,
"Gracie"
,
"Grata"
,
"Gratia"
,
"Gratiana"
,
"Gray"
,
"Grayce"
,
"Grazia"
,
"Greer"
,
"Greta"
,
"Gretal"
,
"Gretchen"
,
"Grete"
,
"Gretel"
,
"Grethel"
,
"Gretna"
,
"Gretta"
,
"Grier"
,
"Griselda"
,
"Grissel"
,
"Guendolen"
,
"Guenevere"
,
"Guenna"
,
"Guglielma"
,
"Gui"
,
"Guillema"
,
"Guillemette"
,
"Guinevere"
,
"Guinna"
,
"Gunilla"
,
"Gus"
,
"Gusella"
,
"Gussi"
,
"Gussie"
,
"Gussy"
,
"Gusta"
,
"Gusti"
,
"Gustie"
,
"Gusty"
,
"Gwen"
,
"Gwendolen"
,
"Gwendolin"
,
"Gwendolyn"
,
"Gweneth"
,
"Gwenette"
,
"Gwenneth"
,
"Gwenni"
,
"Gwennie"
,
"Gwenny"
,
"Gwenora"
,
"Gwenore"
,
"Gwyn"
,
"Gwyneth"
,
"Gwynne"
,
"Gypsy"
,
"Hadria"
,
"Hailee"
,
"Haily"
,
"Haleigh"
,
"Halette"
,
"Haley"
,
"Hali"
,
"Halie"
,
"Halimeda"
,
"Halley"
,
"Halli"
,
"Hallie"
,
"Hally"
,
"Hana"
,
"Hanna"
,
"Hannah"
,
"Hanni"
,
"Hannie"
,
"Hannis"
,
"Hanny"
,
"Happy"
,
"Harlene"
,
"Harley"
,
"Harli"
,
"Harlie"
,
"Harmonia"
,
"Harmonie"
,
"Harmony"
,
"Harri"
,
"Harrie"
,
"Harriet"
,
"Harriett"
,
"Harrietta"
,
"Harriette"
,
"Harriot"
,
"Harriott"
,
"Hatti"
,
"Hattie"
,
"Hatty"
,
"Hayley"
,
"Hazel"
,
"Heath"
,
"Heather"
,
"Heda"
,
"Hedda"
,
"Heddi"
,
"Heddie"
,
"Hedi"
,
"Hedvig"
,
"Hedvige"
,
"Hedwig"
,
"Hedwiga"
,
"Hedy"
,
"Heida"
,
"Heidi"
,
"Heidie"
,
"Helaina"
,
"Helaine"
,
"Helen"
,
"Helen-Elizabeth"
,
"Helena"
,
"Helene"
,
"Helenka"
,
"Helga"
,
"Helge"
,
"Helli"
,
"Heloise"
,
"Helsa"
,
"Helyn"
,
"Hendrika"
,
"Henka"
,
"Henrie"
,
"Henrieta"
,
"Henrietta"
,
"Henriette"
,
"Henryetta"
,
"Hephzibah"
,
"Hermia"
,
"Hermina"
,
"Hermine"
,
"Herminia"
,
"Hermione"
,
"Herta"
,
"Hertha"
,
"Hester"
,
"Hesther"
,
"Hestia"
,
"Hetti"
,
"Hettie"
,
"Hetty"
,
"Hilary"
,
"Hilda"
,
"Hildagard"
,
"Hildagarde"
,
"Hilde"
,
"Hildegaard"
,
"Hildegarde"
,
"Hildy"
,
"Hillary"
,
"Hilliary"
,
"Hinda"
,
"Holli"
,
"Hollie"
,
"Holly"
,
"Holly-Anne"
,
"Hollyanne"
,
"Honey"
,
"Honor"
,
"Honoria"
,
"Hope"
,
"Horatia"
,
"Hortense"
,
"Hortensia"
,
"Hulda"
,
"Hyacinth"
,
"Hyacintha"
,
"Hyacinthe"
,
"Hyacinthia"
,
"Hyacinthie"
,
"Hynda"
,
"Ianthe"
,
"Ibbie"
,
"Ibby"
,
"Ida"
,
"Idalia"
,
"Idalina"
,
"Idaline"
,
"Idell"
,
"Idelle"
,
"Idette"
,
"Ileana"
,
"Ileane"
,
"Ilene"
,
"Ilise"
,
"Ilka"
,
"Illa"
,
"Ilsa"
,
"Ilse"
,
"Ilysa"
,
"Ilyse"
,
"Ilyssa"
,
"Imelda"
,
"Imogen"
,
"Imogene"
,
"Imojean"
,
"Ina"
,
"Indira"
,
"Ines"
,
"Inesita"
,
"Inessa"
,
"Inez"
,
"Inga"
,
"Ingaberg"
,
"Ingaborg"
,
"Inge"
,
"Ingeberg"
,
"Ingeborg"
,
"Inger"
,
"Ingrid"
,
"Ingunna"
,
"Inna"
,
"Iolande"
,
"Iolanthe"
,
"Iona"
,
"Iormina"
,
"Ira"
,
"Irena"
,
"Irene"
,
"Irina"
,
"Iris"
,
"Irita"
,
"Irma"
,
"Isa"
,
"Isabel"
,
"Isabelita"
,
"Isabella"
,
"Isabelle"
,
"Isadora"
,
"Isahella"
,
"Iseabal"
,
"Isidora"
,
"Isis"
,
"Isobel"
,
"Issi"
,
"Issie"
,
"Issy"
,
"Ivett"
,
"Ivette"
,
"Ivie"
,
"Ivonne"
,
"Ivory"
,
"Ivy"
,
"Izabel"
,
"Jacenta"
,
"Jacinda"
,
"Jacinta"
,
"Jacintha"
,
"Jacinthe"
,
"Jackelyn"
,
"Jacki"
,
"Jackie"
,
"Jacklin"
,
"Jacklyn"
,
"Jackquelin"
,
"Jackqueline"
,
"Jacky"
,
"Jaclin"
,
"Jaclyn"
,
"Jacquelin"
,
"Jacqueline"
,
"Jacquelyn"
,
"Jacquelynn"
,
"Jacquenetta"
,
"Jacquenette"
,
"Jacquetta"
,
"Jacquette"
,
"Jacqui"
,
"Jacquie"
,
"Jacynth"
,
"Jada"
,
"Jade"
,
"Jaime"
,
"Jaimie"
,
"Jaine"
,
"Jami"
,
"Jamie"
,
"Jamima"
,
"Jammie"
,
"Jan"
,
"Jana"
,
"Janaya"
,
"Janaye"
,
"Jandy"
,
"Jane"
,
"Janean"
,
"Janeczka"
,
"Janeen"
,
"Janel"
,
"Janela"
,
"Janella"
,
"Janelle"
,
"Janene"
,
"Janenna"
,
"Janessa"
,
"Janet"
,
"Janeta"
,
"Janetta"
,
"Janette"
,
"Janeva"
,
"Janey"
,
"Jania"
,
"Janice"
,
"Janie"
,
"Janifer"
,
"Janina"
,
"Janine"
,
"Janis"
,
"Janith"
,
"Janka"
,
"Janna"
,
"Jannel"
,
"Jannelle"
,
"Janot"
,
"Jany"
,
"Jaquelin"
,
"Jaquelyn"
,
"Jaquenetta"
,
"Jaquenette"
,
"Jaquith"
,
"Jasmin"
,
"Jasmina"
,
"Jasmine"
,
"Jayme"
,
"Jaymee"
,
"Jayne"
,
"Jaynell"
,
"Jazmin"
,
"Jean"
,
"Jeana"
,
"Jeane"
,
"Jeanelle"
,
"Jeanette"
,
"Jeanie"
,
"Jeanine"
,
"Jeanna"
,
"Jeanne"
,
"Jeannette"
,
"Jeannie"
,
"Jeannine"
,
"Jehanna"
,
"Jelene"
,
"Jemie"
,
"Jemima"
,
"Jemimah"
,
"Jemmie"
,
"Jemmy"
,
"Jen"
,
"Jena"
,
"Jenda"
,
"Jenelle"
,
"Jeni"
,
"Jenica"
,
"Jeniece"
,
"Jenifer"
,
"Jeniffer"
,
"Jenilee"
,
"Jenine"
,
"Jenn"
,
"Jenna"
,
"Jennee"
,
"Jennette"
,
"Jenni"
,
"Jennica"
,
"Jennie"
,
"Jennifer"
,
"Jennilee"
,
"Jennine"
,
"Jenny"
,
"Jeralee"
,
"Jere"
,
"Jeri"
,
"Jermaine"
,
"Jerrie"
,
"Jerrilee"
,
"Jerrilyn"
,
"Jerrine"
,
"Jerry"
,
"Jerrylee"
,
"Jess"
,
"Jessa"
,
"Jessalin"
,
"Jessalyn"
,
"Jessamine"
,
"Jessamyn"
,
"Jesse"
,
"Jesselyn"
,
"Jessi"
,
"Jessica"
,
"Jessie"
,
"Jessika"
,
"Jessy"
,
"Jewel"
,
"Jewell"
,
"Jewelle"
,
"Jill"
,
"Jillana"
,
"Jillane"
,
"Jillayne"
,
"Jilleen"
,
"Jillene"
,
"Jilli"
,
"Jillian"
,
"Jillie"
,
"Jilly"
,
"Jinny"
,
"Jo"
,
"Jo Ann"
,
"Jo-Ann"
,
"Jo-Anne"
,
"Joan"
,
"Joana"
,
"Joane"
,
"Joanie"
,
"Joann"
,
"Joanna"
,
"Joanne"
,
"Joannes"
,
"Jobey"
,
"Jobi"
,
"Jobie"
,
"Jobina"
,
"Joby"
,
"Jobye"
,
"Jobyna"
,
"Jocelin"
,
"Joceline"
,
"Jocelyn"
,
"Jocelyne"
,
"Jodee"
,
"Jodi"
,
"Jodie"
,
"Jody"
,
"Joeann"
,
"Joela"
,
"Joelie"
,
"Joell"
,
"Joella"
,
"Joelle"
,
"Joellen"
,
"Joelly"
,
"Joellyn"
,
"Joelynn"
,
"Joete"
,
"Joey"
,
"Johanna"
,
"Johannah"
,
"Johna"
,
"Johnath"
,
"Johnette"
,
"Johnna"
,
"Joice"
,
"Jojo"
,
"Jolee"
,
"Joleen"
,
"Jolene"
,
"Joletta"
,
"Joli"
,
"Jolie"
,
"Joline"
,
"Joly"
,
"Jolyn"
,
"Jolynn"
,
"Jonell"
,
"Joni"
,
"Jonie"
,
"Jonis"
,
"Jordain"
,
"Jordan"
,
"Jordana"
,
"Jordanna"
,
"Jorey"
,
"Jori"
,
"Jorie"
,
"Jorrie"
,
"Jorry"
,
"Joscelin"
,
"Josee"
,
"Josefa"
,
"Josefina"
,
"Josepha"
,
"Josephina"
,
"Josephine"
,
"Josey"
,
"Josi"
,
"Josie"
,
"Josselyn"
,
"Josy"
,
"Jourdan"
,
"Joy"
,
"Joya"
,
"Joyan"
,
"Joyann"
,
"Joyce"
,
"Joycelin"
,
"Joye"
,
"Jsandye"
,
"Juana"
,
"Juanita"
,
"Judi"
,
"Judie"
,
"Judith"
,
"Juditha"
,
"Judy"
,
"Judye"
,
"Juieta"
,
"Julee"
,
"Juli"
,
"Julia"
,
"Juliana"
,
"Juliane"
,
"Juliann"
,
"Julianna"
,
"Julianne"
,
"Julie"
,
"Julienne"
,
"Juliet"
,
"Julieta"
,
"Julietta"
,
"Juliette"
,
"Julina"
,
"Juline"
,
"Julissa"
,
"Julita"
,
"June"
,
"Junette"
,
"Junia"
,
"Junie"
,
"Junina"
,
"Justina"
,
"Justine"
,
"Justinn"
,
"Jyoti"
,
"Kacey"
,
"Kacie"
,
"Kacy"
,
"Kaela"
,
"Kai"
,
"Kaia"
,
"Kaila"
,
"Kaile"
,
"Kailey"
,
"Kaitlin"
,
"Kaitlyn"
,
"Kaitlynn"
,
"Kaja"
,
"Kakalina"
,
"Kala"
,
"Kaleena"
,
"Kali"
,
"Kalie"
,
"Kalila"
,
"Kalina"
,
"Kalinda"
,
"Kalindi"
,
"Kalli"
,
"Kally"
,
"Kameko"
,
"Kamila"
,
"Kamilah"
,
"Kamillah"
,
"Kandace"
,
"Kandy"
,
"Kania"
,
"Kanya"
,
"Kara"
,
"Kara-Lynn"
,
"Karalee"
,
"Karalynn"
,
"Kare"
,
"Karee"
,
"Karel"
,
"Karen"
,
"Karena"
,
"Kari"
,
"Karia"
,
"Karie"
,
"Karil"
,
"Karilynn"
,
"Karin"
,
"Karina"
,
"Karine"
,
"Kariotta"
,
"Karisa"
,
"Karissa"
,
"Karita"
,
"Karla"
,
"Karlee"
,
"Karleen"
,
"Karlen"
,
"Karlene"
,
"Karlie"
,
"Karlotta"
,
"Karlotte"
,
"Karly"
,
"Karlyn"
,
"Karmen"
,
"Karna"
,
"Karol"
,
"Karola"
,
"Karole"
,
"Karolina"
,
"Karoline"
,
"Karoly"
,
"Karon"
,
"Karrah"
,
"Karrie"
,
"Karry"
,
"Kary"
,
"Karyl"
,
"Karylin"
,
"Karyn"
,
"Kasey"
,
"Kass"
,
"Kassandra"
,
"Kassey"
,
"Kassi"
,
"Kassia"
,
"Kassie"
,
"Kat"
,
"Kata"
,
"Katalin"
,
"Kate"
,
"Katee"
,
"Katerina"
,
"Katerine"
,
"Katey"
,
"Kath"
,
"Katha"
,
"Katharina"
,
"Katharine"
,
"Katharyn"
,
"Kathe"
,
"Katherina"
,
"Katherine"
,
"Katheryn"
,
"Kathi"
,
"Kathie"
,
"Kathleen"
,
"Kathlin"
,
"Kathrine"
,
"Kathryn"
,
"Kathryne"
,
"Kathy"
,
"Kathye"
,
"Kati"
,
"Katie"
,
"Katina"
,
"Katine"
,
"Katinka"
,
"Katleen"
,
"Katlin"
,
"Katrina"
,
"Katrine"
,
"Katrinka"
,
"Katti"
,
"Kattie"
,
"Katuscha"
,
"Katusha"
,
"Katy"
,
"Katya"
,
"Kay"
,
"Kaycee"
,
"Kaye"
,
"Kayla"
,
"Kayle"
,
"Kaylee"
,
"Kayley"
,
"Kaylil"
,
"Kaylyn"
,
"Keeley"
,
"Keelia"
,
"Keely"
,
"Kelcey"
,
"Kelci"
,
"Kelcie"
,
"Kelcy"
,
"Kelila"
,
"Kellen"
,
"Kelley"
,
"Kelli"
,
"Kellia"
,
"Kellie"
,
"Kellina"
,
"Kellsie"
,
"Kelly"
,
"Kellyann"
,
"Kelsey"
,
"Kelsi"
,
"Kelsy"
,
"Kendra"
,
"Kendre"
,
"Kenna"
,
"Keri"
,
"Keriann"
,
"Kerianne"
,
"Kerri"
,
"Kerrie"
,
"Kerrill"
,
"Kerrin"
,
"Kerry"
,
"Kerstin"
,
"Kesley"
,
"Keslie"
,
"Kessia"
,
"Kessiah"
,
"Ketti"
,
"Kettie"
,
"Ketty"
,
"Kevina"
,
"Kevyn"
,
"Ki"
,
"Kiah"
,
"Kial"
,
"Kiele"
,
"Kiersten"
,
"Kikelia"
,
"Kiley"
,
"Kim"
,
"Kimberlee"
,
"Kimberley"
,
"Kimberli"
,
"Kimberly"
,
"Kimberlyn"
,
"Kimbra"
,
"Kimmi"
,
"Kimmie"
,
"Kimmy"
,
"Kinna"
,
"Kip"
,
"Kipp"
,
"Kippie"
,
"Kippy"
,
"Kira"
,
"Kirbee"
,
"Kirbie"
,
"Kirby"
,
"Kiri"
,
"Kirsten"
,
"Kirsteni"
,
"Kirsti"
,
"Kirstin"
,
"Kirstyn"
,
"Kissee"
,
"Kissiah"
,
"Kissie"
,
"Kit"
,
"Kitti"
,
"Kittie"
,
"Kitty"
,
"Kizzee"
,
"Kizzie"
,
"Klara"
,
"Klarika"
,
"Klarrisa"
,
"Konstance"
,
"Konstanze"
,
"Koo"
,
"Kora"
,
"Koral"
,
"Koralle"
,
"Kordula"
,
"Kore"
,
"Korella"
,
"Koren"
,
"Koressa"
,
"Kori"
,
"Korie"
,
"Korney"
,
"Korrie"
,
"Korry"
,
"Kris"
,
"Krissie"
,
"Krissy"
,
"Krista"
,
"Kristal"
,
"Kristan"
,
"Kriste"
,
"Kristel"
,
"Kristen"
,
"Kristi"
,
"Kristien"
,
"Kristin"
,
"Kristina"
,
"Kristine"
,
"Kristy"
,
"Kristyn"
,
"Krysta"
,
"Krystal"
,
"Krystalle"
,
"Krystle"
,
"Krystyna"
,
"Kyla"
,
"Kyle"
,
"Kylen"
,
"Kylie"
,
"Kylila"
,
"Kylynn"
,
"Kym"
,
"Kynthia"
,
"Kyrstin"
,
"La Verne"
,
"Lacee"
,
"Lacey"
,
"Lacie"
,
"Lacy"
,
"Ladonna"
,
"Laetitia"
,
"Laina"
,
"Lainey"
,
"Lana"
,
"Lanae"
,
"Lane"
,
"Lanette"
,
"Laney"
,
"Lani"
,
"Lanie"
,
"Lanita"
,
"Lanna"
,
"Lanni"
,
"Lanny"
,
"Lara"
,
"Laraine"
,
"Lari"
,
"Larina"
,
"Larine"
,
"Larisa"
,
"Larissa"
,
"Lark"
,
"Laryssa"
,
"Latashia"
,
"Latia"
,
"Latisha"
,
"Latrena"
,
"Latrina"
,
"Laura"
,
"Lauraine"
,
"Laural"
,
"Lauralee"
,
"Laure"
,
"Lauree"
,
"Laureen"
,
"Laurel"
,
"Laurella"
,
"Lauren"
,
"Laurena"
,
"Laurene"
,
"Lauretta"
,
"Laurette"
,
"Lauri"
,
"Laurianne"
,
"Laurice"
,
"Laurie"
,
"Lauryn"
,
"Lavena"
,
"Laverna"
,
"Laverne"
,
"Lavina"
,
"Lavinia"
,
"Lavinie"
,
"Layla"
,
"Layne"
,
"Layney"
,
"Lea"
,
"Leah"
,
"Leandra"
,
"Leann"
,
"Leanna"
,
"Leanor"
,
"Leanora"
,
"Lebbie"
,
"Leda"
,
"Lee"
,
"Leeann"
,
"Leeanne"
,
"Leela"
,
"Leelah"
,
"Leena"
,
"Leesa"
,
"Leese"
,
"Legra"
,
"Leia"
,
"Leigh"
,
"Leigha"
,
"Leila"
,
"Leilah"
,
"Leisha"
,
"Lela"
,
"Lelah"
,
"Leland"
,
"Lelia"
,
"Lena"
,
"Lenee"
,
"Lenette"
,
"Lenka"
,
"Lenna"
,
"Lenora"
,
"Lenore"
,
"Leodora"
,
"Leoine"
,
"Leola"
,
"Leoline"
,
"Leona"
,
"Leonanie"
,
"Leone"
,
"Leonelle"
,
"Leonie"
,
"Leonora"
,
"Leonore"
,
"Leontine"
,
"Leontyne"
,
"Leora"
,
"Leshia"
,
"Lesley"
,
"Lesli"
,
"Leslie"
,
"Lesly"
,
"Lesya"
,
"Leta"
,
"Lethia"
,
"Leticia"
,
"Letisha"
,
"Letitia"
,
"Letizia"
,
"Letta"
,
"Letti"
,
"Lettie"
,
"Letty"
,
"Lexi"
,
"Lexie"
,
"Lexine"
,
"Lexis"
,
"Lexy"
,
"Leyla"
,
"Lezlie"
,
"Lia"
,
"Lian"
,
"Liana"
,
"Liane"
,
"Lianna"
,
"Lianne"
,
"Lib"
,
"Libbey"
,
"Libbi"
,
"Libbie"
,
"Libby"
,
"Licha"
,
"Lida"
,
"Lidia"
,
"Liesa"
,
"Lil"
,
"Lila"
,
"Lilah"
,
"Lilas"
,
"Lilia"
,
"Lilian"
,
"Liliane"
,
"Lilias"
,
"Lilith"
,
"Lilla"
,
"Lilli"
,
"Lillian"
,
"Lillis"
,
"Lilllie"
,
"Lilly"
,
"Lily"
,
"Lilyan"
,
"Lin"
,
"Lina"
,
"Lind"
,
"Linda"
,
"Lindi"
,
"Lindie"
,
"Lindsay"
,
"Lindsey"
,
"Lindsy"
,
"Lindy"
,
"Linea"
,
"Linell"
,
"Linet"
,
"Linette"
,
"Linn"
,
"Linnea"
,
"Linnell"
,
"Linnet"
,
"Linnie"
,
"Linzy"
,
"Lira"
,
"Lisa"
,
"Lisabeth"
,
"Lisbeth"
,
"Lise"
,
"Lisetta"
,
"Lisette"
,
"Lisha"
,
"Lishe"
,
"Lissa"
,
"Lissi"
,
"Lissie"
,
"Lissy"
,
"Lita"
,
"Liuka"
,
"Liv"
,
"Liva"
,
"Livia"
,
"Livvie"
,
"Livvy"
,
"Livvyy"
,
"Livy"
,
"Liz"
,
"Liza"
,
"Lizabeth"
,
"Lizbeth"
,
"Lizette"
,
"Lizzie"
,
"Lizzy"
,
"Loella"
,
"Lois"
,
"Loise"
,
"Lola"
,
"Loleta"
,
"Lolita"
,
"Lolly"
,
"Lona"
,
"Lonee"
,
"Loni"
,
"Lonna"
,
"Lonni"
,
"Lonnie"
,
"Lora"
,
"Lorain"
,
"Loraine"
,
"Loralee"
,
"Loralie"
,
"Loralyn"
,
"Loree"
,
"Loreen"
,
"Lorelei"
,
"Lorelle"
,
"Loren"
,
"Lorena"
,
"Lorene"
,
"Lorenza"
,
"Loretta"
,
"Lorette"
,
"Lori"
,
"Loria"
,
"Lorianna"
,
"Lorianne"
,
"Lorie"
,
"Lorilee"
,
"Lorilyn"
,
"Lorinda"
,
"Lorine"
,
"Lorita"
,
"Lorna"
,
"Lorne"
,
"Lorraine"
,
"Lorrayne"
,
"Lorri"
,
"Lorrie"
,
"Lorrin"
,
"Lorry"
,
"Lory"
,
"Lotta"
,
"Lotte"
,
"Lotti"
,
"Lottie"
,
"Lotty"
,
"Lou"
,
"Louella"
,
"Louisa"
,
"Louise"
,
"Louisette"
,
"Loutitia"
,
"Lu"
,
"Luce"
,
"Luci"
,
"Lucia"
,
"Luciana"
,
"Lucie"
,
"Lucienne"
,
"Lucila"
,
"Lucilia"
,
"Lucille"
,
"Lucina"
,
"Lucinda"
,
"Lucine"
,
"Lucita"
,
"Lucky"
,
"Lucretia"
,
"Lucy"
,
"Ludovika"
,
"Luella"
,
"Luelle"
,
"Luisa"
,
"Luise"
,
"Lula"
,
"Lulita"
,
"Lulu"
,
"Lura"
,
"Lurette"
,
"Lurleen"
,
"Lurlene"
,
"Lurline"
,
"Lusa"
,
"Luz"
,
"Lyda"
,
"Lydia"
,
"Lydie"
,
"Lyn"
,
"Lynda"
,
"Lynde"
,
"Lyndel"
,
"Lyndell"
,
"Lyndsay"
,
"Lyndsey"
,
"Lyndsie"
,
"Lyndy"
,
"Lynea"
,
"Lynelle"
,
"Lynett"
,
"Lynette"
,
"Lynn"
,
"Lynna"
,
"Lynne"
,
"Lynnea"
,
"Lynnell"
,
"Lynnelle"
,
"Lynnet"
,
"Lynnett"
,
"Lynnette"
,
"Lynsey"
,
"Lyssa"
,
"Mab"
,
"Mabel"
,
"Mabelle"
,
"Mable"
,
"Mada"
,
"Madalena"
,
"Madalyn"
,
"Maddalena"
,
"Maddi"
,
"Maddie"
,
"Maddy"
,
"Madel"
,
"Madelaine"
,
"Madeleine"
,
"Madelena"
,
"Madelene"
,
"Madelin"
,
"Madelina"
,
"Madeline"
,
"Madella"
,
"Madelle"
,
"Madelon"
,
"Madelyn"
,
"Madge"
,
"Madlen"
,
"Madlin"
,
"Madonna"
,
"Mady"
,
"Mae"
,
"Maegan"
,
"Mag"
,
"Magda"
,
"Magdaia"
,
"Magdalen"
,
"Magdalena"
,
"Magdalene"
,
"Maggee"
,
"Maggi"
,
"Maggie"
,
"Maggy"
,
"Mahala"
,
"Mahalia"
,
"Maia"
,
"Maible"
,
"Maiga"
,
"Maighdiln"
,
"Mair"
,
"Maire"
,
"Maisey"
,
"Maisie"
,
"Maitilde"
,
"Mala"
,
"Malanie"
,
"Malena"
,
"Malia"
,
"Malina"
,
"Malinda"
,
"Malinde"
,
"Malissa"
,
"Malissia"
,
"Mallissa"
,
"Mallorie"
,
"Mallory"
,
"Malorie"
,
"Malory"
,
"Malva"
,
"Malvina"
,
"Malynda"
,
"Mame"
,
"Mamie"
,
"Manda"
,
"Mandi"
,
"Mandie"
,
"Mandy"
,
"Manon"
,
"Manya"
,
"Mara"
,
"Marabel"
,
"Marcela"
,
"Marcelia"
,
"Marcella"
,
"Marcelle"
,
"Marcellina"
,
"Marcelline"
,
"Marchelle"
,
"Marci"
,
"Marcia"
,
"Marcie"
,
"Marcile"
,
"Marcille"
,
"Marcy"
,
"Mareah"
,
"Maren"
,
"Marena"
,
"Maressa"
,
"Marga"
,
"Margalit"
,
"Margalo"
,
"Margaret"
,
"Margareta"
,
"Margarete"
,
"Margaretha"
,
"Margarethe"
,
"Margaretta"
,
"Margarette"
,
"Margarita"
,
"Margaux"
,
"Marge"
,
"Margeaux"
,
"Margery"
,
"Marget"
,
"Margette"
,
"Margi"
,
"Margie"
,
"Margit"
,
"Margo"
,
"Margot"
,
"Margret"
,
"Marguerite"
,
"Margy"
,
"Mari"
,
"Maria"
,
"Mariam"
,
"Marian"
,
"Mariana"
,
"Mariann"
,
"Marianna"
,
"Marianne"
,
"Maribel"
,
"Maribelle"
,
"Maribeth"
,
"Marice"
,
"Maridel"
,
"Marie"
,
"Marie-Ann"
,
"Marie-Jeanne"
,
"Marieann"
,
"Mariejeanne"
,
"Mariel"
,
"Mariele"
,
"Marielle"
,
"Mariellen"
,
"Marietta"
,
"Mariette"
,
"Marigold"
,
"Marijo"
,
"Marika"
,
"Marilee"
,
"Marilin"
,
"Marillin"
,
"Marilyn"
,
"Marin"
,
"Marina"
,
"Marinna"
,
"Marion"
,
"Mariquilla"
,
"Maris"
,
"Marisa"
,
"Mariska"
,
"Marissa"
,
"Marita"
,
"Maritsa"
,
"Mariya"
,
"Marj"
,
"Marja"
,
"Marje"
,
"Marji"
,
"Marjie"
,
"Marjorie"
,
"Marjory"
,
"Marjy"
,
"Marketa"
,
"Marla"
,
"Marlane"
,
"Marleah"
,
"Marlee"
,
"Marleen"
,
"Marlena"
,
"Marlene"
,
"Marley"
,
"Marlie"
,
"Marline"
,
"Marlo"
,
"Marlyn"
,
"Marna"
,
"Marne"
,
"Marney"
,
"Marni"
,
"Marnia"
,
"Marnie"
,
"Marquita"
,
"Marrilee"
,
"Marris"
,
"Marrissa"
,
"Marsha"
,
"Marsiella"
,
"Marta"
,
"Martelle"
,
"Martguerita"
,
"Martha"
,
"Marthe"
,
"Marthena"
,
"Marti"
,
"Martica"
,
"Martie"
,
"Martina"
,
"Martita"
,
"Marty"
,
"Martynne"
,
"Mary"
,
"Marya"
,
"Maryann"
,
"Maryanna"
,
"Maryanne"
,
"Marybelle"
,
"Marybeth"
,
"Maryellen"
,
"Maryjane"
,
"Maryjo"
,
"Maryl"
,
"Marylee"
,
"Marylin"
,
"Marylinda"
,
"Marylou"
,
"Marylynne"
,
"Maryrose"
,
"Marys"
,
"Marysa"
,
"Masha"
,
"Matelda"
,
"Mathilda"
,
"Mathilde"
,
"Matilda"
,
"Matilde"
,
"Matti"
,
"Mattie"
,
"Matty"
,
"Maud"
,
"Maude"
,
"Maudie"
,
"Maura"
,
"Maure"
,
"Maureen"
,
"Maureene"
,
"Maurene"
,
"Maurine"
,
"Maurise"
,
"Maurita"
,
"Maurizia"
,
"Mavis"
,
"Mavra"
,
"Max"
,
"Maxi"
,
"Maxie"
,
"Maxine"
,
"Maxy"
,
"May"
,
"Maybelle"
,
"Maye"
,
"Mead"
,
"Meade"
,
"Meagan"
,
"Meaghan"
,
"Meara"
,
"Mechelle"
,
"Meg"
,
"Megan"
,
"Megen"
,
"Meggi"
,
"Meggie"
,
"Meggy"
,
"Meghan"
,
"Meghann"
,
"Mehetabel"
,
"Mei"
,
"Mel"
,
"Mela"
,
"Melamie"
,
"Melania"
,
"Melanie"
,
"Melantha"
,
"Melany"
,
"Melba"
,
"Melesa"
,
"Melessa"
,
"Melicent"
,
"Melina"
,
"Melinda"
,
"Melinde"
,
"Melisa"
,
"Melisande"
,
"Melisandra"
,
"Melisenda"
,
"Melisent"
,
"Melissa"
,
"Melisse"
,
"Melita"
,
"Melitta"
,
"Mella"
,
"Melli"
,
"Mellicent"
,
"Mellie"
,
"Mellisa"
,
"Mellisent"
,
"Melloney"
,
"Melly"
,
"Melodee"
,
"Melodie"
,
"Melody"
,
"Melonie"
,
"Melony"
,
"Melosa"
,
"Melva"
,
"Mercedes"
,
"Merci"
,
"Mercie"
,
"Mercy"
,
"Meredith"
,
"Meredithe"
,
"Meridel"
,
"Meridith"
,
"Meriel"
,
"Merilee"
,
"Merilyn"
,
"Meris"
,
"Merissa"
,
"Merl"
,
"Merla"
,
"Merle"
,
"Merlina"
,
"Merline"
,
"Merna"
,
"Merola"
,
"Merralee"
,
"Merridie"
,
"Merrie"
,
"Merrielle"
,
"Merrile"
,
"Merrilee"
,
"Merrili"
,
"Merrill"
,
"Merrily"
,
"Merry"
,
"Mersey"
,
"Meryl"
,
"Meta"
,
"Mia"
,
"Micaela"
,
"Michaela"
,
"Michaelina"
,
"Michaeline"
,
"Michaella"
,
"Michal"
,
"Michel"
,
"Michele"
,
"Michelina"
,
"Micheline"
,
"Michell"
,
"Michelle"
,
"Micki"
,
"Mickie"
,
"Micky"
,
"Midge"
,
"Mignon"
,
"Mignonne"
,
"Miguela"
,
"Miguelita"
,
"Mikaela"
,
"Mil"
,
"Mildred"
,
"Mildrid"
,
"Milena"
,
"Milicent"
,
"Milissent"
,
"Milka"
,
"Milli"
,
"Millicent"
,
"Millie"
,
"Millisent"
,
"Milly"
,
"Milzie"
,
"Mimi"
,
"Min"
,
"Mina"
,
"Minda"
,
"Mindy"
,
"Minerva"
,
"Minetta"
,
"Minette"
,
"Minna"
,
"Minnaminnie"
,
"Minne"
,
"Minni"
,
"Minnie"
,
"Minnnie"
,
"Minny"
,
"Minta"
,
"Miof Mela"
,
"Miquela"
,
"Mira"
,
"Mirabel"
,
"Mirabella"
,
"Mirabelle"
,
"Miran"
,
"Miranda"
,
"Mireielle"
,
"Mireille"
,
"Mirella"
,
"Mirelle"
,
"Miriam"
,
"Mirilla"
,
"Mirna"
,
"Misha"
,
"Missie"
,
"Missy"
,
"Misti"
,
"Misty"
,
"Mitzi"
,
"Modesta"
,
"Modestia"
,
"Modestine"
,
"Modesty"
,
"Moina"
,
"Moira"
,
"Moll"
,
"Mollee"
,
"Molli"
,
"Mollie"
,
"Molly"
,
"Mommy"
,
"Mona"
,
"Monah"
,
"Monica"
,
"Monika"
,
"Monique"
,
"Mora"
,
"Moreen"
,
"Morena"
,
"Morgan"
,
"Morgana"
,
"Morganica"
,
"Morganne"
,
"Morgen"
,
"Moria"
,
"Morissa"
,
"Morna"
,
"Moselle"
,
"Moyna"
,
"Moyra"
,
"Mozelle"
,
"Muffin"
,
"Mufi"
,
"Mufinella"
,
"Muire"
,
"Mureil"
,
"Murial"
,
"Muriel"
,
"Murielle"
,
"Myra"
,
"Myrah"
,
"Myranda"
,
"Myriam"
,
"Myrilla"
,
"Myrle"
,
"Myrlene"
,
"Myrna"
,
"Myrta"
,
"Myrtia"
,
"Myrtice"
,
"Myrtie"
,
"Myrtle"
,
"Nada"
,
"Nadean"
,
"Nadeen"
,
"Nadia"
,
"Nadine"
,
"Nadiya"
,
"Nady"
,
"Nadya"
,
"Nalani"
,
"Nan"
,
"Nana"
,
"Nananne"
,
"Nance"
,
"Nancee"
,
"Nancey"
,
"Nanci"
,
"Nancie"
,
"Nancy"
,
"Nanete"
,
"Nanette"
,
"Nani"
,
"Nanice"
,
"Nanine"
,
"Nannette"
,
"Nanni"
,
"Nannie"
,
"Nanny"
,
"Nanon"
,
"Naoma"
,
"Naomi"
,
"Nara"
,
"Nari"
,
"Nariko"
,
"Nat"
,
"Nata"
,
"Natala"
,
"Natalee"
,
"Natalie"
,
"Natalina"
,
"Nataline"
,
"Natalya"
,
"Natasha"
,
"Natassia"
,
"Nathalia"
,
"Nathalie"
,
"Natividad"
,
"Natka"
,
"Natty"
,
"Neala"
,
"Neda"
,
"Nedda"
,
"Nedi"
,
"Neely"
,
"Neila"
,
"Neile"
,
"Neilla"
,
"Neille"
,
"Nelia"
,
"Nelie"
,
"Nell"
,
"Nelle"
,
"Nelli"
,
"Nellie"
,
"Nelly"
,
"Nerissa"
,
"Nerita"
,
"Nert"
,
"Nerta"
,
"Nerte"
,
"Nerti"
,
"Nertie"
,
"Nerty"
,
"Nessa"
,
"Nessi"
,
"Nessie"
,
"Nessy"
,
"Nesta"
,
"Netta"
,
"Netti"
,
"Nettie"
,
"Nettle"
,
"Netty"
,
"Nevsa"
,
"Neysa"
,
"Nichol"
,
"Nichole"
,
"Nicholle"
,
"Nicki"
,
"Nickie"
,
"Nicky"
,
"Nicol"
,
"Nicola"
,
"Nicole"
,
"Nicolea"
,
"Nicolette"
,
"Nicoli"
,
"Nicolina"
,
"Nicoline"
,
"Nicolle"
,
"Nikaniki"
,
"Nike"
,
"Niki"
,
"Nikki"
,
"Nikkie"
,
"Nikoletta"
,
"Nikolia"
,
"Nina"
,
"Ninetta"
,
"Ninette"
,
"Ninnetta"
,
"Ninnette"
,
"Ninon"
,
"Nissa"
,
"Nisse"
,
"Nissie"
,
"Nissy"
,
"Nita"
,
"Nixie"
,
"Noami"
,
"Noel"
,
"Noelani"
,
"Noell"
,
"Noella"
,
"Noelle"
,
"Noellyn"
,
"Noelyn"
,
"Noemi"
,
"Nola"
,
"Nolana"
,
"Nolie"
,
"Nollie"
,
"Nomi"
,
"Nona"
,
"Nonah"
,
"Noni"
,
"Nonie"
,
"Nonna"
,
"Nonnah"
,
"Nora"
,
"Norah"
,
"Norean"
,
"Noreen"
,
"Norene"
,
"Norina"
,
"Norine"
,
"Norma"
,
"Norri"
,
"Norrie"
,
"Norry"
,
"Novelia"
,
"Nydia"
,
"Nyssa"
,
"Octavia"
,
"Odele"
,
"Odelia"
,
"Odelinda"
,
"Odella"
,
"Odelle"
,
"Odessa"
,
"Odetta"
,
"Odette"
,
"Odilia"
,
"Odille"
,
"Ofelia"
,
"Ofella"
,
"Ofilia"
,
"Ola"
,
"Olenka"
,
"Olga"
,
"Olia"
,
"Olimpia"
,
"Olive"
,
"Olivette"
,
"Olivia"
,
"Olivie"
,
"Oliy"
,
"Ollie"
,
"Olly"
,
"Olva"
,
"Olwen"
,
"Olympe"
,
"Olympia"
,
"Olympie"
,
"Ondrea"
,
"Oneida"
,
"Onida"
,
"Oona"
,
"Opal"
,
"Opalina"
,
"Opaline"
,
"Ophelia"
,
"Ophelie"
,
"Ora"
,
"Oralee"
,
"Oralia"
,
"Oralie"
,
"Oralla"
,
"Oralle"
,
"Orel"
,
"Orelee"
,
"Orelia"
,
"Orelie"
,
"Orella"
,
"Orelle"
,
"Oriana"
,
"Orly"
,
"Orsa"
,
"Orsola"
,
"Ortensia"
,
"Otha"
,
"Othelia"
,
"Othella"
,
"Othilia"
,
"Othilie"
,
"Ottilie"
,
"Page"
,
"Paige"
,
"Paloma"
,
"Pam"
,
"Pamela"
,
"Pamelina"
,
"Pamella"
,
"Pammi"
,
"Pammie"
,
"Pammy"
,
"Pandora"
,
"Pansie"
,
"Pansy"
,
"Paola"
,
"Paolina"
,
"Papagena"
,
"Pat"
,
"Patience"
,
"Patrica"
,
"Patrice"
,
"Patricia"
,
"Patrizia"
,
"Patsy"
,
"Patti"
,
"Pattie"
,
"Patty"
,
"Paula"
,
"Paule"
,
"Pauletta"
,
"Paulette"
,
"Pauli"
,
"Paulie"
,
"Paulina"
,
"Pauline"
,
"Paulita"
,
"Pauly"
,
"Pavia"
,
"Pavla"
,
"Pearl"
,
"Pearla"
,
"Pearle"
,
"Pearline"
,
"Peg"
,
"Pegeen"
,
"Peggi"
,
"Peggie"
,
"Peggy"
,
"Pen"
,
"Penelopa"
,
"Penelope"
,
"Penni"
,
"Pennie"
,
"Penny"
,
"Pepi"
,
"Pepita"
,
"Peri"
,
"Peria"
,
"Perl"
,
"Perla"
,
"Perle"
,
"Perri"
,
"Perrine"
,
"Perry"
,
"Persis"
,
"Pet"
,
"Peta"
,
"Petra"
,
"Petrina"
,
"Petronella"
,
"Petronia"
,
"Petronilla"
,
"Petronille"
,
"Petunia"
,
"Phaedra"
,
"Phaidra"
,
"Phebe"
,
"Phedra"
,
"Phelia"
,
"Phil"
,
"Philipa"
,
"Philippa"
,
"Philippe"
,
"Philippine"
,
"Philis"
,
"Phillida"
,
"Phillie"
,
"Phillis"
,
"Philly"
,
"Philomena"
,
"Phoebe"
,
"Phylis"
,
"Phyllida"
,
"Phyllis"
,
"Phyllys"
,
"Phylys"
,
"Pia"
,
"Pier"
,
"Pierette"
,
"Pierrette"
,
"Pietra"
,
"Piper"
,
"Pippa"
,
"Pippy"
,
"Polly"
,
"Pollyanna"
,
"Pooh"
,
"Poppy"
,
"Portia"
,
"Pris"
,
"Prisca"
,
"Priscella"
,
"Priscilla"
,
"Prissie"
,
"Pru"
,
"Prudence"
,
"Prudi"
,
"Prudy"
,
"Prue"
,
"Queenie"
,
"Quentin"
,
"Querida"
,
"Quinn"
,
"Quinta"
,
"Quintana"
,
"Quintilla"
,
"Quintina"
,
"Rachael"
,
"Rachel"
,
"Rachele"
,
"Rachelle"
,
"Rae"
,
"Raeann"
,
"Raf"
,
"Rafa"
,
"Rafaela"
,
"Rafaelia"
,
"Rafaelita"
,
"Rahal"
,
"Rahel"
,
"Raina"
,
"Raine"
,
"Rakel"
,
"Ralina"
,
"Ramona"
,
"Ramonda"
,
"Rana"
,
"Randa"
,
"Randee"
,
"Randene"
,
"Randi"
,
"Randie"
,
"Randy"
,
"Ranee"
,
"Rani"
,
"Rania"
,
"Ranice"
,
"Ranique"
,
"Ranna"
,
"Raphaela"
,
"Raquel"
,
"Raquela"
,
"Rasia"
,
"Rasla"
,
"Raven"
,
"Ray"
,
"Raychel"
,
"Raye"
,
"Rayna"
,
"Raynell"
,
"Rayshell"
,
"Rea"
,
"Reba"
,
"Rebbecca"
,
"Rebe"
,
"Rebeca"
,
"Rebecca"
,
"Rebecka"
,
"Rebeka"
,
"Rebekah"
,
"Rebekkah"
,
"Ree"
,
"Reeba"
,
"Reena"
,
"Reeta"
,
"Reeva"
,
"Regan"
,
"Reggi"
,
"Reggie"
,
"Regina"
,
"Regine"
,
"Reiko"
,
"Reina"
,
"Reine"
,
"Remy"
,
"Rena"
,
"Renae"
,
"Renata"
,
"Renate"
,
"Rene"
,
"Renee"
,
"Renell"
,
"Renelle"
,
"Renie"
,
"Rennie"
,
"Reta"
,
"Retha"
,
"Revkah"
,
"Rey"
,
"Reyna"
,
"Rhea"
,
"Rheba"
,
"Rheta"
,
"Rhetta"
,
"Rhiamon"
,
"Rhianna"
,
"Rhianon"
,
"Rhoda"
,
"Rhodia"
,
"Rhodie"
,
"Rhody"
,
"Rhona"
,
"Rhonda"
,
"Riane"
,
"Riannon"
,
"Rianon"
,
"Rica"
,
"Ricca"
,
"Rici"
,
"Ricki"
,
"Rickie"
,
"Ricky"
,
"Riki"
,
"Rikki"
,
"Rina"
,
"Risa"
,
"Rita"
,
"Riva"
,
"Rivalee"
,
"Rivi"
,
"Rivkah"
,
"Rivy"
,
"Roana"
,
"Roanna"
,
"Roanne"
,
"Robbi"
,
"Robbie"
,
"Robbin"
,
"Robby"
,
"Robbyn"
,
"Robena"
,
"Robenia"
,
"Roberta"
,
"Robin"
,
"Robina"
,
"Robinet"
,
"Robinett"
,
"Robinetta"
,
"Robinette"
,
"Robinia"
,
"Roby"
,
"Robyn"
,
"Roch"
,
"Rochell"
,
"Rochella"
,
"Rochelle"
,
"Rochette"
,
"Roda"
,
"Rodi"
,
"Rodie"
,
"Rodina"
,
"Rois"
,
"Romola"
,
"Romona"
,
"Romonda"
,
"Romy"
,
"Rona"
,
"Ronalda"
,
"Ronda"
,
"Ronica"
,
"Ronna"
,
"Ronni"
,
"Ronnica"
,
"Ronnie"
,
"Ronny"
,
"Roobbie"
,
"Rora"
,
"Rori"
,
"Rorie"
,
"Rory"
,
"Ros"
,
"Rosa"
,
"Rosabel"
,
"Rosabella"
,
"Rosabelle"
,
"Rosaleen"
,
"Rosalia"
,
"Rosalie"
,
"Rosalind"
,
"Rosalinda"
,
"Rosalinde"
,
"Rosaline"
,
"Rosalyn"
,
"Rosalynd"
,
"Rosamond"
,
"Rosamund"
,
"Rosana"
,
"Rosanna"
,
"Rosanne"
,
"Rose"
,
"Roseann"
,
"Roseanna"
,
"Roseanne"
,
"Roselia"
,
"Roselin"
,
"Roseline"
,
"Rosella"
,
"Roselle"
,
"Rosemaria"
,
"Rosemarie"
,
"Rosemary"
,
"Rosemonde"
,
"Rosene"
,
"Rosetta"
,
"Rosette"
,
"Roshelle"
,
"Rosie"
,
"Rosina"
,
"Rosita"
,
"Roslyn"
,
"Rosmunda"
,
"Rosy"
,
"Row"
,
"Rowe"
,
"Rowena"
,
"Roxana"
,
"Roxane"
,
"Roxanna"
,
"Roxanne"
,
"Roxi"
,
"Roxie"
,
"Roxine"
,
"Roxy"
,
"Roz"
,
"Rozalie"
,
"Rozalin"
,
"Rozamond"
,
"Rozanna"
,
"Rozanne"
,
"Roze"
,
"Rozele"
,
"Rozella"
,
"Rozelle"
,
"Rozina"
,
"Rubetta"
,
"Rubi"
,
"Rubia"
,
"Rubie"
,
"Rubina"
,
"Ruby"
,
"Ruperta"
,
"Ruth"
,
"Ruthann"
,
"Ruthanne"
,
"Ruthe"
,
"Ruthi"
,
"Ruthie"
,
"Ruthy"
,
"Ryann"
,
"Rycca"
,
"Saba"
,
"Sabina"
,
"Sabine"
,
"Sabra"
,
"Sabrina"
,
"Sacha"
,
"Sada"
,
"Sadella"
,
"Sadie"
,
"Sadye"
,
"Saidee"
,
"Sal"
,
"Salaidh"
,
"Sallee"
,
"Salli"
,
"Sallie"
,
"Sally"
,
"Sallyann"
,
"Sallyanne"
,
"Saloma"
,
"Salome"
,
"Salomi"
,
"Sam"
,
"Samantha"
,
"Samara"
,
"Samaria"
,
"Sammy"
,
"Sande"
,
"Sandi"
,
"Sandie"
,
"Sandra"
,
"Sandy"
,
"Sandye"
,
"Sapphira"
,
"Sapphire"
,
"Sara"
,
"Sara-Ann"
,
"Saraann"
,
"Sarah"
,
"Sarajane"
,
"Saree"
,
"Sarena"
,
"Sarene"
,
"Sarette"
,
"Sari"
,
"Sarina"
,
"Sarine"
,
"Sarita"
,
"Sascha"
,
"Sasha"
,
"Sashenka"
,
"Saudra"
,
"Saundra"
,
"Savina"
,
"Sayre"
,
"Scarlet"
,
"Scarlett"
,
"Sean"
,
"Seana"
,
"Seka"
,
"Sela"
,
"Selena"
,
"Selene"
,
"Selestina"
,
"Selia"
,
"Selie"
,
"Selina"
,
"Selinda"
,
"Seline"
,
"Sella"
,
"Selle"
,
"Selma"
,
"Sena"
,
"Sephira"
,
"Serena"
,
"Serene"
,
"Shae"
,
"Shaina"
,
"Shaine"
,
"Shalna"
,
"Shalne"
,
"Shana"
,
"Shanda"
,
"Shandee"
,
"Shandeigh"
,
"Shandie"
,
"Shandra"
,
"Shandy"
,
"Shane"
,
"Shani"
,
"Shanie"
,
"Shanna"
,
"Shannah"
,
"Shannen"
,
"Shannon"
,
"Shanon"
,
"Shanta"
,
"Shantee"
,
"Shara"
,
"Sharai"
,
"Shari"
,
"Sharia"
,
"Sharity"
,
"Sharl"
,
"Sharla"
,
"Sharleen"
,
"Sharlene"
,
"Sharline"
,
"Sharon"
,
"Sharona"
,
"Sharron"
,
"Sharyl"
,
"Shaun"
,
"Shauna"
,
"Shawn"
,
"Shawna"
,
"Shawnee"
,
"Shay"
,
"Shayla"
,
"Shaylah"
,
"Shaylyn"
,
"Shaylynn"
,
"Shayna"
,
"Shayne"
,
"Shea"
,
"Sheba"
,
"Sheela"
,
"Sheelagh"
,
"Sheelah"
,
"Sheena"
,
"Sheeree"
,
"Sheila"
,
"Sheila-Kathryn"
,
"Sheilah"
,
"Shel"
,
"Shela"
,
"Shelagh"
,
"Shelba"
,
"Shelbi"
,
"Shelby"
,
"Shelia"
,
"Shell"
,
"Shelley"
,
"Shelli"
,
"Shellie"
,
"Shelly"
,
"Shena"
,
"Sher"
,
"Sheree"
,
"Sheri"
,
"Sherie"
,
"Sherill"
,
"Sherilyn"
,
"Sherline"
,
"Sherri"
,
"Sherrie"
,
"Sherry"
,
"Sherye"
,
"Sheryl"
,
"Shina"
,
"Shir"
,
"Shirl"
,
"Shirlee"
,
"Shirleen"
,
"Shirlene"
,
"Shirley"
,
"Shirline"
,
"Shoshana"
,
"Shoshanna"
,
"Siana"
,
"Sianna"
,
"Sib"
,
"Sibbie"
,
"Sibby"
,
"Sibeal"
,
"Sibel"
,
"Sibella"
,
"Sibelle"
,
"Sibilla"
,
"Sibley"
,
"Sibyl"
,
"Sibylla"
,
"Sibylle"
,
"Sidoney"
,
"Sidonia"
,
"Sidonnie"
,
"Sigrid"
,
"Sile"
,
"Sileas"
,
"Silva"
,
"Silvana"
,
"Silvia"
,
"Silvie"
,
"Simona"
,
"Simone"
,
"Simonette"
,
"Simonne"
,
"Sindee"
,
"Siobhan"
,
"Sioux"
,
"Siouxie"
,
"Sisely"
,
"Sisile"
,
"Sissie"
,
"Sissy"
,
"Siusan"
,
"Sofia"
,
"Sofie"
,
"Sondra"
,
"Sonia"
,
"Sonja"
,
"Sonni"
,
"Sonnie"
,
"Sonnnie"
,
"Sonny"
,
"Sonya"
,
"Sophey"
,
"Sophi"
,
"Sophia"
,
"Sophie"
,
"Sophronia"
,
"Sorcha"
,
"Sosanna"
,
"Stace"
,
"Stacee"
,
"Stacey"
,
"Staci"
,
"Stacia"
,
"Stacie"
,
"Stacy"
,
"Stafani"
,
"Star"
,
"Starla"
,
"Starlene"
,
"Starlin"
,
"Starr"
,
"Stefa"
,
"Stefania"
,
"Stefanie"
,
"Steffane"
,
"Steffi"
,
"Steffie"
,
"Stella"
,
"Stepha"
,
"Stephana"
,
"Stephani"
,
"Stephanie"
,
"Stephannie"
,
"Stephenie"
,
"Stephi"
,
"Stephie"
,
"Stephine"
,
"Stesha"
,
"Stevana"
,
"Stevena"
,
"Stoddard"
,
"Storm"
,
"Stormi"
,
"Stormie"
,
"Stormy"
,
"Sue"
,
"Suellen"
,
"Sukey"
,
"Suki"
,
"Sula"
,
"Sunny"
,
"Sunshine"
,
"Susan"
,
"Susana"
,
"Susanetta"
,
"Susann"
,
"Susanna"
,
"Susannah"
,
"Susanne"
,
"Susette"
,
"Susi"
,
"Susie"
,
"Susy"
,
"Suzann"
,
"Suzanna"
,
"Suzanne"
,
"Suzette"
,
"Suzi"
,
"Suzie"
,
"Suzy"
,
"Sybil"
,
"Sybila"
,
"Sybilla"
,
"Sybille"
,
"Sybyl"
,
"Sydel"
,
"Sydelle"
,
"Sydney"
,
"Sylvia"
,
"Tabatha"
,
"Tabbatha"
,
"Tabbi"
,
"Tabbie"
,
"Tabbitha"
,
"Tabby"
,
"Tabina"
,
"Tabitha"
,
"Taffy"
,
"Talia"
,
"Tallia"
,
"Tallie"
,
"Tallou"
,
"Tallulah"
,
"Tally"
,
"Talya"
,
"Talyah"
,
"Tamar"
,
"Tamara"
,
"Tamarah"
,
"Tamarra"
,
"Tamera"
,
"Tami"
,
"Tamiko"
,
"Tamma"
,
"Tammara"
,
"Tammi"
,
"Tammie"
,
"Tammy"
,
"Tamqrah"
,
"Tamra"
,
"Tana"
,
"Tandi"
,
"Tandie"
,
"Tandy"
,
"Tanhya"
,
"Tani"
,
"Tania"
,
"Tanitansy"
,
"Tansy"
,
"Tanya"
,
"Tara"
,
"Tarah"
,
"Tarra"
,
"Tarrah"
,
"Taryn"
,
"Tasha"
,
"Tasia"
,
"Tate"
,
"Tatiana"
,
"Tatiania"
,
"Tatum"
,
"Tawnya"
,
"Tawsha"
,
"Ted"
,
"Tedda"
,
"Teddi"
,
"Teddie"
,
"Teddy"
,
"Tedi"
,
"Tedra"
,
"Teena"
,
"TEirtza"
,
"Teodora"
,
"Tera"
,
"Teresa"
,
"Terese"
,
"Teresina"
,
"Teresita"
,
"Teressa"
,
"Teri"
,
"Teriann"
,
"Terra"
,
"Terri"
,
"Terrie"
,
"Terrijo"
,
"Terry"
,
"Terrye"
,
"Tersina"
,
"Terza"
,
"Tess"
,
"Tessa"
,
"Tessi"
,
"Tessie"
,
"Tessy"
,
"Thalia"
,
"Thea"
,
"Theadora"
,
"Theda"
,
"Thekla"
,
"Thelma"
,
"Theo"
,
"Theodora"
,
"Theodosia"
,
"Theresa"
,
"Therese"
,
"Theresina"
,
"Theresita"
,
"Theressa"
,
"Therine"
,
"Thia"
,
"Thomasa"
,
"Thomasin"
,
"Thomasina"
,
"Thomasine"
,
"Tiena"
,
"Tierney"
,
"Tiertza"
,
"Tiff"
,
"Tiffani"
,
"Tiffanie"
,
"Tiffany"
,
"Tiffi"
,
"Tiffie"
,
"Tiffy"
,
"Tilda"
,
"Tildi"
,
"Tildie"
,
"Tildy"
,
"Tillie"
,
"Tilly"
,
"Tim"
,
"Timi"
,
"Timmi"
,
"Timmie"
,
"Timmy"
,
"Timothea"
,
"Tina"
,
"Tine"
,
"Tiphani"
,
"Tiphanie"
,
"Tiphany"
,
"Tish"
,
"Tisha"
,
"Tobe"
,
"Tobey"
,
"Tobi"
,
"Toby"
,
"Tobye"
,
"Toinette"
,
"Toma"
,
"Tomasina"
,
"Tomasine"
,
"Tomi"
,
"Tommi"
,
"Tommie"
,
"Tommy"
,
"Toni"
,
"Tonia"
,
"Tonie"
,
"Tony"
,
"Tonya"
,
"Tonye"
,
"Tootsie"
,
"Torey"
,
"Tori"
,
"Torie"
,
"Torrie"
,
"Tory"
,
"Tova"
,
"Tove"
,
"Tracee"
,
"Tracey"
,
"Traci"
,
"Tracie"
,
"Tracy"
,
"Trenna"
,
"Tresa"
,
"Trescha"
,
"Tressa"
,
"Tricia"
,
"Trina"
,
"Trish"
,
"Trisha"
,
"Trista"
,
"Trix"
,
"Trixi"
,
"Trixie"
,
"Trixy"
,
"Truda"
,
"Trude"
,
"Trudey"
,
"Trudi"
,
"Trudie"
,
"Trudy"
,
"Trula"
,
"Tuesday"
,
"Twila"
,
"Twyla"
,
"Tybi"
,
"Tybie"
,
"Tyne"
,
"Ula"
,
"Ulla"
,
"Ulrica"
,
"Ulrika"
,
"Ulrikaumeko"
,
"Ulrike"
,
"Umeko"
,
"Una"
,
"Ursa"
,
"Ursala"
,
"Ursola"
,
"Ursula"
,
"Ursulina"
,
"Ursuline"
,
"Uta"
,
"Val"
,
"Valaree"
,
"Valaria"
,
"Vale"
,
"Valeda"
,
"Valencia"
,
"Valene"
,
"Valenka"
,
"Valentia"
,
"Valentina"
,
"Valentine"
,
"Valera"
,
"Valeria"
,
"Valerie"
,
"Valery"
,
"Valerye"
,
"Valida"
,
"Valina"
,
"Valli"
,
"Vallie"
,
"Vally"
,
"Valma"
,
"Valry"
,
"Van"
,
"Vanda"
,
"Vanessa"
,
"Vania"
,
"Vanna"
,
"Vanni"
,
"Vannie"
,
"Vanny"
,
"Vanya"
,
"Veda"
,
"Velma"
,
"Velvet"
,
"Venita"
,
"Venus"
,
"Vera"
,
"Veradis"
,
"Vere"
,
"Verena"
,
"Verene"
,
"Veriee"
,
"Verile"
,
"Verina"
,
"Verine"
,
"Verla"
,
"Verna"
,
"Vernice"
,
"Veronica"
,
"Veronika"
,
"Veronike"
,
"Veronique"
,
"Vevay"
,
"Vi"
,
"Vicki"
,
"Vickie"
,
"Vicky"
,
"Victoria"
,
"Vida"
,
"Viki"
,
"Vikki"
,
"Vikky"
,
"Vilhelmina"
,
"Vilma"
,
"Vin"
,
"Vina"
,
"Vinita"
,
"Vinni"
,
"Vinnie"
,
"Vinny"
,
"Viola"
,
"Violante"
,
"Viole"
,
"Violet"
,
"Violetta"
,
"Violette"
,
"Virgie"
,
"Virgina"
,
"Virginia"
,
"Virginie"
,
"Vita"
,
"Vitia"
,
"Vitoria"
,
"Vittoria"
,
"Viv"
,
"Viva"
,
"Vivi"
,
"Vivia"
,
"Vivian"
,
"Viviana"
,
"Vivianna"
,
"Vivianne"
,
"Vivie"
,
"Vivien"
,
"Viviene"
,
"Vivienne"
,
"Viviyan"
,
"Vivyan"
,
"Vivyanne"
,
"Vonni"
,
"Vonnie"
,
"Vonny"
,
"Vyky"
,
"Wallie"
,
"Wallis"
,
"Walliw"
,
"Wally"
,
"Waly"
,
"Wanda"
,
"Wandie"
,
"Wandis"
,
"Waneta"
,
"Wanids"
,
"Wenda"
,
"Wendeline"
,
"Wendi"
,
"Wendie"
,
"Wendy"
,
"Wendye"
,
"Wenona"
,
"Wenonah"
,
"Whitney"
,
"Wileen"
,
"Wilhelmina"
,
"Wilhelmine"
,
"Wilie"
,
"Willa"
,
"Willabella"
,
"Willamina"
,
"Willetta"
,
"Willette"
,
"Willi"
,
"Willie"
,
"Willow"
,
"Willy"
,
"Willyt"
,
"Wilma"
,
"Wilmette"
,
"Wilona"
,
"Wilone"
,
"Wilow"
,
"Windy"
,
"Wini"
,
"Winifred"
,
"Winna"
,
"Winnah"
,
"Winne"
,
"Winni"
,
"Winnie"
,
"Winnifred"
,
"Winny"
,
"Winona"
,
"Winonah"
,
"Wren"
,
"Wrennie"
,
"Wylma"
,
"Wynn"
,
"Wynne"
,
"Wynnie"
,
"Wynny"
,
"Xaviera"
,
"Xena"
,
"Xenia"
,
"Xylia"
,
"Xylina"
,
"Yalonda"
,
"Yasmeen"
,
"Yasmin"
,
"Yelena"
,
"Yetta"
,
"Yettie"
,
"Yetty"
,
"Yevette"
,
"Ynes"
,
"Ynez"
,
"Yoko"
,
"Yolanda"
,
"Yolande"
,
"Yolane"
,
"Yolanthe"
,
"Yoshi"
,
"Yoshiko"
,
"Yovonnda"
,
"Ysabel"
,
"Yvette"
,
"Yvonne"
,
"Zabrina"
,
"Zahara"
,
"Zandra"
,
"Zaneta"
,
"Zara"
,
"Zarah"
,
"Zaria"
,
"Zarla"
,
"Zea"
,
"Zelda"
,
"Zelma"
,
"Zena"
,
"Zenia"
,
"Zia"
,
"Zilvia"
,
"Zita"
,
"Zitella"
,
"Zoe"
,
"Zola"
,
"Zonda"
,
"Zondra"
,
"Zonnya"
,
"Zora"
,
"Zorah"
,
"Zorana"
,
"Zorina"
,
"Zorine"
,
"Zsa Zsa"
,
"Zsazsa"
,
"Zulema"
,
"Zuzana"
]
| Pharaoh00/Pharaoh-Toolkit | 0.0.6/modulos/listas/nomesLista_First.py | Python | mit | 54,952 | [
"Amber",
"CRYSTAL",
"FLEUR"
] | 84c7cb3766ab30a22e849f5aef3aaa0a153047c6e26514c73cf4a7ea077400a4 |
"""Minimum mode follower for finding saddle points in an unbiased way.
There is, currently, only one implemented method: The Dimer method.
"""
import sys
import time
import warnings
import numpy as np
from ase.optimize.optimize import Optimizer
from math import cos, sin, atan, tan, degrees, pi, sqrt
from ase.parallel import rank, size, world
from ase.calculators.singlepoint import SinglePointCalculator
# Handy vector methods
norm = np.linalg.norm  # Euclidean norm; on an (n, 3) ndarray this is the Frobenius norm
def normalize(vector):
    """Return *vector* rescaled to unit length."""
    length = np.linalg.norm(vector)
    return vector / length
def parallel_vector(vector, base):
    """Project *vector* onto the direction given by the unit vector *base*."""
    projection = np.vdot(vector, base)
    return projection * base
def perpendicular_vector(vector, base):
    """Return the component of *vector* orthogonal to the unit vector *base*."""
    # Subtract the projection onto *base* (Gram-Schmidt step).
    return vector - np.vdot(vector, base) * base
def rotate_vectors(v1i, v2i, angle):
    """Rotate *v1i* and *v2i* by *angle* within their common plane.

    Each output is rescaled so that it keeps the length of the
    corresponding input vector.
    """
    c = cos(angle)
    s = sin(angle)
    v1o = v1i * c + v2i * s
    v2o = v2i * c - v1i * s
    # Restore the original lengths after the rotation.
    v1o = v1o * (np.linalg.norm(v1i) / np.linalg.norm(v1o))
    v2o = v2o * (np.linalg.norm(v2i) / np.linalg.norm(v2o))
    return v1o, v2o
class DimerEigenmodeSearch:
    """An implementation of the Dimer's minimum eigenvalue mode search.

    This class implements the rotational part of the dimer saddle point
    searching method.

    Parameters:

    atoms : MinModeAtoms object
        MinModeAtoms is an extension to the Atoms object, which includes
        information about the lowest eigenvalue mode.
    control : DimerControl object
        Contains the parameters necessary for the eigenmode search.
        If no control object is supplied a default DimerControl
        will be created and used.
    basis : list of xyz-values
        Eigenmode. Must be an ndarray of shape (n, 3).
        It is possible to constrain the eigenmodes to be orthogonal
        to this given eigenmode.

    Notes:

    The code is inspired, with permission, by code written by the Henkelman
    group, which can be found at http://theory.cm.utexas.edu/vtsttools/code/

    References:

    .. [1] Henkelman and Jonsson, JCP 111, 7010 (1999)
    .. [2] Olsen, Kroes, Henkelman, Arnaldsson, and Jonsson, JCP 121,
           9776 (2004).
    .. [3] Heyden, Bell, and Keil, JCP 123, 224101 (2005).
    .. [4] Kastner and Sherwood, JCP 128, 014106 (2008).

    """
    def __init__(self, atoms, control=None, eigenmode=None, basis=None, \
                 **kwargs):
        if hasattr(atoms, 'get_eigenmode'):
            self.atoms = atoms
        else:
            e = 'The atoms object must be a MinModeAtoms object'
            raise TypeError(e)
        self.basis = basis

        if eigenmode is None:
            self.eigenmode = self.atoms.get_eigenmode()
        else:
            self.eigenmode = eigenmode

        if control is None:
            self.control = DimerControl(**kwargs)
            w = 'Missing control object in ' + self.__class__.__name__ + \
                '. Using default: DimerControl()'
            warnings.warn(w, UserWarning)
            if self.control.logfile is not None:
                self.control.logfile.write('DIM:WARN: ' + w + '\n')
                self.control.logfile.flush()
        else:
            self.control = control
            # kwargs must be empty if a control object is supplied
            for key in kwargs:
                e = '__init__() got an unexpected keyword argument \'%s\'' % \
                    (key)
                raise TypeError(e)

        self.dR = self.control.get_parameter('dimer_separation')
        self.logfile = self.control.get_logfile()

    def converge_to_eigenmode(self):
        """Perform an eigenmode search.

        Rotates the dimer until the rotational force drops below
        *f_rot_min*, only one more rotation would be performed
        (*f_rot_max*) or *max_num_rot* rotations have been done.
        """
        self.set_up_for_eigenmode_search()
        stoprot = False

        # Load the relevant parameters from control
        f_rot_min = self.control.get_parameter('f_rot_min')
        f_rot_max = self.control.get_parameter('f_rot_max')
        trial_angle = self.control.get_parameter('trial_angle')
        max_num_rot = self.control.get_parameter('max_num_rot')
        extrapolate = self.control.get_parameter('extrapolate_forces')

        while not stoprot:
            # BUG FIX: this used '== None', which performs an elementwise
            # comparison once self.forces1E holds an ndarray (after an
            # extrapolation step) and raises "truth value of an array is
            # ambiguous". Identity comparison is the correct test.
            if self.forces1E is None:
                self.update_virtual_forces()
            else:
                self.update_virtual_forces(extrapolated_forces = True)
            self.forces1A = self.forces1
            self.update_curvature()
            f_rot_A = self.get_rotational_force()

            # Pre rotation stop criteria
            if norm(f_rot_A) <= f_rot_min:
                self.log(f_rot_A, None)
                stoprot = True
            else:
                n_A = self.eigenmode
                rot_unit_A = normalize(f_rot_A)

                # Get the curvature and its derivative
                c0 = self.get_curvature()
                c0d = np.vdot((self.forces2 - self.forces1), rot_unit_A) / \
                      self.dR

                # Trial rotation (no need to store the curvature)
                # NYI variable trial angles from [3]
                n_B, rot_unit_B = rotate_vectors(n_A, rot_unit_A, trial_angle)
                self.eigenmode = n_B
                self.update_virtual_forces()
                self.forces1B = self.forces1

                # Get the curvature's derivative
                c1d = np.vdot((self.forces2 - self.forces1), rot_unit_B) / \
                      self.dR

                # Calculate the Fourier coefficients
                a1 = c0d * cos(2 * trial_angle) - c1d / \
                     (2 * sin(2 * trial_angle))
                b1 = 0.5 * c0d
                a0 = 2 * (c0 - a1)

                # Estimate the rotational angle
                rotangle = atan(b1 / a1) / 2.0

                # Make sure that you didn't find a maximum
                cmin = a0 / 2.0 + a1 * cos(2 * rotangle) + \
                       b1 * sin(2 * rotangle)
                if c0 < cmin:
                    rotangle += pi / 2.0

                # Rotate into the (hopefully) lowest eigenmode
                # NYI Conjugate gradient rotation
                n_min, dummy = rotate_vectors(n_A, rot_unit_A, rotangle)
                self.update_eigenmode(n_min)

                # Store the curvature estimate instead of the old curvature
                self.update_curvature(cmin)

                self.log(f_rot_A, rotangle)

                # Force extrapolation scheme from [4]
                if extrapolate:
                    self.forces1E = sin(trial_angle - rotangle) / \
                        sin(trial_angle) * self.forces1A + sin(rotangle) / \
                        sin(trial_angle) * self.forces1B + \
                        (1 - cos(rotangle) - sin(rotangle) * \
                        tan(trial_angle / 2.0)) * self.forces0
                else:
                    self.forces1E = None

            # Post rotation stop criteria
            if not stoprot:
                if self.control.get_counter('rotcount') >= max_num_rot:
                    stoprot = True
                elif norm(f_rot_A) <= f_rot_max:
                    stoprot = True

    def log(self, f_rot_A, angle):
        """Log each rotational step."""
        # NYI Log for the trial angle
        if self.logfile is not None:
            if angle:
                line = 'DIM:ROT: %7d %9d %9.4f %9.4f %9.4f\n' % \
                    (self.control.get_counter('optcount'),
                    self.control.get_counter('rotcount'),
                    self.get_curvature(), degrees(angle), norm(f_rot_A))
            else:
                line = 'DIM:ROT: %7d %9d %9.4f %9s %9.4f\n' % \
                    (self.control.get_counter('optcount'),
                    self.control.get_counter('rotcount'),
                    self.get_curvature(), '---------', norm(f_rot_A))
            self.logfile.write(line)
            self.logfile.flush()

    def get_rotational_force(self):
        """Calculate the rotational force that acts on the dimer."""
        rot_force = perpendicular_vector((self.forces1 - self.forces2),
                    self.eigenmode) / (2.0 * self.dR)
        if self.basis is not None:
            # A single (n, 3) float basis vector or a list of them.
            if len(self.basis) == len(self.atoms) and len(self.basis[0]) == \
               3 and isinstance(self.basis[0][0], float):
                rot_force = perpendicular_vector(rot_force, self.basis)
            else:
                for base in self.basis:
                    rot_force = perpendicular_vector(rot_force, base)
        return rot_force

    def update_curvature(self, curv = None):
        """Update the curvature in the MinModeAtoms object."""
        # BUG FIX: this used truthiness ('if curv:'), so a curvature
        # estimate of exactly 0.0 was silently discarded and recomputed
        # from the dimer forces. Test for None explicitly instead.
        if curv is not None:
            self.curvature = curv
        else:
            self.curvature = np.vdot((self.forces2 - self.forces1),
                             self.eigenmode) / (2.0 * self.dR)

    def update_eigenmode(self, eigenmode):
        """Update the eigenmode in the MinModeAtoms object."""
        self.eigenmode = eigenmode
        self.update_virtual_positions()
        self.control.increment_counter('rotcount')

    def get_eigenmode(self):
        """Returns the current eigenmode."""
        return self.eigenmode

    def get_curvature(self):
        """Returns the curvature along the current eigenmode."""
        return self.curvature

    def get_control(self):
        """Return the control object."""
        return self.control

    def update_center_forces(self):
        """Get the forces at the center of the dimer."""
        self.atoms.set_positions(self.pos0)
        self.forces0 = self.atoms.get_forces(real = True)
        self.energy0 = self.atoms.get_potential_energy()

    def update_virtual_forces(self, extrapolated_forces = False):
        """Get the forces at the endpoints of the dimer."""
        self.update_virtual_positions()

        # Estimate / Calculate the forces at pos1
        if extrapolated_forces:
            self.forces1 = self.forces1E.copy()
        else:
            self.forces1 = self.atoms.get_forces(real = True, pos = self.pos1)

        # Estimate / Calculate the forces at pos2
        if self.control.get_parameter('use_central_forces'):
            # Linear extrapolation through the center saves a force call.
            self.forces2 = 2 * self.forces0 - self.forces1
        else:
            self.forces2 = self.atoms.get_forces(real = True, pos = self.pos2)

    def update_virtual_positions(self):
        """Update the end point positions."""
        self.pos1 = self.pos0 + self.eigenmode * self.dR
        self.pos2 = self.pos0 - self.eigenmode * self.dR

    def set_up_for_eigenmode_search(self):
        """Before eigenmode search, prepare for rotation."""
        self.pos0 = self.atoms.get_positions()
        self.update_center_forces()
        self.update_virtual_positions()
        self.control.reset_counter('rotcount')
        self.forces1E = None

    def set_up_for_optimization_step(self):
        """At the end of rotation, prepare for displacement of the dimer."""
        self.atoms.set_positions(self.pos0)
        self.forces1E = None
class MinModeControl:
    """A parent class for controlling minimum mode saddle point searches.

    Method specific control classes inherit this one. The only thing
    inheriting classes need to implement are the log() method and
    the *parameters* class variable with default values for ALL
    parameters needed by the method in question.

    When instantiating control classes default parameter values can
    be overwritten.

    """
    # Subclasses override this with their method's default parameters.
    parameters = {}

    def __init__(self, logfile = '-', eigenmode_logfile=None, **kwargs):
        # BUG FIX: work on a per-instance copy of the defaults.
        # set_parameter() previously mutated the class-level dict, so a
        # parameter changed on one instance leaked into every other
        # instance (and into later searches in the same process).
        self.parameters = dict(self.parameters)

        # Overwrite the defaults with the input parameters given
        for key in kwargs:
            if key not in self.parameters:
                e = 'Invalid parameter >>%s<< with value >>%s<< in %s' % \
                    (key, str(kwargs[key]), self.__class__.__name__)
                raise ValueError(e)
            else:
                self.set_parameter(key, kwargs[key], log = False)

        # Initialize the log files
        self.initialize_logfiles(logfile, eigenmode_logfile)

        # Initialize the counters
        self.counters = {'forcecalls': 0, 'rotcount': 0, 'optcount': 0}

        self.log()

    def initialize_logfiles(self, logfile=None, eigenmode_logfile=None):
        """Set up the log files."""
        # Set up the regular logfile; only the master process writes,
        # '-' means stdout, any other string is an append-mode file name.
        if rank != 0:
            logfile = None
        elif isinstance(logfile, str):
            if logfile == '-':
                logfile = sys.stdout
            else:
                logfile = open(logfile, 'a')
        self.logfile = logfile

        # Set up the eigenmode logfile (same conventions as above)
        if eigenmode_logfile:
            if rank != 0:
                eigenmode_logfile = None
            elif isinstance(eigenmode_logfile, str):
                if eigenmode_logfile == '-':
                    eigenmode_logfile = sys.stdout
                else:
                    eigenmode_logfile = open(eigenmode_logfile, 'a')
        self.eigenmode_logfile = eigenmode_logfile

    def log(self, parameter=None):
        """Log the parameters of the eigenmode search."""
        # Implemented by subclasses; the base class logs nothing.
        pass

    def set_parameter(self, parameter, value, log=True):
        """Change a parameter's value."""
        if parameter not in self.parameters:
            e = 'Invalid parameter >>%s<< with value >>%s<<' % \
                (parameter, str(value))
            raise ValueError(e)
        self.parameters[parameter] = value
        if log:
            self.log(parameter)

    def get_parameter(self, parameter):
        """Returns the value of a parameter."""
        if parameter not in self.parameters:
            e = 'Invalid parameter >>%s<<' % \
                (parameter)
            raise ValueError(e)
        return self.parameters[parameter]

    def get_logfile(self):
        """Returns the log file."""
        return self.logfile

    def get_eigenmode_logfile(self):
        """Returns the eigenmode log file."""
        return self.eigenmode_logfile

    def get_counter(self, counter):
        """Returns a given counter."""
        return self.counters[counter]

    def increment_counter(self, counter):
        """Increment a given counter."""
        self.counters[counter] += 1

    def reset_counter(self, counter):
        """Reset a given counter."""
        self.counters[counter] = 0

    def reset_all_counters(self):
        """Reset all counters."""
        for key in self.counters:
            self.counters[key] = 0
class DimerControl(MinModeControl):
    """A class that takes care of the parameters needed for a Dimer search.

    Parameters:

    eigenmode_method: str
        The name of the eigenmode search method.
    f_rot_min: float
        Size of the rotational force under which no rotation will be
        performed.
    f_rot_max: float
        Size of the rotational force under which only one rotation will be
        performed.
    max_num_rot: int
        Maximum number of rotations per optimizer step.
    trial_angle: float
        Trial angle for the finite difference estimate of the rotational
        angle in radians.
    trial_trans_step: float
        Trial step size for the MinModeTranslate optimizer.
    maximum_translation: float
        Maximum step size and forced step size when the curvature is still
        positive for the MinModeTranslate optimizer.
    cg_translation: bool
        Conjugate Gradient for the MinModeTranslate optimizer.
    use_central_forces: bool
        Only calculate the forces at one end of the dimer and extrapolate
        the forces to the other.
    dimer_separation: float
        Separation of the dimer's images.
    initial_eigenmode_method: str
        How to construct the initial eigenmode of the dimer. If an eigenmode
        is given when creating the MinModeAtoms object, this will be ignored.
        Possible choices are: 'gauss' and 'displacement'
    extrapolate_forces: bool
        When more than one rotation is performed, an extrapolation scheme can
        be used to reduce the number of force evaluations.
    displacement_method: str
        How to displace the atoms. Possible choices are 'gauss' and 'vector'.
    gauss_std: float
        The standard deviation of the gauss curve used when doing random
        displacement.
    order: int
        How many lowest eigenmodes will be inverted.
    mask: list of bool
        Which atoms will be moved during displacement.
    displacement_center: int or [float, float, float]
        The center of displacement, nearby atoms will be displaced.
    displacement_radius: float
        When choosing which atoms to displace with the *displacement_center*
        keyword, this decides how many nearby atoms to displace.
    number_of_displacement_atoms: int
        The amount of atoms near *displacement_center* to displace.

    """
    # Default parameters for the Dimer eigenmode search
    parameters = {'eigenmode_method': 'dimer',
                  'f_rot_min': 0.1,
                  'f_rot_max': 1.00,
                  'max_num_rot': 1,
                  'trial_angle': pi / 4.0,
                  'trial_trans_step': 0.001,
                  'maximum_translation': 0.1,
                  'cg_translation': True,
                  'use_central_forces': True,
                  'dimer_separation': 0.0001,
                  'initial_eigenmode_method': 'gauss',
                  'extrapolate_forces': False,
                  'displacement_method': 'gauss',
                  'gauss_std': 0.1,
                  'order': 1,
                  'mask': None, # NB mask should not be a "parameter"
                  'displacement_center': None,
                  'displacement_radius': None,
                  'number_of_displacement_atoms': None}

    # NB: Can maybe put this in EigenmodeSearch and MinModeControl
    def log(self, parameter=None):
        """Log the parameters of the eigenmode search."""
        if self.logfile is None:
            return
        if parameter is not None:
            # A single parameter was changed: log only that update.
            message = 'DIM:CONTROL: Updated Parameter: %s = %s\n' % (
                parameter, str(self.get_parameter(parameter)))
        else:
            # Full dump of the search parameters plus the rotation header.
            pieces = ['MINMODE:METHOD: Dimer\n',
                      'DIM:CONTROL: Search Parameters:\n',
                      'DIM:CONTROL: ------------------\n']
            pieces += ['DIM:CONTROL: %s = %s\n' %
                       (key, str(self.get_parameter(key)))
                       for key in self.parameters]
            pieces.append('DIM:CONTROL: ------------------\n')
            pieces.append('DIM:ROT: OPT-STEP ROT-STEP CURVATURE ROT-ANGLE ' +
                          'ROT-FORCE\n')
            message = ''.join(pieces)
        self.logfile.write(message)
        self.logfile.flush()
class MinModeAtoms:
"""Wrapper for Atoms with information related to minimum mode searching.
Contains an Atoms object and pipes all unknown function calls to that
object.
Other information that is stored in this object are the estimate for
the lowest eigenvalue, *curvature*, and its corresponding eigenmode,
*eigenmode*. Furthermore, the original configuration of the Atoms
object is stored for use in multiple minimum mode searches.
The forces on the system are modified by inverting the component
along the eigenmode estimate. This eventually brings the system to
a saddle point.
Parameters:
atoms : Atoms object
A regular Atoms object
control : MinModeControl object
Contains the parameters necessary for the eigenmode search.
If no control object is supplied a default DimerControl
will be created and used.
mask: list of bool
Determines which atoms will be moved when calling displace()
random_seed: int
The seed used for the random number generator. Defaults to
modified version the current time.
References:
.. [1] Henkelman and Jonsson, JCP 111, 7010 (1999)
.. [2] Olsen, Kroes, Henkelman, Arnaldsson, and Jonsson, JCP 121,
9776 (2004).
.. [3] Heyden, Bell, and Keil, JCP 123, 224101 (2005).
.. [4] Kastner and Sherwood, JCP 128, 014106 (2008).
"""
    def __init__(self, atoms, control=None, eigenmodes=None, random_seed=None, **kwargs):
        """Wrap *atoms* for a minimum mode search.

        If *control* is None a default DimerControl is created (with a
        warning) and **kwargs are passed to its constructor; otherwise
        **kwargs update the given control object's parameters, except
        'logfile'/'eigenmode_logfile' which override its log targets.
        """
        # A first force call must initialize the eigenmodes before use.
        self.minmode_init = True
        self.atoms = atoms

        # Initialize to None to avoid strange behaviour due to __getattr__
        self.eigenmodes = eigenmodes
        self.curvatures = None

        if control is None:
            self.control = DimerControl(**kwargs)
            w = 'Missing control object in ' + self.__class__.__name__ + \
                '. Using default: DimerControl()'
            warnings.warn(w, UserWarning)
            if self.control.logfile is not None:
                self.control.logfile.write('DIM:WARN: ' + w + '\n')
                self.control.logfile.flush()
        else:
            self.control = control
            logfile = self.control.get_logfile()
            mlogfile = self.control.get_eigenmode_logfile()
            # Log-target kwargs replace the control object's files;
            # everything else is forwarded as a control parameter.
            for key in kwargs:
                if key == 'logfile':
                    logfile = kwargs[key]
                elif key == 'eigenmode_logfile':
                    mlogfile = kwargs[key]
                else:
                    self.control.set_parameter(key, kwargs[key])
            self.control.initialize_logfiles(logfile = logfile,
                                             eigenmode_logfile = mlogfile)

        # Seed the randomness
        if random_seed is None:
            t = time.time()
            # Average over MPI processes so every rank derives the same seed.
            if size > 1:
                t = world.sum(t) / float(size)
            # Harvest the latter part of the current time
            random_seed = int(('%30.9f' % t)[-9:])
        self.random_state = np.random.RandomState(random_seed)

        # Check the order
        self.order = self.control.get_parameter('order')

        # Construct the curvatures list
        self.curvatures = [100.0] * self.order

        # Save the original state of the atoms.
        self.atoms0 = self.atoms.copy()
        self.save_original_forces()

        # Get a reference to the log files
        self.logfile = self.control.get_logfile()
        self.mlogfile = self.control.get_eigenmode_logfile()
    def save_original_forces(self, force_calculation=False):
        """Store the forces (and energy) of the original state.

        Attaches a SinglePointCalculator with the current energy/forces
        to the saved copy *self.atoms0*, so the barrier energy can be
        computed later without re-running the calculator.
        """
        # NB: Would be nice if atoms.copy() took care of this.
        # NOTE(review): 'self.calc' is presumably resolved on the wrapped
        # Atoms object via attribute delegation -- confirm elsewhere.
        if self.calc is not None:
            # Hack because some calculators do not have calculation_required
            if (hasattr(self.calc, 'calculation_required') \
               and not self.calc.calculation_required(self.atoms,
                   ['energy', 'forces'])) or force_calculation:
                # NOTE(review): this is the old positional
                # SinglePointCalculator signature (energy, forces, stress,
                # magmoms, atoms); modern ASE expects the atoms first with
                # keyword results -- confirm against the ASE version in use.
                calc = SinglePointCalculator( \
                       self.atoms.get_potential_energy(), \
                       self.atoms.get_forces(), None, None, self.atoms0)
                self.atoms0.set_calculator(calc)
def initialize_eigenmodes(self, method=None, eigenmodes=None, \
gauss_std=None):
"""Make an initial guess for the eigenmode."""
if eigenmodes is None:
pos = self.get_positions()
old_pos = self.get_original_positions()
if method == None:
method = \
self.control.get_parameter('initial_eigenmode_method')
if method.lower() == 'displacement' and (pos - old_pos).any():
eigenmode = normalize(pos - old_pos)
elif method.lower() == 'gauss':
self.displace(log = False, gauss_std = gauss_std,
method = method)
new_pos = self.get_positions()
eigenmode = normalize(new_pos - pos)
self.set_positions(pos)
else:
e = 'initial_eigenmode must use either \'gauss\' or ' + \
'\'displacement\', if the latter is used the atoms ' + \
'must have moved away from the original positions.' + \
'You have requested \'%s\'.' % method
raise NotImplementedError(e) # NYI
eigenmodes = [eigenmode]
# Create random higher order mode guesses
if self.order > 1:
if len(eigenmodes) == 1:
for k in range(1, self.order):
pos = self.get_positions()
self.displace(log = False, gauss_std = gauss_std,
method = method)
new_pos = self.get_positions()
eigenmode = normalize(new_pos - pos)
self.set_positions(pos)
eigenmodes += [eigenmode]
self.eigenmodes = eigenmodes
# Ensure that the higher order mode guesses are all orthogonal
if self.order > 1:
for k in range(self.order):
self.ensure_eigenmode_orthogonality(k)
self.eigenmode_log()
# NB maybe this name might be confusing in context to
# calc.calculation_required()
def calculation_required(self):
"""Check if a calculation is required."""
return self.minmode_init or self.check_atoms != self.atoms
    def calculate_real_forces_and_energies(self, **kwargs):
        """Calculate and store the potential energy and forces.

        On the very first call the eigenmodes are initialized.  Each call
        marks a rotation as pending, increments the force-call counter and
        caches the configuration used for the calculation.
        """
        if self.minmode_init:
            self.minmode_init = False
            self.initialize_eigenmodes(eigenmodes = self.eigenmodes)
        # The modes must be re-converged before projected forces are used.
        self.rotation_required = True

        self.forces0 = self.atoms.get_forces(**kwargs)
        self.energy0 = self.atoms.get_potential_energy()

        self.control.increment_counter('forcecalls')
        # Remember which configuration these results belong to.
        self.check_atoms = self.atoms.copy()
def get_potential_energy(self):
"""Return the potential energy."""
if self.calculation_required():
self.calculate_real_forces_and_energies()
return self.energy0
def get_forces(self, real=False, pos=None, **kwargs):
"""Return the forces, projected or real."""
if self.calculation_required() and pos is None:
self.calculate_real_forces_and_energies(**kwargs)
if real and pos is None:
return self.forces0
elif real and pos != None:
old_pos = self.atoms.get_positions()
self.atoms.set_positions(pos)
forces = self.atoms.get_forces()
self.control.increment_counter('forcecalls')
self.atoms.set_positions(old_pos)
return forces
else:
if self.rotation_required:
self.find_eigenmodes(order = self.order)
self.eigenmode_log()
self.rotation_required = False
self.control.increment_counter('optcount')
return self.get_projected_forces()
def ensure_eigenmode_orthogonality(self, order):
mode = self.eigenmodes[order - 1].copy()
for k in range(order - 1):
mode = perpendicular_vector(mode, self.eigenmodes[k])
self.eigenmodes[order - 1] = normalize(mode)
def find_eigenmodes(self, order=1):
    """Launch eigenmode searches for modes 1..*order* and store the
    converged modes and curvature estimates."""
    if self.control.get_parameter('eigenmode_method').lower() != 'dimer':
        e = 'Only the Dimer control object has been implemented.'
        raise NotImplementedError(e) # NYI
    for mode_idx in range(order):
        # Higher-order searches stay orthogonal to the modes below them.
        if mode_idx > 0:
            self.ensure_eigenmode_orthogonality(mode_idx + 1)
        search = DimerEigenmodeSearch(
            self, self.control,
            eigenmode = self.eigenmodes[mode_idx],
            basis = self.eigenmodes[:mode_idx])
        search.converge_to_eigenmode()
        search.set_up_for_optimization_step()
        self.eigenmodes[mode_idx] = search.get_eigenmode()
        self.curvatures[mode_idx] = search.get_curvature()
def get_projected_forces(self, pos=None):
    """Return the dimer-projected (effective) forces."""
    if pos is not None:
        forces = self.get_forces(real = True, pos = pos).copy()
    else:
        forces = self.forces0.copy()

    # Invert the force component along each eigenmode.
    # NB: Can this be done with a linear combination, instead?
    for mode_idx, mode in enumerate(self.eigenmodes):
        # NYI This If statement needs to be overridable in the control
        if self.get_curvature(order = mode_idx) > 0.0 and self.order == 1:
            forces = -parallel_vector(forces, mode)
        else:
            forces -= 2 * parallel_vector(forces, mode)
    return forces
def restore_original_positions(self):
    """Restore the MinModeAtoms object positions to the original state."""
    # The pristine positions come from atoms0 via the *_original_*
    # attribute proxy implemented in __getattr__.
    self.atoms.set_positions(self.get_original_positions())
def get_barrier_energy(self):
    """The energy difference between the current and original states"""
    try:
        e_original = self.get_original_potential_energy()
        e_dimer = self.get_potential_energy()
    except RuntimeError:
        w = 'The potential energy is not available, without further ' + \
            'calculations, most likely at the original state.'
        warnings.warn(w, UserWarning)
        return np.nan
    return e_dimer - e_original
def get_control(self):
    """Return the control object holding the search parameters and
    counters (e.g. a DimerControl)."""
    return self.control
def get_curvature(self, order='max'):
    """Return the eigenvalue estimate.

    With the default ``order='max'`` the largest stored curvature is
    returned; an integer selects one mode (1-based index).
    """
    if order == 'max':
        return max(self.curvatures)
    return self.curvatures[order - 1]
def get_eigenmode(self, order=1):
    """Return the current eigenmode guess.

    :param order: 1-based mode index (default: the lowest mode).
    """
    return self.eigenmodes[order - 1]
def get_atoms(self):
    """Return the unextended Atoms object."""
    # The plain working copy, without the MinMode wrapping.
    return self.atoms
def set_atoms(self, atoms):
    """Set a new Atoms object"""
    # Replaces the working copy; calculation_required() will report the
    # cached energy/forces as stale afterwards.
    self.atoms = atoms
def set_eigenmode(self, eigenmode, order=1):
    """Set the eigenmode guess."""
    # *order* is 1-based, matching get_eigenmode().
    self.eigenmodes[order - 1] = eigenmode
def set_curvature(self, curvature, order=1):
    """Set the eigenvalue estimate."""
    # *order* is 1-based, matching get_curvature().
    self.curvatures[order - 1] = curvature
# Pipe all the stuff from Atoms that is not overwritten.
# Pipe all requests for get_original_* to self.atoms0.
def __getattr__(self, attr):
    """Delegate unknown attributes to the working Atoms object; names
    with an ``original`` component are served by the pristine copy
    ``self.atoms0`` (with ``_original_`` collapsed out of the name)."""
    if 'original' not in attr.split('_'):
        return getattr(self.atoms, attr)
    return getattr(self.atoms0, attr.replace('_original_', '_'))
def displace(self, displacement_vector=None, mask=None, method=None,
             displacement_center=None, radius=None, number_of_atoms=None,
             gauss_std=None, mic=True, log=True):
    """Move the atoms away from their current position.

    This is one of the essential parts of minimum mode searches.
    The parameters can all be set in the control object and overwritten
    when this method is run, apart from *displacement_vector*.
    It is preferred to modify the control values rather than those here
    in order for the correct ones to show up in the log file.

    *method* can be either 'gauss' for random displacement or 'vector'
    to perform a predefined displacement.

    *gauss_std* is the standard deviation of the gauss curve that is
    used for random displacement.

    *displacement_center* can be either the number of an atom or a 3D
    position. It must be accompanied by a *radius* (all atoms within it
    will be displaced) or a *number_of_atoms* which decides how many of
    the closest atoms will be displaced.

    *mic* controls the usage of the Minimum Image Convention.

    If both *mask* and *displacement_center* are used, the atoms marked
    as False in the *mask* will not be affected even though they are
    within reach of the *displacement_center*.

    The parameters priority order:
    1) displacement_vector
    2) mask
    3) displacement_center (with radius and/or number_of_atoms)

    If both *radius* and *number_of_atoms* are supplied with
    *displacement_center*, only atoms that fulfill both criteria will
    be displaced.
    """
    # Fetch the default values from the control
    if mask is None:
        mask = self.control.get_parameter('mask')
    if method is None:
        method = self.control.get_parameter('displacement_method')
    if gauss_std is None:
        gauss_std = self.control.get_parameter('gauss_std')
    if displacement_center is None:
        displacement_center = \
            self.control.get_parameter('displacement_center')
    if radius is None:
        radius = self.control.get_parameter('displacement_radius')
    if number_of_atoms is None:
        number_of_atoms = \
            self.control.get_parameter('number_of_displacement_atoms')

    # Check for conflicts
    if displacement_vector is not None and method.lower() != 'vector':
        e = 'displacement_vector was supplied but a different method ' + \
            '(\'%s\') was chosen.\n' % str(method)
        raise ValueError(e)
    elif displacement_vector is None and method.lower() == 'vector':
        e = 'A displacement_vector must be supplied when using ' + \
            'method = \'%s\'.\n' % str(method)
        raise ValueError(e)
    elif displacement_center is not None and radius is None and \
         number_of_atoms is None:
        e = 'When displacement_center is chosen, either radius or ' + \
            'number_of_atoms must be supplied.\n'
        raise ValueError(e)

    # Set up the center of displacement mask (c_mask)
    if displacement_center is not None:
        c = displacement_center
        # Construct a distance list
        # The center is an atom
        if type(c) is int:
            # Parse negative indexes
            c = displacement_center % len(self)
            d = [(k, self.get_distance(k, c, mic = mic)) for k in \
                 range(len(self))]
        # The center is a position in 3D space
        elif len(c) == 3 and [type(c_k) for c_k in c] == [float]*3:
            # NB: MIC is not considered.
            d = [(k, norm(self.get_positions()[k] - c)) \
                 for k in range(len(self))]
        else:
            e = 'displacement_center must be either the number of an ' + \
                'atom in MinModeAtoms object or a 3D position ' + \
                '(3-tuple of floats).'
            raise ValueError(e)

        # Set up the mask: atoms inside *radius* of the center ...
        if radius is not None:
            r_mask = [dist[1] < radius for dist in d]
        else:
            r_mask = [True for _ in self]

        # ... and/or the *number_of_atoms* nearest atoms.
        if number_of_atoms is not None:
            d_sorted = [n[0] for n in sorted(d, key = lambda k: k[1])]
            n_nearest = d_sorted[:number_of_atoms]
            n_mask = [k in n_nearest for k in range(len(self))]
        else:
            n_mask = [True for _ in self]

        # Resolve n_mask / r_mask conflicts
        c_mask = [n_mask[k] and r_mask[k] for k in range(len(self))]
    else:
        c_mask = None

    # Set up a True mask if there is no mask supplied
    if mask is None:
        mask = [True for _ in self]
        if c_mask is None:
            # No selection criteria at all -- warn and displace everything.
            w = 'It was not possible to figure out which atoms to ' + \
                'displace, Will try to displace all atoms.\n'
            warnings.warn(w, UserWarning)
            if self.logfile is not None:
                self.logfile.write('MINMODE:WARN: ' + w + '\n')
                self.logfile.flush()

    # Resolve mask / c_mask conflicts (the explicit mask always wins)
    if c_mask is not None:
        mask = [mask[k] and c_mask[k] for k in range(len(self))]

    if displacement_vector is None:
        # Build a random gaussian displacement for the unmasked atoms.
        displacement_vector = []
        for k in range(len(self)):
            if mask[k]:
                diff_line = []
                for _ in range(3):
                    if method.lower() == 'gauss':
                        if not gauss_std:
                            gauss_std = \
                                self.control.get_parameter('gauss_std')
                        diff = self.random_state.normal(0.0, gauss_std)
                    else:
                        e = 'Invalid displacement method >>%s<<' % \
                            str(method)
                        raise ValueError(e)
                    diff_line.append(diff)
                displacement_vector.append(diff_line)
            else:
                displacement_vector.append([0.0]*3)

    # Remove displacement of masked atoms
    for k in range(len(mask)):
        if not mask[k]:
            displacement_vector[k] = [0.0]*3

    # Perform the displacement and log it
    if log:
        pos0 = self.get_positions()
    self.set_positions(self.get_positions() + displacement_vector)
    if log:
        parameters = {'mask': mask,
                      'displacement_method': method,
                      'gauss_std': gauss_std,
                      'displacement_center': displacement_center,
                      'displacement_radius': radius,
                      'number_of_displacement_atoms': number_of_atoms}
        self.displacement_log(self.get_positions() - pos0, parameters)
def eigenmode_log(self):
    """Write the current eigenmode estimates to the mode logfile."""
    if self.mlogfile is None:
        return
    text = 'MINMODE:MODE: Optimization Step: %i\n' % \
           (self.control.get_counter('optcount'))
    for m_num, mode in enumerate(self.eigenmodes):
        text += 'MINMODE:MODE: Order: %i\n' % m_num
        for row in range(len(mode)):
            vec = mode[row]
            text += 'MINMODE:MODE: %7i %15.8f %15.8f %15.8f\n' % (row,
                vec[0], vec[1], vec[2])
    self.mlogfile.write(text)
    self.mlogfile.flush()
def displacement_log(self, displacement_vector, parameters):
    """Log the displacement vector plus any parameters that were
    overridden relative to the control object."""
    if self.logfile is None:
        return
    header = 'MINMODE:DISP: Parameters, different from the control:\n'
    changed = False
    for key in parameters:
        if parameters[key] != self.control.get_parameter(key):
            header += 'MINMODE:DISP: %s = %s\n' % (str(key),
                                                   str(parameters[key]))
            changed = True
    text = header if changed else ''
    for idx in range(len(displacement_vector)):
        row = displacement_vector[idx]
        text += 'MINMODE:DISP: %7i %15.8f %15.8f %15.8f\n' % (idx,
            row[0], row[1], row[2])
    self.logfile.write(text)
    self.logfile.flush()
def summarize(self):
    """Summarize the Minimum mode search."""
    out = self.logfile if self.logfile is not None else sys.stdout
    c = self.control
    label = 'MINMODE:SUMMARY: '
    text = label + '-------------------------\n'
    text += label + 'Barrier: %16.4f\n' % self.get_barrier_energy()
    text += label + 'Curvature: %14.4f\n' % self.get_curvature()
    text += label + 'Optimizer steps: %8i\n' % c.get_counter('optcount')
    text += label + 'Forcecalls: %13i\n' % c.get_counter('forcecalls')
    text += label + '-------------------------\n'
    out.write(text)
class MinModeTranslate(Optimizer):
    """An Optimizer specifically tailored to minimum mode following."""

    def __init__(self, atoms, logfile='-', trajectory=None):
        # No restart file is used (second positional argument is None).
        Optimizer.__init__(self, atoms, None, logfile, trajectory)

        self.control = atoms.get_control()

        # Make a header for the log
        if self.logfile is not None:
            l = ''
            if isinstance(self.control, DimerControl):
                l = 'MinModeTranslate: STEP TIME ENERGY ' + \
                    'MAX-FORCE STEPSIZE CURVATURE ROT-STEPS\n'
            self.logfile.write(l)
            self.logfile.flush()

        # Load the relevant parameters from control
        self.cg_on = self.control.get_parameter('cg_translation')
        self.trial_step = self.control.get_parameter('trial_trans_step')
        self.max_step = self.control.get_parameter('maximum_translation')

        # Start conjugate gradient
        if self.cg_on:
            self.cg_init = True

    def initialize(self):
        """Set initial values."""
        self.r0 = None
        self.f0 = None

    def run(self, fmax=0.05, steps=100000000):
        """Run structure optimization algorithm.

        This method will return when the forces on all individual
        atoms are less than *fmax* or when the number of steps exceeds
        *steps*.
        """
        self.fmax = fmax
        step = 0
        while step < steps:
            f = self.atoms.get_forces()
            self.call_observers()
            if self.converged(f):
                # Final log entry with no stepsize (prints dashes).
                self.log(f, None)
                return
            self.step(f)
            self.nsteps += 1
            step += 1

    def step(self, f):
        """Perform the optimization step."""
        atoms = self.atoms
        r = atoms.get_positions()
        curv = atoms.get_curvature()
        f0p = f.copy()
        r0 = r.copy()
        direction = f0p.copy()
        if self.cg_on:
            direction = self.get_cg_direction(direction)
        direction = normalize(direction)
        if curv > 0.0:
            # Positive curvature: take the maximum allowed step along
            # the (force-derived) search direction.
            step = direction * self.max_step
        else:
            # Trial step, then a Newton-like extrapolation of the force
            # component along the search direction.
            r0t = r0 + direction * self.trial_step
            f0tp = self.atoms.get_projected_forces(r0t)
            F = np.vdot((f0tp + f0p), direction) / 2.0
            C = np.vdot((f0tp - f0p), direction) / self.trial_step
            step = ( -F / C + self.trial_step / 2.0 ) * direction
            # Clamp to the configured maximum translation.
            if norm(step) > self.max_step:
                step = direction * self.max_step
        self.log(f0p, norm(step))
        atoms.set_positions(r + step)

        self.f0 = f.flat.copy()
        self.r0 = r.flat.copy()

    def get_cg_direction(self, direction):
        """Apply the Conjugate Gradient algorithm to the step direction."""
        if self.cg_init:
            self.cg_init = False
            self.direction_old = direction.copy()
            self.cg_direction = direction.copy()
        old_norm = np.vdot(self.direction_old, self.direction_old)
        # Polak-Ribiere Conjugate Gradient
        if old_norm != 0.0:
            betaPR = np.vdot(direction, (direction - self.direction_old)) / \
                     old_norm
        else:
            betaPR = 0.0
        # Restart (steepest descent) when beta would be negative.
        if betaPR < 0.0:
            betaPR = 0.0
        self.cg_direction = direction + self.cg_direction * betaPR
        self.direction_old = direction.copy()
        return self.cg_direction.copy()

    def log(self, f, stepsize):
        """Log each step of the optimization."""
        if self.logfile is not None:
            T = time.localtime()
            e = self.atoms.get_potential_energy()
            fmax = sqrt((f**2).sum(axis = 1).max())
            rotsteps = self.atoms.control.get_counter('rotcount')
            curvature = self.atoms.get_curvature()
            l = ''
            if stepsize:
                if isinstance(self.control, DimerControl):
                    l = '%s: %4d %02d:%02d:%02d %15.6f %12.4f %12.6f ' \
                        '%12.6f %10d\n' % ('MinModeTranslate', self.nsteps,
                        T[3], T[4], T[5], e, fmax, stepsize, curvature,
                        rotsteps)
            else:
                # stepsize is None/0 on the converged final entry.
                if isinstance(self.control, DimerControl):
                    l = '%s: %4d %02d:%02d:%02d %15.6f %12.4f %s ' \
                        '%12.6f %10d\n' % ('MinModeTranslate', self.nsteps,
                        T[3], T[4], T[5], e, fmax, ' --------',
                        curvature, rotsteps)
            self.logfile.write(l)
            self.logfile.flush()
def read_eigenmode(mlog, index=-1):
    """Read an eigenmode.

    To access the pre optimization eigenmode set index = 'null'.

    *mlog* may be a filename or an open file-like object.  A file that
    is opened here is closed again (the previous version leaked the
    handle).
    """
    opened_here = isinstance(mlog, str)
    if opened_here:
        f = open(mlog, 'r')
    else:
        f = mlog
    try:
        lines = f.readlines()
    finally:
        # Only close what we opened ourselves.
        if opened_here:
            f.close()

    # Detect the amount of atoms and iterations
    k = 2
    while lines[k].split()[1].lower() not in ['optimization', 'order']:
        k += 1
    n = k - 2
    # Floor division keeps the iteration count an int on Python 3 too.
    n_itr = (len(lines) // (n + 1)) - 2

    # Locate the correct image.
    if isinstance(index, str):
        if index.lower() == 'null':
            i = 0
        else:
            i = int(index) + 1
    else:
        if index >= 0:
            i = index + 1
        else:
            if index < -n_itr - 1:
                raise IndexError('list index out of range')
            else:
                i = index

    mode = np.ndarray(shape = (n, 3), dtype = float)
    k_atom = 0
    for k in range(1, n + 1):
        line = lines[i * (n + 1) + k].split()
        for k_dim in range(3):
            mode[k_atom][k_dim] = float(line[k_dim + 2])
        k_atom += 1
    return mode
# Aliases
# Alternative public names for the classes above.
DimerAtoms = MinModeAtoms
DimerTranslate = MinModeTranslate
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/dimer.py | Python | gpl-2.0 | 45,632 | [
"ASE"
] | 6b40260669064c9fa61512e26cb57d087be79ca9cd5575ef7cf9a96373abbfff |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example shows how to use validateOnly SOAP header.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CampaignService.mutate
Api: AdWordsOnly
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)'
'Joseph DiLallo')
import suds
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
    """Demonstrate the validateOnly header: a valid ad is accepted, an
    invalid one raises a WebFault (which is the expected outcome).

    :param client: an initialized AdWordsClient; validate_only is
        switched on here so no mutate actually persists anything.
    :param ad_group_id: the ad group the test ads would be added to.
    """
    # Initialize appropriate service with validate only flag enabled.
    client.validate_only = True
    ad_group_ad_service = client.GetService('AdGroupAdService', version='v201506')

    # Construct operations to add a text ad.
    operations = [{
        'operator': 'ADD',
        'operand': {
            'xsi_type': 'AdGroupAd',
            'adGroupId': ad_group_id,
            'ad': {
                'xsi_type': 'TextAd',
                'finalUrls': {
                    'urls': ['http://www.example.com']
                },
                'displayUrl': 'example.com',
                'description1': 'Visit the Red Planet in style.',
                'description2': 'Low-gravity fun for everyone!',
                'headline': 'Luxury Cruise to Mars'
            }
        }
    }]
    ad_group_ad_service.mutate(operations)
    # No error means the request is valid.

    # Now let's check an invalid ad using a very long line to trigger an error.
    operations = [{
        'operator': 'ADD',
        'operand': {
            'xsi_type': 'AdGroupAd',
            'adGroupId': ad_group_id,
            'ad': {
                'xsi_type': 'TextAd',
                'url': 'http://www.example.com',
                'displayUrl': 'example.com',
                'description1': 'Visit the Red Planet in style.',
                'description2': 'Low-gravity fun for all astronauts in orbit',
                'headline': 'Luxury Cruise to Mars'
            }
        }
    }]
    try:
        ad_group_ad_service.mutate(operations)
    except suds.WebFault, e:
        # Expected path: the server rejects the invalid ad.
        # (Python 2 except syntax; this module is Python 2 only.)
        print 'Validation correctly failed with \'%s\'.' % str(e)
if __name__ == '__main__':
    # Initialize client object.
    # Credentials/properties are read from ~/googleads.yaml
    # (see the module docstring).
    adwords_client = adwords.AdWordsClient.LoadFromStorage()
    main(adwords_client, AD_GROUP_ID)
| cctaylor/googleads-python-lib | examples/adwords/v201506/campaign_management/validate_text_ad.py | Python | apache-2.0 | 2,880 | [
"VisIt"
] | 4442fa5ee2de8580cd44dcc8718a5c7aba599abedd4ea040de024b1c2407ad40 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import passlib.utils
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from collections import defaultdict, Hashable, Iterable, Mapping, OrderedDict
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
from .parse_version import parse_version
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase, etree._Entity)
# Configure default global parser
etree.set_default_parser(etree.XMLParser(resolve_entities=False))
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------

def find_in_path(name):
    """Locate executable *name* on $PATH (plus the configured bin_path)."""
    search_path = os.environ.get('PATH', os.defpath).split(os.pathsep)
    if config.get('bin_path') and config['bin_path'] != 'None':
        search_path.append(config['bin_path'])
    return which(name, path=os.pathsep.join(search_path))
def _exec_pipe(prog, args, env=None):
    """Spawn *prog* with *args*; return the child's (stdin, stdout) pipes."""
    cmd = (prog,) + args
    # on win32, passing close_fds=True is not compatible
    # with redirecting std[in/err/out]
    use_close_fds = os.name == "posix"
    child = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             close_fds=use_close_fds, env=env)
    return child.stdin, child.stdout
def exec_command_pipe(name, *args):
    """Resolve *name* on the path and run it, returning (stdin, stdout)."""
    prog = find_in_path(name)
    if not prog:
        raise Exception('Command `%s` not found.' % name)
    return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------

def find_pg_tool(name):
    """Locate a PostgreSQL client tool, honouring the configured pg_path."""
    pg_path = None
    if config['pg_path'] and config['pg_path'] != 'None':
        pg_path = config['pg_path']
    try:
        return which(name, path=pg_path)
    except IOError:
        raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
    """
    Force the database PostgreSQL environment variables to the database
    configuration of Odoo.

    Note: On systems where pg_restore/pg_dump require an explicit password
    (i.e. on Windows where TCP sockets are used), it is necessary to pass the
    postgres user password in the PGPASSWORD environment variable or in a
    special .pgpass file.

    See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
    """
    env = os.environ.copy()
    cfg = openerp.tools.config
    if cfg['db_host']:
        env['PGHOST'] = cfg['db_host']
    if cfg['db_port']:
        env['PGPORT'] = str(cfg['db_port'])
    if cfg['db_user']:
        env['PGUSER'] = cfg['db_user']
    if cfg['db_password']:
        env['PGPASSWORD'] = cfg['db_password']
    return env
def exec_pg_command(name, *args):
    """Run a PostgreSQL tool synchronously; raise on a non-zero exit code."""
    prog = find_pg_tool(name)
    env = exec_pg_environ()
    with open(os.devnull) as devnull:
        full_cmd = (prog,) + args
        returncode = subprocess.call(full_cmd, env=env, stdout=devnull,
                                     stderr=subprocess.STDOUT)
        if returncode:
            raise Exception('Postgres subprocess %s error %s' % (full_cmd, returncode))
def exec_pg_command_pipe(name, *args):
    """Run a PostgreSQL tool with the proper PG* environment, returning
    the child's (stdin, stdout) pipes (see _exec_pipe)."""
    prog = find_pg_tool(name)
    env = exec_pg_environ()
    return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')

def file_open(name, mode="r", subdir='addons', pathinfo=False):
    """Open a file from the OpenERP root, using a subdir folder.

    Example::

        >>> file_open('hr/report/timesheer.xsl')
        >>> file_open('addons/hr/report/timesheet.xsl')
        >>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)

    @param name name of the file
    @param mode file open mode
    @param subdir subdirectory
    @param pathinfo if True returns tuple (fileobject, filepath)

    @return fileobject if pathinfo is False else (fileobject, filepath)
    """
    import openerp.modules as addons
    adps = addons.module.ad_paths
    rtp = os.path.normcase(os.path.abspath(config['root_path']))

    basename = name

    if os.path.isabs(name):
        # It is an absolute path
        # Is it below 'addons_path' or 'root_path'?
        name = os.path.normcase(os.path.normpath(name))
        for root in adps + [rtp]:
            root = os.path.normcase(os.path.normpath(root)) + os.sep
            if name.startswith(root):
                # Strip the matched root and continue with a relative name.
                base = root.rstrip(os.sep)
                name = name[len(base) + 1:]
                break
        else:
            # It is outside the OpenERP root: skip zipfile lookup.
            base, name = os.path.split(name)
            return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)

    # Normalize 'addons/...' prefixes so the lookup below can strip them.
    if name.replace(os.sep, '/').startswith('addons/'):
        subdir = 'addons'
        name2 = name[7:]
    elif subdir:
        name = os.path.join(subdir, name)
        if name.replace(os.sep, '/').startswith('addons/'):
            subdir = 'addons'
            name2 = name[7:]
        else:
            name2 = name

    # First, try to locate in addons_path
    if subdir:
        for adp in adps:
            try:
                return _fileopen(name2, mode=mode, basedir=adp,
                                 pathinfo=pathinfo, basename=basename)
            except IOError:
                pass

    # Second, try to locate in root_path
    return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
    """Open *path* relative to *basedir*, falling back to a lookup inside
    zipped modules; raises IOError when nothing is found.

    Returns the file object, or (file object, resolved name) when
    *pathinfo* is true.
    """
    name = os.path.normpath(os.path.join(basedir, path))

    if basename is None:
        basename = name
    # Give higher priority to module directories, which is
    # a more common case than zipped modules.
    if os.path.isfile(name):
        fo = open(name, mode)
        if pathinfo:
            return fo, name
        return fo

    # Support for loading modules in zipped form.
    # This will not work for zipped modules that are sitting
    # outside of known addons paths.
    head = os.path.normpath(path)
    zipname = False
    while os.sep in head:
        # Walk the path upwards, looking for '<head>.zip' archives that
        # might contain the remaining '<zipname>' tail.
        head, tail = os.path.split(head)
        if not tail:
            break
        if zipname:
            zipname = os.path.join(tail, zipname)
        else:
            zipname = tail
        zpath = os.path.join(basedir, head + '.zip')
        if zipfile.is_zipfile(zpath):
            from cStringIO import StringIO
            zfile = zipfile.ZipFile(zpath)
            try:
                fo = StringIO()
                fo.write(zfile.read(os.path.join(
                    os.path.basename(head), zipname).replace(
                        os.sep, '/')))
                fo.seek(0)
                if pathinfo:
                    return fo, name
                return fo
            except Exception:
                # Entry not in this archive -- keep walking upwards.
                pass
    # Not found
    if name.endswith('.rml'):
        raise IOError('Report %r doesn\'t exist or deleted' % basename)
    raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
    """Recursively flatten nested iterables into a single flat list.

    Elements without an ``__iter__`` attribute are kept as-is, e.g.
    ``flatten([[['a','b'], 'c'], 'd'])`` gives ``['a', 'b', 'c', 'd']``.
    Author: Christophe Simonis (christophe@tinyerp.com)
    """
    def isiterable(x):
        return hasattr(x, "__iter__")

    flat = []
    for elem in list:
        if isiterable(elem):
            flat.extend(flatten(elem))
        else:
            flat.append(elem)
    return flat
def reverse_enumerate(l):
    """Like enumerate, but iterating from the last element to the first.

    Usage::

        >>> it = reverse_enumerate(['a', 'b', 'c'])
        >>> it.next()
        (2, 'c')
        >>> it.next()
        (1, 'b')
        >>> it.next()
        (0, 'a')
    """
    # reversed(xrange(n)) yields n-1 .. 0, matching xrange(n-1, -1, -1).
    return izip(reversed(xrange(len(l))), reversed(l))
def topological_sort(elems):
    """ Return a list of elements sorted so that their dependencies are listed
    before them in the result.

    :param elems: specifies the elements to sort with their dependencies; it is
        a dictionary like `{element: dependencies}` where `dependencies` is a
        collection of elements that must appear before `element`. The elements
        of `dependencies` are not required to appear in `elems`; they will
        simply not appear in the result.

    :returns: a list with the keys of `elems` sorted according to their
        specification.
    """
    # the algorithm is inspired by [Tarjan 1976],
    # http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
    result = []
    visited = set()

    def visit(n):
        if n not in visited:
            visited.add(n)
            if n in elems:
                # first visit all dependencies of n, then append n to result
                # BUG FIX: used map(visit, ...), which is lazy on Python 3
                # and silently skipped the recursive visits; the explicit
                # loop behaves identically on Python 2.
                for dep in elems[n]:
                    visit(dep)
                result.append(n)

    for n in elems:
        visit(n)
    return result
class UpdateableStr(local):
    """ Class that stores an updateable string (used in wizards)
    """
    # Thread-local (inherits threading.local) so each thread sees its
    # own value.

    def __init__(self, string=''):
        # The wrapped value; may be replaced at any time.
        self.string = string

    def __str__(self):
        return str(self.string)

    def __repr__(self):
        return str(self.string)

    def __nonzero__(self):
        # Python 2 truth protocol: empty string -> False.
        return bool(self.string)
class UpdateableDict(local):
    """Stores an updateable dict to use in wizards
    """
    # Thread-local wrapper that forwards the full mapping protocol to the
    # wrapped ``self.dict`` (including the Python 2-only methods
    # iteritems/iterkeys/itervalues/has_key/__cmp__).

    def __init__(self, dict=None):
        if dict is None:
            dict = {}
        self.dict = dict

    def __str__(self):
        return str(self.dict)

    def __repr__(self):
        return str(self.dict)

    def clear(self):
        return self.dict.clear()

    def keys(self):
        return self.dict.keys()

    def __setitem__(self, i, y):
        self.dict.__setitem__(i, y)

    def __getitem__(self, i):
        return self.dict.__getitem__(i)

    def copy(self):
        return self.dict.copy()

    def iteritems(self):
        return self.dict.iteritems()

    def iterkeys(self):
        return self.dict.iterkeys()

    def itervalues(self):
        return self.dict.itervalues()

    def pop(self, k, d=None):
        return self.dict.pop(k, d)

    def popitem(self):
        return self.dict.popitem()

    def setdefault(self, k, d=None):
        return self.dict.setdefault(k, d)

    def update(self, E, **F):
        # NOTE(review): passes F positionally -- dict.update(E, F) raises
        # TypeError when kwargs are given; likely meant update(E, **F).
        # Left unchanged to preserve behavior.
        return self.dict.update(E, F)

    def values(self):
        return self.dict.values()

    def get(self, k, d=None):
        return self.dict.get(k, d)

    def has_key(self, k):
        return self.dict.has_key(k)

    def items(self):
        return self.dict.items()

    def __cmp__(self, y):
        return self.dict.__cmp__(y)

    def __contains__(self, k):
        return self.dict.__contains__(k)

    def __delitem__(self, y):
        return self.dict.__delitem__(y)

    def __eq__(self, y):
        return self.dict.__eq__(y)

    def __ge__(self, y):
        return self.dict.__ge__(y)

    def __gt__(self, y):
        return self.dict.__gt__(y)

    def __hash__(self):
        return self.dict.__hash__()

    def __iter__(self):
        return self.dict.__iter__()

    def __le__(self, y):
        return self.dict.__le__(y)

    def __len__(self):
        return self.dict.__len__()

    def __lt__(self, y):
        return self.dict.__lt__(y)

    def __ne__(self, y):
        return self.dict.__ne__(y)
def to_xml(s):
    """Escape the XML special characters ``&``, ``<`` and ``>`` in *s*.

    ``&`` must be replaced first so the entities produced for ``<`` and
    ``>`` are not double-escaped.  (The previous visible version did
    identity replaces -- the entity references had been lost -- making
    the function a no-op.)
    """
    return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def get_iso_codes(lang):
    """Collapse a locale code like ``'fr_FR'`` to ``'fr'`` when the
    territory merely repeats the language; otherwise return it as-is."""
    if '_' in lang:
        parts = lang.split('_')
        if parts[0] == parts[1].lower():
            return parts[0]
    return lang
# Languages installable for translation, mapping locale code to a
# human-readable "English name / native name" label.
# FIX: the original listed 'fr_CA' twice (identical value); the
# duplicate dict key has been removed.
ALL_LANGUAGES = {
    'am_ET': u'Amharic / አምሃርኛ',
    'ar_SY': u'Arabic / الْعَرَبيّة',
    'bg_BG': u'Bulgarian / български език',
    'bs_BA': u'Bosnian / bosanski jezik',
    'ca_ES': u'Catalan / Català',
    'cs_CZ': u'Czech / Čeština',
    'da_DK': u'Danish / Dansk',
    'de_DE': u'German / Deutsch',
    'el_GR': u'Greek / Ελληνικά',
    'en_AU': u'English (AU)',
    'en_GB': u'English (UK)',
    'en_US': u'English (US)',
    'es_AR': u'Spanish (AR) / Español (AR)',
    'es_BO': u'Spanish (BO) / Español (BO)',
    'es_CL': u'Spanish (CL) / Español (CL)',
    'es_CO': u'Spanish (CO) / Español (CO)',
    'es_CR': u'Spanish (CR) / Español (CR)',
    'es_DO': u'Spanish (DO) / Español (DO)',
    'es_EC': u'Spanish (EC) / Español (EC)',
    'es_ES': u'Spanish / Español',
    'es_GT': u'Spanish (GT) / Español (GT)',
    'es_MX': u'Spanish (MX) / Español (MX)',
    'es_PA': u'Spanish (PA) / Español (PA)',
    'es_PE': u'Spanish (PE) / Español (PE)',
    'es_PY': u'Spanish (PY) / Español (PY)',
    'es_UY': u'Spanish (UY) / Español (UY)',
    'es_VE': u'Spanish (VE) / Español (VE)',
    'et_EE': u'Estonian / Eesti keel',
    'eu_ES': u'Basque / Euskara',
    'fa_IR': u'Persian / فارس',
    'fi_FI': u'Finnish / Suomi',
    'fr_BE': u'French (BE) / Français (BE)',
    'fr_CA': u'French (CA) / Français (CA)',
    'fr_CH': u'French (CH) / Français (CH)',
    'fr_FR': u'French / Français',
    'gl_ES': u'Galician / Galego',
    'gu_IN': u'Gujarati / ગુજરાતી',
    'he_IL': u'Hebrew / עִבְרִי',
    'hi_IN': u'Hindi / हिंदी',
    'hr_HR': u'Croatian / hrvatski jezik',
    'hu_HU': u'Hungarian / Magyar',
    'id_ID': u'Indonesian / Bahasa Indonesia',
    'it_IT': u'Italian / Italiano',
    'ja_JP': u'Japanese / 日本語',
    'ka_GE': u'Georgian / ქართული ენა',
    'kab_DZ': u'Kabyle / Taqbaylit',
    'ko_KP': u'Korean (KP) / 한국어 (KP)',
    'ko_KR': u'Korean (KR) / 한국어 (KR)',
    'lo_LA': u'Lao / ພາສາລາວ',
    'lt_LT': u'Lithuanian / Lietuvių kalba',
    'lv_LV': u'Latvian / latviešu valoda',
    'mk_MK': u'Macedonian / македонски јазик',
    'mn_MN': u'Mongolian / монгол',
    'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
    'nl_NL': u'Dutch / Nederlands',
    'nl_BE': u'Dutch (BE) / Nederlands (BE)',
    'pl_PL': u'Polish / Język polski',
    'pt_BR': u'Portuguese (BR) / Português (BR)',
    'pt_PT': u'Portuguese / Português',
    'ro_RO': u'Romanian / română',
    'ru_RU': u'Russian / русский язык',
    'sl_SI': u'Slovenian / slovenščina',
    'sk_SK': u'Slovak / Slovenský jazyk',
    'sq_AL': u'Albanian / Shqip',
    'sr_RS': u'Serbian (Cyrillic) / српски',
    'sr@latin': u'Serbian (Latin) / srpski',
    'sv_SE': u'Swedish / svenska',
    'te_IN': u'Telugu / తెలుగు',
    'tr_TR': u'Turkish / Türkçe',
    'vi_VN': u'Vietnamese / Tiếng Việt',
    'uk_UA': u'Ukrainian / українська',
    'zh_CN': u'Chinese (CN) / 简体中文',
    'zh_HK': u'Chinese (HK)',
    'zh_TW': u'Chinese (TW) / 正體字',
    'th_TH': u'Thai / ภาษาไทย',
}
def scan_languages():
    """ Returns all languages supported by OpenERP for translation

    :returns: a list of (lang_code, lang_name) pairs
    :rtype: [(str, unicode)]
    """
    # Sorted by the human-readable name (Python 2 iteritems).
    return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
    """Return the id of *user*'s company followed by the ids of all of
    its descendant companies.

    :param cr: database cursor
    :param user: id of the res.users record
    :returns: list of res.company ids ([] when the user has no company)
    """
    def _get_company_children(cr, ids):
        # Recursively collect ids of companies whose parent is in *ids*.
        if not ids:
            return []
        cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
        res = [x[0] for x in cr.fetchall()]
        res.extend(_get_company_children(cr, res))
        return res
    cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
    user_comp = cr.fetchone()[0]
    if not user_comp:
        return []
    return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
    """
    Input number : account or invoice number
    Output return: the same number completed with the recursive mod10
    key

    Non-digit characters are copied through but do not influence the
    check digit.
    """
    codec = [0, 9, 4, 6, 8, 2, 7, 1, 3, 5]
    report = 0
    result = ""
    for char in number:
        result += char
        if char.isdigit():
            report = codec[(int(char) + report) % 10]
    return result + str((10 - report) % 10)
def human_size(sz):
    """Return the size in a human readable format"""
    if not sz:
        return False
    units = ('bytes', 'Kb', 'Mb', 'Gb')
    # A string argument stands for its own byte length (Python 2
    # basestring covers str and unicode).
    if isinstance(sz, basestring):
        sz = len(sz)
    size = float(sz)
    unit_idx = 0
    while size >= 1024 and unit_idx < len(units) - 1:
        size /= 1024
        unit_idx += 1
    return "%0.2f %s" % (size, units[unit_idx])
def logged(f):
    """Decorator logging every call of *f*: arguments, result and the
    elapsed wall time, at DEBUG level."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        from pprint import pformat

        lines = ['Call -> function: %r' % f]
        for i, arg in enumerate(args):
            lines.append(' arg %02d: %s' % (i, pformat(arg)))
        for key, value in kwargs.items():
            lines.append(' kwarg %10s: %s' % (key, pformat(value)))

        started = time.time()
        res = f(*args, **kwargs)
        lines.append(' result: %s' % pformat(res))
        lines.append(' time delta: %s' % (time.time() - started))
        _logger.debug('\n'.join(lines))
        return res
    return wrapper
class profile(object):
    """Decorator running each call of the wrapped function under
    :mod:`cProfile`; the stats are dumped to ``fname``, or to
    ``<function name>.cprof`` when no file name was given."""
    def __init__(self, fname=None):
        self.fname = fname

    def __call__(self, f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            profiler = cProfile.Profile()
            result = profiler.runcall(f, *args, **kwargs)
            profiler.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
            return result
        return wrapper
def detect_ip_addr():
    """Try a very crude method to figure out a valid external
    IP or hostname for the current machine. Don't rely on this
    for binding to an interface, but it could be used as basis
    for constructing a remote URL to the server.

    Falls back to 'localhost' on any failure.
    """
    def _detect_ip_addr():
        # Strategy: on non-UNIX platforms (no fcntl) resolve the hostname
        # via DNS; on UNIX enumerate the network interfaces with ioctl and
        # return the first non-loopback address.
        from array import array
        from struct import pack, unpack
        try:
            import fcntl
        except ImportError:
            fcntl = None
        ip_addr = None
        if not fcntl: # not UNIX:
            host = socket.gethostname()
            ip_addr = socket.gethostbyname(host)
        else: # UNIX:
            # get all interfaces:
            nbytes = 128 * 32
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            names = array('B', '\0' * nbytes)
            #print 'names: ', names
            # 0x8912 is SIOCGIFCONF: fills `names` with the interface list
            outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
            namestr = names.tostring()
            # try 64 bit kernel:
            # (ifreq records are 40 bytes on 64-bit kernels, 32 on 32-bit)
            for i in range(0, outbytes, 40):
                name = namestr[i:i+16].split('\0', 1)[0]
                if name != 'lo':
                    ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
                    break
            # try 32 bit kernel:
            if ip_addr is None:
                ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
                for ifname in [iface for iface in ifaces if iface != 'lo']:
                    # 0x8915 is SIOCGIFADDR: fetch the address of one interface
                    ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
                    break
        return ip_addr or 'localhost'
    try:
        ip_addr = _detect_ip_addr()
    except Exception:
        # best-effort helper: never propagate, just degrade to localhost
        ip_addr = 'localhost'
    return ip_addr
# Canonical, locale-independent formats used for server-side (de)serialization
# of dates and datetimes.
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
    DEFAULT_SERVER_DATE_FORMAT,
    DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
# Maps non-portable strftime directives to a C89-safe equivalent
# (empty string means "strip the directive entirely").
DATETIME_FORMATS_MAP = {
        '%C': '', # century
        '%D': '%m/%d/%Y', # modified %y->%Y
        '%e': '%d',
        '%E': '', # special modifier
        '%F': '%Y-%m-%d',
        '%g': '%Y', # modified %y->%Y
        '%G': '%Y',
        '%h': '%b',
        '%k': '%H',
        '%l': '%I',
        '%n': '\n',
        '%O': '', # special modifier
        '%P': '%p',
        '%R': '%H:%M',
        '%r': '%I:%M:%S %p',
        '%s': '', #num of seconds since epoch
        '%T': '%H:%M:%S',
        '%t': ' ', # tab
        '%u': ' %w',
        '%V': '%W',
        '%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
        '%+': '%Y-%m-%d %H:%M:%S',
        # %Z is a special case that causes 2 problems at least:
        #  - the timezone names we use (in res_user.context_tz) come
        #    from pytz, but not all these names are recognized by
        #    strptime(), so we cannot convert in both directions
        #    when such a timezone is selected and %Z is in the format
        #  - %Z is replaced by an empty string in strftime() when
        #    there is not tzinfo in a datetime value (e.g when the user
        #    did not pick a context_tz). The resulting string does not
        #    parse back if the format requires %Z.
        # As a consequence, we strip it completely from format strings.
        # The user can always have a look at the context_tz in
        # preferences to check the timezone.
        '%z': '',
        '%Z': '',
}
# Maps single strftime directive characters to their LDML (CLDR date
# pattern) equivalent; consumed by posix_to_ldml() below.
POSIX_TO_LDML = {
    'a': 'E',
    'A': 'EEEE',
    'b': 'MMM',
    'B': 'MMMM',
    #'c': '',
    'd': 'dd',
    'H': 'HH',
    'I': 'hh',
    'j': 'DDD',
    'm': 'MM',
    'M': 'mm',
    'p': 'a',
    'S': 'ss',
    'U': 'w',
    'w': 'e',
    'W': 'w',
    'y': 'yy',
    'Y': 'yyyy',
    # see comments above, and babel's format_datetime assumes an UTC timezone
    # for naive datetime objects
    #'z': 'Z',
    #'Z': 'z',
}
def posix_to_ldml(fmt, locale):
    """ Converts a posix/strftime pattern into an LDML date format pattern.

    :param fmt: non-extended C89/C90 strftime pattern
    :param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
    :return: unicode
    """
    out = []
    literal = []      # pending run of literal letters, emitted quoted
    percent = False   # True when the previous character was '%'
    for ch in fmt:
        # LDML treats letters as pattern characters, so literal letters
        # must be wrapped in single quotes (an embedded quote doubles).
        if not percent and ch.isalpha():
            literal.append("''" if ch == "'" else ch)
            continue
        if literal:
            out.append("'")
            out.append(''.join(literal))
            out.append("'")
            literal = []
        if percent:
            if ch == '%': # escaped percent
                out.append('%')
            elif ch == 'x': # date format, short seems to match
                out.append(locale.date_formats['short'].pattern)
            elif ch == 'X': # time format, seems to include seconds. short does not
                out.append(locale.time_formats['medium'].pattern)
            else: # look up format char in static mapping
                out.append(POSIX_TO_LDML[ch])
            percent = False
        elif ch == '%':
            percent = True
        else:
            out.append(ch)
    # flush anything remaining in the literal buffer
    if literal:
        out.append("'")
        out.append(''.join(literal))
        out.append("'")
    return ''.join(out)
def split_every(n, iterable, piece_maker=tuple):
    """Splits an iterable into length-n pieces. The last piece will be shorter
    if ``n`` does not evenly divide the iterable length.

    @param ``piece_maker``: function to build the pieces
    from the slices (tuple,list,...)
    """
    it = iter(iterable)
    chunk = piece_maker(islice(it, n))
    while chunk:
        yield chunk
        chunk = piece_maker(islice(it, n))
if __name__ == '__main__':
    import doctest
    # when run as a script, exercise the doctests embedded in this
    # module's docstrings (e.g. the unquote/UnquoteEvalContext examples)
    doctest.testmod()
class upload_data_thread(threading.Thread):
    """Background thread POSTing survey/usage data to openerp.com.

    Fire-and-forget: any failure (network, encoding, ...) is silently
    swallowed so the caller is never disturbed.
    """
    def __init__(self, email, data, type):
        # NOTE: `type` shadows the builtin; kept for API compatibility.
        self.args = [('email',email),('type',type),('data',data)]
        super(upload_data_thread,self).__init__()
    def run(self):
        try:
            import urllib
            args = urllib.urlencode(self.args)
            fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
            fp.read()
            fp.close()
        except Exception:
            # deliberate best-effort: never propagate upload failures
            pass
def upload_data(email, data, type='SURVEY'):
    """Upload ``data`` to openerp.com in a background thread and return
    immediately (always True; failures are silently ignored)."""
    uploader = upload_data_thread(email, data, type)
    uploader.start()
    return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
    """Read ``field`` on the given ``ids`` and group the ids by its value.

    :param string field: name of the field we want to read and group by
    :return: mapping of field values to the list of ids that have it
    :rtype: dict
    """
    grouped = {}
    for record in obj.read(cr, uid, ids, [field], context=context):
        value = record[field]
        # many2one fields come back as (id, name) tuples: group on the id
        if isinstance(value, tuple):
            value = value[0]
        grouped.setdefault(value, []).append(record['id'])
    return grouped
def get_and_group_by_company(cr, uid, obj, ids, context=None):
    """Group the given ``ids`` of ``obj`` by their ``company_id`` value."""
    return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
    """Follow the dotted attribute path ``attr`` starting from ``obj``."""
    value = obj
    for part in attr.split("."):
        value = getattr(value, part)
    return value
def attrgetter(*items):
    """Return a callable fetching the given dotted attribute path(s);
    with several paths, the callable returns a tuple of values."""
    if len(items) == 1:
        single = items[0]
        def g(obj):
            return resolve_attr(obj, single)
    else:
        def g(obj):
            return tuple(resolve_attr(obj, path) for path in items)
    return g
class unquote(str):
    """A str subclass whose repr() is the bare string itself, with no
    quotation marks and no escaping. The name comes from Lisp's unquote.

    Useful to keep or inject bare variable names inside dicts that will
    go through repr()/eval() round-trips. Use with care.

    Examples (note the absence of quotes around ``active_id``):

    >>> unquote('active_id')
    active_id
    >>> d = {'test': unquote('active_id')}
    >>> d
    {'test': active_id}
    >>> print d
    {'test': active_id}
    """
    def __repr__(self):
        return self
class UnquoteEvalContext(defaultdict):
    """Evaluation context returning an ``unquote`` string for every
    missing name looked up during the evaluation.

    Mostly useful when evaluating OpenERP domains/contexts that refer to
    names unknown at eval time: converting the result back to a string
    preserves the original bare names.

    **Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
    ``safe_eval()`` will shadow the builtins, which may cause other
    failures, depending on what is evaluated.

    Example (``section_id`` survives in the result):

    >>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
    >>> eval(context_str, UnquoteEvalContext(uid=1))
    {'default_user_id': 1, 'default_section_id': section_id}
    """
    def __init__(self, *args, **kwargs):
        # no default_factory: misses are handled by __missing__ instead
        defaultdict.__init__(self, None, *args, **kwargs)

    def __missing__(self, key):
        return unquote(key)
class mute_logger(object):
    """Temporarily silence one or more named loggers.

    Usable both as a context manager and as a decorator:

        @mute_logger('openerp.plic.ploc')
        def do_stuff():
            blahblah()

        with mute_logger('openerp.foo.bar'):
            do_suff()
    """
    def __init__(self, *loggers):
        self.loggers = loggers

    def filter(self, record):
        # installed as a logging filter on entry: drop every record
        return 0

    def __enter__(self):
        for name in self.loggers:
            assert isinstance(name, basestring), \
                "A logger name must be a string, got %s" % type(name)
            logging.getLogger(name).addFilter(self)

    def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
        for name in self.loggers:
            logging.getLogger(name).removeFilter(self)

    def __call__(self, func):
        @wraps(func)
        def deco(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return deco
# Unique sentinel used by CountingStream to detect stream exhaustion
# without relying on StopIteration from the wrapped iterator.
_ph = object()
class CountingStream(object):
    """ Stream wrapper counting the number of element it has yielded. Similar
    role to ``enumerate``, but for use when the iteration process of the stream
    isn't fully under caller control (the stream can be iterated from multiple
    points including within a library)

    ``start`` allows overriding the starting index (the index before the first
    item is returned).

    On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
    by one.

    .. attribute:: index

       ``int``, index of the last yielded element in the stream. If the stream
       has ended, will give an index 1-past the stream
    """
    def __init__(self, stream, start=-1):
        self.stream = iter(stream)
        self.index = start
        self.stopped = False

    def __iter__(self):
        return self

    def next(self):
        if not self.stopped:
            self.index += 1
            value = next(self.stream, _ph)
            if value is not _ph:
                return value
            # remember exhaustion so index stops moving on further calls
            self.stopped = True
        raise StopIteration()
def stripped_sys_argv(*strip_args):
    """Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
    # Always strip the options below, in addition to the caller-supplied ones.
    strip_args = sorted(set(strip_args) | set(['-s', '--save', '-u', '--update', '-i', '--init']))
    assert all(config.parser.has_option(s) for s in strip_args)
    # for each option, whether it consumes a following value argument
    takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
    # lexicographic sort above guarantees every '--long' option comes
    # before any '-x' short one, so groupby yields exactly two groups,
    # longs first, shorts second
    longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
    longs_eq = tuple(l + '=' for l in longs if takes_value[l])
    args = sys.argv[:]
    def strip(args, i):
        # strip args[i] when it is a targeted option (short prefix,
        # '--opt=value', or exact long form), or when it is the value of
        # the preceding stripped value-taking option
        return args[i].startswith(shorts) \
            or args[i].startswith(longs_eq) or (args[i] in longs) \
            or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
    return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
    """
    An immutable mapping that answers every single key lookup with the
    same provided value. Useful as a default value for mapping parameters.
    """
    __slots__ = ['_value']

    def __init__(self, val):
        self._value = val

    def __len__(self):
        """
        defaultdict updates its length for each individually requested key, is
        that really useful?
        """
        # no keys are actually stored, so report an empty mapping
        return 0

    def __iter__(self):
        """
        same as len, defaultdict udpates its iterable keyset with each key
        requested, is there a point for this?
        """
        return iter([])

    def __getitem__(self, item):
        # every possible key maps to the one shared value
        return self._value
def dumpstacks(sig=None, frame=None):
    """ Signal handler: dump a stack trace for each existing thread.

    The (sig, frame) parameters are accepted so it can be installed with
    signal.signal(); both are ignored. Output goes to the module logger
    at INFO level.
    """
    code = []
    def extract_stack(stack):
        for filename, lineno, name, line in traceback.extract_stack(stack):
            yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
            if line:
                yield "  %s" % (line.strip(),)
    # code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
    # modified for python 2.5 compatibility
    # NOTE(review): 'uid' appears to be attached to threads elsewhere in
    # the server (RPC layer?); defaults to 'n/a' here — confirm.
    threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
                        for th in threading.enumerate()])
    for threadId, stack in sys._current_frames().items():
        thread_info = threads_info.get(threadId)
        code.append("\n# Thread: %s (id:%s) (uid:%s)" %
                    (thread_info and thread_info['name'] or 'n/a',
                     threadId,
                     thread_info and thread_info['uid'] or 'n/a'))
        for line in extract_stack(stack):
            code.append(line)
    if openerp.evented:
        # code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
        import gc
        from greenlet import greenlet
        for ob in gc.get_objects():
            if not isinstance(ob, greenlet) or not ob:
                continue
            code.append("\n# Greenlet: %r" % (ob,))
            for line in extract_stack(ob.gr_frame):
                code.append(line)
    _logger.info("\n".join(code))
def freehash(arg):
    """Best-effort hash that also works on unhashable values: mappings
    are hashed as frozendicts, other iterables as frozensets, and values
    that are still unhashable fall back to their id()."""
    if isinstance(arg, Mapping):
        return hash(frozendict(arg))
    if isinstance(arg, Iterable):
        return hash(frozenset(arg))
    if isinstance(arg, Hashable):
        return hash(arg)
    return id(arg)
class frozendict(dict):
    """An immutable :class:`dict`: every mutating operation raises
    NotImplementedError, which also makes instances safely hashable."""

    # -- all mutators of dict are disabled --
    def __delitem__(self, key):
        raise NotImplementedError("'__delitem__' not supported on frozendict")

    def __setitem__(self, key, val):
        raise NotImplementedError("'__setitem__' not supported on frozendict")

    def clear(self):
        raise NotImplementedError("'clear' not supported on frozendict")

    def pop(self, key, default=None):
        raise NotImplementedError("'pop' not supported on frozendict")

    def popitem(self):
        raise NotImplementedError("'popitem' not supported on frozendict")

    def setdefault(self, key, default=None):
        raise NotImplementedError("'setdefault' not supported on frozendict")

    def update(self, *args, **kwargs):
        raise NotImplementedError("'update' not supported on frozendict")

    def __hash__(self):
        # values may themselves be unhashable: delegate to freehash
        return hash(frozenset((key, freehash(val)) for key, val in self.iteritems()))
class Collector(Mapping):
    """ A mapping from keys to lists. This is essentially a space optimization
        for ``defaultdict(list)``: missing keys read as an empty tuple and
        are never materialized by lookups.
    """
    __slots__ = ['_map']

    def __init__(self):
        self._map = {}

    def add(self, key, val):
        """Append ``val`` to ``key``'s list, skipping duplicates."""
        bucket = self._map.setdefault(key, [])
        if val not in bucket:
            bucket.append(val)

    def __getitem__(self, key):
        # unknown keys read as an empty tuple without being stored
        return self._map.get(key, ())

    def __iter__(self):
        return iter(self._map)

    def __len__(self):
        return len(self._map)
class OrderedSet(OrderedDict):
    """ A set-like collection remembering first-insertion order of its
    elements (backed by an OrderedDict with None values). """
    def __init__(self, seq=()):
        super(OrderedSet, self).__init__((elem, None) for elem in seq)

    def add(self, elem):
        self[elem] = None

    def discard(self, elem):
        # no-op when the element is absent, like set.discard
        self.pop(elem, None)
@contextmanager
def ignore(*exc):
    """Context manager silently swallowing exceptions of the given
    class(es); anything else propagates normally."""
    try:
        yield
    except exc:
        pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
    def html_escape(text):
        # werkzeug < 0.9: quote escaping must be requested explicitly
        return werkzeug.utils.escape(text, quote=True)
else:
    def html_escape(text):
        # werkzeug >= 0.9: quotes are escaped by default and the `quote`
        # argument is deprecated, so don't pass it
        return werkzeug.utils.escape(text)
def formatLang(env, value, digits=None, grouping=True, monetary=False, dp=False, currency_obj=False):
    """Format ``value`` as a number string using the user's language settings.

    Assuming 'Account' decimal.precision=3:
        formatLang(value) -> digits=2 (default)
        formatLang(value, digits=4) -> digits=4
        formatLang(value, dp='Account') -> digits=3
        formatLang(value, digits=5, dp='Account') -> digits=5

    :param env: environment, used to look up decimal precision and language
    :param digits: explicit number of decimals (``dp`` takes precedence)
    :param grouping: apply the language's thousands separators
    :param monetary: use the language's monetary formatting
    :param dp: name of a decimal.precision record to read the digits from
    :param currency_obj: optional currency whose symbol is added before/after
    """
    # Always bind the fallback: the dp / field-introspection branches below
    # can leave `digits` falsy, and the original only defined DEFAULT_DIGITS
    # when `digits` started out as None (NameError on the other paths).
    DEFAULT_DIGITS = 2
    if digits is None:
        digits = DEFAULT_DIGITS
    if dp:
        decimal_precision_obj = env['decimal.precision']
        digits = decimal_precision_obj.precision_get(dp)
    elif (hasattr(value, '_field') and isinstance(value._field, (float_field, function_field)) and value._field.digits):
        digits = value._field.digits[1]
    # use equality, not identity: `digits is not 0` only worked by virtue of
    # CPython's small-integer caching
    if not digits and digits != 0:
        digits = DEFAULT_DIGITS
    if isinstance(value, (str, unicode)) and not value:
        return ''
    lang = env.user.company_id.partner_id.lang or 'en_US'
    lang_objs = env['res.lang'].search([('code', '=', lang)])
    if not lang_objs:
        # fall back to English formatting when the language is not installed
        lang_objs = env['res.lang'].search([('code', '=', 'en_US')])
    lang_obj = lang_objs[0]
    res = lang_obj.format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
    if currency_obj:
        if currency_obj.position == 'after':
            res = '%s %s' % (res, currency_obj.symbol)
        elif currency_obj and currency_obj.position == 'before':
            res = '%s %s' % (currency_obj.symbol, res)
    return res
def _consteq(str1, str2):
""" Constant-time string comparison. Suitable to compare bytestrings of fixed,
known length only, because length difference is optimized. """
return len(str1) == len(str2) and sum(ord(x)^ord(y) for x, y in zip(str1, str2)) == 0
consteq = getattr(passlib.utils, 'consteq', _consteq) | allenp/odoo | openerp/tools/misc.py | Python | gpl-3.0 | 38,315 | [
"VisIt"
] | b0e65f3bfe9ed7afa1931b41d2bb118da5dbf6f8e39bd1d207a017d0c75e66f3 |
#!/usr/bin/env python3
# This Python file uses the following encoding: utf-8
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter, OrderedDict
from pprint import pprint
from scipy.optimize import curve_fit
from kural import Thirukkural
from tamil.utf8 import get_letters, get_tamil_words, total_maaththirai
# Define model function to be used to fit to the data above:
def gauss(x, *p):
    """Gaussian ``A * exp(-(x - mu)^2 / (2 sigma^2))`` with parameters
    ``p = (A, mu, sigma)``; used as the model for curve_fit."""
    amplitude, mean, stddev = p
    return amplitude * np.exp(-((x - mean) ** 2) / (2.0 * stddev ** 2))
def main():
    """Count Thirukkural couplets by total maaththirai (metrical duration),
    plot the distribution, and fit a Gaussian to it."""
    eq = Counter()  # maaththirai value -> number of kurals with that value
    eqd = {}        # maaththirai value -> list of kural numbers
    kural = Thirukkural()
    for kural_no in range(1330):
        kural_words = get_tamil_words(get_letters(kural.get_kural_no(kural_no + 1).ta))
        mathirai = sum(total_maaththirai(word) for word in kural_words)
        if eq[mathirai] == 0:
            eqd[mathirai] = [kural_no + 1]
        else:
            eqd[mathirai].append(kural_no + 1)
        eq[mathirai] += 1
    eq_sorted = OrderedDict(sorted(eq.items(), key=lambda x: x))
    pprint(eq_sorted)
    pprint(eq_sorted.values())
    pprint(eqd)
    print("total = ", sum(eq.values()))
    plt.scatter(eq_sorted.keys(), eq_sorted.values())
    plt.ylabel(u"குறட்பாக்கள் எண்ணிக்கை", {"fontname": "Catamaran"})
    plt.xlabel(u"மாத்திரை அளவு", {"fontname": "Catamaran"})  # Arial Unicode MS'})
    # p0 is the initial guess for the fitting coefficients (A, mu and sigma above)
    p0 = [75.0, 20.0, 5.0]
    xs = np.asarray(list(eq_sorted.keys()), dtype=float)
    coeff, var_matrix = curve_fit(gauss, xs, list(eq_sorted.values()), p0=p0)
    # Get the fitted curve. BUG FIX: the original passed a plain list to
    # gauss(), and `list - float` raises TypeError; an ndarray broadcasts.
    hist_fit = gauss(xs, *coeff)
    plt.plot(
        eq_sorted.keys(),
        hist_fit,
        label="Gaussian Fitted data (mean=%g, std=%g)" % (coeff[1], coeff[2]),
    )
    plt.title(
        r"குறள் மாத்திரை வரிசை (Gauss \mu=%g, \sigma=%g)" % (coeff[1], coeff[2]),
        {"fontname": "Catamaran"},
    )
    # Finally, lets get the fitting parameters, i.e. the mean and standard deviation:
    print("Fitted mean = ", coeff[1])
    print("Fitted standard deviation = ", coeff[2])
    plt.show()
if __name__ == "__main__":
    # run the analysis only when executed as a script
    main()
| Ezhil-Language-Foundation/open-tamil | examples/kural_mathirai.py | Python | mit | 2,246 | [
"Gaussian"
] | 2a7ee0e94975108ab8dcb2b6909dcd623f73ec2f43f2eb8f2516174e0af429d0 |
__author__ = 'dalton'
# Grid of the game world: maps (x, y) coordinates to tile instances
# (None for empty cells). Populated by load_tiles().
_world = {}
# Coordinates of the 'StartingRoom' tile; overwritten by load_tiles().
starting_position = (0, 0)
def load_tiles():
    """Parses a file that describes the world space into the _world object

    The map file is a tab-separated grid; each cell holds the name of a
    tile class (empty cell -> no tile). Mutates the module-level _world
    dict and starting_position tuple.
    """
    with open('resources/map.txt', 'r') as f:
        rows = f.readlines()
    x_max = len(rows[0].split('\t'))  # Assumes all rows contain the same number of tabs
    for y in range(len(rows)):
        cols = rows[y].split('\t')
        for x in range(x_max):
            tile_name = cols[x].replace('\n', '')  # Windows users may need to replace '\r\n'
            if tile_name == 'StartingRoom':
                # remember where the player begins
                global starting_position
                starting_position = (x, y)
            # instantiate the named tile class from the project's `tiles`
            # module; empty names map to None (a void cell)
            _world[(x, y)] = None if tile_name == '' else getattr(__import__('tiles'), tile_name)(x, y)
def tile_exists(x, y):
    """Return the tile at grid position (x, y), or None when the cell is
    empty or outside the loaded map."""
    coordinates = (x, y)
    return _world.get(coordinates)
| dalrrard/text-adventure-tut | adventuretutorial/world.py | Python | mit | 758 | [
"Dalton"
] | a7e4162fb9c3f7695947d68a2ce66fd2ee4b61adc5844604f053af342ad502d7 |
# -*- coding: utf-8 -*-
"""
A VTK RenderWindowInteractor widget for wxPython.
Find wxPython info at http://wxPython.org
Created by Prabhu Ramachandran, April 2002
Based on wxVTKRenderWindow.py
Fixes and updates by Charl P. Botha 2003-2008
Updated to new wx namespace and some cleaning up by Andrea Gavana,
December 2006
"""
"""
Please see the example at the end of this file.
----------------------------------------
Creation:
wxVTKRenderWindowInteractor(parent, ID, stereo=0, [wx keywords]):
You should create a wx.PySimpleApp() or some other wx**App before
creating the window.
Behaviour:
Uses __getattr__ to make the wxVTKRenderWindowInteractor behave just
like a vtkGenericRenderWindowInteractor.
----------------------------------------
"""
# import usual libraries
import math
import sys
import os
# Defaults used when wx is unavailable or no special base class is needed.
baseClass = object
_useCapture = None
try:
    import wx
    # a few configuration items, see what works best on your system
    # Use GLCanvas as base class instead of wx.Window.
    # This is sometimes necessary under wxGTK or the image is blank.
    # (in wxWindows 2.3.1 and earlier, the GLCanvas had scroll bars)
    if wx.Platform == "__WXGTK__":
        import wx.glcanvas
        baseClass = wx.glcanvas.GLCanvas
    # Keep capturing mouse after mouse is dragged out of window
    # (in wxGTK 2.3.2 there is a bug that keeps this from working,
    # but it is only relevant in wxGTK if there are multiple windows)
    _useCapture = (wx.Platform == "__WXMSW__")
except ImportError:
    import traceback
    #traceback.print_exc(file=sys.stdout)
    # fixed message: the original had a stray apostrophe ("installed'.")
    sys.stderr.write("No proper wx installed.\n")
try:
    import vtk
except Exception:
    # fixed message: same stray apostrophe as above
    sys.stderr.write("No proper vtk installed.\n")
class EventTimer(wx.Timer):
    """Minimal wx.Timer forwarding its expiry to a render window
    interactor's TimerEvent()."""
    def __init__(self, iren):
        """
        Default class constructor.

        @param iren: current render window
        """
        super(EventTimer, self).__init__()
        self.iren = iren

    def Notify(self):
        """The timer has expired: notify the interactor."""
        self.iren.TimerEvent()
class wxVTKRenderWindowInteractor(baseClass):
    """
    A wxRenderWindow for wxPython.
    Use GetRenderWindow() to get the vtkRenderWindow.
    Create with the keyword stereo=1 in order to
    generate a stereo-capable window.
    """

    # class variable that can also be used to request instances that use
    # stereo; this is overridden by the stereo=1/0 parameter.  If you set
    # it to True, the NEXT instantiated object will attempt to allocate a
    # stereo visual.  E.g.:
    # wxVTKRenderWindowInteractor.USE_STEREO = True
    # myRWI = wxVTKRenderWindowInteractor(parent, -1)
    USE_STEREO = False
    def __init__(self, parent, ID, *args, **kw):
        """
        Default class constructor.

        @param parent: parent window
        @param ID: window id
        @param **kw: wxPython keywords (position, size, style) plus the
        'stereo' keyword
        """
        # private attributes
        # NOTE(review): gates rendering while the widget is disabled;
        # presumably toggled elsewhere in the class — confirm.
        self.__RenderWhenDisabled = 0
        # First do special handling of some keywords:
        # stereo, position, size, style
        stereo = 0
        if 'stereo' in kw:
            if kw['stereo']:
                stereo = 1
            del kw['stereo']
        elif self.USE_STEREO:
            stereo = 1
        position, size = wx.DefaultPosition, wx.DefaultSize
        if 'position' in kw:
            position = kw['position']
            del kw['position']
        if 'size' in kw:
            size = kw['size']
            del kw['size']
        # wx.WANTS_CHARS says to give us e.g. TAB
        # wx.NO_FULL_REPAINT_ON_RESIZE cuts down resize flicker under GTK
        style = wx.WANTS_CHARS | wx.NO_FULL_REPAINT_ON_RESIZE
        if 'style' in kw:
            style = style | kw['style']
            del kw['style']
        # the enclosing frame must be shown under GTK or the windows
        # don't connect together properly
        if wx.Platform != '__WXMSW__':
            l = []
            p = parent
            while p: # make a list of all parents
                l.append(p)
                p = p.GetParent()
            l.reverse() # sort list into descending order
            for p in l:
                p.Show(1)
        if baseClass.__name__ == 'GLCanvas':
            # code added by cpbotha to enable stereo and double
            # buffering correctly where the user requests this; remember
            # that the glXContext in this case is NOT allocated by VTK,
            # but by WX, hence all of this.
            # Initialize GLCanvas with correct attriblist
            attribList = [wx.glcanvas.WX_GL_RGBA,
                          wx.glcanvas.WX_GL_MIN_RED, 1,
                          wx.glcanvas.WX_GL_MIN_GREEN, 1,
                          wx.glcanvas.WX_GL_MIN_BLUE, 1,
                          wx.glcanvas.WX_GL_DEPTH_SIZE, 16,
                          wx.glcanvas.WX_GL_DOUBLEBUFFER]
            if stereo:
                attribList.append(wx.glcanvas.WX_GL_STEREO)
            try:
                baseClass.__init__(self, parent, id = ID, pos = position, size = size, style = style,
                                   attribList=attribList)
            except wx.PyAssertionError:
                # visual couldn't be allocated, so we go back to default
                baseClass.__init__(self, parent, ID, position, size, style)
                if stereo:
                    # and make sure everyone knows that the stereo
                    # visual wasn't set.
                    stereo = 0
        else:
            baseClass.__init__(self, parent, ID, position, size, style)
        # create the RenderWindow and initialize it
        self._Iren = vtk.vtkGenericRenderWindowInteractor()
        self._Iren.SetRenderWindow( vtk.vtkRenderWindow() )
        self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
        self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
        self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
                                                 self.CursorChangedEvent)
        # size may be a wx.Size or a plain (w, h) sequence
        try:
            self._Iren.GetRenderWindow().SetSize(size.width, size.height)
        except AttributeError:
            self._Iren.GetRenderWindow().SetSize(size[0], size[1])
        if stereo:
            self._Iren.GetRenderWindow().StereoCapableWindowOn()
            self._Iren.GetRenderWindow().SetStereoTypeToCrystalEyes()
        self.__handle = None
        self.BindEvents()
        # with this, we can make sure that the reparenting logic in
        # Render() isn't called before the first OnPaint() has
        # successfully been run (and set up the VTK/WX display links)
        self.__has_painted = False
        # set when we have captured the mouse.
        self._own_mouse = False
        # used to store WHICH mouse button led to mouse capture
        self._mouse_capture_button = 0
        # A mapping for cursor changes.
        self._cursor_map = {0: wx.CURSOR_ARROW, # VTK_CURSOR_DEFAULT
                            1: wx.CURSOR_ARROW, # VTK_CURSOR_ARROW
                            2: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZENE
                            3: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZENWSE
                            4: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZESW
                            5: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZESE
                            6: wx.CURSOR_SIZENS, # VTK_CURSOR_SIZENS
                            7: wx.CURSOR_SIZEWE, # VTK_CURSOR_SIZEWE
                            8: wx.CURSOR_SIZING, # VTK_CURSOR_SIZEALL
                            9: wx.CURSOR_HAND, # VTK_CURSOR_HAND
                            10: wx.CURSOR_CROSS, # VTK_CURSOR_CROSSHAIR
                            }
    def BindEvents(self):
        """Binds all the necessary events for navigation, sizing,
        drawing.

        Called once from __init__; each wx event is forwarded to the
        matching On* handler which translates it for VTK.
        """
        # refresh window by doing a Render
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        # turn off background erase to reduce flicker
        self.Bind(wx.EVT_ERASE_BACKGROUND, lambda e: None)
        # Bind the events to the event converters
        self.Bind(wx.EVT_RIGHT_DOWN, self.OnButtonDown)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnButtonDown)
        self.Bind(wx.EVT_MIDDLE_DOWN, self.OnButtonDown)
        self.Bind(wx.EVT_RIGHT_UP, self.OnButtonUp)
        self.Bind(wx.EVT_LEFT_UP, self.OnButtonUp)
        self.Bind(wx.EVT_MIDDLE_UP, self.OnButtonUp)
        self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
        # If we use EVT_KEY_DOWN instead of EVT_CHAR, capital versions
        # of all characters are always returned.  EVT_CHAR also performs
        # other necessary keyboard-dependent translations.
        self.Bind(wx.EVT_CHAR, self.OnKeyDown)
        self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        # the wx 2.8.7.1 documentation states that you HAVE to handle
        # this event if you make use of CaptureMouse, which we do.
        if _useCapture and hasattr(wx, 'EVT_MOUSE_CAPTURE_LOST'):
            self.Bind(wx.EVT_MOUSE_CAPTURE_LOST,
                      self.OnMouseCaptureLost)
def __getattr__(self, attr):
"""Makes the object behave like a vtkGenericRenderWindowInteractor."""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError(self.__class__.__name__ + \
" has no attribute named " + attr)
def CreateTimer(self, obj, evt):
"""Creates a timer."""
self._timer = EventTimer(self)
self._timer.Start(10, True)
def DestroyTimer(self, obj, evt):
"""The timer is a one shot timer so will expire automatically."""
return 1
def _CursorChangedEvent(self, obj, evt):
"""Change the wx cursor if the renderwindow's cursor was changed."""
cur = self._cursor_map[obj.GetCurrentCursor()]
c = wx.StockCursor(cur)
self.SetCursor(c)
def CursorChangedEvent(self, obj, evt):
"""Called when the CursorChangedEvent fires on the render window."""
# This indirection is needed since when the event fires, the
# current cursor is not yet set so we defer this by which time
# the current cursor should have been set.
wx.CallAfter(self._CursorChangedEvent, obj, evt)
def HideCursor(self):
"""Hides the cursor."""
c = wx.StockCursor(wx.CURSOR_BLANK)
self.SetCursor(c)
def ShowCursor(self):
"""Shows the cursor."""
rw = self._Iren.GetRenderWindow()
cur = self._cursor_map[rw.GetCurrentCursor()]
c = wx.StockCursor(cur)
self.SetCursor(c)
    def GetDisplayId(self):
        """
        Function to get X11 Display ID from WX and return it in a format
        that can be used by VTK Python.

        We query the X11 Display with a new call that was added in wxPython
        2.6.0.1.  The call returns a SWIG object which we can query for the
        address and subsequently turn into an old-style SWIG-mangled string
        representation to pass to VTK.

        Returns None when no X11 display is available (non-X11 platforms,
        or wxPython too old to expose wx.GetXDisplay).
        """
        d = None
        try:
            d = wx.GetXDisplay()
        except NameError:
            # wx.GetXDisplay was added by Robin Dunn in wxPython 2.6.0.1
            # if it's not available, we can't pass it.  In general,
            # things will still work; on some setups, it'll break.
            pass
        else:
            # wx returns None on platforms where wx.GetXDisplay is not relevant
            if d:
                d = hex(d)
                # On wxPython-2.6.3.2 and above there is no leading '0x'.
                if not d.startswith('0x'):
                    d = '0x' + d
                # we now have 0xdeadbeef
                # VTK wants it as: _deadbeef_void_p (pre-SWIG-1.3 style)
                d = '_%s_%s' % (d[2:], 'void_p')
        return d
def OnMouseCaptureLost(self, event):
"""
This is signalled when we lose mouse capture due to an external event,
such as when a dialog box is shown.
See the wx documentation.
"""
# the documentation seems to imply that by this time we've
# already lost capture. I have to assume that we don't need
# to call ReleaseMouse ourselves.
if _useCapture and self._own_mouse:
self._own_mouse = False
    def OnPaint(self,event):
        """Handles the wx.EVT_PAINT event for wxVTKRenderWindowInteractor.

        On the first paint, wires the native window handle (and X11
        display, where relevant) into the VTK render window before
        rendering.
        """
        # wx should continue event processing after this handler.
        # We call this BEFORE Render(), so that if Render() raises
        # an exception, wx doesn't re-call OnPaint repeatedly.
        event.Skip()
        # a wx.PaintDC must exist while handling EVT_PAINT, even if unused
        dc = wx.PaintDC(self)
        # make sure the RenderWindow is sized correctly
        # NOTE(review): GetSizeTuple is classic-wxPython API — confirm
        # against the wx version in use.
        self._Iren.GetRenderWindow().SetSize(self.GetSizeTuple())
        # Tell the RenderWindow to render inside the wx.Window.
        if not self.__handle:
            # on relevant platforms, set the X11 Display ID
            d = self.GetDisplayId()
            if d:
                self._Iren.GetRenderWindow().SetDisplayId(d)
            # store the handle
            self.__handle = self.GetHandle()
            # and give it to VTK
            self._Iren.GetRenderWindow().SetWindowInfo(str(self.__handle))
            # now that we've painted once, the Render() reparenting logic
            # is safe
            self.__has_painted = True
        self.Render()
def OnSize(self,event):
"""Handles the wx.EVT_SIZE event for wxVTKRenderWindowInteractor."""
# event processing should continue (we call this before the
# Render(), in case it raises an exception)
event.Skip()
try:
width, height = event.GetSize()
except:
width = event.GetSize().width
height = event.GetSize().height
self._Iren.SetSize(width, height)
self._Iren.ConfigureEvent()
# this will check for __handle
self.Render()
def OnMotion(self,event):
"""Handles the wx.EVT_MOTION event for wxVTKRenderWindowInteractor."""
# event processing should continue
# we call this early in case any of the VTK code raises an
# exception.
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.MouseMoveEvent()
def OnEnter(self,event):
"""Handles the wx.EVT_ENTER_WINDOW event for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.EnterEvent()
def OnLeave(self,event):
"""Handles the wx.EVT_LEAVE_WINDOW event for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.LeaveEvent()
def OnButtonDown(self, event):
    """Handles the wx.EVT_LEFT/RIGHT/MIDDLE_DOWN events for
    wxVTKRenderWindowInteractor."""
    # Skip() must come first: on wxPython 2.6.0.1 omitting it breaks the
    # initial focus (keypresses are ignored until the widget is
    # re-focused), and doing it before the VTK calls keeps the wx event
    # chain alive even if they raise.
    event.Skip()
    self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
                                        event.ControlDown(),
                                        event.ShiftDown(),
                                        chr(0), 0, None)
    if event.RightDown():
        self._Iren.RightButtonPressEvent()
        button = 'Right'
    elif event.LeftDown():
        self._Iren.LeftButtonPressEvent()
        button = 'Left'
    elif event.MiddleDown():
        self._Iren.MiddleButtonPressEvent()
        button = 'Middle'
    else:
        button = 0
    # Remember which button grabbed the mouse and capture it until that
    # button is released; only capture if nobody holds it already.
    if _useCapture and not self._own_mouse:
        self._own_mouse = True
        self._mouse_capture_button = button
        self.CaptureMouse()
def OnButtonUp(self, event):
    """Handles the wx.EVT_LEFT/RIGHT/MIDDLE_UP events for
    wxVTKRenderWindowInteractor."""
    # event processing should continue
    event.Skip()
    if event.RightUp():
        button = 'Right'
    elif event.LeftUp():
        button = 'Left'
    elif event.MiddleUp():
        button = 'Middle'
    else:
        button = 0
    # Release the mouse grab as early as possible: if one of the VTK
    # handlers below raised while we still held the capture, the mouse
    # would never be released.
    if _useCapture and self._own_mouse and \
            button == self._mouse_capture_button:
        self.ReleaseMouse()
        self._own_mouse = False
    self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
                                        event.ControlDown(),
                                        event.ShiftDown(),
                                        chr(0), 0, None)
    if button == 'Right':
        self._Iren.RightButtonReleaseEvent()
    elif button == 'Left':
        self._Iren.LeftButtonReleaseEvent()
    elif button == 'Middle':
        self._Iren.MiddleButtonReleaseEvent()
def OnMouseWheel(self, event):
    """Handles the wx.EVT_MOUSEWHEEL event for
    wxVTKRenderWindowInteractor."""
    # event processing should continue
    event.Skip()
    self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
                                        event.ControlDown(),
                                        event.ShiftDown(),
                                        chr(0), 0, None)
    # positive rotation means the wheel was rolled away from the user
    forward = event.GetWheelRotation() > 0
    if forward:
        self._Iren.MouseWheelForwardEvent()
    else:
        self._Iren.MouseWheelBackwardEvent()
def OnKeyDown(self, event):
    """Handles the wx.EVT_KEY_DOWN event for
    wxVTKRenderWindowInteractor."""
    # event processing should continue
    event.Skip()
    ctrl = event.ControlDown()
    shift = event.ShiftDown()
    keycode = event.GetKeyCode()
    # only codes below 256 map onto a plain character
    key = chr(keycode) if keycode < 256 else chr(0)
    # wxPython 2.6.0.1 does not return a valid event.Get{X,Y}() for this
    # event, so use the interactor's cached position instead.
    x, y = self._Iren.GetEventPosition()
    self._Iren.SetEventInformation(x, y, ctrl, shift, key, 0, None)
    self._Iren.KeyPressEvent()
    self._Iren.CharEvent()
def OnKeyUp(self, event):
    """Handles the wx.EVT_KEY_UP event for wxVTKRenderWindowInteractor."""
    # event processing should continue
    event.Skip()
    keycode = event.GetKeyCode()
    # only codes below 256 map onto a plain character
    key = chr(keycode) if keycode < 256 else chr(0)
    self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
                                        event.ControlDown(),
                                        event.ShiftDown(),
                                        key, 0, None)
    self._Iren.KeyReleaseEvent()
def GetRenderWindow(self):
    """Returns the render window (vtkRenderWindow)."""
    window = self._Iren.GetRenderWindow()
    return window
def Render(self):
    """Actually renders the VTK scene on screen.

    Rendering is skipped when __RenderWhenDisabled is false and the
    top-level parent frame is disabled (see SetRenderWhenDisabled).
    If the widget has been reparented since the last paint, the render
    window is remapped onto the new native window before rendering.
    """
    RenderAllowed = 1
    if not self.__RenderWhenDisabled:
        # the user doesn't want us to render when the toplevel frame
        # is disabled - first find the top level parent
        topParent = wx.GetTopLevelParent(self)
        if topParent:
            # if it exists, check whether it's enabled
            # if it's not enabled, RenderAllowed will be false
            RenderAllowed = topParent.IsEnabled()
    if RenderAllowed:
        if self.__handle and self.__handle == self.GetHandle():
            # common case: still bound to the same native window
            self._Iren.GetRenderWindow().Render()
        elif self.GetHandle() and self.__has_painted:
            # this means the user has reparented us; let's adapt to the
            # new situation by doing the WindowRemap dance
            self._Iren.GetRenderWindow().SetNextWindowInfo(
                str(self.GetHandle()))
            # make sure the DisplayId is also set correctly
            d = self.GetDisplayId()
            if d:
                self._Iren.GetRenderWindow().SetDisplayId(d)
            # do the actual remap with the new parent information
            self._Iren.GetRenderWindow().WindowRemap()
            # store the new situation
            self.__handle = self.GetHandle()
            self._Iren.GetRenderWindow().Render()
def SetRenderWhenDisabled(self, newValue):
    """
    Change value of __RenderWhenDisabled ivar.

    If __RenderWhenDisabled is false (the default), this widget will not
    call Render() on the RenderWindow if the top level frame (i.e. the
    containing frame) has been disabled.

    This prevents recursive rendering during wx.SafeYield() calls.
    wx.SafeYield() can be called during the ProgressMethod() callback of
    a VTK object to have progress bars and other GUI elements updated -
    it does this by disabling all windows (disallowing user-input to
    prevent re-entrancy of code) and then handling all outstanding
    GUI events.

    However, this often triggers an OnPaint() method for wxVTKRWIs,
    resulting in a Render(), resulting in Update() being called whilst
    still in progress.
    """
    # coerce to a real bool so later truth tests are unambiguous
    self.__RenderWhenDisabled = bool(newValue)
#--------------------------------------------------------------------
def wxVTKRenderWindowInteractorConeExample():
    """Like it says, just a simple example."""
    # every wx app needs an app object
    app = wx.PySimpleApp()

    # build the top-level frame and lay the interactor widget out in it
    frame = wx.Frame(None, -1, "wxVTKRenderWindowInteractor", size=(400, 400))
    widget = wxVTKRenderWindowInteractor(frame, -1)
    sizer = wx.BoxSizer(wx.VERTICAL)
    sizer.Add(widget, 1, wx.EXPAND)
    frame.SetSizer(sizer)
    frame.Layout()

    # It would be more correct (API-wise) to call widget.Initialize() and
    # widget.Start() here, but Initialize() calls RenderWindow.Render(),
    # which would run before the RenderWindow is hooked up to the
    # wx-created context; that causes flashing on some platforms and
    # downright breaks things on others.  Calling Enable() instead leaves
    # the RWI::Initialized ivar unset, which is harmless in THIS SPECIFIC
    # CASE.
    widget.Enable(1)
    widget.AddObserver("ExitEvent", lambda o, e, f=frame: f.Close())

    # wire up a minimal VTK pipeline: cone -> mapper -> actor -> renderer
    renderer = vtk.vtkRenderer()
    widget.GetRenderWindow().AddRenderer(renderer)
    cone = vtk.vtkConeSource()
    cone.SetResolution(8)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInput(cone.GetOutput())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    renderer.AddActor(actor)

    # show the window and enter the wx event loop
    frame.Show()
    app.MainLoop()
# run the demo only when executed as a script (harmless on import)
if __name__ == "__main__":
    wxVTKRenderWindowInteractorConeExample()
| florian-wagner/gimli | python/pygimli/gui/vtk/wxVTKRenderWindowInteractor.py | Python | gpl-3.0 | 24,830 | [
"VTK"
] | ea8805bb463b794bd7c28385379e1b49f46ef1264cb33a9227d9802f6b8deb9a |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
from unittest import TestCase, main
from collections import defaultdict
import numpy as np
import numpy.testing as npt
from scipy.stats import pearsonr
from skbio import DistanceMatrix, TreeNode
from skbio.tree import (DuplicateNodeError, NoLengthError,
TreeError, MissingNodeError, NoParentError)
from skbio.util import RepresentationWarning
class TreeNodeSubclass(TreeNode):
    """Trivial TreeNode subclass.

    Presumably used by tests to verify that TreeNode operations handle
    subclasses correctly -- its usage is not visible in this chunk.
    """

    pass
class TreeTests(TestCase):
def setUp(self):
"""Prep the self"""
self.simple_t = TreeNode.read(io.StringIO("((a,b)i1,(c,d)i2)root;"))
nodes = dict([(x, TreeNode(x)) for x in 'abcdefgh'])
nodes['a'].append(nodes['b'])
nodes['b'].append(nodes['c'])
nodes['c'].append(nodes['d'])
nodes['c'].append(nodes['e'])
nodes['c'].append(nodes['f'])
nodes['f'].append(nodes['g'])
nodes['a'].append(nodes['h'])
self.TreeRoot = nodes['a']
def rev_f(items):
items.reverse()
def rotate_f(items):
tmp = items[-1]
items[1:] = items[:-1]
items[0] = tmp
self.rev_f = rev_f
self.rotate_f = rotate_f
self.complex_tree = TreeNode.read(io.StringIO(
"(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);"))
def test_bug_issue_1416(self):
tree = TreeNode.read(['(((a,b,f,g),c),d);'])
new_tree = tree.shear(['a', 'b', 'c', 'f'])
exp = {'a', 'b', 'c', 'f'}
obs = {n.name for n in new_tree.tips()}
self.assertEqual(obs, exp)
self.assertEqual(id(new_tree), id(new_tree.children[0].parent))
self.assertEqual(id(new_tree), id(new_tree.children[1].parent))
def test_observed_node_counts(self):
"""returns observed nodes counts given vector of otu observation counts
"""
# no OTUs observed
otu_counts = {}
expected = defaultdict(int)
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
# error on zero count(s)
otu_counts = {'a': 0}
self.assertRaises(ValueError, self.simple_t.observed_node_counts,
otu_counts)
otu_counts = {'a': 0, 'b': 0, 'c': 0, 'd': 0}
self.assertRaises(ValueError, self.simple_t.observed_node_counts,
otu_counts)
# all OTUs observed once
otu_counts = {'a': 1, 'b': 1, 'c': 1, 'd': 1}
expected = defaultdict(int)
expected[self.simple_t.find('root')] = 4
expected[self.simple_t.find('i1')] = 2
expected[self.simple_t.find('i2')] = 2
expected[self.simple_t.find('a')] = 1
expected[self.simple_t.find('b')] = 1
expected[self.simple_t.find('c')] = 1
expected[self.simple_t.find('d')] = 1
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
# some OTUs observed twice
otu_counts = {'a': 2, 'b': 1, 'c': 1, 'd': 1}
expected = defaultdict(int)
expected[self.simple_t.find('root')] = 5
expected[self.simple_t.find('i1')] = 3
expected[self.simple_t.find('i2')] = 2
expected[self.simple_t.find('a')] = 2
expected[self.simple_t.find('b')] = 1
expected[self.simple_t.find('c')] = 1
expected[self.simple_t.find('d')] = 1
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
otu_counts = {'a': 2, 'b': 1, 'c': 1, 'd': 2}
expected = defaultdict(int)
expected[self.simple_t.find('root')] = 6
expected[self.simple_t.find('i1')] = 3
expected[self.simple_t.find('i2')] = 3
expected[self.simple_t.find('a')] = 2
expected[self.simple_t.find('b')] = 1
expected[self.simple_t.find('c')] = 1
expected[self.simple_t.find('d')] = 2
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
# some OTUs observed, others not observed
otu_counts = {'a': 2, 'b': 1}
expected = defaultdict(int)
expected[self.simple_t.find('root')] = 3
expected[self.simple_t.find('i1')] = 3
expected[self.simple_t.find('a')] = 2
expected[self.simple_t.find('b')] = 1
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
otu_counts = {'d': 1}
expected = defaultdict(int)
expected[self.simple_t.find('root')] = 1
expected[self.simple_t.find('i2')] = 1
expected[self.simple_t.find('d')] = 1
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
# error on non-tips
otu_counts = {'a': 2, 'e': 1}
self.assertRaises(MissingNodeError, self.simple_t.observed_node_counts,
otu_counts)
otu_counts = {'a': 2, 'i1': 1}
self.assertRaises(MissingNodeError, self.simple_t.observed_node_counts,
otu_counts)
# test with another tree
otu_counts = {}
expected = defaultdict(int)
self.assertEqual(self.complex_tree.observed_node_counts(otu_counts),
expected)
otu_counts = {'e': 42, 'f': 1}
expected[self.complex_tree.root()] = 43
expected[self.complex_tree.find('int5')] = 43
expected[self.complex_tree.find('e')] = 42
expected[self.complex_tree.find('f')] = 1
self.assertEqual(self.complex_tree.observed_node_counts(otu_counts),
expected)
def test_count(self):
"""Get node counts"""
exp = 7
obs = self.simple_t.count()
self.assertEqual(obs, exp)
exp = 4
obs = self.simple_t.count(tips=True)
self.assertEqual(obs, exp)
def test_copy(self):
"""copy a tree"""
self.simple_t.children[0].length = 1.2
self.simple_t.children[1].children[0].length = 0.5
cp = self.simple_t.copy()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
def test_append(self):
"""Append a node to a tree"""
second_tree = TreeNode.read(io.StringIO("(x,y)z;"))
self.simple_t.append(second_tree)
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(self.simple_t.children[2].name, 'z')
self.assertEqual(len(self.simple_t.children), 3)
self.assertEqual(self.simple_t.children[2].children[0].name, 'x')
self.assertEqual(self.simple_t.children[2].children[1].name, 'y')
self.assertEqual(second_tree.parent, self.simple_t)
def test_extend(self):
"""Extend a few nodes"""
second_tree = TreeNode.read(io.StringIO("(x1,y1)z1;"))
third_tree = TreeNode.read(io.StringIO("(x2,y2)z2;"))
first_tree = TreeNode.read(io.StringIO("(x1,y1)z1;"))
fourth_tree = TreeNode.read(io.StringIO("(x2,y2)z2;"))
self.simple_t.extend([second_tree, third_tree])
first_tree.extend(fourth_tree.children)
self.assertEqual(0, len(fourth_tree.children))
self.assertEqual(first_tree.children[0].name, 'x1')
self.assertEqual(first_tree.children[1].name, 'y1')
self.assertEqual(first_tree.children[2].name, 'x2')
self.assertEqual(first_tree.children[3].name, 'y2')
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(self.simple_t.children[2].name, 'z1')
self.assertEqual(self.simple_t.children[3].name, 'z2')
self.assertEqual(len(self.simple_t.children), 4)
self.assertEqual(self.simple_t.children[2].children[0].name, 'x1')
self.assertEqual(self.simple_t.children[2].children[1].name, 'y1')
self.assertEqual(self.simple_t.children[3].children[0].name, 'x2')
self.assertEqual(self.simple_t.children[3].children[1].name, 'y2')
self.assertIs(second_tree.parent, self.simple_t)
self.assertIs(third_tree.parent, self.simple_t)
def test_extend_empty(self):
"""Extend on the empty case should work"""
self.simple_t.extend([])
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(len(self.simple_t.children), 2)
def test_iter(self):
"""iter wraps children"""
exp = ['i1', 'i2']
obs = [n.name for n in self.simple_t]
self.assertEqual(obs, exp)
def test_gops(self):
"""Basic TreeNode operations should work as expected"""
p = TreeNode()
self.assertEqual(str(p), ';\n')
p.name = 'abc'
self.assertEqual(str(p), 'abc;\n')
p.length = 3
self.assertEqual(str(p), 'abc:3;\n') # don't suppress branch from root
q = TreeNode()
p.append(q)
self.assertEqual(str(p), '()abc:3;\n')
r = TreeNode()
q.append(r)
self.assertEqual(str(p), '(())abc:3;\n')
r.name = 'xyz'
self.assertEqual(str(p), '((xyz))abc:3;\n')
q.length = 2
self.assertEqual(str(p), '((xyz):2)abc:3;\n')
def test_pop(self):
"""Pop off a node"""
second_tree = TreeNode.read(io.StringIO("(x1,y1)z1;"))
third_tree = TreeNode.read(io.StringIO("(x2,y2)z2;"))
self.simple_t.extend([second_tree, third_tree])
i1 = self.simple_t.pop(0)
z2 = self.simple_t.pop()
self.assertEqual(i1.name, 'i1')
self.assertEqual(z2.name, 'z2')
self.assertEqual(i1.children[0].name, 'a')
self.assertEqual(i1.children[1].name, 'b')
self.assertEqual(z2.children[0].name, 'x2')
self.assertEqual(z2.children[1].name, 'y2')
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'z1')
self.assertEqual(len(self.simple_t.children), 2)
def test_remove(self):
"""Remove nodes"""
self.assertTrue(self.simple_t.remove(self.simple_t.children[0]))
self.assertEqual(len(self.simple_t.children), 1)
n = TreeNode()
self.assertFalse(self.simple_t.remove(n))
def test_remove_deleted(self):
"""Remove nodes by function"""
def f(node):
return node.name in ['b', 'd']
self.simple_t.remove_deleted(f)
exp = "((a)i1,(c)i2)root;\n"
obs = str(self.simple_t)
self.assertEqual(obs, exp)
def test_adopt(self):
"""Adopt a node!"""
n1 = TreeNode(name='n1')
n2 = TreeNode(name='n2')
n3 = TreeNode(name='n3')
self.simple_t._adopt(n1)
self.simple_t.children[-1]._adopt(n2)
n2._adopt(n3)
# adopt doesn't update .children
self.assertEqual(len(self.simple_t.children), 2)
self.assertIs(n1.parent, self.simple_t)
self.assertIs(n2.parent, self.simple_t.children[-1])
self.assertIs(n3.parent, n2)
def test_remove_node(self):
"""Remove a node by index"""
n = self.simple_t._remove_node(-1)
self.assertEqual(n.parent, None)
self.assertEqual(len(self.simple_t.children), 1)
self.assertEqual(len(n.children), 2)
self.assertNotIn(n, self.simple_t.children)
def test_shear_prune_parent_dropped(self):
bugtree = "((a,b),((c,d),(e,f)));"
to_keep = ['c', 'd']
exp = "(c,d);\n"
obs = str(TreeNode.read(io.StringIO(bugtree)).shear(to_keep))
self.assertEqual(obs, exp)
def test_prune_nested_single_descendent(self):
bugtree = "(((a,b)));"
exp = "(a,b);\n"
t = TreeNode.read(io.StringIO(bugtree))
t.prune()
obs = str(t)
self.assertEqual(obs, exp)
def test_prune_root_single_desc(self):
t = TreeNode.read(["((a,b)c)extra;"])
exp = "(a,b)c;\n"
t.prune()
self.assertEqual(str(t), exp)
def test_prune(self):
"""Collapse single descendent nodes"""
# check the identity case
cp = self.simple_t.copy()
self.simple_t.prune()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
# create a single descendent by removing tip 'a'
n = self.simple_t.children[0]
n.remove(n.children[0])
self.simple_t.prune()
self.assertEqual(len(self.simple_t.children), 2)
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'b')
def test_prune_length(self):
"""Collapse single descendent nodes"""
# check the identity case
cp = self.simple_t.copy()
self.simple_t.prune()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
for n in self.simple_t.traverse():
n.length = 1.0
# create a single descendent by removing tip 'a'
n = self.simple_t.children[0]
n.remove(n.children[0])
self.simple_t.prune()
self.assertEqual(len(self.simple_t.children), 2)
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'b')
self.assertEqual(self.simple_t.children[1].length, 2.0)
def test_subset(self):
"""subset should return set of leaves that descends from node"""
t = self.simple_t
self.assertEqual(t.subset(), frozenset('abcd'))
c = t.children[0]
self.assertEqual(c.subset(), frozenset('ab'))
leaf = c.children[1]
self.assertEqual(leaf.subset(), frozenset(''))
def test_subsets(self):
"""subsets should return all subsets descending from a set"""
t = self.simple_t
self.assertEqual(t.subsets(), frozenset(
[frozenset('ab'), frozenset('cd')]))
def test_is_tip(self):
"""see if we're a tip or not"""
self.assertFalse(self.simple_t.is_tip())
self.assertFalse(self.simple_t.children[0].is_tip())
self.assertTrue(self.simple_t.children[0].children[0].is_tip())
def test_is_root(self):
"""see if we're at the root or not"""
self.assertTrue(self.simple_t.is_root())
self.assertFalse(self.simple_t.children[0].is_root())
self.assertFalse(self.simple_t.children[0].children[0].is_root())
def test_root(self):
"""Get the root!"""
root = self.simple_t
self.assertIs(root, self.simple_t.root())
self.assertIs(root, self.simple_t.children[0].root())
self.assertIs(root, self.simple_t.children[1].children[1].root())
def test_invalidate_lookup_caches(self):
root = self.simple_t
root.create_caches()
self.assertNotEqual(root._tip_cache, {})
self.assertNotEqual(root._non_tip_cache, {})
root.invalidate_caches()
self.assertEqual(root._tip_cache, {})
self.assertEqual(root._non_tip_cache, {})
def test_invalidate_attr_caches(self):
tree = TreeNode.read(io.StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
def f(n):
return [n.name] if n.is_tip() else []
tree.cache_attr(f, 'tip_names')
tree.invalidate_caches()
for n in tree.traverse(include_self=True):
self.assertFalse(hasattr(n, 'tip_names'))
def test_create_caches_duplicate_tip_names(self):
with self.assertRaises(DuplicateNodeError):
TreeNode.read(io.StringIO('(a, a);')).create_caches()
def test_find_all(self):
t = TreeNode.read(io.StringIO("((a,b)c,((d,e)c)c,(f,(g,h)c)a)root;"))
exp = [t.children[0],
t.children[1].children[0],
t.children[1],
t.children[2].children[1]]
obs = t.find_all('c')
self.assertEqual(obs, exp)
identity = t.find_all(t)
self.assertEqual(len(identity), 1)
self.assertEqual(identity[0], t)
identity_name = t.find_all('root')
self.assertEqual(len(identity_name), 1)
self.assertEqual(identity_name[0], t)
exp = [t.children[2],
t.children[0].children[0]]
obs = t.find_all('a')
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
t.find_all('missing')
def test_find(self):
"""Find a node in a tree"""
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
exp = t.children[0]
obs = t.find('c')
self.assertEqual(obs, exp)
exp = t.children[0].children[1]
obs = t.find('b')
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
t.find('does not exist')
def test_find_cache_bug(self):
"""First implementation did not force the cache to be at the root"""
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f,(g,h)f);"))
exp_tip_cache_keys = set(['a', 'b', 'd', 'e', 'g', 'h'])
exp_non_tip_cache_keys = set(['c', 'f'])
tip_a = t.children[0].children[0]
tip_a.create_caches()
self.assertEqual(tip_a._tip_cache, {})
self.assertEqual(set(t._tip_cache), exp_tip_cache_keys)
self.assertEqual(set(t._non_tip_cache), exp_non_tip_cache_keys)
self.assertEqual(t._non_tip_cache['f'], [t.children[1], t.children[2]])
def test_find_by_id(self):
"""Find a node by id"""
t1 = TreeNode.read(io.StringIO("((,),(,,));"))
t2 = TreeNode.read(io.StringIO("((,),(,,));"))
exp = t1.children[1]
obs = t1.find_by_id(6) # right inner node with 3 children
self.assertEqual(obs, exp)
exp = t2.children[1]
obs = t2.find_by_id(6) # right inner node with 3 children
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
t1.find_by_id(100)
def test_find_by_func(self):
"""Find nodes by a function"""
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
def func(x):
return x.parent == t.find('c')
exp = ['a', 'b']
obs = [n.name for n in t.find_by_func(func)]
self.assertEqual(obs, exp)
def test_ancestors(self):
"""Get all the ancestors"""
exp = ['i1', 'root']
obs = self.simple_t.children[0].children[0].ancestors()
self.assertEqual([o.name for o in obs], exp)
exp = ['root']
obs = self.simple_t.children[0].ancestors()
self.assertEqual([o.name for o in obs], exp)
exp = []
obs = self.simple_t.ancestors()
self.assertEqual([o.name for o in obs], exp)
def test_siblings(self):
"""Get the siblings"""
exp = []
obs = self.simple_t.siblings()
self.assertEqual(obs, exp)
exp = ['i2']
obs = self.simple_t.children[0].siblings()
self.assertEqual([o.name for o in obs], exp)
exp = ['c']
obs = self.simple_t.children[1].children[1].siblings()
self.assertEqual([o.name for o in obs], exp)
self.simple_t.append(TreeNode(name="foo"))
self.simple_t.append(TreeNode(name="bar"))
exp = ['i1', 'foo', 'bar']
obs = self.simple_t.children[1].siblings()
self.assertEqual([o.name for o in obs], exp)
def test_ascii_art(self):
"""Make some ascii trees"""
# unlabeled internal node
tr = TreeNode.read(io.StringIO("(B:0.2,(C:0.3,D:0.4):0.6)F;"))
obs = tr.ascii_art(show_internal=True, compact=False)
exp = (" /-B\n"
"-F-------|\n"
" | /-C\n"
" \\--------|\n"
" \\-D")
self.assertEqual(obs, exp)
obs = tr.ascii_art(show_internal=True, compact=True)
exp = ("-F------- /-B\n"
" \\-------- /-C\n"
" \\-D")
self.assertEqual(obs, exp)
obs = tr.ascii_art(show_internal=False, compact=False)
exp = (" /-B\n"
"---------|\n"
" | /-C\n"
" \\--------|\n"
" \\-D")
self.assertEqual(obs, exp)
def test_ascii_art_with_support(self):
"""Make some ascii trees with support values"""
tr = TreeNode.read(io.StringIO("(B:0.2,(C:0.3,D:0.4)90:0.6)F;"))
exp = " /-B\n-F-------|\n | /-C\n "\
" \\90------|\n \\-D"
obs = tr.ascii_art(show_internal=True, compact=False)
self.assertEqual(obs, exp)
tr.assign_supports()
obs = tr.ascii_art(show_internal=True, compact=False)
self.assertEqual(obs, exp)
tr = TreeNode.read(io.StringIO("((A,B)75,(C,D)'80:spA');"))
exp = " /-A\n /75------|\n | "\
" \\-B\n---------|\n | /-C\n \\"\
"80:spA--|\n \\-D"
obs = tr.ascii_art(show_internal=True, compact=False)
self.assertEqual(obs, exp)
tr.assign_supports()
obs = tr.ascii_art(show_internal=True, compact=False)
self.assertEqual(obs, exp)
def test_ascii_art_three_children(self):
obs = TreeNode.read(io.StringIO('(a,(b,c,d));')).ascii_art()
self.assertEqual(obs, exp_ascii_art_three_children)
def test_accumulate_to_ancestor(self):
"""Get the distance from a node to its ancestor"""
t = TreeNode.read(io.StringIO(
"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
a = t.find('a')
b = t.find('b')
exp_to_root = 0.1 + 0.3
obs_to_root = a.accumulate_to_ancestor(t)
self.assertEqual(obs_to_root, exp_to_root)
with self.assertRaises(NoParentError):
a.accumulate_to_ancestor(b)
def test_distance_nontip(self):
# example derived from issue #807, credit @wwood
tstr = "((A:1.0,B:2.0)'g__genus1':3.0)root;"
tree = TreeNode.read(io.StringIO(tstr))
self.assertEqual(tree.find('A').distance(tree.find('g__genus1')), 1.0)
def test_distance(self):
"""Get the distance between two nodes"""
t = TreeNode.read(io.StringIO(
"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
tips = sorted([n for n in t.tips()], key=lambda x: x.name)
npt.assert_almost_equal(tips[0].distance(tips[0]), 0.0)
npt.assert_almost_equal(tips[0].distance(tips[1]), 0.3)
npt.assert_almost_equal(tips[0].distance(tips[2]), 1.3)
with self.assertRaises(NoLengthError):
tips[0].distance(tips[3])
npt.assert_almost_equal(tips[1].distance(tips[0]), 0.3)
npt.assert_almost_equal(tips[1].distance(tips[1]), 0.0)
npt.assert_almost_equal(tips[1].distance(tips[2]), 1.4)
with self.assertRaises(NoLengthError):
tips[1].distance(tips[3])
self.assertEqual(tips[2].distance(tips[0]), 1.3)
self.assertEqual(tips[2].distance(tips[1]), 1.4)
self.assertEqual(tips[2].distance(tips[2]), 0.0)
with self.assertRaises(NoLengthError):
tips[2].distance(tips[3])
def test_lowest_common_ancestor(self):
"""TreeNode lowestCommonAncestor should return LCA for set of tips"""
t1 = TreeNode.read(io.StringIO("((a,(b,c)d)e,f,(g,h)i)j;"))
t2 = t1.copy()
t3 = t1.copy()
t4 = t1.copy()
input1 = ['a'] # return self
input2 = ['a', 'b'] # return e
input3 = ['b', 'c'] # return d
input4 = ['a', 'h', 'g'] # return j
exp1 = t1.find('a')
exp2 = t2.find('e')
exp3 = t3.find('d')
exp4 = t4
obs1 = t1.lowest_common_ancestor(input1)
obs2 = t2.lowest_common_ancestor(input2)
obs3 = t3.lowest_common_ancestor(input3)
obs4 = t4.lowest_common_ancestor(input4)
self.assertEqual(obs1, exp1)
self.assertEqual(obs2, exp2)
self.assertEqual(obs3, exp3)
self.assertEqual(obs4, exp4)
# verify multiple calls work
t_mul = t1.copy()
exp_1 = t_mul.find('d')
exp_2 = t_mul.find('i')
obs_1 = t_mul.lowest_common_ancestor(['b', 'c'])
obs_2 = t_mul.lowest_common_ancestor(['g', 'h'])
self.assertEqual(obs_1, exp_1)
self.assertEqual(obs_2, exp_2)
# empty case
with self.assertRaises(ValueError):
t1.lowest_common_ancestor([])
def test_get_max_distance(self):
"""get_max_distance should get max tip distance across tree"""
tree = TreeNode.read(io.StringIO(
"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
dist, nodes = tree.get_max_distance()
npt.assert_almost_equal(dist, 1.6)
self.assertEqual(sorted([n.name for n in nodes]), ['b', 'e'])
def test_set_max_distance(self):
"""set_max_distance sets MaxDistTips across tree"""
tree = TreeNode.read(io.StringIO(
"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
tree._set_max_distance()
tip_a, tip_b = tree.MaxDistTips
self.assertEqual(tip_a[0] + tip_b[0], 1.6)
self.assertEqual(sorted([tip_a[1].name, tip_b[1].name]), ['b', 'e'])
def test_set_max_distance_tie_bug(self):
"""Corresponds to #1077"""
s = io.StringIO("((a:1,b:1)c:2,(d:3,e:4)f:5)root;")
t = TreeNode.read(s)
exp = ((3.0, t.find('a')), (9.0, t.find('e')))
# the above tree would trigger an exception in max. The central issue
# was that the data being passed to max were a tuple of tuple:
# ((left_d, left_n), (right_d, right_n))
# the call to max would break in this scenario as it would fall onto
# idx 1 of each tuple to assess the "max".
t._set_max_distance()
self.assertEqual(t.MaxDistTips, exp)
def test_set_max_distance_inplace_modification_bug(self):
"""Corresponds to #1223"""
s = io.StringIO("((a:1,b:1)c:2,(d:3,e:4)f:5)root;")
t = TreeNode.read(s)
exp = [((0.0, t.find('a')), (0.0, t.find('a'))),
((0.0, t.find('b')), (0.0, t.find('b'))),
((1.0, t.find('a')), (1.0, t.find('b'))),
((0.0, t.find('d')), (0.0, t.find('d'))),
((0.0, t.find('e')), (0.0, t.find('e'))),
((3.0, t.find('d')), (4.0, t.find('e'))),
((3.0, t.find('a')), (9.0, t.find('e')))]
t._set_max_distance()
self.assertEqual([n.MaxDistTips for n in t.postorder()], exp)
def test_shear(self):
"""Shear the nodes"""
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
obs = str(t.shear(['G', 'M']))
exp = '(G:3.0,M:3.7);\n'
self.assertEqual(obs, exp)
def test_compare_tip_distances(self):
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(io.StringIO('(((H:1,G:1,O:1):2,R:3):1,X:4);'))
obs = t.compare_tip_distances(t2)
# note: common taxa are H, G, R (only)
m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
m2 = np.array([[0, 2, 6], [2, 0, 6], [6, 6, 0]])
r = pearsonr(m1.flat, m2.flat)[0]
self.assertAlmostEqual(obs, (1 - r) / 2)
def test_compare_tip_distances_sample(self):
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(io.StringIO('(((H:1,G:1,O:1):2,R:3):1,X:4);'))
obs = t.compare_tip_distances(t2, sample=3, shuffle_f=sorted)
# note: common taxa are H, G, R (only)
m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
m2 = np.array([[0, 2, 6], [2, 0, 6], [6, 6, 0]])
r = pearsonr(m1.flat, m2.flat)[0]
self.assertAlmostEqual(obs, (1 - r) / 2)
# 4 common taxa, still picking H, G, R
s = '((H:1,G:1):2,(R:0.5,M:0.7,Q:5):3);'
t = TreeNode.read(io.StringIO(s))
s3 = '(((H:1,G:1,O:1):2,R:3,Q:10):1,X:4);'
t3 = TreeNode.read(io.StringIO(s3))
obs = t.compare_tip_distances(t3, sample=3, shuffle_f=sorted)
def test_compare_tip_distances_no_common_tips(self):
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(io.StringIO('(((Z:1,Y:1,X:1):2,W:3):1,V:4);'))
with self.assertRaises(ValueError):
t.compare_tip_distances(t2)
def test_compare_tip_distances_single_common_tip(self):
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(io.StringIO('(((R:1,Y:1,X:1):2,W:3):1,V:4);'))
self.assertEqual(t.compare_tip_distances(t2), 1)
self.assertEqual(t2.compare_tip_distances(t), 1)
def test_tip_tip_distances_endpoints(self):
"""Test getting specifc tip distances with tipToTipDistances"""
# endpoints may be given either as node objects or as tip names;
# both must yield the same DistanceMatrix.
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
nodes = [t.find('H'), t.find('G'), t.find('M')]
names = ['H', 'G', 'M']
exp = DistanceMatrix(np.array([[0, 2.0, 6.7],
[2.0, 0, 6.7],
[6.7, 6.7, 0.0]]), ['H', 'G', 'M'])
obs = t.tip_tip_distances(endpoints=names)
self.assertEqual(obs, exp)
obs = t.tip_tip_distances(endpoints=nodes)
self.assertEqual(obs, exp)
def test_tip_tip_distances_non_tip_endpoints(self):
# naming an internal node ('foo') as an endpoint is an error
t = TreeNode.read(io.StringIO('((H:1,G:1)foo:2,(R:0.5,M:0.7):3);'))
with self.assertRaises(ValueError):
t.tip_tip_distances(endpoints=['foo'])
def test_tip_tip_distances_no_length(self):
# a tree with no branch lengths behaves like an all-zero-length
# tree, and a RepresentationWarning is emitted; the input tree's
# lengths must remain None (not mutated to 0).
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
exp_t = TreeNode.read(io.StringIO("((a:0,b:0)c:0,(d:0,e:0)f:0);"))
exp_t_dm = exp_t.tip_tip_distances()
t_dm = npt.assert_warns(RepresentationWarning, t.tip_tip_distances)
self.assertEqual(t_dm, exp_t_dm)
for node in t.preorder():
self.assertIs(node.length, None)
def test_tip_tip_distances_missing_length(self):
# missing lengths are treated as 0 (with a RepresentationWarning)
t = TreeNode.read(io.StringIO("((a,b:6)c:4,(d,e:0)f);"))
exp_t = TreeNode.read(io.StringIO("((a:0,b:6)c:4,(d:0,e:0)f:0);"))
exp_t_dm = exp_t.tip_tip_distances()
t_dm = npt.assert_warns(RepresentationWarning, t.tip_tip_distances)
self.assertEqual(t_dm, exp_t_dm)
def test_neighbors(self):
"""Get neighbors of a node"""
# neighbors() = children + parent (if any); `ignore` drops one node.
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
exp = t.children
obs = t.neighbors()
self.assertEqual(obs, exp)
exp = t.children[0].children + [t]
obs = t.children[0].neighbors()
self.assertEqual(obs, exp)
exp = [t.children[0].children[0]] + [t]
obs = t.children[0].neighbors(ignore=t.children[0].children[1])
self.assertEqual(obs, exp)
exp = [t.children[0]]
obs = t.children[0].children[0].neighbors()
self.assertEqual(obs, exp)
def test_has_children(self):
"""Test if has children"""
# internal nodes report True, tips report False
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
self.assertTrue(t.has_children())
self.assertTrue(t.children[0].has_children())
self.assertTrue(t.children[1].has_children())
self.assertFalse(t.children[0].children[0].has_children())
self.assertFalse(t.children[0].children[1].has_children())
self.assertFalse(t.children[1].children[0].has_children())
self.assertFalse(t.children[1].children[1].has_children())
def test_tips(self):
"""Tip traversal of tree"""
# tips() and traverse(False, False) must agree
exp = ['a', 'b', 'c', 'd']
obs = [n.name for n in self.simple_t.tips()]
self.assertEqual(obs, exp)
obs2 = [n.name for n in self.simple_t.traverse(False, False)]
self.assertEqual(obs2, exp)
def test_tips_self(self):
""" See issue #1509 """
# tips(include_self=True) on a tip must yield that tip itself
tree = TreeNode.read(['(c, (b,a)x)y;'])
ts = list(tree.find('c').tips(include_self=True))
self.assertEqual(len(ts), 1)
t = ts[0]
self.assertEqual(t.name, 'c')
self.assertTrue(t.is_tip())
def test_pre_and_postorder(self):
"""Pre and post order traversal of the tree"""
# internal nodes appear twice (on entry and on exit); equivalent to
# traverse(True, True)
exp = ['root', 'i1', 'a', 'b', 'i1', 'i2', 'c', 'd', 'i2', 'root']
obs = [n.name for n in self.simple_t.pre_and_postorder()]
self.assertEqual(obs, exp)
obs2 = [n.name for n in self.simple_t.traverse(True, True)]
self.assertEqual(obs2, exp)
def test_pre_and_postorder_no_children(self):
# a childless node yields itself once, or nothing when
# include_self=False
t = TreeNode('brofist')
# include self
exp = ['brofist']
obs = [n.name for n in t.pre_and_postorder()]
self.assertEqual(obs, exp)
# do not include self
obs = list(t.pre_and_postorder(include_self=False))
self.assertEqual(obs, [])
def test_levelorder(self):
"""Test level order traversal of the tree"""
# breadth-first: root, then internals, then tips
exp = ['root', 'i1', 'i2', 'a', 'b', 'c', 'd']
obs = [n.name for n in self.simple_t.levelorder()]
self.assertEqual(obs, exp)
def test_bifurcate(self):
# already-bifurcating trees are unchanged; multifurcations are
# resolved, optionally assigning `insert_length` to inserted nodes
t1 = TreeNode.read(io.StringIO('(((a,b),c),(d,e));'))
t2 = TreeNode.read(io.StringIO('((a,b,c));'))
t3 = t2.copy()
t1.bifurcate()
t2.bifurcate()
t3.bifurcate(insert_length=0)
self.assertEqual(str(t1), '(((a,b),c),(d,e));\n')
self.assertEqual(str(t2), '((c,(a,b)));\n')
self.assertEqual(str(t3), '((c,(a,b):0));\n')
def test_bifurcate_with_subclass(self):
# nodes created during bifurcation must preserve the subclass type
tree = TreeNodeSubclass()
tree.append(TreeNodeSubclass())
tree.append(TreeNodeSubclass())
tree.append(TreeNodeSubclass())
tree.append(TreeNodeSubclass())
tree.bifurcate()
for node in tree.traverse():
self.assertIs(type(node), TreeNodeSubclass)
def test_index_tree_single_node(self):
"""index_tree handles single node tree"""
# degenerate case: one node, empty child index
t1 = TreeNode.read(io.StringIO('root;'))
id_index, child_index = t1.index_tree()
self.assertEqual(id_index[0], t1)
npt.assert_equal(child_index, np.array([[]]))
def test_index_tree(self):
"""index_tree should produce correct index and node map"""
# child_index rows are (node_id, first_child_id, last_child_id);
# ids are assigned in postorder.
# test for first tree: contains singleton outgroup
t1 = TreeNode.read(io.StringIO('(((a,b),c),(d,e));'))
t2 = TreeNode.read(io.StringIO('(((a,b),(c,d)),(e,f));'))
t3 = TreeNode.read(io.StringIO('(((a,b,c),(d)),(e,f));'))
id_1, child_1 = t1.index_tree()
nodes_1 = [n.id for n in t1.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_1, [0, 1, 2, 3, 6, 4, 5, 7, 8])
npt.assert_equal(child_1, np.array([[2, 0, 1], [6, 2, 3], [7, 4, 5],
[8, 6, 7]]))
# test for second tree: strictly bifurcating
id_2, child_2 = t2.index_tree()
nodes_2 = [n.id for n in t2.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_2, [0, 1, 4, 2, 3, 5, 8, 6, 7, 9, 10])
npt.assert_equal(child_2, np.array([[4, 0, 1], [5, 2, 3],
[8, 4, 5], [9, 6, 7],
[10, 8, 9]]))
# test for third tree: contains trifurcation and single-child parent
id_3, child_3 = t3.index_tree()
nodes_3 = [n.id for n in t3.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_3, [0, 1, 2, 4, 3, 5, 8, 6, 7, 9, 10])
npt.assert_equal(child_3, np.array([[4, 0, 2], [5, 3, 3], [8, 4, 5],
[9, 6, 7], [10, 8, 9]]))
def test_root_at(self):
"""Form a new root"""
# rooting at a tip is an error; rooting at an internal node (by
# name) produces a new tree rooted above that node
t = TreeNode.read(io.StringIO("(((a,b)c,(d,e)f)g,h)i;"))
with self.assertRaises(TreeError):
t.root_at(t.find('h'))
exp = "(a,b,((d,e)f,(h)g)c)root;\n"
rooted = t.root_at('c')
obs = str(rooted)
self.assertEqual(obs, exp)
def test_root_at_midpoint(self):
"""Root at the midpoint"""
# with all branch lengths set to 1, check distances from the new
# root and that tip-to-tip distances are preserved by rerooting
tree1 = self.TreeRoot
for n in tree1.traverse():
n.length = 1
result = tree1.root_at_midpoint()
self.assertEqual(result.distance(result.find('e')), 1.5)
self.assertEqual(result.distance(result.find('g')), 2.5)
exp_dist = tree1.tip_tip_distances()
obs_dist = result.tip_tip_distances()
self.assertEqual(obs_dist, exp_dist)
def test_root_at_midpoint_no_lengths(self):
# should get same tree back (a copy)
nwk = '(a,b)c;\n'
t = TreeNode.read(io.StringIO(nwk))
obs = t.root_at_midpoint()
self.assertEqual(str(obs), nwk)
def test_root_at_midpoint_tie(self):
# when the midpoint falls exactly on a node, compare the full
# topology (names and lengths) node by node
nwk = "(((a:1,b:1)c:2,(d:3,e:4)f:5),g:1)root;"
t = TreeNode.read(io.StringIO(nwk))
exp = "((d:3,e:4)f:2,((a:1,b:1)c:2,(g:1)):3)root;"
texp = TreeNode.read(io.StringIO(exp))
obs = t.root_at_midpoint()
for o, e in zip(obs.traverse(), texp.traverse()):
self.assertEqual(o.name, e.name)
self.assertEqual(o.length, e.length)
def test_compare_subsets(self):
"""compare_subsets should return the fraction of shared subsets"""
# 0 = identical subset structure, 1 = nothing shared;
# exclude_absent_taxa restricts the comparison to shared taxa
t = TreeNode.read(io.StringIO('((H,G),(R,M));'))
t2 = TreeNode.read(io.StringIO('(((H,G),R),M);'))
t4 = TreeNode.read(io.StringIO('(((H,G),(O,R)),X);'))
result = t.compare_subsets(t)
self.assertEqual(result, 0)
result = t2.compare_subsets(t2)
self.assertEqual(result, 0)
result = t.compare_subsets(t2)
self.assertEqual(result, 0.5)
result = t.compare_subsets(t4)
self.assertEqual(result, 1 - 2. / 5)
result = t.compare_subsets(t4, exclude_absent_taxa=True)
self.assertEqual(result, 1 - 2. / 3)
result = t.compare_subsets(self.TreeRoot, exclude_absent_taxa=True)
self.assertEqual(result, 1)
result = t.compare_subsets(self.TreeRoot)
self.assertEqual(result, 1)
def test_compare_rfd(self):
"""compare_rfd should return the Robinson Foulds distance"""
# symmetric distance; proportion=True normalizes to [0, 1];
# comparing trees with different tip sets raises ValueError
t = TreeNode.read(io.StringIO('((H,G),(R,M));'))
t2 = TreeNode.read(io.StringIO('(((H,G),R),M);'))
t4 = TreeNode.read(io.StringIO('(((H,G),(O,R)),X);'))
obs = t.compare_rfd(t2)
exp = 2.0
self.assertEqual(obs, exp)
self.assertEqual(t.compare_rfd(t2), t2.compare_rfd(t))
obs = t.compare_rfd(t2, proportion=True)
exp = 0.5
self.assertEqual(obs, exp)
with self.assertRaises(ValueError):
t.compare_rfd(t4)
def test_assign_ids(self):
"""Assign IDs to the tree"""
# equal topologies get equal id assignments; a different topology
# (t3) gets a different assignment
t1 = TreeNode.read(io.StringIO("(((a,b),c),(e,f),(g));"))
t2 = TreeNode.read(io.StringIO("(((a,b),c),(e,f),(g));"))
t3 = TreeNode.read(io.StringIO("((g),(e,f),(c,(a,b)));"))
t1_copy = t1.copy()
t1.assign_ids()
t2.assign_ids()
t3.assign_ids()
t1_copy.assign_ids()
self.assertEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t2.traverse()])
self.assertEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t1_copy.traverse()])
self.assertNotEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t3.traverse()])
def test_assign_ids_index_tree(self):
"""assign_ids and index_tree should assign the same IDs"""
# index_tree must be consistent with assign_ids on the same shape
t1 = TreeNode.read(io.StringIO('(((a,b),c),(d,e));'))
t2 = TreeNode.read(io.StringIO('(((a,b),(c,d)),(e,f));'))
t3 = TreeNode.read(io.StringIO('(((a,b,c),(d)),(e,f));'))
t1_copy = t1.copy()
t2_copy = t2.copy()
t3_copy = t3.copy()
t1.assign_ids()
t1_copy.index_tree()
t2.assign_ids()
t2_copy.index_tree()
t3.assign_ids()
t3_copy.index_tree()
self.assertEqual([n.id for n in t1.traverse()],
[n.id for n in t1_copy.traverse()])
self.assertEqual([n.id for n in t2.traverse()],
[n.id for n in t2_copy.traverse()])
self.assertEqual([n.id for n in t3.traverse()],
[n.id for n in t3_copy.traverse()])
def test_unrooted_deepcopy(self):
"""Do an unrooted_copy"""
# the deep copy must share no node objects with the original
# (verified by comparing id() sets)
t = TreeNode.read(io.StringIO("((a,(b,c)d)e,(f,g)h)i;"))
exp = "(b,c,(a,((f,g)h)e)d)root;\n"
obs = t.find('d').unrooted_deepcopy()
self.assertEqual(str(obs), exp)
t_ids = {id(n) for n in t.traverse()}
obs_ids = {id(n) for n in obs.traverse()}
self.assertEqual(t_ids.intersection(obs_ids), set())
def test_descending_branch_length(self):
"""Calculate descending branch_length"""
# total branch length below the root, optionally restricted to the
# subtree spanned by a list of tip names; unknown names and
# internal-node names raise ValueError; missing lengths count as 0
tr = TreeNode.read(io.StringIO(
"(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length()
sdbl = tr.descending_branch_length(['A', 'E'])
npt.assert_almost_equal(tdbl, 8.9)
npt.assert_almost_equal(sdbl, 2.2)
self.assertRaises(ValueError, tr.descending_branch_length,
['A', 'DNE'])
self.assertRaises(ValueError, tr.descending_branch_length, ['A', 'C'])
tr = TreeNode.read(io.StringIO(
"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length()
npt.assert_almost_equal(tdbl, 8.8)
tr = TreeNode.read(io.StringIO(
"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length()
npt.assert_almost_equal(tdbl, 7.9)
tr = TreeNode.read(io.StringIO(
"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length(['A', 'D', 'E'])
npt.assert_almost_equal(tdbl, 2.1)
tr = TreeNode.read(io.StringIO(
"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length(['I', 'D', 'E'])
npt.assert_almost_equal(tdbl, 6.6)
# test with a situation where we have unnamed internal nodes
tr = TreeNode.read(io.StringIO(
"(((A,B:1.2):.6,(D:.9,E:.6)F):2.4,(H:.4,I:.5)J:1.3);"))
tdbl = tr.descending_branch_length()
npt.assert_almost_equal(tdbl, 7.9)
def test_to_array(self):
"""Convert a tree to arrays"""
# default to_array exposes id_index/child_index plus per-node
# 'length', 'name' and 'id' arrays; the root's missing length is NaN
t = TreeNode.read(io.StringIO(
'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
id_index, child_index = t.index_tree()
arrayed = t.to_array()
self.assertEqual(id_index, arrayed['id_index'])
npt.assert_equal(child_index, arrayed['child_index'])
exp = np.array([1, 2, 3, 5, 4, 6, 8, 9, 7, 10, np.nan])
obs = arrayed['length']
npt.assert_equal(obs, exp)
exp = np.array(['a', 'b', 'c', 'd', 'x',
'y', 'e', 'f', 'z', 'z', None])
obs = arrayed['name']
npt.assert_equal(obs, exp)
exp = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
obs = arrayed['id']
npt.assert_equal(obs, exp)
def test_to_array_attrs(self):
# explicit attrs limits the result to the requested attributes;
# unknown attributes raise AttributeError
t = TreeNode.read(io.StringIO(
'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
id_index, child_index = t.index_tree()
arrayed = t.to_array(attrs=[('name', object)])
# should only have id_index, child_index, and name since we specified
# attrs
self.assertEqual(len(arrayed), 3)
self.assertEqual(id_index, arrayed['id_index'])
npt.assert_equal(child_index, arrayed['child_index'])
exp = np.array(['a', 'b', 'c', 'd', 'x',
'y', 'e', 'f', 'z', 'z', None])
obs = arrayed['name']
npt.assert_equal(obs, exp)
# invalid attrs
with self.assertRaises(AttributeError):
t.to_array(attrs=[('name', object), ('brofist', int)])
def test_to_array_nan_length_value(self):
# nan_length_value substitutes for missing branch lengths; None
# keeps them as NaN, and present lengths are never overwritten
t = TreeNode.read(io.StringIO("((a:1, b:2)c:3)root;"))
indexed = t.to_array(nan_length_value=None)
npt.assert_equal(indexed['length'],
np.array([1, 2, 3, np.nan], dtype=float))
indexed = t.to_array(nan_length_value=0.0)
npt.assert_equal(indexed['length'],
np.array([1, 2, 3, 0.0], dtype=float))
indexed = t.to_array(nan_length_value=42.0)
npt.assert_equal(indexed['length'],
np.array([1, 2, 3, 42.0], dtype=float))
t = TreeNode.read(io.StringIO("((a:1, b:2)c:3)root:4;"))
indexed = t.to_array(nan_length_value=42.0)
npt.assert_equal(indexed['length'],
np.array([1, 2, 3, 4], dtype=float))
t = TreeNode.read(io.StringIO("((a:1, b:2)c)root;"))
indexed = t.to_array(nan_length_value=42.0)
npt.assert_equal(indexed['length'],
np.array([1, 2, 42.0, 42.0], dtype=float))
def test_from_taxonomy(self):
# build a tree from (id, lineage) pairs; None levels are skipped;
# subclasses must round-trip through the classmethod constructor
input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'2': ['a', 'b', 'c', None, None, 'x', 'y'],
'3': ['h', 'i', 'j', 'k', 'l', 'm', 'n'],
'4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
'5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
exp = TreeNode.read(io.StringIO(
"((((((((1)g)f)e)d,((((2)y)x)))c)b)a,"
"(((((((3,5)n,(4)q)m)l)k)j)i)h);"))
root = TreeNode.from_taxonomy(input_lineages.items())
self.assertIs(type(root), TreeNode)
self.assertEqual(root.compare_subsets(exp), 0.0)
root = TreeNodeSubclass.from_taxonomy(input_lineages.items())
self.assertIs(type(root), TreeNodeSubclass)
def test_to_taxonomy(self):
# from_taxonomy then to_taxonomy(allow_empty=True) round-trips the
# original lineages
input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'2': ['a', 'b', 'c', None, None, 'x', 'y'],
'3': ['h', 'i', 'j', 'k', 'l', 'm', 'n'],
'4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
'5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
tree = TreeNode.from_taxonomy(input_lineages.items())
exp = sorted(input_lineages.items())
obs = [(n.name, lin) for n, lin in tree.to_taxonomy(allow_empty=True)]
self.assertEqual(sorted(obs), exp)
def test_to_taxonomy_filter(self):
# filter_f keeps only lineages satisfying the predicate; id '1'
# (no 'k' or 'x') is dropped, and None levels are collapsed
input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'2': ['a', 'b', 'c', None, None, 'x', 'y'],
'3': ['h', 'i', 'j', 'k', 'l'], # test jagged
'4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
'5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
tree = TreeNode.from_taxonomy(input_lineages.items())
def f(node, lin):
return 'k' in lin or 'x' in lin
exp = [('2', ['a', 'b', 'c', 'x', 'y']),
('3', ['h', 'i', 'j', 'k', 'l']),
('4', ['h', 'i', 'j', 'k', 'l', 'm', 'q']),
('5', ['h', 'i', 'j', 'k', 'l', 'm', 'n'])]
obs = [(n.name, lin) for n, lin in tree.to_taxonomy(filter_f=f)]
self.assertEqual(sorted(obs), exp)
def test_linkage_matrix(self):
# Ensure matches: http://www.southampton.ac.uk/~re1u06/teaching/upgma/
# linkage rows are (child1, child2, height, n_leaves), scipy-style;
# subclass constructor must also preserve the node type
id_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
linkage = np.asarray([[1.0, 5.0, 1.0, 2.0],
[0.0, 3.0, 8.0, 2.0],
[6.0, 7.0, 12.5, 3.0],
[8.0, 9.0, 16.5, 5.0],
[2.0, 10.0, 29.0, 6.0],
[4.0, 11.0, 34.0, 7.0]])
tree = TreeNode.from_linkage_matrix(linkage, id_list)
self.assertIs(type(tree), TreeNode)
self.assertEqual("(E:17.0,(C:14.5,((A:4.0,D:4.0):4.25,(G:6.25,(B:0.5,"
"F:0.5):5.75):2.0):6.25):2.5);\n",
str(tree))
tree = TreeNodeSubclass.from_linkage_matrix(linkage, id_list)
self.assertIs(type(tree), TreeNodeSubclass)
def test_shuffle_invalid_iter(self):
# n must be positive; the error surfaces on first next(), since
# shuffle is a generator
shuffler = self.simple_t.shuffle(n=-1)
with self.assertRaises(ValueError):
next(shuffler)
def test_shuffle_n_2(self):
# k=2 with a deterministic reversing shuffle alternates between the
# two orderings of the chosen tips; n=inf yields indefinitely
exp = ["((a,b)i1,(d,c)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((a,b)i1,(d,c)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((a,b)i1,(d,c)i2)root;\n"]
obs_g = self.simple_t.shuffle(k=2, shuffle_f=self.rev_f, n=np.inf)
obs = [str(next(obs_g)) for i in range(5)]
self.assertEqual(obs, exp)
def test_shuffle_n_none(self):
# default k (all tips) with a finite n produces exactly n trees
exp = ["((d,c)i1,(b,a)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((d,c)i1,(b,a)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n"]
obs_g = self.simple_t.shuffle(shuffle_f=self.rev_f, n=4)
obs = [str(next(obs_g)) for i in range(4)]
self.assertEqual(obs, exp)
def test_shuffle_complex(self):
# only the named tips are shuffled; the rest of the topology is
# untouched
exp = ["(((a,b)int1,(x,y,(w,z)int2,(f,e)int3)int4),(d,c)int5);\n",
"(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);\n",
"(((a,b)int1,(x,y,(w,z)int2,(f,e)int3)int4),(d,c)int5);\n",
"(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);\n"]
obs_g = self.complex_tree.shuffle(shuffle_f=self.rev_f,
names=['c', 'd', 'e', 'f'], n=4)
obs = [str(next(obs_g)) for i in range(4)]
self.assertEqual(obs, exp)
def test_shuffle_names(self):
# rotating shuffle over an explicit name list
exp = ["((c,a)i1,(b,d)i2)root;\n",
"((b,c)i1,(a,d)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((c,a)i1,(b,d)i2)root;\n"]
obs_g = self.simple_t.shuffle(names=['a', 'b', 'c'],
shuffle_f=self.rotate_f, n=np.inf)
obs = [str(next(obs_g)) for i in range(4)]
self.assertEqual(obs, exp)
def test_shuffle_raises(self):
# k < 2, k combined with names, and unknown names are all errors
with self.assertRaises(ValueError):
next(self.simple_t.shuffle(k=1))
with self.assertRaises(ValueError):
next(self.simple_t.shuffle(k=5, names=['a', 'b']))
with self.assertRaises(MissingNodeError):
next(self.simple_t.shuffle(names=['x', 'y']))
def test_assign_supports(self):
"""Extract support values of internal nodes."""
# assign_supports parses node labels into numeric `support`
# attributes, covering plain numbers, numbers with branch lengths,
# int vs float, negatives/scientific notation, 'support:label'
# combinations, and non-numeric labels (left as None).
# test nodes with support values alone as labels
tree = TreeNode.read(['((a,b)75,(c,d)90);'])
tree.assign_supports()
node1, node2 = tree.children
# check if internal nodes are assigned correct support values
self.assertEqual(node1.support, 75)
self.assertEqual(node2.support, 90)
# check if original node names are cleared
self.assertIsNone(node1.name)
self.assertIsNone(node2.name)
# check if support values are not assigned to root and tips
self.assertIsNone(tree.support)
for taxon in ('a', 'b', 'c', 'd'):
self.assertIsNone(tree.find(taxon).support)
# test nodes with support values and branch lengths
tree = TreeNode.read(['((a,b)0.85:1.23,(c,d)0.95:4.56);'])
tree.assign_supports()
node1, node2 = tree.children
self.assertEqual(node1.support, 0.85)
self.assertEqual(node2.support, 0.95)
# test whether integer or float support values can be correctly parsed
tree = TreeNode.read(['((a,b)75,(c,d)80.0,(e,f)97.5,(g,h)0.95);'])
tree.assign_supports()
node1, node2, node3, node4 = tree.children
self.assertTrue(isinstance(node1.support, int))
self.assertEqual(node1.support, 75)
self.assertTrue(isinstance(node2.support, float))
self.assertEqual(node2.support, 80.0)
self.assertTrue(isinstance(node3.support, float))
self.assertEqual(node3.support, 97.5)
self.assertTrue(isinstance(node4.support, float))
self.assertEqual(node4.support, 0.95)
# test support values that are negative or scientific notation (not a
# common scenario but can happen)
tree = TreeNode.read(['((a,b)-1.23,(c,d)1.23e-4);'])
tree.assign_supports()
node1, node2 = tree.children
self.assertEqual(node1.support, -1.23)
self.assertEqual(node2.support, 0.000123)
# test nodes with support and extra label
tree = TreeNode.read(['((a,b)\'80:X\',(c,d)\'60:Y\');'])
tree.assign_supports()
node1, node2 = tree.children
self.assertEqual(node1.support, 80)
self.assertEqual(node1.name, 'X')
self.assertEqual(node2.support, 60)
self.assertEqual(node2.name, 'Y')
# test nodes without label, with non-numeric label, and with branch
# length only
tree = TreeNode.read(['((a,b),(c,d)x,(e,f):1.0);'])
tree.assign_supports()
for node in tree.children:
self.assertIsNone(node.support)
def test_unpack(self):
"""Unpack an internal node."""
# unpack() promotes an internal node's children to its parent,
# adding its branch length to theirs; root and tips cannot be
# unpacked.
# test unpacking a node without branch length
tree = TreeNode.read(['((c,d)a,(e,f)b);'])
tree.find('b').unpack()
exp = '((c,d)a,e,f);\n'
self.assertEqual(str(tree), exp)
# test unpacking a node with branch length
tree = TreeNode.read(['((c:2.0,d:3.0)a:1.0,(e:2.0,f:1.0)b:2.0);'])
tree.find('b').unpack()
exp = '((c:2.0,d:3.0)a:1.0,e:4.0,f:3.0);'
self.assertEqual(str(tree).rstrip(), exp)
# test attempting to unpack root
tree = TreeNode.read(['((d,e)b,(f,g)c)a;'])
msg = 'Cannot unpack root.'
with self.assertRaisesRegex(TreeError, msg):
tree.find('a').unpack()
# test attempting to unpack tip
msg = 'Cannot unpack tip.'
with self.assertRaisesRegex(TreeError, msg):
tree.find('d').unpack()
def test_unpack_by_func(self):
"""Unpack internal nodes of a tree by a function."""
# unpack_by_func collapses every internal node for which the
# predicate is true, merging branch lengths; also exercised with
# support-based predicates after assign_supports().
# unpack internal nodes with branch length <= 1.0
def func(x):
return x.length <= 1.0
# will unpack node 'a', but not tip 'e'
# will add the branch length of 'a' to its child nodes 'c' and 'd'
tree = TreeNode.read(['((c:2,d:3)a:1,(e:1,f:2)b:2);'])
tree.unpack_by_func(func)
exp = '((e:1.0,f:2.0)b:2.0,c:3.0,d:4.0);'
self.assertEqual(str(tree).rstrip(), exp)
# unpack internal nodes with branch length < 2.01
# will unpack both 'a' and 'b'
tree = TreeNode.read(['((c:2,d:3)a:1,(e:1,f:2)b:2);'])
tree.unpack_by_func(lambda x: x.length <= 2.0)
exp = '(c:3.0,d:4.0,e:3.0,f:4.0);'
self.assertEqual(str(tree).rstrip(), exp)
# unpack two nested nodes 'a' and 'c' simultaneously
tree = TreeNode.read(['(((e:3,f:2)c:1,d:3)a:1,b:4);'])
tree.unpack_by_func(lambda x: x.length <= 2.0)
exp = '(b:4.0,d:4.0,e:5.0,f:4.0);'
self.assertEqual(str(tree).rstrip(), exp)
# test a complicated scenario (unpacking nodes 'g', 'h' and 'm')
def func(x):
return x.length < 2.0
tree = TreeNode.read(['(((a:1.04,b:2.32,c:1.44)d:3.20,'
'(e:3.91,f:2.47)g:1.21)h:1.75,'
'(i:4.14,(j:2.06,k:1.58)l:3.32)m:0.77);'])
tree.unpack_by_func(func)
exp = ('((a:1.04,b:2.32,c:1.44)d:4.95,e:6.87,f:5.43,i:4.91,'
'(j:2.06,k:1.58)l:4.09);')
self.assertEqual(str(tree).rstrip(), exp)
# unpack nodes with support < 75
def func(x):
return x.support < 75
tree = TreeNode.read(['(((a,b)85,(c,d)78)75,(e,(f,g)64)80);'])
tree.assign_supports()
tree.unpack_by_func(func)
exp = '(((a,b)85,(c,d)78)75,(e,f,g)80);'
self.assertEqual(str(tree).rstrip(), exp)
# unpack nodes with support < 85
tree = TreeNode.read(['(((a,b)85,(c,d)78)75,(e,(f,g)64)80);'])
tree.assign_supports()
tree.unpack_by_func(lambda x: x.support < 85)
exp = '((a,b)85,c,d,e,f,g);'
self.assertEqual(str(tree).rstrip(), exp)
# unpack nodes with support < 0.95
tree = TreeNode.read(['(((a,b)0.97,(c,d)0.98)1.0,(e,(f,g)0.88)0.96);'])
tree.assign_supports()
tree.unpack_by_func(lambda x: x.support < 0.95)
exp = '(((a,b)0.97,(c,d)0.98)1.0,(e,f,g)0.96);'
self.assertEqual(str(tree).rstrip(), exp)
# test a case where there are branch lengths, none support values and
# node labels
tree = TreeNode.read(['(((a:1.02,b:0.33)85:0.12,(c:0.86,d:2.23)'
'70:3.02)75:0.95,(e:1.43,(f:1.69,g:1.92)64:0.20)'
'node:0.35)root;'])
tree.assign_supports()
tree.unpack_by_func(lambda x: x.support is not None and x.support < 75)
exp = ('(((a:1.02,b:0.33)85:0.12,c:3.88,d:5.25)75:0.95,'
'(e:1.43,f:1.89,g:2.12)node:0.35)root;')
self.assertEqual(str(tree).rstrip(), exp)
# ---------------------------------------------------------------------
# Module-level newick fixtures shared by the tests.
# ---------------------------------------------------------------------
# A small tree with branch lengths and unnamed internal nodes.
sample = """
(
(
xyz:0.28124,
(
def:0.24498,
mno:0.03627)
:0.17710)
:0.04870,
abc:0.05925,
(
ghi:0.06914,
jkl:0.13776)
:0.09853);
"""
# Same shape as `sample`, but with the internal nodes labelled.
node_data_sample = """
(
(
xyz:0.28124,
(
def:0.24498,
mno:0.03627)
'A':0.17710)
B:0.04870,
abc:0.05925,
(
ghi:0.06914,
jkl:0.13776)
C:0.09853);
"""
# Degenerate / minimal newick strings used for parser edge cases.
minimal = "();"
no_names = "((,),(,));"
missing_tip_name = "((a,b),(c,));"
empty = '();'
single = '(abc:3);'
double = '(abc:3, def:4);'
onenest = '(abc:3, (def:4, ghi:5):6 );'
nodedata = '(abc:3, (def:4, ghi:5)jkl:6 );'
# Expected ASCII-art rendering for a tree whose root has three children.
exp_ascii_art_three_children = r""" /-a
|
---------| /-b
| |
\--------|--c
|
\-d"""
# Run the test suite when this module is executed as a script.
if __name__ == '__main__':
main()
| gregcaporaso/scikit-bio | skbio/tree/tests/test_tree.py | Python | bsd-3-clause | 59,271 | [
"scikit-bio"
] | aed1df4471c79b018864933f5dc496f61f31c879bea7b511be6f65b5822022fc |
from brian import *
# Network of N leaky integrate-and-fire neurons coupled by gap junctions.
# NOTE(review): units (mV, ms, second) and simulation machinery come from
# the Brian star-import above.
N = 300
v0 = 5 * mV        # resting drive
tau = 20 * ms      # membrane time constant
sigma = 5 * mV     # noise amplitude
vt = 10 * mV       # spike threshold
vr = 0 * mV        # reset potential
g_gap = 1. / N     # gap-junction coupling, scaled by network size
beta = 60 * mV * 2 * ms  # size of the spike-mediated coupling effect
delta = vt - vr    # threshold-to-reset gap, removed from u per spike
# v: membrane potential with a diffusive gap-junction term g_gap*(u - N*v);
# u: running sum of all membrane potentials (input from other neurons).
eqs = '''
dv/dt=(v0-v)/tau+g_gap*(u-N*v)/tau : volt
du/dt=(N*v0-u)/tau : volt # input from other neurons
'''
def myreset(P, spikes):
# Reset fired neurons, then apply the gap-junction spike effect to
# every neuron and keep u consistent with the post-reset potentials.
P.v[spikes] = vr # reset
P.v += g_gap * beta * len(spikes) # spike effect
P.u -= delta * len(spikes)
group = NeuronGroup(N, model=eqs, threshold=vt, reset=myreset)
@network_operation
def noise(cl):
# Per-timestep Gaussian noise added to v; its population sum is also
# added to u so the mean-field variable stays in sync.
x = randn(N) * sigma * (cl.dt / tau) ** .5
group.v += x
group.u += sum(x)
# Record two example traces, all spikes, and the population rate.
trace = StateMonitor(group, 'v', record=[0, 1])
spikes = SpikeMonitor(group)
rate = PopulationRateMonitor(group)
run(1 * second)
# Plot raster, example voltage traces, and smoothed population rate.
subplot(311)
raster_plot(spikes)
subplot(312)
plot(trace.times / ms, trace[0] / mV)
plot(trace.times / ms, trace[1] / mV)
subplot(313)
plot(rate.times / ms, rate.smooth_rate(5 * ms) / Hz)
show()
"Brian"
] | 740ccd3b423ae7728040d84966a20d062b5ed948482ce510b9e868468f9a7523 |
from astn import AstToGAst, GAstToAst
import ast
import gast
class Ast2ToGAst(AstToGAst):
"""Visitor converting a Python 2 ``ast`` tree into a ``gast`` tree.

Only nodes whose shape differs between Python 2 ``ast`` and ``gast``
are overridden; the generic walk lives in the ``AstToGAst`` base
class (defined elsewhere in this file).
"""
# stmt
def visit_FunctionDef(self, node):
# gast adds a `returns` slot that Python 2 lacks.
new_node = gast.FunctionDef(
self._visit(node.name),
self._visit(node.args),
self._visit(node.body),
self._visit(node.decorator_list),
None, # returns
)
ast.copy_location(new_node, node)
return new_node
def visit_ClassDef(self, node):
# Python 2 class defs have no keyword arguments (e.g. metaclass=).
new_node = gast.ClassDef(
self._visit(node.name),
self._visit(node.bases),
[], # keywords
self._visit(node.body),
self._visit(node.decorator_list),
)
ast.copy_location(new_node, node)
return new_node
def visit_With(self, node):
# Python 2 `with` holds a single context; gast uses a list of
# `withitem`s.
new_node = gast.With(
[gast.withitem(
self._visit(node.context_expr),
self._visit(node.optional_vars)
)],
self._visit(node.body)
)
ast.copy_location(new_node, node)
return new_node
def visit_Raise(self, node):
# Translate the 3-argument Python 2 raise (type, inst, tback) into
# the single-expression form: type(inst).with_traceback(tback).
ntype = self._visit(node.type)
ninst = self._visit(node.inst)
ntback = self._visit(node.tback)
what = ntype
if ninst is not None:
what = gast.Call(ntype, [ninst], [])
ast.copy_location(what, node)
if ntback is not None:
attr = gast.Attribute(what, 'with_traceback', gast.Load())
ast.copy_location(attr, node)
what = gast.Call(
attr,
[ntback],
[]
)
ast.copy_location(what, node)
new_node = gast.Raise(what, None)
ast.copy_location(new_node, node)
return new_node
def visit_TryExcept(self, node):
# Python 2 splits try/except and try/finally; gast has one Try.
new_node = gast.Try(
self._visit(node.body),
self._visit(node.handlers),
self._visit(node.orelse),
[] # finalbody
)
ast.copy_location(new_node, node)
return new_node
def visit_TryFinally(self, node):
new_node = gast.Try(
self._visit(node.body),
[], # handlers
[], # orelse
self._visit(node.finalbody)
)
ast.copy_location(new_node, node)
return new_node
# expr
def visit_Name(self, node):
# gast.Name carries an extra annotation slot (None in Python 2).
new_node = gast.Name(
self._visit(node.id),
self._visit(node.ctx),
None,
)
ast.copy_location(new_node, node)
return new_node
def visit_Call(self, node):
# Fold Python 2's starargs/kwargs call slots into gast's
# Starred argument / None-keyword representation.
if node.starargs:
star = gast.Starred(self._visit(node.starargs), gast.Load())
ast.copy_location(star, node)
starred = [star]
else:
starred = []
if node.kwargs:
kwargs = [gast.keyword(None, self._visit(node.kwargs))]
else:
kwargs = []
new_node = gast.Call(
self._visit(node.func),
self._visit(node.args) + starred,
self._visit(node.keywords) + kwargs,
)
ast.copy_location(new_node, node)
return new_node
def visit_comprehension(self, node):
# gast comprehensions carry is_async; always 0 in Python 2.
new_node = gast.comprehension(
target=self._visit(node.target),
iter=self._visit(node.iter),
ifs=self._visit(node.ifs),
is_async=0,
)
return ast.copy_location(new_node, node)
# arguments
def visit_arguments(self, node):
# Python 2 has no keyword-only arguments; fill those slots empty.
new_node = gast.arguments(
self._visit(node.args),
self._visit(node.vararg),
[], # kwonlyargs
[], # kw_defaults
self._visit(node.kwarg),
self._visit(node.defaults),
)
return new_node
class GAstToAst2(GAstToAst):
"""Visitor converting a ``gast`` tree back into a Python 2 ``ast`` tree.

Inverse of ``Ast2ToGAst``: drops the gast-only slots and re-expands
the unified nodes into their Python 2 shapes.
"""
# stmt
def visit_FunctionDef(self, node):
# drop the gast-only `returns` slot
new_node = ast.FunctionDef(
self._visit(node.name),
self._visit(node.args),
self._visit(node.body),
self._visit(node.decorator_list),
)
ast.copy_location(new_node, node)
return new_node
def visit_ClassDef(self, node):
# drop the gast-only `keywords` slot
new_node = ast.ClassDef(
self._visit(node.name),
self._visit(node.bases),
self._visit(node.body),
self._visit(node.decorator_list),
)
ast.copy_location(new_node, node)
return new_node
def visit_With(self, node):
# only the first withitem is translated back to Python 2's
# single-context form (inverse of Ast2ToGAst.visit_With)
new_node = ast.With(
self._visit(node.items[0].context_expr),
self._visit(node.items[0].optional_vars),
self._visit(node.body)
)
ast.copy_location(new_node, node)
return new_node
def visit_Raise(self, node):
# Recognize the `exc.with_traceback(tb)` pattern produced by the
# forward conversion and split it back into (exc, None, tb).
if isinstance(node.exc, gast.Call) and \
isinstance(node.exc.func, gast.Attribute) and \
node.exc.func.attr == 'with_traceback':
raised = self._visit(node.exc.func.value)
traceback = self._visit(node.exc.args[0])
else:
raised = self._visit(node.exc)
traceback = None
new_node = ast.Raise(raised, None, traceback)
ast.copy_location(new_node, node)
return new_node
def visit_Try(self, node):
# gast.Try maps to TryFinally when a finalbody exists, otherwise
# TryExcept. NOTE(review): a Try with both handlers and a
# finalbody only restores the finally part here — confirm upstream.
if node.finalbody:
new_node = ast.TryFinally(
self._visit(node.body),
self._visit(node.finalbody)
)
else:
new_node = ast.TryExcept(
self._visit(node.body),
self._visit(node.handlers),
self._visit(node.orelse),
)
ast.copy_location(new_node, node)
return new_node
# expr
def visit_Name(self, node):
# drop the gast-only annotation slot
new_node = ast.Name(
self._visit(node.id),
self._visit(node.ctx),
)
ast.copy_location(new_node, node)
return new_node
def visit_Call(self, node):
# Re-split a trailing Starred argument / None-keyword back into
# Python 2's starargs / kwargs call slots.
if node.args and isinstance(node.args[-1], gast.Starred):
args = node.args[:-1]
starargs = node.args[-1].value
else:
args = node.args
starargs = None
if node.keywords and node.keywords[-1].arg is None:
keywords = node.keywords[:-1]
kwargs = node.keywords[-1].value
else:
keywords = node.keywords
kwargs = None
new_node = ast.Call(
self._visit(node.func),
self._visit(args),
self._visit(keywords),
self._visit(starargs),
self._visit(kwargs),
)
ast.copy_location(new_node, node)
return new_node
def visit_arg(self, node):
# Python 2 has no arg node; parameters are Name nodes in Param ctx.
new_node = ast.Name(node.arg, ast.Param())
ast.copy_location(new_node, node)
return new_node
# arguments
def visit_arguments(self, node):
# drop kwonlyargs/kw_defaults (no Python 2 equivalent)
new_node = ast.arguments(
self._visit(node.args),
self._visit(node.vararg),
self._visit(node.kwarg),
self._visit(node.defaults),
)
return new_node
def ast_to_gast(node):
    """Translate a Python 2 ``ast`` tree into the equivalent ``gast`` tree."""
    translator = Ast2ToGAst()
    return translator.visit(node)
def gast_to_ast(node):
    """Translate a ``gast`` tree back into the equivalent Python 2 ``ast`` tree."""
    translator = GAstToAst2()
    return translator.visit(node)
| ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/gast/ast2.py | Python | mit | 7,224 | [
"VisIt"
] | 436c7c69ab2d4a49c79b9c43fd0ed20ef7a6a1f55f05c2a01691c084f40ed329 |
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array, deprecated
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.

    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, optional, default: 20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        rotation matrix

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated.  Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated.  These two
    calculations are performed until convergence.  The discrete partition
    matrix is returned as the clustering solution.  Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.

    """

    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element.  This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors.  Samples should lie on the unit
    # hypersphere centered at the origin.  This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:

        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # Count the restart only when the SVD actually failed.
                # Incrementing after each *successful* SVD (as before) would
                # silently exhaust the restart budget during normal iteration.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        k=None, eigen_tol=0.0,
                        assign_labels='kmeans',
                        mode=None):
    """Apply clustering to a projection to the normalized laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    Parameters
    -----------
    affinity: array-like or sparse matrix, shape: (n_samples, n_samples)
        The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.

        Possible examples:
          - adjacency matrix of a graph,
          - heat kernel of the pairwise distance matrix of the samples,
          - symmetric k-nearest neighbours connectivity matrix of the samples.

    n_clusters: integer, optional
        Number of clusters to extract.

    n_components: integer, optional, default is n_clusters
        Number of eigen vectors to use for the spectral embedding

    eigen_solver: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init: int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space.  There are two ways to assign labels after the laplacian
        embedding.  k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another
        approach which is less sensitive to random initialization. See
        the 'Multiclass spectral clustering' paper referenced below for
        more details on the discretization approach.

    Returns
    -------
    labels: array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    ------
    The graph should contain only one connect component, elsewhere
    the results make little sense.

    This algorithm solves the normalized cut for k=2: it is a
    normalized spectral clustering.
    """
    # Validate the label-assignment strategy up front so a typo fails fast.
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    # Handle the deprecated aliases of n_clusters and eigen_solver.
    if k is not None:
        warnings.warn("'k' was renamed to n_clusters and will "
                      "be removed in 0.15.",
                      DeprecationWarning)
        n_clusters = k
    if mode is not None:
        warnings.warn("'mode' was renamed to eigen_solver "
                      "and will be removed in 0.15.",
                      DeprecationWarning)
        eigen_solver = mode

    random_state = check_random_state(random_state)
    n_components = n_clusters if n_components is None else n_components

    # Embed the samples in the space spanned by the first n_components
    # eigenvectors of the normalized graph Laplacian.
    maps = spectral_embedding(affinity, n_components=n_components,
                              eigen_solver=eigen_solver,
                              random_state=random_state,
                              eigen_tol=eigen_tol, drop_first=False)

    # Assign cluster labels in the embedding space.
    if assign_labels == 'kmeans':
        _, labels, _ = k_means(maps, n_clusters, random_state=random_state,
                               n_init=n_init)
    else:
        labels = discretize(maps, random_state=random_state)

    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection to the normalized laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::

            np.exp(-gamma * d(X,X) ** 2)

    or a k-nearest neighbors connectivity matrix.

    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.

    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.

    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed',
        'rbf' or one of the kernels supported by
        `sklearn.metrics.pairwise_kernels`.

        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not checked
        by the clustering algorithm.

    gamma : float
        Scaling factor of RBF, polynomial, exponential chi² and
        sigmoid affinity kernel. Ignored for
        ``affinity='nearest_neighbors'``.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.

    eigen_solver : {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another approach
        which is less sensitive to random initialization.

    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.

    Attributes
    ----------
    `affinity_matrix_` : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only if after calling
        ``fit``.

    `labels_` :
        Labels of each point

    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::

        np.exp(- X ** 2 / (2. * delta ** 2))

    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.

    If the pyamg package is installed, it is used: this greatly
    speeds up computation.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """

    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10, k=None,
                 eigen_tol=0.0, assign_labels='kmeans', mode=None,
                 degree=3, coef0=1, kernel_params=None):
        # Handle the deprecated aliases of n_clusters and eigen_solver.
        if k is not None:
            warnings.warn("'k' was renamed to n_clusters and "
                          "will be removed in 0.15.",
                          DeprecationWarning)
            n_clusters = k
        if mode is not None:
            warnings.warn("'mode' was renamed to eigen_solver and "
                          "will be removed in 0.15.",
                          DeprecationWarning)
            eigen_solver = mode
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params

    def fit(self, X):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        # A square X is suspicious: it may be an affinity matrix passed in
        # through the old API.  Warn (message fixed: the pieces previously
        # concatenated to "``fit``now").
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit``"
                          " now constructs an affinity matrix from data. To"
                          " use a custom affinity matrix, set"
                          " ``affinity=precomputed``.")

        if self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors)
            # Symmetrize the kNN graph: spectral_embedding expects a
            # symmetric affinity matrix.
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            params = self.kernel_params
            if params is None:
                params = {}
            else:
                # Copy before adding defaults below so the caller's
                # kernel_params dict is not silently mutated.
                params = dict(params)
            if not callable(self.affinity):
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                     filter_params=True,
                                                     **params)

        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self

    @property
    def _pairwise(self):
        # Tells sklearn's cross-validation machinery that X is a square
        # sample-by-sample matrix when a precomputed affinity is used.
        return self.affinity == "precomputed"

    @property
    @deprecated("'mode' was renamed to eigen_solver and will be removed in"
                " 0.15.")
    def mode(self):
        return self.eigen_solver

    @property
    @deprecated("'k' was renamed to n_clusters and will be removed in"
                " 0.15.")
    def k(self):
        return self.n_clusters
| kmike/scikit-learn | sklearn/cluster/spectral.py | Python | bsd-3-clause | 19,078 | [
"Brian",
"Gaussian"
] | 2660862e4b45a388570837ed95ed72f6c5689b20f8c08a7206260fa234334b48 |
#!/usr/local/sci/bin/python
# PYTHON3
#
# Author: Kate Willett
# Created: 8 January 2016
# Last update: 6 April 2020
# Location: /data/local/hadkw/HADCRUH2/UPDATE2014/PROGS/PYTHON/
# GitHub: https://github.com/Kate-Willett/Climate_Explorer/PYTHON/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This is a module called to write out data to a netCDF file ready for ingestion into the CEDA/ESGF
# archive. It should be CF compliant to at least CF-1.0. Standard names are used where available and
# variable names are taken from PCMDI standard names (or designed to be PCMDI compatible).
#
# A table listing the header info is available for each type of file. For HadISDH they can be found
# here: www.metoffice.gov.uk/hadobs/hadisdh/formattables.html
#
# -----------------------
# LIST OF MODULES
# -----------------------
# Python:
# import numpy as np
# import sys, os
# import scipy.stats
# import struct
# import os.path
# import datetime as dt
# from datetime import datetime
# from matplotlib.dates import date2num,num2date
# from netCDF4 import Dataset
# from netCDF4 import stringtoarr # for putting strings in as netCDF variables
# from scipy.io import netcdf
# import pdb # pdb.set_trace() or c
#
# Kate's:
# from ReadNetCDF import GetGrid - written by Kate Willett, reads in any netCDF grid, can cope with multiple fields
#
# -----------------------
# DATA
# -----------------------
# Any numpy arrays of data, latitude and longtitude info, mdi info etc:
#
# Filename: string containing filepath and filename e.g.,
# /data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/GRIDS/huss_HadISDH_HadOBS_19730101-20141231_v2-0-1-2014p.nc
# Dates: a four element integer dictionary of {'StYr': integer e.g., 1973,
# 'StMon': integer between 1 and 12,
# 'EdYr': integer e.g., 2014,
# 'EdMon': integer between 0 and 11}
# Latitudes: float array of latitude gridbox centres from North to South
# Longitudes: float array of longitude gridbox centres from West to East
# ClimPoints: two element list of start year and end year of climatology period e.g., (1976, 2005)
# DataObject: a list of data arrays to be written out
# ???
# DimObject: a list containing a list of dimension names and then dictionaries of attributes applying to each dimension
# [['time','month','month_name','latitude','longitude','bound_pairs'], # all can be filled with other given info
# [{'dimname':'time',
# 'standard_name': 'time',
# 'long_name': 'time',
# 'units': 'days since 1973-1-1 00:00:00',
# 'axis': 'T',
# 'calendar': 'gregorian',
# 'start_year','1973s',
# etc...
# AttrObject: a list of dictionaries of attributes applying to each data array to be written out
# [[{'varname': 'huss',
# 'standard_name': 'specific_humidity',
# 'long_name': 'near surface (~2m) specific humidity',
# 'cell_methods': 'time: mean(interval: 1 month comment: anomaly from climatology) area: mean where land (stations within gridbox)',
# 'comment': 'gridbox mean monthly mean climate anomalies from stations'
# 'units': 'g/kg'
# 'scale_factor': '100'
#
# }],[{}],etc...]
# GlobAttrObject: a dictionary of global attributes
#
#
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Pass the filename and required variables to the code which will then create a netCDF file storing everything.
# These variables may have been set up using Convert_CEDAESGF_JAN2016.py, which in turns uses ReadNetCDF.py
# to read in the data.
#
# from WriteNetCDF_CEDAESGF_JAN2016 import WriteNCCF
# WriteNCCF(Filename,Dates,Latitudes,Longitudes,ClimPoints,DataObjectDimObject,AttrObject,GlobAttrObject)
#
# -----------------------
# OUTPUT
# -----------------------
# A CEDA/ESGF compliant netCDF file with the provided filename
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 2 6th Mar 2020
# ---------
#
# Enhancements
# Can now work with marine data - not completed yet
#
# Changes
# Now python 3
# Now doesn't apply scale and offset and so saves as float32 with MDI=-1e30
#
# Bug fixes
#
# Version 1 8th Jan 2016
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
# This may be amended later to apply compression (netCDF4)
#
# Useful links on CF standards:
# Standard Names:
# http://cfconventions.org/Data/cf-standard-names/29/build/cf-standard-name-table.html
# CF Conventions:
# http://cfconventions.org/
# Cell Methods:
# http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/build/ch07s03.html
# PCMDI standard names:
# http://www-pcmdi.llnl.gov/projects/amip/DATASTDS/VARNAMES/main.html
#
#************************************************************************
# START
#************************************************************************
# Set up python imports
import numpy as np
import sys, os
import scipy.stats
import struct
import os.path
import datetime as dt
from datetime import datetime
from matplotlib.dates import date2num,num2date
from netCDF4 import Dataset
from netCDF4 import stringtoarr # for putting strings in as netCDF variables
from scipy.io import netcdf
import pdb # pdb.set_trace() or c
from ReadNetCDF import GetGrid # written by Kate Willett, reads in any netCDF grid, can cope with multiple fields
# Set up hardwired variables
# OLDMDI: float missing-data indicator used in the incoming data arrays.
OLDMDI = -1e30
# MDI: integer missing-data indicator for the (currently disabled)
# packed-integer output path.
MDI = -999 # just check how this is going to work with storing everything as integers
# Calendar month names written to the 'month' netCDF variable; each is
# passed through stringtoarr(..., 10) on write, which pads to 10 characters.
MonthName = ['January ',
             'February ',
             'March ',
             'April ',
             'May ',
             'June ',
             'July ',
             'August ',
             'September ',
             'October ',
             'November ',
             'December ']
# Days in each calendar month (February fixed at 28; leap days are handled
# by the datetime arithmetic in MakeDaysSince, not here).
MonthDays = [31,28,31,30,31,30,31,31,30,31,30,31]
#************************************************************************
# Subroutines
#************************************************************************
# MakeDaysSince
def MakeDaysSince(TheStYr,TheStMon,TheEdYr,TheEdMon):
    ''' Work out mid-month time points as days since TheStYr/TheStMon
        (leap days included via datetime arithmetic), plus time bounds
        covering the first and last day of each month.

        Inputs:
            TheStYr, TheStMon - start year and start month (1-12)
            TheEdYr, TheEdMon - end year and end month (1-12), inclusive
        Returns:
            DaysArray   - float array (n_months) of mid-month day counts
            BoundsArray - float array (n_months, 2) of [first, last] day
                          counts for each month
        This copes with incomplete years and individual months. '''

    # Number of months in the inclusive range.  The previous formula
    # ((EdYr-StYr)+1)*((EdMon-StMon)+1) was only correct for whole calendar
    # years (StMon=1, EdMon=12); counting months directly works for any span,
    # e.g. Feb 1973 - Jan 1974 correctly gives 12.
    NMonths = (TheEdYr - TheStYr) * 12 + (TheEdMon - TheStMon) + 1

    # set up arrays for month mid points and month bounds
    DaysArray = np.empty(NMonths)
    BoundsArray = np.empty((NMonths, 2))

    # make a date object for each time point and subtract start date
    StartDate = datetime(TheStYr, TheStMon, 1, 0, 0, 0)

    TheYear = TheStYr
    TheMonth = TheStMon
    for mm in range(len(DaysArray)):
        # First day of the following month (rolling over the year if needed)
        if (TheMonth < 12):
            NextYear, NextMonth = TheYear, TheMonth + 1
        else:
            NextYear, NextMonth = TheYear + 1, 1
        MonthStart = datetime(TheYear, TheMonth, 1, 0, 0, 0)
        MonthEnd = datetime(NextYear, NextMonth, 1, 0, 0, 0)
        StartOffset = (MonthStart - StartDate).days
        # Mid point of the month in days since StartDate
        DaysArray[mm] = (MonthEnd - MonthStart).days / 2. + StartOffset
        # Bounds run from the 1st of the month to the last day of the month
        BoundsArray[mm, 0] = StartOffset + 1
        BoundsArray[mm, 1] = (MonthEnd - StartDate).days
        TheYear, TheMonth = NextYear, NextMonth

    return DaysArray, BoundsArray
#************************************************************************
# WriteNCCF
def WriteNCCF(FileName,Dates,Latitudes,Longitudes,ClimPoints,DataObject,DimObject,AttrObject,GlobAttrObject):
    ''' Write gridded data and metadata to a CEDA/ESGF compliant netCDF file.

        Inputs:
            FileName       - full path of the netCDF file to create
            Dates          - dict with 'StYr','StMon','EdYr','EdMon' integers
            Latitudes      - gridbox-centre latitudes (regularly spaced)
            Longitudes     - gridbox-centre longitudes (regularly spaced)
            ClimPoints     - (start_year, end_year) of the climatology period
            DataObject     - list of data arrays, one per entry in AttrObject
            DimObject      - [dim_names, dim_sizes, dict, dict, ...] where each
                             dict describes one dimension/coordinate variable
            AttrObject     - list of dicts, one per data variable
            GlobAttrObject - dict of global attributes
        Data are written as float32 with the _FillValue supplied in each
        AttrObject entry (no scale/offset packing is applied). '''

    # Sort out date/times to write out: mid-month points and month bounds
    print(Dates)
    TimPoints, TimBounds = MakeDaysSince(Dates['StYr'], Dates['StMon'],
                                         Dates['EdYr'], Dates['EdMon'])

    # Sort out clim bounds - paired 'YYYY-M-D' strings, one pair per month
    ClimBounds = np.empty((12, 2), dtype='|S10')
    for mm in range(12):
        ClimBounds[mm, 0] = str(ClimPoints[0])+'-'+str(mm+1)+'-'+str(1)
        ClimBounds[mm, 1] = str(ClimPoints[1])+'-'+str(mm+1)+'-'+str(MonthDays[mm])

    # Sort out LatBounds and LonBounds: gridbox edges from gridbox centres
    # (assumes regular spacing, taken from the first pair of centres)
    LatBounds = np.empty((len(Latitudes), 2), dtype='float')
    LonBounds = np.empty((len(Longitudes), 2), dtype='float')
    LatBounds[:, 0] = Latitudes - ((Latitudes[1] - Latitudes[0]) / 2.)
    LatBounds[:, 1] = Latitudes + ((Latitudes[1] - Latitudes[0]) / 2.)
    LonBounds[:, 0] = Longitudes - ((Longitudes[1] - Longitudes[0]) / 2.)
    LonBounds[:, 1] = Longitudes + ((Longitudes[1] - Longitudes[0]) / 2.)

    # No scale/offset packing is applied - data are stored as floats with
    # the supplied _FillValue, so no MDI conversion is needed here.

    # Create a new netCDF file - have tried zlib=True,least_significant_digit=3 (and 1) - no difference
    ncfw = Dataset(FileName, 'w', format='NETCDF4_CLASSIC')

    # Write out the global attributes that are present, in this fixed order
    for attr in ('description', 'File_created', 'Title', 'Institution',
                 'History', 'Licence', 'Project', 'Processing_level',
                 'Acknowledgement', 'Source', 'Comment', 'References',
                 'Creator_name', 'Creator_email', 'Version', 'doi',
                 'Conventions', 'netcdf_type'):
        if attr in GlobAttrObject:
            setattr(ncfw, attr, GlobAttrObject[attr])

    # Loop through and set up the dimension names and quantities
    for vv in range(len(DimObject[0])):
        ncfw.createDimension(DimObject[0][vv], DimObject[1][vv])

    # Go through each dimension dictionary (elements 2 onwards of DimObject)
    # and set up its coordinate/bounds variable and attributes
    for dimdict in DimObject[2:]:
        print(dimdict['var_name'])
        MyVar = ncfw.createVariable(dimdict['var_name'],
                                    dimdict['var_type'],
                                    dimdict['var_dims'])
        # Apply any other attributes that are present
        for attr in ('standard_name', 'long_name', 'units', 'axis',
                     'calendar', 'start_year', 'end_year', 'start_month',
                     'end_month', 'bounds', 'climatology', 'point_spacing'):
            if attr in dimdict:
                setattr(MyVar, attr, dimdict[attr])
        # Provide the data to the variable
        vname = dimdict['var_name']
        if vname == 'time':
            MyVar[:] = TimPoints
        elif vname == 'bounds_time':
            MyVar[:, :] = TimBounds
        elif vname == 'month':
            for mm in range(12):
                MyVar[mm, :] = stringtoarr(MonthName[mm], 10)
        elif vname == 'climbounds':
            for mm in range(12):
                MyVar[mm, 0, :] = stringtoarr(ClimBounds[mm, 0], 10)
                MyVar[mm, 1, :] = stringtoarr(ClimBounds[mm, 1], 10)
        elif vname == 'latitude':
            MyVar[:] = Latitudes
        elif vname == 'bounds_lat':
            MyVar[:, :] = LatBounds
        elif vname == 'longitude':
            MyVar[:] = Longitudes
        elif vname == 'bounds_lon':
            MyVar[:, :] = LonBounds

    # Go through each data variable and set up the variable and attributes.
    # NOTE: cell_methods and the packing attributes (add_offset, scale_factor,
    # valid_min/max, missing_value) are deliberately not written - they caused
    # CF compliance issues; _FillValue is set at creation time instead.
    for vv, attrdict in enumerate(AttrObject):
        print(attrdict['var_name'])
        MyVar = ncfw.createVariable(attrdict['var_name'],
                                    attrdict['var_type'],
                                    attrdict['var_dims'],
                                    zlib=True,
                                    fill_value=attrdict['_FillValue'])
        # Apply any other attributes that are present
        for attr in ('standard_name', 'long_name', 'comment', 'units',
                     'axis', 'reference_period', 'ancillary_variables'):
            if attr in attrdict:
                setattr(MyVar, attr, attrdict[attr])
        # Provide the data to the variable - depending on how many dimensions
        ndims = len(attrdict['var_dims'])
        if ndims == 1:
            MyVar[:] = DataObject[vv]
        elif ndims == 2:
            MyVar[:, :] = DataObject[vv]
        elif ndims == 3:
            MyVar[:, :, :] = DataObject[vv]

    ncfw.close()

    return # WriteNCCF
#************************************************************************
| Kate-Willett/Climate_Explorer | PYTHON/WriteNetCDF_CEDAESGF_JAN2016.py | Python | cc0-1.0 | 17,298 | [
"NetCDF"
] | df6ad994bcdaedba41b6bc42ed18a013cccb7b9d26089379ef2e40152b81534b |
"""Pre-processing module.
This module contains several pre-processing routines that can help
detect more patterns with PatternMatcher.
Classes included in this module are:
- ShiftToMult: transforms all left shifts of a constant in
multiplications
- SubToMult: transforms subs operator into + (-1)*
- NotToInv(): transforms ~x in -x - 1
- RemoveUselessAnd: removes & 2^n if expression is on n bits
- all_preprocessings: applies all preprocessing transformations
designed for patterns
- all_target_preprocessings: NotToInv() is not used for patterns
"""
import ast
from sspam.tools import asttools
class ShiftToMult(ast.NodeTransformer):
"""
Transform all left shifts of a constant in multiplications.
"""
def visit_BinOp(self, node):
'Change left shifts into multiplications'
if not isinstance(node.op, ast.LShift):
return self.generic_visit(node)
if isinstance(node.right, ast.Num):
self.generic_visit(node)
return ast.BinOp(node.left, ast.Mult(), ast.Num(2**(node.right.n)))
return self.generic_visit(node)
class SubToMult(ast.NodeTransformer):
"""
Subs are a pain in the ass. Let's change them to *(-1)
"""
def __init__(self, nbits=0):
self.nbits = nbits
def visit_BinOp(self, node):
'Change operator - to a *(-1)'
self.generic_visit(node)
if isinstance(node.op, ast.Sub):
node.op = ast.Add()
cond_mult = (isinstance(node.right, ast.BinOp) and
isinstance(node.right.op, ast.Mult))
if cond_mult:
if isinstance(node.right.left, ast.Num):
coeff = node.right.left
operand = node.right.right
elif isinstance(node.right.right, ast.Num):
coeff = node.right.right
operand = node.right.left
else:
node.right = ast.BinOp(ast.Num(-1), ast.Mult(), node.right)
return node
# trying to "simplify" constant coeffs if possible
if self.nbits:
if (-coeff.n) % 2**self.nbits == 1:
node.right = operand
else:
coeff.n = -coeff.n % 2**self.nbits
else:
coeff.n = -coeff.n
else:
node.right = ast.BinOp(ast.Num(-1), ast.Mult(), node.right)
return node
def visit_UnaryOp(self, node):
'Change -x to (-1)*x'
self.generic_visit(node)
if isinstance(node.op, ast.USub):
ope = node.operand
cond_mult = (isinstance(ope, ast.BinOp) and
isinstance(ope.op, ast.Mult))
if cond_mult:
if isinstance(ope.left, ast.Num):
node = ast.BinOp(ast.Num(-ope.left.n), ast.Mult(),
ope.right)
elif isinstance(ope.right, ast.Num):
node = ast.BinOp(ope.left, ast.Mult(),
ast.Num(-ope.right.n))
else:
node = ast.BinOp(ast.Num(-1), ast.Mult(), ope)
else:
node = ast.BinOp(ast.Num(-1), ast.Mult(), ope)
return node
class NotToInv(ast.NodeTransformer):
"""
Transform a (~X) in (- X - 1).
"""
def visit_UnaryOp(self, node):
'Change ~x to - x - 1'
if isinstance(node.op, ast.Invert):
return ast.BinOp(ast.UnaryOp(ast.USub(), node.operand),
ast.Add(),
ast.Num(-1))
return self.generic_visit(node)
class RemoveUselessAnd(ast.NodeTransformer):
    """
    Drop AND-masks that span the full word: (A & 0xFF...FF) == A.
    """

    def __init__(self, nbits):
        # Word size: the identity mask is 2**nbits - 1.
        self.nbits = nbits

    def visit_BinOp(self, node):
        'Change (A & 2**self.nbits - 1) in A'
        if not isinstance(node.op, ast.BitAnd):
            return self.generic_visit(node)
        full_mask = 2**self.nbits - 1
        # A constant on either side that equals the full mask makes the
        # AND a no-op; keep only the other operand.
        if isinstance(node.right, ast.Num):
            if node.right.n == full_mask:
                return self.generic_visit(node.left)
            return self.generic_visit(node)
        if isinstance(node.left, ast.Num):
            if node.left.n == full_mask:
                return self.generic_visit(node.right)
            return self.generic_visit(node)
        return self.generic_visit(node)
def all_preprocessings(asttarget, nbits=0):
    'Apply all pre-processing transforms'
    if not nbits:
        # Infer the word size from the target when not supplied.
        nbits = asttools.get_default_nbits(asttarget)
    # Transform order matters: shifts first, then subs, then mask removal.
    for transform in (ShiftToMult(), SubToMult(), RemoveUselessAnd(nbits)):
        asttarget = transform.visit(asttarget)
    ast.fix_missing_locations(asttarget)
    return asttarget
| quarkslab/sspam | sspam/pre_processing.py | Python | bsd-3-clause | 4,842 | [
"VisIt"
] | 4e26684b4383cba72d2741610a9609302976a04bc50be69c3febece11cff7dc6 |
#!/usr/bin/env python3
# Copyright (C) 2021
# Max Planck Institute for Polymer Research & JGU Mainz
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest, time
import numpy as np
from mpi4py import MPI
import espressopp
from espressopp import Real3D, Int3D
from espressopp.tools import decomp, lattice, velocities
def generate_particles(particles_per_direction):
    """Create a cubic particle lattice with Gaussian velocities.

    Returns positions (x, y, z), box lengths (Lx, Ly, Lz) and velocity
    components (vx, vy, vz) for particles_per_direction**3 particles.
    """
    total = particles_per_direction**3
    x, y, z, Lx, Ly, Lz = lattice.createCubic(total, rho=0.8442, perfect=False)
    vx, vy, vz = velocities.gaussian(T=0.6, N=total, zero_momentum=True)
    return x, y, z, Lx, Ly, Lz, vx, vy, vz
# Module-level fixture shared by every test below: a 10x10x10 lattice of
# particles with matching Gaussian velocities.
particles_per_direction = 10
x, y, z, Lx, Ly, Lz, vx, vy, vz = generate_particles(particles_per_direction)
num_particles = len(x)
def generate_system():
    """Build a fresh espressopp System with domain-decomposition storage
    sized from the module-level box (Lx, Ly, Lz)."""
    rc = 2.5      # cutoff used for node/cell grid sizing
    skin = 0.3    # Verlet skin
    # NOTE(review): timestep, temperature and density are computed but never
    # used below -- presumably leftovers from a larger example; confirm.
    timestep = 0.005
    temperature = 1.0
    comm = MPI.COMM_WORLD
    density = num_particles / (Lx * Ly * Lz)
    size = (Lx, Ly, Lz)
    system = espressopp.System()
    system.rng = espressopp.esutil.RNG()
    system.bc = espressopp.bc.OrthorhombicBC(system.rng, size)
    system.skin = skin
    # Grids are derived from MPI layout, box size, cutoff and skin.
    nodeGrid = decomp.nodeGrid(comm.size, size, rc, skin)
    cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
    system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
    return system
def add_particles(add_particles_array):
    """Add the module fixture particles to a new system and return what the
    storage reports back.

    When add_particles_array is True the numpy-array API
    (addParticlesArray) is used; otherwise the list-of-lists API
    (addParticles) is used. Returns (pos, vel): lists of gathered
    coordinates and velocities, so the two code paths can be compared.
    """
    system = generate_system()
    if add_particles_array:
        tstart = time.time()
        # Column layout must match the props list below.
        props = ['id', 'type', 'mass', 'posx', 'posy', 'posz', 'vx', 'vy', 'vz']
        ids = np.arange(1,num_particles+1)
        types = np.zeros(num_particles)
        mass = np.ones(num_particles)
        new_particles = np.stack((ids, types, mass, x, y, z, vx, vy, vz), axis=-1)
        # NOTE(review): tprep/tadd are measured but never reported -- kept
        # for ad-hoc benchmarking, apparently.
        tprep = time.time()-tstart
        tstart = time.time()
        system.storage.addParticlesArray(new_particles, *props)
        tadd = time.time()-tstart
    else:
        tstart = time.time()
        props = ['id', 'type', 'mass', 'pos', 'v']
        new_particles = []
        for i in range(num_particles):
            new_particles.append([i + 1, 0, 1.0, Real3D(x[i], y[i], z[i]), Real3D(vx[i], vy[i], vz[i])])
        tprep = time.time()-tstart
        tstart = time.time()
        system.storage.addParticles(new_particles, *props)
        tadd = time.time()-tstart
    system.storage.decompose()
    # Gather positions/velocities back from the (possibly distributed) storage.
    configurations = espressopp.analysis.Configurations(system, pos=True, vel=True)
    configurations.gather()
    conf = configurations[0]
    ids = conf.getIds()
    pos = [conf.getCoordinates(pid) for pid in ids]
    vel = [conf.getVelocities(pid) for pid in ids]
    return pos, vel
def add_missing_props():
    """Call addParticlesArray with a deliberately incomplete props list.

    The data array has 9 columns but the props list omits 'vz', so the
    call is expected to raise (the test asserts AssertionError).
    """
    system = generate_system()
    # 'vz' intentionally missing from props while present in the data.
    props = ['id', 'type', 'mass', 'posx', 'posy', 'posz', 'vx', 'vy']
    ids = np.arange(1,num_particles+1)
    types = np.zeros(num_particles)
    mass = np.ones(num_particles)
    new_particles = np.stack((ids, types, mass, x, y, z, vx, vy, vz), axis=-1)
    system.storage.addParticlesArray(new_particles, *props)
class TestAddParticlesArray(unittest.TestCase):
def compare(self, prop0, prop1):
self.assertEqual(len(prop0), len(prop1))
for i in range(len(prop0)):
for j in range(3):
self.assertEqual(prop0[i][j],prop1[i][j])
def test1(self):
pos0, vel0 = add_particles(False)
pos1, vel1 = add_particles(True)
self.compare(pos0, pos1)
self.compare(vel0, vel1)
def test2(self):
with self.assertRaises(AssertionError):
add_missing_props()
# Run the comparison tests when executed as a script.
if __name__ == "__main__":
    unittest.main()
| espressopp/espressopp | testsuite/storage/testAddParticlesArray.py | Python | gpl-3.0 | 4,284 | [
"ESPResSo",
"Gaussian"
] | d872177800f15ecf2f5db612bb5f94e702f357ffa85d230bd81a47aedbe447f9 |
# BSD 3-Clause License
#
# Copyright (c) 2016-19, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Parser module specific to HH-suite A3M sequence files
Credits
-------
Stefan Seemayer and his A3MIO project [https://github.com/sseemayer/BioPython-A3MIO]
"""
__author__ = "Felix Simkovic"
__credits__ = "Stefan Seemayer"
__date__ = "11 Sep 2016"
__version__ = "0.1"
import numpy as np
import re
from conkit.io._parser import SequenceFileParser
from conkit.core.sequence import Sequence
from conkit.core.sequencefile import SequenceFile
class A3mParser(SequenceFileParser):
    """Parser class for HH-suite A3M sequence files."""

    def __init__(self):
        super(A3mParser, self).__init__()

    def read(self, f_handle, f_id="a3m", remove_inserts=True):
        """Read a sequence file

        Parameters
        ----------
        f_handle
            Open file handle [read permissions]
        f_id : str, optional
            Unique sequence file identifier
        remove_inserts : bool, optional
            Remove insert states [default: True]

        Returns
        -------
        :obj:`~conkit.core.sequencefile.SequenceFile`

        """
        sequence_file = SequenceFile(f_id)
        # Skip leading remarks ('#' lines) until the first FASTA-style
        # header ('>').
        # NOTE(review): readline() returns '' at EOF, which is falsy, so a
        # file containing no '>' header would spin forever in this loop --
        # confirm callers always supply well-formed A3M data.
        while True:
            line = f_handle.readline().rstrip()
            if not line:
                continue
            elif line.startswith("#"):
                sequence_file.remark = line[1:]
            elif line.startswith(">"):
                break
        # Record loop: 'line' holds a '>' header on every entry.
        while True:
            if not line.startswith(">"):
                raise ValueError("Fasta record needs to start with '>'")
            id = line[1:]
            chunks = []
            line = f_handle.readline().rstrip()
            # Accumulate sequence lines until the next header or EOF.
            while True:
                if not line:
                    break
                elif line.startswith(">"):
                    break
                chunks.append(line)
                line = f_handle.readline().rstrip()
            seq_string = "".join(chunks)
            if remove_inserts:
                seq_string = self._remove_inserts(seq_string)
            sequence_entry = Sequence(id, seq_string)
            try:
                sequence_file.add(sequence_entry)
            except ValueError:
                # Duplicate identifier: append random numeric suffixes
                # until an unused id is found.
                while True:
                    new_id = sequence_entry.id + "_{0}".format(np.random.randint(0, 100000))
                    if new_id in sequence_file:
                        continue
                    else:
                        break
                sequence_entry.id = new_id
                sequence_file.add(sequence_entry)
            if not line:
                break
        if not remove_inserts:
            self._adjust_insert(sequence_file)
        return sequence_file

    def _adjust_insert(self, hierarchy):
        """Pad all sequences so their insert states align column-wise.

        Credits
        -------
        This function was adapted from Stefan Seemayer's BioPython-A3MIO
        repository - https://github.com/sseemayer/BioPython-A3MIO

        """
        # Determine the insert states by splitting the sequences into chunks based
        # on the case of the letter: match states are uppercase/digits/'~'/'-',
        # insert states are the lowercase runs between them.
        INSERT_STATE = re.compile(r"([A-Z0-9~-])")
        inserts = [INSERT_STATE.split(sequence_entry.seq) for sequence_entry in hierarchy]
        # Determine maximum insert length at each position
        insert_max_lengths = [max(len(inserts[i][j]) for i in range(len(inserts))) for j in range(len(inserts[0]))]

        # Add gaps where gaps are needed
        def pad(chunk, length, pad_char="-"):
            return chunk + pad_char * (length - len(chunk))

        # Manipulate each sequence to match insert states
        for sequence_entry, seq in zip(hierarchy, inserts):
            sequence_entry.seq = "".join(pad(insert, insert_len) for insert, insert_len in zip(seq, insert_max_lengths))

    def _remove_inserts(self, seq):
        """Remove insert states (A3M insert states are lowercase)."""
        return "".join([char for char in seq if not char.islower()])

    def write(self, f_handle, hierarchy):
        """Write a sequence file instance to a file

        Parameters
        ----------
        f_handle
            Open file handle [write permissions]
        hierarchy : :obj:`~conkit.core.sequencefile.SequenceFile`, :obj:`~conkit.core.sequence.Sequence`

        """
        # _reconstruct comes from the SequenceFileParser base class --
        # presumably it normalises a bare Sequence into a SequenceFile.
        sequence_file = self._reconstruct(hierarchy)
        content = ""
        for remark in sequence_file.remark:
            content += "#{remark}\n".format(remark=remark)
        for sequence_entry in sequence_file:
            header = ">{id}".format(id=sequence_entry.id)
            if len(sequence_entry.remark) > 0:
                header = "|".join([header] + sequence_entry.remark)
            content += header + "\n"
            content += sequence_entry.seq + "\n"
        f_handle.write(content)
| fsimkovic/cptbx | conkit/io/a3m.py | Python | gpl-3.0 | 6,265 | [
"Biopython"
] | a3f54d386c72fa78c3bb8b879eea8eba09949fe93fe2cd36ac8ede76f927e95e |
from os import getcwd
from unittest import TestCase
from galaxy.jobs.command_factory import build_command
from galaxy.util.bunch import Bunch
# Canned values shared by the command-factory tests below.
MOCK_COMMAND_LINE = "/opt/galaxy/tools/bowtie /mnt/galaxyData/files/000/input000.dat"
TEST_METADATA_LINE = "set_metadata_and_stuff.sh"
TEST_FILES_PATH = "file_path"
class TestCommandFactory(TestCase):
    """Unit tests for ``galaxy.jobs.command_factory.build_command``."""

    def setUp(self):
        self.job_wrapper = MockJobWrapper()
        self.workdir_outputs = []

        def workdir_outputs(job_wrapper, **kwds):
            # Stand-in for the runner's get_work_dir_outputs: returns
            # whatever list the individual test stored on self.
            assert job_wrapper == self.job_wrapper
            return self.workdir_outputs

        self.runner = Bunch(app=Bunch(model=Bunch(Dataset=Bunch(file_path=TEST_FILES_PATH))), get_work_dir_outputs=workdir_outputs)
        self.include_metadata = False
        self.include_work_dir_outputs = True

    def test_simplest_command(self):
        self.include_work_dir_outputs = False
        self.__assert_command_is( MOCK_COMMAND_LINE )

    def test_shell_commands(self):
        # Dependency shell commands are prepended to the tool command line.
        self.include_work_dir_outputs = False
        dep_commands = [". /opt/galaxy/tools/bowtie/default/env.sh"]
        self.job_wrapper.dependency_shell_commands = dep_commands
        self.__assert_command_is( "%s; %s" % (dep_commands[0], MOCK_COMMAND_LINE) )

    def test_remote_dependency_resolution(self):
        # With remote resolution the dependency commands are NOT injected
        # into the local command line.
        self.include_work_dir_outputs = False
        dep_commands = [". /opt/galaxy/tools/bowtie/default/env.sh"]
        self.job_wrapper.dependency_shell_commands = dep_commands
        self.__assert_command_is(MOCK_COMMAND_LINE, remote_command_params=dict(dependency_resolution="remote"))

    def test_explicit_local_dependency_resolution(self):
        self.include_work_dir_outputs = False
        dep_commands = [". /opt/galaxy/tools/bowtie/default/env.sh"]
        self.job_wrapper.dependency_shell_commands = dep_commands
        self.__assert_command_is("%s; %s" % (dep_commands[0], MOCK_COMMAND_LINE),
                                 remote_command_params=dict(dependency_resolution="local"))

    def test_task_prepare_inputs(self):
        # Task-splitting input preparation commands run before the tool.
        self.include_work_dir_outputs = False
        self.job_wrapper.prepare_input_files_cmds = ["/opt/split1", "/opt/split2"]
        self.__assert_command_is( "/opt/split1; /opt/split2; %s" % MOCK_COMMAND_LINE )

    def test_workdir_outputs(self):
        # Working-directory outputs are copied after the tool runs while
        # preserving the tool's exit code.
        self.include_work_dir_outputs = True
        self.workdir_outputs = [("foo", "bar")]
        self.__assert_command_is( '%s; return_code=$?; if [ -f foo ] ; then cp foo bar ; fi; sh -c "exit $return_code"' % MOCK_COMMAND_LINE )

    def test_set_metadata_skipped_if_unneeded(self):
        self.include_metadata = True
        self.include_work_dir_outputs = False
        self.__assert_command_is( MOCK_COMMAND_LINE )

    def test_set_metadata(self):
        self._test_set_metadata()

    def test_strips_trailing_semicolons(self):
        self.job_wrapper.command_line = "%s;" % MOCK_COMMAND_LINE
        self._test_set_metadata()

    def _test_set_metadata(self):
        # Shared body: the metadata command runs after the tool and the
        # tool's exit code is restored by the trailing sh -c.
        self.include_metadata = True
        self.include_work_dir_outputs = False
        self.job_wrapper.metadata_line = TEST_METADATA_LINE
        expected_command = '%s; return_code=$?; cd %s; %s; sh -c "exit $return_code"' % (MOCK_COMMAND_LINE, getcwd(), TEST_METADATA_LINE)
        self.__assert_command_is( expected_command )

    def test_empty_metadata(self):
        """
        As produced by TaskWrapper.
        """
        self.include_metadata = True
        self.include_work_dir_outputs = False
        self.job_wrapper.metadata_line = ' '
        # Empty metadata command do not touch command line.
        expected_command = '%s' % (MOCK_COMMAND_LINE)
        self.__assert_command_is( expected_command )

    def test_metadata_kwd_defaults(self):
        configured_kwds = self.__set_metadata_with_kwds()
        assert configured_kwds['exec_dir'] == getcwd()
        assert configured_kwds['tmp_dir'] == self.job_wrapper.working_directory
        assert configured_kwds['dataset_files_path'] == TEST_FILES_PATH
        assert configured_kwds['output_fnames'] == ['output1']

    def test_metadata_kwds_overrride(self):
        configured_kwds = self.__set_metadata_with_kwds(
            exec_dir="/path/to/remote/galaxy",
            tmp_dir="/path/to/remote/staging/directory/job1",
            dataset_files_path="/path/to/remote/datasets/",
            output_fnames=['/path/to/remote_output1'],
        )
        assert configured_kwds['exec_dir'] == "/path/to/remote/galaxy"
        assert configured_kwds['tmp_dir'] == "/path/to/remote/staging/directory/job1"
        assert configured_kwds['dataset_files_path'] == "/path/to/remote/datasets/"
        assert configured_kwds['output_fnames'] == ['/path/to/remote_output1']

    def __set_metadata_with_kwds(self, **kwds):
        # Build the command (optionally with remote metadata kwargs) and
        # return the kwargs the mock recorded from setup_external_metadata.
        self.include_metadata = True
        self.include_work_dir_outputs = False
        self.job_wrapper.metadata_line = TEST_METADATA_LINE
        if kwds:
            self.__command(remote_command_params=dict(metadata_kwds=kwds))
        else:
            self.__command()
        return self.job_wrapper.configured_external_metadata_kwds

    def __assert_command_is(self, expected_command, **command_kwds):
        command = self.__command(**command_kwds)
        self.assertEqual(command, expected_command)

    def __command(self, **extra_kwds):
        # Delegate to build_command with the flags configured by the test.
        kwds = dict(
            runner=self.runner,
            job_wrapper=self.job_wrapper,
            include_metadata=self.include_metadata,
            include_work_dir_outputs=self.include_work_dir_outputs,
            **extra_kwds
        )
        return build_command(**kwds)
class MockJobWrapper(object):
    """Minimal stand-in for a Galaxy job wrapper: exposes exactly the
    attributes and methods build_command reads."""

    def __init__(self):
        # Seed every attribute the command factory may touch.
        defaults = dict(
            write_version_cmd=None,
            command_line=MOCK_COMMAND_LINE,
            dependency_shell_commands=[],
            metadata_line=None,
            configured_external_metadata_kwds=None,
            working_directory="job1",
            prepare_input_files_cmds=None,
        )
        self.__dict__.update(defaults)

    def get_command_line(self):
        """Return the raw tool command line."""
        return self.command_line

    @property
    def requires_setting_metadata(self):
        """Metadata is required whenever a metadata line was configured."""
        return self.metadata_line is not None

    def setup_external_metadata(self, *args, **kwds):
        """Record the keyword arguments and return the canned metadata line."""
        self.configured_external_metadata_kwds = kwds
        return self.metadata_line

    def get_output_fnames(self):
        """Single fixed output name."""
        return ["output1"]
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/test/unit/jobs/test_command_factory.py | Python | gpl-3.0 | 6,344 | [
"Bowtie",
"Galaxy"
] | 03bf27c23d9f2b00cc7a4b3e1d1be27a457b36805a909fed849619268c7c8d8c |
import ocl
import pyocl
import camvtk
import time
import vtk
import datetime
import math
if __name__ == "__main__":
    # Demo: drop a ball cutter over an STL surface on a grid of CL points
    # and render the result with VTK.
    print(ocl.version())
    myscreen = camvtk.VTKScreen()
    #stl = camvtk.STLSurf("../stl/gnu_tux_mod.stl")
    stl = camvtk.STLSurf("../stl/mount_rush.stl")
    #stl = camvtk.STLSurf("../stl/pycam-textbox.stl")
    myscreen.addActor(stl)
    stl.SetWireframe()
    stl.SetColor((0.5,0.5,0.5))
    polydata = stl.src.GetOutput()
    s = ocl.STLSurf()
    camvtk.vtkPolyData2OCLSTL(polydata, s)
    print("STL surface with", s.size(), "triangles read")
    # define a cutter
    length=5
    cutter = ocl.BallCutter(15.4321, length)
    #cutter = ocl.CylCutter(1.123, length)
    #cutter = ocl.BullCutter(1.123, 0.2, length)
    #cutter = ocl.ConeCutter(0.43, math.pi/7, length)
    print(cutter)
    # define grid of CL-points covering the model, below the surface
    minx=-42
    dx=0.5
    maxx=47
    miny=-27
    dy=0.2
    maxy=20
    z=-55
    clpoints = pyocl.CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
    print("generated grid with", len(clpoints)," CL-points")
    # batchdropcutter
    bdc1 = ocl.BatchDropCutter()
    bdc1.setSTL(s)
    bdc1.setCutter(cutter)
    # push the points to ocl
    for p in clpoints:
        bdc1.appendPoint(p)
    # run the actual calculation
    t_before = time.time()
    bdc1.run()
    t_after = time.time()
    calctime = t_after-t_before
    print(" done in ", calctime," s" )
    # get back results from ocl
    clpts = bdc1.getCLPoints()
    # draw the results
    print("rendering...",)
    camvtk.drawCLPointCloud(myscreen, clpts)
    print("done")
    myscreen.camera.SetPosition(25, 23, 15)
    myscreen.camera.SetFocalPoint(4, 5, 0)
    # ocl text
    t = camvtk.Text()
    t.SetText("OpenCAMLib")
    t.SetPos( (myscreen.width-200, myscreen.height-30) )
    myscreen.addActor( t)
    # other text
    t2 = camvtk.Text()
    stltext = "%i triangles\n%i CL-points\n%0.1f seconds" % (s.size(), len(clpts), calctime)
    t2.SetText(stltext)
    t2.SetPos( (50, myscreen.height-100) )
    myscreen.addActor( t2)
    t3 = camvtk.Text()
    ctext = "Cutter: %s" % ( str(cutter) )
    t3.SetText(ctext)
    t3.SetPos( (50, myscreen.height-150) )
    myscreen.addActor( t3)
    myscreen.render()
    myscreen.iren.Start()
    # Bug fix: raw_input() is Python 2 only; this script already uses
    # Python 3 style multi-argument print() calls, so raw_input would raise
    # NameError. Use the Python 3 builtin input() instead.
    input("Press Enter to terminate")
| aewallin/opencamlib | examples/python/drop-cutter/batchdropcutter_mtrush.py | Python | lgpl-2.1 | 2,392 | [
"VTK"
] | 74889aa453e7703557c641a1f4cb64aff046ca351f1709cc50ce122d08ee8d96 |
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides testing capabilities and customisations specific to Iris.
.. note:: This module needs to control the matplotlib backend, so it
**must** be imported before ``matplotlib.pyplot``.
The primary class for this module is :class:`IrisTest`.
By default, this module sets the matplotlib backend to "agg". But when
this module is imported it checks ``sys.argv`` for the flag "-d". If
found, it is removed from ``sys.argv`` and the matplotlib backend is
switched to "tkagg" to allow the interactive visual inspection of
graphical test results.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import codecs
import collections
import contextlib
import difflib
import filecmp
import functools
import gzip
import hashlib
import inspect
import json
import io
import logging
import os
import os.path
import shutil
import subprocess
import sys
import unittest
import warnings
import xml.dom.minidom
import zlib
try:
from unittest import mock
except ImportError:
import mock
import filelock
import numpy as np
import numpy.ma as ma
import iris.cube
import iris.config
import iris.util
# Test for availability of matplotlib.
# (And remove matplotlib as an iris.tests dependency.)
try:
import matplotlib
matplotlib.use('agg')
import matplotlib.testing.compare as mcompare
import matplotlib.pyplot as plt
except ImportError:
MPL_AVAILABLE = False
else:
MPL_AVAILABLE = True
try:
from osgeo import gdal
except ImportError:
GDAL_AVAILABLE = False
else:
GDAL_AVAILABLE = True
try:
import iris_grib
GRIB_AVAILABLE = True
from iris_grib.message import GribMessage
except ImportError:
try:
import gribapi
GRIB_AVAILABLE = True
from iris.fileformats.grib.message import GribMessage
except ImportError:
GRIB_AVAILABLE = False
try:
import iris_sample_data
except ImportError:
SAMPLE_DATA_AVAILABLE = False
else:
SAMPLE_DATA_AVAILABLE = True
try:
import nc_time_axis
NC_TIME_AXIS_AVAILABLE = True
except ImportError:
NC_TIME_AXIS_AVAILABLE = False
#: Basepath for test results.
_RESULT_PATH = os.path.join(os.path.dirname(__file__), 'results')

# Optional command-line flags, consumed (removed from sys.argv) at import
# time so unittest does not see them.
if '--data-files-used' in sys.argv:
    sys.argv.remove('--data-files-used')
    fname = '/var/tmp/all_iris_test_resource_paths.txt'
    print('saving list of files used by tests to %s' % fname)
    _EXPORT_DATAPATHS_FILE = open(fname, 'w')
else:
    _EXPORT_DATAPATHS_FILE = None

if '--create-missing' in sys.argv:
    sys.argv.remove('--create-missing')
    print('Allowing creation of missing test results.')
    os.environ['IRIS_TEST_CREATE_MISSING'] = 'true'

# A shared logger for use by unit tests
logger = logging.getLogger('tests')

# Whether to display matplotlib output to the screen.
_DISPLAY_FIGURES = False

# '-d' switches matplotlib to an interactive backend for visual inspection.
if (MPL_AVAILABLE and '-d' in sys.argv):
    sys.argv.remove('-d')
    plt.switch_backend('tkagg')
    _DISPLAY_FIGURES = True

# Default tolerance used by image comparison checks.
_DEFAULT_IMAGE_TOLERANCE = 10.0
def main():
    """A wrapper for unittest.main() which adds iris.test specific options to the help (-h) output."""
    if '-h' in sys.argv or '--help' in sys.argv:
        stdout = sys.stdout
        buff = io.StringIO()
        # NB. unittest.main() raises an exception after it's shown the help text
        try:
            # Capture unittest's help output so extra lines can be spliced in.
            sys.stdout = buff
            unittest.main()
        finally:
            # Always restore the real stdout, even though unittest.main()
            # raises SystemExit after printing help.
            sys.stdout = stdout
        lines = buff.getvalue().split('\n')
        # Insert the Iris-specific option descriptions into the help text.
        lines.insert(9, 'Iris-specific options:')
        lines.insert(10, ' -d Display matplotlib figures (uses tkagg).')
        lines.insert(11, ' NOTE: To compare results of failing tests, ')
        lines.insert(12, ' use idiff.py instead')
        lines.insert(13, ' --data-files-used Save a list of files used to a temporary file')
        lines.insert(
            14, ' -m Create missing test results')
        print('\n'.join(lines))
    else:
        unittest.main()
def get_data_path(relative_path):
    """
    Return the absolute path to a data file when given the relative path
    as a string, or sequence of strings.

    If only a gzipped copy of the file exists it is decompressed next to
    it (or into a temporary file when that location is not writable) and
    the path of the decompressed copy is returned.
    """
    if not isinstance(relative_path, six.string_types):
        relative_path = os.path.join(*relative_path)
    data_path = os.path.join(iris.config.TEST_DATA_DIR, relative_path)

    # Optionally record every data path used (--data-files-used flag).
    if _EXPORT_DATAPATHS_FILE is not None:
        _EXPORT_DATAPATHS_FILE.write(data_path + '\n')

    if isinstance(data_path, six.string_types) and not os.path.exists(data_path):
        # if the file is gzipped, ungzip it and return the path of the ungzipped
        # file.
        gzipped_fname = data_path + '.gz'
        if os.path.exists(gzipped_fname):
            with gzip.open(gzipped_fname, 'rb') as gz_fh:
                try:
                    with open(data_path, 'wb') as fh:
                        fh.writelines(gz_fh)
                except IOError:
                    # Put ungzipped data file in a temporary path, since we
                    # can't write to the original path (maybe it is owned by
                    # the system.)
                    _, ext = os.path.splitext(data_path)
                    data_path = iris.util.create_temp_filename(suffix=ext)
                    with open(data_path, 'wb') as fh:
                        fh.writelines(gz_fh)

    return data_path
class IrisTest(unittest.TestCase):
"""A subclass of unittest.TestCase which provides Iris specific testing functionality."""
_assertion_counts = collections.defaultdict(int)
    @classmethod
    def setUpClass(cls):
        # Ensure that the CF profile if turned-off for testing.
        # (Disables any site-configured 'cf_profile' hook for the duration
        # of the test class.)
        iris.site_configuration['cf_profile'] = None
    def _assert_str_same(self, reference_str, test_str, reference_filename, type_comparison_name='Strings'):
        """Fail with a unified diff when the two strings differ."""
        if reference_str != test_str:
            # splitlines(1) keeps line endings so the diff is exact.
            diff = ''.join(difflib.unified_diff(reference_str.splitlines(1), test_str.splitlines(1),
                                                'Reference', 'Test result', '', '', 0))
            self.fail("%s do not match: %s\n%s" % (type_comparison_name, reference_filename, diff))
    @staticmethod
    def get_result_path(relative_path):
        """
        Returns the absolute path to a result file when given the relative path
        as a string, or sequence of strings.
        """
        # A sequence of path components is joined into one relative path.
        if not isinstance(relative_path, six.string_types):
            relative_path = os.path.join(*relative_path)
        return os.path.abspath(os.path.join(_RESULT_PATH, relative_path))
    def result_path(self, basename=None, ext=''):
        """
        Return the full path to a test result, generated from the \
        calling file, class and, optionally, method.

        Optional kwargs :

            * basename - File basename. If omitted, this is \
                generated from the calling method.
            * ext - Appended file extension.

        """
        if ext and not ext.startswith('.'):
            ext = '.' + ext

        # Generate the folder name from the calling file name.
        path = os.path.abspath(inspect.getfile(self.__class__))
        path = os.path.splitext(path)[0]
        sub_path = path.rsplit('iris', 1)[1].split('tests', 1)[1][1:]

        # Generate the file name from the calling function name?
        # Walk up the call stack to the nearest frame whose function name
        # contains 'test_'.
        if basename is None:
            stack = inspect.stack()
            for frame in stack[1:]:
                if 'test_' in frame[3]:
                    basename = frame[3].replace('test_', '')
                    break
        filename = basename + ext

        result = os.path.join(self.get_result_path(''),
                              sub_path.replace('test_', ''),
                              self.__class__.__name__.replace('Test_', ''),
                              filename)
        return result
    def assertCMLApproxData(self, cubes, reference_filename=None, *args,
                            **kwargs):
        """Check each cube's data (approximately) against stored .npy files,
        then check the CML structure (without checksums)."""
        # passes args and kwargs on to approx equal
        if isinstance(cubes, iris.cube.Cube):
            cubes = [cubes]
        if reference_filename is None:
            reference_filename = self.result_path(None, 'cml')
            reference_filename = [self.get_result_path(reference_filename)]
        for i, cube in enumerate(cubes):
            fname = list(reference_filename)
            # don't want the ".cml" for the numpy data file
            if fname[-1].endswith(".cml"):
                fname[-1] = fname[-1][:-4]
            fname[-1] += '.data.%d.npy' % i
            self.assertCubeDataAlmostEqual(cube, fname, *args, **kwargs)
        self.assertCML(cubes, reference_filename, checksum=False)
    def assertCDL(self, netcdf_filename, reference_filename=None, flags='-h'):
        """
        Test that the CDL for the given netCDF file matches the contents
        of the reference file.

        If the environment variable IRIS_TEST_CREATE_MISSING is
        non-empty, the reference file is created if it doesn't exist.

        Args:

        * netcdf_filename:
            The path to the netCDF file.

        Kwargs:

        * reference_filename:
            The relative path (relative to the test results directory).
            If omitted, the result is generated from the calling
            method's name, class, and module using
            :meth:`iris.tests.IrisTest.result_path`.

        * flags:
            Command-line flags for `ncdump`, as either a whitespace
            separated string or an iterable. Defaults to '-h'.

        """
        if reference_filename is None:
            reference_path = self.result_path(None, 'cdl')
        else:
            reference_path = self.get_result_path(reference_filename)

        # Convert the netCDF file to CDL file format.
        cdl_filename = iris.util.create_temp_filename(suffix='.cdl')
        if flags is None:
            flags = []
        elif isinstance(flags, six.string_types):
            flags = flags.split()
        else:
            flags = list(map(str, flags))
        # Requires the external 'ncdump' tool to be on PATH.
        with open(cdl_filename, 'w') as cdl_file:
            subprocess.check_call(['ncdump'] + flags + [netcdf_filename],
                                  stderr=cdl_file, stdout=cdl_file)

        # Ingest the CDL for comparison, excluding first line.
        with open(cdl_filename, 'r') as cdl_file:
            lines = cdl_file.readlines()[1:]

        # Sort the dimensions (except for the first, which can be unlimited).
        # This gives consistent CDL across different platforms.
        sort_key = lambda line: ('UNLIMITED' not in line, line)
        dimension_lines = slice(lines.index('dimensions:\n') + 1,
                                lines.index('variables:\n'))
        lines[dimension_lines] = sorted(lines[dimension_lines], key=sort_key)
        cdl = ''.join(lines)

        os.remove(cdl_filename)
        self._check_same(cdl, reference_path, type_comparison_name='CDL')
    def assertCML(self, cubes, reference_filename=None, checksum=True):
        """
        Test that the CML for the given cubes matches the contents of
        the reference file.

        If the environment variable IRIS_TEST_CREATE_MISSING is
        non-empty, the reference file is created if it doesn't exist.

        Args:

        * cubes:
            Either a Cube or a sequence of Cubes.

        Kwargs:

        * reference_filename:
            The relative path (relative to the test results directory).
            If omitted, the result is generated from the calling
            method's name, class, and module using
            :meth:`iris.tests.IrisTest.result_path`.

        * checksum:
            When True, causes the CML to include a checksum for each
            Cube's data. Defaults to True.

        """
        if isinstance(cubes, iris.cube.Cube):
            cubes = [cubes]
        if reference_filename is None:
            reference_filename = self.result_path(None, 'cml')

        # order/byteorder are excluded so the XML is platform-independent.
        if isinstance(cubes, (list, tuple)):
            xml = iris.cube.CubeList(cubes).xml(checksum=checksum, order=False,
                                                byteorder=False)
        else:
            xml = cubes.xml(checksum=checksum, order=False, byteorder=False)
        reference_path = self.get_result_path(reference_filename)
        self._check_same(xml, reference_path)
    def assertTextFile(self, source_filename, reference_filename, desc="text file"):
        """Check if two text files are the same, printing any diffs."""
        with open(source_filename) as source_file:
            source_text = source_file.readlines()
        with open(reference_filename) as reference_file:
            reference_text = reference_file.readlines()
        if reference_text != source_text:
            diff = ''.join(difflib.unified_diff(reference_text, source_text, 'Reference', 'Test result', '', '', 0))
            self.fail("%s does not match reference file: %s\n%s" % (desc, reference_filename, diff))
    def assertCubeDataAlmostEqual(self, cube, reference_filename, *args, **kwargs):
        """Compare a cube's data against a stored .npy/.npz reference file,
        creating the reference when missing-result creation is enabled.

        Masked cubes are stored/compared as .npz with separate 'data' and
        'mask' arrays; extra args/kwargs are forwarded to
        numpy.testing.assert_array_almost_equal.
        """
        reference_path = self.get_result_path(reference_filename)
        # NOTE(review): _check_reference_file/_ensure_folder are defined
        # further down this class (outside this excerpt); presumably the
        # former returns True when the reference already exists.
        if self._check_reference_file(reference_path):
            kwargs.setdefault('err_msg', 'Reference file %s' % reference_path)

            result = np.load(reference_path)
            if isinstance(result, np.lib.npyio.NpzFile):
                self.assertIsInstance(cube.data, ma.MaskedArray, 'Cube data was not a masked array.')
                # Avoid comparing any non-initialised array data.
                data = cube.data.filled()
                np.testing.assert_array_almost_equal(data, result['data'],
                                                     *args, **kwargs)
                np.testing.assert_array_equal(cube.data.mask, result['mask'])
            else:
                np.testing.assert_array_almost_equal(cube.data, result, *args, **kwargs)
        else:
            self._ensure_folder(reference_path)
            logger.warning('Creating result file: %s', reference_path)
            if isinstance(cube.data, ma.MaskedArray):
                # Avoid recording any non-initialised array data.
                data = cube.data.filled()
                with open(reference_path, 'wb') as reference_file:
                    np.savez(reference_file, data=data, mask=cube.data.mask)
            else:
                with open(reference_path, 'wb') as reference_file:
                    np.save(reference_file, cube.data)
    def assertFilesEqual(self, test_filename, reference_filename):
        """Byte-compare a produced file against its stored reference,
        creating the reference when missing-result creation is enabled."""
        reference_path = self.get_result_path(reference_filename)
        if self._check_reference_file(reference_path):
            fmt = 'test file {!r} does not match reference {!r}.'
            self.assertTrue(filecmp.cmp(test_filename, reference_path),
                            fmt.format(test_filename, reference_path))
        else:
            self._ensure_folder(reference_path)
            logger.warning('Creating result file: %s', reference_path)
            shutil.copy(test_filename, reference_path)
    def assertString(self, string, reference_filename=None):
        """
        Test that `string` matches the contents of the reference file.

        If the environment variable IRIS_TEST_CREATE_MISSING is
        non-empty, the reference file is created if it doesn't exist.

        Args:

        * string:
            The string to check.

        Kwargs:

        * reference_filename:
            The relative path (relative to the test results directory).
            If omitted, the result is generated from the calling
            method's name, class, and module using
            :meth:`iris.tests.IrisTest.result_path`.

        """
        if reference_filename is None:
            reference_path = self.result_path(None, 'txt')
        else:
            reference_path = self.get_result_path(reference_filename)
        self._check_same(string, reference_path,
                         type_comparison_name='Strings')
    def assertRepr(self, obj, reference_filename):
        """Check repr(obj) against a stored text reference."""
        self.assertString(repr(obj), reference_filename)
def _check_same(self, item, reference_path, type_comparison_name='CML'):
    """
    Compare the string `item` against the UTF-8 text stored at
    `reference_path`, creating the reference file instead when it is
    missing and creation is enabled.
    """
    if self._check_reference_file(reference_path):
        # Load the whole reference file and decode it in one go.
        with open(reference_path, 'rb') as reference_fh:
            reference = reference_fh.read().decode('utf-8')
        self._assert_str_same(reference, item, reference_path,
                              type_comparison_name)
    else:
        self._ensure_folder(reference_path)
        logger.warning('Creating result file: %s', reference_path)
        # Persist the new reference as UTF-8 bytes.
        with open(reference_path, 'wb') as reference_fh:
            reference_fh.write(item.encode('utf-8'))
def assertXMLElement(self, obj, reference_filename):
    """
    Calls the xml_element method given obj and asserts the result is the same as the test file.
    """
    doc = xml.dom.minidom.Document()
    doc.appendChild(obj.xml_element(doc))
    # Pretty-print so the output is stable and diffable against the
    # stored reference XML.
    pretty_xml = doc.toprettyxml(indent="  ")
    reference_path = self.get_result_path(reference_filename)
    self._check_same(pretty_xml, reference_path,
                     type_comparison_name='XML')
def assertArrayEqual(self, a, b, err_msg=''):
    """Assert two array-like values are elementwise exactly equal."""
    np.testing.assert_array_equal(a, b, err_msg=err_msg)
def _assertMaskedArray(self, assertion, a, b, strict, **kwargs):
    """
    Compare two masked arrays using `assertion`: masks must always be
    identical; data is compared in full (strict=True) or only over the
    unmasked elements (strict=False).
    """
    # Define helper function to extract unmasked values as a 1d
    # array.
    def unmasked_data_as_1d_array(array):
        if array.ndim == 0:
            if array.mask:
                # A fully-masked scalar contributes no data values.
                data = np.array([])
            else:
                data = np.array([array.data])
        else:
            data = array.data[~ma.getmaskarray(array)]
        return data

    # Compare masks. This will also check that the array shapes
    # match, which is not tested when comparing unmasked values if
    # strict is False.
    a_mask, b_mask = ma.getmaskarray(a), ma.getmaskarray(b)
    np.testing.assert_array_equal(a_mask, b_mask)

    if strict:
        assertion(a.data, b.data, **kwargs)
    else:
        assertion(unmasked_data_as_1d_array(a),
                  unmasked_data_as_1d_array(b),
                  **kwargs)
def assertMaskedArrayEqual(self, a, b, strict=False):
    """
    Check that masked arrays are equal. This requires the
    unmasked values and masks to be identical.

    Args:

    * a, b (array-like):
        Two arrays to compare.

    Kwargs:

    * strict (bool):
        If True, perform a complete mask and data array equality check.
        If False (default), the data array equality considers only unmasked
        elements.

    """
    # Delegate to the shared masked-array comparison helper.
    self._assertMaskedArray(np.testing.assert_array_equal, a, b, strict)
def assertArrayAlmostEqual(self, a, b, decimal=6):
    """Assert two array-like values are equal to `decimal` places."""
    np.testing.assert_array_almost_equal(a, b, decimal=decimal)
def assertMaskedArrayAlmostEqual(self, a, b, decimal=6, strict=False):
    """
    Check that masked arrays are almost equal. This requires the
    masks to be identical, and the unmasked values to be almost
    equal.

    Args:

    * a, b (array-like):
        Two arrays to compare.

    Kwargs:

    * strict (bool):
        If True, perform a complete mask and data array equality check.
        If False (default), the data array equality considers only unmasked
        elements.

    * decimal (int):
        Equality tolerance level for
        :meth:`numpy.testing.assert_array_almost_equal`, with the meaning
        'abs(desired-actual) < 0.5 * 10**(-decimal)'

    """
    # Delegate to the shared masked-array comparison helper.
    self._assertMaskedArray(np.testing.assert_array_almost_equal, a, b,
                            strict, decimal=decimal)
def assertArrayAllClose(self, a, b, rtol=1.0e-7, atol=0.0, **kwargs):
    """
    Check arrays are equal, within given relative + absolute tolerances.

    Args:

    * a, b (array-like):
        Two arrays to compare.

    Kwargs:

    * rtol, atol (float):
        Relative and absolute tolerances to apply.

    Any additional kwargs are passed to numpy.testing.assert_allclose.

    Performs pointwise toleranced comparison, and raises an assertion if
    the two are not equal 'near enough'.
    For full details see underlying routine numpy.testing.assert_allclose.

    """
    np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, **kwargs)
@contextlib.contextmanager
def temp_filename(self, suffix=''):
    """
    Context manager yielding a temporary filename (created via
    ``iris.util.create_temp_filename``); the file is removed on exit.
    """
    filename = iris.util.create_temp_filename(suffix)
    try:
        yield filename
    finally:
        # NOTE(review): os.remove will raise if the body already deleted
        # the file -- presumably callers never do; confirm.
        os.remove(filename)
def file_checksum(self, file_path):
    """
    Return the CRC-32 checksum (via :func:`zlib.crc32`) of the file
    at `file_path`.
    """
    with open(file_path, "rb") as in_file:
        content = in_file.read()
    return zlib.crc32(content)
def _unique_id(self):
    """
    Returns the unique ID for the current assertion.

    The ID is composed of two parts: a unique ID for the current test
    (which is itself composed of the module, class, and test names), and
    a sequential counter (specific to the current test) that is incremented
    on each call.

    For example, calls from a "test_tx" routine followed by a "test_ty"
    routine might result in::
        test_plot.TestContourf.test_tx.0
        test_plot.TestContourf.test_tx.1
        test_plot.TestContourf.test_tx.2
        test_plot.TestContourf.test_ty.0

    """
    # Obtain a consistent ID for the current test.

    # NB. unittest.TestCase.id() returns different values depending on
    # whether the test has been run explicitly, or via test discovery.
    # For example:
    #   python tests/test_plot.py => '__main__.TestContourf.test_tx'
    #   ird -t => 'iris.tests.test_plot.TestContourf.test_tx'
    bits = self.id().split('.')
    if bits[0] == '__main__':
        # Rebuild the dotted path from the file location, walking up the
        # directory tree until a known package root is reached.
        floc = sys.modules['__main__'].__file__
        path, file_name = os.path.split(os.path.abspath(floc))
        bits[0] = os.path.splitext(file_name)[0]
        folder, location = os.path.split(path)
        bits = [location] + bits
        while location not in ['iris', 'example_tests']:
            folder, location = os.path.split(folder)
            bits = [location] + bits
    test_id = '.'.join(bits)

    # Derive the sequential assertion ID within the test
    assertion_id = self._assertion_counts[test_id]
    self._assertion_counts[test_id] += 1

    return test_id + '.' + str(assertion_id)
def _check_reference_file(self, reference_path):
reference_exists = os.path.isfile(reference_path)
if not (reference_exists or
os.environ.get('IRIS_TEST_CREATE_MISSING')):
msg = 'Missing test result: {}'.format(reference_path)
raise AssertionError(msg)
return reference_exists
def _ensure_folder(self, path):
    """
    Ensure the parent directory of `path` exists, creating it (and any
    missing ancestors) if necessary.
    """
    dir_path = os.path.dirname(path)
    # A bare filename has no directory component; in that case there is
    # nothing to create -- and ``os.makedirs('')`` would raise.
    if dir_path and not os.path.exists(dir_path):
        logger.warning('Creating folder: %s', dir_path)
        os.makedirs(dir_path)
def _assert_graphic(self):
    """
    Check the hash of the current matplotlib figure matches the expected
    image hash for the current graphic test.

    To create missing image test results, set the IRIS_TEST_CREATE_MISSING
    environment variable before running the tests. This will result in new
    and appropriately "<hash>.png" image files being generated in the image
    output directory, and the imagerepo.json file being updated.

    """
    dev_mode = os.environ.get('IRIS_TEST_CREATE_MISSING')
    unique_id = self._unique_id()
    repo_fname = os.path.join(os.path.dirname(__file__),
                              'results', 'imagerepo.json')
    # imagerepo.json maps each test's unique ID to a list of URIs whose
    # basenames are the acceptable SHA1 hashes.
    with open(repo_fname, 'rb') as fi:
        repo = json.load(codecs.getreader('utf-8')(fi))
    try:
        #: The path where the images generated by the tests should go.
        image_output_directory = os.path.join(os.path.dirname(__file__),
                                              'result_image_comparison')
        if not os.access(image_output_directory, os.W_OK):
            # Fall back to the current working directory if the package
            # directory is not writable.
            if not os.access(os.getcwd(), os.W_OK):
                raise IOError('Write access to a local disk is required '
                              'to run image tests. Run the tests from a '
                              'current working directory you have write '
                              'access to to avoid this issue.')
            else:
                image_output_directory = os.path.join(
                    os.getcwd(), 'iris_image_test_output')
        result_fname = os.path.join(image_output_directory,
                                    'result-' + unique_id + '.png')

        if not os.path.isdir(os.path.dirname(result_fname)):
            # Handle race-condition where the directories are
            # created sometime between the check above and the
            # creation attempt below.
            try:
                os.makedirs(os.path.dirname(result_fname))
            except OSError as err:
                # Don't care about "File exists"
                if err.errno != 17:
                    raise

        def _save_figure_hash():
            # Save the current figure and return its SHA1 digest object.
            plt.gcf().savefig(result_fname)
            # Determine the test result image hash using sha1.
            with open(result_fname, 'rb') as fi:
                sha1 = hashlib.sha1(fi.read())
            return sha1

        def _create_missing():
            # Register the freshly-generated image as a new expected
            # result: move it to its hash-named file and record the URI
            # in imagerepo.json.
            fname = sha1.hexdigest() + '.png'
            base_uri = ('https://scitools.github.io/test-images-scitools/'
                        'image_files/{}')
            uri = base_uri.format(fname)
            hash_fname = os.path.join(image_output_directory, fname)
            uris = repo.setdefault(unique_id, [])
            uris.append(uri)
            print('Creating image file: {}'.format(hash_fname))
            os.rename(result_fname, hash_fname)
            msg = 'Creating imagerepo entry: {} -> {}'
            print(msg.format(unique_id, uri))
            with open(repo_fname, 'wb') as fo:
                json.dump(repo, codecs.getwriter('utf-8')(fo), indent=4,
                          sort_keys=True)

        sha1 = _save_figure_hash()

        if unique_id not in repo:
            if dev_mode:
                _create_missing()
            else:
                emsg = 'Missing image test result: {}.'
                raise ValueError(emsg.format(unique_id))
        else:
            uris = repo[unique_id]
            # Cherry-pick the registered expected hashes from the
            # test case uri/s.
            expected = [os.path.splitext(os.path.basename(uri))[0]
                        for uri in uris]
            if sha1.hexdigest() not in expected:
                # This can be an accidental failure, unusual, but it occurs
                # https://github.com/SciTools/iris/issues/2195
                # retry once, in case it passes second time round.
                sha1 = _save_figure_hash()
                if sha1.hexdigest() not in expected:
                    if dev_mode:
                        _create_missing()
                    else:
                        emsg = 'Actual SHA1 {} not in expected {} for test {}.'
                        emsg = emsg.format(sha1.hexdigest(), expected,
                                           unique_id)
                        if _DISPLAY_FIGURES:
                            print('Image comparison would have failed. '
                                  'Message: %s' % emsg)
                        else:
                            raise ValueError('Image comparison failed. '
                                             'Message: {}'.format(emsg))
            else:
                # There is no difference between the actual and expected
                # result, so remove the actual result file.
                os.remove(result_fname)
        if _DISPLAY_FIGURES:
            plt.show()
    finally:
        plt.close()
def check_graphic(self, tol=None):
    """
    Checks that the image hash for the current matplotlib figure matches
    the expected image hash for the current test.

    NOTE(review): `tol` is accepted but not used by this implementation;
    presumably retained for API compatibility -- confirm with callers.

    """
    fname = os.path.join(os.path.dirname(__file__),
                         'results', 'imagerepo.lock')
    lock = filelock.FileLock(fname)
    # The imagerepo.json file is a critical resource, so ensure thread
    # safe read/write behaviour via platform independent file locking.
    with lock.acquire(timeout=600):
        self._assert_graphic()
def _remove_testcase_patches(self):
    """Helper to remove per-testcase patches installed by :meth:`patch`."""
    # Stop every patch recorded for this testcase.
    for installed_patch in self.testcase_patches:
        installed_patch.stop()
    # Forget them all, ready for the next test.
    self.testcase_patches.clear()
def patch(self, *args, **kwargs):
    """
    Install a mock.patch, to be removed after the current test.

    The patch is created with mock.patch(*args, **kwargs).

    Returns:
        The substitute object returned by patch.start().

    For example::

        mock_call = self.patch('module.Class.call', return_value=1)
        module_Class_instance.call(3, 4)
        self.assertEqual(mock_call.call_args_list, [mock.call(3, 4)])

    """
    # Make the new patch and start it.
    patch = mock.patch(*args, **kwargs)
    start_result = patch.start()

    # Create the per-testcases control variable if it does not exist.
    # NOTE: this mimics a setUp method, but continues to work when a
    # subclass defines its own setUp.
    if not hasattr(self, 'testcase_patches'):
        self.testcase_patches = {}

    # When installing the first patch, schedule remove-all at cleanup.
    if not self.testcase_patches:
        self.addCleanup(self._remove_testcase_patches)

    # Record the new patch and start object for reference.
    self.testcase_patches[patch] = start_result

    # Return patch replacement object.
    return start_result
def assertArrayShapeStats(self, result, shape, mean, std_dev):
    """
    Assert that the result, a cube, has the provided shape and that the
    mean and standard deviation of the data array are also as provided.
    Thus build confidence that a cube processing operation, such as a
    cube.regrid, has maintained its behaviour.

    """
    self.assertEqual(result.shape, shape)
    # Statistics are compared to 5 decimal places.
    self.assertAlmostEqual(result.data.mean(), mean, places=5)
    self.assertAlmostEqual(result.data.std(), std_dev, places=5)
# Module-level convenience alias for IrisTest.get_result_path.
get_result_path = IrisTest.get_result_path
class GraphicsTest(IrisTest):
    """IrisTest that closes all matplotlib figures before and after each test."""

    def setUp(self):
        # Make sure we have no unclosed plots from previous tests before
        # generating this one.
        if MPL_AVAILABLE:
            plt.close('all')

    def tearDown(self):
        # If a plotting test bombs out it can leave the current figure
        # in an odd state, so we make sure it's been disposed of.
        if MPL_AVAILABLE:
            plt.close('all')
class TestGribMessage(IrisTest):
    """IrisTest with assertions for comparing GRIB message contents."""

    def assertGribMessageContents(self, filename, contents):
        """
        Evaluate whether all messages in a GRIB2 file contain the provided
        contents.

        * filename (string)
            The path on disk of an existing GRIB file

        * contents
            An iterable of GRIB message keys and expected values.

        """
        messages = GribMessage.messages_from_filename(filename)
        for message in messages:
            for element in contents:
                section, key, val = element
                self.assertEqual(message.sections[section][key], val)

    def assertGribMessageDifference(self, filename1, filename2, diffs,
                                    skip_keys=(), skip_sections=()):
        """
        Evaluate that the two messages only differ in the ways specified.

        * filename[0|1] (string)
            The path on disk of existing GRIB files

        * diffs
            An dictionary of GRIB message keys and expected diff values:
            {key: (m1val, m2val),...} .

        * skip_keys
            An iterable of key names to ignore during comparison.

        * skip_sections
            An iterable of section numbers to ignore during comparison.

        """
        messages1 = list(GribMessage.messages_from_filename(filename1))
        messages2 = list(GribMessage.messages_from_filename(filename2))
        self.assertEqual(len(messages1), len(messages2))
        for m1, m2 in zip(messages1, messages2):
            m1_sect = set(m1.sections.keys())
            m2_sect = set(m2.sections.keys())

            for missing_section in (m1_sect ^ m2_sect):
                what = ('introduced'
                        if missing_section in m1_sect else 'removed')
                # Assert that an introduced section is in the diffs.
                self.assertIn(missing_section, skip_sections,
                              msg='Section {} {}'.format(missing_section,
                                                         what))

            for section in (m1_sect & m2_sect):
                # For each section, check that the differences are
                # known diffs.
                m1_keys = set(m1.sections[section]._keys)
                m2_keys = set(m2.sections[section]._keys)

                difference = m1_keys ^ m2_keys
                unexpected_differences = difference - set(skip_keys)
                if unexpected_differences:
                    self.fail("There were keys in section {} which \n"
                              "weren't in both messages and which weren't "
                              "skipped.\n{}"
                              "".format(section,
                                        ', '.join(unexpected_differences)))

                # NB. '-' binds tighter than '&', so this evaluates as
                # m1_keys & (m2_keys - skip), which equals
                # (m1_keys & m2_keys) - skip.
                keys_to_compare = m1_keys & m2_keys - set(skip_keys)

                for key in keys_to_compare:
                    m1_value = m1.sections[section][key]
                    m2_value = m2.sections[section][key]
                    msg = '{} {} != {}'
                    if key not in diffs:
                        # We have a key which we expect to be the same for
                        # both messages.
                        if isinstance(m1_value, np.ndarray):
                            # A large tolerance appears to be required for
                            # gribapi 1.12, but not for 1.14.
                            self.assertArrayAlmostEqual(m1_value, m2_value,
                                                        decimal=2)
                        else:
                            self.assertEqual(m1_value, m2_value,
                                             msg=msg.format(key, m1_value,
                                                            m2_value))
                    else:
                        # We have a key which we expect to be different
                        # for each message.
                        self.assertEqual(m1_value, diffs[key][0],
                                         msg=msg.format(key, m1_value,
                                                        diffs[key][0]))

                        self.assertEqual(m2_value, diffs[key][1],
                                         msg=msg.format(key, m2_value,
                                                        diffs[key][1]))
def skip_data(fn):
    """
    Decorator to choose whether to run tests, based on the availability of
    external data.

    Example usage:
        @skip_data
        class MyDataTests(tests.IrisTest):
            ...

    """
    # Data is unavailable if the configured directory is unset/missing,
    # or testing without data has been explicitly requested.
    no_data = (not iris.config.TEST_DATA_DIR
               or not os.path.isdir(iris.config.TEST_DATA_DIR)
               or os.environ.get('IRIS_TEST_NO_DATA'))

    skip = unittest.skipIf(
        condition=no_data,
        reason='Test(s) require external data.')

    return skip(fn)
def skip_gdal(fn):
    """
    Decorator to choose whether to run tests, based on the availability of the
    GDAL library.

    Example usage:
        @skip_gdal
        class MyGeoTiffTests(test.IrisTest):
            ...

    """
    skip = unittest.skipIf(
        condition=not GDAL_AVAILABLE,
        reason="Test requires 'gdal'.")
    return skip(fn)
def skip_plot(fn):
    """
    Decorator to choose whether to run tests, based on the availability of the
    matplotlib library.

    Example usage:
        @skip_plot
        class MyPlotTests(test.GraphicsTest):
            ...

    """
    skip = unittest.skipIf(
        condition=not MPL_AVAILABLE,
        reason='Graphics tests require the matplotlib library.')
    return skip(fn)
# Ready-made skip decorators for optional dependencies.
skip_grib = unittest.skipIf(not GRIB_AVAILABLE, 'Test(s) require "gribapi", '
                                                'which is not available.')

skip_sample_data = unittest.skipIf(not SAMPLE_DATA_AVAILABLE,
                                   ('Test(s) require "iris_sample_data", '
                                    'which is not available.'))

skip_nc_time_axis = unittest.skipIf(
    not NC_TIME_AXIS_AVAILABLE,
    'Test(s) require "nc_time_axis", which is not available.')
def no_warnings(func):
    """
    Provides a decorator to ensure that there are no warnings raised
    within the test, otherwise the test will fail.
    """
    @functools.wraps(func)
    def wrapped(self, *args, **kwargs):
        # Intercept warnings.warn so any emitted warning is recorded.
        with mock.patch('warnings.warn') as warn:
            result = func(self, *args, **kwargs)
            detail = 'Got unexpected warnings. \n{}'.format(
                warn.call_args_list)
            self.assertEqual(0, warn.call_count, detail)
        return result
    return wrapped
| jswanljung/iris | lib/iris/tests/__init__.py | Python | lgpl-3.0 | 39,426 | [
"NetCDF"
] | 0d896e731f1874c8519fd8a0e4a99fe410b1c3d7e98fdd16ee04b53a8577ece3 |
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Apr 28, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 28, 2012"
import unittest
import os
from pymatgen.core.structure import Molecule
from pymatgen.io.xyz import XYZ
from pymatgen.io.babel import BabelMolAdaptor
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"test_files", "molecules")
try:
import openbabel as ob
import pybel as pb
except ImportError:
pb = None
ob = None
@unittest.skipIf(not (pb and ob), "OpenBabel not present. Skipping...")
class BabelMolAdaptorTest(unittest.TestCase):
    """Round-trip and conversion tests for BabelMolAdaptor."""

    def setUp(self):
        # Methane: carbon at the origin plus four tetrahedral hydrogens.
        positions = [
            [0.000000, 0.000000, 0.000000],
            [0.000000, 0.000000, 1.089000],
            [1.026719, 0.000000, -0.363000],
            [-0.513360, -0.889165, -0.363000],
            [-0.513360, 0.889165, -0.363000],
        ]
        self.mol = Molecule(["C", "H", "H", "H", "H"], positions)

    def test_init(self):
        # pymatgen Molecule -> openbabel and back again.
        adaptor = BabelMolAdaptor(self.mol)
        self.assertEqual(adaptor.openbabel_mol.NumAtoms(), 5)
        roundtrip = BabelMolAdaptor(adaptor.openbabel_mol)
        self.assertEqual(roundtrip.pymatgen_mol.formula, "H4 C1")

    def test_from_file(self):
        pdb_path = os.path.join(test_dir, "Ethane_e.pdb")
        adaptor = BabelMolAdaptor.from_file(pdb_path, "pdb")
        self.assertEqual(adaptor.pymatgen_mol.formula, "H6 C2")

    def test_from_string(self):
        xyz_text = str(XYZ(self.mol))
        adaptor = BabelMolAdaptor.from_string(xyz_text, "xyz")
        self.assertEqual(adaptor.pymatgen_mol.formula, "H4 C1")

    def test_localopt(self):
        # Perturb one C-H bond, then check local optimisation restores
        # bond lengths close to the equilibrium value.
        self.mol[1] = "H", [0, 0, 1.05]
        adaptor = BabelMolAdaptor(self.mol)
        adaptor.localopt()
        optmol = adaptor.pymatgen_mol
        for site in optmol[1:]:
            self.assertAlmostEqual(site.distance(optmol[0]), 1.09216, 2)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| rousseab/pymatgen | pymatgen/io/tests/test_babelio.py | Python | mit | 2,167 | [
"Pybel",
"pymatgen"
] | e7ec0cb1fa8f15aef191a230292464b8ad8cd0005d107631efadfde60e2dd849 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import factory
reload(factory)
from factory import *
# Target platforms for which Mist archives are packed and uploaded.
osVersions = [
    'macosx',
    # 'linux-arm',
    'linux32',
    'linux64',
    'win32',
    'win64'
]
@properties.renderer
def get_short_revision_mist(props):
    """Render the abbreviated (7-char) mist git revision, or None."""
    if 'got_revision' not in props:
        return None
    return props['got_revision']['mist'][:7]
@properties.renderer
def folder_version(props):
    """Render the version with dots replaced by dashes, or None."""
    if 'version' not in props:
        return None
    return props['version'].replace('.', '-')
def mist_factory(branch='master', isPullRequest=False):
    """
    Build a BuildFactory for the Mist wallet: clone, npm install, gulp
    build; for non-PR builds additionally pack, checksum, upload and
    publish a "latest" link per platform in ``osVersions``.
    """
    # NOTE(review): this local shadows the imported `factory` module name.
    factory = BuildFactory()
    for step in [
        Git(
            haltOnFailure=True,
            logEnviron=False,
            repourl='https://github.com/ethereum/mist.git',
            branch=branch,
            mode='full',
            method='copy',
            codebase='mist',
            retry=(5, 3)
        ),
        # Extract the "version" field from package.json into a property.
        SetPropertyFromCommand(
            haltOnFailure=True,
            logEnviron=False,
            name="set-version",
            command='sed -ne "s/.*\\"version\\": \\"\([0-9]*\.[0-9]*\.[0-9]*\)\\".*/\\1/p" package.json',
            property="version"
        ),
        ShellCommand(
            haltOnFailure=True,
            logEnviron=False,
            name="npm-install",
            command=["npm", "install"],
            description="npm installing",
            descriptionDone="npm install"
        ),
        ShellCommand(
            haltOnFailure=True,
            logEnviron=False,
            name="gulp-mist",
            command=["gulp", "mist"],
            description="gulping mist",
            descriptionDone="gulp mist"
        )
    ]: factory.addStep(step)

    if not isPullRequest:
        # Package and publish one archive per target platform.
        for arch in osVersions:
            for step in [
                # Windows targets get a .zip; everything else a .tar.bz2.
                ShellCommand(
                    haltOnFailure=True,
                    logEnviron=False,
                    name="pack-mist-%s" % arch,
                    description='packing %s' % arch,
                    descriptionDone='pack %s' % arch,
                    command=['zip' if arch.startswith('win') else 'tar',
                             '-r' if arch.startswith('win') else '-cjf',
                             Interpolate("Mist-%(prop:version)s-%(kw:arch)s-%(kw:short_revision)s.%(kw:ext)s",
                                         arch=arch,
                                         short_revision=get_short_revision_mist,
                                         ext='zip' if arch.startswith('win') else 'tar.bz2'),
                             Interpolate('Mist-%(kw:arch)s-%(kw:folder_version)s',
                                         arch=arch,
                                         folder_version=folder_version)],
                    workdir='build/dist_mist'
                ),
                SetPropertyFromCommand(
                    haltOnFailure=True,
                    logEnviron=False,
                    name="sha256sum-%s" % arch,
                    command=Interpolate('sha256sum Mist-%(prop:version)s-%(kw:arch)s-%(kw:short_revision)s.%(kw:ext)s | grep -o -w "\w\{64\}"',
                                        arch=arch,
                                        short_revision=get_short_revision_mist,
                                        ext='zip' if arch.startswith('win') else 'tar.bz2'),
                    property='sha256sum-%s' % arch,
                    workdir='build/dist_mist'
                ),
                FileUpload(
                    haltOnFailure=True,
                    name='upload-mist-%s' % arch,
                    slavesrc=Interpolate("dist_mist/Mist-%(prop:version)s-%(kw:arch)s-%(kw:short_revision)s.%(kw:ext)s",
                                         arch=arch,
                                         short_revision=get_short_revision_mist,
                                         ext='zip' if arch.startswith('win') else 'tar.bz2'),
                    masterdest=Interpolate("public_html/builds/%(prop:buildername)s/Mist-%(prop:version)s-%(kw:arch)s-%(kw:short_revision)s.%(kw:ext)s",
                                           arch=arch,
                                           short_revision=get_short_revision_mist,
                                           ext='zip' if arch.startswith('win') else 'tar.bz2'),
                    url=Interpolate("/builds/%(prop:buildername)s/Mist-%(prop:version)s-%(kw:arch)s-%(kw:short_revision)s.%(kw:ext)s",
                                    arch=arch,
                                    short_revision=get_short_revision_mist,
                                    ext='zip' if arch.startswith('win') else 'tar.bz2')
                ),
                # Refresh the "latest" symlink on the master.
                MasterShellCommand(
                    name="clean-latest-link-%s" % arch,
                    description='cleaning latest link %s' % arch,
                    descriptionDone='clean latest link %s' % arch,
                    command=['rm', '-f', Interpolate("public_html/builds/%(prop:buildername)s/Mist-%(kw:arch)s-latest.%(kw:ext)s",
                                                     arch=arch,
                                                     ext='zip' if arch.startswith('win') else 'tar.bz2')]
                ),
                MasterShellCommand(
                    haltOnFailure=True,
                    name="link-latest-%s" % arch,
                    description='linking latest %s' % arch,
                    descriptionDone='link latest %s' % arch,
                    command=['ln', '-sf',
                             Interpolate("Mist-%(prop:version)s-%(kw:arch)s-%(kw:short_revision)s.%(kw:ext)s",
                                         arch=arch,
                                         short_revision=get_short_revision_mist,
                                         ext='zip' if arch.startswith('win') else 'tar.bz2'),
                             Interpolate("public_html/builds/%(prop:buildername)s/Mist-%(kw:arch)s-latest.%(kw:ext)s",
                                         arch=arch,
                                         ext='zip' if arch.startswith('win') else 'tar.bz2')]
                )
            ]: factory.addStep(step)

    return factory
| ethereum/ethereum-buildbot | factories/mist.py | Python | mit | 6,168 | [
"GULP"
] | 9de4d5693eb6ea4223bf185783d0ad2496ae12e3262a149aec149292bff55795 |
# For details on how netCDF4 builds on HDF5:
# http://www.unidata.ucar.edu/software/netcdf/docs/file_format_specifications.html#netcdf_4_spec
import os.path
import warnings
import weakref
from collections import ChainMap, Counter, OrderedDict, defaultdict
from collections.abc import Mapping
import h5py
import numpy as np
from packaging import version
from . import __version__
from .attrs import Attributes
from .dimensions import Dimension, Dimensions
from .utils import Frozen
try:
import h5pyd
except ImportError:
no_h5pyd = True
h5_group_types = (h5py.Group,)
h5_dataset_types = (h5py.Dataset,)
else:
no_h5pyd = False
h5_group_types = (h5py.Group, h5pyd.Group)
h5_dataset_types = (h5py.Dataset, h5pyd.Dataset)
_NC_PROPERTIES = "version=2,h5netcdf=%s,hdf5=%s,h5py=%s" % (
__version__,
h5py.version.hdf5_version,
h5py.__version__,
)
NOT_A_VARIABLE = b"This is a netCDF dimension but not a netCDF variable."
def _join_h5paths(parent_path, child_path):
return "/".join([parent_path.rstrip("/"), child_path.lstrip("/")])
def _name_from_dimension(dim):
# First value in a dimension is the actual dimension scale
# which we'll use to extract the name.
return dim[0].name.split("/")[-1]
class CompatibilityError(Exception):
    """Raised when using features that are not part of the NetCDF4 API."""
def _invalid_netcdf_feature(feature, allow):
if not allow:
msg = (
"{} are not a supported NetCDF feature, and are not allowed by "
"h5netcdf unless invalid_netcdf=True.".format(feature)
)
raise CompatibilityError(msg)
def _transform_1d_boolean_indexers(key):
"""Find and transform 1D boolean indexers to int"""
key = [
np.asanyarray(k).nonzero()[0]
if isinstance(k, (np.ndarray, list)) and type(k[0]) in (bool, np.bool_)
else k
for k in key
]
return tuple(key)
def _expanded_indexer(key, ndim):
"""Expand indexing key to tuple with length equal the number of dimensions."""
# ToDo: restructure this routine to gain more performance
# short circuit, if we have only slice
if key is tuple and all(isinstance(k, slice) for k in key):
return key
# always return tuple and force colons to slices
key = np.index_exp[key]
# dimensions
len_key = len(key)
# find Ellipsis
ellipsis = [i for i, k in enumerate(key) if k is Ellipsis]
if len(ellipsis) > 1:
raise IndexError(
f"an index can only have a single ellipsis ('...'), {len(ellipsis)} given"
)
else:
# expand Ellipsis wherever it is
len_key -= len(ellipsis)
res_dim_cnt = ndim - len_key
res_dims = res_dim_cnt * (slice(None),)
ellipsis = ellipsis[0] if ellipsis else None
# check for correct dimensionality
if ndim and res_dim_cnt < 0:
raise IndexError(
f"too many indices for array: array is {ndim}-dimensional, but {len_key} were indexed"
)
# convert remaining integer indices to slices
key = tuple([slice(k, k + 1) if isinstance(k, int) else k for k in key])
# slices to build resulting key
k1 = slice(ellipsis)
k2 = slice(len_key, None) if ellipsis is None else slice(ellipsis + 1, None)
return key[k1] + res_dims + key[k2]
class BaseVariable(object):
    """
    A netCDF variable backed by an h5py dataset.

    Holds only weak references to its parent group and root file, plus
    the h5 path of the dataset; the live h5py object is re-resolved on
    every access via :attr:`_h5ds`.
    """

    def __init__(self, parent, name, dimensions=None):
        # Weak references avoid reference cycles with the parent group.
        self._parent_ref = weakref.ref(parent)
        self._root_ref = weakref.ref(parent._root)
        self._h5path = _join_h5paths(parent.name, name)
        self._dimensions = dimensions
        self._initialized = True

    @property
    def _parent(self):
        return self._parent_ref()

    @property
    def _root(self):
        return self._root_ref()

    @property
    def _h5ds(self):
        # Always refer to the root file and store not h5py object
        # subclasses:
        return self._root._h5file[self._h5path]

    @property
    def name(self):
        # fix name if _nc4_non_coord_
        return self._h5ds.name.replace("_nc4_non_coord_", "")

    def _lookup_dimensions(self):
        """
        Derive this variable's dimension names from the attributes and
        dimension scales of the underlying h5py dataset.
        """
        attrs = self._h5ds.attrs
        # coordinate variable and dimension, eg. 1D ("time") or 2D string variable
        if (
            "_Netcdf4Coordinates" in attrs
            and attrs.get("CLASS", None) == b"DIMENSION_SCALE"
        ):
            order_dim = {
                value._dimid: key for key, value in self._parent._all_dimensions.items()
            }
            return tuple(
                order_dim[coord_id] for coord_id in attrs["_Netcdf4Coordinates"]
            )

        # normal variable carrying DIMENSION_LIST
        # extract hdf5 file references and get objects name
        if "DIMENSION_LIST" in attrs:
            return tuple(
                self._root._h5file[ref[0]].name.split("/")[-1]
                for ref in list(self._h5ds.attrs.get("DIMENSION_LIST", []))
            )

        # need to use the h5ds name here to distinguish from collision dimensions
        child_name = self._h5ds.name.split("/")[-1]
        if child_name in self._parent._all_dimensions:
            return (child_name,)

        dims = []
        phony_dims = defaultdict(int)
        for axis, dim in enumerate(self._h5ds.dims):
            if len(dim):
                name = _name_from_dimension(dim)
            else:
                # if unlabeled dimensions are found
                if self._root._phony_dims_mode is None:
                    raise ValueError(
                        "variable %r has no dimension scale "
                        "associated with axis %s. \n"
                        "Use phony_dims=%r for sorted naming or "
                        "phony_dims=%r for per access naming."
                        % (self.name, axis, "sort", "access")
                    )
                else:
                    # get current dimension
                    dimsize = self._h5ds.shape[axis]
                    # get dimension names
                    dim_names = [
                        d.name
                        # for phony dims we need to look only in the current group
                        for d in self._parent._all_dimensions.maps[0].values()
                        if d.size == dimsize
                    ]
                    # extract wanted dimension name
                    name = dim_names[phony_dims[dimsize]].split("/")[-1]
                    phony_dims[dimsize] += 1
            dims.append(name)
        return tuple(dims)

    def _attach_dim_scales(self):
        """Attach dimension scales"""
        for n, dim in enumerate(self.dimensions):
            # find and attach dimensions also in parent groups
            self._h5ds.dims[n].attach_scale(self._parent._all_dimensions[dim]._h5ds)

    def _attach_coords(self):
        # Record the dimension ids on multi-dimensional coordinate
        # variables via the _Netcdf4Coordinates attribute.
        dims = self.dimensions
        # find dimensions also in parent groups
        coord_ids = np.array(
            [self._parent._all_dimensions[d]._dimid for d in dims],
            "int32",
        )
        if len(coord_ids) > 1:
            self._h5ds.attrs["_Netcdf4Coordinates"] = coord_ids

    def _ensure_dim_id(self):
        """Set _Netcdf4Dimid"""
        # set _Netcdf4Dimid, use id of first dimension
        # netCDF4 does this when the first variable's data is written
        if self.dimensions and not self._h5ds.attrs.get("_Netcdf4Dimid", False):
            dim = self._parent._all_h5groups[self.dimensions[0]]
            if "_Netcdf4Dimid" in dim.attrs:
                self._h5ds.attrs["_Netcdf4Dimid"] = dim.attrs["_Netcdf4Dimid"]

    def _maybe_resize_dimensions(self, key, value):
        """Resize according to given (expanded) key with respect to variable dimensions"""
        new_shape = ()
        v = None
        for i, dim in enumerate(self.dimensions):
            # is unlimited dimensions (check in all dimensions)
            if self._parent._all_dimensions[dim].isunlimited():
                if key[i].stop is None:
                    # if stop is None, get dimensions from value,
                    # they must match with variable dimension
                    if v is None:
                        v = np.asarray(value)
                    if v.ndim == self.ndim:
                        new_max = max(v.shape[i], self._h5ds.shape[i])
                    elif v.ndim == 0:
                        # for scalars we take the current dimension size (check in all dimensions
                        new_max = self._parent._all_dimensions[dim].size
                    else:
                        raise IndexError("shape of data does not conform to slice")
                else:
                    new_max = max(key[i].stop, self._h5ds.shape[i])
                # resize unlimited dimension if needed but no other variables
                # this is in line with `netcdf4-python` which only resizes
                # the dimension and this variable
                if self._parent._all_dimensions[dim].size < new_max:
                    self._parent.resize_dimension(dim, new_max)
                new_shape += (new_max,)
            else:
                new_shape += (self._parent._all_dimensions[dim].size,)

        # increase variable size if shape is changing
        if self._h5ds.shape != new_shape:
            self._h5ds.resize(new_shape)

    @property
    def dimensions(self):
        # Lazily resolved and cached on first access.
        if self._dimensions is None:
            self._dimensions = self._lookup_dimensions()
        return self._dimensions

    @property
    def shape(self):
        # return actual dimensions sizes, this is in line with netcdf4-python
        return tuple([self._parent._all_dimensions[d].size for d in self.dimensions])

    @property
    def ndim(self):
        return len(self.shape)

    def __len__(self):
        return self.shape[0]

    @property
    def dtype(self):
        return self._h5ds.dtype

    def __array__(self, *args, **kwargs):
        return self._h5ds.__array__(*args, **kwargs)

    def __getitem__(self, key):
        from .legacyapi import Dataset

        if isinstance(self._parent._root, Dataset):
            # this is only for legacyapi
            key = _expanded_indexer(key, self.ndim)
            key = _transform_1d_boolean_indexers(key)

        if getattr(self._root, "decode_vlen_strings", False):
            string_info = h5py.check_string_dtype(self._h5ds.dtype)
            if string_info and string_info.length is None:
                return self._h5ds.asstr()[key]

        # return array padded with fillvalue (both api)
        if self.dtype != str and self.dtype.kind in ["f", "i", "u"]:
            # The dataset may be smaller than the netCDF dimension sizes
            # (unlimited dims); pad the gap with the fill value.
            sdiff = [d0 - d1 for d0, d1 in zip(self.shape, self._h5ds.shape)]
            if sum(sdiff):
                fv = self.dtype.type(self._h5ds.fillvalue)
                padding = [(0, s) for s in sdiff]
                return np.pad(
                    self._h5ds,
                    pad_width=padding,
                    mode="constant",
                    constant_values=fv,
                )[key]
        return self._h5ds[key]

    def __setitem__(self, key, value):
        from .legacyapi import Dataset

        if isinstance(self._parent._root, Dataset):
            # resize on write only for legacyapi
            key = _expanded_indexer(key, self.ndim)
            key = _transform_1d_boolean_indexers(key)
            # resize on write only for legacy API
            self._maybe_resize_dimensions(key, value)
        self._h5ds[key] = value

    @property
    def attrs(self):
        return Attributes(self._h5ds.attrs, self._root._check_valid_netcdf_dtype)

    # repr label; also used by the legacy-API subclass hierarchy.
    _cls_name = "h5netcdf.Variable"

    def __repr__(self):
        if self._parent._root._closed:
            return "<Closed %s>" % self._cls_name
        header = "<%s %r: dimensions %s, shape %s, dtype %s>" % (
            self._cls_name,
            self.name,
            self.dimensions,
            self.shape,
            self.dtype,
        )
        return "\n".join(
            [header]
            + ["Attributes:"]
            + ["    %s: %r" % (k, v) for k, v in self.attrs.items()]
        )
class Variable(BaseVariable):
    """New-API variable: BaseVariable plus read-only views onto the
    storage properties of the underlying h5py dataset."""

    @property
    def chunks(self):
        return self._h5ds.chunks

    @property
    def compression(self):
        return self._h5ds.compression

    @property
    def compression_opts(self):
        return self._h5ds.compression_opts

    @property
    def fletcher32(self):
        return self._h5ds.fletcher32

    @property
    def shuffle(self):
        return self._h5ds.shuffle
class _LazyObjectLookup(Mapping):
    """Ordered mapping that instantiates its values on first access.

    Names are registered up front with :meth:`add`; the corresponding
    object is only built (via ``object_cls(parent, name)``) when first
    requested.  Names mangled with ``_nc4_non_coord_`` (variables clashing
    with a dimension name) are exposed under their public, unmangled name.
    """
    def __init__(self, parent, object_cls):
        # weak reference so this lookup does not keep the parent alive
        self._parent_ref = weakref.ref(parent)
        self._object_cls = object_cls
        self._objects = OrderedDict()
    @property
    def _parent(self):
        return self._parent_ref()
    def __setitem__(self, name, obj):
        self._objects[name] = obj
    def add(self, name):
        # register the name only; the object itself is created lazily
        self._objects[name] = None
    def __iter__(self):
        # fix variable name for variable which clashes with dim name
        for name in self._objects:
            yield name.replace("_nc4_non_coord_", "")
    def __len__(self):
        return len(self._objects)
    def __getitem__(self, key):
        # map the public name back to its mangled form if necessary
        mangled = "_nc4_non_coord_" + key
        if key not in self._objects and mangled in self._objects:
            key = mangled
        obj = self._objects[key]
        if obj is None:
            obj = self._object_cls(self._parent, key)
            self._objects[key] = obj
        return obj
def _netcdf_dimension_but_not_variable(h5py_dataset):
    """Return True if *h5py_dataset* is a dummy dataset backing only a
    dimension scale (its "NAME" attribute contains the netCDF marker)."""
    name_attr = h5py_dataset.attrs.get("NAME", b"")
    return NOT_A_VARIABLE in name_attr
def _unlabeled_dimension_mix(h5py_dataset):
    """Count dimension-scale labels attached to *h5py_dataset*.

    Returns the total number of attached scales.  Raises ``ValueError``
    when only some of the dataset's axes carry labels (a malformed mix of
    labeled and unlabeled dimensions).
    """
    n_labeled = sum(len(axis) for axis in h5py_dataset.dims)
    if n_labeled and n_labeled != h5py_dataset.ndim:
        name = h5py_dataset.name.split("/")[-1]
        raise ValueError(
            "malformed variable {0} has mixing of labeled and "
            "unlabeled dimensions.".format(name)
        )
    return n_labeled
class Group(Mapping):
    """A netCDF4 group: a read-only mapping of subgroup and variable names.

    Iteration yields subgroup names first, then variable names.  Children
    are instantiated lazily via ``_LazyObjectLookup``.
    """
    _variable_cls = Variable
    _dimension_cls = Dimension
    @property
    def _group_cls(self):
        # class used for child groups; overridden by legacyapi subclasses
        return Group
    def __init__(self, parent, name):
        """Wrap the existing HDF5 group *name* located inside *parent*."""
        # weak references avoid reference cycles with parent/root objects
        self._parent_ref = weakref.ref(parent)
        self._root_ref = weakref.ref(parent._root)
        self._h5path = _join_h5paths(parent._h5path, name)
        self._dimensions = Dimensions(self)
        # this map keeps track of all dimensions
        if parent is self:
            self._all_dimensions = ChainMap(self._dimensions)
        else:
            self._all_dimensions = parent._all_dimensions.new_child(self._dimensions)
            self._all_h5groups = parent._all_h5groups.new_child(self._h5group)
        self._variables = _LazyObjectLookup(self, self._variable_cls)
        self._groups = _LazyObjectLookup(self, self._group_cls)
        # initialize phony dimension counter
        if self._root._phony_dims_mode is not None:
            phony_dims = Counter()
        # classify every HDF5 child as subgroup, dimension scale or variable
        for k, v in self._h5group.items():
            if isinstance(v, h5_group_types):
                # add to the groups collection if this is a h5py(d) Group
                # instance
                self._groups.add(k)
            else:
                if v.attrs.get("CLASS") == b"DIMENSION_SCALE":
                    # add dimension and retrieve size
                    self._dimensions.add(k)
                else:
                    if self._root._phony_dims_mode is not None:
                        # check if malformed variable
                        if not _unlabeled_dimension_mix(v):
                            # if unscaled variable, get phony dimensions
                            phony_dims |= Counter(v.shape)
                if not _netcdf_dimension_but_not_variable(v):
                    if isinstance(v, h5_dataset_types):
                        self._variables.add(k)
        # iterate over found phony dimensions and create them
        if self._root._phony_dims_mode is not None:
            # retrieve labeled dims count from already acquired dimensions
            labeled_dims = Counter(
                [d._maxsize for d in self._dimensions.values() if not d._phony]
            )
            for size, cnt in phony_dims.items():
                # only create missing dimensions
                for pcnt in range(labeled_dims[size], cnt):
                    name = self._root._phony_dim_count
                    # for sort mode, we need to add precalculated max_dim_id + 1
                    if self._root._phony_dims_mode == "sort":
                        name += self._root._max_dim_id + 1
                    name = "phony_dim_{}".format(name)
                    self._dimensions[name] = size
        self._initialized = True
    @property
    def _root(self):
        # root File object (dereferenced weakref)
        return self._root_ref()
    @property
    def _parent(self):
        # parent Group object (dereferenced weakref)
        return self._parent_ref()
    @property
    def _h5group(self):
        # Always refer to the root file and store not h5py object
        # subclasses:
        return self._root._h5file[self._h5path]
    @property
    def _track_order(self):
        """True when attribute creation order is both tracked and indexed."""
        # TODO: make a suggestion to upstream to create a property
        # for files to get if they track the order
        # As of version 3.6.0 this property did not exist
        from h5py.h5p import CRT_ORDER_INDEXED, CRT_ORDER_TRACKED
        gcpl = self._h5group.id.get_create_plist()
        attr_creation_order = gcpl.get_attr_creation_order()
        order_tracked = bool(attr_creation_order & CRT_ORDER_TRACKED)
        order_indexed = bool(attr_creation_order & CRT_ORDER_INDEXED)
        return order_tracked and order_indexed
    @property
    def name(self):
        """Full HDF5 path of this group (last component only for legacyapi)."""
        from .legacyapi import Dataset
        name = self._h5group.name
        # get group name only instead of full path for legacyapi
        if isinstance(self._parent._root, Dataset) and len(name) > 1:
            name = name.split("/")[-1]
        return name
    @property
    def dimensions(self):
        """Mapping of the dimensions defined directly in this group."""
        return self._dimensions
    @dimensions.setter
    def dimensions(self, value):
        # only additions are allowed: every existing dimension must appear
        # unchanged in *value*
        for k, v in self._all_dimensions.maps[0].items():
            if k in value:
                if v != value[k]:
                    raise ValueError("cannot modify existing dimension %r" % k)
            else:
                raise ValueError(
                    "new dimensions do not include existing dimension %r" % k
                )
        self._dimensions.update(value)
    def _create_child_group(self, name):
        """Create an immediate child group *name*; fail if the name exists."""
        if name in self:
            raise ValueError("unable to create group %r (name already exists)" % name)
        self._h5group.create_group(name, track_order=self._track_order)
        self._groups[name] = self._group_cls(self, name)
        return self._groups[name]
    def _require_child_group(self, name):
        """Return the child group *name*, creating it when missing."""
        try:
            return self._groups[name]
        except KeyError:
            return self._create_child_group(name)
    def create_group(self, name):
        """Create and return a group, resolving "/"-separated paths.

        Intermediate groups are created as needed; a leading "/" makes the
        path relative to the root group.
        """
        if name.startswith("/"):
            return self._root.create_group(name[1:])
        keys = name.split("/")
        group = self
        for k in keys[:-1]:
            group = group._require_child_group(k)
        return group._create_child_group(keys[-1])
    def _create_child_variable(
        self,
        name,
        dimensions,
        dtype,
        data,
        fillvalue,
        chunks,
        chunking_heuristic,
        **kwargs,
    ):
        """Create variable *name* directly in this group (no path handling).

        Validates the dtype against netCDF4 rules, creates any missing
        dimensions implied by *data*, resolves variable/dimension name
        clashes via the ``_nc4_non_coord_`` prefix, creates the HDF5
        dataset and wires up dimension scales and coordinates.
        """
        if name in self:
            raise ValueError(
                "unable to create variable %r " "(name already exists)" % name
            )
        if data is not None:
            data = np.asarray(data)
            if dtype is None:
                dtype = data.dtype
        if dtype == np.bool_:
            # never warn since h5netcdf has always errored here
            _invalid_netcdf_feature(
                "boolean dtypes",
                self._root.invalid_netcdf,
            )
        else:
            self._root._check_valid_netcdf_dtype(dtype)
        if "scaleoffset" in kwargs:
            _invalid_netcdf_feature(
                "scale-offset filters",
                self._root.invalid_netcdf,
            )
        # maybe create new dimensions depending on data
        if data is not None:
            for d, s in zip(dimensions, data.shape):
                # create new dimensions only ever if
                # - they are not known via parent-groups
                # - they are given in dimensions
                # - it's not a coordinate variable, they will get special handling later
                if d not in self._all_dimensions and d in dimensions and d is not name:
                    # calls _create_dimension
                    self.dimensions[d] = s
        # coordinate variable
        need_dim_adding = False
        if dimensions:
            for dim in dimensions:
                if name not in self._all_dimensions and name == dim:
                    need_dim_adding = True
        # variable <-> dimension name clash
        if name in self._dimensions and (
            name not in dimensions or (len(dimensions) > 1 and dimensions[0] != name)
        ):
            h5name = "_nc4_non_coord_" + name
        else:
            h5name = name
        # get shape from all dimensions
        shape = tuple(self._all_dimensions[d].size for d in dimensions)
        maxshape = tuple(self._all_dimensions[d]._maxsize for d in dimensions if d)
        # If it is passed directly it will change the default compression
        # settings.
        if shape != maxshape:
            kwargs["maxshape"] = maxshape
        warn_h5py_chunking = False
        has_unsized_dims = 0 in shape
        if has_unsized_dims and chunks in {None, True}:
            # TODO: set default to "h5netcdf" in h5netcdf>=1.0, and remove warning
            if chunking_heuristic is None:
                warn_h5py_chunking = True
                chunking_heuristic = "h5py"
            if chunking_heuristic == "h5py":
                # do nothing -> h5py will handle chunks internally
                pass
            elif chunking_heuristic == "h5netcdf":
                chunks = _get_default_chunksizes(shape, dtype)
            else:
                raise ValueError(
                    "got unrecognized value %s for chunking_heuristic argument "
                    '(has to be "h5py" or "h5netcdf")' % chunking_heuristic
                )
        # Clear dummy HDF5 datasets with this name that were created for a
        # dimension scale without a corresponding variable.
        # Keep the references, to re-attach later
        refs = None
        if h5name in self._dimensions and h5name in self._h5group:
            refs = self._dimensions[name]._scale_refs
            self._dimensions[name]._detach_scale()
            del self._h5group[name]
        # create hdf5 variable
        h5ds = self._h5group.create_dataset(
            h5name,
            shape,
            dtype=dtype,
            data=data,
            chunks=chunks,
            fillvalue=fillvalue,
            track_order=self._track_order,
            **kwargs,
        )
        if warn_h5py_chunking:
            h5netcdf_chunks = _get_default_chunksizes(shape, dtype)
            warnings.warn(
                "Using h5py's default chunking with unlimited dimensions can lead "
                "to increased file sizes and degraded performance (using chunks: %r). "
                'Consider passing ``chunking_heuristic="h5netcdf"`` (would give chunks: %r; '
                "default in h5netcdf >= 1.0), or set chunk sizes explicitly. "
                'To silence this warning, pass ``chunking_heuristic="h5py"``. '
                % (h5ds.chunks, h5netcdf_chunks),
                FutureWarning,
            )
        # create variable class instance
        variable = self._variable_cls(self, h5name, dimensions)
        self._variables[h5name] = variable
        # need to put coordinate variable into dimensions
        if need_dim_adding:
            self._dimensions.add(name)
        # Re-create dim-scale and re-attach references to coordinate variable.
        if name in self._all_dimensions and h5name in self._h5group:
            self._all_dimensions[name]._create_scale()
            if refs is not None:
                self._all_dimensions[name]._attach_scale(refs)
        # In case of data variables attach dim_scales and coords.
        if name in self.variables and h5name not in self._dimensions:
            variable._attach_dim_scales()
            variable._attach_coords()
        # This is a bit of a hack, netCDF4 attaches _Netcdf4Dimid to every variable
        # when a variable is first written to, after variable creation.
        # Here we just attach it to every variable on creation.
        # Todo: get this consistent with netcdf-c/netcdf4-python
        variable._ensure_dim_id()
        if fillvalue is not None:
            value = variable.dtype.type(fillvalue)
            variable.attrs._h5attrs["_FillValue"] = value
        return variable
    def create_variable(
        self,
        name,
        dimensions=(),
        dtype=None,
        data=None,
        fillvalue=None,
        chunks=None,
        chunking_heuristic=None,
        **kwargs,
    ):
        """Create and return a variable, resolving "/"-separated paths.

        Intermediate groups are created as needed; the actual work happens
        in :meth:`_create_child_variable`.
        """
        # if root-variable
        if name.startswith("/"):
            return self._root.create_variable(
                name[1:],
                dimensions,
                dtype,
                data,
                fillvalue,
                chunks,
                chunking_heuristic,
                **kwargs,
            )
        # else split groups and iterate child groups
        keys = name.split("/")
        group = self
        for k in keys[:-1]:
            group = group._require_child_group(k)
        return group._create_child_variable(
            keys[-1],
            dimensions,
            dtype,
            data,
            fillvalue,
            chunks,
            chunking_heuristic,
            **kwargs,
        )
    def _get_child(self, key):
        """Look up *key* among variables first, then subgroups."""
        try:
            return self.variables[key]
        except KeyError:
            return self.groups[key]
    def __getitem__(self, key):
        """Resolve a possibly "/"-separated *key* to a variable or group."""
        if key.startswith("/"):
            return self._root[key[1:]]
        keys = key.split("/")
        item = self
        for k in keys:
            item = item._get_child(k)
        return item
    def __iter__(self):
        # subgroup names first, then variable names
        for name in self.groups:
            yield name
        for name in self.variables:
            yield name
    def __len__(self):
        return len(self.variables) + len(self.groups)
    @property
    def parent(self):
        """The parent group (``None`` only for the root ``File``)."""
        return self._parent
    def flush(self):
        # delegate to the root File
        self._root.flush()
    sync = flush
    @property
    def groups(self):
        """Read-only mapping of immediate subgroups."""
        return Frozen(self._groups)
    @property
    def variables(self):
        """Read-only mapping of variables in this group."""
        return Frozen(self._variables)
    @property
    def dims(self):
        """Read-only mapping of dimensions in this group."""
        return Frozen(self._dimensions)
    @property
    def attrs(self):
        """Attribute mapping of this group, with netCDF dtype validation."""
        return Attributes(self._h5group.attrs, self._root._check_valid_netcdf_dtype)
    # display name used in __repr__; subclasses override this
    _cls_name = "h5netcdf.Group"
    def _repr_body(self):
        # shared by Group.__repr__ and File.__repr__
        return (
            ["Dimensions:"]
            + [
                " %s: %s"
                % (
                    k,
                    ("Unlimited (current: %s)" % self._dimensions[k].size)
                    if v is None
                    else v,
                )
                for k, v in self.dimensions.items()
            ]
            + ["Groups:"]
            + [" %s" % g for g in self.groups]
            + ["Variables:"]
            + [
                " %s: %r %s" % (k, v.dimensions, v.dtype)
                for k, v in self.variables.items()
            ]
            + ["Attributes:"]
            + [" %s: %r" % (k, v) for k, v in self.attrs.items()]
        )
    def __repr__(self):
        if self._root._closed:
            return "<Closed %s>" % self._cls_name
        header = "<%s %r (%s members)>" % (self._cls_name, self.name, len(self))
        return "\n".join([header] + self._repr_body())
    def resize_dimension(self, dim, size):
        """Resize a dimension to a certain size.
        It will pad with the underlying HDF5 data sets' fill values (usually
        zero) where necessary.
        """
        self._dimensions[dim]._resize(size)
class File(Group):
    """The root group of an open netCDF4/HDF5 file (new API)."""
    def __init__(
        self, path, mode=None, invalid_netcdf=False, phony_dims=None, **kwargs
    ):
        """NetCDF4 file constructor.
        Parameters
        ----------
        path: path-like
            Location of the netCDF4 file to be accessed, or an URL
            (``http://``/``https://``/``hdf5://``, requires h5pyd), or a
            file-like object.
        mode: "r", "r+", "a", "w"
            A valid file access mode. See the ``h5py.File`` documentation
            for details. Defaults to "a" (with a FutureWarning).
        invalid_netcdf: bool
            Allow writing netCDF4 with data types and attributes that would
            otherwise not generate netCDF4 files that can be read by other
            applications. See
            https://github.com/h5netcdf/h5netcdf#invalid-netcdf-files
            for more details.
        phony_dims: 'sort', 'access'
            See:
            https://github.com/h5netcdf/h5netcdf#datasets-with-missing-dimension-scales
        **kwargs:
            Additional keyword arguments to be passed to the ``h5py.File``
            constructor.
        Notes
        -----
        In h5netcdf version 0.12.0 and earlier, order tracking was disabled in
        HDF5 file. As this is a requirement for the current netCDF4 standard,
        it has been enabled without deprecation as of version 0.13.0 [1]_.
        Datasets created with h5netcdf version 0.12.0 that are opened with
        newer versions of h5netcdf will continue to disable order tracking.
        .. [1] https://github.com/h5netcdf/h5netcdf/issues/128
        """
        # 2022/01/09
        # netCDF4 wants the track_order parameter to be true
        # through this might be getting relaxed in a more recent version of the
        # standard
        # https://github.com/Unidata/netcdf-c/issues/2054
        # https://github.com/h5netcdf/h5netcdf/issues/128
        # 2022/01/20: hmaarrfk
        # However, it was found that this causes issues with attrs and h5py
        # https://github.com/h5netcdf/h5netcdf/issues/136
        # https://github.com/h5py/h5py/issues/1385
        track_order = kwargs.pop("track_order", False)
        # When the issues with track_order in h5py are resolved, we
        # can consider uncommenting the code below
        # if not track_order:
        #     self._closed = True
        #     raise ValueError(
        #         f"track_order, if specified must be set to to True (got {track_order})"
        #         "to conform to the netCDF4 file format. Please see "
        #         "https://github.com/h5netcdf/h5netcdf/issues/130 "
        #         "for more details."
        #     )
        # Deprecating mode='a' in favor of mode='r'
        # If mode is None default to 'a' and issue a warning
        if mode is None:
            msg = (
                "Falling back to mode='a'. "
                "In future versions, mode will default to read-only. "
                "It is recommended to explicitly set mode='r' to prevent any unintended "
                "changes to the file."
            )
            warnings.warn(msg, FutureWarning, stacklevel=2)
            mode = "a"
        if version.parse(h5py.__version__) >= version.parse("3.0.0"):
            # only meaningful for h5py >= 3.0 (strings API change); resolved
            # to a concrete bool further below
            self.decode_vlen_strings = kwargs.pop("decode_vlen_strings", None)
        try:
            if isinstance(path, str):
                if path.startswith(("http://", "https://", "hdf5://")):
                    # remote file served via h5pyd (HSDS)
                    if no_h5pyd:
                        raise ImportError(
                            "No module named 'h5pyd'. h5pyd is required for "
                            "opening urls: {}".format(path)
                        )
                    try:
                        # probe whether the remote file already exists
                        with h5pyd.File(path, "r") as f: # noqa
                            pass
                        self._preexisting_file = True
                    except IOError:
                        self._preexisting_file = False
                    self._h5file = h5pyd.File(
                        path, mode, track_order=track_order, **kwargs
                    )
                else:
                    self._preexisting_file = os.path.exists(path) and mode != "w"
                    self._h5file = h5py.File(
                        path, mode, track_order=track_order, **kwargs
                    )
            else: # file-like object
                if version.parse(h5py.__version__) < version.parse("2.9.0"):
                    raise TypeError(
                        "h5py version ({}) must be greater than 2.9.0 to load "
                        "file-like objects.".format(h5py.__version__)
                    )
                else:
                    self._preexisting_file = mode in {"r", "r+", "a"}
                    self._h5file = h5py.File(
                        path, mode, track_order=track_order, **kwargs
                    )
        except Exception:
            # mark closed so __repr__/__del__ stay safe after a failed open
            self._closed = True
            raise
        else:
            self._closed = False
        self._mode = mode
        self._writable = mode != "r"
        self._root_ref = weakref.ref(self)
        self._h5path = "/"
        self.invalid_netcdf = invalid_netcdf
        # phony dimension handling
        self._phony_dims_mode = phony_dims
        if phony_dims is not None:
            self._phony_dim_count = 0
            if phony_dims not in ["sort", "access"]:
                raise ValueError(
                    "unknown value %r for phony_dims\n"
                    "Use phony_dims=%r for sorted naming, "
                    "phony_dims=%r for per access naming."
                    % (phony_dims, "sort", "access")
                )
        # string decoding
        if version.parse(h5py.__version__) >= version.parse("3.0.0"):
            if "legacy" in self._cls_name:
                # legacyapi always decodes and rejects the kwarg
                if self.decode_vlen_strings is not None:
                    msg = (
                        "'decode_vlen_strings' keyword argument is not allowed in h5netcdf "
                        "legacy API."
                    )
                    raise TypeError(msg)
                self.decode_vlen_strings = True
            else:
                if self.decode_vlen_strings is None:
                    msg = (
                        "String decoding changed with h5py >= 3.0. "
                        "See https://docs.h5py.org/en/latest/strings.html and "
                        "https://github.com/h5netcdf/h5netcdf/issues/132 for more details. "
                        "Currently backwards compatibility with h5py < 3.0 is kept by "
                        "decoding vlen strings per default. This will change in future "
                        "versions for consistency with h5py >= 3.0. To silence this "
                        "warning set kwarg ``decode_vlen_strings=False`` which will "
                        "return Python bytes from variables containing vlen strings. Setting "
                        "``decode_vlen_strings=True`` forces vlen string decoding which returns "
                        "Python strings from variables containing vlen strings."
                    )
                    warnings.warn(msg, FutureWarning, stacklevel=2)
                    self.decode_vlen_strings = True
        self._max_dim_id = -1
        # This map keeps track of the HDF5 groups along the group hierarchy
        # (child groups chain onto it via ChainMap.new_child).
        self._all_h5groups = ChainMap(self._h5group)
        super(File, self).__init__(self, self._h5path)
        # get maximum dimension id and count of labeled dimensions
        if self._writable:
            self._max_dim_id = self._get_maximum_dimension_id()
        # initialize all groups to detect/create phony dimensions
        # mimics netcdf-c style naming
        if phony_dims == "sort":
            self._determine_phony_dimensions()
    def _get_maximum_dimension_id(self):
        """Return the largest ``_Netcdf4Dimid`` in the file (-1 if none)."""
        dimids = []
        def _dimids(name, obj):
            if obj.attrs.get("CLASS", None) == b"DIMENSION_SCALE":
                dimids.append(obj.attrs.get("_Netcdf4Dimid", -1))
        self._h5file.visititems(_dimids)
        return max(dimids) if dimids else -1
    def _determine_phony_dimensions(self):
        """Recursively instantiate all groups so phony dims get created."""
        def create_phony_dimensions(grp):
            for name in grp.groups:
                create_phony_dimensions(grp[name])
        create_phony_dimensions(self)
    def _check_valid_netcdf_dtype(self, dtype):
        """Raise/warn (via ``_invalid_netcdf_feature``) on non-netCDF dtypes."""
        dtype = np.dtype(dtype)
        if dtype == bool:
            description = "boolean"
        elif dtype == complex:
            description = "complex"
        elif h5py.check_dtype(enum=dtype) is not None:
            description = "enum"
        elif h5py.check_dtype(ref=dtype) is not None:
            description = "reference"
        elif h5py.check_dtype(vlen=dtype) not in {None, str, bytes}:
            description = "non-string variable length"
        else:
            description = None
        if description is not None:
            _invalid_netcdf_feature(
                "{} dtypes".format(description),
                self.invalid_netcdf,
            )
    @property
    def mode(self):
        """Access mode reported by the underlying h5py file."""
        return self._h5file.mode
    @property
    def filename(self):
        """Filename reported by the underlying h5py file."""
        return self._h5file.filename
    @property
    def parent(self):
        # the root group has no parent
        return None
    def flush(self):
        """Write the ``_NCProperties`` marker for new, valid-netCDF files."""
        if self._writable:
            if not self._preexisting_file and not self.invalid_netcdf:
                self.attrs._h5attrs["_NCProperties"] = np.array(
                    _NC_PROPERTIES,
                    dtype=h5py.string_dtype(
                        encoding="ascii", length=len(_NC_PROPERTIES)
                    ),
                )
    sync = flush
    def close(self):
        """Flush and close the underlying HDF5 file (idempotent)."""
        if not self._closed:
            self.flush()
            self._h5file.close()
            self._closed = True
    # best-effort close on garbage collection
    __del__ = close
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    # display name used in __repr__; subclasses override this
    _cls_name = "h5netcdf.File"
    def __repr__(self):
        if self._closed:
            return "<Closed %s>" % self._cls_name
        header = "<%s %r (mode %s)>" % (
            self._cls_name,
            self.filename.split("/")[-1],
            self.mode,
        )
        return "\n".join([header] + self._repr_body())
def _get_default_chunksizes(dimsizes, dtype):
    """Return a default chunk shape for a dataset of *dimsizes* and *dtype*.

    This is a modified version of h5py's default chunking heuristic
    https://github.com/h5py/h5py/blob/aa31f03bef99e5807d1d6381e36233325d944279/h5py/_hl/filters.py#L334-L389
    (published under BSD-3-Clause, included at licenses/H5PY_LICENSE.txt)
    See also https://github.com/h5py/h5py/issues/2029 for context.

    Unlimited dimensions (size 0) start from a guess of 1024 elements and
    are reduced first until the chunk byte size falls below the target.

    Raises
    ------
    ValueError
        If *dimsizes* is empty (scalar dataset) or contains non-finite values.
    """
    CHUNK_BASE = 16 * 1024  # Multiplier by which chunks are adjusted
    CHUNK_MIN = 8 * 1024  # Soft lower limit (8k)
    CHUNK_MAX = 1024 * 1024  # Hard upper limit (1M)
    type_size = np.dtype(dtype).itemsize
    is_unlimited = np.array([x == 0 for x in dimsizes])
    # For unlimited dimensions start with a guess of 1024
    chunks = np.array([x if x != 0 else 1024 for x in dimsizes], dtype="=f8")
    ndims = len(dimsizes)
    if ndims == 0:
        raise ValueError("Chunks not allowed for scalar datasets.")
    if not np.all(np.isfinite(chunks)):
        raise ValueError("Illegal value in chunk tuple")
    # Determine the optimal chunk size in bytes using a PyTables expression.
    # This is kept as a float.
    # NOTE: np.prod replaces np.product, which was deprecated in NumPy 1.25
    # and removed in NumPy 2.0.
    dset_size = np.prod(chunks[~is_unlimited]) * type_size
    target_size = CHUNK_BASE * (2 ** np.log10(dset_size / (1024 * 1024)))
    if target_size > CHUNK_MAX:
        target_size = CHUNK_MAX
    elif target_size < CHUNK_MIN:
        target_size = CHUNK_MIN
    i = 0
    while True:
        # Repeatedly loop over the axes, dividing them by 2.
        # Start by reducing unlimited axes first.
        # Stop when:
        # 1a. We're smaller than the target chunk size, OR
        # 1b. We're within 50% of the target chunk size, AND
        # 2. The chunk is smaller than the maximum chunk size
        idx = i % ndims
        chunk_bytes = np.prod(chunks) * type_size
        done = (
            chunk_bytes < target_size
            or abs(chunk_bytes - target_size) / target_size < 0.5
        ) and chunk_bytes < CHUNK_MAX
        if done:
            break
        if np.prod(chunks) == 1:
            break  # Element size larger than CHUNK_MAX
        nelem_unlim = np.prod(chunks[is_unlimited])
        if nelem_unlim == 1 or is_unlimited[idx]:
            chunks[idx] = np.ceil(chunks[idx] / 2.0)
        i += 1
    return tuple(int(x) for x in chunks)
| shoyer/h5netcdf | h5netcdf/core.py | Python | bsd-3-clause | 40,898 | [
"NetCDF"
] | 4ad17e209b1f85a70eb973b2683e6b5d721d69ad30e11f7d536fb20daaf9d364 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.